| patch (string, 17–31.2k chars) | y (int64, always 1) | oldf (string, 0–2.21M chars) | idx (int64, always 1) | id (int64, 4.29k–68.4k) | msg (string, 8–843 chars) | proj (categorical, 212 values) | lang (categorical, 9 values) |
|---|---|---|---|---|---|---|---|
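Each row below pairs a unified diff (`patch`) with the pre-change file (`oldf`) and the reviewer's comment (`msg`), tagged by project and language. As a minimal sketch, assuming the table is published as a Hugging Face dataset, it could be iterated as follows; the dataset path `org/code-review-rows` is a hypothetical placeholder:

```python
# Sketch only: "org/code-review-rows" is a hypothetical placeholder,
# not the actual location of this data.
from datasets import load_dataset

ds = load_dataset("org/code-review-rows", split="train")
for row in ds:
    # each row pairs a diff with the file it applied to and a review comment
    print(row["proj"], row["lang"], "-", row["msg"][:80])
```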
@@ -414,8 +414,8 @@ namespace Microsoft.DotNet.Build.Tasks
return Environment.GetEnvironmentVariable("TMPDIR");
else if (DirExists(Environment.GetEnvironmentVariable("TMP")))
return Environment.GetEnvironmentVariable("TMP");
- else if (DirExists("/home/DDITAdministrator/myagent/_work/_temp"))
- return "/home/DDITAdministrator/myagent/_work/_temp";
+ else if (DirExists(Path.Combine(Environment.GetEnvironmentVariable("HOME"), "myagent/_work/_temp")))
+ return Path.Combine(Environment.GetEnvironmentVariable("HOME"), "myagent/_work/_temp");
else
{
Log.LogMessage("No TEMP dir found."); | 1 | using Microsoft.Build.Framework;
using Newtonsoft.Json;
using System.Collections.Generic;
using System.IO;
using System.Linq;
using System;
using System.Runtime.InteropServices;
using System.Text.RegularExpressions;
using System.Diagnostics;
namespace Microsoft.DotNet.Build.Tasks
{
public class CleanupVSTSAgent : Microsoft.Build.Utilities.Task
{
public bool Clean { get; set; }
public bool Report { get; set; }
[Required]
public string AgentDirectory { get; set; }
[Required]
public double RetentionDays { get; set; }
public int? Retries { get; set; }
public int? SleepTimeInMilliseconds { get; set; }
public ITaskItem[] ProcessNamesToKill { get; set; }
private static readonly int s_DefaultRetries = 3;
private static readonly int s_DefaultSleepTime = 2000;
public override bool Execute()
{
KillStaleProcesses();
if (!Directory.Exists(AgentDirectory))
{
Log.LogMessage($"Agent directory specified: '{AgentDirectory}' does not exist.");
return false;
}
if (!Retries.HasValue)
{
Retries = s_DefaultRetries;
}
if (!SleepTimeInMilliseconds.HasValue)
{
SleepTimeInMilliseconds = s_DefaultSleepTime;
}
bool returnValue = true;
if (Report)
{
ReportDiskUsage();
}
if (Clean)
{
returnValue &= CleanupDirsAsync().Result;
// If report and clean are both 'true', then report disk usage both before and after cleanup.
if (Report)
{
Log.LogMessage("Disk usage after 'Clean'.");
ReportDiskUsage();
}
}
return returnValue;
}
private void KillStaleProcesses()
{
foreach (string imageName in ProcessNamesToKill.Select(t => t.ItemSpec))
{
Process[] allInstances = Process.GetProcessesByName(imageName);
foreach (Process proc in allInstances)
{
try
{
if (!proc.HasExited)
{
proc.Kill();
Log.LogMessage($"Killed process {imageName} ({proc.Id})");
}
}
catch (Exception e)
{
Log.LogMessage($"Hit {e.GetType().ToString()} trying to kill process {imageName} ({proc.Id})");
}
}
}
}
private void ReportDiskUsage()
{
string lastDirectoryChecked = AgentDirectory;
try
{
// Report disk usage for agent directory
DriveInfo driveInfo = ReportCommonDiskUsage("Agent", AgentDirectory);
var workingDirectories = Directory.GetDirectories(Path.Combine(AgentDirectory, "_work"));
var totalWorkingDirectories = workingDirectories != null ? workingDirectories.Length : 0;
Log.LogMessage($" Total agent working directories: {totalWorkingDirectories}");
if (totalWorkingDirectories > 0)
{
int nameLength = 0;
foreach (string directoryName in workingDirectories)
{
nameLength = directoryName.Length > nameLength ? directoryName.Length : nameLength;
}
int sizeLength = string.Format("{0:N0}", driveInfo?.TotalSize).Length;
string columnFormat = " {0,-" + nameLength.ToString() + "} {1," + sizeLength.ToString() + ":N0} {2}";
Log.LogMessage(string.Format(columnFormat, "Folder name", "Size (bytes)", "Last Modified DateTime"));
foreach (var workingDirectory in workingDirectories)
{
lastDirectoryChecked = workingDirectory;
Tuple<long, DateTime> directoryAttributes = GetDirectoryAttributes(workingDirectory);
Log.LogMessage(string.Format(columnFormat, workingDirectory, directoryAttributes.Item1, directoryAttributes.Item2));
}
}
// Report disk usage for TEMP directory
ReportCommonDiskUsage("TEMP", GetTEMPDirectory());
// Report disk usage for Nuget Cache directory
List<string> nugetCacheDirs = GetNugetCacheDirectories();
if (nugetCacheDirs.Count == 0)
{
Log.LogMessage($"Disk usage report for Nuget cache directories is not available, because those directories do NOT exist.");
}
foreach (string nugetCacheDir in nugetCacheDirs)
{
ReportCommonDiskUsage("Nuget cache", nugetCacheDir);
}
}
catch (PathTooLongException)
{
Log.LogWarning("Hit PathTooLongException attempting to list info about agent directory. There are likely files which cannot be cleaned up on the agent.");
if (!string.IsNullOrEmpty(lastDirectoryChecked))
{
Log.LogWarning($"Last directory checked : {lastDirectoryChecked} (likely the first inaccessible directory, alphabetically) ");
}
}
catch (UnauthorizedAccessException)
{
Log.LogWarning("Hit UnauthorizedAccessException attempting to list info about agent directory. There are likely files which cannot be cleaned up on the agent.");
if (!string.IsNullOrEmpty(lastDirectoryChecked))
{
Log.LogWarning($"Last directory checked : {lastDirectoryChecked} (likely the first inaccessible directory, alphabetically) ");
}
}
}
private DriveInfo ReportCommonDiskUsage(string dirType, string directory)
{
try
{
if (String.IsNullOrEmpty(directory))
{
Log.LogMessage($"Disk usage report for {dirType} directory is not available, because the directory does NOT exist.");
return null;
}
if (!Directory.Exists(directory))
{
Log.LogMessage($"Disk usage report for {dirType} directory is not available, because the directory {directory} does NOT exist.");
return null;
}
string drive = Path.GetPathRoot(directory);
if (String.IsNullOrEmpty(drive))
{
Log.LogMessage($"Can't parse the drive correctly from directory {directory} because it's null or empty.");
return null;
}
DriveInfo driveInfo = new DriveInfo(drive);
Log.LogMessage($"Disk usage report for {dirType} directory");
Log.LogMessage($" {dirType} directory: {directory}");
Log.LogMessage($" Drive letter: {drive}");
Log.LogMessage($" Total disk size: {string.Format("{0:N0}", driveInfo.TotalSize)} bytes");
Log.LogMessage($" Total disk free space: {string.Format("{0:N0}", driveInfo.TotalFreeSpace)} bytes");
Log.LogMessage($" {dirType} directory info");
Log.LogMessage($" Total size of {dirType} directory: {string.Format("{0:N0}", GetDirectoryAttributes(directory).Item1)} bytes");
return driveInfo;
}
catch (PathTooLongException)
{
Log.LogWarning($"Hit PathTooLongException attempting to list info about directory {directory}. There are likely files which cannot be cleaned up on the agent.");
return null;
}
catch (UnauthorizedAccessException)
{
Log.LogWarning($"Hit UnauthorizedAccessException attempting to list info about directory {directory}. There are likely files which cannot be cleaned up on the agent.");
return null;
}
}
private Tuple<long, DateTime> GetDirectoryAttributes(string directory)
{
DirectoryInfo directoryInfo = new DirectoryInfo(directory);
FileInfo[] fileInfos = directoryInfo.GetFiles();
long totalSize = 0;
DateTime lastModifiedDateTime = directoryInfo.LastWriteTime;
foreach (FileInfo fileInfo in fileInfos)
{
totalSize += fileInfo.Length;
lastModifiedDateTime = fileInfo.LastWriteTime > lastModifiedDateTime ? fileInfo.LastWriteTime : lastModifiedDateTime;
}
string[] directories = Directory.GetDirectories(directory);
foreach (string dir in directories)
{
Tuple<long, DateTime> directoryAttributes = GetDirectoryAttributes(dir);
totalSize += directoryAttributes.Item1;
lastModifiedDateTime = directoryAttributes.Item2 > lastModifiedDateTime ? directoryAttributes.Item2 : lastModifiedDateTime;
}
return Tuple.Create(totalSize, lastModifiedDateTime);
}
private async System.Threading.Tasks.Task<bool> CleanupDirsAsync()
{
bool returnStatus = true;
DateTime now = DateTime.Now;
// Cleanup the agents that the VSTS agent is tracking
string[] sourceFolderJsons = Directory.GetFiles(Path.Combine(AgentDirectory, "_work", "SourceRootMapping"), "SourceFolder.json", SearchOption.AllDirectories);
HashSet<string> knownDirectories = new HashSet<string>();
List<System.Threading.Tasks.Task<bool>> cleanupTasks = new List<System.Threading.Tasks.Task<bool>>();
Log.LogMessage($"Found {sourceFolderJsons.Length} known agent working directories. ");
foreach (var sourceFolderJson in sourceFolderJsons)
{
Log.LogMessage($"Examining {sourceFolderJson} ...");
Tuple<string, string, DateTime> agentInfo = await GetAgentInfoAsync(sourceFolderJson);
string workDirectory = Path.Combine(AgentDirectory, "_work", agentInfo.Item2);
knownDirectories.Add(workDirectory);
TimeSpan span = new TimeSpan(now.Ticks - agentInfo.Item3.Ticks);
if (span.TotalDays > RetentionDays)
{
cleanupTasks.Add(CleanupAgentAsync(workDirectory, Path.GetDirectoryName(agentInfo.Item1)));
}
else
{
Log.LogMessage($"Skipping cleanup for {sourceFolderJson}, it is newer than {RetentionDays} days old, last run date is '{agentInfo.Item3.ToString()}'");
}
}
System.Threading.Tasks.Task.WaitAll(cleanupTasks.ToArray());
foreach (var cleanupTask in cleanupTasks)
{
returnStatus &= cleanupTask.Result;
}
// Attempt to cleanup any working folders which the VSTS agent doesn't know about.
Log.LogMessage("Looking for additional '_work' directories which are unknown to the agent.");
cleanupTasks.Clear();
Regex workingDirectoryRegex = new Regex(@"\\\d+$");
var workingDirectories = Directory.GetDirectories(Path.Combine(AgentDirectory, "_work"), "*", SearchOption.TopDirectoryOnly).Where(w => workingDirectoryRegex.IsMatch(w));
foreach (var workingDirectory in workingDirectories)
{
if (!knownDirectories.Contains(workingDirectory))
{
cleanupTasks.Add(CleanupDirectoryAsync(workingDirectory));
}
}
System.Threading.Tasks.Task.WaitAll(cleanupTasks.ToArray());
foreach (var cleanupTask in cleanupTasks)
{
returnStatus &= cleanupTask.Result;
}
// Cleanup the TEMP folder
string tempDir = GetTEMPDirectory();
Log.LogMessage($"Clean up the TEMP folder {tempDir}.");
System.Threading.Tasks.Task.WaitAll(CleanupDirectoryAsync(tempDir));
// Cleanup the Nuget Cache folders
List<string> nugetCacheDirs = GetNugetCacheDirectories();
Log.LogMessage($"Clean up the Nuget Cache folders.");
if (nugetCacheDirs.Count == 0)
{
Log.LogMessage($"Not necessary to clean up Nuget cache directories, as they do NOT exist.");
return returnStatus;
}
cleanupTasks.Clear();
foreach (string nugetCacheDir in nugetCacheDirs)
cleanupTasks.Add(CleanupDirectoryAsync(nugetCacheDir));
System.Threading.Tasks.Task.WaitAll(cleanupTasks.ToArray());
return returnStatus;
}
private async System.Threading.Tasks.Task<bool> CleanupAgentAsync(string workDirectory, string sourceFolderJson)
{
bool returnStatus = await CleanupDirectoryAsync(workDirectory);
returnStatus &= await CleanupDirectoryAsync(sourceFolderJson).ConfigureAwait(false);
return returnStatus;
}
private async System.Threading.Tasks.Task<bool> CleanupDirectoryAsync(string directory, int attempts = 0)
{
try
{
if (Directory.Exists(directory))
{
Log.LogMessage($"Attempting to cleanup {directory} ... ");
// Unlike OSX and Linux, Windows has a hard limit of 260 chars on paths.
// Some build definitions leave paths this long behind. It's unusual,
// but robocopy has been on Windows by default since XP and understands
// how to stomp on long paths, so we'll use it to clean directories on Windows first.
if (RuntimeInformation.IsOSPlatform(OSPlatform.Windows))
{
Log.LogMessage($"Preventing PathTooLongException by using robocopy to delete {directory} ");
string emptyFolderToMirror = GetUniqueEmptyFolder();
Process.Start(new ProcessStartInfo("robocopy.exe", $"/mir {emptyFolderToMirror} {directory} /NJH /NJS /NP") { UseShellExecute = false }).WaitForExit();
Directory.Delete(emptyFolderToMirror);
}
Directory.Delete(directory, true);
Log.LogMessage("Success");
}
else
{
Log.LogMessage($"Specified directory, {directory}, does not exist");
}
return true;
}
catch (Exception e)
{
attempts++;
Log.LogMessage($"Failed in cleanup attempt... {Retries - attempts} retries left.");
Log.LogMessage($"{e.GetType().ToString()} - {e.Message}");
Log.LogMessage(e.StackTrace);
if (attempts < Retries)
{
Log.LogMessage($"Will retry again in {SleepTimeInMilliseconds} ms");
await System.Threading.Tasks.Task.Delay(SleepTimeInMilliseconds.Value);
return await CleanupDirectoryAsync(directory, attempts).ConfigureAwait(false);
}
}
Log.LogMessage("Failed to cleanup.");
return false;
}
private static string GetUniqueEmptyFolder()
{
string uniquePath;
do
{
Guid guid = Guid.NewGuid();
string uniqueSubFolderName = guid.ToString();
uniquePath = Path.GetTempPath() + uniqueSubFolderName;
}
while (Directory.Exists(uniquePath));
Directory.CreateDirectory(uniquePath);
return uniquePath;
}
private async System.Threading.Tasks.Task<Tuple<string, string, DateTime>> GetAgentInfoAsync(string sourceFolderJson)
{
Regex getValueRegex = new Regex(".*\": \"(?<value>[^\"]+)\"");
DateTime lastRunOn = DateTime.Now;
string agentBuildDirectory = null;
using (Stream stream = File.OpenRead(sourceFolderJson))
using (StreamReader reader = new StreamReader(stream))
{
while (!reader.EndOfStream)
{
string line = await reader.ReadLineAsync();
if (line.Contains("lastRunOn"))
{
lastRunOn = DateTime.Parse(getValueRegex.Match(line).Groups["value"].Value.ToString());
}
else if (line.Contains("agent_builddirectory"))
{
agentBuildDirectory = getValueRegex.Match(line).Groups["value"].Value.ToString();
}
}
}
return new Tuple<string, string, DateTime>(sourceFolderJson, agentBuildDirectory, lastRunOn);
}
private string GetTEMPDirectory()
{
if (RuntimeInformation.IsOSPlatform(OSPlatform.Windows))
{
if (DirExists(Environment.GetEnvironmentVariable("TEMP")))
return Environment.GetEnvironmentVariable("TEMP");
else if (DirExists(Environment.GetEnvironmentVariable("TMP")))
return Environment.GetEnvironmentVariable("TMP");
else
{
Log.LogMessage("No TEMP dir found.");
return null;
}
}
else
{
if (DirExists(Environment.GetEnvironmentVariable("TMPDIR")))
return Environment.GetEnvironmentVariable("TMPDIR");
else if (DirExists(Environment.GetEnvironmentVariable("TMP")))
return Environment.GetEnvironmentVariable("TMP");
else if (DirExists("/home/DDITAdministrator/myagent/_work/_temp"))
return "/home/DDITAdministrator/myagent/_work/_temp";
else
{
Log.LogMessage("No TEMP dir found.");
return null;
}
}
}
private bool DirExists(string directory)
{
if (!Directory.Exists(directory))
{
Log.LogMessage($"TEMP dir: {directory} does not exist.");
return false;
}
return true;
}
private List<string> GetNugetCacheDirectories()
{
List<string> nugetCacheDirs = new List<string>();
if (RuntimeInformation.IsOSPlatform(OSPlatform.Windows))
{
AddDirToListIfExist(nugetCacheDirs, Path.Combine(Environment.GetEnvironmentVariable("LocalAppData"), "NuGet"));
AddDirToListIfExist(nugetCacheDirs, Path.Combine(Environment.GetEnvironmentVariable("UserProfile"), ".nuget\\packages"));
}
else // OSX or Linux
{
AddDirToListIfExist(nugetCacheDirs, "/home/DDITAdministrator/.local/share/NuGet");
AddDirToListIfExist(nugetCacheDirs, "/home/DDITAdministrator/.nuget");
}
return nugetCacheDirs;
}
private void AddDirToListIfExist(List<string> dirs, string directory)
{
if (Directory.Exists(directory))
{
dirs.Add(directory);
Log.LogMessage($"Successfully add directory: {directory} to the list.");
}
else
{
Log.LogMessage($"Fail to add directory: {directory} to the list because it doesn't exist.");
}
}
}
}
| 1 | 12,756 | If all you're doing is checking for the existence of a directory as your "temp" directory, why does it matter what OS you're on? | dotnet-buildtools | .cs |
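The reviewer's point suggests a platform-agnostic lookup. A minimal sketch, assuming the only requirement is that the candidate directory exists; the probe order (TEMP, TMP, TMPDIR) is an illustrative assumption, not the project's stated policy:

```csharp
// Hypothetical OS-agnostic rewrite of GetTEMPDirectory(); the candidate
// order below is an illustrative assumption.
private string GetTEMPDirectory()
{
    foreach (string name in new[] { "TEMP", "TMP", "TMPDIR" })
    {
        string dir = Environment.GetEnvironmentVariable(name);
        if (DirExists(dir)) // DirExists logs and returns false for null/missing paths
            return dir;
    }
    Log.LogMessage("No TEMP dir found.");
    return null;
}
```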
@@ -60,10 +60,11 @@
package server
import (
- "fmt"
+ v2 "github.com/algorand/go-algorand/daemon/algod/api/server/v2"
+ "github.com/algorand/go-algorand/daemon/algod/api/server/v2/generated"
"net/http"
- "github.com/gorilla/mux"
+ "github.com/labstack/echo/v4"
"github.com/algorand/go-algorand/daemon/algod/api/server/common"
"github.com/algorand/go-algorand/daemon/algod/api/server/lib" | 1 | // Copyright (C) 2019-2020 Algorand, Inc.
// This file is part of go-algorand
//
// go-algorand is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as
// published by the Free Software Foundation, either version 3 of the
// License, or (at your option) any later version.
//
// go-algorand is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
// Package server Algod REST API.
//
// API Endpoint for AlgoD Operations.
//
//
// Schemes: http
// Host: localhost
// BasePath: /
// Version: 0.0.1
// License:
// Contact: [email protected]
//
// Consumes:
// - application/json
//
// Produces:
// - application/json
//
// Security:
// - api_key:
//
// SecurityDefinitions:
// api_key:
// type: apiKey
// name: X-Algo-API-Token
// in: header
// description: >-
// Generated header parameter. This token can be generated using the Goal command line tool. Example value
// ='b7e384d0317b8050ce45900a94a1931e28540e1f69b2d242b424659c341b4697'
// required: true
// x-example: b7e384d0317b8050ce45900a94a1931e28540e1f69b2d242b424659c341b4697
//
// swagger:meta
//---
// Currently, server implementation annotations serve as the API ground truth. From that,
// we use go-swagger to generate a swagger spec.
//
// Autogenerate the swagger json - automatically run by the 'make build' step.
// Base path must be a fully specified package name (else, it seems that swagger feeds a relative path to
// loader.Config.Import(), and that breaks the vendor directory if the source is symlinked from elsewhere)
//go:generate swagger generate spec -o="../swagger.json"
//go:generate swagger validate ../swagger.json --stop-on-error
//go:generate ./lib/bundle_swagger_json.sh
package server
import (
"fmt"
"net/http"
"github.com/gorilla/mux"
"github.com/algorand/go-algorand/daemon/algod/api/server/common"
"github.com/algorand/go-algorand/daemon/algod/api/server/lib"
"github.com/algorand/go-algorand/daemon/algod/api/server/lib/middlewares"
"github.com/algorand/go-algorand/daemon/algod/api/server/v1/routes"
"github.com/algorand/go-algorand/logging"
"github.com/algorand/go-algorand/node"
)
const (
apiV1Tag = "v1"
debugRouteName = "debug"
pprofEndpointPrefix = "/debug/pprof/"
urlAuthEndpointPrefix = "/urlAuth/{apiToken:[0-9a-f]+}"
)
// wrapCtx is used to pass common context to each request without using any
// global variables.
func wrapCtx(ctx lib.ReqContext, handler func(lib.ReqContext, http.ResponseWriter, *http.Request)) func(http.ResponseWriter, *http.Request) {
return func(w http.ResponseWriter, r *http.Request) {
handler(ctx, w, r)
}
}
// registerHandlers registers a set of Routes on [router]. If [prefix] is not empty, it
// registers the routes on a new sub-router under [prefix].
func registerHandlers(router *mux.Router, prefix string, routes lib.Routes, ctx lib.ReqContext) {
if prefix != "" {
router = router.PathPrefix(fmt.Sprintf("/%s", prefix)).Subrouter()
}
for _, route := range routes {
r := router.NewRoute()
if route.Path != "" {
r = r.Path(route.Path)
}
r = r.Name(route.Name)
r = r.Methods(route.Method)
r.HandlerFunc(wrapCtx(ctx, route.HandlerFunc))
}
}
// NewRouter builds and returns a new router from routes
func NewRouter(logger logging.Logger, node *node.AlgorandFullNode, shutdown <-chan struct{}, apiToken string) *mux.Router {
router := mux.NewRouter().StrictSlash(true)
// Middleware
router.Use(middlewares.Logger(logger))
router.Use(middlewares.Auth(logger, apiToken))
router.Use(middlewares.CORS)
// Request Context
ctx := lib.ReqContext{Node: node, Log: logger, Shutdown: shutdown}
// Route pprof requests
if node.Config().EnableProfiler {
// Registers /debug/pprof handler under root path and under /urlAuth path
// to support header or url-provided token.
router.PathPrefix(pprofEndpointPrefix).Handler(http.DefaultServeMux)
urlAuthRouter := router.PathPrefix(urlAuthEndpointPrefix)
urlAuthRouter.PathPrefix(pprofEndpointPrefix).Handler(http.DefaultServeMux).Name(debugRouteName)
}
// Registering common routes
registerHandlers(router, "", common.Routes, ctx)
// Registering v1 routes
registerHandlers(router, apiV1Tag, routes.Routes, ctx)
return router
}
| 1 | 37,801 | nit- would be cleaner to import it once | algorand-go-algorand | go |
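The duplication the nit refers to is not visible in this hunk; as a sketch, the conventional resolution is a single gofmt-grouped import block in which each package path appears exactly once (paths taken from the diff and file above):

```go
// Sketch of one grouped import block; whether this matches the
// reviewer's intent is an assumption.
import (
	"net/http"

	"github.com/labstack/echo/v4"

	"github.com/algorand/go-algorand/daemon/algod/api/server/common"
	"github.com/algorand/go-algorand/daemon/algod/api/server/lib"
	v2 "github.com/algorand/go-algorand/daemon/algod/api/server/v2"
	"github.com/algorand/go-algorand/daemon/algod/api/server/v2/generated"
)
```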
@@ -159,6 +159,14 @@ class GenericProxyHandler(BaseHTTPRequestHandler):
# allow pre-flight CORS headers by default
if 'Access-Control-Allow-Origin' not in response.headers:
self.send_header('Access-Control-Allow-Origin', '*')
+ if 'Access-Control-Allow-Headers' not in response.headers:
+ self.send_header('Access-Control-Allow-Headers', ','.join([
+ 'authorization',
+ 'x-amz-content-sha256',
+ 'x-amz-date',
+ 'x-amz-security-token',
+ 'x-amz-user-agent'
+ ])
self.end_headers()
if len(response.content): | 1 | from six.moves.BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer
import requests
import os
import sys
import json
import traceback
import logging
import ssl
from requests.structures import CaseInsensitiveDict
from requests.models import Response, Request
from six import iteritems, string_types
from six.moves.socketserver import ThreadingMixIn
from six.moves.urllib.parse import urlparse
from localstack.config import DEFAULT_ENCODING, TMP_FOLDER, USE_SSL
from localstack.utils.common import FuncThread, generate_ssl_cert
from localstack.utils.compat import bytes_
QUIET = False
# path for test certificate
SERVER_CERT_PEM_FILE = '%s/server.test.pem' % (TMP_FOLDER)
# set up logger
LOGGER = logging.getLogger(__name__)
class ThreadedHTTPServer(ThreadingMixIn, HTTPServer):
"""Handle each request in a separate thread."""
class GenericProxyHandler(BaseHTTPRequestHandler):
def __init__(self, request, client_address, server):
self.request = request
self.client_address = client_address
self.server = server
self.proxy = server.my_object
self.data_bytes = None
self.protocol_version = self.proxy.protocol_version
BaseHTTPRequestHandler.__init__(self, request, client_address, server)
def parse_request(self):
result = BaseHTTPRequestHandler.parse_request(self)
if not result:
return result
if sys.version_info[0] >= 3:
return result
# Required fix for Python 2 (otherwise S3 uploads are hanging), based on the Python 3 code:
# https://sourcecodebrowser.com/python3.2/3.2.3/http_2server_8py_source.html#l00332
expect = self.headers.get('Expect', "")
if (expect.lower() == "100-continue" and
self.protocol_version >= "HTTP/1.1" and
self.request_version >= "HTTP/1.1"):
if self.request_version != 'HTTP/0.9':
self.wfile.write(("%s %d %s\r\n" %
(self.protocol_version, 100, 'Continue')).encode('latin1', 'strict'))
self.end_headers()
return result
def do_GET(self):
self.method = requests.get
content_length = self.headers.get('Content-Length')
if content_length:
self.data_bytes = self.rfile.read(int(content_length))
self.forward('GET')
def do_PUT(self):
self.data_bytes = self.rfile.read(int(self.headers['Content-Length']))
self.method = requests.put
self.forward('PUT')
def do_POST(self):
self.data_bytes = self.rfile.read(int(self.headers['Content-Length']))
self.method = requests.post
self.forward('POST')
def do_DELETE(self):
self.method = requests.delete
self.forward('DELETE')
def do_HEAD(self):
self.method = requests.head
self.forward('HEAD')
def do_PATCH(self):
self.method = requests.patch
self.data_bytes = self.rfile.read(int(self.headers['Content-Length']))
self.forward('PATCH')
def do_OPTIONS(self):
self.method = requests.options
self.forward('OPTIONS')
def forward(self, method):
path = self.path
if '://' in path:
path = '/' + path.split('://', 1)[1].split('/', 1)[1]
proxy_url = 'http://%s%s' % (self.proxy.forward_host, path)
target_url = self.path
if '://' not in target_url:
target_url = 'http://%s%s' % (self.proxy.forward_host, target_url)
data = None
if method in ['POST', 'PUT', 'PATCH']:
data_string = self.data_bytes
try:
if not isinstance(data_string, string_types):
data_string = data_string.decode(DEFAULT_ENCODING)
data = json.loads(data_string)
except Exception as e:
# unable to parse JSON, fallback to verbatim string/bytes
data = data_string
forward_headers = CaseInsensitiveDict(self.headers)
# update original "Host" header (moto s3 relies on this behavior)
if not forward_headers.get('Host'):
forward_headers['host'] = urlparse(target_url).netloc
if 'localhost.atlassian.io' in forward_headers.get('Host'):
forward_headers['host'] = 'localhost'
try:
response = None
modified_request = None
# update listener (pre-invocation)
if self.proxy.update_listener:
listener_result = self.proxy.update_listener(method=method, path=path,
data=data, headers=forward_headers, return_forward_info=True)
if isinstance(listener_result, Response):
response = listener_result
elif isinstance(listener_result, Request):
modified_request = listener_result
data = modified_request.data
forward_headers = modified_request.headers
elif listener_result is not True:
# get status code from listener result, or fall back to 503 (Service Unavailable)
code = listener_result if isinstance(listener_result, int) else 503
self.send_response(code)
self.end_headers()
return
if response is None:
if modified_request:
response = self.method(proxy_url, data=modified_request.data,
headers=modified_request.headers)
else:
response = self.method(proxy_url, data=self.data_bytes,
headers=forward_headers)
# update listener (post-invocation)
if self.proxy.update_listener:
updated_response = self.proxy.update_listener(method=method, path=path,
data=data, headers=forward_headers, response=response)
if isinstance(updated_response, Response):
response = updated_response
# copy headers and return response
self.send_response(response.status_code)
for header_key, header_value in iteritems(response.headers):
if header_key.lower() != 'Content-Length'.lower():
self.send_header(header_key, header_value)
self.send_header('Content-Length', '%s' % len(response.content))
# allow pre-flight CORS headers by default
if 'Access-Control-Allow-Origin' not in response.headers:
self.send_header('Access-Control-Allow-Origin', '*')
self.end_headers()
if len(response.content):
self.wfile.write(bytes_(response.content))
self.wfile.flush()
except Exception as e:
if not self.proxy.quiet or 'ConnectionRefusedError' not in str(traceback.format_exc()):
LOGGER.error("Error forwarding request: %s %s" % (e, traceback.format_exc()))
self.send_response(502) # bad gateway
self.end_headers()
def log_message(self, format, *args):
return
class GenericProxy(FuncThread):
def __init__(self, port, forward_host=None, ssl=False, update_listener=None, quiet=False, params={}):
FuncThread.__init__(self, self.run_cmd, params, quiet=quiet)
self.httpd = None
self.port = port
self.ssl = ssl
self.quiet = quiet
self.forward_host = forward_host
self.update_listener = update_listener
self.server_stopped = False
# Required to enable 'Connection: keep-alive' for S3 uploads
self.protocol_version = params.get('protocol_version') or 'HTTP/1.1'
def run_cmd(self, params):
try:
self.httpd = ThreadedHTTPServer(("", self.port), GenericProxyHandler)
if self.ssl:
# make sure we have a cert generated
combined_file, cert_file_name, key_file_name = GenericProxy.create_ssl_cert()
self.httpd.socket = ssl.wrap_socket(self.httpd.socket,
server_side=True, certfile=combined_file)
self.httpd.my_object = self
self.httpd.serve_forever()
except Exception as e:
if not self.quiet or not self.server_stopped:
LOGGER.error('Exception running proxy on port %s: %s' % (self.port, traceback.format_exc()))
def stop(self, quiet=False):
self.quiet = quiet
if self.httpd:
self.httpd.server_close()
self.server_stopped = True
@classmethod
def create_ssl_cert(cls, random=True):
return generate_ssl_cert(SERVER_CERT_PEM_FILE, random=random)
@classmethod
def get_flask_ssl_context(cls):
if USE_SSL:
combined_file, cert_file_name, key_file_name = cls.create_ssl_cert()
return (cert_file_name, key_file_name)
return None
| 1 | 9,032 | Looks like there is a closing parenthesis `)` missing here. | localstack-localstack | py |
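For reference, a sketch of the same hunk with the missing parenthesis restored:

```python
# The diff's send_header call with the closing ')' added after the join.
if 'Access-Control-Allow-Headers' not in response.headers:
    self.send_header('Access-Control-Allow-Headers', ','.join([
        'authorization',
        'x-amz-content-sha256',
        'x-amz-date',
        'x-amz-security-token',
        'x-amz-user-agent'
    ]))
```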
@@ -1684,8 +1684,8 @@ void PairTlsph::coeff(int narg, char **arg) {
} // end energy release rate failure criterion
else {
- sprintf(str, "unknown *KEYWORD: %s", arg[ioffset]);
- error->all(FLERR, str);
+ snprintf(str,128,"unknown *KEYWORD: %s", arg[ioffset]);
+ error->all(FLERR, str);
}
} | 1 | /* ----------------------------------------------------------------------
*
* *** Smooth Mach Dynamics ***
*
* This file is part of the USER-SMD package for LAMMPS.
* Copyright (2014) Georg C. Ganzenmueller, [email protected]
* Fraunhofer Ernst-Mach Institute for High-Speed Dynamics, EMI,
* Eckerstrasse 4, D-79104 Freiburg i.Br, Germany.
*
* ----------------------------------------------------------------------- */
/* ----------------------------------------------------------------------
LAMMPS - Large-scale Atomic/Molecular Massively Parallel Simulator
http://lammps.sandia.gov, Sandia National Laboratories
Steve Plimpton, [email protected]
Copyright (2003) Sandia Corporation. Under the terms of Contract
DE-AC04-94AL85000 with Sandia Corporation, the U.S. Government retains
certain rights in this software. This software is distributed under
the GNU General Public License.
See the README file in the top-level LAMMPS directory.
------------------------------------------------------------------------- */
#include <cmath>
#include <cfloat>
#include <cstdlib>
#include <cstring>
#include <cstdio>
#include <iostream>
#include <map>
#include <Eigen/Eigen>
#include "pair_smd_tlsph.h"
#include "fix_smd_tlsph_reference_configuration.h"
#include "atom.h"
#include "domain.h"
#include "group.h"
#include "force.h"
#include "update.h"
#include "modify.h"
#include "fix.h"
#include "comm.h"
#include "neighbor.h"
#include "neigh_list.h"
#include "neigh_request.h"
#include "memory.h"
#include "error.h"
#include "math_special.h"
#include "update.h"
#include "smd_material_models.h"
#include "smd_kernels.h"
#include "smd_math.h"
using namespace SMD_Kernels;
using namespace Eigen;
using namespace std;
using namespace LAMMPS_NS;
using namespace SMD_Math;
#define JAUMANN false
#define DETF_MIN 0.2 // maximum compression deformation allowed
#define DETF_MAX 2.0 // maximum tension deformation allowed
#define TLSPH_DEBUG 0
#define PLASTIC_STRAIN_AVERAGE_WINDOW 100.0
/* ---------------------------------------------------------------------- */
PairTlsph::PairTlsph(LAMMPS *lmp) :
Pair(lmp) {
onerad_dynamic = onerad_frozen = maxrad_dynamic = maxrad_frozen = NULL;
failureModel = NULL;
strengthModel = eos = NULL;
nmax = 0; // make sure no atom on this proc such that initial memory allocation is correct
Fdot = Fincr = K = PK1 = NULL;
R = FincrInv = W = D = NULL;
detF = NULL;
smoothVelDifference = NULL;
numNeighsRefConfig = NULL;
CauchyStress = NULL;
hourglass_error = NULL;
Lookup = NULL;
particle_dt = NULL;
updateFlag = 0;
first = true;
dtCFL = 0.0; // initialize dtCFL so it is set to safe value if extracted on zero-th timestep
comm_forward = 22; // this pair style communicates 22 doubles to ghost atoms: PK1 tensor + F tensor + additional scalars
fix_tlsph_reference_configuration = NULL;
cut_comm = MAX(neighbor->cutneighmax, comm->cutghostuser); // cutoff radius within which ghost atoms are communicated.
}
/* ---------------------------------------------------------------------- */
PairTlsph::~PairTlsph() {
//printf("in PairTlsph::~PairTlsph()\n");
if (allocated) {
memory->destroy(setflag);
memory->destroy(cutsq);
memory->destroy(strengthModel);
memory->destroy(eos);
memory->destroy(Lookup);
delete[] onerad_dynamic;
delete[] onerad_frozen;
delete[] maxrad_dynamic;
delete[] maxrad_frozen;
delete[] Fdot;
delete[] Fincr;
delete[] K;
delete[] detF;
delete[] PK1;
delete[] smoothVelDifference;
delete[] R;
delete[] FincrInv;
delete[] W;
delete[] D;
delete[] numNeighsRefConfig;
delete[] CauchyStress;
delete[] hourglass_error;
delete[] particle_dt;
delete[] failureModel;
}
}
/* ----------------------------------------------------------------------
*
* use half neighbor list to re-compute shape matrix
*
---------------------------------------------------------------------- */
void PairTlsph::PreCompute() {
tagint *mol = atom->molecule;
double *vfrac = atom->vfrac;
double *radius = atom->radius;
double **x0 = atom->x0;
double **x = atom->x;
double **v = atom->vest; // extrapolated velocities corresponding to current positions
double **vint = atom->v; // Velocity-Verlet algorithm velocities
double *damage = atom->damage;
tagint *tag = atom->tag;
int *type = atom->type;
int nlocal = atom->nlocal;
int jnum, jj, i, j, itype, idim;
tagint **partner = ((FixSMD_TLSPH_ReferenceConfiguration *) modify->fix[ifix_tlsph])->partner;
int *npartner = ((FixSMD_TLSPH_ReferenceConfiguration *) modify->fix[ifix_tlsph])->npartner;
float **wfd_list = ((FixSMD_TLSPH_ReferenceConfiguration *) modify->fix[ifix_tlsph])->wfd_list;
float **wf_list = ((FixSMD_TLSPH_ReferenceConfiguration *) modify->fix[ifix_tlsph])->wf_list;
float **degradation_ij = ((FixSMD_TLSPH_ReferenceConfiguration *) modify->fix[ifix_tlsph])->degradation_ij;
double r0, r0Sq, wf, wfd, h, irad, voli, volj, scale, shepardWeight;
Vector3d dx, dx0, dv, g;
Matrix3d Ktmp, Ftmp, Fdottmp, L, U, eye;
Vector3d vi, vj, vinti, vintj, xi, xj, x0i, x0j, dvint;
int periodic = (domain->xperiodic || domain->yperiodic || domain->zperiodic);
bool status;
Matrix3d F0;
eye.setIdentity();
for (i = 0; i < nlocal; i++) {
itype = type[i];
if (setflag[itype][itype] == 1) {
K[i].setZero();
Fincr[i].setZero();
Fdot[i].setZero();
numNeighsRefConfig[i] = 0;
smoothVelDifference[i].setZero();
hourglass_error[i] = 0.0;
if (mol[i] < 0) { // valid SPH particles have mol > 0
continue;
}
// initialize average mass density
h = 2.0 * radius[i];
r0 = 0.0;
spiky_kernel_and_derivative(h, r0, domain->dimension, wf, wfd);
jnum = npartner[i];
irad = radius[i];
voli = vfrac[i]; // set voli before it is used in the Shepard weight below
shepardWeight = wf * voli;
// initialize Eigen data structures from LAMMPS data structures
for (idim = 0; idim < 3; idim++) {
xi(idim) = x[i][idim];
x0i(idim) = x0[i][idim];
vi(idim) = v[i][idim];
vinti(idim) = vint[i][idim];
}
for (jj = 0; jj < jnum; jj++) {
if (partner[i][jj] == 0)
continue;
j = atom->map(partner[i][jj]);
if (j < 0) { // check if lost a partner without first breaking bond
partner[i][jj] = 0;
continue;
}
if (mol[j] < 0) { // particle has failed. do not include it for computing any property
continue;
}
if (mol[i] != mol[j]) {
continue;
}
// initialize Eigen data structures from LAMMPS data structures
for (idim = 0; idim < 3; idim++) {
xj(idim) = x[j][idim];
x0j(idim) = x0[j][idim];
vj(idim) = v[j][idim];
vintj(idim) = vint[j][idim];
}
dx0 = x0j - x0i;
dx = xj - xi;
if (periodic)
domain->minimum_image(dx0(0), dx0(1), dx0(2));
r0Sq = dx0.squaredNorm();
h = irad + radius[j];
r0 = sqrt(r0Sq);
volj = vfrac[j];
// distance vectors in current and reference configuration, velocity difference
dv = vj - vi;
dvint = vintj - vinti;
// scale the interaction according to the damage variable
scale = 1.0 - degradation_ij[i][jj];
wf = wf_list[i][jj] * scale;
wfd = wfd_list[i][jj] * scale;
g = (wfd / r0) * dx0;
/* build matrices */
Ktmp = -g * dx0.transpose();
Fdottmp = -dv * g.transpose();
Ftmp = -(dx - dx0) * g.transpose();
K[i] += volj * Ktmp;
Fdot[i] += volj * Fdottmp;
Fincr[i] += volj * Ftmp;
shepardWeight += volj * wf;
smoothVelDifference[i] += volj * wf * dvint;
numNeighsRefConfig[i]++;
} // end loop over j
// normalize average velocity field around an integration point
if (shepardWeight > 0.0) {
smoothVelDifference[i] /= shepardWeight;
} else {
smoothVelDifference[i].setZero();
}
pseudo_inverse_SVD(K[i]);
Fdot[i] *= K[i];
Fincr[i] *= K[i];
Fincr[i] += eye;
if (JAUMANN) {
R[i].setIdentity(); // for Jaumann stress rate, we do not need a subsequent rotation back into the reference configuration
} else {
status = PolDec(Fincr[i], R[i], U, false); // polar decomposition of the deformation gradient, F = R * U
if (!status) {
error->message(FLERR, "Polar decomposition of deformation gradient failed.\n");
mol[i] = -1;
} else {
Fincr[i] = R[i] * U;
}
}
detF[i] = Fincr[i].determinant();
FincrInv[i] = Fincr[i].inverse();
// velocity gradient
L = Fdot[i] * FincrInv[i];
// symmetric (D) and asymmetric (W) parts of L
D[i] = 0.5 * (L + L.transpose());
W[i] = 0.5 * (L - L.transpose()); // spin tensor:: need this for Jaumann rate
// unrotated rate-of-deformation tensor d, see right side of Pronto2d, eqn.(2.1.7)
// convention: unrotated frame is that one, where the true rotation of an integration point has been subtracted.
// stress in the unrotated frame of reference is denoted sigma (stress seen by an observer doing rigid body rotations along with the material)
// stress in the true frame of reference (a stationary observer) is denoted by T, "true stress"
D[i] = (R[i].transpose() * D[i] * R[i]).eval();
// limit strain rate
//double limit = 1.0e-3 * Lookup[SIGNAL_VELOCITY][itype] / radius[i];
//D[i] = LimitEigenvalues(D[i], limit);
/*
* make sure F stays within some limits
*/
if ((detF[i] < DETF_MIN) || (detF[i] > DETF_MAX) || (numNeighsRefConfig[i] == 0)) {
printf("deleting particle [%d] because det(F)=%f is outside stable range %f -- %f \n", tag[i],
Fincr[i].determinant(),
DETF_MIN, DETF_MAX);
printf("nn = %d, damage=%f\n", numNeighsRefConfig[i], damage[i]);
cout << "Here is matrix F:" << endl << Fincr[i] << endl;
cout << "Here is matrix F-1:" << endl << FincrInv[i] << endl;
cout << "Here is matrix K-1:" << endl << K[i] << endl;
cout << "Here is matrix K:" << endl << K[i].inverse() << endl;
cout << "Here is det of K" << endl << (K[i].inverse()).determinant() << endl;
cout << "Here is matrix R:" << endl << R[i] << endl;
cout << "Here is det of R" << endl << R[i].determinant() << endl;
cout << "Here is matrix U:" << endl << U << endl;
mol[i] = -1;
//error->one(FLERR, "");
}
if (mol[i] < 0) {
D[i].setZero();
Fdot[i].setZero();
Fincr[i].setIdentity();
smoothVelDifference[i].setZero();
detF[i] = 1.0;
K[i].setIdentity();
vint[i][0] = 0.0;
vint[i][1] = 0.0;
vint[i][2] = 0.0;
}
} // end loop over i
} // end check setflag
}
/* ---------------------------------------------------------------------- */
void PairTlsph::compute(int eflag, int vflag) {
if (atom->nmax > nmax) {
nmax = atom->nmax;
delete[] Fdot;
Fdot = new Matrix3d[nmax]; // memory usage: 9 doubles
delete[] Fincr;
Fincr = new Matrix3d[nmax]; // memory usage: 9 doubles
delete[] K;
K = new Matrix3d[nmax]; // memory usage: 9 doubles
delete[] PK1;
PK1 = new Matrix3d[nmax]; // memory usage: 9 doubles; total 5*9=45 doubles
delete[] detF;
detF = new double[nmax]; // memory usage: 1 double; total 46 doubles
delete[] smoothVelDifference;
smoothVelDifference = new Vector3d[nmax]; // memory usage: 3 doubles; total 49 doubles
delete[] R;
R = new Matrix3d[nmax]; // memory usage: 9 doubles; total 67 doubles
delete[] FincrInv;
FincrInv = new Matrix3d[nmax]; // memory usage: 9 doubles; total 85 doubles
delete[] W;
W = new Matrix3d[nmax]; // memory usage: 9 doubles; total 94 doubles
delete[] D;
D = new Matrix3d[nmax]; // memory usage: 9 doubles; total 103 doubles
delete[] numNeighsRefConfig;
numNeighsRefConfig = new int[nmax]; // memory usage: 1 int; total 108 doubles
delete[] CauchyStress;
CauchyStress = new Matrix3d[nmax]; // memory usage: 9 doubles; total 118 doubles
delete[] hourglass_error;
hourglass_error = new double[nmax];
delete[] particle_dt;
particle_dt = new double[nmax];
}
if (first) { // return on first call, because reference connectivity lists still needs to be built. Also zero quantities which are otherwise undefined.
first = false;
for (int i = 0; i < atom->nlocal; i++) {
Fincr[i].setZero();
detF[i] = 0.0;
smoothVelDifference[i].setZero();
D[i].setZero();
numNeighsRefConfig[i] = 0;
CauchyStress[i].setZero();
hourglass_error[i] = 0.0;
particle_dt[i] = 0.0;
}
return;
}
/*
* calculate deformations and rate-of-deformations
*/
PairTlsph::PreCompute();
/*
* calculate stresses from constitutive models
*/
PairTlsph::AssembleStress();
/*
* QUANTITIES ABOVE HAVE ONLY BEEN CALCULATED FOR NLOCAL PARTICLES.
* NEED TO DO A FORWARD COMMUNICATION TO GHOST ATOMS NOW
*/
comm->forward_comm_pair(this);
/*
* compute forces between particles
*/
updateFlag = 0;
ComputeForces(eflag, vflag);
}
void PairTlsph::ComputeForces(int eflag, int vflag) {
tagint *mol = atom->molecule;
double **x = atom->x;
double **v = atom->vest;
double **x0 = atom->x0;
double **f = atom->f;
double *vfrac = atom->vfrac;
double *de = atom->de;
double *rmass = atom->rmass;
double *radius = atom->radius;
double *damage = atom->damage;
double *plastic_strain = atom->eff_plastic_strain;
int *type = atom->type;
int nlocal = atom->nlocal;
int i, j, jj, jnum, itype, idim;
double r, hg_mag, wf, wfd, h, r0, r0Sq, voli, volj;
double delVdotDelR, visc_magnitude, deltaE, mu_ij, hg_err, gamma_dot_dx, delta, scale;
double strain1d, strain1d_max, softening_strain, shepardWeight;
char str[128];
Vector3d fi, fj, dx0, dx, dv, f_stress, f_hg, dxp_i, dxp_j, gamma, g, gamma_i, gamma_j, x0i, x0j;
Vector3d xi, xj, vi, vj, f_visc, sumForces, f_spring;
int periodic = (domain->xperiodic || domain->yperiodic || domain->zperiodic);
tagint **partner = ((FixSMD_TLSPH_ReferenceConfiguration *) modify->fix[ifix_tlsph])->partner;
int *npartner = ((FixSMD_TLSPH_ReferenceConfiguration *) modify->fix[ifix_tlsph])->npartner;
float **wfd_list = ((FixSMD_TLSPH_ReferenceConfiguration *) modify->fix[ifix_tlsph])->wfd_list;
float **wf_list = ((FixSMD_TLSPH_ReferenceConfiguration *) modify->fix[ifix_tlsph])->wf_list;
float **degradation_ij = ((FixSMD_TLSPH_ReferenceConfiguration *) modify->fix[ifix_tlsph])->degradation_ij;
float **energy_per_bond = ((FixSMD_TLSPH_ReferenceConfiguration *) modify->fix[ifix_tlsph])->energy_per_bond;
Matrix3d eye;
eye.setIdentity();
if (eflag || vflag)
ev_setup(eflag, vflag);
else
evflag = vflag_fdotr = 0;
/*
* iterate over pairs of particles i, j and assign forces using PK1 stress tensor
*/
//updateFlag = 0;
hMin = 1.0e22;
dtRelative = 1.0e22;
for (i = 0; i < nlocal; i++) {
if (mol[i] < 0) {
continue; // Particle i is not a valid SPH particle (anymore). Skip all interactions with this particle.
}
itype = type[i];
jnum = npartner[i];
voli = vfrac[i];
// initialize average mass density
h = 2.0 * radius[i];
r = 0.0;
spiky_kernel_and_derivative(h, r, domain->dimension, wf, wfd);
shepardWeight = wf * voli;
for (idim = 0; idim < 3; idim++) {
x0i(idim) = x0[i][idim];
xi(idim) = x[i][idim];
vi(idim) = v[i][idim];
}
for (jj = 0; jj < jnum; jj++) {
if (partner[i][jj] == 0)
continue;
j = atom->map(partner[i][jj]);
if (j < 0) { // check if lost a partner without first breaking bond
partner[i][jj] = 0;
continue;
}
if (mol[j] < 0) {
continue; // Particle j is not a valid SPH particle (anymore). Skip all interactions with this particle.
}
if (mol[i] != mol[j]) {
continue;
}
if (type[j] != itype) {
sprintf(str, "particle pair is not of same type!");
error->all(FLERR, str);
}
for (idim = 0; idim < 3; idim++) {
x0j(idim) = x0[j][idim];
xj(idim) = x[j][idim];
vj(idim) = v[j][idim];
}
// check that distance between i and j (in the reference config) is less than cutoff
dx0 = x0j - x0i;
if (periodic)
domain->minimum_image(dx0(0), dx0(1), dx0(2)); // apply minimum image only after dx0 has been computed
r0Sq = dx0.squaredNorm();
h = radius[i] + radius[j];
hMin = MIN(hMin, h);
r0 = sqrt(r0Sq);
volj = vfrac[j];
// distance vectors in current and reference configuration, velocity difference
dx = xj - xi;
dv = vj - vi;
r = dx.norm(); // current distance
// scale the interaction according to the damage variable
scale = 1.0 - degradation_ij[i][jj];
wf = wf_list[i][jj] * scale;
wfd = wfd_list[i][jj] * scale;
g = (wfd / r0) * dx0; // uncorrected kernel gradient
/*
* force contribution -- note that the kernel gradient correction has been absorbed into PK1
*/
f_stress = -voli * volj * (PK1[i] + PK1[j]) * g;
/*
* artificial viscosity
*/
delVdotDelR = dx.dot(dv) / (r + 0.1 * h); // project relative velocity onto unit particle distance vector [m/s]
LimitDoubleMagnitude(delVdotDelR, 0.01 * Lookup[SIGNAL_VELOCITY][itype]);
mu_ij = h * delVdotDelR / (r + 0.1 * h); // units: [m * m/s / m = m/s]
visc_magnitude = (-Lookup[VISCOSITY_Q1][itype] * Lookup[SIGNAL_VELOCITY][itype] * mu_ij
+ Lookup[VISCOSITY_Q2][itype] * mu_ij * mu_ij) / Lookup[REFERENCE_DENSITY][itype]; // units: m^5/(s^2 kg))
f_visc = rmass[i] * rmass[j] * visc_magnitude * wfd * dx / (r + 1.0e-2 * h); // units: kg^2 * m^5/(s^2 kg) * m^-4 = kg m / s^2 = N
/*
* hourglass deviation of particles i and j
*/
gamma = 0.5 * (Fincr[i] + Fincr[j]) * dx0 - dx;
hg_err = gamma.norm() / r0;
hourglass_error[i] += volj * wf * hg_err;
/* SPH-like hourglass formulation */
if (MAX(plastic_strain[i], plastic_strain[j]) > 1.0e-3) {
/*
* viscous hourglass formulation for particles with plastic deformation
*/
delta = gamma.dot(dx);
if (delVdotDelR * delta < 0.0) {
hg_err = MAX(hg_err, 0.05); // limit hg_err to avoid numerical instabilities
hg_mag = -hg_err * Lookup[HOURGLASS_CONTROL_AMPLITUDE][itype] * Lookup[SIGNAL_VELOCITY][itype] * mu_ij
/ Lookup[REFERENCE_DENSITY][itype]; // this has units of pressure
} else {
hg_mag = 0.0;
}
f_hg = rmass[i] * rmass[j] * hg_mag * wfd * dx / (r + 1.0e-2 * h);
} else {
/*
* stiffness hourglass formulation for particle in the elastic regime
*/
gamma_dot_dx = gamma.dot(dx); // project hourglass error vector onto pair distance vector
LimitDoubleMagnitude(gamma_dot_dx, 0.1 * r); // limit projected vector to avoid numerical instabilities
delta = 0.5 * gamma_dot_dx / (r + 0.1 * h); // delta has dimensions of [m]
hg_mag = Lookup[HOURGLASS_CONTROL_AMPLITUDE][itype] * delta / (r0Sq + 0.01 * h * h); // hg_mag has dimensions [m^(-1)]
hg_mag *= -voli * volj * wf * Lookup[YOUNGS_MODULUS][itype]; // hg_mag has dimensions [J*m^(-1)] = [N]
f_hg = (hg_mag / (r + 0.01 * h)) * dx;
}
// scale hourglass force with damage
f_hg *= (1.0 - damage[i]) * (1.0 - damage[j]);
// sum stress, viscous, and hourglass forces
sumForces = f_stress + f_visc + f_hg; // + f_spring;
// energy rate -- project velocity onto force vector
deltaE = 0.5 * sumForces.dot(dv);
// apply forces to pair of particles
f[i][0] += sumForces(0);
f[i][1] += sumForces(1);
f[i][2] += sumForces(2);
de[i] += deltaE;
// tally atomistic stress tensor
if (evflag) {
ev_tally_xyz(i, j, nlocal, 0, 0.0, 0.0, sumForces(0), sumForces(1), sumForces(2), dx(0), dx(1), dx(2));
}
shepardWeight += wf * volj;
// check if a particle has moved too much w.r.t another particle
if (r > r0) {
if (update_method == UPDATE_CONSTANT_THRESHOLD) {
if (r - r0 > update_threshold) {
updateFlag = 1;
}
} else if (update_method == UPDATE_PAIRWISE_RATIO) {
if ((r - r0) / h > update_threshold) {
updateFlag = 1;
}
}
}
if (failureModel[itype].failure_max_pairwise_strain) {
strain1d = (r - r0) / r0;
strain1d_max = Lookup[FAILURE_MAX_PAIRWISE_STRAIN_THRESHOLD][itype];
softening_strain = 2.0 * strain1d_max;
if (strain1d > strain1d_max) {
degradation_ij[i][jj] = (strain1d - strain1d_max) / softening_strain;
} else {
degradation_ij[i][jj] = 0.0;
}
if (degradation_ij[i][jj] >= 1.0) { // delete interaction if fully damaged
partner[i][jj] = 0;
}
}
if (failureModel[itype].failure_energy_release_rate) {
// integration approach
energy_per_bond[i][jj] += update->dt * f_stress.dot(dv) / (voli * volj);
double Vic = (2.0 / 3.0) * h * h * h; // interaction volume for 2d plane strain
double critical_energy_per_bond = Lookup[CRITICAL_ENERGY_RELEASE_RATE][itype] / (2.0 * Vic);
if (energy_per_bond[i][jj] > critical_energy_per_bond) {
//degradation_ij[i][jj] = 1.0;
partner[i][jj] = 0;
}
}
if (failureModel[itype].integration_point_wise) {
strain1d = (r - r0) / r0;
if (strain1d > 0.0) {
if ((damage[i] == 1.0) && (damage[j] == 1.0)) {
// check if damage_onset is already defined
if (energy_per_bond[i][jj] == 0.0) { // pair damage not defined yet
energy_per_bond[i][jj] = strain1d;
} else { // damage initiation strain already defined
strain1d_max = energy_per_bond[i][jj];
softening_strain = 2.0 * strain1d_max;
if (strain1d > strain1d_max) {
degradation_ij[i][jj] = (strain1d - strain1d_max) / softening_strain;
} else {
degradation_ij[i][jj] = 0.0;
}
}
}
if (degradation_ij[i][jj] >= 1.0) { // delete interaction if fully damaged
partner[i][jj] = 0;
}
} else {
degradation_ij[i][jj] = 0.0;
} // end failureModel[itype].integration_point_wise
}
} // end loop over jj neighbors of i
if (shepardWeight != 0.0) {
hourglass_error[i] /= shepardWeight;
}
} // end loop over i
if (vflag_fdotr)
virial_fdotr_compute();
}
/* ----------------------------------------------------------------------
assemble unrotated stress tensor using deviatoric and pressure components.
Convert to corotational Cauchy stress, then to PK1 stress and apply
shape matrix correction
------------------------------------------------------------------------- */
void PairTlsph::AssembleStress() {
tagint *mol = atom->molecule;
double *eff_plastic_strain = atom->eff_plastic_strain;
double *eff_plastic_strain_rate = atom->eff_plastic_strain_rate;
double **tlsph_stress = atom->smd_stress;
int *type = atom->type;
double *radius = atom->radius;
double *damage = atom->damage;
double *rmass = atom->rmass;
double *vfrac = atom->vfrac;
double *e = atom->e;
double pInitial, d_iso, pFinal, p_rate, plastic_strain_increment;
int i, itype;
int nlocal = atom->nlocal;
double dt = update->dt;
double M_eff, p_wave_speed, mass_specific_energy, vol_specific_energy, rho;
Matrix3d sigma_rate, eye, sigmaInitial, sigmaFinal, T, T_damaged, Jaumann_rate, sigma_rate_check;
Matrix3d d_dev, sigmaInitial_dev, sigmaFinal_dev, sigma_dev_rate, strain;
Vector3d x0i, xi, xp;
eye.setIdentity();
dtCFL = 1.0e22;
pFinal = 0.0;
for (i = 0; i < nlocal; i++) {
particle_dt[i] = 0.0;
itype = type[i];
if (setflag[itype][itype] == 1) {
if (mol[i] > 0) { // only do the following if particle has not failed -- mol < 0 means particle has failed
/*
* initial stress state: given by the unrotated Cauchy stress.
* Assemble Eigen 3d matrix from stored stress state
*/
sigmaInitial(0, 0) = tlsph_stress[i][0];
sigmaInitial(0, 1) = tlsph_stress[i][1];
sigmaInitial(0, 2) = tlsph_stress[i][2];
sigmaInitial(1, 1) = tlsph_stress[i][3];
sigmaInitial(1, 2) = tlsph_stress[i][4];
sigmaInitial(2, 2) = tlsph_stress[i][5];
sigmaInitial(1, 0) = sigmaInitial(0, 1);
sigmaInitial(2, 0) = sigmaInitial(0, 2);
sigmaInitial(2, 1) = sigmaInitial(1, 2);
//cout << "this is sigma initial" << endl << sigmaInitial << endl;
pInitial = sigmaInitial.trace() / 3.0; // isotropic part of initial stress
sigmaInitial_dev = Deviator(sigmaInitial);
d_iso = D[i].trace(); // volumetric part of stretch rate
d_dev = Deviator(D[i]); // deviatoric part of stretch rate
strain = 0.5 * (Fincr[i].transpose() * Fincr[i] - eye);
mass_specific_energy = e[i] / rmass[i]; // energy per unit mass
rho = rmass[i] / (detF[i] * vfrac[i]);
vol_specific_energy = mass_specific_energy * rho; // energy per current volume
/*
* pressure: compute pressure rate p_rate and final pressure pFinal
*/
ComputePressure(i, rho, mass_specific_energy, vol_specific_energy, pInitial, d_iso, pFinal, p_rate);
/*
* material strength
*/
//cout << "this is the strain deviator rate" << endl << d_dev << endl;
ComputeStressDeviator(i, sigmaInitial_dev, d_dev, sigmaFinal_dev, sigma_dev_rate, plastic_strain_increment);
//cout << "this is the stress deviator rate" << endl << sigma_dev_rate << endl;
// keep a rolling average of the plastic strain rate over the last 100 or so timesteps
eff_plastic_strain[i] += plastic_strain_increment;
// compute a characteristic time over which to average the plastic strain
double tav = 1000 * radius[i] / (Lookup[SIGNAL_VELOCITY][itype]);
eff_plastic_strain_rate[i] -= eff_plastic_strain_rate[i] * dt / tav;
eff_plastic_strain_rate[i] += plastic_strain_increment / tav;
eff_plastic_strain_rate[i] = MAX(0.0, eff_plastic_strain_rate[i]);
/*
* assemble total stress from pressure and deviatoric stress
*/
sigmaFinal = pFinal * eye + sigmaFinal_dev; // this is the stress that is kept
if (JAUMANN) {
/*
* sigma is already the co-rotated Cauchy stress.
* The stress rate, however, needs to be made objective.
*/
if (dt > 1.0e-16) {
sigma_rate = (1.0 / dt) * (sigmaFinal - sigmaInitial);
} else {
sigma_rate.setZero();
}
Jaumann_rate = sigma_rate + W[i] * sigmaInitial + sigmaInitial * W[i].transpose();
sigmaFinal = sigmaInitial + dt * Jaumann_rate;
T = sigmaFinal;
} else {
/*
* sigma is the unrotated stress.
* need to do forward rotation of the unrotated stress sigma to the current configuration
*/
T = R[i] * sigmaFinal * R[i].transpose();
}
/*
* store unrotated stress in atom vector
* symmetry is exploited
*/
tlsph_stress[i][0] = sigmaFinal(0, 0);
tlsph_stress[i][1] = sigmaFinal(0, 1);
tlsph_stress[i][2] = sigmaFinal(0, 2);
tlsph_stress[i][3] = sigmaFinal(1, 1);
tlsph_stress[i][4] = sigmaFinal(1, 2);
tlsph_stress[i][5] = sigmaFinal(2, 2);
/*
* Damage due to failure criteria.
*/
if (failureModel[itype].integration_point_wise) {
ComputeDamage(i, strain, T, T_damaged);
//T = T_damaged; Do not do this, it is undefined as of now
}
// store rotated, "true" Cauchy stress
CauchyStress[i] = T;
/*
* We have the corotational Cauchy stress.
* Convert to PK1. Note that reference configuration used for computing the forces is linked via
* the incremental deformation gradient, not the full deformation gradient.
*/
PK1[i] = detF[i] * T * FincrInv[i].transpose();
/*
* pre-multiply stress tensor with shape matrix to save computation in force loop
*/
PK1[i] = PK1[i] * K[i];
/*
* compute stable time step according to Pronto 2d
*/
Matrix3d deltaSigma;
deltaSigma = sigmaFinal - sigmaInitial;
p_rate = deltaSigma.trace() / (3.0 * dt + 1.0e-16);
sigma_dev_rate = Deviator(deltaSigma) / (dt + 1.0e-16);
double K_eff, mu_eff;
effective_longitudinal_modulus(itype, dt, d_iso, p_rate, d_dev, sigma_dev_rate, damage[i], K_eff, mu_eff, M_eff);
p_wave_speed = sqrt(M_eff / rho);
if (mol[i] < 0) {
error->one(FLERR, "this should not happen");
}
particle_dt[i] = 2.0 * radius[i] / p_wave_speed;
dtCFL = MIN(dtCFL, particle_dt[i]);
} else { // particle has failed (mol < 0)
PK1[i].setZero();
K[i].setIdentity();
CauchyStress[i].setZero();
sigma_rate.setZero();
tlsph_stress[i][0] = 0.0;
tlsph_stress[i][1] = 0.0;
tlsph_stress[i][2] = 0.0;
tlsph_stress[i][3] = 0.0;
tlsph_stress[i][4] = 0.0;
tlsph_stress[i][5] = 0.0;
} // end if mol > 0
} // end setflag
} // end for
}
/* ----------------------------------------------------------------------
allocate all arrays
------------------------------------------------------------------------- */
void PairTlsph::allocate() {
allocated = 1;
int n = atom->ntypes;
memory->create(setflag, n + 1, n + 1, "pair:setflag");
for (int i = 1; i <= n; i++)
for (int j = i; j <= n; j++)
setflag[i][j] = 0;
memory->create(strengthModel, n + 1, "pair:strengthmodel");
memory->create(eos, n + 1, "pair:eosmodel");
failureModel = new failure_types[n + 1];
memory->create(Lookup, MAX_KEY_VALUE, n + 1, "pair:LookupTable");
memory->create(cutsq, n + 1, n + 1, "pair:cutsq"); // always needs to be allocated, even with granular neighborlist
onerad_dynamic = new double[n + 1];
onerad_frozen = new double[n + 1];
maxrad_dynamic = new double[n + 1];
maxrad_frozen = new double[n + 1];
}
/* ----------------------------------------------------------------------
global settings
------------------------------------------------------------------------- */
void PairTlsph::settings(int narg, char **arg) {
if (comm->me == 0) {
printf(
"\n>>========>>========>>========>>========>>========>>========>>========>>========>>========>>========>>========>>========\n");
printf("TLSPH settings\n");
}
/*
* default value for update_threshold for updates of reference configuration:
* The maximum relative displacement which is tracked by the construction of LAMMPS' neighborlists
* is the following.
*/
cut_comm = MAX(neighbor->cutneighmax, comm->cutghostuser); // cutoff radius within which ghost atoms are communicated.
update_threshold = cut_comm;
update_method = UPDATE_NONE;
int iarg = 0;
while (true) {
if (iarg >= narg) {
break;
}
if (strcmp(arg[iarg], "*UPDATE_CONSTANT") == 0) {
iarg++;
if (iarg == narg) {
error->all(FLERR, "expected number following *UPDATE_CONSTANT keyword");
}
update_method = UPDATE_CONSTANT_THRESHOLD;
update_threshold = force->numeric(FLERR, arg[iarg]);
} else if (strcmp(arg[iarg], "*UPDATE_PAIRWISE") == 0) {
iarg++;
if (iarg == narg) {
error->all(FLERR, "expected number following *UPDATE_PAIRWISE keyword");
}
update_method = UPDATE_PAIRWISE_RATIO;
update_threshold = force->numeric(FLERR, arg[iarg]);
} else {
char msg[128];
sprintf(msg, "Illegal keyword for smd/integrate_tlsph: %s\n", arg[iarg]);
error->all(FLERR, msg);
}
iarg++;
}
if ((update_threshold > cut_comm) && (update_method == UPDATE_CONSTANT_THRESHOLD)) {
if (comm->me == 0) {
printf("\n ***** WARNING ***\n");
printf("requested reference configuration update threshold is %g length units\n", update_threshold);
printf("This value exceeds the maximum value %g beyond which TLSPH displacements can be tracked at current settings.\n",
cut_comm);
printf("Expect loss of neighbors!\n");
}
}
if (comm->me == 0) {
if (update_method == UPDATE_CONSTANT_THRESHOLD) {
printf("... will update reference configuration if magnitude of relative displacement exceeds %g length units\n",
update_threshold);
} else if (update_method == UPDATE_PAIRWISE_RATIO) {
printf("... will update reference configuration if ratio pairwise distance / smoothing length exceeds %g\n",
update_threshold);
} else if (update_method == UPDATE_NONE) {
printf("... will never update reference configuration\n");
}
printf(
">>========>>========>>========>>========>>========>>========>>========>>========>>========>>========>>========>>========\n");
}
}
/* ----------------------------------------------------------------------
set coeffs for one or more type pairs
------------------------------------------------------------------------- */
void PairTlsph::coeff(int narg, char **arg) {
int ioffset, iarg, iNextKwd, itype;
char str[128];
std::string s, t;
if (narg < 3) {
sprintf(str, "number of arguments for pair tlsph is too small!");
error->all(FLERR, str);
}
if (!allocated)
allocate();
/*
* check that TLSPH parameters are given only in i,i form
*/
if (force->inumeric(FLERR, arg[0]) != force->inumeric(FLERR, arg[1])) {
sprintf(str, "TLSPH coefficients can only be specified between particles of same type!");
error->all(FLERR, str);
}
itype = force->inumeric(FLERR, arg[0]);
// set all eos, strength and failure models to inactive by default
eos[itype] = EOS_NONE;
strengthModel[itype] = STRENGTH_NONE;
if (comm->me == 0) {
printf(
"\n>>========>>========>>========>>========>>========>>========>>========>>========>>========>>========>>========>>========\n");
printf("SMD / TLSPH PROPERTIES OF PARTICLE TYPE %d:\n", itype);
}
/*
* read parameters which are common -- regardless of material / eos model
*/
ioffset = 2;
if (strcmp(arg[ioffset], "*COMMON") != 0) {
sprintf(str, "common keyword missing!");
error->all(FLERR, str);
}
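// scan forward to the next *KEYWORD token; the arguments up to it belong to the current card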
t = string("*");
iNextKwd = -1;
for (iarg = ioffset + 1; iarg < narg; iarg++) {
s = string(arg[iarg]);
if (s.compare(0, t.length(), t) == 0) {
iNextKwd = iarg;
break;
}
}
//printf("keyword following *COMMON is %s\n", arg[iNextKwd]);
if (iNextKwd < 0) {
sprintf(str, "no *KEYWORD terminates *COMMON");
error->all(FLERR, str);
}
if (iNextKwd - ioffset != 7 + 1) {
sprintf(str, "expected 7 arguments following *COMMON but got %d\n", iNextKwd - ioffset - 1);
error->all(FLERR, str);
}
Lookup[REFERENCE_DENSITY][itype] = force->numeric(FLERR, arg[ioffset + 1]);
Lookup[YOUNGS_MODULUS][itype] = force->numeric(FLERR, arg[ioffset + 2]);
Lookup[POISSON_RATIO][itype] = force->numeric(FLERR, arg[ioffset + 3]);
Lookup[VISCOSITY_Q1][itype] = force->numeric(FLERR, arg[ioffset + 4]);
Lookup[VISCOSITY_Q2][itype] = force->numeric(FLERR, arg[ioffset + 5]);
Lookup[HOURGLASS_CONTROL_AMPLITUDE][itype] = force->numeric(FLERR, arg[ioffset + 6]);
Lookup[HEAT_CAPACITY][itype] = force->numeric(FLERR, arg[ioffset + 7]);
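/*
 * derive the remaining isotropic elastic constants from E and nu:
 * lambda = E nu / ((1 + nu)(1 - 2 nu)), mu = E / (2 (1 + nu)),
 * M = lambda + 2 mu, c0 = sqrt(M / rho0), K = lambda + 2 mu / 3
 */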
Lookup[LAME_LAMBDA][itype] = Lookup[YOUNGS_MODULUS][itype] * Lookup[POISSON_RATIO][itype]
/ ((1.0 + Lookup[POISSON_RATIO][itype]) * (1.0 - 2.0 * Lookup[POISSON_RATIO][itype]));
Lookup[SHEAR_MODULUS][itype] = Lookup[YOUNGS_MODULUS][itype] / (2.0 * (1.0 + Lookup[POISSON_RATIO][itype]));
Lookup[M_MODULUS][itype] = Lookup[LAME_LAMBDA][itype] + 2.0 * Lookup[SHEAR_MODULUS][itype];
Lookup[SIGNAL_VELOCITY][itype] = sqrt(
(Lookup[LAME_LAMBDA][itype] + 2.0 * Lookup[SHEAR_MODULUS][itype]) / Lookup[REFERENCE_DENSITY][itype]);
Lookup[BULK_MODULUS][itype] = Lookup[LAME_LAMBDA][itype] + 2.0 * Lookup[SHEAR_MODULUS][itype] / 3.0;
if (comm->me == 0) {
printf("\n material unspecific properties for SMD/TLSPH definition of particle type %d:\n", itype);
printf("%60s : %g\n", "reference density", Lookup[REFERENCE_DENSITY][itype]);
printf("%60s : %g\n", "Young's modulus", Lookup[YOUNGS_MODULUS][itype]);
printf("%60s : %g\n", "Poisson ratio", Lookup[POISSON_RATIO][itype]);
printf("%60s : %g\n", "linear viscosity coefficient", Lookup[VISCOSITY_Q1][itype]);
printf("%60s : %g\n", "quadratic viscosity coefficient", Lookup[VISCOSITY_Q2][itype]);
printf("%60s : %g\n", "hourglass control coefficient", Lookup[HOURGLASS_CONTROL_AMPLITUDE][itype]);
printf("%60s : %g\n", "heat capacity [energy / (mass * temperature)]", Lookup[HEAT_CAPACITY][itype]);
printf("%60s : %g\n", "Lame constant lambda", Lookup[LAME_LAMBDA][itype]);
printf("%60s : %g\n", "shear modulus", Lookup[SHEAR_MODULUS][itype]);
printf("%60s : %g\n", "bulk modulus", Lookup[BULK_MODULUS][itype]);
printf("%60s : %g\n", "signal velocity", Lookup[SIGNAL_VELOCITY][itype]);
}
/*
* read following material cards
*/
//printf("next kwd is %s\n", arg[iNextKwd]);
eos[itype] = EOS_NONE;
strengthModel[itype] = STRENGTH_NONE;
while (true) {
if (strcmp(arg[iNextKwd], "*END") == 0) {
if (comm->me == 0) {
printf("found *END keyword");
printf(
"\n>>========>>========>>========>>========>>========>>========>>========>>========>>========>>========>>========>>========\n\n");
}
break;
}
/*
* Linear Elasticity model based on deformation gradient
*/
ioffset = iNextKwd;
if (strcmp(arg[ioffset], "*LINEAR_DEFGRAD") == 0) {
strengthModel[itype] = LINEAR_DEFGRAD;
if (comm->me == 0) {
printf("reading *LINEAR_DEFGRAD\n");
}
t = string("*");
iNextKwd = -1;
for (iarg = ioffset + 1; iarg < narg; iarg++) {
s = string(arg[iarg]);
if (s.compare(0, t.length(), t) == 0) {
iNextKwd = iarg;
break;
}
}
if (iNextKwd < 0) {
sprintf(str, "no *KEYWORD terminates *LINEAR_DEFGRAD");
error->all(FLERR, str);
}
if (iNextKwd - ioffset != 1) {
sprintf(str, "expected 0 arguments following *LINEAR_DEFGRAD but got %d\n", iNextKwd - ioffset - 1);
error->all(FLERR, str);
}
if (comm->me == 0) {
printf("\n%60s\n", "Linear Elasticity model based on deformation gradient");
}
} else if (strcmp(arg[ioffset], "*STRENGTH_LINEAR") == 0) {
/*
* Linear Elasticity strength only model based on strain rate
*/
strengthModel[itype] = STRENGTH_LINEAR;
if (comm->me == 0) {
printf("reading *STRENGTH_LINEAR\n");
}
t = string("*");
iNextKwd = -1;
for (iarg = ioffset + 1; iarg < narg; iarg++) {
s = string(arg[iarg]);
if (s.compare(0, t.length(), t) == 0) {
iNextKwd = iarg;
break;
}
}
if (iNextKwd < 0) {
sprintf(str, "no *KEYWORD terminates *STRENGTH_LINEAR");
error->all(FLERR, str);
}
if (iNextKwd - ioffset != 1) {
sprintf(str, "expected 0 arguments following *STRENGTH_LINEAR but got %d\n", iNextKwd - ioffset - 1);
error->all(FLERR, str);
}
if (comm->me == 0) {
printf("%60s\n", "Linear Elasticity strength based on strain rate");
}
} // end Linear Elasticity strength only model based on strain rate
else if (strcmp(arg[ioffset], "*STRENGTH_LINEAR_PLASTIC") == 0) {
/*
* Linear Elastic / perfectly plastic strength only model based on strain rate
*/
strengthModel[itype] = STRENGTH_LINEAR_PLASTIC;
if (comm->me == 0) {
printf("reading *STRENGTH_LINEAR_PLASTIC\n");
}
t = string("*");
iNextKwd = -1;
for (iarg = ioffset + 1; iarg < narg; iarg++) {
s = string(arg[iarg]);
if (s.compare(0, t.length(), t) == 0) {
iNextKwd = iarg;
break;
}
}
if (iNextKwd < 0) {
sprintf(str, "no *KEYWORD terminates *STRENGTH_LINEAR_PLASTIC");
error->all(FLERR, str);
}
if (iNextKwd - ioffset != 2 + 1) {
sprintf(str, "expected 2 arguments following *STRENGTH_LINEAR_PLASTIC but got %d\n", iNextKwd - ioffset - 1);
error->all(FLERR, str);
}
Lookup[YIELD_STRESS][itype] = force->numeric(FLERR, arg[ioffset + 1]);
Lookup[HARDENING_PARAMETER][itype] = force->numeric(FLERR, arg[ioffset + 2]);
if (comm->me == 0) {
printf("%60s\n", "Linear elastic / perfectly plastic strength based on strain rate");
printf("%60s : %g\n", "Young's modulus", Lookup[YOUNGS_MODULUS][itype]);
printf("%60s : %g\n", "Poisson ratio", Lookup[POISSON_RATIO][itype]);
printf("%60s : %g\n", "shear modulus", Lookup[SHEAR_MODULUS][itype]);
printf("%60s : %g\n", "constant yield stress", Lookup[YIELD_STRESS][itype]);
printf("%60s : %g\n", "constant hardening parameter", Lookup[HARDENING_PARAMETER][itype]);
}
} // end Linear Elastic / perfectly plastic strength only model based on strain rate
else if (strcmp(arg[ioffset], "*JOHNSON_COOK") == 0) {
/*
* JOHNSON - COOK
*/
strengthModel[itype] = STRENGTH_JOHNSON_COOK;
if (comm->me == 0) {
printf("reading *JOHNSON_COOK\n");
}
t = string("*");
iNextKwd = -1;
for (iarg = ioffset + 1; iarg < narg; iarg++) {
s = string(arg[iarg]);
if (s.compare(0, t.length(), t) == 0) {
iNextKwd = iarg;
break;
}
}
if (iNextKwd < 0) {
sprintf(str, "no *KEYWORD terminates *JOHNSON_COOK");
error->all(FLERR, str);
}
if (iNextKwd - ioffset != 8 + 1) {
sprintf(str, "expected 8 arguments following *JOHNSON_COOK but got %d\n", iNextKwd - ioffset - 1);
error->all(FLERR, str);
}
Lookup[JC_A][itype] = force->numeric(FLERR, arg[ioffset + 1]);
Lookup[JC_B][itype] = force->numeric(FLERR, arg[ioffset + 2]);
Lookup[JC_a][itype] = force->numeric(FLERR, arg[ioffset + 3]);
Lookup[JC_C][itype] = force->numeric(FLERR, arg[ioffset + 4]);
Lookup[JC_epdot0][itype] = force->numeric(FLERR, arg[ioffset + 5]);
Lookup[JC_T0][itype] = force->numeric(FLERR, arg[ioffset + 6]);
Lookup[JC_Tmelt][itype] = force->numeric(FLERR, arg[ioffset + 7]);
Lookup[JC_M][itype] = force->numeric(FLERR, arg[ioffset + 8]);
if (comm->me == 0) {
printf("%60s\n", "Johnson Cook material strength model");
printf("%60s : %g\n", "A: initial yield stress", Lookup[JC_A][itype]);
printf("%60s : %g\n", "B : proportionality factor for plastic strain dependency", Lookup[JC_B][itype]);
printf("%60s : %g\n", "a : exponent for plastic strain dependency", Lookup[JC_a][itype]);
printf("%60s : %g\n", "C : proportionality factor for logarithmic plastic strain rate dependency",
Lookup[JC_C][itype]);
printf("%60s : %g\n", "epdot0 : dimensionality factor for plastic strain rate dependency",
Lookup[JC_epdot0][itype]);
printf("%60s : %g\n", "T0 : reference (room) temperature", Lookup[JC_T0][itype]);
printf("%60s : %g\n", "Tmelt : melting temperature", Lookup[JC_Tmelt][itype]);
printf("%60s : %g\n", "M : exponent for temperature dependency", Lookup[JC_M][itype]);
}
} else if (strcmp(arg[ioffset], "*EOS_NONE") == 0) {
/*
* no eos
*/
eos[itype] = EOS_NONE;
if (comm->me == 0) {
printf("reading *EOS_NONE\n");
}
t = string("*");
iNextKwd = -1;
for (iarg = ioffset + 1; iarg < narg; iarg++) {
s = string(arg[iarg]);
if (s.compare(0, t.length(), t) == 0) {
iNextKwd = iarg;
break;
}
}
if (iNextKwd < 0) {
sprintf(str, "no *KEYWORD terminates *EOS_NONE");
error->all(FLERR, str);
}
if (iNextKwd - ioffset != 1) {
sprintf(str, "expected 0 arguments following *EOS_NONE but got %d\n", iNextKwd - ioffset - 1);
error->all(FLERR, str);
}
if (comm->me == 0) {
printf("\n%60s\n", "no EOS selected");
}
} else if (strcmp(arg[ioffset], "*EOS_LINEAR") == 0) {
/*
* linear eos
*/
eos[itype] = EOS_LINEAR;
if (comm->me == 0) {
printf("reading *EOS_LINEAR\n");
}
t = string("*");
iNextKwd = -1;
for (iarg = ioffset + 1; iarg < narg; iarg++) {
s = string(arg[iarg]);
if (s.compare(0, t.length(), t) == 0) {
iNextKwd = iarg;
break;
}
}
if (iNextKwd < 0) {
sprintf(str, "no *KEYWORD terminates *EOS_LINEAR");
error->all(FLERR, str);
}
if (iNextKwd - ioffset != 1) {
sprintf(str, "expected 0 arguments following *EOS_LINEAR but got %d\n", iNextKwd - ioffset - 1);
error->all(FLERR, str);
}
if (comm->me == 0) {
printf("\n%60s\n", "linear EOS based on strain rate");
printf("%60s : %g\n", "bulk modulus", Lookup[BULK_MODULUS][itype]);
}
} // end linear eos
else if (strcmp(arg[ioffset], "*EOS_SHOCK") == 0) {
/*
* shock eos
*/
eos[itype] = EOS_SHOCK;
if (comm->me == 0) {
printf("reading *EOS_SHOCK\n");
}
t = string("*");
iNextKwd = -1;
for (iarg = ioffset + 1; iarg < narg; iarg++) {
s = string(arg[iarg]);
if (s.compare(0, t.length(), t) == 0) {
iNextKwd = iarg;
break;
}
}
if (iNextKwd < 0) {
sprintf(str, "no *KEYWORD terminates *EOS_SHOCK");
error->all(FLERR, str);
}
if (iNextKwd - ioffset != 3 + 1) {
sprintf(str, "expected 3 arguments (c0, S, Gamma) following *EOS_SHOCK but got %d\n", iNextKwd - ioffset - 1);
error->all(FLERR, str);
}
Lookup[EOS_SHOCK_C0][itype] = force->numeric(FLERR, arg[ioffset + 1]);
Lookup[EOS_SHOCK_S][itype] = force->numeric(FLERR, arg[ioffset + 2]);
Lookup[EOS_SHOCK_GAMMA][itype] = force->numeric(FLERR, arg[ioffset + 3]);
if (comm->me == 0) {
printf("\n%60s\n", "shock EOS based on strain rate");
printf("%60s : %g\n", "reference speed of sound", Lookup[EOS_SHOCK_C0][itype]);
printf("%60s : %g\n", "Hugoniot parameter S", Lookup[EOS_SHOCK_S][itype]);
printf("%60s : %g\n", "Grueneisen Gamma", Lookup[EOS_SHOCK_GAMMA][itype]);
}
} // end shock eos
else if (strcmp(arg[ioffset], "*EOS_POLYNOMIAL") == 0) {
/*
* polynomial eos
*/
eos[itype] = EOS_POLYNOMIAL;
if (comm->me == 0) {
printf("reading *EOS_POLYNOMIAL\n");
}
t = string("*");
iNextKwd = -1;
for (iarg = ioffset + 1; iarg < narg; iarg++) {
s = string(arg[iarg]);
if (s.compare(0, t.length(), t) == 0) {
iNextKwd = iarg;
break;
}
}
if (iNextKwd < 0) {
sprintf(str, "no *KEYWORD terminates *EOS_POLYNOMIAL");
error->all(FLERR, str);
}
if (iNextKwd - ioffset != 7 + 1) {
sprintf(str, "expected 7 arguments following *EOS_POLYNOMIAL but got %d\n", iNextKwd - ioffset - 1);
error->all(FLERR, str);
}
Lookup[EOS_POLYNOMIAL_C0][itype] = force->numeric(FLERR, arg[ioffset + 1]);
Lookup[EOS_POLYNOMIAL_C1][itype] = force->numeric(FLERR, arg[ioffset + 2]);
Lookup[EOS_POLYNOMIAL_C2][itype] = force->numeric(FLERR, arg[ioffset + 3]);
Lookup[EOS_POLYNOMIAL_C3][itype] = force->numeric(FLERR, arg[ioffset + 4]);
Lookup[EOS_POLYNOMIAL_C4][itype] = force->numeric(FLERR, arg[ioffset + 5]);
Lookup[EOS_POLYNOMIAL_C5][itype] = force->numeric(FLERR, arg[ioffset + 6]);
Lookup[EOS_POLYNOMIAL_C6][itype] = force->numeric(FLERR, arg[ioffset + 7]);
if (comm->me == 0) {
printf("\n%60s\n", "polynomial EOS based on strain rate");
printf("%60s : %g\n", "parameter c0", Lookup[EOS_POLYNOMIAL_C0][itype]);
printf("%60s : %g\n", "parameter c1", Lookup[EOS_POLYNOMIAL_C1][itype]);
printf("%60s : %g\n", "parameter c2", Lookup[EOS_POLYNOMIAL_C2][itype]);
printf("%60s : %g\n", "parameter c3", Lookup[EOS_POLYNOMIAL_C3][itype]);
printf("%60s : %g\n", "parameter c4", Lookup[EOS_POLYNOMIAL_C4][itype]);
printf("%60s : %g\n", "parameter c5", Lookup[EOS_POLYNOMIAL_C5][itype]);
printf("%60s : %g\n", "parameter c6", Lookup[EOS_POLYNOMIAL_C6][itype]);
}
} // end polynomial eos
else if (strcmp(arg[ioffset], "*FAILURE_MAX_PLASTIC_STRAIN") == 0) {
/*
* maximum plastic strain failure criterion
*/
if (comm->me == 0) {
printf("reading *FAILURE_MAX_PLASTIC_STRAIN\n");
}
t = string("*");
iNextKwd = -1;
for (iarg = ioffset + 1; iarg < narg; iarg++) {
s = string(arg[iarg]);
if (s.compare(0, t.length(), t) == 0) {
iNextKwd = iarg;
break;
}
}
if (iNextKwd < 0) {
sprintf(str, "no *KEYWORD terminates *FAILURE_MAX_PLASTIC_STRAIN");
error->all(FLERR, str);
}
if (iNextKwd - ioffset != 1 + 1) {
sprintf(str, "expected 1 argument following *FAILURE_MAX_PLASTIC_STRAIN but got %d\n", iNextKwd - ioffset - 1);
error->all(FLERR, str);
}
failureModel[itype].failure_max_plastic_strain = true;
failureModel[itype].integration_point_wise = true;
Lookup[FAILURE_MAX_PLASTIC_STRAIN_THRESHOLD][itype] = force->numeric(FLERR, arg[ioffset + 1]);
if (comm->me == 0) {
printf("\n%60s\n", "maximum plastic strain failure criterion");
printf("%60s : %g\n", "failure occurs when plastic strain reaches limit",
Lookup[FAILURE_MAX_PLASTIC_STRAIN_THRESHOLD][itype]);
}
} // end maximum plastic strain failure criterion
else if (strcmp(arg[ioffset], "*FAILURE_MAX_PAIRWISE_STRAIN") == 0) {
/*
* failure criterion based on maximum strain between a pair of TLSPH particles.
*/
if (comm->me == 0) {
printf("reading *FAILURE_MAX_PAIRWISE_STRAIN\n");
}
if (update_method != UPDATE_NONE) {
error->all(FLERR, "cannot use *FAILURE_MAX_PAIRWISE_STRAIN with updated Total-Lagrangian formalism");
}
t = string("*");
iNextKwd = -1;
for (iarg = ioffset + 1; iarg < narg; iarg++) {
s = string(arg[iarg]);
if (s.compare(0, t.length(), t) == 0) {
iNextKwd = iarg;
break;
}
}
if (iNextKwd < 0) {
sprintf(str, "no *KEYWORD terminates *FAILURE_MAX_PAIRWISE_STRAIN");
error->all(FLERR, str);
}
if (iNextKwd - ioffset != 1 + 1) {
sprintf(str, "expected 1 argument following *FAILURE_MAX_PAIRWISE_STRAIN but got %d\n", iNextKwd - ioffset - 1);
error->all(FLERR, str);
}
failureModel[itype].failure_max_pairwise_strain = true;
failureModel[itype].integration_point_wise = true;
Lookup[FAILURE_MAX_PAIRWISE_STRAIN_THRESHOLD][itype] = force->numeric(FLERR, arg[ioffset + 1]);
if (comm->me == 0) {
printf("\n%60s\n", "maximum pairwise strain failure criterion");
printf("%60s : %g\n", "failure occurs when pairwise strain reaches limit",
Lookup[FAILURE_MAX_PAIRWISE_STRAIN_THRESHOLD][itype]);
}
} // end pair based maximum strain failure criterion
else if (strcmp(arg[ioffset], "*FAILURE_MAX_PRINCIPAL_STRAIN") == 0) {
error->all(FLERR, "this failure model is currently unsupported");
/*
* maximum principal strain failure criterion
*/
if (comm->me == 0) {
printf("reading *FAILURE_MAX_PRINCIPAL_STRAIN\n");
}
t = string("*");
iNextKwd = -1;
for (iarg = ioffset + 1; iarg < narg; iarg++) {
s = string(arg[iarg]);
if (s.compare(0, t.length(), t) == 0) {
iNextKwd = iarg;
break;
}
}
if (iNextKwd < 0) {
sprintf(str, "no *KEYWORD terminates *FAILURE_MAX_PRINCIPAL_STRAIN");
error->all(FLERR, str);
}
if (iNextKwd - ioffset != 1 + 1) {
sprintf(str, "expected 1 argument following *FAILURE_MAX_PRINCIPAL_STRAIN but got %d\n", iNextKwd - ioffset - 1);
error->all(FLERR, str);
}
failureModel[itype].failure_max_principal_strain = true;
failureModel[itype].integration_point_wise = true;
Lookup[FAILURE_MAX_PRINCIPAL_STRAIN_THRESHOLD][itype] = force->numeric(FLERR, arg[ioffset + 1]);
if (comm->me == 0) {
printf("\n%60s\n", "maximum principal strain failure criterion");
printf("%60s : %g\n", "failure occurs when principal strain reaches limit",
Lookup[FAILURE_MAX_PRINCIPAL_STRAIN_THRESHOLD][itype]);
}
} // end maximum principal strain failure criterion
else if (strcmp(arg[ioffset], "*FAILURE_JOHNSON_COOK") == 0) {
error->all(FLERR, "this failure model is currently unsupported");
if (comm->me == 0) {
printf("reading *FAILURE_JOHNSON_COOK\n");
}
t = string("*");
iNextKwd = -1;
for (iarg = ioffset + 1; iarg < narg; iarg++) {
s = string(arg[iarg]);
if (s.compare(0, t.length(), t) == 0) {
iNextKwd = iarg;
break;
}
}
if (iNextKwd < 0) {
sprintf(str, "no *KEYWORD terminates *FAILURE_JOHNSON_COOK");
error->all(FLERR, str);
}
if (iNextKwd - ioffset != 5 + 1) {
sprintf(str, "expected 5 arguments following *FAILURE_JOHNSON_COOK but got %d\n", iNextKwd - ioffset - 1);
error->all(FLERR, str);
}
failureModel[itype].failure_johnson_cook = true;
failureModel[itype].integration_point_wise = true;
Lookup[FAILURE_JC_D1][itype] = force->numeric(FLERR, arg[ioffset + 1]);
Lookup[FAILURE_JC_D2][itype] = force->numeric(FLERR, arg[ioffset + 2]);
Lookup[FAILURE_JC_D3][itype] = force->numeric(FLERR, arg[ioffset + 3]);
Lookup[FAILURE_JC_D4][itype] = force->numeric(FLERR, arg[ioffset + 4]);
Lookup[FAILURE_JC_EPDOT0][itype] = force->numeric(FLERR, arg[ioffset + 5]);
if (comm->me == 0) {
printf("\n%60s\n", "Johnson-Cook failure criterion");
printf("%60s : %g\n", "parameter d1", Lookup[FAILURE_JC_D1][itype]);
printf("%60s : %g\n", "parameter d2", Lookup[FAILURE_JC_D2][itype]);
printf("%60s : %g\n", "parameter d3", Lookup[FAILURE_JC_D3][itype]);
printf("%60s : %g\n", "parameter d4", Lookup[FAILURE_JC_D4][itype]);
printf("%60s : %g\n", "reference plastic strain rate", Lookup[FAILURE_JC_EPDOT0][itype]);
}
} else if (strcmp(arg[ioffset], "*FAILURE_MAX_PRINCIPAL_STRESS") == 0) {
error->all(FLERR, "this failure model is currently unsupported");
/*
* maximum principal stress failure criterion
*/
if (comm->me == 0) {
printf("reading *FAILURE_MAX_PRINCIPAL_STRESS\n");
}
t = string("*");
iNextKwd = -1;
for (iarg = ioffset + 1; iarg < narg; iarg++) {
s = string(arg[iarg]);
if (s.compare(0, t.length(), t) == 0) {
iNextKwd = iarg;
break;
}
}
if (iNextKwd < 0) {
sprintf(str, "no *KEYWORD terminates *FAILURE_MAX_PRINCIPAL_STRESS");
error->all(FLERR, str);
}
if (iNextKwd - ioffset != 1 + 1) {
sprintf(str, "expected 1 argument following *FAILURE_MAX_PRINCIPAL_STRESS but got %d\n", iNextKwd - ioffset - 1);
error->all(FLERR, str);
}
failureModel[itype].failure_max_principal_stress = true;
failureModel[itype].integration_point_wise = true;
Lookup[FAILURE_MAX_PRINCIPAL_STRESS_THRESHOLD][itype] = force->numeric(FLERR, arg[ioffset + 1]);
if (comm->me == 0) {
printf("\n%60s\n", "maximum principal stress failure criterion");
printf("%60s : %g\n", "failure occurs when principal stress reaches limit",
Lookup[FAILURE_MAX_PRINCIPAL_STRESS_THRESHOLD][itype]);
}
} // end maximum principal stress failure criterion
else if (strcmp(arg[ioffset], "*FAILURE_ENERGY_RELEASE_RATE") == 0) {
if (comm->me == 0) {
printf("reading *FAILURE_ENERGY_RELEASE_RATE\n");
}
t = string("*");
iNextKwd = -1;
for (iarg = ioffset + 1; iarg < narg; iarg++) {
s = string(arg[iarg]);
if (s.compare(0, t.length(), t) == 0) {
iNextKwd = iarg;
break;
}
}
if (iNextKwd < 0) {
sprintf(str, "no *KEYWORD terminates *FAILURE_ENERGY_RELEASE_RATE");
error->all(FLERR, str);
}
if (iNextKwd - ioffset != 1 + 1) {
sprintf(str, "expected 1 argument following *FAILURE_ENERGY_RELEASE_RATE but got %d\n", iNextKwd - ioffset - 1);
error->all(FLERR, str);
}
failureModel[itype].failure_energy_release_rate = true;
Lookup[CRITICAL_ENERGY_RELEASE_RATE][itype] = force->numeric(FLERR, arg[ioffset + 1]);
if (comm->me == 0) {
printf("\n%60s\n", "critical energy release rate failure criterion");
printf("%60s : %g\n", "failure occurs when energy release rate reaches limit",
Lookup[CRITICAL_ENERGY_RELEASE_RATE][itype]);
}
} // end energy release rate failure criterion
else {
sprintf(str, "unknown *KEYWORD: %s", arg[ioffset]);
error->all(FLERR, str);
}
}
setflag[itype][itype] = 1;
}
/* ----------------------------------------------------------------------
init for one type pair i,j and corresponding j,i
------------------------------------------------------------------------- */
double PairTlsph::init_one(int i, int j) {
if (!allocated)
allocate();
if (setflag[i][j] == 0)
error->all(FLERR, "All pair coeffs are not set");
if (force->newton == 1)
error->all(FLERR, "Pair style tlsph requires newton off");
// cutoff = sum of max I,J radii for
// dynamic/dynamic & dynamic/frozen interactions, but not frozen/frozen
double cutoff = maxrad_dynamic[i] + maxrad_dynamic[j];
cutoff = MAX(cutoff, maxrad_frozen[i] + maxrad_dynamic[j]);
cutoff = MAX(cutoff, maxrad_dynamic[i] + maxrad_frozen[j]);
//printf("cutoff for pair pair tlsph = %f\n", cutoff);
return cutoff;
}
/* ----------------------------------------------------------------------
init specific to this pair style
------------------------------------------------------------------------- */
void PairTlsph::init_style() {
int i;
if (force->newton_pair == 1) {
error->all(FLERR, "Pair style tlsph requires newton pair off");
}
// request a granular neighbor list
int irequest = neighbor->request(this);
neighbor->requests[irequest]->size = 1;
// set maxrad_dynamic and maxrad_frozen for each type
// include future Fix pour particles as dynamic
for (i = 1; i <= atom->ntypes; i++)
onerad_dynamic[i] = onerad_frozen[i] = 0.0;
double *radius = atom->radius;
int *type = atom->type;
int nlocal = atom->nlocal;
for (i = 0; i < nlocal; i++)
onerad_dynamic[type[i]] = MAX(onerad_dynamic[type[i]], radius[i]);
MPI_Allreduce(&onerad_dynamic[1], &maxrad_dynamic[1], atom->ntypes, MPI_DOUBLE, MPI_MAX, world);
MPI_Allreduce(&onerad_frozen[1], &maxrad_frozen[1], atom->ntypes, MPI_DOUBLE, MPI_MAX, world);
// if first init, create Fix needed for storing reference configuration neighbors
int igroup = group->find("tlsph");
if (igroup == -1)
error->all(FLERR, "Pair style tlsph requires its particles to be part of a group named tlsph. This group does not exist.");
if (fix_tlsph_reference_configuration == NULL) {
char **fixarg = new char*[3];
fixarg[0] = (char *) "SMD_TLSPH_NEIGHBORS";
fixarg[1] = (char *) "tlsph";
fixarg[2] = (char *) "SMD_TLSPH_NEIGHBORS";
modify->add_fix(3, fixarg);
delete[] fixarg;
fix_tlsph_reference_configuration = (FixSMD_TLSPH_ReferenceConfiguration *) modify->fix[modify->nfix - 1];
fix_tlsph_reference_configuration->pair = this;
}
// find associated SMD_TLSPH_NEIGHBORS fix that must exist
// could have changed locations in fix list since created
ifix_tlsph = -1;
for (int i = 0; i < modify->nfix; i++)
if (strcmp(modify->fix[i]->style, "SMD_TLSPH_NEIGHBORS") == 0)
ifix_tlsph = i;
if (ifix_tlsph == -1)
error->all(FLERR, "Fix SMD_TLSPH_NEIGHBORS does not exist");
}
/* ----------------------------------------------------------------------
neighbor callback to inform pair style of neighbor list to use
optional granular history list
------------------------------------------------------------------------- */
void PairTlsph::init_list(int id, NeighList *ptr) {
if (id == 0)
list = ptr;
}
/* ----------------------------------------------------------------------
memory usage of local atom-based arrays
------------------------------------------------------------------------- */
double PairTlsph::memory_usage() {
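// rough upper bound: the per-atom tensors (deformation gradients, stress, rotation, ...) and scalars kept by this pair style amount to about 118 doubles per atom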
return 118 * nmax * sizeof(double);
}
/* ----------------------------------------------------------------------
extract method to provide access to this class' data structures
------------------------------------------------------------------------- */
void *PairTlsph::extract(const char *str, int &/*i*/) {
//printf("in PairTlsph::extract\n");
if (strcmp(str, "smd/tlsph/Fincr_ptr") == 0) {
return (void *) Fincr;
} else if (strcmp(str, "smd/tlsph/detF_ptr") == 0) {
return (void *) detF;
} else if (strcmp(str, "smd/tlsph/PK1_ptr") == 0) {
return (void *) PK1;
} else if (strcmp(str, "smd/tlsph/smoothVel_ptr") == 0) {
return (void *) smoothVelDifference;
} else if (strcmp(str, "smd/tlsph/numNeighsRefConfig_ptr") == 0) {
return (void *) numNeighsRefConfig;
} else if (strcmp(str, "smd/tlsph/stressTensor_ptr") == 0) {
return (void *) CauchyStress;
} else if (strcmp(str, "smd/tlsph/updateFlag_ptr") == 0) {
return (void *) &updateFlag;
} else if (strcmp(str, "smd/tlsph/strain_rate_ptr") == 0) {
return (void *) D;
} else if (strcmp(str, "smd/tlsph/hMin_ptr") == 0) {
return (void *) &hMin;
} else if (strcmp(str, "smd/tlsph/dtCFL_ptr") == 0) {
return (void *) &dtCFL;
} else if (strcmp(str, "smd/tlsph/dtRelative_ptr") == 0) {
return (void *) &dtRelative;
} else if (strcmp(str, "smd/tlsph/hourglass_error_ptr") == 0) {
return (void *) hourglass_error;
} else if (strcmp(str, "smd/tlsph/particle_dt_ptr") == 0) {
return (void *) particle_dt;
} else if (strcmp(str, "smd/tlsph/rotation_ptr") == 0) {
return (void *) R;
}
return NULL;
}
/* ---------------------------------------------------------------------- */
int PairTlsph::pack_forward_comm(int n, int *list, double *buf, int /*pbc_flag*/, int * /*pbc*/) {
int i, j, m;
tagint *mol = atom->molecule;
double *damage = atom->damage;
double *eff_plastic_strain = atom->eff_plastic_strain;
double *eff_plastic_strain_rate = atom->eff_plastic_strain_rate;
//printf("in PairTlsph::pack_forward_comm\n");
m = 0;
for (i = 0; i < n; i++) {
j = list[i];
buf[m++] = PK1[j](0, 0); // PK1 is not symmetric
buf[m++] = PK1[j](0, 1);
buf[m++] = PK1[j](0, 2);
buf[m++] = PK1[j](1, 0);
buf[m++] = PK1[j](1, 1);
buf[m++] = PK1[j](1, 2);
buf[m++] = PK1[j](2, 0);
buf[m++] = PK1[j](2, 1);
buf[m++] = PK1[j](2, 2); // 9
buf[m++] = Fincr[j](0, 0); // Fincr is not symmetric
buf[m++] = Fincr[j](0, 1);
buf[m++] = Fincr[j](0, 2);
buf[m++] = Fincr[j](1, 0);
buf[m++] = Fincr[j](1, 1);
buf[m++] = Fincr[j](1, 2);
buf[m++] = Fincr[j](2, 0);
buf[m++] = Fincr[j](2, 1);
buf[m++] = Fincr[j](2, 2); // 9 + 9 = 18
buf[m++] = mol[j]; //19
buf[m++] = damage[j]; //20
buf[m++] = eff_plastic_strain[j]; //21
buf[m++] = eff_plastic_strain_rate[j]; //22
}
return m;
}
/* ---------------------------------------------------------------------- */
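// unpack_forward_comm must consume buf in exactly the order pack_forward_comm filled it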
void PairTlsph::unpack_forward_comm(int n, int first, double *buf) {
int i, m, last;
tagint *mol = atom->molecule;
double *damage = atom->damage;
double *eff_plastic_strain = atom->eff_plastic_strain;
double *eff_plastic_strain_rate = atom->eff_plastic_strain_rate;
//printf("in PairTlsph::unpack_forward_comm\n");
m = 0;
last = first + n;
for (i = first; i < last; i++) {
PK1[i](0, 0) = buf[m++]; // PK1 is not symmetric
PK1[i](0, 1) = buf[m++];
PK1[i](0, 2) = buf[m++];
PK1[i](1, 0) = buf[m++];
PK1[i](1, 1) = buf[m++];
PK1[i](1, 2) = buf[m++];
PK1[i](2, 0) = buf[m++];
PK1[i](2, 1) = buf[m++];
PK1[i](2, 2) = buf[m++];
Fincr[i](0, 0) = buf[m++];
Fincr[i](0, 1) = buf[m++];
Fincr[i](0, 2) = buf[m++];
Fincr[i](1, 0) = buf[m++];
Fincr[i](1, 1) = buf[m++];
Fincr[i](1, 2) = buf[m++];
Fincr[i](2, 0) = buf[m++];
Fincr[i](2, 1) = buf[m++];
Fincr[i](2, 2) = buf[m++];
mol[i] = static_cast<int>(buf[m++]);
damage[i] = buf[m++];
eff_plastic_strain[i] = buf[m++]; //22
eff_plastic_strain_rate[i] = buf[m++]; //23
}
}
/* ----------------------------------------------------------------------
compute effective P-wave speed
determined by longitudinal modulus
------------------------------------------------------------------------- */
void PairTlsph::effective_longitudinal_modulus(const int itype, const double dt, const double d_iso, const double p_rate,
const Matrix3d d_dev, const Matrix3d sigma_dev_rate, const double /*damage*/, double &K_eff, double &mu_eff, double &M_eff) {
double M0; // initial longitudinal modulus
double shear_rate_sq;
// if (damage >= 0.5) {
// M_eff = Lookup[M_MODULUS][itype];
// K_eff = Lookup[BULK_MODULUS][itype];
// mu_eff = Lookup[SHEAR_MODULUS][itype];
// return;
// }
M0 = Lookup[M_MODULUS][itype];
if (dt * d_iso > 1.0e-6) {
K_eff = p_rate / d_iso;
if (K_eff < 0.0) { // it is possible for K_eff to become negative due to strain softening
// if (damage == 0.0) {
// error->one(FLERR, "computed a negative effective bulk modulus but particle is not damaged.");
// }
K_eff = Lookup[BULK_MODULUS][itype];
}
} else {
K_eff = Lookup[BULK_MODULUS][itype];
}
if (domain->dimension == 3) {
// Calculate 2 mu by looking at ratio shear stress / shear strain. Use numerical softening to avoid divide-by-zero.
mu_eff = 0.5
* (sigma_dev_rate(0, 1) / (d_dev(0, 1) + 1.0e-16) + sigma_dev_rate(0, 2) / (d_dev(0, 2) + 1.0e-16)
+ sigma_dev_rate(1, 2) / (d_dev(1, 2) + 1.0e-16));
// Calculate magnitude of deviatoric strain rate. This is used for deciding if shear modulus should be computed from current rate or be taken as the initial value.
shear_rate_sq = d_dev(0, 1) * d_dev(0, 1) + d_dev(0, 2) * d_dev(0, 2) + d_dev(1, 2) * d_dev(1, 2);
} else {
mu_eff = 0.5 * (sigma_dev_rate(0, 1) / (d_dev(0, 1) + 1.0e-16));
shear_rate_sq = d_dev(0, 1) * d_dev(0, 1);
}
if (dt * dt * shear_rate_sq < 1.0e-8) {
mu_eff = Lookup[SHEAR_MODULUS][itype];
}
if (mu_eff < Lookup[SHEAR_MODULUS][itype]) { // it is possible for mu_eff to become negative due to strain softening
// if (damage == 0.0) {
// printf("mu_eff = %f, tau=%f, gamma=%f\n", mu_eff, sigma_dev_rate(0, 1), d_dev(0, 1));
// error->message(FLERR, "computed a negative effective shear modulus but particle is not damaged.");
// }
mu_eff = Lookup[SHEAR_MODULUS][itype];
}
//mu_eff = Lookup[SHEAR_MODULUS][itype];
if (K_eff < 0.0) {
printf("K_eff = %f, p_rate=%f, vol_rate=%f\n", K_eff, p_rate, d_iso);
}
if (mu_eff < 0.0) {
printf("mu_eff = %f, tau=%f, gamma=%f\n", mu_eff, sigma_dev_rate(0, 1), d_dev(0, 1));
error->one(FLERR, "computed a negative effective shear modulus");
}
M_eff = (K_eff + 4.0 * mu_eff / 3.0); // effective dilational modulus, see Pronto 2d eqn 3.4.8
if (M_eff < M0) { // do not allow effective dilatational modulus to decrease beyond its initial value
M_eff = M0;
}
}
/* ----------------------------------------------------------------------
compute pressure. Called from AssembleStress().
------------------------------------------------------------------------- */
void PairTlsph::ComputePressure(const int i, const double rho, const double mass_specific_energy, const double vol_specific_energy,
const double pInitial, const double d_iso, double &pFinal, double &p_rate) {
int *type = atom->type;
double dt = update->dt;
int itype;
itype = type[i];
switch (eos[itype]) {
case EOS_LINEAR:
LinearEOS(Lookup[BULK_MODULUS][itype], pInitial, d_iso, dt, pFinal, p_rate);
break;
case EOS_NONE:
pFinal = 0.0;
p_rate = 0.0;
break;
case EOS_SHOCK:
// rho, rho0, e, e0, c0, S, Gamma, pInitial, dt, &pFinal, &p_rate);
ShockEOS(rho, Lookup[REFERENCE_DENSITY][itype], mass_specific_energy, 0.0, Lookup[EOS_SHOCK_C0][itype],
Lookup[EOS_SHOCK_S][itype], Lookup[EOS_SHOCK_GAMMA][itype], pInitial, dt, pFinal, p_rate);
break;
case EOS_POLYNOMIAL:
polynomialEOS(rho, Lookup[REFERENCE_DENSITY][itype], vol_specific_energy, Lookup[EOS_POLYNOMIAL_C0][itype],
Lookup[EOS_POLYNOMIAL_C1][itype], Lookup[EOS_POLYNOMIAL_C2][itype], Lookup[EOS_POLYNOMIAL_C3][itype],
Lookup[EOS_POLYNOMIAL_C4][itype], Lookup[EOS_POLYNOMIAL_C5][itype], Lookup[EOS_POLYNOMIAL_C6][itype], pInitial, dt,
pFinal, p_rate);
break;
default:
error->one(FLERR, "unknown EOS.");
break;
}
}
/* ----------------------------------------------------------------------
Compute stress deviator. Called from AssembleStress().
------------------------------------------------------------------------- */
void PairTlsph::ComputeStressDeviator(const int i, const Matrix3d sigmaInitial_dev, const Matrix3d d_dev, Matrix3d &sigmaFinal_dev,
Matrix3d &sigma_dev_rate, double &plastic_strain_increment) {
double *eff_plastic_strain = atom->eff_plastic_strain;
double *eff_plastic_strain_rate = atom->eff_plastic_strain_rate;
int *type = atom->type;
double *rmass = atom->rmass;
//double *vfrac = atom->vfrac;
double *e = atom->e;
double dt = update->dt;
double yieldStress;
int itype;
double mass_specific_energy = e[i] / rmass[i]; // energy per unit mass
plastic_strain_increment = 0.0;
itype = type[i];
switch (strengthModel[itype]) {
case STRENGTH_LINEAR:
sigma_dev_rate = 2.0 * Lookup[SHEAR_MODULUS][itype] * d_dev;
sigmaFinal_dev = sigmaInitial_dev + dt * sigma_dev_rate;
break;
case LINEAR_DEFGRAD:
//LinearStrengthDefgrad(Lookup[LAME_LAMBDA][itype], Lookup[SHEAR_MODULUS][itype], Fincr[i], &sigmaFinal_dev);
//eff_plastic_strain[i] = 0.0;
//p_rate = pInitial - sigmaFinal_dev.trace() / 3.0;
//sigma_dev_rate = sigmaInitial_dev - Deviator(sigmaFinal_dev);
error->one(FLERR, "LINEAR_DEFGRAD is only for debugging purposes and currently deactivated.");
R[i].setIdentity();
break;
case STRENGTH_LINEAR_PLASTIC:
yieldStress = Lookup[YIELD_STRESS][itype] + Lookup[HARDENING_PARAMETER][itype] * eff_plastic_strain[i];
LinearPlasticStrength(Lookup[SHEAR_MODULUS][itype], yieldStress, sigmaInitial_dev, d_dev, dt, sigmaFinal_dev,
sigma_dev_rate, plastic_strain_increment);
break;
case STRENGTH_JOHNSON_COOK:
JohnsonCookStrength(Lookup[SHEAR_MODULUS][itype], Lookup[HEAT_CAPACITY][itype], mass_specific_energy, Lookup[JC_A][itype],
Lookup[JC_B][itype], Lookup[JC_a][itype], Lookup[JC_C][itype], Lookup[JC_epdot0][itype], Lookup[JC_T0][itype],
Lookup[JC_Tmelt][itype], Lookup[JC_M][itype], dt, eff_plastic_strain[i], eff_plastic_strain_rate[i],
sigmaInitial_dev, d_dev, sigmaFinal_dev, sigma_dev_rate, plastic_strain_increment);
break;
case STRENGTH_NONE:
sigmaFinal_dev.setZero();
sigma_dev_rate.setZero();
break;
default:
error->one(FLERR, "unknown strength model.");
break;
}
}
/* ----------------------------------------------------------------------
Compute damage. Called from AssembleStress().
------------------------------------------------------------------------- */
void PairTlsph::ComputeDamage(const int i, const Matrix3d strain, const Matrix3d stress, Matrix3d &/*stress_damaged*/) {
double *eff_plastic_strain = atom->eff_plastic_strain;
double *eff_plastic_strain_rate = atom->eff_plastic_strain_rate;
double *radius = atom->radius;
double *damage = atom->damage;
int *type = atom->type;
int itype = type[i];
double jc_failure_strain;
//double damage_gap, damage_rate;
Matrix3d eye, stress_deviator;
eye.setIdentity();
stress_deviator = Deviator(stress);
double pressure = -stress.trace() / 3.0;
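// mechanical pressure, defined positive in compression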
if (failureModel[itype].failure_max_principal_stress) {
error->one(FLERR, "not yet implemented");
/*
* maximum stress failure criterion:
*/
IsotropicMaxStressDamage(stress, Lookup[FAILURE_MAX_PRINCIPAL_STRESS_THRESHOLD][itype]);
} else if (failureModel[itype].failure_max_principal_strain) {
error->one(FLERR, "not yet implemented");
/*
* maximum strain failure criterion:
*/
IsotropicMaxStrainDamage(strain, Lookup[FAILURE_MAX_PRINCIPAL_STRAIN_THRESHOLD][itype]);
} else if (failureModel[itype].failure_max_plastic_strain) {
if (eff_plastic_strain[i] >= Lookup[FAILURE_MAX_PLASTIC_STRAIN_THRESHOLD][itype]) {
damage[i] = 1.0;
//double damage_gap = 0.5 * Lookup[FAILURE_MAX_PLASTIC_STRAIN_THRESHOLD][itype];
//damage[i] = (eff_plastic_strain[i] - Lookup[FAILURE_MAX_PLASTIC_STRAIN_THRESHOLD][itype]) / damage_gap;
}
} else if (failureModel[itype].failure_johnson_cook) {
//cout << "this is stress deviator" << stress_deviator << endl;
jc_failure_strain = JohnsonCookFailureStrain(pressure, stress_deviator, Lookup[FAILURE_JC_D1][itype],
Lookup[FAILURE_JC_D2][itype], Lookup[FAILURE_JC_D3][itype], Lookup[FAILURE_JC_D4][itype],
Lookup[FAILURE_JC_EPDOT0][itype], eff_plastic_strain_rate[i]);
//cout << "plastic strain increment is " << plastic_strain_increment << " jc fs is " << jc_failure_strain << endl;
//printf("JC failure strain is: %f\n", jc_failure_strain);
if (eff_plastic_strain[i] >= jc_failure_strain) {
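// ramp damage up gradually (full failure after roughly the time a signal needs to travel 100 particle radii) instead of failing the particle instantly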
double damage_rate = Lookup[SIGNAL_VELOCITY][itype] / (100.0 * radius[i]);
damage[i] += damage_rate * update->dt;
//damage[i] = 1.0;
}
}
/*
* Apply damage to integration point
*/
// damage[i] = MIN(damage[i], 0.8);
//
// if (pressure > 0.0) { // compression: particle can carry compressive load but reduced shear
// stress_damaged = -pressure * eye + (1.0 - damage[i]) * Deviator(stress);
// } else { // tension: particle has reduced tensile and shear load bearing capability
// stress_damaged = (1.0 - damage[i]) * (-pressure * eye + Deviator(stress));
// }
}
| 1 | 25,192 | this one bothers me. There are many places where this one is written to for an error message and just a single callsite is changed. | lammps-lammps | cpp |
@@ -706,6 +706,7 @@ func TestKBFSOpsGetBaseDirChildrenCacheSuccess(t *testing.T) {
ops := getOps(config, id)
n := nodeFromPath(t, ops, p)
+ config.mockMdserv.EXPECT().FastForwardBackoff().AnyTimes()
children, err := config.KBFSOps().GetDirChildren(ctx, n)
if err != nil {
t.Errorf("Got error on getdir: %+v", err)
| 1 |
// Copyright 2016 Keybase Inc. All rights reserved.
// Use of this source code is governed by a BSD
// license that can be found in the LICENSE file.
package libkbfs
import (
"bytes"
"fmt"
"math/rand"
"testing"
"time"
"github.com/golang/mock/gomock"
"github.com/keybase/client/go/libkb"
"github.com/keybase/client/go/protocol/keybase1"
"github.com/keybase/go-codec/codec"
"github.com/keybase/kbfs/kbfsblock"
"github.com/keybase/kbfs/kbfscodec"
"github.com/keybase/kbfs/kbfscrypto"
"github.com/keybase/kbfs/kbfshash"
"github.com/keybase/kbfs/kbfsmd"
"github.com/keybase/kbfs/tlf"
"github.com/pkg/errors"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"golang.org/x/net/context"
)
type CheckBlockOps struct {
BlockOps
tr gomock.TestReporter
}
var _ BlockOps = (*CheckBlockOps)(nil)
func (cbo *CheckBlockOps) Ready(ctx context.Context, kmd KeyMetadata,
block Block) (id kbfsblock.ID, plainSize int, readyBlockData ReadyBlockData,
err error) {
id, plainSize, readyBlockData, err = cbo.BlockOps.Ready(ctx, kmd, block)
encodedSize := readyBlockData.GetEncodedSize()
if plainSize > encodedSize {
cbo.tr.Errorf("expected plainSize <= encodedSize, got plainSize = %d, "+
"encodedSize = %d", plainSize, encodedSize)
}
return
}
type tCtxIDType int
const (
tCtxID tCtxIDType = iota
)
// Time out individual tests after 10 seconds.
var individualTestTimeout = 10 * time.Second
func kbfsOpsInit(t *testing.T) (mockCtrl *gomock.Controller,
config *ConfigMock, ctx context.Context, cancel context.CancelFunc) {
ctr := NewSafeTestReporter(t)
mockCtrl = gomock.NewController(ctr)
config = NewConfigMock(mockCtrl, ctr)
config.SetCodec(kbfscodec.NewMsgpack())
blockops := &CheckBlockOps{config.mockBops, ctr}
config.SetBlockOps(blockops)
kbfsops := NewKBFSOpsStandard(config)
config.SetKBFSOps(kbfsops)
config.SetNotifier(kbfsops)
// Use real caches, to avoid the overhead of tracking cache calls.
// Each test is expected to check the cache for correctness at the
// end of the test.
config.SetBlockCache(NewBlockCacheStandard(100, 1<<30))
config.SetDirtyBlockCache(NewDirtyBlockCacheStandard(wallClock{},
config.MakeLogger(""), 5<<20, 10<<20, 5<<20))
config.mockBcache = nil
config.mockDirtyBcache = nil
// These tests don't rely on external notifications at all, so ignore any
// goroutine attempting to register:
c := make(chan error, 1)
config.mockMdserv.EXPECT().RegisterForUpdate(gomock.Any(),
gomock.Any(), gomock.Any()).AnyTimes().Return(c, nil)
config.mockMdserv.EXPECT().OffsetFromServerTime().
Return(time.Duration(0), true).AnyTimes()
// None of these tests depend on time
config.mockClock.EXPECT().Now().AnyTimes().Return(time.Now())
// Ignore Notify calls for now
config.mockRep.EXPECT().Notify(gomock.Any(), gomock.Any()).AnyTimes()
// Max out MaxPtrsPerBlock
config.mockBsplit.EXPECT().MaxPtrsPerBlock().
Return(int((^uint(0)) >> 1)).AnyTimes()
// Ignore Archive calls for now
config.mockBops.EXPECT().Archive(gomock.Any(), gomock.Any(),
gomock.Any()).AnyTimes().Return(nil)
// Ignore Prefetcher calls
brc := &testBlockRetrievalConfig{nil, newTestLogMaker(t),
config.BlockCache(), nil, newTestDiskBlockCacheGetter(t, nil)}
pre := newBlockPrefetcher(nil, brc)
config.mockBops.EXPECT().Prefetcher().AnyTimes().Return(pre)
// Ignore BlockRetriever calls
brq := newBlockRetrievalQueue(0, brc)
config.mockBops.EXPECT().BlockRetriever().AnyTimes().Return(brq)
// Ignore key bundle ID creation calls for now
config.mockCrypto.EXPECT().MakeTLFWriterKeyBundleID(gomock.Any()).
AnyTimes().Return(TLFWriterKeyBundleID{}, nil)
config.mockCrypto.EXPECT().MakeTLFReaderKeyBundleID(gomock.Any()).
AnyTimes().Return(TLFReaderKeyBundleID{}, nil)
// Ignore favorites
config.mockKbpki.EXPECT().FavoriteList(gomock.Any()).AnyTimes().
Return(nil, nil)
config.mockKbpki.EXPECT().FavoriteAdd(gomock.Any(), gomock.Any()).
AnyTimes().Return(nil)
interposeDaemonKBPKI(config, "alice", "bob", "charlie")
timeoutCtx, cancel := context.WithTimeout(
context.Background(), individualTestTimeout)
initSuccess := false
defer func() {
if !initSuccess {
cancel()
}
}()
// make the context identifiable, to verify that it is passed
// correctly to the observer
id := rand.Int()
ctx, err := NewContextWithCancellationDelayer(NewContextReplayable(
timeoutCtx, func(ctx context.Context) context.Context {
return context.WithValue(ctx, tCtxID, id)
}))
if err != nil {
t.Fatal(err)
}
initSuccess = true
return mockCtrl, config, ctx, cancel
}
func kbfsTestShutdown(mockCtrl *gomock.Controller, config *ConfigMock,
ctx context.Context, cancel context.CancelFunc) {
config.ctr.CheckForFailures()
config.KBFSOps().(*KBFSOpsStandard).Shutdown(ctx)
if config.mockDirtyBcache == nil {
// Ignore any error; some tests intentionally leave around dirty data.
_ = config.DirtyBlockCache().Shutdown()
}
cancel()
if err := CleanupCancellationDelayer(ctx); err != nil {
panic(err)
}
mockCtrl.Finish()
}
// kbfsOpsInitNoMocks returns a config that doesn't use any mocks. The
// shutdown call is kbfsTestShutdownNoMocks.
func kbfsOpsInitNoMocks(t *testing.T, users ...libkb.NormalizedUsername) (
*ConfigLocal, keybase1.UID, context.Context, context.CancelFunc) {
config := MakeTestConfigOrBust(t, users...)
config.SetRekeyWithPromptWaitTime(individualTestTimeout)
timeoutCtx, cancel := context.WithTimeout(
context.Background(), individualTestTimeout)
initSuccess := false
defer func() {
if !initSuccess {
cancel()
}
}()
ctx, err := NewContextWithCancellationDelayer(NewContextReplayable(
timeoutCtx, func(c context.Context) context.Context {
return c
}))
if err != nil {
t.Fatal(err)
}
session, err := config.KBPKI().GetCurrentSession(ctx)
if err != nil {
t.Fatal(err)
}
initSuccess = true
return config, session.UID, ctx, cancel
}
func kbfsTestShutdownNoMocks(t *testing.T, config *ConfigLocal,
ctx context.Context, cancel context.CancelFunc) {
CheckConfigAndShutdown(ctx, t, config)
cancel()
CleanupCancellationDelayer(ctx)
}
// TODO: Get rid of all users of this.
func kbfsTestShutdownNoMocksNoCheck(t *testing.T, config *ConfigLocal,
ctx context.Context, cancel context.CancelFunc) {
config.Shutdown(ctx)
cancel()
CleanupCancellationDelayer(ctx)
}
func checkBlockCache(t *testing.T, config *ConfigMock, id tlf.ID,
expectedCleanBlocks []kbfsblock.ID,
expectedDirtyBlocks map[BlockPointer]BranchName) {
bcache := config.BlockCache().(*BlockCacheStandard)
// make sure the LRU consists of exactly the right set of clean blocks
for _, id := range expectedCleanBlocks {
_, ok := bcache.cleanTransient.Get(id)
if !ok {
t.Errorf("BlockCache missing clean block %v at the end of the test",
id)
}
}
if bcache.cleanTransient.Len() != len(expectedCleanBlocks) {
t.Errorf("BlockCache has extra clean blocks at end of test")
}
// make sure the dirty cache consists of exactly the right set of
// dirty blocks
dirtyBcache := config.DirtyBlockCache().(*DirtyBlockCacheStandard)
for ptr, branch := range expectedDirtyBlocks {
_, err := dirtyBcache.Get(id, ptr, branch)
if err != nil {
t.Errorf("BlockCache missing dirty block %v, branch %s at "+
"the end of the test: err %+v", ptr, branch, err)
}
if !dirtyBcache.IsDirty(id, ptr, branch) {
t.Errorf("BlockCache has incorrectly clean block %v, branch %s at "+
"the end of the test: err %+v", ptr, branch, err)
}
}
if len(dirtyBcache.cache) != len(expectedDirtyBlocks) {
t.Errorf("BlockCache has extra dirty blocks at end of test")
}
}
func TestKBFSOpsGetFavoritesSuccess(t *testing.T) {
config, _, ctx, cancel := kbfsOpsInitNoMocks(t, "alice", "bob")
defer kbfsTestShutdownNoMocks(t, config, ctx, cancel)
handle1 := parseTlfHandleOrBust(t, config, "alice", false)
handle2 := parseTlfHandleOrBust(t, config, "alice,bob", false)
// dup for testing
handles := []*TlfHandle{handle1, handle2, handle2}
for _, h := range handles {
config.KeybaseService().FavoriteAdd(
context.Background(), h.ToFavorite().toKBFolder(false))
}
// The favorites list contains our own public dir by default, even
// if KBPKI doesn't return it.
handle3 := parseTlfHandleOrBust(t, config, "alice", true)
handles = append(handles, handle3)
handles2, err := config.KBFSOps().GetFavorites(ctx)
if err != nil {
t.Errorf("Got error on favorites: %+v", err)
}
if len(handles2) != len(handles)-1 {
t.Errorf("Got bad handles back: %v", handles2)
}
}
func TestKBFSOpsGetFavoritesFail(t *testing.T) {
mockCtrl, config, ctx, cancel := kbfsOpsInit(t)
defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)
err := errors.New("Fake fail")
// Replace the old one (added in init function)
config.mockKbpki = NewMockKBPKI(mockCtrl)
config.SetKBPKI(config.mockKbpki)
// expect one call to favorites, and fail it
config.mockKbpki.EXPECT().FavoriteList(gomock.Any()).Return(nil, err)
if _, err2 := config.KBFSOps().GetFavorites(ctx); err2 != err {
t.Errorf("Got bad error on favorites: %+v", err2)
}
}
func getOps(config Config, id tlf.ID) *folderBranchOps {
return config.KBFSOps().(*KBFSOpsStandard).
getOpsNoAdd(FolderBranch{id, MasterBranch})
}
// createNewRMD creates a new RMD for the given name. Returns its ID
// and handle also.
func createNewRMD(t *testing.T, config Config, name string, public bool) (
tlf.ID, *TlfHandle, *RootMetadata) {
id := tlf.FakeID(1, public)
h := parseTlfHandleOrBust(t, config, name, public)
rmd, err := makeInitialRootMetadata(config.MetadataVersion(), id, h)
require.NoError(t, err)
return id, h, rmd
}
func makeImmutableRMDForTest(t *testing.T, config Config, rmd *RootMetadata,
mdID kbfsmd.ID) ImmutableRootMetadata {
session, err := config.KBPKI().GetCurrentSession(context.Background())
require.NoError(t, err)
// We have to fake out the signature here because most tests
// in this file modify the returned value, invalidating any
// real signatures. TODO: Fix all the tests in this file to
// not do so, and then just use MakeImmutableRootMetadata.
if brmdv2, ok := rmd.bareMd.(*BareRootMetadataV2); ok {
vk := brmdv2.WriterMetadataSigInfo.VerifyingKey
require.True(t, vk == (kbfscrypto.VerifyingKey{}) || vk == session.VerifyingKey,
"Writer signature %s with unexpected non-nil verifying key != %s",
brmdv2.WriterMetadataSigInfo, session.VerifyingKey)
brmdv2.WriterMetadataSigInfo = kbfscrypto.SignatureInfo{
VerifyingKey: session.VerifyingKey,
}
}
return MakeImmutableRootMetadata(rmd, session.VerifyingKey, mdID, time.Now())
}
// injectNewRMD creates a new RMD and makes sure the existing ops for
// its ID has as its head that RMD.
func injectNewRMD(t *testing.T, config *ConfigMock) (
keybase1.UID, tlf.ID, *RootMetadata) {
id, h, rmd := createNewRMD(t, config, "alice", false)
var keyGen KeyGen
if id.IsPublic() {
keyGen = PublicKeyGen
} else {
keyGen = 1
}
rmd.data.Dir = DirEntry{
BlockInfo: BlockInfo{
BlockPointer: BlockPointer{
KeyGen: keyGen,
DataVer: 1,
},
EncodedSize: 1,
},
}
rmd.fakeInitialRekey()
ops := getOps(config, id)
ops.head = makeImmutableRMDForTest(
t, config, rmd, kbfsmd.FakeID(tlf.FakeIDByte(id)))
ops.headStatus = headTrusted
rmd.SetSerializedPrivateMetadata(make([]byte, 1))
config.Notifier().RegisterForChanges(
[]FolderBranch{{id, MasterBranch}}, config.observer)
uid := h.FirstResolvedWriter()
rmd.data.Dir.Creator = uid.AsUserOrTeam()
return uid, id, rmd
}
func TestKBFSOpsGetRootNodeCacheSuccess(t *testing.T) {
mockCtrl, config, ctx, cancel := kbfsOpsInit(t)
defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)
_, id, rmd := injectNewRMD(t, config)
rmd.data.Dir.BlockPointer.ID = kbfsblock.FakeID(1)
rmd.data.Dir.Type = Dir
ops := getOps(config, id)
assert.False(t, fboIdentityDone(ops))
n, ei, h, err := ops.getRootNode(ctx)
require.NoError(t, err)
assert.False(t, fboIdentityDone(ops))
p := ops.nodeCache.PathFromNode(n)
assert.Equal(t, id, p.Tlf)
require.Equal(t, 1, len(p.path))
assert.Equal(t, rmd.data.Dir.ID, p.path[0].ID)
assert.Equal(t, rmd.data.Dir.EntryInfo, ei)
assert.Equal(t, rmd.GetTlfHandle(), h)
// Trigger identify.
lState := makeFBOLockState()
_, err = ops.getMDForReadLocked(ctx, lState, mdReadNeedIdentify)
require.NoError(t, err)
assert.True(t, fboIdentityDone(ops))
}
func TestKBFSOpsGetRootNodeReIdentify(t *testing.T) {
mockCtrl, config, ctx, cancel := kbfsOpsInit(t)
defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)
_, id, rmd := injectNewRMD(t, config)
rmd.data.Dir.BlockPointer.ID = kbfsblock.FakeID(1)
rmd.data.Dir.Type = Dir
ops := getOps(config, id)
assert.False(t, fboIdentityDone(ops))
n, ei, h, err := ops.getRootNode(ctx)
require.NoError(t, err)
assert.False(t, fboIdentityDone(ops))
p := ops.nodeCache.PathFromNode(n)
assert.Equal(t, id, p.Tlf)
require.Equal(t, 1, len(p.path))
assert.Equal(t, rmd.data.Dir.ID, p.path[0].ID)
assert.Equal(t, rmd.data.Dir.EntryInfo, ei)
assert.Equal(t, rmd.GetTlfHandle(), h)
// Trigger identify.
lState := makeFBOLockState()
_, err = ops.getMDForReadLocked(ctx, lState, mdReadNeedIdentify)
require.NoError(t, err)
assert.True(t, fboIdentityDone(ops))
// Mark everything for reidentifying, and wait for it to finish
// before checking.
kop := config.KBFSOps().(*KBFSOpsStandard)
returnCh := make(chan struct{})
kop.reIdentifyControlChan <- returnCh
<-returnCh
assert.False(t, fboIdentityDone(ops))
// Trigger new identify.
lState = makeFBOLockState()
_, err = ops.getMDForReadLocked(ctx, lState, mdReadNeedIdentify)
require.NoError(t, err)
assert.True(t, fboIdentityDone(ops))
}
// fboIdentityDone is needed to avoid data races.
func fboIdentityDone(fbo *folderBranchOps) bool {
fbo.identifyLock.Lock()
defer fbo.identifyLock.Unlock()
return fbo.identifyDone
}
type failIdentifyKBPKI struct {
KBPKI
identifyErr error
}
func (kbpki failIdentifyKBPKI) Identify(ctx context.Context, assertion, reason string) (UserInfo, error) {
return UserInfo{}, kbpki.identifyErr
}
func TestKBFSOpsGetRootNodeCacheIdentifyFail(t *testing.T) {
mockCtrl, config, ctx, cancel := kbfsOpsInit(t)
defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)
_, id, rmd := injectNewRMD(t, config)
rmd.data.Dir.BlockPointer.ID = kbfsblock.FakeID(1)
rmd.data.Dir.Type = Dir
ops := getOps(config, id)
expectedErr := errors.New("Identify failure")
config.SetKBPKI(failIdentifyKBPKI{config.KBPKI(), expectedErr})
// Trigger identify.
lState := makeFBOLockState()
_, err := ops.getMDForReadLocked(ctx, lState, mdReadNeedIdentify)
assert.Equal(t, expectedErr, err)
assert.False(t, fboIdentityDone(ops))
}
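// expectBlock wires the mock BlockOps.Get to fill in the requested block and
// mirror it into the block cache, simulating a successful server fetch.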
func expectBlock(config *ConfigMock, kmd KeyMetadata, blockPtr BlockPointer, block Block, err error) {
config.mockBops.EXPECT().Get(gomock.Any(), kmdMatcher{kmd},
ptrMatcher{blockPtr}, gomock.Any(), gomock.Any()).
Do(func(ctx context.Context, kmd KeyMetadata,
blockPtr BlockPointer, getBlock Block, lifetime BlockCacheLifetime) {
getBlock.Set(block)
config.BlockCache().Put(blockPtr, kmd.TlfID(), getBlock, lifetime)
}).Return(err)
}
// ptrMatcher implements the gomock.Matcher interface to compare
// BlockPointer objects. We don't care about some of the fields in a
// pointer for the purposes of these tests.
type ptrMatcher struct {
ptr BlockPointer
}
// Matches implements the Matcher interface for ptrMatcher.
func (p ptrMatcher) Matches(x interface{}) bool {
xPtr, ok := x.(BlockPointer)
if !ok {
return false
}
return (xPtr.ID == p.ptr.ID && xPtr.RefNonce == p.ptr.RefNonce)
}
// String implements the Matcher interface for ptrMatcher.
func (p ptrMatcher) String() string {
return fmt.Sprintf("Matches BlockPointer %v", p.ptr)
}
func fillInNewMD(t *testing.T, config *ConfigMock, rmd *RootMetadata) {
if !rmd.TlfID().IsPublic() {
rmd.fakeInitialRekey()
}
rootPtr := BlockPointer{
ID: kbfsblock.FakeID(42),
KeyGen: 1,
DataVer: 1,
}
rmd.data.Dir = DirEntry{
BlockInfo: BlockInfo{
BlockPointer: rootPtr,
EncodedSize: 5,
},
EntryInfo: EntryInfo{
Type: Dir,
Size: 3,
},
}
}
func testKBFSOpsGetRootNodeCreateNewSuccess(t *testing.T, public bool) {
mockCtrl, config, ctx, cancel := kbfsOpsInit(t)
defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)
id, h, rmd := createNewRMD(t, config, "alice", public)
fillInNewMD(t, config, rmd)
// create a new MD
config.mockMdops.EXPECT().GetUnmergedForTLF(
gomock.Any(), id, gomock.Any()).Return(ImmutableRootMetadata{}, nil)
irmd := makeImmutableRMDForTest(t, config, rmd, kbfsmd.FakeID(1))
config.mockMdops.EXPECT().GetForTLF(gomock.Any(), id).Return(irmd, nil)
ops := getOps(config, id)
assert.False(t, fboIdentityDone(ops))
n, ei, h, err := ops.getRootNode(ctx)
require.NoError(t, err)
assert.True(t, fboIdentityDone(ops))
p := ops.nodeCache.PathFromNode(n)
require.Equal(t, id, p.Tlf)
require.Equal(t, 1, len(p.path))
require.Equal(t, rmd.data.Dir.ID, p.path[0].ID)
require.Equal(t, rmd.data.Dir.EntryInfo, ei)
require.Equal(t, rmd.GetTlfHandle(), h)
}
func TestKBFSOpsGetRootNodeCreateNewSuccessPublic(t *testing.T) {
testKBFSOpsGetRootNodeCreateNewSuccess(t, true)
}
func TestKBFSOpsGetRootNodeCreateNewSuccessPrivate(t *testing.T) {
testKBFSOpsGetRootNodeCreateNewSuccess(t, false)
}
func TestKBFSOpsGetRootMDForHandleExisting(t *testing.T) {
mockCtrl, config, ctx, cancel := kbfsOpsInit(t)
defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)
id, h, rmd := createNewRMD(t, config, "alice", false)
rmd.data.Dir = DirEntry{
BlockInfo: BlockInfo{
BlockPointer: BlockPointer{
ID: kbfsblock.FakeID(1),
},
EncodedSize: 15,
},
EntryInfo: EntryInfo{
Type: Dir,
Size: 10,
Mtime: 1,
Ctime: 2,
},
}
config.mockMdops.EXPECT().GetForHandle(gomock.Any(), h, Unmerged).Return(
tlf.ID{}, ImmutableRootMetadata{}, nil)
config.mockMdops.EXPECT().GetForHandle(gomock.Any(), h, Merged).Return(
tlf.ID{}, makeImmutableRMDForTest(t, config, rmd, kbfsmd.FakeID(1)), nil)
ops := getOps(config, id)
assert.False(t, fboIdentityDone(ops))
ops.head = makeImmutableRMDForTest(t, config, rmd, kbfsmd.FakeID(2))
ops.headStatus = headTrusted
n, ei, err :=
config.KBFSOps().GetOrCreateRootNode(ctx, h, MasterBranch)
require.NoError(t, err)
assert.True(t, fboIdentityDone(ops))
p := ops.nodeCache.PathFromNode(n)
if p.Tlf != id {
t.Errorf("Got bad dir id back: %v", p.Tlf)
} else if len(p.path) != 1 {
t.Errorf("Got bad MD back: path size %d", len(p.path))
} else if p.path[0].ID != rmd.data.Dir.ID {
t.Errorf("Got bad MD back: root ID %v", p.path[0].ID)
} else if ei.Type != Dir {
t.Error("Got bad MD non-dir rootID back")
} else if ei.Size != 10 {
t.Errorf("Got bad MD Size back: %d", ei.Size)
} else if ei.Mtime != 1 {
t.Errorf("Got bad MD MTime back: %d", ei.Mtime)
} else if ei.Ctime != 2 {
t.Errorf("Got bad MD CTime back: %d", ei.Ctime)
}
}
// rmd should really be a ReadOnlyRootMetadata or *BareRootMetadata in
// the helper functions below, but then all the callers would have to
// call md.ReadOnly(), which doesn't buy us much in tests.
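// makeBP constructs a fake BlockPointer for the given block ID, with
// the given user as creator, at the metadata's latest key generation.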
func makeBP(id kbfsblock.ID, kmd KeyMetadata, config Config,
u keybase1.UID) BlockPointer {
return BlockPointer{
ID: id,
KeyGen: kmd.LatestKeyGeneration(),
DataVer: DefaultNewBlockDataVersion(false),
Context: kbfsblock.Context{
Creator: u.AsUserOrTeam(),
// Refnonces not needed; explicit refnonce
// testing happens elsewhere.
},
}
}
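// makeBI wraps the BlockPointer from makeBP in a BlockInfo with the
// given encoded size.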
func makeBI(id kbfsblock.ID, kmd KeyMetadata, config Config,
u keybase1.UID, encodedSize uint32) BlockInfo {
return BlockInfo{
BlockPointer: makeBP(id, kmd, config, u),
EncodedSize: encodedSize,
}
}
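// makeIFP constructs an IndirectFilePtr for the given block ID at the
// given offset within the file.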
func makeIFP(id kbfsblock.ID, kmd KeyMetadata, config Config,
u keybase1.UID, encodedSize uint32, off int64) IndirectFilePtr {
return IndirectFilePtr{
BlockInfo{
BlockPointer: makeBP(id, kmd, config, u),
EncodedSize: encodedSize,
},
off,
false,
codec.UnknownFieldSetHandler{},
}
}
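// makeBIFromID constructs a BlockInfo for the given block ID and
// creator, fixed at key generation 1, data version 1, and encoded
// size 1.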
func makeBIFromID(id kbfsblock.ID, user keybase1.UserOrTeamID) BlockInfo {
return BlockInfo{
BlockPointer: BlockPointer{
ID: id, KeyGen: 1, DataVer: 1,
Context: kbfsblock.Context{
Creator: user,
},
},
EncodedSize: 1,
}
}
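// nodeFromPath primes the node cache of the given folderBranchOps with
// every node along p and returns the final (tail) node.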
func nodeFromPath(t *testing.T, ops *folderBranchOps, p path) Node {
var prevNode Node
// populate the node cache with all the nodes we'll need
for _, pathNode := range p.path {
n, err := ops.nodeCache.GetOrCreate(pathNode.BlockPointer,
pathNode.Name, prevNode)
if err != nil {
t.Fatal(err)
}
prevNode = n
}
return prevNode
}
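// testPutBlockInCache stores the block in the block cache and, if a
// mock block cache is in use, allows any number of Gets for the same
// pointer.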
func testPutBlockInCache(
t *testing.T, config *ConfigMock, ptr BlockPointer, id tlf.ID,
block Block) {
err := config.BlockCache().Put(ptr, id, block, TransientEntry)
require.NoError(t, err)
if config.mockBcache != nil {
config.mockBcache.EXPECT().Get(ptr).AnyTimes().Return(block, nil)
}
}
func TestKBFSOpsGetBaseDirChildrenCacheSuccess(t *testing.T) {
mockCtrl, config, ctx, cancel := kbfsOpsInit(t)
defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)
u, id, rmd := injectNewRMD(t, config)
rootID := kbfsblock.FakeID(42)
dirBlock := NewDirBlock().(*DirBlock)
dirBlock.Children["a"] = DirEntry{EntryInfo: EntryInfo{Type: File}}
dirBlock.Children["b"] = DirEntry{EntryInfo: EntryInfo{Type: Dir}}
blockPtr := makeBP(rootID, rmd, config, u)
rmd.data.Dir.BlockPointer = blockPtr
node := pathNode{blockPtr, "p"}
p := path{FolderBranch{Tlf: id}, []pathNode{node}}
testPutBlockInCache(t, config, node.BlockPointer, id, dirBlock)
ops := getOps(config, id)
n := nodeFromPath(t, ops, p)
children, err := config.KBFSOps().GetDirChildren(ctx, n)
if err != nil {
t.Errorf("Got error on getdir: %+v", err)
} else if len(children) != 2 {
t.Errorf("Got bad children back: %v", children)
}
for c, ei := range children {
if de, ok := dirBlock.Children[c]; !ok {
t.Errorf("No such child: %s", c)
} else if de.EntryInfo != ei {
t.Errorf("Wrong EntryInfo for child %s: %v", c, ei)
}
}
}
func TestKBFSOpsGetBaseDirChildrenUncachedSuccess(t *testing.T) {
mockCtrl, config, ctx, cancel := kbfsOpsInit(t)
defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)
u, id, rmd := injectNewRMD(t, config)
rootID := kbfsblock.FakeID(42)
dirBlock := NewDirBlock().(*DirBlock)
blockPtr := makeBP(rootID, rmd, config, u)
rmd.data.Dir.BlockPointer = blockPtr
node := pathNode{blockPtr, "p"}
p := path{FolderBranch{Tlf: id}, []pathNode{node}}
ops := getOps(config, id)
n := nodeFromPath(t, ops, p)
// cache miss means fetching metadata and getting read key
expectBlock(config, rmd, blockPtr, dirBlock, nil)
if _, err := config.KBFSOps().GetDirChildren(ctx, n); err != nil {
t.Errorf("Got error on getdir: %+v", err)
}
}
func TestKBFSOpsGetBaseDirChildrenUncachedFailNonReader(t *testing.T) {
mockCtrl, config, ctx, cancel := kbfsOpsInit(t)
defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)
id := tlf.FakeID(1, false)
h := parseTlfHandleOrBust(t, config, "bob#alice", false)
// Hack around access check in ParseTlfHandle.
h.resolvedReaders = nil
rmd, err := makeInitialRootMetadata(config.MetadataVersion(), id, h)
require.NoError(t, err)
session, err := config.KBPKI().GetCurrentSession(ctx)
if err != nil {
t.Fatal(err)
}
rootID := kbfsblock.FakeID(42)
node := pathNode{makeBP(rootID, rmd, config, session.UID), "p"}
p := path{FolderBranch{Tlf: id}, []pathNode{node}}
// won't even try getting the block if the user isn't a reader
ops := getOps(config, id)
n := nodeFromPath(t, ops, p)
ops.head = makeImmutableRMDForTest(t, config, rmd, kbfsmd.FakeID(1))
ops.headStatus = headTrusted
expectedErr := NewReadAccessError(h, "alice", "/keybase/private/bob#alice")
if _, err := config.KBFSOps().GetDirChildren(ctx, n); err == nil {
t.Errorf("Got no expected error on getdir")
} else if err != expectedErr {
t.Errorf("Got unexpected error on root MD: %+v", err)
}
}
func TestKBFSOpsGetBaseDirChildrenUncachedFailMissingBlock(t *testing.T) {
mockCtrl, config, ctx, cancel := kbfsOpsInit(t)
defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)
u, id, rmd := injectNewRMD(t, config)
rootID := kbfsblock.FakeID(42)
dirBlock := NewDirBlock().(*DirBlock)
blockPtr := makeBP(rootID, rmd, config, u)
rmd.data.Dir.BlockPointer = blockPtr
node := pathNode{blockPtr, "p"}
p := path{FolderBranch{Tlf: id}, []pathNode{node}}
ops := getOps(config, id)
n := nodeFromPath(t, ops, p)
// cache miss means fetching metadata and getting read key, then
// fail block fetch
err := NoSuchBlockError{rootID}
expectBlock(config, rmd, blockPtr, dirBlock, err)
if _, err2 := config.KBFSOps().GetDirChildren(ctx, n); err2 == nil {
t.Errorf("Got no expected error on getdir")
} else if err2 != err {
t.Errorf("Got unexpected error on root MD: %+v", err)
}
}
func TestKBFSOpsGetNestedDirChildrenCacheSuccess(t *testing.T) {
mockCtrl, config, ctx, cancel := kbfsOpsInit(t)
defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)
id, h, rmd := createNewRMD(t, config, "alice", false)
ops := getOps(config, id)
ops.head = makeImmutableRMDForTest(t, config, rmd, kbfsmd.FakeID(1))
ops.headStatus = headTrusted
u := h.FirstResolvedWriter()
rootID := kbfsblock.FakeID(42)
aID := kbfsblock.FakeID(43)
bID := kbfsblock.FakeID(44)
dirBlock := NewDirBlock().(*DirBlock)
dirBlock.Children["a"] = DirEntry{EntryInfo: EntryInfo{Type: Exec}}
dirBlock.Children["b"] = DirEntry{EntryInfo: EntryInfo{Type: Sym}}
blockPtr := makeBP(rootID, rmd, config, u)
rmd.data.Dir.BlockPointer = blockPtr
node := pathNode{blockPtr, "p"}
aNode := pathNode{makeBP(aID, rmd, config, u), "a"}
bNode := pathNode{makeBP(bID, rmd, config, u), "b"}
p := path{FolderBranch{Tlf: id}, []pathNode{node, aNode, bNode}}
n := nodeFromPath(t, ops, p)
testPutBlockInCache(t, config, bNode.BlockPointer, id, dirBlock)
children, err := config.KBFSOps().GetDirChildren(ctx, n)
if err != nil {
t.Errorf("Got error on getdir: %+v", err)
} else if len(children) != 2 {
t.Errorf("Got bad children back: %v", children)
}
for c, ei := range children {
if de, ok := dirBlock.Children[c]; !ok {
t.Errorf("No such child: %s", c)
} else if de.EntryInfo != ei {
t.Errorf("Wrong EntryInfo for child %s: %v", c, ei)
}
}
}
func TestKBFSOpsLookupSuccess(t *testing.T) {
mockCtrl, config, ctx, cancel := kbfsOpsInit(t)
defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)
id, h, rmd := createNewRMD(t, config, "alice", false)
ops := getOps(config, id)
ops.head = makeImmutableRMDForTest(t, config, rmd, kbfsmd.FakeID(1))
ops.headStatus = headTrusted
u := h.FirstResolvedWriter()
rootID := kbfsblock.FakeID(42)
aID := kbfsblock.FakeID(43)
bID := kbfsblock.FakeID(44)
dirBlock := NewDirBlock().(*DirBlock)
dirBlock.Children["b"] = DirEntry{
BlockInfo: makeBIFromID(bID, u.AsUserOrTeam()),
EntryInfo: EntryInfo{
Type: Dir,
},
}
node := pathNode{makeBP(rootID, rmd, config, u), "p"}
aNode := pathNode{makeBP(aID, rmd, config, u), "a"}
p := path{FolderBranch{Tlf: id}, []pathNode{node, aNode}}
n := nodeFromPath(t, ops, p)
testPutBlockInCache(t, config, aNode.BlockPointer, id, dirBlock)
bn, ei, err := config.KBFSOps().Lookup(ctx, n, "b")
if err != nil {
t.Errorf("Error on Lookup: %+v", err)
}
bPath := ops.nodeCache.PathFromNode(bn)
expectedBNode := pathNode{makeBP(bID, rmd, config, u), "b"}
expectedBNode.KeyGen = 1
if ei != dirBlock.Children["b"].EntryInfo {
t.Errorf("Lookup returned a bad entry info: %v vs %v",
ei, dirBlock.Children["b"].EntryInfo)
} else if bPath.path[2] != expectedBNode {
t.Errorf("Bad path node after lookup: %v vs %v",
bPath.path[2], expectedBNode)
}
}
func TestKBFSOpsLookupSymlinkSuccess(t *testing.T) {
mockCtrl, config, ctx, cancel := kbfsOpsInit(t)
defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)
id, h, rmd := createNewRMD(t, config, "alice", false)
ops := getOps(config, id)
ops.head = makeImmutableRMDForTest(t, config, rmd, kbfsmd.FakeID(1))
ops.headStatus = headTrusted
u := h.FirstResolvedWriter()
rootID := kbfsblock.FakeID(42)
aID := kbfsblock.FakeID(43)
bID := kbfsblock.FakeID(44)
dirBlock := NewDirBlock().(*DirBlock)
dirBlock.Children["b"] = DirEntry{
BlockInfo: makeBIFromID(bID, u.AsUserOrTeam()),
EntryInfo: EntryInfo{
Type: Sym,
},
}
node := pathNode{makeBP(rootID, rmd, config, u), "p"}
aNode := pathNode{makeBP(aID, rmd, config, u), "a"}
p := path{FolderBranch{Tlf: id}, []pathNode{node, aNode}}
n := nodeFromPath(t, ops, p)
testPutBlockInCache(t, config, aNode.BlockPointer, id, dirBlock)
bn, ei, err := config.KBFSOps().Lookup(ctx, n, "b")
if err != nil {
t.Errorf("Error on Lookup: %+v", err)
}
if ei != dirBlock.Children["b"].EntryInfo {
t.Errorf("Lookup returned a bad directory entry: %v vs %v",
ei, dirBlock.Children["b"].EntryInfo)
} else if bn != nil {
t.Errorf("Node for symlink is not nil: %v", bn)
}
}
func TestKBFSOpsLookupNoSuchNameFail(t *testing.T) {
mockCtrl, config, ctx, cancel := kbfsOpsInit(t)
defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)
id, h, rmd := createNewRMD(t, config, "alice", false)
ops := getOps(config, id)
ops.head = makeImmutableRMDForTest(t, config, rmd, kbfsmd.FakeID(1))
ops.headStatus = headTrusted
u := h.FirstResolvedWriter()
rootID := kbfsblock.FakeID(42)
aID := kbfsblock.FakeID(43)
bID := kbfsblock.FakeID(44)
dirBlock := NewDirBlock().(*DirBlock)
dirBlock.Children["b"] = DirEntry{
BlockInfo: makeBIFromID(bID, u.AsUserOrTeam()),
EntryInfo: EntryInfo{
Type: Dir,
},
}
node := pathNode{makeBP(rootID, rmd, config, u), "p"}
aNode := pathNode{makeBP(aID, rmd, config, u), "a"}
p := path{FolderBranch{Tlf: id}, []pathNode{node, aNode}}
n := nodeFromPath(t, ops, p)
testPutBlockInCache(t, config, aNode.BlockPointer, id, dirBlock)
expectedErr := NoSuchNameError{"c"}
_, _, err := config.KBFSOps().Lookup(ctx, n, "c")
if err == nil {
t.Error("No error as expected on Lookup")
} else if err != expectedErr {
t.Errorf("Unexpected error after bad Lookup: %+v", err)
}
}
func TestKBFSOpsLookupNewDataVersionFail(t *testing.T) {
mockCtrl, config, ctx, cancel := kbfsOpsInit(t)
defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)
id, h, rmd := createNewRMD(t, config, "alice", false)
ops := getOps(config, id)
ops.head = makeImmutableRMDForTest(t, config, rmd, kbfsmd.FakeID(1))
ops.headStatus = headTrusted
u := h.FirstResolvedWriter()
rootID := kbfsblock.FakeID(42)
aID := kbfsblock.FakeID(43)
bID := kbfsblock.FakeID(44)
dirBlock := NewDirBlock().(*DirBlock)
bInfo := makeBIFromID(bID, u.AsUserOrTeam())
bInfo.DataVer = 10
dirBlock.Children["b"] = DirEntry{
BlockInfo: bInfo,
EntryInfo: EntryInfo{
Type: Dir,
},
}
node := pathNode{makeBP(rootID, rmd, config, u), "p"}
aNode := pathNode{makeBP(aID, rmd, config, u), "a"}
bNode := pathNode{makeBP(bID, rmd, config, u), "b"}
p := path{FolderBranch{Tlf: id}, []pathNode{node, aNode}}
n := nodeFromPath(t, ops, p)
testPutBlockInCache(t, config, aNode.BlockPointer, id, dirBlock)
expectedErr := &NewDataVersionError{
path{FolderBranch{Tlf: id}, []pathNode{node, aNode, bNode}},
bInfo.DataVer,
}
_, _, err := config.KBFSOps().Lookup(ctx, n, "b")
if err == nil {
t.Error("No expected error found on lookup")
} else if err.Error() != expectedErr.Error() {
t.Errorf("Unexpected error after bad lookup: %+v", err)
}
}
func TestKBFSOpsStatSuccess(t *testing.T) {
mockCtrl, config, ctx, cancel := kbfsOpsInit(t)
defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)
id, h, rmd := createNewRMD(t, config, "alice", false)
ops := getOps(config, id)
ops.head = makeImmutableRMDForTest(t, config, rmd, kbfsmd.FakeID(1))
ops.headStatus = headTrusted
u := h.FirstResolvedWriter()
rootID := kbfsblock.FakeID(42)
aID := kbfsblock.FakeID(43)
bID := kbfsblock.FakeID(44)
dirBlock := NewDirBlock().(*DirBlock)
dirBlock.Children["b"] = DirEntry{
BlockInfo: makeBIFromID(bID, u.AsUserOrTeam()),
EntryInfo: EntryInfo{
Type: Dir,
},
}
node := pathNode{makeBP(rootID, rmd, config, u), "p"}
aNode := pathNode{makeBP(aID, rmd, config, u), "a"}
bNode := pathNode{dirBlock.Children["b"].BlockPointer, "b"}
p := path{FolderBranch{Tlf: id}, []pathNode{node, aNode, bNode}}
n := nodeFromPath(t, ops, p)
testPutBlockInCache(t, config, aNode.BlockPointer, id, dirBlock)
ei, err := config.KBFSOps().Stat(ctx, n)
if err != nil {
t.Errorf("Error on Stat: %+v", err)
}
if ei != dirBlock.Children["b"].EntryInfo {
t.Errorf("Stat returned a bad entry info: %v vs %v",
ei, dirBlock.Children["b"].EntryInfo)
}
}
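// getBlockFromCache fetches the block for ptr from the dirty block
// cache, falling back to the regular block cache on a miss.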
func getBlockFromCache(t *testing.T, config Config, id tlf.ID, ptr BlockPointer,
branch BranchName) Block {
if block, err := config.DirtyBlockCache().Get(id, ptr, branch); err == nil {
return block
}
block, err := config.BlockCache().Get(ptr)
if err != nil {
t.Errorf("Couldn't find block %v, branch %s in the cache after test: "+
"%+v", ptr, branch, err)
return nil
}
return block
}
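// getDirBlockFromCache is like getBlockFromCache, but also asserts
// that the cached block is a *DirBlock.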
func getDirBlockFromCache(t *testing.T, config Config, id tlf.ID,
ptr BlockPointer, branch BranchName) *DirBlock {
block := getBlockFromCache(t, config, id, ptr, branch)
dblock, ok := block.(*DirBlock)
if !ok {
t.Errorf("Cached block %v, branch %s was not a DirBlock", ptr, branch)
}
return dblock
}
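// getFileBlockFromCache is like getBlockFromCache, but also asserts
// that the cached block is a *FileBlock.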
func getFileBlockFromCache(t *testing.T, config Config, id tlf.ID,
ptr BlockPointer, branch BranchName) *FileBlock {
block := getBlockFromCache(t, config, id, ptr, branch)
fblock, ok := block.(*FileBlock)
if !ok {
t.Errorf("Cached block %v, branch %s was not a FileBlock", ptr, branch)
}
return fblock
}
func testCreateEntryFailDupName(t *testing.T, isDir bool) {
mockCtrl, config, ctx, cancel := kbfsOpsInit(t)
defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)
u, id, rmd := injectNewRMD(t, config)
rootID := kbfsblock.FakeID(42)
aID := kbfsblock.FakeID(43)
rootBlock := NewDirBlock().(*DirBlock)
rootBlock.Children["a"] = DirEntry{
BlockInfo: makeBIFromID(aID, u.AsUserOrTeam()),
EntryInfo: EntryInfo{
Type: Dir,
},
}
node := pathNode{makeBP(rootID, rmd, config, u), "p"}
p := path{FolderBranch{Tlf: id}, []pathNode{node}}
ops := getOps(config, id)
n := nodeFromPath(t, ops, p)
// creating "a", which already exists in the root block
testPutBlockInCache(t, config, node.BlockPointer, id, rootBlock)
expectedErr := NameExistsError{"a"}
var err error
// dir and link have different checks for dup name
if isDir {
_, _, err = config.KBFSOps().CreateDir(ctx, n, "a")
} else {
_, err = config.KBFSOps().CreateLink(ctx, n, "a", "b")
}
if err == nil {
t.Errorf("Got no expected error on create")
} else if err != expectedErr {
t.Errorf("Got unexpected error on create: %+v", err)
}
}
func TestCreateDirFailDupName(t *testing.T) {
testCreateEntryFailDupName(t, true)
}
func TestCreateLinkFailDupName(t *testing.T) {
testCreateEntryFailDupName(t, false)
}
func testCreateEntryFailNameTooLong(t *testing.T, isDir bool) {
mockCtrl, config, ctx, cancel := kbfsOpsInit(t)
defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)
u, id, rmd := injectNewRMD(t, config)
rootID := kbfsblock.FakeID(42)
rootBlock := NewDirBlock().(*DirBlock)
node := pathNode{makeBP(rootID, rmd, config, u), "p"}
p := path{FolderBranch{Tlf: id}, []pathNode{node}}
ops := getOps(config, id)
n := nodeFromPath(t, ops, p)
config.maxNameBytes = 2
name := "aaa"
testPutBlockInCache(t, config, node.BlockPointer, id, rootBlock)
expectedErr := NameTooLongError{name, config.maxNameBytes}
var err error
// dir and link take different code paths for the name-length check
if isDir {
_, _, err = config.KBFSOps().CreateDir(ctx, n, name)
} else {
_, err = config.KBFSOps().CreateLink(ctx, n, name, "b")
}
if err == nil {
t.Errorf("Got no expected error on create")
} else if err != expectedErr {
t.Errorf("Got unexpected error on create: %+v", err)
}
}
func TestCreateDirFailNameTooLong(t *testing.T) {
testCreateEntryFailNameTooLong(t, true)
}
func TestCreateLinkFailNameTooLong(t *testing.T) {
testCreateEntryFailNameTooLong(t, false)
}
func testCreateEntryFailDirTooBig(t *testing.T, isDir bool) {
mockCtrl, config, ctx, cancel := kbfsOpsInit(t)
defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)
u, id, rmd := injectNewRMD(t, config)
rootID := kbfsblock.FakeID(42)
rootBlock := NewDirBlock().(*DirBlock)
node := pathNode{makeBP(rootID, rmd, config, u), "p"}
p := path{FolderBranch{Tlf: id}, []pathNode{node}}
ops := getOps(config, id)
n := nodeFromPath(t, ops, p)
rmd.data.Dir.Size = 10
config.maxDirBytes = 12
name := "aaa"
testPutBlockInCache(t, config, node.BlockPointer, id, rootBlock)
var err error
// dir and link take different code paths for the directory-size check
if isDir {
_, _, err = config.KBFSOps().CreateDir(ctx, n, name)
} else {
_, err = config.KBFSOps().CreateLink(ctx, n, name, "b")
}
if err == nil {
t.Errorf("Got no expected error on create")
} else if _, ok := err.(DirTooBigError); !ok {
t.Errorf("Got unexpected error on create: %+v", err)
}
}
func TestCreateDirFailDirTooBig(t *testing.T) {
testCreateEntryFailDirTooBig(t, true)
}
func TestCreateLinkFailDirTooBig(t *testing.T) {
testCreateEntryFailDirTooBig(t, false)
}
func testCreateEntryFailKBFSPrefix(t *testing.T, et EntryType) {
mockCtrl, config, ctx, cancel := kbfsOpsInit(t)
defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)
u, id, rmd := injectNewRMD(t, config)
rootID := kbfsblock.FakeID(42)
aID := kbfsblock.FakeID(43)
rootBlock := NewDirBlock().(*DirBlock)
rootBlock.Children["a"] = DirEntry{
BlockInfo: makeBIFromID(aID, u.AsUserOrTeam()),
EntryInfo: EntryInfo{
Type: Dir,
},
}
node := pathNode{makeBP(rootID, rmd, config, u), "p"}
p := path{FolderBranch{Tlf: id}, []pathNode{node}}
ops := getOps(config, id)
n := nodeFromPath(t, ops, p)
name := ".kbfs_status"
expectedErr := DisallowedPrefixError{name, ".kbfs"}
var err error
// each entry type goes through a different creation code path
switch et {
case Dir:
_, _, err = config.KBFSOps().CreateDir(ctx, n, name)
case Sym:
_, err = config.KBFSOps().CreateLink(ctx, n, name, "a")
case Exec:
_, _, err = config.KBFSOps().CreateFile(ctx, n, name, true, NoExcl)
case File:
_, _, err = config.KBFSOps().CreateFile(ctx, n, name, false, NoExcl)
}
if err == nil {
t.Errorf("Got no expected error on create")
} else if err != expectedErr {
t.Errorf("Got unexpected error on create: %+v", err)
}
}
func TestCreateDirFailKBFSPrefix(t *testing.T) {
testCreateEntryFailKBFSPrefix(t, Dir)
}
func TestCreateFileFailKBFSPrefix(t *testing.T) {
testCreateEntryFailKBFSPrefix(t, File)
}
func TestCreateExecFailKBFSPrefix(t *testing.T) {
testCreateEntryFailKBFSPrefix(t, Exec)
}
func TestCreateLinkFailKBFSPrefix(t *testing.T) {
testCreateEntryFailKBFSPrefix(t, Sym)
}
// TODO: Currently only the remove tests use makeDirTree(),
// makeFile(), et al. Make the other tests use these functions, too.
// makeDirTree creates a block tree for the given path components and
// returns the DirEntry for the root block, a path, and the
// corresponding list of blocks. If n components are given, then the
// path will have n+1 nodes (one extra for the root node), and there
// will be n+1 corresponding blocks.
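// For example, makeDirTree(id, uid, "a", "b") returns a path with the
// three nodes {root}, "a", and "b", plus three matching DirBlocks.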
func makeDirTree(id tlf.ID, uid keybase1.UserOrTeamID, components ...string) (
DirEntry, path, []*DirBlock) {
var idCounter byte = 0x10
makeBlockID := func() kbfsblock.ID {
id := kbfsblock.FakeID(idCounter)
idCounter++
return id
}
// Handle the first (root) block.
bid := makeBlockID()
bi := makeBIFromID(bid, uid)
rootEntry := DirEntry{
BlockInfo: bi,
EntryInfo: EntryInfo{
Type: Dir,
},
}
nodes := []pathNode{{bi.BlockPointer, "{root}"}}
rootBlock := NewDirBlock().(*DirBlock)
blocks := []*DirBlock{rootBlock}
// Handle the rest.
parentDirBlock := rootBlock
for _, component := range components {
bid := makeBlockID()
bi := makeBIFromID(bid, uid)
parentDirBlock.Children[component] = DirEntry{
BlockInfo: bi,
EntryInfo: EntryInfo{
Type: Dir,
},
}
nodes = append(nodes, pathNode{bi.BlockPointer, component})
dirBlock := NewDirBlock().(*DirBlock)
blocks = append(blocks, dirBlock)
parentDirBlock = dirBlock
}
return rootEntry, path{FolderBranch{Tlf: id}, nodes}, blocks
}
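// makeFile adds a file (or exec) entry named name to parentDirBlock
// and returns the extended path along with a fresh, empty FileBlock.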
func makeFile(dir path, parentDirBlock *DirBlock, name string, et EntryType,
directType BlockDirectType) (
path, *FileBlock) {
if et != File && et != Exec {
panic(fmt.Sprintf("Unexpected type %s", et))
}
bid := kbfsblock.FakeIDAdd(dir.tailPointer().ID, 1)
bi := makeBIFromID(bid, dir.tailPointer().Creator)
bi.DirectType = directType
parentDirBlock.Children[name] = DirEntry{
BlockInfo: bi,
EntryInfo: EntryInfo{
Type: et,
},
}
p := dir.ChildPath(name, bi.BlockPointer)
return p, NewFileBlock().(*FileBlock)
}
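// makeDir adds a subdirectory entry named name to parentDirBlock and
// returns the extended path along with a fresh, empty DirBlock.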
func makeDir(dir path, parentDirBlock *DirBlock, name string) (
path, *DirBlock) {
bid := kbfsblock.FakeIDAdd(dir.tailPointer().ID, 1)
bi := makeBIFromID(bid, dir.tailPointer().Creator)
parentDirBlock.Children[name] = DirEntry{
BlockInfo: bi,
EntryInfo: EntryInfo{
Type: Dir,
},
}
p := dir.ChildPath(name, bi.BlockPointer)
return p, NewDirBlock().(*DirBlock)
}
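// makeSym adds a symlink entry named name to parentDirBlock; symlinks
// have no block of their own, so there is nothing to return.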
func makeSym(dir path, parentDirBlock *DirBlock, name string) {
parentDirBlock.Children[name] = DirEntry{
EntryInfo: EntryInfo{
Type: Sym,
},
}
}
func TestRemoveDirFailNonEmpty(t *testing.T) {
mockCtrl, config, ctx, cancel := kbfsOpsInit(t)
defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)
uid, id, rmd := injectNewRMD(t, config)
rootEntry, p, blocks := makeDirTree(
id, uid.AsUserOrTeam(), "a", "b", "c", "d", "e")
rmd.data.Dir = rootEntry
// Prime cache with all blocks.
for i, block := range blocks {
testPutBlockInCache(
t, config, p.path[i].BlockPointer, id, block)
}
ops := getOps(config, id)
n := nodeFromPath(t, ops, *p.parentPath().parentPath())
expectedErr := DirNotEmptyError{p.parentPath().tailName()}
err := config.KBFSOps().RemoveDir(ctx, n, "d")
require.Equal(t, expectedErr, err)
}
func testKBFSOpsRemoveFileMissingBlockSuccess(t *testing.T, et EntryType) {
require.NotEqual(t, et, Sym)
config, _, ctx, cancel := kbfsOpsInitNoMocks(t, "alice")
defer kbfsTestShutdownNoMocks(t, config, ctx, cancel)
config.noBGFlush = true
// create a file.
rootNode := GetRootNodeOrBust(ctx, t, config, "alice", false)
kbfsOps := config.KBFSOps()
var nodeA Node
var err error
if et == Dir {
nodeA, _, err = kbfsOps.CreateDir(ctx, rootNode, "a")
require.NoError(t, err)
err = kbfsOps.SyncAll(ctx, nodeA.GetFolderBranch())
require.NoError(t, err)
} else {
exec := false
if et == Exec {
exec = true
}
nodeA, _, err = kbfsOps.CreateFile(ctx, rootNode, "a", exec, NoExcl)
require.NoError(t, err)
data := []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}
err = kbfsOps.Write(ctx, nodeA, data, 0)
require.NoError(t, err)
err = kbfsOps.SyncAll(ctx, nodeA.GetFolderBranch())
require.NoError(t, err)
}
ops := getOps(config, rootNode.GetFolderBranch().Tlf)
// Remove block from the server directly, and clear caches.
config.BlockOps().Delete(ctx, rootNode.GetFolderBranch().Tlf,
[]BlockPointer{ops.nodeCache.PathFromNode(nodeA).tailPointer()})
config.ResetCaches()
err = config.KBFSOps().RemoveEntry(ctx, rootNode, "a")
require.NoError(t, err)
err = config.KBFSOps().SyncAll(ctx, rootNode.GetFolderBranch())
require.NoError(t, err)
// Shutdown the mdserver explicitly before the state checker tries
// to run, since the sizes will definitely be wrong.
defer config.MDServer().Shutdown()
}
func TestKBFSOpsRemoveFileMissingBlockSuccess(t *testing.T) {
testKBFSOpsRemoveFileMissingBlockSuccess(t, File)
}
func TestKBFSOpsRemoveExecMissingBlockSuccess(t *testing.T) {
testKBFSOpsRemoveFileMissingBlockSuccess(t, Exec)
}
func TestKBFSOpsRemoveDirMissingBlockSuccess(t *testing.T) {
testKBFSOpsRemoveFileMissingBlockSuccess(t, Dir)
}
func TestRemoveDirFailNoSuchName(t *testing.T) {
mockCtrl, config, ctx, cancel := kbfsOpsInit(t)
defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)
uid, id, rmd := injectNewRMD(t, config)
rootEntry, p, blocks := makeDirTree(
id, uid.AsUserOrTeam(), "a", "b", "c", "d", "e")
rmd.data.Dir = rootEntry
// Prime cache with all blocks.
for i, block := range blocks {
testPutBlockInCache(
t, config, p.path[i].BlockPointer, id, block)
}
ops := getOps(config, id)
n := nodeFromPath(t, ops, p)
expectedErr := NoSuchNameError{"nonexistent"}
err := config.KBFSOps().RemoveDir(ctx, n, "nonexistent")
require.Equal(t, expectedErr, err)
}
func TestRenameFailAcrossTopLevelFolders(t *testing.T) {
mockCtrl, config, ctx, cancel := kbfsOpsInit(t)
defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)
id1 := tlf.FakeID(1, false)
h1 := parseTlfHandleOrBust(t, config, "alice,bob", false)
rmd1, err := makeInitialRootMetadata(config.MetadataVersion(), id1, h1)
require.NoError(t, err)
id2 := tlf.FakeID(2, false)
h2 := parseTlfHandleOrBust(t, config, "alice,bob,charlie", false)
rmd2, err := makeInitialRootMetadata(config.MetadataVersion(), id2, h2)
require.NoError(t, err)
uid1 := h2.ResolvedWriters()[0]
uid2 := h2.ResolvedWriters()[2]
rootID1 := kbfsblock.FakeID(41)
aID1 := kbfsblock.FakeID(42)
node1 := pathNode{makeBP(rootID1, rmd1, config, uid1), "p"}
aNode1 := pathNode{makeBP(aID1, rmd1, config, uid1), "a"}
p1 := path{FolderBranch{Tlf: id1}, []pathNode{node1, aNode1}}
ops1 := getOps(config, id1)
n1 := nodeFromPath(t, ops1, p1)
rootID2 := kbfsblock.FakeID(38)
aID2 := kbfsblock.FakeID(39)
node2 := pathNode{makeBP(rootID2, rmd2, config, uid2), "p"}
aNode2 := pathNode{makeBP(aID2, rmd2, config, uid2), "a"}
p2 := path{FolderBranch{Tlf: id2}, []pathNode{node2, aNode2}}
ops2 := getOps(config, id2)
n2 := nodeFromPath(t, ops2, p2)
expectedErr := RenameAcrossDirsError{}
if err := config.KBFSOps().Rename(ctx, n1, "b", n2, "c"); err == nil {
t.Errorf("Got no expected error on rename")
} else if err.Error() != expectedErr.Error() {
t.Errorf("Got unexpected error on rename: %+v", err)
}
}
func TestRenameFailAcrossBranches(t *testing.T) {
mockCtrl, config, ctx, cancel := kbfsOpsInit(t)
defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)
id1 := tlf.FakeID(1, false)
h1 := parseTlfHandleOrBust(t, config, "alice,bob", false)
rmd1, err := makeInitialRootMetadata(config.MetadataVersion(), id1, h1)
require.NoError(t, err)
uid1 := h1.FirstResolvedWriter()
rootID1 := kbfsblock.FakeID(41)
aID1 := kbfsblock.FakeID(42)
node1 := pathNode{makeBP(rootID1, rmd1, config, uid1), "p"}
aNode1 := pathNode{makeBP(aID1, rmd1, config, uid1), "a"}
p1 := path{FolderBranch{Tlf: id1}, []pathNode{node1, aNode1}}
p2 := path{FolderBranch{id1, "test"}, []pathNode{node1, aNode1}}
ops1 := getOps(config, id1)
n1 := nodeFromPath(t, ops1, p1)
ops2 := config.KBFSOps().(*KBFSOpsStandard).getOpsNoAdd(
FolderBranch{id1, "test"})
n2 := nodeFromPath(t, ops2, p2)
expectedErr := RenameAcrossDirsError{}
if err := config.KBFSOps().Rename(ctx, n1, "b", n2, "c"); err == nil {
t.Errorf("Got no expected error on rename")
} else if err.Error() != expectedErr.Error() {
t.Errorf("Got unexpected error on rename: %+v", err)
}
}
func TestKBFSOpsCacheReadFullSuccess(t *testing.T) {
mockCtrl, config, ctx, cancel := kbfsOpsInit(t)
defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)
u, id, rmd := injectNewRMD(t, config)
rootID := kbfsblock.FakeID(42)
fileID := kbfsblock.FakeID(43)
fileBlock := NewFileBlock().(*FileBlock)
fileBlock.Contents = []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}
node := pathNode{makeBP(rootID, rmd, config, u), "p"}
fileNode := pathNode{makeBP(fileID, rmd, config, u), "f"}
p := path{FolderBranch{Tlf: id}, []pathNode{node, fileNode}}
ops := getOps(config, id)
pNode := nodeFromPath(t, ops, p)
testPutBlockInCache(t, config, fileNode.BlockPointer, id, fileBlock)
n := len(fileBlock.Contents)
dest := make([]byte, n)
if n2, err := config.KBFSOps().Read(ctx, pNode, dest, 0); err != nil {
t.Errorf("Got error on read: %+v", err)
} else if n2 != int64(n) {
t.Errorf("Read the wrong number of bytes: %d", n2)
} else if !bytes.Equal(dest, fileBlock.Contents) {
t.Errorf("Read bad contents: %v", dest)
}
}
func TestKBFSOpsCacheReadPartialSuccess(t *testing.T) {
mockCtrl, config, ctx, cancel := kbfsOpsInit(t)
defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)
u, id, rmd := injectNewRMD(t, config)
rootID := kbfsblock.FakeID(42)
fileID := kbfsblock.FakeID(43)
fileBlock := NewFileBlock().(*FileBlock)
fileBlock.Contents = []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}
node := pathNode{makeBP(rootID, rmd, config, u), "p"}
fileNode := pathNode{makeBP(fileID, rmd, config, u), "f"}
p := path{FolderBranch{Tlf: id}, []pathNode{node, fileNode}}
ops := getOps(config, id)
pNode := nodeFromPath(t, ops, p)
testPutBlockInCache(t, config, fileNode.BlockPointer, id, fileBlock)
dest := make([]byte, 4)
if n, err := config.KBFSOps().Read(ctx, pNode, dest, 2); err != nil {
t.Errorf("Got error on read: %+v", err)
} else if n != 4 {
t.Errorf("Read the wrong number of bytes: %d", n)
} else if !bytes.Equal(dest, fileBlock.Contents[2:6]) {
t.Errorf("Read bad contents: %v", dest)
}
}
func TestKBFSOpsCacheReadFullMultiBlockSuccess(t *testing.T) {
mockCtrl, config, ctx, cancel := kbfsOpsInit(t)
defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)
u, id, rmd := injectNewRMD(t, config)
rootID := kbfsblock.FakeID(42)
fileID := kbfsblock.FakeID(43)
id1 := kbfsblock.FakeID(44)
id2 := kbfsblock.FakeID(45)
id3 := kbfsblock.FakeID(46)
id4 := kbfsblock.FakeID(47)
fileBlock := NewFileBlock().(*FileBlock)
fileBlock.IsInd = true
fileBlock.IPtrs = []IndirectFilePtr{
makeIFP(id1, rmd, config, u, 0, 0),
makeIFP(id2, rmd, config, u, 6, 5),
makeIFP(id3, rmd, config, u, 7, 10),
makeIFP(id4, rmd, config, u, 8, 15),
}
block1 := NewFileBlock().(*FileBlock)
block1.Contents = []byte{5, 4, 3, 2, 1}
block2 := NewFileBlock().(*FileBlock)
block2.Contents = []byte{10, 9, 8, 7, 6}
block3 := NewFileBlock().(*FileBlock)
block3.Contents = []byte{15, 14, 13, 12, 11}
block4 := NewFileBlock().(*FileBlock)
block4.Contents = []byte{20, 19, 18, 17, 16}
node := pathNode{makeBP(rootID, rmd, config, u), "p"}
fileNode := pathNode{makeBP(fileID, rmd, config, u), "a"}
p := path{FolderBranch{Tlf: id}, []pathNode{node, fileNode}}
ops := getOps(config, id)
pNode := nodeFromPath(t, ops, p)
testPutBlockInCache(t, config, fileNode.BlockPointer, id, fileBlock)
testPutBlockInCache(t, config, fileBlock.IPtrs[0].BlockPointer, id, block1)
testPutBlockInCache(t, config, fileBlock.IPtrs[1].BlockPointer, id, block2)
testPutBlockInCache(t, config, fileBlock.IPtrs[2].BlockPointer, id, block3)
testPutBlockInCache(t, config, fileBlock.IPtrs[3].BlockPointer, id, block4)
n := 20
dest := make([]byte, n)
fullContents := append(block1.Contents, block2.Contents...)
fullContents = append(fullContents, block3.Contents...)
fullContents = append(fullContents, block4.Contents...)
if n2, err := config.KBFSOps().Read(ctx, pNode, dest, 0); err != nil {
t.Errorf("Got error on read: %+v", err)
} else if n2 != int64(n) {
t.Errorf("Read the wrong number of bytes: %d", n2)
} else if !bytes.Equal(dest, fullContents) {
t.Errorf("Read bad contents: %v", dest)
}
}
func TestKBFSOpsCacheReadPartialMultiBlockSuccess(t *testing.T) {
mockCtrl, config, ctx, cancel := kbfsOpsInit(t)
defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)
u, id, rmd := injectNewRMD(t, config)
rootID := kbfsblock.FakeID(42)
fileID := kbfsblock.FakeID(43)
id1 := kbfsblock.FakeID(44)
id2 := kbfsblock.FakeID(45)
id3 := kbfsblock.FakeID(46)
id4 := kbfsblock.FakeID(47)
fileBlock := NewFileBlock().(*FileBlock)
fileBlock.IsInd = true
fileBlock.IPtrs = []IndirectFilePtr{
makeIFP(id1, rmd, config, u, 0, 0),
makeIFP(id2, rmd, config, u, 6, 5),
makeIFP(id3, rmd, config, u, 7, 10),
makeIFP(id4, rmd, config, u, 8, 15),
}
block1 := NewFileBlock().(*FileBlock)
block1.Contents = []byte{5, 4, 3, 2, 1}
block2 := NewFileBlock().(*FileBlock)
block2.Contents = []byte{10, 9, 8, 7, 6}
block3 := NewFileBlock().(*FileBlock)
block3.Contents = []byte{15, 14, 13, 12, 11}
block4 := NewFileBlock().(*FileBlock)
block4.Contents = []byte{20, 19, 18, 17, 16}
node := pathNode{makeBP(rootID, rmd, config, u), "p"}
fileNode := pathNode{makeBP(fileID, rmd, config, u), "a"}
p := path{FolderBranch{Tlf: id}, []pathNode{node, fileNode}}
ops := getOps(config, id)
pNode := nodeFromPath(t, ops, p)
testPutBlockInCache(t, config, fileNode.BlockPointer, id, fileBlock)
testPutBlockInCache(t, config, fileBlock.IPtrs[0].BlockPointer, id, block1)
testPutBlockInCache(t, config, fileBlock.IPtrs[1].BlockPointer, id, block2)
testPutBlockInCache(t, config, fileBlock.IPtrs[2].BlockPointer, id, block3)
n := 10
dest := make([]byte, n)
contents := append(block1.Contents[3:], block2.Contents...)
contents = append(contents, block3.Contents[:3]...)
if n2, err := config.KBFSOps().Read(ctx, pNode, dest, 3); err != nil {
t.Errorf("Got error on read: %+v", err)
} else if n2 != int64(n) {
t.Errorf("Read the wrong number of bytes: %d", n2)
} else if !bytes.Equal(dest, contents) {
t.Errorf("Read bad contents: %v", dest)
}
}
func TestKBFSOpsCacheReadFailPastEnd(t *testing.T) {
mockCtrl, config, ctx, cancel := kbfsOpsInit(t)
defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)
u, id, rmd := injectNewRMD(t, config)
rootID := kbfsblock.FakeID(42)
fileID := kbfsblock.FakeID(43)
fileBlock := NewFileBlock().(*FileBlock)
fileBlock.Contents = []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}
node := pathNode{makeBP(rootID, rmd, config, u), "p"}
fileNode := pathNode{makeBP(fileID, rmd, config, u), "f"}
p := path{FolderBranch{Tlf: id}, []pathNode{node, fileNode}}
ops := getOps(config, id)
pNode := nodeFromPath(t, ops, p)
testPutBlockInCache(t, config, fileNode.BlockPointer, id, fileBlock)
dest := make([]byte, 4)
if n, err := config.KBFSOps().Read(ctx, pNode, dest, 10); err != nil {
t.Errorf("Got error on read: %+v", err)
} else if n != 0 {
t.Errorf("Read the wrong number of bytes: %d", n)
}
}
func TestKBFSOpsServerReadFullSuccess(t *testing.T) {
mockCtrl, config, ctx, cancel := kbfsOpsInit(t)
defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)
u, id, rmd := injectNewRMD(t, config)
rootID := kbfsblock.FakeID(42)
fileID := kbfsblock.FakeID(43)
fileBlock := NewFileBlock().(*FileBlock)
fileBlock.Contents = []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}
node := pathNode{makeBP(rootID, rmd, config, u), "p"}
fileBlockPtr := makeBP(fileID, rmd, config, u)
fileNode := pathNode{fileBlockPtr, "f"}
p := path{FolderBranch{Tlf: id}, []pathNode{node, fileNode}}
ops := getOps(config, id)
pNode := nodeFromPath(t, ops, p)
// cache miss means fetching metadata and getting read key
expectBlock(config, rmd, fileBlockPtr, fileBlock, nil)
n := len(fileBlock.Contents)
dest := make([]byte, n)
if n2, err := config.KBFSOps().Read(ctx, pNode, dest, 0); err != nil {
t.Errorf("Got error on read: %+v", err)
} else if n2 != int64(n) {
t.Errorf("Read the wrong number of bytes: %d", n2)
} else if !bytes.Equal(dest, fileBlock.Contents) {
t.Errorf("Read bad contents: %v", dest)
}
}
func TestKBFSOpsServerReadFailNoSuchBlock(t *testing.T) {
mockCtrl, config, ctx, cancel := kbfsOpsInit(t)
defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)
u, id, rmd := injectNewRMD(t, config)
rootID := kbfsblock.FakeID(42)
fileID := kbfsblock.FakeID(43)
fileBlock := NewFileBlock().(*FileBlock)
fileBlock.Contents = []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}
node := pathNode{makeBP(rootID, rmd, config, u), "p"}
fileBlockPtr := makeBP(fileID, rmd, config, u)
fileNode := pathNode{fileBlockPtr, "f"}
p := path{FolderBranch{Tlf: id}, []pathNode{node, fileNode}}
ops := getOps(config, id)
pNode := nodeFromPath(t, ops, p)
// cache miss means fetching metadata and getting read key
err := NoSuchBlockError{rootID}
expectBlock(config, rmd, fileBlockPtr, fileBlock, err)
n := len(fileBlock.Contents)
dest := make([]byte, n)
if _, err2 := config.KBFSOps().Read(ctx, pNode, dest, 0); err2 == nil {
t.Errorf("Got no expected error")
} else if err2 != err {
t.Errorf("Got unexpected error: %+v", err2)
}
}
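// checkSyncOp verifies that the given syncOp unrefs the expected file
// pointer and records exactly the expected write ranges.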
func checkSyncOp(t *testing.T, codec kbfscodec.Codec,
so *syncOp, filePtr BlockPointer, writes []WriteRange) {
if so == nil {
t.Error("No sync info for written file!")
}
if so.File.Unref != filePtr {
t.Errorf("Unexpected unref file in sync op: %v vs %v",
so.File.Unref, filePtr)
}
if len(so.Writes) != len(writes) {
t.Errorf("Unexpected number of writes: %v (expected %v)",
len(so.Writes), len(writes))
}
for i, w := range writes {
writeEqual, err := kbfscodec.Equal(codec, so.Writes[i], w)
if err != nil {
t.Fatal(err)
}
if !writeEqual {
t.Errorf("Unexpected write: %v vs %v", so.Writes[i], w)
}
}
}
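// checkSyncOpInCache looks up the in-progress syncOp for filePtr in
// the folder's unref cache and verifies it with checkSyncOp.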
func checkSyncOpInCache(t *testing.T, codec kbfscodec.Codec,
ops *folderBranchOps, filePtr BlockPointer, writes []WriteRange) {
// check the in-progress syncOp
si, ok := ops.blocks.unrefCache[filePtr.Ref()]
if !ok {
t.Error("No sync info for written file!")
}
checkSyncOp(t, codec, si.op, filePtr, writes)
}
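// updateWithDirtyEntries applies any dirty directory entries for dir
// to block, holding the block lock for reading.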
func updateWithDirtyEntries(ctx context.Context, ops *folderBranchOps,
lState *lockState, dir path, block *DirBlock) (*DirBlock, error) {
ops.blocks.blockLock.RLock(lState)
defer ops.blocks.blockLock.RUnlock(lState)
return ops.blocks.updateWithDirtyEntriesLocked(ctx, lState, dir, block)
}
func TestKBFSOpsWriteNewBlockSuccess(t *testing.T) {
mockCtrl, config, ctx, cancel := kbfsOpsInit(t)
defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)
uid, id, rmd := injectNewRMD(t, config)
rootID := kbfsblock.FakeID(42)
fileID := kbfsblock.FakeID(43)
rootBlock := NewDirBlock().(*DirBlock)
rootBlock.Children["f"] = DirEntry{
BlockInfo: BlockInfo{
BlockPointer: makeBP(fileID, rmd, config, uid),
EncodedSize: 1,
},
EntryInfo: EntryInfo{
Type: File,
},
}
fileBlock := NewFileBlock().(*FileBlock)
node := pathNode{makeBP(rootID, rmd, config, uid), "p"}
fileNode := pathNode{makeBP(fileID, rmd, config, uid), "f"}
p := path{FolderBranch{Tlf: id}, []pathNode{node, fileNode}}
ops := getOps(config, id)
n := nodeFromPath(t, ops, p)
data := []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}
testPutBlockInCache(t, config, node.BlockPointer, id, rootBlock)
testPutBlockInCache(t, config, fileNode.BlockPointer, id, fileBlock)
config.mockBsplit.EXPECT().CopyUntilSplit(
gomock.Any(), gomock.Any(), data, int64(0)).
Do(func(block *FileBlock, lb bool, data []byte, off int64) {
block.Contents = data
}).Return(int64(len(data)))
if err := config.KBFSOps().Write(ctx, n, data, 0); err != nil {
t.Errorf("Got error on write: %+v", err)
}
newFileBlock := getFileBlockFromCache(t, config, id, fileNode.BlockPointer,
p.Branch)
newRootBlock := getDirBlockFromCache(
t, config, id, node.BlockPointer, p.Branch)
lState := makeFBOLockState()
newRootBlock, err := updateWithDirtyEntries(
ctx, ops, lState, *p.parentPath(), newRootBlock)
require.NoError(t, err)
if len(ops.nodeCache.PathFromNode(config.observer.localChange).path) !=
len(p.path) {
t.Errorf("Missing or incorrect local update during write: %v",
config.observer.localChange)
} else if ctx.Value(tCtxID) != config.observer.ctx.Value(tCtxID) {
t.Errorf("Wrong context value passed in local notify: %v",
config.observer.ctx.Value(tCtxID))
} else if !bytes.Equal(data, newFileBlock.Contents) {
t.Errorf("Wrote bad contents: %v", data)
} else if newRootBlock.Children["f"].GetWriter() != uid.AsUserOrTeam() {
t.Errorf("Wrong last writer: %v",
newRootBlock.Children["f"].GetWriter())
} else if newRootBlock.Children["f"].Size != uint64(len(data)) {
t.Errorf("Wrong size for written file: %d",
newRootBlock.Children["f"].Size)
}
checkBlockCache(t, config, id, []kbfsblock.ID{rootID, fileID},
map[BlockPointer]BranchName{
fileNode.BlockPointer: p.Branch,
})
checkSyncOpInCache(t, config.Codec(), ops, fileNode.BlockPointer,
[]WriteRange{{Off: 0, Len: uint64(len(data))}})
}
func TestKBFSOpsWriteExtendSuccess(t *testing.T) {
mockCtrl, config, ctx, cancel := kbfsOpsInit(t)
defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)
uid, id, rmd := injectNewRMD(t, config)
rootID := kbfsblock.FakeID(42)
fileID := kbfsblock.FakeID(43)
rootBlock := NewDirBlock().(*DirBlock)
rootBlock.Children["f"] = DirEntry{
BlockInfo: BlockInfo{
BlockPointer: makeBP(fileID, rmd, config, uid),
EncodedSize: 1,
},
EntryInfo: EntryInfo{
Type: File,
},
}
fileBlock := NewFileBlock().(*FileBlock)
fileBlock.Contents = []byte{1, 2, 3, 4, 5}
node := pathNode{makeBP(rootID, rmd, config, uid), "p"}
fileNode := pathNode{makeBP(fileID, rmd, config, uid), "f"}
p := path{FolderBranch{Tlf: id}, []pathNode{node, fileNode}}
ops := getOps(config, id)
n := nodeFromPath(t, ops, p)
data := []byte{6, 7, 8, 9, 10}
expectedFullData := []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}
testPutBlockInCache(t, config, node.BlockPointer, id, rootBlock)
testPutBlockInCache(t, config, fileNode.BlockPointer, id, fileBlock)
config.mockBsplit.EXPECT().CopyUntilSplit(
gomock.Any(), gomock.Any(), data, int64(5)).
Do(func(block *FileBlock, lb bool, data []byte, off int64) {
block.Contents = expectedFullData
}).Return(int64(len(data)))
if err := config.KBFSOps().Write(ctx, n, data, 5); err != nil {
t.Errorf("Got error on write: %+v", err)
}
newFileBlock := getFileBlockFromCache(t, config, id, fileNode.BlockPointer,
p.Branch)
if len(ops.nodeCache.PathFromNode(config.observer.localChange).path) !=
len(p.path) {
t.Errorf("Missing or incorrect local update during write: %v",
config.observer.localChange)
} else if ctx.Value(tCtxID) != config.observer.ctx.Value(tCtxID) {
t.Errorf("Wrong context value passed in local notify: %v",
config.observer.ctx.Value(tCtxID))
} else if !bytes.Equal(expectedFullData, newFileBlock.Contents) {
t.Errorf("Wrote bad contents: %v", data)
}
checkBlockCache(t, config, id, []kbfsblock.ID{rootID, fileID},
map[BlockPointer]BranchName{
fileNode.BlockPointer: p.Branch,
})
checkSyncOpInCache(t, config.Codec(), ops, fileNode.BlockPointer,
[]WriteRange{{Off: 5, Len: uint64(len(data))}})
}
func TestKBFSOpsWritePastEndSuccess(t *testing.T) {
mockCtrl, config, ctx, cancel := kbfsOpsInit(t)
defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)
uid, id, rmd := injectNewRMD(t, config)
rootID := kbfsblock.FakeID(42)
fileID := kbfsblock.FakeID(43)
rootBlock := NewDirBlock().(*DirBlock)
rootBlock.Children["f"] = DirEntry{
BlockInfo: BlockInfo{
BlockPointer: makeBP(fileID, rmd, config, uid),
EncodedSize: 1,
},
EntryInfo: EntryInfo{
Type: File,
},
}
fileBlock := NewFileBlock().(*FileBlock)
fileBlock.Contents = []byte{1, 2, 3, 4, 5}
node := pathNode{makeBP(rootID, rmd, config, uid), "p"}
fileNode := pathNode{makeBP(fileID, rmd, config, uid), "f"}
p := path{FolderBranch{Tlf: id}, []pathNode{node, fileNode}}
ops := getOps(config, id)
n := nodeFromPath(t, ops, p)
data := []byte{6, 7, 8, 9, 10}
expectedFullData := []byte{1, 2, 3, 4, 5, 0, 0, 6, 7, 8, 9, 10}
testPutBlockInCache(t, config, node.BlockPointer, id, rootBlock)
testPutBlockInCache(t, config, fileNode.BlockPointer, id, fileBlock)
config.mockBsplit.EXPECT().CopyUntilSplit(
gomock.Any(), gomock.Any(), data, int64(7)).
Do(func(block *FileBlock, lb bool, data []byte, off int64) {
block.Contents = expectedFullData
}).Return(int64(len(data)))
if err := config.KBFSOps().Write(ctx, n, data, 7); err != nil {
t.Errorf("Got error on write: %+v", err)
}
newFileBlock := getFileBlockFromCache(t, config, id, fileNode.BlockPointer,
p.Branch)
if len(ops.nodeCache.PathFromNode(config.observer.localChange).path) !=
len(p.path) {
t.Errorf("Missing or incorrect local update during write: %v",
config.observer.localChange)
} else if ctx.Value(tCtxID) != config.observer.ctx.Value(tCtxID) {
t.Errorf("Wrong context value passed in local notify: %v",
config.observer.ctx.Value(tCtxID))
} else if !bytes.Equal(expectedFullData, newFileBlock.Contents) {
t.Errorf("Wrote bad contents: %v", data)
}
checkBlockCache(t, config, id, []kbfsblock.ID{rootID, fileID},
map[BlockPointer]BranchName{
fileNode.BlockPointer: p.Branch,
})
checkSyncOpInCache(t, config.Codec(), ops, fileNode.BlockPointer,
[]WriteRange{{Off: 7, Len: uint64(len(data))}})
}
func TestKBFSOpsWriteCauseSplit(t *testing.T) {
mockCtrl, config, ctx, cancel := kbfsOpsInit(t)
defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)
uid, id, rmd := injectNewRMD(t, config)
rootID := kbfsblock.FakeID(42)
fileID := kbfsblock.FakeID(43)
rootBlock := NewDirBlock().(*DirBlock)
rootBlock.Children["f"] = DirEntry{
BlockInfo: BlockInfo{
BlockPointer: makeBP(fileID, rmd, config, uid),
EncodedSize: 1,
},
EntryInfo: EntryInfo{
Type: File,
},
}
fileBlock := NewFileBlock().(*FileBlock)
fileBlock.Contents = []byte{}
node := pathNode{makeBP(rootID, rmd, config, uid), "p"}
fileNode := pathNode{makeBP(fileID, rmd, config, uid), "f"}
p := path{FolderBranch{Tlf: id}, []pathNode{node, fileNode}}
ops := getOps(config, id)
n := nodeFromPath(t, ops, p)
newData := []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}
expectedFullData := append([]byte{0}, newData...)
testPutBlockInCache(t, config, node.BlockPointer, id, rootBlock)
testPutBlockInCache(t, config, fileNode.BlockPointer, id, fileBlock)
// only copy the first half first
config.mockBsplit.EXPECT().CopyUntilSplit(
gomock.Any(), gomock.Any(), newData, int64(1)).
Do(func(block *FileBlock, lb bool, data []byte, off int64) {
block.Contents = append([]byte{0}, data[0:5]...)
}).Return(int64(5))
id1 := kbfsblock.FakeID(44)
id2 := kbfsblock.FakeID(45)
// new left block
config.mockCrypto.EXPECT().MakeTemporaryBlockID().Return(id1, nil)
// new right block
config.mockCrypto.EXPECT().MakeTemporaryBlockID().Return(id2, nil)
// then copy the second half into the new right block
config.mockBsplit.EXPECT().CopyUntilSplit(
gomock.Any(), gomock.Any(), newData[5:10], int64(0)).
Do(func(block *FileBlock, lb bool, data []byte, off int64) {
block.Contents = data
}).Return(int64(5))
if err := config.KBFSOps().Write(ctx, n, newData, 1); err != nil {
t.Errorf("Got error on write: %+v", err)
}
b, _ := config.BlockCache().Get(node.BlockPointer)
newRootBlock := b.(*DirBlock)
lState := makeFBOLockState()
newRootBlock, err := updateWithDirtyEntries(
ctx, ops, lState, *p.parentPath(), newRootBlock)
require.NoError(t, err)
b, _ = config.DirtyBlockCache().Get(id, fileNode.BlockPointer, p.Branch)
pblock := b.(*FileBlock)
b, _ = config.DirtyBlockCache().Get(id, makeBP(id1, rmd, config, uid),
p.Branch)
block1 := b.(*FileBlock)
b, _ = config.DirtyBlockCache().Get(id, makeBP(id2, rmd, config, uid),
p.Branch)
block2 := b.(*FileBlock)
if len(ops.nodeCache.PathFromNode(config.observer.localChange).path) !=
len(p.path) {
t.Errorf("Missing or incorrect local update during write: %v",
config.observer.localChange)
} else if ctx.Value(tCtxID) != config.observer.ctx.Value(tCtxID) {
t.Errorf("Wrong context value passed in local notify: %v",
config.observer.ctx.Value(tCtxID))
} else if !bytes.Equal(expectedFullData[0:6], block1.Contents) {
t.Errorf("Wrote bad contents to block 1: %v", block1.Contents)
} else if !bytes.Equal(expectedFullData[6:11], block2.Contents) {
t.Errorf("Wrote bad contents to block 2: %v", block2.Contents)
} else if !pblock.IsInd {
t.Errorf("Parent block is not indirect!")
} else if len(pblock.IPtrs) != 2 {
t.Errorf("Wrong number of pointers in pblock: %v", pblock.IPtrs)
} else if pblock.IPtrs[0].ID != id1 {
t.Errorf("Parent block has wrong id for block 1: %v (vs. %v)",
pblock.IPtrs[0].ID, id1)
} else if pblock.IPtrs[1].ID != id2 {
t.Errorf("Parent block has wrong id for block 2: %v",
pblock.IPtrs[1].ID)
} else if pblock.IPtrs[0].Off != 0 {
t.Errorf("Parent block has wrong offset for block 1: %d",
pblock.IPtrs[0].Off)
} else if pblock.IPtrs[1].Off != 6 {
t.Errorf("Parent block has wrong offset for block 5: %d",
pblock.IPtrs[1].Off)
} else if newRootBlock.Children["f"].Size != uint64(11) {
t.Errorf("Wrong size for written file: %d",
newRootBlock.Children["f"].Size)
}
checkBlockCache(t, config, id, []kbfsblock.ID{rootID, fileID},
map[BlockPointer]BranchName{
fileNode.BlockPointer: p.Branch,
pblock.IPtrs[0].BlockPointer: p.Branch,
pblock.IPtrs[1].BlockPointer: p.Branch,
})
checkSyncOpInCache(t, config.Codec(), ops, fileNode.BlockPointer,
[]WriteRange{{Off: 1, Len: uint64(len(newData))}})
}
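// mergeUnrefCache merges the in-progress unref info for file into md,
// so tests can check unref'd bytes before a sync completes.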
func mergeUnrefCache(
ops *folderBranchOps, lState *lockState, file path, md *RootMetadata) {
ops.blocks.blockLock.RLock(lState)
defer ops.blocks.blockLock.RUnlock(lState)
ops.blocks.unrefCache[file.tailPointer().Ref()].mergeUnrefCache(md)
}
func TestKBFSOpsWriteOverMultipleBlocks(t *testing.T) {
mockCtrl, config, ctx, cancel := kbfsOpsInit(t)
defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)
uid, id, rmd := injectNewRMD(t, config)
rootID := kbfsblock.FakeID(42)
fileID := kbfsblock.FakeID(43)
id1 := kbfsblock.FakeID(44)
id2 := kbfsblock.FakeID(45)
rootBlock := NewDirBlock().(*DirBlock)
filePtr := BlockPointer{
ID: fileID, KeyGen: 1, DataVer: 1,
Context: kbfsblock.Context{
Creator: uid.AsUserOrTeam(),
},
}
rootBlock.Children["f"] = DirEntry{
BlockInfo: BlockInfo{
BlockPointer: filePtr,
EncodedSize: 1,
},
EntryInfo: EntryInfo{
Size: 10,
},
}
fileBlock := NewFileBlock().(*FileBlock)
fileBlock.IsInd = true
fileBlock.IPtrs = []IndirectFilePtr{
makeIFP(id1, rmd, config, uid, 5, 0),
makeIFP(id2, rmd, config, uid, 6, 5),
}
block1 := NewFileBlock().(*FileBlock)
block1.Contents = []byte{5, 4, 3, 2, 1}
block2 := NewFileBlock().(*FileBlock)
block2.Contents = []byte{10, 9, 8, 7, 6}
node := pathNode{makeBP(rootID, rmd, config, uid), "p"}
fileNode := pathNode{makeBP(fileID, rmd, config, uid), "f"}
p := path{FolderBranch{Tlf: id}, []pathNode{node, fileNode}}
ops := getOps(config, id)
n := nodeFromPath(t, ops, p)
data := []byte{1, 2, 3, 4, 5}
expectedFullData := []byte{5, 4, 1, 2, 3, 4, 5, 8, 7, 6}
so, err := newSyncOp(filePtr)
require.NoError(t, err)
rmd.AddOp(so)
testPutBlockInCache(t, config, node.BlockPointer, id, rootBlock)
testPutBlockInCache(t, config, fileNode.BlockPointer, id, fileBlock)
testPutBlockInCache(t, config, fileBlock.IPtrs[0].BlockPointer, id, block1)
testPutBlockInCache(t, config, fileBlock.IPtrs[1].BlockPointer, id, block2)
// only copy the first half first
config.mockBsplit.EXPECT().CopyUntilSplit(
gomock.Any(), gomock.Any(), []byte{1, 2, 3}, int64(2)).
Do(func(block *FileBlock, lb bool, data []byte, off int64) {
block.Contents = append(block1.Contents[0:2], data[0:3]...)
}).Return(int64(3))
// update block 2
config.mockBsplit.EXPECT().CopyUntilSplit(
gomock.Any(), gomock.Any(), data[3:], int64(0)).
Do(func(block *FileBlock, lb bool, data []byte, off int64) {
block.Contents = append(data, block2.Contents[2:]...)
}).Return(int64(2))
if err := config.KBFSOps().Write(ctx, n, data, 2); err != nil {
t.Errorf("Got error on write: %+v", err)
}
newBlock1 := getFileBlockFromCache(t, config, id,
fileBlock.IPtrs[0].BlockPointer, p.Branch)
newBlock2 := getFileBlockFromCache(t, config, id,
fileBlock.IPtrs[1].BlockPointer, p.Branch)
if len(ops.nodeCache.PathFromNode(config.observer.localChange).path) !=
len(p.path) {
t.Errorf("Missing or incorrect local update during write: %v",
config.observer.localChange)
} else if ctx.Value(tCtxID) != config.observer.ctx.Value(tCtxID) {
t.Errorf("Wrong context value passed in local notify: %v",
config.observer.ctx.Value(tCtxID))
} else if !bytes.Equal(expectedFullData[0:5], newBlock1.Contents) {
t.Errorf("Wrote bad contents to block 1: %v", block1.Contents)
} else if !bytes.Equal(expectedFullData[5:10], newBlock2.Contents) {
t.Errorf("Wrote bad contents to block 2: %v", block2.Contents)
}
lState := makeFBOLockState()
// merge the unref cache to make it easy to check for changes
checkSyncOpInCache(t, config.Codec(), ops, fileNode.BlockPointer,
[]WriteRange{{Off: 2, Len: uint64(len(data))}})
mergeUnrefCache(ops, lState, p, rmd)
checkBlockCache(t, config, id, []kbfsblock.ID{rootID, fileID, id1, id2},
map[BlockPointer]BranchName{
fileNode.BlockPointer: p.Branch,
fileBlock.IPtrs[0].BlockPointer: p.Branch,
fileBlock.IPtrs[1].BlockPointer: p.Branch,
})
}
// Read tests check the same error cases, so no need for similar write
// error tests
func TestKBFSOpsTruncateToZeroSuccess(t *testing.T) {
mockCtrl, config, ctx, cancel := kbfsOpsInit(t)
defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)
uid, id, rmd := injectNewRMD(t, config)
rootID := kbfsblock.FakeID(42)
fileID := kbfsblock.FakeID(43)
rootBlock := NewDirBlock().(*DirBlock)
rootBlock.Children["f"] = DirEntry{
BlockInfo: BlockInfo{
BlockPointer: makeBP(fileID, rmd, config, uid),
EncodedSize: 1,
},
EntryInfo: EntryInfo{
Type: File,
},
}
fileBlock := NewFileBlock().(*FileBlock)
fileBlock.Contents = []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}
node := pathNode{makeBP(rootID, rmd, config, uid), "p"}
fileNode := pathNode{makeBP(fileID, rmd, config, uid), "f"}
p := path{FolderBranch{Tlf: id}, []pathNode{node, fileNode}}
ops := getOps(config, id)
n := nodeFromPath(t, ops, p)
testPutBlockInCache(t, config, node.BlockPointer, id, rootBlock)
testPutBlockInCache(t, config, fileNode.BlockPointer, id, fileBlock)
data := []byte{}
if err := config.KBFSOps().Truncate(ctx, n, 0); err != nil {
t.Errorf("Got error on truncate: %+v", err)
}
newFileBlock := getFileBlockFromCache(t, config, id, fileNode.BlockPointer,
p.Branch)
newRootBlock := getDirBlockFromCache(
t, config, id, node.BlockPointer, p.Branch)
lState := makeFBOLockState()
newRootBlock, err := updateWithDirtyEntries(
ctx, ops, lState, *p.parentPath(), newRootBlock)
require.NoError(t, err)
if len(ops.nodeCache.PathFromNode(config.observer.localChange).path) !=
len(p.path) {
t.Errorf("Missing or incorrect local update during truncate: %v",
config.observer.localChange)
} else if ctx.Value(tCtxID) != config.observer.ctx.Value(tCtxID) {
t.Errorf("Wrong context value passed in local notify: %v",
config.observer.ctx.Value(tCtxID))
} else if !bytes.Equal(data, newFileBlock.Contents) {
t.Errorf("Wrote bad contents: %v", newFileBlock.Contents)
} else if newRootBlock.Children["f"].GetWriter() != uid.AsUserOrTeam() {
t.Errorf("Wrong last writer: %v",
newRootBlock.Children["f"].GetWriter())
} else if newRootBlock.Children["f"].Size != 0 {
t.Errorf("Wrong size for written file: %d",
newRootBlock.Children["f"].Size)
}
checkBlockCache(t, config, id, []kbfsblock.ID{rootID, fileID},
map[BlockPointer]BranchName{
fileNode.BlockPointer: p.Branch,
})
checkSyncOpInCache(t, config.Codec(), ops, fileNode.BlockPointer,
[]WriteRange{{Off: 0, Len: 0}})
}
func TestKBFSOpsTruncateSameSize(t *testing.T) {
mockCtrl, config, ctx, cancel := kbfsOpsInit(t)
defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)
u, id, rmd := injectNewRMD(t, config)
rootID := kbfsblock.FakeID(42)
fileID := kbfsblock.FakeID(43)
rootBlock := NewDirBlock().(*DirBlock)
rootBlock.Children["f"] = DirEntry{
BlockInfo: makeBIFromID(fileID, u.AsUserOrTeam()),
EntryInfo: EntryInfo{
Type: File,
},
}
fileBlock := NewFileBlock().(*FileBlock)
fileBlock.Contents = []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}
node := pathNode{makeBP(rootID, rmd, config, u), "p"}
fileNode := pathNode{makeBP(fileID, rmd, config, u), "f"}
p := path{FolderBranch{Tlf: id}, []pathNode{node, fileNode}}
ops := getOps(config, id)
n := nodeFromPath(t, ops, p)
testPutBlockInCache(t, config, node.BlockPointer, id, rootBlock)
testPutBlockInCache(t, config, fileNode.BlockPointer, id, fileBlock)
data := fileBlock.Contents
if err := config.KBFSOps().Truncate(ctx, n, 10); err != nil {
t.Errorf("Got error on truncate: %+v", err)
} else if config.observer.localChange != nil {
t.Errorf("Unexpected local update during truncate: %v",
config.observer.localChange)
} else if !bytes.Equal(data, fileBlock.Contents) {
t.Errorf("Wrote bad contents: %v", data)
}
checkBlockCache(t, config, id, []kbfsblock.ID{rootID, fileID}, nil)
}
func TestKBFSOpsTruncateSmallerSuccess(t *testing.T) {
mockCtrl, config, ctx, cancel := kbfsOpsInit(t)
defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)
uid, id, rmd := injectNewRMD(t, config)
rootID := kbfsblock.FakeID(42)
fileID := kbfsblock.FakeID(43)
rootBlock := NewDirBlock().(*DirBlock)
rootBlock.Children["f"] = DirEntry{
BlockInfo: BlockInfo{
BlockPointer: makeBP(fileID, rmd, config, uid),
EncodedSize: 1,
},
EntryInfo: EntryInfo{
Type: File,
},
}
fileBlock := NewFileBlock().(*FileBlock)
fileBlock.Contents = []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}
node := pathNode{makeBP(rootID, rmd, config, uid), "p"}
fileNode := pathNode{makeBP(fileID, rmd, config, uid), "f"}
p := path{FolderBranch{Tlf: id}, []pathNode{node, fileNode}}
ops := getOps(config, id)
n := nodeFromPath(t, ops, p)
testPutBlockInCache(t, config, node.BlockPointer, id, rootBlock)
testPutBlockInCache(t, config, fileNode.BlockPointer, id, fileBlock)
data := []byte{1, 2, 3, 4, 5}
if err := config.KBFSOps().Truncate(ctx, n, 5); err != nil {
t.Errorf("Got error on truncate: %+v", err)
}
newFileBlock := getFileBlockFromCache(t, config, id, fileNode.BlockPointer,
p.Branch)
if len(ops.nodeCache.PathFromNode(config.observer.localChange).path) !=
len(p.path) {
t.Errorf("Missing or incorrect local update during truncate: %v",
config.observer.localChange)
} else if ctx.Value(tCtxID) != config.observer.ctx.Value(tCtxID) {
t.Errorf("Wrong context value passed in local notify: %v",
config.observer.ctx.Value(tCtxID))
} else if !bytes.Equal(data, newFileBlock.Contents) {
t.Errorf("Wrote bad contents: %v", data)
}
checkBlockCache(t, config, id, []kbfsblock.ID{rootID, fileID},
map[BlockPointer]BranchName{
fileNode.BlockPointer: p.Branch,
})
checkSyncOpInCache(t, config.Codec(), ops, fileNode.BlockPointer,
[]WriteRange{{Off: 5, Len: 0}})
}
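// TestKBFSOpsTruncateShortensLastBlock verifies that truncating an
// indirect file partway through its final block shortens that block in
// place, keeping both indirect pointers and unref'ing only the old
// encoded size of the modified last block.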
func TestKBFSOpsTruncateShortensLastBlock(t *testing.T) {
mockCtrl, config, ctx, cancel := kbfsOpsInit(t)
defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)
uid, id, rmd := injectNewRMD(t, config)
rootID := kbfsblock.FakeID(42)
fileID := kbfsblock.FakeID(43)
id1 := kbfsblock.FakeID(44)
id2 := kbfsblock.FakeID(45)
rootBlock := NewDirBlock().(*DirBlock)
fileInfo := makeBIFromID(fileID, uid.AsUserOrTeam())
rootBlock.Children["f"] = DirEntry{
BlockInfo: fileInfo,
EntryInfo: EntryInfo{
Size: 10,
},
}
fileBlock := NewFileBlock().(*FileBlock)
fileBlock.IsInd = true
fileBlock.IPtrs = []IndirectFilePtr{
makeIFP(id1, rmd, config, uid, 5, 0),
makeIFP(id2, rmd, config, uid, 6, 5),
}
block1 := NewFileBlock().(*FileBlock)
block1.Contents = []byte{5, 4, 3, 2, 1}
block2 := NewFileBlock().(*FileBlock)
block2.Contents = []byte{10, 9, 8, 7, 6}
node := pathNode{makeBP(rootID, rmd, config, uid), "p"}
fileNode := pathNode{makeBP(fileID, rmd, config, uid), "f"}
p := path{FolderBranch{Tlf: id}, []pathNode{node, fileNode}}
ops := getOps(config, id)
n := nodeFromPath(t, ops, p)
so, err := newSyncOp(fileInfo.BlockPointer)
require.NoError(t, err)
rmd.AddOp(so)
testPutBlockInCache(t, config, node.BlockPointer, id, rootBlock)
testPutBlockInCache(t, config, fileNode.BlockPointer, id, fileBlock)
testPutBlockInCache(t, config, fileBlock.IPtrs[0].BlockPointer, id, block1)
testPutBlockInCache(t, config, fileBlock.IPtrs[1].BlockPointer, id, block2)
data2 := []byte{10, 9}
if err := config.KBFSOps().Truncate(ctx, n, 7); err != nil {
t.Errorf("Got error on truncate: %+v", err)
}
newPBlock := getFileBlockFromCache(t, config, id, fileNode.BlockPointer,
p.Branch)
newBlock1 := getFileBlockFromCache(t, config, id,
fileBlock.IPtrs[0].BlockPointer, p.Branch)
newBlock2 := getFileBlockFromCache(t, config, id,
fileBlock.IPtrs[1].BlockPointer, p.Branch)
lState := makeFBOLockState()
// merge unref changes so we can easily check the block changes
checkSyncOpInCache(t, config.Codec(), ops, fileNode.BlockPointer,
[]WriteRange{{Off: 7, Len: 0}})
mergeUnrefCache(ops, lState, p, rmd)
if len(ops.nodeCache.PathFromNode(config.observer.localChange).path) !=
len(p.path) {
t.Errorf("Missing or incorrect local update during truncate: %v",
config.observer.localChange)
} else if ctx.Value(tCtxID) != config.observer.ctx.Value(tCtxID) {
t.Errorf("Wrong context value passed in local notify: %v",
config.observer.ctx.Value(tCtxID))
} else if !bytes.Equal(block1.Contents, newBlock1.Contents) {
t.Errorf("Wrote bad contents for block 1: %v", newBlock1.Contents)
} else if !bytes.Equal(data2, newBlock2.Contents) {
t.Errorf("Wrote bad contents for block 2: %v", newBlock2.Contents)
} else if len(newPBlock.IPtrs) != 2 {
t.Errorf("Wrong number of indirect pointers: %d", len(newPBlock.IPtrs))
} else if rmd.UnrefBytes() != 0+6 {
		// The file ID and the last block were both modified and marked dirty
t.Errorf("Truncated block not correctly unref'd, unrefBytes = %d",
rmd.UnrefBytes())
}
checkBlockCache(t, config, id, []kbfsblock.ID{rootID, fileID, id1, id2},
map[BlockPointer]BranchName{
fileNode.BlockPointer: p.Branch,
fileBlock.IPtrs[1].BlockPointer: p.Branch,
})
}
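// TestKBFSOpsTruncateRemovesABlock verifies that truncating an indirect
// file to a point before its final block drops that block's indirect
// pointer entirely and unrefs both modified blocks.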
func TestKBFSOpsTruncateRemovesABlock(t *testing.T) {
mockCtrl, config, ctx, cancel := kbfsOpsInit(t)
defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)
uid, id, rmd := injectNewRMD(t, config)
rootID := kbfsblock.FakeID(42)
fileID := kbfsblock.FakeID(43)
id1 := kbfsblock.FakeID(44)
id2 := kbfsblock.FakeID(45)
rootBlock := NewDirBlock().(*DirBlock)
fileInfo := makeBIFromID(fileID, uid.AsUserOrTeam())
rootBlock.Children["f"] = DirEntry{
BlockInfo: fileInfo,
EntryInfo: EntryInfo{
Size: 10,
},
}
fileBlock := NewFileBlock().(*FileBlock)
fileBlock.IsInd = true
fileBlock.IPtrs = []IndirectFilePtr{
makeIFP(id1, rmd, config, uid, 5, 0),
makeIFP(id2, rmd, config, uid, 6, 5),
}
block1 := NewFileBlock().(*FileBlock)
block1.Contents = []byte{5, 4, 3, 2, 1}
block2 := NewFileBlock().(*FileBlock)
block2.Contents = []byte{10, 9, 8, 7, 6}
node := pathNode{makeBP(rootID, rmd, config, uid), "p"}
fileNode := pathNode{makeBP(fileID, rmd, config, uid), "f"}
p := path{FolderBranch{Tlf: id}, []pathNode{node, fileNode}}
ops := getOps(config, id)
n := nodeFromPath(t, ops, p)
so, err := newSyncOp(fileInfo.BlockPointer)
require.NoError(t, err)
rmd.AddOp(so)
testPutBlockInCache(t, config, node.BlockPointer, id, rootBlock)
testPutBlockInCache(t, config, fileNode.BlockPointer, id, fileBlock)
testPutBlockInCache(t, config, fileBlock.IPtrs[0].BlockPointer, id, block1)
testPutBlockInCache(t, config, fileBlock.IPtrs[1].BlockPointer, id, block2)
data := []byte{5, 4, 3, 2}
if err := config.KBFSOps().Truncate(ctx, n, 4); err != nil {
t.Errorf("Got error on truncate: %+v", err)
}
newPBlock := getFileBlockFromCache(t, config, id, fileNode.BlockPointer,
p.Branch)
newBlock1 := getFileBlockFromCache(t, config, id,
fileBlock.IPtrs[0].BlockPointer, p.Branch)
lState := makeFBOLockState()
// merge unref changes so we can easily check the block changes
checkSyncOpInCache(t, config.Codec(), ops, fileNode.BlockPointer,
[]WriteRange{{Off: 4, Len: 0}})
mergeUnrefCache(ops, lState, p, rmd)
if len(ops.nodeCache.PathFromNode(config.observer.localChange).path) !=
len(p.path) {
t.Errorf("Missing or incorrect local update during truncate: %v",
config.observer.localChange)
} else if ctx.Value(tCtxID) != config.observer.ctx.Value(tCtxID) {
t.Errorf("Wrong context value passed in local notify: %v",
config.observer.ctx.Value(tCtxID))
} else if !bytes.Equal(data, newBlock1.Contents) {
t.Errorf("Wrote bad contents: %v", newBlock1.Contents)
} else if len(newPBlock.IPtrs) != 1 {
t.Errorf("Wrong number of indirect pointers: %d", len(newPBlock.IPtrs))
} else if rmd.UnrefBytes() != 0+5+6 {
		// The file ID and both blocks were all modified and marked dirty
t.Errorf("Truncated block not correctly unref'd, unrefBytes = %d",
rmd.UnrefBytes())
}
checkBlockCache(t, config, id, []kbfsblock.ID{rootID, fileID, id1, id2},
map[BlockPointer]BranchName{
fileNode.BlockPointer: p.Branch,
fileBlock.IPtrs[0].BlockPointer: p.Branch,
})
}
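// TestKBFSOpsTruncateBiggerSuccess verifies that extending a file via
// truncate zero-fills the new bytes and is recorded as a write covering
// the difference, not as a truncate.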
func TestKBFSOpsTruncateBiggerSuccess(t *testing.T) {
mockCtrl, config, ctx, cancel := kbfsOpsInit(t)
defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)
uid, id, rmd := injectNewRMD(t, config)
rootID := kbfsblock.FakeID(42)
fileID := kbfsblock.FakeID(43)
rootBlock := NewDirBlock().(*DirBlock)
rootBlock.Children["f"] = DirEntry{
BlockInfo: BlockInfo{
BlockPointer: makeBP(fileID, rmd, config, uid),
EncodedSize: 1,
},
EntryInfo: EntryInfo{
Type: File,
},
}
fileBlock := NewFileBlock().(*FileBlock)
fileBlock.Contents = []byte{1, 2, 3, 4, 5}
node := pathNode{makeBP(rootID, rmd, config, uid), "p"}
fileNode := pathNode{makeBP(fileID, rmd, config, uid), "f"}
p := path{FolderBranch{Tlf: id}, []pathNode{node, fileNode}}
ops := getOps(config, id)
n := nodeFromPath(t, ops, p)
testPutBlockInCache(t, config, node.BlockPointer, id, rootBlock)
testPutBlockInCache(t, config, fileNode.BlockPointer, id, fileBlock)
config.mockBsplit.EXPECT().CopyUntilSplit(
gomock.Any(), gomock.Any(), []byte{0, 0, 0, 0, 0}, int64(5)).
Do(func(block *FileBlock, lb bool, data []byte, off int64) {
block.Contents = append(block.Contents, data...)
}).Return(int64(5))
data := []byte{1, 2, 3, 4, 5, 0, 0, 0, 0, 0}
if err := config.KBFSOps().Truncate(ctx, n, 10); err != nil {
t.Errorf("Got error on truncate: %+v", err)
}
newFileBlock := getFileBlockFromCache(t, config, id, fileNode.BlockPointer,
p.Branch)
if len(ops.nodeCache.PathFromNode(config.observer.localChange).path) !=
len(p.path) {
t.Errorf("Missing or incorrect local update during truncate: %v",
config.observer.localChange)
} else if ctx.Value(tCtxID) != config.observer.ctx.Value(tCtxID) {
t.Errorf("Wrong context value passed in local notify: %v",
config.observer.ctx.Value(tCtxID))
} else if !bytes.Equal(data, newFileBlock.Contents) {
t.Errorf("Wrote bad contents: %v", data)
}
checkBlockCache(t, config, id, []kbfsblock.ID{rootID, fileID},
map[BlockPointer]BranchName{
fileNode.BlockPointer: p.Branch,
})
// A truncate past the end of the file actually translates into a
// write for the difference
checkSyncOpInCache(t, config.Codec(), ops, fileNode.BlockPointer,
[]WriteRange{{Off: 5, Len: 5}})
}
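// TestSetExFailNoSuchName verifies that SetEx on an entry missing from
// its parent directory fails with NoSuchNameError.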
func TestSetExFailNoSuchName(t *testing.T) {
mockCtrl, config, ctx, cancel := kbfsOpsInit(t)
defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)
u, id, rmd := injectNewRMD(t, config)
rootID := kbfsblock.FakeID(42)
rmd.data.Dir.ID = rootID
aID := kbfsblock.FakeID(43)
rootBlock := NewDirBlock().(*DirBlock)
node := pathNode{makeBP(rootID, rmd, config, u), "p"}
aNode := pathNode{makeBP(aID, rmd, config, u), "a"}
p := path{FolderBranch{Tlf: id}, []pathNode{node, aNode}}
ops := getOps(config, id)
n := nodeFromPath(t, ops, p)
testPutBlockInCache(t, config, node.BlockPointer, id, rootBlock)
expectedErr := NoSuchNameError{p.tailName()}
// chmod a+x a
if err := config.KBFSOps().SetEx(ctx, n, true); err == nil {
t.Errorf("Got no expected error on setex")
} else if err != expectedErr {
t.Errorf("Got unexpected error on setex: %+v", err)
}
}
// Other SetEx failure cases are all the same as any other block sync
func TestSetMtimeNull(t *testing.T) {
mockCtrl, config, ctx, cancel := kbfsOpsInit(t)
defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)
u, id, rmd := injectNewRMD(t, config)
rootID := kbfsblock.FakeID(42)
aID := kbfsblock.FakeID(43)
rootBlock := NewDirBlock().(*DirBlock)
oldMtime := time.Now().UnixNano()
rootBlock.Children["a"] = DirEntry{
BlockInfo: makeBIFromID(aID, u.AsUserOrTeam()),
EntryInfo: EntryInfo{
Type: File,
Mtime: oldMtime,
},
}
node := pathNode{makeBP(rootID, rmd, config, u), "p"}
aNode := pathNode{makeBP(aID, rmd, config, u), "a"}
p := path{FolderBranch{Tlf: id}, []pathNode{node, aNode}}
ops := getOps(config, id)
n := nodeFromPath(t, ops, p)
if err := config.KBFSOps().SetMtime(ctx, n, nil); err != nil {
t.Errorf("Got unexpected error on null setmtime: %+v", err)
}
newP := ops.nodeCache.PathFromNode(n)
if rootBlock.Children["a"].Mtime != oldMtime {
t.Errorf("a has wrong mtime: %v", rootBlock.Children["a"].Mtime)
} else if newP.path[0].ID != p.path[0].ID {
t.Errorf("Got back a changed path for null setmtime test: %v", newP)
}
checkBlockCache(t, config, id, nil, nil)
}
func TestMtimeFailNoSuchName(t *testing.T) {
mockCtrl, config, ctx, cancel := kbfsOpsInit(t)
defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)
u, id, rmd := injectNewRMD(t, config)
rootID := kbfsblock.FakeID(42)
rmd.data.Dir.ID = rootID
aID := kbfsblock.FakeID(43)
rootBlock := NewDirBlock().(*DirBlock)
node := pathNode{makeBP(rootID, rmd, config, u), "p"}
aNode := pathNode{makeBP(aID, rmd, config, u), "a"}
p := path{FolderBranch{Tlf: id}, []pathNode{node, aNode}}
ops := getOps(config, id)
n := nodeFromPath(t, ops, p)
testPutBlockInCache(t, config, node.BlockPointer, id, rootBlock)
expectedErr := NoSuchNameError{p.tailName()}
newMtime := time.Now()
if err := config.KBFSOps().SetMtime(ctx, n, &newMtime); err == nil {
t.Errorf("Got no expected error on setmtime")
} else if err != expectedErr {
t.Errorf("Got unexpected error on setmtime: %+v", err)
}
}
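// getOrCreateSyncInfo is a test helper that grabs blockLock and returns
// the (possibly newly-created) syncInfo for the given dir entry.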
func getOrCreateSyncInfo(
ops *folderBranchOps, lState *lockState, de DirEntry) (*syncInfo, error) {
ops.blocks.blockLock.Lock(lState)
defer ops.blocks.blockLock.Unlock(lState)
return ops.blocks.getOrCreateSyncInfoLocked(lState, de)
}
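// makeBlockStateDirty is a test helper that marks the given block
// pointer dirty in the folder's dirty-file tracking for path p.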
func makeBlockStateDirty(config Config, kmd KeyMetadata, p path,
ptr BlockPointer) {
ops := getOps(config, kmd.TlfID())
lState := makeFBOLockState()
ops.blocks.blockLock.Lock(lState)
defer ops.blocks.blockLock.Unlock(lState)
df := ops.blocks.getOrCreateDirtyFileLocked(lState, p)
df.setBlockDirty(ptr)
}
// SetMtime failure cases are all the same as any other block sync
func TestSyncCleanSuccess(t *testing.T) {
mockCtrl, config, ctx, cancel := kbfsOpsInit(t)
defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)
u, id, rmd := injectNewRMD(t, config)
rootID := kbfsblock.FakeID(42)
rmd.data.Dir.ID = rootID
aID := kbfsblock.FakeID(43)
node := pathNode{makeBP(rootID, rmd, config, u), "p"}
aNode := pathNode{makeBP(aID, rmd, config, u), "a"}
p := path{FolderBranch{Tlf: id}, []pathNode{node, aNode}}
ops := getOps(config, id)
n := nodeFromPath(t, ops, p)
// fsync a
if err := config.KBFSOps().SyncAll(ctx, n.GetFolderBranch()); err != nil {
t.Errorf("Got unexpected error on sync: %+v", err)
}
newP := ops.nodeCache.PathFromNode(n)
if len(newP.path) != len(p.path) {
// should be the exact same path back
t.Errorf("Got a different length path back: %v", newP)
} else {
for i, n := range newP.path {
if n != p.path[i] {
t.Errorf("Node %d differed: %v", i, n)
}
}
}
checkBlockCache(t, config, id, nil, nil)
}
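// expectSyncDirtyBlock sets up the mock expectations for syncing a
// single dirty block: the block is reported dirty, split at splitAt,
// readied into a buffer padded by padSize, and put to the block server
// under a new ID derived from ptr.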
func expectSyncDirtyBlock(config *ConfigMock, kmd KeyMetadata,
p path, ptr BlockPointer, block *FileBlock, splitAt int64,
padSize int, opsLockHeld bool) *gomock.Call {
branch := MasterBranch
if config.mockDirtyBcache != nil {
config.mockDirtyBcache.EXPECT().IsDirty(gomock.Any(), ptrMatcher{ptr},
branch).AnyTimes().Return(true)
config.mockDirtyBcache.EXPECT().Get(gomock.Any(), ptrMatcher{ptr},
branch).AnyTimes().Return(block, nil)
} else {
config.DirtyBlockCache().Put(p.Tlf, ptr, branch, block)
}
if !opsLockHeld {
makeBlockStateDirty(config, kmd, p, ptr)
}
c1 := config.mockBsplit.EXPECT().CheckSplit(block).Return(splitAt)
newID := kbfsblock.FakeIDAdd(ptr.ID, 100)
// Ideally, we'd use the size of block.Contents at the time
// that Ready() is called, but GoMock isn't expressive enough
// for that.
newEncBuf := make([]byte, len(block.Contents)+padSize)
readyBlockData := ReadyBlockData{
buf: newEncBuf,
}
c2 := config.mockBops.EXPECT().Ready(gomock.Any(), kmdMatcher{kmd}, block).
After(c1).Return(newID, len(block.Contents), readyBlockData, nil)
newPtr := BlockPointer{ID: newID}
if config.mockBcache != nil {
config.mockBcache.EXPECT().Put(ptrMatcher{newPtr}, kmd.TlfID(), block, PermanentEntry).Return(nil)
config.mockBcache.EXPECT().DeletePermanent(newID).Return(nil)
} else {
// Nothing to do, since the cache entry is added and
// removed.
}
config.mockBserv.EXPECT().Put(gomock.Any(), kmd.TlfID(), newID,
gomock.Any(), gomock.Any(), gomock.Any()).Return(nil)
return c2
}
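// putAndCleanAnyBlock expects any block to be put into the transient
// cache, and rewires subsequent Gets so the block is served from the
// clean cache rather than the dirty cache, which is then free to delete
// its copy.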
func putAndCleanAnyBlock(config *ConfigMock, p path) {
config.mockBcache.EXPECT().Put(gomock.Any(), gomock.Any(), gomock.Any(), TransientEntry).
Do(func(ptr BlockPointer, tlf tlf.ID, block Block, lifetime BlockCacheLifetime) {
config.mockDirtyBcache.EXPECT().
Get(gomock.Any(), ptrMatcher{BlockPointer{ID: ptr.ID}},
p.Branch).AnyTimes().Return(nil, NoSuchBlockError{ptr.ID})
config.mockBcache.EXPECT().
Get(ptrMatcher{BlockPointer{ID: ptr.ID}}).
AnyTimes().Return(block, nil)
}).AnyTimes().Return(nil)
config.mockDirtyBcache.EXPECT().Delete(gomock.Any(), gomock.Any(),
p.Branch).AnyTimes().Return(nil)
}
func TestKBFSOpsStatRootSuccess(t *testing.T) {
mockCtrl, config, ctx, cancel := kbfsOpsInit(t)
defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)
id, h, rmd := createNewRMD(t, config, "alice", false)
ops := getOps(config, id)
ops.head = makeImmutableRMDForTest(t, config, rmd, kbfsmd.FakeID(1))
ops.headStatus = headTrusted
u := h.FirstResolvedWriter()
rootID := kbfsblock.FakeID(42)
node := pathNode{makeBP(rootID, rmd, config, u), "p"}
p := path{FolderBranch{Tlf: id}, []pathNode{node}}
n := nodeFromPath(t, ops, p)
_, err := config.KBFSOps().Stat(ctx, n)
if err != nil {
t.Errorf("Error on Stat: %+v", err)
}
}
func TestKBFSOpsFailingRootOps(t *testing.T) {
mockCtrl, config, ctx, cancel := kbfsOpsInit(t)
defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)
id, h, rmd := createNewRMD(t, config, "alice", false)
ops := getOps(config, id)
ops.head = makeImmutableRMDForTest(t, config, rmd, kbfsmd.FakeID(1))
ops.headStatus = headTrusted
u := h.FirstResolvedWriter()
rootID := kbfsblock.FakeID(42)
rmd.data.Dir.BlockPointer = makeBP(rootID, rmd, config, u)
node := pathNode{rmd.data.Dir.BlockPointer, "p"}
p := path{FolderBranch{Tlf: id}, []pathNode{node}}
n := nodeFromPath(t, ops, p)
// TODO: Make sure Read, Write, and Truncate fail also with
// InvalidPathError{}.
err := config.KBFSOps().SetEx(ctx, n, true)
if _, ok := err.(InvalidParentPathError); !ok {
t.Errorf("Unexpected error on SetEx: %+v", err)
}
err = config.KBFSOps().SetMtime(ctx, n, &time.Time{})
if _, ok := err.(InvalidParentPathError); !ok {
t.Errorf("Unexpected error on SetMtime: %+v", err)
}
// TODO: Sync succeeds, but it should fail. Fix this!
}
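// testBGObserver signals on its channel whenever a batch change
// notification arrives, letting tests wait for background activity.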
type testBGObserver struct {
c chan<- struct{}
}
func (t *testBGObserver) LocalChange(ctx context.Context, node Node,
write WriteRange) {
// ignore
}
func (t *testBGObserver) BatchChanges(ctx context.Context,
changes []NodeChange) {
t.c <- struct{}{}
}
func (t *testBGObserver) TlfHandleChange(ctx context.Context,
newHandle *TlfHandle) {
return
}
// Tests that the background flusher will sync a dirty file if the
// application does not.
func TestKBFSOpsBackgroundFlush(t *testing.T) {
config, _, ctx, cancel := kbfsOpsInitNoMocks(t, "alice", "bob")
defer kbfsTestShutdownNoMocks(t, config, ctx, cancel)
config.noBGFlush = true
// create a file.
rootNode := GetRootNodeOrBust(ctx, t, config, "alice,bob", false)
kbfsOps := config.KBFSOps()
nodeA, _, err := kbfsOps.CreateFile(ctx, rootNode, "a", false, NoExcl)
if err != nil {
t.Fatalf("Couldn't create file: %+v", err)
}
ops := getOps(config, rootNode.GetFolderBranch().Tlf)
oldPtr := ops.nodeCache.PathFromNode(nodeA).tailPointer()
staller := NewNaïveStaller(config)
staller.StallMDOp(StallableMDAfterPut, 1, false)
// start the background flusher
config.SetBGFlushPeriod(1 * time.Millisecond)
go ops.backgroundFlusher()
// Wait for the stall to know the background work is done.
staller.WaitForStallMDOp(StallableMDAfterPut)
staller.UnstallOneMDOp(StallableMDAfterPut)
// Do our own SyncAll now to ensure we wait for the bg flusher to
// finish.
err = kbfsOps.SyncAll(ctx, rootNode.GetFolderBranch())
if err != nil {
t.Fatalf("Couldn't sync all: %+v", err)
}
newPtr := ops.nodeCache.PathFromNode(nodeA).tailPointer()
if oldPtr == newPtr {
t.Fatalf("Background sync didn't update pointers")
}
}
func TestKBFSOpsWriteRenameStat(t *testing.T) {
config, _, ctx, cancel := kbfsOpsInitNoMocks(t, "test_user")
// TODO: Use kbfsTestShutdownNoMocks.
defer kbfsTestShutdownNoMocksNoCheck(t, config, ctx, cancel)
// create a file.
rootNode := GetRootNodeOrBust(ctx, t, config, "test_user", false)
kbfsOps := config.KBFSOps()
fileNode, _, err := kbfsOps.CreateFile(ctx, rootNode, "a", false, NoExcl)
if err != nil {
t.Fatalf("Couldn't create file: %+v", err)
}
// Write to it.
data := []byte{1}
err = kbfsOps.Write(ctx, fileNode, data, 0)
if err != nil {
t.Fatalf("Couldn't write to file: %+v", err)
}
// Stat it.
ei, err := kbfsOps.Stat(ctx, fileNode)
if err != nil {
t.Fatalf("Couldn't stat file: %+v", err)
}
if ei.Size != 1 {
t.Errorf("Stat size %d unexpectedly not 1", ei.Size)
}
// Rename it.
err = kbfsOps.Rename(ctx, rootNode, "a", rootNode, "b")
if err != nil {
t.Fatalf("Couldn't rename; %+v", err)
}
// Stat it again.
newEi, err := kbfsOps.Stat(ctx, fileNode)
if err != nil {
t.Fatalf("Couldn't stat file: %+v", err)
}
// CTime is allowed to change after a rename, but nothing else.
if ei.Type != newEi.Type || ei.Size != newEi.Size ||
ei.Mtime != newEi.Mtime {
t.Errorf("Entry info unexpectedly changed from %+v to %+v", ei, newEi)
}
}
func TestKBFSOpsWriteRenameGetDirChildren(t *testing.T) {
config, _, ctx, cancel := kbfsOpsInitNoMocks(t, "test_user")
// TODO: Use kbfsTestShutdownNoMocks.
defer kbfsTestShutdownNoMocksNoCheck(t, config, ctx, cancel)
// create a file.
rootNode := GetRootNodeOrBust(ctx, t, config, "test_user", false)
kbfsOps := config.KBFSOps()
fileNode, _, err := kbfsOps.CreateFile(ctx, rootNode, "a", false, NoExcl)
if err != nil {
t.Fatalf("Couldn't create file: %+v", err)
}
// Write to it.
data := []byte{1}
err = kbfsOps.Write(ctx, fileNode, data, 0)
if err != nil {
t.Fatalf("Couldn't write to file: %+v", err)
}
// Stat it.
ei, err := kbfsOps.Stat(ctx, fileNode)
if err != nil {
t.Fatalf("Couldn't stat file: %+v", err)
}
if ei.Size != 1 {
t.Errorf("Stat size %d unexpectedly not 1", ei.Size)
}
// Rename it.
err = kbfsOps.Rename(ctx, rootNode, "a", rootNode, "b")
if err != nil {
t.Fatalf("Couldn't rename; %+v", err)
}
// Get the stats via GetDirChildren.
eis, err := kbfsOps.GetDirChildren(ctx, rootNode)
if err != nil {
t.Fatalf("Couldn't stat file: %+v", err)
}
// CTime is allowed to change after a rename, but nothing else.
if newEi := eis["b"]; ei.Type != newEi.Type || ei.Size != newEi.Size ||
ei.Mtime != newEi.Mtime {
t.Errorf("Entry info unexpectedly changed from %+v to %+v",
ei, eis["b"])
}
}
func TestKBFSOpsCreateFileWithArchivedBlock(t *testing.T) {
config, _, ctx, cancel := kbfsOpsInitNoMocks(t, "test_user")
defer kbfsTestShutdownNoMocks(t, config, ctx, cancel)
// create a file.
rootNode := GetRootNodeOrBust(ctx, t, config, "test_user", false)
kbfsOps := config.KBFSOps()
_, _, err := kbfsOps.CreateFile(ctx, rootNode, "a", false, NoExcl)
if err != nil {
t.Fatalf("Couldn't create file: %+v", err)
}
// Remove the file, which will archive the block
err = kbfsOps.RemoveEntry(ctx, rootNode, "a")
if err != nil {
t.Fatalf("Couldn't remove file: %+v", err)
}
// Wait for the archiving to finish
err = kbfsOps.SyncFromServerForTesting(ctx, rootNode.GetFolderBranch())
if err != nil {
t.Fatalf("Couldn't sync from server")
}
// Create a second file, which will use the same initial block ID
// from the cache, even though it's been archived, and will be
// forced to try again.
_, _, err = kbfsOps.CreateFile(ctx, rootNode, "b", false, NoExcl)
if err != nil {
t.Fatalf("Couldn't create second file: %+v", err)
}
}
func TestKBFSOpsMultiBlockSyncWithArchivedBlock(t *testing.T) {
config, _, ctx, cancel := kbfsOpsInitNoMocks(t, "test_user")
defer kbfsTestShutdownNoMocks(t, config, ctx, cancel)
// Make the blocks small, with multiple levels of indirection, but
// make the unembedded size large, so we don't create thousands of
// unembedded block change blocks.
blockSize := int64(5)
bsplit := &BlockSplitterSimple{blockSize, 2, 100 * 1024}
config.SetBlockSplitter(bsplit)
// create a file.
rootNode := GetRootNodeOrBust(ctx, t, config, "test_user", false)
kbfsOps := config.KBFSOps()
fileNode, _, err := kbfsOps.CreateFile(ctx, rootNode, "a", false, NoExcl)
if err != nil {
t.Fatalf("Couldn't create file: %+v", err)
}
// Write a few blocks
data := []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}
err = kbfsOps.Write(ctx, fileNode, data, 0)
if err != nil {
t.Fatalf("Couldn't write file: %+v", err)
}
err = kbfsOps.SyncAll(ctx, fileNode.GetFolderBranch())
if err != nil {
t.Fatalf("Couldn't sync file: %+v", err)
}
// Now overwrite those blocks to archive them
newData := []byte{11, 12, 13, 14, 15, 16, 17, 18, 19, 20}
err = kbfsOps.Write(ctx, fileNode, newData, 0)
if err != nil {
t.Fatalf("Couldn't write file: %+v", err)
}
err = kbfsOps.SyncAll(ctx, fileNode.GetFolderBranch())
if err != nil {
t.Fatalf("Couldn't sync file: %+v", err)
}
// Wait for the archiving to finish
err = kbfsOps.SyncFromServerForTesting(ctx, rootNode.GetFolderBranch())
if err != nil {
t.Fatalf("Couldn't sync from server")
}
// Now write the original first block, which has been archived,
// and make sure it works.
err = kbfsOps.Write(ctx, fileNode, data[0:blockSize], 0)
if err != nil {
t.Fatalf("Couldn't write file: %+v", err)
}
err = kbfsOps.SyncAll(ctx, fileNode.GetFolderBranch())
if err != nil {
t.Fatalf("Couldn't sync file: %+v", err)
}
}
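// corruptBlockServer wraps a BlockServer and appends an extra byte to
// all fetched block data, so that block ID verification fails on read.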
type corruptBlockServer struct {
BlockServer
}
func (cbs corruptBlockServer) Get(
ctx context.Context, tlfID tlf.ID, id kbfsblock.ID, context kbfsblock.Context) (
[]byte, kbfscrypto.BlockCryptKeyServerHalf, error) {
data, keyServerHalf, err := cbs.BlockServer.Get(ctx, tlfID, id, context)
if err != nil {
return nil, kbfscrypto.BlockCryptKeyServerHalf{}, err
}
return append(data, 0), keyServerHalf, nil
}
func TestKBFSOpsFailToReadUnverifiableBlock(t *testing.T) {
config, _, ctx, cancel := kbfsOpsInitNoMocks(t, "test_user")
defer kbfsTestShutdownNoMocks(t, config, ctx, cancel)
config.SetBlockServer(&corruptBlockServer{
BlockServer: config.BlockServer(),
})
// create a file.
rootNode := GetRootNodeOrBust(ctx, t, config, "test_user", false)
kbfsOps := config.KBFSOps()
_, _, err := kbfsOps.CreateFile(ctx, rootNode, "a", false, NoExcl)
if err != nil {
t.Fatalf("Couldn't create file: %+v", err)
}
// Read using a different "device"
config2 := ConfigAsUser(config, "test_user")
defer CheckConfigAndShutdown(ctx, t, config2)
// Shutdown the mdserver explicitly before the state checker tries to run
defer config2.MDServer().Shutdown()
rootNode2 := GetRootNodeOrBust(ctx, t, config2, "test_user", false)
// Lookup the file, which should fail on block ID verification
kbfsOps2 := config2.KBFSOps()
_, _, err = kbfsOps2.Lookup(ctx, rootNode2, "a")
if _, ok := errors.Cause(err).(kbfshash.HashMismatchError); !ok {
t.Fatalf("Could unexpectedly lookup the file: %+v", err)
}
}
// Test that the size of a single empty block doesn't change. If this
// test ever fails, consult max or strib before merging.
func TestKBFSOpsEmptyTlfSize(t *testing.T) {
config, _, ctx, cancel := kbfsOpsInitNoMocks(t, "test_user")
defer kbfsTestShutdownNoMocks(t, config, ctx, cancel)
// Create a TLF.
rootNode := GetRootNodeOrBust(ctx, t, config, "test_user", false)
status, _, err := config.KBFSOps().FolderStatus(ctx,
rootNode.GetFolderBranch())
if err != nil {
t.Fatalf("Couldn't get folder status: %+v", err)
}
if status.DiskUsage != 313 {
t.Fatalf("Disk usage of an empty TLF is no longer 313. " +
"Talk to max or strib about why this matters.")
}
}
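// cryptoFixedTlf overrides MakeRandomTlfID to always return a fixed TLF
// ID, letting a test construct a second TLF that collides with an
// existing one.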
type cryptoFixedTlf struct {
Crypto
tlf tlf.ID
}
func (c cryptoFixedTlf) MakeRandomTlfID(isPublic bool) (tlf.ID, error) {
return c.tlf, nil
}
// TestKBFSOpsMaliciousMDServerRange tries to trick KBFSOps into
// accepting bad MDs.
func TestKBFSOpsMaliciousMDServerRange(t *testing.T) {
config1, _, ctx, cancel := kbfsOpsInitNoMocks(t, "alice", "mallory")
// TODO: Use kbfsTestShutdownNoMocks.
defer kbfsTestShutdownNoMocksNoCheck(t, config1, ctx, cancel)
// Create alice's TLF.
rootNode1 := GetRootNodeOrBust(ctx, t, config1, "alice", false)
fb1 := rootNode1.GetFolderBranch()
kbfsOps1 := config1.KBFSOps()
_, _, err := kbfsOps1.CreateFile(ctx, rootNode1, "dummy.txt", false, NoExcl)
require.NoError(t, err)
err = kbfsOps1.SyncAll(ctx, rootNode1.GetFolderBranch())
require.NoError(t, err)
// Create mallory's fake TLF using the same TLF ID as alice's.
config2 := ConfigAsUser(config1, "mallory")
crypto2 := cryptoFixedTlf{config2.Crypto(), fb1.Tlf}
config2.SetCrypto(crypto2)
mdserver2, err := NewMDServerMemory(mdServerLocalConfigAdapter{config2})
require.NoError(t, err)
config2.MDServer().Shutdown()
config2.SetMDServer(mdserver2)
config2.SetMDCache(NewMDCacheStandard(1))
rootNode2 := GetRootNodeOrBust(ctx, t, config2, "alice,mallory", false)
require.Equal(t, fb1.Tlf, rootNode2.GetFolderBranch().Tlf)
kbfsOps2 := config2.KBFSOps()
// Add some operations to get mallory's TLF to have a higher
// MetadataVersion.
_, _, err = kbfsOps2.CreateFile(
ctx, rootNode2, "dummy.txt", false, NoExcl)
require.NoError(t, err)
err = kbfsOps2.SyncAll(ctx, rootNode2.GetFolderBranch())
require.NoError(t, err)
err = kbfsOps2.RemoveEntry(ctx, rootNode2, "dummy.txt")
require.NoError(t, err)
err = kbfsOps2.SyncAll(ctx, rootNode2.GetFolderBranch())
require.NoError(t, err)
// Now route alice's TLF to mallory's MD server.
config1.SetMDServer(mdserver2.copy(mdServerLocalConfigAdapter{config1}))
// Simulate the server triggering alice to update.
config1.SetKeyCache(NewKeyCacheStandard(1))
err = kbfsOps1.SyncFromServerForTesting(ctx, fb1)
// TODO: We can actually fake out the PrevRoot pointer, too
// and then we'll be caught by the handle check. But when we
// have MDOps do the handle check, that'll trigger first.
require.IsType(t, MDPrevRootMismatch{}, err)
}
// TODO: Test malicious mdserver and rekey flow against wrong
// TLFs being introduced upon rekey.
// Test that if GetTLFCryptKeys fails to create a TLF, the second
// attempt will also fail with the same error. Regression test for
// KBFS-1929.
func TestGetTLFCryptKeysAfterFirstError(t *testing.T) {
config, _, ctx, cancel := kbfsOpsInitNoMocks(t, "alice")
// TODO: Use kbfsTestShutdownNoMocks.
defer kbfsTestShutdownNoMocksNoCheck(t, config, ctx, cancel)
createErr := errors.New("Cannot create this TLF")
mdserver := &shimMDServer{
MDServer: config.MDServer(),
nextErr: createErr,
}
config.SetMDServer(mdserver)
h := parseTlfHandleOrBust(t, config, "alice", false)
_, _, err := config.KBFSOps().GetTLFCryptKeys(ctx, h)
if err != createErr {
t.Fatalf("Got unexpected error when creating TLF: %+v", err)
}
// Reset the error.
mdserver.nextErr = createErr
// Should get the same error, otherwise something's wrong.
_, _, err = config.KBFSOps().GetTLFCryptKeys(ctx, h)
if err != createErr {
t.Fatalf("Got unexpected error when creating TLF: %+v", err)
}
}
func TestForceFastForwardOnEmptyTLF(t *testing.T) {
config, _, ctx, cancel := kbfsOpsInitNoMocks(t, "alice", "bob")
// TODO: Use kbfsTestShutdownNoMocks.
defer kbfsTestShutdownNoMocksNoCheck(t, config, ctx, cancel)
// Look up bob's public folder.
h := parseTlfHandleOrBust(t, config, "bob", true)
_, _, err := config.KBFSOps().GetOrCreateRootNode(ctx, h, MasterBranch)
if _, ok := err.(WriteAccessError); !ok {
t.Fatalf("Unexpected err reading a public TLF: %+v", err)
}
// There's only one folder at this point.
kbfsOps := config.KBFSOps().(*KBFSOpsStandard)
kbfsOps.opsLock.RLock()
var ops *folderBranchOps
for _, fbo := range kbfsOps.ops {
ops = fbo
break
}
kbfsOps.opsLock.RUnlock()
// FastForward shouldn't do anything, since the TLF hasn't been
// cleared yet.
config.KBFSOps().ForceFastForward(ctx)
err = ops.forcedFastForwards.Wait(ctx)
if err != nil {
t.Fatalf("Couldn't wait for fast forward: %+v", err)
}
}
// Regression test for KBFS-2161.
func TestDirtyPathsAfterRemoveDir(t *testing.T) {
config, _, ctx, cancel := kbfsOpsInitNoMocks(t, "test_user")
defer kbfsTestShutdownNoMocks(t, config, ctx, cancel)
rootNode := GetRootNodeOrBust(ctx, t, config, "test_user", false)
kbfsOps := config.KBFSOps()
// Create a/b/c.
nodeA, _, err := kbfsOps.CreateDir(ctx, rootNode, "a")
require.NoError(t, err)
err = kbfsOps.SyncAll(ctx, rootNode.GetFolderBranch())
require.NoError(t, err)
nodeB, _, err := kbfsOps.CreateDir(ctx, nodeA, "b")
require.NoError(t, err)
err = kbfsOps.SyncAll(ctx, rootNode.GetFolderBranch())
require.NoError(t, err)
nodeC, _, err := kbfsOps.CreateFile(ctx, nodeB, "c", false, NoExcl)
require.NoError(t, err)
err = kbfsOps.SyncAll(ctx, rootNode.GetFolderBranch())
require.NoError(t, err)
// Remove node c from the block cache and the server, to guarantee
// it's not needed during the removal.
ops := getOps(config, rootNode.GetFolderBranch().Tlf)
ptrC := ops.nodeCache.PathFromNode(nodeC).tailPointer()
err = config.BlockCache().DeleteTransient(
ptrC, rootNode.GetFolderBranch().Tlf)
require.NoError(t, err)
// Remove c.
err = kbfsOps.RemoveEntry(ctx, nodeB, "c")
require.NoError(t, err)
// Now a/b should be dirty.
status, _, err := kbfsOps.FolderStatus(ctx, rootNode.GetFolderBranch())
require.NoError(t, err)
require.Len(t, status.DirtyPaths, 1)
require.Equal(t, "test_user/a/b", status.DirtyPaths[0])
// Now remove b, and make sure a/b is no longer dirty.
err = kbfsOps.RemoveDir(ctx, nodeA, "b")
require.NoError(t, err)
status, _, err = kbfsOps.FolderStatus(ctx, rootNode.GetFolderBranch())
require.NoError(t, err)
require.Len(t, status.DirtyPaths, 1)
require.Equal(t, "test_user/a", status.DirtyPaths[0])
// Also make sure we can no longer create anything in the removed
// directory.
_, _, err = kbfsOps.CreateDir(ctx, nodeB, "d")
require.IsType(t, UnsupportedOpInUnlinkedDirError{}, errors.Cause(err))
err = kbfsOps.SyncAll(ctx, rootNode.GetFolderBranch())
require.NoError(t, err)
status, _, err = kbfsOps.FolderStatus(ctx, rootNode.GetFolderBranch())
require.NoError(t, err)
require.Len(t, status.DirtyPaths, 0)
// If the block made it back into the cache, we have a problem.
// It shouldn't be needed for removal.
_, err = config.BlockCache().Get(ptrC)
require.NotNil(t, err)
}
| 1 | 16,889 | You can put this in `kbfsOpsInit()` if you want, so we don't have to call it in every test. | keybase-kbfs | go |
@@ -528,8 +528,10 @@ drx_insert_counter_update(void *drcontext, instrlist_t *ilist, instr_t *where,
}
}
#elif defined(AARCHXX)
+# ifdef ARM_32
/* FIXME i#1551: implement 64-bit counter support */
- ASSERT(!is_64, "DRX_COUNTER_64BIT is not implemented");
+ ASSERT(!is_64, "DRX_COUNTER_64BIT is not implemented for ARM_32");
+# endif /* ARM_32 */
if (use_drreg) {
if (drreg_reserve_register(drcontext, ilist, where, NULL, ®1) != | 1 | /* **********************************************************
* Copyright (c) 2013-2019 Google, Inc. All rights reserved.
* **********************************************************/
/*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* * Neither the name of Google, Inc. nor the names of its contributors may be
* used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL GOOGLE, INC. OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
* DAMAGE.
*/
/* DynamoRio eXtension utilities */
#include "dr_api.h"
#include "drx.h"
#include "hashtable.h"
#include "../ext_utils.h"
/* We use drmgr but only internally. A user of drx will end up loading in
* the drmgr library, but it won't affect the user's code.
*/
#include "drmgr.h"
#include "drreg.h"
#ifdef UNIX
# ifdef LINUX
# include "../../core/unix/include/syscall.h"
# else
# include <sys/syscall.h>
# endif
# include <signal.h> /* SIGKILL */
#endif
#include <limits.h>
#ifdef DEBUG
# define ASSERT(x, msg) DR_ASSERT_MSG(x, msg)
# define IF_DEBUG(x) x
#else
# define ASSERT(x, msg) /* nothing */
# define IF_DEBUG(x) /* nothing */
#endif /* DEBUG */
#define XMM_REG_SIZE 16
#define YMM_REG_SIZE 32
#define MAX(x, y) ((x) >= (y) ? (x) : (y))
#ifdef X86
/* TODO i#2985: add ARM SIMD. */
# define PLATFORM_SUPPORTS_SCATTER_GATHER
#endif
#define MINSERT instrlist_meta_preinsert
/* For inserting an app instruction, which must have a translation ("xl8") field. */
#define PREXL8 instrlist_preinsert
#define VERBOSE 0
/* Reserved note range values */
enum {
DRX_NOTE_AFLAGS_RESTORE_BEGIN,
DRX_NOTE_AFLAGS_RESTORE_SAHF,
DRX_NOTE_AFLAGS_RESTORE_END,
DRX_NOTE_COUNT,
};
static ptr_uint_t note_base;
#define NOTE_VAL(enum_val) ((void *)(ptr_int_t)(note_base + (enum_val)))
static bool expand_scatter_gather_drreg_initialized;
static bool soft_kills_enabled;
static void
soft_kills_exit(void);
/* For debugging */
static uint verbose = 0;
#undef NOTIFY
#define NOTIFY(n, ...) \
do { \
if (verbose >= (n)) { \
dr_fprintf(STDERR, __VA_ARGS__); \
} \
} while (0)
/* defined in drx_buf.c */
bool
drx_buf_init_library(void);
void
drx_buf_exit_library(void);
#ifdef PLATFORM_SUPPORTS_SCATTER_GATHER
static bool
drx_event_restore_state(void *drcontext, bool restore_memory,
dr_restore_state_info_t *info);
#endif
/***************************************************************************
* INIT
*/
static int drx_init_count;
DR_EXPORT
bool
drx_init(void)
{
/* drx_insert_counter_update() needs 1 slot on x86 plus the 1 slot
     * drreg uses for aflags, and 2 reg slots on aarch, so 2 on both.
* We set do_not_sum_slots to true so that we only ask for *more* slots
* if the client doesn't ask for any. Another drreg_init() call is made
* in case drx_expand_scatter_gather() is called. More slots are reserved
* in that case.
*/
drreg_options_t ops = { sizeof(ops), 2, false, NULL, true };
#ifdef PLATFORM_SUPPORTS_SCATTER_GATHER
drmgr_priority_t fault_priority = { sizeof(fault_priority),
DRMGR_PRIORITY_NAME_DRX_FAULT, NULL, NULL,
DRMGR_PRIORITY_FAULT_DRX };
#endif
int count = dr_atomic_add32_return_sum(&drx_init_count, 1);
if (count > 1)
return true;
drmgr_init();
note_base = drmgr_reserve_note_range(DRX_NOTE_COUNT);
ASSERT(note_base != DRMGR_NOTE_NONE, "failed to reserve note range");
if (drreg_init(&ops) != DRREG_SUCCESS)
return false;
#ifdef PLATFORM_SUPPORTS_SCATTER_GATHER
if (!drmgr_register_restore_state_ex_event_ex(drx_event_restore_state,
&fault_priority))
return false;
#endif
return drx_buf_init_library();
}
DR_EXPORT
void
drx_exit()
{
int count = dr_atomic_add32_return_sum(&drx_init_count, -1);
if (count != 0)
return;
if (soft_kills_enabled)
soft_kills_exit();
drx_buf_exit_library();
drreg_exit();
if (expand_scatter_gather_drreg_initialized)
drreg_exit();
drmgr_exit();
}
/***************************************************************************
* INSTRUCTION NOTE FIELD
*/
/* For historical reasons we have this routine exported by drx.
* We just forward to drmgr.
*/
DR_EXPORT
ptr_uint_t
drx_reserve_note_range(size_t size)
{
return drmgr_reserve_note_range(size);
}
/***************************************************************************
* ANALYSIS
*/
DR_EXPORT
bool
drx_aflags_are_dead(instr_t *where)
{
instr_t *instr;
uint flags;
for (instr = where; instr != NULL; instr = instr_get_next(instr)) {
/* we treat syscall/interrupt as aflags read */
if (instr_is_syscall(instr) || instr_is_interrupt(instr))
return false;
flags = instr_get_arith_flags(instr, DR_QUERY_DEFAULT);
if (TESTANY(EFLAGS_READ_ARITH, flags))
return false;
if (TESTALL(EFLAGS_WRITE_ARITH, flags))
return true;
if (instr_is_cti(instr)) {
if (instr_is_app(instr) &&
(instr_is_ubr(instr) || instr_is_call_direct(instr))) {
instr_t *next = instr_get_next(instr);
opnd_t tgt = instr_get_target(instr);
/* continue on elision */
if (next != NULL && instr_is_app(next) && opnd_is_pc(tgt) &&
opnd_get_pc(tgt) == instr_get_app_pc(next))
continue;
}
/* unknown target, assume aflags is live */
return false;
}
}
return false;
}
/***************************************************************************
* INSTRUMENTATION
*/
#ifdef AARCHXX
/* XXX i#1603: add liveness analysis and pick dead regs */
# define SCRATCH_REG0 DR_REG_R0
# define SCRATCH_REG1 DR_REG_R1
#endif
/* insert a label instruction with note */
static void
ilist_insert_note_label(void *drcontext, instrlist_t *ilist, instr_t *where, void *note)
{
instr_t *instr = INSTR_CREATE_label(drcontext);
instr_set_note(instr, note);
MINSERT(ilist, where, instr);
}
#ifdef X86 /* not yet used on ARM but we may export */
/* Insert arithmetic flags saving code with more control.
* For x86:
* - skip %eax save if !save_reg
* - save %eax to reg if reg is not DR_REG_NULL,
* - save %eax to slot otherwise
* For ARM:
* - saves flags to reg
* - saves reg first to slot, unless !save_reg.
*/
static void
drx_save_arith_flags(void *drcontext, instrlist_t *ilist, instr_t *where, bool save_reg,
bool save_oflag, dr_spill_slot_t slot, reg_id_t reg)
{
# ifdef X86
instr_t *instr;
/* save %eax if necessary */
if (save_reg) {
if (reg != DR_REG_NULL) {
ASSERT(reg >= DR_REG_START_GPR && reg <= DR_REG_STOP_GPR && reg != DR_REG_XAX,
"wrong dead reg");
MINSERT(ilist, where,
INSTR_CREATE_mov_st(drcontext, opnd_create_reg(reg),
opnd_create_reg(DR_REG_XAX)));
} else {
ASSERT(slot >= SPILL_SLOT_1 && slot <= SPILL_SLOT_MAX, "wrong spill slot");
dr_save_reg(drcontext, ilist, where, DR_REG_XAX, slot);
}
}
/* lahf */
instr = INSTR_CREATE_lahf(drcontext);
MINSERT(ilist, where, instr);
if (save_oflag) {
/* seto %al */
instr = INSTR_CREATE_setcc(drcontext, OP_seto, opnd_create_reg(DR_REG_AL));
MINSERT(ilist, where, instr);
}
# elif defined(AARCHXX)
ASSERT(reg >= DR_REG_START_GPR && reg <= DR_REG_STOP_GPR, "reg must be a GPR");
if (save_reg) {
ASSERT(slot >= SPILL_SLOT_1 && slot <= SPILL_SLOT_MAX, "wrong spill slot");
dr_save_reg(drcontext, ilist, where, reg, slot);
}
MINSERT(ilist, where,
INSTR_CREATE_msr(drcontext, opnd_create_reg(DR_REG_CPSR),
OPND_CREATE_INT_MSR_NZCVQG(), opnd_create_reg(reg)));
# endif
}
/* Insert arithmetic flags restore code with more control.
* For x86:
* - skip %eax restore if !restore_reg
* - restore %eax from reg if reg is not DR_REG_NULL
* - restore %eax from slot otherwise
* For ARM:
* - restores flags from reg
* - restores reg to slot, unless !restore_reg.
* Routine merge_prev_drx_aflags_switch looks for labels inserted by
* drx_restore_arith_flags, so changes to this routine may affect
* merge_prev_drx_aflags_switch.
*/
static void
drx_restore_arith_flags(void *drcontext, instrlist_t *ilist, instr_t *where,
bool restore_reg, bool restore_oflag, dr_spill_slot_t slot,
reg_id_t reg)
{
instr_t *instr;
ilist_insert_note_label(drcontext, ilist, where,
NOTE_VAL(DRX_NOTE_AFLAGS_RESTORE_BEGIN));
# ifdef X86
if (restore_oflag) {
/* add 0x7f, %al */
instr = INSTR_CREATE_add(drcontext, opnd_create_reg(DR_REG_AL),
OPND_CREATE_INT8(0x7f));
MINSERT(ilist, where, instr);
}
/* sahf */
instr = INSTR_CREATE_sahf(drcontext);
instr_set_note(instr, NOTE_VAL(DRX_NOTE_AFLAGS_RESTORE_SAHF));
MINSERT(ilist, where, instr);
/* restore eax if necessary */
if (restore_reg) {
if (reg != DR_REG_NULL) {
ASSERT(reg >= DR_REG_START_GPR && reg <= DR_REG_STOP_GPR && reg != DR_REG_XAX,
"wrong dead reg");
MINSERT(ilist, where,
INSTR_CREATE_mov_st(drcontext, opnd_create_reg(DR_REG_XAX),
opnd_create_reg(reg)));
} else {
ASSERT(slot >= SPILL_SLOT_1 && slot <= SPILL_SLOT_MAX, "wrong spill slot");
dr_restore_reg(drcontext, ilist, where, DR_REG_XAX, slot);
}
}
# elif defined(AARCHXX)
ASSERT(reg >= DR_REG_START_GPR && reg <= DR_REG_STOP_GPR, "reg must be a GPR");
instr =
INSTR_CREATE_mrs(drcontext, opnd_create_reg(reg), opnd_create_reg(DR_REG_CPSR));
instr_set_note(instr, NOTE_VAL(DRX_NOTE_AFLAGS_RESTORE_SAHF));
MINSERT(ilist, where, instr);
if (restore_reg) {
ASSERT(slot >= SPILL_SLOT_1 && slot <= SPILL_SLOT_MAX, "wrong spill slot");
dr_restore_reg(drcontext, ilist, where, reg, slot);
}
# endif
ilist_insert_note_label(drcontext, ilist, where,
NOTE_VAL(DRX_NOTE_AFLAGS_RESTORE_END));
}
#endif /* X86 */
/* Check if current instrumentation can be merged into previous aflags
* (or on ARM, GPR) save/restore inserted by drx_restore_arith_flags.
* Returns NULL if cannot merge. Otherwise, returns the right insertion point,
* i.e., DRX_NOTE_AFLAGS_RESTORE_BEGIN label instr.
*
* This routine looks for labels inserted by drx_restore_arith_flags,
* so changes to drx_restore_arith_flags may affect this routine.
* On ARM the labels are from drx_insert_counter_update.
*/
static instr_t *
merge_prev_drx_spill(instrlist_t *ilist, instr_t *where, bool aflags)
{
instr_t *instr;
#ifdef DEBUG
bool has_sahf = false;
#endif
if (where == NULL)
return NULL;
instr = instr_get_prev(where);
if (instr == NULL)
return NULL;
if (!instr_is_label(instr))
return NULL;
/* Check if prev instr is DRX_NOTE_AFLAGS_RESTORE_END.
     * We bail even if there is only a label instr in between, which
* might be a target of internal cti.
*/
if (instr_get_note(instr) != NOTE_VAL(DRX_NOTE_AFLAGS_RESTORE_END))
return NULL;
/* On ARM we do not want to merge two drx spills if they are
* predicated differently.
*/
if (instr_get_predicate(instr) != instrlist_get_auto_predicate(ilist))
return NULL;
/* find DRX_NOTE_AFLAGS_RESTORE_BEGIN */
for (instr = instr_get_prev(instr); instr != NULL; instr = instr_get_prev(instr)) {
if (instr_is_app(instr)) {
/* we do not expect any app instr */
ASSERT(false, "drx aflags restore is corrupted");
return NULL;
}
if (instr_is_label(instr)) {
if (instr_get_note(instr) == NOTE_VAL(DRX_NOTE_AFLAGS_RESTORE_BEGIN)) {
ASSERT(!aflags || has_sahf, "missing sahf");
return instr;
}
/* we do not expect any other label instr */
ASSERT(false, "drx aflags restore is corrupted");
return NULL;
#ifdef DEBUG
} else {
if (instr_get_note(instr) == NOTE_VAL(DRX_NOTE_AFLAGS_RESTORE_SAHF))
has_sahf = true;
#endif
}
}
return NULL;
}
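/* Returns true iff the size-byte counter at addr spans more than one
 * cache line, in which case drx_insert_counter_update fails rather than
 * inserting a locked update.
 */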
static bool
counter_crosses_cache_line(byte *addr, size_t size)
{
size_t cache_line_size = proc_get_cache_line_size();
if (ALIGN_BACKWARD(addr, cache_line_size) ==
ALIGN_BACKWARD(addr + size - 1, cache_line_size))
return false;
return true;
}
DR_EXPORT
bool
drx_insert_counter_update(void *drcontext, instrlist_t *ilist, instr_t *where,
dr_spill_slot_t slot,
IF_NOT_X86_(dr_spill_slot_t slot2) void *addr, int value,
uint flags)
{
instr_t *instr;
bool use_drreg = false;
#ifdef X86
bool save_aflags = true;
#elif defined(AARCHXX)
bool save_regs = true;
reg_id_t reg1, reg2;
#endif
bool is_64 = TEST(DRX_COUNTER_64BIT, flags);
    /* Requires drx_init(), whereas it didn't when first added. */
if (drx_init_count == 0) {
ASSERT(false, "drx_insert_counter_update requires drx_init");
return false;
}
if (drcontext == NULL) {
ASSERT(false, "drcontext cannot be NULL");
return false;
}
if (drmgr_current_bb_phase(drcontext) == DRMGR_PHASE_INSERTION) {
use_drreg = true;
if (drmgr_current_bb_phase(drcontext) == DRMGR_PHASE_INSERTION &&
slot != SPILL_SLOT_MAX + 1) {
ASSERT(false, "with drmgr, SPILL_SLOT_MAX+1 must be passed");
return false;
}
} else if (!(slot >= SPILL_SLOT_1 && slot <= SPILL_SLOT_MAX)) {
ASSERT(false, "wrong spill slot");
return false;
}
/* check whether we can add lock */
if (TEST(DRX_COUNTER_LOCK, flags)) {
#ifdef ARM
/* FIXME i#1551: implement for ARM */
ASSERT(false, "DRX_COUNTER_LOCK not implemented for ARM");
return false;
#endif
if (IF_NOT_X64(is_64 ||) /* 64-bit counter in 32-bit mode */
counter_crosses_cache_line((byte *)addr, is_64 ? 8 : 4))
return false;
}
#ifdef X86
if (use_drreg) {
if (drreg_reserve_aflags(drcontext, ilist, where) != DRREG_SUCCESS)
return false;
} else {
/* if save_aflags, check if we can merge with the prev aflags save */
save_aflags = !drx_aflags_are_dead(where);
if (save_aflags) {
instr = merge_prev_drx_spill(ilist, where, true /*aflags*/);
if (instr != NULL) {
save_aflags = false;
where = instr;
}
}
/* save aflags if necessary */
if (save_aflags) {
drx_save_arith_flags(drcontext, ilist, where, true /* save eax */,
true /* save oflag */, slot, DR_REG_NULL);
}
}
/* update counter */
instr = INSTR_CREATE_add(
drcontext,
OPND_CREATE_ABSMEM(addr, IF_X64_ELSE((is_64 ? OPSZ_8 : OPSZ_4), OPSZ_4)),
OPND_CREATE_INT_32OR8(value));
if (TEST(DRX_COUNTER_LOCK, flags))
instr = LOCK(instr);
MINSERT(ilist, where, instr);
# ifndef X64
if (is_64) {
MINSERT(ilist, where,
INSTR_CREATE_adc(
drcontext, OPND_CREATE_ABSMEM((void *)((ptr_int_t)addr + 4), OPSZ_4),
OPND_CREATE_INT32(0)));
}
# endif /* !X64 */
if (use_drreg) {
if (drreg_unreserve_aflags(drcontext, ilist, where) != DRREG_SUCCESS)
return false;
} else {
/* restore aflags if necessary */
if (save_aflags) {
drx_restore_arith_flags(drcontext, ilist, where, true /* restore eax */,
true /* restore oflag */, slot, DR_REG_NULL);
}
}
#elif defined(AARCHXX)
/* FIXME i#1551: implement 64-bit counter support */
ASSERT(!is_64, "DRX_COUNTER_64BIT is not implemented");
if (use_drreg) {
        if (drreg_reserve_register(drcontext, ilist, where, NULL, &reg1) !=
                DRREG_SUCCESS ||
            drreg_reserve_register(drcontext, ilist, where, NULL, &reg2) != DRREG_SUCCESS)
return false;
} else {
reg1 = SCRATCH_REG0;
reg2 = SCRATCH_REG1;
/* merge w/ prior restore */
if (save_regs) {
instr = merge_prev_drx_spill(ilist, where, false /*!aflags*/);
if (instr != NULL) {
save_regs = false;
where = instr;
}
}
if (save_regs) {
dr_save_reg(drcontext, ilist, where, reg1, slot);
dr_save_reg(drcontext, ilist, where, reg2, slot2);
}
}
/* XXX: another optimization is to look for the prior increment's
* address being near this one, and add to reg1 instead of
* taking 2 instrs to load it fresh.
*/
instrlist_insert_mov_immed_ptrsz(drcontext, (ptr_int_t)addr, opnd_create_reg(reg1),
ilist, where, NULL, NULL);
MINSERT(
ilist, where,
XINST_CREATE_load(drcontext, opnd_create_reg(reg2), OPND_CREATE_MEMPTR(reg1, 0)));
MINSERT(ilist, where,
XINST_CREATE_add(drcontext, opnd_create_reg(reg2), OPND_CREATE_INT(value)));
MINSERT(ilist, where,
XINST_CREATE_store(drcontext, OPND_CREATE_MEMPTR(reg1, 0),
opnd_create_reg(reg2)));
if (use_drreg) {
if (drreg_unreserve_register(drcontext, ilist, where, reg1) != DRREG_SUCCESS ||
drreg_unreserve_register(drcontext, ilist, where, reg2) != DRREG_SUCCESS)
return false;
} else if (save_regs) {
ilist_insert_note_label(drcontext, ilist, where,
NOTE_VAL(DRX_NOTE_AFLAGS_RESTORE_BEGIN));
dr_restore_reg(drcontext, ilist, where, reg2, slot2);
dr_restore_reg(drcontext, ilist, where, reg1, slot);
ilist_insert_note_label(drcontext, ilist, where,
NOTE_VAL(DRX_NOTE_AFLAGS_RESTORE_END));
}
#endif
return true;
}
/***************************************************************************
* SOFT KILLS
*/
/* Track callbacks in a simple list protected by a lock */
typedef struct _cb_entry_t {
/* XXX: the bool return value is complex to support in some situations. We
* ignore the return value and always skip the app's termination of the
* child process for jobs containing multiple pids and for
* JOB_OBJECT_LIMIT_KILL_ON_JOB_CLOSE. If we wanted to not skip those we'd
* have to emulate the kill via NtTerminateProcess, which doesn't seem worth
* it when our two use cases (DrMem and drcov) don't need that kind of
* control.
*/
bool (*cb)(process_id_t, int);
struct _cb_entry_t *next;
} cb_entry_t;
static cb_entry_t *cb_list;
static void *cb_lock;
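/* Invokes all registered soft-kill callbacks for pid. Returns true if
 * any callback requests that the app's own termination action be skipped.
 */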
static bool
soft_kills_invoke_cbs(process_id_t pid, int exit_code)
{
cb_entry_t *e;
bool skip = false;
NOTIFY(1, "--drx-- parent %d soft killing pid %d code %d\n", dr_get_process_id(), pid,
exit_code);
dr_mutex_lock(cb_lock);
for (e = cb_list; e != NULL; e = e->next) {
/* If anyone wants to skip, we skip */
skip = e->cb(pid, exit_code) || skip;
}
dr_mutex_unlock(cb_lock);
return skip;
}
#ifdef WINDOWS
/* The system calls we need to watch for soft kills.
 * These are in ntoskrnl, so we get away without drsyscall.
*/
enum {
SYS_NUM_PARAMS_TerminateProcess = 2,
SYS_NUM_PARAMS_TerminateJobObject = 2,
SYS_NUM_PARAMS_SetInformationJobObject = 4,
SYS_NUM_PARAMS_Close = 1,
SYS_NUM_PARAMS_DuplicateObject = 7,
};
enum {
SYS_WOW64_IDX_TerminateProcess = 0,
SYS_WOW64_IDX_TerminateJobObject = 0,
SYS_WOW64_IDX_SetInformationJobObject = 7,
SYS_WOW64_IDX_Close = 0,
SYS_WOW64_IDX_DuplicateObject = 0,
};
static int sysnum_TerminateProcess;
static int sysnum_TerminateJobObject;
static int sysnum_SetInformationJobObject;
static int sysnum_Close;
static int sysnum_DuplicateObject;
/* Table of job handles for which the app set JOB_OBJECT_LIMIT_KILL_ON_JOB_CLOSE */
# define JOB_TABLE_HASH_BITS 6
static hashtable_t job_table;
/* Entry in job_table. If it is present in the table, it should only be
* accessed while holding the table lock.
*/
typedef struct _job_info_t {
    /* So far just a reference count. We don't need to store a duplicated handle
* b/c we always have a valid app handle for this job.
*/
uint ref_count;
} job_info_t;
/* We need CLS as we track data across syscalls, where TLS is not sufficient */
static int cls_idx_soft;
typedef struct _cls_soft_t {
/* For NtSetInformationJobObject */
DWORD job_limit_flags_orig;
DWORD *job_limit_flags_loc;
/* For NtDuplicateObject */
bool dup_proc_src_us;
bool dup_proc_dst_us;
ULONG dup_options;
HANDLE dup_src;
HANDLE *dup_dst;
job_info_t *dup_jinfo;
/* If we add data for more syscalls, we could use a union to save space */
} cls_soft_t;
/* XXX: should we have some kind of shared wininc/ dir for these common defines?
* I don't really want to include core/win32/ntdll.h here.
*/
typedef LONG NTSTATUS;
# define NT_SUCCESS(status) (((NTSTATUS)(status)) >= 0)
/* Since we invoke only in a client/privlib context, we can statically link
* with ntdll to call these syscall wrappers:
*/
# define GET_NTDLL(NtFunction, signature) NTSYSAPI NTSTATUS NTAPI NtFunction signature
GET_NTDLL(NtQueryInformationJobObject,
(IN HANDLE JobHandle, IN JOBOBJECTINFOCLASS JobInformationClass,
OUT PVOID JobInformation, IN ULONG JobInformationLength,
OUT PULONG ReturnLength OPTIONAL));
# define STATUS_BUFFER_OVERFLOW ((NTSTATUS)0x80000005L)
# define NT_CURRENT_PROCESS ((HANDLE)(ptr_int_t)-1)
typedef LONG KPRIORITY;
typedef enum _PROCESSINFOCLASS {
ProcessBasicInformation,
} PROCESSINFOCLASS;
typedef struct _PROCESS_BASIC_INFORMATION {
NTSTATUS ExitStatus;
void *PebBaseAddress;
ULONG_PTR AffinityMask;
KPRIORITY BasePriority;
ULONG_PTR UniqueProcessId;
ULONG_PTR InheritedFromUniqueProcessId;
} PROCESS_BASIC_INFORMATION;
typedef PROCESS_BASIC_INFORMATION *PPROCESS_BASIC_INFORMATION;
GET_NTDLL(NtQueryInformationProcess,
(IN HANDLE ProcessHandle, IN PROCESSINFOCLASS ProcessInformationClass,
OUT PVOID ProcessInformation, IN ULONG ProcessInformationLength,
OUT PULONG ReturnLength OPTIONAL));
GET_NTDLL(NtTerminateProcess, (IN HANDLE ProcessHandle, IN NTSTATUS ExitStatus));
static ssize_t
num_job_object_pids(HANDLE job)
{
/* i#1401: despite what Nebbett says and MSDN hints at, on Win7 at least
* JobObjectBasicProcessIdList returning STATUS_BUFFER_OVERFLOW does NOT
* fill in any data at all. We thus have to query through a different
* mechanism.
*/
JOBOBJECT_BASIC_ACCOUNTING_INFORMATION info;
NTSTATUS res;
DWORD len;
res = NtQueryInformationJobObject(job, JobObjectBasicAccountingInformation, &info,
sizeof(info), &len);
NOTIFY(1, "--drx-- job 0x%x => %d pids len=%d res=0x%08x\n", job,
info.ActiveProcesses, len, res);
if (NT_SUCCESS(res))
return info.ActiveProcesses;
else
return -1;
}
static bool
get_job_object_pids(HANDLE job, JOBOBJECT_BASIC_PROCESS_ID_LIST *list, size_t list_sz)
{
NTSTATUS res;
res = NtQueryInformationJobObject(job, JobObjectBasicProcessIdList, list,
(ULONG)list_sz, NULL);
return NT_SUCCESS(res);
}
/* XXX: should DR provide a routine to query this? */
static bool
get_app_exit_code(int *exit_code)
{
ULONG got;
PROCESS_BASIC_INFORMATION info;
NTSTATUS res;
memset(&info, 0, sizeof(PROCESS_BASIC_INFORMATION));
res = NtQueryInformationProcess(NT_CURRENT_PROCESS, ProcessBasicInformation, &info,
sizeof(PROCESS_BASIC_INFORMATION), &got);
if (!NT_SUCCESS(res) || got != sizeof(PROCESS_BASIC_INFORMATION))
return false;
*exit_code = (int)info.ExitStatus;
return true;
}
static void
soft_kills_context_init(void *drcontext, bool new_depth)
{
cls_soft_t *cls;
if (new_depth) {
cls = (cls_soft_t *)dr_thread_alloc(drcontext, sizeof(*cls));
drmgr_set_cls_field(drcontext, cls_idx_soft, cls);
} else {
cls = (cls_soft_t *)drmgr_get_cls_field(drcontext, cls_idx_soft);
}
memset(cls, 0, sizeof(*cls));
}
static void
soft_kills_context_exit(void *drcontext, bool thread_exit)
{
if (thread_exit) {
cls_soft_t *cls = (cls_soft_t *)drmgr_get_cls_field(drcontext, cls_idx_soft);
dr_thread_free(drcontext, cls, sizeof(*cls));
}
/* else, nothing to do: we leave the struct for re-use on next callback */
}
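/* Resolves the syscall number for the named ntdll wrapper and ensures DR
 * intercepts it even when threads go native. Returns -1 on failure.
 */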
static int
soft_kills_get_sysnum(const char *name, int num_params, int wow64_idx)
{
static module_handle_t ntdll;
app_pc wrapper;
int sysnum;
if (ntdll == NULL) {
module_data_t *data = dr_lookup_module_by_name("ntdll.dll");
if (data == NULL)
return -1;
ntdll = data->handle;
dr_free_module_data(data);
}
wrapper = (app_pc)dr_get_proc_address(ntdll, name);
if (wrapper == NULL)
return -1;
sysnum = drmgr_decode_sysnum_from_wrapper(wrapper);
if (sysnum == -1)
return -1;
/* Ensure that DR intercepts these if we go native.
* XXX: better to only do this if client plans to use native execution
* to reduce the hook count and shrink chance of hook conflicts?
*/
if (!dr_syscall_intercept_natively(name, sysnum, num_params, wow64_idx))
return -1;
return sysnum;
}
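/* Runs the soft-kill callbacks for every pid in the job. If a callback
 * declines to skip the termination, we emulate the kill ourselves via
 * NtTerminateProcess.
 */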
static void
soft_kills_handle_job_termination(void *drcontext, HANDLE job, int exit_code)
{
ssize_t num_jobs = num_job_object_pids(job);
NOTIFY(1, "--drx-- for job 0x%x got %d jobs\n", job, num_jobs);
if (num_jobs > 0) {
JOBOBJECT_BASIC_PROCESS_ID_LIST *list;
size_t sz = sizeof(*list) + (num_jobs - 1) * sizeof(list->ProcessIdList[0]);
byte *buf = dr_thread_alloc(drcontext, sz);
list = (JOBOBJECT_BASIC_PROCESS_ID_LIST *)buf;
if (get_job_object_pids(job, list, sz)) {
uint i;
NOTIFY(1, "--drx-- for job 0x%x got %d jobs in list\n", job,
list->NumberOfProcessIdsInList);
for (i = 0; i < list->NumberOfProcessIdsInList; i++) {
process_id_t pid = list->ProcessIdList[i];
if (!soft_kills_invoke_cbs(pid, exit_code)) {
/* Client is not terminating and requests not to skip the action.
* But since we have multiple pids, we go with a local decision
* here and emulate the kill.
*/
HANDLE phandle = dr_convert_pid_to_handle(pid);
if (phandle != INVALID_HANDLE_VALUE)
NtTerminateProcess(phandle, exit_code);
/* else, child stays alive: not much we can do */
}
}
}
dr_thread_free(drcontext, buf, sz);
} /* else query failed: I'd issue a warning log msg if not inside drx library */
}
static void
soft_kills_free_job_info(void *ptr)
{
job_info_t *jinfo = (job_info_t *)ptr;
if (jinfo->ref_count == 0)
dr_global_free(jinfo, sizeof(*jinfo));
}
/* Called when the app closes a job handle "job".
* Caller must hold job_table lock.
* If "remove" is true, removes from the hashtable and de-allocates "jinfo",
* if refcount is 0.
*/
static void
soft_kills_handle_close(void *drcontext, job_info_t *jinfo, HANDLE job, int exit_code,
bool remove)
{
ASSERT(jinfo->ref_count > 0, "invalid ref count");
jinfo->ref_count--;
if (jinfo->ref_count == 0) {
NOTIFY(1, "--drx-- closing kill-on-close handle 0x%x in pid %d\n", job,
dr_get_process_id());
/* XXX: It's possible for us to miss a handle being closed from another
* process. In such a case, our ref count won't reach 0 and we'll
* fail to kill the child at all.
         * If that handle value is re-used as a job object (else our job queries
* will get STATUS_OBJECT_TYPE_MISMATCH) with no kill-on-close, we could
* incorrectly kill a job when the app is just closing its handle, but
* this would only happen when a job is being controlled from multiple
* processes. We'll have to live with the risk. We could watch
* NtCreateJobObject but it doesn't seem worth it.
*/
soft_kills_handle_job_termination(drcontext, job, exit_code);
}
if (remove)
hashtable_remove(&job_table, (void *)job);
}
static bool
soft_kills_filter_syscall(void *drcontext, int sysnum)
{
return (sysnum == sysnum_TerminateProcess || sysnum == sysnum_TerminateJobObject ||
sysnum == sysnum_SetInformationJobObject || sysnum == sysnum_Close ||
sysnum == sysnum_DuplicateObject);
}
static bool
soft_kills_pre_SetInformationJobObject(void *drcontext, cls_soft_t *cls)
{
HANDLE job = (HANDLE)dr_syscall_get_param(drcontext, 0);
JOBOBJECTINFOCLASS class = (JOBOBJECTINFOCLASS)dr_syscall_get_param(drcontext, 1);
ULONG sz = (ULONG)dr_syscall_get_param(drcontext, 3);
/* MSDN claims that JOB_OBJECT_LIMIT_KILL_ON_JOB_CLOSE requires an
* extended info struct, which we trust, though it seems odd as it's
* a flag in the basic struct.
*/
JOBOBJECT_EXTENDED_LIMIT_INFORMATION info;
if (class == JobObjectExtendedLimitInformation && sz >= sizeof(info) &&
dr_safe_read((byte *)dr_syscall_get_param(drcontext, 2), sizeof(info), &info,
NULL)) {
if (TEST(JOB_OBJECT_LIMIT_KILL_ON_JOB_CLOSE,
info.BasicLimitInformation.LimitFlags)) {
/* Remove the kill-on-close flag from the syscall arg.
* We restore in post-syscall in case app uses the memory
* for something else. There is of course a race where another
* thread could use it and get the wrong value: -soft_kills isn't
* perfect.
*/
JOBOBJECT_EXTENDED_LIMIT_INFORMATION *ptr =
(JOBOBJECT_EXTENDED_LIMIT_INFORMATION *)dr_syscall_get_param(drcontext,
2);
ULONG new_flags = info.BasicLimitInformation.LimitFlags &
(~JOB_OBJECT_LIMIT_KILL_ON_JOB_CLOSE);
bool isnew;
job_info_t *jinfo;
cls->job_limit_flags_orig = info.BasicLimitInformation.LimitFlags;
cls->job_limit_flags_loc = &ptr->BasicLimitInformation.LimitFlags;
ASSERT(sizeof(cls->job_limit_flags_orig) ==
sizeof(ptr->BasicLimitInformation.LimitFlags),
"size mismatch");
if (!dr_safe_write(cls->job_limit_flags_loc,
sizeof(ptr->BasicLimitInformation.LimitFlags), &new_flags,
NULL)) {
/* XXX: Any way we can send a WARNING on our failure to write? */
NOTIFY(1,
"--drx-- FAILED to remove kill-on-close from job 0x%x "
"in pid %d\n",
job, dr_get_process_id());
} else {
NOTIFY(1, "--drx-- removed kill-on-close from job 0x%x in pid %d\n", job,
dr_get_process_id());
}
/* Track the handle so we can notify the client on close or exit */
hashtable_lock(&job_table);
/* See if already there (in case app called Set 2x) */
if (hashtable_lookup(&job_table, (void *)job) == NULL) {
jinfo = (job_info_t *)dr_global_alloc(sizeof(*jinfo));
jinfo->ref_count = 1;
isnew = hashtable_add(&job_table, (void *)job, (void *)jinfo);
ASSERT(isnew, "missed an NtClose");
}
hashtable_unlock(&job_table);
}
}
return true;
}
/* We must do two things on NtDuplicateObject:
* 1) Update our job table: adding a new entry for the duplicate,
* and removing the source handle if it is closed.
* 2) Process a handle being closed but a new one not being
* created (in this process): corner case that triggers a kill.
*/
static bool
soft_kills_pre_DuplicateObject(void *drcontext, cls_soft_t *cls)
{
HANDLE proc_src = (HANDLE)dr_syscall_get_param(drcontext, 0);
process_id_t id_src = dr_convert_handle_to_pid(proc_src);
cls->dup_proc_src_us = (id_src == dr_get_process_id());
cls->dup_jinfo = NULL;
if (cls->dup_proc_src_us) {
/* NtDuplicateObject seems more likely than NtClose to fail, so we
* shift as much handling as possible post-syscall.
*/
HANDLE proc_dst = (HANDLE)dr_syscall_get_param(drcontext, 2);
process_id_t id_dst = dr_convert_handle_to_pid(proc_dst);
cls->dup_proc_dst_us = (id_dst == dr_get_process_id());
cls->dup_src = (HANDLE)dr_syscall_get_param(drcontext, 1);
cls->dup_dst = (HANDLE *)dr_syscall_get_param(drcontext, 3);
cls->dup_options = (ULONG)dr_syscall_get_param(drcontext, 6);
hashtable_lock(&job_table);
/* We have to save jinfo b/c dup_src will be gone */
cls->dup_jinfo = (job_info_t *)hashtable_lookup(&job_table, (void *)cls->dup_src);
if (cls->dup_jinfo != NULL) {
if (TEST(DUPLICATE_CLOSE_SOURCE, cls->dup_options)) {
/* "This occurs regardless of any error status returned"
* according to MSDN DuplicateHandle, and Nebbett.
* Thus, we act on this here, which avoids any handle value
* reuse race, and we don't have to undo in post.
* If this weren't true, we'd have to reinstate in the table
* on failure, and we'd have to duplicate the handle
* (dr_dup_file_handle would do it -- at least w/ current impl)
* to call soft_kills_handle_close() in post.
*/
if (!cls->dup_proc_dst_us) {
NOTIFY(1, "--drx-- job 0x%x closed in pid %d w/ dst outside proc\n",
cls->dup_src, dr_get_process_id());
/* The exit code is set to 0 by the kernel for this case */
soft_kills_handle_close(drcontext, cls->dup_jinfo, cls->dup_src, 0,
true /*remove*/);
} else {
hashtable_remove(&job_table, (void *)cls->dup_src);
/* Adjust refcount after removing to avoid freeing prematurely.
* The refcount may be sitting at 0, but no other thread should
* be able to affect it as there is no hashtable entry.
*/
ASSERT(cls->dup_jinfo->ref_count > 0, "invalid ref count");
cls->dup_jinfo->ref_count--;
}
}
}
hashtable_unlock(&job_table);
}
return true;
}
static void
soft_kills_post_DuplicateObject(void *drcontext)
{
cls_soft_t *cls = (cls_soft_t *)drmgr_get_cls_field(drcontext, cls_idx_soft);
HANDLE dup_dst;
if (cls->dup_jinfo == NULL)
return;
if (!NT_SUCCESS(dr_syscall_get_result(drcontext)))
return;
ASSERT(cls->dup_proc_src_us, "shouldn't get here");
if (!cls->dup_proc_dst_us)
return; /* already handled in pre */
/* At this point we have a successful intra-process duplication. If
* DUPLICATE_CLOSE_SOURCE, we already removed from the table in pre.
*/
hashtable_lock(&job_table);
if (cls->dup_dst != NULL &&
dr_safe_read(cls->dup_dst, sizeof(dup_dst), &dup_dst, NULL)) {
NOTIFY(1, "--drx-- job 0x%x duplicated as 0x%x in pid %d\n", cls->dup_src,
dup_dst, dr_get_process_id());
cls->dup_jinfo->ref_count++;
hashtable_add(&job_table, (void *)dup_dst, (void *)cls->dup_jinfo);
}
hashtable_unlock(&job_table);
}
/* Returns whether to execute the system call */
static bool
soft_kills_pre_syscall(void *drcontext, int sysnum)
{
cls_soft_t *cls = (cls_soft_t *)drmgr_get_cls_field(drcontext, cls_idx_soft);
/* Xref DrMem i#544, DrMem i#1297, and DRi#1231: give child
* processes a chance for clean exit for dumping of data or other
* actions.
*
     * XXX: a child under DR but without a supporting client will be left
     * alive, but that's a risk we can live with.
*/
if (sysnum == sysnum_TerminateProcess) {
HANDLE proc = (HANDLE)dr_syscall_get_param(drcontext, 0);
process_id_t pid = dr_convert_handle_to_pid(proc);
if (pid != INVALID_PROCESS_ID && pid != dr_get_process_id()) {
int exit_code = (int)dr_syscall_get_param(drcontext, 1);
NOTIFY(1, "--drx-- NtTerminateProcess in pid %d\n", dr_get_process_id());
if (soft_kills_invoke_cbs(pid, exit_code)) {
dr_syscall_set_result(drcontext, 0 /*success*/);
return false; /* skip syscall */
} else
return true; /* execute syscall */
}
} else if (sysnum == sysnum_TerminateJobObject) {
/* There are several ways a process in a job can be killed:
*
* 1) NtTerminateJobObject
* 2) The last handle is closed + JOB_OBJECT_LIMIT_KILL_ON_JOB_CLOSE is set
* 3) JOB_OBJECT_LIMIT_ACTIVE_PROCESS is hit
* 4) Time limit and JOB_OBJECT_TERMINATE_AT_END_OF_JOB is hit
*
* XXX: we only handle #1 and #2.
*/
HANDLE job = (HANDLE)dr_syscall_get_param(drcontext, 0);
NTSTATUS exit_code = (NTSTATUS)dr_syscall_get_param(drcontext, 1);
NOTIFY(1, "--drx-- NtTerminateJobObject job 0x%x in pid %d\n", job,
dr_get_process_id());
soft_kills_handle_job_termination(drcontext, job, exit_code);
/* We always skip this syscall. If individual processes were requested
* to not be skipped, we emulated via NtTerminateProcess in
* soft_kills_handle_job_termination().
*/
dr_syscall_set_result(drcontext, 0 /*success*/);
return false; /* skip syscall */
} else if (sysnum == sysnum_SetInformationJobObject) {
return soft_kills_pre_SetInformationJobObject(drcontext, cls);
} else if (sysnum == sysnum_Close) {
/* If a job object, act on it, and remove from our table */
HANDLE handle = (HANDLE)dr_syscall_get_param(drcontext, 0);
job_info_t *jinfo;
hashtable_lock(&job_table);
jinfo = (job_info_t *)hashtable_lookup(&job_table, (void *)handle);
if (jinfo != NULL) {
NOTIFY(1, "--drx-- explicit close of job 0x%x in pid %d\n", handle,
dr_get_process_id());
/* The exit code is set to 0 by the kernel for this case */
soft_kills_handle_close(drcontext, jinfo, handle, 0, true /*remove*/);
}
hashtable_unlock(&job_table);
} else if (sysnum == sysnum_DuplicateObject) {
return soft_kills_pre_DuplicateObject(drcontext, cls);
}
return true;
}
static void
soft_kills_post_syscall(void *drcontext, int sysnum)
{
if (sysnum == sysnum_SetInformationJobObject) {
cls_soft_t *cls = (cls_soft_t *)drmgr_get_cls_field(drcontext, cls_idx_soft);
if (cls->job_limit_flags_loc != NULL) {
/* Restore the app's memory */
if (!dr_safe_write(cls->job_limit_flags_loc,
sizeof(cls->job_limit_flags_orig),
&cls->job_limit_flags_orig, NULL)) {
/* If we weren't in drx lib I'd log a warning */
}
cls->job_limit_flags_loc = NULL;
}
} else if (sysnum == sysnum_DuplicateObject) {
soft_kills_post_DuplicateObject(drcontext);
}
}
#else /* WINDOWS */
static bool
soft_kills_filter_syscall(void *drcontext, int sysnum)
{
return (sysnum == SYS_kill);
}
/* Returns whether to execute the system call */
static bool
soft_kills_pre_syscall(void *drcontext, int sysnum)
{
if (sysnum == SYS_kill) {
process_id_t pid = (process_id_t)dr_syscall_get_param(drcontext, 0);
int sig = (int)dr_syscall_get_param(drcontext, 1);
if (sig == SIGKILL && pid != INVALID_PROCESS_ID && pid != dr_get_process_id()) {
/* Pass exit code << 8 for use with dr_exit_process() */
int exit_code = sig << 8;
if (soft_kills_invoke_cbs(pid, exit_code)) {
/* set result to 0 (success) and use_high and use_errno to false */
dr_syscall_result_info_t info = {
sizeof(info),
};
info.succeeded = true;
dr_syscall_set_result_ex(drcontext, &info);
return false; /* skip syscall */
} else
return true; /* execute syscall */
}
}
return true;
}
static void
soft_kills_post_syscall(void *drcontext, int sysnum)
{
/* nothing yet */
}
#endif /* UNIX */
static bool
soft_kills_init(void)
{
#ifdef WINDOWS
IF_DEBUG(bool ok;)
#endif
/* XXX: would be nice to fail if it's not still process init,
* but we don't have an easy way to check.
*/
soft_kills_enabled = true;
NOTIFY(1, "--drx-- init pid %d %s\n", dr_get_process_id(), dr_get_application_name());
cb_lock = dr_mutex_create();
#ifdef WINDOWS
hashtable_init_ex(&job_table, JOB_TABLE_HASH_BITS, HASH_INTPTR, false /*!strdup*/,
false /*!synch*/, soft_kills_free_job_info, NULL, NULL);
sysnum_TerminateProcess =
soft_kills_get_sysnum("NtTerminateProcess", SYS_NUM_PARAMS_TerminateProcess,
SYS_WOW64_IDX_TerminateProcess);
if (sysnum_TerminateProcess == -1)
return false;
sysnum_TerminateJobObject =
soft_kills_get_sysnum("NtTerminateJobObject", SYS_NUM_PARAMS_TerminateJobObject,
SYS_WOW64_IDX_TerminateJobObject);
if (sysnum_TerminateJobObject == -1)
return false;
sysnum_SetInformationJobObject = soft_kills_get_sysnum(
"NtSetInformationJobObject", SYS_NUM_PARAMS_SetInformationJobObject,
SYS_WOW64_IDX_SetInformationJobObject);
if (sysnum_SetInformationJobObject == -1)
return false;
sysnum_Close =
soft_kills_get_sysnum("NtClose", SYS_NUM_PARAMS_Close, SYS_WOW64_IDX_Close);
if (sysnum_Close == -1)
return false;
sysnum_DuplicateObject =
soft_kills_get_sysnum("NtDuplicateObject", SYS_NUM_PARAMS_DuplicateObject,
SYS_WOW64_IDX_DuplicateObject);
if (sysnum_DuplicateObject == -1)
return false;
cls_idx_soft =
drmgr_register_cls_field(soft_kills_context_init, soft_kills_context_exit);
if (cls_idx_soft == -1)
return false;
/* Ensure that DR intercepts these when we're native */
IF_DEBUG(ok =)
dr_syscall_intercept_natively("NtTerminateProcess", sysnum_TerminateProcess,
SYS_NUM_PARAMS_TerminateProcess,
SYS_WOW64_IDX_TerminateProcess);
ASSERT(ok, "failure to watch syscall while native");
IF_DEBUG(ok =)
dr_syscall_intercept_natively("NtTerminateJobObject", sysnum_TerminateJobObject,
SYS_NUM_PARAMS_TerminateJobObject,
SYS_WOW64_IDX_TerminateJobObject);
ASSERT(ok, "failure to watch syscall while native");
IF_DEBUG(ok =)
dr_syscall_intercept_natively(
"NtSetInformationJobObject", sysnum_SetInformationJobObject,
SYS_NUM_PARAMS_SetInformationJobObject, SYS_WOW64_IDX_SetInformationJobObject);
ASSERT(ok, "failure to watch syscall while native");
IF_DEBUG(ok =)
dr_syscall_intercept_natively("NtClose", sysnum_Close, SYS_NUM_PARAMS_Close,
SYS_WOW64_IDX_Close);
ASSERT(ok, "failure to watch syscall while native");
IF_DEBUG(ok =)
dr_syscall_intercept_natively("NtDuplicateObject", sysnum_DuplicateObject,
SYS_NUM_PARAMS_DuplicateObject,
SYS_WOW64_IDX_DuplicateObject);
ASSERT(ok, "failure to watch syscall while native");
#endif
if (!drmgr_register_pre_syscall_event(soft_kills_pre_syscall) ||
!drmgr_register_post_syscall_event(soft_kills_post_syscall))
return false;
dr_register_filter_syscall_event(soft_kills_filter_syscall);
return true;
}
static void
soft_kills_exit(void)
{
cb_entry_t *e;
#ifdef WINDOWS
/* Any open job handles will be closed, triggering
* JOB_OBJECT_LIMIT_KILL_ON_JOB_CLOSE
*/
uint i;
/* The exit code used is the exit code for this process */
int exit_code;
if (!get_app_exit_code(&exit_code))
exit_code = 0;
hashtable_lock(&job_table);
for (i = 0; i < HASHTABLE_SIZE(job_table.table_bits); i++) {
hash_entry_t *he;
for (he = job_table.table[i]; he != NULL; he = he->next) {
HANDLE job = (HANDLE)he->key;
job_info_t *jinfo = (job_info_t *)he->payload;
NOTIFY(1, "--drx-- implicit close of job 0x%x in pid %d\n", job,
dr_get_process_id());
soft_kills_handle_close(dr_get_current_drcontext(), jinfo, job, exit_code,
false /*do not remove*/);
}
}
hashtable_unlock(&job_table);
hashtable_delete(&job_table);
drmgr_unregister_cls_field(soft_kills_context_init, soft_kills_context_exit,
cls_idx_soft);
#endif
dr_mutex_lock(cb_lock);
while (cb_list != NULL) {
e = cb_list;
cb_list = e->next;
dr_global_free(e, sizeof(*e));
}
dr_mutex_unlock(cb_lock);
dr_mutex_destroy(cb_lock);
}
bool
drx_register_soft_kills(bool (*event_cb)(process_id_t pid, int exit_code))
{
/* We split our init from drx_init() to avoid extra work when nobody
* requests this feature.
*/
static int soft_kills_init_count;
cb_entry_t *e;
int count = dr_atomic_add32_return_sum(&soft_kills_init_count, 1);
if (count == 1) {
soft_kills_init();
}
e = dr_global_alloc(sizeof(*e));
e->cb = event_cb;
dr_mutex_lock(cb_lock);
e->next = cb_list;
cb_list = e;
dr_mutex_unlock(cb_lock);
return true;
}
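/* A minimal usage sketch (hypothetical client code, not part of drx; how the
 * child is asked to exit, e.g. via a nudge, is up to the client):
 *
 *   static bool
 *   event_soft_kill(process_id_t pid, int exit_code)
 *   {
 *       // Ask the child to dump its data and exit with exit_code.
 *       // Return true to skip the app's kill syscall, false to execute it.
 *       return notify_child_to_exit(pid, exit_code); // hypothetical helper
 *   }
 *
 *   // In dr_client_main():
 *   drx_register_soft_kills(event_soft_kill);
 */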
/***************************************************************************
* INSTRUCTION LIST
*/
DR_EXPORT
size_t
drx_instrlist_size(instrlist_t *ilist)
{
instr_t *instr;
size_t size = 0;
for (instr = instrlist_first(ilist); instr != NULL; instr = instr_get_next(instr))
size++;
return size;
}
DR_EXPORT
size_t
drx_instrlist_app_size(instrlist_t *ilist)
{
instr_t *instr;
size_t size = 0;
for (instr = instrlist_first_app(ilist); instr != NULL;
instr = instr_get_next_app(instr))
size++;
return size;
}
/***************************************************************************
* LOGGING
*/
#ifdef WINDOWS
# define DIRSEP '\\'
#else
# define DIRSEP '/'
#endif
file_t
drx_open_unique_file(const char *dir, const char *prefix, const char *suffix,
uint extra_flags, char *result OUT, size_t result_len)
{
char buf[MAXIMUM_PATH];
file_t f = INVALID_FILE;
int i;
ssize_t len;
for (i = 0; i < 10000; i++) {
len = dr_snprintf(
buf, BUFFER_SIZE_ELEMENTS(buf), "%s%c%s.%04d.%s", dir, DIRSEP, prefix,
(extra_flags == DRX_FILE_SKIP_OPEN) ? dr_get_random_value(9999) : i, suffix);
if (len < 0)
return INVALID_FILE;
NULL_TERMINATE_BUFFER(buf);
if (extra_flags != DRX_FILE_SKIP_OPEN)
f = dr_open_file(buf, DR_FILE_WRITE_REQUIRE_NEW | extra_flags);
if (f != INVALID_FILE || extra_flags == DRX_FILE_SKIP_OPEN) {
if (result != NULL)
dr_snprintf(result, result_len, "%s", buf);
return f;
}
}
return INVALID_FILE;
}
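/* Usage sketch (hypothetical directory and names): opens e.g.
 * <dir>/proclog.0000.log, retrying with successive counter values until an
 * unused name is found:
 *
 *   char path[MAXIMUM_PATH];
 *   file_t f = drx_open_unique_file("C:\\logs", "proclog", "log", 0, path,
 *                                   BUFFER_SIZE_ELEMENTS(path));
 *   if (f != INVALID_FILE) {
 *       dr_fprintf(f, "opened %s\n", path);
 *       dr_close_file(f);
 *   }
 */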
file_t
drx_open_unique_appid_file(const char *dir, ptr_int_t id, const char *prefix,
const char *suffix, uint extra_flags, char *result OUT,
size_t result_len)
{
int len;
char appid[MAXIMUM_PATH];
const char *app_name = dr_get_application_name();
if (app_name == NULL)
app_name = "<unknown-app>";
len = dr_snprintf(appid, BUFFER_SIZE_ELEMENTS(appid), "%s.%s.%05d", prefix, app_name,
id);
if (len < 0 || (size_t)len >= BUFFER_SIZE_ELEMENTS(appid))
return INVALID_FILE;
NULL_TERMINATE_BUFFER(appid);
return drx_open_unique_file(dir, appid, suffix, extra_flags, result, result_len);
}
bool
drx_open_unique_appid_dir(const char *dir, ptr_int_t id, const char *prefix,
const char *suffix, char *result OUT, size_t result_len)
{
char buf[MAXIMUM_PATH];
int i;
ssize_t len;
for (i = 0; i < 10000; i++) {
const char *app_name = dr_get_application_name();
if (app_name == NULL)
app_name = "<unknown-app>";
len = dr_snprintf(buf, BUFFER_SIZE_ELEMENTS(buf), "%s%c%s.%s.%05d.%04d.%s", dir,
DIRSEP, prefix, app_name, id, i, suffix);
if (len < 0 || (size_t)len >= BUFFER_SIZE_ELEMENTS(buf))
return false;
NULL_TERMINATE_BUFFER(buf);
if (dr_create_dir(buf)) {
if (result != NULL)
dr_snprintf(result, result_len, "%s", buf);
return true;
}
}
return false;
}
bool
drx_tail_pad_block(void *drcontext, instrlist_t *ilist)
{
instr_t *last = instrlist_last_app(ilist);
if (instr_is_cti(last) || instr_is_syscall(last)) {
        /* This basic block is already branch- or syscall-terminated. */
return false;
}
instrlist_meta_postinsert(ilist, last, INSTR_CREATE_label(drcontext));
return true;
}
/***************************************************************************
* drx_expand_scatter_gather() related auxiliary functions and structures.
*/
#ifdef PLATFORM_SUPPORTS_SCATTER_GATHER
typedef struct _scatter_gather_info_t {
bool is_evex;
bool is_load;
opnd_size_t scalar_index_size;
opnd_size_t scalar_value_size;
opnd_size_t scatter_gather_size;
reg_id_t mask_reg;
reg_id_t base_reg;
reg_id_t index_reg;
union {
reg_id_t gather_dst_reg;
reg_id_t scatter_src_reg;
};
int disp;
int scale;
} scatter_gather_info_t;
static void
get_scatter_gather_info(instr_t *instr, scatter_gather_info_t *sg_info)
{
/* We detect whether the instruction is EVEX by looking at its potential mask
* operand.
*/
opnd_t dst0 = instr_get_dst(instr, 0);
opnd_t src0 = instr_get_src(instr, 0);
opnd_t src1 = instr_get_src(instr, 1);
sg_info->is_evex = opnd_is_reg(src0) && reg_is_opmask(opnd_get_reg(src0));
sg_info->mask_reg = sg_info->is_evex ? opnd_get_reg(src0) : opnd_get_reg(src1);
ASSERT(!sg_info->is_evex ||
(opnd_get_reg(instr_get_dst(instr, 1)) == opnd_get_reg(src0)),
"Invalid gather instruction.");
int opc = instr_get_opcode(instr);
opnd_t memopnd;
switch (opc) {
case OP_vgatherdpd:
sg_info->scalar_index_size = OPSZ_4;
sg_info->scalar_value_size = OPSZ_8;
sg_info->is_load = true;
break;
case OP_vgatherqpd:
sg_info->scalar_index_size = OPSZ_8;
sg_info->scalar_value_size = OPSZ_8;
sg_info->is_load = true;
break;
case OP_vgatherdps:
sg_info->scalar_index_size = OPSZ_4;
sg_info->scalar_value_size = OPSZ_4;
sg_info->is_load = true;
break;
case OP_vgatherqps:
sg_info->scalar_index_size = OPSZ_8;
sg_info->scalar_value_size = OPSZ_4;
sg_info->is_load = true;
break;
case OP_vpgatherdd:
sg_info->scalar_index_size = OPSZ_4;
sg_info->scalar_value_size = OPSZ_4;
sg_info->is_load = true;
break;
case OP_vpgatherqd:
sg_info->scalar_index_size = OPSZ_8;
sg_info->scalar_value_size = OPSZ_4;
sg_info->is_load = true;
break;
case OP_vpgatherdq:
sg_info->scalar_index_size = OPSZ_4;
sg_info->scalar_value_size = OPSZ_8;
sg_info->is_load = true;
break;
case OP_vpgatherqq:
sg_info->scalar_index_size = OPSZ_8;
sg_info->scalar_value_size = OPSZ_8;
sg_info->is_load = true;
break;
case OP_vscatterdpd:
sg_info->scalar_index_size = OPSZ_4;
sg_info->scalar_value_size = OPSZ_8;
sg_info->is_load = false;
break;
case OP_vscatterqpd:
sg_info->scalar_index_size = OPSZ_8;
sg_info->scalar_value_size = OPSZ_8;
sg_info->is_load = false;
break;
case OP_vscatterdps:
sg_info->scalar_index_size = OPSZ_4;
sg_info->scalar_value_size = OPSZ_4;
sg_info->is_load = false;
break;
case OP_vscatterqps:
sg_info->scalar_index_size = OPSZ_8;
sg_info->scalar_value_size = OPSZ_4;
sg_info->is_load = false;
break;
case OP_vpscatterdd:
sg_info->scalar_index_size = OPSZ_4;
sg_info->scalar_value_size = OPSZ_4;
sg_info->is_load = false;
break;
case OP_vpscatterqd:
sg_info->scalar_index_size = OPSZ_8;
sg_info->scalar_value_size = OPSZ_4;
sg_info->is_load = false;
break;
case OP_vpscatterdq:
sg_info->scalar_index_size = OPSZ_4;
sg_info->scalar_value_size = OPSZ_8;
sg_info->is_load = false;
break;
case OP_vpscatterqq:
sg_info->scalar_index_size = OPSZ_8;
sg_info->scalar_value_size = OPSZ_8;
sg_info->is_load = false;
break;
default:
ASSERT(false, "Incorrect opcode.");
memopnd = opnd_create_null();
break;
}
if (sg_info->is_load) {
sg_info->scatter_gather_size = opnd_get_size(dst0);
sg_info->gather_dst_reg = opnd_get_reg(dst0);
memopnd = sg_info->is_evex ? src1 : src0;
} else {
sg_info->scatter_gather_size = opnd_get_size(src1);
sg_info->scatter_src_reg = opnd_get_reg(src1);
memopnd = dst0;
}
sg_info->index_reg = opnd_get_index(memopnd);
sg_info->base_reg = opnd_get_base(memopnd);
sg_info->disp = opnd_get_disp(memopnd);
sg_info->scale = opnd_get_scale(memopnd);
}
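/* As a worked example (mirroring the expansion walkthrough further below), for
 * the AVX2 gather
 *   vpgatherdd (%rax,%ymm1,4)[4byte] %ymm2 -> %ymm0 %ymm2
 * this routine fills in: is_evex=false, is_load=true, scalar_index_size=OPSZ_4,
 * scalar_value_size=OPSZ_4, scatter_gather_size=OPSZ_32, mask_reg=ymm2,
 * base_reg=rax, index_reg=ymm1, gather_dst_reg=ymm0, disp=0, scale=4.
 */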
static bool
expand_gather_insert_scalar(void *drcontext, instrlist_t *bb, instr_t *sg_instr, int el,
scatter_gather_info_t *sg_info, reg_id_t simd_reg,
reg_id_t scalar_reg, reg_id_t scratch_xmm, bool is_avx512,
app_pc orig_app_pc)
{
/* Used by both AVX2 and AVX-512. */
ASSERT(instr_is_gather(sg_instr), "Internal error: only gather instructions.");
reg_id_t simd_reg_zmm = reg_resize_to_opsz(simd_reg, OPSZ_64);
reg_id_t simd_reg_ymm = reg_resize_to_opsz(simd_reg, OPSZ_32);
uint scalar_value_bytes = opnd_size_in_bytes(sg_info->scalar_value_size);
int scalarxmmimm = el * scalar_value_bytes / XMM_REG_SIZE;
if (is_avx512) {
PREXL8(bb, sg_instr,
INSTR_XL8(INSTR_CREATE_vextracti32x4_mask(
drcontext, opnd_create_reg(scratch_xmm),
opnd_create_reg(DR_REG_K0),
opnd_create_immed_int(scalarxmmimm, OPSZ_1),
opnd_create_reg(simd_reg_zmm)),
orig_app_pc));
} else {
PREXL8(bb, sg_instr,
INSTR_XL8(
INSTR_CREATE_vextracti128(drcontext, opnd_create_reg(scratch_xmm),
opnd_create_reg(simd_reg_ymm),
opnd_create_immed_int(scalarxmmimm, OPSZ_1)),
orig_app_pc));
}
if (sg_info->scalar_value_size == OPSZ_4) {
PREXL8(
bb, sg_instr,
INSTR_XL8(
INSTR_CREATE_vpinsrd(
drcontext, opnd_create_reg(scratch_xmm), opnd_create_reg(scratch_xmm),
opnd_create_reg(IF_X64_ELSE(reg_64_to_32(scalar_reg), scalar_reg)),
opnd_create_immed_int((el * scalar_value_bytes) % XMM_REG_SIZE /
opnd_size_in_bytes(OPSZ_4),
OPSZ_1)),
orig_app_pc));
} else if (sg_info->scalar_value_size == OPSZ_8) {
ASSERT(reg_is_64bit(scalar_reg),
"The qword index versions are unsupported in 32-bit mode.");
PREXL8(
bb, sg_instr,
INSTR_XL8(INSTR_CREATE_vpinsrq(
drcontext, opnd_create_reg(scratch_xmm),
opnd_create_reg(scratch_xmm), opnd_create_reg(scalar_reg),
opnd_create_immed_int((el * scalar_value_bytes) % XMM_REG_SIZE /
opnd_size_in_bytes(OPSZ_8),
OPSZ_1)),
orig_app_pc));
} else {
ASSERT(false, "Unexpected index size.");
}
if (is_avx512) {
PREXL8(bb, sg_instr,
INSTR_XL8(INSTR_CREATE_vinserti32x4_mask(
drcontext, opnd_create_reg(simd_reg_zmm),
opnd_create_reg(DR_REG_K0),
opnd_create_immed_int(scalarxmmimm, OPSZ_1),
opnd_create_reg(simd_reg_zmm), opnd_create_reg(scratch_xmm)),
orig_app_pc));
} else {
PREXL8(bb, sg_instr,
INSTR_XL8(INSTR_CREATE_vinserti128(
drcontext, opnd_create_reg(simd_reg_ymm),
opnd_create_reg(simd_reg_ymm), opnd_create_reg(scratch_xmm),
opnd_create_immed_int(scalarxmmimm, OPSZ_1)),
orig_app_pc));
}
return true;
}
static bool
expand_avx512_gather_insert_scalar_value(void *drcontext, instrlist_t *bb,
instr_t *sg_instr, int el,
scatter_gather_info_t *sg_info,
reg_id_t scalar_value_reg, reg_id_t scratch_xmm,
app_pc orig_app_pc)
{
return expand_gather_insert_scalar(drcontext, bb, sg_instr, el, sg_info,
sg_info->gather_dst_reg, scalar_value_reg,
scratch_xmm, true /* AVX-512 */, orig_app_pc);
}
static bool
expand_avx2_gather_insert_scalar_value(void *drcontext, instrlist_t *bb,
instr_t *sg_instr, int el,
scatter_gather_info_t *sg_info,
reg_id_t scalar_value_reg, reg_id_t scratch_xmm,
app_pc orig_app_pc)
{
return expand_gather_insert_scalar(drcontext, bb, sg_instr, el, sg_info,
sg_info->gather_dst_reg, scalar_value_reg,
scratch_xmm, false /* AVX2 */, orig_app_pc);
}
static bool
expand_avx2_gather_insert_scalar_mask(void *drcontext, instrlist_t *bb, instr_t *sg_instr,
int el, scatter_gather_info_t *sg_info,
reg_id_t scalar_index_reg, reg_id_t scratch_xmm,
app_pc orig_app_pc)
{
return expand_gather_insert_scalar(drcontext, bb, sg_instr, el, sg_info,
sg_info->mask_reg, scalar_index_reg, scratch_xmm,
false /* AVX2 */, orig_app_pc);
}
static bool
expand_scatter_gather_extract_scalar(void *drcontext, instrlist_t *bb, instr_t *sg_instr,
int el, scatter_gather_info_t *sg_info,
opnd_size_t scalar_size, uint scalar_bytes,
reg_id_t from_simd_reg, reg_id_t scratch_xmm,
reg_id_t scratch_reg, bool is_avx512,
app_pc orig_app_pc)
{
reg_id_t from_simd_reg_zmm = reg_resize_to_opsz(from_simd_reg, OPSZ_64);
reg_id_t from_simd_reg_ymm = reg_resize_to_opsz(from_simd_reg, OPSZ_32);
int scalarxmmimm = el * scalar_bytes / XMM_REG_SIZE;
if (is_avx512) {
PREXL8(bb, sg_instr,
INSTR_XL8(INSTR_CREATE_vextracti32x4_mask(
drcontext, opnd_create_reg(scratch_xmm),
opnd_create_reg(DR_REG_K0),
opnd_create_immed_int(scalarxmmimm, OPSZ_1),
opnd_create_reg(from_simd_reg_zmm)),
orig_app_pc));
} else {
PREXL8(bb, sg_instr,
INSTR_XL8(
INSTR_CREATE_vextracti128(drcontext, opnd_create_reg(scratch_xmm),
opnd_create_reg(from_simd_reg_ymm),
opnd_create_immed_int(scalarxmmimm, OPSZ_1)),
orig_app_pc));
}
if (scalar_size == OPSZ_4) {
PREXL8(bb, sg_instr,
INSTR_XL8(INSTR_CREATE_vpextrd(
drcontext,
opnd_create_reg(
IF_X64_ELSE(reg_64_to_32(scratch_reg), scratch_reg)),
opnd_create_reg(scratch_xmm),
opnd_create_immed_int((el * scalar_bytes) % XMM_REG_SIZE /
opnd_size_in_bytes(OPSZ_4),
OPSZ_1)),
orig_app_pc));
} else if (scalar_size == OPSZ_8) {
ASSERT(reg_is_64bit(scratch_reg),
"The qword index versions are unsupported in 32-bit mode.");
PREXL8(bb, sg_instr,
INSTR_XL8(INSTR_CREATE_vpextrq(
drcontext, opnd_create_reg(scratch_reg),
opnd_create_reg(scratch_xmm),
opnd_create_immed_int((el * scalar_bytes) % XMM_REG_SIZE /
opnd_size_in_bytes(OPSZ_8),
OPSZ_1)),
orig_app_pc));
} else {
ASSERT(false, "Unexpected scalar size.");
return false;
}
return true;
}
static bool
expand_avx512_scatter_extract_scalar_value(void *drcontext, instrlist_t *bb,
instr_t *sg_instr, int el,
scatter_gather_info_t *sg_info,
reg_id_t scratch_xmm, reg_id_t scratch_reg,
app_pc orig_app_pc)
{
return expand_scatter_gather_extract_scalar(
drcontext, bb, sg_instr, el, sg_info, sg_info->scalar_value_size,
opnd_size_in_bytes(sg_info->scalar_value_size), sg_info->scatter_src_reg,
scratch_xmm, scratch_reg, true /* AVX-512 */, orig_app_pc);
}
static bool
expand_avx512_scatter_gather_extract_scalar_index(void *drcontext, instrlist_t *bb,
instr_t *sg_instr, int el,
scatter_gather_info_t *sg_info,
reg_id_t scratch_xmm,
reg_id_t scratch_reg,
app_pc orig_app_pc)
{
return expand_scatter_gather_extract_scalar(
drcontext, bb, sg_instr, el, sg_info, sg_info->scalar_index_size,
opnd_size_in_bytes(sg_info->scalar_index_size), sg_info->index_reg, scratch_xmm,
scratch_reg, true /* AVX-512 */, orig_app_pc);
}
static bool
expand_avx2_gather_extract_scalar_index(void *drcontext, instrlist_t *bb,
instr_t *sg_instr, int el,
scatter_gather_info_t *sg_info,
reg_id_t scratch_xmm, reg_id_t scratch_reg,
app_pc orig_app_pc)
{
return expand_scatter_gather_extract_scalar(
drcontext, bb, sg_instr, el, sg_info, sg_info->scalar_index_size,
opnd_size_in_bytes(sg_info->scalar_index_size), sg_info->index_reg, scratch_xmm,
scratch_reg, false /* AVX2 */, orig_app_pc);
}
static bool
expand_avx512_scatter_gather_update_mask(void *drcontext, instrlist_t *bb,
instr_t *sg_instr, int el,
scatter_gather_info_t *sg_info,
reg_id_t scratch_reg, app_pc orig_app_pc,
drvector_t *allowed)
{
reg_id_t save_mask_reg;
PREXL8(bb, sg_instr,
INSTR_XL8(INSTR_CREATE_mov_imm(drcontext,
opnd_create_reg(IF_X64_ELSE(
reg_64_to_32(scratch_reg), scratch_reg)),
OPND_CREATE_INT32(1 << el)),
orig_app_pc));
if (drreg_reserve_register(drcontext, bb, sg_instr, allowed, &save_mask_reg) !=
DRREG_SUCCESS)
return false;
/* The scratch k register we're using here is always k0, because it is never
* used for scatter/gather.
*/
MINSERT(bb, sg_instr,
INSTR_CREATE_kmovw(
drcontext,
opnd_create_reg(IF_X64_ELSE(reg_64_to_32(save_mask_reg), save_mask_reg)),
opnd_create_reg(DR_REG_K0)));
PREXL8(bb, sg_instr,
INSTR_XL8(INSTR_CREATE_kmovw(drcontext, opnd_create_reg(DR_REG_K0),
opnd_create_reg(IF_X64_ELSE(
reg_64_to_32(scratch_reg), scratch_reg))),
orig_app_pc));
PREXL8(bb, sg_instr,
INSTR_XL8(INSTR_CREATE_kandnw(drcontext, opnd_create_reg(sg_info->mask_reg),
opnd_create_reg(DR_REG_K0),
opnd_create_reg(sg_info->mask_reg)),
orig_app_pc));
MINSERT(bb, sg_instr,
INSTR_CREATE_kmovw(drcontext, opnd_create_reg(DR_REG_K0),
opnd_create_reg(IF_X64_ELSE(reg_64_to_32(save_mask_reg),
save_mask_reg))));
if (drreg_unreserve_register(drcontext, bb, sg_instr, save_mask_reg) !=
DRREG_SUCCESS) {
ASSERT(false, "drreg_unreserve_register should not fail");
return false;
}
return true;
}
static bool
expand_avx2_gather_update_mask(void *drcontext, instrlist_t *bb, instr_t *sg_instr,
int el, scatter_gather_info_t *sg_info,
reg_id_t scratch_xmm, reg_id_t scratch_reg,
app_pc orig_app_pc)
{
    /* The width of the mask element and the data element is identical by definition
     * of the instruction.
*/
if (sg_info->scalar_value_size == OPSZ_4) {
PREXL8(
bb, sg_instr,
INSTR_XL8(
INSTR_CREATE_xor(
drcontext,
opnd_create_reg(IF_X64_ELSE(reg_64_to_32(scratch_reg), scratch_reg)),
opnd_create_reg(IF_X64_ELSE(reg_64_to_32(scratch_reg), scratch_reg))),
orig_app_pc));
} else if (sg_info->scalar_value_size == OPSZ_8) {
PREXL8(bb, sg_instr,
INSTR_XL8(INSTR_CREATE_xor(drcontext, opnd_create_reg(scratch_reg),
opnd_create_reg(scratch_reg)),
orig_app_pc));
}
reg_id_t null_index_reg = scratch_reg;
if (!expand_avx2_gather_insert_scalar_mask(drcontext, bb, sg_instr, el, sg_info,
null_index_reg, scratch_xmm, orig_app_pc))
return false;
return true;
}
static bool
expand_avx2_gather_make_test(void *drcontext, instrlist_t *bb, instr_t *sg_instr, int el,
scatter_gather_info_t *sg_info, reg_id_t scratch_xmm,
reg_id_t scratch_reg, instr_t *skip_label,
app_pc orig_app_pc)
{
    /* The width of the mask element and the data element is identical by definition
     * of the instruction.
*/
expand_scatter_gather_extract_scalar(
drcontext, bb, sg_instr, el, sg_info, sg_info->scalar_value_size,
opnd_size_in_bytes(sg_info->scalar_value_size), sg_info->mask_reg, scratch_xmm,
scratch_reg, false /* AVX2 */, orig_app_pc);
if (sg_info->scalar_value_size == OPSZ_4) {
PREXL8(bb, sg_instr,
INSTR_XL8(INSTR_CREATE_shr(drcontext,
opnd_create_reg(IF_X64_ELSE(
reg_64_to_32(scratch_reg), scratch_reg)),
OPND_CREATE_INT8(31)),
orig_app_pc));
PREXL8(bb, sg_instr,
INSTR_XL8(INSTR_CREATE_and(drcontext,
opnd_create_reg(IF_X64_ELSE(
reg_64_to_32(scratch_reg), scratch_reg)),
OPND_CREATE_INT32(1)),
orig_app_pc));
} else if (sg_info->scalar_value_size == OPSZ_8) {
PREXL8(bb, sg_instr,
INSTR_XL8(INSTR_CREATE_shr(drcontext, opnd_create_reg(scratch_reg),
OPND_CREATE_INT8(63)),
orig_app_pc));
PREXL8(bb, sg_instr,
INSTR_XL8(INSTR_CREATE_and(drcontext, opnd_create_reg(scratch_reg),
OPND_CREATE_INT32(1)),
orig_app_pc));
}
PREXL8(bb, sg_instr,
INSTR_XL8(INSTR_CREATE_jcc(drcontext, OP_jz, opnd_create_instr(skip_label)),
orig_app_pc));
return true;
}
static bool
expand_avx512_scatter_gather_make_test(void *drcontext, instrlist_t *bb,
instr_t *sg_instr, int el,
scatter_gather_info_t *sg_info,
reg_id_t scratch_reg, instr_t *skip_label,
app_pc orig_app_pc)
{
PREXL8(bb, sg_instr,
INSTR_XL8(INSTR_CREATE_kmovw(drcontext,
opnd_create_reg(IF_X64_ELSE(
reg_64_to_32(scratch_reg), scratch_reg)),
opnd_create_reg(sg_info->mask_reg)),
orig_app_pc));
PREXL8(bb, sg_instr,
INSTR_XL8(INSTR_CREATE_test(drcontext,
opnd_create_reg(IF_X64_ELSE(
reg_64_to_32(scratch_reg), scratch_reg)),
OPND_CREATE_INT32(1 << el)),
orig_app_pc));
PREXL8(bb, sg_instr,
INSTR_XL8(INSTR_CREATE_jcc(drcontext, OP_jz, opnd_create_instr(skip_label)),
orig_app_pc));
return true;
}
static bool
expand_avx512_scatter_store_scalar_value(void *drcontext, instrlist_t *bb,
instr_t *sg_instr,
scatter_gather_info_t *sg_info,
reg_id_t scalar_index_reg,
reg_id_t scalar_value_reg, app_pc orig_app_pc)
{
if (sg_info->base_reg == IF_X64_ELSE(DR_REG_RAX, DR_REG_EAX)) {
/* We need the app's base register value. If it's xax, then it may be used to
* store flags by drreg.
*/
drreg_get_app_value(drcontext, bb, sg_instr, sg_info->base_reg,
sg_info->base_reg);
}
if (sg_info->scalar_value_size == OPSZ_4) {
PREXL8(bb, sg_instr,
INSTR_XL8(INSTR_CREATE_mov_st(
drcontext,
opnd_create_base_disp(sg_info->base_reg, scalar_index_reg,
sg_info->scale, sg_info->disp, OPSZ_4),
opnd_create_reg(IF_X64_ELSE(reg_64_to_32(scalar_value_reg),
scalar_value_reg))),
orig_app_pc));
} else if (sg_info->scalar_value_size == OPSZ_8) {
ASSERT(reg_is_64bit(scalar_index_reg),
"Internal error: scratch index register not 64-bit.");
ASSERT(reg_is_64bit(scalar_value_reg),
"Internal error: scratch value register not 64-bit.");
PREXL8(bb, sg_instr,
INSTR_XL8(INSTR_CREATE_mov_st(
drcontext,
opnd_create_base_disp(sg_info->base_reg, scalar_index_reg,
sg_info->scale, sg_info->disp, OPSZ_8),
opnd_create_reg(scalar_value_reg)),
orig_app_pc));
} else {
ASSERT(false, "Unexpected index size.");
return false;
}
return true;
}
static bool
expand_gather_load_scalar_value(void *drcontext, instrlist_t *bb, instr_t *sg_instr,
scatter_gather_info_t *sg_info, reg_id_t scalar_index_reg,
app_pc orig_app_pc)
{
if (sg_info->base_reg == IF_X64_ELSE(DR_REG_RAX, DR_REG_EAX)) {
/* We need the app's base register value. If it's xax, then it may be used to
* store flags by drreg.
*/
drreg_get_app_value(drcontext, bb, sg_instr, sg_info->base_reg,
sg_info->base_reg);
}
if (sg_info->scalar_value_size == OPSZ_4) {
PREXL8(
bb, sg_instr,
INSTR_XL8(INSTR_CREATE_mov_ld(
drcontext,
opnd_create_reg(IF_X64_ELSE(reg_64_to_32(scalar_index_reg),
scalar_index_reg)),
opnd_create_base_disp(sg_info->base_reg, scalar_index_reg,
sg_info->scale, sg_info->disp, OPSZ_4)),
orig_app_pc));
} else if (sg_info->scalar_value_size == OPSZ_8) {
ASSERT(reg_is_64bit(scalar_index_reg),
"Internal error: scratch register not 64-bit.");
PREXL8(bb, sg_instr,
INSTR_XL8(INSTR_CREATE_mov_ld(drcontext, opnd_create_reg(scalar_index_reg),
opnd_create_base_disp(
sg_info->base_reg, scalar_index_reg,
sg_info->scale, sg_info->disp, OPSZ_8)),
orig_app_pc));
} else {
ASSERT(false, "Unexpected index size.");
return false;
}
return true;
}
#endif
/*****************************************************************************************
* drx_expand_scatter_gather()
*
 * The function expands scatter and gather instructions into a sequence of equivalent
 * scalar operations. Gather instructions are expanded into a sequence of mask register
 * bit tests, index extraction, a scalar load, insertion of the scalar value into
 * the destination simd register, and mask register bit updates. Scatter instructions
 * are expanded into a similar sequence, but with a scalar store in place of the load.
 * Registers spilled and restored by drreg are not illustrated in the sequence below.
*
* ------------------------------------------------------------------------------
* AVX2 vpgatherdd, vgatherdps, vpgatherdq, vgatherdpd, vpgatherqd, vgatherqps, |
* vpgatherqq, vgatherqpd: |
* ------------------------------------------------------------------------------
*
* vpgatherdd (%rax,%ymm1,4)[4byte] %ymm2 -> %ymm0 %ymm2 sequence laid out here,
* others are similar:
*
* Extract mask dword. qword versions use vpextrq:
* vextracti128 %ymm2 $0x00 -> %xmm3
* vpextrd %xmm3 $0x00 -> %ecx
* Test mask bit:
* shr $0x0000001f %ecx -> %ecx
* and $0x00000001 %ecx -> %ecx
* Skip element if mask not set:
* jz <skip0>
* Extract index dword. qword versions use vpextrq:
* vextracti128 %ymm1 $0x00 -> %xmm3
* vpextrd %xmm3 $0x00 -> %ecx
* Restore app's base register value (may not be present):
* mov %rax -> %gs:0x00000090[8byte]
* mov %gs:0x00000098[8byte] -> %rax
* Load scalar value:
* mov (%rax,%rcx,4)[4byte] -> %ecx
* Insert scalar value in destination register:
* vextracti128 %ymm0 $0x00 -> %xmm3
* vpinsrd %xmm3 %ecx $0x00 -> %xmm3
* vinserti128 %ymm0 %xmm3 $0x00 -> %ymm0
* Set mask dword to zero:
* xor %ecx %ecx -> %ecx
* vextracti128 %ymm2 $0x00 -> %xmm3
* vpinsrd %xmm3 %ecx $0x00 -> %xmm3
* vinserti128 %ymm2 %xmm3 $0x00 -> %ymm2
* skip0:
* Do the same as above for the next element:
* vextracti128 %ymm2 $0x00 -> %xmm3
* vpextrd %xmm3 $0x01 -> %ecx
* shr $0x0000001f %ecx -> %ecx
* and $0x00000001 %ecx -> %ecx
* jz <skip1>
* vextracti128 %ymm1 $0x00 -> %xmm3
* vpextrd %xmm3 $0x01 -> %ecx
* mov (%rax,%rcx,4)[4byte] -> %ecx
* vextracti128 %ymm0 $0x00 -> %xmm3
* vpinsrd %xmm3 %ecx $0x01 -> %xmm3
* vinserti128 %ymm0 %xmm3 $0x00 -> %ymm0
* xor %ecx %ecx -> %ecx
* vextracti128 %ymm2 $0x00 -> %xmm3
* vpinsrd %xmm3 %ecx $0x01 -> %xmm3
* vinserti128 %ymm2 %xmm3 $0x00 -> %ymm2
* skip1:
* [..]
* Do the same as above for the last element:
* vextracti128 %ymm2 $0x01 -> %xmm3
* vpextrd %xmm3 $0x03 -> %ecx
* shr $0x0000001f %ecx -> %ecx
* and $0x00000001 %ecx -> %ecx
* jz <skip7>
* vextracti128 %ymm1 $0x01 -> %xmm3
* vpextrd %xmm3 $0x03 -> %ecx
* mov (%rax,%rcx,4)[4byte] -> %ecx
* vextracti128 %ymm0 $0x01 -> %xmm3
* vpinsrd %xmm3 %ecx $0x03 -> %xmm3
* vinserti128 %ymm0 %xmm3 $0x01 -> %ymm0
* xor %ecx %ecx -> %ecx
* vextracti128 %ymm2 $0x01 -> %xmm3
* vpinsrd %xmm3 %ecx $0x03 -> %xmm3
* vinserti128 %ymm2 %xmm3 $0x01 -> %ymm2
* skip7:
* Finally, clear the entire mask register, even
* the parts that are not used as a mask:
* vpxor %ymm2 %ymm2 -> %ymm2
*
* ---------------------------------------------------------------------------------
* AVX-512 vpgatherdd, vgatherdps, vpgatherdq, vgatherdpd, vpgatherqd, vgatherqps, |
* vpgatherqq, vgatherqpd: |
* ---------------------------------------------------------------------------------
*
* vpgatherdd {%k1} (%rax,%zmm1,4)[4byte] -> %zmm0 %k1 sequence laid out here,
* others are similar:
*
* Extract mask bit:
* kmovw %k1 -> %ecx
* Test mask bit:
* test %ecx $0x00000001
* Skip element if mask not set:
* jz <skip0>
* Extract index dword. qword versions use vpextrq:
* vextracti32x4 {%k0} $0x00 %zmm1 -> %xmm2
* vpextrd %xmm2 $0x00 -> %ecx
* Restore app's base register value (may not be present):
* mov %rax -> %gs:0x00000090[8byte]
* mov %gs:0x00000098[8byte] -> %rax
* Load scalar value:
* mov (%rax,%rcx,4)[4byte] -> %ecx
* Insert scalar value in destination register:
* vextracti32x4 {%k0} $0x00 %zmm0 -> %xmm2
* vpinsrd %xmm2 %ecx $0x00 -> %xmm2
* vinserti32x4 {%k0} $0x00 %zmm0 %xmm2 -> %zmm0
* Set mask bit to zero:
* mov $0x00000001 -> %ecx
* %k0 is saved to a gpr here, while the gpr
* is managed by drreg. This is not further
 *              laid out in this example.
* kmovw %ecx -> %k0
* kandnw %k0 %k1 -> %k1
* It is not illustrated that %k0 is restored here.
* skip0:
* Do the same as above for the next element:
* kmovw %k1 -> %ecx
* test %ecx $0x00000002
* jz <skip1>
* vextracti32x4 {%k0} $0x00 %zmm1 -> %xmm2
* vpextrd %xmm2 $0x01 -> %ecx
* mov (%rax,%rcx,4)[4byte] -> %ecx
* vextracti32x4 {%k0} $0x00 %zmm0 -> %xmm2
* vpinsrd %xmm2 %ecx $0x01 -> %xmm2
* vinserti32x4 {%k0} $0x00 %zmm0 %xmm2 -> %zmm0
* mov $0x00000002 -> %ecx
* kmovw %ecx -> %k0
* kandnw %k0 %k1 -> %k1
* skip1:
* [..]
* Do the same as above for the last element:
* kmovw %k1 -> %ecx
* test %ecx $0x00008000
* jz <skip15>
* vextracti32x4 {%k0} $0x03 %zmm1 -> %xmm2
* vpextrd %xmm2 $0x03 -> %ecx
* mov (%rax,%rcx,4)[4byte] -> %ecx
* vextracti32x4 {%k0} $0x03 %zmm0 -> %xmm2
* vpinsrd %xmm2 %ecx $0x03 -> %xmm2
* vinserti32x4 {%k0} $0x03 %zmm0 %xmm2 -> %zmm0
* mov $0x00008000 -> %ecx
* kmovw %ecx -> %k0
* kandnw %k0 %k1 -> %k1
* skip15:
* Finally, clear the entire mask register, even
* the parts that are not used as a mask:
* kxorq %k1 %k1 -> %k1
*
* --------------------------------------------------------------------------
* AVX-512 vpscatterdd, vscatterdps, vpscatterdq, vscatterdpd, vpscatterqd, |
* vscatterqps, vpscatterqq, vscatterqpd: |
* --------------------------------------------------------------------------
*
* vpscatterdd {%k1} %zmm0 -> (%rcx,%zmm1,4)[4byte] %k1 sequence laid out here,
* others are similar:
*
* Extract mask bit:
* kmovw %k1 -> %edx
* Test mask bit:
* test %edx $0x00000001
* Skip element if mask not set:
* jz <skip0>
* Extract index dword. qword versions use vpextrq:
* vextracti32x4 {%k0} $0x00 %zmm1 -> %xmm2
* vpextrd %xmm2 $0x00 -> %edx
* Extract scalar value dword. qword versions use vpextrq:
* vextracti32x4 {%k0} $0x00 %zmm0 -> %xmm2
* vpextrd %xmm2 $0x00 -> %ebx
* Store scalar value:
* mov %ebx -> (%rcx,%rdx,4)[4byte]
* Set mask bit to zero:
* mov $0x00000001 -> %edx
* kmovw %edx -> %k0
* kandnw %k0 %k1 -> %k1
* skip0:
* Do the same as above for the next element:
* kmovw %k1 -> %edx
* test %edx $0x00000002
* jz <skip1>
* vextracti32x4 {%k0} $0x00 %zmm1 -> %xmm2
* vpextrd %xmm2 $0x01 -> %edx
* vextracti32x4 {%k0} $0x00 %zmm0 -> %xmm2
* vpextrd %xmm2 $0x01 -> %ebx
* mov %ebx -> (%rcx,%rdx,4)[4byte]
* mov $0x00000002 -> %edx
* kmovw %edx -> %k0
* kandnw %k0 %k1 -> %k1
* skip1:
* [..]
* Do the same as above for the last element:
* kmovw %k1 -> %edx
* test %edx $0x00008000
* jz <skip15>
* vextracti32x4 {%k0} $0x03 %zmm1 -> %xmm2
* vpextrd %xmm2 $0x03 -> %edx
* vextracti32x4 {%k0} $0x03 %zmm0 -> %xmm2
* vpextrd %xmm2 $0x03 -> %ebx
* mov %ebx -> (%rcx,%rdx,4)[4byte]
* mov $0x00008000 -> %edx
* kmovw %edx -> %k0
* kandnw %k0 %k1 -> %k1
* skip15:
* Finally, clear the entire mask register, even
* the parts that are not used as a mask:
* kxorq %k1 %k1 -> %k1
*/
bool
drx_expand_scatter_gather(void *drcontext, instrlist_t *bb, OUT bool *expanded)
{
#ifdef PLATFORM_SUPPORTS_SCATTER_GATHER
instr_t *instr, *next_instr, *first_app = NULL;
bool delete_rest = false;
#endif
if (expanded != NULL)
*expanded = false;
if (drmgr_current_bb_phase(drcontext) != DRMGR_PHASE_APP2APP) {
return false;
}
#ifdef PLATFORM_SUPPORTS_SCATTER_GATHER
    /* Make each scatter or gather instruction be in its own basic block.
* TODO i#3837: cross-platform code like the following bb splitting can be shared
* with other architectures in the future.
*/
for (instr = instrlist_first(bb); instr != NULL; instr = next_instr) {
next_instr = instr_get_next(instr);
if (delete_rest) {
instrlist_remove(bb, instr);
instr_destroy(drcontext, instr);
} else if (instr_is_app(instr)) {
if (first_app == NULL)
first_app = instr;
if (instr_is_gather(instr) || instr_is_scatter(instr)) {
delete_rest = true;
if (instr != first_app) {
instrlist_remove(bb, instr);
instr_destroy(drcontext, instr);
}
}
}
}
if (first_app == NULL)
return true;
if (!instr_is_gather(first_app) && !instr_is_scatter(first_app))
return true;
instr_t *sg_instr = first_app;
scatter_gather_info_t sg_info;
bool res = false;
/* XXX: we may want to make this function public, as it may be useful to clients. */
get_scatter_gather_info(sg_instr, &sg_info);
# ifndef X64
if (sg_info.scalar_index_size == OPSZ_8 || sg_info.scalar_value_size == OPSZ_8) {
/* FIXME i#2985: we do not yet support expansion of the qword index and value
* scatter/gather versions in 32-bit mode.
*/
return false;
}
# endif
/* The expansion potentially needs more slots than the drx default. We need up to 2
* slots on x86 plus the 1 slot drreg uses for aflags. We set do_not_sum_slots here
     * to true.
*/
if (!expand_scatter_gather_drreg_initialized) {
/* We're requesting 3 slots for 3 gprs plus 3 additional ones because they are
* used cross-app. The additional slots are needed if drreg needs to move the
* values as documented in drreg.
*/
drreg_options_t ops = { sizeof(ops), 3 + 3, false, NULL, true };
if (drreg_init(&ops) != DRREG_SUCCESS)
return false;
expand_scatter_gather_drreg_initialized = true;
}
uint no_of_elements = opnd_size_in_bytes(sg_info.scatter_gather_size) /
MAX(opnd_size_in_bytes(sg_info.scalar_index_size),
opnd_size_in_bytes(sg_info.scalar_value_size));
reg_id_t scratch_reg0 = DR_REG_INVALID, scratch_reg1 = DR_REG_INVALID;
drvector_t allowed;
drreg_init_and_fill_vector(&allowed, true);
    /* We need the scratch registers and the app's base register value to be available
     * at the same time, so exclude the base register from the allowed scratch set.
*/
drreg_set_vector_entry(&allowed, sg_info.base_reg, false);
if (drreg_reserve_aflags(drcontext, bb, sg_instr) != DRREG_SUCCESS)
goto drx_expand_scatter_gather_exit;
if (drreg_reserve_register(drcontext, bb, sg_instr, &allowed, &scratch_reg0) !=
DRREG_SUCCESS)
goto drx_expand_scatter_gather_exit;
if (instr_is_scatter(sg_instr)) {
if (drreg_reserve_register(drcontext, bb, sg_instr, &allowed, &scratch_reg1) !=
DRREG_SUCCESS)
goto drx_expand_scatter_gather_exit;
}
app_pc orig_app_pc = instr_get_app_pc(sg_instr);
reg_id_t scratch_xmm;
/* Search the instruction for an unused xmm register we will use as a temp. */
for (scratch_xmm = DR_REG_START_XMM; scratch_xmm <= DR_REG_STOP_XMM; ++scratch_xmm) {
if ((sg_info.is_evex ||
scratch_xmm != reg_resize_to_opsz(sg_info.mask_reg, OPSZ_16)) &&
scratch_xmm != reg_resize_to_opsz(sg_info.index_reg, OPSZ_16) &&
/* redundant with scatter_src_reg */
scratch_xmm != reg_resize_to_opsz(sg_info.gather_dst_reg, OPSZ_16))
break;
}
/* FIXME i#2985: spill scratch_xmm using a future drreg extension for simd. */
emulated_instr_t emulated_instr;
emulated_instr.size = sizeof(emulated_instr);
emulated_instr.pc = instr_get_app_pc(sg_instr);
emulated_instr.instr = sg_instr;
drmgr_insert_emulation_start(drcontext, bb, sg_instr, &emulated_instr);
if (sg_info.is_evex) {
if (/* AVX-512 */ instr_is_gather(sg_instr)) {
for (uint el = 0; el < no_of_elements; ++el) {
instr_t *skip_label = INSTR_CREATE_label(drcontext);
if (!expand_avx512_scatter_gather_make_test(drcontext, bb, sg_instr, el,
&sg_info, scratch_reg0,
skip_label, orig_app_pc))
goto drx_expand_scatter_gather_exit;
if (!expand_avx512_scatter_gather_extract_scalar_index(
drcontext, bb, sg_instr, el, &sg_info, scratch_xmm, scratch_reg0,
orig_app_pc))
goto drx_expand_scatter_gather_exit;
reg_id_t scalar_index_reg = scratch_reg0;
if (!expand_gather_load_scalar_value(drcontext, bb, sg_instr, &sg_info,
scalar_index_reg, orig_app_pc))
goto drx_expand_scatter_gather_exit;
reg_id_t scalar_value_reg = scratch_reg0;
if (!expand_avx512_gather_insert_scalar_value(drcontext, bb, sg_instr, el,
&sg_info, scalar_value_reg,
scratch_xmm, orig_app_pc))
goto drx_expand_scatter_gather_exit;
if (!expand_avx512_scatter_gather_update_mask(drcontext, bb, sg_instr, el,
&sg_info, scratch_reg0,
orig_app_pc, &allowed))
goto drx_expand_scatter_gather_exit;
MINSERT(bb, sg_instr, skip_label);
}
} else /* AVX-512 instr_is_scatter(sg_instr) */ {
for (uint el = 0; el < no_of_elements; ++el) {
instr_t *skip_label = INSTR_CREATE_label(drcontext);
expand_avx512_scatter_gather_make_test(drcontext, bb, sg_instr, el,
&sg_info, scratch_reg0, skip_label,
orig_app_pc);
if (!expand_avx512_scatter_gather_extract_scalar_index(
drcontext, bb, sg_instr, el, &sg_info, scratch_xmm, scratch_reg0,
orig_app_pc))
goto drx_expand_scatter_gather_exit;
reg_id_t scalar_index_reg = scratch_reg0;
reg_id_t scalar_value_reg = scratch_reg1;
if (!expand_avx512_scatter_extract_scalar_value(
drcontext, bb, sg_instr, el, &sg_info, scratch_xmm,
scalar_value_reg, orig_app_pc))
goto drx_expand_scatter_gather_exit;
if (!expand_avx512_scatter_store_scalar_value(
drcontext, bb, sg_instr, &sg_info, scalar_index_reg,
scalar_value_reg, orig_app_pc))
goto drx_expand_scatter_gather_exit;
if (!expand_avx512_scatter_gather_update_mask(drcontext, bb, sg_instr, el,
&sg_info, scratch_reg0,
orig_app_pc, &allowed))
goto drx_expand_scatter_gather_exit;
MINSERT(bb, sg_instr, skip_label);
}
}
        /* The mask register is zeroed completely when the instruction finishes. */
if (proc_has_feature(FEATURE_AVX512BW)) {
PREXL8(
bb, sg_instr,
INSTR_XL8(INSTR_CREATE_kxorq(drcontext, opnd_create_reg(sg_info.mask_reg),
opnd_create_reg(sg_info.mask_reg),
opnd_create_reg(sg_info.mask_reg)),
orig_app_pc));
} else {
PREXL8(
bb, sg_instr,
INSTR_XL8(INSTR_CREATE_kxorw(drcontext, opnd_create_reg(sg_info.mask_reg),
opnd_create_reg(sg_info.mask_reg),
opnd_create_reg(sg_info.mask_reg)),
orig_app_pc));
}
} else {
/* AVX2 instr_is_gather(sg_instr) */
for (uint el = 0; el < no_of_elements; ++el) {
instr_t *skip_label = INSTR_CREATE_label(drcontext);
if (!expand_avx2_gather_make_test(drcontext, bb, sg_instr, el, &sg_info,
scratch_xmm, scratch_reg0, skip_label,
orig_app_pc))
goto drx_expand_scatter_gather_exit;
if (!expand_avx2_gather_extract_scalar_index(drcontext, bb, sg_instr, el,
&sg_info, scratch_xmm,
scratch_reg0, orig_app_pc))
goto drx_expand_scatter_gather_exit;
reg_id_t scalar_index_reg = scratch_reg0;
if (!expand_gather_load_scalar_value(drcontext, bb, sg_instr, &sg_info,
scalar_index_reg, orig_app_pc))
goto drx_expand_scatter_gather_exit;
reg_id_t scalar_value_reg = scratch_reg0;
if (!expand_avx2_gather_insert_scalar_value(drcontext, bb, sg_instr, el,
&sg_info, scalar_value_reg,
scratch_xmm, orig_app_pc))
goto drx_expand_scatter_gather_exit;
if (!expand_avx2_gather_update_mask(drcontext, bb, sg_instr, el, &sg_info,
scratch_xmm, scratch_reg0, orig_app_pc))
goto drx_expand_scatter_gather_exit;
MINSERT(bb, sg_instr, skip_label);
}
        /* The mask register is zeroed completely when the instruction finishes. */
PREXL8(bb, sg_instr,
INSTR_XL8(INSTR_CREATE_vpxor(drcontext, opnd_create_reg(sg_info.mask_reg),
opnd_create_reg(sg_info.mask_reg),
opnd_create_reg(sg_info.mask_reg)),
orig_app_pc));
}
ASSERT(scratch_reg0 != scratch_reg1,
"Internal error: scratch registers must be different");
if (drreg_unreserve_register(drcontext, bb, sg_instr, scratch_reg0) !=
DRREG_SUCCESS) {
ASSERT(false, "drreg_unreserve_register should not fail");
goto drx_expand_scatter_gather_exit;
}
if (instr_is_scatter(sg_instr)) {
if (drreg_unreserve_register(drcontext, bb, sg_instr, scratch_reg1) !=
DRREG_SUCCESS) {
ASSERT(false, "drreg_unreserve_register should not fail");
goto drx_expand_scatter_gather_exit;
}
}
if (drreg_unreserve_aflags(drcontext, bb, sg_instr) != DRREG_SUCCESS)
goto drx_expand_scatter_gather_exit;
# if VERBOSE
dr_print_instr(drcontext, STDERR, sg_instr, "\tThe instruction\n");
# endif
drmgr_insert_emulation_end(drcontext, bb, sg_instr);
/* Remove and destroy the original scatter/gather. */
instrlist_remove(bb, sg_instr);
# if VERBOSE
dr_fprintf(STDERR, "\twas expanded to the following sequence:\n");
for (instr = instrlist_first(bb); instr != NULL; instr = instr_get_next(instr)) {
dr_print_instr(drcontext, STDERR, instr, "");
}
# endif
if (expanded != NULL)
*expanded = true;
res = true;
drx_expand_scatter_gather_exit:
drvector_delete(&allowed);
return res;
#else /* !PLATFORM_SUPPORTS_SCATTER_GATHER */
/* TODO i#3837: add support for AArch64. */
if (expanded != NULL)
*expanded = false;
return true;
#endif
}
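/* A minimal usage sketch (hypothetical client code): the expansion must run in
 * drmgr's app2app phase, e.g. from a callback registered via
 * drmgr_register_bb_app2app_event:
 *
 *   static dr_emit_flags_t
 *   event_bb_app2app(void *drcontext, void *tag, instrlist_t *bb,
 *                    bool for_trace, bool translating)
 *   {
 *       bool expanded;
 *       if (!drx_expand_scatter_gather(drcontext, bb, &expanded))
 *           DR_ASSERT(false);
 *       return DR_EMIT_DEFAULT;
 *   }
 */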
/***************************************************************************
* RESTORE STATE
*/
#ifdef PLATFORM_SUPPORTS_SCATTER_GATHER
/*
* x86 scatter/gather emulation sequence support
*
 * The following state machines detect restore events that need additional
 * attention by drx to fix the application state on top of the fixes that
 * drreg already makes. For the AVX-512 scatter/gather sequences these are
* instruction windows where a scratch mask is being used, and the windows after
* each scalar load/store but before the destination mask register update. For AVX2,
* the scratch mask is an xmm register and will be handled by drreg directly (future
 * update, xref i#3844).
*
* The state machines allow for instructions like drreg spill/restore and instrumentation
* in between recognized states. This is an approximation and could be broken in many
 * ways, e.g. by a client adding more than DRX_RESTORE_EVENT_SKIP_UNKNOWN_INSTR_MAX
 * instructions as instrumentation, or by altering the emulation sequence's
 * code. A safer way to do this would be along the lines of xref i#3801: if we had
* instruction lists available, we could see and pass down emulation labels instead of
* guessing the sequence based on decoding the code cache.
*
* AVX-512 gather sequence detection example:
*
* DRX_DETECT_RESTORE_AVX512_GATHER_EVENT_STATE_0
* vextracti32x4 {%k0} $0x00 %zmm1 -> %xmm2
* DRX_DETECT_RESTORE_AVX512_GATHER_EVENT_STATE_1
* vpextrd %xmm2 $0x00 -> %ecx
* DRX_DETECT_RESTORE_AVX512_GATHER_EVENT_STATE_2
* mov (%rax,%rcx,4)[4byte] -> %ecx
* DRX_DETECT_RESTORE_AVX512_GATHER_EVENT_STATE_3
* (a) vextracti32x4 {%k0} $0x00 %zmm0 -> %xmm2
* DRX_DETECT_RESTORE_AVX512_GATHER_EVENT_STATE_4
* (a) vpinsrd %xmm2 %ecx $0x00 -> %xmm2
* DRX_DETECT_RESTORE_AVX512_GATHER_EVENT_STATE_5
* (a) vinserti32x4 {%k0} $0x00 %zmm0 %xmm2 -> %zmm0
* DRX_DETECT_RESTORE_AVX512_GATHER_EVENT_STATE_6
* (a) mov $0x00000001 -> %ecx
* DRX_DETECT_RESTORE_AVX512_GATHER_EVENT_STATE_7
* (a) kmovw %k0 -> %edx
* DRX_DETECT_RESTORE_AVX512_GATHER_EVENT_STATE_8
* (a) kmovw %ecx -> %k0
* DRX_DETECT_RESTORE_AVX512_GATHER_EVENT_STATE_9
* (a) (b) kandnw %k0 %k1 -> %k1
* DRX_DETECT_RESTORE_AVX512_GATHER_EVENT_STATE_10
* (b) kmovw %edx -> %k0
* DRX_DETECT_RESTORE_AVX512_GATHER_EVENT_STATE_0
*
* (a): The instruction window where the destination mask state hadn't been updated yet.
* (b): The instruction window where the scratch mask is clobbered w/o support by drreg.
*
* AVX-512 scatter sequence detection example:
*
* DRX_DETECT_RESTORE_AVX512_SCATTER_EVENT_STATE_0
* vextracti32x4 {%k0} $0x00 %zmm1 -> %xmm2
* DRX_DETECT_RESTORE_AVX512_SCATTER_EVENT_STATE_1
* vpextrd %xmm2 $0x00 -> %edx
* DRX_DETECT_RESTORE_AVX512_SCATTER_EVENT_STATE_2
* vextracti32x4 {%k0} $0x00 %zmm0 -> %xmm2
* DRX_DETECT_RESTORE_AVX512_SCATTER_EVENT_STATE_3
* vpextrd %xmm2 $0x00 -> %ebx
* DRX_DETECT_RESTORE_AVX512_SCATTER_EVENT_STATE_4
* mov %ebx -> (%rcx,%rdx,4)[4byte]
* DRX_DETECT_RESTORE_AVX512_SCATTER_EVENT_STATE_5
* (a) mov $0x00000001 -> %edx
* DRX_DETECT_RESTORE_AVX512_SCATTER_EVENT_STATE_6
* (a) kmovw %k0 -> %ebp
* DRX_DETECT_RESTORE_AVX512_SCATTER_EVENT_STATE_7
* (a) kmovw %edx -> %k0
* DRX_DETECT_RESTORE_AVX512_SCATTER_EVENT_STATE_8
* (a) (b) kandnw %k0 %k1 -> %k1
* DRX_DETECT_RESTORE_AVX512_SCATTER_EVENT_STATE_9
* (b) kmovw %ebp -> %k0
* DRX_DETECT_RESTORE_AVX512_SCATTER_EVENT_STATE_0
*
* (a): The instruction window where the destination mask state hadn't been updated yet.
* (b): The instruction window where the scratch mask is clobbered w/o support by drreg.
*
* AVX2 gather sequence detection example:
*
* DRX_DETECT_RESTORE_AVX2_GATHER_EVENT_STATE_0
* vextracti128 %ymm2 $0x00 -> %xmm3
* DRX_DETECT_RESTORE_AVX2_GATHER_EVENT_STATE_1
* vpextrd %xmm3 $0x00 -> %ecx
* DRX_DETECT_RESTORE_AVX2_GATHER_EVENT_STATE_2
* mov (%rax,%rcx,4)[4byte] -> %ecx
* DRX_DETECT_RESTORE_AVX2_GATHER_EVENT_STATE_3
* (a) vextracti128 %ymm0 $0x00 -> %xmm3
* DRX_DETECT_RESTORE_AVX2_GATHER_EVENT_STATE_4
* (a) vpinsrd %xmm3 %ecx $0x00 -> %xmm3
* DRX_DETECT_RESTORE_AVX2_GATHER_EVENT_STATE_5
* (a) vinserti128 %ymm0 %xmm3 $0x00 -> %ymm0
* DRX_DETECT_RESTORE_AVX2_GATHER_EVENT_STATE_6
* (a) xor %ecx %ecx -> %ecx
* DRX_DETECT_RESTORE_AVX2_GATHER_EVENT_STATE_7
* (a) vextracti128 %ymm2 $0x00 -> %xmm3
* DRX_DETECT_RESTORE_AVX2_GATHER_EVENT_STATE_8
* (a) vpinsrd %xmm3 %ecx $0x00 -> %xmm3
* DRX_DETECT_RESTORE_AVX2_GATHER_EVENT_STATE_9
* (a) vinserti128 %ymm2 %xmm3 $0x00 -> %ymm2
* DRX_DETECT_RESTORE_AVX2_GATHER_EVENT_STATE_0
*
* (a): The instruction window where the destination mask state hadn't been updated yet.
*
*/
# define DRX_RESTORE_EVENT_SKIP_UNKNOWN_INSTR_MAX 32
/* States of the AVX-512 gather detection state machine. */
# define DRX_DETECT_RESTORE_AVX512_GATHER_EVENT_STATE_0 0
# define DRX_DETECT_RESTORE_AVX512_GATHER_EVENT_STATE_1 1
# define DRX_DETECT_RESTORE_AVX512_GATHER_EVENT_STATE_2 2
# define DRX_DETECT_RESTORE_AVX512_GATHER_EVENT_STATE_3 3
# define DRX_DETECT_RESTORE_AVX512_GATHER_EVENT_STATE_4 4
# define DRX_DETECT_RESTORE_AVX512_GATHER_EVENT_STATE_5 5
# define DRX_DETECT_RESTORE_AVX512_GATHER_EVENT_STATE_6 6
# define DRX_DETECT_RESTORE_AVX512_GATHER_EVENT_STATE_7 7
# define DRX_DETECT_RESTORE_AVX512_GATHER_EVENT_STATE_8 8
# define DRX_DETECT_RESTORE_AVX512_GATHER_EVENT_STATE_9 9
# define DRX_DETECT_RESTORE_AVX512_GATHER_EVENT_STATE_10 10
/* States of the AVX-512 scatter detection state machine. */
# define DRX_DETECT_RESTORE_AVX512_SCATTER_EVENT_STATE_0 0
# define DRX_DETECT_RESTORE_AVX512_SCATTER_EVENT_STATE_1 1
# define DRX_DETECT_RESTORE_AVX512_SCATTER_EVENT_STATE_2 2
# define DRX_DETECT_RESTORE_AVX512_SCATTER_EVENT_STATE_3 3
# define DRX_DETECT_RESTORE_AVX512_SCATTER_EVENT_STATE_4 4
# define DRX_DETECT_RESTORE_AVX512_SCATTER_EVENT_STATE_5 5
# define DRX_DETECT_RESTORE_AVX512_SCATTER_EVENT_STATE_6 6
# define DRX_DETECT_RESTORE_AVX512_SCATTER_EVENT_STATE_7 7
# define DRX_DETECT_RESTORE_AVX512_SCATTER_EVENT_STATE_8 8
# define DRX_DETECT_RESTORE_AVX512_SCATTER_EVENT_STATE_9 9
/* States of the AVX2 gather detection state machine. */
# define DRX_DETECT_RESTORE_AVX2_GATHER_EVENT_STATE_0 0
# define DRX_DETECT_RESTORE_AVX2_GATHER_EVENT_STATE_1 1
# define DRX_DETECT_RESTORE_AVX2_GATHER_EVENT_STATE_2 2
# define DRX_DETECT_RESTORE_AVX2_GATHER_EVENT_STATE_3 3
# define DRX_DETECT_RESTORE_AVX2_GATHER_EVENT_STATE_4 4
# define DRX_DETECT_RESTORE_AVX2_GATHER_EVENT_STATE_5 5
# define DRX_DETECT_RESTORE_AVX2_GATHER_EVENT_STATE_6 6
# define DRX_DETECT_RESTORE_AVX2_GATHER_EVENT_STATE_7 7
# define DRX_DETECT_RESTORE_AVX2_GATHER_EVENT_STATE_8 8
# define DRX_DETECT_RESTORE_AVX2_GATHER_EVENT_STATE_9 9
typedef struct _drx_state_machine_params_t {
byte *pc;
byte *prev_pc;
/* state machine's state */
int detect_state;
/* detected start pc of destination mask update */
byte *restore_dest_mask_start_pc;
/* detected start pc of scratch mask usage */
byte *restore_scratch_mask_start_pc;
/* counter to allow for skipping unknown instructions */
int skip_unknown_instr_count;
/* detected scratch xmm register for mask update */
reg_id_t the_scratch_xmm;
/* detected gpr register that holds the mask update immediate */
reg_id_t gpr_bit_mask;
/* detected gpr register that holds the app's mask state */
reg_id_t gpr_save_scratch_mask;
/* counter of scalar element in the scatter/gather sequence */
uint scalar_mask_update_no;
    /* temporary scratch gpr for the scatter/gather index */
    reg_id_t gpr_scratch_index;
    /* temporary scratch gpr for the AVX-512 scatter value */
    reg_id_t gpr_scratch_value;
instr_t inst;
dr_restore_state_info_t *info;
scatter_gather_info_t *sg_info;
} drx_state_machine_params_t;
static void
advance_state(int new_detect_state, drx_state_machine_params_t *params)
{
params->detect_state = new_detect_state;
params->skip_unknown_instr_count = 0;
}
/* Resets to the given state once the skip counter reaches the threshold;
 * otherwise it just counts the skipped instruction.
 */
static inline void
skip_unknown_instr_inc(int reset_state, drx_state_machine_params_t *params)
{
if (params->skip_unknown_instr_count++ >= DRX_RESTORE_EVENT_SKIP_UNKNOWN_INSTR_MAX) {
advance_state(reset_state, params);
}
}
/* Decode the code cache and run the state machine over it. The state machine
 * searches the code to determine whether the translation pc lies in one of the
 * instruction windows that need additional handling by drx in order to restore
 * specific state of the application's mask registers. We consider this
 * sufficiently accurate, but it is still an approximation.
 */
static bool
drx_restore_state_scatter_gather(
void *drcontext, dr_restore_state_info_t *info, scatter_gather_info_t *sg_info,
bool (*state_machine_func)(void *drcontext, drx_state_machine_params_t *params))
{
drx_state_machine_params_t params;
params.restore_dest_mask_start_pc = NULL;
params.restore_scratch_mask_start_pc = NULL;
params.detect_state = 0;
params.skip_unknown_instr_count = 0;
params.the_scratch_xmm = DR_REG_NULL;
params.gpr_bit_mask = DR_REG_NULL;
params.gpr_save_scratch_mask = DR_REG_NULL;
params.scalar_mask_update_no = 0;
params.info = info;
params.sg_info = sg_info;
params.pc = params.info->fragment_info.cache_start_pc;
instr_init(drcontext, ¶ms.inst);
/* As the state machine is looking for blocks of code that the fault may hit, the 128
* bytes is a conservative approximation of the block's size, see (a) and (b) above.
*/
while (params.pc <= params.info->raw_mcontext->pc + 128) {
instr_reset(drcontext, ¶ms.inst);
params.prev_pc = params.pc;
params.pc = decode(drcontext, params.pc, ¶ms.inst);
if (params.pc == NULL) {
/* Upon a decoding error we simply give up. */
break;
}
/* If there is a gather or scatter instruction in the code cache, then it is wise
* to assume that this is not an emulated sequence that we need to examine
* further.
*/
if (instr_is_gather(¶ms.inst))
break;
if (instr_is_scatter(¶ms.inst))
break;
if ((*state_machine_func)(drcontext, ¶ms))
break;
}
instr_free(drcontext, ¶ms.inst);
return true;
}
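/* For orientation, a minimal sketch of how a restore-state handler like the ones
 * below is typically hooked up (an assumption based on the public drmgr API; the
 * actual registration lives elsewhere in drx):
 *
 *   drmgr_register_restore_state_ex_event(drx_event_restore_state);
 */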
/* Returns true if done, false otherwise. */
static bool
drx_avx2_gather_sequence_state_machine(void *drcontext,
drx_state_machine_params_t *params)
{
switch (params->detect_state) {
case DRX_DETECT_RESTORE_AVX2_GATHER_EVENT_STATE_0:
if (instr_get_opcode(¶ms->inst) == OP_vextracti128) {
opnd_t dst0 = instr_get_dst(¶ms->inst, 0);
if (opnd_is_reg(dst0)) {
reg_id_t tmp_reg = opnd_get_reg(dst0);
if (!reg_is_strictly_xmm(tmp_reg))
break;
params->the_scratch_xmm = tmp_reg;
advance_state(DRX_DETECT_RESTORE_AVX2_GATHER_EVENT_STATE_1, params);
break;
}
}
/* We don't need to ignore any instructions here, because we are already in
* DRX_DETECT_RESTORE_AVX2_GATHER_EVENT_STATE_0.
*/
break;
case DRX_DETECT_RESTORE_AVX2_GATHER_EVENT_STATE_1:
ASSERT(params->the_scratch_xmm != DR_REG_NULL,
"internal error: expected xmm register to be recorded in state "
"machine.");
if ((params->sg_info->scalar_index_size == OPSZ_4 &&
instr_get_opcode(¶ms->inst) == OP_vpextrd) ||
(params->sg_info->scalar_index_size == OPSZ_8 &&
instr_get_opcode(¶ms->inst) == OP_vpextrq)) {
ASSERT(opnd_is_reg(instr_get_src(¶ms->inst, 0)),
"internal error: unexpected instruction format");
reg_id_t tmp_reg = opnd_get_reg(instr_get_src(¶ms->inst, 0));
if (tmp_reg == params->the_scratch_xmm) {
opnd_t dst0 = instr_get_dst(¶ms->inst, 0);
if (opnd_is_reg(dst0) && reg_is_gpr(opnd_get_reg(dst0))) {
params->the_scratch_xmm = DR_REG_NULL;
params->gpr_scratch_index = opnd_get_reg(dst0);
advance_state(DRX_DETECT_RESTORE_AVX2_GATHER_EVENT_STATE_2, params);
break;
}
}
}
/* Intentionally not else if */
skip_unknown_instr_inc(DRX_DETECT_RESTORE_AVX2_GATHER_EVENT_STATE_0, params);
break;
case DRX_DETECT_RESTORE_AVX2_GATHER_EVENT_STATE_2:
if (!instr_is_reg_spill_or_restore(drcontext, ¶ms->inst, NULL, NULL, NULL,
NULL)) {
if (instr_reads_memory(¶ms->inst)) {
opnd_t src0 = instr_get_src(¶ms->inst, 0);
if (opnd_is_memory_reference(src0)) {
if (opnd_uses_reg(src0, params->gpr_scratch_index)) {
opnd_t dst0 = instr_get_dst(¶ms->inst, 0);
if (opnd_is_reg(dst0) && reg_is_gpr(opnd_get_reg(dst0))) {
params->restore_dest_mask_start_pc = params->pc;
advance_state(DRX_DETECT_RESTORE_AVX2_GATHER_EVENT_STATE_3,
params);
break;
}
}
}
}
}
skip_unknown_instr_inc(DRX_DETECT_RESTORE_AVX2_GATHER_EVENT_STATE_0, params);
break;
case DRX_DETECT_RESTORE_AVX2_GATHER_EVENT_STATE_3:
if (instr_get_opcode(¶ms->inst) == OP_vextracti128) {
opnd_t dst0 = instr_get_dst(¶ms->inst, 0);
if (opnd_is_reg(dst0)) {
reg_id_t tmp_reg = opnd_get_reg(dst0);
if (!reg_is_strictly_xmm(tmp_reg))
break;
params->the_scratch_xmm = tmp_reg;
advance_state(DRX_DETECT_RESTORE_AVX2_GATHER_EVENT_STATE_4, params);
break;
}
}
/* Intentionally not else if */
skip_unknown_instr_inc(DRX_DETECT_RESTORE_AVX2_GATHER_EVENT_STATE_0, params);
break;
case DRX_DETECT_RESTORE_AVX2_GATHER_EVENT_STATE_4:
ASSERT(params->the_scratch_xmm != DR_REG_NULL,
"internal error: expected xmm register to be recorded in state "
"machine.");
if ((params->sg_info->scalar_value_size == OPSZ_4 &&
instr_get_opcode(¶ms->inst) == OP_vpinsrd) ||
(params->sg_info->scalar_value_size == OPSZ_8 &&
instr_get_opcode(¶ms->inst) == OP_vpinsrq)) {
ASSERT(opnd_is_reg(instr_get_dst(¶ms->inst, 0)),
"internal error: unexpected instruction format");
reg_id_t tmp_reg = opnd_get_reg(instr_get_dst(¶ms->inst, 0));
if (tmp_reg == params->the_scratch_xmm) {
params->the_scratch_xmm = DR_REG_NULL;
advance_state(DRX_DETECT_RESTORE_AVX2_GATHER_EVENT_STATE_5, params);
break;
}
}
skip_unknown_instr_inc(DRX_DETECT_RESTORE_AVX2_GATHER_EVENT_STATE_0, params);
break;
case DRX_DETECT_RESTORE_AVX2_GATHER_EVENT_STATE_5:
if (instr_get_opcode(¶ms->inst) == OP_vinserti128) {
ASSERT(opnd_is_reg(instr_get_dst(¶ms->inst, 0)),
"internal error: unexpected instruction format");
reg_id_t tmp_reg = opnd_get_reg(instr_get_dst(¶ms->inst, 0));
if (tmp_reg == params->sg_info->gather_dst_reg) {
advance_state(DRX_DETECT_RESTORE_AVX2_GATHER_EVENT_STATE_6, params);
break;
}
}
skip_unknown_instr_inc(DRX_DETECT_RESTORE_AVX2_GATHER_EVENT_STATE_0, params);
break;
case DRX_DETECT_RESTORE_AVX2_GATHER_EVENT_STATE_6:
if (instr_get_opcode(¶ms->inst) == OP_xor) {
opnd_t dst0 = instr_get_dst(¶ms->inst, 0);
opnd_t src0 = instr_get_src(¶ms->inst, 0);
opnd_t src1 = instr_get_src(¶ms->inst, 1);
if (opnd_is_reg(dst0) && opnd_is_reg(src0) && opnd_is_reg(src1)) {
reg_id_t reg_dst0 = opnd_get_reg(dst0);
reg_id_t reg_src0 = opnd_get_reg(src0);
reg_id_t reg_src1 = opnd_get_reg(src1);
ASSERT(reg_is_gpr(reg_dst0) && reg_is_gpr(reg_src0) &&
reg_is_gpr(reg_src1),
"internal error: unexpected instruction format");
if (reg_dst0 == reg_src0 && reg_src0 == reg_src1) {
params->gpr_bit_mask = reg_dst0;
advance_state(DRX_DETECT_RESTORE_AVX2_GATHER_EVENT_STATE_7, params);
}
}
}
skip_unknown_instr_inc(DRX_DETECT_RESTORE_AVX2_GATHER_EVENT_STATE_0, params);
break;
case DRX_DETECT_RESTORE_AVX2_GATHER_EVENT_STATE_7:
if (instr_get_opcode(¶ms->inst) == OP_vextracti128) {
opnd_t src0 = instr_get_src(¶ms->inst, 0);
if (opnd_is_reg(src0)) {
if (opnd_get_reg(src0) == params->sg_info->mask_reg) {
opnd_t dst0 = instr_get_dst(¶ms->inst, 0);
if (opnd_is_reg(dst0)) {
reg_id_t tmp_reg = opnd_get_reg(dst0);
if (!reg_is_strictly_xmm(tmp_reg))
break;
params->the_scratch_xmm = tmp_reg;
advance_state(DRX_DETECT_RESTORE_AVX2_GATHER_EVENT_STATE_8,
params);
break;
}
}
}
}
skip_unknown_instr_inc(DRX_DETECT_RESTORE_AVX2_GATHER_EVENT_STATE_0, params);
break;
case DRX_DETECT_RESTORE_AVX2_GATHER_EVENT_STATE_8:
ASSERT(params->the_scratch_xmm != DR_REG_NULL,
"internal error: expected xmm register to be recorded in state "
"machine.");
if ((params->sg_info->scalar_value_size == OPSZ_4 &&
instr_get_opcode(¶ms->inst) == OP_vpinsrd) ||
(params->sg_info->scalar_value_size == OPSZ_8 &&
instr_get_opcode(¶ms->inst) == OP_vpinsrq)) {
opnd_t src1 = instr_get_src(¶ms->inst, 1);
if (opnd_is_reg(src1)) {
if (opnd_get_reg(src1) == params->gpr_bit_mask) {
ASSERT(opnd_is_reg(instr_get_dst(¶ms->inst, 0)),
"internal error: unexpected instruction format");
reg_id_t tmp_reg = opnd_get_reg(instr_get_dst(¶ms->inst, 0));
if (tmp_reg == params->the_scratch_xmm) {
advance_state(DRX_DETECT_RESTORE_AVX2_GATHER_EVENT_STATE_9,
params);
break;
}
}
}
}
skip_unknown_instr_inc(DRX_DETECT_RESTORE_AVX2_GATHER_EVENT_STATE_0, params);
break;
case DRX_DETECT_RESTORE_AVX2_GATHER_EVENT_STATE_9:
if (instr_get_opcode(¶ms->inst) == OP_vinserti128) {
ASSERT(opnd_is_reg(instr_get_dst(¶ms->inst, 0)) &&
opnd_is_reg(instr_get_src(¶ms->inst, 0)) &&
opnd_is_reg(instr_get_src(¶ms->inst, 1)),
"internal error: unexpected instruction format");
reg_id_t dst0 = opnd_get_reg(instr_get_dst(¶ms->inst, 0));
reg_id_t src0 = opnd_get_reg(instr_get_src(¶ms->inst, 0));
reg_id_t src1 = opnd_get_reg(instr_get_src(¶ms->inst, 1));
if (src1 == params->the_scratch_xmm) {
if (src0 == params->sg_info->mask_reg) {
if (dst0 == params->sg_info->mask_reg) {
if (params->restore_dest_mask_start_pc <=
params->info->raw_mcontext->pc &&
params->info->raw_mcontext->pc <= params->prev_pc) {
/* Fix the gather's destination mask here and zero out
* the bit that the emulation sequence hadn't done
* before the fault hit.
*/
ASSERT(reg_is_strictly_xmm(params->sg_info->mask_reg) ||
reg_is_strictly_ymm(params->sg_info->mask_reg),
"internal error: unexpected instruction format");
byte val[YMM_REG_SIZE];
if (!reg_get_value_ex(params->sg_info->mask_reg,
params->info->raw_mcontext, val)) {
ASSERT(
false,
"internal error: can't read mcontext's mask value");
}
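                            /* AVX2 gathers use the most significant bit of
                             * each mask element as the per-element mask, so
                             * locate the top byte of the current element and
                             * clear its bit 7 below.
                             */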
uint mask_byte =
opnd_size_in_bytes(params->sg_info->scalar_index_size) *
(params->scalar_mask_update_no + 1) -
1;
val[mask_byte] &= ~(byte)128;
reg_set_value_ex(params->sg_info->mask_reg,
params->info->mcontext, val);
/* We are done. */
return true;
}
params->scalar_mask_update_no++;
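                    /* The element count is the vector width divided by the
                     * wider of the index and value sizes, e.g. qword indices
                     * with dword values yield half as many elements as
                     * dword/dword.
                     */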
uint no_of_elements =
opnd_size_in_bytes(params->sg_info->scatter_gather_size) /
MAX(opnd_size_in_bytes(params->sg_info->scalar_index_size),
opnd_size_in_bytes(params->sg_info->scalar_value_size));
if (params->scalar_mask_update_no > no_of_elements) {
/* Unlikely that something looks identical to an emulation
                         * sequence for this long, but we can safely return here.
*/
return true;
}
advance_state(DRX_DETECT_RESTORE_AVX2_GATHER_EVENT_STATE_0,
params);
break;
}
}
}
}
skip_unknown_instr_inc(DRX_DETECT_RESTORE_AVX2_GATHER_EVENT_STATE_0, params);
break;
default: ASSERT(false, "internal error: invalid state.");
}
return false;
}
/* Returns true if done, false otherwise. */
static bool
drx_avx512_scatter_sequence_state_machine(void *drcontext,
drx_state_machine_params_t *params)
{
switch (params->detect_state) {
case DRX_DETECT_RESTORE_AVX512_SCATTER_EVENT_STATE_0:
if (instr_get_opcode(¶ms->inst) == OP_vextracti32x4) {
opnd_t dst0 = instr_get_dst(¶ms->inst, 0);
if (opnd_is_reg(dst0)) {
reg_id_t tmp_reg = opnd_get_reg(dst0);
if (!reg_is_strictly_xmm(tmp_reg))
break;
params->the_scratch_xmm = tmp_reg;
advance_state(DRX_DETECT_RESTORE_AVX512_SCATTER_EVENT_STATE_1, params);
break;
}
}
/* We don't need to ignore any instructions here, because we are already in
* DRX_DETECT_RESTORE_AVX512_SCATTER_EVENT_STATE_0.
*/
break;
case DRX_DETECT_RESTORE_AVX512_SCATTER_EVENT_STATE_1:
ASSERT(params->the_scratch_xmm != DR_REG_NULL,
"internal error: expected xmm register to be recorded in state "
"machine.");
if ((params->sg_info->scalar_index_size == OPSZ_4 &&
instr_get_opcode(¶ms->inst) == OP_vpextrd) ||
(params->sg_info->scalar_index_size == OPSZ_8 &&
instr_get_opcode(¶ms->inst) == OP_vpextrq)) {
ASSERT(opnd_is_reg(instr_get_src(¶ms->inst, 0)),
"internal error: unexpected instruction format");
reg_id_t tmp_reg = opnd_get_reg(instr_get_src(¶ms->inst, 0));
if (tmp_reg == params->the_scratch_xmm) {
opnd_t dst0 = instr_get_dst(¶ms->inst, 0);
if (opnd_is_reg(dst0) && reg_is_gpr(opnd_get_reg(dst0))) {
params->the_scratch_xmm = DR_REG_NULL;
params->gpr_scratch_index = opnd_get_reg(dst0);
advance_state(DRX_DETECT_RESTORE_AVX512_SCATTER_EVENT_STATE_2,
params);
break;
}
}
}
/* Intentionally not else if */
skip_unknown_instr_inc(DRX_DETECT_RESTORE_AVX512_SCATTER_EVENT_STATE_0, params);
break;
case DRX_DETECT_RESTORE_AVX512_SCATTER_EVENT_STATE_2:
if (instr_get_opcode(¶ms->inst) == OP_vextracti32x4) {
opnd_t dst0 = instr_get_dst(¶ms->inst, 0);
if (opnd_is_reg(dst0)) {
reg_id_t tmp_reg = opnd_get_reg(dst0);
if (!reg_is_strictly_xmm(tmp_reg))
break;
params->the_scratch_xmm = tmp_reg;
advance_state(DRX_DETECT_RESTORE_AVX512_SCATTER_EVENT_STATE_3, params);
break;
}
}
/* Intentionally not else if */
skip_unknown_instr_inc(DRX_DETECT_RESTORE_AVX512_SCATTER_EVENT_STATE_0, params);
break;
case DRX_DETECT_RESTORE_AVX512_SCATTER_EVENT_STATE_3:
ASSERT(params->the_scratch_xmm != DR_REG_NULL,
"internal error: expected xmm register to be recorded in state "
"machine.");
if ((params->sg_info->scalar_value_size == OPSZ_4 &&
instr_get_opcode(¶ms->inst) == OP_vpextrd) ||
(params->sg_info->scalar_value_size == OPSZ_8 &&
instr_get_opcode(¶ms->inst) == OP_vpextrq)) {
ASSERT(opnd_is_reg(instr_get_src(¶ms->inst, 0)),
"internal error: unexpected instruction format");
reg_id_t tmp_reg = opnd_get_reg(instr_get_src(¶ms->inst, 0));
if (tmp_reg == params->the_scratch_xmm) {
opnd_t dst0 = instr_get_dst(¶ms->inst, 0);
if (opnd_is_reg(dst0) && reg_is_gpr(opnd_get_reg(dst0))) {
params->the_scratch_xmm = DR_REG_NULL;
params->gpr_scratch_value = opnd_get_reg(dst0);
advance_state(DRX_DETECT_RESTORE_AVX512_SCATTER_EVENT_STATE_4,
params);
break;
}
}
}
skip_unknown_instr_inc(DRX_DETECT_RESTORE_AVX512_SCATTER_EVENT_STATE_0, params);
break;
case DRX_DETECT_RESTORE_AVX512_SCATTER_EVENT_STATE_4: {
if (!instr_is_reg_spill_or_restore(drcontext, ¶ms->inst, NULL, NULL, NULL,
NULL)) {
if (instr_writes_memory(¶ms->inst)) {
opnd_t dst0 = instr_get_dst(¶ms->inst, 0);
if (opnd_is_memory_reference(dst0)) {
opnd_t src0 = instr_get_src(¶ms->inst, 0);
if (opnd_is_reg(src0) &&
opnd_uses_reg(src0, params->gpr_scratch_value) &&
opnd_uses_reg(dst0, params->gpr_scratch_index)) {
params->restore_dest_mask_start_pc = params->pc;
advance_state(DRX_DETECT_RESTORE_AVX512_SCATTER_EVENT_STATE_5,
params);
break;
}
}
}
}
skip_unknown_instr_inc(DRX_DETECT_RESTORE_AVX512_SCATTER_EVENT_STATE_0, params);
break;
}
case DRX_DETECT_RESTORE_AVX512_SCATTER_EVENT_STATE_5: {
ptr_int_t val;
if (instr_is_mov_constant(¶ms->inst, &val)) {
        /* If zero bits or more than one bit is set, this is not what we're looking for. */
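        /* (val & (val - 1)) clears the lowest set bit, so it is zero iff at most
         * one bit is set: e.g. 0b0100 & 0b0011 == 0, while
         * 0b0110 & 0b0101 == 0b0100 != 0.
         */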
if (val == 0 || (val & (val - 1)) != 0)
break;
opnd_t dst0 = instr_get_dst(¶ms->inst, 0);
if (opnd_is_reg(dst0)) {
reg_id_t tmp_gpr = opnd_get_reg(dst0);
if (reg_is_gpr(tmp_gpr)) {
params->gpr_bit_mask = tmp_gpr;
advance_state(DRX_DETECT_RESTORE_AVX512_SCATTER_EVENT_STATE_6,
params);
break;
}
}
}
skip_unknown_instr_inc(DRX_DETECT_RESTORE_AVX512_SCATTER_EVENT_STATE_0, params);
break;
}
case DRX_DETECT_RESTORE_AVX512_SCATTER_EVENT_STATE_6:
if (instr_get_opcode(¶ms->inst) == OP_kmovw) {
opnd_t src0 = instr_get_src(¶ms->inst, 0);
if (opnd_is_reg(src0) && opnd_get_reg(src0) == DR_REG_K0) {
opnd_t dst0 = instr_get_dst(¶ms->inst, 0);
if (opnd_is_reg(dst0)) {
reg_id_t tmp_gpr = opnd_get_reg(dst0);
if (reg_is_gpr(tmp_gpr)) {
params->gpr_save_scratch_mask = tmp_gpr;
advance_state(DRX_DETECT_RESTORE_AVX512_SCATTER_EVENT_STATE_7,
params);
break;
}
}
}
}
skip_unknown_instr_inc(DRX_DETECT_RESTORE_AVX512_SCATTER_EVENT_STATE_0, params);
break;
case DRX_DETECT_RESTORE_AVX512_SCATTER_EVENT_STATE_7:
ASSERT(params->gpr_bit_mask != DR_REG_NULL,
"internal error: expected gpr register to be recorded in state "
"machine.");
if (instr_get_opcode(¶ms->inst) == OP_kmovw) {
opnd_t src0 = instr_get_src(¶ms->inst, 0);
if (opnd_is_reg(src0) && opnd_get_reg(src0) == params->gpr_bit_mask) {
opnd_t dst0 = instr_get_dst(¶ms->inst, 0);
if (opnd_is_reg(dst0) && opnd_get_reg(dst0) == DR_REG_K0) {
params->restore_scratch_mask_start_pc = params->pc;
advance_state(DRX_DETECT_RESTORE_AVX512_SCATTER_EVENT_STATE_8,
params);
break;
}
}
}
skip_unknown_instr_inc(DRX_DETECT_RESTORE_AVX512_SCATTER_EVENT_STATE_0, params);
break;
case DRX_DETECT_RESTORE_AVX512_SCATTER_EVENT_STATE_8:
if (instr_get_opcode(¶ms->inst) == OP_kandnw) {
opnd_t src0 = instr_get_src(¶ms->inst, 0);
opnd_t src1 = instr_get_src(¶ms->inst, 1);
opnd_t dst0 = instr_get_dst(¶ms->inst, 0);
if (opnd_is_reg(src0) && opnd_get_reg(src0) == DR_REG_K0) {
if (opnd_is_reg(src1) &&
opnd_get_reg(src1) == params->sg_info->mask_reg &&
opnd_is_reg(dst0) &&
opnd_get_reg(dst0) == params->sg_info->mask_reg) {
if (params->restore_dest_mask_start_pc <=
params->info->raw_mcontext->pc &&
params->info->raw_mcontext->pc <= params->prev_pc) {
/* Fix the scatter's destination mask here and zero out
* the bit that the emulation sequence hadn't done
* before the fault hit.
*/
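                        /* Each AVX-512 opmask bit maps to one vector element;
                         * clearing bit scalar_mask_update_no marks the current
                         * element as completed, matching what the interrupted
                         * mask update would have done.
                         */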
params->info->mcontext
->opmask[params->sg_info->mask_reg - DR_REG_K0] &=
~(1 << params->scalar_mask_update_no);
/* We are not done yet, we have to fix up the scratch
* mask as well.
*/
}
                    /* We are counting the scalar store number in the sequence
* here.
*/
params->scalar_mask_update_no++;
uint no_of_elements =
opnd_size_in_bytes(params->sg_info->scatter_gather_size) /
MAX(opnd_size_in_bytes(params->sg_info->scalar_index_size),
opnd_size_in_bytes(params->sg_info->scalar_value_size));
if (params->scalar_mask_update_no > no_of_elements) {
/* Unlikely that something looks identical to an emulation
                     * sequence for this long, but we can safely return here.
*/
return true;
}
advance_state(DRX_DETECT_RESTORE_AVX512_SCATTER_EVENT_STATE_9,
params);
break;
}
}
}
skip_unknown_instr_inc(DRX_DETECT_RESTORE_AVX512_SCATTER_EVENT_STATE_0, params);
break;
case DRX_DETECT_RESTORE_AVX512_SCATTER_EVENT_STATE_9:
if (instr_get_opcode(¶ms->inst) == OP_kmovw) {
opnd_t dst0 = instr_get_dst(¶ms->inst, 0);
if (opnd_is_reg(dst0) && opnd_get_reg(dst0) == DR_REG_K0) {
opnd_t src0 = instr_get_src(¶ms->inst, 0);
if (opnd_is_reg(src0)) {
reg_id_t tmp_gpr = opnd_get_reg(src0);
if (reg_is_gpr(tmp_gpr) &&
params->restore_scratch_mask_start_pc <=
params->info->raw_mcontext->pc &&
params->info->raw_mcontext->pc <= params->prev_pc) {
/* The scratch mask is always k0. This is hard-coded
* in drx. We carefully only update the lowest 16 bits
* because the mask was saved with kmovw.
*/
ASSERT(sizeof(params->info->mcontext->opmask[0]) ==
sizeof(long long),
"internal error: unexpected opmask slot size");
params->info->mcontext->opmask[0] &= ~0xffffLL;
params->info->mcontext->opmask[0] |=
reg_get_value(params->gpr_save_scratch_mask,
params->info->raw_mcontext) &
0xffff;
/* We are done. If we did fix up the scatter's destination
* mask, this already has happened.
*/
return true;
}
}
}
}
skip_unknown_instr_inc(DRX_DETECT_RESTORE_AVX512_SCATTER_EVENT_STATE_0, params);
break;
default: ASSERT(false, "internal error: invalid state.");
}
return false;
}
/* Returns true if done, false otherwise. */
static bool
drx_avx512_gather_sequence_state_machine(void *drcontext,
drx_state_machine_params_t *params)
{
switch (params->detect_state) {
case DRX_DETECT_RESTORE_AVX512_GATHER_EVENT_STATE_0:
if (instr_get_opcode(¶ms->inst) == OP_vextracti32x4) {
opnd_t dst0 = instr_get_dst(¶ms->inst, 0);
if (opnd_is_reg(dst0)) {
reg_id_t tmp_reg = opnd_get_reg(dst0);
if (!reg_is_strictly_xmm(tmp_reg))
break;
params->the_scratch_xmm = tmp_reg;
advance_state(DRX_DETECT_RESTORE_AVX512_GATHER_EVENT_STATE_1, params);
break;
}
}
/* We don't need to ignore any instructions here, because we are already in
* DRX_DETECT_RESTORE_AVX512_GATHER_EVENT_STATE_0.
*/
break;
case DRX_DETECT_RESTORE_AVX512_GATHER_EVENT_STATE_1:
ASSERT(params->the_scratch_xmm != DR_REG_NULL,
"internal error: expected xmm register to be recorded in state "
"machine.");
if ((params->sg_info->scalar_index_size == OPSZ_4 &&
instr_get_opcode(¶ms->inst) == OP_vpextrd) ||
(params->sg_info->scalar_index_size == OPSZ_8 &&
instr_get_opcode(¶ms->inst) == OP_vpextrq)) {
ASSERT(opnd_is_reg(instr_get_src(¶ms->inst, 0)),
"internal error: unexpected instruction format");
reg_id_t tmp_reg = opnd_get_reg(instr_get_src(¶ms->inst, 0));
if (tmp_reg == params->the_scratch_xmm) {
opnd_t dst0 = instr_get_dst(¶ms->inst, 0);
if (opnd_is_reg(dst0) && reg_is_gpr(opnd_get_reg(dst0))) {
params->the_scratch_xmm = DR_REG_NULL;
params->gpr_scratch_index = opnd_get_reg(dst0);
advance_state(DRX_DETECT_RESTORE_AVX512_GATHER_EVENT_STATE_2, params);
break;
}
}
}
/* Intentionally not else if */
skip_unknown_instr_inc(DRX_DETECT_RESTORE_AVX512_GATHER_EVENT_STATE_0, params);
break;
case DRX_DETECT_RESTORE_AVX512_GATHER_EVENT_STATE_2:
if (!instr_is_reg_spill_or_restore(drcontext, ¶ms->inst, NULL, NULL, NULL,
NULL)) {
if (instr_reads_memory(¶ms->inst)) {
opnd_t src0 = instr_get_src(¶ms->inst, 0);
if (opnd_is_memory_reference(src0) &&
opnd_uses_reg(src0, params->gpr_scratch_index)) {
opnd_t dst0 = instr_get_dst(¶ms->inst, 0);
if (opnd_is_reg(dst0) && reg_is_gpr(opnd_get_reg(dst0))) {
params->restore_dest_mask_start_pc = params->pc;
advance_state(DRX_DETECT_RESTORE_AVX512_GATHER_EVENT_STATE_3,
params);
break;
}
}
}
}
skip_unknown_instr_inc(DRX_DETECT_RESTORE_AVX512_GATHER_EVENT_STATE_0, params);
break;
case DRX_DETECT_RESTORE_AVX512_GATHER_EVENT_STATE_3:
if (instr_get_opcode(¶ms->inst) == OP_vextracti32x4) {
opnd_t dst0 = instr_get_dst(¶ms->inst, 0);
if (opnd_is_reg(dst0)) {
reg_id_t tmp_reg = opnd_get_reg(dst0);
if (!reg_is_strictly_xmm(tmp_reg))
break;
params->the_scratch_xmm = tmp_reg;
advance_state(DRX_DETECT_RESTORE_AVX512_GATHER_EVENT_STATE_4, params);
break;
}
}
skip_unknown_instr_inc(DRX_DETECT_RESTORE_AVX512_GATHER_EVENT_STATE_0, params);
break;
case DRX_DETECT_RESTORE_AVX512_GATHER_EVENT_STATE_4:
ASSERT(params->the_scratch_xmm != DR_REG_NULL,
"internal error: expected xmm register to be recorded in state "
"machine.");
if ((params->sg_info->scalar_value_size == OPSZ_4 &&
instr_get_opcode(¶ms->inst) == OP_vpinsrd) ||
(params->sg_info->scalar_value_size == OPSZ_8 &&
instr_get_opcode(¶ms->inst) == OP_vpinsrq)) {
ASSERT(opnd_is_reg(instr_get_dst(¶ms->inst, 0)),
"internal error: unexpected instruction format");
reg_id_t tmp_reg = opnd_get_reg(instr_get_dst(¶ms->inst, 0));
if (tmp_reg == params->the_scratch_xmm) {
advance_state(DRX_DETECT_RESTORE_AVX512_GATHER_EVENT_STATE_5, params);
break;
}
}
skip_unknown_instr_inc(DRX_DETECT_RESTORE_AVX512_GATHER_EVENT_STATE_0, params);
break;
case DRX_DETECT_RESTORE_AVX512_GATHER_EVENT_STATE_5:
if (instr_get_opcode(¶ms->inst) == OP_vinserti32x4) {
ASSERT(opnd_is_reg(instr_get_dst(¶ms->inst, 0)),
"internal error: unexpected instruction format");
reg_id_t tmp_reg = opnd_get_reg(instr_get_dst(¶ms->inst, 0));
if (tmp_reg == params->sg_info->gather_dst_reg) {
advance_state(DRX_DETECT_RESTORE_AVX512_GATHER_EVENT_STATE_6, params);
break;
}
}
skip_unknown_instr_inc(DRX_DETECT_RESTORE_AVX512_GATHER_EVENT_STATE_0, params);
break;
case DRX_DETECT_RESTORE_AVX512_GATHER_EVENT_STATE_6: {
ptr_int_t val;
if (instr_is_mov_constant(¶ms->inst, &val)) {
        /* If zero bits or more than one bit is set, this is not what we're looking for. */
if (val == 0 || (val & (val - 1)) != 0)
break;
opnd_t dst0 = instr_get_dst(¶ms->inst, 0);
if (opnd_is_reg(dst0)) {
reg_id_t tmp_gpr = opnd_get_reg(dst0);
if (reg_is_gpr(tmp_gpr)) {
params->gpr_bit_mask = tmp_gpr;
advance_state(DRX_DETECT_RESTORE_AVX512_GATHER_EVENT_STATE_7, params);
break;
}
}
}
skip_unknown_instr_inc(DRX_DETECT_RESTORE_AVX512_GATHER_EVENT_STATE_0, params);
break;
}
case DRX_DETECT_RESTORE_AVX512_GATHER_EVENT_STATE_7:
if (instr_get_opcode(¶ms->inst) == OP_kmovw) {
opnd_t src0 = instr_get_src(¶ms->inst, 0);
if (opnd_is_reg(src0) && opnd_get_reg(src0) == DR_REG_K0) {
opnd_t dst0 = instr_get_dst(¶ms->inst, 0);
if (opnd_is_reg(dst0)) {
reg_id_t tmp_gpr = opnd_get_reg(dst0);
if (reg_is_gpr(tmp_gpr)) {
params->gpr_save_scratch_mask = tmp_gpr;
advance_state(DRX_DETECT_RESTORE_AVX512_GATHER_EVENT_STATE_8,
params);
break;
}
}
}
}
skip_unknown_instr_inc(DRX_DETECT_RESTORE_AVX512_GATHER_EVENT_STATE_0, params);
break;
case DRX_DETECT_RESTORE_AVX512_GATHER_EVENT_STATE_8:
ASSERT(params->gpr_bit_mask != DR_REG_NULL,
"internal error: expected gpr register to be recorded in state "
"machine.");
if (instr_get_opcode(¶ms->inst) == OP_kmovw) {
opnd_t src0 = instr_get_src(¶ms->inst, 0);
if (opnd_is_reg(src0) && opnd_get_reg(src0) == params->gpr_bit_mask) {
opnd_t dst0 = instr_get_dst(¶ms->inst, 0);
if (opnd_is_reg(dst0) && opnd_get_reg(dst0) == DR_REG_K0) {
params->restore_scratch_mask_start_pc = params->pc;
advance_state(DRX_DETECT_RESTORE_AVX512_GATHER_EVENT_STATE_9, params);
break;
}
}
}
skip_unknown_instr_inc(DRX_DETECT_RESTORE_AVX512_GATHER_EVENT_STATE_0, params);
break;
case DRX_DETECT_RESTORE_AVX512_GATHER_EVENT_STATE_9:
if (instr_get_opcode(¶ms->inst) == OP_kandnw) {
opnd_t src0 = instr_get_src(¶ms->inst, 0);
opnd_t src1 = instr_get_src(¶ms->inst, 1);
opnd_t dst0 = instr_get_dst(¶ms->inst, 0);
if (opnd_is_reg(src0) && opnd_get_reg(src0) == DR_REG_K0) {
if (opnd_is_reg(src1) &&
opnd_get_reg(src1) == params->sg_info->mask_reg) {
if (opnd_is_reg(dst0) &&
opnd_get_reg(dst0) == params->sg_info->mask_reg) {
if (params->restore_dest_mask_start_pc <=
params->info->raw_mcontext->pc &&
params->info->raw_mcontext->pc <= params->prev_pc) {
/* Fix the gather's destination mask here and zero out
* the bit that the emulation sequence hadn't done
* before the fault hit.
*/
params->info->mcontext
->opmask[params->sg_info->mask_reg - DR_REG_K0] &=
~(1 << params->scalar_mask_update_no);
/* We are not done yet, we have to fix up the scratch
* mask as well.
*/
}
/* We are counting the scalar load number in the sequence
* here.
*/
params->scalar_mask_update_no++;
uint no_of_elements =
opnd_size_in_bytes(params->sg_info->scatter_gather_size) /
MAX(opnd_size_in_bytes(params->sg_info->scalar_index_size),
opnd_size_in_bytes(params->sg_info->scalar_value_size));
if (params->scalar_mask_update_no > no_of_elements) {
/* Unlikely that something looks identical to an emulation
                         * sequence for this long, but we can safely return here.
*/
return true;
}
advance_state(DRX_DETECT_RESTORE_AVX512_GATHER_EVENT_STATE_10,
params);
break;
}
}
}
}
skip_unknown_instr_inc(DRX_DETECT_RESTORE_AVX512_GATHER_EVENT_STATE_0, params);
break;
case DRX_DETECT_RESTORE_AVX512_GATHER_EVENT_STATE_10:
if (instr_get_opcode(¶ms->inst) == OP_kmovw) {
opnd_t dst0 = instr_get_dst(¶ms->inst, 0);
if (opnd_is_reg(dst0) && opnd_get_reg(dst0) == DR_REG_K0) {
opnd_t src0 = instr_get_src(¶ms->inst, 0);
if (opnd_is_reg(src0)) {
reg_id_t tmp_gpr = opnd_get_reg(src0);
if (reg_is_gpr(tmp_gpr)) {
if (params->restore_scratch_mask_start_pc <=
params->info->raw_mcontext->pc &&
params->info->raw_mcontext->pc <= params->prev_pc) {
/* The scratch mask is always k0. This is hard-coded
* in drx. We carefully only update the lowest 16 bits
* because the mask was saved with kmovw.
*/
ASSERT(sizeof(params->info->mcontext->opmask[0]) ==
sizeof(long long),
"internal error: unexpected opmask slot size");
params->info->mcontext->opmask[0] &= ~0xffffLL;
params->info->mcontext->opmask[0] |=
reg_get_value(params->gpr_save_scratch_mask,
params->info->raw_mcontext) &
0xffff;
/* We are done. If we did fix up the gather's destination
* mask, this already has happened.
*/
return true;
}
}
}
}
}
skip_unknown_instr_inc(DRX_DETECT_RESTORE_AVX512_GATHER_EVENT_STATE_0, params);
break;
default: ASSERT(false, "internal error: invalid state.");
}
return false;
}
static bool
drx_restore_state_for_avx512_gather(void *drcontext, dr_restore_state_info_t *info,
scatter_gather_info_t *sg_info)
{
return drx_restore_state_scatter_gather(drcontext, info, sg_info,
drx_avx512_gather_sequence_state_machine);
}
static bool
drx_restore_state_for_avx512_scatter(void *drcontext, dr_restore_state_info_t *info,
scatter_gather_info_t *sg_info)
{
return drx_restore_state_scatter_gather(drcontext, info, sg_info,
drx_avx512_scatter_sequence_state_machine);
}
static bool
drx_restore_state_for_avx2_gather(void *drcontext, dr_restore_state_info_t *info,
scatter_gather_info_t *sg_info)
{
return drx_restore_state_scatter_gather(drcontext, info, sg_info,
drx_avx2_gather_sequence_state_machine);
}
static bool
drx_event_restore_state(void *drcontext, bool restore_memory,
dr_restore_state_info_t *info)
{
instr_t inst;
bool success = true;
if (info->fragment_info.cache_start_pc == NULL)
return true; /* fault not in cache */
if (!expand_scatter_gather_drreg_initialized) {
        /* Nothing to do if nobody has ever called expand_scatter_gather(). */
return true;
}
if (!info->fragment_info.app_code_consistent) {
/* Can't verify application code.
* XXX i#2985: is it better to keep searching?
*/
return true;
}
instr_init(drcontext, &inst);
byte *pc = decode(drcontext, dr_fragment_app_pc(info->fragment_info.tag), &inst);
if (pc != NULL) {
scatter_gather_info_t sg_info;
get_scatter_gather_info(&inst, &sg_info);
if (instr_is_gather(&inst)) {
if (sg_info.is_evex) {
success = success &&
drx_restore_state_for_avx512_gather(drcontext, info, &sg_info);
} else {
success = success &&
drx_restore_state_for_avx2_gather(drcontext, info, &sg_info);
}
} else if (instr_is_scatter(&inst)) {
success = success &&
drx_restore_state_for_avx512_scatter(drcontext, info, &sg_info);
}
}
instr_free(drcontext, &inst);
return success;
}
#endif
| 1 | 22,700 | `ARM_32` is what clients define as an input to DR, and is not always defined internally: we use just `ARM` to mean AArch32. | DynamoRIO-dynamorio | c |
@@ -38,8 +38,11 @@ import javax.annotation.Nullable;
/** A field declaration wrapper around a Discovery Schema. */
public class DiscoveryField implements FieldModel, TypeModel {
private final List<DiscoveryField> properties;
+ // Dereferenced schema for use rendering type names and determining properties, type, and format.
private final Schema schema;
- private final Schema originalSchema; // Not dereferenced schema.
+
+ // Not dereferenced schema; used in rendering this FieldModel's parameter name.
+ private final Schema originalSchema;
private final DiscoApiModel apiModel;
/* Create a FieldModel object from a non-null Schema object, and internally dereference the input schema. */ | 1 | /* Copyright 2017 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.google.api.codegen.config;
import com.google.api.codegen.discogapic.transformer.DiscoGapicParser;
import com.google.api.codegen.discovery.Document;
import com.google.api.codegen.discovery.Method;
import com.google.api.codegen.discovery.Schema;
import com.google.api.codegen.discovery.Schema.Format;
import com.google.api.codegen.discovery.Schema.Type;
import com.google.api.codegen.transformer.FeatureConfig;
import com.google.api.codegen.transformer.ImportTypeTable;
import com.google.api.codegen.transformer.SurfaceNamer;
import com.google.api.codegen.util.Name;
import com.google.api.codegen.util.TypeName;
import com.google.api.tools.framework.model.Oneof;
import com.google.api.tools.framework.model.TypeRef.Cardinality;
import com.google.common.base.Preconditions;
import com.google.common.collect.ImmutableList;
import java.util.LinkedList;
import java.util.List;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import javax.annotation.Nullable;
/** A field declaration wrapper around a Discovery Schema. */
public class DiscoveryField implements FieldModel, TypeModel {
private final List<DiscoveryField> properties;
private final Schema schema;
private final Schema originalSchema; // Not dereferenced schema.
private final DiscoApiModel apiModel;
/* Create a FieldModel object from a non-null Schema object, and internally dereference the input schema. */
private DiscoveryField(Schema schema, DiscoApiModel apiModel) {
Preconditions.checkNotNull(schema);
this.originalSchema = schema;
this.schema = schema.dereference();
this.apiModel = apiModel;
ImmutableList.Builder<DiscoveryField> propertiesBuilder = ImmutableList.builder();
for (Schema child : this.schema.properties().values()) {
propertiesBuilder.add(DiscoveryField.create(child, apiModel));
}
this.properties = propertiesBuilder.build();
}
/* Create a FieldModel object from a non-null Schema object. */
public static DiscoveryField create(Schema schema, DiscoApiModel rootApiModel) {
Preconditions.checkNotNull(schema);
Preconditions.checkNotNull(rootApiModel);
return new DiscoveryField(schema, rootApiModel);
}
/* @return the underlying Discovery Schema. */
public Schema getDiscoveryField() {
return schema;
}
@Override
public String getSimpleName() {
String name = schema.getIdentifier();
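    // Discovery identifiers may contain underscores; normalize the pieces to lowerCamel.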
String[] pieces = name.split("_");
return Name.anyCamel(pieces).toLowerCamel();
}
@Override
public String getFullName() {
return DiscoGapicParser.getSchemaNameAsParameter(originalSchema).toUpperCamel();
}
@Override
public String getNameAsParameter() {
return getNameAsParameterName().toLowerCamel();
}
@Override
public Name getNameAsParameterName() {
return DiscoGapicParser.getSchemaNameAsParameter(originalSchema);
}
@Override
public String getTypeFullName() {
return schema.getIdentifier();
}
@Override
public boolean isMap() {
return false;
}
@Override
public FieldModel getMapKeyField() {
throw new IllegalArgumentException("Discovery model types have no map keys.");
}
@Override
public FieldModel getMapValueField() {
throw new IllegalArgumentException("Discovery model types have no map values.");
}
@Override
public boolean isMessage() {
return !isPrimitiveType();
}
@Override
public boolean isRequired() {
return schema.required();
}
@Override
public boolean isRepeated() {
return schema.type() == Type.ARRAY;
}
@Override
public boolean mayBeInResourceName() {
// A ResourceName will only contain path parameters.
return schema.isPathParam();
}
@Override
public String getParentFullName() {
String parentName;
if (schema.parent() instanceof Method) {
parentName = DiscoGapicParser.getRequestName((Method) schema.parent()).toUpperCamel();
} else if (schema.parent() instanceof Schema) {
parentName = Name.anyCamel(((Schema) schema.parent()).getIdentifier()).toUpperCamel();
} else if (schema.parent() instanceof Document) {
parentName = ((Document) schema.parent()).name();
} else {
parentName = "";
}
return ResourceNameMessageConfig.getFullyQualifiedMessageName(
apiModel.getDefaultPackageName(), parentName);
}
@Override
public String getParentSimpleName() {
return schema.parent().id();
}
@Override
public TypeName getParentTypeName(ImportTypeTable typeTable) {
if (schema.parent() instanceof Schema) {
DiscoveryField parent = DiscoveryField.create((Schema) schema.parent(), apiModel);
return typeTable.getTypeTable().getTypeName(typeTable.getFullNameFor((FieldModel) parent));
}
return typeTable.getTypeTable().getTypeName(typeTable.getFullNameFor((FieldModel) this));
}
@Override
public Cardinality getCardinality() {
throw new IllegalArgumentException("Discovery model types have no defined Cardinality.");
}
@Override
public boolean isEnum() {
// TODO(andrealin): implement.
return false;
}
@Override
public boolean isPrimitive() {
return schema.items() == null && schema.type() != Type.OBJECT;
}
@Override
  /* Get the description of the element scoped to the visibility as currently set in the model. */
public String getScopedDocumentation() {
return schema.description();
}
@Override
public boolean isString() {
return schema.type().equals(Type.STRING);
}
@Override
public boolean isBytes() {
return schema.type().equals(Type.ANY)
|| (schema.type().equals(Type.STRING) && schema.format().equals(Format.BYTE));
}
@Override
public String getKind() {
return schema.type().toString();
}
@Nullable
@Override
public Oneof getOneof() {
return null;
}
@Override
public List<String> getPagedResponseResourceMethods(
FeatureConfig featureConfig, FieldConfig startingFieldConfig, SurfaceNamer namer) {
List<String> methodNames = new LinkedList<>();
for (FieldModel field : startingFieldConfig.getFieldPath()) {
methodNames.add(0, namer.getFieldGetFunctionName(field));
}
return ImmutableList.copyOf(methodNames);
}
@Override
public void validateValue(String value) {
switch (schema.type()) {
case BOOLEAN:
String lowerCaseValue = value.toLowerCase();
if (lowerCaseValue.equals("true") || lowerCaseValue.equals("false")) {
return;
}
break;
case NUMBER:
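        // Optional sign, optional integral part ending in a dot, then digits, e.g. "-3.14" or "42".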
if (Pattern.matches("[+-]?([0-9]*[.])?[0-9]+", value)) {
return;
}
break;
case INTEGER:
if (Pattern.matches("[+-]?[0-9]+", value)) {
return;
}
break;
case STRING:
switch (schema.format()) {
case INT64:
case UINT64:
if (Pattern.matches("[+-]?[0-9]+", value)) {
return;
}
break;
default:
Matcher matcher = Pattern.compile("([^\\\"']*)").matcher(value);
if (matcher.matches()) {
return;
}
break;
        }
        break;
default:
// Throw an exception if a value is unsupported for the given type.
throw new IllegalArgumentException(
"Tried to assign value for unsupported Schema type "
+ schema.type()
+ ", format "
+ schema.format()
+ "; value "
+ value);
}
throw new IllegalArgumentException(
"Could not assign value '"
+ value
+ "' to type "
+ schema.type()
+ ", format "
+ schema.format());
}
@Override
public List<DiscoveryField> getFields() {
return properties;
}
@Override
public DiscoveryField getField(String key) {
for (DiscoveryField field : getFields()) {
if (field.getNameAsParameter().equals(key)) {
return field;
}
}
Schema parentTypeSchema = getDiscoveryField();
List<Schema> pathToKeySchema = parentTypeSchema.findChild(key);
return DiscoveryField.create(pathToKeySchema.get(pathToKeySchema.size() - 1), apiModel);
}
@Override
// Schemas are immutable, so this is just the identity function.
public TypeModel makeOptional() {
return this;
}
@Override
public String getPrimitiveTypeName() {
Preconditions.checkArgument(isPrimitiveType());
switch (schema.type()) {
case INTEGER:
switch (schema.format()) {
case UINT32:
return "uint32";
default:
return "int32";
}
case NUMBER:
switch (schema.format()) {
case FLOAT:
return "float";
case DOUBLE:
default:
return "double";
}
case BOOLEAN:
return "bool";
case STRING:
if (schema.format() == null) {
return "string";
}
switch (schema.format()) {
case BYTE:
return "bytes";
case INT64:
return "sint64";
case UINT64:
return "uint64";
default:
return "string";
}
default:
return null;
}
}
private boolean isPrimitiveType() {
return schema.type().equals(Type.BOOLEAN)
|| schema.type().equals(Type.INTEGER)
|| schema.type().equals(Type.NUMBER)
|| schema.type().equals(Type.STRING);
}
@Override
public boolean isBooleanType() {
return schema.type().equals(Type.BOOLEAN);
}
@Override
public boolean isStringType() {
return schema.type().equals(Type.STRING);
}
@Override
public boolean isFloatType() {
return schema.type().equals(Type.NUMBER) && schema.format().equals(Format.FLOAT);
}
@Override
public boolean isBytesType() {
return schema.type().equals(Type.STRING) && schema.format().equals(Format.BYTE);
}
@Override
public boolean isDoubleType() {
return schema.type().equals(Type.NUMBER) && schema.format().equals(Format.DOUBLE);
}
@Override
public String getTypeName() {
if (isPrimitiveType()) {
return getPrimitiveTypeName();
}
switch (schema.type()) {
case ARRAY:
return "list";
default:
return "message";
}
}
@Override
public DiscoveryField getType() {
return this;
}
@Override
public boolean isEmptyType() {
return schema.getIdentifier().equals("Empty")
&& schema.type().equals(Type.OBJECT)
&& (schema.properties() == null || schema.properties().size() == 0);
}
@Override
public OneofConfig getOneOfConfig(String fieldName) {
return null;
}
@Override
public int hashCode() {
return 5 + 31 * schema.hashCode();
}
@Override
public String toString() {
return String.format("Discovery FieldModel: {%s}", schema.toString());
}
@Override
public boolean equals(Object o) {
return o != null
&& o instanceof DiscoveryField
&& ((DiscoveryField) o).schema.equals(this.schema);
}
}
| 1 | 25,027 | s/for use/to use for/ | googleapis-gapic-generator | java |
@@ -60,7 +60,7 @@ class UserCreatedEventProjector
'language' => $event->getLanguage()->getCode(),
'password' => $event->getPassword()->getValue(),
'is_active' => $event->isActive(),
- 'avatar_id' => $event->getAvatarId() ? $event->getAvatarId()->getValue() : null,
+ 'avatar_filename' => $event->getAvatarFilename() ? $event->getAvatarFilename()->getValue() : null,
],
[
'is_active' => \PDO::PARAM_BOOL, | 1 | <?php
/**
* Copyright © Bold Brand Commerce Sp. z o.o. All rights reserved.
* See LICENSE.txt for license details.
*/
declare(strict_types = 1);
namespace Ergonode\Account\Persistence\Dbal\Projector\User;
use Doctrine\DBAL\Connection;
use Doctrine\DBAL\DBALException;
use Ergonode\Account\Domain\Event\User\UserCreatedEvent;
use JMS\Serializer\SerializerInterface;
/**
*/
class UserCreatedEventProjector
{
private const TABLE = 'users';
/**
* @var Connection
*/
private Connection $connection;
/**
* @var SerializerInterface
*/
private SerializerInterface $serializer;
/**
* @param Connection $connection
* @param SerializerInterface $serializer
*/
public function __construct(Connection $connection, SerializerInterface $serializer)
{
$this->connection = $connection;
$this->serializer = $serializer;
}
/**
* @param UserCreatedEvent $event
*
* @throws DBALException
*/
public function __invoke(UserCreatedEvent $event): void
{
$this->connection->insert(
self::TABLE,
[
'id' => $event->getAggregateId()->getValue(),
'first_name' => $event->getFirstName(),
'last_name' => $event->getLastName(),
'username' => $event->getEmail(),
'role_id' => $event->getRoleId()->getValue(),
'language_privileges_collection' =>
$this->serializer->serialize($event->getLanguagePrivilegesCollection(), 'json'),
'language' => $event->getLanguage()->getCode(),
'password' => $event->getPassword()->getValue(),
'is_active' => $event->isActive(),
'avatar_id' => $event->getAvatarId() ? $event->getAvatarId()->getValue() : null,
],
[
'is_active' => \PDO::PARAM_BOOL,
]
);
}
}
| 1 | 8,712 | ` $event->getAvatarFilename()` this function return `string` or `null`. In this place ` $event->getAvatarFilename()->getValue()` return `Fatal error ` | ergonode-backend | php |
@@ -34,7 +34,7 @@ import (
var (
// ErrPersistenceLimitExceeded is the error indicating QPS limit reached.
- ErrPersistenceLimitExceeded = serviceerror.NewResourceExhausted("Persistence Max QPS Reached.")
+ ErrPersistenceLimitExceeded = serviceerror.NewUnavailable("Persistence Max QPS Reached.")
)
type ( | 1 | // The MIT License
//
// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved.
//
// Copyright (c) 2020 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
package persistence
import (
commonpb "go.temporal.io/api/common/v1"
"go.temporal.io/api/serviceerror"
"go.temporal.io/server/common/log"
"go.temporal.io/server/common/quotas"
)
var (
// ErrPersistenceLimitExceeded is the error indicating QPS limit reached.
ErrPersistenceLimitExceeded = serviceerror.NewResourceExhausted("Persistence Max QPS Reached.")
)
type (
shardRateLimitedPersistenceClient struct {
rateLimiter quotas.RateLimiter
persistence ShardManager
logger log.Logger
}
executionRateLimitedPersistenceClient struct {
rateLimiter quotas.RateLimiter
persistence ExecutionManager
logger log.Logger
}
taskRateLimitedPersistenceClient struct {
rateLimiter quotas.RateLimiter
persistence TaskManager
logger log.Logger
}
metadataRateLimitedPersistenceClient struct {
rateLimiter quotas.RateLimiter
persistence MetadataManager
logger log.Logger
}
clusterMetadataRateLimitedPersistenceClient struct {
rateLimiter quotas.RateLimiter
persistence ClusterMetadataManager
logger log.Logger
}
queueRateLimitedPersistenceClient struct {
rateLimiter quotas.RateLimiter
persistence Queue
logger log.Logger
}
)
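// Compile-time assertions that each rate-limited wrapper implements its
// corresponding persistence interface.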
var _ ShardManager = (*shardRateLimitedPersistenceClient)(nil)
var _ ExecutionManager = (*executionRateLimitedPersistenceClient)(nil)
var _ TaskManager = (*taskRateLimitedPersistenceClient)(nil)
var _ MetadataManager = (*metadataRateLimitedPersistenceClient)(nil)
var _ ClusterMetadataManager = (*clusterMetadataRateLimitedPersistenceClient)(nil)
var _ Queue = (*queueRateLimitedPersistenceClient)(nil)
// NewShardPersistenceRateLimitedClient creates a client to manage shards
func NewShardPersistenceRateLimitedClient(persistence ShardManager, rateLimiter quotas.RateLimiter, logger log.Logger) ShardManager {
return &shardRateLimitedPersistenceClient{
persistence: persistence,
rateLimiter: rateLimiter,
logger: logger,
}
}
// NewExecutionPersistenceRateLimitedClient creates a client to manage executions
func NewExecutionPersistenceRateLimitedClient(persistence ExecutionManager, rateLimiter quotas.RateLimiter, logger log.Logger) ExecutionManager {
return &executionRateLimitedPersistenceClient{
persistence: persistence,
rateLimiter: rateLimiter,
logger: logger,
}
}
// NewTaskPersistenceRateLimitedClient creates a client to manage tasks
func NewTaskPersistenceRateLimitedClient(persistence TaskManager, rateLimiter quotas.RateLimiter, logger log.Logger) TaskManager {
return &taskRateLimitedPersistenceClient{
persistence: persistence,
rateLimiter: rateLimiter,
logger: logger,
}
}
// NewMetadataPersistenceRateLimitedClient creates a MetadataManager client to manage metadata
func NewMetadataPersistenceRateLimitedClient(persistence MetadataManager, rateLimiter quotas.RateLimiter, logger log.Logger) MetadataManager {
return &metadataRateLimitedPersistenceClient{
persistence: persistence,
rateLimiter: rateLimiter,
logger: logger,
}
}
// NewClusterMetadataPersistenceRateLimitedClient creates a ClusterMetadataManager client to manage cluster metadata
func NewClusterMetadataPersistenceRateLimitedClient(persistence ClusterMetadataManager, rateLimiter quotas.RateLimiter, logger log.Logger) ClusterMetadataManager {
return &clusterMetadataRateLimitedPersistenceClient{
persistence: persistence,
rateLimiter: rateLimiter,
logger: logger,
}
}
// NewQueuePersistenceRateLimitedClient creates a client to manage queue
func NewQueuePersistenceRateLimitedClient(persistence Queue, rateLimiter quotas.RateLimiter, logger log.Logger) Queue {
return &queueRateLimitedPersistenceClient{
persistence: persistence,
rateLimiter: rateLimiter,
logger: logger,
}
}
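// A minimal usage sketch (assuming a concrete ShardManager, a quotas.RateLimiter,
// and a log.Logger constructed elsewhere):
//
//	limited := NewShardPersistenceRateLimitedClient(shardMgr, limiter, logger)
//	if err := limited.CreateShard(&CreateShardRequest{}); err == ErrPersistenceLimitExceeded {
//		// back off and retry
//	}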
func (p *shardRateLimitedPersistenceClient) GetName() string {
return p.persistence.GetName()
}
func (p *shardRateLimitedPersistenceClient) CreateShard(request *CreateShardRequest) error {
if ok := p.rateLimiter.Allow(); !ok {
return ErrPersistenceLimitExceeded
}
err := p.persistence.CreateShard(request)
return err
}
func (p *shardRateLimitedPersistenceClient) GetShard(request *GetShardRequest) (*GetShardResponse, error) {
if ok := p.rateLimiter.Allow(); !ok {
return nil, ErrPersistenceLimitExceeded
}
response, err := p.persistence.GetShard(request)
return response, err
}
func (p *shardRateLimitedPersistenceClient) UpdateShard(request *UpdateShardRequest) error {
if ok := p.rateLimiter.Allow(); !ok {
return ErrPersistenceLimitExceeded
}
err := p.persistence.UpdateShard(request)
return err
}
func (p *shardRateLimitedPersistenceClient) Close() {
p.persistence.Close()
}
func (p *executionRateLimitedPersistenceClient) GetName() string {
return p.persistence.GetName()
}
func (p *executionRateLimitedPersistenceClient) CreateWorkflowExecution(request *CreateWorkflowExecutionRequest) (*CreateWorkflowExecutionResponse, error) {
if ok := p.rateLimiter.Allow(); !ok {
return nil, ErrPersistenceLimitExceeded
}
response, err := p.persistence.CreateWorkflowExecution(request)
return response, err
}
func (p *executionRateLimitedPersistenceClient) GetWorkflowExecution(request *GetWorkflowExecutionRequest) (*GetWorkflowExecutionResponse, error) {
if ok := p.rateLimiter.Allow(); !ok {
return nil, ErrPersistenceLimitExceeded
}
response, err := p.persistence.GetWorkflowExecution(request)
return response, err
}
func (p *executionRateLimitedPersistenceClient) UpdateWorkflowExecution(request *UpdateWorkflowExecutionRequest) (*UpdateWorkflowExecutionResponse, error) {
if ok := p.rateLimiter.Allow(); !ok {
return nil, ErrPersistenceLimitExceeded
}
resp, err := p.persistence.UpdateWorkflowExecution(request)
return resp, err
}
func (p *executionRateLimitedPersistenceClient) ConflictResolveWorkflowExecution(request *ConflictResolveWorkflowExecutionRequest) (*ConflictResolveWorkflowExecutionResponse, error) {
if ok := p.rateLimiter.Allow(); !ok {
return nil, ErrPersistenceLimitExceeded
}
response, err := p.persistence.ConflictResolveWorkflowExecution(request)
return response, err
}
func (p *executionRateLimitedPersistenceClient) DeleteWorkflowExecution(request *DeleteWorkflowExecutionRequest) error {
if ok := p.rateLimiter.Allow(); !ok {
return ErrPersistenceLimitExceeded
}
err := p.persistence.DeleteWorkflowExecution(request)
return err
}
func (p *executionRateLimitedPersistenceClient) DeleteCurrentWorkflowExecution(request *DeleteCurrentWorkflowExecutionRequest) error {
if ok := p.rateLimiter.Allow(); !ok {
return ErrPersistenceLimitExceeded
}
err := p.persistence.DeleteCurrentWorkflowExecution(request)
return err
}
func (p *executionRateLimitedPersistenceClient) GetCurrentExecution(request *GetCurrentExecutionRequest) (*GetCurrentExecutionResponse, error) {
if ok := p.rateLimiter.Allow(); !ok {
return nil, ErrPersistenceLimitExceeded
}
response, err := p.persistence.GetCurrentExecution(request)
return response, err
}
func (p *executionRateLimitedPersistenceClient) ListConcreteExecutions(request *ListConcreteExecutionsRequest) (*ListConcreteExecutionsResponse, error) {
if ok := p.rateLimiter.Allow(); !ok {
return nil, ErrPersistenceLimitExceeded
}
response, err := p.persistence.ListConcreteExecutions(request)
return response, err
}
func (p *executionRateLimitedPersistenceClient) AddTasks(request *AddTasksRequest) error {
if ok := p.rateLimiter.Allow(); !ok {
return ErrPersistenceLimitExceeded
}
err := p.persistence.AddTasks(request)
return err
}
func (p *executionRateLimitedPersistenceClient) GetTransferTask(request *GetTransferTaskRequest) (*GetTransferTaskResponse, error) {
if ok := p.rateLimiter.Allow(); !ok {
return nil, ErrPersistenceLimitExceeded
}
response, err := p.persistence.GetTransferTask(request)
return response, err
}
func (p *executionRateLimitedPersistenceClient) GetTransferTasks(request *GetTransferTasksRequest) (*GetTransferTasksResponse, error) {
if ok := p.rateLimiter.Allow(); !ok {
return nil, ErrPersistenceLimitExceeded
}
response, err := p.persistence.GetTransferTasks(request)
return response, err
}
func (p *executionRateLimitedPersistenceClient) GetVisibilityTask(request *GetVisibilityTaskRequest) (*GetVisibilityTaskResponse, error) {
if ok := p.rateLimiter.Allow(); !ok {
return nil, ErrPersistenceLimitExceeded
}
response, err := p.persistence.GetVisibilityTask(request)
return response, err
}
func (p *executionRateLimitedPersistenceClient) GetVisibilityTasks(request *GetVisibilityTasksRequest) (*GetVisibilityTasksResponse, error) {
if ok := p.rateLimiter.Allow(); !ok {
return nil, ErrPersistenceLimitExceeded
}
response, err := p.persistence.GetVisibilityTasks(request)
return response, err
}
func (p *executionRateLimitedPersistenceClient) GetReplicationTask(request *GetReplicationTaskRequest) (*GetReplicationTaskResponse, error) {
if ok := p.rateLimiter.Allow(); !ok {
return nil, ErrPersistenceLimitExceeded
}
response, err := p.persistence.GetReplicationTask(request)
return response, err
}
func (p *executionRateLimitedPersistenceClient) GetReplicationTasks(request *GetReplicationTasksRequest) (*GetReplicationTasksResponse, error) {
if ok := p.rateLimiter.Allow(); !ok {
return nil, ErrPersistenceLimitExceeded
}
response, err := p.persistence.GetReplicationTasks(request)
return response, err
}
func (p *executionRateLimitedPersistenceClient) CompleteTransferTask(request *CompleteTransferTaskRequest) error {
if ok := p.rateLimiter.Allow(); !ok {
return ErrPersistenceLimitExceeded
}
err := p.persistence.CompleteTransferTask(request)
return err
}
func (p *executionRateLimitedPersistenceClient) RangeCompleteTransferTask(request *RangeCompleteTransferTaskRequest) error {
if ok := p.rateLimiter.Allow(); !ok {
return ErrPersistenceLimitExceeded
}
err := p.persistence.RangeCompleteTransferTask(request)
return err
}
func (p *executionRateLimitedPersistenceClient) CompleteVisibilityTask(request *CompleteVisibilityTaskRequest) error {
if ok := p.rateLimiter.Allow(); !ok {
return ErrPersistenceLimitExceeded
}
err := p.persistence.CompleteVisibilityTask(request)
return err
}
func (p *executionRateLimitedPersistenceClient) RangeCompleteVisibilityTask(request *RangeCompleteVisibilityTaskRequest) error {
if ok := p.rateLimiter.Allow(); !ok {
return ErrPersistenceLimitExceeded
}
err := p.persistence.RangeCompleteVisibilityTask(request)
return err
}
func (p *executionRateLimitedPersistenceClient) CompleteReplicationTask(request *CompleteReplicationTaskRequest) error {
if ok := p.rateLimiter.Allow(); !ok {
return ErrPersistenceLimitExceeded
}
err := p.persistence.CompleteReplicationTask(request)
return err
}
func (p *executionRateLimitedPersistenceClient) RangeCompleteReplicationTask(request *RangeCompleteReplicationTaskRequest) error {
if ok := p.rateLimiter.Allow(); !ok {
return ErrPersistenceLimitExceeded
}
err := p.persistence.RangeCompleteReplicationTask(request)
return err
}
func (p *executionRateLimitedPersistenceClient) PutReplicationTaskToDLQ(
request *PutReplicationTaskToDLQRequest,
) error {
if ok := p.rateLimiter.Allow(); !ok {
return ErrPersistenceLimitExceeded
}
return p.persistence.PutReplicationTaskToDLQ(request)
}
func (p *executionRateLimitedPersistenceClient) GetReplicationTasksFromDLQ(
request *GetReplicationTasksFromDLQRequest,
) (*GetReplicationTasksFromDLQResponse, error) {
if ok := p.rateLimiter.Allow(); !ok {
return nil, ErrPersistenceLimitExceeded
}
return p.persistence.GetReplicationTasksFromDLQ(request)
}
func (p *executionRateLimitedPersistenceClient) DeleteReplicationTaskFromDLQ(
request *DeleteReplicationTaskFromDLQRequest,
) error {
if ok := p.rateLimiter.Allow(); !ok {
return ErrPersistenceLimitExceeded
}
return p.persistence.DeleteReplicationTaskFromDLQ(request)
}
func (p *executionRateLimitedPersistenceClient) RangeDeleteReplicationTaskFromDLQ(
request *RangeDeleteReplicationTaskFromDLQRequest,
) error {
if ok := p.rateLimiter.Allow(); !ok {
return ErrPersistenceLimitExceeded
}
return p.persistence.RangeDeleteReplicationTaskFromDLQ(request)
}
func (p *executionRateLimitedPersistenceClient) GetTimerTask(request *GetTimerTaskRequest) (*GetTimerTaskResponse, error) {
if ok := p.rateLimiter.Allow(); !ok {
return nil, ErrPersistenceLimitExceeded
}
response, err := p.persistence.GetTimerTask(request)
return response, err
}
func (p *executionRateLimitedPersistenceClient) GetTimerIndexTasks(request *GetTimerIndexTasksRequest) (*GetTimerIndexTasksResponse, error) {
if ok := p.rateLimiter.Allow(); !ok {
return nil, ErrPersistenceLimitExceeded
}
	response, err := p.persistence.GetTimerIndexTasks(request)
	return response, err
}
func (p *executionRateLimitedPersistenceClient) CompleteTimerTask(request *CompleteTimerTaskRequest) error {
if ok := p.rateLimiter.Allow(); !ok {
return ErrPersistenceLimitExceeded
}
err := p.persistence.CompleteTimerTask(request)
return err
}
func (p *executionRateLimitedPersistenceClient) RangeCompleteTimerTask(request *RangeCompleteTimerTaskRequest) error {
if ok := p.rateLimiter.Allow(); !ok {
return ErrPersistenceLimitExceeded
}
err := p.persistence.RangeCompleteTimerTask(request)
return err
}
func (p *executionRateLimitedPersistenceClient) Close() {
p.persistence.Close()
}
func (p *taskRateLimitedPersistenceClient) GetName() string {
return p.persistence.GetName()
}
func (p *taskRateLimitedPersistenceClient) CreateTasks(request *CreateTasksRequest) (*CreateTasksResponse, error) {
if ok := p.rateLimiter.Allow(); !ok {
return nil, ErrPersistenceLimitExceeded
}
response, err := p.persistence.CreateTasks(request)
return response, err
}
func (p *taskRateLimitedPersistenceClient) GetTasks(request *GetTasksRequest) (*GetTasksResponse, error) {
if ok := p.rateLimiter.Allow(); !ok {
return nil, ErrPersistenceLimitExceeded
}
response, err := p.persistence.GetTasks(request)
return response, err
}
func (p *taskRateLimitedPersistenceClient) CompleteTask(request *CompleteTaskRequest) error {
if ok := p.rateLimiter.Allow(); !ok {
return ErrPersistenceLimitExceeded
}
err := p.persistence.CompleteTask(request)
return err
}
func (p *taskRateLimitedPersistenceClient) CompleteTasksLessThan(request *CompleteTasksLessThanRequest) (int, error) {
if ok := p.rateLimiter.Allow(); !ok {
return 0, ErrPersistenceLimitExceeded
}
return p.persistence.CompleteTasksLessThan(request)
}
func (p *taskRateLimitedPersistenceClient) LeaseTaskQueue(request *LeaseTaskQueueRequest) (*LeaseTaskQueueResponse, error) {
if ok := p.rateLimiter.Allow(); !ok {
return nil, ErrPersistenceLimitExceeded
}
response, err := p.persistence.LeaseTaskQueue(request)
return response, err
}
func (p *taskRateLimitedPersistenceClient) UpdateTaskQueue(request *UpdateTaskQueueRequest) (*UpdateTaskQueueResponse, error) {
if ok := p.rateLimiter.Allow(); !ok {
return nil, ErrPersistenceLimitExceeded
}
response, err := p.persistence.UpdateTaskQueue(request)
return response, err
}
func (p *taskRateLimitedPersistenceClient) ListTaskQueue(request *ListTaskQueueRequest) (*ListTaskQueueResponse, error) {
if ok := p.rateLimiter.Allow(); !ok {
return nil, ErrPersistenceLimitExceeded
}
return p.persistence.ListTaskQueue(request)
}
func (p *taskRateLimitedPersistenceClient) DeleteTaskQueue(request *DeleteTaskQueueRequest) error {
if ok := p.rateLimiter.Allow(); !ok {
return ErrPersistenceLimitExceeded
}
return p.persistence.DeleteTaskQueue(request)
}
func (p *taskRateLimitedPersistenceClient) Close() {
p.persistence.Close()
}
func (p *metadataRateLimitedPersistenceClient) GetName() string {
return p.persistence.GetName()
}
func (p *metadataRateLimitedPersistenceClient) CreateNamespace(request *CreateNamespaceRequest) (*CreateNamespaceResponse, error) {
if ok := p.rateLimiter.Allow(); !ok {
return nil, ErrPersistenceLimitExceeded
}
response, err := p.persistence.CreateNamespace(request)
return response, err
}
func (p *metadataRateLimitedPersistenceClient) GetNamespace(request *GetNamespaceRequest) (*GetNamespaceResponse, error) {
if ok := p.rateLimiter.Allow(); !ok {
return nil, ErrPersistenceLimitExceeded
}
response, err := p.persistence.GetNamespace(request)
return response, err
}
func (p *metadataRateLimitedPersistenceClient) UpdateNamespace(request *UpdateNamespaceRequest) error {
if ok := p.rateLimiter.Allow(); !ok {
return ErrPersistenceLimitExceeded
}
err := p.persistence.UpdateNamespace(request)
return err
}
func (p *metadataRateLimitedPersistenceClient) DeleteNamespace(request *DeleteNamespaceRequest) error {
if ok := p.rateLimiter.Allow(); !ok {
return ErrPersistenceLimitExceeded
}
err := p.persistence.DeleteNamespace(request)
return err
}
func (p *metadataRateLimitedPersistenceClient) DeleteNamespaceByName(request *DeleteNamespaceByNameRequest) error {
if ok := p.rateLimiter.Allow(); !ok {
return ErrPersistenceLimitExceeded
}
err := p.persistence.DeleteNamespaceByName(request)
return err
}
func (p *metadataRateLimitedPersistenceClient) ListNamespaces(request *ListNamespacesRequest) (*ListNamespacesResponse, error) {
if ok := p.rateLimiter.Allow(); !ok {
return nil, ErrPersistenceLimitExceeded
}
response, err := p.persistence.ListNamespaces(request)
return response, err
}
func (p *metadataRateLimitedPersistenceClient) GetMetadata() (*GetMetadataResponse, error) {
if ok := p.rateLimiter.Allow(); !ok {
return nil, ErrPersistenceLimitExceeded
}
response, err := p.persistence.GetMetadata()
return response, err
}
func (p *metadataRateLimitedPersistenceClient) Close() {
p.persistence.Close()
}
// AppendHistoryNodes adds a node to the history node table
func (p *executionRateLimitedPersistenceClient) AppendHistoryNodes(request *AppendHistoryNodesRequest) (*AppendHistoryNodesResponse, error) {
if ok := p.rateLimiter.Allow(); !ok {
return nil, ErrPersistenceLimitExceeded
}
return p.persistence.AppendHistoryNodes(request)
}
// ReadHistoryBranch returns history node data for a branch
func (p *executionRateLimitedPersistenceClient) ReadHistoryBranch(request *ReadHistoryBranchRequest) (*ReadHistoryBranchResponse, error) {
if ok := p.rateLimiter.Allow(); !ok {
return nil, ErrPersistenceLimitExceeded
}
response, err := p.persistence.ReadHistoryBranch(request)
return response, err
}
// ReadHistoryBranchByBatch returns history node data for a branch
func (p *executionRateLimitedPersistenceClient) ReadHistoryBranchByBatch(request *ReadHistoryBranchRequest) (*ReadHistoryBranchByBatchResponse, error) {
if ok := p.rateLimiter.Allow(); !ok {
return nil, ErrPersistenceLimitExceeded
}
response, err := p.persistence.ReadHistoryBranchByBatch(request)
return response, err
}
// ReadRawHistoryBranch returns raw history node data for a branch
func (p *executionRateLimitedPersistenceClient) ReadRawHistoryBranch(request *ReadHistoryBranchRequest) (*ReadRawHistoryBranchResponse, error) {
if ok := p.rateLimiter.Allow(); !ok {
return nil, ErrPersistenceLimitExceeded
}
response, err := p.persistence.ReadRawHistoryBranch(request)
return response, err
}
// ForkHistoryBranch forks a new branch from an old branch
func (p *executionRateLimitedPersistenceClient) ForkHistoryBranch(request *ForkHistoryBranchRequest) (*ForkHistoryBranchResponse, error) {
if ok := p.rateLimiter.Allow(); !ok {
return nil, ErrPersistenceLimitExceeded
}
response, err := p.persistence.ForkHistoryBranch(request)
return response, err
}
// DeleteHistoryBranch removes a branch
func (p *executionRateLimitedPersistenceClient) DeleteHistoryBranch(request *DeleteHistoryBranchRequest) error {
if ok := p.rateLimiter.Allow(); !ok {
return ErrPersistenceLimitExceeded
}
err := p.persistence.DeleteHistoryBranch(request)
return err
}
// TrimHistoryBranch trims a branch
func (p *executionRateLimitedPersistenceClient) TrimHistoryBranch(request *TrimHistoryBranchRequest) (*TrimHistoryBranchResponse, error) {
if ok := p.rateLimiter.Allow(); !ok {
return nil, ErrPersistenceLimitExceeded
}
resp, err := p.persistence.TrimHistoryBranch(request)
return resp, err
}
// GetHistoryTree returns all branch information of a tree
func (p *executionRateLimitedPersistenceClient) GetHistoryTree(request *GetHistoryTreeRequest) (*GetHistoryTreeResponse, error) {
if ok := p.rateLimiter.Allow(); !ok {
return nil, ErrPersistenceLimitExceeded
}
response, err := p.persistence.GetHistoryTree(request)
return response, err
}
func (p *executionRateLimitedPersistenceClient) GetAllHistoryTreeBranches(request *GetAllHistoryTreeBranchesRequest) (*GetAllHistoryTreeBranchesResponse, error) {
if ok := p.rateLimiter.Allow(); !ok {
return nil, ErrPersistenceLimitExceeded
}
response, err := p.persistence.GetAllHistoryTreeBranches(request)
return response, err
}
func (p *queueRateLimitedPersistenceClient) EnqueueMessage(blob commonpb.DataBlob) error {
if ok := p.rateLimiter.Allow(); !ok {
return ErrPersistenceLimitExceeded
}
return p.persistence.EnqueueMessage(blob)
}
func (p *queueRateLimitedPersistenceClient) ReadMessages(lastMessageID int64, maxCount int) ([]*QueueMessage, error) {
if ok := p.rateLimiter.Allow(); !ok {
return nil, ErrPersistenceLimitExceeded
}
return p.persistence.ReadMessages(lastMessageID, maxCount)
}
func (p *queueRateLimitedPersistenceClient) UpdateAckLevel(metadata *InternalQueueMetadata) error {
if ok := p.rateLimiter.Allow(); !ok {
return ErrPersistenceLimitExceeded
}
return p.persistence.UpdateAckLevel(metadata)
}
func (p *queueRateLimitedPersistenceClient) GetAckLevels() (*InternalQueueMetadata, error) {
if ok := p.rateLimiter.Allow(); !ok {
return nil, ErrPersistenceLimitExceeded
}
return p.persistence.GetAckLevels()
}
func (p *queueRateLimitedPersistenceClient) DeleteMessagesBefore(messageID int64) error {
if ok := p.rateLimiter.Allow(); !ok {
return ErrPersistenceLimitExceeded
}
return p.persistence.DeleteMessagesBefore(messageID)
}
func (p *queueRateLimitedPersistenceClient) EnqueueMessageToDLQ(blob commonpb.DataBlob) (int64, error) {
if ok := p.rateLimiter.Allow(); !ok {
return EmptyQueueMessageID, ErrPersistenceLimitExceeded
}
return p.persistence.EnqueueMessageToDLQ(blob)
}
func (p *queueRateLimitedPersistenceClient) ReadMessagesFromDLQ(firstMessageID int64, lastMessageID int64, pageSize int, pageToken []byte) ([]*QueueMessage, []byte, error) {
if ok := p.rateLimiter.Allow(); !ok {
return nil, nil, ErrPersistenceLimitExceeded
}
return p.persistence.ReadMessagesFromDLQ(firstMessageID, lastMessageID, pageSize, pageToken)
}
func (p *queueRateLimitedPersistenceClient) RangeDeleteMessagesFromDLQ(firstMessageID int64, lastMessageID int64) error {
if ok := p.rateLimiter.Allow(); !ok {
return ErrPersistenceLimitExceeded
}
return p.persistence.RangeDeleteMessagesFromDLQ(firstMessageID, lastMessageID)
}
func (p *queueRateLimitedPersistenceClient) UpdateDLQAckLevel(metadata *InternalQueueMetadata) error {
if ok := p.rateLimiter.Allow(); !ok {
return ErrPersistenceLimitExceeded
}
return p.persistence.UpdateDLQAckLevel(metadata)
}
func (p *queueRateLimitedPersistenceClient) GetDLQAckLevels() (*InternalQueueMetadata, error) {
if ok := p.rateLimiter.Allow(); !ok {
return nil, ErrPersistenceLimitExceeded
}
return p.persistence.GetDLQAckLevels()
}
func (p *queueRateLimitedPersistenceClient) DeleteMessageFromDLQ(messageID int64) error {
if ok := p.rateLimiter.Allow(); !ok {
return ErrPersistenceLimitExceeded
}
return p.persistence.DeleteMessageFromDLQ(messageID)
}
func (p *queueRateLimitedPersistenceClient) Close() {
p.persistence.Close()
}
func (p *queueRateLimitedPersistenceClient) Init(blob *commonpb.DataBlob) error {
return p.persistence.Init(blob)
}
func (c *clusterMetadataRateLimitedPersistenceClient) Close() {
c.persistence.Close()
}
func (c *clusterMetadataRateLimitedPersistenceClient) GetName() string {
return c.persistence.GetName()
}
func (c *clusterMetadataRateLimitedPersistenceClient) GetClusterMembers(request *GetClusterMembersRequest) (*GetClusterMembersResponse, error) {
if ok := c.rateLimiter.Allow(); !ok {
return nil, ErrPersistenceLimitExceeded
}
return c.persistence.GetClusterMembers(request)
}
func (c *clusterMetadataRateLimitedPersistenceClient) UpsertClusterMembership(request *UpsertClusterMembershipRequest) error {
if ok := c.rateLimiter.Allow(); !ok {
return ErrPersistenceLimitExceeded
}
return c.persistence.UpsertClusterMembership(request)
}
func (c *clusterMetadataRateLimitedPersistenceClient) PruneClusterMembership(request *PruneClusterMembershipRequest) error {
if ok := c.rateLimiter.Allow(); !ok {
return ErrPersistenceLimitExceeded
}
return c.persistence.PruneClusterMembership(request)
}
func (c *clusterMetadataRateLimitedPersistenceClient) GetClusterMetadata() (*GetClusterMetadataResponse, error) {
if ok := c.rateLimiter.Allow(); !ok {
return nil, ErrPersistenceLimitExceeded
}
return c.persistence.GetClusterMetadata()
}
func (c *clusterMetadataRateLimitedPersistenceClient) SaveClusterMetadata(request *SaveClusterMetadataRequest) (bool, error) {
if ok := c.rateLimiter.Allow(); !ok {
return false, ErrPersistenceLimitExceeded
}
return c.persistence.SaveClusterMetadata(request)
}
func (c *metadataRateLimitedPersistenceClient) InitializeSystemNamespaces(currentClusterName string) error {
if ok := c.rateLimiter.Allow(); !ok {
return ErrPersistenceLimitExceeded
}
return c.persistence.InitializeSystemNamespaces(currentClusterName)
}
| 1 | 12,910 | why not creating a new error type for server's own resource limit exceed error and do conversion within rpc interceptor? | temporalio-temporal | go |
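A minimal sketch of the reviewer's suggestion, with hypothetical names: the persistence layer returns a dedicated server-side error type, and a single gRPC unary interceptor converts it to a wire-level status, instead of surfacing ErrPersistenceLimitExceeded directly from every client method.

package example

import (
	"context"
	"errors"

	"google.golang.org/grpc"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

// ResourceExhaustedError is a hypothetical server-internal error type that
// carries no RPC framework concerns.
type ResourceExhaustedError struct {
	Reason string
}

func (e *ResourceExhaustedError) Error() string {
	return "resource exhausted: " + e.Reason
}

// ErrorConversionInterceptor maps internal error types to gRPC statuses at
// the RPC boundary, so persistence code stays transport-agnostic.
func ErrorConversionInterceptor(
	ctx context.Context,
	req interface{},
	info *grpc.UnaryServerInfo,
	handler grpc.UnaryHandler,
) (interface{}, error) {
	resp, err := handler(ctx, req)
	var exhausted *ResourceExhaustedError
	if errors.As(err, &exhausted) {
		return resp, status.Error(codes.ResourceExhausted, exhausted.Error())
	}
	return resp, err
}

The interceptor would then be registered once, for example via grpc.NewServer(grpc.UnaryInterceptor(ErrorConversionInterceptor)).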
@@ -27,13 +27,10 @@ namespace OpenTelemetry.Instrumentation.AspNet
public class AspNetInstrumentationOptions
{
/// <summary>
- /// Gets or sets <see cref="TextMapPropagator"/> for context propagation. Default value: <see cref="CompositeTextMapPropagator"/> with <see cref="TraceContextPropagator"/> & <see cref="BaggagePropagator"/>.
+ /// Gets or sets <see cref="TextMapPropagator"/> for context propagation.
+ /// By default, <see cref="Propagators.DefaultTextMapPropagator" /> will be used.
/// </summary>
- public TextMapPropagator Propagator { get; set; } = new CompositeTextMapPropagator(new TextMapPropagator[]
- {
- new TraceContextPropagator(),
- new BaggagePropagator(),
- });
+ public TextMapPropagator Propagator { get; set; } = Propagators.DefaultTextMapPropagator;
/// <summary>
/// Gets or sets a Filter function to filter instrumentation for requests on a per request basis. | 1 | // <copyright file="AspNetInstrumentationOptions.cs" company="OpenTelemetry Authors">
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// </copyright>
using System;
using System.Diagnostics;
using System.Web;
using OpenTelemetry.Context.Propagation;
namespace OpenTelemetry.Instrumentation.AspNet
{
/// <summary>
/// Options for ASP.NET instrumentation.
/// </summary>
public class AspNetInstrumentationOptions
{
/// <summary>
/// Gets or sets <see cref="TextMapPropagator"/> for context propagation. Default value: <see cref="CompositeTextMapPropagator"/> with <see cref="TraceContextPropagator"/> & <see cref="BaggagePropagator"/>.
/// </summary>
public TextMapPropagator Propagator { get; set; } = new CompositeTextMapPropagator(new TextMapPropagator[]
{
new TraceContextPropagator(),
new BaggagePropagator(),
});
/// <summary>
/// Gets or sets a Filter function to filter instrumentation for requests on a per request basis.
/// The Filter gets the HttpContext, and should return a boolean.
/// If Filter returns true, the request is collected.
        /// If Filter returns false or throws an exception, the request is filtered out.
/// </summary>
public Func<HttpContext, bool> Filter { get; set; }
/// <summary>
/// Gets or sets an action to enrich an Activity.
/// </summary>
/// <remarks>
/// <para><see cref="Activity"/>: the activity being enriched.</para>
/// <para>string: the name of the event.</para>
/// <para>object: the raw object from which additional information can be extracted to enrich the activity.
/// The type of this object depends on the event, which is given by the above parameter.</para>
/// </remarks>
public Action<Activity, string, object> Enrich { get; set; }
}
}
| 1 | 17,767 | Could be a timing thing here. When options are created they'll copy the ref for the current default propagator. If user sets through SDK the global propagator after that, it won't be reflected. Could leave it null here and then when it is used in instrumentation do `options.Propagator ?? Propagators.DefaultTextMapPropagator`. It would be a slight perf hit but it makes it hot-swappable. | open-telemetry-opentelemetry-dotnet | .cs |
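A sketch of the lazy-resolution pattern the reviewer describes, using illustrative type names (RequestListener and the Sketch suffix stand in for the actual instrumentation internals): the option defaults to null and the fallback happens at use time, so a propagator set through the SDK after the options are constructed is still picked up.

using OpenTelemetry.Context.Propagation;

public class AspNetInstrumentationOptionsSketch
{
    // Null by default; the effective propagator is resolved lazily at use time.
    public TextMapPropagator Propagator { get; set; }
}

internal class RequestListener
{
    private readonly AspNetInstrumentationOptionsSketch options;

    public RequestListener(AspNetInstrumentationOptionsSketch options)
    {
        this.options = options;
    }

    internal TextMapPropagator EffectivePropagator =>
        // Resolving here, rather than in the options initializer, means a
        // later change to the global default propagator is still honored.
        this.options.Propagator ?? Propagators.DefaultTextMapPropagator;
}

The trade-off is one null-coalescing check per request in exchange for a hot-swappable global propagator.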
@@ -158,7 +158,6 @@ public class TransactionSimulator {
callParams.getFrom() != null ? callParams.getFrom() : DEFAULT_FROM;
BlockHeader blockHeaderToProcess = header;
-
if (transactionValidationParams.isAllowExceedingBalance()) {
updater.getOrCreate(senderAddress).getMutable().setBalance(Wei.of(UInt256.MAX_VALUE));
if (header.getBaseFee().isPresent()) { | 1 | /*
* Copyright ConsenSys AG.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*
* SPDX-License-Identifier: Apache-2.0
*/
package org.hyperledger.besu.ethereum.transaction;
import static org.hyperledger.besu.ethereum.goquorum.GoQuorumPrivateStateUtil.getPrivateWorldStateAtBlock;
import org.hyperledger.besu.config.GoQuorumOptions;
import org.hyperledger.besu.crypto.SECPSignature;
import org.hyperledger.besu.crypto.SignatureAlgorithm;
import org.hyperledger.besu.crypto.SignatureAlgorithmFactory;
import org.hyperledger.besu.datatypes.Address;
import org.hyperledger.besu.datatypes.Hash;
import org.hyperledger.besu.datatypes.Wei;
import org.hyperledger.besu.ethereum.chain.Blockchain;
import org.hyperledger.besu.ethereum.core.BlockHeader;
import org.hyperledger.besu.ethereum.core.BlockHeaderBuilder;
import org.hyperledger.besu.ethereum.core.MutableWorldState;
import org.hyperledger.besu.ethereum.core.PrivacyParameters;
import org.hyperledger.besu.ethereum.core.Transaction;
import org.hyperledger.besu.ethereum.mainnet.MainnetTransactionProcessor;
import org.hyperledger.besu.ethereum.mainnet.ProtocolSchedule;
import org.hyperledger.besu.ethereum.mainnet.ProtocolSpec;
import org.hyperledger.besu.ethereum.mainnet.TransactionValidationParams;
import org.hyperledger.besu.ethereum.processing.TransactionProcessingResult;
import org.hyperledger.besu.ethereum.vm.BlockHashLookup;
import org.hyperledger.besu.ethereum.worldstate.GoQuorumMutablePrivateAndPublicWorldStateUpdater;
import org.hyperledger.besu.ethereum.worldstate.WorldStateArchive;
import org.hyperledger.besu.evm.Gas;
import org.hyperledger.besu.evm.account.Account;
import org.hyperledger.besu.evm.tracing.OperationTracer;
import org.hyperledger.besu.evm.worldstate.WorldUpdater;
import java.math.BigInteger;
import java.util.Optional;
import com.google.common.base.Supplier;
import com.google.common.base.Suppliers;
import org.apache.tuweni.bytes.Bytes;
import org.apache.tuweni.units.bigints.UInt256;
/*
* Used to process transactions for eth_call and eth_estimateGas.
*
 * The processing won't affect the world state; it is used to execute read operations on the
 * blockchain or to estimate the transaction gas cost.
*/
public class TransactionSimulator {
private static final Supplier<SignatureAlgorithm> SIGNATURE_ALGORITHM =
Suppliers.memoize(SignatureAlgorithmFactory::getInstance);
  // Dummy signature so that transaction processing does not fail.
private static final SECPSignature FAKE_SIGNATURE =
SIGNATURE_ALGORITHM
.get()
.createSignature(
SIGNATURE_ALGORITHM.get().getHalfCurveOrder(),
SIGNATURE_ALGORITHM.get().getHalfCurveOrder(),
(byte) 0);
// TODO: Identify a better default from account to use, such as the registered
// coinbase or an account currently unlocked by the client.
private static final Address DEFAULT_FROM =
Address.fromHexString("0x0000000000000000000000000000000000000000");
private final Blockchain blockchain;
private final WorldStateArchive worldStateArchive;
private final ProtocolSchedule protocolSchedule;
private final Optional<PrivacyParameters> maybePrivacyParameters;
public TransactionSimulator(
final Blockchain blockchain,
final WorldStateArchive worldStateArchive,
final ProtocolSchedule protocolSchedule) {
this.blockchain = blockchain;
this.worldStateArchive = worldStateArchive;
this.protocolSchedule = protocolSchedule;
this.maybePrivacyParameters = Optional.empty();
}
public TransactionSimulator(
final Blockchain blockchain,
final WorldStateArchive worldStateArchive,
final ProtocolSchedule protocolSchedule,
final PrivacyParameters privacyParameters) {
this.blockchain = blockchain;
this.worldStateArchive = worldStateArchive;
this.protocolSchedule = protocolSchedule;
this.maybePrivacyParameters = Optional.of(privacyParameters);
}
public Optional<TransactionSimulatorResult> process(
final CallParameter callParams,
final TransactionValidationParams transactionValidationParams,
final OperationTracer operationTracer,
final long blockNumber) {
final BlockHeader header = blockchain.getBlockHeader(blockNumber).orElse(null);
return process(callParams, transactionValidationParams, operationTracer, header);
}
public Optional<TransactionSimulatorResult> process(
final CallParameter callParams, final Hash blockHeaderHash) {
final BlockHeader header = blockchain.getBlockHeader(blockHeaderHash).orElse(null);
return process(
callParams,
TransactionValidationParams.transactionSimulator(),
OperationTracer.NO_TRACING,
header);
}
public Optional<TransactionSimulatorResult> process(
final CallParameter callParams, final long blockNumber) {
return process(
callParams,
TransactionValidationParams.transactionSimulator(),
OperationTracer.NO_TRACING,
blockNumber);
}
public Optional<TransactionSimulatorResult> processAtHead(final CallParameter callParams) {
return process(
callParams,
TransactionValidationParams.transactionSimulator(),
OperationTracer.NO_TRACING,
blockchain.getChainHeadHeader());
}
public Optional<TransactionSimulatorResult> process(
final CallParameter callParams,
final TransactionValidationParams transactionValidationParams,
final OperationTracer operationTracer,
final BlockHeader header) {
if (header == null) {
return Optional.empty();
}
final MutableWorldState publicWorldState =
worldStateArchive.getMutable(header.getStateRoot(), header.getHash(), false).orElse(null);
if (publicWorldState == null) {
return Optional.empty();
}
final WorldUpdater updater = getEffectiveWorldStateUpdater(header, publicWorldState);
final ProtocolSpec protocolSpec = protocolSchedule.getByBlockNumber(header.getNumber());
final Address senderAddress =
callParams.getFrom() != null ? callParams.getFrom() : DEFAULT_FROM;
BlockHeader blockHeaderToProcess = header;
if (transactionValidationParams.isAllowExceedingBalance()) {
updater.getOrCreate(senderAddress).getMutable().setBalance(Wei.of(UInt256.MAX_VALUE));
if (header.getBaseFee().isPresent()) {
blockHeaderToProcess =
BlockHeaderBuilder.fromHeader(header)
.baseFee(0L)
.blockHeaderFunctions(protocolSpec.getBlockHeaderFunctions())
.buildBlockHeader();
}
}
final Account sender = publicWorldState.get(senderAddress);
final long nonce = sender != null ? sender.getNonce() : 0L;
final Wei gasPrice = callParams.getGasPrice() != null ? callParams.getGasPrice() : Wei.ZERO;
final long gasLimit =
callParams.getGasLimit() >= 0
? callParams.getGasLimit()
: blockHeaderToProcess.getGasLimit();
final Wei value = callParams.getValue() != null ? callParams.getValue() : Wei.ZERO;
final Bytes payload = callParams.getPayload() != null ? callParams.getPayload() : Bytes.EMPTY;
final MainnetTransactionProcessor transactionProcessor =
protocolSchedule
.getByBlockNumber(blockHeaderToProcess.getNumber())
.getTransactionProcessor();
final Transaction.Builder transactionBuilder =
Transaction.builder()
.nonce(nonce)
.gasLimit(gasLimit)
.to(callParams.getTo())
.sender(senderAddress)
.value(value)
.payload(payload)
.signature(FAKE_SIGNATURE);
if (header.getBaseFee().isEmpty()) {
transactionBuilder.gasPrice(gasPrice);
} else if (protocolSchedule.getChainId().isPresent()) {
transactionBuilder
.maxFeePerGas(callParams.getMaxFeePerGas().orElse(gasPrice))
.maxPriorityFeePerGas(callParams.getMaxPriorityFeePerGas().orElse(gasPrice));
} else {
return Optional.empty();
}
transactionBuilder.guessType();
if (transactionBuilder.getTransactionType().requiresChainId()) {
transactionBuilder.chainId(
protocolSchedule
.getChainId()
.orElse(BigInteger.ONE)); // needed to make some transactions valid
}
final Transaction transaction = transactionBuilder.build();
final TransactionProcessingResult result =
transactionProcessor.processTransaction(
blockchain,
updater,
blockHeaderToProcess,
transaction,
protocolSpec
.getMiningBeneficiaryCalculator()
.calculateBeneficiary(blockHeaderToProcess),
new BlockHashLookup(blockHeaderToProcess, blockchain),
false,
transactionValidationParams,
operationTracer);
// If GoQuorum privacy enabled, and value = zero, get max gas possible for a PMT hash.
// It is possible to have a data field that has a lower intrinsic value than the PMT hash.
// This means a potential over-estimate of gas, but the tx, if sent with this gas, will not
// fail.
if (GoQuorumOptions.goQuorumCompatibilityMode && value.isZero()) {
Gas privateGasEstimateAndState =
protocolSpec.getGasCalculator().getMaximumTransactionCost(64);
if (privateGasEstimateAndState.toLong() > result.getEstimateGasUsedByTransaction()) {
// modify the result to have the larger estimate
TransactionProcessingResult resultPmt =
TransactionProcessingResult.successful(
result.getLogs(),
privateGasEstimateAndState.toLong(),
result.getGasRemaining(),
result.getOutput(),
result.getValidationResult());
return Optional.of(new TransactionSimulatorResult(transaction, resultPmt));
}
}
return Optional.of(new TransactionSimulatorResult(transaction, result));
}
// return combined private/public world state updater if GoQuorum mode, otherwise the public state
private WorldUpdater getEffectiveWorldStateUpdater(
final BlockHeader header, final MutableWorldState publicWorldState) {
if (maybePrivacyParameters.isPresent()
&& maybePrivacyParameters.get().getGoQuorumPrivacyParameters().isPresent()) {
final MutableWorldState privateWorldState =
getPrivateWorldStateAtBlock(
maybePrivacyParameters.get().getGoQuorumPrivacyParameters(), header);
return new GoQuorumMutablePrivateAndPublicWorldStateUpdater(
publicWorldState.updater(), privateWorldState.updater());
}
return publicWorldState.updater();
}
public Optional<Boolean> doesAddressExistAtHead(final Address address) {
final BlockHeader header = blockchain.getChainHeadHeader();
final MutableWorldState worldState =
worldStateArchive.getMutable(header.getStateRoot(), header.getHash(), false).orElse(null);
return doesAddressExist(worldState, address, header);
}
public Optional<Boolean> doesAddressExist(
final MutableWorldState worldState, final Address address, final BlockHeader header) {
if (header == null) {
return Optional.empty();
}
if (worldState == null) {
return Optional.empty();
}
return Optional.of(worldState.get(address) != null);
}
}
| 1 | 26,183 | Unrelated file. Please remove from PR. | hyperledger-besu | java |
@@ -2663,6 +2663,7 @@ static work_queue_msg_code_t process_http_request( struct work_queue *q, struct
} else {
// Other requests get raw JSON data.
send_worker_msg(q,w,"Content-type: text/plain\n\n");
+ send_worker_msg(q,w,"Access-Control-Allow-Origin: *\n\n");
process_queue_status(q, w, &path[1], stoptime );
}
| 1 | /*
Copyright (C) 2014- The University of Notre Dame
This software is distributed under the GNU General Public License.
See the file COPYING for details.
*/
#include "work_queue.h"
#include "work_queue_protocol.h"
#include "work_queue_internal.h"
#include "work_queue_resources.h"
#include "cctools.h"
#include "int_sizes.h"
#include "link.h"
#include "link_auth.h"
#include "debug.h"
#include "stringtools.h"
#include "catalog_query.h"
#include "datagram.h"
#include "domain_name_cache.h"
#include "hash_table.h"
#include "interfaces_address.h"
#include "itable.h"
#include "list.h"
#include "macros.h"
#include "username.h"
#include "create_dir.h"
#include "xxmalloc.h"
#include "load_average.h"
#include "buffer.h"
#include "rmonitor.h"
#include "rmonitor_types.h"
#include "rmonitor_poll.h"
#include "category_internal.h"
#include "copy_stream.h"
#include "random.h"
#include "process.h"
#include "path.h"
#include "md5.h"
#include "url_encode.h"
#include "jx_print.h"
#include "shell.h"
#include "pattern.h"
#include "tlq_config.h"
#include "host_disk_info.h"
#include <unistd.h>
#include <dirent.h>
#include <fcntl.h>
#include <sys/stat.h>
#include <sys/wait.h>
#include <assert.h>
#include <errno.h>
#include <limits.h>
#include <math.h>
#include <stdarg.h>
#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
// The default tasks capacity reported before information is available.
// Default capacity also implies 1 core, 1024 MB of disk and 512 memory per task.
#define WORK_QUEUE_DEFAULT_CAPACITY_TASKS 10
// The minimum number of task reports to keep
#define WORK_QUEUE_TASK_REPORT_MIN_SIZE 50
// Seconds between updates to the catalog
#define WORK_QUEUE_UPDATE_INTERVAL 60
// Seconds between measurement of manager local resources
#define WORK_QUEUE_RESOURCE_MEASUREMENT_INTERVAL 30
#define WORKER_ADDRPORT_MAX 64
#define WORKER_HASHKEY_MAX 32
#define RESOURCE_MONITOR_TASK_LOCAL_NAME "wq-%d-task-%d"
#define RESOURCE_MONITOR_REMOTE_NAME "cctools-monitor"
#define RESOURCE_MONITOR_REMOTE_NAME_EVENTS RESOURCE_MONITOR_REMOTE_NAME "events.json"
#define MAX_TASK_STDOUT_STORAGE (1*GIGABYTE)
#define MAX_NEW_WORKERS 10
// Result codes for signaling the completion of operations in WQ
typedef enum {
WQ_SUCCESS = 0,
WQ_WORKER_FAILURE,
WQ_APP_FAILURE
} work_queue_result_code_t;
typedef enum {
MSG_PROCESSED = 0, /* Message was processed and connection is still good. */
MSG_PROCESSED_DISCONNECT, /* Message was processed and disconnect now expected. */
MSG_NOT_PROCESSED, /* Message was not processed, waiting to be consumed. */
MSG_FAILURE /* Message not received, connection failure. */
} work_queue_msg_code_t;
typedef enum {
MON_DISABLED = 0,
MON_SUMMARY = 1, /* generate only summary. */
MON_FULL = 2, /* generate summary, series and monitoring debug output. */
MON_WATCHDOG = 4 /* kill tasks that exhaust resources */
} work_queue_monitoring_mode;
typedef enum {
WORKER_DISCONNECT_UNKNOWN = 0,
WORKER_DISCONNECT_EXPLICIT,
WORKER_DISCONNECT_STATUS_WORKER,
WORKER_DISCONNECT_IDLE_OUT,
WORKER_DISCONNECT_FAST_ABORT,
WORKER_DISCONNECT_FAILURE
} worker_disconnect_reason;
typedef enum {
WORKER_TYPE_UNKNOWN = 1,
WORKER_TYPE_WORKER = 2,
WORKER_TYPE_STATUS = 4,
WORKER_TYPE_FOREMAN = 8
} worker_type;
// Threshold for available disk space (MB) beyond which files are not received from worker.
static uint64_t disk_avail_threshold = 100;
int wq_option_scheduler = WORK_QUEUE_SCHEDULE_TIME;
/* default timeout for slow workers to come back to the pool */
double wq_option_blocklist_slow_workers_timeout = 900;
struct work_queue {
char *name;
int port;
int priority;
int num_tasks_left;
int next_taskid;
char workingdir[PATH_MAX];
struct link *manager_link; // incoming tcp connection for workers.
struct link_info *poll_table;
int poll_table_size;
struct itable *tasks; // taskid -> task
struct itable *task_state_map; // taskid -> state
struct list *ready_list; // ready to be sent to a worker
struct hash_table *worker_table;
struct hash_table *worker_blocklist;
struct itable *worker_task_map;
struct hash_table *categories;
struct hash_table *workers_with_available_results;
struct work_queue_stats *stats;
struct work_queue_stats *stats_measure;
struct work_queue_stats *stats_disconnected_workers;
timestamp_t time_last_wait;
int worker_selection_algorithm;
int task_ordering;
int process_pending_check;
int short_timeout; // timeout to send/recv a brief message from worker
int long_timeout; // timeout to send/recv a brief message from a foreman
struct list *task_reports; /* list of last N work_queue_task_reports. */
double asynchrony_multiplier; /* Times the resource value, but disk */
int asynchrony_modifier; /* Plus this many cores or unlabeled tasks */
int minimum_transfer_timeout;
int foreman_transfer_timeout;
int transfer_outlier_factor;
int default_transfer_rate;
char *catalog_hosts;
time_t catalog_last_update_time;
time_t resources_last_update_time;
int busy_waiting_flag;
category_mode_t allocation_default_mode;
FILE *logfile;
FILE *transactions_logfile;
int keepalive_interval;
int keepalive_timeout;
timestamp_t link_poll_end; //tracks when we poll link; used to timeout unacknowledged keepalive checks
char *manager_preferred_connection;
int monitor_mode;
FILE *monitor_file;
char *monitor_output_directory;
char *monitor_summary_filename;
char *monitor_exe;
struct rmsummary *measured_local_resources;
struct rmsummary *current_max_worker;
char *password;
double bandwidth;
char *debug_path;
int tlq_port;
char *tlq_url;
};
struct work_queue_worker {
char *hostname;
char *os;
char *arch;
char *version;
char addrport[WORKER_ADDRPORT_MAX];
char hashkey[WORKER_HASHKEY_MAX];
worker_type type; // unknown, regular worker, status worker, foreman
int draining; // if 1, worker does not accept anymore tasks. It is shutdown if no task running.
struct work_queue_stats *stats;
struct work_queue_resources *resources;
struct hash_table *features;
char *workerid;
struct hash_table *current_files;
struct link *link;
struct itable *current_tasks;
struct itable *current_tasks_boxes;
int finished_tasks;
int64_t total_tasks_complete;
int64_t total_bytes_transferred;
timestamp_t total_task_time;
timestamp_t total_transfer_time;
timestamp_t start_time;
timestamp_t last_msg_recv_time;
timestamp_t last_update_msg_time;
};
struct work_queue_task_report {
timestamp_t transfer_time;
timestamp_t exec_time;
timestamp_t manager_time;
struct rmsummary *resources;
};
static void handle_failure(struct work_queue *q, struct work_queue_worker *w, struct work_queue_task *t, work_queue_result_code_t fail_type);
struct blocklist_host_info {
int blocked;
int times_blocked;
time_t release_at;
};
static void handle_worker_failure(struct work_queue *q, struct work_queue_worker *w);
static void handle_app_failure(struct work_queue *q, struct work_queue_worker *w, struct work_queue_task *t);
static void remove_worker(struct work_queue *q, struct work_queue_worker *w, worker_disconnect_reason reason);
static void add_task_report(struct work_queue *q, struct work_queue_task *t );
static void commit_task_to_worker(struct work_queue *q, struct work_queue_worker *w, struct work_queue_task *t);
static void reap_task_from_worker(struct work_queue *q, struct work_queue_worker *w, struct work_queue_task *t, work_queue_task_state_t new_state);
static int cancel_task_on_worker(struct work_queue *q, struct work_queue_task *t, work_queue_task_state_t new_state);
static void count_worker_resources(struct work_queue *q, struct work_queue_worker *w);
static void find_max_worker(struct work_queue *q);
static void update_max_worker(struct work_queue *q, struct work_queue_worker *w);
static void push_task_to_ready_list( struct work_queue *q, struct work_queue_task *t );
/* returns old state */
static work_queue_task_state_t change_task_state( struct work_queue *q, struct work_queue_task *t, work_queue_task_state_t new_state);
const char *task_state_str(work_queue_task_state_t state);
const char *task_result_str(work_queue_result_t result);
/* 1, 0 whether t is in state */
static int task_state_is( struct work_queue *q, uint64_t taskid, work_queue_task_state_t state);
/* pointer to first task found with state. NULL if no such task */
static struct work_queue_task *task_state_any(struct work_queue *q, work_queue_task_state_t state);
/* number of tasks with state */
static int task_state_count( struct work_queue *q, const char *category, work_queue_task_state_t state);
/* number of tasks with the resource allocation request */
static int task_request_count( struct work_queue *q, const char *category, category_allocation_t request);
static work_queue_result_code_t get_result(struct work_queue *q, struct work_queue_worker *w, const char *line);
static work_queue_result_code_t get_available_results(struct work_queue *q, struct work_queue_worker *w);
static int update_task_result(struct work_queue_task *t, work_queue_result_t new_result);
static void process_data_index( struct work_queue *q, struct work_queue_worker *w, time_t stoptime );
static work_queue_msg_code_t process_http_request( struct work_queue *q, struct work_queue_worker *w, const char *path, time_t stoptime );
static work_queue_msg_code_t process_workqueue(struct work_queue *q, struct work_queue_worker *w, const char *line);
static work_queue_msg_code_t process_queue_status(struct work_queue *q, struct work_queue_worker *w, const char *line, time_t stoptime);
static work_queue_msg_code_t process_resource(struct work_queue *q, struct work_queue_worker *w, const char *line);
static work_queue_msg_code_t process_feature(struct work_queue *q, struct work_queue_worker *w, const char *line);
static struct jx * queue_to_jx( struct work_queue *q, struct link *foreman_uplink );
static struct jx * queue_lean_to_jx( struct work_queue *q, struct link *foreman_uplink );
char *work_queue_monitor_wrap(struct work_queue *q, struct work_queue_worker *w, struct work_queue_task *t, struct rmsummary *limits);
const struct rmsummary *task_max_resources(struct work_queue *q, struct work_queue_task *t);
const struct rmsummary *task_min_resources(struct work_queue *q, struct work_queue_task *t);
void work_queue_accumulate_task(struct work_queue *q, struct work_queue_task *t);
struct category *work_queue_category_lookup_or_create(struct work_queue *q, const char *name);
static void write_transaction(struct work_queue *q, const char *str);
static void write_transaction_task(struct work_queue *q, struct work_queue_task *t);
static void write_transaction_category(struct work_queue *q, struct category *c);
static void write_transaction_worker(struct work_queue *q, struct work_queue_worker *w, int leaving, worker_disconnect_reason reason_leaving);
static void write_transaction_worker_resources(struct work_queue *q, struct work_queue_worker *w);
/** Clone a @ref work_queue_file
This performs a deep copy of the file struct.
@param file The file to clone.
@return A newly allocated file.
*/
static struct work_queue_file *work_queue_file_clone(const struct work_queue_file *file);
/** Clone a list of @ref work_queue_file structs
This performs a deep copy of the file list.
@param list The list to clone.
@return A newly allocated list of files.
*/
static struct list *work_queue_task_file_list_clone(struct list *list);
/** Write manager's resources to resource summary file and close the file **/
void work_queue_disable_monitoring(struct work_queue *q);
/******************************************************/
/********** work_queue internal functions *************/
/******************************************************/
static int64_t overcommitted_resource_total(struct work_queue *q, int64_t total, int cores_flag) {
int64_t r = 0;
if(total != 0)
{
r = ceil(total * q->asynchrony_multiplier);
if(cores_flag)
{
r += q->asynchrony_modifier;
}
}
return r;
}
//Returns count of workers according to type
static int count_workers(struct work_queue *q, int type) {
struct work_queue_worker *w;
char* id;
int count = 0;
hash_table_firstkey(q->worker_table);
while(hash_table_nextkey(q->worker_table, &id, (void**)&w)) {
if(w->type & type) {
count++;
}
}
return count;
}
//Returns count of workers that are available to run tasks.
static int available_workers(struct work_queue *q) {
struct work_queue_worker *w;
char* id;
int available_workers = 0;
hash_table_firstkey(q->worker_table);
while(hash_table_nextkey(q->worker_table, &id, (void**)&w)) {
if(strcmp(w->hostname, "unknown") != 0) {
if(overcommitted_resource_total(q, w->resources->cores.total, 1) > w->resources->cores.inuse || w->resources->disk.total > w->resources->disk.inuse || overcommitted_resource_total(q, w->resources->memory.total, 0) > w->resources->memory.inuse){
available_workers++;
}
}
}
return available_workers;
}
//Returns count of workers that are running at least 1 task.
static int workers_with_tasks(struct work_queue *q) {
struct work_queue_worker *w;
char* id;
int workers_with_tasks = 0;
hash_table_firstkey(q->worker_table);
while(hash_table_nextkey(q->worker_table, &id, (void**)&w)) {
if(strcmp(w->hostname, "unknown")){
if(itable_size(w->current_tasks)){
workers_with_tasks++;
}
}
}
return workers_with_tasks;
}
static void log_queue_stats(struct work_queue *q)
{
struct work_queue_stats s;
work_queue_get_stats(q, &s);
debug(D_WQ, "workers connections -- known: %d, connecting: %d, available: %d.",
s.workers_connected,
s.workers_init,
available_workers(q));
if(!q->logfile)
return;
buffer_t B;
buffer_init(&B);
buffer_printf(&B, "%" PRIu64, timestamp_get());
/* Stats for the current state of workers: */
buffer_printf(&B, " %d", s.workers_connected);
buffer_printf(&B, " %d", s.workers_init);
buffer_printf(&B, " %d", s.workers_idle);
buffer_printf(&B, " %d", s.workers_busy);
buffer_printf(&B, " %d", s.workers_able);
	/* Cumulative stats for workers: */
buffer_printf(&B, " %d", s.workers_joined);
buffer_printf(&B, " %d", s.workers_removed);
buffer_printf(&B, " %d", s.workers_released);
buffer_printf(&B, " %d", s.workers_idled_out);
buffer_printf(&B, " %d", s.workers_fast_aborted);
buffer_printf(&B, " %d", s.workers_blocked);
buffer_printf(&B, " %d", s.workers_lost);
/* Stats for the current state of tasks: */
buffer_printf(&B, " %d", s.tasks_waiting);
buffer_printf(&B, " %d", s.tasks_on_workers);
buffer_printf(&B, " %d", s.tasks_running);
buffer_printf(&B, " %d", s.tasks_with_results);
	/* Cumulative stats for tasks: */
buffer_printf(&B, " %d", s.tasks_submitted);
buffer_printf(&B, " %d", s.tasks_dispatched);
buffer_printf(&B, " %d", s.tasks_done);
buffer_printf(&B, " %d", s.tasks_failed);
buffer_printf(&B, " %d", s.tasks_cancelled);
buffer_printf(&B, " %d", s.tasks_exhausted_attempts);
	/* Manager time statistics: */
buffer_printf(&B, " %" PRId64, s.time_when_started);
buffer_printf(&B, " %" PRId64, s.time_send);
buffer_printf(&B, " %" PRId64, s.time_receive);
buffer_printf(&B, " %" PRId64, s.time_send_good);
buffer_printf(&B, " %" PRId64, s.time_receive_good);
buffer_printf(&B, " %" PRId64, s.time_status_msgs);
buffer_printf(&B, " %" PRId64, s.time_internal);
buffer_printf(&B, " %" PRId64, s.time_polling);
buffer_printf(&B, " %" PRId64, s.time_application);
/* Workers time statistics: */
buffer_printf(&B, " %" PRId64, s.time_workers_execute);
buffer_printf(&B, " %" PRId64, s.time_workers_execute_good);
buffer_printf(&B, " %" PRId64, s.time_workers_execute_exhaustion);
/* BW statistics */
buffer_printf(&B, " %" PRId64, s.bytes_sent);
buffer_printf(&B, " %" PRId64, s.bytes_received);
buffer_printf(&B, " %f", s.bandwidth);
/* resources statistics */
buffer_printf(&B, " %d", s.capacity_tasks);
buffer_printf(&B, " %d", s.capacity_cores);
buffer_printf(&B, " %d", s.capacity_memory);
buffer_printf(&B, " %d", s.capacity_disk);
buffer_printf(&B, " %d", s.capacity_instantaneous);
buffer_printf(&B, " %d", s.capacity_weighted);
buffer_printf(&B, " %f", s.manager_load);
buffer_printf(&B, " %" PRId64, s.total_cores);
buffer_printf(&B, " %" PRId64, s.total_memory);
buffer_printf(&B, " %" PRId64, s.total_disk);
buffer_printf(&B, " %" PRId64, s.committed_cores);
buffer_printf(&B, " %" PRId64, s.committed_memory);
buffer_printf(&B, " %" PRId64, s.committed_disk);
buffer_printf(&B, " %" PRId64, s.max_cores);
buffer_printf(&B, " %" PRId64, s.max_memory);
buffer_printf(&B, " %" PRId64, s.max_disk);
buffer_printf(&B, " %" PRId64, s.min_cores);
buffer_printf(&B, " %" PRId64, s.min_memory);
buffer_printf(&B, " %" PRId64, s.min_disk);
fprintf(q->logfile, "%s\n", buffer_tostring(&B));
buffer_free(&B);
}
static void link_to_hash_key(struct link *link, char *key)
{
sprintf(key, "0x%p", link);
}
/**
* This function sends a message to the worker and records the time the message is
* successfully sent. This timestamp is used to determine when to send keepalive checks.
*/
__attribute__ (( format(printf,3,4) ))
static int send_worker_msg( struct work_queue *q, struct work_queue_worker *w, const char *fmt, ... )
{
va_list va;
time_t stoptime;
buffer_t B[1];
buffer_init(B);
buffer_abortonfailure(B, 1);
buffer_max(B, WORK_QUEUE_LINE_MAX);
va_start(va, fmt);
buffer_putvfstring(B, fmt, va);
va_end(va);
debug(D_WQ, "tx to %s (%s): %s", w->hostname, w->addrport, buffer_tostring(B));
//If foreman, then we wait until foreman gives the manager some attention.
if(w->type == WORKER_TYPE_FOREMAN)
stoptime = time(0) + q->long_timeout;
else
stoptime = time(0) + q->short_timeout;
int result = link_putlstring(w->link, buffer_tostring(B), buffer_pos(B), stoptime);
buffer_free(B);
return result;
}
void work_queue_broadcast_message(struct work_queue *q, const char *msg) {
if(!q)
return;
struct work_queue_worker *w;
char* id;
hash_table_firstkey(q->worker_table);
while(hash_table_nextkey(q->worker_table, &id, (void**)&w)) {
send_worker_msg(q, w, "%s", msg);
}
}
work_queue_msg_code_t process_name(struct work_queue *q, struct work_queue_worker *w, char *line)
{
debug(D_WQ, "Sending project name to worker (%s)", w->addrport);
//send project name (q->name) if there is one. otherwise send blank line
send_worker_msg(q, w, "%s\n", q->name ? q->name : "");
return MSG_PROCESSED;
}
work_queue_msg_code_t advertise_tlq_url(struct work_queue *q, struct work_queue_worker *w, char *line)
{
//attempt to find local TLQ server to retrieve manager URL
if(q->tlq_port && q->debug_path && !q->tlq_url) {
debug(D_TLQ, "looking up manager TLQ URL");
time_t config_stoptime = time(0) + 10;
q->tlq_url = tlq_config_url(q->tlq_port, q->debug_path, config_stoptime);
if(q->tlq_url) debug(D_TLQ, "set manager TLQ URL: %s", q->tlq_url);
else debug(D_TLQ, "error setting manager TLQ URL");
}
else if(q->tlq_port && !q->debug_path && !q->tlq_url) debug(D_TLQ, "cannot get manager TLQ URL: no debug log path set");
char worker_url[WORK_QUEUE_LINE_MAX];
int n = sscanf(line, "tlq %s", worker_url);
if(n != 1) debug(D_TLQ, "empty TLQ URL received from worker (%s)", w->addrport);
else debug(D_TLQ, "received worker (%s) TLQ URL %s", w->addrport, worker_url);
//send manager TLQ URL if there is one
if(q->tlq_url) {
debug(D_TLQ, "sending manager TLQ URL to worker (%s)", w->addrport);
send_worker_msg(q, w, "tlq %s\n", q->tlq_url);
}
return MSG_PROCESSED;
}
work_queue_msg_code_t process_info(struct work_queue *q, struct work_queue_worker *w, char *line)
{
char field[WORK_QUEUE_LINE_MAX];
char value[WORK_QUEUE_LINE_MAX];
int n = sscanf(line,"info %s %[^\n]", field, value);
if(n != 2)
return MSG_FAILURE;
if(string_prefix_is(field, "workers_joined")) {
w->stats->workers_joined = atoll(value);
} else if(string_prefix_is(field, "workers_removed")) {
w->stats->workers_removed = atoll(value);
} else if(string_prefix_is(field, "time_send")) {
w->stats->time_send = atoll(value);
} else if(string_prefix_is(field, "time_receive")) {
w->stats->time_receive = atoll(value);
} else if(string_prefix_is(field, "time_execute")) {
w->stats->time_workers_execute = atoll(value);
} else if(string_prefix_is(field, "bytes_sent")) {
w->stats->bytes_sent = atoll(value);
} else if(string_prefix_is(field, "bytes_received")) {
w->stats->bytes_received = atoll(value);
} else if(string_prefix_is(field, "tasks_waiting")) {
w->stats->tasks_waiting = atoll(value);
} else if(string_prefix_is(field, "tasks_running")) {
w->stats->tasks_running = atoll(value);
} else if(string_prefix_is(field, "idle-disconnecting")) {
remove_worker(q, w, WORKER_DISCONNECT_IDLE_OUT);
q->stats->workers_idled_out++;
} else if(string_prefix_is(field, "end_of_resource_update")) {
count_worker_resources(q, w);
write_transaction_worker_resources(q, w);
} else if(string_prefix_is(field, "worker-id")) {
free(w->workerid);
w->workerid = xxstrdup(value);
write_transaction_worker(q, w, 0, 0);
}
//Note we always mark info messages as processed, as they are optional.
return MSG_PROCESSED;
}
/**
* This function receives a message from worker and records the time a message is successfully
* received. This timestamp is used in keepalive timeout computations.
*/
static work_queue_msg_code_t recv_worker_msg(struct work_queue *q, struct work_queue_worker *w, char *line, size_t length )
{
time_t stoptime;
//If foreman, then we wait until foreman gives the manager some attention.
if(w->type == WORKER_TYPE_FOREMAN)
stoptime = time(0) + q->long_timeout;
else
stoptime = time(0) + q->short_timeout;
int result = link_readline(w->link, line, length, stoptime);
if (result <= 0) {
return MSG_FAILURE;
}
w->last_msg_recv_time = timestamp_get();
debug(D_WQ, "rx from %s (%s): %s", w->hostname, w->addrport, line);
char path[length];
// Check for status updates that can be consumed here.
if(string_prefix_is(line, "alive")) {
result = MSG_PROCESSED;
} else if(string_prefix_is(line, "workqueue")) {
result = process_workqueue(q, w, line);
} else if (string_prefix_is(line,"queue_status") || string_prefix_is(line, "worker_status") || string_prefix_is(line, "task_status") || string_prefix_is(line, "wable_status") || string_prefix_is(line, "resources_status")) {
result = process_queue_status(q, w, line, stoptime);
} else if (string_prefix_is(line, "available_results")) {
hash_table_insert(q->workers_with_available_results, w->hashkey, w);
result = MSG_PROCESSED;
} else if (string_prefix_is(line, "resource")) {
result = process_resource(q, w, line);
} else if (string_prefix_is(line, "feature")) {
result = process_feature(q, w, line);
} else if (string_prefix_is(line, "auth")) {
debug(D_WQ|D_NOTICE,"worker (%s) is attempting to use a password, but I do not have one.",w->addrport);
result = MSG_FAILURE;
} else if (string_prefix_is(line,"ready")) {
debug(D_WQ|D_NOTICE,"worker (%s) is an older worker that is not compatible with this manager.",w->addrport);
result = MSG_FAILURE;
} else if (string_prefix_is(line, "name")) {
result = process_name(q, w, line);
} else if (string_prefix_is(line, "info")) {
result = process_info(q, w, line);
} else if (string_prefix_is(line, "tlq")) {
result = advertise_tlq_url(q, w, line);
} else if( sscanf(line,"GET %s HTTP/%*d.%*d",path)==1) {
result = process_http_request(q,w,path,stoptime);
} else {
// Message is not a status update: return it to the user.
result = MSG_NOT_PROCESSED;
}
return result;
}
/*
Call recv_worker_msg and silently retry if the result indicates
an asynchronous update message like 'keepalive' or 'resource'.
*/
work_queue_msg_code_t recv_worker_msg_retry( struct work_queue *q, struct work_queue_worker *w, char *line, int length )
{
work_queue_msg_code_t result = MSG_PROCESSED;
do {
result = recv_worker_msg(q, w,line,length);
} while(result == MSG_PROCESSED);
return result;
}
static double get_queue_transfer_rate(struct work_queue *q, char **data_source)
{
double queue_transfer_rate; // bytes per second
int64_t q_total_bytes_transferred = q->stats->bytes_sent + q->stats->bytes_received;
timestamp_t q_total_transfer_time = q->stats->time_send + q->stats->time_receive;
// Note q_total_transfer_time is timestamp_t with units of microseconds.
if(q_total_transfer_time>1000000) {
queue_transfer_rate = 1000000.0 * q_total_bytes_transferred / q_total_transfer_time;
if (data_source) {
*data_source = xxstrdup("overall queue");
}
} else {
queue_transfer_rate = q->default_transfer_rate;
if (data_source) {
*data_source = xxstrdup("conservative default");
}
}
return queue_transfer_rate;
}
/*
Select an appropriate timeout value for the transfer of a certain number of bytes.
We do not know in advance how fast the system will perform.
So do this by starting with an assumption of bandwidth taken from the worker,
from the queue, or from a (slow) default number, depending on what information is available.
The timeout is chosen to be a multiple of the expected transfer time from the assumed bandwidth.
The overall effect is to reject transfers that are 10x slower than what has been seen before.
Two exceptions are made:
- The transfer time cannot be below a configurable minimum time.
- A foreman must have a high minimum, because its attention is divided
between the manager and the workers that it serves.
*/
static int get_transfer_wait_time(struct work_queue *q, struct work_queue_worker *w, struct work_queue_task *t, int64_t length)
{
double avg_transfer_rate; // bytes per second
char *data_source;
if(w->total_transfer_time>1000000) {
// Note w->total_transfer_time is timestamp_t with units of microseconds.
avg_transfer_rate = 1000000 * w->total_bytes_transferred / w->total_transfer_time;
data_source = xxstrdup("worker's observed");
} else {
avg_transfer_rate = get_queue_transfer_rate(q, &data_source);
}
double tolerable_transfer_rate = avg_transfer_rate / q->transfer_outlier_factor; // bytes per second
int timeout = length / tolerable_transfer_rate;
if(w->type == WORKER_TYPE_FOREMAN) {
// A foreman must have a much larger minimum timeout, b/c it does not respond immediately to the manager.
timeout = MAX(q->foreman_transfer_timeout,timeout);
} else {
		// An ordinary worker has a lower minimum timeout b/c it responds immediately to the manager.
timeout = MAX(q->minimum_transfer_timeout,timeout);
}
/* Don't bother printing anything for transfers of less than 1MB, to avoid excessive output. */
if( length >= 1048576 ) {
debug(D_WQ,"%s (%s) using %s average transfer rate of %.2lf MB/s\n", w->hostname, w->addrport, data_source, avg_transfer_rate/MEGABYTE);
debug(D_WQ, "%s (%s) will try up to %d seconds to transfer this %.2lf MB file.", w->hostname, w->addrport, timeout, length/1000000.0);
}
free(data_source);
return timeout;
}
void update_catalog(struct work_queue *q, struct link *foreman_uplink, int force_update )
{
// Only advertise if we have a name.
if(!q->name) return;
// Only advertise every WORK_QUEUE_UPDATE_INTERVAL seconds.
if(!force_update && (time(0) - q->catalog_last_update_time) < WORK_QUEUE_UPDATE_INTERVAL)
return;
// If host and port are not set, pick defaults.
if(!q->catalog_hosts) q->catalog_hosts = xxstrdup(CATALOG_HOST);
// Generate the manager status in a jx expression, and print it to a buffer.
struct jx *j = queue_to_jx(q,foreman_uplink);
char *str = jx_print_string(j);
// Send the buffer.
debug(D_WQ, "Advertising manager status to the catalog server(s) at %s ...", q->catalog_hosts);
if(!catalog_query_send_update_conditional(q->catalog_hosts, str)) {
// If the send failed b/c the buffer is too big, send the lean version instead.
struct jx *lj = queue_lean_to_jx(q,foreman_uplink);
char *lstr = jx_print_string(lj);
catalog_query_send_update(q->catalog_hosts,lstr);
free(lstr);
jx_delete(lj);
}
// Clean up.
free(str);
jx_delete(j);
q->catalog_last_update_time = time(0);
}
static void clean_task_state(struct work_queue_task *t) {
t->time_when_commit_start = 0;
t->time_when_commit_end = 0;
t->time_when_retrieval = 0;
t->time_workers_execute_last = 0;
t->bytes_sent = 0;
t->bytes_received = 0;
t->bytes_transferred = 0;
if(t->output) {
free(t->output);
t->output = NULL;
}
if(t->hostname) {
free(t->hostname);
t->hostname = NULL;
}
if(t->host) {
free(t->host);
t->host = NULL;
}
/* If the result is never updated, then it is marked as a failure. */
t->result = WORK_QUEUE_RESULT_UNKNOWN;
}
static void cleanup_worker(struct work_queue *q, struct work_queue_worker *w)
{
char *key, *value;
struct work_queue_task *t;
struct rmsummary *r;
uint64_t taskid;
if(!q || !w) return;
hash_table_firstkey(w->current_files);
while(hash_table_nextkey(w->current_files, &key, (void **) &value)) {
hash_table_remove(w->current_files, key);
free(value);
hash_table_firstkey(w->current_files);
}
itable_firstkey(w->current_tasks);
while(itable_nextkey(w->current_tasks, &taskid, (void **)&t)) {
if (t->time_when_commit_end >= t->time_when_commit_start) {
timestamp_t delta_time = timestamp_get() - t->time_when_commit_end;
t->time_workers_execute_failure += delta_time;
t->time_workers_execute_all += delta_time;
}
clean_task_state(t);
if(t->max_retries > 0 && (t->try_count >= t->max_retries)) {
update_task_result(t, WORK_QUEUE_RESULT_MAX_RETRIES);
reap_task_from_worker(q, w, t, WORK_QUEUE_TASK_RETRIEVED);
} else {
reap_task_from_worker(q, w, t, WORK_QUEUE_TASK_READY);
}
itable_firstkey(w->current_tasks);
}
itable_firstkey(w->current_tasks_boxes);
while(itable_nextkey(w->current_tasks_boxes, &taskid, (void **) &r)) {
rmsummary_delete(r);
}
itable_clear(w->current_tasks);
itable_clear(w->current_tasks_boxes);
w->finished_tasks = 0;
}
#define accumulate_stat(qs, ws, field) (qs)->field += (ws)->field
static void record_removed_worker_stats(struct work_queue *q, struct work_queue_worker *w)
{
struct work_queue_stats *qs = q->stats_disconnected_workers;
struct work_queue_stats *ws = w->stats;
accumulate_stat(qs, ws, workers_joined);
accumulate_stat(qs, ws, workers_removed);
accumulate_stat(qs, ws, workers_released);
accumulate_stat(qs, ws, workers_idled_out);
accumulate_stat(qs, ws, workers_fast_aborted);
accumulate_stat(qs, ws, workers_blocked);
accumulate_stat(qs, ws, workers_lost);
accumulate_stat(qs, ws, time_send);
accumulate_stat(qs, ws, time_receive);
accumulate_stat(qs, ws, time_workers_execute);
accumulate_stat(qs, ws, bytes_sent);
accumulate_stat(qs, ws, bytes_received);
//Count all the workers joined as removed.
qs->workers_removed = ws->workers_joined;
}
static void remove_worker(struct work_queue *q, struct work_queue_worker *w, worker_disconnect_reason reason)
{
if(!q || !w) return;
debug(D_WQ, "worker %s (%s) removed", w->hostname, w->addrport);
if(w->type == WORKER_TYPE_WORKER || w->type == WORKER_TYPE_FOREMAN) {
q->stats->workers_removed++;
}
write_transaction_worker(q, w, 1, reason);
cleanup_worker(q, w);
hash_table_remove(q->worker_table, w->hashkey);
hash_table_remove(q->workers_with_available_results, w->hashkey);
record_removed_worker_stats(q, w);
if(w->link)
link_close(w->link);
itable_delete(w->current_tasks);
itable_delete(w->current_tasks_boxes);
hash_table_delete(w->current_files);
work_queue_resources_delete(w->resources);
free(w->workerid);
if(w->features)
hash_table_delete(w->features);
free(w->stats);
free(w->hostname);
free(w->os);
free(w->arch);
free(w->version);
free(w);
/* update the largest worker seen */
find_max_worker(q);
debug(D_WQ, "%d workers connected in total now", count_workers(q, WORKER_TYPE_WORKER | WORKER_TYPE_FOREMAN));
}
static int release_worker(struct work_queue *q, struct work_queue_worker *w)
{
if(!w) return 0;
send_worker_msg(q,w,"release\n");
remove_worker(q, w, WORKER_DISCONNECT_EXPLICIT);
q->stats->workers_released++;
return 1;
}
static void add_worker(struct work_queue *q)
{
struct link *link;
struct work_queue_worker *w;
char addr[LINK_ADDRESS_MAX];
int port;
link = link_accept(q->manager_link, time(0) + q->short_timeout);
if(!link) return;
link_keepalive(link, 1);
link_tune(link, LINK_TUNE_INTERACTIVE);
if(!link_address_remote(link, addr, &port)) {
link_close(link);
return;
}
debug(D_WQ,"worker %s:%d connected",addr,port);
if(q->password) {
debug(D_WQ,"worker %s:%d authenticating",addr,port);
if(!link_auth_password(link,q->password,time(0)+q->short_timeout)) {
debug(D_WQ|D_NOTICE,"worker %s:%d presented the wrong password",addr,port);
link_close(link);
return;
}
}
w = malloc(sizeof(*w));
if(!w) {
debug(D_NOTICE, "Cannot allocate memory for worker %s:%d.", addr, port);
link_close(link);
return;
}
memset(w, 0, sizeof(*w));
w->hostname = strdup("unknown");
w->os = strdup("unknown");
w->arch = strdup("unknown");
w->version = strdup("unknown");
w->type = WORKER_TYPE_UNKNOWN;
w->draining = 0;
w->link = link;
w->current_files = hash_table_create(0, 0);
w->current_tasks = itable_create(0);
w->current_tasks_boxes = itable_create(0);
w->finished_tasks = 0;
w->start_time = timestamp_get();
w->last_update_msg_time = w->start_time;
w->resources = work_queue_resources_create();
w->workerid = NULL;
w->stats = calloc(1, sizeof(struct work_queue_stats));
link_to_hash_key(link, w->hashkey);
sprintf(w->addrport, "%s:%d", addr, port);
hash_table_insert(q->worker_table, w->hashkey, w);
return;
}
/*
Get a single file from a remote worker.
*/
static work_queue_result_code_t get_file( struct work_queue *q, struct work_queue_worker *w, struct work_queue_task *t, const char *local_name, int64_t length, int64_t * total_bytes)
{
// If a bandwidth limit is in effect, choose the effective stoptime.
timestamp_t effective_stoptime = 0;
if(q->bandwidth) {
effective_stoptime = (length/q->bandwidth)*1000000 + timestamp_get();
}
// Choose the actual stoptime.
time_t stoptime = time(0) + get_transfer_wait_time(q, w, t, length);
// If necessary, create parent directories of the file.
char dirname[WORK_QUEUE_LINE_MAX];
path_dirname(local_name,dirname);
if(strchr(local_name,'/')) {
if(!create_dir(dirname, 0777)) {
debug(D_WQ, "Could not create directory - %s (%s)", dirname, strerror(errno));
link_soak(w->link, length, stoptime);
return WQ_APP_FAILURE;
}
}
// Create the local file.
debug(D_WQ, "Receiving file %s (size: %"PRId64" bytes) from %s (%s) ...", local_name, length, w->addrport, w->hostname);
// Check if there is space for incoming file at manager
if(!check_disk_space_for_filesize(dirname, length, disk_avail_threshold)) {
debug(D_WQ, "Could not recieve file %s, not enough disk space (%"PRId64" bytes needed)\n", local_name, length);
return WQ_APP_FAILURE;
}
int fd = open(local_name, O_WRONLY | O_TRUNC | O_CREAT, 0777);
if(fd < 0) {
debug(D_NOTICE, "Cannot open file %s for writing: %s", local_name, strerror(errno));
link_soak(w->link, length, stoptime);
return WQ_APP_FAILURE;
}
// Write the data on the link to file.
int64_t actual = link_stream_to_fd(w->link, fd, length, stoptime);
close(fd);
if(actual != length) {
debug(D_WQ, "Received item size (%"PRId64") does not match the expected size - %"PRId64" bytes.", actual, length);
unlink(local_name);
return WQ_WORKER_FAILURE;
}
*total_bytes += length;
// If the transfer was too fast, slow things down.
timestamp_t current_time = timestamp_get();
if(effective_stoptime && effective_stoptime > current_time) {
usleep(effective_stoptime - current_time);
}
return WQ_SUCCESS;
}
/*
This function implements the recursive get protocol.
The manager sends a single get message, then the worker
responds with a continuous stream of dir and file messages
that indicate the entire contents of the directory.
This makes it efficient to move deep directory hierarchies with
high throughput and low latency.
*/
static work_queue_result_code_t get_file_or_directory( struct work_queue *q, struct work_queue_worker *w, struct work_queue_task *t, const char *remote_name, const char *local_name, int64_t * total_bytes)
{
// Remember the length of the specified remote path so it can be chopped from the result.
int remote_name_len = strlen(remote_name);
// Send the name of the file/dir name to fetch
debug(D_WQ, "%s (%s) sending back %s to %s", w->hostname, w->addrport, remote_name, local_name);
send_worker_msg(q,w, "get %s 1\n",remote_name);
work_queue_result_code_t result = WQ_SUCCESS; //return success unless something fails below
char *tmp_remote_path = NULL;
char *length_str = NULL;
char *errnum_str = NULL;
// Process the recursive file/dir responses as they are sent.
while(1) {
char line[WORK_QUEUE_LINE_MAX];
free(tmp_remote_path);
free(length_str);
tmp_remote_path = NULL;
length_str = NULL;
work_queue_msg_code_t mcode;
mcode = recv_worker_msg_retry(q, w, line, sizeof(line));
if(mcode!=MSG_NOT_PROCESSED) {
result = WQ_WORKER_FAILURE;
break;
}
if(pattern_match(line, "^dir (%S+) (%d+)$", &tmp_remote_path, &length_str) >= 0) {
char *tmp_local_name = string_format("%s%s",local_name, (tmp_remote_path + remote_name_len));
int result_dir = create_dir(tmp_local_name,0777);
if(!result_dir) {
debug(D_WQ, "Could not create directory - %s (%s)", tmp_local_name, strerror(errno));
result = WQ_APP_FAILURE;
free(tmp_local_name);
break;
}
free(tmp_local_name);
} else if(pattern_match(line, "^file (.+) (%d+)$", &tmp_remote_path, &length_str) >= 0) {
int64_t length = strtoll(length_str, NULL, 10);
char *tmp_local_name = string_format("%s%s",local_name, (tmp_remote_path + remote_name_len));
result = get_file(q,w,t,tmp_local_name,length,total_bytes);
free(tmp_local_name);
//Return if worker failure. Else wait for end message from worker.
if(result == WQ_WORKER_FAILURE) break;
} else if(pattern_match(line, "^missing (.+) (%d+)$", &tmp_remote_path, &errnum_str) >= 0) {
// If the output file is missing, we make a note of that in the task result,
// but we continue and consider the transfer a 'success' so that other
// outputs are transferred and the task is given back to the caller.
int errnum = atoi(errnum_str);
debug(D_WQ, "%s (%s): could not access requested file %s (%s)",w->hostname,w->addrport,remote_name,strerror(errnum));
update_task_result(t, WORK_QUEUE_RESULT_OUTPUT_MISSING);
} else if(!strcmp(line,"end")) {
// We have to return on receiving an end message.
if (result == WQ_SUCCESS) {
return result;
} else {
break;
}
} else {
debug(D_WQ, "%s (%s): sent invalid response to get: %s",w->hostname,w->addrport,line);
result = WQ_WORKER_FAILURE; //signal sys-level failure
break;
}
}
free(tmp_remote_path);
free(length_str);
// If we failed to *transfer* the output file, then that is a hard
// failure which causes this function to return failure and the task
// to be returned to the queue to be attempted elsewhere.
debug(D_WQ, "%s (%s) failed to return output %s to %s", w->addrport, w->hostname, remote_name, local_name);
if(result == WQ_APP_FAILURE) {
update_task_result(t, WORK_QUEUE_RESULT_OUTPUT_MISSING);
}
return result;
}
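/*
For illustration, a typical exchange driven by the loop above might
look like this (paths and sizes are hypothetical; the message forms
match the patterns parsed above):

	manager: get outdir 1
	worker:  dir outdir 0
	worker:  file outdir/results.txt 1048576
	worker:  <1048576 bytes of raw file data>
	worker:  end

A "missing <path> <errno>" line may appear in place of a file message
when the worker cannot access the requested item.
*/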
/*
For a given task and file, generate the name under which the file
should be stored in the remote cache directory.
The basic strategy is to construct a name that is unique to the
namespace from where the file is drawn, so that tasks sharing
the same input file can share the same copy.
In the common case of files, the cached name is based on the
hash of the local path, with the basename of the local path
included simply to assist with debugging.
In each of the other file types, a similar approach is taken,
including a hash and a name where one is known, or another
unique identifier where no name is available.
*/
char *make_cached_name( const struct work_queue_file *f )
{
static unsigned int file_count = 0;
file_count++;
/* Default of payload is remote name (needed only for directories) */
char *payload = f->payload ? f->payload : f->remote_name;
unsigned char digest[MD5_DIGEST_LENGTH];
char payload_enc[PATH_MAX];
if(f->type == WORK_QUEUE_BUFFER) {
//dummy digest for buffers
md5_buffer("buffer", 6, digest);
} else {
md5_buffer(payload,strlen(payload),digest);
url_encode(path_basename(payload), payload_enc, PATH_MAX);
}
/* 0 for cache files, file_count for non-cache files. With this, non-cache
* files cannot be shared among tasks, and can be safely deleted once a
* task finishes. */
unsigned int cache_file_id = 0;
if(!(f->flags & WORK_QUEUE_CACHE)) {
cache_file_id = file_count;
}
switch(f->type) {
case WORK_QUEUE_FILE:
case WORK_QUEUE_DIRECTORY:
return string_format("file-%d-%s-%s", cache_file_id, md5_string(digest), payload_enc);
break;
case WORK_QUEUE_FILE_PIECE:
return string_format("piece-%d-%s-%s-%lld-%lld",cache_file_id, md5_string(digest),payload_enc,(long long)f->offset,(long long)f->piece_length);
break;
case WORK_QUEUE_REMOTECMD:
return string_format("cmd-%d-%s", cache_file_id, md5_string(digest));
break;
case WORK_QUEUE_URL:
return string_format("url-%d-%s", cache_file_id, md5_string(digest));
break;
case WORK_QUEUE_BUFFER:
default:
return string_format("buffer-%d-%s", cache_file_id, md5_string(digest));
break;
}
}
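/*
For illustration (hashes abbreviated): a cacheable input file with
local path "data/input.dat" yields a name like
"file-0-d41d8cd9...-input.dat", where the leading 0 marks it as
shareable across tasks. The same file submitted without
WORK_QUEUE_CACHE instead gets a per-task name such as
"file-42-d41d8cd9...-input.dat", where 42 is the running file count,
so it can be safely deleted when its task completes.
*/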
/*
This function stores an output file from the remote cache directory
to a third-party location, which can be either a remote filesystem
(WORK_QUEUE_FS_PATH) or a command to run (WORK_QUEUE_FS_CMD).
Returns the worker-reported result (1 on success at the worker, 0 on failure),
or WQ_WORKER_FAILURE on an invalid message from the worker.
*/
static int do_thirdput( struct work_queue *q, struct work_queue_worker *w, const char *cached_name, const char *payload, int command )
{
char line[WORK_QUEUE_LINE_MAX];
int result;
send_worker_msg(q,w,"thirdput %d %s %s\n",command,cached_name,payload);
work_queue_msg_code_t mcode;
mcode = recv_worker_msg_retry(q, w, line, WORK_QUEUE_LINE_MAX);
if(mcode!=MSG_NOT_PROCESSED) {
return WQ_WORKER_FAILURE;
}
if(sscanf(line, "thirdput-complete %d", &result)) {
return result;
} else {
debug(D_WQ, "Error: invalid message received (%s)\n", line);
return WQ_WORKER_FAILURE;
}
}
/*
Get a single output file, located at the worker under 'cached_name'.
*/
static work_queue_result_code_t get_output_file( struct work_queue *q, struct work_queue_worker *w, struct work_queue_task *t, struct work_queue_file *f )
{
int64_t total_bytes = 0;
work_queue_result_code_t result = WQ_SUCCESS; //return success unless something fails below.
timestamp_t open_time = timestamp_get();
if(f->flags & WORK_QUEUE_THIRDPUT) {
if(!strcmp(f->cached_name, f->payload)) {
debug(D_WQ, "output file %s already on shared filesystem", f->cached_name);
f->flags |= WORK_QUEUE_PREEXIST;
} else {
result = do_thirdput(q,w,f->cached_name,f->payload,WORK_QUEUE_FS_PATH);
}
} else if(f->type == WORK_QUEUE_REMOTECMD) {
result = do_thirdput(q,w,f->cached_name,f->payload,WORK_QUEUE_FS_CMD);
} else {
result = get_file_or_directory(q, w, t, f->cached_name, f->payload, &total_bytes);
}
timestamp_t close_time = timestamp_get();
timestamp_t sum_time = close_time - open_time;
if(total_bytes>0) {
q->stats->bytes_received += total_bytes;
t->bytes_received += total_bytes;
t->bytes_transferred += total_bytes;
w->total_bytes_transferred += total_bytes;
w->total_transfer_time += sum_time;
debug(D_WQ, "%s (%s) sent %.2lf MB in %.02lfs (%.02lfs MB/s) average %.02lfs MB/s", w->hostname, w->addrport, total_bytes / 1000000.0, sum_time / 1000000.0, (double) total_bytes / sum_time, (double) w->total_bytes_transferred / w->total_transfer_time);
}
// If the transfer was successful, make a record of it in the cache.
if(result == WQ_SUCCESS && f->flags & WORK_QUEUE_CACHE) {
struct stat local_info;
if (stat(f->payload,&local_info) == 0) {
struct stat *remote_info = malloc(sizeof(*remote_info));
if(!remote_info) {
debug(D_NOTICE, "Cannot allocate memory for cache entry for output file %s at %s (%s)", f->payload, w->hostname, w->addrport);
return WQ_APP_FAILURE;
}
memcpy(remote_info, &local_info, sizeof(local_info));
hash_table_insert(w->current_files, f->cached_name, remote_info);
} else {
debug(D_NOTICE, "Cannot stat file %s: %s", f->payload, strerror(errno));
}
}
return result;
}
static work_queue_result_code_t get_output_files( struct work_queue *q, struct work_queue_worker *w, struct work_queue_task *t )
{
struct work_queue_file *f;
work_queue_result_code_t result = WQ_SUCCESS;
if(t->output_files) {
list_first_item(t->output_files);
while((f = list_next_item(t->output_files))) {
int task_succeeded = (t->result==WORK_QUEUE_RESULT_SUCCESS && t->return_status==0);
// skip failure-only files on success
if(f->flags&WORK_QUEUE_FAILURE_ONLY && task_succeeded) continue;
// skip success-only files on failure
if(f->flags&WORK_QUEUE_SUCCESS_ONLY && !task_succeeded) continue;
// otherwise, get the file.
result = get_output_file(q,w,t,f);
//if success or app-level failure, continue to get other files.
//if worker failure, return.
if(result == WQ_WORKER_FAILURE) {
break;
}
}
}
// tell the worker you no longer need that task's output directory.
send_worker_msg(q,w, "kill %d\n",t->taskid);
return result;
}
static work_queue_result_code_t get_monitor_output_file( struct work_queue *q, struct work_queue_worker *w, struct work_queue_task *t )
{
struct work_queue_file *f;
work_queue_result_code_t result = WQ_SUCCESS;
const char *summary_name = RESOURCE_MONITOR_REMOTE_NAME ".summary";
if(t->output_files) {
list_first_item(t->output_files);
while((f = list_next_item(t->output_files))) {
if(!strcmp(summary_name, f->remote_name)) {
result = get_output_file(q,w,t,f);
break;
}
}
}
// tell the worker you no longer need that task's output directory.
send_worker_msg(q,w, "kill %d\n",t->taskid);
return result;
}
static void delete_worker_file( struct work_queue *q, struct work_queue_worker *w, const char *filename, int flags, int except_flags ) {
if(!(flags & except_flags)) {
send_worker_msg(q,w, "unlink %s\n", filename);
hash_table_remove(w->current_files, filename);
}
}
// Sends "unlink file" for every file in the list except those that match one or more of the "except_flags"
static void delete_worker_files( struct work_queue *q, struct work_queue_worker *w, struct list *files, int except_flags ) {
struct work_queue_file *tf;
if(!files) return;
list_first_item(files);
while((tf = list_next_item(files))) {
delete_worker_file(q, w, tf->cached_name, tf->flags, except_flags);
}
}
static void delete_task_output_files(struct work_queue *q, struct work_queue_worker *w, struct work_queue_task *t)
{
delete_worker_files(q, w, t->output_files, 0);
}
static void delete_uncacheable_files( struct work_queue *q, struct work_queue_worker *w, struct work_queue_task *t )
{
delete_worker_files(q, w, t->input_files, WORK_QUEUE_CACHE | WORK_QUEUE_PREEXIST);
delete_worker_files(q, w, t->output_files, WORK_QUEUE_CACHE | WORK_QUEUE_PREEXIST);
}
char *monitor_file_name(struct work_queue *q, struct work_queue_task *t, const char *ext) {
char *dir;
if(t->monitor_output_directory) {
dir = t->monitor_output_directory;
} else if(q->monitor_output_directory) {
dir = q->monitor_output_directory;
} else {
dir = "./";
}
return string_format("%s/" RESOURCE_MONITOR_TASK_LOCAL_NAME "%s",
dir, getpid(), t->taskid, ext ? ext : "");
}
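/*
For illustration: with a queue monitor directory of "monitor", a call
like monitor_file_name(q, t, ".summary") yields a path of the form
"monitor/<local name>.summary", where the local name stem is produced
by the RESOURCE_MONITOR_TASK_LOCAL_NAME template from the manager pid
and the taskid. The directory name here is hypothetical.
*/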
void read_measured_resources(struct work_queue *q, struct work_queue_task *t) {
char *summary = monitor_file_name(q, t, ".summary");
if(t->resources_measured)
rmsummary_delete(t->resources_measured);
t->resources_measured = rmsummary_parse_file_single(summary);
if(t->resources_measured) {
t->resources_measured->category = xxstrdup(t->category);
t->return_status = t->resources_measured->exit_status;
} else {
/* if no resources were measured, then we don't overwrite the return
* status, and we mark the task with an error from monitoring. */
update_task_result(t, WORK_QUEUE_RESULT_RMONITOR_ERROR);
}
free(summary);
}
void resource_monitor_append_report(struct work_queue *q, struct work_queue_task *t)
{
if(q->monitor_mode == MON_DISABLED)
return;
char *summary = monitor_file_name(q, t, ".summary");
if(q->monitor_output_directory) {
int monitor_fd = fileno(q->monitor_file);
struct flock lock;
lock.l_type = F_WRLCK;
lock.l_start = 0;
lock.l_whence = SEEK_SET;
lock.l_len = 0;
fcntl(monitor_fd, F_SETLKW, &lock);
if(!t->resources_measured)
{
fprintf(q->monitor_file, "# Summary for task %d was not available.\n", t->taskid);
}
FILE *fs = fopen(summary, "r");
if(fs) {
copy_stream_to_stream(fs, q->monitor_file);
fclose(fs);
}
fprintf(q->monitor_file, "\n");
lock.l_type = F_UNLCK;
fcntl(monitor_fd, F_SETLK, &lock);
}
/* Remove individual summary file unless it is named specifically. */
int keep = 0;
if(t->monitor_output_directory)
keep = 1;
if(q->monitor_mode & MON_FULL && q->monitor_output_directory)
keep = 1;
if(!keep)
unlink(summary);
free(summary);
}
void resource_monitor_compress_logs(struct work_queue *q, struct work_queue_task *t) {
char *series = monitor_file_name(q, t, ".series");
char *debug_log = monitor_file_name(q, t, ".debug");
char *command = string_format("gzip -9 -q %s %s", series, debug_log);
int status;
int rc = shellcode(command, NULL, NULL, 0, NULL, NULL, &status);
if(rc) {
debug(D_NOTICE, "Could no succesfully compress '%s', and '%s'\n", series, debug_log);
}
free(series);
free(debug_log);
free(command);
}
static void fetch_output_from_worker(struct work_queue *q, struct work_queue_worker *w, int taskid)
{
struct work_queue_task *t;
work_queue_result_code_t result = WQ_SUCCESS;
t = itable_lookup(w->current_tasks, taskid);
if(!t) {
debug(D_WQ, "Failed to find task %d at worker %s (%s).", taskid, w->hostname, w->addrport);
handle_failure(q, w, t, WQ_WORKER_FAILURE);
return;
}
// Start receiving output...
t->time_when_retrieval = timestamp_get();
if(t->result == WORK_QUEUE_RESULT_RESOURCE_EXHAUSTION) {
result = get_monitor_output_file(q,w,t);
} else {
result = get_output_files(q,w,t);
}
if(result != WQ_SUCCESS) {
debug(D_WQ, "Failed to receive output from worker %s (%s).", w->hostname, w->addrport);
handle_failure(q, w, t, result);
}
if(result == WQ_WORKER_FAILURE) {
// Finish receiving output:
t->time_when_done = timestamp_get();
return;
}
delete_uncacheable_files(q,w,t);
/* if q is monitoring, append the task summary to the single
* queue summary, update t->resources_measured, and delete the task summary. */
if(q->monitor_mode) {
read_measured_resources(q, t);
/* Further, if we got debug and series files, gzip them. */
if(q->monitor_mode & MON_FULL)
resource_monitor_compress_logs(q, t);
}
// Finish receiving output.
t->time_when_done = timestamp_get();
work_queue_accumulate_task(q, t);
// At this point, a task is completed.
reap_task_from_worker(q, w, t, WORK_QUEUE_TASK_RETRIEVED);
w->finished_tasks--;
w->total_tasks_complete++;
if(t->result == WORK_QUEUE_RESULT_RESOURCE_EXHAUSTION) {
if(t->resources_measured && t->resources_measured->limits_exceeded) {
struct jx *j = rmsummary_to_json(t->resources_measured->limits_exceeded, 1);
if(j) {
char *str = jx_print_string(j);
debug(D_WQ, "Task %d exhausted resources on %s (%s): %s\n",
t->taskid,
w->hostname,
w->addrport,
str);
free(str);
jx_delete(j);
}
} else {
debug(D_WQ, "Task %d exhausted resources on %s (%s), but not resource usage was available.\n",
t->taskid,
w->hostname,
w->addrport);
}
struct category *c = work_queue_category_lookup_or_create(q, t->category);
category_allocation_t next = category_next_label(c, t->resource_request, /* resource overflow */ 1, t->resources_requested, t->resources_measured);
if(next == CATEGORY_ALLOCATION_ERROR) {
debug(D_WQ, "Task %d failed given max resource exhaustion.\n", t->taskid);
}
else {
debug(D_WQ, "Task %d resubmitted using new resource allocation.\n", t->taskid);
t->resource_request = next;
change_task_state(q, t, WORK_QUEUE_TASK_READY);
return;
}
}
/* print warnings if the task ran for a very short time (under 1s) and exited with a common non-zero status */
if(t->result == WORK_QUEUE_RESULT_SUCCESS && t->time_workers_execute_last < 1000000) {
switch(t->return_status) {
case(126):
warn(D_WQ, "Task %d ran for a very short time and exited with code %d.\n", t->taskid, t->return_status);
warn(D_WQ, "This usually means that the task's command is not an executable,\n");
warn(D_WQ, "or that the worker's scratch directory is on a no-exec partition.\n");
break;
case(127):
warn(D_WQ, "Task %d ran for a very short time and exited with code %d.\n", t->taskid, t->return_status);
warn(D_WQ, "This usually means that the task's command could not be found, or that\n");
warn(D_WQ, "it uses a shared library not available at the worker, or that\n");
warn(D_WQ, "it uses a version of the glibc different than the one at the worker.\n");
break;
case(139):
warn(D_WQ, "Task %d ran for a very short time and exited with code %d.\n", t->taskid, t->return_status);
warn(D_WQ, "This usually means that the task's command had a segmentation fault,\n");
warn(D_WQ, "either because it has a memory access error (segfault), or because\n");
warn(D_WQ, "it uses a version of a shared library different from the one at the worker.\n");
break;
default:
break;
}
}
add_task_report(q, t);
debug(D_WQ, "%s (%s) done in %.02lfs total tasks %lld average %.02lfs",
w->hostname,
w->addrport,
(t->time_when_done - t->time_when_commit_start) / 1000000.0,
(long long) w->total_tasks_complete,
w->total_task_time / w->total_tasks_complete / 1000000.0);
return;
}
/*
Expire tasks in the ready list.
*/
static void expire_waiting_task(struct work_queue *q, struct work_queue_task *t)
{
update_task_result(t, WORK_QUEUE_RESULT_TASK_TIMEOUT);
//add the task to complete list so it is given back to the application.
change_task_state(q, t, WORK_QUEUE_TASK_RETRIEVED);
return;
}
static int expire_waiting_tasks(struct work_queue *q)
{
struct work_queue_task *t;
int expired = 0;
int count;
timestamp_t current_time = timestamp_get();
count = task_state_count(q, NULL, WORK_QUEUE_TASK_READY);
while(count > 0)
{
count--;
t = list_pop_head(q->ready_list);
if(t->resources_requested->end > 0 && (uint64_t) t->resources_requested->end <= current_time)
{
expire_waiting_task(q, t);
expired++;
}
else
{
list_push_tail(q->ready_list, t);
}
}
return expired;
}
/*
This function handles app-level failures. It removes the task from WQ and marks
the task as complete so it is returned to the application.
*/
static void handle_app_failure(struct work_queue *q, struct work_queue_worker *w, struct work_queue_task *t)
{
//remove the task from tables that track dispatched tasks.
//and add the task to complete list so it is given back to the application.
reap_task_from_worker(q, w, t, WORK_QUEUE_TASK_RETRIEVED);
/*If the failure happened after a task execution, we remove all the output
files specified for that task from the worker's cache. This is because the
application may resubmit the task and the resubmitted task may produce
different outputs. */
if(t) {
if(t->time_when_commit_end > 0) {
delete_task_output_files(q,w,t);
}
}
return;
}
static void handle_worker_failure(struct work_queue *q, struct work_queue_worker *w)
{
//WQ failures happen in the manager-worker interactions. In this case, we
//remove the worker and retry the tasks dispatched to it elsewhere.
remove_worker(q, w, WORKER_DISCONNECT_FAILURE);
return;
}
static void handle_failure(struct work_queue *q, struct work_queue_worker *w, struct work_queue_task *t, work_queue_result_code_t fail_type)
{
if(fail_type == WQ_APP_FAILURE) {
handle_app_failure(q, w, t);
} else {
handle_worker_failure(q, w);
}
return;
}
static work_queue_msg_code_t process_workqueue(struct work_queue *q, struct work_queue_worker *w, const char *line)
{
char items[4][WORK_QUEUE_LINE_MAX];
int worker_protocol;
int n = sscanf(line,"workqueue %d %s %s %s %s",&worker_protocol,items[0],items[1],items[2],items[3]);
if(n != 5)
return MSG_FAILURE;
if(worker_protocol!=WORK_QUEUE_PROTOCOL_VERSION) {
debug(D_WQ|D_NOTICE,"worker (%s) is using work queue protocol %d, but I am using protocol %d",w->addrport,worker_protocol,WORK_QUEUE_PROTOCOL_VERSION);
return MSG_FAILURE;
}
if(w->hostname) free(w->hostname);
if(w->os) free(w->os);
if(w->arch) free(w->arch);
if(w->version) free(w->version);
w->hostname = strdup(items[0]);
w->os = strdup(items[1]);
w->arch = strdup(items[2]);
w->version = strdup(items[3]);
if(!strcmp(w->os, "foreman"))
{
w->type = WORKER_TYPE_FOREMAN;
} else {
w->type = WORKER_TYPE_WORKER;
}
q->stats->workers_joined++;
debug(D_WQ, "%d workers are connected in total now", count_workers(q, WORKER_TYPE_WORKER | WORKER_TYPE_FOREMAN));
debug(D_WQ, "%s (%s) running CCTools version %s on %s (operating system) with architecture %s is ready", w->hostname, w->addrport, w->version, w->os, w->arch);
if(cctools_version_cmp(CCTOOLS_VERSION, w->version) != 0) {
debug(D_DEBUG, "Warning: potential worker version mismatch: worker %s (%s) is version %s, and manager is version %s", w->hostname, w->addrport, w->version, CCTOOLS_VERSION);
}
return MSG_PROCESSED;
}
/*
If the manager has requested that a file be watched with WORK_QUEUE_WATCH,
the worker will periodically send back update messages indicating that
the file has been written to. There are a variety of ways in which the
message could be stale (e.g. task was cancelled) so if the message does
not line up with an expected task and file, then we discard it and keep
going.
*/
static work_queue_result_code_t get_update( struct work_queue *q, struct work_queue_worker *w, const char *line )
{
int64_t taskid;
char path[WORK_QUEUE_LINE_MAX];
int64_t offset;
int64_t length;
int n = sscanf(line,"update %"PRId64" %s %"PRId64" %"PRId64,&taskid,path,&offset,&length);
if(n!=4) {
debug(D_WQ,"Invalid message from worker %s (%s): %s", w->hostname, w->addrport, line );
return WQ_WORKER_FAILURE;
}
struct work_queue_task *t = itable_lookup(w->current_tasks,taskid);
if(!t) {
debug(D_WQ,"worker %s (%s) sent output for unassigned task %"PRId64, w->hostname, w->addrport, taskid);
link_soak(w->link,length,time(0)+get_transfer_wait_time(q,w,0,length));
return WQ_SUCCESS;
}
time_t stoptime = time(0) + get_transfer_wait_time(q,w,t,length);
struct work_queue_file *f;
const char *local_name = 0;
list_first_item(t->output_files);
while((f=list_next_item(t->output_files))) {
if(!strcmp(path,f->remote_name)) {
local_name = f->payload;
break;
}
}
if(!local_name) {
debug(D_WQ,"worker %s (%s) sent output for unwatched file %s",w->hostname,w->addrport,path);
link_soak(w->link,length,stoptime);
return WQ_SUCCESS;
}
int fd = open(local_name,O_WRONLY|O_CREAT,0777);
if(fd<0) {
debug(D_WQ,"unable to update watched file %s: %s",local_name,strerror(errno));
link_soak(w->link,length,stoptime);
return WQ_SUCCESS;
}
lseek(fd,offset,SEEK_SET);
link_stream_to_fd(w->link,fd,length,stoptime);
ftruncate(fd,offset+length);
close(fd);
return WQ_SUCCESS;
}
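/*
For illustration, a watched-file update takes the form parsed above,
e.g. (hypothetical values):

	update 17 output.log 4096 512

meaning: write 512 bytes at offset 4096 of the file that task 17
declared as "output.log", with the 512 bytes of raw data following
on the link.
*/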
/*
Failure to store result is treated as success so we continue to retrieve the
output files of the task.
*/
static work_queue_result_code_t get_result(struct work_queue *q, struct work_queue_worker *w, const char *line) {
if(!q || !w || !line)
return WQ_WORKER_FAILURE;
struct work_queue_task *t;
int task_status, exit_status;
uint64_t taskid;
int64_t output_length, retrieved_output_length;
timestamp_t execution_time;
int64_t actual;
timestamp_t observed_execution_time;
timestamp_t effective_stoptime = 0;
time_t stoptime;
//Format: task completion status, exit status (exit code or signal), output length, execution time, taskid
char items[5][WORK_QUEUE_PROTOCOL_FIELD_MAX];
int n = sscanf(line, "result %s %s %s %s %" SCNd64"", items[0], items[1], items[2], items[3], &taskid);
if(n < 5) {
debug(D_WQ, "Invalid message from worker %s (%s): %s", w->hostname, w->addrport, line);
return WQ_WORKER_FAILURE;
}
task_status = atoi(items[0]);
exit_status = atoi(items[1]);
output_length = atoll(items[2]);
t = itable_lookup(w->current_tasks, taskid);
if(!t) {
debug(D_WQ, "Unknown task result from worker %s (%s): no task %" PRId64" assigned to worker. Ignoring result.", w->hostname, w->addrport, taskid);
stoptime = time(0) + get_transfer_wait_time(q, w, 0, output_length);
link_soak(w->link, output_length, stoptime);
return WQ_SUCCESS;
}
if(task_status == WORK_QUEUE_RESULT_FORSAKEN) {
// Delete any input files that are not to be cached.
delete_worker_files(q, w, t->input_files, WORK_QUEUE_CACHE | WORK_QUEUE_PREEXIST);
/* task will be resubmitted, so we do not update any of the execution stats */
reap_task_from_worker(q, w, t, WORK_QUEUE_TASK_READY);
return WQ_SUCCESS;
}
observed_execution_time = timestamp_get() - t->time_when_commit_end;
execution_time = atoll(items[3]);
t->time_workers_execute_last = observed_execution_time > execution_time ? execution_time : observed_execution_time;
t->time_workers_execute_all += t->time_workers_execute_last;
if(task_status == WORK_QUEUE_RESULT_DISK_ALLOC_FULL) {
t->disk_allocation_exhausted = 1;
}
else {
t->disk_allocation_exhausted = 0;
}
if(q->bandwidth) {
effective_stoptime = (output_length/q->bandwidth)*1000000 + timestamp_get();
}
if(output_length <= MAX_TASK_STDOUT_STORAGE) {
retrieved_output_length = output_length;
} else {
retrieved_output_length = MAX_TASK_STDOUT_STORAGE;
fprintf(stderr, "warning: stdout of task %"PRId64" requires %2.2lf GB of storage. This exceeds maximum supported size of %d GB. Only %d GB will be retreived.\n", taskid, ((double) output_length)/MAX_TASK_STDOUT_STORAGE, MAX_TASK_STDOUT_STORAGE/GIGABYTE, MAX_TASK_STDOUT_STORAGE/GIGABYTE);
update_task_result(t, WORK_QUEUE_RESULT_STDOUT_MISSING);
}
t->output = malloc(retrieved_output_length+1);
if(t->output == NULL) {
fprintf(stderr, "error: allocating memory of size %"PRId64" bytes failed for storing stdout of task %"PRId64".\n", retrieved_output_length, taskid);
//drop the entire length of stdout on the link
stoptime = time(0) + get_transfer_wait_time(q, w, t, output_length);
link_soak(w->link, output_length, stoptime);
retrieved_output_length = 0;
update_task_result(t, WORK_QUEUE_RESULT_STDOUT_MISSING);
}
if(retrieved_output_length > 0) {
debug(D_WQ, "Receiving stdout of task %"PRId64" (size: %"PRId64" bytes) from %s (%s) ...", taskid, retrieved_output_length, w->addrport, w->hostname);
//First read the bytes we keep.
stoptime = time(0) + get_transfer_wait_time(q, w, t, retrieved_output_length);
actual = link_read(w->link, t->output, retrieved_output_length, stoptime);
if(actual != retrieved_output_length) {
debug(D_WQ, "Failure: actual received stdout size (%"PRId64" bytes) is different from expected (%"PRId64" bytes).", actual, retrieved_output_length);
t->output[actual] = '\0';
return WQ_WORKER_FAILURE;
}
debug(D_WQ, "Retrieved %"PRId64" bytes from %s (%s)", actual, w->hostname, w->addrport);
//Then read the bytes we need to throw away.
if(output_length > retrieved_output_length) {
debug(D_WQ, "Dropping the remaining %"PRId64" bytes of the stdout of task %"PRId64" since stdout length is limited to %d bytes.\n", (output_length-MAX_TASK_STDOUT_STORAGE), taskid, MAX_TASK_STDOUT_STORAGE);
stoptime = time(0) + get_transfer_wait_time(q, w, t, (output_length-retrieved_output_length));
link_soak(w->link, (output_length-retrieved_output_length), stoptime);
//overwrite the last few bytes of buffer to signal truncated stdout.
char *truncate_msg = string_format("\n>>>>>> WORK QUEUE HAS TRUNCATED THE STDOUT AFTER THIS POINT.\n>>>>>> MAXIMUM OF %d BYTES REACHED, %" PRId64 " BYTES TRUNCATED.", MAX_TASK_STDOUT_STORAGE, output_length - retrieved_output_length);
memcpy(t->output + MAX_TASK_STDOUT_STORAGE - strlen(truncate_msg) - 1, truncate_msg, strlen(truncate_msg));
*(t->output + MAX_TASK_STDOUT_STORAGE - 1) = '\0';
free(truncate_msg);
}
timestamp_t current_time = timestamp_get();
if(effective_stoptime && effective_stoptime > current_time) {
usleep(effective_stoptime - current_time);
}
} else {
actual = 0;
}
if(t->output)
t->output[actual] = 0;
t->result = task_status;
t->return_status = exit_status;
q->stats->time_workers_execute += t->time_workers_execute_last;
w->finished_tasks++;
// Convert resource_monitor status into work queue status if needed.
if(q->monitor_mode) {
if(t->return_status == RM_OVERFLOW) {
update_task_result(t, WORK_QUEUE_RESULT_RESOURCE_EXHAUSTION);
} else if(t->return_status == RM_TIME_EXPIRE) {
update_task_result(t, WORK_QUEUE_RESULT_TASK_TIMEOUT);
}
}
change_task_state(q, t, WORK_QUEUE_TASK_WAITING_RETRIEVAL);
return WQ_SUCCESS;
}
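/*
For illustration, a result line takes the form parsed above, e.g.
(hypothetical values):

	result 0 0 1024 5000000 17

meaning: task 17 completed with task status 0 and exit status 0, ran
for 5000000 microseconds at the worker, and 1024 bytes of stdout
follow on the link.
*/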
static work_queue_result_code_t get_available_results(struct work_queue *q, struct work_queue_worker *w)
{
//max_count == -1, tells the worker to send all available results.
send_worker_msg(q, w, "send_results %d\n", -1);
debug(D_WQ, "Reading result(s) from %s (%s)", w->hostname, w->addrport);
char line[WORK_QUEUE_LINE_MAX];
int i = 0;
work_queue_result_code_t result = WQ_SUCCESS; //return success unless something fails below.
while(1) {
work_queue_msg_code_t mcode;
mcode = recv_worker_msg_retry(q, w, line, sizeof(line));
if(mcode!=MSG_NOT_PROCESSED) {
result = WQ_WORKER_FAILURE;
break;
}
if(string_prefix_is(line,"result")) {
result = get_result(q, w, line);
if(result != WQ_SUCCESS) break;
i++;
} else if(string_prefix_is(line,"update")) {
result = get_update(q,w,line);
if(result != WQ_SUCCESS) break;
} else if(!strcmp(line,"end")) {
//Only return success if last message is end.
break;
} else {
debug(D_WQ, "%s (%s): sent invalid response to send_results: %s",w->hostname,w->addrport,line);
result = WQ_WORKER_FAILURE;
break;
}
}
if(result != WQ_SUCCESS) {
handle_worker_failure(q, w);
}
return result;
}
static int update_task_result(struct work_queue_task *t, work_queue_result_t new_result) {
if(new_result & ~(0x7)) {
/* Upper bits are set, so this is not one of the old-style results for
* inputs, outputs, or stdout; we simply make the update. */
t->result = new_result;
} else if(t->result != WORK_QUEUE_RESULT_UNKNOWN && t->result & ~(0x7)) {
/* Ignore new result, since we only update for input, output, or
* stdout missing when no other result exists. This is because
* missing inputs/outputs are expected anyway with other kinds of
* errors. */
} else if(new_result == WORK_QUEUE_RESULT_INPUT_MISSING) {
/* input missing always appears by itself, so yet again we simply make an update. */
t->result = new_result;
} else if(new_result == WORK_QUEUE_RESULT_OUTPUT_MISSING) {
/* output missing clobbers stdout missing. */
t->result = new_result;
} else {
/* we only get here for stdout missing. */
t->result = new_result;
}
return t->result;
}
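/*
For illustration of the precedence above (the low three bits of a
result code encode the old-style input/output/stdout-missing values):

	t->result SUCCESS,        new OUTPUT_MISSING -> OUTPUT_MISSING
	t->result TASK_TIMEOUT,   new OUTPUT_MISSING -> unchanged (ignored)
	t->result STDOUT_MISSING, new OUTPUT_MISSING -> OUTPUT_MISSING
*/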
static struct jx *blocked_to_json( struct work_queue *q ) {
if(hash_table_size(q->worker_blocklist) < 1) {
return NULL;
}
struct jx *j = jx_array(0);
char *hostname;
struct blocklist_host_info *info;
hash_table_firstkey(q->worker_blocklist);
while(hash_table_nextkey(q->worker_blocklist, &hostname, (void *) &info)) {
if(info->blocked) {
jx_array_insert(j, jx_string(hostname));
}
}
return j;
}
static struct rmsummary *largest_waiting_declared_resources(struct work_queue *q, const char *category) {
struct rmsummary *max_resources_waiting = rmsummary_create(-1);
struct work_queue_task *t;
list_first_item(q->ready_list);
while((t = list_next_item(q->ready_list))) {
if(!category || (t->category && !strcmp(t->category, category))) {
rmsummary_merge_max(max_resources_waiting, t->resources_requested);
}
}
if(category) {
struct category *c = work_queue_category_lookup_or_create(q, category);
rmsummary_merge_max(max_resources_waiting, c->max_allocation);
}
return max_resources_waiting;
}
static struct rmsummary *total_resources_needed(struct work_queue *q) {
struct work_queue_task *t;
struct rmsummary *total = rmsummary_create(0);
/* for waiting tasks, we use what they would request if dispatched right now. */
list_first_item(q->ready_list);
while((t = list_next_item(q->ready_list))) {
const struct rmsummary *s = task_min_resources(q, t);
rmsummary_add(total, s);
}
/* for running tasks, we use what they have been allocated already. */
char *key;
struct work_queue_worker *w;
hash_table_firstkey(q->worker_table);
while(hash_table_nextkey(q->worker_table, &key, (void **) &w)) {
if(w->resources->tag < 0) {
continue;
}
total->cores += w->resources->cores.inuse;
total->memory += w->resources->memory.inuse;
total->disk += w->resources->disk.inuse;
total->gpus += w->resources->gpus.inuse;
}
return total;
}
static struct rmsummary *largest_waiting_measured_resources(struct work_queue *q, const char *category) {
struct rmsummary *max_resources_waiting = rmsummary_create(-1);
struct work_queue_task *t;
list_first_item(q->ready_list);
while((t = list_next_item(q->ready_list))) {
if(!category || (t->category && !strcmp(t->category, category))) {
const struct rmsummary *r = task_min_resources(q, t);
rmsummary_merge_max(max_resources_waiting, r);
}
}
if(category) {
struct category *c = work_queue_category_lookup_or_create(q, category);
rmsummary_merge_max(max_resources_waiting, c->max_allocation);
}
return max_resources_waiting;
}
static int check_worker_fit(struct work_queue_worker *w, struct rmsummary *s) {
if(w->resources->workers.total < 1)
return 0;
if(!s)
return w->resources->workers.total;
if(s->cores > w->resources->cores.largest)
return 0;
if(s->memory > w->resources->memory.largest)
return 0;
if(s->disk > w->resources->disk.largest)
return 0;
if(s->gpus > w->resources->gpus.largest)
return 0;
return w->resources->workers.total;
}
static int count_workers_for_waiting_tasks(struct work_queue *q, struct rmsummary *s) {
int count = 0;
char *key;
struct work_queue_worker *w;
hash_table_firstkey(q->worker_table);
while(hash_table_nextkey(q->worker_table, &key, (void**)&w)) {
count += check_worker_fit(w, s);
}
return count;
}
/* category_to_jx creates a jx expression with category statistics that can be
sent to the catalog.
*/
void category_jx_insert_max(struct jx *j, struct category *c, const char *field, struct rmsummary *largest) {
double l = rmsummary_get(largest, field);
double m = rmsummary_get(c->max_resources_seen, field);
double e = -1;
if(c->max_resources_seen->limits_exceeded) {
e = rmsummary_get(c->max_resources_seen->limits_exceeded, field);
}
char *field_str = string_format("max_%s", field);
if(l > -1){
char *max_str = string_format("%s", rmsummary_resource_to_str(field, l, 0));
jx_insert_string(j, field_str, max_str);
free(max_str);
} else if(!category_in_steady_state(c) && e > -1) {
char *max_str = string_format(">%s", rmsummary_resource_to_str(field, m - 1, 0));
jx_insert_string(j, field_str, max_str);
free(max_str);
} else if(m > -1) {
char *max_str = string_format("~%s", rmsummary_resource_to_str(field, m, 0));
jx_insert_string(j, field_str, max_str);
free(max_str);
}
free(field_str);
}
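/*
For illustration, the resulting catalog field takes one of three
forms (values hypothetical):

	"max_cores": "4"   -- largest value declared by a waiting task
	"max_cores": ">3"  -- category not yet in steady state and a task
	                      exceeded its allocation, so the requirement
	                      is only known to be above what has been seen
	"max_cores": "~4"  -- an estimate from the largest value seen so far
*/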
static struct jx * category_to_jx(struct work_queue *q, const char *category) {
struct category *c = work_queue_category_lookup_or_create(q, category);
struct work_queue_stats s;
work_queue_get_stats_category(q, category, &s);
if(s.tasks_waiting + s.tasks_running + s.tasks_done < 1)
return 0;
struct jx *j = jx_object(0);
if(!j) return 0;
jx_insert_string(j, "category", category);
jx_insert_integer(j, "tasks_waiting", s.tasks_waiting);
jx_insert_integer(j, "tasks_running", s.tasks_running);
jx_insert_integer(j, "tasks_dispatched", s.tasks_dispatched);
jx_insert_integer(j, "tasks_done", s.tasks_done);
jx_insert_integer(j, "tasks_failed", s.tasks_failed);
jx_insert_integer(j, "tasks_cancelled", s.tasks_cancelled);
jx_insert_integer(j, "workers_able", s.workers_able);
struct rmsummary *largest = largest_waiting_declared_resources(q, c->name);
category_jx_insert_max(j, c, "cores", largest);
category_jx_insert_max(j, c, "memory", largest);
category_jx_insert_max(j, c, "disk", largest);
rmsummary_delete(largest);
if(c->first_allocation) {
if(c->first_allocation->cores > -1)
jx_insert_integer(j, "first_cores", c->first_allocation->cores);
if(c->first_allocation->memory > -1)
jx_insert_integer(j, "first_memory", c->first_allocation->memory);
if(c->first_allocation->disk > -1)
jx_insert_integer(j, "first_disk", c->first_allocation->disk);
jx_insert_integer(j, "first_allocation_count", task_request_count(q, c->name, CATEGORY_ALLOCATION_FIRST));
jx_insert_integer(j, "max_allocation_count", task_request_count(q, c->name, CATEGORY_ALLOCATION_MAX));
} else {
jx_insert_integer(j, "first_allocation_count", 0);
jx_insert_integer(j, "max_allocation_count", s.tasks_waiting + s.tasks_running + s.tasks_dispatched);
}
return j;
}
static struct jx *categories_to_jx(struct work_queue *q) {
struct jx *a = jx_array(0);
struct category *c;
char *category_name;
hash_table_firstkey(q->categories);
while(hash_table_nextkey(q->categories, &category_name, (void **) &c)) {
struct jx *j = category_to_jx(q, category_name);
if(j) {
jx_array_insert(a, j);
}
}
return a;
}
/*
queue_to_jx examines the overall queue status and creates
a jx expression which can be sent directly to the
user that connects via work_queue_status.
*/
static struct jx * queue_to_jx( struct work_queue *q, struct link *foreman_uplink )
{
struct jx *j = jx_object(0);
if(!j) return 0;
// Insert all properties from work_queue_stats
struct work_queue_stats info;
work_queue_get_stats(q,&info);
// Add special properties expected by the catalog server
char owner[USERNAME_MAX];
username_get(owner);
jx_insert_string(j,"type","wq_master");
if(q->name) jx_insert_string(j,"project",q->name);
jx_insert_integer(j,"starttime",(q->stats->time_when_started/1000000)); // catalog expects time_t not timestamp_t
jx_insert_string(j,"working_dir",q->workingdir);
jx_insert_string(j,"owner",owner);
jx_insert_string(j,"version",CCTOOLS_VERSION);
jx_insert_integer(j,"port",work_queue_port(q));
jx_insert_integer(j,"priority",info.priority);
jx_insert_string(j,"manager_preferred_connection",q->manager_preferred_connection);
struct jx *interfaces = interfaces_of_host();
if(interfaces) {
jx_insert(j,jx_string("network_interfaces"),interfaces);
}
//send info on workers
jx_insert_integer(j,"workers",info.workers_connected);
jx_insert_integer(j,"workers_connected",info.workers_connected);
jx_insert_integer(j,"workers_init",info.workers_init);
jx_insert_integer(j,"workers_idle",info.workers_idle);
jx_insert_integer(j,"workers_busy",info.workers_busy);
jx_insert_integer(j,"workers_able",info.workers_able);
jx_insert_integer(j,"workers_joined",info.workers_joined);
jx_insert_integer(j,"workers_removed",info.workers_removed);
jx_insert_integer(j,"workers_released",info.workers_released);
jx_insert_integer(j,"workers_idled_out",info.workers_idled_out);
jx_insert_integer(j,"workers_fast_aborted",info.workers_fast_aborted);
jx_insert_integer(j,"workers_lost",info.workers_lost);
//workers_blocked adds host names, not a count
struct jx *blocklist = blocked_to_json(q);
if(blocklist) {
jx_insert(j,jx_string("workers_blocked"), blocklist);
}
//send info on tasks
jx_insert_integer(j,"tasks_waiting",info.tasks_waiting);
jx_insert_integer(j,"tasks_on_workers",info.tasks_on_workers);
jx_insert_integer(j,"tasks_running",info.tasks_running);
jx_insert_integer(j,"tasks_with_results",info.tasks_with_results);
jx_insert_integer(j,"tasks_left",q->num_tasks_left);
jx_insert_integer(j,"tasks_submitted",info.tasks_submitted);
jx_insert_integer(j,"tasks_dispatched",info.tasks_dispatched);
jx_insert_integer(j,"tasks_done",info.tasks_done);
jx_insert_integer(j,"tasks_failed",info.tasks_failed);
jx_insert_integer(j,"tasks_cancelled",info.tasks_cancelled);
jx_insert_integer(j,"tasks_exhausted_attempts",info.tasks_exhausted_attempts);
// tasks_complete is deprecated, but the old work_queue_status expects it.
jx_insert_integer(j,"tasks_complete",info.tasks_done);
//send info on queue
jx_insert_integer(j,"time_when_started",info.time_when_started);
jx_insert_integer(j,"time_send",info.time_send);
jx_insert_integer(j,"time_receive",info.time_receive);
jx_insert_integer(j,"time_send_good",info.time_send_good);
jx_insert_integer(j,"time_receive_good",info.time_receive_good);
jx_insert_integer(j,"time_status_msgs",info.time_status_msgs);
jx_insert_integer(j,"time_internal",info.time_internal);
jx_insert_integer(j,"time_polling",info.time_polling);
jx_insert_integer(j,"time_application",info.time_application);
jx_insert_integer(j,"time_workers_execute",info.time_workers_execute);
jx_insert_integer(j,"time_workers_execute_good",info.time_workers_execute_good);
jx_insert_integer(j,"time_workers_execute_exhaustion",info.time_workers_execute_exhaustion);
jx_insert_integer(j,"bytes_sent",info.bytes_sent);
jx_insert_integer(j,"bytes_received",info.bytes_received);
jx_insert_integer(j,"capacity_tasks",info.capacity_tasks);
jx_insert_integer(j,"capacity_cores",info.capacity_cores);
jx_insert_integer(j,"capacity_memory",info.capacity_memory);
jx_insert_integer(j,"capacity_disk",info.capacity_disk);
jx_insert_integer(j,"capacity_gpus",info.capacity_gpus);
jx_insert_integer(j,"capacity_instantaneous",info.capacity_instantaneous);
jx_insert_integer(j,"capacity_weighted",info.capacity_weighted);
jx_insert_integer(j,"manager_load",info.manager_load);
if(q->tlq_url) jx_insert_string(j,"tlq_url",q->tlq_url);
// Add the resources computed from tributary workers.
struct work_queue_resources r;
aggregate_workers_resources(q,&r,NULL);
work_queue_resources_add_to_jx(&r,j);
// If this is a foreman, add the manager address and the disk resources
if(foreman_uplink) {
int port;
char address[LINK_ADDRESS_MAX];
char addrport[WORK_QUEUE_LINE_MAX];
link_address_remote(foreman_uplink,address,&port);
sprintf(addrport,"%s:%d",address,port);
jx_insert_string(j,"my_manager",addrport);
// get foreman local resources and overwrite disk usage
struct work_queue_resources local_resources;
work_queue_resources_measure_locally(&local_resources,q->workingdir);
r.disk.total = local_resources.disk.total;
r.disk.inuse = local_resources.disk.inuse;
work_queue_resources_add_to_jx(&r,j);
}
//add the stats per category
jx_insert(j, jx_string("categories"), categories_to_jx(q));
//add total resources used/needed by the queue
struct rmsummary *total = total_resources_needed(q);
jx_insert_integer(j,"tasks_total_cores",total->cores);
jx_insert_integer(j,"tasks_total_memory",total->memory);
jx_insert_integer(j,"tasks_total_disk",total->disk);
jx_insert_integer(j,"tasks_total_gpus",total->gpus);
return j;
}
/*
queue_lean_to_jx examines the overall queue status and creates
a jx expression which can be sent to the catalog.
It differs from queue_to_jx in that it contains only the minimum information
that workers, work_queue_status, and the work_queue_factory need.
*/
static struct jx * queue_lean_to_jx( struct work_queue *q, struct link *foreman_uplink )
{
struct jx *j = jx_object(0);
if(!j) return 0;
// Insert all properties from work_queue_stats
struct work_queue_stats info;
work_queue_get_stats(q,&info);
//information regarding how to contact the manager
jx_insert_string(j,"version",CCTOOLS_VERSION);
jx_insert_string(j,"type","wq_master");
jx_insert_integer(j,"port",work_queue_port(q));
char owner[USERNAME_MAX];
username_get(owner);
jx_insert_string(j,"owner",owner);
if(q->name) jx_insert_string(j,"project",q->name);
jx_insert_integer(j,"starttime",(q->stats->time_when_started/1000000)); // catalog expects time_t not timestamp_t
jx_insert_string(j,"manager_preferred_connection",q->manager_preferred_connection);
struct jx *interfaces = interfaces_of_host();
if(interfaces) {
jx_insert(j,jx_string("network_interfaces"),interfaces);
}
//task information for general work_queue_status report
jx_insert_integer(j,"tasks_waiting",info.tasks_waiting);
jx_insert_integer(j,"tasks_running",info.tasks_running);
jx_insert_integer(j,"tasks_complete",info.tasks_done); // tasks_complete is deprecated, but the old work_queue_status expects it.
//addtional task information for work_queue_factory
jx_insert_integer(j,"tasks_on_workers",info.tasks_on_workers);
jx_insert_integer(j,"tasks_left",q->num_tasks_left);
//capacity information the factory needs
jx_insert_integer(j,"capacity_tasks",info.capacity_tasks);
jx_insert_integer(j,"capacity_cores",info.capacity_cores);
jx_insert_integer(j,"capacity_memory",info.capacity_memory);
jx_insert_integer(j,"capacity_disk",info.capacity_disk);
jx_insert_integer(j,"capacity_gpus",info.capacity_gpus);
jx_insert_integer(j,"capacity_weighted",info.capacity_weighted);
jx_insert_double(j,"manager_load",info.manager_load);
//resources information the factory needs
struct rmsummary *total = total_resources_needed(q);
jx_insert_integer(j,"tasks_total_cores",total->cores);
jx_insert_integer(j,"tasks_total_memory",total->memory);
jx_insert_integer(j,"tasks_total_disk",total->disk);
jx_insert_integer(j,"tasks_total_gpus",total->gpus);
//worker information for general work_queue_status report
jx_insert_integer(j,"workers",info.workers_connected);
jx_insert_integer(j,"workers_connected",info.workers_connected);
//additional worker information the factory needs
struct jx *blocklist = blocked_to_json(q);
if(blocklist) {
jx_insert(j,jx_string("workers_blocked"), blocklist); //danger! unbounded field
}
// Add information about the foreman
if(foreman_uplink) {
int port;
char address[LINK_ADDRESS_MAX];
char addrport[WORK_QUEUE_LINE_MAX];
link_address_remote(foreman_uplink,address,&port);
sprintf(addrport,"%s:%d",address,port);
jx_insert_string(j,"my_manager",addrport);
}
return j;
}
void current_tasks_to_jx( struct jx *j, struct work_queue_worker *w )
{
struct work_queue_task *t;
uint64_t taskid;
int n = 0;
itable_firstkey(w->current_tasks);
while(itable_nextkey(w->current_tasks, &taskid, (void**)&t)) {
char task_string[WORK_QUEUE_LINE_MAX];
sprintf(task_string, "current_task_%03d_id", n);
jx_insert_integer(j,task_string,t->taskid);
sprintf(task_string, "current_task_%03d_command", n);
jx_insert_string(j,task_string,t->command_line);
n++;
}
}
struct jx * worker_to_jx( struct work_queue *q, struct work_queue_worker *w )
{
// Status queries come from work_queue_status connections, not real workers,
// so skip them before allocating anything.
if(strcmp(w->hostname, "QUEUE_STATUS") == 0){
return 0;
}
struct jx *j = jx_object(0);
if(!j) return 0;
jx_insert_string(j,"hostname",w->hostname);
jx_insert_string(j,"os",w->os);
jx_insert_string(j,"arch",w->arch);
jx_insert_string(j,"address_port",w->addrport);
jx_insert_integer(j,"ncpus",w->resources->cores.total);
jx_insert_integer(j,"total_tasks_complete",w->total_tasks_complete);
jx_insert_integer(j,"total_tasks_running",itable_size(w->current_tasks));
jx_insert_integer(j,"total_bytes_transferred",w->total_bytes_transferred);
jx_insert_integer(j,"total_transfer_time",w->total_transfer_time);
jx_insert_integer(j,"start_time",w->start_time);
jx_insert_integer(j,"current_time",timestamp_get());
work_queue_resources_add_to_jx(w->resources,j);
current_tasks_to_jx(j, w);
return j;
}
static void priority_add_to_jx(struct jx *j, double priority)
{
int decimals = 2;
int factor = pow(10, decimals);
int dpart = ((int) (priority * factor)) - ((int) priority) * factor;
char *str;
if(dpart == 0)
str = string_format("%d", (int) priority);
else
str = string_format("%.2g", priority);
jx_insert_string(j, "priority", str);
free(str);
}
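/*
For illustration: a priority of 5.0 has no fractional part at two
decimals, so it is reported as "5"; a priority of 0.5 is reported via
"%.2g" as "0.5". The dpart test only detects whether a fraction
exists; the %.2g format then keeps two significant digits.
*/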
struct jx * task_to_jx( struct work_queue_task *t, const char *state, const char *host )
{
struct jx *j = jx_object(0);
jx_insert_integer(j,"taskid",t->taskid);
jx_insert_string(j,"state",state);
if(t->tag) jx_insert_string(j,"tag",t->tag);
if(t->category) jx_insert_string(j,"category",t->category);
jx_insert_string(j,"command",t->command_line);
if(host) jx_insert_string(j,"host",host);
jx_insert_integer(j,"cores",t->resources_requested->cores);
jx_insert_integer(j,"gpus",t->resources_requested->gpus);
jx_insert_integer(j,"memory",t->resources_requested->memory);
jx_insert_integer(j,"disk",t->resources_requested->disk);
priority_add_to_jx(j, t->priority);
return j;
}
/*
Send a brief human-readable index listing the data
types that can be queried via this API.
*/
static void process_data_index( struct work_queue *q, struct work_queue_worker *w, time_t stoptime )
{
buffer_t buf;
buffer_init(&buf);
buffer_printf(&buf,"<h1>Work Queue Data API</h1>");
buffer_printf(&buf,"<ul>\n");
buffer_printf(&buf,"<li> <a href=\"/queue_status\">Queue Status</a>\n");
buffer_printf(&buf,"<li> <a href=\"/task_status\">Task Status</a>\n");
buffer_printf(&buf,"<li> <a href=\"/worker_status\">Worker Status</a>\n");
buffer_printf(&buf,"<li> <a href=\"/resources_status\">Resources Status</a>\n");
buffer_printf(&buf,"</ul>\n");
send_worker_msg(q,w,buffer_tostring(&buf),buffer_pos(&buf),stoptime);
buffer_free(&buf);
}
/*
Process an HTTP request that comes in via a worker port.
This represents a web browser that connected directly
to the manager to fetch status data.
*/
static work_queue_msg_code_t process_http_request( struct work_queue *q, struct work_queue_worker *w, const char *path, time_t stoptime )
{
char line[WORK_QUEUE_LINE_MAX];
// Consume (and ignore) the remainder of the headers.
while(link_readline(w->link,line,WORK_QUEUE_LINE_MAX,stoptime)) {
if(line[0]==0) break;
}
send_worker_msg(q,w,"HTTP/1.1 200 OK\nConnection: close\n");
if(!strcmp(path,"/")) {
// Requests to root get a simple human readable index.
send_worker_msg(q,w,"Content-type: text/html\n\n");
process_data_index(q, w, stoptime );
} else {
// Other requests get raw JSON data.
send_worker_msg(q,w,"Content-type: text/plain\n\n");
process_queue_status(q, w, &path[1], stoptime );
}
// Return success but require a disconnect now.
return MSG_PROCESSED_DISCONNECT;
}
/*
Process a queue status request which returns raw JSON.
This could come via the HTTP interface, or via a plain request.
*/
static work_queue_msg_code_t process_queue_status( struct work_queue *q, struct work_queue_worker *target, const char *line, time_t stoptime )
{
struct link *l = target->link;
struct jx *a = jx_array(NULL);
target->type = WORKER_TYPE_STATUS;
free(target->hostname);
target->hostname = xxstrdup("QUEUE_STATUS");
if(!strcmp(line, "queue_status")) {
struct jx *j = queue_to_jx( q, 0 );
if(j) {
jx_array_insert(a, j);
}
} else if(!strcmp(line, "task_status")) {
struct work_queue_task *t;
struct work_queue_worker *w;
struct jx *j;
uint64_t taskid;
itable_firstkey(q->tasks);
while(itable_nextkey(q->tasks,&taskid,(void**)&t)) {
w = itable_lookup(q->worker_task_map, taskid);
if(w) {
j = task_to_jx(t,"running",w->hostname);
if(j) {
// Include detailed information on where the task is running:
// address and port, workspace
jx_insert_string(j, "address_port", w->addrport);
// Timestamps on running task related events
jx_insert_integer(j, "time_when_submitted", t->time_when_submitted);
jx_insert_integer(j, "time_when_commit_start", t->time_when_commit_start);
jx_insert_integer(j, "time_when_commit_end", t->time_when_commit_end);
jx_insert_integer(j, "current_time", timestamp_get());
jx_array_insert(a, j);
}
} else {
work_queue_task_state_t state = (uintptr_t) itable_lookup(q->task_state_map, taskid);
j = task_to_jx(t,task_state_str(state),0);
if(j) {
jx_array_insert(a, j);
}
}
}
} else if(!strcmp(line, "worker_status")) {
struct work_queue_worker *w;
struct jx *j;
char *key;
hash_table_firstkey(q->worker_table);
while(hash_table_nextkey(q->worker_table,&key,(void**)&w)) {
// If the worker has not been initialized, ignore it.
if(!strcmp(w->hostname, "unknown")) continue;
j = worker_to_jx(q, w);
if(j) {
jx_array_insert(a, j);
}
}
} else if(!strcmp(line, "wable_status")) {
jx_delete(a);
a = categories_to_jx(q);
} else if(!strcmp(line, "resources_status")) {
struct jx *j = queue_to_jx( q, 0 );
if(j) {
jx_array_insert(a, j);
}
} else {
debug(D_WQ, "Unknown status request: '%s'", line);
jx_delete(a);
return MSG_FAILURE;
}
jx_print_link(a,l,stoptime);
jx_delete(a);
return MSG_PROCESSED_DISCONNECT;
}
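/*
Handle a resource update message from a worker. The general form is:
	resource <name> <total> <smallest> <largest>
except for "resource tag <value>", which carries a single value
identifying the resource report. The inuse fields are computed by
the manager, so they are preserved across updates.
*/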
static work_queue_msg_code_t process_resource( struct work_queue *q, struct work_queue_worker *w, const char *line )
{
char resource_name[WORK_QUEUE_LINE_MAX];
struct work_queue_resource r;
int n = sscanf(line, "resource %s %"PRId64" %"PRId64" %"PRId64, resource_name, &r.total, &r.smallest, &r.largest);
if(n == 2 && !strcmp(resource_name,"tag"))
{
/* Shortcut, total has the tag, as "resources tag" only sends one value */
w->resources->tag = r.total;
} else if(n == 4) {
/* inuse is computed by the manager, so we save it here */
int64_t inuse;
if(!strcmp(resource_name,"cores")) {
inuse = w->resources->cores.inuse;
w->resources->cores = r;
w->resources->cores.inuse = inuse;
} else if(!strcmp(resource_name,"memory")) {
inuse = w->resources->memory.inuse;
w->resources->memory = r;
w->resources->memory.inuse = inuse;
} else if(!strcmp(resource_name,"disk")) {
inuse = w->resources->disk.inuse;
w->resources->disk = r;
w->resources->disk.inuse = inuse;
} else if(!strcmp(resource_name,"gpus")) {
inuse = w->resources->gpus.inuse;
w->resources->gpus = r;
w->resources->gpus.inuse = inuse;
} else if(!strcmp(resource_name,"workers")) {
inuse = w->resources->workers.inuse;
w->resources->workers = r;
w->resources->workers.inuse = inuse;
}
} else {
return MSG_FAILURE;
}
return MSG_PROCESSED;
}
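/*
Handle a feature message of the form "feature <name>" from a worker,
where <name> is url-encoded, and record it in the worker's feature table.
*/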
static work_queue_msg_code_t process_feature( struct work_queue *q, struct work_queue_worker *w, const char *line )
{
char feature[WORK_QUEUE_LINE_MAX];
char fdec[WORK_QUEUE_LINE_MAX];
int n = sscanf(line, "feature %s", feature);
if(n != 1) {
return MSG_FAILURE;
}
if(!w->features)
w->features = hash_table_create(4,0);
url_decode(feature, fdec, WORK_QUEUE_LINE_MAX);
debug(D_WQ, "Feature found: %s\n", fdec);
hash_table_insert(w->features, fdec, (void **) 1);
return MSG_PROCESSED;
}
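/*
Handle an incoming message on an established worker connection.
Returns WQ_SUCCESS if the message was handled (possibly by
disconnecting a status-only client), or WQ_WORKER_FAILURE if the
worker misbehaved and was removed.
*/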
static work_queue_result_code_t handle_worker(struct work_queue *q, struct link *l)
{
char line[WORK_QUEUE_LINE_MAX];
char key[WORK_QUEUE_LINE_MAX];
struct work_queue_worker *w;
link_to_hash_key(l, key);
w = hash_table_lookup(q->worker_table, key);
work_queue_msg_code_t mcode;
mcode = recv_worker_msg(q, w, line, sizeof(line));
// We only expect asynchronous status queries and updates here.
switch(mcode) {
case MSG_PROCESSED:
// A status message was received and processed.
return WQ_SUCCESS;
break;
case MSG_PROCESSED_DISCONNECT:
// A status query was received and processed, so disconnect.
remove_worker(q, w, WORKER_DISCONNECT_STATUS_WORKER);
return WQ_SUCCESS;
case MSG_NOT_PROCESSED:
debug(D_WQ, "Invalid message from worker %s (%s): %s", w->hostname, w->addrport, line);
q->stats->workers_lost++;
remove_worker(q, w, WORKER_DISCONNECT_FAILURE);
return WQ_WORKER_FAILURE;
break;
case MSG_FAILURE:
debug(D_WQ, "Failed to read from worker %s (%s)", w->hostname, w->addrport);
q->stats->workers_lost++;
remove_worker(q, w, WORKER_DISCONNECT_FAILURE);
return WQ_WORKER_FAILURE;
}
return WQ_SUCCESS;
}
static int build_poll_table(struct work_queue *q, struct link *manager)
{
int n = 0;
char *key;
struct work_queue_worker *w;
// Allocate a small table, if it hasn't been done yet.
if(!q->poll_table) {
q->poll_table = malloc(sizeof(*q->poll_table) * q->poll_table_size);
if(!q->poll_table) {
//if we can't allocate a poll table, we can't do anything else.
fatal("allocating memory for poll table failed.");
}
}
// The first item in the poll table is the manager link, which accepts new connections.
q->poll_table[0].link = q->manager_link;
q->poll_table[0].events = LINK_READ;
q->poll_table[0].revents = 0;
n = 1;
if(manager) {
/* foreman uplink */
q->poll_table[1].link = manager;
q->poll_table[1].events = LINK_READ;
q->poll_table[1].revents = 0;
n++;
}
// For every worker in the hash table, add an item to the poll table
hash_table_firstkey(q->worker_table);
while(hash_table_nextkey(q->worker_table, &key, (void **) &w)) {
// If poll table is not large enough, reallocate it
if(n >= q->poll_table_size) {
q->poll_table_size *= 2;
q->poll_table = realloc(q->poll_table, sizeof(*q->poll_table) * q->poll_table_size);
if(q->poll_table == NULL) {
//if we can't allocate a poll table, we can't do anything else.
fatal("reallocating memory for poll table failed.");
}
}
q->poll_table[n].link = w->link;
q->poll_table[n].events = LINK_READ;
q->poll_table[n].revents = 0;
n++;
}
return n;
}
/*
Send a symbolic link to the remote worker.
Note that the target of the link is sent
as sthe "body" of the link, following the
message header.
*/
static int send_symlink( struct work_queue *q, struct work_queue_worker *w, struct work_queue_task *t, const char *localname, const char *remotename, int64_t *total_bytes )
{
char target[WORK_QUEUE_LINE_MAX];
int length = readlink(localname,target,sizeof(target));
if(length<0) return WQ_APP_FAILURE;
char remotename_encoded[WORK_QUEUE_LINE_MAX];
url_encode(remotename,remotename_encoded,sizeof(remotename_encoded));
send_worker_msg(q,w,"symlink %s %d\n",remotename_encoded,length);
link_write(w->link,target,length,time(0)+q->long_timeout);
*total_bytes += length;
return WQ_SUCCESS;
}
/*
Send a single file (or a piece of a file) to the remote worker.
The transfer time is controlled by the size of the file.
If the transfer takes too long, then abort.
*/
static int send_file( struct work_queue *q, struct work_queue_worker *w, struct work_queue_task *t, const char *localname, const char *remotename, off_t offset, int64_t length, struct stat info, int64_t *total_bytes )
{
time_t stoptime;
timestamp_t effective_stoptime = 0;
int64_t actual = 0;
/* normalize the mode so as not to set up invalid permissions */
int mode = ( info.st_mode | 0x600 ) & 0777;
if(!length) {
length = info.st_size;
}
int fd = open(localname, O_RDONLY, 0);
if(fd < 0) {
debug(D_NOTICE, "Cannot open file %s: %s", localname, strerror(errno));
return WQ_APP_FAILURE;
}
/* If we are sending only a piece of the file, seek there first. */
if (offset >= 0 && (offset+length) <= info.st_size) {
if(lseek(fd, offset, SEEK_SET) == -1) {
debug(D_NOTICE, "Cannot seek file %s to offset %lld: %s", localname, (long long) offset, strerror(errno));
close(fd);
return WQ_APP_FAILURE;
}
} else {
debug(D_NOTICE, "File specification %s (%lld:%lld) is invalid", localname, (long long) offset, (long long) offset+length);
close(fd);
return WQ_APP_FAILURE;
}
if(q->bandwidth) {
effective_stoptime = (length/q->bandwidth)*1000000 + timestamp_get();
}
/* filenames are url-encoded to avoid problems with spaces, etc */
char remotename_encoded[WORK_QUEUE_LINE_MAX];
url_encode(remotename,remotename_encoded,sizeof(remotename_encoded));
stoptime = time(0) + get_transfer_wait_time(q, w, t, length);
send_worker_msg(q,w, "put %s %"PRId64" 0%o\n",remotename_encoded, length, mode );
actual = link_stream_from_fd(w->link, fd, length, stoptime);
close(fd);
*total_bytes += actual;
if(actual != length) return WQ_WORKER_FAILURE;
timestamp_t current_time = timestamp_get();
if(effective_stoptime && effective_stoptime > current_time) {
usleep(effective_stoptime - current_time);
}
return WQ_SUCCESS;
}
/* Need prototype here to address mutually recursive code. */
static work_queue_result_code_t send_item( struct work_queue *q, struct work_queue_worker *w, struct work_queue_task *t, const char *name, const char *remotename, int64_t offset, int64_t length, int64_t * total_bytes, int follow_links );
/*
Send a directory and all of its contents using the new streaming protocol.
Do this by sending a "dir" prefix, then all of the directory contents,
and then an "end" marker.
*/
static work_queue_result_code_t send_directory( struct work_queue *q, struct work_queue_worker *w, struct work_queue_task *t, const char *localname, const char *remotename, int64_t * total_bytes )
{
DIR *dir = opendir(localname);
if(!dir) {
debug(D_NOTICE, "Cannot open dir %s: %s", localname, strerror(errno));
return WQ_APP_FAILURE;
}
work_queue_result_code_t result = WQ_SUCCESS;
char remotename_encoded[WORK_QUEUE_LINE_MAX];
url_encode(remotename,remotename_encoded,sizeof(remotename_encoded));
send_worker_msg(q,w,"dir %s\n",remotename_encoded);
struct dirent *d;
while((d = readdir(dir))) {
if(!strcmp(d->d_name, ".") || !strcmp(d->d_name, "..")) continue;
char *localpath = string_format("%s/%s",localname,d->d_name);
result = send_item( q, w, t, localpath, d->d_name, 0, 0, total_bytes, 0 );
free(localpath);
if(result != WQ_SUCCESS) break;
}
send_worker_msg(q,w,"end\n");
closedir(dir);
return result;
}
/*
Send a single item, whether it is a directory, symlink, or file.
Note 1: We call stat/lstat here a single time, and then pass the
result to the underlying object so as to minimize syscall work.
Note 2: This function is invoked at the top level with follow_links=1,
since it is common for the user to pass in a top-level symbolic
link to a file or directory which they want transferred.
However, in recursive calls, follow_links is set to zero,
and internal links are not followed, they are sent natively.
*/
static work_queue_result_code_t send_item( struct work_queue *q, struct work_queue_worker *w, struct work_queue_task *t, const char *localpath, const char *remotepath, int64_t offset, int64_t length, int64_t * total_bytes, int follow_links )
{
struct stat info;
int result = WQ_SUCCESS;
if(follow_links) {
result = stat(localpath,&info);
} else {
result = lstat(localpath,&info);
}
if(result>=0) {
if(S_ISDIR(info.st_mode)) {
result = send_directory( q, w, t, localpath, remotepath, total_bytes );
} else if(S_ISLNK(info.st_mode)) {
result = send_symlink( q, w, t, localpath, remotepath, total_bytes );
} else if(S_ISREG(info.st_mode)) {
result = send_file( q, w, t, localpath, remotepath, offset, length, info, total_bytes );
} else {
debug(D_NOTICE,"skipping unusual file: %s",strerror(errno));
}
} else {
debug(D_NOTICE, "cannot stat file %s: %s", localpath, strerror(errno));
result = WQ_APP_FAILURE;
}
return result;
}
/*
Send an item to a remote worker, if it is not already cached.
The local file name should already have been expanded by the caller.
If it is in the worker, but a new version is available, warn and return.
We do not want to rewrite the file while some other task may be using it.
Otherwise, send it to the worker.
*/
static work_queue_result_code_t send_item_if_not_cached( struct work_queue *q, struct work_queue_worker *w, struct work_queue_task *t, struct work_queue_file *tf, const char *expanded_local_name, int64_t * total_bytes)
{
struct stat local_info;
if(lstat(expanded_local_name, &local_info) < 0) {
debug(D_NOTICE, "Cannot stat file %s: %s", expanded_local_name, strerror(errno));
return WQ_APP_FAILURE;
}
struct stat *remote_info = hash_table_lookup(w->current_files, tf->cached_name);
if(remote_info && (remote_info->st_mtime != local_info.st_mtime || remote_info->st_size != local_info.st_size)) {
debug(D_NOTICE|D_WQ, "File %s changed locally. Task %d will be executed with an older version.", expanded_local_name, t->taskid);
return WQ_SUCCESS;
} else if(!remote_info) {
if(tf->offset==0 && tf->length==0) {
debug(D_WQ, "%s (%s) needs file %s as '%s'", w->hostname, w->addrport, expanded_local_name, tf->cached_name);
} else {
debug(D_WQ, "%s (%s) needs file %s (offset %lld length %lld) as '%s'", w->hostname, w->addrport, expanded_local_name, (long long) tf->offset, (long long) tf->length, tf->cached_name );
}
work_queue_result_code_t result;
result = send_item(q, w, t, expanded_local_name, tf->cached_name, tf->offset, tf->piece_length, total_bytes, 1 );
if(result == WQ_SUCCESS && tf->flags & WORK_QUEUE_CACHE) {
remote_info = xxmalloc(sizeof(*remote_info));
if(remote_info) {
memcpy(remote_info, &local_info, sizeof(local_info));
hash_table_insert(w->current_files, tf->cached_name, remote_info);
}
}
return result;
} else {
/* Up-to-date file on the worker, we do nothing. */
return WQ_SUCCESS;
}
}
/**
* This function expands Work Queue environment variables such as
* $OS, $ARCH, that are specified in the definition of Work Queue
* input files. It expands these variables based on the info reported
* by each connected worker.
* Will always return a non-empty string. That is if no match is found
* for any of the environment variables, it will return the input string
* as is.
* */
static char *expand_envnames(struct work_queue_worker *w, const char *payload)
{
char *expanded_name;
char *str, *curr_pos;
char *delimtr = "$";
char *token;
// Shortcut: If no dollars anywhere, duplicate the whole string.
if(!strchr(payload,'$')) return strdup(payload);
str = xxstrdup(payload);
expanded_name = (char *) malloc(strlen(payload) + (50 * sizeof(char)));
if(expanded_name == NULL) {
debug(D_NOTICE, "Cannot allocate memory for filename %s.\n", payload);
return NULL;
} else {
//Initialize to null byte so it works correctly with strcat.
*expanded_name = '\0';
}
token = strtok(str, delimtr);
while(token) {
if((curr_pos = strstr(token, "ARCH"))) {
if((curr_pos - token) == 0) {
strcat(expanded_name, w->arch);
strcat(expanded_name, token + 4);
} else {
//No match. So put back '$' and rest of the string.
strcat(expanded_name, "$");
strcat(expanded_name, token);
}
} else if((curr_pos = strstr(token, "OS"))) {
if((curr_pos - token) == 0) {
//Cygwin oddly reports OS name in all caps and includes version info.
if(strstr(w->os, "CYGWIN")) {
strcat(expanded_name, "Cygwin");
} else {
strcat(expanded_name, w->os);
}
strcat(expanded_name, token + 2);
} else {
strcat(expanded_name, "$");
strcat(expanded_name, token);
}
} else {
//If token and str don't point to same location, then $ sign was before token and needs to be put back.
if((token - str) > 0) {
strcat(expanded_name, "$");
}
strcat(expanded_name, token);
}
token = strtok(NULL, delimtr);
}
free(str);
debug(D_WQ, "File name %s expanded to %s for %s (%s).", payload, expanded_name, w->hostname, w->addrport);
return expanded_name;
}
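/*
Send a single input file (or buffer, url, or third-party transfer
directive) to a worker, dispatching on the file type. On success,
update the transfer statistics of the task, the worker, and the queue.
*/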
static work_queue_result_code_t send_input_file(struct work_queue *q, struct work_queue_worker *w, struct work_queue_task *t, struct work_queue_file *f)
{
int64_t total_bytes = 0;
int64_t actual = 0;
work_queue_result_code_t result = WQ_SUCCESS; //return success unless something fails below
timestamp_t open_time = timestamp_get();
switch (f->type) {
case WORK_QUEUE_BUFFER:
debug(D_WQ, "%s (%s) needs literal as %s", w->hostname, w->addrport, f->remote_name);
time_t stoptime = time(0) + get_transfer_wait_time(q, w, t, f->length);
send_worker_msg(q,w, "put %s %d %o\n",f->cached_name, f->length, 0777 );
actual = link_putlstring(w->link, f->payload, f->length, stoptime);
if(actual!=f->length) {
result = WQ_WORKER_FAILURE;
}
total_bytes = actual;
break;
case WORK_QUEUE_REMOTECMD:
debug(D_WQ, "%s (%s) needs %s from remote filesystem using %s", w->hostname, w->addrport, f->remote_name, f->payload);
send_worker_msg(q,w, "thirdget %d %s %s\n",WORK_QUEUE_FS_CMD, f->cached_name, f->payload);
break;
case WORK_QUEUE_URL:
debug(D_WQ, "%s (%s) needs %s from the url, %s %d", w->hostname, w->addrport, f->cached_name, f->payload, f->length);
send_worker_msg(q,w, "url %s %d 0%o %d\n",f->cached_name, f->length, 0777, f->flags);
link_putlstring(w->link, f->payload, f->length, time(0) + q->short_timeout);
break;
case WORK_QUEUE_DIRECTORY:
// Do nothing. Empty directories are handled by the task specification, while recursive directories are implemented as WORK_QUEUE_FILEs
break;
case WORK_QUEUE_FILE:
case WORK_QUEUE_FILE_PIECE:
if(f->flags & WORK_QUEUE_THIRDGET) {
debug(D_WQ, "%s (%s) needs %s from shared filesystem as %s", w->hostname, w->addrport, f->payload, f->remote_name);
if(!strcmp(f->remote_name, f->payload)) {
f->flags |= WORK_QUEUE_PREEXIST;
} else {
if(f->flags & WORK_QUEUE_SYMLINK) {
send_worker_msg(q,w, "thirdget %d %s %s\n", WORK_QUEUE_FS_SYMLINK, f->cached_name, f->payload);
} else {
send_worker_msg(q,w, "thirdget %d %s %s\n", WORK_QUEUE_FS_PATH, f->cached_name, f->payload);
}
}
} else {
char *expanded_payload = expand_envnames(w, f->payload);
if(expanded_payload) {
result = send_item_if_not_cached(q,w,t,f,expanded_payload,&total_bytes);
free(expanded_payload);
} else {
result = WQ_APP_FAILURE; //signal app-level failure.
}
}
break;
}
if(result == WQ_SUCCESS) {
timestamp_t close_time = timestamp_get();
timestamp_t elapsed_time = close_time-open_time;
t->bytes_sent += total_bytes;
t->bytes_transferred += total_bytes;
w->total_bytes_transferred += total_bytes;
w->total_transfer_time += elapsed_time;
q->stats->bytes_sent += total_bytes;
// Avoid division by zero below.
if(elapsed_time==0) elapsed_time = 1;
if(total_bytes > 0) {
debug(D_WQ, "%s (%s) received %.2lf MB in %.02lfs (%.02lfs MB/s) average %.02lfs MB/s",
w->hostname,
w->addrport,
total_bytes / 1000000.0,
elapsed_time / 1000000.0,
(double) total_bytes / elapsed_time,
(double) w->total_bytes_transferred / w->total_transfer_time
);
}
} else {
debug(D_WQ, "%s (%s) failed to send %s (%" PRId64 " bytes sent).",
w->hostname,
w->addrport,
f->type == WORK_QUEUE_BUFFER ? "literal data" : f->payload,
total_bytes);
if(result == WQ_APP_FAILURE) {
update_task_result(t, WORK_QUEUE_RESULT_INPUT_MISSING);
}
}
return result;
}
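/*
Send all of the input files of a task to the given worker.
First check that each local file exists, so that we can fail
quickly without transferring anything; then send each in turn.
*/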
static work_queue_result_code_t send_input_files( struct work_queue *q, struct work_queue_worker *w, struct work_queue_task *t )
{
struct work_queue_file *f;
struct stat s;
// Check for existence of each input file first.
// If any one fails to exist, set the failure condition and return failure.
if(t->input_files) {
list_first_item(t->input_files);
while((f = list_next_item(t->input_files))) {
if(f->type == WORK_QUEUE_FILE || f->type == WORK_QUEUE_FILE_PIECE) {
char * expanded_payload = expand_envnames(w, f->payload);
if(!expanded_payload) {
update_task_result(t, WORK_QUEUE_RESULT_INPUT_MISSING);
return WQ_APP_FAILURE;
}
if(stat(expanded_payload, &s) != 0) {
debug(D_WQ,"Could not stat %s: %s\n", expanded_payload, strerror(errno));
free(expanded_payload);
update_task_result(t, WORK_QUEUE_RESULT_INPUT_MISSING);
return WQ_APP_FAILURE;
}
free(expanded_payload);
}
}
}
// Send each of the input files.
// If any one fails to be sent, return failure.
if(t->input_files) {
list_first_item(t->input_files);
while((f = list_next_item(t->input_files))) {
work_queue_result_code_t result = send_input_file(q,w,t,f);
if(result != WQ_SUCCESS) {
return result;
}
}
}
return WQ_SUCCESS;
}
/* if max is defined, use max as the limit;
 * else if min fits within the largest worker seen, choose that largest value;
 * otherwise use 'infinity' (largest + 1). */
#define task_worker_box_size_resource(w, min, max, field)\
( max->field > -1 ? max->field :\
min->field <= w->resources->field.largest ? w->resources->field.largest : w->resources->field.largest + 1 )
static struct rmsummary *task_worker_box_size(struct work_queue *q, struct work_queue_worker *w, struct work_queue_task *t) {
const struct rmsummary *min = task_min_resources(q, t);
const struct rmsummary *max = task_max_resources(q, t);
struct rmsummary *limits = rmsummary_create(-1);
rmsummary_merge_override(limits, max);
limits->cores = task_worker_box_size_resource(w, min, max, cores);
limits->memory = task_worker_box_size_resource(w, min, max, memory);
limits->disk = task_worker_box_size_resource(w, min, max, disk);
limits->gpus = task_worker_box_size_resource(w, min, max, gpus);
return limits;
}
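/*
Begin execution of one task on one worker by sending the complete
task description: the task header and command, the category and
resource limits, environment variables, and the input/output file
lists, terminated by "end".
*/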
static work_queue_result_code_t start_one_task(struct work_queue *q, struct work_queue_worker *w, struct work_queue_task *t)
{
/* wrap command at the last minute, so that we have the updated information
* about resources. */
struct rmsummary *limits = task_worker_box_size(q, w, t);
char *command_line;
if(q->monitor_mode) {
command_line = work_queue_monitor_wrap(q, w, t, limits);
} else {
command_line = xxstrdup(t->command_line);
}
work_queue_result_code_t result = send_input_files(q, w, t);
if (result != WQ_SUCCESS) {
free(command_line);
return result;
}
send_worker_msg(q,w, "task %lld\n", (long long) t->taskid);
long long cmd_len = strlen(command_line);
send_worker_msg(q,w, "cmd %lld\n", (long long) cmd_len);
link_putlstring(w->link, command_line, cmd_len, /* stoptime */ time(0) + (w->type == WORKER_TYPE_FOREMAN ? q->long_timeout : q->short_timeout));
debug(D_WQ, "%s\n", command_line);
free(command_line);
send_worker_msg(q,w, "category %s\n", t->category);
send_worker_msg(q,w, "cores %s\n", rmsummary_resource_to_str("cores", limits->cores, 0));
send_worker_msg(q,w, "gpus %s\n", rmsummary_resource_to_str("gpus", limits->gpus, 0));
send_worker_msg(q,w, "memory %s\n", rmsummary_resource_to_str("memory", limits->memory, 0));
send_worker_msg(q,w, "disk %s\n", rmsummary_resource_to_str("disk", limits->disk, 0));
/* Do not specify end, wall_time if running the resource monitor. We let the monitor police these resources. */
if(q->monitor_mode == MON_DISABLED) {
if(limits->end > 0) {
send_worker_msg(q,w, "end_time %s\n", rmsummary_resource_to_str("end", limits->end, 0));
}
if(limits->wall_time > 0) {
send_worker_msg(q,w, "wall_time %s\n", rmsummary_resource_to_str("wall_time", limits->wall_time, 0));
}
}
itable_insert(w->current_tasks_boxes, t->taskid, limits);
rmsummary_merge_override(t->resources_allocated, limits);
	/* Note that even though environment variables are sent after resources,
	 * the values of CORES, MEMORY, etc. will be set at the worker to the
	 * values given by specify_*, if used. */
char *var;
list_first_item(t->env_list);
while((var=list_next_item(t->env_list))) {
send_worker_msg(q, w,"env %zu\n%s\n", strlen(var), var);
}
if(t->input_files) {
struct work_queue_file *tf;
list_first_item(t->input_files);
while((tf = list_next_item(t->input_files))) {
if(tf->type == WORK_QUEUE_DIRECTORY) {
send_worker_msg(q,w, "dir %s\n", tf->remote_name);
} else {
char remote_name_encoded[PATH_MAX];
url_encode(tf->remote_name, remote_name_encoded, PATH_MAX);
send_worker_msg(q,w, "infile %s %s %d\n", tf->cached_name, remote_name_encoded, tf->flags);
}
}
}
if(t->output_files) {
struct work_queue_file *tf;
list_first_item(t->output_files);
while((tf = list_next_item(t->output_files))) {
char remote_name_encoded[PATH_MAX];
url_encode(tf->remote_name, remote_name_encoded, PATH_MAX);
send_worker_msg(q,w, "outfile %s %s %d\n", tf->cached_name, remote_name_encoded, tf->flags);
}
}
// send_worker_msg returns the number of bytes sent, or a number less than
// zero to indicate errors. We are lazy here, we only check the last
// message we sent to the worker (other messages may have failed above).
int result_msg = send_worker_msg(q,w,"end\n");
if(result_msg > -1)
{
debug(D_WQ, "%s (%s) busy on '%s'", w->hostname, w->addrport, t->command_line);
return WQ_SUCCESS;
}
else
{
return WQ_WORKER_FAILURE;
}
}
/*
Store a report summarizing the performance of a completed task.
Keep a list of reports equal to the number of workers connected.
Used for computing queue capacity below.
*/
static void task_report_delete(struct work_queue_task_report *tr) {
rmsummary_delete(tr->resources);
free(tr);
}
static void add_task_report(struct work_queue *q, struct work_queue_task *t)
{
struct work_queue_task_report *tr;
struct work_queue_stats s;
work_queue_get_stats(q, &s);
if(!t->resources_allocated) {
return;
}
// Create a new report object and add it to the list.
tr = calloc(1, sizeof(struct work_queue_task_report));
tr->transfer_time = (t->time_when_commit_end - t->time_when_commit_start) + (t->time_when_done - t->time_when_retrieval);
tr->exec_time = t->time_workers_execute_last;
tr->manager_time = (((t->time_when_done - t->time_when_commit_start) - tr->transfer_time) - tr->exec_time);
tr->resources = rmsummary_copy(t->resources_allocated, 0);
list_push_tail(q->task_reports, tr);
// Trim the list, but never below its previous size.
static int count = WORK_QUEUE_TASK_REPORT_MIN_SIZE;
count = MAX(count, 2*q->stats->tasks_on_workers);
while(list_size(q->task_reports) >= count) {
tr = list_pop_head(q->task_reports);
task_report_delete(tr);
}
resource_monitor_append_report(q, t);
}
/*
Compute queue capacity based on stored task reports
and the summary of manager activity.
*/
static void compute_capacity(const struct work_queue *q, struct work_queue_stats *s)
{
struct work_queue_task_report *capacity = calloc(1, sizeof(*capacity));
capacity->resources = rmsummary_create(0);
struct work_queue_task_report *tr;
double alpha = 0.05;
int count = list_size(q->task_reports);
int capacity_instantaneous = 0;
// Compute the average task properties.
if(count < 1) {
capacity->resources->cores = 1;
capacity->resources->memory = 512;
capacity->resources->disk = 1024;
capacity->resources->gpus = 0;
capacity->exec_time = WORK_QUEUE_DEFAULT_CAPACITY_TASKS;
capacity->transfer_time = 1;
q->stats->capacity_weighted = WORK_QUEUE_DEFAULT_CAPACITY_TASKS;
capacity_instantaneous = WORK_QUEUE_DEFAULT_CAPACITY_TASKS;
count = 1;
} else {
// Sum up the task reports available.
list_first_item(q->task_reports);
while((tr = list_next_item(q->task_reports))) {
capacity->transfer_time += tr->transfer_time;
capacity->exec_time += tr->exec_time;
capacity->manager_time += tr->manager_time;
if(tr->resources) {
capacity->resources->cores += tr->resources ? tr->resources->cores : 1;
capacity->resources->memory += tr->resources ? tr->resources->memory : 512;
capacity->resources->disk += tr->resources ? tr->resources->disk : 1024;
capacity->resources->gpus += tr->resources ? tr->resources->gpus : 0;
}
}
tr = list_peek_tail(q->task_reports);
if(tr->transfer_time > 0) {
capacity_instantaneous = DIV_INT_ROUND_UP(tr->exec_time, (tr->transfer_time + tr->manager_time));
q->stats->capacity_weighted = (int) ceil((alpha * (float) capacity_instantaneous) + ((1.0 - alpha) * q->stats->capacity_weighted));
time_t ts;
time(&ts);
//debug(D_WQ, "capacity: %lld %"PRId64" %"PRId64" %"PRId64" %d %d %d", (long long) ts, tr->exec_time, tr->transfer_time, tr->manager_time, q->stats->capacity_weighted, s->tasks_done, s->workers_connected);
}
}
capacity->transfer_time = MAX(1, capacity->transfer_time);
capacity->exec_time = MAX(1, capacity->exec_time);
capacity->manager_time = MAX(1, capacity->manager_time);
//debug(D_WQ, "capacity.exec_time: %lld", (long long) capacity->exec_time);
//debug(D_WQ, "capacity.transfer_time: %lld", (long long) capacity->transfer_time);
//debug(D_WQ, "capacity.manager_time: %lld", (long long) capacity->manager_time);
// Never go below the default capacity
int64_t ratio = MAX(WORK_QUEUE_DEFAULT_CAPACITY_TASKS, DIV_INT_ROUND_UP(capacity->exec_time, (capacity->transfer_time + capacity->manager_time)));
q->stats->capacity_tasks = ratio;
q->stats->capacity_cores = DIV_INT_ROUND_UP(capacity->resources->cores * ratio, count);
q->stats->capacity_memory = DIV_INT_ROUND_UP(capacity->resources->memory * ratio, count);
q->stats->capacity_disk = DIV_INT_ROUND_UP(capacity->resources->disk * ratio, count);
q->stats->capacity_gpus = DIV_INT_ROUND_UP(capacity->resources->gpus * ratio, count);
q->stats->capacity_instantaneous = DIV_INT_ROUND_UP(capacity_instantaneous, 1);
task_report_delete(capacity);
}
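/*
Track the fraction of time the manager spends dispatching and
retrieving tasks as an exponential moving average:
	load = (1 - alpha)*load + alpha*activity
where activity is 1 if a task was sent or received in this cycle,
and 0 otherwise.
*/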
void compute_manager_load(struct work_queue *q, int task_activity) {
double alpha = 0.05;
double load = q->stats->manager_load;
if(task_activity) {
load = load * (1 - alpha) + 1 * alpha;
} else {
load = load * (1 - alpha) + 0 * alpha;
}
q->stats->manager_load = load;
}
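/*
Determine whether a task is a feasible match for a worker: the worker
must have reported its resources, not be draining or blocked, have
room for the task's resource box (disk is never overcommitted), and
provide every feature the task requires. Returns 1 on a match.
*/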
static int check_hand_against_task(struct work_queue *q, struct work_queue_worker *w, struct work_queue_task *t) {
	/* worker has not reported any resources yet */
if(w->resources->tag < 0)
return 0;
if(w->resources->workers.total < 1) {
return 0;
}
if(w->draining) {
return 0;
}
if(w->type != WORKER_TYPE_FOREMAN) {
struct blocklist_host_info *info = hash_table_lookup(q->worker_blocklist, w->hostname);
if (info && info->blocked) {
return 0;
}
}
struct rmsummary *limits = task_worker_box_size(q, w, t);
int ok = 1;
if(w->resources->cores.inuse + limits->cores > overcommitted_resource_total(q, w->resources->cores.total, 1)) {
ok = 0;
}
if(w->resources->memory.inuse + limits->memory > overcommitted_resource_total(q, w->resources->memory.total, 0)) {
ok = 0;
}
if(w->resources->disk.inuse + limits->disk > w->resources->disk.total) { /* No overcommit disk */
ok = 0;
}
if(w->resources->gpus.inuse + limits->gpus > overcommitted_resource_total(q, w->resources->gpus.total, 0)) {
ok = 0;
}
rmsummary_delete(limits);
if(t->features) {
if(!w->features)
return 0;
char *feature;
list_first_item(t->features);
while((feature = list_next_item(t->features))) {
if(!hash_table_lookup(w->features, feature))
return 0;
}
}
return ok;
}
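/*
Of the feasible workers, select the one that already caches the
largest number of bytes of this task's input files, so as to
minimize transfer time.
*/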
static struct work_queue_worker *find_worker_by_files(struct work_queue *q, struct work_queue_task *t)
{
char *key;
struct work_queue_worker *w;
struct work_queue_worker *best_worker = 0;
int64_t most_task_cached_bytes = 0;
int64_t task_cached_bytes;
struct stat *remote_info;
struct work_queue_file *tf;
hash_table_firstkey(q->worker_table);
while(hash_table_nextkey(q->worker_table, &key, (void **) &w)) {
if( check_hand_against_task(q, w, t) ) {
task_cached_bytes = 0;
list_first_item(t->input_files);
while((tf = list_next_item(t->input_files))) {
if((tf->type == WORK_QUEUE_FILE || tf->type == WORK_QUEUE_FILE_PIECE) && (tf->flags & WORK_QUEUE_CACHE)) {
remote_info = hash_table_lookup(w->current_files, tf->cached_name);
if(remote_info)
task_cached_bytes += remote_info->st_size;
}
}
if(!best_worker || task_cached_bytes > most_task_cached_bytes) {
best_worker = w;
most_task_cached_bytes = task_cached_bytes;
}
}
}
return best_worker;
}
static struct work_queue_worker *find_worker_by_fcfs(struct work_queue *q, struct work_queue_task *t)
{
char *key;
struct work_queue_worker *w;
hash_table_firstkey(q->worker_table);
while(hash_table_nextkey(q->worker_table, &key, (void**)&w)) {
if( check_hand_against_task(q, w, t) ) {
return w;
}
}
return NULL;
}
static struct work_queue_worker *find_worker_by_random(struct work_queue *q, struct work_queue_task *t)
{
char *key;
struct work_queue_worker *w = NULL;
int random_worker;
struct list *valid_workers = list_create();
hash_table_firstkey(q->worker_table);
while(hash_table_nextkey(q->worker_table, &key, (void**)&w)) {
if(check_hand_against_task(q, w, t)) {
list_push_tail(valid_workers, w);
}
}
w = NULL;
if(list_size(valid_workers) > 0) {
random_worker = (rand() % list_size(valid_workers)) + 1;
while(random_worker && list_size(valid_workers)) {
w = list_pop_head(valid_workers);
random_worker--;
}
}
list_delete(valid_workers);
return w;
}
// 1 if a < b, 0 if a >= b
static int compare_worst_fit(struct work_queue_resources *a, struct work_queue_resources *b)
{
//Total worker order: free cores > free memory > free disk > free gpus
if((a->cores.total < b->cores.total))
return 1;
if((a->cores.total > b->cores.total))
return 0;
//Same number of free cores...
if((a->memory.total < b->memory.total))
return 1;
if((a->memory.total > b->memory.total))
return 0;
//Same number of free memory...
if((a->disk.total < b->disk.total))
return 1;
if((a->disk.total > b->disk.total))
return 0;
//Same number of free disk...
if((a->gpus.total < b->gpus.total))
return 1;
if((a->gpus.total > b->gpus.total))
return 0;
//Number of free resources are the same.
return 0;
}
static struct work_queue_worker *find_worker_by_worst_fit(struct work_queue *q, struct work_queue_task *t)
{
char *key;
struct work_queue_worker *w;
struct work_queue_worker *best_worker = NULL;
struct work_queue_resources bres;
struct work_queue_resources wres;
memset(&bres, 0, sizeof(struct work_queue_resources));
memset(&wres, 0, sizeof(struct work_queue_resources));
hash_table_firstkey(q->worker_table);
while(hash_table_nextkey(q->worker_table, &key, (void **) &w)) {
if( check_hand_against_task(q, w, t) ) {
//Use total field on bres, wres to indicate free resources.
wres.cores.total = w->resources->cores.total - w->resources->cores.inuse;
wres.memory.total = w->resources->memory.total - w->resources->memory.inuse;
wres.disk.total = w->resources->disk.total - w->resources->disk.inuse;
wres.gpus.total = w->resources->gpus.total - w->resources->gpus.inuse;
if(!best_worker || compare_worst_fit(&bres, &wres))
{
best_worker = w;
memcpy(&bres, &wres, sizeof(struct work_queue_resources));
}
}
}
return best_worker;
}
static struct work_queue_worker *find_worker_by_time(struct work_queue *q, struct work_queue_task *t)
{
char *key;
struct work_queue_worker *w;
struct work_queue_worker *best_worker = 0;
double best_time = HUGE_VAL;
hash_table_firstkey(q->worker_table);
while(hash_table_nextkey(q->worker_table, &key, (void **) &w)) {
if(check_hand_against_task(q, w, t)) {
if(w->total_tasks_complete > 0) {
double t = (w->total_task_time + w->total_transfer_time) / w->total_tasks_complete;
if(!best_worker || t < best_time) {
best_worker = w;
best_time = t;
}
}
}
}
if(best_worker) {
return best_worker;
} else {
return find_worker_by_fcfs(q, t);
}
}
// use task-specific algorithm if set, otherwise default to the queue's setting.
static struct work_queue_worker *find_best_worker(struct work_queue *q, struct work_queue_task *t)
{
int a = t->worker_selection_algorithm;
if(a == WORK_QUEUE_SCHEDULE_UNSET) {
a = q->worker_selection_algorithm;
}
switch (a) {
case WORK_QUEUE_SCHEDULE_FILES:
return find_worker_by_files(q, t);
case WORK_QUEUE_SCHEDULE_TIME:
return find_worker_by_time(q, t);
case WORK_QUEUE_SCHEDULE_WORST:
return find_worker_by_worst_fit(q, t);
case WORK_QUEUE_SCHEDULE_FCFS:
return find_worker_by_fcfs(q, t);
case WORK_QUEUE_SCHEDULE_RAND:
default:
return find_worker_by_random(q, t);
}
}
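/*
Recompute the resources in use at a worker by summing the resource
boxes of all tasks currently committed to it.
*/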
static void count_worker_resources(struct work_queue *q, struct work_queue_worker *w)
{
struct rmsummary *box;
uint64_t taskid;
w->resources->cores.inuse = 0;
w->resources->memory.inuse = 0;
w->resources->disk.inuse = 0;
w->resources->gpus.inuse = 0;
update_max_worker(q, w);
if(w->resources->workers.total < 1)
{
return;
}
itable_firstkey(w->current_tasks_boxes);
while(itable_nextkey(w->current_tasks_boxes, &taskid, (void **)& box)) {
w->resources->cores.inuse += box->cores;
w->resources->memory.inuse += box->memory;
w->resources->disk.inuse += box->disk;
w->resources->gpus.inuse += box->gpus;
}
}
static void update_max_worker(struct work_queue *q, struct work_queue_worker *w) {
if(!w)
return;
if(w->resources->workers.total < 1) {
return;
}
if(q->current_max_worker->cores < w->resources->cores.largest) {
q->current_max_worker->cores = w->resources->cores.largest;
}
if(q->current_max_worker->memory < w->resources->memory.largest) {
q->current_max_worker->memory = w->resources->memory.largest;
}
if(q->current_max_worker->disk < w->resources->memory.largest) {
q->current_max_worker->disk = w->resources->memory.largest;
}
if(q->current_max_worker->gpus < w->resources->memory.largest) {
q->current_max_worker->gpus = w->resources->memory.largest;
}
}
/* we call this function when a worker is disconnected. For efficiency, we use
* update_max_worker when a worker sends resource updates. */
static void find_max_worker(struct work_queue *q) {
q->current_max_worker->cores = 0;
q->current_max_worker->memory = 0;
q->current_max_worker->disk = 0;
q->current_max_worker->gpus = 0;
char *key;
struct work_queue_worker *w;
hash_table_firstkey(q->worker_table);
while(hash_table_nextkey(q->worker_table, &key, (void **) &w)) {
if(w->resources->workers.total > 0)
{
update_max_worker(q, w);
}
}
}
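/*
Dispatch a task to a worker: send the full task description, record
the worker as the execution site, and move the task to the RUNNING
state. On failure, hand off to the failure handler, which decides
whether to blame the worker or the application.
*/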
static void commit_task_to_worker(struct work_queue *q, struct work_queue_worker *w, struct work_queue_task *t)
{
t->hostname = xxstrdup(w->hostname);
t->host = xxstrdup(w->addrport);
t->time_when_commit_start = timestamp_get();
work_queue_result_code_t result = start_one_task(q, w, t);
t->time_when_commit_end = timestamp_get();
itable_insert(w->current_tasks, t->taskid, t);
itable_insert(q->worker_task_map, t->taskid, w); //add worker as execution site for t.
change_task_state(q, t, WORK_QUEUE_TASK_RUNNING);
t->try_count += 1;
q->stats->tasks_dispatched += 1;
count_worker_resources(q, w);
if(result != WQ_SUCCESS) {
debug(D_WQ, "Failed to send task %d to worker %s (%s).", t->taskid, w->hostname, w->addrport);
handle_failure(q, w, t, result);
}
}
static void reap_task_from_worker(struct work_queue *q, struct work_queue_worker *w, struct work_queue_task *t, work_queue_task_state_t new_state)
{
struct work_queue_worker *wr = itable_lookup(q->worker_task_map, t->taskid);
if(wr != w)
{
debug(D_WQ, "Cannot reap task %d from worker. It is not being run by %s (%s)\n", t->taskid, w->hostname, w->addrport);
} else {
w->total_task_time += t->time_workers_execute_last;
}
//update tables.
struct rmsummary *task_box = itable_lookup(w->current_tasks_boxes, t->taskid);
if(task_box)
rmsummary_delete(task_box);
itable_remove(w->current_tasks_boxes, t->taskid);
itable_remove(w->current_tasks, t->taskid);
itable_remove(q->worker_task_map, t->taskid);
change_task_state(q, t, new_state);
count_worker_resources(q, w);
}
static int send_one_task( struct work_queue *q )
{
struct work_queue_task *t;
struct work_queue_worker *w;
// Consider each task in the order of priority:
list_first_item(q->ready_list);
while( (t = list_next_item(q->ready_list))) {
// Find the best worker for the task at the head of the list
w = find_best_worker(q,t);
// If there is no suitable worker, consider the next task.
if(!w) continue;
// Otherwise, remove it from the ready list and start it:
commit_task_to_worker(q,w,t);
return 1;
}
return 0;
}
static int receive_one_task( struct work_queue *q )
{
struct work_queue_task *t;
struct work_queue_worker *w;
uint64_t taskid;
itable_firstkey(q->tasks);
while( itable_nextkey(q->tasks, &taskid, (void **) &t) ) {
if( task_state_is(q, taskid, WORK_QUEUE_TASK_WAITING_RETRIEVAL) ) {
w = itable_lookup(q->worker_task_map, taskid);
fetch_output_from_worker(q, w, taskid);
return 1;
}
}
return 0;
}
//Send keepalive checks to verify that connected workers are responsive, and ask for resource updates. If a worker is unresponsive, remove it.
static void ask_for_workers_updates(struct work_queue *q) {
struct work_queue_worker *w;
char *key;
timestamp_t current_time = timestamp_get();
hash_table_firstkey(q->worker_table);
while(hash_table_nextkey(q->worker_table, &key, (void **) &w)) {
if(q->keepalive_interval > 0) {
			/* we have not received a work queue message from the worker yet, so we
			 * simply check against its start_time. */
if(!strcmp(w->hostname, "unknown")){
if ((int)((current_time - w->start_time)/1000000) >= q->keepalive_timeout) {
debug(D_WQ, "Removing worker %s (%s): hasn't sent its initialization in more than %d s", w->hostname, w->addrport, q->keepalive_timeout);
handle_worker_failure(q, w);
}
continue;
}
// send new keepalive check only (1) if we received a response since last keepalive check AND
// (2) we are past keepalive interval
if(w->last_msg_recv_time > w->last_update_msg_time) {
int64_t last_update_elapsed_time = (int64_t)(current_time - w->last_update_msg_time)/1000000;
if(last_update_elapsed_time >= q->keepalive_interval) {
if(send_worker_msg(q,w, "check\n")<0) {
debug(D_WQ, "Failed to send keepalive check to worker %s (%s).", w->hostname, w->addrport);
handle_worker_failure(q, w);
} else {
debug(D_WQ, "Sent keepalive check to worker %s (%s)", w->hostname, w->addrport);
w->last_update_msg_time = current_time;
}
}
} else {
// we haven't received a message from worker since its last keepalive check. Check if time
// since we last polled link for responses has exceeded keepalive timeout. If so, remove worker.
if (q->link_poll_end > w->last_update_msg_time) {
if ((int)((q->link_poll_end - w->last_update_msg_time)/1000000) >= q->keepalive_timeout) {
debug(D_WQ, "Removing worker %s (%s): hasn't responded to keepalive check for more than %d s", w->hostname, w->addrport, q->keepalive_timeout);
handle_worker_failure(q, w);
}
}
}
}
}
}
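/*
Remove workers that are taking too long to execute their current task
relative to the average for the task's category ("fast abort").
An average is only trusted once a category has at least 10 completed
tasks; the multiplier comes from the category itself or from the
default category. Removed workers are blocked for a timeout so that
tasks are not immediately rescheduled on them.
*/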
static int abort_slow_workers(struct work_queue *q)
{
struct category *c;
char *category_name;
struct work_queue_worker *w;
struct work_queue_task *t;
uint64_t taskid;
int removed = 0;
/* optimization. If no category has a fast abort multiplier, simply return. */
int fast_abort_flag = 0;
hash_table_firstkey(q->categories);
while(hash_table_nextkey(q->categories, &category_name, (void **) &c)) {
if(c->total_tasks < 10) {
c->average_task_time = 0;
continue;
}
struct work_queue_stats *stats = c->wq_stats;
if(!stats) {
/* no stats have been computed yet */
continue;
}
c->average_task_time = (stats->time_workers_execute_good + stats->time_send_good + stats->time_receive_good) / c->total_tasks;
if(c->fast_abort > 0)
fast_abort_flag = 1;
}
if(!fast_abort_flag)
return 0;
struct category *c_def = work_queue_category_lookup_or_create(q, "default");
timestamp_t current = timestamp_get();
itable_firstkey(q->tasks);
while(itable_nextkey(q->tasks, &taskid, (void **) &t)) {
c = work_queue_category_lookup_or_create(q, t->category);
/* Fast abort deactivated for this category */
if(c->fast_abort == 0)
continue;
timestamp_t runtime = current - t->time_when_commit_start;
timestamp_t average_task_time = c->average_task_time;
/* Not enough samples, skip the task. */
if(average_task_time < 1)
continue;
double multiplier;
if(c->fast_abort > 0) {
multiplier = c->fast_abort;
}
else if(c_def->fast_abort > 0) {
/* This category uses the default fast abort. (< 0 use default, 0 deactivate). */
multiplier = c_def->fast_abort;
}
else {
			/* Fast abort is also deactivated for the default category. */
continue;
}
if(runtime >= (average_task_time * multiplier)) {
w = itable_lookup(q->worker_task_map, t->taskid);
if(w && (w->type == WORKER_TYPE_WORKER))
{
debug(D_WQ, "Removing worker %s (%s): takes too long to execute the current task - %.02lf s (average task execution time by other workers is %.02lf s)", w->hostname, w->addrport, runtime / 1000000.0, average_task_time / 1000000.0);
work_queue_block_host_with_timeout(q, w->hostname, wq_option_blocklist_slow_workers_timeout);
remove_worker(q, w, WORKER_DISCONNECT_FAST_ABORT);
q->stats->workers_fast_aborted++;
removed++;
}
}
}
return removed;
}
static int shut_down_worker(struct work_queue *q, struct work_queue_worker *w)
{
if(!w) return 0;
send_worker_msg(q,w,"exit\n");
remove_worker(q, w, WORKER_DISCONNECT_EXPLICIT);
q->stats->workers_released++;
return 1;
}
static int abort_drained_workers(struct work_queue *q) {
char *worker_hashkey = NULL;
struct work_queue_worker *w = NULL;
int removed = 0;
hash_table_firstkey(q->worker_table);
while(hash_table_nextkey(q->worker_table, &worker_hashkey, (void **) &w)) {
if(w->draining && itable_size(w->current_tasks) == 0) {
removed++;
shut_down_worker(q, w);
}
}
return removed;
}
//comparator function for checking if a task matches given tag.
static int tasktag_comparator(void *t, const void *r) {
struct work_queue_task *task_in_queue = t;
const char *tasktag = r;
if (!strcmp(task_in_queue->tag, tasktag)) {
return 1;
}
return 0;
}
static int cancel_task_on_worker(struct work_queue *q, struct work_queue_task *t, work_queue_task_state_t new_state) {
struct work_queue_worker *w = itable_lookup(q->worker_task_map, t->taskid);
if (w) {
//send message to worker asking to kill its task.
send_worker_msg(q,w, "kill %d\n",t->taskid);
debug(D_WQ, "Task with id %d is aborted at worker %s (%s) and removed.", t->taskid, w->hostname, w->addrport);
//Delete any input files that are not to be cached.
delete_worker_files(q, w, t->input_files, WORK_QUEUE_CACHE | WORK_QUEUE_PREEXIST);
//Delete all output files since they are not needed as the task was aborted.
delete_worker_files(q, w, t->output_files, 0);
//update tables.
reap_task_from_worker(q, w, t, new_state);
return 1;
} else {
change_task_state(q, t, new_state);
return 0;
}
}
static struct work_queue_task *find_task_by_tag(struct work_queue *q, const char *tasktag) {
struct work_queue_task *t;
uint64_t taskid;
itable_firstkey(q->tasks);
while(itable_nextkey(q->tasks, &taskid, (void**)&t)) {
if( tasktag_comparator(t, tasktag) ) {
return t;
}
}
return NULL;
}
static struct work_queue_file *work_queue_file_clone(const struct work_queue_file *file) {
const int file_t_size = sizeof(struct work_queue_file);
struct work_queue_file *new = xxmalloc(file_t_size);
memcpy(new, file, file_t_size);
//allocate new memory for strings so we don't segfault when the original
//memory is freed.
new->payload = xxstrdup(file->payload);
new->remote_name = xxstrdup(file->remote_name);
if(file->cached_name)
new->cached_name = xxstrdup(file->cached_name);
return new;
}
static struct list *work_queue_task_file_list_clone(struct list *list) {
struct list *new = list_create();
struct work_queue_file *old_file, *new_file;
list_first_item(list);
while ((old_file = list_next_item(list))) {
new_file = work_queue_file_clone(old_file);
list_push_tail(new, new_file);
}
return new;
}
static struct list *work_queue_task_env_list_clone(struct list *env_list) {
struct list *new = list_create();
char *var;
list_first_item(env_list);
while((var=list_next_item(env_list))) {
list_push_tail(new, xxstrdup(var));
}
return new;
}
/******************************************************/
/********** work_queue_task public functions **********/
/******************************************************/
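/*
A minimal usage sketch of the task API (illustrative only; it assumes
a queue q created elsewhere with work_queue_create, and the program
and file names here are hypothetical):

	struct work_queue_task *t = work_queue_task_create("./sim < in.dat > out.dat");
	work_queue_task_specify_file(t, "sim", "sim", WORK_QUEUE_INPUT, WORK_QUEUE_CACHE);
	work_queue_task_specify_file(t, "in.dat", "in.dat", WORK_QUEUE_INPUT, WORK_QUEUE_NOCACHE);
	work_queue_task_specify_file(t, "out.dat", "out.dat", WORK_QUEUE_OUTPUT, WORK_QUEUE_NOCACHE);
	work_queue_task_specify_cores(t, 1);
	work_queue_submit(q, t);
*/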
struct work_queue_task *work_queue_task_create(const char *command_line)
{
struct work_queue_task *t = malloc(sizeof(*t));
if(!t) {
fprintf(stderr, "Error: failed to allocate memory for task.\n");
return NULL;
}
memset(t, 0, sizeof(*t));
/* REMEMBER: Any memory allocation done in this function should have a
* corresponding copy in work_queue_task_clone. Otherwise we get
* double-free segfaults. */
if(command_line) t->command_line = xxstrdup(command_line);
t->worker_selection_algorithm = WORK_QUEUE_SCHEDULE_UNSET;
t->input_files = list_create();
t->output_files = list_create();
t->env_list = list_create();
t->return_status = -1;
t->result = WORK_QUEUE_RESULT_UNKNOWN;
t->resource_request = CATEGORY_ALLOCATION_FIRST;
/* In the absence of additional information, a task consumes an entire worker. */
t->resources_requested = rmsummary_create(-1);
t->resources_measured = rmsummary_create(-1);
t->resources_allocated = rmsummary_create(-1);
/* Default gpus are 0, rather than whole workers: */
t->resources_requested->gpus = 0;
t->category = xxstrdup("default");
return t;
}
struct work_queue_task *work_queue_task_clone(const struct work_queue_task *task)
{
struct work_queue_task *new = xxmalloc(sizeof(struct work_queue_task));
memcpy(new, task, sizeof(*new));
new->taskid = 0;
//allocate new memory so we don't segfault when original memory is freed.
if(task->tag) {
new->tag = xxstrdup(task->tag);
}
if(task->category) {
new->category = xxstrdup(task->category);
}
if(task->command_line) {
new->command_line = xxstrdup(task->command_line);
}
if(task->features) {
new->features = list_create();
char *req;
list_first_item(task->features);
while((req = list_next_item(task->features))) {
list_push_tail(new->features, xxstrdup(req));
}
}
new->input_files = work_queue_task_file_list_clone(task->input_files);
new->output_files = work_queue_task_file_list_clone(task->output_files);
new->env_list = work_queue_task_env_list_clone(task->env_list);
if(task->resources_requested) {
new->resources_requested = rmsummary_copy(task->resources_requested, 0);
}
if(task->resources_measured) {
new->resources_measured = rmsummary_copy(task->resources_measured, 0);
}
if(task->resources_allocated) {
new->resources_allocated = rmsummary_copy(task->resources_allocated, 0);
}
if(task->monitor_output_directory) {
new->monitor_output_directory = xxstrdup(task->monitor_output_directory);
}
if(task->output) {
new->output = xxstrdup(task->output);
}
if(task->host) {
new->host = xxstrdup(task->host);
}
if(task->hostname) {
new->hostname = xxstrdup(task->hostname);
}
return new;
}
void work_queue_task_specify_command( struct work_queue_task *t, const char *cmd )
{
if(t->command_line) free(t->command_line);
t->command_line = xxstrdup(cmd);
}
void work_queue_task_specify_environment_variable( struct work_queue_task *t, const char *name, const char *value )
{
if(value) {
list_push_tail(t->env_list,string_format("%s=%s",name,value));
} else {
		/* Specifications without = indicate variables to be unset. */
list_push_tail(t->env_list,string_format("%s",name));
}
}
/* same as above, but with a typo. can't remove as it is part of already published api. */
void work_queue_task_specify_enviroment_variable( struct work_queue_task *t, const char *name, const char *value ) {
work_queue_task_specify_environment_variable(t, name, value);
}
void work_queue_task_specify_max_retries( struct work_queue_task *t, int64_t max_retries ) {
if(max_retries < 1) {
t->max_retries = 0;
}
else {
t->max_retries = max_retries;
}
}
void work_queue_task_specify_memory( struct work_queue_task *t, int64_t memory )
{
if(memory < 0)
{
t->resources_requested->memory = -1;
}
else
{
t->resources_requested->memory = memory;
}
}
void work_queue_task_specify_disk( struct work_queue_task *t, int64_t disk )
{
if(disk < 0)
{
t->resources_requested->disk = -1;
}
else
{
t->resources_requested->disk = disk;
}
}
void work_queue_task_specify_cores( struct work_queue_task *t, int cores )
{
if(cores < 0)
{
t->resources_requested->cores = -1;
}
else
{
t->resources_requested->cores = cores;
}
}
void work_queue_task_specify_gpus( struct work_queue_task *t, int gpus )
{
if(gpus < 0)
{
t->resources_requested->gpus = -1;
}
else
{
t->resources_requested->gpus = gpus;
}
}
void work_queue_task_specify_end_time( struct work_queue_task *t, int64_t useconds )
{
if(useconds < 1)
{
t->resources_requested->end = -1;
}
else
{
t->resources_requested->end = useconds;
}
}
void work_queue_task_specify_running_time( struct work_queue_task *t, int64_t useconds )
{
if(useconds < 1)
{
t->resources_requested->wall_time = -1;
}
else
{
t->resources_requested->wall_time = useconds;
}
}
void work_queue_task_specify_resources(struct work_queue_task *t, const struct rmsummary *rm) {
if(!rm)
return;
work_queue_task_specify_cores(t, rm->cores);
work_queue_task_specify_memory(t, rm->memory);
work_queue_task_specify_disk(t, rm->disk);
work_queue_task_specify_gpus(t, rm->gpus);
work_queue_task_specify_running_time(t, rm->wall_time);
work_queue_task_specify_end_time(t, rm->end);
}
void work_queue_task_specify_tag(struct work_queue_task *t, const char *tag)
{
if(t->tag)
free(t->tag);
t->tag = xxstrdup(tag);
}
void work_queue_task_specify_category(struct work_queue_task *t, const char *category)
{
if(t->category)
free(t->category);
t->category = xxstrdup(category ? category : "default");
}
void work_queue_task_specify_feature(struct work_queue_task *t, const char *name)
{
if(!name) {
return;
}
if(!t->features) {
t->features = list_create();
}
list_push_tail(t->features, xxstrdup(name));
}
struct work_queue_file *work_queue_file_create(const char *payload, const char *remote_name, work_queue_file_t type, work_queue_file_flags_t flags)
{
struct work_queue_file *f;
f = malloc(sizeof(*f));
if(!f) {
debug(D_NOTICE, "Cannot allocate memory for file %s.\n", remote_name);
return NULL;
}
memset(f, 0, sizeof(*f));
f->remote_name = xxstrdup(remote_name);
f->type = type;
f->flags = flags;
/* WORK_QUEUE_BUFFER needs to set these after the current function returns */
if(payload) {
f->payload = xxstrdup(payload);
f->length = strlen(payload);
}
f->cached_name = make_cached_name(f);
return f;
}
int work_queue_task_specify_url(struct work_queue_task *t, const char *file_url, const char *remote_name, work_queue_file_type_t type, work_queue_file_flags_t flags)
{
struct list *files;
struct work_queue_file *tf;
if(!t || !file_url || !remote_name) {
fprintf(stderr, "Error: Null arguments for task, url, and remote name not allowed in specify_url.\n");
return 0;
}
if(remote_name[0] == '/') {
fatal("Error: Remote name %s is an absolute path.\n", remote_name);
}
if(type == WORK_QUEUE_INPUT) {
files = t->input_files;
//check if two different urls map to the same remote name for inputs.
list_first_item(t->input_files);
while((tf = (struct work_queue_file*)list_next_item(files))) {
if(!strcmp(remote_name, tf->remote_name) && strcmp(file_url, tf->payload)) {
fprintf(stderr, "Error: input url %s conflicts with another input pointing to same remote name (%s).\n", file_url, remote_name);
return 0;
}
}
//check if there is an output file with the same remote name.
list_first_item(t->output_files);
while((tf = (struct work_queue_file*)list_next_item(t->input_files))) {
if(!strcmp(remote_name, tf->remote_name)){
fprintf(stderr, "Error: input url %s conflicts with an output pointing to same remote name (%s).\n", file_url, remote_name);
return 0;
}
}
} else {
files = t->output_files;
		//check if two different remote names map to the same url for outputs.
list_first_item(t->output_files);
while((tf = (struct work_queue_file*)list_next_item(files))) {
if(!strcmp(file_url, tf->payload) && strcmp(remote_name, tf->remote_name)) {
fprintf(stderr, "Error: output url remote name %s conflicts with another output pointing to same url (%s).\n", remote_name, file_url);
return 0;
}
}
//check if there is an input file with the same remote name.
list_first_item(t->input_files);
while((tf = (struct work_queue_file*)list_next_item(t->input_files))) {
if(!strcmp(remote_name, tf->remote_name)){
fprintf(stderr, "Error: output url %s conflicts with an input pointing to same remote name (%s).\n", file_url, remote_name);
return 0;
}
}
}
tf = work_queue_file_create(file_url, remote_name, WORK_QUEUE_URL, flags);
if(!tf) return 0;
list_push_tail(files, tf);
return 1;
}
int work_queue_task_specify_file(struct work_queue_task *t, const char *local_name, const char *remote_name, work_queue_file_type_t type, work_queue_file_flags_t flags)
{
struct list *files;
struct work_queue_file *tf;
if(!t || !local_name || !remote_name) {
fprintf(stderr, "Error: Null arguments for task, local name, and remote name not allowed in specify_file.\n");
return 0;
}
// @param remote_name is the path of the file as on the worker machine. In
	// the Work Queue framework, workers are prohibited from writing to paths
	// outside of their workspaces. When a task is specified, the workspace of
	// the worker (the worker on which the task will be executed) is unlikely to
// be known. Thus @param remote_name should not be an absolute path.
if(remote_name[0] == '/') {
fatal("Error: Remote name %s is an absolute path.\n", remote_name);
}
if(type == WORK_QUEUE_INPUT) {
files = t->input_files;
//check if two different local names map to the same remote name for inputs.
list_first_item(t->input_files);
while((tf = (struct work_queue_file*)list_next_item(t->input_files))) {
if(!strcmp(remote_name, tf->remote_name) && strcmp(local_name, tf->payload)){
fprintf(stderr, "Error: input file %s conflicts with another input pointing to same remote name (%s).\n", local_name, remote_name);
return 0;
}
}
//check if there is an output file with the same remote name.
list_first_item(t->output_files);
while((tf = (struct work_queue_file*)list_next_item(t->output_files))) {
if(!strcmp(remote_name, tf->remote_name)){
fprintf(stderr, "Error: input file %s conflicts with an output pointing to same remote name (%s).\n", local_name, remote_name);
return 0;
}
}
} else {
files = t->output_files;
//check if two different remote names map to the same local name for outputs.
list_first_item(files);
while((tf = (struct work_queue_file*)list_next_item(files))) {
if(!strcmp(local_name, tf->payload) && strcmp(remote_name, tf->remote_name)) {
fprintf(stderr, "Error: output file %s conflicts with another output pointing to same remote name (%s).\n", local_name, remote_name);
return 0;
}
}
//check if there is an input file with the same remote name.
list_first_item(t->input_files);
while((tf = (struct work_queue_file*)list_next_item(t->input_files))) {
if(!strcmp(remote_name, tf->remote_name)){
fprintf(stderr, "Error: output file %s conflicts with an input pointing to same remote name (%s).\n", local_name, remote_name);
return 0;
}
}
}
tf = work_queue_file_create(local_name, remote_name, WORK_QUEUE_FILE, flags);
if(!tf) return 0;
list_push_tail(files, tf);
return 1;
}
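/*
 * Illustrative usage (a sketch with assumed file names): attach a local input
 * and collect an output under relative remote names.
 *
 *   work_queue_task_specify_file(t, "/tmp/input.dat", "input.dat",
 *                                WORK_QUEUE_INPUT, WORK_QUEUE_CACHE);
 *   work_queue_task_specify_file(t, "results/output.dat", "output.dat",
 *                                WORK_QUEUE_OUTPUT, WORK_QUEUE_NOCACHE);
 */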
int work_queue_task_specify_directory(struct work_queue_task *t, const char *local_name, const char *remote_name, work_queue_file_type_t type, work_queue_file_flags_t flags, int recursive) {
struct list *files;
struct work_queue_file *tf;
if(!t || !remote_name) {
fprintf(stderr, "Error: Null arguments for task and remote name not allowed in specify_directory.\n");
return 0;
}
// @param remote_name is the path of the file as seen on the worker machine. In
// the Work Queue framework, workers are prohibited from writing to paths
// outside of their workspaces. When a task is specified, the workspace of
// the worker (the worker on which the task will be executed) is unlikely to
// be known. Thus @param remote_name should not be an absolute path.
if(remote_name[0] == '/') {
fatal("Error: Remote name %s is an absolute path.\n", remote_name);
}
if(type == WORK_QUEUE_OUTPUT || recursive) {
return work_queue_task_specify_file(t, local_name, remote_name, type, flags);
}
files = t->input_files;
list_first_item(files);
while((tf = (struct work_queue_file*)list_next_item(files))) {
if(!strcmp(remote_name, tf->remote_name)) {
return 0;
}
}
//KNOWN HACK: Every file passes through make_cached_name() which expects the
//payload field to be set. So we simply set the payload to remote name if
//local name is null. This doesn't affect the behavior of the file transfers.
const char *payload = local_name ? local_name : remote_name;
tf = work_queue_file_create(payload, remote_name, WORK_QUEUE_DIRECTORY, flags);
if(!tf) return 0;
list_push_tail(files, tf);
return 1;
}
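/*
 * Illustrative sketch (hypothetical names): with a NULL local name and
 * recursive == 0, this merely creates an empty directory in the task sandbox;
 * with recursive != 0 it falls through to work_queue_task_specify_file and
 * the directory contents are transferred.
 *
 *   work_queue_task_specify_directory(t, NULL, "out", WORK_QUEUE_INPUT, WORK_QUEUE_NOCACHE, 0);
 *   work_queue_task_specify_directory(t, "dataset", "dataset", WORK_QUEUE_INPUT, WORK_QUEUE_CACHE, 1);
 */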
int work_queue_task_specify_file_piece(struct work_queue_task *t, const char *local_name, const char *remote_name, off_t start_byte, off_t end_byte, work_queue_file_type_t type, work_queue_file_flags_t flags)
{
struct list *files;
struct work_queue_file *tf;
if(!t || !local_name || !remote_name) {
fprintf(stderr, "Error: Null arguments for task, local name, and remote name not allowed in specify_file_piece.\n");
return 0;
}
// @param remote_name should not be an absolute path. @see
// work_queue_task_specify_file
if(remote_name[0] == '/') {
fatal("Error: Remote name %s is an absolute path.\n", remote_name);
}
if(end_byte < start_byte) {
fprintf(stderr, "Error: End byte lower than start byte for %s.\n", remote_name);
return 0;
}
if(type == WORK_QUEUE_INPUT) {
files = t->input_files;
//check if two different local names map to the same remote name for inputs.
list_first_item(t->input_files);
while((tf = (struct work_queue_file*)list_next_item(t->input_files))) {
if(!strcmp(remote_name, tf->remote_name) && strcmp(local_name, tf->payload)){
fprintf(stderr, "Error: piece of input file %s conflicts with another input pointing to same remote name (%s).\n", local_name, remote_name);
return 0;
}
}
//check if there is an output file with the same remote name.
list_first_item(t->output_files);
while((tf = (struct work_queue_file*)list_next_item(t->output_files))) {
if(!strcmp(remote_name, tf->remote_name)){
fprintf(stderr, "Error: piece of input file %s conflicts with an output pointing to same remote name (%s).\n", local_name, remote_name);
return 0;
}
}
} else {
files = t->output_files;
//check if two different remote names map to the same local name for outputs.
list_first_item(files);
while((tf = (struct work_queue_file*)list_next_item(files))) {
if(!strcmp(local_name, tf->payload) && strcmp(remote_name, tf->remote_name)) {
fprintf(stderr, "Error: piece of output file %s conflicts with another output pointing to same remote name (%s).\n", local_name, remote_name);
return 0;
}
}
//check if there is an input file with the same remote name.
list_first_item(t->input_files);
while((tf = (struct work_queue_file*)list_next_item(t->input_files))) {
if(!strcmp(remote_name, tf->remote_name)){
fprintf(stderr, "Error: piece of output file %s conflicts with an input pointing to same remote name (%s).\n", local_name, remote_name);
return 0;
}
}
}
tf = work_queue_file_create(local_name, remote_name, WORK_QUEUE_FILE_PIECE, flags);
if(!tf) return 0;
tf->offset = start_byte;
tf->piece_length = end_byte - start_byte + 1;
list_push_tail(files, tf);
return 1;
}
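/*
 * Illustrative sketch (hypothetical names): send only the first 1024 bytes of
 * a large local file as the worker-side file "chunk". Note the range is
 * inclusive: piece_length = end_byte - start_byte + 1.
 *
 *   work_queue_task_specify_file_piece(t, "/data/big.bin", "chunk",
 *                                      0, 1023, WORK_QUEUE_INPUT, WORK_QUEUE_NOCACHE);
 */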
int work_queue_task_specify_buffer(struct work_queue_task *t, const char *data, int length, const char *remote_name, work_queue_file_flags_t flags)
{
struct work_queue_file *tf;
if(!t || !remote_name) {
fprintf(stderr, "Error: Null arguments for task and remote name not allowed in specify_buffer.\n");
return 0;
}
// @param remote_name should not be an absolute path. @see
// work_queue_task_specify_file
if(remote_name[0] == '/') {
fatal("Error: Remote name %s is an absolute path.\n", remote_name);
}
list_first_item(t->input_files);
while((tf = (struct work_queue_file*)list_next_item(t->input_files))) {
if(!strcmp(remote_name, tf->remote_name)) {
fprintf(stderr, "Error: buffer conflicts with another input pointing to same remote name (%s).\n", remote_name);
return 0;
}
}
list_first_item(t->output_files);
while((tf = (struct work_queue_file*)list_next_item(t->output_files))) {
if(!strcmp(remote_name, tf->remote_name)) {
fprintf(stderr, "Error: buffer conflicts with an output pointing to same remote name (%s).\n", remote_name);
return 0;
}
}
tf = work_queue_file_create(NULL, remote_name, WORK_QUEUE_BUFFER, flags);
if(!tf) return 0;
tf->payload = malloc(length);
if(!tf->payload) {
fprintf(stderr, "Error: failed to allocate memory for buffer with remote name %s and length %d bytes.\n", remote_name, length);
free(tf->remote_name);
free(tf->cached_name);
free(tf);
return 0;
}
tf->length = length;
memcpy(tf->payload, data, length);
list_push_tail(t->input_files, tf);
return 1;
}
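/*
 * Illustrative sketch (hypothetical contents): materialize an in-memory
 * string as the worker-side file "config.txt".
 *
 *   const char *conf = "mode=fast\n";
 *   work_queue_task_specify_buffer(t, conf, strlen(conf), "config.txt",
 *                                  WORK_QUEUE_NOCACHE);
 */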
int work_queue_task_specify_file_command(struct work_queue_task *t, const char *remote_name, const char *cmd, work_queue_file_type_t type, work_queue_file_flags_t flags)
{
struct list *files;
struct work_queue_file *tf;
if(!t || !remote_name || !cmd) {
fprintf(stderr, "Error: Null arguments for task, remote name, and command not allowed in specify_file_command.\n");
return 0;
}
// @param remote_name should not be an absolute path. @see
// work_queue_task_specify_file
if(remote_name[0] == '/') {
fatal("Error: Remote name %s is an absolute path.\n", remote_name);
}
if(type == WORK_QUEUE_INPUT) {
files = t->input_files;
//check if two different commands map to the same remote name for inputs.
list_first_item(t->input_files);
while((tf = (struct work_queue_file*)list_next_item(t->input_files))) {
if(!strcmp(remote_name, tf->remote_name) && strcmp(cmd, tf->payload)){
fprintf(stderr, "Error: input file command %s conflicts with another input pointing to same remote name (%s).\n", cmd, remote_name);
return 0;
}
}
//check if there is an output file with the same remote name.
list_first_item(t->output_files);
while((tf = (struct work_queue_file*)list_next_item(t->output_files))) {
if(!strcmp(remote_name, tf->remote_name)) {
fprintf(stderr, "Error: input file command %s conflicts with an output pointing to same remote name (%s).\n", cmd, remote_name);
return 0;
}
}
} else {
files = t->output_files;
//check if two different remote names map to the same command for outputs.
list_first_item(files);
while((tf = (struct work_queue_file*)list_next_item(files))) {
if(!strcmp(cmd, tf->payload) && strcmp(remote_name, tf->remote_name)) {
fprintf(stderr, "Error: output file command %s conflicts with another output pointing to same remote name (%s).\n", cmd, remote_name);
return 0;
}
}
//check if there is an input file with the same remote name.
list_first_item(t->input_files);
while((tf = (struct work_queue_file*)list_next_item(t->input_files))) {
if(!strcmp(remote_name, tf->remote_name)){
fprintf(stderr, "Error: output file command %s conflicts with an input pointing to same remote name (%s).\n", cmd, remote_name);
return 0;
}
}
}
if(strstr(cmd, "%%") == NULL) {
fatal("command to transfer file does not contain %%%% specifier: %s", cmd);
}
tf = work_queue_file_create(cmd, remote_name, WORK_QUEUE_REMOTECMD, flags);
if(!tf) return 0;
list_push_tail(files, tf);
return 1;
}
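/*
 * Illustrative sketch (hypothetical command): the command must contain the
 * %% specifier, which (by assumption here) stands in for the transfer target
 * at execution time; the check above only enforces its presence.
 *
 *   work_queue_task_specify_file_command(t, "data.tgz",
 *       "curl -o %% http://example.org/data.tgz",
 *       WORK_QUEUE_INPUT, WORK_QUEUE_CACHE);
 */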
int work_queue_specify_snapshot_file(struct work_queue_task *t, const char *monitor_snapshot_file) {
assert(monitor_snapshot_file);
free(t->monitor_snapshot_file);
t->monitor_snapshot_file = xxstrdup(monitor_snapshot_file);
return work_queue_task_specify_file(t, monitor_snapshot_file, RESOURCE_MONITOR_REMOTE_NAME_EVENTS, WORK_QUEUE_INPUT, WORK_QUEUE_CACHE);
}
void work_queue_task_specify_algorithm(struct work_queue_task *t, work_queue_schedule_t algorithm)
{
t->worker_selection_algorithm = algorithm;
}
void work_queue_task_specify_priority( struct work_queue_task *t, double priority )
{
t->priority = priority;
}
void work_queue_task_specify_monitor_output(struct work_queue_task *t, const char *monitor_output_directory) {
if(!monitor_output_directory) {
fatal("Error: no monitor_output_file was specified.");
}
if(t->monitor_output_directory) {
free(t->monitor_output_directory);
}
t->monitor_output_directory = xxstrdup(monitor_output_directory);
}
void work_queue_file_delete(struct work_queue_file *tf) {
if(tf->payload)
free(tf->payload);
if(tf->remote_name)
free(tf->remote_name);
if(tf->cached_name)
free(tf->cached_name);
free(tf);
}
void work_queue_invalidate_cached_file(struct work_queue *q, const char *local_name, work_queue_file_t type) {
struct work_queue_file *f = work_queue_file_create(local_name, local_name, type, WORK_QUEUE_CACHE);
if(!f) return;
work_queue_invalidate_cached_file_internal(q, f->cached_name);
work_queue_file_delete(f);
}
void work_queue_invalidate_cached_file_internal(struct work_queue *q, const char *filename) {
char *key;
struct work_queue_worker *w;
hash_table_firstkey(q->worker_table);
while(hash_table_nextkey(q->worker_table, &key, (void**)&w)) {
if(!hash_table_lookup(w->current_files, filename))
continue;
if(w->type == WORKER_TYPE_FOREMAN) {
send_worker_msg(q, w, "invalidate-file %s\n", filename);
}
struct work_queue_task *t;
uint64_t taskid;
itable_firstkey(w->current_tasks);
while(itable_nextkey(w->current_tasks, &taskid, (void**)&t)) {
struct work_queue_file *tf;
list_first_item(t->input_files);
while((tf = list_next_item(t->input_files))) {
if(strcmp(filename, tf->cached_name) == 0) {
cancel_task_on_worker(q, t, WORK_QUEUE_TASK_READY);
break;
}
}
list_first_item(t->output_files);
while((tf = list_next_item(t->output_files))) {
if(strcmp(filename, tf->cached_name) == 0) {
cancel_task_on_worker(q, t, WORK_QUEUE_TASK_READY);
break;
}
}
}
delete_worker_file(q, w, filename, 0, 0);
}
}
void work_queue_task_delete(struct work_queue_task *t)
{
struct work_queue_file *tf;
if(t) {
free(t->command_line);
free(t->tag);
free(t->category);
free(t->output);
if(t->input_files) {
while((tf = list_pop_tail(t->input_files))) {
work_queue_file_delete(tf);
}
list_delete(t->input_files);
}
if(t->output_files) {
while((tf = list_pop_tail(t->output_files))) {
work_queue_file_delete(tf);
}
list_delete(t->output_files);
}
if(t->env_list) {
char *var;
while((var=list_pop_tail(t->env_list))) {
free(var);
}
list_delete(t->env_list);
}
if(t->features) {
char *feature;
while((feature=list_pop_tail(t->features))) {
free(feature);
}
list_delete(t->features);
}
free(t->hostname);
free(t->host);
rmsummary_delete(t->resources_requested);
rmsummary_delete(t->resources_measured);
rmsummary_delete(t->resources_allocated);
free(t->monitor_output_directory);
free(t->monitor_snapshot_file);
free(t);
}
}
/** DEPRECATED FUNCTIONS **/
int work_queue_task_specify_output_file(struct work_queue_task *t, const char *rname, const char *fname)
{
return work_queue_task_specify_file(t, fname, rname, WORK_QUEUE_OUTPUT, WORK_QUEUE_CACHE);
}
int work_queue_task_specify_output_file_do_not_cache(struct work_queue_task *t, const char *rname, const char *fname)
{
return work_queue_task_specify_file(t, fname, rname, WORK_QUEUE_OUTPUT, WORK_QUEUE_NOCACHE);
}
int work_queue_task_specify_input_buf(struct work_queue_task *t, const char *buf, int length, const char *rname)
{
return work_queue_task_specify_buffer(t, buf, length, rname, WORK_QUEUE_NOCACHE);
}
int work_queue_task_specify_input_file(struct work_queue_task *t, const char *fname, const char *rname)
{
return work_queue_task_specify_file(t, fname, rname, WORK_QUEUE_INPUT, WORK_QUEUE_CACHE);
}
int work_queue_task_specify_input_file_do_not_cache(struct work_queue_task *t, const char *fname, const char *rname)
{
return work_queue_task_specify_file(t, fname, rname, WORK_QUEUE_INPUT, WORK_QUEUE_NOCACHE);
}
/******************************************************/
/********** work_queue public functions **********/
/******************************************************/
struct work_queue *work_queue_create(int port)
{
struct work_queue *q = malloc(sizeof(*q));
if(!q) {
fprintf(stderr, "Error: failed to allocate memory for queue.\n");
return 0;
}
char *envstring;
random_init();
memset(q, 0, sizeof(*q));
if(port == 0) {
envstring = getenv("WORK_QUEUE_PORT");
if(envstring) {
port = atoi(envstring);
}
}
/* compatibility code */
if (getenv("WORK_QUEUE_LOW_PORT"))
setenv("TCP_LOW_PORT", getenv("WORK_QUEUE_LOW_PORT"), 0);
if (getenv("WORK_QUEUE_HIGH_PORT"))
setenv("TCP_HIGH_PORT", getenv("WORK_QUEUE_HIGH_PORT"), 0);
q->manager_link = link_serve(port);
if(!q->manager_link) {
debug(D_NOTICE, "Could not create work_queue on port %i.", port);
free(q);
return 0;
} else {
char address[LINK_ADDRESS_MAX];
link_address_local(q->manager_link, address, &q->port);
}
getcwd(q->workingdir,PATH_MAX);
q->next_taskid = 1;
q->ready_list = list_create();
q->tasks = itable_create(0);
q->task_state_map = itable_create(0);
q->worker_table = hash_table_create(0, 0);
q->worker_blocklist = hash_table_create(0, 0);
q->worker_task_map = itable_create(0);
q->measured_local_resources = rmsummary_create(-1);
q->current_max_worker = rmsummary_create(-1);
q->stats = calloc(1, sizeof(struct work_queue_stats));
q->stats_disconnected_workers = calloc(1, sizeof(struct work_queue_stats));
q->stats_measure = calloc(1, sizeof(struct work_queue_stats));
q->workers_with_available_results = hash_table_create(0, 0);
// The poll table is initially null, and will be created
// (and resized) as needed by build_poll_table.
q->poll_table_size = 8;
q->worker_selection_algorithm = wq_option_scheduler;
q->process_pending_check = 0;
q->short_timeout = 5;
q->long_timeout = 3600;
q->stats->time_when_started = timestamp_get();
q->task_reports = list_create();
q->time_last_wait = 0;
q->catalog_hosts = 0;
q->keepalive_interval = WORK_QUEUE_DEFAULT_KEEPALIVE_INTERVAL;
q->keepalive_timeout = WORK_QUEUE_DEFAULT_KEEPALIVE_TIMEOUT;
q->monitor_mode = MON_DISABLED;
q->allocation_default_mode = WORK_QUEUE_ALLOCATION_MODE_FIXED;
q->categories = hash_table_create(0, 0);
// The value -1 indicates that fast abort is inactive by default.
// Fast abort depends on categories, so it is set after they are created.
work_queue_activate_fast_abort(q, -1);
q->password = 0;
q->asynchrony_multiplier = 1.0;
q->asynchrony_modifier = 0;
q->minimum_transfer_timeout = 60;
q->foreman_transfer_timeout = 3600;
q->transfer_outlier_factor = 10;
q->default_transfer_rate = 1*MEGABYTE;
q->manager_preferred_connection = xxstrdup("by_ip");
if( (envstring = getenv("WORK_QUEUE_BANDWIDTH")) ) {
q->bandwidth = string_metric_parse(envstring);
if(q->bandwidth < 0) {
q->bandwidth = 0;
}
}
//Deprecated:
q->task_ordering = WORK_QUEUE_TASK_ORDER_FIFO;
//
log_queue_stats(q);
q->time_last_wait = timestamp_get();
char hostname[DOMAIN_NAME_MAX];
if(domain_name_cache_guess(hostname)) {
debug(D_WQ, "Master advertising as %s:%d", hostname, q->port);
}
else {
debug(D_WQ, "Work Queue is listening on port %d.", q->port);
}
return q;
}
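/*
 * Illustrative sketch of queue creation, assuming the usual default-port
 * macro from work_queue.h:
 *
 *   struct work_queue *q = work_queue_create(WORK_QUEUE_DEFAULT_PORT);
 *   if(!q) {
 *       fprintf(stderr, "could not create queue\n");
 *       return 1;
 *   }
 *   printf("listening on port %d\n", work_queue_port(q));
 */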
int work_queue_enable_monitoring(struct work_queue *q, char *monitor_output_directory, int watchdog)
{
if(!q)
return 0;
q->monitor_mode = MON_DISABLED;
q->monitor_exe = resource_monitor_locate(NULL);
if(q->monitor_output_directory) {
free(q->monitor_output_directory);
q->monitor_output_directory = NULL;
}
if(!q->monitor_exe)
{
warn(D_WQ, "Could not find the resource monitor executable. Disabling monitoring.\n");
return 0;
}
if(monitor_output_directory) {
q->monitor_output_directory = xxstrdup(monitor_output_directory);
if(!create_dir(q->monitor_output_directory, 0777)) {
fatal("Could not create monitor output directory - %s (%s)", q->monitor_output_directory, strerror(errno));
}
q->monitor_summary_filename = string_format("%s/wq-%d.summaries", q->monitor_output_directory, getpid());
q->monitor_file = fopen(q->monitor_summary_filename, "a");
if(!q->monitor_file)
{
fatal("Could not open monitor log file for writing: '%s'\n", q->monitor_summary_filename);
}
}
if(q->measured_local_resources)
rmsummary_delete(q->measured_local_resources);
q->measured_local_resources = rmonitor_measure_process(getpid());
q->monitor_mode = MON_SUMMARY;
if(watchdog) {
q->monitor_mode |= MON_WATCHDOG;
}
return 1;
}
int work_queue_enable_monitoring_full(struct work_queue *q, char *monitor_output_directory, int watchdog) {
int status = work_queue_enable_monitoring(q, monitor_output_directory, 1);
if(status) {
q->monitor_mode = MON_FULL;
if(watchdog) {
q->monitor_mode |= MON_WATCHDOG;
}
}
return status;
}
int work_queue_activate_fast_abort_category(struct work_queue *q, const char *category, double multiplier)
{
struct category *c = work_queue_category_lookup_or_create(q, category);
if(multiplier >= 1) {
debug(D_WQ, "Enabling fast abort multiplier for '%s': %3.3lf\n", category, multiplier);
c->fast_abort = multiplier;
return 0;
} else if(multiplier == 0) {
debug(D_WQ, "Disabling fast abort multiplier for '%s'.\n", category);
c->fast_abort = 0;
return 1;
} else {
debug(D_WQ, "Using default fast abort multiplier for '%s'.\n", category);
c->fast_abort = -1;
return 0;
}
}
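/*
 * Illustrative sketch (hypothetical category name). By assumption, a
 * multiplier >= 1 aborts tasks whose runtime exceeds multiplier times the
 * category's average successful runtime:
 *
 *   work_queue_activate_fast_abort_category(q, "simulation", 3.0);
 *   work_queue_activate_fast_abort_category(q, "simulation", 0);    // disable
 *   work_queue_activate_fast_abort_category(q, "simulation", 0.5);  // default
 */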
int work_queue_activate_fast_abort(struct work_queue *q, double multiplier)
{
return work_queue_activate_fast_abort_category(q, "default", multiplier);
}
int work_queue_port(struct work_queue *q)
{
char addr[LINK_ADDRESS_MAX];
int port;
if(!q) return 0;
if(link_address_local(q->manager_link, addr, &port)) {
return port;
} else {
return 0;
}
}
void work_queue_specify_estimate_capacity_on(struct work_queue *q, int value)
{
// always on
}
void work_queue_specify_algorithm(struct work_queue *q, work_queue_schedule_t algorithm)
{
q->worker_selection_algorithm = algorithm;
}
void work_queue_specify_task_order(struct work_queue *q, int order)
{
q->task_ordering = order;
}
void work_queue_specify_name(struct work_queue *q, const char *name)
{
if(q->name) free(q->name);
if(name) {
q->name = xxstrdup(name);
setenv("WORK_QUEUE_NAME", q->name, 1);
} else {
q->name = 0;
}
}
void work_queue_specify_debug_path(struct work_queue *q, const char *path)
{
if(q->debug_path) free(q->debug_path);
if(path) {
q->debug_path = xxstrdup(path);
setenv("WORK_QUEUE_DEBUG_PATH", q->debug_path, 1);
} else {
q->debug_path = 0;
}
}
void work_queue_specify_tlq_port(struct work_queue *q, int port)
{
q->tlq_port = port;
}
const char *work_queue_name(struct work_queue *q)
{
return q->name;
}
void work_queue_specify_priority(struct work_queue *q, int priority)
{
q->priority = priority;
}
void work_queue_specify_num_tasks_left(struct work_queue *q, int ntasks)
{
if(ntasks < 1) {
q->num_tasks_left = 0;
}
else {
q->num_tasks_left = ntasks;
}
}
void work_queue_specify_manager_mode(struct work_queue *q, int mode)
{
// Deprecated: Report to the catalog iff a name is given.
}
void work_queue_specify_catalog_server(struct work_queue *q, const char *hostname, int port)
{
char hostport[DOMAIN_NAME_MAX + 8];
if(hostname && (port > 0)) {
sprintf(hostport, "%s:%d", hostname, port);
work_queue_specify_catalog_servers(q, hostport);
} else if(hostname) {
work_queue_specify_catalog_servers(q, hostname);
} else if (port > 0) {
sprintf(hostport, "%d", port);
setenv("CATALOG_PORT", hostport, 1);
}
}
void work_queue_specify_catalog_servers(struct work_queue *q, const char *hosts)
{
if(hosts) {
if(q->catalog_hosts) free(q->catalog_hosts);
q->catalog_hosts = strdup(hosts);
setenv("CATALOG_HOST", hosts, 1);
}
}
void work_queue_specify_password( struct work_queue *q, const char *password )
{
q->password = xxstrdup(password);
}
int work_queue_specify_password_file( struct work_queue *q, const char *file )
{
return copy_file_to_buffer(file,&q->password,NULL)>0;
}
void work_queue_delete(struct work_queue *q)
{
if(q) {
struct work_queue_worker *w;
char *key;
hash_table_firstkey(q->worker_table);
while(hash_table_nextkey(q->worker_table, &key, (void **) &w)) {
release_worker(q, w);
hash_table_firstkey(q->worker_table);
}
log_queue_stats(q);
if(q->name) {
update_catalog(q, NULL, 1);
}
/* we call this function here before any of the structures are freed. */
work_queue_disable_monitoring(q);
if(q->catalog_hosts) free(q->catalog_hosts);
hash_table_delete(q->worker_table);
hash_table_delete(q->worker_blocklist);
itable_delete(q->worker_task_map);
struct category *c;
hash_table_firstkey(q->categories);
while(hash_table_nextkey(q->categories, &key, (void **) &c)) {
category_delete(q->categories, key);
}
hash_table_delete(q->categories);
list_delete(q->ready_list);
itable_delete(q->tasks);
itable_delete(q->task_state_map);
hash_table_delete(q->workers_with_available_results);
struct work_queue_task_report *tr;
list_first_item(q->task_reports);
while((tr = list_next_item(q->task_reports))) {
task_report_delete(tr);
}
list_delete(q->task_reports);
free(q->stats);
free(q->stats_disconnected_workers);
free(q->stats_measure);
if(q->name)
free(q->name);
if(q->manager_preferred_connection)
free(q->manager_preferred_connection);
free(q->poll_table);
link_close(q->manager_link);
if(q->logfile) {
fclose(q->logfile);
}
if(q->transactions_logfile) {
write_transaction(q, "MANAGER END");
fclose(q->transactions_logfile);
}
if(q->measured_local_resources)
rmsummary_delete(q->measured_local_resources);
if(q->current_max_worker)
rmsummary_delete(q->current_max_worker);
free(q);
}
}
void update_resource_report(struct work_queue *q) {
// Only measure every few seconds.
if((time(0) - q->resources_last_update_time) < WORK_QUEUE_RESOURCE_MEASUREMENT_INTERVAL)
return;
rmonitor_measure_process_update_to_peak(q->measured_local_resources, getpid());
q->resources_last_update_time = time(0);
}
void work_queue_disable_monitoring(struct work_queue *q) {
if(q->monitor_mode == MON_DISABLED)
return;
rmonitor_measure_process_update_to_peak(q->measured_local_resources, getpid());
if(!q->measured_local_resources->exit_type)
q->measured_local_resources->exit_type = xxstrdup("normal");
if(q->monitor_mode && q->monitor_summary_filename) {
fclose(q->monitor_file);
char template[] = "rmonitor-summaries-XXXXXX";
int final_fd = mkstemp(template);
int summs_fd = open(q->monitor_summary_filename, O_RDONLY);
if( final_fd < 0 || summs_fd < 0 ) {
warn(D_DEBUG, "Could not consolidate resource summaries.");
return;
}
/* set permissions according to user's mask. getumask is not available yet,
and the only way to get the value of the current mask is to change
it... */
mode_t old_mask = umask(0);
umask(old_mask);
fchmod(final_fd, 0777 & ~old_mask );
FILE *final = fdopen(final_fd, "w");
const char *user_name = getlogin();
if(!user_name) {
user_name = "unknown";
}
struct jx *extra = jx_object(
jx_pair(jx_string("type"), jx_string("work_queue"),
jx_pair(jx_string("user"), jx_string(user_name),
NULL)));
if(q->name) {
jx_insert_string(extra, "manager_name", q->name);
}
rmsummary_print(final, q->measured_local_resources, /* pprint */ 0, extra);
copy_fd_to_stream(summs_fd, final);
jx_delete(extra);
fclose(final);
close(summs_fd);
if(rename(template, q->monitor_summary_filename) < 0)
warn(D_DEBUG, "Could not move monitor report to final destination file.");
}
if(q->monitor_exe)
free(q->monitor_exe);
if(q->monitor_output_directory)
free(q->monitor_output_directory);
if(q->monitor_summary_filename)
free(q->monitor_summary_filename);
}
void work_queue_monitor_add_files(struct work_queue *q, struct work_queue_task *t) {
work_queue_task_specify_file(t, q->monitor_exe, RESOURCE_MONITOR_REMOTE_NAME, WORK_QUEUE_INPUT, WORK_QUEUE_CACHE);
char *summary = monitor_file_name(q, t, ".summary");
work_queue_task_specify_file(t, summary, RESOURCE_MONITOR_REMOTE_NAME ".summary", WORK_QUEUE_OUTPUT, WORK_QUEUE_NOCACHE);
free(summary);
if(q->monitor_mode & MON_FULL && (q->monitor_output_directory || t->monitor_output_directory)) {
char *debug = monitor_file_name(q, t, ".debug");
char *series = monitor_file_name(q, t, ".series");
work_queue_task_specify_file(t, debug, RESOURCE_MONITOR_REMOTE_NAME ".debug", WORK_QUEUE_OUTPUT, WORK_QUEUE_NOCACHE);
work_queue_task_specify_file(t, series, RESOURCE_MONITOR_REMOTE_NAME ".series", WORK_QUEUE_OUTPUT, WORK_QUEUE_NOCACHE);
free(debug);
free(series);
}
}
char *work_queue_monitor_wrap(struct work_queue *q, struct work_queue_worker *w, struct work_queue_task *t, struct rmsummary *limits)
{
buffer_t b;
buffer_init(&b);
buffer_printf(&b, "-V 'task_id: %d'", t->taskid);
if(t->category) {
buffer_printf(&b, " -V 'category: %s'", t->category);
}
if(t->monitor_snapshot_file) {
buffer_printf(&b, " --snapshot-events %s", RESOURCE_MONITOR_REMOTE_NAME_EVENTS);
}
if(q->monitor_mode & MON_WATCHDOG) {
buffer_printf(&b, " --measure-only");
}
int extra_files = (q->monitor_mode & MON_FULL);
char *monitor_cmd = resource_monitor_write_command("./" RESOURCE_MONITOR_REMOTE_NAME, RESOURCE_MONITOR_REMOTE_NAME, limits, /* extra options */ buffer_tostring(&b), /* debug */ extra_files, /* series */ extra_files, /* inotify */ 0, /* measure_dir */ NULL);
char *wrap_cmd = string_wrap_command(t->command_line, monitor_cmd);
buffer_free(&b);
free(monitor_cmd);
return wrap_cmd;
}
static double work_queue_task_priority(void *item) {
assert(item);
struct work_queue_task *t = item;
return t->priority;
}
/* Put a given task on the ready list, taking into account the task priority and the queue schedule. */
void push_task_to_ready_list( struct work_queue *q, struct work_queue_task *t )
{
int by_priority = 1;
if(t->result == WORK_QUEUE_RESULT_RESOURCE_EXHAUSTION) {
/* when a task is resubmitted given resource exhaustion, we
* push it at the head of the list, so it gets to run as soon
* as possible. This avoids the issue in which all 'big' tasks
* fail because the first allocation is too small. */
by_priority = 0;
}
if(by_priority) {
list_push_priority(q->ready_list, work_queue_task_priority, t);
} else {
list_push_head(q->ready_list,t);
}
/* If the task has been used before, clear out accumulated state. */
clean_task_state(t);
}
work_queue_task_state_t work_queue_task_state(struct work_queue *q, int taskid) {
return (int)(uintptr_t)itable_lookup(q->task_state_map, taskid);
}
static void fill_deprecated_tasks_stats(struct work_queue_task *t) {
t->time_task_submit = t->time_when_submitted;
t->time_task_finish = t->time_when_done;
t->time_committed = t->time_when_commit_start;
t->time_send_input_start = t->time_when_commit_start;
t->time_send_input_finish = t->time_when_commit_end;
t->time_receive_result_start = t->time_when_retrieval;
t->time_receive_result_finish = t->time_when_done;
t->time_receive_output_start = t->time_when_retrieval;
t->time_receive_output_finish = t->time_when_done;
t->time_execute_cmd_start = t->time_when_commit_start;
t->time_execute_cmd_finish = t->time_when_retrieval;
t->total_transfer_time = (t->time_when_commit_end - t->time_when_commit_start) + (t->time_when_done - t->time_when_retrieval);
t->cmd_execution_time = t->time_workers_execute_last;
t->total_cmd_execution_time = t->time_workers_execute_all;
t->total_cmd_exhausted_execute_time = t->time_workers_execute_exhaustion;
t->total_time_until_worker_failure = t->time_workers_execute_failure;
t->total_bytes_received = t->bytes_received;
t->total_bytes_sent = t->bytes_sent;
t->total_bytes_transferred = t->bytes_transferred;
}
/* Changes task state. Returns old state */
/* State of the task. One of WORK_QUEUE_TASK(UNKNOWN|READY|RUNNING|WAITING_RETRIEVAL|RETRIEVED|DONE) */
static work_queue_task_state_t change_task_state( struct work_queue *q, struct work_queue_task *t, work_queue_task_state_t new_state ) {
work_queue_task_state_t old_state = (uintptr_t) itable_lookup(q->task_state_map, t->taskid);
itable_insert(q->task_state_map, t->taskid, (void *) new_state);
// remove from current tables:
if( old_state == WORK_QUEUE_TASK_READY ) {
// Treat WORK_QUEUE_TASK_READY specially, as the ready list maintains the dispatch order of tasks.
list_remove(q->ready_list, t);
}
// insert to corresponding table
debug(D_WQ, "Task %d state change: %s (%d) to %s (%d)\n", t->taskid, task_state_str(old_state), old_state, task_state_str(new_state), new_state);
switch(new_state) {
case WORK_QUEUE_TASK_READY:
update_task_result(t, WORK_QUEUE_RESULT_UNKNOWN);
push_task_to_ready_list(q, t);
break;
case WORK_QUEUE_TASK_DONE:
case WORK_QUEUE_TASK_CANCELED:
/* tasks are freed when returned to user, thus we remove them from our local record */
fill_deprecated_tasks_stats(t);
itable_remove(q->tasks, t->taskid);
break;
default:
/* do nothing */
break;
}
log_queue_stats(q);
write_transaction_task(q, t);
return old_state;
}
const char *task_state_str(work_queue_task_state_t task_state) {
const char *str;
switch(task_state) {
case WORK_QUEUE_TASK_READY:
str = "WAITING";
break;
case WORK_QUEUE_TASK_RUNNING:
str = "RUNNING";
break;
case WORK_QUEUE_TASK_WAITING_RETRIEVAL:
str = "WAITING_RETRIEVAL";
break;
case WORK_QUEUE_TASK_RETRIEVED:
str = "RETRIEVED";
break;
case WORK_QUEUE_TASK_DONE:
str = "DONE";
break;
case WORK_QUEUE_TASK_CANCELED:
str = "CANCELED";
break;
case WORK_QUEUE_TASK_UNKNOWN:
default:
str = "UNKNOWN";
break;
}
return str;
}
static int task_in_terminal_state(struct work_queue *q, struct work_queue_task *t) {
work_queue_task_state_t state = (uintptr_t) itable_lookup(q->task_state_map, t->taskid);
switch(state) {
case WORK_QUEUE_TASK_READY:
case WORK_QUEUE_TASK_RUNNING:
case WORK_QUEUE_TASK_WAITING_RETRIEVAL:
case WORK_QUEUE_TASK_RETRIEVED:
return 0;
break;
case WORK_QUEUE_TASK_DONE:
case WORK_QUEUE_TASK_CANCELED:
case WORK_QUEUE_TASK_UNKNOWN:
return 1;
break;
}
return 0;
}
const char *task_result_str(work_queue_result_t result) {
const char *str;
switch(result) {
case WORK_QUEUE_RESULT_SUCCESS:
str = "SUCCESS";
break;
case WORK_QUEUE_RESULT_INPUT_MISSING:
str = "INPUT_MISS";
break;
case WORK_QUEUE_RESULT_OUTPUT_MISSING:
str = "OUTPUT_MISS";
break;
case WORK_QUEUE_RESULT_STDOUT_MISSING:
str = "STDOUT_MISS";
break;
case WORK_QUEUE_RESULT_SIGNAL:
str = "SIGNAL";
break;
case WORK_QUEUE_RESULT_RESOURCE_EXHAUSTION:
str = "RESOURCE_EXHAUSTION";
break;
case WORK_QUEUE_RESULT_TASK_TIMEOUT:
str = "END_TIME";
break;
case WORK_QUEUE_RESULT_FORSAKEN:
str = "FORSAKEN";
break;
case WORK_QUEUE_RESULT_MAX_RETRIES:
str = "MAX_RETRIES";
break;
case WORK_QUEUE_RESULT_TASK_MAX_RUN_TIME:
str = "MAX_WALL_TIME";
break;
case WORK_QUEUE_RESULT_UNKNOWN:
default:
str = "UNKNOWN";
break;
}
return str;
}
static int task_state_is( struct work_queue *q, uint64_t taskid, work_queue_task_state_t state) {
return itable_lookup(q->task_state_map, taskid) == (void *) state;
}
static struct work_queue_task *task_state_any(struct work_queue *q, work_queue_task_state_t state) {
struct work_queue_task *t;
uint64_t taskid;
itable_firstkey(q->tasks);
while( itable_nextkey(q->tasks, &taskid, (void **) &t) ) {
if( task_state_is(q, taskid, state) ) {
return t;
}
}
return NULL;
}
static int task_state_count(struct work_queue *q, const char *category, work_queue_task_state_t state) {
struct work_queue_task *t;
uint64_t taskid;
int count = 0;
itable_firstkey(q->tasks);
while( itable_nextkey(q->tasks, &taskid, (void **) &t) ) {
if( task_state_is(q, taskid, state) ) {
if(!category || strcmp(category, t->category) == 0) {
count++;
}
}
}
return count;
}
static int task_request_count( struct work_queue *q, const char *category, category_allocation_t request) {
struct work_queue_task *t;
uint64_t taskid;
int count = 0;
itable_firstkey(q->tasks);
while( itable_nextkey(q->tasks, &taskid, (void **) &t) ) {
if(t->resource_request == request) {
if(!category || strcmp(category, t->category) == 0) {
count++;
}
}
}
return count;
}
int work_queue_submit_internal(struct work_queue *q, struct work_queue_task *t)
{
itable_insert(q->tasks, t->taskid, t);
/* Ensure category structure is created. */
work_queue_category_lookup_or_create(q, t->category);
change_task_state(q, t, WORK_QUEUE_TASK_READY);
t->time_when_submitted = timestamp_get();
q->stats->tasks_submitted++;
if(q->monitor_mode != MON_DISABLED)
work_queue_monitor_add_files(q, t);
return (t->taskid);
}
int work_queue_submit(struct work_queue *q, struct work_queue_task *t)
{
if(t->taskid > 0 && !task_in_terminal_state(q, t)) {
debug(D_NOTICE|D_WQ, "Task %d has been already submitted. Ignoring new submission.", t->taskid);
return 0;
}
t->taskid = q->next_taskid;
//Increment taskid so we get a unique taskid for every submission.
q->next_taskid++;
return work_queue_submit_internal(q, t);
}
void work_queue_block_host_with_timeout(struct work_queue *q, const char *hostname, time_t timeout)
{
struct blocklist_host_info *info = hash_table_lookup(q->worker_blocklist, hostname);
if(!info) {
info = malloc(sizeof(struct blocklist_host_info));
info->times_blocked = 0;
info->blocked = 0;
}
q->stats->workers_blocked++;
/* count the times the worker goes from active to blocked. */
if(!info->blocked)
info->times_blocked++;
info->blocked = 1;
if(timeout > 0) {
debug(D_WQ, "Blocking host %s by %" PRIu64 " seconds (blocked %d times).\n", hostname, (uint64_t) timeout, info->times_blocked);
info->release_at = time(0) + timeout;
} else {
debug(D_WQ, "Blocking host %s indefinitely.\n", hostname);
info->release_at = -1;
}
hash_table_insert(q->worker_blocklist, hostname, (void *) info);
}
void work_queue_block_host(struct work_queue *q, const char *hostname)
{
work_queue_block_host_with_timeout(q, hostname, -1);
}
void work_queue_unblock_host(struct work_queue *q, const char *hostname)
{
struct blocklist_host_info *info = hash_table_remove(q->worker_blocklist, hostname);
if(info) {
info->blocked = 0;
info->release_at = 0;
}
}
/* deadline < 1 means release all, regardless of release_at time. */
static void work_queue_unblock_all_by_time(struct work_queue *q, time_t deadline)
{
char *hostname;
struct blocklist_host_info *info;
hash_table_firstkey(q->worker_blocklist);
while(hash_table_nextkey(q->worker_blocklist, &hostname, (void *) &info)) {
if(!info->blocked)
continue;
/* do not clear if blocked indefinitely, and we are not clearing the whole list. */
if(info->release_at < 1 && deadline > 0)
continue;
/* do not clear if the release time for this host has not met the deadline. */
if(deadline > 0 && info->release_at > deadline)
continue;
debug(D_WQ, "Clearing hostname %s from blocklist.\n", hostname);
work_queue_unblock_host(q, hostname);
}
}
void work_queue_unblock_all(struct work_queue *q)
{
work_queue_unblock_all_by_time(q, -1);
}
static void print_password_warning( struct work_queue *q )
{
static int did_password_warning = 0;
if(did_password_warning) return;
if(!q->password && q->name) {
fprintf(stderr,"warning: this work queue manager is visible to the public.\n");
fprintf(stderr,"warning: you should set a password with the --password option.\n");
did_password_warning = 1;
}
}
#define BEGIN_ACCUM_TIME(q, stat) {\
if(q->stats_measure->stat != 0) {\
fatal("Double-counting stat %s. This should not happen, and it is a Work Queue bug.", #stat);\
} else {\
q->stats_measure->stat = timestamp_get();\
}\
}
#define END_ACCUM_TIME(q, stat) {\
q->stats->stat += timestamp_get() - q->stats_measure->stat;\
q->stats_measure->stat = 0;\
}
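/*
 * These two macros bracket a timed region: BEGIN records a start timestamp in
 * stats_measure, and END folds the elapsed time into the cumulative stat and
 * clears the marker. A sketch of the intended pattern (hypothetical helper):
 *
 *   BEGIN_ACCUM_TIME(q, time_internal);
 *   do_some_bookkeeping(q);
 *   END_ACCUM_TIME(q, time_internal);
 */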
struct work_queue_task *work_queue_wait(struct work_queue *q, int timeout)
{
if(timeout == 0) {
// Re-establish the old (if unintended) behavior where a timeout of 0
// would wait at least a second. With 0 we would like the wait loop to
// execute at least once, but we cannot currently enforce that, so
// setting the timeout to 1 guarantees a single pass through the loop.
timeout = 1;
}
if(timeout != WORK_QUEUE_WAITFORTASK && timeout < 0) {
debug(D_NOTICE|D_WQ, "Invalid wait timeout value '%d'. Waiting for 5 seconds.", timeout);
timeout = 5;
}
return work_queue_wait_internal(q, timeout, NULL, NULL);
}
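/*
 * Illustrative manager loop (a sketch; task setup elided): wait for
 * completions until the queue drains.
 *
 *   while(!work_queue_empty(q)) {
 *       struct work_queue_task *t = work_queue_wait(q, 5);
 *       if(t) {
 *           printf("task %d finished: %s\n", t->taskid, task_result_str(t->result));
 *           work_queue_task_delete(t);
 *       }
 *   }
 */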
/* return number of workers that failed */
static int poll_active_workers(struct work_queue *q, int stoptime, struct link *foreman_uplink, int *foreman_uplink_active)
{
BEGIN_ACCUM_TIME(q, time_polling);
int n = build_poll_table(q, foreman_uplink);
// We poll in small time segments (at most a second). This lets us
// dispatch tasks promptly while avoiding busy waiting.
int msec = q->busy_waiting_flag ? 1000 : 0;
if(stoptime) {
msec = MIN(msec, (stoptime - time(0)) * 1000);
}
END_ACCUM_TIME(q, time_polling);
if(msec < 0) {
return 0;
}
BEGIN_ACCUM_TIME(q, time_polling);
// Poll all links for activity.
link_poll(q->poll_table, n, msec);
q->link_poll_end = timestamp_get();
int i, j = 1;
// Consider the foreman_uplink passed into the function and disregard if inactive.
if(foreman_uplink) {
if(q->poll_table[1].revents) {
*foreman_uplink_active = 1; //signal that the manager link saw activity
} else {
*foreman_uplink_active = 0;
}
j++;
}
END_ACCUM_TIME(q, time_polling);
BEGIN_ACCUM_TIME(q, time_status_msgs);
int workers_failed = 0;
// Then consider all existing active workers
for(i = j; i < n; i++) {
if(q->poll_table[i].revents) {
if(handle_worker(q, q->poll_table[i].link) == WQ_WORKER_FAILURE) {
workers_failed++;
}
}
}
if(hash_table_size(q->workers_with_available_results) > 0) {
char *key;
struct work_queue_worker *w;
hash_table_firstkey(q->workers_with_available_results);
while(hash_table_nextkey(q->workers_with_available_results,&key,(void**)&w)) {
get_available_results(q, w);
hash_table_remove(q->workers_with_available_results, key);
hash_table_firstkey(q->workers_with_available_results);
}
}
END_ACCUM_TIME(q, time_status_msgs);
return workers_failed;
}
static int connect_new_workers(struct work_queue *q, int stoptime, int max_new_workers)
{
int new_workers = 0;
// If the manager link was awake, then accept at most max_new_workers.
// Note we are using the information gathered in poll_active_workers, which
// is a little ugly.
if(q->poll_table[0].revents) {
do {
add_worker(q);
new_workers++;
} while(link_usleep(q->manager_link, 0, 1, 0) && (stoptime >= time(0) && (max_new_workers > new_workers)));
}
return new_workers;
}
struct work_queue_task *work_queue_wait_internal(struct work_queue *q, int timeout, struct link *foreman_uplink, int *foreman_uplink_active)
/*
- compute stoptime
S time left? No: return null
- task completed? Yes: return completed task to user
- update catalog if appropriate
- retrieve workers status messages
- tasks waiting to be retrieved? Yes: retrieve one task and go to S.
- tasks waiting to be dispatched? Yes: dispatch one task and go to S.
- send keepalives to appropriate workers
- fast-abort workers
- if new workers, connect n of them
- expired tasks? Yes: mark expired tasks as retrieved and go to S.
- queue empty? Yes: return null
- go to S
*/
{
int events = 0;
// account for time we spend outside work_queue_wait
if(q->time_last_wait > 0) {
q->stats->time_application += timestamp_get() - q->time_last_wait;
} else {
q->stats->time_application += timestamp_get() - q->stats->time_when_started;
}
print_password_warning(q);
// compute stoptime
time_t stoptime = (timeout == WORK_QUEUE_WAITFORTASK) ? 0 : time(0) + timeout;
int result;
struct work_queue_task *t = NULL;
// time left?
while( (stoptime == 0) || (time(0) < stoptime) ) {
BEGIN_ACCUM_TIME(q, time_internal);
// task completed?
t = task_state_any(q, WORK_QUEUE_TASK_RETRIEVED);
if(t) {
change_task_state(q, t, WORK_QUEUE_TASK_DONE);
if( t->result != WORK_QUEUE_RESULT_SUCCESS )
{
q->stats->tasks_failed++;
}
// return completed task (t) to the user. We do not return right
// away, and instead break out of the loop to correctly update the
// queue time statistics.
events++;
END_ACCUM_TIME(q, time_internal);
break;
}
// update catalog if appropriate
if(q->name) {
update_catalog(q, foreman_uplink, 0);
}
if(q->monitor_mode)
update_resource_report(q);
END_ACCUM_TIME(q, time_internal);
// retrieve worker status messages
if(poll_active_workers(q, stoptime, foreman_uplink, foreman_uplink_active) > 0) {
//at least one worker was removed.
events++;
// note we keep going, and we do not restart the loop as we do in
// further events. This is because we give top priority to
// returning and retrieving tasks.
}
q->busy_waiting_flag = 0;
// tasks waiting to be retrieved?
BEGIN_ACCUM_TIME(q, time_receive);
result = receive_one_task(q);
END_ACCUM_TIME(q, time_receive);
if(result) {
// retrieved at least one task
events++;
compute_manager_load(q, 1);
continue;
}
// expired tasks
BEGIN_ACCUM_TIME(q, time_internal);
result = expire_waiting_tasks(q);
END_ACCUM_TIME(q, time_internal);
if(result) {
// expired at least one task
events++;
compute_manager_load(q, 1);
continue;
}
// record that there was no task activity in this iteration
compute_manager_load(q, 0);
// tasks waiting to be dispatched?
BEGIN_ACCUM_TIME(q, time_send);
result = send_one_task(q);
END_ACCUM_TIME(q, time_send);
if(result) {
// sent at least one task
events++;
continue;
}
//we reach here only if no task was sent or received.
compute_manager_load(q, 1);
// send keepalives to appropriate workers
BEGIN_ACCUM_TIME(q, time_status_msgs);
ask_for_workers_updates(q);
END_ACCUM_TIME(q, time_status_msgs);
// Kill off slow/drained workers.
BEGIN_ACCUM_TIME(q, time_internal);
result = abort_slow_workers(q);
result += abort_drained_workers(q);
work_queue_unblock_all_by_time(q, time(0));
END_ACCUM_TIME(q, time_internal);
if(result) {
// removed at least one worker
events++;
continue;
}
// if new workers, connect n of them
BEGIN_ACCUM_TIME(q, time_status_msgs);
result = connect_new_workers(q, stoptime, MAX_NEW_WORKERS);
END_ACCUM_TIME(q, time_status_msgs);
if(result) {
// accepted at least one worker
events++;
continue;
}
if(q->process_pending_check) {
BEGIN_ACCUM_TIME(q, time_internal);
int pending = process_pending();
END_ACCUM_TIME(q, time_internal);
if(pending) {
events++;
break;
}
}
// return if queue is empty.
BEGIN_ACCUM_TIME(q, time_internal);
int done = !task_state_any(q, WORK_QUEUE_TASK_RUNNING) && !task_state_any(q, WORK_QUEUE_TASK_READY) && !task_state_any(q, WORK_QUEUE_TASK_WAITING_RETRIEVAL) && !(foreman_uplink);
END_ACCUM_TIME(q, time_internal);
if(done)
break;
/* if we got here, no events were triggered. we set the busy_waiting
* flag so that link_poll waits for some time the next time around. */
q->busy_waiting_flag = 1;
// If the foreman_uplink is active then break so the caller can handle it.
if(foreman_uplink) {
break;
}
}
if(events > 0) {
log_queue_stats(q);
}
q->time_last_wait = timestamp_get();
return t;
}
//check if workers' resources are available to execute more tasks
//queue should have at least 10 ready tasks
//@param: struct work_queue* - pointer to queue
//@return: boolean - whether queue is "hungry"
int work_queue_hungry(struct work_queue *q)
{
//check if queue is initialized
//return false if not
if (q == NULL){
return 0;
}
struct work_queue_stats qstats;
work_queue_get_stats(q, &qstats);
//check whether any workers have joined since the start;
//if none have, limit the number of ready tasks in the queue to 10.
//10 is chosen as the default number of ready tasks that keeps the queue efficient.
if (qstats.workers_joined == 0){
if (qstats.tasks_waiting < 10){
return 1;
}
return 0;
}
//if number of ready tasks is less than 10, return true for more tasks in queue
//10 is chosen to be the default number of ready tasks in queue to keep queue efficient
if (qstats.tasks_waiting < 10){
return 1;
}
//get total available resources (cores, memory, disk, gpus) across all workers of this manager
//available = total (all) - committed (actually in use)
int64_t workers_total_avail_cores = 0;
int64_t workers_total_avail_memory = 0;
int64_t workers_total_avail_disk = 0;
int64_t workers_total_avail_gpus = 0;
workers_total_avail_cores = q->stats->total_cores - q->stats->committed_cores;
workers_total_avail_memory = q->stats->total_memory - q->stats->committed_memory;
workers_total_avail_disk = q->stats->total_disk - q->stats->committed_disk;
workers_total_avail_gpus = q->stats->total_gpus - q->stats->committed_gpus;
//get required resources (cores, memory, disk, gpus) of one waiting task
int64_t ready_task_cores = 0;
int64_t ready_task_memory = 0;
int64_t ready_task_disk = 0;
int64_t ready_task_gpus = 0;
struct work_queue_task *t;
list_first_item(q->ready_list);
t = list_next_item(q->ready_list);
ready_task_cores += t->resources_allocated->cores;
ready_task_memory += t->resources_allocated->memory;
ready_task_disk += t->resources_allocated->disk;
ready_task_gpus += t->resources_allocated->gpus;
//check possible limiting factors
//return false if required resources exceed available resources
if (ready_task_cores > workers_total_avail_cores){
return 0;
}
if (ready_task_memory > workers_total_avail_memory){
return 0;
}
if (ready_task_disk > workers_total_avail_disk){
return 0;
}
if (ready_task_gpus > workers_total_avail_gpus){
return 0;
}
return 1; //all good
}
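/*
 * Illustrative sketch: use the hungry test to keep the ready list topped up
 * without flooding the manager. make_next_task() is a hypothetical generator.
 *
 *   while(work_queue_hungry(q)) {
 *       struct work_queue_task *t = make_next_task();
 *       if(!t) break;
 *       work_queue_submit(q, t);
 *   }
 */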
int work_queue_shut_down_workers(struct work_queue *q, int n)
{
struct work_queue_worker *w;
char *key;
int i = 0;
if(!q)
return -1;
/* by default, remove all workers. */
if(n < 1)
n = hash_table_size(q->worker_table);
// send worker the "exit" msg
hash_table_firstkey(q->worker_table);
while(i < n && hash_table_nextkey(q->worker_table, &key, (void **) &w)) {
if(itable_size(w->current_tasks) == 0) {
shut_down_worker(q, w);
/* shut_down_worker alters the table, so we reset it here. */
hash_table_firstkey(q->worker_table);
i++;
}
}
return i;
}
int work_queue_specify_draining_by_hostname(struct work_queue *q, const char *hostname, int drain_flag)
{
char *worker_hashkey = NULL;
struct work_queue_worker *w = NULL;
drain_flag = !!(drain_flag);
int workers_updated = 0;
hash_table_firstkey(q->worker_table);
while(hash_table_nextkey(q->worker_table, &worker_hashkey, (void **) &w)) {
if (!strcmp(w->hostname, hostname)) {
w->draining = drain_flag;
workers_updated++;
}
}
return workers_updated;
}
/**
* Cancel submitted task as long as it has not been retrieved through wait().
* This returns the work_queue_task struct corresponding to specified task and
* null if the task is not found.
*/
struct work_queue_task *work_queue_cancel_by_taskid(struct work_queue *q, int taskid) {
struct work_queue_task *matched_task = NULL;
matched_task = itable_lookup(q->tasks, taskid);
if(!matched_task) {
debug(D_WQ, "Task with id %d is not found in queue.", taskid);
return NULL;
}
cancel_task_on_worker(q, matched_task, WORK_QUEUE_TASK_CANCELED);
/* change state even if task is not running on a worker. */
change_task_state(q, matched_task, WORK_QUEUE_TASK_CANCELED);
q->stats->tasks_cancelled++;
return matched_task;
}
struct work_queue_task *work_queue_cancel_by_tasktag(struct work_queue *q, const char* tasktag) {
struct work_queue_task *matched_task = NULL;
if (tasktag){
matched_task = find_task_by_tag(q, tasktag);
if(matched_task) {
return work_queue_cancel_by_taskid(q, matched_task->taskid);
}
}
debug(D_WQ, "Task with tag %s is not found in queue.", tasktag);
return NULL;
}
struct list * work_queue_cancel_all_tasks(struct work_queue *q) {
struct list *l = list_create();
struct work_queue_task *t;
struct work_queue_worker *w;
uint64_t taskid;
char *key;
itable_firstkey(q->tasks);
while(itable_nextkey(q->tasks, &taskid, (void**)&t)) {
list_push_tail(l, t);
work_queue_cancel_by_taskid(q, taskid);
}
hash_table_firstkey(q->workers_with_available_results);
while(hash_table_nextkey(q->workers_with_available_results, &key, (void **) &w)) {
hash_table_remove(q->workers_with_available_results, key);
hash_table_firstkey(q->workers_with_available_results);
}
hash_table_firstkey(q->worker_table);
while(hash_table_nextkey(q->worker_table, &key, (void**)&w)) {
send_worker_msg(q,w,"kill -1\n");
itable_firstkey(w->current_tasks);
while(itable_nextkey(w->current_tasks, &taskid, (void**)&t)) {
//Delete any input files that are not to be cached.
delete_worker_files(q, w, t->input_files, WORK_QUEUE_CACHE | WORK_QUEUE_PREEXIST);
//Delete all output files since they are not needed as the task was aborted.
delete_worker_files(q, w, t->output_files, 0);
reap_task_from_worker(q, w, t, WORK_QUEUE_TASK_CANCELED);
list_push_tail(l, t);
q->stats->tasks_cancelled++;
itable_firstkey(w->current_tasks);
}
}
return l;
}
void release_all_workers(struct work_queue *q) {
struct work_queue_worker *w;
char *key;
if(!q) return;
hash_table_firstkey(q->worker_table);
while(hash_table_nextkey(q->worker_table,&key,(void**)&w)) {
release_worker(q, w);
hash_table_firstkey(q->worker_table);
}
}
int work_queue_empty(struct work_queue *q)
{
struct work_queue_task *t;
uint64_t taskid;
itable_firstkey(q->tasks);
while( itable_nextkey(q->tasks, &taskid, (void **) &t) ) {
int state = work_queue_task_state(q, taskid);
if( state == WORK_QUEUE_TASK_READY ) return 0;
if( state == WORK_QUEUE_TASK_RUNNING ) return 0;
if( state == WORK_QUEUE_TASK_WAITING_RETRIEVAL ) return 0;
if( state == WORK_QUEUE_TASK_RETRIEVED ) return 0;
}
return 1;
}
void work_queue_specify_keepalive_interval(struct work_queue *q, int interval)
{
q->keepalive_interval = interval;
}
void work_queue_specify_keepalive_timeout(struct work_queue *q, int timeout)
{
q->keepalive_timeout = timeout;
}
void work_queue_manager_preferred_connection(struct work_queue *q, const char *preferred_connection)
{
free(q->manager_preferred_connection);
q->manager_preferred_connection = xxstrdup(preferred_connection);
}
int work_queue_tune(struct work_queue *q, const char *name, double value)
{
if(!strcmp(name, "asynchrony-multiplier")) {
q->asynchrony_multiplier = MAX(value, 1.0);
} else if(!strcmp(name, "asynchrony-modifier")) {
q->asynchrony_modifier = MAX(value, 0);
} else if(!strcmp(name, "min-transfer-timeout")) {
q->minimum_transfer_timeout = (int)value;
} else if(!strcmp(name, "foreman-transfer-timeout")) {
q->foreman_transfer_timeout = (int)value;
} else if(!strcmp(name, "default-transfer-rate")) {
q->default_transfer_rate = value;
} else if(!strcmp(name, "transfer-outlier-factor")) {
q->transfer_outlier_factor = value;
} else if(!strcmp(name, "fast-abort-multiplier")) {
work_queue_activate_fast_abort(q, value);
} else if(!strcmp(name, "keepalive-interval")) {
q->keepalive_interval = MAX(0, (int)value);
} else if(!strcmp(name, "keepalive-timeout")) {
q->keepalive_timeout = MAX(0, (int)value);
} else if(!strcmp(name, "short-timeout")) {
q->short_timeout = MAX(1, (int)value);
} else if(!strcmp(name, "long-timeout")) {
q->long_timeout = MAX(1, (int)value);
} else if(!strcmp(name, "category-steady-n-tasks")) {
category_tune_bucket_size("category-steady-n-tasks", (int) value);
} else {
debug(D_NOTICE|D_WQ, "Warning: tuning parameter \"%s\" not recognized\n", name);
return -1;
}
return 0;
}
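/*
 * Illustrative sketch: parameters are matched by name at runtime, so a typo
 * is only caught via the -1 return value.
 *
 *   if(work_queue_tune(q, "keepalive-interval", 120) < 0) {
 *       fprintf(stderr, "unknown tuning parameter\n");
 *   }
 */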
void work_queue_enable_process_module(struct work_queue *q)
{
q->process_pending_check = 1;
}
char * work_queue_get_worker_summary( struct work_queue *q )
{
return strdup("n/a");
}
void work_queue_set_bandwidth_limit(struct work_queue *q, const char *bandwidth)
{
q->bandwidth = string_metric_parse(bandwidth);
}
double work_queue_get_effective_bandwidth(struct work_queue *q)
{
double queue_bandwidth = get_queue_transfer_rate(q, NULL)/MEGABYTE; //return in MB per second
return queue_bandwidth;
}
static void fill_deprecated_queue_stats(struct work_queue *q, struct work_queue_stats *s) {
s->total_workers_connected = s->workers_connected;
s->total_workers_joined = s->workers_joined;
s->total_workers_removed = s->workers_removed;
s->total_workers_lost = s->workers_lost;
s->total_workers_idled_out = s->workers_idled_out;
s->total_workers_fast_aborted = s->workers_fast_aborted;
s->tasks_complete = s->tasks_with_results;
s->total_tasks_dispatched = s->tasks_dispatched;
s->total_tasks_complete = s->tasks_done;
s->total_tasks_failed = s->tasks_failed;
s->total_tasks_cancelled = s->tasks_cancelled;
s->total_exhausted_attempts = s->tasks_exhausted_attempts;
s->start_time = s->time_when_started;
s->total_send_time = s->time_send;
s->total_receive_time = s->time_receive;
s->total_good_transfer_time = s->time_send_good + s->time_receive_good;
s->total_execute_time = s->time_workers_execute;
s->total_good_execute_time = s->time_workers_execute_good;
s->total_exhausted_execute_time = s->time_workers_execute_exhaustion;
s->total_bytes_sent = s->bytes_sent;
s->total_bytes_received = s->bytes_received;
s->capacity = s->capacity_cores;
s->port = q->port;
s->priority = q->priority;
s->workers_ready = s->workers_idle;
s->workers_full = s->workers_busy;
s->total_worker_slots = s->tasks_dispatched;
s->avg_capacity = s->capacity_cores;
timestamp_t wall_clock_time = timestamp_get() - q->stats->time_when_started;
if(wall_clock_time > 0 && s->workers_connected > 0) {
s->efficiency = (double) (q->stats->time_workers_execute_good) / (wall_clock_time * s->workers_connected);
}
if(wall_clock_time>0) {
s->idle_percentage = (double) q->stats->time_polling / wall_clock_time;
}
}
void work_queue_get_stats(struct work_queue *q, struct work_queue_stats *s)
{
struct work_queue_stats *qs;
qs = q->stats;
memcpy(s, qs, sizeof(*s));
//info about workers
s->workers_connected = count_workers(q, WORKER_TYPE_WORKER | WORKER_TYPE_FOREMAN);
s->workers_init = count_workers(q, WORKER_TYPE_UNKNOWN);
s->workers_busy = workers_with_tasks(q);
s->workers_idle = s->workers_connected - s->workers_busy;
// s->workers_able computed below.
//info about tasks
s->tasks_waiting = task_state_count(q, NULL, WORK_QUEUE_TASK_READY);
s->tasks_on_workers = task_state_count(q, NULL, WORK_QUEUE_TASK_RUNNING) + task_state_count(q, NULL, WORK_QUEUE_TASK_WAITING_RETRIEVAL);
s->tasks_with_results = task_state_count(q, NULL, WORK_QUEUE_TASK_WAITING_RETRIEVAL);
{
//accumulate tasks running, from workers:
char *key;
struct work_queue_worker *w;
s->tasks_running = 0;
hash_table_firstkey(q->worker_table);
while(hash_table_nextkey(q->worker_table, &key, (void **) &w)) {
accumulate_stat(s, w->stats, tasks_running);
}
/* (see work_queue_get_stats_hierarchy for an explanation on the
* following line) */
s->tasks_running = MIN(s->tasks_running, s->tasks_on_workers);
}
compute_capacity(q, s);
//info about resources
s->bandwidth = work_queue_get_effective_bandwidth(q);
struct work_queue_resources r;
aggregate_workers_resources(q,&r,NULL);
s->total_cores = r.cores.total;
s->total_memory = r.memory.total;
s->total_disk = r.disk.total;
s->total_gpus = r.gpus.total;
s->committed_cores = r.cores.inuse;
s->committed_memory = r.memory.inuse;
s->committed_disk = r.disk.inuse;
s->committed_gpus = r.gpus.inuse;
s->min_cores = r.cores.smallest;
s->max_cores = r.cores.largest;
s->min_memory = r.memory.smallest;
s->max_memory = r.memory.largest;
s->min_disk = r.disk.smallest;
s->max_disk = r.disk.largest;
s->min_gpus = r.gpus.smallest;
s->max_gpus = r.gpus.largest;
{
struct rmsummary *rmax = largest_waiting_measured_resources(q, NULL);
char *key;
struct category *c;
hash_table_firstkey(q->categories);
while(hash_table_nextkey(q->categories, &key, (void **) &c)) {
rmsummary_merge_max(rmax, c->max_allocation);
}
s->workers_able = count_workers_for_waiting_tasks(q, rmax);
rmsummary_delete(rmax);
}
fill_deprecated_queue_stats(q, s);
}
void work_queue_get_stats_hierarchy(struct work_queue *q, struct work_queue_stats *s)
{
work_queue_get_stats(q, s);
char *key;
struct work_queue_worker *w;
/* Count a task as running only if some worker has reported it. */
s->tasks_running = 0;
s->workers_connected = 0;
hash_table_firstkey(q->worker_table);
while(hash_table_nextkey(q->worker_table, &key, (void **) &w)) {
if(w->type == WORKER_TYPE_FOREMAN)
{
accumulate_stat(s, w->stats, workers_joined);
accumulate_stat(s, w->stats, workers_removed);
accumulate_stat(s, w->stats, workers_idled_out);
accumulate_stat(s, w->stats, workers_fast_aborted);
accumulate_stat(s, w->stats, workers_lost);
accumulate_stat(s, w->stats, time_send);
accumulate_stat(s, w->stats, time_receive);
accumulate_stat(s, w->stats, time_send_good);
accumulate_stat(s, w->stats, time_receive_good);
accumulate_stat(s, w->stats, time_workers_execute);
accumulate_stat(s, w->stats, time_workers_execute_good);
accumulate_stat(s, w->stats, time_workers_execute_exhaustion);
accumulate_stat(s, w->stats, bytes_sent);
accumulate_stat(s, w->stats, bytes_received);
}
accumulate_stat(s, w->stats, tasks_waiting);
accumulate_stat(s, w->stats, tasks_running);
}
/* We rely on worker messages to update tasks_running. Such data are
* attached to keepalive messages, thus tasks_running is not always
* current. Here we simply enforce that there cannot be more
* tasks_running than tasks_on_workers. */
s->tasks_running = MIN(s->tasks_running, s->tasks_on_workers);
/* Account also for workers connected directly to the manager. */
s->workers_connected = s->workers_joined - s->workers_removed;
s->workers_joined += q->stats_disconnected_workers->workers_joined;
s->workers_removed += q->stats_disconnected_workers->workers_removed;
s->workers_idled_out += q->stats_disconnected_workers->workers_idled_out;
s->workers_fast_aborted += q->stats_disconnected_workers->workers_fast_aborted;
s->workers_lost += q->stats_disconnected_workers->workers_lost;
s->time_send += q->stats_disconnected_workers->time_send;
s->time_receive += q->stats_disconnected_workers->time_receive;
s->time_send_good += q->stats_disconnected_workers->time_send_good;
s->time_receive_good += q->stats_disconnected_workers->time_receive_good;
s->time_workers_execute += q->stats_disconnected_workers->time_workers_execute;
s->time_workers_execute_good += q->stats_disconnected_workers->time_workers_execute_good;
s->time_workers_execute_exhaustion += q->stats_disconnected_workers->time_workers_execute_exhaustion;
s->bytes_sent += q->stats_disconnected_workers->bytes_sent;
s->bytes_received += q->stats_disconnected_workers->bytes_received;
fill_deprecated_queue_stats(q, s);
}
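/* Fill *s with the statistics accumulated for a single task category,
creating the category if it does not exist yet. */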
void work_queue_get_stats_category(struct work_queue *q, const char *category, struct work_queue_stats *s)
{
struct category *c = work_queue_category_lookup_or_create(q, category);
struct work_queue_stats *cs = c->wq_stats;
memcpy(s, cs, sizeof(*s));
//info about tasks
s->tasks_waiting = task_state_count(q, category, WORK_QUEUE_TASK_READY);
s->tasks_running = task_state_count(q, category, WORK_QUEUE_TASK_RUNNING) + task_state_count(q, category, WORK_QUEUE_TASK_WAITING_RETRIEVAL);
s->tasks_with_results = task_state_count(q, category, WORK_QUEUE_TASK_WAITING_RETRIEVAL);
struct rmsummary *rmax = largest_waiting_measured_resources(q, c->name);
s->workers_able = count_workers_for_waiting_tasks(q, rmax);
rmsummary_delete(rmax);
}
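/* Sum the resources of all connected workers into *total, skipping
workers that have not yet reported their resources. If features is
given, it is cleared and repopulated with the union of the features
advertised by the workers. */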
void aggregate_workers_resources( struct work_queue *q, struct work_queue_resources *total, struct hash_table *features)
{
struct work_queue_worker *w;
char *key;
bzero(total, sizeof(struct work_queue_resources));
if(hash_table_size(q->worker_table)==0) {
return;
}
if(features) {
hash_table_clear(features);
}
hash_table_firstkey(q->worker_table);
while(hash_table_nextkey(q->worker_table,&key,(void**)&w)) {
if(w->resources->tag < 0)
continue;
work_queue_resources_add(total,w->resources);
if(features) {
if(w->features) {
char *key;
void *dummy;
hash_table_firstkey(w->features);
while(hash_table_nextkey(w->features, &key, &dummy)) {
hash_table_insert(features, key, (void *) 1);
}
}
}
}
}
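/* Open the performance log in append mode, write the column header,
and record the current queue stats. Returns 1 on success, 0 if the
file could not be opened. */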
int work_queue_specify_log(struct work_queue *q, const char *logfile)
{
q->logfile = fopen(logfile, "a");
if(q->logfile) {
setvbuf(q->logfile, NULL, _IOLBF, 2048); // line buffered, we don't want incomplete lines
fprintf(q->logfile,
// start with a comment
"#"
// time:
" timestamp"
// workers current:
" workers_connected workers_init workers_idle workers_busy workers_able"
// workers cumulative:
" workers_joined workers_removed workers_released workers_idled_out workers_blocked workers_fast_aborted workers_lost"
// tasks current:
" tasks_waiting tasks_on_workers tasks_running tasks_with_results"
// tasks cumulative
" tasks_submitted tasks_dispatched tasks_done tasks_failed tasks_cancelled tasks_exhausted_attempts"
// manager time statistics:
" time_when_started time_send time_receive time_send_good time_receive_good time_status_msgs time_internal time_polling time_application"
// workers time statistics:
" time_execute time_execute_good time_execute_exhaustion"
// bandwidth:
" bytes_sent bytes_received bandwidth"
// resources:
" capacity_tasks capacity_cores capacity_memory capacity_disk capacity_instantaneous capacity_weighted manager_load"
" total_cores total_memory total_disk"
" committed_cores committed_memory committed_disk"
" max_cores max_memory max_disk"
" min_cores min_memory min_disk"
// end with a newline
"\n"
);
log_queue_stats(q);
debug(D_WQ, "log enabled and is being written to %s\n", logfile);
return 1;
} else {
debug(D_NOTICE | D_WQ, "couldn't open logfile %s: %s\n", logfile, strerror(errno));
return 0;
}
}
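/* Append one record to the transactions log: timestamp, manager pid,
and the caller-supplied string. A no-op if the log is not enabled. */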
static void write_transaction(struct work_queue *q, const char *str) {
if(!q->transactions_logfile)
return;
fprintf(q->transactions_logfile, "%" PRIu64, timestamp_get());
fprintf(q->transactions_logfile, " %d", getpid());
fprintf(q->transactions_logfile, " %s", str);
fprintf(q->transactions_logfile, "\n");
}
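/* Log a TASK transaction, with extra fields that depend on the task's
current state (resources requested, worker address, result, etc.). */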
static void write_transaction_task(struct work_queue *q, struct work_queue_task *t) {
if(!q->transactions_logfile)
return;
struct buffer B;
buffer_init(&B);
work_queue_task_state_t state = (uintptr_t) itable_lookup(q->task_state_map, t->taskid);
buffer_printf(&B, "TASK %d %s", t->taskid, task_state_str(state));
if(state == WORK_QUEUE_TASK_UNKNOWN) {
/* do not add any info */
} else if(state == WORK_QUEUE_TASK_READY) {
const char *allocation = (t->resource_request == CATEGORY_ALLOCATION_FIRST ? "FIRST_RESOURCES" : "MAX_RESOURCES");
buffer_printf(&B, " %s %s ", t->category, allocation);
rmsummary_print_buffer(&B, task_min_resources(q, t), 1);
} else if(state == WORK_QUEUE_TASK_CANCELED) {
/* do not add any info */
} else if(state == WORK_QUEUE_TASK_RETRIEVED || state == WORK_QUEUE_TASK_DONE) {
buffer_printf(&B, " %s ", task_result_str(t->result));
buffer_printf(&B, " %d ", t->return_status);
if(t->resources_measured) {
if(t->result == WORK_QUEUE_RESULT_RESOURCE_EXHAUSTION) {
rmsummary_print_buffer(&B, t->resources_measured->limits_exceeded, 1);
buffer_printf(&B, " ");
}
else {
// no limits broken, thus printing an empty dictionary
buffer_printf(&B, " {} ");
}
rmsummary_print_buffer(&B, t->resources_measured, 1);
} else {
// no resources measured: one empty dictionary for limits broken, the other for resources.
buffer_printf(&B, " {} {}");
}
} else {
struct work_queue_worker *w = itable_lookup(q->worker_task_map, t->taskid);
const char *worker_str = "worker-info-not-available";
if(w) {
worker_str = w->addrport;
buffer_printf(&B, " %s ", worker_str);
if(state == WORK_QUEUE_TASK_RUNNING) {
const char *allocation = (t->resource_request == CATEGORY_ALLOCATION_FIRST ? "FIRST_RESOURCES" : "MAX_RESOURCES");
buffer_printf(&B, " %s ", allocation);
const struct rmsummary *box = itable_lookup(w->current_tasks_boxes, t->taskid);
rmsummary_print_buffer(&B, box, 1);
} else if(state == WORK_QUEUE_TASK_WAITING_RETRIEVAL) {
/* do not add any info */
}
}
}
write_transaction(q, buffer_tostring(&B));
buffer_free(&B);
}
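/* Log the MAX, MIN, and FIRST resource allocations of a category. */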
static void write_transaction_category(struct work_queue *q, struct category *c) {
if(!q->transactions_logfile)
return;
if(!c)
return;
struct buffer B;
buffer_init(&B);
buffer_printf(&B, "CATEGORY %s MAX ", c->name);
rmsummary_print_buffer(&B, category_dynamic_task_max_resources(c, NULL, CATEGORY_ALLOCATION_MAX), 1);
write_transaction(q, buffer_tostring(&B));
buffer_rewind(&B, 0);
buffer_printf(&B, "CATEGORY %s MIN ", c->name);
rmsummary_print_buffer(&B, category_dynamic_task_min_resources(c, NULL, CATEGORY_ALLOCATION_FIRST), 1);
write_transaction(q, buffer_tostring(&B));
buffer_rewind(&B, 0);
const char *mode;
switch(c->allocation_mode) {
case CATEGORY_ALLOCATION_MODE_MAX:
mode = "MAX";
break;
case CATEGORY_ALLOCATION_MODE_MIN_WASTE:
mode = "MIN_WASTE";
break;
case CATEGORY_ALLOCATION_MODE_MAX_THROUGHPUT:
mode = "MAX_THROUGHPUT";
break;
case CATEGORY_ALLOCATION_MODE_FIXED:
default:
mode = "FIXED";
break;
}
buffer_printf(&B, "CATEGORY %s FIRST %s ", c->name, mode);
rmsummary_print_buffer(&B, category_dynamic_task_max_resources(c, NULL, CATEGORY_ALLOCATION_FIRST), 1);
write_transaction(q, buffer_tostring(&B));
buffer_free(&B);
}
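/* Log a WORKER CONNECTION or DISCONNECTION transaction, including the
reason the worker left when applicable. */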
static void write_transaction_worker(struct work_queue *q, struct work_queue_worker *w, int leaving, worker_disconnect_reason reason_leaving) {
struct buffer B;
buffer_init(&B);
buffer_printf(&B, "WORKER %s %s ", w->workerid, w->addrport);
if(leaving) {
buffer_printf(&B, " DISCONNECTION");
switch(reason_leaving) {
case WORKER_DISCONNECT_IDLE_OUT:
buffer_printf(&B, " IDLE_OUT");
break;
case WORKER_DISCONNECT_FAST_ABORT:
buffer_printf(&B, " FAST_ABORT");
break;
case WORKER_DISCONNECT_FAILURE:
buffer_printf(&B, " FAILURE");
break;
case WORKER_DISCONNECT_STATUS_WORKER:
buffer_printf(&B, " STATUS_WORKER");
break;
case WORKER_DISCONNECT_EXPLICIT:
buffer_printf(&B, " EXPLICIT");
break;
case WORKER_DISCONNECT_UNKNOWN:
default:
buffer_printf(&B, " UNKNOWN");
break;
}
} else {
buffer_printf(&B, " CONNECTION");
}
write_transaction(q, buffer_tostring(&B));
buffer_free(&B);
}
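/* Log the total cores, memory, and disk reported by a worker. */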
static void write_transaction_worker_resources(struct work_queue *q, struct work_queue_worker *w) {
struct rmsummary *s = rmsummary_create(-1);
s->cores = w->resources->cores.total;
s->memory = w->resources->memory.total;
s->disk = w->resources->disk.total;
char *rjx = rmsummary_print_string(s, 1);
struct buffer B;
buffer_init(&B);
buffer_printf(&B, "WORKER %s RESOURCES %s", w->workerid, rjx);
write_transaction(q, buffer_tostring(&B));
rmsummary_delete(s);
buffer_free(&B);
free(rjx);
}
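/* Open the transactions log in append mode, write the header lines
describing the record formats, and log the MANAGER START event.
Returns 1 on success, 0 if the file could not be opened. */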
int work_queue_specify_transactions_log(struct work_queue *q, const char *logfile) {
q->transactions_logfile = fopen(logfile, "a");
if(q->transactions_logfile) {
setvbuf(q->transactions_logfile, NULL, _IOLBF, 1024); // line buffered, we don't want incomplete lines
debug(D_WQ, "transactions log enabled and is being written to %s\n", logfile);
fprintf(q->transactions_logfile, "# time manager_pid MANAGER START|END\n");
fprintf(q->transactions_logfile, "# time manager_pid WORKER worker_id host:port CONNECTION\n");
fprintf(q->transactions_logfile, "# time manager_pid WORKER worker_id host:port DISCONNECTION (UNKNOWN|IDLE_OUT|FAST_ABORT|FAILURE|STATUS_WORKER|EXPLICIT\n");
fprintf(q->transactions_logfile, "# time manager_pid WORKER worker_id RESOURCES {resources}\n");
fprintf(q->transactions_logfile, "# time manager_pid CATEGORY name MAX {resources_max_per_task}\n");
fprintf(q->transactions_logfile, "# time manager_pid CATEGORY name MIN {resources_min_per_task_per_worker}\n");
fprintf(q->transactions_logfile, "# time manager_pid CATEGORY name FIRST (FIXED|MAX|MIN_WASTE|MAX_THROUGHPUT) {resources_requested}\n");
fprintf(q->transactions_logfile, "# time manager_pid TASK taskid WAITING category_name (FIRST_RESOURCES|MAX_RESOURCES) {resources_requested}\n");
fprintf(q->transactions_logfile, "# time manager_pid TASK taskid RUNNING worker_address (FIRST_RESOURCES|MAX_RESOURCES) {resources_allocated}\n");
fprintf(q->transactions_logfile, "# time manager_pid TASK taskid WAITING_RETRIEVAL worker_address\n");
fprintf(q->transactions_logfile, "# time manager_pid TASK taskid (RETRIEVED|DONE) (SUCCESS|SIGNAL|END_TIME|FORSAKEN|MAX_RETRIES|MAX_WALLTIME|UNKNOWN|RESOURCE_EXHAUSTION) exit_code {limits_exceeded} {resources_measured}\n\n");
write_transaction(q, "MANAGER START");
return 1;
}
else
{
debug(D_NOTICE | D_WQ, "couldn't open transactions logfile %s: %s\n", logfile, strerror(errno));
return 0;
}
}
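/* Fold the measurements of a completed task into the statistics of its
category and of the queue, and update the category's resource model
when the task result makes the measurements meaningful. */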
void work_queue_accumulate_task(struct work_queue *q, struct work_queue_task *t) {
const char *name = t->category ? t->category : "default";
struct category *c = work_queue_category_lookup_or_create(q, name);
struct work_queue_stats *s = c->wq_stats;
s->bytes_sent += t->bytes_sent;
s->bytes_received += t->bytes_received;
s->time_workers_execute += t->time_workers_execute_last;
s->time_send += t->time_when_commit_end - t->time_when_commit_start;
s->time_receive += t->time_when_done - t->time_when_retrieval;
s->bandwidth = (1.0*MEGABYTE*(s->bytes_sent + s->bytes_received))/(s->time_send + s->time_receive + 1);
q->stats->tasks_done++;
if(t->result == WORK_QUEUE_RESULT_SUCCESS)
{
q->stats->time_workers_execute_good += t->time_workers_execute_last;
q->stats->time_send_good += t->time_when_commit_end - t->time_when_commit_start;
q->stats->time_receive_good += t->time_when_done - t->time_when_retrieval;
s->tasks_done++;
s->time_workers_execute_good += t->time_workers_execute_last;
s->time_send_good += t->time_when_commit_end - t->time_when_commit_start;
s->time_receive_good += t->time_when_done - t->time_when_retrieval;
} else {
s->tasks_failed++;
if(t->result == WORK_QUEUE_RESULT_RESOURCE_EXHAUSTION) {
s->time_workers_execute_exhaustion += t->time_workers_execute_last;
q->stats->time_workers_execute_exhaustion += t->time_workers_execute_last;
q->stats->tasks_exhausted_attempts++;
t->time_workers_execute_exhaustion += t->time_workers_execute_last;
t->exhausted_attempts++;
}
}
/* accumulate resource summary to category only if task result makes it meaningful. */
switch(t->result) {
case WORK_QUEUE_RESULT_SUCCESS:
case WORK_QUEUE_RESULT_SIGNAL:
case WORK_QUEUE_RESULT_RESOURCE_EXHAUSTION:
case WORK_QUEUE_RESULT_TASK_MAX_RUN_TIME:
case WORK_QUEUE_RESULT_DISK_ALLOC_FULL:
if(category_accumulate_summary(c, t->resources_measured, q->current_max_worker)) {
write_transaction_category(q, c);
}
break;
case WORK_QUEUE_RESULT_INPUT_MISSING:
case WORK_QUEUE_RESULT_OUTPUT_MISSING:
case WORK_QUEUE_RESULT_TASK_TIMEOUT:
case WORK_QUEUE_RESULT_UNKNOWN:
case WORK_QUEUE_RESULT_FORSAKEN:
case WORK_QUEUE_RESULT_MAX_RETRIES:
default:
break;
}
}
void work_queue_initialize_categories(struct work_queue *q, struct rmsummary *max, const char *summaries_file) {
categories_initialize(q->categories, max, summaries_file);
}
void work_queue_specify_max_resources(struct work_queue *q, const struct rmsummary *rm) {
work_queue_specify_category_max_resources(q, "default", rm);
}
void work_queue_specify_min_resources(struct work_queue *q, const struct rmsummary *rm) {
work_queue_specify_category_min_resources(q, "default", rm);
}
void work_queue_specify_category_max_resources(struct work_queue *q, const char *category, const struct rmsummary *rm) {
struct category *c = work_queue_category_lookup_or_create(q, category);
category_specify_max_allocation(c, rm);
}
void work_queue_specify_category_min_resources(struct work_queue *q, const char *category, const struct rmsummary *rm) {
struct category *c = work_queue_category_lookup_or_create(q, category);
category_specify_min_allocation(c, rm);
}
void work_queue_specify_category_first_allocation_guess(struct work_queue *q, const char *category, const struct rmsummary *rm) {
struct category *c = work_queue_category_lookup_or_create(q, category);
category_specify_first_allocation_guess(c, rm);
}
int work_queue_specify_category_mode(struct work_queue *q, const char *category, category_mode_t mode) {
switch(mode) {
case CATEGORY_ALLOCATION_MODE_FIXED:
case CATEGORY_ALLOCATION_MODE_MAX:
case CATEGORY_ALLOCATION_MODE_MIN_WASTE:
case CATEGORY_ALLOCATION_MODE_MAX_THROUGHPUT:
break;
default:
notice(D_WQ, "Unknown category mode specified.");
return 0;
}
if(!category) {
q->allocation_default_mode = mode;
}
else {
struct category *c = work_queue_category_lookup_or_create(q, category);
category_specify_allocation_mode(c, mode);
write_transaction_category(q, c);
}
return 1;
}
int work_queue_enable_category_resource(struct work_queue *q, const char *category, const char *resource, int autolabel) {
struct category *c = work_queue_category_lookup_or_create(q, category);
return category_enable_auto_resource(c, resource, autolabel);
}
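/* Return the largest resource allocation this task may receive,
according to its category and its explicit requests. */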
const struct rmsummary *task_max_resources(struct work_queue *q, struct work_queue_task *t) {
struct category *c = work_queue_category_lookup_or_create(q, t->category);
return category_dynamic_task_max_resources(c, t->resources_requested, t->resource_request);
}
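/* Return the smallest resource allocation this task should receive,
capping first-time tasks by the largest worker seen so far (see the
comment in the body). */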
const struct rmsummary *task_min_resources(struct work_queue *q, struct work_queue_task *t) {
struct category *c = work_queue_category_lookup_or_create(q, t->category);
const struct rmsummary *s = category_dynamic_task_min_resources(c, t->resources_requested, t->resource_request);
if(t->resource_request != CATEGORY_ALLOCATION_FIRST || !q->current_max_worker) {
return s;
}
// If this task is being tried for the first time, we take the minimum
// to be the smaller of what we have observed and the largest worker.
// This eliminates observed outliers that would prevent new tasks from running.
if((q->current_max_worker->cores > 0 && q->current_max_worker->cores < s->cores)
|| (q->current_max_worker->memory > 0 && q->current_max_worker->memory < s->memory)
|| (q->current_max_worker->disk > 0 && q->current_max_worker->disk < s->disk)
|| (q->current_max_worker->gpus > 0 && q->current_max_worker->gpus < s->gpus)) {
struct rmsummary *r = rmsummary_create(-1);
rmsummary_merge_override(r, q->current_max_worker);
rmsummary_merge_override(r, t->resources_requested);
s = category_dynamic_task_min_resources(c, r, t->resource_request);
rmsummary_delete(r);
}
return s;
}
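/* Find the category with the given name, creating it with a fresh
stats block and the queue's default allocation mode if needed. */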
struct category *work_queue_category_lookup_or_create(struct work_queue *q, const char *name) {
struct category *c = category_lookup_or_create(q->categories, name);
if(!c->wq_stats) {
c->wq_stats = calloc(1, sizeof(struct work_queue_stats));
category_specify_allocation_mode(c, q->allocation_default_mode);
}
return c;
}
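/* Construct the pathname of the disk-allocation-exhausted log,
optionally qualified by taskid. Returns a newly allocated string. */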
char *work_queue_generate_disk_alloc_full_filename(char *pwd, int taskid) {
path_remove_trailing_slashes(pwd);
if(!taskid) {
return string_format("%s/cctools_disk_allocation_exhausted.log", pwd);
}
return string_format("%s/cctools_disk_allocation_exhausted.%d.log", pwd, taskid);
}
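/* Ensure that taskids assigned from now on are at least minid.
Returns the next taskid that will be used. */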
int work_queue_specify_min_taskid(struct work_queue *q, int minid) {
if(minid > q->next_taskid) {
q->next_taskid = minid;
}
return q->next_taskid;
}
/* vim: set noexpandtab tabstop=4: */
| 1 | 15,210 | Careful: you only want two newlines after the final header message. (That indicates the end of headers and the start of data.) | cooperative-computing-lab-cctools | c |
@@ -9,6 +9,9 @@ def request(flow):
flow.request.headers.pop('If-Modified-Since', None)
flow.request.headers.pop('Cache-Control', None)
+ # do not force https redirection
+ flow.request.headers.pop('Upgrade-Insecure-Requests', None)
+
# proxy connections to SSL-enabled hosts
if flow.request.pretty_host in secure_hosts:
flow.request.scheme = 'https' | 1 | import re
from six.moves import urllib
# set of SSL/TLS capable hosts
secure_hosts = set()
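# called on every client request: strip validation/caching headers and
# upgrade requests to hosts previously seen redirecting to https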
def request(flow):
flow.request.headers.pop('If-Modified-Since', None)
flow.request.headers.pop('Cache-Control', None)
# proxy connections to SSL-enabled hosts
if flow.request.pretty_host in secure_hosts:
flow.request.scheme = 'https'
flow.request.port = 443
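# called on every server response: strip transport-security headers and
# downgrade https references so the client keeps talking plain http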
def response(flow):
flow.response.headers.pop('Strict-Transport-Security', None)
flow.response.headers.pop('Public-Key-Pins', None)
# strip links in response body
flow.response.content = flow.response.content.replace('https://', 'http://')
# strip links in 'Location' header
if flow.response.headers.get('Location', '').startswith('https://'):
location = flow.response.headers['Location']
hostname = urllib.parse.urlparse(location).hostname
if hostname:
secure_hosts.add(hostname)
flow.response.headers['Location'] = location.replace('https://', 'http://', 1)
# strip secure flag from 'Set-Cookie' headers
cookies = flow.response.headers.get_all('Set-Cookie')
cookies = [re.sub(r';\s*secure\s*', '', s) for s in cookies]
flow.response.headers.set_all('Set-Cookie', cookies)
| 1 | 12,209 | This will not work on Python 3 (to which we are transitioning) because `.content` is bytes, not a str. Can you make the pattern a bytes object as well (like so: `b"pattern"`)? | mitmproxy-mitmproxy | py |
@@ -85,7 +85,7 @@ const ariaRoles = {
},
combobox: {
type: 'composite',
- requiredOwned: ['textbox', 'listbox', 'tree', 'grid', 'dialog'],
+ requiredOwned: ['listbox', 'tree', 'grid', 'dialog', 'textbox'],
requiredAttrs: ['aria-expanded'],
// Note: because aria-controls is not well supported we will not
// make it a required attribute even though it is required in the | 1 | // Source: https://www.w3.org/TR/wai-aria-1.1/#roles
/* easiest way to see allowed roles is to filter out the global ones
from the list of inherited states and properties. The dpub spec
does not have the global list so you'll need to copy over from
the wai-aria one:
const globalAttrs = Array.from(
document.querySelectorAll('#global_states li')
).map(li => li.textContent.replace(/\s*\(.*\)/, ''));
const globalRoleAttrs = Array.from(
document.querySelectorAll('.role-inherited li')
).filter(li => globalAttrs.includes(li.textContent.replace(/\s*\(.*\)/, '')))
globalRoleAttrs.forEach(li => li.style.display = 'none');
*/
const ariaRoles = {
alert: {
type: 'widget',
allowedAttrs: ['aria-expanded']
},
alertdialog: {
type: 'widget',
allowedAttrs: ['aria-expanded', 'aria-modal']
},
application: {
// Note: spec difference
type: 'landmark',
// Note: aria-expanded is not in the 1.1 spec but is
// consistently supported in ATs and was added in 1.2
allowedAttrs: ['aria-activedescendant', 'aria-expanded']
},
article: {
type: 'structure',
allowedAttrs: ['aria-posinset', 'aria-setsize', 'aria-expanded']
},
banner: {
type: 'landmark',
allowedAttrs: ['aria-expanded']
},
button: {
type: 'widget',
allowedAttrs: ['aria-expanded', 'aria-pressed'],
nameFromContent: true
},
cell: {
type: 'structure',
requiredContext: ['row'],
allowedAttrs: [
'aria-colindex',
'aria-colspan',
'aria-rowindex',
'aria-rowspan',
'aria-expanded'
],
nameFromContent: true
},
checkbox: {
type: 'widget',
// Note: since the checkbox role has an implicit
// aria-checked value it is not required to be added by
// the user
//
// Note: aria-required is not in the 1.1 spec but is
// consistently supported in ATs and was added in 1.2
allowedAttrs: ['aria-checked', 'aria-readonly', 'aria-required'],
nameFromContent: true
},
columnheader: {
type: 'structure',
requiredContext: ['row'],
allowedAttrs: [
'aria-sort',
'aria-colindex',
'aria-colspan',
'aria-expanded',
'aria-readonly',
'aria-required',
'aria-rowindex',
'aria-rowspan',
'aria-selected'
],
nameFromContent: true
},
combobox: {
type: 'composite',
requiredOwned: ['textbox', 'listbox', 'tree', 'grid', 'dialog'],
requiredAttrs: ['aria-expanded'],
// Note: because aria-controls is not well supported we will not
// make it a required attribute even though it is required in the
// spec
allowedAttrs: [
'aria-controls',
'aria-autocomplete',
'aria-readonly',
'aria-required',
'aria-activedescendant',
'aria-orientation'
]
},
command: {
type: 'abstract'
},
complementary: {
type: 'landmark',
allowedAttrs: ['aria-expanded']
},
composite: {
type: 'abstract'
},
contentinfo: {
type: 'landmark',
allowedAttrs: ['aria-expanded']
},
definition: {
type: 'structure',
allowedAttrs: ['aria-expanded']
},
dialog: {
type: 'widget',
allowedAttrs: ['aria-expanded', 'aria-modal']
},
directory: {
type: 'structure',
allowedAttrs: ['aria-expanded'],
// Note: spec difference
nameFromContent: true
},
document: {
type: 'structure',
allowedAttrs: ['aria-expanded']
},
feed: {
type: 'structure',
requiredOwned: ['article'],
allowedAttrs: ['aria-expanded']
},
figure: {
type: 'structure',
allowedAttrs: ['aria-expanded'],
// Note: spec difference
nameFromContent: true
},
form: {
type: 'landmark',
allowedAttrs: ['aria-expanded']
},
grid: {
type: 'composite',
requiredOwned: ['rowgroup', 'row'],
allowedAttrs: [
'aria-level',
'aria-multiselectable',
'aria-readonly',
'aria-activedescendant',
'aria-colcount',
'aria-expanded',
'aria-rowcount'
]
},
gridcell: {
type: 'widget',
requiredContext: ['row'],
allowedAttrs: [
'aria-readonly',
'aria-required',
'aria-selected',
'aria-colindex',
'aria-colspan',
'aria-expanded',
'aria-rowindex',
'aria-rowspan'
],
nameFromContent: true
},
group: {
type: 'structure',
allowedAttrs: ['aria-activedescendant', 'aria-expanded']
},
heading: {
type: 'structure',
requiredAttrs: ['aria-level'],
allowedAttrs: ['aria-expanded'],
nameFromContent: true
},
img: {
type: 'structure',
allowedAttrs: ['aria-expanded']
},
input: {
type: 'abstract'
},
landmark: {
type: 'abstract'
},
link: {
type: 'widget',
allowedAttrs: ['aria-expanded'],
nameFromContent: true
},
list: {
type: 'structure',
requiredOwned: ['listitem'],
allowedAttrs: ['aria-expanded']
},
listbox: {
type: 'composite',
requiredOwned: ['option'],
allowedAttrs: [
'aria-multiselectable',
'aria-readonly',
'aria-required',
'aria-activedescendant',
'aria-expanded',
'aria-orientation'
]
},
listitem: {
type: 'structure',
requiredContext: ['list'],
allowedAttrs: [
'aria-level',
'aria-posinset',
'aria-setsize',
'aria-expanded'
],
// Note: spec difference
nameFromContent: true
},
log: {
type: 'widget',
allowedAttrs: ['aria-expanded']
},
main: {
type: 'landmark',
allowedAttrs: ['aria-expanded']
},
marquee: {
type: 'widget',
allowedAttrs: ['aria-expanded']
},
math: {
type: 'structure',
allowedAttrs: ['aria-expanded']
},
menu: {
type: 'composite',
requiredOwned: ['menuitemradio', 'menuitem', 'menuitemcheckbox'],
allowedAttrs: ['aria-activedescendant', 'aria-expanded', 'aria-orientation']
},
menubar: {
type: 'composite',
requiredOwned: ['menuitemradio', 'menuitem', 'menuitemcheckbox'],
allowedAttrs: ['aria-activedescendant', 'aria-expanded', 'aria-orientation']
},
menuitem: {
type: 'widget',
requiredContext: ['menu', 'menubar'],
// Note: aria-expanded is not in the 1.1 spec but is
// consistently supported in ATs and was added in 1.2
allowedAttrs: ['aria-posinset', 'aria-setsize', 'aria-expanded'],
nameFromContent: true
},
menuitemcheckbox: {
type: 'widget',
requiredContext: ['menu', 'menubar'],
allowedAttrs: [
'aria-checked',
'aria-posinset',
'aria-readonly',
'aria-setsize'
],
nameFromContent: true
},
menuitemradio: {
type: 'widget',
requiredContext: ['menu', 'menubar'],
allowedAttrs: [
'aria-checked',
'aria-posinset',
'aria-readonly',
'aria-setsize'
],
nameFromContent: true
},
navigation: {
type: 'landmark',
allowedAttrs: ['aria-expanded']
},
none: {
type: 'structure'
},
note: {
type: 'structure',
allowedAttrs: ['aria-expanded']
},
option: {
type: 'widget',
requiredContext: ['listbox'],
// Note: since the option role has an implicit
// aria-selected value it is not required to be added by
// the user
allowedAttrs: [
'aria-selected',
'aria-checked',
'aria-posinset',
'aria-setsize'
],
nameFromContent: true
},
presentation: {
type: 'structure'
},
progressbar: {
type: 'widget',
allowedAttrs: [
'aria-expanded',
'aria-valuemax',
'aria-valuemin',
'aria-valuenow',
'aria-valuetext'
]
},
radio: {
type: 'widget',
// Note: since the radio role has an implicit
// aria-checked value it is not required to be added by
// the user
//
// Note: aria-required is not in the 1.1 or 1.2 specs but is
// consistently supported in ATs on the individual radio element
allowedAttrs: [
'aria-checked',
'aria-posinset',
'aria-setsize',
'aria-required'
],
nameFromContent: true
},
radiogroup: {
type: 'composite',
requiredOwned: ['radio'],
allowedAttrs: [
'aria-readonly',
'aria-required',
'aria-activedescendant',
'aria-expanded',
'aria-orientation'
]
},
range: {
type: 'abstract'
},
region: {
type: 'landmark',
allowedAttrs: ['aria-expanded']
},
roletype: {
type: 'abstract'
},
row: {
type: 'structure',
requiredContext: ['grid', 'rowgroup', 'table', 'treegrid'],
requiredOwned: ['cell', 'columnheader', 'gridcell', 'rowheader'],
allowedAttrs: [
'aria-colindex',
'aria-level',
'aria-rowindex',
'aria-selected',
'aria-activedescendant',
'aria-expanded'
],
nameFromContent: true
},
rowgroup: {
type: 'structure',
requiredContext: ['grid', 'table', 'treegrid'],
requiredOwned: ['row'],
nameFromContent: true
},
rowheader: {
type: 'structure',
requiredContext: ['row'],
allowedAttrs: [
'aria-sort',
'aria-colindex',
'aria-colspan',
'aria-expanded',
'aria-readonly',
'aria-required',
'aria-rowindex',
'aria-rowspan',
'aria-selected'
],
nameFromContent: true
},
scrollbar: {
type: 'widget',
requiredAttrs: ['aria-valuenow'],
// Note: since the scrollbar role has implicit
// aria-orientation, aria-valuemax, aria-valuemin values it
// is not required to be added by the user
//
// Note: because aria-controls is not well supported we will not
// make it a required attribute even though it is required in the
// spec
allowedAttrs: [
'aria-controls',
'aria-orientation',
'aria-valuemax',
'aria-valuemin',
'aria-valuetext'
]
},
search: {
type: 'landmark',
allowedAttrs: ['aria-expanded']
},
searchbox: {
type: 'widget',
allowedAttrs: [
'aria-activedescendant',
'aria-autocomplete',
'aria-multiline',
'aria-placeholder',
'aria-readonly',
'aria-required'
]
},
section: {
type: 'abstract',
// Note: spec difference
nameFromContent: true
},
sectionhead: {
type: 'abstract',
// Note: spec difference
nameFromContent: true
},
select: {
type: 'abstract'
},
separator: {
type: 'structure',
// Note: since the separator role has implicit
// aria-orientation, aria-valuemax, aria-valuemin, and
// aria-valuenow values it is not required to be added by
// the user
allowedAttrs: [
'aria-valuemax',
'aria-valuemin',
'aria-valuenow',
'aria-orientation',
'aria-valuetext'
]
},
slider: {
type: 'widget',
requiredAttrs: ['aria-valuenow'],
// Note: since the slider role has implicit
// aria-orientation, aria-valuemax, aria-valuemin values it
// is not required to be added by the user
allowedAttrs: [
'aria-valuemax',
'aria-valuemin',
'aria-orientation',
'aria-readonly',
'aria-valuetext'
]
},
spinbutton: {
type: 'widget',
requiredAttrs: ['aria-valuenow'],
// Note: since the spinbutton role has implicit
// aria-orientation, aria-valuemax, aria-valuemin values it
// is not required to be added by the user
allowedAttrs: [
'aria-valuemax',
'aria-valuemin',
'aria-readonly',
'aria-required',
'aria-activedescendant',
'aria-valuetext'
]
},
status: {
type: 'widget',
allowedAttrs: ['aria-expanded']
},
structure: {
type: 'abstract'
},
switch: {
type: 'widget',
requiredAttrs: ['aria-checked'],
allowedAttrs: ['aria-readonly'],
nameFromContent: true
},
tab: {
type: 'widget',
requiredContext: ['tablist'],
allowedAttrs: [
'aria-posinset',
'aria-selected',
'aria-setsize',
'aria-expanded'
],
nameFromContent: true
},
table: {
type: 'structure',
requiredOwned: ['rowgroup', 'row'],
allowedAttrs: ['aria-colcount', 'aria-rowcount', 'aria-expanded'],
// NOTE: although the spec says this is not named from contents,
// the accessible text acceptance tests (#139 and #140) require
// table be named from content (we even had to special case
// table in commons/aria/named-from-contents)
nameFromContent: true
},
tablist: {
type: 'composite',
requiredOwned: ['tab'],
// NOTE: aria-expanded is from the 1.0 spec but is still
// consistently supported in ATs
allowedAttrs: [
'aria-level',
'aria-multiselectable',
'aria-orientation',
'aria-activedescendant',
'aria-expanded'
]
},
tabpanel: {
type: 'widget',
allowedAttrs: ['aria-expanded']
},
term: {
type: 'structure',
allowedAttrs: ['aria-expanded'],
// Note: spec difference
nameFromContent: true
},
textbox: {
type: 'widget',
allowedAttrs: [
'aria-activedescendant',
'aria-autocomplete',
'aria-multiline',
'aria-placeholder',
'aria-readonly',
'aria-required'
]
},
timer: {
type: 'widget',
allowedAttrs: ['aria-expanded']
},
toolbar: {
type: 'structure',
allowedAttrs: ['aria-orientation', 'aria-activedescendant', 'aria-expanded']
},
tooltip: {
type: 'structure',
allowedAttrs: ['aria-expanded'],
nameFromContent: true
},
tree: {
type: 'composite',
requiredOwned: ['treeitem'],
allowedAttrs: [
'aria-multiselectable',
'aria-required',
'aria-activedescendant',
'aria-expanded',
'aria-orientation'
]
},
treegrid: {
type: 'composite',
requiredOwned: ['rowgroup', 'row'],
allowedAttrs: [
'aria-activedescendant',
'aria-colcount',
'aria-expanded',
'aria-level',
'aria-multiselectable',
'aria-orientation',
'aria-readonly',
'aria-required',
'aria-rowcount'
]
},
treeitem: {
type: 'widget',
requiredContext: ['group', 'tree'],
allowedAttrs: [
'aria-checked',
'aria-expanded',
'aria-level',
'aria-posinset',
'aria-selected',
'aria-setsize'
],
nameFromContent: true
},
widget: {
type: 'abstract'
},
window: {
type: 'abstract'
}
};
export default ariaRoles;
| 1 | 15,753 | This was to allow the tests to pass when the order of the required was different. Silly, but we don't have an easy way to check for "equal but order doesn't matter" in chai. | dequelabs-axe-core | js |
@@ -16546,6 +16546,18 @@ RelInternalSP::costMethod() const
} // RelInternalSP::costMethod()
//<pb>
+CostMethod *
+HbaseDelete::costMethod() const
+{
+ if (CmpCommon::getDefault(HBASE_DELETE_COSTING) == DF_OFF)
+ return RelExpr::costMethod(); // returns cost 1 cost object
+
+ static THREAD_P CostMethodHbaseDelete *m = NULL;
+ if (m == NULL)
+ m = new (GetCliGlobals()->exCollHeap()) CostMethodHbaseDelete();
+ return m;
+} // HbaseDelete::costMethod()
+
PhysicalProperty*
HbaseDelete::synthPhysicalProperty(const Context* myContext,
const Lng32 planNumber, | 1 | /**********************************************************************
// @@@ START COPYRIGHT @@@
//
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
//
// @@@ END COPYRIGHT @@@
**********************************************************************/
/* -*-C++-*-
*****************************************************************************
*
* File: OptPhysRelExpr.C
* Description: Optimizer methods related to Physical Expressions
* These include methods related to costing and plan generation
* defined on physical operator classes as well as the
* base RelExpr class.
* Created: 12/10/96
* Language: C++
*
*
*
*
*****************************************************************************
*/
// ---------------------------------------------------------------------------
#include "Sqlcomp.h"
#include "GroupAttr.h"
#include "AllRelExpr.h"
#include "AllItemExpr.h"
#include "opt.h"
#include "PhyProp.h"
#include "Cost.h"
#include "ControlDB.h"
#include "CostMethod.h"
#include "EstLogProp.h"
#include "ScanOptimizer.h"
#include "DefaultConstants.h"
#include "PartKeyDist.h"
#include "OptimizerSimulator.h"
#include "HDFSHook.h"
#include "Globals.h"
#include "CmpStatement.h"
#include "UdfDllInteraction.h"
extern THREAD_P NAUnsigned SortEnforcerRuleNumber;
//<pb>
// -----------------------------------------------------------------------
// Methods on class RelExpr associated with physical exprs. These
// include methods for plan generation and costing.
// -----------------------------------------------------------------------
// static helper function for data source synthesis from two sources
static DataSourceEnum combineDataSources(DataSourceEnum a,
DataSourceEnum b)
{
if (a == b)
return a;
// there is kind of an order on the data source enums, just
// take the one with the higher order
if (a == SOURCE_ESP_DEPENDENT OR
b == SOURCE_ESP_DEPENDENT)
return SOURCE_ESP_DEPENDENT;
if (a == SOURCE_ESP_INDEPENDENT OR
b == SOURCE_ESP_INDEPENDENT)
return SOURCE_ESP_INDEPENDENT;
if (a == SOURCE_PERSISTENT_TABLE OR
b == SOURCE_PERSISTENT_TABLE)
return SOURCE_PERSISTENT_TABLE;
if (a == SOURCE_TEMPORARY_TABLE OR
b == SOURCE_TEMPORARY_TABLE)
return SOURCE_TEMPORARY_TABLE;
if (a == SOURCE_TRANSIENT_TABLE OR
b == SOURCE_TRANSIENT_TABLE)
return SOURCE_TRANSIENT_TABLE;
if (a == SOURCE_VIRTUAL_TABLE OR
b == SOURCE_VIRTUAL_TABLE)
return SOURCE_VIRTUAL_TABLE;
return a;
}
// ---------------------------------------------------------------------
// Allocate a workspace for plan generation.
// ---------------------------------------------------------------------
PlanWorkSpace * RelExpr::allocateWorkSpace() const
{
return new(CmpCommon::statementHeap()) PlanWorkSpace(getArity());
} // RelExpr::allocateWorkSpace()
// -----------------------------------------------------------------------
// RelExpr::costMethod()
// Obtain a pointer to a CostMethod object providing access
// to the cost estimation functions for nodes of this type.
// -----------------------------------------------------------------------
// Each physical operator class that is derived from RelExpr
// should redefine this virtual function, unless the default
// implementation is acceptable. This default implementation
// returns a CostMethod object that yields a local cost of 1.0.
// -----------------------------------------------------------------------
CostMethod*
RelExpr::costMethod() const
{
static THREAD_P CostMethodFixedCostPerRow *m = NULL;
if (m == NULL)
m = new (GetCliGlobals()->exCollHeap())
CostMethodFixedCostPerRow( 1.0 // constant cost for the node
, 0.0 // cost per child row
, 0.0 // cost per output row
);
return m;
}
//<pb>
// -----------------------------------------------------------------------
// RelExpr::createPlan()
// -----------------------------------------------------------------------
Context* RelExpr::createPlan(Context* myContext,
PlanWorkSpace* pws,
Rule* rule,
Guidance* guidance,
Guidance* & guidanceForChild)
{
Context* result = NULL;
// if we already have a solution and are way over level1SafetyNet_
// then return null (ie, skip create plan)
double limit_for_CPT = CURRSTMT_OPTDEFAULTS->level1SafetyNetMultiple();
if (myContext->getSolution() != NULL AND
(limit_for_CPT > 0.0 AND CURRSTMT_OPTDEFAULTS->getTaskCount() >
limit_for_CPT * CURRSTMT_OPTDEFAULTS->level1SafetyNet()))
return result;
Lng32 childIndex;
// check the cost limit if the context has a cost limit,
// but don't check it if pruning has been disabled:
NABoolean checkCostLimit =
((myContext->getCostLimit() != NULL) AND
(myContext->isPruningEnabled()));
CostMethod* cm = this->costMethod();
const ReqdPhysicalProperty* rppForMe =
myContext->getReqdPhysicalProperty();
if ( isParHeuristic4Feasible(myContext, rppForMe) )
return NULL;
// ---------------------------------------------------------------------
// First call to createPlan() for this operator.
// We want to recompute operatorCost depending on the plan number
// because, for example, Type2 and Type1 join operators would have
// quite different costs. The BMO flag should also be recomputed.
// Although computeOperatorCost has a plan as an input parameter, we
// used to compute operatorCost only for the first plan and then reuse
// it via getOperatorCost for other plans. That overestimated the
// operator cost and pruned the better plan.
// ---------------------------------------------------------------------
if (pws->isEmpty() OR
( CURRSTMT_OPTDEFAULTS->optimizerPruning()) AND
(pws->getPlanChildCount() == getArity() )
)
{
CascadesPlan* myPlan = myContext->getPlan();
// -----------------------------------------------------------------
// Check if the operator is a big memory operator. Cache result in
// pws and in the plan.
// -----------------------------------------------------------------
Lng32 planNumber =
pws->getCountOfChildContexts()/(getArity()>0 ? getArity() : 1);
if(isBigMemoryOperator(pws,planNumber))
{
pws->setBigMemoryOperator(TRUE);
myPlan->setBigMemoryOperator(TRUE);
}
else
{
pws->setBigMemoryOperator(FALSE);
myPlan->setBigMemoryOperator(FALSE);
}
/*
I was trying to reuse the operatorCost so as not to recompute it
when reoptimizing a plan that failed by exceeding the cost limit.
But because of some problems I commented out this reuse for
a while. The issue needs more investigation.
if ( NOT ( CURRSTMT_OPTDEFAULTS->OPHreuseOperatorCost()
AND myPlan->exceededCostLimit() )
)
{
*/
// -----------------------------------------------------------------
// Synthesize physical props of leaves right away so that they are
// available when computeOperatorCost is called.
// -----------------------------------------------------------------
if (getArity()==0)
{
if (myPlan->getPhysicalProperty() == NULL)
{
PhysicalProperty* sppForMe = synthPhysicalProperty(myContext,-1,pws);
if (sppForMe == NULL)
{
// bad properties, no plan can ever be found
return NULL;
}
myPlan->setPhysicalProperty(sppForMe);
}
}
// -----------------------------------------------------------------
// Now compute the operator cost.
// -----------------------------------------------------------------
Lng32 countOfStreams;
Cost* operatorCost;
if ( (myContext->isPruningEnabled() AND
checkCostLimit
)
OR (getArity() == 0)
OR (getOperatorType() == REL_ROOT)
)
{
if (CmpCommon::getDefault(SIMPLE_COST_MODEL) == DF_ON)
operatorCost =
cm->scmComputeOperatorCost(this, pws, countOfStreams);
else
operatorCost =
cm->computeOperatorCost(this, myContext, countOfStreams);
}
else
// If pruning disabled - skip computeOperatorCost on the way down
// because it will be recomputed on the way up anyway.
{
operatorCost = new HEAP Cost();
countOfStreams=0;
}
pws->initializeCost(operatorCost);
pws->setCountOfStreams(countOfStreams);
delete operatorCost;
// } // of if for reusing operatorCost
}
// ---------------------------------------------------------------------
// Subsequent calls to createPlan() for this operator.
//
// Do partial plan costing when all of the following conditions hold:
//
// 1) Operator is not unary and not a leaf.
// 2) Not all children have been optimized for most recent plan.
// 3) Cost limit checking is required
// ---------------------------------------------------------------------
// else
// This change is to provide compatibility with the old behaviour of
// plan generation without pruning. When complete testing is done
// this part will be simplified.
if ( pws->getCountOfChildContexts() > 0 )
{
if ( getArity() > 1 )
{
if ( pws->getPlanChildCount() < getArity() )
{
if ( checkCostLimit )
{
// ------------------------------------------------------------
// Compute partial plan cost for this operator's current
// plan. First calculate known costs of any children and
// combine them with this operator's preliminary cost to
// produce a partial plan cost. Store both the known children
// cost and the partial plan cost in the plan workspace.
// ------------------------------------------------------------
cm->computePartialPlanCost( this, pws, myContext);
}
}
else
{
// -----------------------------------------------------------
// All children for latest plan have been optimized, so reset
// for next possible plan.
// -----------------------------------------------------------
pws->resetPlanChildCount();
pws->setPartialPlanCostToOperatorCost();
// Without clearing it, the known children cost was reused
// for the next plan after a failed one, because we stay
// in the loop in createContextForAChild().
pws->setKnownChildrenCost((Cost *)NULL);
}
}
}
// ---------------------------------------------------------------------
// Check cost limits (perform pruning within branch-and-bound).
//
// This is only done once for the preliminary cost and once for each
// child (except the last child) of each plan in the plan workspace.
// ---------------------------------------------------------------------
if ( checkCostLimit
&& ( pws->isEmpty()
|| pws->getPlanChildCount() > 0) )
{
// ------------------------------------------------------------------
// Compare the known cost for performing this operation with the cost
// limit using the cost weight that may be specified in the required
// physical properties, if any.
// -----------------------------------------------------------------
const Cost* partialPlanCost = pws->getPartialPlanCost();
if ( (myContext
->getCostLimit()
->compareWithCost(*partialPlanCost,rppForMe) == LESS))
{
// The intention was to stop optimizing a context when the operator
// cost exceeded the costLimit. This is rather aggressive because
// the next plan (a Type1 join instead of a Type2 join as the first
// plan for HHJ) can have a cheaper operator cost but would never
// be considered. It is better to do this check in
// createContextForAChild(), giving the other plan a chance to complete.
if ( pws->isEmpty() AND CURRSTMT_OPTDEFAULTS->OPHpruneWhenCLExceeded() )
{
myContext->getPlan()->setExceededCostLimit();
return NULL;
}
// -----------------------------------------------------------------
// Mark the plan for the previous child Context as failed
// because it causes the cost for this operation to reach
// or exceed the cost limit.
// -----------------------------------------------------------------
Int32 numChildContextsPruned = getArity() - (Int32)pws->getPlanChildCount();
if (numChildContextsPruned > 0)
CURRSTMT_OPTGLOBALS->pruned_tasks_count += numChildContextsPruned;
pws->markLatestContextAsViolatesCostLimit();
DBGLOGMSG(" *** Cost limit exceeded by plan partial cost ");
}
}
//------------------------------------------------------------------------
// If we have optimized all children for the latest plan (A leaf operator
// satisfies this requirement trivially), get the cost of this most recent
// plan and see if it is the best plan so far.
//
// NOTE: for leaf operators there is only a single implicit plan.
//------------------------------------------------------------------------
if ( ( NOT pws->isEmpty()
&& pws->getPlanChildCount() == 0)
|| getArity() == 0 )
{
Cost* cost = pws->getCostOfPlan(pws->getLatestPlan());
// Only give this plan a chance to be stored as the plan
// with the best cost if it is acceptable, i.e. it is a valid
// plan and it satisfies any forced plan constraints.
// Should we check whether the plan is acceptable only when cost != NULL? SP.
if (cost AND CmpCommon::getDefault(NSK_DBG_SHOW_PLAN_LOG) == DF_ON )
{
// This is to print each intermediate (not only the best) plans
CascadesPlan * myPlan = myContext->getPlan();
Cost * costBefore = (Cost *)myPlan->getRollUpCost();
// The setRollUpCost is going to delete what the plan is currently
// pointing to. We need to make a copy of the Cost before
// setRollUpCost() is called.
if (costBefore != NULL)
costBefore = costBefore->duplicate();
DBGLOGMSG(" *** Latest plan *** ");
myPlan->setRollUpCost(cost->duplicate()); // deletes old Cost
DBGLOGPLAN(myPlan);
myPlan->setRollUpCost(costBefore);
RelExpr * op = myPlan->getPhysicalExpr();
Lng32 opArity = op->getArity();
for (Lng32 childIndex=0; childIndex<opArity; childIndex++)
{
Context * childContext =
pws->getChildContext(childIndex,pws->getLatestPlan());
if (childContext)
CURRCONTEXT_OPTDEBUG->showTree(NULL,childContext->getSolution(),
" *** ",TRUE);
}
}
if (currentPlanIsAcceptable(pws->getLatestPlan(),rppForMe))
{
#ifdef _DEBUG
if(cost)
{
DBGLOGMSG(" *** is acceptable *** ");
}
#endif
pws->updateBestCost( cost, pws->getLatestPlan() );
}
else if (cost)
{
#ifdef _DEBUG
DBGLOGMSG(" *** is not acceptable *** ");
#endif
delete cost;
}
} // I optimized all my children or I am a leaf
// ---------------------------------------------------------------------
// Iterator over child Contexts.
// It creates a child Context, assigns a cost limit and returns the
// newly created Context to the caller iff the computed cost limit
// can yield a feasible solution.
// ---------------------------------------------------------------------
NABoolean done = FALSE;
if ( NOT CURRSTMT_OPTDEFAULTS->optimizerPruning() OR getArity() > 0 )
// need createContextForAChild() call only if there is a child
while (NOT done)
{
// -----------------------------------------------------------------
// Create a Context for a child.
// Either create a new Context or reuse an existing one for
// for optimizing a specific child. The Context is also remembered
// in the PlanWorkSpace.
// The method returns a number that identifies the child for which
// a context was created.
// -----------------------------------------------------------------
result = createContextForAChild(myContext, pws, childIndex);
// -----------------------------------------------------------------
// If the computed cost limit cannot yield a feasible solution,
// iterate and create another context for the child
// -----------------------------------------------------------------
if (result AND checkCostLimit)
{
if (result->isCostLimitFeasible())
done = TRUE;
else
{
if ( CURRSTMT_OPTDEFAULTS->OPHuseFailedPlanCost() )
{
if ( result->hasSolution() )
// Here we set the costLimitExceeded_ flag to FALSE to be
// able to reuse this solution. Otherwise the call to the
// hasOptimalSolution() method, which is used in many
// places in the code, would return FALSE and we would
// ignore this existing solution in the future.
// This is a simple communication from the
// createContextForAChild() method. If we didn't clear this
// flag we would follow the if branch above and wouldn't
// mark the latest context as violating the costLimit.
// Now this flag, when set, affects the logic and allows
// us to prune earlier, for example, when one child
// of a join already has a solution that exceeds
// the current costLimit.
result->clearFailedStatus();
if ( pws->getPlanChildCount() == getArity() )
done = TRUE;
}
pws->markLatestContextAsViolatesCostLimit();
DBGLOGMSGCNTXT(" *** Created context not CLFeasible ",result);
}
}
else
done = TRUE;
} // end while
// ---------------------------------------------------------------------
// If a Context was created
// ---------------------------------------------------------------------
if (result)
{
// -----------------------------------------------------------------
// Obtain a guidance for optimizing the child from the rule.
// -----------------------------------------------------------------
if (rule != NULL)
guidanceForChild = rule->guidanceForOptimizingChild
(guidance, myContext, childIndex);
else
guidanceForChild = NULL;
}
else
{
// -----------------------------------------------------------------
// If either one of the children did not yield an optimal
// solution and we are unable to create a plan for this
// operator, return NULL.
// -----------------------------------------------------------------
if (NOT findOptimalSolution(myContext, pws)) return NULL;
// -----------------------------------------------------------------
// Check cost limits (perform pruning within branch-and-bound).
// -----------------------------------------------------------------
if (checkCostLimit)
{
// -------------------------------------------------------------
// Compare the cost for performing this operation with the cost
// limit using the cost weight that may be specified in the
// required physical properties, if any.
//
// If the cost for this operation has reached or exceeded
// the cost limit, return NULL to signal that no plan can
// be created.
// -------------------------------------------------------------
if ( myContext->getCostLimit()->compareWithPlanCost
(myContext->getPlan(), rppForMe) == LESS )
{
CURRSTMT_OPTGLOBALS->pruned_tasks_count += (Int32)pws->getCountOfChildContexts();
DBGLOGMSGCNTXT(" *** CLExceeded in createPlan() ",myContext);
return NULL;
}
}
// -----------------------------------------------------------------
// If we haven't done so already, synthesize properties now.
// The only operator who should not already have spp is RelRoot.
// -----------------------------------------------------------------
CascadesPlan* myPlan = myContext->getPlan();
if (myPlan->getPhysicalProperty() == NULL)
{
Lng32 planNumber = pws->getBestPlanSoFar();
myPlan->setPhysicalProperty(synthPhysicalProperty(myContext,
planNumber,
pws)
);
}
// -----------------------------------------------------------------
// NOTE: At this point we have found a plan but we do not know
// whether the plan actually satisfies the myContext. This check is
// done in the search engine.
// -----------------------------------------------------------------
}
return result;
} // RelExpr::createPlan()
//<pb>
// -----------------------------------------------------------------------
// RelExpr::isParHeuristic4Feasible()
// Heuristic4 (if it is ON) will try to avoid creating a parallel plan
// for a "small" non-partitioned table, base or intermediate, by checking
// estimated logical properties such as the number of rows coming from the
// left and right child of the expression, to make a decision about
// parallelism the way it's done in the okToAttemptESPParallelism() function.
// Note that heuristic4 won't prevent a parallel plan for a GroupBy operator
// if its child is bigger than a threshold.
// The size check will be done in the preventPushDownPartReq() function. It
// will also check some partitioning (physical) properties of the children's
// plans, if there are any, using the getFirstPlan() and
// getPhysicalProperty()->isPartitioned() functions.
// We don't want to prevent creation of a parallel plan for an Exchange
// operator, or if we are looking at the right child of a nested join (because
// the Exchange operator cannot be used in this case to enforce partitioning)
// or a correlated subquery. In both cases a non-empty histogram is passed
// to this expression through the input logical properties.
// We don't want to abort creating a plan if the Context requires exactly
// one partition or replication of the table, which might be necessary
// for the right child of a nested or hash join.
NABoolean RelExpr::isParHeuristic4Feasible(Context* myContext,
const ReqdPhysicalProperty* rppForMe)
{
EstLogPropSharedPtr inLogProp = myContext->getInputLogProp();
if ( rppForMe AND
CURRSTMT_OPTDEFAULTS->parallelHeuristic4() AND
NOT CURRSTMT_OPTDEFAULTS->pushDownDP2Requested() AND
( getOperatorType() != REL_EXCHANGE ) AND
( (inLogProp->getColStats()).entries() == 0 )
)
{
NABoolean conditionToAbort = FALSE;
const PartitioningRequirement* pr =
rppForMe->getPartitioningRequirement();
if ( pr )
{
conditionToAbort = NOT ( (pr->getCountOfPartitions()<2)
OR pr->isRequirementReplicateViaBroadcast()
OR pr->isRequirementReplicateNoBroadcast() );
}
else
{
const LogicalPartitioningRequirement *lpr =
rppForMe->getLogicalPartRequirement();
if ( lpr )
{
const PartitioningRequirement* logreq = lpr->getLogReq();
conditionToAbort = NOT ( (logreq->getCountOfPartitions()<2)
OR logreq->isRequirementReplicateViaBroadcast()
OR logreq->isRequirementReplicateNoBroadcast() );
}
}
if ( conditionToAbort AND preventPushDownPartReq(rppForMe,inLogProp) )
{
return TRUE;
}
}
return FALSE;
}
//<pb>
// -----------------------------------------------------------------------
// RelExpr::createContextForAChild()
// Since the normalizer transforms bushy trees to left linear
// trees, this default implementation optimizes the children from
// right to left. The idea is that optimization for the right
// child (in general) is cheaper than optimizing the left child.
// Therefore, perform the "easy" step first and come up with a
// cost limit earlier.
// -----------------------------------------------------------------------
Context * RelExpr::createContextForAChild(Context* myContext,
PlanWorkSpace* pws,
Lng32& childIndex)
{
const ReqdPhysicalProperty* rppForMe =
myContext->getReqdPhysicalProperty();
// ---------------------------------------------------------------------
// The default implementation creates exactly one Context per child.
// EXAMPLE: If arity = 2, contexts are created in the following order:
// childIndex = 1, 0
// ---------------------------------------------------------------------
childIndex = getArity() - pws->getCountOfChildContexts() - 1;
// return for RelExprs with arity 0
if (childIndex < 0)
return NULL;
RequirementGenerator rg(child(childIndex), rppForMe);
if (childIndex > 0)
{
// Don't pass any of the sort, arrangement, or partitioning
// requirements to the other children, assume that only the left
// child needs to satisfy them (Union doesn't use this code).
rg.removeSortKey();
rg.removeArrangement();
rg.removeAllPartitioningRequirements();
}
if (NOT pws->isEmpty())
{
const Context* childContext = pws->getLatestChildContext();
// ------------------------------------------------------------------
// Cost limit exceeded or got no solution? Give up since we only
// try one plan.
// ------------------------------------------------------------------
if(NOT (childContext AND childContext->hasOptimalSolution()))
return NULL;
if (NOT pws->isLatestContextWithinCostLimit())
return NULL;
}
if (NOT rg.checkFeasibility())
return NULL;
Lng32 planNumber = 0;
// ---------------------------------------------------------------------
// Compute the cost limit to be applied to the child.
// ---------------------------------------------------------------------
CostLimit* costLimit = computeCostLimit(myContext, pws);
// ---------------------------------------------------------------------
// Get a Context for optimizing the child.
// Search for an existing Context in the CascadesGroup to which the
// child belongs that requires the same properties as myContext.
// Reuse it, if found. Otherwise, create a new Context that contains
// the same rpp and input log prop as in myContext.
// ---------------------------------------------------------------------
Context* result = shareContext(childIndex, rg.produceRequirement(),
myContext->getInputPhysicalProperty(),
costLimit, myContext,
myContext->getInputLogProp());
// ---------------------------------------------------------------------
// Store the Context for the child in the PlanWorkSpace.
// ---------------------------------------------------------------------
pws->storeChildContext(childIndex, planNumber, result);
//--------------------------------------------------------------------------
// Only need to keep track of plan child count for operators with more than
// one child, since only these operators make use of partial plan costing.
//--------------------------------------------------------------------------
if (getArity() > 1)
{
pws->incPlanChildCount();
}
return result;
} // RelExpr::createContextForAChild()
//<pb>
//==============================================================================
// Using the cost limit from a specified context as a starting point, produce
// a new cost limit for a child by accumulating the parent preliminary cost and
// the known children cost from a specified plan workspace.
//
//
// Input:
// myContext -- specified context.
//
// pws -- specified plan workspace.
//
// Output:
// none
//
// Return:
// Copy of accumulated cost limit. NULL if context contains no cost limit.
//
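// Informally (an illustrative reading, not a formal contract): if the
// parent's limit is L, and this operator's preliminary cost plus the
// cost of its already-optimized children amounts to C, then the next
// child effectively has "L minus C" left to spend.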
//==============================================================================
CostLimit*
RelExpr::computeCostLimit(const Context* myContext,
PlanWorkSpace* pws)
{
//----------------------------------------------------------------------------
// Context contains no cost limit. Interpret this as an infinite cost limit.
// Returning NULL indicates an infinite cost limit.
//----------------------------------------------------------------------------
if (myContext->getCostLimit() == NULL)
{
return NULL;
}
//---------------------------------------------------
// Create copy of cost limit from specified context.
//---------------------------------------------------
CostLimit* costLimit = myContext->getCostLimit()->copy();
//---------------------------------------------------------------------
// Accumulate this operator's preliminary cost into the ancestor cost.
//---------------------------------------------------------------------
Cost* tempCost = pws->getOperatorCost();
costLimit->ancestorAccum(*tempCost, myContext->getReqdPhysicalProperty());
//-------------------------------------------------------------------------
// Accumulate this operator's known children's cost into the sibling cost.
//-------------------------------------------------------------------------
tempCost = pws->getKnownChildrenCost();
if (tempCost != 0)
{
costLimit->otherKinAccum(*tempCost);
}
//---------------------------------------------------------------------------
// Use best plan so far for this operator to try and reduce the cost limit.
//---------------------------------------------------------------------------
tempCost = pws->getBestRollUpCostSoFar();
if (tempCost != NULL)
{
const ReqdPhysicalProperty* rpp = myContext->getReqdPhysicalProperty();
costLimit->tryToReduce(*tempCost,rpp);
}
return costLimit;
} // RelExpr::computeCostLimit()
//<pb>
// -----------------------------------------------------------------------
// RelExpr::findOptimalSolution()
// It associates one of the Contexts that was created for each child
// of this operator with the Context for this operator.
// -----------------------------------------------------------------------
NABoolean RelExpr::findOptimalSolution(Context * myContext,
PlanWorkSpace * pws)
{
// ---------------------------------------------------------------------
// The code originally here has been moved to a method with the same
// name under the class PlanWorkSpace. The reasons for the move are:
//
// 1. Since the plans are stored in pws, it's more natural to place the
// method under pws.
//
// 2. Since pws knows how many plans it has as well as how many children
// there are for the operator, the generic implementation there is
// more general and can be applicable in more cases. This reduces the
// number of subclasses of RelExpr which need to refine the method.
// Now, a refinement is needed only if there is processing that must
// occur after the optimal plan is chosen, and this processing is
// dependent on the operator type.
// ---------------------------------------------------------------------
// Plan # is only an output param, initialize it to an impossible value.
Lng32 planNumber = -1;
return pws->findOptimalSolution(planNumber);
} // RelExpr::findOptimalSolution()
// -----------------------------------------------------------------------
// RelExpr::currentPlanIsAcceptable()
//
// The virtual implementations of this method ensure the current plan
// for the current operator is acceptable, i.e. it is a valid plan
// and it satisfies any forced plan constraints.
// This default implementation assumes all plans for this operator are
// acceptable.
// -----------------------------------------------------------------------
NABoolean RelExpr::currentPlanIsAcceptable(Lng32 planNo,
const ReqdPhysicalProperty* const rppForMe) const
{
return TRUE;
} // RelExpr::currentPlanIsAcceptable()
//<pb>
//==============================================================================
// Synthesize physical properties for this operator's current plan extracted
// from a specified context.
//
// Input:
// myContext -- specified context containing this operator's current plan.
//
// planNumber -- plan's number within the plan workspace. Used in derived
// versions of this member function for synthesizing
// partitioning functions. Unused in this base class version.
//
// Output:
// none
//
// Return:
// Pointer to this operator's synthesized physical properties.
//
//==============================================================================
PhysicalProperty*
RelExpr::synthPhysicalProperty(const Context* myContext,
const Lng32 planNumber,
PlanWorkSpace *pws)
{
// ---------------------------------------------------------------------
// By the time synthPhysicalProperty() is called, the plan whose spp is
// to be synthesized is already stored as currentPlan_ in myContext.
// ---------------------------------------------------------------------
CascadesPlan* plan = myContext->getPlan();
CMPASSERT(plan != NULL);
Lng32 currentCountOfCPUs = 0;
// ---------------------------------------------------------------------
// Get the count of CPUs from child0, if it exists.
// ---------------------------------------------------------------------
if(getArity() != 0)
{
const Context* childContext = plan->getContextForChild(0);
CMPASSERT(childContext != NULL);
const PhysicalProperty* sppForChild =
childContext->getPhysicalPropertyForSolution();
currentCountOfCPUs = sppForChild->getCurrentCountOfCPUs();
CMPASSERT(currentCountOfCPUs >= 1);
}
else
// Operator has no child. Cost should be available for plan.
// Get the count of CPUs from there.
{
currentCountOfCPUs = plan->getRollUpCost()->getCountOfCPUs();
CMPASSERT(currentCountOfCPUs >= 1);
}
// ---------------------------------------------------------------------
// The only physical property which this default implementation can
// synthesize is the number of CPUs.
// ---------------------------------------------------------------------
PhysicalProperty* sppForMe =
new (CmpCommon::statementHeap()) PhysicalProperty();
sppForMe->setCurrentCountOfCPUs(currentCountOfCPUs);
// remove anything that's not covered by the group attributes
sppForMe->enforceCoverageByGroupAttributes (getGroupAttr()) ;
return sppForMe;
} // RelExpr::synthPhysicalProperty()
//<pb>
DefaultToken RelExpr::getParallelControlSettings (
const ReqdPhysicalProperty* const rppForMe, /*IN*/
Lng32& numOfESPs, /*OUT*/
float& allowedDeviation, /*OUT*/
NABoolean& numOfESPsForced /*OUT*/) const
{
// This default implementation does not handle forcing the number
// of ESPs.
numOfESPsForced = FALSE;
// Get the value from the defaults table that specifies when
// the optimizer should attempt ESP parallelism.
DefaultToken attESPPara = CURRSTMT_OPTDEFAULTS->attemptESPParallelism();
if ( rppForMe->getCountOfPipelines() == 1 )
{
// If there is only 1 cpu or the user doesn't want the optimizer
// to try ESP parallelism for any operators, set the result to OFF.
attESPPara = DF_OFF;
}
return attESPPara;
} // RelExpr::getParallelControlSettings()
NABoolean RelExpr::okToAttemptESPParallelism (
const Context* myContext, /*IN*/
PlanWorkSpace* pws, /*IN*/
Lng32& numOfESPs, /*OUT*/
float& allowedDeviation, /*OUT*/
NABoolean& numOfESPsForced /*OUT*/)
{
const ReqdPhysicalProperty* rppForMe =
myContext->getReqdPhysicalProperty();
// CS or REPLICA: do not consider ESP parallelism if we are in DP2.
if ( rppForMe->executeInDP2() )
return FALSE;
// A rowset iterator cannot be ESP-parallelized. The counting logic for
// rowNumber is not designed to work in nodes that execute with ESP
// parallelism.
if (isRowsetIterator())
return FALSE;
NABoolean result = FALSE;
DefaultToken parallelControlSettings =
getParallelControlSettings(rppForMe,
numOfESPs,
allowedDeviation,
numOfESPsForced);
if (parallelControlSettings == DF_OFF)
{
result = FALSE;
}
else if ( (parallelControlSettings == DF_MAXIMUM) AND
CURRSTMT_OPTDEFAULTS->maxParallelismIsFeasible()
)
{
numOfESPs = rppForMe->getCountOfPipelines();
// currently, numberOfPartitionsDeviation_ is set to 0 in
// OptDefaults when ATTEMPT_ESP_PARALLELISM is 'MAXIMUM'
allowedDeviation = CURRSTMT_OPTDEFAULTS->numberOfPartitionsDeviation();
// allow deviation by default
if (CmpCommon::getDefault(COMP_BOOL_61) == DF_OFF)
{
EstLogPropSharedPtr inLogProp = myContext->getInputLogProp();
EstLogPropSharedPtr child0OutputLogProp = child(0).outputLogProp(inLogProp);
const CostScalar child0RowCount =
(child0OutputLogProp->getResultCardinality()).minCsOne();
if ( child0RowCount.getCeiling() <
MINOF(numOfESPs,CURRSTMT_OPTDEFAULTS->numberOfRowsParallelThreshold())
)
{
// Fewer outer table rows than pipelines - allow one or more parts
allowedDeviation = 1.0;
}
}
result = TRUE;
}
else if (parallelControlSettings == DF_ON)
{
// Either user wants to try ESP parallelism for all operators,
// or they are forcing the number of ESPs for this operator.
// Set the result to TRUE. If the number of ESPs is not being forced,
// set the number of ESPs that should be used to the maximum number
// with the allowable deviation percentage from the defaults table.
// NEW HEURISTIC: If there are fewer outer table rows than the number
// of pipelines, then set the deviation to allow any level of natural
// partitioning, including one. This is because we don't want to
// repartition so few rows to get more parallelism, since we would
// end up with a lot of ESPs doing nothing.
if (NOT numOfESPsForced)
{
if (getArity() > 0)
{
// Determine the number of outer table rows
EstLogPropSharedPtr inLogProp = myContext->getInputLogProp();
EstLogPropSharedPtr child0OutputLogProp = child(0).outputLogProp(inLogProp);
const CostScalar child0RowCount =
(child0OutputLogProp->getResultCardinality()).minCsOne();
numOfESPs = rppForMe->getCountOfPipelines();
if (child0RowCount.getCeiling() < numOfESPs)
{
// Fewer outer table rows than pipelines - allow one or more parts
allowedDeviation = 1.0;
}
else
{
allowedDeviation = CURRSTMT_OPTDEFAULTS->numberOfPartitionsDeviation();
}
}
else
allowedDeviation = CURRSTMT_OPTDEFAULTS->numberOfPartitionsDeviation();
} // end if number of ESPs not forced
result = TRUE;
}
else
{
// Otherwise, the user must have specified "SYSTEM" for the
// ATTEMPT_ESP_PARALLELISM default. This means it is up to the
// optimizer to decide.
// This default implementation will return TRUE if the number of
// rows returned by child(0) exceeds the threshold from the
// defaults table. The recommended number of ESPs is also computed
// to be 1 process per <threshold> number of rows. This is then
// used to indicate the MINIMUM number of ESPs that will be
// acceptable. This is done by setting the allowable deviation
// to a percentage of the maximum number of partitions such
// that the recommended number of partitions is the lowest
// number allowed. We make the recommended number of partitions
// a minimum instead of a hard requirement because we don't
// want to be forced to repartition the child just to get "less"
// parallelism.
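// A worked example with illustrative numbers: if the threshold is
// 1000 rows, the child returns 10,000 rows, and there are 16 pipelines,
// the recommended minimum is ceil(10000/1000) = 10 ESPs, so the
// deviation below comes out to roughly 1 - 10/16 = 0.376, which makes
// 10 the smallest acceptable number of partitions out of the 16 maximum.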
EstLogPropSharedPtr inLogProp = myContext->getInputLogProp();
EstLogPropSharedPtr child0OutputLogProp = child(0).outputLogProp(inLogProp);
CostScalar rowCount =
(child0OutputLogProp->getResultCardinality()).minCsOne();
// This is to test better parallelism by taking into account not only
// child0's cardinality but also this operator's cardinality. This
// could be important for joins, but it seems feasible to have it in
// the default method so it can be used, for example, for TRANSPOSE.
if(CmpCommon::getDefault(COMP_BOOL_125) == DF_ON)
{
rowCount += (getGroupAttr()->outputLogProp(inLogProp)->
getResultCardinality()).minCsOne();
}
const CostScalar numberOfRowsThreshold =
CURRSTMT_OPTDEFAULTS->numberOfRowsParallelThreshold();
if (rowCount > numberOfRowsThreshold)
{
numOfESPs = rppForMe->getCountOfPipelines();
allowedDeviation = (float) MAXOF(1.001 -
ceil((rowCount / numberOfRowsThreshold).value()) / numOfESPs, 0);
result = TRUE;
}
else
{
result = FALSE;
}
} // end if the user let the optimizer decide
return result;
} // RelExpr::okToAttemptESPParallelism()
//==============================================================================
// Returns TRUE if only an enforcer operator (either exchange or sort)
// can satisfy the required physical properties. Otherwise, it returns FALSE.
//==============================================================================
NABoolean RelExpr::rppRequiresEnforcer
(const ReqdPhysicalProperty* const rppForMe) const
{
PartitioningRequirement* partReqForMe =
rppForMe->getPartitioningRequirement();
// Only an exchange operator can satisfy a replicate via broadcast
// partitioning requirement.
if ((partReqForMe != NULL) AND
(
partReqForMe->isRequirementReplicateViaBroadcast()
OR partReqForMe->isRequirementSkewBusterBroadcast()
)
)
return TRUE;
SortOrderTypeEnum sortOrderTypeReq =
rppForMe->getSortOrderTypeReq();
// If a sort order type requirement of ESP_VIA_SORT exists,
// then return TRUE now. Only a sort operator can satisfy
// this requirement. The sort rule will be allowed to succeed
// for this requirement. The exchange rule is also allowed to
// succeed for this requirement, to allow for parallel sorts. All
// other implementation rules will fail. This means that when an
// operator issues an ESP_VIA_SORT requirement the sort to satisfy
// this requirement will be restricted to being the immediate child
// of the operator issuing the requirement.
if (sortOrderTypeReq == ESP_VIA_SORT_SOT)
return TRUE;
return FALSE;
} // RelExpr::rppRequiresEnforcer()
// -----------------------------------------------------------------------
// Called at precodegen time. The executor requires that all operators
// in the same ESP process set use the same partition input values (PIVs),
// and that all partitioning key predicates and partitioning expressions
// that are used are based on this same set of PIVs.
// This method ensures that this is so by checking if this operator's PIVs
// are the same as its child's. If not, it makes the child's PIVs, partitioning
// key predicates, and partitioning expression the same as the parent's.
// For some operators, the parent partitioning key predicates and
// partitioning expression might need to be mapped first before assigning
// them to the child.
// Most of the time, the PIVs will be the same. They could only be different
// if the child's plan was stolen from another context.
// -----------------------------------------------------------------------
void RelExpr::replacePivs()
{
const PhysicalProperty* sppForMe = getPhysicalProperty();
CMPASSERT(sppForMe != NULL);
const PartitioningFunction* myPartFunc =
sppForMe->getPartitioningFunction();
// The only operator who should not have a partitioning function is
// RelRoot (although I don't know why it doesn't). If the operator does
// not have a partitioning function then there is nothing to do, so
// just return.
if (myPartFunc == NULL)
return;
const ValueIdSet &myPartKeyPreds = myPartFunc->getPartitioningKeyPredicates();
const ValueIdSet &myPivs = myPartFunc->getPartitionInputValues();
const ValueIdList &myPivLayout = myPartFunc->getPartitionInputValuesLayout();
if (myPivs.entries() == 0)
return;
// Process all children.
for (Lng32 childIndex = 0; childIndex < getArity(); childIndex++)
{
PhysicalProperty* sppOfChild =
(PhysicalProperty*)(child(childIndex)->getPhysicalProperty());
CMPASSERT(sppOfChild != NULL);
PartitioningFunction* childPartFunc =
(PartitioningFunction*)(sppOfChild->getPartitioningFunction());
CMPASSERT(childPartFunc != NULL);
const ValueIdSet &childPartKeyPreds =
childPartFunc->getPartitioningKeyPredicates();
const ValueIdList &childPivLayout =
childPartFunc->getPartitionInputValuesLayout();
// The parent's mapped partitioning function and the child's MUST be
// compatible, or the child has a replication partitioning function, or
// the parent is a DP2 Exchange, the child has a logPhysPartitioning
// function, and its logical partitioning function is equivalent to the
// parent's mapped partitioning function.
// "compatible" means that the functions can use the same set of PIVs
// to produce valid values. For example, two HASH2 part functions are
// compatible if they have the same number of partitions. Two range
// part functions are compatible if they have the same number of
// key columns and the data types of the keys match.
// Check if I have some pivs and the child has some pivs and they
// are different. Note that it could be possible for this operator
// to have some pivs and the child to not have any, and yet this
// is ok. For example, the right child of a replicated join would
// not have any pivs and yet the join itself would. This is ok
// and in this case there is nothing to do - you would not want
// to give the child pivs that it does not need.
if (NOT childPivLayout.isEmpty() AND
!(myPivLayout == childPivLayout))
{
// Child's pivs exist and are different from mine.
// Make the child pivs and parent pivs the same and map
// all item expressions in the child that refer to them
// to the new PIVs.
ValueIdMap pivMap(myPartFunc->getPartitionInputValuesLayout(),
childPartFunc->getPartitionInputValuesLayout());
ValueIdSet rewrittenChildPartKeyPreds;
// check for "compatible" partitioning functions
CMPASSERT(myPartFunc->getPartitionInputValuesLayout().entries() ==
childPartFunc->getPartitionInputValuesLayout().entries());
CMPASSERT(myPartFunc->getPartitioningFunctionType() ==
childPartFunc->getPartitioningFunctionType());
CMPASSERT(myPartFunc->getCountOfPartitions() ==
childPartFunc->getCountOfPartitions());
// could also check column types of range part. func. but
// since that's more complicated we won't check for now
pivMap.mapValueIdSetDown(childPartKeyPreds, rewrittenChildPartKeyPreds);
// Update the child's partitioning function. Note that since replacePivs()
// is called before calling preCodeGen() on the child, we do this before
// any predicates that use PIVs are generated in the child. So, no other
// places in the child need to be updated.
childPartFunc->replacePivs(
myPivLayout,
rewrittenChildPartKeyPreds);
} // end if my part key preds are not the same as the child's
} // end for all children
} // end RelExpr::replacePivs()
// ---------------------------------------------------------------------
// Performs mapping on the partitioning function, from this
// operator to the designated child, if the operator has/requires mapping.
// Note that this default implementation does no mapping.
// ---------------------------------------------------------------------
PartitioningFunction* RelExpr::mapPartitioningFunction(
const PartitioningFunction* partFunc,
NABoolean rewriteForChild0)
{
return (PartitioningFunction*)partFunc;
} // end RelExpr::mapPartitioningFunction()
//<pb>
// -----------------------------------------------------------------------
// member functions for class Sort
// -----------------------------------------------------------------------
NABoolean Sort::isBigMemoryOperator(const PlanWorkSpace* pws,
const Lng32 planNumber)
{
const Context* context = pws->getContext();
// Get addressability to the defaults table and extract default memory.
// CURRSTMT_OPTDEFAULTS->getMemoryLimitPerCPU() should not be called since it
// can be set to > 20MB in some cases, for example if the query tree contains
// a REL_GROUPBY operator. That would lead to the assumption that even BMO
// plans can be sorted in memory (underestimating the cost), but the
// executor uses internal sorts only if the sort table size is < 20MB.
NADefaults &defs = ActiveSchemaDB()->getDefaults();
double memoryLimitPerCPU = defs.getAsDouble(MEMORY_UNITS_SIZE);
// ---------------------------------------------------------------------
// Without memory constraints, the sort operator would like to sort all
// the rows internally.
// ---------------------------------------------------------------------
const ReqdPhysicalProperty* rppForMe = context->getReqdPhysicalProperty();
// Start off assuming that the sort will use all available CPUs.
Lng32 cpuCount = rppForMe->getCountOfAvailableCPUs();
PartitioningRequirement* partReq = rppForMe->getPartitioningRequirement();
const PhysicalProperty* spp = context->getPlan()->getPhysicalProperty();
Lng32 numOfStreams;
// If the physical properties are available, then this means we
// are on the way back up the tree. Get the actual level of
// parallelism from the spp to determine if the number of cpus we
// are using are less than the maximum number available.
if (spp != NULL)
{
PartitioningFunction* partFunc = spp->getPartitioningFunction();
numOfStreams = partFunc->getCountOfPartitions();
if (numOfStreams < cpuCount)
cpuCount = numOfStreams;
}
else
if ((partReq != NULL) AND
(partReq->getCountOfPartitions() != ANY_NUMBER_OF_PARTITIONS))
{
// If there is a partitioning requirement, then this may limit
// the number of CPUs that can be used.
numOfStreams = partReq->getCountOfPartitions();
if (numOfStreams < cpuCount)
cpuCount = numOfStreams;
}
EstLogPropSharedPtr inLogProp = context->getInputLogProp();
const double probeCount =
MAXOF(1.,inLogProp->getResultCardinality().value());
EstLogPropSharedPtr child0LogProp = child(0).outputLogProp(inLogProp);
const double child0RowCount =
MAXOF(1.,child0LogProp->getResultCardinality().value());
const double rowsPerCpu = MAXOF(1.0f,(child0RowCount / cpuCount));
const double rowsPerCpuPerProbe = MAXOF(1.0f,(rowsPerCpu / probeCount));
const GroupAttributes* groupAttr = getGroupAttr();
const ValueIdSet& outputVis = groupAttr->getCharacteristicOutputs();
//------------------------------------------------------------------------
// Produce the final Sort Key before determining whether or not this sort
// should be a big memory operator.
//------------------------------------------------------------------------
produceFinalSortKey();
ValueIdSet sortKeyVis;
sortKeyVis.insertList(sortKey_);
const Lng32 keyLength (sortKeyVis.getRowLength());
const Lng32 rowLength (keyLength + outputVis.getRowLength());
// The executor's row length includes the size of the tuple descriptor
// (12 bytes). We need to account for this while determining whether the
// plan is a BMO or not; otherwise the optimizer treats some queries as
// non-BMO while the executor considers them BMOs.
const double fileSizePerCpu =
((rowsPerCpuPerProbe * (rowLength + 12)) / 1024. );
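// (The division by 1024 expresses the per-CPU input size in KB;
// presumably memoryLimitPerCPU, taken from MEMORY_UNITS_SIZE above,
// is expressed in the same unit, since the two are compared directly.)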
if (spp != NULL &&
CmpCommon::getDefault(COMP_BOOL_51) == DF_ON
)
{
CurrentFragmentBigMemoryProperty * bigMemoryProperty =
new (CmpCommon::statementHeap())
CurrentFragmentBigMemoryProperty();
((PhysicalProperty *)spp)->setBigMemoryEstimationProperty(bigMemoryProperty);
bigMemoryProperty->setCurrentFileSize(fileSizePerCpu);
bigMemoryProperty->setOperatorType(getOperatorType());
// get cumulative file size of the fragment; get the child spp??
PhysicalProperty *childSpp =
(PhysicalProperty *) context->getPhysicalPropertyOfSolutionForChild(0);
if (childSpp != NULL)
{
CurrentFragmentBigMemoryProperty * memProp =
(CurrentFragmentBigMemoryProperty *) childSpp->
getBigMemoryEstimationProperty();
if (memProp != NULL)
{
double childCumulativeMemSize = memProp->getCumulativeFileSize();
bigMemoryProperty->incrementCumulativeMemSize(childCumulativeMemSize);
memoryLimitPerCPU -= childCumulativeMemSize;
}
}
}
return (fileSizePerCpu >= memoryLimitPerCPU);
}
//<pb>
// -----------------------------------------------------------------------
// Sort::costMethod()
// Obtain a pointer to a CostMethod object providing access
// to the cost estimation functions for nodes of this type.
// -----------------------------------------------------------------------
CostMethod*
Sort::costMethod() const
{
static THREAD_P CostMethodSort *m = NULL;
if (m == NULL)
m = new (GetCliGlobals()->exCollHeap()) CostMethodSort();
return m;
} // Sort::costMethod()
//<pb>
// -----------------------------------------------------------------------
// sort::createContextForAChild() could be redefined to try different
// optimization goals for subsets (prefixes) of the actually required
// sort key. Right now, we always perform a full sort in the sort node
// without making use of partial orders of the child.
// -----------------------------------------------------------------------
Context* Sort::createContextForAChild(Context* myContext,
PlanWorkSpace* pws,
Lng32& childIndex)
{
childIndex = 0;
const ReqdPhysicalProperty* rppForMe =
myContext->getReqdPhysicalProperty();
Lng32 childNumPartsRequirement = ANY_NUMBER_OF_PARTITIONS;
float childNumPartsAllowedDeviation = 0.0;
NABoolean numOfESPsForced = FALSE;
// If one Context has been generated for each child, return NULL
// to signal completion.
if (pws->getCountOfChildContexts() == getArity())
return NULL;
// ---------------------------------------------------------------------
// Construct a new required physical property vector for my child
// subtree that replicates the properties that are required of me.
// The new property vector must not contain any requirements
// for arrangements or orderings because I will be fulfilling them.
// ---------------------------------------------------------------------
RequirementGenerator rg(child(0),rppForMe);
if (myContext->requiresOrder())
{
rg.removeSortKey();
rg.removeArrangement();
}
// If ESP parallelism seems like a good idea, give it a try.
// Note that if this conflicts with the parent requirement, then
// "makeNumOfPartsFeasible" will change it to something that
// doesn't conflict with the parent.
if (okToAttemptESPParallelism(myContext,
pws,
childNumPartsRequirement,
childNumPartsAllowedDeviation,
numOfESPsForced))
{
if (NOT numOfESPsForced)
rg.makeNumOfPartsFeasible(childNumPartsRequirement,
&childNumPartsAllowedDeviation);
rg.addNumOfPartitions(childNumPartsRequirement,
childNumPartsAllowedDeviation);
} // end if ok to try parallelism
if (NOT rg.checkFeasibility())
return NULL;
// ---------------------------------------------------------------------
// Compute the cost limit to be applied to the child.
// ---------------------------------------------------------------------
CostLimit* costLimit = computeCostLimit(myContext, pws);
// ---------------------------------------------------------------------
// Get a Context for optimizing the child.
// Search for an existing Context in the CascadesGroup to which the
// child belongs that requires the same properties as those in
// rppForChild. Reuse it, if found. Otherwise, create a new Context
// that contains rppForChild as the required physical properties.
// ---------------------------------------------------------------------
Context* result = shareContext(childIndex, rg.produceRequirement(),
myContext->getInputPhysicalProperty(),
costLimit, myContext,
myContext->getInputLogProp());
// ---------------------------------------------------------------------
// Store the Context for the child in the PlanWorkSpace.
// ---------------------------------------------------------------------
pws->storeChildContext(childIndex, 0, result);
return result;
} // Sort::createContextForAChild()
//<pb>
//==============================================================================
// Synthesize physical properties for sort operator's current plan extracted
// from a specified context.
//
// Input:
// myContext -- specified context containing this operator's current plan.
//
// planNumber -- plan's number within the plan workspace. Used optionally for
// synthesizing partitioning functions but unused in this
// derived version of synthPhysicalProperty().
//
// Output:
// none
//
// Return:
// Pointer to this operator's synthesized physical properties.
//
//==============================================================================
PhysicalProperty*
Sort::synthPhysicalProperty(const Context* myContext,
const Lng32 planNumber,
PlanWorkSpace *pws)
{
// ---------------------------------------------------------------------
// Call the default implementation (RelExpr::synthPhysicalProperty())
// to synthesize the properties on the number of cpus.
// ---------------------------------------------------------------------
PhysicalProperty* sppTemp = RelExpr::synthPhysicalProperty(myContext,
planNumber,
pws);
if (CmpCommon::getDefault(COMP_BOOL_86) == DF_ON)
{
synthPartialSortKeyFromChild(myContext);
}
// ---------------------------------------------------------------------
// Replace the sort key for my spp.
// ---------------------------------------------------------------------
PhysicalProperty* sppForMe =
new (CmpCommon::statementHeap())
PhysicalProperty(
*(myContext->getPhysicalPropertyOfSolutionForChild(0)),
sortKey_,
ESP_VIA_SORT_SOT,
NULL // No Dp2 sort order part func
);
sppForMe->setCurrentCountOfCPUs(sppTemp->getCurrentCountOfCPUs());
sppForMe->setDataSourceEnum(SOURCE_TRANSIENT_TABLE);
// remove anything that's not covered by the group attributes
sppForMe->enforceCoverageByGroupAttributes (getGroupAttr()) ;
delete sppTemp;
return sppForMe;
} // Sort::synthPhysicalProperty()
//========================================================================
// See if child is already sorted on a prefix; for an arrangement use
// existing child sort order if any
//
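// For example (illustrative): if the child is already sorted on (a, b)
// and the parent requires the arrangement {a, b, c}, the prefix (a, b)
// can be kept as-is and only c has to be sorted on top of it.
//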
//========================================================================
void Sort::synthPartialSortKeyFromChild(const Context* myContext)
{
// child properties: is child sorted?
const PhysicalProperty *sppOfChild =
myContext->getPhysicalPropertyOfSolutionForChild(0);
PartialSortKeyFromChild_.clear();
ValueIdList childSortKey = (ValueIdList) sppOfChild->getSortKey();
// if the child is not sorted there is nothing to do
if (childSortKey.isEmpty()) return;
// Properties required from me
const ReqdPhysicalProperty * rpp =
myContext->getReqdPhysicalProperty();
// Does parent require some sort order
NABoolean parentRequiresOrder =
rpp->getSortKey() != NULL;
// Does parent require some arrangement of columns
NABoolean parentRequiresArrangement =
rpp->getArrangedCols() != NULL;
// Is partial sort key applicable?
NABoolean rewriteSortKey=FALSE;
if (parentRequiresArrangement)
{
ValueIdSet parentArrangement= *(rpp->getArrangedCols());
ValueIdSet myChildSortKeyAsASet(childSortKey);
myChildSortKeyAsASet = myChildSortKeyAsASet.removeInverseOrder();
// check if mySortKeySet is contained in requiredArrangement
ValueIdSet commonElements =
parentArrangement.intersect(myChildSortKeyAsASet);
if (!commonElements.isEmpty())
{
// Rewrite the sort key to make use of the existing order and
// populate the partial sort key as well: if childSortKey[i] is in
// commonElements, then add it to the partial sort key.
for (UInt32 i=0; i < childSortKey.entries(); i++)
{
if (commonElements.contains(
childSortKey[i].getItemExpr()->removeInverseOrder()->getValueId()
)
)
{
rewriteSortKey=TRUE;
PartialSortKeyFromChild_.insert(childSortKey[i]);
}
else
break;
}
}
if (rewriteSortKey)
{
// sortkey_ = PartialSortKeyFromChild_ + remaining elements from sort key
ValueIdSet mySortkey=sortKey_;
ValueIdSet mySortkeyTemp = mySortkey;
ValueIdSet partialSortKey = PartialSortKeyFromChild_.removeInverseOrder();
// mySortkey = mySortkey - (mySortKey /\ partialSortKey);
mySortkey.subtractSet(
mySortkey.intersect(partialSortKey));
sortKey_ = PartialSortKeyFromChild_.removeInverseOrder();
for (ValueId x=mySortkey.init() ;
mySortkey.next(x);
mySortkey.advance(x))
{
sortKey_.insert(x);
}
}
} // is Arrangement requirement
else if (parentRequiresOrder)
{
const ValueIdList parentSortKey = *(rpp->getSortKey());
UInt32 size = childSortKey.entries() > parentSortKey.entries() ?
parentSortKey.entries() : childSortKey.entries();
// size is at least one, else we would not be here.
for (UInt32 i=0; i <= (size-1); i++)
{
// Make sure a non-empty prefix of the sort keys from the parent and
// the child is exactly the same (semantically). Note that in the
// descending case, childSortKey[i] can differ from parentSortKey[i],
// but if the underlying item expressions are inverses of a common
// expression, the non-empty prefix test should still pass.
//
// Solu. 10-090303-9701 (NF:R2.4:Partial Sort not used in sort of
// descending keys).
if (childSortKey[i] == parentSortKey[i] ||
(childSortKey[i].getItemExpr()->getOperatorType() == ITM_INVERSE
&&
parentSortKey[i].getItemExpr()->getOperatorType() == ITM_INVERSE
&&
childSortKey[i].getItemExpr()->removeInverseOrder()->getValueId()
==
parentSortKey[i].getItemExpr()->removeInverseOrder()->getValueId()
)
)
{
rewriteSortKey=TRUE;
PartialSortKeyFromChild_.insert(childSortKey[i]);
}
else
break;
} // for loop
} // Is order requirement
} // Sort::synthPartialSortKeyFromChild()
//<pb>
//==============================================================================
// Combine arrangement columns with sort key to produce a final sort key.
//
// Input:
// none
//
// Output:
// none
//
// Return:
// none
//
//==============================================================================
void
Sort::produceFinalSortKey()
{
// ----------------------------------------------------------------------
// if the parent asked for an arrangement rather than (or in addition to)
// a particular sort order, choose the final sort order now.
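// For example (illustrative): with sortKey_ = (a DESC) and
// arrangedCols_ = {a, b}, only b is appended below, because the
// simplified form of "a DESC" already covers column a.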
// ----------------------------------------------------------------------
if (NOT arrangedCols_.isEmpty())
{
ValueId svid;
ValueIdList simpleSortCols;
NABoolean parentSortOrderReq = FALSE;
//---------------------------------
// Determine if we have a sort key.
//---------------------------------
if (NOT sortKey_.isEmpty())
{
//------------------------------------------------
// Indicate that parent required a sort ordering.
//------------------------------------------------
parentSortOrderReq = TRUE;
//----------------------------------------------------------------
// Compute required sort order in simplified form. This will
// be used to ensure that the combined sort key (required sort
// order and required arrangement) doesn't reflect the same
// underlying column more than once.
//
// Note: the parent already eliminated duplicate expressions from
// the required sort order and arrangement when they were created.
//----------------------------------------------------------------
simpleSortCols = sortKey_.simplifyOrderExpr();
}
//-------------------------------------------------
// Combine arranged columns with sort key columns.
//-------------------------------------------------
for (ValueId x = arrangedCols_.init();
arrangedCols_.next(x);
arrangedCols_.advance(x))
{
//----------------------------------------------------------------
// If there was no required sort order, we need not do any checks.
//----------------------------------------------------------------
if (NOT parentSortOrderReq)
sortKey_.insert(x);
else
{
//--------------------------------------------------------------------
// Required sort order exists, so ensure that the underlying columns
// from the required arrangement don't already exist in the simplified
// form of the required sort order.
//--------------------------------------------------------------------
svid = x.getItemExpr()->simplifyOrderExpr()->getValueId();
if (NOT simpleSortCols.contains(svid))
{
//---------------------------------------------------------
// The sort key doesn't already reflect an ordering on this
// column. Go ahead and add it.
//---------------------------------------------------------
sortKey_.insert(x);
}
}
}
arrangedCols_.clear();
}
} //Sort::produceFinalSortKey()
//<pb>
// -----------------------------------------------------------------------
// member functions for class Exchange
// -----------------------------------------------------------------------
// -----------------------------------------------------------------------
// Exchange::costMethod()
// Obtain a pointer to a CostMethod object providing access
// to the cost estimation functions for nodes of this type.
// -----------------------------------------------------------------------
CostMethod*
Exchange::costMethod() const
{
static THREAD_P CostMethodExchange *m = NULL;
if (m == NULL)
m = new (GetCliGlobals()->exCollHeap()) CostMethodExchange();
return m;
} // Exchange::costMethod()
//<pb>
// -----------------------------------------------------------------------
// Exchange::createContextForAChild()
//
// The optimizer introduces an Exchange operator in the dataflow tree
// in order to redistribute the workload over multiple processes. A
// process can either be the master executor, an instance of an
// Executor Server Process (ESP), or a DP2, the Disk server
// Process.
//
// The ESP Exchange is introduced either when an operator in
// the query execution plan is expected to consume memory very
// heavily, when a tree of operators implements a CPU-intensive
// operation that is likely to saturate the CPU, or when asynchronous
// access to partitioned data can improve the throughput of an
// operator or operator tree. It encapsulates parallelism.
//
// The DP2 exchange is introduced whenever we want the child tree to
// execute in DP2.
//
// The Exchange is just another dataflow operator. It is represented
// using the notation Exchange(m:n), where m is the number of data
// streams it receives as its internal dataflow inputs and n is the
// number of data streams it produces as its own output. Using Cascades
// jargon, it is a physical operator which enforces one or both of the
// following two properties:
//
// 1) The partitioning of data
//
// The term "partition" is an artifact of the product's association
// with Tandem. It is synonymous with the term data stream, which is,
// perhaps, more relevant in the context of the dataflow architecture
// of this product.
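//
//    For example, given this notation, an Exchange(4:1) coalesces four
//    parallel data streams into a single stream for its parent, while
//    an Exchange(1:4) would split one stream into four.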
//
// 2) The location where the plan (fragment) should execute.
//
// A plan can execute in one of the following "locations":
// a) In the MASTER only.
// b) In an ESP only.
// c) In either the master or an ESP.
// d) In DP2 only.
//
// Every Exchange enforces the execution of its parent outside of DP2.
// The optimizer uses a single Exchange for enforcing a location as
// well as the desired partitioning. However, when the plan executes,
// two instances of the Exchange operator may be necessary, one for
// enforcing the DP2 location and another for repartitioning of data.
// The query rewrite phase that follows after the optimal plan is
// selected introduces another Exchange as required.
//
// The Exchange is a "CISC" operator. The Exchange which is seen by the
// optimizer is replaced with one of the following three sets of
// operators by the code generator:
//
// 1) Exchange(plan fragment) -> PartitionAccess(plan fragment)
//
// Produced by plan 0 below.
//
// The Exchange is replaced with a PartitionAccess (PA).
//
// The PartitionAccess implements the messaging protocol between the
// SQL executor, running in an Executor Server Process (ESP), and
// DP2, the disk server process. It provides a dedicated connection
// between an ESP and a particular DP2. The ESP can establish one
// or more sessions successively on the same connection. A session
// is established when the PartitionAccess downloads a fragment of
// a query execution plan to DP2. A session permits a bi-directional
// dataflow during which the ESP can provide external dataflow inputs
// and the DP2 returns the corresponding result of executing the plan
// fragment.
//
// The PartitionAccess follows the policy of establishing the
// successor session only after the one that is in progress completes.
// According to this policy, there can be at most one session in
// progress per DP2 with which an ESP has established a connection.
// This policy permits an ESP to overlap the processing of rows that
// are provided by a particular DP2, while that DP2 is processing the
// successive set of rows. EXPLAIN shows this exchange as
// PARTITION_ACCESS.
//
// A PartitionAccess operator in the executor talks to at most one
// DP2 process at a time.
//
// 2) Exchange(plan fragment) -> SplitTop(PartitionAccess(plan fragment))
//
// Produced by plan 0 below.
//
// The Exchange is replaced with a SplitTop, PartitionAccess pair.
// The split top node in this configuration is called the PAPA (PArent
// of PA). Its purpose is to overcome the restriction that a PA node
// can only communicate with one DP2 at a time. The PAPA node enables
// parallel DP2 requests to partitioned tables. EXPLAIN shows this
// form of an exchange as SPLIT_TOP_PA.
//
// 3) Exchange(plan fragment) -> Exchange(plan fragment) ->
// SplitTop(SendTop(SendBottom(SplitBottom(
// SplitTop(PartitionAccess(plan fragment))))))
//
// Produced by plan 0 below.
//
// Combination of results 2) and 4). Alternatively, we may also produce
// a combination of results 1) and 4) (not shown). This is done if we
// need to produce a partitioning scheme that can not be produced by
// a PA or a PAPA node and if the child needs to execute in DP2. We
// need to add a repartitioning step above the PA or PAPA.
//
// 4) Exchange(plan fragment) ->
// SplitTop(SendTop(SendBottom(SplitBottom(plan fragment))))
//
// Produced by plan 1 below.
//
// This is the case when ESP parallelism is used. Also called the
// ESP Exchange or REPARTITION in EXPLAIN.
//
// -----------------------------------------------------------------------
Context * Exchange::createContextForAChild(Context* myContext,
PlanWorkSpace* pws,
Lng32& childIndex)
{
// ---------------------------------------------------------------------
// An enforcer rule MUST be given a required physical property to enforce.
// Compute the number of plans that will be created for this Exchange.
// Since the Exchange enforces the location where a plan should
// execute as well as the partitioning function for the data streams
// that it produces, the required physical properties should contain
// either a specific location or a partitioning criterion.
// ---------------------------------------------------------------------
const ReqdPhysicalProperty* rppForMe =
myContext->getReqdPhysicalProperty();
PartitioningRequirement *partReqForMe =
rppForMe->getPartitioningRequirement();
SortOrderTypeEnum sortOrderTypeReq = NO_SOT;
PartitioningRequirement* dp2SortOrderPartReq = NULL;
childIndex = 0;
// ---------------------------------------------------------------------
// The location requirement MUST be an ESP because the Exchange can
// only enforce the execution of its parent outside of DP2.
// ---------------------------------------------------------------------
CMPASSERT(rppForMe AND NOT rppForMe->executeInDP2());
// ---------------------------------------------------------------------
// Cost limit exceeded? Erase latest Context from the workspace.
// Erasing the Context does not decrement the count of Contexts
// considered so far.
// ---------------------------------------------------------------------
if ((NOT pws->isEmpty()) AND (NOT pws->isLatestContextWithinCostLimit()))
pws->eraseLatestContextFromWorkSpace();
// ---------------------------------------------------------------------
// Allocate local variables.
// ---------------------------------------------------------------------
Lng32 planNumber = pws->getCountOfChildContexts();
PlanExecutionEnum plenum;
ReqdPhysicalProperty* rppForChild = NULL;
Context* result = NULL;
// ---------------------------------------------------------------------
// Loop over the possible plans and try to find one that we actually
// want to try.
//
// We consider two plans for an exchange operator (unless forced
// otherwise):
//
// Plan 0: try executing the child tree in DP2 and pass our own
// partitioning requirement (if any) as a logical partitioning
// requirement to the child.
// Plan 1: if the parent requires some partitioning, try to enforce
// that partitioning here with an ESP exchange and require no
// partitioning and location ESP from the child.
//
// ---------------------------------------------------------------------
while (planNumber < 2 AND result == NULL)
{
// do we actually want to TRY this plan or do we want to skip it?
NABoolean tryThisPlan = FALSE;
if (planNumber == 0)
{
// ||opt some day we may consider some optimizations where we don't
// need to create a DP2 context:
// - for immediate children of an EXCHANGE
// - for groups that definitely can't execute in DP2
// - for groups that we don't want to execute in DP2
// Don't create a DP2 context if there is a partitioning requirement
// that requires broadcast replication or hash repartitioning. Only
// an ESP exchange can perform replication or hash repartitioning.
// When parallel heuristics were added we put an extra condition for
// creating a DP2 context: parallelHeuristic1_ is FALSE (not used), or
// numBaseTables_ in the current group is set to 1, or the
// OPTS_PUSH_DOWN_DAM default is turned on to push operators into DP2.
// For "nice context" one more check was added to skip the DP2 context.
// In fact, parallelHeuristic1 can be TRUE only when CQD
// ATTEMPT_ESP_PARALLELISM is SYSTEM. If it is ON then we always
// try a DP2 context. Also, OPH_USE_NICE_CONTEXT must be OFF for a DP2
// context to be tried, unless the plan is required to be in DP2.
if ( ( ( NOT ( CURRSTMT_OPTDEFAULTS->parallelHeuristic1() OR
myContext->isNiceContextEnabled()
)
) OR CURRSTMT_OPTDEFAULTS->pushDownDP2Requested() OR
( myContext->getGroupAttr()->getNumBaseTables() == 1)
) AND
( (partReqForMe == NULL) OR
NOT partReqForMe->isRequirementReplicateViaBroadcast()
)
)
{
plenum = EXECUTE_IN_DP2;
// do not try DP2 plans in Trafodion
// tryThisPlan = TRUE;
// Never pass a sort order type requirement to DP2.
// But, pass any dp2SortOrderPartReq.
sortOrderTypeReq = NO_SOT;
dp2SortOrderPartReq = rppForMe->getDp2SortOrderPartReq();
}
}
else if (planNumber == 1)
{
const Context* childContext = pws->getLatestChildContext();
// If we had a DP2_OR_ESP_NO_SORT requirement, and the
// first DP2 context we tried failed because the child
// was not physically partitioned the same as the
// dp2SortOrderPartReq requirement, then retry plan 0
// without the dp2SortOrderPartReq this time.
if ((rppForMe->getSortOrderTypeReq() == DP2_OR_ESP_NO_SORT_SOT) AND
(childContext != NULL) AND
(childContext->getReqdPhysicalProperty()->
getDp2SortOrderPartReq() != NULL) AND
NOT childContext->hasOptimalSolution())
{
// Get rid of the context for the failed plan
pws->deleteChildContext(0,0);
// redo plan 0 with no dp2SortOrderPartReq this time
planNumber = 0;
plenum = EXECUTE_IN_DP2;
// do not try DP2 plans in Trafodion
// tryThisPlan = TRUE;
sortOrderTypeReq = NO_SOT;
dp2SortOrderPartReq = NULL;
}
else
{
// Try generating an execute-in-ESP location context for
// repartitioning if there is a partitioning requirement
// that can be enforced by repartitioning or if the
// requirement is to execute in exactly one stream, and
// if parallelism is enabled/possible.
// If the requirement is to execute in exactly one stream,
// then repartitioning is not needed to coalesce multiple
// streams into one stream, but we need to generate an ESP
// context here to give the child a chance to execute in
// parallel. The one partitioning requirement that we should not
// and cannot generate an ESP context for is a no broadcast
// replication. This is because if we did, this could lead
// to repartitioning, which would have to be a broadcast
// replication, and if the requirement is for no broadcast
// replication, this means the parent is a nested join and
// this could lead to wrong answers, because each nested
// join instance must get back only the rows for its probes,
// not all the rows from all the probes.
if ( (partReqForMe != NULL) AND
NOT partReqForMe->castToRequireReplicateNoBroadcast() AND
NOT rppForMe->getNoEspExchangeRequirement() AND
( (rppForMe->getCountOfPipelines() > 1)
// This is needed for nice context: otherwise, when
// ATTEMPT_ESP_PARALLELISM is OFF, getCountOfPipelines()
// would return 1, the ESP context with no partReq wouldn't
// be created, and implementation rules wouldn't fire on
// any logical expressions. CORE/test060 fails without
// this because it uses OFF.
OR myContext->isNiceContextEnabled()
)
)
{
// Apply a heuristic:
// if the previous DP2 context returned a plan and we
// would repartition without having a requirement for
// more than one partition, then just forget about that
// option. Why do we do that? We assume that using the
// DP2 context will be cheaper than the repartitioning
// plan.
if (NOT (partReqForMe->getCountOfPartitions() <= 1 AND
childContext AND
childContext->hasOptimalSolution() AND
rppForMe->getMustMatch() == NULL AND
CmpCommon::getDefault(TRY_DP2_REPARTITION_ALWAYS)
== DF_OFF))
{
plenum = EXECUTE_IN_ESP;
tryThisPlan = TRUE;
// For ESP contexts, just pass along any existing
// sort order type requirements
sortOrderTypeReq = rppForMe->getSortOrderTypeReq();
dp2SortOrderPartReq = rppForMe->getDp2SortOrderPartReq();
}
} // end if an enforcable partitioning requirement
} // end if redo DP2 plan
} // end if planNumber == 1
if (tryThisPlan)
{
// -------------------------------------------------------------
// Construct a new required physical property vector for my
// child. No need to use the requirement generator, since
// an enforcer rule has its own group as a child and since
// we simply strip off the partitioning requirement and
// replace the location requirement.
// -------------------------------------------------------------
rppForChild = new (CmpCommon::statementHeap())
ReqdPhysicalProperty(*rppForMe,
rppForMe->getArrangedCols(),
rppForMe->getSortKey(),
sortOrderTypeReq,
dp2SortOrderPartReq,
NULL,
plenum);
// -------------------------------------------------------------
// Add logical partitioning requirement if the child context
// is for DP2 and the exchange was given a part requirement
// -------------------------------------------------------------
if ((plenum == EXECUTE_IN_DP2) AND
(rppForMe->getPartitioningRequirement() != NULL))
rppForChild->setLogicalPartRequirement(
new(CmpCommon::statementHeap()) LogicalPartitioningRequirement(
rppForMe->getPartitioningRequirement()));
// -------------------------------------------------------------
// Indicate we are giving the child a logical ordering or
// arrangement requirement if the child context
// is for DP2 and the exchange was given a ordering or
// arrangement requirement.
// -------------------------------------------------------------
if ((plenum == EXECUTE_IN_DP2) AND
((rppForMe->getArrangedCols() != NULL) OR
(rppForMe->getSortKey() != NULL)))
rppForChild->setLogicalOrderOrArrangementFlag(TRUE);
// -------------------------------------------------------------
// Check for a CONTROL QUERY SHAPE directive and process
// it. Sorry, this is actually the most complex part of the
// method.
// -------------------------------------------------------------
rppForChild = processCQS(rppForMe,rppForChild);
} // tryThisPlan
if (tryThisPlan AND rppForChild)
{
// -------------------------------------------------------------
// Compute the cost limit to be applied to the child.
// -------------------------------------------------------------
CostLimit* costLimit = computeCostLimit(myContext, pws);
// -------------------------------------------------------------
// Get a Context for optimizing the child. Search for an
// existing Context in the CascadesGroup to which the child
// belongs that requires the same properties as those in
// rppForChild. Reuse it, if found. Otherwise, create a new
// Context that contains rppForChild as the required
// physical properties.
// -------------------------------------------------------------
result = shareContext(
childIndex, rppForChild,
myContext->getInputPhysicalProperty(), costLimit,
myContext, myContext->getInputLogProp());
} // tryThisPlan(2)
// -----------------------------------------------------------------
// Store the Context for the child in the PlanWorkSpace, even if
// it is NULL.
// -----------------------------------------------------------------
pws->storeChildContext(childIndex, planNumber, result);
// -----------------------------------------------------------------
// increment loop variable
// -----------------------------------------------------------------
planNumber++;
} // while loop
return result;
} // Exchange::createContextForAChild()
//<pb>
// -----------------------------------------------------------------------
// Add required physical properties from the CONTROL QUERY SHAPE directive
// for an Exchange operator
// -----------------------------------------------------------------------
ReqdPhysicalProperty * Exchange::processCQS(
const ReqdPhysicalProperty *rppForMe,
ReqdPhysicalProperty *rppForChild)
{
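// (For orientation: the must-match pattern examined below comes from a
// CONTROL QUERY SHAPE directive; an ExchangeForceWildCard in that
// pattern can force this node to become a PA, a PAPA, or an ESP
// exchange, and can also force the logical partitioning scheme.)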
ReqdPhysicalProperty *result = rppForChild;
if (rppForMe->getMustMatch())
{
if (rppForMe->getMustMatch()->getOperatorType() == REL_FORCE_EXCHANGE)
{
ExchangeForceWildCard *mm =
(ExchangeForceWildCard *) rppForMe->getMustMatch();
ExchangeForceWildCard::forcedExchEnum whichType = mm->getWhich();
ExchangeForceWildCard::forcedLogPartEnum logPart =
mm->getWhichLogPart();
Lng32 numClients = mm->getHowMany();
// translate invalid numClients into literal
if (numClients <= 0)
numClients = ANY_NUMBER_OF_PARTITIONS;
if (rppForChild->executeInDP2())
{
// don't try a DP2 child context for a forced ESP exchange
if (whichType == ExchangeForceWildCard::FORCED_ESP_EXCHANGE)
return NULL;
LogPhysPartitioningFunction::logPartType logPartType = LogPhysPartitioningFunction::ANY_LOGICAL_PARTITIONING;
NABoolean mustUsePapa = FALSE;
NABoolean numberOfPAsForced = FALSE;
// (numClients was already fetched and normalized above,
// so there is no need to re-declare it here)
// signal PA/PAPA decision if forced
if (whichType == ExchangeForceWildCard::FORCED_PAPA)
mustUsePapa = TRUE;
else if (whichType == ExchangeForceWildCard::FORCED_PA)
{
numClients = 1;
numberOfPAsForced = TRUE;
}
// map CQS literal into LogPhysPartitioningFunction
// literals
switch (logPart)
{
case ExchangeForceWildCard::ANY_LOGPART:
logPartType =
LogPhysPartitioningFunction::ANY_LOGICAL_PARTITIONING;
break;
case ExchangeForceWildCard::FORCED_GROUP:
logPartType =
LogPhysPartitioningFunction::PA_PARTITION_GROUPING;
break;
case ExchangeForceWildCard::FORCED_SPLIT:
logPartType =
LogPhysPartitioningFunction::LOGICAL_SUBPARTITIONING;
break;
default:
CMPASSERT(0); // internal error
}
// force a log part scheme only if there is a partitioning
// requirement and if it is for more than one partition
if (NOT rppForMe->requiresPartitioning() OR
rppForMe->getPartitioningRequirement()->
castToRequireExactlyOnePartition())
{
logPartType =
LogPhysPartitioningFunction::ANY_LOGICAL_PARTITIONING;
}
if (logPartType !=
LogPhysPartitioningFunction::ANY_LOGICAL_PARTITIONING OR
numClients != ANY_NUMBER_OF_PARTITIONS OR
mustUsePapa)
{
// now replace the logical partitioning requirement
// (note that this may result in a logical partitioning
// requirement with a NULL PartitioningRequirement in it)
result->setLogicalPartRequirement(
new(CmpCommon::statementHeap())
LogicalPartitioningRequirement(
rppForMe->getPartitioningRequirement(),
logPartType,
numClients,
mustUsePapa,
numberOfPAsForced ));
}
} // child is required to execute in DP2
else
{
// child is required to execute in an ESP
// don't try an ESP child context for a forced PA/PAPA
if (whichType == ExchangeForceWildCard::FORCED_PA OR
whichType == ExchangeForceWildCard::FORCED_PAPA)
return NULL;
if (numClients != ANY_NUMBER_OF_PARTITIONS)
{
// Process a CQS requirement for a given number of
// ESP partitions: Alter the required physical
// properties by adding a requirement for a given
// number of ESPs
RequirementGenerator rg(child(0),rppForChild);
rg.addNumOfPartitions(numClients,0.0);
// Replace the requirement if it is still feasible,
// give up if it is not feasible. We should never run
// into the case where the requirement is not feasible
// because the exchange node should not require any
// partitioning at this point.
if (rg.checkFeasibility())
result = rg.produceRequirement();
else
CMPASSERT(0); // for debugging
}
} // child is required to execute in an ESP
} // must match specifies some exchange node
else
{
if (NOT CURRSTMT_OPTDEFAULTS->ignoreExchangesInCQS())
// We found out that we must match a pattern that is not
// an exchange. Operators usually don't check this, but
// since we did, we might as well notice and save ourselves
// some work.
return NULL;
}
} // end CQS processing
return result;
} // Exchange::processCQS()
//<pb>
// -----------------------------------------------------------------------
// Exchange::storePhysPropertiesInNode()
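//
// Copies selected synthesized physical properties into data members of
// this Exchange node so that preCodeGen and the generator can use (and,
// if needed, modify) them, determines whether this Exchange is redundant,
// and, for reverse scans over partitioned tables with synchronous access,
// manufactures a partition search key as if the scan direction were forward.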
// -----------------------------------------------------------------------
void Exchange::storePhysPropertiesInNode(const ValueIdList &partialSortKey)
{
const PhysicalProperty *sppForMe = getPhysicalProperty();
const PhysicalProperty *sppOfChild = child(0)->getPhysicalProperty();
CMPASSERT(sppForMe AND sppOfChild);
// ---------------------------------------------------------------------
// determine whether this Exchange node is redundant
// (it is redundant if it enforces neither location nor partitioning)
// ---------------------------------------------------------------------
isRedundant_ =
(sppOfChild->getPlanExecutionLocation() ==
sppForMe->getPlanExecutionLocation() AND
sppForMe->getPartitioningFunction()->
comparePartFuncToFunc(*sppOfChild->getPartitioningFunction()) == SAME);
// ---------------------------------------------------------------------
// Copy some of the physical properties into data members of this
// object, so that the code generator and preCodeGen can look at them
// (they may also modify them if needed).
// ---------------------------------------------------------------------
// Don't copy the sort key if the sort order type is DP2.
// This is because copying the sort key to sortKeyForMyOutput_
// will cause a merge of sorted streams to occur, and we don't
// want that if the sort order type is DP2.
if (sppForMe->getSortOrderType() != DP2_SOT)
{
sortKeyForMyOutput_ = sppForMe->getSortKey();
}
else if (CmpCommon::getDefault(COMP_BOOL_86) == DF_ON)
{
// begin fix to partial sort: comment out this if
// if (partialSortKey == sppForMe->getSortKey())
// otherwise, partial sort with split top can give wrong results
sortKeyForMyOutput_ = partialSortKey;
}
topPartFunc_ = sppForMe->getPartitioningFunction();
bottomPartFunc_ = sppOfChild->getPartitioningFunction();
bottomLocation_ = sppOfChild->getPlanExecutionLocation();
bottomLocIsSet_ = TRUE;
indexDesc_ = sppOfChild->getIndexDesc();
NABoolean isChildSyncAccess = FALSE;
const LogPhysPartitioningFunction *lpf =
bottomPartFunc_->castToLogPhysPartitioningFunction();
if (lpf)
isChildSyncAccess = lpf->getSynchronousAccess();
// solution 10-040505-5752: we check for synchronous sequential access. In
// that case, we check for the reverse scan flag. We manufacture a partition
// search key as if the scan direction is forward. We then set the direction
// of the scan as a flag on the partition access node; this gets copied from
// the flag on the exchange node.
if ( (CmpCommon::getDefault(ATTEMPT_ASYNCHRONOUS_ACCESS) == DF_OFF ||
isChildSyncAccess) &&
CmpCommon::getDefault(ATTEMPT_REVERSE_SYNCHRONOUS_ORDER) == DF_ON
)
{
OperatorTypeEnum ot = child(0)->castToRelExpr()->getOperatorType();
RelExpr *relExpr = child(0)->castToRelExpr();
NABoolean noScanFound = FALSE;
while (ot != REL_SCAN && ot != REL_FILE_SCAN)
{
if (relExpr->getArity() > 1 ||
relExpr->getOperatorType() == REL_EXCHANGE
)
{
noScanFound = TRUE;
break;
}
if (relExpr->getArity() != 0)
{
relExpr = relExpr->child(0)->castToRelExpr();
ot = relExpr->getOperatorType();
}
else
{
noScanFound = TRUE;
break;
}
}
FileScan *scan = NULL;
if (relExpr->getOperatorType() == REL_FILE_SCAN ||
relExpr->getOperatorType() == REL_SCAN)
{
scan = (FileScan *) relExpr;
}
else
{
noScanFound = TRUE;
}
if (!noScanFound &&
scan->getReverseScan() &&
scan->getIndexDesc()->isPartitioned())
{
ValueIdSet externalInputs = scan->getGroupAttr()->
getCharacteristicInputs();
ValueIdSet dummySet;
ValueIdSet selPreds(scan->getSelectionPred());
setOverReverseScan();
// Create and set the Searchkey for the partitioning key:
partSearchKey_ = new (CmpCommon::statementHeap())
SearchKey(scan->getIndexDesc()->getPartitioningKey(),
scan->getIndexDesc()->
getOrderOfPartitioningKeyValues(),
externalInputs,
TRUE,
selPreds,
dummySet, // not used
scan->getIndexDesc()
);
}
else
partSearchKey_ = sppOfChild->getPartSearchKey();
}
else
partSearchKey_ = sppOfChild->getPartSearchKey();
}
//<pb>
//==============================================================================
// Synthesize physical properties for exchange operator's current plan
// extracted from a specified context.
// Calls: synthPhysicalPropertyDP2() OR synthPhysicalPropertyESP();
//
// Input:
// myContext -- specified context containing this operator's current plan.
//
// planNumber -- plan's number within the plan workspace. Used optionally for
// synthesizing partitioning functions but unused in this
// derived version of synthPhysicalProperty().
//
// Output:
// none
//
// Return:
// Pointer to this operator's synthesized physical properties.
//
//==============================================================================
PhysicalProperty*
Exchange::synthPhysicalProperty(const Context *myContext,
const Lng32 planNumber,
PlanWorkSpace *pws)
{
const PhysicalProperty *sppOfChild = myContext->
getPhysicalPropertyOfSolutionForChild(0);
if (sppOfChild->executeInDP2())
{
return synthPhysicalPropertyDP2(myContext);
}
else // child executes in ESP
{
return synthPhysicalPropertyESP(myContext);
}
} // Exchange::synthPhysicalProperty()
//==============================================================================
// Synthesize physical properties for exchange operator's current plan
// extracted from a specified context when child executes in DP2.
//
// Helper method for Exchange::synthPhysicalProperty()
//==============================================================================
PhysicalProperty*
Exchange::synthPhysicalPropertyDP2(const Context *myContext)
{
NABoolean usingSynchronousAccess = FALSE;
NABoolean usePapa = FALSE;
NABoolean canMaintainSortOrder = TRUE;
PartitioningFunction *myPartFunc;
NodeMap *myNodeMap = NULL;
const ReqdPhysicalProperty *rppForMe = myContext->getReqdPhysicalProperty();
const PhysicalProperty *sppOfChild = myContext->getPhysicalPropertyOfSolutionForChild(0);
PartitioningFunction *childPartFunc = sppOfChild->getPartitioningFunction();
const LogPhysPartitioningFunction *logPhysChildPartFunc =
childPartFunc->castToLogPhysPartitioningFunction();
// -------------------------------------------------------------
// DP2 exchange (PA or PAPA)
// -------------------------------------------------------------
if (logPhysChildPartFunc)
{
usingSynchronousAccess = logPhysChildPartFunc->getSynchronousAccess();
usePapa = logPhysChildPartFunc->getUsePapa();
// Can this logPhys partitioning function maintain the order of
// an individual partition of the physical partitioning
// function. In order to maintain the order, a merge expression
// may be required.
//
canMaintainSortOrder =
logPhysChildPartFunc->canMaintainSortOrder(sppOfChild->getSortKey());
// Child synthesized a LogPhysPartitioningFunction which has all
// the instructions on what to do. Unpack these instructions.
myPartFunc = logPhysChildPartFunc->getLogPartitioningFunction();
// ---------------------------------------------------------------
// For all cases (PA_PARTITION_GROUPING, SUBPARTITIONING and
// replicate-no-broadcast), try to use any existing logical or physical
// partitioning function's nodemap that matches our partition count
// requirement. Only as a last resort do we synthesize a node map which
// attempts to co-locate ESPs and the corresponding DP2 partitions.
// We do this because synthesizeLogicalMap() is very expensive.
//
// Note: although the grouping model does not exactly fit for
// the cases of SUBPARTITIONING or a replicate-no-broadcast
// partitioning function, it results in a reasonable allocation
// of ESPs
// ---------------------------------------------------------------
if (CmpCommon::getDefault(COMP_BOOL_82) == DF_ON) {
myNodeMap = logPhysChildPartFunc->getOrMakeSuitableNodeMap
(FALSE/*forDP2*/);
}
else {
const NodeMap* childNodeMap = logPhysChildPartFunc
->getPhysPartitioningFunction()
->getNodeMap();
Lng32 myPartitionCount = myPartFunc->getCountOfPartitions();
myNodeMap = ((NodeMap *)(childNodeMap))
->synthesizeLogicalMap(myPartitionCount, FALSE/*forDP2*/);
if(myPartFunc->castToReplicateNoBroadcastPartitioningFunction()) {
for(CollIndex i = 0; i < (CollIndex)myPartitionCount; i++) {
myNodeMap->setPartitionState(i, NodeMapEntry::ACTIVE);
}
}
}
} // if logPhysChildPartFunc
else
{
// we don't allow any child partitioning functions other
// than a LogPhysPartitioningFunction or a
// SinglePartitionPartitioningFunc for DP2 children.
CMPASSERT(childPartFunc AND
childPartFunc->castToSinglePartitionPartitioningFunction());
// child synthesized one partition, synthesize one partition and
// perform the simplest case of PA_PARTITION_GROUPING
myPartFunc = childPartFunc;
myNodeMap =
childPartFunc->getNodeMap()->copy(CmpCommon::statementHeap());
if (CmpCommon::getDefault(COMP_BOOL_83) == DF_ON) {
// we want to evenly distribute single-cpu ESP
// (replicate broadcasters & others)
myNodeMap->setToRandCPU(0);
}
} // end if not a logPhysChildPartFunc
// Now, set the synthesized sort order type and the corresponding
// dp2SortOrderPartFunc.
PartitioningFunction *dp2SortOrderPartFunc = NULL;
SortOrderTypeEnum sortOrderType = NO_SOT;
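// Roughly, the decision below is: an empty child sort key yields NO_SOT;
// asynchronous access through a PAPA lets us claim DP2_SOT (when a
// compatible DP2 sort order requirement exists, or as a fallback); a sort
// order that can be maintained without a merge yields ESP_NO_SORT_SOT;
// otherwise we cannot claim any sort order (NO_SOT).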
if (sppOfChild->getSortKey().isEmpty())
sortOrderType = NO_SOT;
else
{
if (myContext->requiresOrder())
{
// Since there is a required order/arrangement, we want to set
// the sort order type to DP2 if we are accessing ALL partitions
// asynchronously, there is a requirement for a DP2 sort order,
// and the synthesized DP2 sort order partitioning function
// (i.e. the physical partitioning function) matches the
// requirement.
if (usePapa AND
NOT usingSynchronousAccess AND
((rppForMe->getSortOrderTypeReq() == DP2_SOT) OR
(rppForMe->getSortOrderTypeReq() == DP2_OR_ESP_NO_SORT_SOT)) AND
rppForMe->getDp2SortOrderPartReq()->
partReqAndFuncCompatible(sppOfChild->getDp2SortOrderPartFunc()))
{
sortOrderType = DP2_SOT;
dp2SortOrderPartFunc = sppOfChild->getDp2SortOrderPartFunc();
}
else if(canMaintainSortOrder)
{
// If we can maintain the sort order, let's do it.
sortOrderType = ESP_NO_SORT_SOT;
}
else if (usePapa AND NOT usingSynchronousAccess)
{
// If we can claim DP2_SOT, let's do it.
sortOrderType = DP2_SOT;
dp2SortOrderPartFunc = sppOfChild->getDp2SortOrderPartFunc();
}
else
{
// If all else fails, we cannot claim we are sorted.
sortOrderType = NO_SOT;
}
}
else // no required order or arrangement
{
// Since there is no required order or arrangement,
// we want to set the sort order type to ESP_NO_SORT if
// we are not accessing ALL partitions asynchronously.
// NOTE: here we can create a potential problem in that the
// sortKey and sortOrderType may conflict with the requirements
// of the parent merge union.
if (((NOT usePapa) OR
usingSynchronousAccess) AND
canMaintainSortOrder)
{
// If we can maintain the sort order with no extra effort,
// let's do it.
sortOrderType = ESP_NO_SORT_SOT;
}
else if (usePapa AND NOT usingSynchronousAccess)
{
// If we can claim DP2_SOT, let's do it.
sortOrderType = DP2_SOT;
dp2SortOrderPartFunc = sppOfChild->getDp2SortOrderPartFunc();
}
else
{
// If all else fails, we cannot claim we are sorted.
sortOrderType = NO_SOT;
}
} // end if no required order or arrangement
} // end if there is a synthesized sort order
// For all cases there should have been a nodemap synthesized.
//
CMPASSERT(myNodeMap);
// --------------------------------------------------------------------
// Make a copy of this Exchange's partitioning function and replace the
// partitioning function's node map with the newly synthesized logical
// node map.
// --------------------------------------------------------------------
// Note that this makes a deep copy of the partitioning function,
// including the nodeMap. Then this copied nodeMap is replaced with
// the new nodeMap. May want to prevent the original nodemap from
// being copied.
myPartFunc = myPartFunc->copy();
myPartFunc->replaceNodeMap(myNodeMap);
return synthPhysicalPropertyFinalize
(myContext,
myPartFunc,
sortOrderType,
childPartFunc,
sppOfChild,
rppForMe,
dp2SortOrderPartFunc);
} // Exchange::synthPhysicalPropertyDP2()
//==============================================================================
// Synthesize physical properties for exchange operator's current plan
// extracted from a specified context when child executes in ESP.
//
// Helper method for Exchange::synthPhysicalProperty()
//==============================================================================
PhysicalProperty*
Exchange::synthPhysicalPropertyESP(const Context *myContext)
{
const PhysicalProperty *sppOfChild = myContext->getPhysicalPropertyOfSolutionForChild(0);
PartitioningFunction *childPartFunc = sppOfChild->getPartitioningFunction();
// -------------------------------------------------------------
// ESP exchange, realize the partitioning requirement
// -------------------------------------------------------------
PartitioningRequirement* softPartRequirement = NULL;
// some dummy variables for the cover test
ValueIdSet newInputs;
ValueIdSet referencedInputs;
ValueIdSet coveredSubExpr;
ValueIdSet uncoveredExpr;
// We may need to use the child's partitioning key columns
// (the "soft" partitioning requirement to realize()) for
// repartitioning, if the requirement did not specify a required
// partitioning key. So, we must make sure they are covered
// by the group attributes. Otherwise, we would attempt to
// repartition on a value that DP2 did not produce, and this
// would fail. If we don't pass a soft partitioning
// requirement to realize(), it will repartition on all
// the group attribute outputs, if the requirement does
// not specify a required partitioning key.
const ReqdPhysicalProperty *rppForMe = myContext->getReqdPhysicalProperty();
PartitioningRequirement *myPartReq = rppForMe->getPartitioningRequirement();
// We don't need to require coverage of the child partitioning key if
// the requirement is fuzzy and does not have a partitioning key. This means
// we could use the current partitioning of the child to satisfy this
// requirement, so we can use the child partitioning as a soft requirement.
// COMP_BOOL_126 should be ON to avoid the coverage check.
NABoolean noNeedToCoverChildPartKey = myPartReq AND
myPartReq->isRequirementFuzzy() AND
myPartReq->getPartitioningKey().isEmpty();
// We use the partial partfunc from the child here because when the
// rppForMe contains a solid skew requirement, then the child partfunc
// will be guaranteed to be a SkewedData partfunc. Otherwise, the child
// partfunc will be some other type and its partial partfunc will be the
// same as the child part func itself. In both cases, the partial
// partfunc is sufficient to provide a base for the coverage test below.
if( ((CmpCommon::getDefault(COMP_BOOL_126) == DF_ON) AND
noNeedToCoverChildPartKey) OR
(childPartFunc->getPartialPartitioningKey().isCovered(
newInputs,
*(getGroupAttr()),
referencedInputs,
coveredSubExpr,
uncoveredExpr))
)
{
softPartRequirement = childPartFunc->makePartitioningRequirement();
}
PartitioningFunction *myPartFunc =
myPartReq->realize(myContext,
FALSE,
softPartRequirement);
myPartFunc->createPartitioningKeyPredicates();
// --------------------------------------------------------------------
// Make a copy of this Exchange's partitioning function.
// --------------------------------------------------------------------
myPartFunc = myPartFunc->copy();
// Double check or memorize the skewness handling requirement
// in the part func for exchange so that the right code can be
// generated for it.
//
// We do this only for partfuncs that can deal with skewed values. For
// others, they will fail the :satisfied() method because they are not
// SkewedDataPartFunc
if ( myPartReq -> isRequirementFuzzy() AND
myPartFunc -> castToSkewedDataPartitioningFunction()
) {
if (NOT (myPartReq ->castToFuzzyPartitioningRequirement()->
getSkewProperty()).isAnySkew())
{
CMPASSERT(myPartFunc->castToSkewedDataPartitioningFunction()->
getSkewProperty() ==
myPartReq->castToFuzzyPartitioningRequirement()->
getSkewProperty());
} else {
// "Any" skew can be present in myPartReq if this exchange node
// receives an ApproxNPart requirement and is located immediately
// above a skew insensitive hash join node. For example, the following
// query
//
// select name, mysk.ssn, age from mysk, mydm1
// where mysk.ssn = mydm1.ssn order by mysk.ssn;
//
// will place a SORT node on top of the de-skewing exchange node.
// Since no skew requirement is present in myPartReq, we set the
// skew property for my partfunc to "Any". This can save the effort
// to repartition the skew data.
((SkewedDataPartitioningFunction*)(myPartFunc)) ->
setSkewProperty(myPartReq->castToFuzzyPartitioningRequirement()->getSkewProperty());
}
}
// --------------------------------------------------------------------
// If the partitioning requirement is a fully specified part func,
// then use its nodeMap, otherwise, replace the partitioning
// function's node map with the newly synthesized logical node map.
// --------------------------------------------------------------------
if (CmpCommon::getDefault(COMP_BOOL_82) == DF_ON) {
myPartFunc->useNodeMapFromReqOrChild(myPartReq, childPartFunc,
TRUE/*forESP*/);
// "float" single partition ESPs
NodeMap* mynodemap = (NodeMap*)(myPartFunc->getNodeMap());
if (mynodemap->getNumEntries() == 1 &&
CmpCommon::getDefault(COMP_BOOL_83) == DF_ON) {
mynodemap->setToRandCPU(0);
}
}
else {
if(!(myPartReq->isRequirementFullySpecified() &&
(CmpCommon::getDefault(COMP_BOOL_87) != DF_ON) &&
myPartFunc->getNodeMap() &&
(myPartFunc->getNodeMap()->getNumEntries() ==
(ULng32)myPartFunc->getCountOfPartitions()))) {
Lng32 myPartitionCount = myPartFunc->getCountOfPartitions();
const NodeMap* childNodeMap = childPartFunc->getNodeMap();
// Synthesize a nodemap based on the nodemap of the child and the
// desired number of ESPs. Using synthesizeLogicalMap() assumes
// that the lower and upper ESPs are associated via grouping. This
// assumption is not valid when considering the communication
// patterns between the upper and lower ESPs, but this assumption
// will lead to a reasonable nodemap for the upper ESPs.
// "float" single partition ESPs.
//
NodeMap *myNodeMap =
((NodeMap *)(childNodeMap))->synthesizeLogicalMap
(myPartitionCount, TRUE/*forESP*/);
CMPASSERT(myNodeMap);
for(CollIndex i = 0; i < (CollIndex)myPartitionCount; i++) {
myNodeMap->setPartitionState(i, NodeMapEntry::ACTIVE);
}
CMPASSERT(myNodeMap->getNumActivePartitions() == (CollIndex)myPartitionCount);
myPartFunc->replaceNodeMap(myNodeMap);
}
}
return synthPhysicalPropertyFinalize
(myContext,
myPartFunc,
sppOfChild->getSortOrderType(),
childPartFunc,
sppOfChild,
rppForMe,
sppOfChild->getDp2SortOrderPartFunc());
} // Exchange::synthPhysicalPropertyESP()
//==============================================================================
// Helper method for Exchange::synthPhysicalProperty()
// Synthesize those physical properties for the exchange operator that are
// common to both cases (child executes in DP2 and child executes in ESP)
//
//==============================================================================
PhysicalProperty*
Exchange::synthPhysicalPropertyFinalize(const Context *myContext,
PartitioningFunction *myPartFunc,
SortOrderTypeEnum sortOrderType,
PartitioningFunction *childPartFunc,
const PhysicalProperty *sppOfChild,
const ReqdPhysicalProperty *rppForMe,
PartitioningFunction* dp2SortOrderPartFunc)
{
// ---------------------------------------------------------------------
// Determine if a merge of sorted streams is required.
// A merge of sorted streams is required if there is a order to
// preserve whose sort order type is not DP2, and there is a required
// order or arrangement, and the child is in DP2 and is being
// accessed via a PAPA node (i.e. asynchronous access) or the child
// is not in DP2 and has more than one partition.
//
//
// NOTE: once we support clustering keys that are other than
// partitioning keys we MUST use a merge of sorted streams in order
// to maintain the order of multiple partitions. A PA node can only
// maintain the order of multiple partitions via synchronous access
// if a prefix of the clustering key is the partitioning key.
// Do this check in the DP2 scan node when determining whether to
// use a PAPA.
// ---------------------------------------------------------------------
const LogPhysPartitioningFunction *logPhysChildPartFunc =
childPartFunc->castToLogPhysPartitioningFunction();
NABoolean mustMergeSortedStreamsToPreserveOrder =
((sortOrderType != DP2_SOT) AND
(((logPhysChildPartFunc != NULL) AND logPhysChildPartFunc->getUsePapa()) OR
((logPhysChildPartFunc == NULL) AND
(childPartFunc->getCountOfPartitions() > 1))
)
);
NABoolean mergesSortedStreams =
sppOfChild->isSorted() AND
(rppForMe->getSortKey() OR rppForMe->getArrangedCols()) AND
mustMergeSortedStreamsToPreserveOrder;
// Synthesize a sort key for the exchange if a sort key exists, and
// either we can do so without doing a merge of sorted streams or we
// must do so because it is required.
ValueIdList mySortKey;
if (sppOfChild->isSorted() AND
(NOT mustMergeSortedStreamsToPreserveOrder OR
(rppForMe->getSortKey() OR rppForMe->getArrangedCols())))
{
mySortKey = sppOfChild->getSortKey();
}
else
{
// If we are not going to synthesize a sort key, then we must
// make sure we don't synthesize a sort order type or a
// dp2SortOrderPartFunc.
sortOrderType = NO_SOT;
dp2SortOrderPartFunc = NULL;
}
// this should be TRUE if we are compiling for transform
NABoolean maintainOrderAfterRepartitioning =
(CmpCommon::getDefault(COMP_BOOL_164) == DF_ON);
PartitioningFunction * childLogPartFunc = childPartFunc;
if(logPhysChildPartFunc)
childLogPartFunc = logPhysChildPartFunc->getLogPartitioningFunction();
// set this flag to FALSE, this is a defensive measure
// in case the code does not actually go through the if
// condition below.
hash2RepartitioningWithSameKey_ = FALSE;
// hash2RepartitioningWithSameKey_ should be TRUE if the repartitioning
// operation is just repartitioning to a different # of partitions
// but the partitioning key is the same. Also the partitioning should be
// a hash2 partitioning
if (childLogPartFunc &&
(myPartFunc->getPartitioningFunctionType()==
childLogPartFunc->getPartitioningFunctionType()) &&
myPartFunc->castToHash2PartitioningFunction() &&
maintainOrderAfterRepartitioning)
{
const Hash2PartitioningFunction *myHash2PartFunc =
myPartFunc->castToHash2PartitioningFunction();
const Hash2PartitioningFunction *childHash2PartFunc =
childLogPartFunc->castToHash2PartitioningFunction();
ValueIdList myPartKeyList = myHash2PartFunc->
getKeyColumnList();
ValueIdList childPartKeyList = childHash2PartFunc->
getKeyColumnList();
if (myPartKeyList.entries() == childPartKeyList.entries())
{
hash2RepartitioningWithSameKey_ = TRUE;
// compare the key columns and their order
for (CollIndex i = 0; i < myPartKeyList.entries(); i++)
{
if (myPartKeyList[i] != childPartKeyList[i])
hash2RepartitioningWithSameKey_ = FALSE;
if (!(myHash2PartFunc->getOriginalKeyColumnList()[i].getType() ==
childHash2PartFunc->getOriginalKeyColumnList()[i].getType()))
hash2RepartitioningWithSameKey_ = FALSE;
}
}
}
// ---------------------------------------------------------------------
// determine my location and data source (can execute in master only
// if top part function produces a single partition)
// ---------------------------------------------------------------------
PlanExecutionEnum location;
DataSourceEnum dataSource;
if (myPartFunc->getCountOfPartitions() == 1)
{
location = EXECUTE_IN_MASTER_AND_ESP;
dataSource = SOURCE_ESP_INDEPENDENT;
}
else
{
location = EXECUTE_IN_ESP;
dataSource = SOURCE_ESP_DEPENDENT;
}
if (sppOfChild->executeInDP2())
{
dataSource = SOURCE_PERSISTENT_TABLE;
}
else
{
// -----------------------------------------------------------------
// Avoid potential deadlocks for the following cases of merging of
// sorted streams in an ESP exchange:
//
// - Don't merge if there are multiple sources and multiple merging
// ESPs. Skew in the distribution could lead to a deadlock that
// would involve at least 4 ESPs.
// - Don't even merge with a single merging ESP if the source data
// is not being produced from independent sources. Again, skew
// in the merge process could cause a deadlock that would
// involve the data source, the repartitioning ESPs, and the
// single merging ESP.
// This check is kind of late, a partial check could also be done
// in the topMatch method if necessary for performance reasons.
// -----------------------------------------------------------------
if (mergesSortedStreams AND
((myPartFunc->getCountOfPartitions() > 1 AND
sppOfChild->getCountOfPartitions() > 1)
OR
sppOfChild->getDataSourceEnum() == SOURCE_ESP_DEPENDENT) AND
(!hash2RepartitioningWithSameKey_))
{
mySortKey.clear();
// If we are not going to synthesize a sort key, then we must
// make sure we don't synthesize a sort order type or a
// dp2SortOrderPartFunc.
sortOrderType = NO_SOT;
dp2SortOrderPartFunc = NULL;
}
}
// This is to prevent possible deadlock for parallel merge join.
// If child got repartitioned 1:n then Sort will be needed to satisfy
// order or arrangement. See solution 10-051219-3501
if ( (CmpCommon::getDefault(DESTROY_ORDER_AFTER_REPARTITIONING) == DF_ON) AND
(myPartFunc->getCountOfPartitions() > 1 AND
sppOfChild->getCountOfPartitions() == 1)
)
{
if(!hash2RepartitioningWithSameKey_)
{
mySortKey.clear();
// If we are not going to synthesize a sort key, then we must
// make sure we don't synthesize a sort order type or a
// dp2SortOrderPartFunc.
sortOrderType = NO_SOT;
dp2SortOrderPartFunc = NULL;
}
}
// ---------------------------------------------------------------------
// Now put it all together.
// ---------------------------------------------------------------------
PhysicalProperty *sppForMe = new (CmpCommon::statementHeap())
PhysicalProperty(mySortKey,
sortOrderType,
dp2SortOrderPartFunc,
myPartFunc,
location,
dataSource,
NULL,
NULL);
// -----------------------------------------------------------------------
// The number of cpus after an exchange boundary must be the
// total number of cpus in the system
//
// BGZ451: no change for Linux as the logic below implements
// the above statement.
// -----------------------------------------------------------------------
NADefaults &defs = ActiveSchemaDB()->getDefaults();
Lng32 smpVal = defs.getAsLong(DEF_NUM_SMP_CPUS);
Lng32 nodeVal = gpClusterInfo->numOfSMPs();
if (CURRSTMT_OPTDEFAULTS->isFakeHardware())
{
nodeVal = defs.getAsLong(DEF_NUM_NODES_IN_ACTIVE_CLUSTERS);
}
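// For illustration: with DEF_NUM_SMP_CPUS = 4 and 8 SMP nodes,
// totalCPUsInCluster = MAXOF(4 * 8, 1) = 32.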
const CostScalar totalCPUsInCluster =
MAXOF((smpVal*nodeVal),1.);
sppForMe->setCurrentCountOfCPUs((Lng32)totalCPUsInCluster.getValue());
// transfer the onePartitionAccess flag to my physical property.
// We check the flag in HashJoin::computeOperatorPriority().
if (sppOfChild->executeInDP2())
sppForMe->setAccessOnePartition(sppOfChild->getAccessOnePartition());
// remove anything that's not covered by the group attributes
sppForMe->enforceCoverageByGroupAttributes (getGroupAttr()) ;
return sppForMe;
} // Exchange::synthPhysicalPropertyFinalize()
//<pb>
// ---------------------------------------------------------------------
// A method that interprets the CONTROL QUERY SHAPE ... to decide
// whether a matching partitions plan or a replicate child1 plan
// is desired by the user.
// ---------------------------------------------------------------------
JoinForceWildCard::forcedPlanEnum Join::getParallelJoinPlanToEnforce
(const ReqdPhysicalProperty* const rppForMe) const
{
if (rppForMe AND rppForMe->getMustMatch())
{
OperatorTypeEnum op = rppForMe->getMustMatch()->getOperatorType();
JoinForceWildCard *wc;
// check whether a join is being forced, this is indicated
// by a bunch of opcodes representing different shapes in
// CONTROL QUERY SHAPE
switch (op)
{
case REL_FORCE_JOIN:
case REL_FORCE_NESTED_JOIN:
case REL_FORCE_MERGE_JOIN:
case REL_FORCE_HASH_JOIN:
case REL_FORCE_ORDERED_HASH_JOIN:
case REL_FORCE_HYBRID_HASH_JOIN:
case REL_FORCE_ORDERED_CROSS_PRODUCT:
wc = (JoinForceWildCard *) rppForMe->getMustMatch();
return wc->getPlan();
default:
// something else is being forced
return JoinForceWildCard::ANY_PLAN;
}
}
else
return JoinForceWildCard::ANY_PLAN;
} // Join::getParallelJoinPlanToEnforce()
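// -----------------------------------------------------------------------
// Join::getParallelControlSettings()
// Determine the settings that control ESP parallelism for this join:
// the number of ESPs to use (possibly forced via CONTROL QUERY SHAPE),
// the allowed deviation from that number, and whether the number of
// ESPs was forced. Falls back to RelExpr::getParallelControlSettings()
// when CQS does not force a number of ESPs.
// -----------------------------------------------------------------------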
DefaultToken Join::getParallelControlSettings (
const ReqdPhysicalProperty* const rppForMe, /*IN*/
Lng32& numOfESPs, /*OUT*/
float& allowedDeviation, /*OUT*/
NABoolean& numOfESPsForced /*OUT*/) const
{
DefaultToken result;
Lng32 forcedNumOfESPs = ANY_NUMBER_OF_PARTITIONS;
// Check for the number of ESPs being forced.
if (rppForMe->getMustMatch())
{
JoinForceWildCard* mustMatch =
(JoinForceWildCard *)rppForMe->getMustMatch();
forcedNumOfESPs = mustMatch->getNumOfEsps();
}
if (forcedNumOfESPs > 0)
{
numOfESPs = forcedNumOfESPs;
// If the number of ESPs is being forced, then we should always
// attempt ESP parallelism, no matter what, so no need to do
// any more checks. This is true even if only one ESP is being
// forced - in this case, we need to attempt ESP parallelism
// with exactly one ESP, in order to guarantee we get only one ESP.
numOfESPsForced = TRUE;
// If the number of ESPs are being forced, then we must use
// exactly that many, so the allowable deviation is 0.
allowedDeviation = 0.0;
result = DF_ON;
}
else // Number of ESPs is not being forced.
{
result = RelExpr::getParallelControlSettings(rppForMe,
numOfESPs,
allowedDeviation,
numOfESPsForced);
} // end if the number of ESPs is not being forced
return result;
} // Join::getParallelControlSettings()
// -----------------------------------------------------------------------
// Split any sort or arrangement requirements between the left and
// right children. Add those that are for the left child to the
// passed in requirement generator object, and return those that
// are for the right child.
// -----------------------------------------------------------------------
void Join::splitSortReqsForLeftChild(
const ReqdPhysicalProperty* rppForMe,
RequirementGenerator &rg,
ValueIdList& reqdOrder1,
ValueIdSet& reqdArr1) const
{
CMPASSERT(isNestedJoin() OR
(getOperatorType() == REL_HYBRID_HASH_JOIN AND
((HashJoin *) this)->isOrderedCrossProduct()));
// If there is a required sort order and/or arrangement, then
// split off any part of the requirement that is for the
// right child and only pass on the portion of the requirement
// that is for the left child.
if (rppForMe->getSortKey() AND rppForMe->getSortKey()->entries() > 0)
{
ValueIdList reqdOrder0;
if (splitOrderReq(*(rppForMe->getSortKey()),reqdOrder0,reqdOrder1))
{
rg.removeSortKey();
// Sort Order type and dp2SortOrderPartReq might have been
// removed by the call above, so get them again from the rpp
SortOrderTypeEnum childSortOrderTypeReq =
rppForMe->getSortOrderTypeReq();
PartitioningRequirement* childDp2SortOrderPartReq =
rppForMe->getDp2SortOrderPartReq();
rg.addSortKey(reqdOrder0,
childSortOrderTypeReq,
childDp2SortOrderPartReq);
}
} // end if required order
if (rppForMe->getArrangedCols() AND
rppForMe->getArrangedCols()->entries() > 0)
{
ValueIdSet reqdArr0;
if (splitArrangementReq(*(rppForMe->getArrangedCols()),
reqdArr0,reqdArr1))
{
rg.removeArrangement();
// Sort Order type and dp2SortOrderPartReq might have been
// removed by the call above, so get them again from the rpp
SortOrderTypeEnum childSortOrderTypeReq =
rppForMe->getSortOrderTypeReq();
PartitioningRequirement* childDp2SortOrderPartReq =
rppForMe->getDp2SortOrderPartReq();
rg.addArrangement(reqdArr0,
childSortOrderTypeReq,
childDp2SortOrderPartReq);
}
} // end if required arrangement
} // Join::splitSortReqsForLeftChild
// -----------------------------------------------------------------------
// Split any sort or arrangement requirements between the left and
// right children. Add those that are for the right child to the
// passed in requirement generator object, and return those that
// are for the left child.
// -----------------------------------------------------------------------
void Join::splitSortReqsForRightChild(
const ReqdPhysicalProperty* rppForMe,
RequirementGenerator &rg,
ValueIdList& reqdOrder0,
ValueIdSet& reqdArr0) const
{
CMPASSERT(isNestedJoin() OR
(getOperatorType() == REL_HYBRID_HASH_JOIN AND
((HashJoin *) this)->isOrderedCrossProduct()));
// If there is a required sort order and/or arrangement, then
// split off any part of the requirement that is for the
// left child and only pass on the portion of the requirement
// that is for the right child.
if (rppForMe->getSortKey() AND
rppForMe->getSortKey()->entries() > 0)
{
ValueIdList reqdOrder1;
if (splitOrderReq(*(rppForMe->getSortKey()),
reqdOrder0,reqdOrder1))
{
// Get the sort order type again from the rpp
SortOrderTypeEnum childSortOrderTypeReq =
rppForMe->getSortOrderTypeReq();
// If the sort order type requirement is DP2 or
// DP2_OR_ESP_NO_SORT, then convert this for the right
// child to ESP_NO_SORT. We only pass the dp2SortOrderPartReq
// to the left child.
if ((childSortOrderTypeReq == DP2_SOT) OR
(childSortOrderTypeReq == DP2_OR_ESP_NO_SORT_SOT))
{
childSortOrderTypeReq = ESP_NO_SORT_SOT;
}
rg.addSortKey(reqdOrder1,childSortOrderTypeReq,NULL);
}
} // end if required order
if (rppForMe->getArrangedCols() AND
rppForMe->getArrangedCols()->entries() > 0)
{
ValueIdSet reqdArr1;
if (splitArrangementReq(*(rppForMe->getArrangedCols()),
reqdArr0,reqdArr1))
{
// Get the sort order type again from the rpp
SortOrderTypeEnum childSortOrderTypeReq =
rppForMe->getSortOrderTypeReq();
// If the sort order type requirement is DP2 or
// DP2_OR_ESP_NO_SORT, then convert this for the right
// child to ESP_NO_SORT. We only pass the dp2SortOrderPartReq
// to the left child.
if ((childSortOrderTypeReq == DP2_SOT) OR
(childSortOrderTypeReq == DP2_OR_ESP_NO_SORT_SOT))
{
childSortOrderTypeReq = ESP_NO_SORT_SOT;
}
rg.addArrangement(reqdArr1,childSortOrderTypeReq,NULL);
}
} // end if required arrangement
} // Join::splitSortReqsForRightChild
//<pb>
// -----------------------------------------------------------------------
// member functions for class NestedJoin
// -----------------------------------------------------------------------
PlanWorkSpace * NestedJoin::allocateWorkSpace() const
{
return new(CmpCommon::statementHeap()) NestedJoinPlanWorkSpace(getArity());
}
// -----------------------------------------------------------------------
// NestedJoin::costMethod()
// Obtain a pointer to a CostMethod object providing access
// to the cost estimation functions for nodes of this class.
// -----------------------------------------------------------------------
CostMethod*
NestedJoin::costMethod() const
{
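// Lazily allocate a single thread-private CostMethodNestedJoin instance
// and reuse it for subsequent calls.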
static THREAD_P CostMethodNestedJoin *m = NULL;
if (m == NULL)
m = new (GetCliGlobals()->exCollHeap()) CostMethodNestedJoin();
return m;
}
// -----------------------------------------------------------------------
// Generate the partitioning requirements for the left child of a
// preferred probing order nested join plan, or a NJ that is for a
// write operation. In both cases, we need to base the partitioning
// requirement for the left child on the partitioning of the right
// child to ensure that we get a Type-1 join. If the requirements could
// not be generated because the user is attempting to force something that
// is not possible, the method returns FALSE. Otherwise, it returns TRUE.
// -----------------------------------------------------------------------
NABoolean NestedJoin::genLeftChildPartReq(
Context* myContext, // IN
PlanWorkSpace* pws, // IN
const PartitioningFunction* physicalPartFunc, // IN
PartitioningRequirement* &logicalPartReq) // OUT
{
const ReqdPhysicalProperty* rppForMe =
myContext->getReqdPhysicalProperty();
PartitioningRequirement* partReqForMe =
rppForMe->getPartitioningRequirement();
Lng32 childNumPartsRequirement = ANY_NUMBER_OF_PARTITIONS;
float childNumPartsAllowedDeviation = 0.0;
NABoolean numOfESPsForced = FALSE;
NABoolean createPartReqForChild = TRUE;
// Get some values from the defaults table
NABoolean baseNumPartsOnAP = FALSE;
if (CmpCommon::getDefault(BASE_NUM_PAS_ON_ACTIVE_PARTS) == DF_ON)
baseNumPartsOnAP = TRUE;
NABoolean isUpdateOfHBaseTable = updateTableDesc() &&
updateTableDesc()->getNATable()->isHbaseTable();
//adjust plan for hbase bulk load:
// -use range partitioning for salted tables
// -use hash on primary key for non salted tables
NABoolean adjustPFForTrafBulkLoadPrep = isUpdateOfHBaseTable &&
getIsForTrafLoadPrep() &&
CmpCommon::getDefault(TRAF_LOAD_PREP_ADJUST_PART_FUNC) == DF_ON ;//&&
//!updateTableDesc()->getClusteringIndex()->getNAFileSet()->hasSyskey();
if (adjustPFForTrafBulkLoadPrep)
{
baseNumPartsOnAP = TRUE;
}
Lng32 numActivePartitions = 1;
if (baseNumPartsOnAP)
{
if ((physicalPartFunc == NULL) OR
(physicalPartFunc->castToRangePartitioningFunction() == NULL))
baseNumPartsOnAP = FALSE;
else
{
CostScalar activePartitions =
((NodeMap *)(physicalPartFunc->getNodeMap()))->getNumActivePartitions();
numActivePartitions = (Lng32)activePartitions.getValue();
}
}
NABoolean okToAttemptESPParallelismFlag =
okToAttemptESPParallelism(myContext,
pws,
childNumPartsRequirement,
childNumPartsAllowedDeviation,
numOfESPsForced);
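// okToAttemptESPParallelism() also returns, via its reference parameters,
// the suggested number of ESPs, the allowed deviation from that number,
// and whether the number of ESPs was forced.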
ValueIdList pkList;
// notice if parent requires a particular number of partitions
if ((partReqForMe != NULL) AND
(partReqForMe->getCountOfPartitions() != ANY_NUMBER_OF_PARTITIONS) AND
!adjustPFForTrafBulkLoadPrep)
{
if (NOT numOfESPsForced)
childNumPartsRequirement = partReqForMe->getCountOfPartitions();
// don't ask for more ESPs than there are target partitions,
// the ESPs must be a grouping of that partitioning scheme. Also,
// can't change the number of partitions at all if we are in DP2.
if (physicalPartFunc == NULL)
{
// If the user was forcing the number of ESPs and it
// was not 1, then we cannot honor the request and so we
// must give up now.
if (numOfESPsForced AND (childNumPartsRequirement != 1))
return FALSE;
else
childNumPartsRequirement = 1;
}
else if ((childNumPartsRequirement >
physicalPartFunc->getCountOfPartitions()) OR
rppForMe->executeInDP2())
{
// If the user was forcing the number of ESPs, then we
// cannot honor the request and so we must give up now.
if (numOfESPsForced)
return FALSE;
childNumPartsRequirement = physicalPartFunc->getCountOfPartitions();
}
// If childNumPartsRequirement no longer satisfies the original requirement
// then give up now.
if (NOT
((childNumPartsRequirement == partReqForMe->getCountOfPartitions()) OR
((partReqForMe->castToRequireApproximatelyNPartitions() != NULL) AND
partReqForMe->castToRequireApproximatelyNPartitions()->
isPartitionCountWithinRange(childNumPartsRequirement))))
return FALSE;
}
else if (okToAttemptESPParallelismFlag)
{
// don't ask for more ESPs than there are target partitions,
// the ESPs must be a grouping of that partitioning scheme
if (isUpdateOfHBaseTable)
{
if (adjustPFForTrafBulkLoadPrep )
{
if (baseNumPartsOnAP && numActivePartitions >1 )
{
// This case bases the number of partitions on the active partitions and
// handles partitioned tables. The condition on syskey was removed based
// on a comment from a previous delivery.
childNumPartsRequirement = numActivePartitions;
}
else if (numActivePartitions == 1 && childNumPartsRequirement > 1)
{
// If the table is not partitioned, we use the clustering index to produce
// a hash2 partitioning function based on the clustering key. A useful
// property here is that rows with the same keys will go to the same ESP,
// which can be exploited for deduping. The condition on syskey was removed
// based on a comment from a previous delivery.
pkList.insert(updateTableDesc()->getClusteringIndex()->getClusteringKeyCols());
physicalPartFunc = new(CmpCommon::statementHeap())
Hash2PartitioningFunction(pkList, pkList,childNumPartsRequirement);
createPartReqForChild = TRUE;
}
else
{
// If neither of the two cases applies, use the default random partitioning.
ValueIdSet partKey;
ItemExpr *randNum =
new(CmpCommon::statementHeap()) RandomNum(NULL, TRUE);
randNum->synthTypeAndValueId();
partKey.insert(randNum->getValueId());
physicalPartFunc = new(CmpCommon::statementHeap())
Hash2PartitioningFunction (partKey, partKey, childNumPartsRequirement);
}
}
else
// HBase tables can be updated in any way, no restrictions on
// parallelism
if (numOfESPsForced && physicalPartFunc)
childNumPartsRequirement = numOfESPsForced;
else
createPartReqForChild = FALSE;
}
else if (physicalPartFunc == NULL)
{
// If the user was forcing the number of ESPs and it
// was not 1, then we cannot honor the request and so we
// must give up now.
if (numOfESPsForced AND (childNumPartsRequirement != 1))
return FALSE;
else
childNumPartsRequirement = 1;
}
else if ((baseNumPartsOnAP AND
(childNumPartsRequirement > numActivePartitions) AND
NOT numOfESPsForced) OR
(childNumPartsRequirement >
physicalPartFunc->getCountOfPartitions()))
{
// If the user was forcing the number of ESPs, then we
// cannot honor the request and so we must give up now.
if (numOfESPsForced)
return FALSE;
else
{
// The preferred level of parallelism is more than the number
// of active/physical partitions, which is undesirable/not allowed.
// Lower the level of parallelism to the number of active
// partitions/physical partitions.
if (baseNumPartsOnAP)
childNumPartsRequirement = numActivePartitions;
else
childNumPartsRequirement = physicalPartFunc->getCountOfPartitions();
}
}
} // end if ok to try a parallel plan
else
{
if ( rppForMe->executeInDP2() AND (physicalPartFunc != NULL) )
{
// If we are in DP2, then we cannot change the number of partitions
// at all.
childNumPartsRequirement = physicalPartFunc->getCountOfPartitions();
} else
childNumPartsRequirement = 1;
}
// now create the partitioning requirement to match the target table
if (createPartReqForChild == FALSE)
logicalPartReq = NULL;
else if (childNumPartsRequirement == 1)
{
logicalPartReq = new(CmpCommon::statementHeap())
RequireExactlyOnePartition();
}
else
{
// Take the target table's partitioning function and scale it down
// to the appropriate size.
PartitioningFunction *cpf = physicalPartFunc->copy();
// If we haven't estimated the active parts yet, all parts will be active.
// So, both grouping distribution algorithms are the same in this case.
Lng32 scaleNumPartReq = childNumPartsRequirement;
cpf = cpf->scaleNumberOfPartitions(scaleNumPartReq);
// Was scale able to do its job?
if ((NOT cpf->isAGroupingOf(*physicalPartFunc)) &&
(scaleNumPartReq != childNumPartsRequirement)) // No
{
// If scale didn't work (it should have), our only choice is 1 part.
logicalPartReq = new(CmpCommon::statementHeap())
RequireExactlyOnePartition();
}
else // Yes
{
logicalPartReq = cpf->makePartitioningRequirement();
}
} //end if childNumPartsRequirement == 1
return TRUE;
} // NestedJoin::genLeftChildPartReq()
// -----------------------------------------------------------------------
// Generate a preferred probing order sort requirement for the left child
// of a nested join that is for a write operation - i.e. Insert, Update,
// or Delete. The generated sort requirement is returned.
// -----------------------------------------------------------------------
ValueIdList NestedJoin::genWriteOpLeftChildSortReq()
{
ValueIdList reqdOrder;
// Only generate a preferred probing order requirement if there is
// no required order stored in the join node. This is because the
// first nested join plan already required this order, and so there
// is no point in doing it again.
if (getReqdOrder().isEmpty())
{
NABoolean requireOrderedWrites;
// QSTUFF
if (NOT (getGroupAttr()->isStream() ||
getGroupAttr()->isEmbeddedUpdateOrDelete()))
// QSTUFF
requireOrderedWrites = TRUE;
else
requireOrderedWrites = FALSE;
// Get some of the logical property information so that we can
// see if it is worth sorting the writes.
// get # of probes estimate from the left child
CostScalar noOfProbes =
child(0).getGroupAttr()->getResultCardinalityForEmptyInput();
// We do not sort a small number of inserts or user provided data
// in rowsets. see Genesis case 10-021021-2615 for details on why
// rowset data is not sorted.
// We always want to sort if we need to avoid a potential halloween
// problem.
if (!(avoidHalloweenR2() OR (getHalloweenForceSort() == FORCED)
OR isTSJForSideTreeInsert() == TRUE
OR enableTransformToSTI() == TRUE
) AND
(noOfProbes < 100 OR child(0).getGroupAttr()->getNumBaseTables() == 0))
{
// This is used to prevent compiler failure under control
// query shape with sort enforcement, because in this case
// we want to keep requireOrderedWrites as TRUE.
// It is better to check the CQS on this join (via getMustMatch())
// and its left child instead of only checking the CQS requirement
// on the root. This will allow us to focus only on CQS statements
// that actually force this join left child.
NABoolean underCQS =
ActiveControlDB()->getRequiredShape() &&
ActiveControlDB()->getRequiredShape()->getShape() &&
NOT ActiveControlDB()->getRequiredShape()->getShape()->isCutOp();
if (NOT underCQS)
requireOrderedWrites = FALSE;
}
// for bulk load we require that data is sorted
if (updateTableDesc() && updateTableDesc()->getNATable()->isHbaseTable() &&
getIsForTrafLoadPrep() && !requireOrderedWrites)
requireOrderedWrites = TRUE;
if (requireOrderedWrites)
{
// require the select data to be sorted by the clustering key
// of the target table
updateSelectValueIdMap()->rewriteValueIdListDown(
updateTableDesc()->getClusteringIndex()->getOrderOfKeyValues(),
reqdOrder);
// Fix 10-100412-9445.
// Truncate the trailing columns of reqdOrder starting at the
// syskey column (if exist). Anything beyond the syskey should
// not be sorted. Note that the order of the sortkey of the inner table
// and the order of the mapped required sort order on the outer source
// are the same.
Int32 sysKeyPosition = updateTableDesc()->getClusteringIndex()
->getNAFileSet()->getSysKeyPosition();
if ( sysKeyPosition >= 0 ) {
// Truncate anything starting from the sysKey.
for (Int32 i=reqdOrder.entries()-1; i>= sysKeyPosition; i--) {
reqdOrder.removeAt(i);
}
}
// Remove from the required order any columns that are equal to
// constants or input values.
reqdOrder.removeCoveredExprs(getGroupAttr()->getCharacteristicInputs());
// Remove from the required order any columns that cannot be provided
// by left child outputs.
reqdOrder.removeUnCoveredExprs(child(0).getGroupAttr()->getCharacteristicOutputs());
// fix for case 10-080826-7916, soln 10-080826-5421
// make sure reqdOrder has some column(s) to sort on
// also soln 10-091002-5072. For Halloween queries the
// sort is used as a blocking operator and therefore
// it does not matter (for correctness) which column is used for sorting as long
// as the sort operator is present. If reqdOrder is empty then a sort node
// will not be present as an empty sortOrder is always satisfied.
// For performance it does help if the sort order is the same as
// clustering key of target side. If we get into the following statement
// then we know that the source side is not able to provide the target clustering
// key cols. In all known cases this is because syskey is the only clustering key,
// however this code has been written generally, without reference to syskey.
if (reqdOrder.isEmpty() AND
(avoidHalloweenR2() OR
(getHalloweenForceSort() == FORCED)))
{
const ValueIdSet& child0Outputs =
child(0).getGroupAttr()->getCharacteristicOutputs();
for (ValueId exprId = child0Outputs.init();
child0Outputs.next(exprId);
child0Outputs.advance(exprId))
{
if (!(exprId.getItemExpr()->doesExprEvaluateToConstant(FALSE,TRUE)))
{
reqdOrder.insert(exprId); // do not insert constants. If all outputs are constant
break; // then there is no need to sort/block
}
}
}
} // end if reqdOrderedWrites
} // end if the user did not specify an ORDER BY clause
// fix for soln 10-090826-4155
// make sure reqdOrder has some column(s) to sort on
if (reqdOrder.isEmpty() AND
updateTableDesc()->getClusteringIndex()->
getNAFileSet()->hasSyskey() AND
(avoidHalloweenR2() OR
(getHalloweenForceSort() == FORCED) OR
(getHalloweenForceSort() == NOT_FORCED)))
{
reqdOrder = ValueIdList
(child(0).getGroupAttr()->getCharacteristicOutputs());
}
return reqdOrder; // Will be empty if no required order
} // NestedJoin::genWriteOpLeftChildSortReq()
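// -----------------------------------------------------------------------
// isOneToOneMatchHashPartitionJoinPossible()
// Check whether a nested join can match the outer child's hash
// partitioning one-to-one (or as a grouping of the inner partitioning,
// when NESTED_JOINS_OCR_GROUPING is ON) with the partitioning of the
// inner table's clustering index. Returns the partitioning function to
// require of the inner child, or NULL if no such match is possible.
// -----------------------------------------------------------------------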
PartitioningFunction *
isOneToOneMatchHashPartitionJoinPossible(NestedJoin* nj,
const PartitioningFunction *leftPartFunc, const ValueIdMap &map)
{
if (CmpCommon::getDefault(COMP_BOOL_94) == DF_ON) {
return NULL;
}
const PartitioningFunction *origLeftHashPartFunc = leftPartFunc;
if (leftPartFunc->castToHashDistPartitioningFunction() != NULL) {
// If dealing with HashDist, then the original number of partitions
// must match the current number of partitions.
const TableHashPartitioningFunction *leftHashPartFunc;
leftHashPartFunc = leftPartFunc->castToHashDistPartitioningFunction();
if (leftHashPartFunc->getCountOfOrigHashPartitions() !=
leftHashPartFunc->getCountOfPartitions()) {
return NULL;
}
} else {
// If it is a skewed data part func, get the partition func for the
// unskewed data.
if ( leftPartFunc->castToSkewedDataPartitioningFunction() ) {
leftPartFunc = leftPartFunc->
castToSkewedDataPartitioningFunction()->
getPartialPartitioningFunction();
}
// Check whether this is hash2 partitioning. If not, return NULL.
if ( leftPartFunc->castToHash2PartitioningFunction() == NULL )
return NULL;
}
PartitioningFunction *mappedLeftPartFunc = NULL;
ValueIdMap mapCopy(map);
// remap the left part func to right
mappedLeftPartFunc = leftPartFunc->copyAndRemap(mapCopy, FALSE);
GroupAttributes *ga = nj->child(1).getGroupAttr();
const SET(IndexDesc *) &availIndexes = ga->getAvailableBtreeIndexes();
for (CollIndex i = 0; i < availIndexes.entries(); i++) {
IndexDesc *iDesc = availIndexes[i];
if (iDesc->isClusteringIndex()) {
PartitioningFunction *rightPartFunc = iDesc->getPartitioningFunction();
PartitioningFunction *result = NULL;
NABoolean allowGrouping = (CmpCommon::getDefault(NESTED_JOINS_OCR_GROUPING) == DF_ON);
if (rightPartFunc AND
(
(
(allowGrouping
? leftPartFunc->isAGroupingOf(*rightPartFunc)
: (leftPartFunc->comparePartFuncToFunc(*rightPartFunc) == SAME))
)
OR
(
mappedLeftPartFunc AND
(allowGrouping
? mappedLeftPartFunc->isAGroupingOf(*rightPartFunc)
: (mappedLeftPartFunc->comparePartFuncToFunc(*rightPartFunc) == SAME))
)
)
) {
if ( origLeftHashPartFunc->castToSkewedDataPartitioningFunction() ) {
// Require the inner to be rep-n so that both the non-skewed and
// skewed rows from the outer table can be distributed to the
// right partitions. Note that the non-skewed and the skewed can
// reach different partitions, hence the need for a rep-n part func.
result = new (CmpCommon::statementHeap() )
ReplicateNoBroadcastPartitioningFunction
(rightPartFunc->getCountOfPartitions());
} else {
result = mappedLeftPartFunc;
}
}
return result;
}
}
// Should not be reachable
return NULL;
}
// -----------------------------------------------------------------------
// Generate the requirements for the right child of a nested join.
// Returns the generated requirements if they were feasible, otherwise
// NULL is returned.
// sppForChild: Physical property of the left child
// rppForMe: Required properties of the nested join
// -----------------------------------------------------------------------
ReqdPhysicalProperty* NestedJoin::genRightChildReqs(
const PhysicalProperty* sppForChild, // IN
const ReqdPhysicalProperty* rppForMe, // IN
NABoolean avoidNSquareOpens)
{
ReqdPhysicalProperty* rppForChild = NULL;
// ---------------------------------------------------------------
// spp should have been synthesized for child's optimal plan.
// ---------------------------------------------------------------
CMPASSERT(sppForChild != NULL);
PartitioningFunction* childPartFunc =
sppForChild->getPartitioningFunction();
PartitioningRequirement* partReqForChild;
RequirementGenerator rg (child(1),rppForMe);
// Remove any parent requirements for the sort key or arrangement,
// since most likely these only refer to the left child. But,
// if there is a portion that refers to the right child, then
// pass this along.
if ((rppForMe->getSortKey() AND
(rppForMe->getSortKey()->entries() > 0)) OR
(rppForMe->getArrangedCols() AND
(rppForMe->getArrangedCols()->entries() > 0)))
{
rg.removeSortKey();
rg.removeArrangement();
if (updateTableDesc() == NULL)
{
// If there is a required sort order and/or arrangement, then
// split off any part of the requirement that is for the
// left child and only pass on the portion of the requirement
// that is for the right child.
ValueIdList reqdOrder0;
ValueIdSet reqdArr0;
splitSortReqsForRightChild(rppForMe, rg, reqdOrder0, reqdArr0);
} // end if NJ is not for a write operation
} // end if context requires order
// If the partitioning function of the left child was only
// one partition, then pass a requirement for that to the right,
// since no parallelism is possible. Otherwise, pass a
// replication function without broadcast to the right child.
//
// Note that for write operations, we are really doing a Type-1
// join and so we don't need to pass a replication function -
// we should just pass the left child part func as the part
// requirement to the right. But, this would allow a
// repartitioning plan to be considered. This should never be
// chosen, since repartitioning is not necessary, but it is
// too risky to allow the optimizer to even consider it.
//
PartitioningFunction *npf = NULL;
if (childPartFunc->isASinglePartitionPartitioningFunction()) {
partReqForChild = childPartFunc->makePartitioningRequirement();
} else
if (rppForMe->executeInDP2()) {
partReqForChild = childPartFunc->makePartitioningRequirement();
} else {
if ((npf=isOneToOneMatchHashPartitionJoinPossible(this, childPartFunc, getOriginalEquiJoinExpressions())) &&
npf->getCountOfPartitions() <= rppForMe->getCountOfPipelines()
) {
partReqForChild = npf->makePartitioningRequirement();
} else {
// Use the node map of the left childs partitioning function.
// (Note: this NestedJoin will have the same partFunc and Nodemap as the left child)
NABoolean useNodeMap = (CmpCommon::getDefault(COMP_BOOL_82) == DF_ON);
PartitioningFunction *mappedChildPartFunc = childPartFunc;
if (updateTableDesc() && isTSJForWrite()) {
mappedChildPartFunc =
childPartFunc->copyAndRemap(*updateSelectValueIdMap(),
TRUE);
}
if ( !avoidNSquareOpens || updateTableDesc() )
partReqForChild = new (CmpCommon::statementHeap() )
RequireReplicateNoBroadcast(mappedChildPartFunc, useNodeMap);
else
return NULL;
}
}
// Add a push-down requirement based on left child's push
// down property.
if ( sppForChild->getPushDownProperty() )
rg.addPushDownRequirement(
sppForChild->getPushDownProperty()->makeRequirement());
// Remove any parent partitioning requirement, since only the left
// child must satisfy it.
rg.removeAllPartitioningRequirements();
// Now, add in the Join's partitioning requirement for the
// right child.
// If this nested join is for a write operation and it is in DP2,
// then we don't need to pass the source partitioning function as a
// requirement to the target, because we have already
// guaranteed that the two are partitioned the same by passing
// the target partitioning function to the source as a
// requirement. Furthermore, if we try to pass it, it will fail
// because the partitioning key columns will not be covered
// by the target table characteristic outputs, since a target
// table produces no values, i.e. it has no char. outputs.
//
if (NOT( updateTableDesc() AND rppForMe->executeInDP2()))
{
rg.addPartRequirement(partReqForChild);
// we add a requirement that ESP-Exchanges are not allowed
// under the right side of a nested join, except when cb_106 is on or
// (cb_102 is on and rowsets are involved)
if ( (CmpCommon::getDefault(COMP_BOOL_106) == DF_OFF) &&
NOT((CmpCommon::getDefault(COMP_BOOL_102) == DF_ON) && isRowsetIterator()) )
rg.addNoEspExchangeRequirement();
}
// Produce the requirements and make sure they are feasible.
if (rg.checkFeasibility())
{
// Produce the required physical properties.
rppForChild = rg.produceRequirement();
} // end if the requirements were feasible
return rppForChild;
} // NestedJoin::genRightChildReqs()
// -----------------------------------------------------------------------
// Generate an input physical property object to contain the sort
// order of the left child and related group attributes and
// partitioning functions.
// -----------------------------------------------------------------------
InputPhysicalProperty* NestedJoin::generateIpp(
const PhysicalProperty* sppForChild,
NABoolean isPlan0)
{
InputPhysicalProperty* ipp = NULL;
// ----------------------------------------------------------------
// Get the sort order of my left child and pass this information to
// the context for optimizing my right child. (Costing the inner
// table access has a dependency on the order from the outer table).
// If this is a write op, use the map to map the left child sort
// key values to their right child equivalents. We can't do this
// for read because there is no map for read. So, for read, we will
// have to do some complicated processing in FileScanOptimizer to
// determine if the orders are equivalent.
// ----------------------------------------------------------------
ValueIdList mappedLeftChildOrder(sppForChild->getSortKey());
ValueIdSet mappedCharOutputs(
child(0).getGroupAttr()->getCharacteristicOutputs());
PartitioningFunction* mappedChildPartFunc =
sppForChild->getPartitioningFunction();
PartitioningFunction* mappedDp2SortOrderPartFunc =
sppForChild->getDp2SortOrderPartFunc();
if (updateTableDesc() != NULL AND
NOT mappedLeftChildOrder.isEmpty())
{
// The control comes here for an Update operation
// map the char outputs from left to right
mappedCharOutputs.clear();
// map the sort key from left to right
mappedLeftChildOrder.clear();
NABoolean useListInsteadOfSet = FALSE;
ValueIdSet childCharOutput = child(0).getGroupAttr()->getCharacteristicOutputs();
ValueIdList childSortKey = sppForChild->getSortKey();
// We go through the new logic, only if there are some columns in the bottom list
// which are duplicated in the characteristics output of the child. An example of this
// is: Insert * into t1 from (select a, b, a from t2); Here column 'a' of t2
// is being matched to two columns of t1. Characteristics output of t2 would be
// only 'a' and 'b'. But we want to have two occurrences of 'a' in order to match to
// the three columns of t1. Hence we work with the list, instead of columns.
// For all other cases we continue to work with the set.
// Sol: 10-040416-5166.
// First thing we do is to eliminate if this nestedJoin is due to create index
// for this we check if the right child of the nestedJoin is an index_table
// or a normal_table
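// A hypothetical illustration of the duplicate check used below: for
// "insert * into t1 from (select a, b, a from t2)" the bottom values
// form the list (a, b, a) with 3 entries, while the corresponding set
// holds only {a, b} with 2 entries; the entry counts differ, which is
// exactly the trigger for the list-based logic.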
CascadesGroup* group1 = (*CURRSTMT_OPTGLOBALS->memo)[child(1).getGroupId()];
GenericUpdate* rightChild = (GenericUpdate *) group1->getFirstLogExpr();
if (rightChild->getTableName().getSpecialType() == ExtendedQualName::NORMAL_TABLE )
{
// In this case, we have columns from Scan duplicated
// Hence the bottom values in updateSelectValueIdMap have some columns
// appearing twice. When writing the top values in the updateSelectValueIdMap
// if we use characteristicsOutput then these duplicate values are removed.
// Hence use the bottom values that are covered by the child to map the
// top values of the updateSelectValueIdMap. Use list object to allow duplicates.
ValueIdList bottomValues = updateSelectValueIdMap()->getBottomValues();
ValueIdSet bottomValuesSet(bottomValues);
if (bottomValuesSet.entries() != bottomValues.entries())
{
ValueIdList mappedCharOPs;
// from here get all the bottom values that appear in the characteristic
// output of the child.
ValueIdList bottomOutputValues = bottomValues;
bottomOutputValues.findCommonElements(childCharOutput );
bottomValuesSet = bottomOutputValues;
// see if the characteristics output contained some expressions. In that
// case, we cannot use bottom values instead of outputs.
if (bottomValuesSet == childCharOutput)
{
// so we saw that Characteristics Outputs were duplicated.
// Check if the bottom values have key columns duplicated.
// If not, then we cannot use bottom lists
// find common elements between bottom values and the sort key of the Child
// we shall use bottomValues only if some columns of the
// sort key are duplicated in the bottom values, and the duplicate columns
// have not been handled in the sortkey. I have found cases where the problem of duplicate
// columns has been handled in various ways: 1. by assigning a different valueId to the
// same column, 2. by appending _XX to the column name (for populate indexes), 3. or by retaining the
// duplicate columns in the key. Since I do not understand all the cases that can arise,
// and at this phase of the release we do not have time to clean up everything, I will go with
// the new logic only if there are duplicate columns in the select list that
// have not been handled till now. Sol: 10-040527-6486
ValueIdSet keySet(childSortKey);
// Proceed only if there are no duplicates in childSortKey. Else, if
// duplicate columns exist in the sortKey, then use those.
if (keySet.entries() == childSortKey.entries())
{
bottomValues = updateSelectValueIdMap()->getBottomValues();
ValueIdList keyFromBottomList = bottomValues;
keyFromBottomList.findCommonElements(childSortKey);
bottomValuesSet = keyFromBottomList;
if (
// if there are duplicate columns in the select list
(keyFromBottomList.entries() != bottomValuesSet.entries()) &&
// and these duplicate columns have single entry in the childSortKey
// remember we are here only if there are no duplicates in the
// sortKey
(keyFromBottomList.entries() > childSortKey.entries() )
)
{
useListInsteadOfSet = TRUE;
updateSelectValueIdMap()->rewriteValueIdListUpWithIndex(
mappedCharOPs,
bottomOutputValues );
mappedCharOutputs = mappedCharOPs;
updateSelectValueIdMap()->rewriteValueIdListUpWithIndex(
mappedLeftChildOrder,
keyFromBottomList);
// If the key of the child contained some extra or duplicate columns, then
// some redundant columns of the parent could be picked up.
// From whatever topValues were matched to the sortKey of the child,
// select the ones that are also in the order key of the target.
ValueIdList targetSortKey = updateTableDesc()->getClusteringIndex()->getOrderOfKeyValues();
ValueIdList mappedLeftChildTmp = mappedLeftChildOrder;
mappedLeftChildOrder =
mappedLeftChildTmp.findCommonElementsFromList(targetSortKey);
}
}
}
}
}
if (!useListInsteadOfSet)
{
// No duplicate entries, hence use the old logic for the set:
// rewrite the top values up from the child's
// characteristic outputs
updateSelectValueIdMap()->rewriteValueIdSetUp(
mappedCharOutputs,
childCharOutput);
// map the sort key from left to right
updateSelectValueIdMap()->rewriteValueIdListUp(
mappedLeftChildOrder,
sppForChild->getSortKey());
}
// map the partitioning function from the left to the right
mappedChildPartFunc =
mappedChildPartFunc->copyAndRemap(*updateSelectValueIdMap(),
TRUE);
if (mappedDp2SortOrderPartFunc != NULL)
{
// map the Dp2 sort order part func from the left to the right
mappedDp2SortOrderPartFunc =
mappedDp2SortOrderPartFunc->copyAndRemap(
*updateSelectValueIdMap(),
TRUE);
}
} // end if this is a base table write op
if (NOT mappedLeftChildOrder.isEmpty() &&
NOT isPlan0)
{
ValueIdList * leftChildOrder =
new(CmpCommon::statementHeap())
ValueIdList(mappedLeftChildOrder);
ipp = new(CmpCommon::statementHeap())
InputPhysicalProperty(
mappedCharOutputs,
leftChildOrder,
mappedChildPartFunc,
mappedDp2SortOrderPartFunc,
FALSE,
sppForChild->getexplodedOcbJoinProperty());
}
else
{
if (NOT isPlan0)
return NULL;
ipp = new(CmpCommon::statementHeap())
InputPhysicalProperty(TRUE,mappedChildPartFunc,sppForChild->getexplodedOcbJoinProperty());
}
return ipp;
} // NestedJoin::generateIpp()
PartitioningFunction *
NestedJoin::getClusteringIndexPartFuncForRightChild() const
{
GroupAttributes *ga = child(1).getGroupAttr();
const SET(IndexDesc *) &availIndexes = ga->getAvailableBtreeIndexes();
for (CollIndex i = 0; i < availIndexes.entries(); i++) {
IndexDesc *iDesc = availIndexes[i];
if (iDesc->isClusteringIndex()) {
return iDesc->getPartitioningFunction();
}
}
return NULL;
}
NABoolean
NestedJoin::checkCompleteSortOrder(const PhysicalProperty* sppForChild0)
{
// Map the target table sort key to the source. We could map
// from the source to the target and do the comparison that
// way. But any columns that are covered by constants will
// only be covered on the source side, not the target side.
// So we can only correctly identify target key columns that are
// covered by constants by mapping them to the source side first.
ValueIdList mappedTargetTableOrder;
updateSelectValueIdMap()->rewriteValueIdListDown(
updateTableDesc()->getClusteringIndex()->getOrderOfKeyValues(),
mappedTargetTableOrder);
// Remove from the mapped target table order any columns that are
// equal to constants or input values.
mappedTargetTableOrder.removeCoveredExprs(
getGroupAttr()->getCharacteristicInputs());
if (mappedTargetTableOrder.isEmpty())
return FALSE;
// The outer table sort order will not help if the # of sort key columns from
// child0 is less than the # of clustering key columns of the target table.
CollIndex n = sppForChild0->getSortKey().entries();
if ( n < mappedTargetTableOrder.entries() )
return FALSE;
if ( n > mappedTargetTableOrder.entries() )
n = mappedTargetTableOrder.entries();
for ( CollIndex i=0; i<n; i++ ) {
ValueId targetKeyCol = mappedTargetTableOrder[i];
ValueId outerOrderCol = sppForChild0->getSortKey()[i];
if ( outerOrderCol.getItemExpr()->sameOrder(targetKeyCol.getItemExpr())
!= SAME_ORDER )
return FALSE;
}
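// An illustrative (hypothetical) trace of the prefix check above:
// with an outer sort key (a, b, c) and a mapped target key (a, b),
// positions 0 and 1 compare SAME_ORDER and we fall through to the
// checks below; with a target key (b, a) the first comparison fails
// and we return FALSE immediately.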
// If there is a dp2SortOrderPartFunc, map it and see if it
// is exactly the same as the target partitioning function.
// If it is not, then we cannot use the outer table sort order.
PartitioningFunction* mappedDp2SortOrderPartFunc =
sppForChild0->getDp2SortOrderPartFunc();
if (mappedDp2SortOrderPartFunc != NULL)
{
mappedDp2SortOrderPartFunc =
mappedDp2SortOrderPartFunc->copyAndRemap(
*updateSelectValueIdMap(),
TRUE);
PartitioningFunction* targetTablePartFunc =
updateTableDesc()->getClusteringIndex()-> getPartitioningFunction();
// If the target table is not partitioned, it cannot
// match the dp2SortOrderPartFunc, since we will never
// synthesize a sort order type of DP2 for an unpart table.
if ((targetTablePartFunc == NULL) OR
mappedDp2SortOrderPartFunc->
comparePartFuncToFunc(*targetTablePartFunc) != SAME)
{
return FALSE;
}
}
return TRUE;
}
//<pb>
Context* NestedJoin::createContextForAChild(Context* myContext,
PlanWorkSpace* pws,
Lng32& childIndex)
{
// ---------------------------------------------------------------------
// For a nested join we try up to 5 child plans, named plan 0 ... plan 4
// (10 contexts, 5 for each child). Here is a short summary of those
// plans:
//
// Plan 0: Requires no sort order from the left (outer) child.
// For read query: Leave left child the freedom to pick a
// partitioning and force right child to match it.
// For write query: Force left child to match partitioning
// of the target table (or a grouping of it)
//
// Plan 1: Similar to Plan0, except that this time we force the left child
// to match the order of the right child.
// Plan1 is skipped if OCB is being considered.
// For a read query, try OCR, if applicable.
//
// Plan 2: Similar to plan1, but this time we request the left child to match
// the sort order naturally, without using a sort.
// Plan2 is skipped if preferred probing order is off (read) or
// UPD_ORDERED is off (write).
// In some cases we do plan 3 instead of plan 2.
//
// Plan 3: Similar to plan1, but this time we request the left child to match
// the sort order by using a sort operator
// Plan3 is skipped if preferred probing order is off (read) or
// UPD_ORDERED is off (write).
//
// Plan 4: Generate an OCB plan, if applicable. Also require the left to match
// the order of the right child. Unlike plans 0-3, we try the right child
// first in plan 4. We ask the left child to broadcast data via ESPs.
// Previous comment - probably no longer applicable: The old second plan
// is one where no order is required from the left child,
// but ordering information is passed to the right child if the
// left child synthesizes a sort key.
// Details for plan 2 and plan3:
// These two plans are only attempted when Preferred Probing Order
// (PREFERRED_PROBING_ORDER_FOR_NESTED_JOIN) is ON for reads or UPD_ORDERED
// is ON for writes. For reads, plan 2 and plan 3 will choose an index
// from the right child group attributes information that satisfies any
// parent ordering or partitioning requirements and has the fewest number
// of uncovered key columns. The sort key columns of this index
// that are equijoin columns are then passed to the left child as
// an order requirement. For writes, the left child is required to
// be ordered on the target table primary key columns.
// For the third plan, the order requirement must be satisfied
// without sorting. If this does not produce a plan that satisfies
// the requirement without using synchronous access, then a fourth
// plan is tried that demands the left child satisfy the order
// requirement via sorting.
//
// Details for plan 4 (OCB):
// Plan 4 replicates outer child of nested join. For this
// plan we start from inner child and check if it can satisfy partitioning
// requirement for join itself. If it does then we create a plan for inner
// child first then create context for outer child to replicate with
// broadcast if certain conditions are met (see below). This is to prevent
// uneven load of the ESPs running the join when data is highly
// skewed.
//
// Summary of abbreviations used
// -----------------------------
// OCB: Outer Child Broadcast, broadcast left child to all ESPs,
// each ESP only joins with local partitions on the right, this
// is done when there are very few outer rows, to distribute
// the work more evenly across ESPs. Can't be used for semi-
// or left outer joins.
// OCR: Outer Child Repartitioning (which includes a natural partitioning
// of the left child). Force the left child to be partitioned the
// same (or a grouping) of the right child's clustering index.
// This avoids a large number of opens in the ESPs, which would
// otherwise all open every inner partitions (n^2 opens).
// - Can't be used for non-equi joins.
// - Either OCR or OCB is considered, but not both.
// N2J: N-square Join (a plain "vanilla" nested join), using neither
// OCB nor OCR. In some cases, N2J competes with OCR or OCB.
// - Can be used in all cases (but not always considered)
//
// PPO: Preferred Probing Order, ask left child to match the order
// of the right one, to reduce random accesses to the right table.
//
// CQDs used to control OCB
// ------------------------
//
// COMP_BOOL_130, if ON - consider OCB join plan, by default ON
// COMP_BOOL_131, if ON - let OCB compete with plan 0,1 by cost, default OFF
// COMP_BOOL_132, if ON - compete with plan 2,3 by cost, default OFF
// COMP_BOOL_133, if ON - force OCB join, default OFF
// COMP_BOOL_134, if ON - force check for inner child #of partitions,
// default ON
// COMP_BOOL_135, if ON - force OCB for large inner child, not only
// start join into fact table, default ON
// COMP_INT_30 - defines a threshold for cardinality of outer child not
// to get broadcast, default 5; the threshold is
// # of NJ ESPs times COMP_INT_30 (for example 128*2 = 256)
// COMP_INT_31 - number of base table (of outer child) threshold,
// default 5
// COMP_INT_32 - minimum threshold for size of inner child per probe
// to prevent OCB, default 100 bytes
//
// Other relevant CQDs
// -------------------
//
// UPD_ORDERED - OFF - disables plans 3 and 4 for writes,
// unless required for Halloween
// protection or side tree insert
// SYSTEM - same as OFF
// ON - enables plans 3 and 4 for writes
// PREFERRED_PROBING_ORDER_FOR_NESTED_JOIN
// - OFF - disables plans 3 and 4 for reads
// SYSTEM - same as OFF
// ON - enables plans 3 and 4 for reads
// NESTED_JOINS_NO_NSQUARE_OPENS - when ON, tries to suppress N2J plans
// that use an excessive number of opens.
// Also overrides the following CQD and
// enables OCR without a threshold.
// NESTED_JOINS_OCR_MAXOPEN_THRESHOLD - enables OCR if (max degree of ||ism *
// #partitions of inner table) exceeds
// the value of this CQD
// NESTED_JOINS_ANTISKEW_ESPS - if >0, we consider a skew requirement
// h2-rd for the left child in plan 1 (OCR)
NestedJoinPlanWorkSpace *njPws = (NestedJoinPlanWorkSpace *) pws;
const ReqdPhysicalProperty* rppForMe = myContext->getReqdPhysicalProperty();
NADefaults &defs = ActiveSchemaDB()->getDefaults();
// ---------------------------------------------------------------------
// Code that is executed only the first time for each PlanWorkSpace
// ---------------------------------------------------------------------
// If this is the first time we invoke this method for a CreatePlanTask,
// initialize some values and store them in our custom plan work space
if (njPws->isEmpty())
{
Lng32 childPlansToConsider = 4; // means plan 0 and plan 1 for this join
NABoolean OCBJoinIsConsidered = FALSE;
NABoolean OCRJoinIsConsidered = FALSE;
if (((CURRSTMT_OPTDEFAULTS->preferredProbingOrderForNJ() OR
((getSource() == Join::STAR_FACT) AND // star fact join is always ppo
(CmpCommon::getDefault(COMP_BOOL_72) != DF_ON))) AND
(getGroupAttr()->getNumBaseTables() > 1) AND // to avoid Index joins
(updateTableDesc() == NULL)) // read
OR
((CURRSTMT_OPTDEFAULTS->orderedWritesForNJ() OR
avoidHalloweenR2() OR // Always want to sort if avoidHalloweenR2
getHalloweenForceSort() == FORCED OR
CURRSTMT_OPTDEFAULTS->isSideTreeInsert() == TRUE
) // Always sort if halloween forces
AND
(updateTableDesc() != NULL))) // write
childPlansToConsider = 8; // try plans 2 and 3 for this join
CostScalar child0card=
child(0).getGroupAttr()->getResultCardinalityForEmptyInput();
const SET(IndexDesc *) &availIndexes1=
child(1).getGroupAttr()->getAvailableBtreeIndexes();
Cardinality minrows, maxrows;
child(0).getGroupAttr()->hasCardConstraint(minrows, maxrows);
CostScalar maxOCBRowsLimit = defs.getAsLong(COMP_INT_30) *
myContext->getReqdPhysicalProperty()->getCountOfPipelines();
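// Illustrative arithmetic (hypothetical values): with COMP_INT_30 at
// its default of 5 and 128 pipelines, maxOCBRowsLimit = 5 * 128 = 640,
// i.e. heuristic0 below lets OCB proceed only when the outer child is
// known to produce at most 640 rows.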
// -----------------------------------------------------------------
// determine whether OCB is feasible
// -----------------------------------------------------------------
if ( ( (CmpCommon::getDefault(COMP_BOOL_133) == DF_ON) OR
( (CmpCommon::getDefault(COMP_BOOL_130) == DF_ON) AND
// this condition will prevent OCB for DDL, internal refresh
// and embedded update/delete statement. It could be ignored
// by setting COMP_BOOL_9 to 'ON'. The code for checking these
// conditions need to be refactored because it affects already
// several features: pruning, maximum parallelism, parallel
// insert/select and OCB. PruningIsFeasible is just a flag
// set to FALSE if any of these conditions holds.
CURRSTMT_OPTGLOBALS->pruningIsFeasible AND
// cannot use OCB for left-, semi-joins because it can produce
// the wrong result - multiple non-matching rows.
( NOT ( isLeftJoin() OR isSemiJoin() OR isAntiSemiJoin() ) )
AND
(
( CostScalar(maxrows) <= maxOCBRowsLimit ) //heuristic0
OR
( // heuristic1
// OCB for top join of star join into fact table
( (getSource() == Join::STAR_FACT ) OR
// OCB for small outer child and big inner child
( (CmpCommon::getDefault(COMP_BOOL_135) == DF_ON) AND
// if this is not a star join we don't want the outer child
// to have more than 2 base tables because cardinality
// estimation is becoming less accurate
( child(0).getGroupAttr()->getNumBaseTables() <=
defs.getAsLong(COMP_INT_31)
) AND
// the size of inner child per probe is greater
// than a threshold
( getGroupAttr()->getResultCardinalityForEmptyInput()/
child0card.minCsOne() *
( child(1).getGroupAttr()->getRecordLength() +
// this is, in fact, row header size in a message
// CQD name is a misnomer.
defs.getAsLong( DP2_MESSAGE_HEADER_SIZE_BYTES )
)
) > defs.getAsLong(COMP_INT_32)
)
) AND
( child0card < maxOCBRowsLimit AND
child(0).getGroupAttr()->getResultMaxCardinalityForEmptyInput() / 10 <
maxOCBRowsLimit
)
)
OR
( // heuristic2 - forced OCB
// When join predicates do not cover inner table partition key,
// We do OCB to avoid O(n^2) connections (from Joining ESPs to
// DP2s).
JoinPredicateCoversChild1PartKey() == FALSE AND
availIndexes1.entries() >= 1 AND
(availIndexes1[0]->getPrimaryTableDesc()->getIndexes()).entries() == 1
// This line does not work as the indexes are masked out here
// (eliminated). The index shows up in the plan (e.g., in
// optdml03 Q17).
//(child(1).getGroupAttr()->getAvailableBtreeIndexes()).entries()
// == 1
)
)
// for the first prototype this will check partitioning and coverage
// of prefix of inner child clustering key by join columns, local
// predicates and constant expressions
)
) AND OCBJoinIsFeasible(myContext)
)
{
OCBJoinIsConsidered = TRUE;
childPlansToConsider = 10;
}
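// Illustrative arithmetic for the COMP_INT_32 test in heuristic1
// (hypothetical values): with a join cardinality of 1,000,000, an
// outer cardinality of 10,000, an inner record length of 60 bytes and
// a 20-byte message header, the inner size per probe is
// 1,000,000 / 10,000 * (60 + 20) = 8,000 bytes, which exceeds the
// default threshold of 100 bytes, so this part of heuristic1 is met.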
Lng32 childNumPartsRequirement = ANY_NUMBER_OF_PARTITIONS;
float childNumPartsAllowedDeviation = 0.0;
NABoolean numOfESPsForced = FALSE;
NABoolean useParallelism =
okToAttemptESPParallelism(myContext,
pws,
childNumPartsRequirement,
childNumPartsAllowedDeviation,
numOfESPsForced);
// -----------------------------------------------------------------
// determine whether OCR is feasible
// -----------------------------------------------------------------
if ( //CmpCommon::getDefault(NESTED_JOINS_OCR) == DF_ON AND
OCBJoinIsConsidered == FALSE AND
updateTableDesc() == NULL /* is read case? */ AND
OCRJoinIsFeasible(myContext) == TRUE AND
useParallelism )
{
OCRJoinIsConsidered = TRUE;
}
// Decide if it is a fast Trafodion load query
NABoolean isFastLoadIntoTrafodion = FALSE;
OperatorTypeEnum childOpType = child(1).getLogExpr()->getOperatorType();
if ( childOpType == REL_LEAF_INSERT ) {
RelExpr* c1 = child(1).getLogExpr();
Insert* ins = (Insert*)c1;
isFastLoadIntoTrafodion = ins->getIsTrafLoadPrep();
}
// store results in njPws for later reuse
njPws->setParallelismItems(useParallelism,
childNumPartsRequirement,
childNumPartsAllowedDeviation,
numOfESPsForced);
njPws->setChildPlansToConsider(childPlansToConsider);
njPws->setOCBJoinIsConsidered(OCBJoinIsConsidered);
njPws->setOCRJoinIsConsidered(OCRJoinIsConsidered);
njPws->setFastLoadIntoTrafodion(isFastLoadIntoTrafodion);
} // end njPws->isEmpty()
// ---------------------------------------------------------------------
// If enough Contexts are generated, return NULL
// to signal completion.
// ---------------------------------------------------------------------
if (njPws->getCountOfChildContexts() == njPws->getChildPlansToConsider())
return NULL;
Context* result = NULL;
Lng32 planNumber = 0;
Context* childContext = NULL;
PartitioningRequirement* partReqForMe =
rppForMe->getPartitioningRequirement();
const ReqdPhysicalProperty* rppForChild = NULL;
// Initialize the ipp to what the parent specifies. If the left child
// does not synthesize a sort key, then this is what we will pass to
// the right child.
const InputPhysicalProperty* ippForMyChild =
myContext->getInputPhysicalProperty();
NABoolean noN2JForRead = ((CmpCommon::getDefault(NESTED_JOINS_NO_NSQUARE_OPENS) == DF_ON) &&
(updateTableDesc() == NULL));
NABoolean noEquiN2J =
(njPws->getOCRJoinIsConsidered() || njPws->getOCBJoinIsConsidered()) &&
(!( isLeftJoin() OR isSemiJoin() OR isAntiSemiJoin() )) &&
(rppForMe->getMustMatch() == NULL) &&
getOriginalEquiJoinExpressions().getTopValues().entries() > 0 &&
noN2JForRead
;
NABoolean trySortPlanInPlan3 =
(CmpCommon::getDefault(NESTED_JOINS_PLAN3_TRY_SORT) == DF_ON);
// if this is IUD, but UPD_ORDERED is ON, then don't try NJ plan 0
NABoolean shutDownPlan0 = FALSE;
if ( (updateTableDesc() != NULL) &&
(rppForMe->getMustMatch() == NULL) &&
(!rppForMe->executeInDP2()) &&
(!getReqdOrder().entries() || njPws->getFastLoadIntoTrafodion()) &&
CURRSTMT_OPTDEFAULTS->orderedWritesForNJ()
)
shutDownPlan0 = TRUE;
// ---------------------------------------------------------------------
// The creation of the next Context for a child depends upon
// the number of child Contexts that have been created in earlier
// invocations of this method.
// ---------------------------------------------------------------------
while ((pws->getCountOfChildContexts() < njPws->getChildPlansToConsider()) AND
(rppForChild == NULL))
{
// If we stay in this loop because we didn't generate some
// child contexts, we need to reset the child plan count when
// it gets to be as large as the arity, because otherwise we
// would advance the child plan count past the arity.
if (pws->getPlanChildCount() >= getArity())
pws->resetPlanChildCount();
planNumber = pws->getCountOfChildContexts() / 2;
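// A reading aid for the mapping implied above and by the case labels
// below: contexts 0,1 belong to plan 0, contexts 2,3 to plan 1,
// contexts 4,5 to plan 2, contexts 6,7 to plan 3 and contexts 8,9 to
// plan 4; within each pair the even context is for child 0 and the
// odd context for child 1 (per the plan summary above, plan 4 tries
// the right child first).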
switch (pws->getCountOfChildContexts())
{
case 0:
childIndex = 0;
// -------------------------------------------------------------------
// Case 0: Plan 0, child 0
// +++++++++++++++++++++++
// Create the 1st Context for the left child. This context is skipped
// if OCB is feasible (unless it must compete with this plan by cost),
// or if OCR is feasible. When OCB is feasible, OCR is
// automatically disabled.
// -------------------------------------------------------------------
if ( currentPlanIsAcceptable(planNumber,rppForMe) AND
( NOT njPws->getOCBJoinIsConsidered() OR rppForMe->getMustMatch() != NULL OR
rppForMe->getSortOrderTypeReq() == DP2_OR_ESP_NO_SORT_SOT OR
(CmpCommon::getDefault(COMP_BOOL_131) == DF_ON)) AND
(
CmpCommon::getDefault(NESTED_JOINS_PLAN0) == DF_ON AND
njPws->getOCRJoinIsConsidered() == FALSE
)
)
{
// If using sort to prevent Halloween, then avoid this plan
//
if(! (avoidHalloweenR2() || getHalloweenForceSort() == FORCED) )
{
RequirementGenerator rg(child(0), rppForMe);
// Do not check the null-ness of updateTableDesc() as NestedJoinFlow
// is a subclass of this NestedJoin class, and it can flow tuples
// to an insert node.
if (rppForMe->executeInDP2())
{
if (rppForMe->getPushDownRequirement() == NULL) {
// Add a co-location requirement (type-1 join in DP2) if the parent
// does not provide one, regardless of whether we are in CS or not.
rg.addPushDownRequirement(
new (CmpCommon::statementHeap())
PushDownColocationRequirement()
);
}
}
if (updateTableDesc() != NULL)
{
// -----------------------------------------------------------------
// TSJ on top of an insert/update/delete statement.
// Make the left side match the partitioning scheme of the
// updated table.
// -----------------------------------------------------------------
const PartitioningFunction *physicalPartFunc =
updateTableDesc()->getClusteringIndex()->getPartitioningFunction();
PartitioningRequirement* logicalPartReq = NULL;
// Generate the partitioning requirements.
// If the requirements could not be generated because the user
// is attempting to force something that is not possible,
// the method returns FALSE. Otherwise, it returns TRUE.
if (genLeftChildPartReq(myContext,
pws,
physicalPartFunc,
logicalPartReq))
{
// Push down IUD queries involving MVs.
//
// If the execution location is DP2, and the parent partition req
// is verified to be the same as the part func of the inner, we
// remove the initial requirement from rg. Without this step,
// this initial partition requirement will be deemed incompatible
// (by rg) with the mapped part func for
// the left child, even though both are connected properly by
// the updateSelectValueIdMap()!
if ( physicalPartFunc != NULL AND
rppForMe->executeInDP2() AND
partReqForMe AND
partReqForMe->partReqAndFuncCompatible(physicalPartFunc)
)
rg.removeAllPartitioningRequirements();
// Map the partitioning requirement partitioning key columns
// from the right child to the left child.
if (logicalPartReq)
{
logicalPartReq =
logicalPartReq->copyAndRemap(*updateSelectValueIdMap(),FALSE);
rg.addPartRequirement(logicalPartReq);
}
}
else
// Tried to force something that was not possible. Give up now.
return NULL;
// If there is a required order stored in the join node, then require
// the select data to be sorted in this order.
// There will only be a required order if the write operation
// was an insert and the user specified an ORDER BY clause.
// Note that we don't need to map the req. order because it was
// based on the original form of the insert, which was an insert
// with the select as its child, so the valueid's in the req. order
// are already in terms of the select valueid's.
if (getReqdOrder().entries() > 0)
{
ValueIdSet reqdOrder = getReqdOrder();
reqdOrder.removeCoveredExprs(getGroupAttr()->getCharacteristicInputs());
rg.addSortKey(reqdOrder);
}
} // this is for an write operation
else // read
{
ValueIdList reqdOrder1;
ValueIdSet reqdArr1;
// If there is a required sort order and/or arrangement, then
// split off any part of the requirement that is for the
// right child and only pass on the portion of the requirement
// that is for the left child. Pass back the requirements for
// the right child in case they are needed.
splitSortReqsForLeftChild(rppForMe, rg, reqdOrder1, reqdArr1);
// this is for the patch below related to MAXIMUM parallelism
RelExpr *child0LogExpr = child(0).getLogExpr();
if (njPws->getUseParallelism()
// This is to prevent parallel plan for inserting a single tuple.
// Currently, when a single tuple is inserted in the table with
// indexes, parallel plan caused by MAXIMUM parallelism option
// results in a message that 0 rows inserted although tuple did
// get inserted. The next check is to prevent such a plan which
// does not make much sense anyway. March 2006.
AND NOT
( child0LogExpr AND
child0LogExpr->getOperatorType() == REL_LEAF_INSERT AND
((Insert *)child0LogExpr)->insertATuple()
)
)
{
// If we want to try ESP parallelism, go for it. Note we may still
// not get it if the parent's requirement does not allow it.
// If we don't go for ESP parallelism, then we will specify either
// the parent's required number of partitions, or we will specify
// that we don't care how many partitions we get - i.e.
// ANY_NUMBER_OF_PARTITIONS.
njPws->transferParallelismReqsToRG(rg);
} // end if ok to try parallelism
} // end if NJ is for a read operation
// Produce the requirements and make sure they are feasible.
if (rg.checkFeasibility())
{
// Produce the required physical properties.
rppForChild = rg.produceRequirement();
}
}
// this is an update case. I leave this now but we might need to
// review it later because we might want to try another plan
} // endif (pws->getCountOfChildContexts() == 0)
break;
case 1:
childIndex = 1;
// -------------------------------------------------------------------
// Case 1: Plan 0, child 1
// +++++++++++++++++++++++
// Create the 1st Context for right child:
// -------------------------------------------------------------------
if ( currentPlanIsAcceptable(planNumber,rppForMe)
AND isTSJForSideTreeInsert() == FALSE
AND enableTransformToSTI() == FALSE
AND !shutDownPlan0
// AND NOT CURRSTMT_OPTDEFAULTS->orderedWritesForNJ()
)
// Try this plan regardless of NESTED_JOINS_PLAN0 CQD setting
// if OCR is not feasible
{
// -----------------------------------------------------------------
// Cost limit exceeded? Erase the Context from the workspace.
// Erasing the Context does not decrement the count of Contexts
// considered so far.
// -----------------------------------------------------------------
if (pws->getLatestChildContext() AND
NOT pws->isLatestContextWithinCostLimit() AND
NOT CURRSTMT_OPTDEFAULTS->OPHuseFailedPlanCost() )
pws->eraseLatestContextFromWorkSpace();
childContext = pws->getChildContext(0,0);
// -----------------------------------------------------------------
// Make sure a plan has been produced by the latest context.
// -----------------------------------------------------------------
if((childContext != NULL) AND childContext->hasOptimalSolution())
{
const PhysicalProperty* sppForChild =
childContext->getPhysicalPropertyForSolution();
rppForChild = genRightChildReqs(sppForChild,rppForMe, noEquiN2J);
if (isRowsetIterator()) {
// This condition ensures that this nested join
// is the parent of an unpack node used for rowsets.
// The ipp created to pass information about a rowset to the right child
// is unique in that all the nj fields of the ipp are NULL and only
// the assumeSortedForCosting_ field is TRUE.
ippForMyChild = new(CmpCommon::statementHeap())
InputPhysicalProperty( NULL, NULL,
NULL, NULL, TRUE);
}
else if (updateTableDesc() == NULL &&
CmpCommon::getDefault(COMP_BOOL_60) == DF_ON)
{
// we would like to send left child's partitioning function
// for plan1. This is needed so that the right child can accurately
// analyze the grouping relationship from the sibling
ippForMyChild = generateIpp(sppForChild, TRUE);
}
else
// If we produced an rpp for the child and there is a CQ Shape
// command for this operator, then check if they are forcing PLAN1.
if ((rppForChild != NULL) AND
(rppForMe->getMustMatch() != NULL))
{
JoinForceWildCard::forcedPlanEnum forcePlanToken =
getParallelJoinPlanToEnforce(rppForMe);
// If the user is forcing plan #1, the plan where we pass the
// left child order to the right child, then pass the order
// now. This will be the only nj plan we will try.
// Simulate PLAN1's right context here, only if OCR is not feasible.
// When feasible, OCR will reuse PLAN0's left context and as such
// it will be incorrect to allow PLAN0's right context to continue.
// OCR can only be forced with the PLAN1 or TYPE1 type specifier in CQS.
// Plan0 is strictly type-2.
if (forcePlanToken == JoinForceWildCard::FORCED_PLAN1 AND
njPws->getOCRJoinIsConsidered() == FALSE)
{
// ----------------------------------------------------------------
// Get the sort order of my left child and pass this information to
// the context for optimizing my right child. (Costing the inner
// table access has a dependency on the order from the outer table).
// ----------------------------------------------------------------
ippForMyChild = generateIpp(sppForChild);
if (ippForMyChild == NULL)
ippForMyChild = myContext->getInputPhysicalProperty();
} // end if the user is forcing the plan where we pass the
// left child order to the right child
} // end if we produced an rpp for the child and
// if there is a CQ Shape command to check
} // end if child0 had an optimal solution
} // endif (pws->getCountOfChildContexts() == 1)
break;
case 2:
childIndex = 0;
// -------------------------------------------------------------------
// Case 2: Plan 1, child 0
// +++++++++++++++++++++++
// Create the 2nd Context for left child:
// -------------------------------------------------------------------
if ( currentPlanIsAcceptable(planNumber,rppForMe) AND
( NOT njPws->getOCBJoinIsConsidered() OR rppForMe->getMustMatch() != NULL OR
rppForMe->getSortOrderTypeReq() == DP2_OR_ESP_NO_SORT_SOT OR
(CmpCommon::getDefault(COMP_BOOL_131) == DF_ON)
) AND
( NOT derivedFromRoutineJoin()) AND
isTSJForSideTreeInsert() == FALSE AND
enableTransformToSTI() == FALSE
)
{
NABoolean usableSortOrder = FALSE;
NABoolean OCRJoinIsConsideredInCase2 = njPws->getOCRJoinIsConsidered();
// If using sort to avoid Halloween, then avoid this plan
//
if(!(avoidHalloweenR2() || getHalloweenForceSort() == FORCED))
{
// Get the child context that was created for the left child of
// the first nested join plan.
childContext = pws->getChildContext(0,0);
// -----------------------------------------------------------------
// Make sure a plan was produced by the previous context for the
// left child.
// -----------------------------------------------------------------
if((childContext != NULL) AND childContext->hasOptimalSolution())
{
const PhysicalProperty* sppForChild0Plan0 =
childContext->getPhysicalPropertyForSolution();
CMPASSERT(sppForChild0Plan0 != NULL);
// See if the sort order for the left child of the previous plan
// attempted (plan #0) is not empty. If it is not, see if it
// can help keep the I/Os down when accessing the inner table.
if (NOT sppForChild0Plan0->getSortKey().isEmpty())
{
if (updateTableDesc() != NULL) // WRITE
{
usableSortOrder = checkCompleteSortOrder(sppForChild0Plan0);
} // end if write
else // READ
if ( OCRJoinIsConsideredInCase2 == FALSE )
{
// Allocate a dummy requirement generator object so we can
// emulate the right child requirements.
RequirementGenerator rg1(child(1),rppForMe);
// If there is a required sort order and/or arrangement, then
// split off any part of the requirement that is for the
// right child and only pass on the portion of the requirement
// that is for the left child. Pass back the requirements for
// the right child. For our purposes here we
// really only want the requirements for the right child.
ValueIdList reqdOrder1;
ValueIdSet reqdArr1;
splitSortReqsForLeftChild(rppForMe, rg1, reqdOrder1, reqdArr1);
// Get rid of all the left child requirements since we are
// pretending we are processing the right child.
rg1.removeSortKey();
rg1.removeArrangement();
rg1.removeAllPartitioningRequirements();
// Add in the left child sort key as a requirement for
// the right child. This will force us to only pick an
// index whose sort order is compatible with the left child
// sort key. If the left child synthesized a
// dp2SortOrderPartFunc, then we need to make sure we
// pick an index which can satisfy it. So, we will also
// specify the left child sort order type and the
// dp2SortOrderPartFunc as requirements, too. The left
// child synthesized sort order type must be "DP2" or
// "NO" (if still in DP2), and so this will force
// the recommendedOrderForNJProbing method to only
// consider indices that satisfy the dp2SortOrderPartReq.
if (sppForChild0Plan0->getDp2SortOrderPartFunc() == NULL)
rg1.addSortKey(sppForChild0Plan0->getSortKey());
else
rg1.addSortKey(sppForChild0Plan0->getSortKey(),
sppForChild0Plan0->getSortOrderType(),
sppForChild0Plan0->
getDp2SortOrderPartFunc()->
makePartitioningRequirement());
// Add in the left child partitioning function as a
// requirement so we will be forced to pick an index
// whose partitioning function is compatible with it.
rg1.addPartRequirement(sppForChild0Plan0->
getPartitioningFunction()->
makePartitioningRequirement());
// This is required to give the ordered NJ plan a chance when the
// left child has more clustering keys than needed. This function
// removes any extra keys.
if ( CmpCommon::getDefault(COMP_BOOL_188) == DF_OFF )
rg1.removeExtraSortKeys();
// Make sure the sort key and partitioning function from
// the left child are covered by the right child ga. This
// means the sort key and part key cols must all be equijoin
// columns.
if (rg1.checkFeasibility())
{
// produce a new rpp with what we have added
const ReqdPhysicalProperty* rppForChild1Plan1 =
rg1.produceRequirement();
// Now allocate a new rg to carry these requirements
// over to the recommend probing method.
RequirementGenerator rg2(child(1),rppForChild1Plan1);
// Find an index that is compatible with the left child
// sort order and partitioning and also is compatible
// with any right child sort requirements.
IndexDesc* ppoIDesc = NULL;
NABoolean partKeyColsAreMappable = TRUE;
ValueIdList preferredOrder=
child(1).getGroupAttr()->recommendedOrderForNJProbing(
child(0).getGroupAttr(),
ANY_NUMBER_OF_PARTITIONS,
rg2,
reqdOrder1,
reqdArr1,
ppoIDesc,
partKeyColsAreMappable);
// If we got back a non-empty sort key, then this
// means we found an index that is compatible with
// the left child sort order, etc.
if (NOT preferredOrder.isEmpty())
{
usableSortOrder = TRUE;
}
} // end if feasible
} // end allow plan2 for read (OCRJoinIsFeasible FALSE)
// If we found a right child index that can use the left
// child sort order, go ahead and generate this plan.
// Otherwise, we will skip this plan.
if (usableSortOrder)
{
// Produce the required physical properties.
// Just get them from those we created for the left child
// of plan#0 because we want to use the exact same
// requirements. The only difference for this plan is
// we are going to pass the sort key of the left child to
// the right child when we create the context for the
// right child. This means we should do no work to
// optimize the left child for this plan - we should
// reuse the same context we created for the first plan.
rppForChild = childContext->getReqdPhysicalProperty();
OCRJoinIsConsideredInCase2 = FALSE;
}
} // end if left child of previous plan had a sort key
} // end if child0 of plan0 had an optimal solution
} // end if not Halloween sort
// Try OCR here if we do not get a usable sort order from the
// optimal plan for the left child generated in plan1,
// or the sort order is created to protect against Halloween.
//
// Note the existence of a usable sort order implies that the
// right child of this plan will match the partition function of
// the left child, which is an OCR by itself.
//
// We do not try OCR with plan1 because we want the plan
// configuration where a potential repartitioning on the right
// hand side has a chance to exist.
//
// We do not alter PPO plans (plan3 and plan4) for OCR either
// because both are supersets of OCR (i.e., push the part func and
// sort order from the right side to the left).
if ( OCRJoinIsConsideredInCase2 )
{
PartitioningFunction * innerTablePartFunc =
getClusteringIndexPartFuncForRightChild();
ValueIdMap map(getOriginalEquiJoinExpressions());
// remap the right part func to the left (up)
PartitioningFunction *rightPartFunc =
innerTablePartFunc -> copyAndRemap(map, TRUE);
Lng32 childNumPartsRequirement = njPws->getChildNumPartsRequirement();
// Scale down the number of partitions if necessary so that
// childNumPartsRequirement evenly divides the # of partitions
// of the rightPartFunc. We can do so because OCR works with
// hash2 partitioned tables only, and hash2 has the nice
// property:
//
// If the down-scaling factor is x (x>1), then a row flowing from
// partition p of the outer table will land in one of the
// inner table partitions ranging from partition
// (p-1)x to partition px. If the rep-n partitioning function is
// chosen for the inner table, the # of partitions that each
// outer table partition will drive is x. Since open operations
// are on-demand in nature, we will not open all (xp) partitions
// for each outer table partition.
// Use the original inner table part func if down-scaling fails.
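// Illustrative arithmetic for the property above (hypothetical
// numbers): with 16 inner partitions scaled down to an outer
// requirement of 4 partitions, the down-scaling factor is x = 4,
// so rows from outer partition p = 2 land only in inner partitions
// (p-1)x = 4 through px = 8, i.e. each outer partition drives about
// x inner partitions rather than all 16.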
if ( rightPartFunc ->
scaleNumberOfPartitions(childNumPartsRequirement) == FALSE )
rightPartFunc = innerTablePartFunc;
Int32 antiskewSkewEsps =
(Int32)CmpCommon::getDefaultNumeric(NESTED_JOINS_ANTISKEW_ESPS);
if ( antiskewSkewEsps > 0 ) {
ValueIdSet joinPreds = getOriginalEquiJoinExpressions().getTopValues();
double threshold = defs.getAsDouble(SKEW_SENSITIVITY_THRESHOLD) / rppForMe->getCountOfPipelines();
SkewedValueList* skList = NULL;
// If the join is on a skewed set of columns from the left child,
// then generate the skew data partitioning requirement to deal
// with skew.
// We ignore whether the nested join probe cache is applicable here
// because we want to deal with both the ESP and DP2 skew. Probe
// cache can deal with DP2 skew, but not the ESP skew.
if ( childNodeContainSkew(0, joinPreds, threshold, &skList) )
{
Lng32 cop = rightPartFunc->getCountOfPartitions();
antiskewSkewEsps = MINOF(cop, antiskewSkewEsps);
rightPartFunc = new (CmpCommon::statementHeap())
SkewedDataPartitioningFunction(
rightPartFunc,
skewProperty(skewProperty::UNIFORM_DISTRIBUTE,
skList, antiskewSkewEsps)
);
}
}
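// Illustrative arithmetic for the skew test above (hypothetical
// values, assuming the threshold is interpreted as a row fraction):
// with SKEW_SENSITIVITY_THRESHOLD = 0.1 and 100 pipelines,
// threshold = 0.1 / 100 = 0.001, so a left-child join value carrying
// more than 0.1% of the rows would count as skewed and end up in
// skList, to be uniformly distributed over the anti-skew ESPs.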
// Produce the part requirement for the outer child from
// the part function of the inner child.
PartitioningRequirement* partReqForChild
= rightPartFunc->makePartitioningRequirement();
// Now, add in the partitioning requirement for the
// outer child.
RequirementGenerator rg(child(0), rppForMe);
rg.removeAllPartitioningRequirements();
rg.addPartRequirement(partReqForChild);
if (rg.checkFeasibility())
{
// Produce the required physical properties.
rppForChild = rg.produceRequirement();
}
} // end of if OCR
} // endif (pws->getCountOfChildContexts() == 2)
break;
case 3:
childIndex = 1;
// -------------------------------------------------------------------
// Case 3: Plan 1, child 1
// +++++++++++++++++++++++
// Create the 2nd Context for right child:
// -------------------------------------------------------------------
if ( currentPlanIsAcceptable(planNumber,rppForMe) )
{
// -----------------------------------------------------------------
// Cost limit exceeded? Erase the Context from the workspace.
// Erasing the Context does not decrement the count of Contexts
// considered so far.
// -----------------------------------------------------------------
if (pws->getLatestChildContext() AND
NOT pws->isLatestContextWithinCostLimit() AND
NOT CURRSTMT_OPTDEFAULTS->OPHuseFailedPlanCost() )
pws->eraseLatestContextFromWorkSpace();
childContext = pws->getChildContext(0,1);
// -----------------------------------------------------------------
// Make sure a plan has been produced by the latest context.
// -----------------------------------------------------------------
if((childContext != NULL) AND childContext->hasOptimalSolution())
{
const PhysicalProperty* sppForChild =
childContext->getPhysicalPropertyForSolution();
// cannot shut down this code path when OCR is feasible
rppForChild = genRightChildReqs(sppForChild,rppForMe,
!njPws->getOCRJoinIsConsidered() && noN2JForRead);
// ----------------------------------------------------------------
// Get the sort order of my left child and pass this information to
// the context for optimizing my right child. (Costing the inner
// table access has a dependency on the order from the outer table).
// If sort order of my left child is empty, then pass second argument
// (TRUE) to generateIpp() so that we pass left child partfunc to
// correctly compute probeCache cost adjustment factor
// ----------------------------------------------------------------
if (rppForChild != NULL) {
if (sppForChild->getSortKey().isEmpty())
{
// if it's an OCR plan and sort key is empty, then the ipp
// created to pass information to the right child is unique
// in that all the nj fields of the ipp are NULL and only the
// assumeSortedForCosting_ field is TRUE.
// This is to let optimizer pick ocr plan vs serial Nj P3.
if ( njPws->getOCRJoinIsConsidered() )
ippForMyChild = new(CmpCommon::statementHeap())
InputPhysicalProperty( NULL, NULL,
NULL, NULL, TRUE);
else
ippForMyChild = generateIpp(sppForChild, TRUE);
}
else
ippForMyChild = generateIpp(sppForChild);
if ( ippForMyChild ) {
// Check to see if the dp2 partfunc of the outer child in
// ippForMyChild is compatible with that of the clustering index.
// If so, we will use ippForMyChild to help pick the clustering
// index for the inner side. Only when these two partitioning
// functions are not the same do we set ippForMyChild to NULL,
// which will still make the OCR a candidate plan with a
// higher cost (than if a non-NULL ippForMyChild is passed).
//
// Sol: 10-090210-9101 (OCR can select a serial plan), where
// njDp2OuterOrderPartFunc=[F2.C, F2.B, F2.A, F2.D, F2.G, F2.F] and
// innerTablePartFunc = [F2.A, F2.B, F2.C, F2.D, F2.E, F2.F],
// and njDp2OuterOrderPartFunc and innerTablePartFunc are not
// the same. If ippForMyChild is not set to NULL, we will not
// get the OCR plan.
if ( njPws->getOCRJoinIsConsidered() ) {
const PartitioningFunction* njDp2OuterOrderPartFunc =
ippForMyChild->getNjDp2OuterOrderPartFunc();
PartitioningFunction * innerTablePartFunc =
getClusteringIndexPartFuncForRightChild();
if ((njDp2OuterOrderPartFunc != NULL) AND
(njDp2OuterOrderPartFunc->
comparePartFuncToFunc(*innerTablePartFunc) != SAME))
{
ippForMyChild = NULL;
}
}
} else
ippForMyChild = myContext->getInputPhysicalProperty();
} // end if we produced an rpp for the child
} // end if child0 had an optimal solution
} // endif (pws->getCountOfChildContexts() == 3)
break;
//********************************************************
// The part of the code between the ***** is only executed
// if preferred probing order is ON
case 4:
childIndex = 0;
// -------------------------------------------------------------------
// Case 4: Plan 2, child 0
// +++++++++++++++++++++++
// Create the 3rd Context for left child:
// -------------------------------------------------------------------
// TBD: consider collapsing plans 2 and 3
if ( currentPlanIsAcceptable(planNumber,rppForMe) AND
( NOT njPws->getOCBJoinIsConsidered() OR rppForMe->getMustMatch() != NULL OR
rppForMe->getSortOrderTypeReq() == DP2_OR_ESP_NO_SORT_SOT OR
(CmpCommon::getDefault(COMP_BOOL_132) == DF_ON)
) AND
( NOT derivedFromRoutineJoin())
)
{
// -----------------------------------------------------------------
// Try a preferred probing order plan where we demand that the
// left child satisfy the order naturally.
// -----------------------------------------------------------------
RequirementGenerator rg(child(0), rppForMe);
if ( rppForMe->getPushDownRequirement() == NULL AND
rppForMe->executeInDP2()
)
{
// Add a co-location requirement (type-1 join in DP2) if the parent
// does not provide one, regardless of whether we are in CS or not.
rg.addPushDownRequirement(
new (CmpCommon::statementHeap())
PushDownColocationRequirement()
);
}
// FIRST: Determine the preferred order for accessing the right
// child table and add a partitioning requirement that is based
// on the corresponding right child access path.
ValueIdList preferredOrder;
// The following flag will be set to FALSE if the partitioning key
// columns of the chosen index for read are not mappable, i.e.
// they are not covered by the equijoin columns. For write,
// the partitioning key columns of the target table primary key
// are always mappable since there is an explicit map for this.
NABoolean partKeyColsAreMappable = TRUE;
PartitioningFunction* physicalPartFunc = NULL;
PartitioningRequirement* logicalPartReq = NULL;
if (updateTableDesc() != NULL) // WRITE
{
// Determine the preferred order for accessing the target
// table via the primary index.
preferredOrder = genWriteOpLeftChildSortReq();
if (preferredOrder.isEmpty() AND
!(avoidHalloweenR2() || getHalloweenForceSort() == FORCED))
{
// Might as well give up, since this context will be
// exactly the same as the first child context generated.
//
// Do this only when it is not a side-tree insert.
if ( isTSJForSideTreeInsert() == FALSE AND !shutDownPlan0 )
return NULL;
// this is an update case. I leave this now but we might need to
// review it later because we might want to try another plan. SP.
}
physicalPartFunc =
updateTableDesc()->getClusteringIndex()->getPartitioningFunction();
// Generate the partitioning requirements.
// If the requirements could not be generated because the user
// is attempting to force something that is not possible,
// the method returns FALSE. Otherwise, it returns TRUE.
if (genLeftChildPartReq(myContext,
pws,
physicalPartFunc,
logicalPartReq))
{
if (logicalPartReq)
{
// Map the partitioning requirement partitioning key columns
// from the right child to the left child.
logicalPartReq =
logicalPartReq->copyAndRemap(*updateSelectValueIdMap(),FALSE);
rg.addPartRequirement(logicalPartReq);
}
}
else
// Tried to force something that was not possible. Give up now.
return NULL;
// this is an update case. I leave this now but we might need to
// review it later because we might want to try another plan. SP.
} // end if WRITE
else // READ
{
NABoolean numOfESPsForced = FALSE;
Lng32 childNumPartsRequirement = ANY_NUMBER_OF_PARTITIONS;
float childNumPartsAllowedDeviation = 0.0;
DefaultToken parallelControlSettings =
getParallelControlSettings(rppForMe,
childNumPartsRequirement,
childNumPartsAllowedDeviation,
numOfESPsForced);
// If there is a required sort order and/or arrangement, then
// split off any part of the requirement that is for the
// right child and only pass on the portion of the requirement
// that is for the left child. The portion that is for the
// right child (child1) is passed back so we can use it
// for verifying the suitability of any right child preferred
// probing order indexes.
ValueIdList reqdOrder1;
ValueIdSet reqdArr1;
splitSortReqsForLeftChild(rppForMe, rg, reqdOrder1, reqdArr1);
// Determine the preferred probing order and the associated index
IndexDesc* ppoIDesc = NULL;
preferredOrder =
child(1).getGroupAttr()->recommendedOrderForNJProbing(
child(0).getGroupAttr(),
childNumPartsRequirement,
rg,
reqdOrder1,
reqdArr1,
ppoIDesc,
partKeyColsAreMappable);
if (preferredOrder.isEmpty())
{
// Might as well give up, since this context will be
// exactly the same as the first child context generated.
//return NULL;
break;
// we don't want to give up now because we might want to
// consider another plan like OCB join
}
physicalPartFunc =
ppoIDesc->getPartitioningFunction();
// Don't add a partitioning requirement of our own if we have
// received a ReplicateNoBroadcast partitioning requirement
// from our parent. This is because there is no way any
// partitioning requirement we add will be compatible with
// the parent requirement, and there is no way to get rid
// of a ReplicateNoBroadcast partitioning requirement, as
// Exchange cannot get rid of it for us, unfortunately.
// So, if we don't make this exception, we will never be
// able to try preferred probing order plans for joins
// inside correlated subqueries if the nested join that
// connects the subquery to the main query is executing
// in parallel. Because of this, we may end up with
// a non-type-1 parallel join, and so the order will not
// be completely preserved, but this is deemed better
// than getting no ordering at all.
if ((partReqForMe == NULL) OR
NOT partReqForMe->isRequirementReplicateNoBroadcast())
{
// Generate the partitioning requirements.
// If the requirements could not be generated because the user
// is attempting to force something that is not possible,
// the method returns FALSE. Otherwise, it returns TRUE.
if (genLeftChildPartReq(myContext,
pws,
physicalPartFunc,
logicalPartReq))
{
if (logicalPartReq)
rg.addPartRequirement(logicalPartReq);
}
else
// Tried to force something that was not possible. Give up now.
//return NULL;
break;
// we don't want to give up now because we might want to
// consider another plan like OCB join
}
else
// Just set the logical part. req. to what we got from our parent.
logicalPartReq = partReqForMe;
} // end if READ
// SECOND: Determine and add the correct sort order type requirement
// for the preferred probing order.
SortOrderTypeEnum preferredSortOrderTypeReq = NO_SOT;
PartitioningRequirement* preferredDp2SortOrderPartReq = NULL;
JoinForceWildCard::forcedPlanEnum forcePlanToken =
JoinForceWildCard::ANY_PLAN;
// If there is a CQ Shape command for this operator,
// then check if they are forcing PLAN3.
if (rppForMe->getMustMatch() != NULL)
{
forcePlanToken = getParallelJoinPlanToEnforce(rppForMe);
// If the user is forcing the left child of the nested join
// to be a sort operator, then we need to try a sorted plan,
// as this will be only way we can honor the CQS request and
// try to force an order of our own. So, in this case we need
// to do PLAN3 now as well.
if ((forcePlanToken != JoinForceWildCard::FORCED_PLAN3) AND
(rppForMe->getMustMatch()->child(childIndex)->
getOperatorType() == REL_SORT))
forcePlanToken = JoinForceWildCard::FORCED_PLAN3;
}
// Determine the sort order type requirement:
// If we're in DP2, we must not specify a sort order type.
//
// If our parent requires an ESP_VIA_SORT sort order type,
// then we better ask for that now as asking for a natural
// sort order will not be compatible. If we do this now, we won't
// do the 4th nested join plan since we will have already done
// a sorted plan.
// If the user is forcing plan 3, the plan where we set the
// required sort order type to ESP_VIA_SORT, then require
// that sort order type now.
// If we do this now, we won't do plan 3
// since we will have already done a sorted plan.
//
// If there is only one physical partition, or if there
// will only be one partition per logical partition, then a
// dp2 sort order will be the same as an esp no sort order,
// so set the sort order type requirement to ESP_NO_SORT.
// If we can't map the partitioning key columns of the
// physical partitioning function, then we won't be able to
// use it as a dp2SortOrderPartReq, so set the sort order type
// requirement to ESP_NO_SORT.
// If synchronous access is forced, no point in asking for a
// DP2 sort order, so set the sort order type req. to ESP_NO_SORT.
// If avoidHalloweenR2 is TRUE or getHalloweenForceSort() == FORCED,
// then we always want ESP_VIA_SORT_SOT to force a SORT
// operator. The blocking nature of sort avoids the potential
// Halloween problem.
//
if (rppForMe->executeInDP2())
preferredSortOrderTypeReq = NO_SOT;
else if ((rppForMe->getSortOrderTypeReq() == ESP_VIA_SORT_SOT) OR
(forcePlanToken == JoinForceWildCard::FORCED_PLAN3) OR
avoidHalloweenR2() OR
getHalloweenForceSort() == FORCED)
preferredSortOrderTypeReq = ESP_VIA_SORT_SOT;
else if ((physicalPartFunc == NULL) OR
(logicalPartReq == NULL) OR
(logicalPartReq->getCountOfPartitions() ==
physicalPartFunc->getCountOfPartitions()) OR
NOT partKeyColsAreMappable OR
(CmpCommon::getDefault(ATTEMPT_ASYNCHRONOUS_ACCESS) ==
DF_OFF))
preferredSortOrderTypeReq = ESP_NO_SORT_SOT;
else
{
preferredSortOrderTypeReq = DP2_OR_ESP_NO_SORT_SOT;
preferredDp2SortOrderPartReq =
physicalPartFunc->makePartitioningRequirement();
if (updateTableDesc() != NULL) // WRITE
{
// map physical part func part key to left child value ids.
// Don't need to do this for read because we have
// ensured that all part key cols for read are covered
// by the equijoin columns, which means they are in
// the same VEG group so both sides of the join use
// the same valueids.
preferredDp2SortOrderPartReq =
preferredDp2SortOrderPartReq->copyAndRemap(
*updateSelectValueIdMap(),FALSE);
}
}
// Now add in a requirement that the data be sorted in the
// preferred probing order with the preferred sort order type.
rg.addSortKey(preferredOrder,
preferredSortOrderTypeReq,
preferredDp2SortOrderPartReq);
if (rg.checkFeasibility())
{
// Produce the required physical properties.
rppForChild = rg.produceRequirement();
}
} // endif (pws->getCountOfChildContexts() == 4)
break;
case 5:
childIndex = 1;
// -------------------------------------------------------------------
// Case 5: Plan 2, child 1
// +++++++++++++++++++++++
// Create the 3rd Context for right child:
// -------------------------------------------------------------------
if ( currentPlanIsAcceptable(planNumber,rppForMe) )
{
// -----------------------------------------------------------------
// Cost limit exceeded? Erase the Context from the workspace.
// Erasing the Context does not decrement the count of Contexts
// considered so far.
// -----------------------------------------------------------------
if (pws->getLatestChildContext() AND
NOT pws->isLatestContextWithinCostLimit() AND
NOT CURRSTMT_OPTDEFAULTS->OPHuseFailedPlanCost() )
pws->eraseLatestContextFromWorkSpace();
childContext = pws->getChildContext(0,2);
// -----------------------------------------------------------------
// Make sure a plan has been produced by the latest context.
// -----------------------------------------------------------------
if((childContext != NULL) AND childContext->hasOptimalSolution())
{
const PhysicalProperty* sppForChild =
childContext->getPhysicalPropertyForSolution();
rppForChild = genRightChildReqs(sppForChild,rppForMe, noEquiN2J);
if (rppForChild != NULL)
{
// ----------------------------------------------------------------
// Get the sort order of my left child and pass this information to
// the context for optimizing my right child. (Costing the inner
// table access has a dependency on the order from the outer table).
// ----------------------------------------------------------------
ippForMyChild = generateIpp(sppForChild);
if (ippForMyChild == NULL)
ippForMyChild = myContext->getInputPhysicalProperty();
} // end if we produced an rpp for the child
} // end if child0 had an optimal solution
} // endif (pws->getCountOfChildContexts() == 5)
break;
case 6:
childIndex = 0;
// -------------------------------------------------------------------
// Case 6: Plan 3, child 0
// +++++++++++++++++++++++
// Create the 4th Context for left child:
// -------------------------------------------------------------------
if ( currentPlanIsAcceptable(planNumber,rppForMe) AND
( NOT njPws->getOCBJoinIsConsidered() OR rppForMe->getMustMatch() != NULL OR
rppForMe->getSortOrderTypeReq() == DP2_OR_ESP_NO_SORT_SOT OR
(CmpCommon::getDefault(COMP_BOOL_132) == DF_ON)
) AND
( NOT derivedFromRoutineJoin())
)
{
// -----------------------------------------------------------------
// Try a preferred probing order plan where we demand that the
// left child satisfy the order via sorting.
// -----------------------------------------------------------------
// Get the context for the preferred probing order plan without
// sorting to see if it succeeded in getting a DP2 sort order
// type plan.
childContext = pws->getChildContext(0,2);
// If the first ppo plan did not produce a context, then this
// plan won't be able to succeed, either, since all we are
// doing is changing the sort order type. So, only try this
// plan if the first ppo plan created a context. Also can't
// sort in DP2 so don't try this if in DP2.
if ((childContext != NULL) AND NOT rppForMe->executeInDP2())
{
NABoolean trySortedPlan = FALSE;
// If the first ppo plan did not get a plan, then this
// means no natural order was available, so we definitely
// want to try sorting.
if (NOT childContext->hasOptimalSolution())
trySortedPlan = TRUE;
else
{
// The first ppo plan did get a plan, so try a sorted plan
// if the first plan was not really a true sorted-in-ESP plan 3
// already (ESP_VIA_SORT_SOT).
const PhysicalProperty* sppForChild0Plan2 =
childContext->getPhysicalPropertyForSolution();
if ( trySortPlanInPlan3 ) {
// The new way: try the explicit sort plan when plan2 does not produce
// such a sort plan.
if ( sppForChild0Plan2->getSortOrderType() != ESP_VIA_SORT_SOT )
trySortedPlan = TRUE;
} else {
// The old way of doing business:
// The first ppo plan did get a plan, so try a sorted plan
// if the first plan used synchronous access to deliver the
// order and we did not already do a sorted plan.
// Remove the ELSE branch in M9 after the new logic is tested in M8SP2.
if ( sppForChild0Plan2->getSortOrderType() == ESP_NO_SORT_SOT )
trySortedPlan = TRUE;
}
}
// We also test if the first ppo plan (plan#2) has a sort requirement
// for its left child. If so we continue with the new ppo plan
// (force the order via sort).
//
// In special case such as inserting a sort operator to prevent
// halloween condition on a syskey-only table, there is no sort
// requirement. In this situation, we simply do not create
// a new ppo context. This is because the new ppo context would be
// exactly the same as the context for the left child in the first
// ppo plan (no sort requirement).
if (trySortedPlan AND
childContext->getReqdPhysicalProperty()->getSortKey())
{
RequirementGenerator rg(child(0),rppForMe);
if (updateTableDesc() == NULL) // READ?
{
// If there is a required sort order and/or arrangement, then
// split off any part of the requirement that is for the
// right child and only pass on the portion of the requirement
// that is for the left child.
ValueIdList reqdOrder1;
ValueIdSet reqdArr1;
splitSortReqsForLeftChild(rppForMe, rg, reqdOrder1, reqdArr1);
}
// Add a requirement that is exactly like the requirement
// we created for the first ppo plan, except change the
// required sort order type to ESP_VIA_SORT.
const ReqdPhysicalProperty* rppForChild0Plan2 =
childContext->getReqdPhysicalProperty();
PartitioningRequirement* partReqForChild =
rppForChild0Plan2->getPartitioningRequirement();
const ValueIdList* const reqdOrderForChild =
rppForChild0Plan2->getSortKey();
SortOrderTypeEnum sortOrderTypeReqForChild = ESP_VIA_SORT_SOT;
// The first ppo plan must have specified a partitioning
// requirement and a sort requirement, or we should not
// have done it!
CMPASSERT((partReqForChild != NULL OR
(updateTableDesc() AND
updateTableDesc()->getNATable()->isHbaseTable()))
AND
reqdOrderForChild != NULL);
if (partReqForChild)
rg.addPartRequirement(partReqForChild);
rg.addSortKey(*reqdOrderForChild,sortOrderTypeReqForChild);
// See if we are able to produce a plan. If the parent
// requirement does not allow sorting, this will fail.
if (rg.checkFeasibility())
{
// Produce the required physical properties for the child.
rppForChild = rg.produceRequirement();
}
} // end if trySortedPlan
} // end if a context was created for an unsorted ordered plan
// and we're not in DP2
} // endif (pws->getCountOfChildContexts() == 6)
break;
case 7:
childIndex = 1;
// -------------------------------------------------------------------
// Case 7: Plan 3, child 1
// +++++++++++++++++++++++
// Create the 4th Context for right child:
// -------------------------------------------------------------------
if ( currentPlanIsAcceptable(planNumber,rppForMe) )
{
// -----------------------------------------------------------------
// Cost limit exceeded? Erase the Context from the workspace.
// Erasing the Context does not decrement the count of Contexts
// considered so far.
// -----------------------------------------------------------------
if (pws->getLatestChildContext() AND
NOT pws->isLatestContextWithinCostLimit() AND
NOT CURRSTMT_OPTDEFAULTS->OPHuseFailedPlanCost() )
pws->eraseLatestContextFromWorkSpace();
childContext = pws->getChildContext(0,3);
// -----------------------------------------------------------------
// Make sure a plan has been produced by the latest context.
// -----------------------------------------------------------------
if((childContext != NULL) AND childContext->hasOptimalSolution())
{
const PhysicalProperty* sppForChild =
childContext->getPhysicalPropertyForSolution();
rppForChild = genRightChildReqs(sppForChild,rppForMe, noEquiN2J);
if (rppForChild != NULL)
{
// ----------------------------------------------------------------
// Get the sort order of my left child and pass this information to
// the context for optimizing my right child. (Costing the inner
// table access has a dependency on the order from the outer table).
// ----------------------------------------------------------------
ippForMyChild = generateIpp(sppForChild);
if (ippForMyChild == NULL)
ippForMyChild = myContext->getInputPhysicalProperty();
} // end if we produced an rpp for the child
} // end if child0 had an optimal solution
} // endif (pws->getCountOfChildContexts() == 7)
break;
//***************************************************
case 8:
childIndex = 1;
// -------------------------------------------------------------------
// Case 8: Plan 4, child 1 (note we start with child 1 this time)
// +++++++++++++++++++++++
// Create the 5th Context for right child: this is a new plan for OCB
// -------------------------------------------------------------------
if ( currentPlanIsAcceptable(planNumber,rppForMe) AND
njPws->getOCBJoinIsConsidered() AND
( NOT derivedFromRoutineJoin())
)
{
// -----------------------------------------------------------------
// Cost limit exceeded? Erase the Context from the workspace.
// Erasing the Context does not decrement the count of Contexts
// considered so far.
// -----------------------------------------------------------------
if (pws->getLatestChildContext() AND
NOT pws->isLatestContextWithinCostLimit() AND
NOT CURRSTMT_OPTDEFAULTS->OPHuseFailedPlanCost() )
pws->eraseLatestContextFromWorkSpace();
// -----------------------------------------------------------------
// Split the order requirement for the left and right child and
// ask the right child to satisfy its sort requirement, if split
// is possible (see Join::splitOrderReq() for details)
// -----------------------------------------------------------------
RequirementGenerator rg(child(1),rppForMe);
if (myContext->requiresOrder())
{
rg.removeSortKey();
rg.removeArrangement();
if (rppForMe->getSortKey() AND
(rppForMe->getSortKey()->entries() > 0))
{
ValueIdList reqdOrder0, reqdOrder1;
if (splitOrderReq(*(rppForMe->getSortKey()),
reqdOrder0,reqdOrder1))
{
rg.addSortKey(reqdOrder1);
}
}
}
// We must insist that the right child match the parent partitioning
// requirements, because we are dealing with the right child first.
// The right child will satisfy the parent somehow (if possible).
// So, we don't remove the parent requirements.
if (njPws->getUseParallelism())
njPws->transferParallelismReqsToRG(rg);
if( CmpCommon::getDefault(COMP_BOOL_53) == DF_ON )
rg.addOcbCostingRequirement();
// Produce the requirements and make sure they are feasible.
if (rg.checkFeasibility())
{
// Produce the required physical properties.
rg.addNoEspExchangeRequirement();
rppForChild = rg.produceRequirement();
}
} // endif (pws->getCountOfChildContexts() == 8)
break;
case 9:
childIndex = 0;
// -------------------------------------------------------------------
// Case 9: Plan 4, child 0
// +++++++++++++++++++++++
// Create the 5th Context for left child: this is a new plan for OCB
// -------------------------------------------------------------------
if ( currentPlanIsAcceptable(planNumber,rppForMe) AND
njPws->getOCBJoinIsConsidered()
)
{
// -----------------------------------------------------------------
// Cost limit exceeded? Erase the Context from the workspace.
// Erasing the Context does not decrement the count of Contexts
// considered so far.
// -----------------------------------------------------------------
if (pws->getLatestChildContext() AND
NOT pws->isLatestContextWithinCostLimit() AND
NOT CURRSTMT_OPTDEFAULTS->OPHuseFailedPlanCost() )
pws->eraseLatestContextFromWorkSpace();
childContext = pws->getChildContext(1,4);
// -----------------------------------------------------------------
// Make sure a plan has been produced by the latest context.
// -----------------------------------------------------------------
if((childContext != NULL) AND childContext->hasOptimalSolution())
{
const PhysicalProperty* sppForChild =
childContext->getPhysicalPropertyForSolution();
CMPASSERT(sppForChild != NULL);
PartitioningFunction*
childPartFunc = sppForChild->getPartitioningFunction();
PartitioningRequirement* partReqForChild;
if (CmpCommon::getDefault(COMP_BOOL_82) == DF_ON)
{
// Use the node map of the inner child partitioning function.
partReqForChild = new (CmpCommon::statementHeap() )
RequireReplicateViaBroadcast(childPartFunc, TRUE);
}
else
{
partReqForChild = new (CmpCommon::statementHeap())
RequireReplicateViaBroadcast(
childPartFunc->getCountOfPartitions());
// Use the node map of the inner child partitioning function.
NodeMap *myNodeMap =
childPartFunc->getNodeMap()->copy(CmpCommon::statementHeap());
partReqForChild->castToFullySpecifiedPartitioningRequirement()->
getPartitioningFunction()->replaceNodeMap(myNodeMap);
}
RequirementGenerator rg (child(0),rppForMe);
// Remove any parent partitioning requirements, since we
// have already enforced this on the left child.
rg.removeAllPartitioningRequirements();
// Now, add in a broadcast partitioning requirement for the
// left child.
rg.addPartRequirement(partReqForChild);
// Split the order requirement for the left and right child and
// ask the left child to satisfy its sort requirement, if split
// is possible (see Join::splitOrderReq() for details)
if (myContext->requiresOrder())
{
rg.removeSortKey();
rg.removeArrangement();
if (rppForMe->getSortKey() AND
(rppForMe->getSortKey()->entries() > 0))
{
ValueIdList reqdOrder0, reqdOrder1;
if (splitOrderReq(*(rppForMe->getSortKey()),
reqdOrder0,reqdOrder1))
{
rg.addSortKey(reqdOrder0);
}
}
}
// Produce the requirements and make sure they are feasible.
if (rg.checkFeasibility())
{
// Produce the required physical properties.
rppForChild = rg.produceRequirement();
}
} // end if child1 had an optimal solution
} // endif (pws->getCountOfChildContexts() == 9)
break;
} // end of switch statement
if (rppForChild != NULL)
{
// ----------------------------------------------------------------------
// Compute the cost limit to be applied to the child.
// ----------------------------------------------------------------------
CostLimit* costLimit = computeCostLimit(myContext, pws);
// ----------------------------------------------------------------------
// Get a Context for optimizing the child.
// Search for an existing Context in the CascadesGroup to which
// the child belongs that requires the same properties as those
// in rppForChild. Reuse it, if found. Otherwise, create a new
// Context that contains rppForChild as the required physical
// properties.
// ----------------------------------------------------------------------
EstLogPropSharedPtr inputLPForChild;
EstLogPropSharedPtr copyInputLPForChild;
if (childIndex == 0)
inputLPForChild = myContext->getInputLogProp();
else
{
inputLPForChild = child(0).outputLogProp
(myContext->getInputLogProp());
if ( isSemiJoin() OR isAntiSemiJoin() )
{
// don't alter the estlogprop returned above, alter its copy.
copyInputLPForChild = EstLogPropSharedPtr(
new(CmpCommon::statementHeap()) EstLogProp(*inputLPForChild));
if ( isSemiJoin() )
copyInputLPForChild->setInputForSemiTSJ(EstLogProp::SEMI_TSJ);
else
copyInputLPForChild->setInputForSemiTSJ(EstLogProp::ANTI_SEMI_TSJ);
}
}
if ( childIndex == 0 || (!isSemiJoin() && !isAntiSemiJoin()) )
result = shareContext(childIndex, rppForChild,
ippForMyChild, costLimit,
myContext, inputLPForChild);
else
result = shareContext(childIndex, rppForChild,
ippForMyChild, costLimit,
myContext, copyInputLPForChild);
if ( NOT (pws->isLatestContextWithinCostLimit() OR
result->hasSolution() )
)
result = NULL;
} // end if OK to create a child context
else
result = NULL;
// -------------------------------------------------------------------
// Remember the cases for which a Context could not be generated,
// or store the context that was generated.
// -------------------------------------------------------------------
pws->storeChildContext(childIndex, planNumber, result);
pws->incPlanChildCount();
if ( CURRSTMT_OPTDEFAULTS->optimizerPruning() AND
( pws->getPlanChildCount() == getArity() ) AND
CURRSTMT_OPTDEFAULTS->OPHexitNJcrContChiLoop()
)
{
pws->resetAllChildrenContextsConsidered();
break;
}
} // end while loop
if ( pws->getCountOfChildContexts() == njPws->getChildPlansToConsider() )
pws->setAllChildrenContextsConsidered();
return result;
} // NestedJoin::createContextForAChild()
//<pb>
NABoolean NestedJoin::findOptimalSolution(Context* myContext,
PlanWorkSpace* pws)
{
NABoolean hasOptSol;
// Plan # is only an output param, initialize it to an impossible value.
Lng32 planNumber = -1;
hasOptSol = pws->findOptimalSolution(planNumber);
// All nested join plans other than the first one are plans where the
// probes from the outer table are in the same order as the key of the
// inner table. Indicate this in the nested join relexpr so we can
// report this information in EXPLAIN.
if(planNumber != 0)
{
setProbesInOrder(TRUE);
}
return hasOptSol;
} // NestedJoin::findOptimalSolution()
NABoolean NestedJoin::currentPlanIsAcceptable(Lng32 planNo,
const ReqdPhysicalProperty* const rppForMe) const
{
// ---------------------------------------------------------------------
// Check whether the user wants to enforce a particular plan type.
// ---------------------------------------------------------------------
// If nothing is being forced, return TRUE now.
if (rppForMe->getMustMatch() == NULL)
return TRUE;
// Check for the correct forced plan type.
JoinForceWildCard::forcedPlanEnum forcePlanToken =
getParallelJoinPlanToEnforce(rppForMe);
NABoolean OCR_CQD_is_on = ActiveSchemaDB()->getDefaults().
getAsLong(NESTED_JOINS_OCR_MAXOPEN_THRESHOLD) > 0;
switch (forcePlanToken)
{
case JoinForceWildCard::FORCED_PLAN0:
if (planNo != 0)
return FALSE;
break;
case JoinForceWildCard::FORCED_PLAN1:
// if we are forcing plan #1 - a plan which passes any
// sort order from the right child to the left - then we
// will do this as plan #0 instead, so we will not need
// to generate plan #1.
if ( planNo == 0)
return TRUE;
// If CQD NESTED_JOINS_OCR is on, and PLAN1 is forced, then we will
// do OCR. In this case, we cannot ignore plan1.
if (planNo == 1 AND OCR_CQD_is_on == TRUE)
return TRUE;
else
return FALSE;
break;
case JoinForceWildCard::FORCED_PLAN2:
if (planNo != 2)
return FALSE;
break;
case JoinForceWildCard::FORCED_PLAN3:
// if we are forcing plan #3 - a plan which passes
// a required sort order type of ESP_VIA_SORT -
// then we will do this as plan #2 instead, so we will not need
// to generate plan #3.
if (planNo != 2)
return FALSE;
break;
case JoinForceWildCard::FORCED_TYPE1:
// Plan0 and the old Plan1 plans do not require the left child
// to be partitioned in any particular way, so they are "type 2".
// But the OCR embedded in Plan1 is "type 1".
if (planNo == 0)
return FALSE;
if (planNo == 1 AND OCR_CQD_is_on == FALSE)
return FALSE;
break;
case JoinForceWildCard::FORCED_TYPE2:
// Nested Join plans #2 and #3 require the left child partitioning
// to be a grouping of the right child partitioning, so that the
// right child partitions will only see probes from at most one
// left child partition. So, these are type 1 joins.
if ((planNo == 2) OR (planNo == 3))
return FALSE;
// The following conditions test if OCR is sought. If so we need to
// return FALSE here, because OCR is "type-1".
if (planNo == 1 AND OCR_CQD_is_on == TRUE)
return FALSE;
break;
case JoinForceWildCard::ANY_PLAN:
// Any plan satisfies this - break out so we'll return TRUE
break;
case JoinForceWildCard::FORCED_INDEXJOIN:
// this is a forced index join
break;
default:
return FALSE; // must be some option that Nested Join doesn't support
}
// If we get here, the plan must have passed all the checks.
return TRUE;
} // NestedJoin::currentPlanIsAcceptable()
NABoolean NestedJoin::OCBJoinIsFeasible(const Context* myContext) const
{
// Here we do the main analysis of whether OCB join can be considered.
// The main purpose of this method is to guarantee correctness
// by avoiding duplicate results if the same tuple is sent to more
// than one partition of the inner child. The easiest way to avoid this
// is to not allow a split_top over the inner child. For this we require
// that the number of ESPs running the OCB join is the same as the number
// of partitions of the inner child. The exception to this rule is when
// the inner child is hash2 partitioned and the ratio of the number of
// partitions to the number of ESPs is a power of 2. In this case hash2
// partition grouping can guarantee that each partition will be accessed
// by only one ESP.
// We could also add a check that a prefix of the inner child clustering
// key is covered by join predicates, characteristic inputs and constants.
// This would guarantee an ordered nested join. If not, each probe will
// cause a full inner child scan, producing a very expensive plan
// that will most probably lose when compared with other plans for this
// join. Therefore, adding this type of check here would avoid costing
// an obviously expensive plan. This is planned for the next phase.
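// Illustrative example (numbers assumed, not from the source): with
// 32 ESPs and an inner child hash2 partitioned into 128 partitions,
// the ratio 128/32 = 4 is a power of 2, so hash2 partition grouping
// can assign each inner partition to exactly one ESP and OCB join
// remains a candidate.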
const ReqdPhysicalProperty* rppForMe =
myContext->getReqdPhysicalProperty();
if ( rppForMe->executeInDP2() == TRUE )
return FALSE;
PartitioningRequirement *partReq =
rppForMe->getPartitioningRequirement();
// For now we allow only a fuzzy (or no)
// partitioning requirement without a specified partitioning key,
// when there is no sort requirement for me.
if ( (myContext->requiresOrder() AND
partReq AND
partReq->isRequirementExactlyOne() == FALSE) OR
partReq == NULL OR
(partReq->isRequirementApproximatelyN() AND
NOT partReq->partitioningKeyIsSpecified()
)
)
{
if (CmpCommon::getDefault(COMP_BOOL_134) == DF_OFF)
{
// Ignore the Scan check for now. To force a check of
// the number of partitions of the inner child we need to set
// COMP_BOOL_134 to ON.
return TRUE;
}
// The inner child should be a Scan node. For now just check
// the number of base tables; a more complex check will be
// implemented later. For now, to guarantee correctness, we
// require all available indexes to be hash2 partitioned
// with the number of partitions greater than or equal to the
// number of ESPs, with the ratio being a power of 2.
if (child(1).getGroupAttr()->getNumBaseTables() != 1 )
{
return FALSE;
}
PartitioningFunction *rightPartFunc = NULL;
Lng32 numOfESPs = rppForMe->getCountOfPipelines();
const SET(IndexDesc *) &availIndexes=
child(1).getGroupAttr()->getAvailableBtreeIndexes();
for (CollIndex i = 0; i < availIndexes.entries(); i++)
{
rightPartFunc = availIndexes[i]->getPartitioningFunction();
if ( rightPartFunc )
{
Lng32 numOfparts = rightPartFunc->getCountOfPartitions();
if ( numOfparts < numOfESPs )
{
// don't use OCB if the number of partitions of the inner child
// is less than the number of ESPs.
return FALSE;
}
if ( numOfparts == numOfESPs )
continue;
// if we came here then the number of partitions of the inner child
// is greater than the number of ESPs. Allow OCB only if the inner
// child is hash2 partitioned and the ratio is a power of 2.
UInt32 d = numOfparts%numOfESPs;
if ( d > 0 )
{
// don't use OCB if the number of partitions of the inner child
// is not a multiple of the number of ESPs.
return FALSE;
}
d = numOfparts/numOfESPs;
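// Illustrative worked example of the power-of-2 bit trick used
// below: for d = 4, 100b & 011b = 0, so d is a power of 2;
// for d = 6, 110b & 101b = 100b != 0, so d is not a power of 2.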
if ( CmpCommon::getDefault(COMP_BOOL_161) == DF_ON AND (d & (d-1)) )
{
// if at least one bit of d&(d-1) is not 0 then d is not a
// power of 2, so don't use OCB. Do this only when COMP_BOOL_161 is
// on (default is off). This fix is needed for 3-drive Blades systems.
return FALSE;
}
else
{
// keep checking the remaining indexes if the inner child is
// hash2 partitioned, otherwise FALSE will be returned
if (rightPartFunc->isAHash2PartitioningFunction())
continue;
else
return FALSE;
}
}
else
{
// no partitioning function found, don't use OCB join
DBGLOGMSG(" *** Index PartFunc is NULL - don't use OCB ");
return FALSE;
}
} //end of loop over available indexes
if ( rightPartFunc )
{
// Since we come here and didn't return FALSE - all
// indexes are good and we can use OCB
return TRUE;
}
else
{
// no available indexes. If index was available but not good
// then we would have returned FALSE earlier.
return FALSE;
}
}
else
{
// don't use OCB with partitioning requirement
return FALSE;
}
// we shouldn't have come here, but just in case for the future
return FALSE;
}
// Here are the conditions checked in this routine:
// 1. Number of base tables on right must be 1;
// 2. We only look at the clustering partitioning function on the right,
// not at any indexes;
// 3. execution location must not be DP2;
// 4. The base table on right is hash2 partitioned;
// 5. partreq is fuzzy, NULL or hash2;
// 6. The join predicates cover the partitioning key of the table on right;
// 7. The base table on right has more than one partition.
// 8. The join columns are not skewed (only if SkewBuster for NJ is disabled)
//
// If the parent requires a specific part function, we don't check whether
// that matches the partitioning function on the right (if it doesn't
// we can't use OCR and may be better off with a TYPE2 join)
NABoolean NestedJoin::OCRJoinIsFeasible(const Context* myContext) const
{
// Inner child should be a Scan node. For now just check
// the number of base tables; a more complex check will be
// implemented later.
if (child(1).getGroupAttr()->getNumBaseTables() != 1 )
{
return FALSE;
}
const ReqdPhysicalProperty* rppForMe =
myContext->getReqdPhysicalProperty();
// Do not consider OCR if we are in DP2.
if ( rppForMe->executeInDP2() )
return FALSE;
PartitioningRequirement *partReq =
rppForMe->getPartitioningRequirement();
// For now we allow only fuzzy part req without specified partitioning key,
// no part req, or hash2 part req.
if (NOT ((partReq == NULL)
OR
(partReq->isRequirementApproximatelyN() AND
NOT partReq->partitioningKeyIsSpecified())
OR
(partReq-> castToRequireHash2() != NULL)))
return FALSE;
// Do not consider OCR if the join predicates do not cover part key of the
// inner (correctness).
if ( JoinPredicateCoversChild1PartKey() == FALSE )
return FALSE;
PartitioningFunction *rightPartFunc =
getClusteringIndexPartFuncForRightChild();
// Do not consider OCR if the inner is not hash2 partitioned
if ( rightPartFunc->isAHash2PartitioningFunction() == FALSE )
return FALSE;
// Do not consider OCR if the inner is single partitioned
if ( rightPartFunc->getCountOfPartitions() == 1 )
return FALSE;
// If N2Js, which demand opens, are not to be disabled, make sure
// OCR is allowed only if the threshold of #opens is reached.
if (CmpCommon::getDefault(NESTED_JOINS_NO_NSQUARE_OPENS) == DF_OFF) {
Lng32 threshold = ActiveSchemaDB()->getDefaults().getAsLong(NESTED_JOINS_OCR_MAXOPEN_THRESHOLD);
// If the threshold is -1, do not do OCR.
if ( threshold == -1 )
return FALSE;
// If the total # of opens for this nested join is less than the threshold, return FALSE
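// Illustrative example (numbers assumed): with a maximum degree of
// parallelism of 32 and an inner table with 128 partitions, the
// estimated number of opens is 32 * 128 = 4096; if the threshold
// is 8192, then 4096 < 8192 and OCR is rejected here.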
if (CURRSTMT_OPTDEFAULTS->getMaximumDegreeOfParallelism() *
rightPartFunc->getCountOfPartitions() < threshold)
return FALSE;
}
Lng32 antiskewSkewEsps =
ActiveSchemaDB()->getDefaults().getAsLong(NESTED_JOINS_ANTISKEW_ESPS);
if ( antiskewSkewEsps > 0 )
return TRUE; // the skew busting for OCR is enabled. No more check.
//
// If the join is on a skewed set of columns from the left child and the
// nested join probing cache is not applicable, then do not try OCR.
//
if ( isProbeCacheApplicable(rppForMe->getPlanExecutionLocation()) == FALSE ) {
ValueIdSet joinPreds = getOriginalEquiJoinExpressions().getTopValues();
double threshold =
(ActiveSchemaDB()->getDefaults().getAsDouble(SKEW_SENSITIVITY_THRESHOLD)) /
rppForMe->getCountOfPipelines();
SkewedValueList* skList = NULL;
if ( childNodeContainSkew(0, joinPreds, threshold, &skList) == TRUE )
return FALSE;
}
return TRUE;
}
NABoolean NestedJoin::JoinPredicateCoversChild1PartKey() const
{
PartitioningFunction *rightPartFunc =
getClusteringIndexPartFuncForRightChild();
if ( rightPartFunc == NULL )
return FALSE;
const ValueIdSet& equiJoinExprFromChild1AsSet =
getOriginalEquiJoinExpressions().getBottomValues();
ValueIdSet child1PartKey(rightPartFunc->getPartitioningKey());
// The equi-join predicate should contain the part key for OCR
// to guarantee the correct result. This is because each row from
// the outer table will be partitioned using child1's partitioning
// function. If some column in child1's partitioning key is not
// covered by the equi-join predicate, then some column from child0
// will not be partitioned, which implies that the row can go to an
// incorrect partition.
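// Illustrative example (hypothetical tables): if the inner table is
// hash2 partitioned on (t1.b) and the only equi-join predicate is
// t0.a = t1.b, the part key is covered and each outer row is routed
// to exactly one inner partition. If only t0.a = t1.c were available,
// t1.b would be uncovered and outer rows could be sent to the wrong
// partition.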
if ( equiJoinExprFromChild1AsSet.contains(child1PartKey) ) {
return TRUE;
}
// double check the uncovered part key columns here. If they are
// all constants, then the join columns still cover the part keys.
// first get the columns that are covered.
ValueIdSet coveredPartKey(child1PartKey);
coveredPartKey.intersectSet(equiJoinExprFromChild1AsSet);
// second get the columns that are not covered.
ValueIdSet unCoveredPartKey(child1PartKey);
unCoveredPartKey.subtractSet(coveredPartKey);
// remove all columns that are constants.
unCoveredPartKey.removeConstExprReferences(FALSE /*consider expressions*/);
// if nothing is left, we return true.
if ( unCoveredPartKey.entries() == 0 )
return TRUE;
return FALSE;
}
NABoolean NestedJoin::okToAttemptESPParallelism (
const Context* myContext, /*IN*/
PlanWorkSpace*, /*IN, ignored*/
Lng32& numOfESPs, /*OUT*/
float& allowedDeviation, /*OUT*/
NABoolean& numOfESPsForced /*OUT*/)
{
const ReqdPhysicalProperty* rppForMe =
myContext->getReqdPhysicalProperty();
// CS or REPLICA: do not consider ESP parallelism if we are in DP2.
if ( rppForMe->executeInDP2() == TRUE )
return FALSE;
// rowsetIterator cannot be parallelized. Counting logic for rowNumber
// is not designed to work in nodes that are executing in parallel.
if (isRowsetIterator())
return FALSE;
// merge statement cannot be ESP parallelised if it has an INSERT clause.
// But if it is forced using CB189, then parallelise it at user's own risk.
if (isTSJForMerge())
{
if ((isTSJForMergeWithInsert()) &&
(CmpCommon::getDefault(COMP_BOOL_189) == DF_OFF))
return FALSE;
}
NABoolean result = FALSE;
DefaultToken parallelControlSettings =
getParallelControlSettings(rppForMe,
numOfESPs,
allowedDeviation,
numOfESPsForced);
if (parallelControlSettings == DF_OFF)
{
result = FALSE;
}
else if ( (parallelControlSettings == DF_MAXIMUM) AND
CURRSTMT_OPTDEFAULTS->maxParallelismIsFeasible()
)
{
numOfESPs = rppForMe->getCountOfPipelines();
// currently, numberOfPartitionsDeviation_ is set to 0 in
// OptDefaults when ATTEMPT_ESP_PARALLELISM is 'MAXIMUM'
allowedDeviation = CURRSTMT_OPTDEFAULTS->numberOfPartitionsDeviation();
// allow deviation by default
if (CmpCommon::getDefault(COMP_BOOL_62) == DF_OFF)
{
EstLogPropSharedPtr inLogProp = myContext->getInputLogProp();
EstLogPropSharedPtr child0OutputLogProp = child(0).outputLogProp(inLogProp);
const CostScalar child0RowCount =
(child0OutputLogProp->getResultCardinality()).minCsOne();
if ( child0RowCount.getCeiling() <
MINOF(numOfESPs,CURRSTMT_OPTDEFAULTS->numberOfRowsParallelThreshold())
)
{
// Fewer outer table rows than pipelines - allow one or more parts
allowedDeviation = 1.0;
}
}
result = TRUE;
}
else if (parallelControlSettings == DF_ON)
{
// Either user wants to try ESP parallelism for all operators,
// or they are forcing the number of ESPs for this operator.
// Set the result to TRUE. If the number of ESPs is not being forced,
// set the number of ESPs that should be used to the maximum number.
// Set the allowable deviation to either what we get from the
// defaults table, or a percentage that allows any
// number of partitions from 2 to the maximum number. i.e. allow
// the natural partitioning of the child as long as the child is
// partitioned and does not have more partitions than max pipelines.
// NEW HEURISTIC: If there are fewer outer table rows than the number
// of pipelines, then set the deviation to allow any level of natural
// partitioning, including one. This is because we don't want to
// repartition so few rows to get more parallelism, since we would
// end up with a lot of ESPs doing nothing.
if (NOT numOfESPsForced)
{
// Determine the number of outer table rows
EstLogPropSharedPtr inLogProp = myContext->getInputLogProp();
EstLogPropSharedPtr child0OutputLogProp = child(0).outputLogProp(inLogProp);
const CostScalar child0RowCount =
(child0OutputLogProp->getResultCardinality()).minCsOne();
numOfESPs = rppForMe->getCountOfPipelines();
if (child0RowCount.getCeiling() < numOfESPs)
{
// Fewer outer table rows than pipelines - allow one or more parts
allowedDeviation = 1.0;
}
else
{
if ( CURRSTMT_OPTDEFAULTS->deviationType2JoinsSystem() )
{
// -------------------------------------------------------------------
// A value for NUM_OF_PARTS_DEVIATION_TYPE2_JOINS exists.
// -------------------------------------------------------------------
allowedDeviation = CURRSTMT_OPTDEFAULTS->numOfPartsDeviationType2Joins();
}
else
{
// Need to make 2 the minimum number of parts to support. Use
// 1.99 to protect against rounding errors.
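// Illustrative arithmetic: with numOfESPs = 16 the deviation below
// is (16 - 1.99) / 16, roughly 0.876, so the smallest acceptable
// partition count is about 16 * (1 - 0.876) = 1.99, which in whole
// partitions gives the intended minimum of 2.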
allowedDeviation =
((float)numOfESPs - 1.99f) / (float)numOfESPs;
}
} // end if fewer outer table rows than pipelines
} // end if number of ESPs not forced
result = TRUE;
}
else
{
// Otherwise, the user must have specified "SYSTEM" for the
// ATTEMPT_ESP_PARALLELISM default. This means it is up to the
// optimizer to decide.
EstLogPropSharedPtr inLogProp = myContext->getInputLogProp();
EstLogPropSharedPtr child0OutputLogProp = child(0).outputLogProp(inLogProp);
CostScalar child0Rows =
(child0OutputLogProp->getResultCardinality()).minCsOne();
if (updateTableDesc())
{
// -----------------------------------------------------------------
// NJ on top of an insert/update/delete statement, make the
// left side match the partitioning scheme of the updated table
// -----------------------------------------------------------------
const PartitioningFunction *updPartFunc =
updateTableDesc()->getClusteringIndex()->getPartitioningFunction();
if (updPartFunc == NULL OR
updPartFunc->getCountOfPartitions() == 1)
{
numOfESPs = 1;
}
else // Target table is partitioned
{
// get an estimate of how many rows and what rowsize will be
// returned from the left child (the select part of the update)
RowSize child0RowSize = child(0).getGroupAttr()->getRecordLength();
CostScalar child0TableSize = child0Rows * child0RowSize;
// now divide the amount of data returned by the left by a
// default constant to determine how many ESPs we would like
// to work on this
double sizePerESP = CURRSTMT_OPTDEFAULTS->updatedBytesPerESP();
double numOfESPsDbl = ceil(child0TableSize.value() / sizePerESP);
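// Illustrative example (CQD value assumed, not the actual default):
// if the left child returns about 100 MB and updatedBytesPerESP()
// is 10 MB, then numOfESPsDbl = ceil(100 MB / 10 MB) = 10, which is
// then capped below by the pipeline and partition counts.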
Lng32 countOfPipelines = rppForMe->getCountOfPipelines();
// require no more ESPs than there are pipelines in the system,
if ( numOfESPsDbl > countOfPipelines )
numOfESPs = countOfPipelines;
else
numOfESPs = (Lng32) numOfESPsDbl;
// don't ask for more ESPs than there are updated partitions,
// the ESPs must be a grouping of that partitioning scheme
Lng32 countOfPartitions = updPartFunc->getCountOfPartitions();
if ( numOfESPs > countOfPartitions )
numOfESPs = countOfPartitions;
// This is an adjustment to allow control of parallelism by CQDs
// MINIMUM_ESP_PARALLELISM and NUMBER_OF_ROWS_PARALLEL_THRESHOLD.
// Without this adjustment insert/select will be serial in most
// of the cases because the number of ESPs calculated above
// may not allow partition grouping. This adjustment is controlled
// by CQD COMP_BOOL_30 (ON by default)
if (CmpCommon::getDefault(COMP_BOOL_30) == DF_ON)
{
const CostScalar rowCount =
(CmpCommon::getDefault(COMP_BOOL_125) == DF_ON)
? child0Rows + (getGroupAttr()->outputLogProp(inLogProp)->
getResultCardinality()).minCsOne()
: child0Rows;
const CostScalar numberOfRowsThreshold =
CURRSTMT_OPTDEFAULTS->numberOfRowsParallelThreshold();
if ( rowCount > numberOfRowsThreshold )
{
Lng32 optimalNumOfESPs = MINOF(countOfPipelines,
(Lng32)(rowCount / numberOfRowsThreshold).value());
// make numOfESPs an available level of parallelism:
// 16*N, 8*N, 4*N, ..., N, 1 where N is the number of segments
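// Illustrative example: if the maximum degree of parallelism is 64
// and MinParallelism works out to 10, the loop below halves
// 64 -> 32 -> 16 -> 8 and then backs up to 16, the smallest
// available level (64 / 2^k) that is still >= MinParallelism.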
Lng32 i = CURRSTMT_OPTDEFAULTS->getMaximumDegreeOfParallelism();
if (numOfESPs > i )
{
numOfESPs = i;
}
else
{
Lng32 MinParallelism =
MAXOF( MAXOF(CURRSTMT_OPTDEFAULTS->getMinimumESPParallelism(),
optimalNumOfESPs),
numOfESPs);
while(i > MinParallelism)
i/=2;
optimalNumOfESPs = (i<MinParallelism) ? i*=2 : i;
numOfESPs = MAXOF(numOfESPs, optimalNumOfESPs);
}
}
}
} // we can choose the number of partitions for the update
// For write operations, we always want to specify the number
// of ESPs we want to the child, and this is an exact number,
// so the deviation is 0.
allowedDeviation = 0.0;
if ( numOfESPs <= 1 )
result = FALSE;
else
result = TRUE;
} // end if Nested Join for a write operation
else
{
// Nested Join for a read operation.
// Return TRUE if the number of
// rows returned by child(0) exceeds the threshold from the
// defaults table. The recommended number of ESPs is also computed
// to be 1 process per <threshold> number of rows. This is then
// used to indicate the MINIMUM number of ESPs that will be
// acceptable. This is done by setting the allowable deviation
// to a percentage of the maximum number of partitions such
// that the recommended number of partitions is the lowest
// number allowed. We make the recommended number of partitions
// a minimum instead of a hard requirement because we don't
// want to be forced to repartition the child just to get "less"
// parallelism.
CostScalar rowCount = child0Rows;
// This is to test for better parallelism, taking into account
// not only child0 but also this operator's cardinality.
// This could be important for joins.
if(CmpCommon::getDefault(COMP_BOOL_125) == DF_ON)
{
rowCount += (getGroupAttr()->outputLogProp(inLogProp)->
getResultCardinality()).minCsOne();
}
const CostScalar numberOfRowsThreshold =
CURRSTMT_OPTDEFAULTS->numberOfRowsParallelThreshold();
if (rowCount > numberOfRowsThreshold)
{
numOfESPs = rppForMe->getCountOfPipelines();
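// Illustrative arithmetic: with a threshold of 5,000 rows,
// rowCount = 20,000 and numOfESPs = 16, the deviation below is
// MAXOF(1.001 - ceil(20000 / 5000) / 16, 0), about 0.751, making
// roughly 16 * (1 - 0.751) = 4 partitions the minimum, i.e. one
// ESP per <threshold> rows, as described above.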
allowedDeviation = (float) MAXOF(1.001 -
ceil((rowCount / numberOfRowsThreshold).value()) / numOfESPs,0);
result = TRUE;
}
else
{
result = FALSE;
}
} // end if for a read operation
} // end if the user let the optimizer decide
return result;
} // NestedJoin::okToAttemptESPParallelism()
//<pb>
//==============================================================================
// Synthesize physical properties for nested join operator's current plan
// extracted from a specified context.
//
// Input:
// myContext -- specified context containing this operator's current plan.
//
// planNumber -- plan's number within the plan workspace. Used optionally for
// synthesizing partitioning functions but unused in this
// derived version of synthPhysicalProperty().
//
// Output:
// none
//
// Return:
// Pointer to this operator's synthesized physical properties.
//
//==============================================================================
PhysicalProperty*
NestedJoin::synthPhysicalProperty(const Context *myContext,
const Lng32 planNumber,
PlanWorkSpace *pws)
{
const PhysicalProperty* const sppOfLeftChild =
myContext->getPhysicalPropertyOfSolutionForChild(0);
const PhysicalProperty* const sppOfRightChild =
myContext->getPhysicalPropertyOfSolutionForChild(1);
const ReqdPhysicalProperty* rppForMe =
myContext->getReqdPhysicalProperty();
ValueIdList newSortKey(sppOfLeftChild->getSortKey());
SortOrderTypeEnum newSortOrderType = sppOfLeftChild->getSortOrderType();
PartitioningFunction* newDp2SortOrderPartFunc =
sppOfLeftChild->getDp2SortOrderPartFunc();
// ---------------------------------------------------------------------
// add the sort columns of the second child to the ones of the first
// child, but only if the sorted columns of the first child are unique
// ---------------------------------------------------------------------
NABoolean canAppendRightColumns = FALSE;
NABoolean leftChildSortColsAreReq = FALSE;
NABoolean rightChildSortColsAreReq = FALSE;
NABoolean childSortOrderTypesAreSame = FALSE;
// should we even bother?
if (sppOfRightChild->isSorted() AND
NOT rowsFromLeftHaveUniqueMatch() AND
NOT (getGroupAttr()->getMaxNumOfRows() <= 1))
{
GroupAttributes *leftGA = child(0).getGroupAttr();
// can append right sort cols, if the left sort cols form a
// candidate key
// Example why this is: imagine the left table ordered by (a,b)
// and the right table ordered by c.
//
// If (a,b) is unique, the output of a join might look like
//
//  a  b  c     first child:    second child:
// -- -- --       a  b               c
//  1  1  1      -- --              --
//  1  1  2       1  1               1
//  1  2  1       1  2               2
//  1  2  2
//
// On the other hand, if (a,b) is not unique, you might get
//
//  a  b  c     left table:     right table:
// -- -- --       a  b               c
//  1  1  1      -- --              --
//  1  1  2       1  1               1
//  1  2  1       1  2               2
//  1  2  2       1  2
//  1  2  1
//  1  2  2
//
// which is of course not ordered by (a,b,c)
//
// Left join special case:
//
// For nested left-join the same rules may be applied.
// The fact that left-joins produce null values in the right side does
// not violate the required order. In this case the order is defined by
// the columns of the left child.
// Example:
// The left-join join predicate is (c between a and b) and (a,b) is unique.
// The output of a join might look like
//
//  a  b  c       left child:    right child:
// -- -- --         a  b              c
//  1  2  1        -- --             --
//  1  2  2         1  2              1
//  2  1  NULL      2  1              2
//  2  3  2         2  3
//
// Notice that whenever the left row has no matching rows in the right table
// (the right child columns are null) it produces only a single row in the result set
// and therefore the order is defined by the left child columns only.
ValueIdSet leftSortCols;
// make the list of sort cols into a ValueIdSet
leftSortCols.insertList(sppOfLeftChild->getSortKey());
// check for uniqueness of the sort columns
if (leftGA->isUnique(leftSortCols))
{
// Determine if the sort cols are required from the left and
// right children.
if (rppForMe->getSortKey() AND
(rppForMe->getSortKey()->entries() > 0))
{
ValueIdList reqdOrder0, reqdOrder1;
if (splitOrderReq(*(rppForMe->getSortKey()),
reqdOrder0,reqdOrder1))
{
if (NOT reqdOrder0.isEmpty())
leftChildSortColsAreReq = TRUE;
if (NOT reqdOrder1.isEmpty())
rightChildSortColsAreReq = TRUE;
}
}
if ((NOT (leftChildSortColsAreReq AND rightChildSortColsAreReq)) AND
rppForMe->getArrangedCols() AND
(rppForMe->getArrangedCols()->entries() > 0))
{
ValueIdSet reqdArr0, reqdArr1;
if (splitArrangementReq(*(rppForMe->getArrangedCols()),
reqdArr0,reqdArr1))
{
if (NOT reqdArr0.isEmpty())
leftChildSortColsAreReq = TRUE;
if (NOT reqdArr1.isEmpty())
rightChildSortColsAreReq = TRUE;
}
}
// if we passed the uniqueness test because the left
// child has a cardinality constraint of at most one row,
// then only include the left child sort key columns if
// there was a requirement for them.
if ((leftGA->getMaxNumOfRows() <= 1) AND
NOT newSortKey.isEmpty())
{
if (NOT leftChildSortColsAreReq)
{
newSortKey.clear();
newSortOrderType = NO_SOT;
newDp2SortOrderPartFunc = NULL;
}
} // end if there was a left child card constraint
// If we aren't passing up any sort key columns from the left
// child, then simply set the sort key columns and sort order
// type to those of the right child. Otherwise, we need to
// append the right child sort key columns, so set the flag to TRUE.
if (newSortKey.isEmpty())
{
// Only use the right child sort key columns without
// any left child sort key columns if the sort order type
// of the right child is not DP2.
if (sppOfRightChild->getSortOrderType() != DP2_SOT)
{
newSortKey = sppOfRightChild->getSortKey();
newSortOrderType = sppOfRightChild->getSortOrderType();
}
}
else
{
canAppendRightColumns = TRUE;
// The child sort order types are the same if they are equal and both
// children's dp2SortOrderPartFunc are the same (as in both being
// NULL), or they are both not null but they are equivalent.
if ((sppOfLeftChild->getSortOrderType() ==
sppOfRightChild->getSortOrderType()) AND
((sppOfLeftChild->getDp2SortOrderPartFunc() ==
sppOfRightChild->getDp2SortOrderPartFunc()) OR
((sppOfLeftChild->getDp2SortOrderPartFunc() != NULL) AND
(sppOfRightChild->getDp2SortOrderPartFunc() != NULL) AND
(sppOfLeftChild->getDp2SortOrderPartFunc()->
comparePartFuncToFunc(
*sppOfRightChild->getDp2SortOrderPartFunc()) == SAME)
)
)
)
childSortOrderTypesAreSame = TRUE;
}
} // end if left child sort cols are unique
} // right child table is sorted
// We can only append the sort key columns from the right child
// if it passed the tests above, AND the sort order types are the
// same OR there is a requirement for the right child sort key
// columns AND the right child sort order type is not DP2.
if (canAppendRightColumns AND
(childSortOrderTypesAreSame OR
(rightChildSortColsAreReq AND
sppOfRightChild->getSortOrderType() != DP2_SOT))
)
{
// append right child sort key columns
const ValueIdList & rightSortKey = sppOfRightChild->getSortKey();
if (!isLeftJoin())
{
for (Lng32 i = 0; i < (Lng32)rightSortKey.entries(); i++)
newSortKey.insert(rightSortKey[i]);
}
//++MV
else
{
// For left-join we need to translate the right child sort key
// to the left join output value id's in order to allow
// the cover test on the left-join to pass.
ValueIdList newRightSortKey;
ValueIdMap &map = rightChildMapForLeftJoin();
map.mapValueIdListUp(newRightSortKey, rightSortKey);
for (Lng32 i = 0; i < (Lng32)newRightSortKey.entries(); i++)
newSortKey.insert(newRightSortKey[i]);
}
//--MV
// The sort order type stays that of the left child, unless
// the left child sort order type is ESP_NO_SORT and the right
// child sort order type is ESP_VIA_SORT. In this case, we want
// to set the new sort order type to ESP_VIA_SORT, since a sort was
// done in producing at least part of the key, and setting the
// sort order type to ESP_NO_SORT would not be correct. If the
// left child sort order type was DP2 then this is the new sort
// order type because an executor process order is not produced.
if ((sppOfLeftChild->getSortOrderType() == ESP_NO_SORT_SOT) AND
(sppOfRightChild->getSortOrderType() == ESP_VIA_SORT_SOT))
newSortOrderType = ESP_VIA_SORT_SOT;
} // end if we can add sort key columns from the right child
// ---------------------------------------------------------------------
// synthesize plan execution location
// ---------------------------------------------------------------------
PlanExecutionEnum loc = sppOfLeftChild->getPlanExecutionLocation();
// merge statement cannot be ESP parallelised if it has an INSERT clause.
// But if it is forced using CB189, then parallelise it at user's own risk.
if (isTSJForMerge())
{
if ((isTSJForMergeWithInsert()) &&
(CmpCommon::getDefault(COMP_BOOL_189) == DF_OFF))
loc = EXECUTE_IN_MASTER;
}
// ||opt make a method to combine left and right location
// ---------------------------------------------------------------------
// Call the default implementation (RelExpr::synthPhysicalProperty())
// to synthesize the properties on the number of cpus.
// ---------------------------------------------------------------------
PhysicalProperty* sppTemp = RelExpr::synthPhysicalProperty(myContext,
planNumber,
pws);
// ---------------------------------------------------------------------
// The result of a nested join has the sort order of the outer
// table. The nested join maintains the partitioning of the outer table
// for plans 0,1,2,3. Otherwise (plan 4) it's outer child broadcast and
// we take the properties of the inner child as nested join properties
// ---------------------------------------------------------------------
PhysicalProperty* sppForMe = NULL;
NABoolean expldocbjoin = FALSE;
if ( planNumber < 4 )
{
if (rppForMe->executeInDP2() && updateTableDesc() &&
rppForMe->getPushDownRequirement())
{
// Push down IUD queries involving MVs.
//
// Pick the right child's partition function when
// the execution location is DP2, the updateTableDesc is not NULL
// and the push-down requirement is not NULL. When all three
// conditions are met, then this join is an implementation of
// in-DP2 IUD involving MVs. We can safely use the right partfunc
// because we have already verified the required partition function
// is the same as the partition function of the inner table (the
// IUD table) in childIndex=0 block of code.
//
// In all other cases, the rg checkFeasibility test will fail and
// thus we will not get to this call.
sppForMe = new (CmpCommon::statementHeap()) PhysicalProperty(
newSortKey,
newSortOrderType,
newDp2SortOrderPartFunc,
sppOfRightChild->getPartitioningFunction(),
loc,
combineDataSources(sppOfLeftChild->getDataSourceEnum(),
sppOfRightChild->getDataSourceEnum()),
sppOfRightChild->getIndexDesc(),
NULL,
sppOfRightChild->getPushDownProperty()
);
} else {
sppForMe = new (CmpCommon::statementHeap()) PhysicalProperty(
newSortKey,
newSortOrderType,
newDp2SortOrderPartFunc,
sppOfLeftChild->getPartitioningFunction(),
loc,
combineDataSources(sppOfLeftChild->getDataSourceEnum(),
sppOfRightChild->getDataSourceEnum()),
sppOfLeftChild->getIndexDesc(),
sppOfLeftChild->getPartSearchKey(),
sppOfLeftChild->getPushDownProperty(),
sppOfLeftChild->getexplodedOcbJoinProperty());
}
}
else
{
sppForMe =
new (CmpCommon::statementHeap()) PhysicalProperty(
newSortKey,
newSortOrderType,
newDp2SortOrderPartFunc,
sppOfRightChild->getPartitioningFunction(),
loc,
combineDataSources(sppOfLeftChild->getDataSourceEnum(),
sppOfRightChild->getDataSourceEnum()),
sppOfRightChild->getIndexDesc(),
sppOfRightChild->getPartSearchKey(),
sppOfRightChild->getPushDownProperty(), expldocbjoin);
}
sppForMe->setCurrentCountOfCPUs(sppTemp->getCurrentCountOfCPUs());
// remove anything that's not covered by the group attributes
sppForMe->enforceCoverageByGroupAttributes (getGroupAttr()) ;
delete sppTemp;
return sppForMe;
} // NestedJoin::synthPhysicalProperty()
// ---------------------------------------------------------------------
// Performs mapping on the partitioning function, from the
// nested join to the designated child.
// ---------------------------------------------------------------------
PartitioningFunction* NestedJoin::mapPartitioningFunction(
const PartitioningFunction* partFunc,
NABoolean rewriteForChild0)
{
ValueIdMap map(getOriginalEquiJoinExpressions());
PartitioningFunction* newPartFunc =
partFunc->copyAndRemap(map,rewriteForChild0);
SkewedDataPartitioningFunction* oldSKpf = NULL;
SkewedDataPartitioningFunction* newSKpf = NULL;
if ( rewriteForChild0 == FALSE /* map for child 1 */ AND
(oldSKpf=(SkewedDataPartitioningFunction*)
(partFunc->castToSkewedDataPartitioningFunction())) AND
(newSKpf=(SkewedDataPartitioningFunction*)
(newPartFunc->castToSkewedDataPartitioningFunction()))
)
{
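// When the skewed values coming from the outer (child 0) side are
// uniformly distributed across partitions, the matching rows from
// the inner (child 1) side must be replicated to all of those
// partitions to find their matches, so the skew indicator is
// switched to BROADCAST for the mapped function.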
if ( oldSKpf->getSkewProperty().isUniformDistributed() ) {
skewProperty newSK(oldSKpf->getSkewProperty());
newSK.setIndicator(skewProperty::BROADCAST);
newSKpf->setSkewProperty(newSK);
}
} else {
// map for child 0. Do nothing
}
return newPartFunc;
} // end NestedJoin::mapPartitioningFunction()
//<pb>
// -----------------------------------------------------------------------
// member functions for class NestedJoinFlow
// -----------------------------------------------------------------------
// -----------------------------------------------------------------------
// NestedJoinFlow::costMethod()
// Obtain a pointer to a CostMethod object providing access
// to the cost estimation functions for nodes of this class.
// -----------------------------------------------------------------------
CostMethod*
NestedJoinFlow::costMethod() const
{
static THREAD_P CostMethodNestedJoinFlow *m = NULL;
if (m == NULL)
m = new (GetCliGlobals()->exCollHeap()) CostMethodNestedJoinFlow;
return m;
}
//<pb>
// -----------------------------------------------------------------------
// member functions for class MergeJoin
// -----------------------------------------------------------------------
// ---------------------------------------------------------------------
// Performs mapping on the partitioning function, from the
// merge join to the designated child.
// ---------------------------------------------------------------------
PartitioningFunction* MergeJoin::mapPartitioningFunction(
const PartitioningFunction* partFunc,
NABoolean rewriteForChild0)
{
ValueIdMap map(getEquiJoinExpressions());
return partFunc->copyAndRemap(map,rewriteForChild0);
} // end MergeJoin::mapPartitioningFunction()
// -----------------------------------------------------------------------
// Determine if the merge join will be able to satisfy the parent
// partitioning requirements.
// -----------------------------------------------------------------------
NABoolean MergeJoin::parentAndChildPartReqsCompatible(
const ReqdPhysicalProperty* const rppForMe) const
{
PartitioningRequirement* partReq = rppForMe->getPartitioningRequirement();
// If there are any parent partitioning requirements, then check them.
if (partReq != NULL)
{
ValueIdSet reqPartKey = partReq->getPartitioningKey();
Lng32 reqPartCount = partReq->getCountOfPartitions();
ValueIdSet joinLeftPartKey(getEquiJoinExprFromChild0());
ValueIdSet joinRightPartKey(getEquiJoinExprFromChild1());
if (partReq->isRequirementFullySpecified())
{
// The parent's required partitioning columns must be a subset
// of either the left or right child's join columns, if we
// are to run in parallel.
// Contains will always return TRUE if the required part key
// is the empty set.
if (NOT (joinLeftPartKey.contains(reqPartKey) OR
joinRightPartKey.contains(reqPartKey)))
{
// Parent required part key can only be satisfied with a
// single partition partitioning function.
// See if this is allowed by the parent requirement
if (reqPartCount > EXACTLY_ONE_PARTITION)
return FALSE;
}
}
else // fuzzy requirement
{
CMPASSERT(partReq->isRequirementApproximatelyN());
// Did the parent specify a required partitioning key?
if (reqPartKey.entries() > 0)
{ // yes
joinLeftPartKey.intersectSet(reqPartKey);
joinRightPartKey.intersectSet(reqPartKey);
// If the required partitioning key columns and the join cols
// are disjoint, then the requirement must allow a single
// partition part func, because this will be the only way
// to satisfy both the requirement and the join cols.
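// Illustrative arithmetic: with reqPartCount = 8 and an allowed
// deviation of 0.5, the requirement still demands at least
// 8 - (8 * 0.5) = 4 partitions, and 4 > 1, so a single-partition
// plan cannot satisfy it and FALSE is returned.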
#pragma nowarn(1506) // warning elimination
if (joinLeftPartKey.isEmpty() AND
joinRightPartKey.isEmpty() AND
(reqPartCount != ANY_NUMBER_OF_PARTITIONS) AND
((reqPartCount -
(reqPartCount * partReq->castToRequireApproximatelyNPartitions()->
getAllowedDeviation())) >
EXACTLY_ONE_PARTITION)
)
return FALSE;
#pragma warn(1506) // warning elimination
}
} // end if fuzzy requirement
} // end if there was a partitioning requirement for the join
return TRUE;
}; // MergeJoin::parentAndChildPartReqsCompatible()
// -----------------------------------------------------------------------
// Given an input ordering and a set of potential merge join predicates,
// generate the new merge join sort orders for both the left and right children.
// The generated sort orders cover the leading prefix of the provided
// ordering that is referenced by the set of (merge join) predicates.
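// Illustrative example (assumed columns): for ordering (A, B, C) and
// equijoin predicates {A = X, B = Y}, the outputs are leftSortOrder =
// (A, B), rightSortOrder = (X, Y), orderedMJPreds = {A = X, B = Y}, and
// completelyCovered = FALSE, since C is not referenced by any predicate.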
// -----------------------------------------------------------------------
void MergeJoin::generateSortOrders (const ValueIdList & ordering, /* input */
const ValueIdSet & preds, /* input */
ValueIdList &leftSortOrder, /* output */
ValueIdList &rightSortOrder,/* output */
ValueIdList &orderedMJPreds,/* output */
NABoolean &completelyCovered /* output */
) const
{
NABoolean done = FALSE;
Lng32 i = 0;
ValueId predId, referencedValId;
ValueId leftOrderValId, rightOrderValId;
NABoolean isOrderPreserving;
OrderComparison order1,order2;
while (!done && i < (Lng32)ordering.entries())
{
// Get the value id for the simplified form of the ordering expression.
referencedValId = ordering[i].getItemExpr()->
simplifyOrderExpr(&order1)->getValueId();
// Check whether the ordering expression is covered by one
// of the merge join predicates.
if (preds.referencesTheGivenValue (referencedValId, predId))
{
ItemExpr *predExpr = predId.getItemExpr();
if (predExpr->isAnEquiJoinPredicate(child(0).getGroupAttr(),
child(1).getGroupAttr(),
getGroupAttr(),
leftOrderValId, rightOrderValId,
isOrderPreserving))
{
//
// Fix solution 10-081117-7343 (R2.4 - Error 7000 - GenRelJoin.cpp:
// Merge Join: expression not found).
//
// The root cause of the assertion is that MergeJoin::codeGen()
// is not written to deal with constants in the orderedMJPreds.
// Unfortunately that method is not easy to extend for
// constants, primarily because the constant is not added to
// the maptable until the very end of the method, in a codegen call
// to the container expression. But without the generated Attributes
// for the constant, the left dup check expression happening before
// codeGen on the constant cannot be generated.
//
// The fix in the optimizer is to disable MJ when the orderedMJPreds
// contain a constant, by clearing the ordered join preds produced in
// this method. An empty orderedMJPreds set will force method
// MergeJoin::createContextForAChild() to produce no context
// (thus no MJ plan).
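// Illustrative example (hypothetical predicate): an equijoin predicate
// such as t1.a = t2.b + 1 references the constant 1 and would therefore
// disable the merge join plan here.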
ValueIdSet mjPredAsVidSet(predId);
for (Lng32 childIdx = 0; childIdx<predExpr->getArity(); childIdx++) {
mjPredAsVidSet.insert(predExpr->child(childIdx)->
castToItemExpr()->getValueId());
}
ItemExpr* constant = NULL;
if (mjPredAsVidSet.referencesAConstValue(&constant) == TRUE)
{
leftSortOrder.clear();
rightSortOrder.clear();
orderedMJPreds.clear();
break;
}
// Determine if the left side of the equijoin pred is an
// expression that results in an inverse order.
const ValueId & simpLeftOrderValId =
leftOrderValId.getItemExpr()->simplifyOrderExpr(&order2)->
getValueId();
// if the order expression and the left side equijoin expression
// do not have the same order, then generate an INVERSE
// expression on top of the left side equijoin expression.
if (order1 != order2)
{
InverseOrder * leftInverseExpr =
new(CmpCommon::statementHeap())
InverseOrder (leftOrderValId.getItemExpr());
leftInverseExpr->synthTypeAndValueId();
leftOrderValId = leftInverseExpr->getValueId();
}
// Determine if the right side of the equijoin pred is an
// expression that results in an inverse order.
const ValueId & simpRightOrderValId =
rightOrderValId.getItemExpr()->simplifyOrderExpr(&order2)->
getValueId();
// if the order expression and the right side equijoin expression
// do not have the same order, then generate an INVERSE
// expression on top of the right side equijoin expression.
if (order1 != order2)
{
InverseOrder * rightInverseExpr =
new(CmpCommon::statementHeap())
InverseOrder (rightOrderValId.getItemExpr());
rightInverseExpr->synthTypeAndValueId();
rightOrderValId = rightInverseExpr->getValueId();
}
// Prevent duplicate predicates; this is a correctness issue, but
// more importantly helps to avoid assertions in pre-CodeGen.
// The only way we could try to insert the same equijoin pred
// into the orderedMJPreds would be if there was a duplicate
// expression in the input ordering - this should not happen,
// but we will check just to be sure.
if ( NOT orderedMJPreds.contains(predId) )
{
leftSortOrder.insert (leftOrderValId);
rightSortOrder.insert (rightOrderValId);
orderedMJPreds.insert (predId);
}
}
else
// These preds should be equi-join predicates.
ABORT ("Internal Error in MergeJoin::generateSortOrder.");
}
else
done = TRUE;
i++;
}
// Were all columns of the provided order covered by join predicates?
if (i == (Lng32)ordering.entries())
completelyCovered = TRUE;
else
completelyCovered = FALSE;
} // MergeJoin::generateSortOrders()
// -----------------------------------------------------------------------
// Generate an arrangement requirement. Used only for creating an
// arrangement requirement for the right child when we try the right
// child first.
// -----------------------------------------------------------------------
void MergeJoin::genRightChildArrangementReq(
const ReqdPhysicalProperty* const rppForMe,
RequirementGenerator &rg) const
{
ValueIdSet rightJoinColumns;
rightJoinColumns.insertList(getEquiJoinExprFromChild1());
ValueIdList rightChildOrder;
NABoolean reqOrderExists = FALSE;
NABoolean reqOrderCompletelyCovered = FALSE;
NABoolean reqArrangementExists = FALSE;
if (rppForMe->getSortKey() AND
(rppForMe->getSortKey()->entries() > 0))
{
reqOrderExists = TRUE;
}
if (rppForMe->getArrangedCols() AND
(rppForMe->getArrangedCols()->entries() > 0))
{
reqArrangementExists = TRUE;
}
// If there is a required order, we need to make sure that for the
// left child join columns that are a prefix of the req. order cols,
// we require the corresponding right child join columns to be in
// that order. If all of the required order cols are covered by
// join columns, we can add the join columns as a required
// arrangement. This will allow any join predicates for the excess
// join columns to be used as merge join predicates.
if (reqOrderExists)
{
ValueIdList leftChildOrder;
ValueIdList orderedMJPreds;
generateSortOrders (*(rppForMe->getSortKey()),
getEquiJoinPredicates(),
leftChildOrder,
rightChildOrder,
orderedMJPreds,
reqOrderCompletelyCovered);
if (orderedMJPreds.entries() > 0)
{
// At least one of the left child merge join columns was compatible
// with the required sort order columns. This should always be
// true, because if it wasn't we should have given up when
// generating the first child context.
// Add the right child join columns whose left child equivalents
// were a prefix of the required order columns as a required
// sort order.
// Example: Req. order: ABC Join Columns: ACDE
// Child req. to generate: Ordered on A
rg.addSortKey(rightChildOrder);
// If all of the required order columns were covered, we can
// add the join columns as an arrangement, so we
// can use any join columns that were not part of
// the required order as merge join predicates. But, if there
// is a required arrangement, defer doing this until we check
// the join columns against the required arrangement.
// Example: Req. order: C Join Columns: ABCD
// Child req. to generate: Ordered on C, Arranged on ABCD
if (reqOrderCompletelyCovered AND NOT reqArrangementExists)
rg.addArrangement(rightJoinColumns);
}
}
// If there is a required arrangement, we must make sure that the
// join columm arrangement we ask for will be able to satisfy the
// required arrangement. We can only add a join column arrangement
// requirement if there was no required order or there was one but
// the join columns completely covered the required order columns.
if (reqArrangementExists AND
((NOT reqOrderExists) OR
(reqOrderExists AND reqOrderCompletelyCovered)))
{
// To ensure that the join column arrangement requirement is compatible
// with the required arrangement, we may have to set up a required
// order for some of the join columns in addition to a required
// arrangement, or we may have to remove some of the join
// columns from the required arrangement. Note that for any
// columns that are removed, they will not be able to be used
// as merge join predicates.
ValueIdSet leftJoinColumns;
ValueIdList rightChildOrder;
ValueIdSet reqArrangement(*(rppForMe->getArrangedCols()));
ValueIdSet simpleLeftJoinColumns;
ValueIdSet simpleReqArrangement;
ValueId vid,svid;
ValueId lvid,rvid;
ValueIdMap map(getEquiJoinExpressions());
leftJoinColumns.insertList(getEquiJoinExprFromChild0());
// First, build the simplified versions of the left join predicate
// columns and the required arrangement columns.
for (vid = leftJoinColumns.init();
leftJoinColumns.next(vid);
leftJoinColumns.advance(vid))
simpleLeftJoinColumns +=
vid.getItemExpr()->simplifyOrderExpr()->getValueId();
for (vid = reqArrangement.init();
reqArrangement.next(vid);
reqArrangement.advance(vid))
simpleReqArrangement +=
vid.getItemExpr()->simplifyOrderExpr()->getValueId();
if (simpleReqArrangement.contains(simpleLeftJoinColumns))
// The left child join columns are a subset of the required
// arrangement columns. Nothing special to do - just add
// the right child join cols as the arrangement requirement.
// Example: Req. arrangement: BCD Join Columns: CD
// Child req. to generate: Arranged on CD
rg.addArrangement(rightJoinColumns);
else if (simpleLeftJoinColumns.contains(simpleReqArrangement))
{
// The required arrangement columns are a subset of the left
// child join columns.
// Example: Req. arrangement: BC Join Columns: ABCD
// Child req. to generate: Ordered on BC, Arranged on ABCD
// Determine which left child join columns are also in the
// required arrangement.
simpleLeftJoinColumns.intersectSet(simpleReqArrangement);
// Set up a required order consisting of the right child
// join columns whose left child equivalents were in the
// required arrangement. Note that if there was a required
// order from the parent, then rightChildOrder will already
// contain some columns. We may end up adding the same columns
// again to rightChildOrder. Not to worry, the requirement
// generator will eliminate the duplicates before processing
// it in the addSortKey method.
// Example: Req. order: C Req. arrangement: BC Join Columns: ABCD
// Child req. to generate: Ordered by CBC (CB), Arranged on ABCD
for (lvid = leftJoinColumns.init();
leftJoinColumns.next(lvid);
leftJoinColumns.advance(lvid))
{
svid = lvid.getItemExpr()->simplifyOrderExpr()->getValueId();
if (simpleLeftJoinColumns.contains(svid))
{
map.mapValueIdDown(lvid,rvid);
rightChildOrder.insert(rvid);
}
}
rg.addSortKey(rightChildOrder);
rg.addArrangement(rightJoinColumns);
}
else // neither is a subset or equal to the other
// Example: Req. arrangement: ABC Join Columns: BD
// Child req. to generate: Arranged on B
{
// Determine which left child join columns are also in the
// required arrangement. Note that the resultant set
// cannot be empty. This is because at least one of the join
// columns must be compatible with the required arrangement,
// or we would have given up when generating the first child context.
simpleLeftJoinColumns.intersectSet(simpleReqArrangement);
// Remove the right child join columns whose left child
// equivalents are not in the required arrangement.
for (lvid = leftJoinColumns.init();
leftJoinColumns.next(lvid);
leftJoinColumns.advance(lvid))
{
svid = lvid.getItemExpr()->simplifyOrderExpr()->getValueId();
if (NOT simpleLeftJoinColumns.contains(svid))
{
map.mapValueIdDown(lvid,rvid);
rightJoinColumns -= rvid;
}
}
// Add the remaining right child join columns as the child
// arrangement requirement.
rg.addArrangement(rightJoinColumns);
} // end if neither is a subset of the other
} // end if a required arrangement
// If there is no required order and no required arrangement,
// then we can just add the join columns as a required arrangement.
if (NOT reqOrderExists AND
NOT reqArrangementExists)
{
rg.addArrangement(rightJoinColumns);
}
} // MergeJoin::genRightChildArrangementReq()
//<pb>
// -----------------------------------------------------------------------
// MergeJoin::costMethod()
// Obtain a pointer to a CostMethod object providing access
// to the cost estimation functions for nodes of this class.
// -----------------------------------------------------------------------
CostMethod*
MergeJoin::costMethod() const
{
static THREAD_P CostMethodMergeJoin *m = NULL;
if (m == NULL)
m = new (GetCliGlobals()->exCollHeap()) CostMethodMergeJoin();
return m;
} // MergeJoin::costMethod()
//<pb>
Context* MergeJoin::createContextForAChild(Context* myContext,
PlanWorkSpace* pws,
Lng32& childIndex)
{
// ---------------------------------------------------------------------
// Merge Join generates at most 2 context pairs. The first is
// either a non-parallel plan or a matching partitions parallel plan,
// where we generate the left child context first. The second pair is
// either a non-parallel plan or a matching partitions parallel plan,
// where we generate the right child context first.
// The reason we try matching partitions plans both ways is to try
// and avoid having to repartition both tables. If we only try one
// way and the first table must repartition, then the second table
// must repartition if it is going to be able to match the hash
// repartitioning function that the first table synthesized. If we
// were to try the other way and the first table this time did not
// need to repartition, then we would only have to range repartition
// the second table.
// The reason we try non-parallel plans both ways is because the
// first child tried only has to match an arrangement of the merge
// join columns, but the second child must match an order of the
// merge join columns. This might force us to sort the second child
// since it is harder to match an order than an arrangement.
// The second child might be large and thus more expensive to
// sort than the first child, so we might want to try it both ways.
// ---------------------------------------------------------------------
Context* result = NULL;
Lng32 planNumber;
Context* childContext = NULL;
const ReqdPhysicalProperty* rppForMe =
myContext->getReqdPhysicalProperty();
PartitioningRequirement* partReqForMe =
rppForMe->getPartitioningRequirement();
const ReqdPhysicalProperty* rppForChild = NULL;
Lng32 childNumPartsRequirement = ANY_NUMBER_OF_PARTITIONS;
float childNumPartsAllowedDeviation = 0.0;
NABoolean numOfESPsForced = FALSE;
ValueIdSet equiJoinPreds = getEquiJoinPredicates();
// If either child of the merge join has a constraint that limits
// the number of rows it can produce to one or less, then merge
// join is not a good plan, so just give up now.
GroupAttributes *child0GA = child(0).getGroupAttr();
GroupAttributes *child1GA = child(1).getGroupAttr();
if ((child0GA != NULL AND (child0GA->getMaxNumOfRows() <= 1)) OR
(child1GA != NULL AND (child1GA->getMaxNumOfRows() <= 1)))
return NULL;
// ---------------------------------------------------------------------
// Compute the number of child plans to consider.
// ---------------------------------------------------------------------
Lng32 childPlansToConsider = 4;
CollIndex numJoinCols = getEquiJoinExprFromChild0().entries();
NABoolean mustTryBothChildrenFirst = TRUE;
// Do we need to generate two different plans where we alternate
// which child to try first, in order to guarantee that we only
// sort if absolutely necessary and that, if we do sort, we sort the
// smallest child? The crux of the matter is that it is easier to satisfy
// an arrangement than a sort order, and whoever goes first only
// has to satisfy an arrangement of the join columns, not a sort
// order of the join columns. But, we can get away with trying
// just one of the plans if there already is a required sort order
// whose number of entries is greater than or equal to the number
// of join columns, because then the join columns are restricted
// to be in this order and so it does not matter which child we try first -
// they both must satisfy a sort order of the join columns instead
// of an arrangement. We can also get away with only trying one of
// the plans if there is only one join column, because then it costs
// the same to satisfy an arrangement of the one column as a sort
// order of the one column.
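// Illustrative example (assumed columns): with a required sort order
// (A, B) and exactly the join columns {A, B}, both children are pinned
// to the order (A, B) either way, so trying only one plan loses nothing.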
if (((rppForMe->getSortKey() != NULL) AND
(rppForMe->getSortKey()->entries() >= numJoinCols)) OR
(numJoinCols == 1))
mustTryBothChildrenFirst = FALSE;
// If we don't need to try two plans (four child plans) for sort
// purposes and we don't need to try two for parallel purposes,
// then indicate we will only try one plan (two child plans).
if (NOT mustTryBothChildrenFirst AND
((rppForMe->getCountOfPipelines() == 1) OR
((partReqForMe != NULL) AND
partReqForMe->isRequirementFullySpecified())
)
)
childPlansToConsider = 2;
// ---------------------------------------------------------------------
// The creation of the next Context for a child depends upon the
// the number of child Contexts that have been created in earlier
// invocations of this method.
// ---------------------------------------------------------------------
while ((pws->getCountOfChildContexts() < childPlansToConsider) AND
(rppForChild == NULL))
{
// If we stay in this loop because we didn't generate some
// child contexts, we need to reset the child plan count when
// it gets to be as large as the arity, because otherwise we
// would advance the child plan count past the arity.
if (pws->getPlanChildCount() >= getArity())
pws->resetPlanChildCount();
planNumber = pws->getCountOfChildContexts() / 2;
switch (pws->getCountOfChildContexts())
{
case 0:
childIndex = 0;
break;
case 1:
childIndex = 1;
break;
case 2:
childIndex = 1;
break;
case 3:
childIndex = 0;
break;
}
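// With this mapping, contexts 0 and 1 form plan 0 (left child first)
// and contexts 2 and 3 form plan 1 (right child first), consistent
// with planNumber = countOfChildContexts / 2 above.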
// -------------------------------------------------------------------
// Create the 1st Context for left child:
// -------------------------------------------------------------------
if ((pws->getCountOfChildContexts() == 0) AND
currentPlanIsAcceptable(planNumber,rppForMe))
{
ValueIdSet joinColumns;
RequirementGenerator rg(child(0),rppForMe);
// Convert equijoin columns from a list to a set.
joinColumns.insertList(getEquiJoinExprFromChild0());
// ---------------------------------------------------------------
// If this is an equijoin, create a partitioning requirement,
// which uses the expressions from the left child that are
// referenced in the equijoin predicate, as partitioning keys.
// ---------------------------------------------------------------
rg.addPartitioningKey(joinColumns);
// Optimize the left child with required arrangement = the
// joining columns. If my Context has a required order or
// arrangement, then potentially modify the join columns
// to be something that is compatible with the existing
// required order or arrangement.
if (myContext->requiresOrder())
{
rg.makeArrangementFeasible(joinColumns);
if (joinColumns.isEmpty())
// Impossible to satisfy the parent order or arrangement
// and the merge join arrangement at the same time.
// Give up now.
return NULL;
}
rg.addArrangement(joinColumns);
// --------------------------------------------------------------------
// If this is a CPU or memory-intensive operator then add a
// requirement for a maximum number of partitions, unless
// that requirement would conflict with our parent's requirement.
//
// --------------------------------------------------------------------
if (okToAttemptESPParallelism(myContext,
pws,
childNumPartsRequirement,
childNumPartsAllowedDeviation,
numOfESPsForced))
{
if (NOT numOfESPsForced)
rg.makeNumOfPartsFeasible(childNumPartsRequirement,
&childNumPartsAllowedDeviation);
rg.addNumOfPartitions(childNumPartsRequirement,
childNumPartsAllowedDeviation);
} // end if ok to try parallelism
// Produce the requirements and make sure they are feasible.
if (rg.checkFeasibility())
{
// Produce the required physical properties.
rppForChild = rg.produceRequirement();
}
} // end if 1st child context
// -------------------------------------------------------------------
// Create 1st Context for right child:
// Any required order matches the order that is produced by the
// optimal solution for my left child.
//
// NOTE: We assume that order of the rows that are produced by
// the merge join is only affected by its left child.
// So, the input for the right child has no dependency
// on the required order that is specified in my myContext.
// -------------------------------------------------------------------
else if ((pws->getCountOfChildContexts() == 1) AND
currentPlanIsAcceptable(planNumber,rppForMe))
{
ValueIdList leftChildOrder;
ValueIdList rightChildOrder;
ValueIdList orderedMJPreds;
NABoolean completelyCovered = FALSE;
// ---------------------------------------------------------------
// Cost limit exceeded? Erase the Context from the workspace.
// Erasing the Context does not decrement the count of Contexts
// considered so far.
// ---------------------------------------------------------------
if (NOT pws->isLatestContextWithinCostLimit() AND
NOT CURRSTMT_OPTDEFAULTS->OPHuseFailedPlanCost() )
pws->eraseLatestContextFromWorkSpace();
childContext = pws->getChildContext(0,0);
// ---------------------------------------------------------------
// If the Context that was created for the left child has an
// optimal solution whose cost is within the specified cost
// limit, create the corresponding Context for the right child.
// ---------------------------------------------------------------
if((childContext != NULL) AND childContext->hasOptimalSolution())
{
const PhysicalProperty*
sppForChild = childContext->getPhysicalPropertyForSolution();
// ---------------------------------------------------------------
// spp should have been synthesized for child's optimal plan.
// ---------------------------------------------------------------
CMPASSERT(sppForChild != NULL);
PartitioningFunction* childPartFunc =
sppForChild->getPartitioningFunction();
NABoolean rewriteForChild0 = FALSE;
ValueIdMap map(getEquiJoinExpressions());
PartitioningFunction* childPartFuncRewritten =
childPartFunc->copyAndRemap(map,
rewriteForChild0);
PartitioningRequirement* partReqForChild =
childPartFuncRewritten->makePartitioningRequirement();
RequirementGenerator rg (child(1),rppForMe);
// Remove any parent requirements for the sort key or arrangement,
// since they do not need to be satisfied by the
// right child of a join (we only insist that the left child
// satisfy these requirements).
if (myContext->requiresOrder())
{
rg.removeSortKey();
rg.removeArrangement();
}
// Remove any parent partitioning requirements, since we have
// already enforced this on the left child.
rg.removeAllPartitioningRequirements();
// Now, add in the Join's partitioning requirement for the
// left child.
rg.addPartRequirement(partReqForChild);
// -----------------------------------------------------------
// Use the sort key of the solution for the left child for
// creating a required order, which is expressed in terms of
// the corresponding columns from the right child that appear
// in the merge join predicates.
// -----------------------------------------------------------
generateSortOrders (sppForChild->getSortKey(),
equiJoinPreds,
leftChildOrder,
rightChildOrder,
orderedMJPreds,
completelyCovered);
if (orderedMJPreds.entries() > 0)
{
// At least one merge join predicate on the sort key
// columns was found. This will always be true at this
// point, unless there was a constraint that limited
// the number of left child rows to at most one. If this
// occurs, the satisfied method will allow a sort key
// that does not satisfy the arrangement requirement that
// merge join generated, and so the sort key may not cover
// any of the equijoin predicates. We could allow a merge
// join plan in this case, but if the other child only
// returns one row then merge join is not a good idea anyway.
// Note that in this case we should have given up on generating
// any child contexts in the beginning of this method.
// Add the ValueIdList returned in "rightChildOrder" as
// the required sort order for the right child
rg.addSortKey(rightChildOrder);
// Produce the requirements and make sure they are feasible.
if (rg.checkFeasibility())
{
// Produce the required physical properties.
rppForChild = rg.produceRequirement();
// Remember the merge join predicates in the RelExpr
setOrderedMJPreds (orderedMJPreds);
setLeftSortOrder (leftChildOrder);
setRightSortOrder (rightChildOrder);
// Need to save any equijoin predicates that were not
// used for orderedMJPreds. Store them in selectionPred(),
// if not for an outer join or a semijoin, or in
// joinPred(), otherwise.
if (orderedMJPreds.entries() != equiJoinPreds.entries())
{
ValueIdSet leftOverEquiJoinPreds = equiJoinPreds;
ValueIdSet ordMJPreds(orderedMJPreds);
leftOverEquiJoinPreds -= ordMJPreds;
// NB: to preserve the separation of equi- and nonequi- join
// predicates (see Join::separateEquiAndNonEquiJoinPrediates),
// we must not only save leftOverEquiJoinPreds into joinPred or
// selectionPred but also remove orderedMJPreds from them.
// Otherwise, duplicate equijoin predicates can cause
// MergeJoin::preCodeGen to GenAssert(!mjp.isEmpty()) because
// mjp.replaceVEGExpressions refuses to replace VEGExpr that
// it has already replaced.
if (isInnerNonSemiJoin())
{
selectionPred() -= ordMJPreds;
selectionPred() += leftOverEquiJoinPreds;
}
else
{
joinPred() -= ordMJPreds;
joinPred() += leftOverEquiJoinPreds;
}
}
} // end if merge join and parent requirements are compatible
} // end if merge join predicates were found
} // endif previous Context has an optimal solution
} // end if 2nd child context
// -------------------------------------------------------------------
// Create 2nd Context for the right child:
// The Context for the right child contains
// required arrangement = joining columns.
// -------------------------------------------------------------------
else if ((pws->getCountOfChildContexts() == 2) AND
currentPlanIsAcceptable(planNumber,rppForMe))
{
ValueIdSet joinColumns;
RequirementGenerator rg(child(1),rppForMe);
// Remove any parent requirements for the sort key or arrangement,
// since they do not need to be satisfied by the
// right child of a join (we only insist that the left child
// satisfy these requirements).
if (myContext->requiresOrder())
{
rg.removeSortKey();
rg.removeArrangement();
}
// Generate the arrangement requirement for the right child.
// This needs a special method because it is very complicated
// when we process the right child first. This is because we
// must take the required order and/or arrangement into account
// without actually enforcing it.
genRightChildArrangementReq(rppForMe, rg);
// We must insist that the right child match the parent partitioning
// requirements, because we are dealing with the right child first.
// The right child will satisfy the parent somehow (if possible) and
// the requirement we get from the right child will then be given to
// the left child, and so the parent requirement will be enforced
// on the left child in this way.
// So, we don't remove the parent requirements.
joinColumns.insertList(getEquiJoinExprFromChild1());
rg.addPartitioningKey(joinColumns);
// --------------------------------------------------------------------
// If this is a CPU or memory-intensive operator then add a
// requirement for a maximum number of partitions, unless
// that requirement would conflict with our parent's requirement.
//
// --------------------------------------------------------------------
if (okToAttemptESPParallelism(myContext,
pws,
childNumPartsRequirement,
childNumPartsAllowedDeviation,
numOfESPsForced))
{
if (NOT numOfESPsForced)
rg.makeNumOfPartsFeasible(childNumPartsRequirement,
&childNumPartsAllowedDeviation);
rg.addNumOfPartitions(childNumPartsRequirement,
childNumPartsAllowedDeviation);
} // end if ok to try parallelism
// Produce the requirements and make sure they are feasible.
if (rg.checkFeasibility())
{
// Produce the required physical properties.
rppForChild = rg.produceRequirement();
}
} // end if 3rd child context
// -------------------------------------------------------------------
// Create 2nd Context for the left child:
// Force the left child to have the same order as the
// right child. If there is a required order in my
// myContext, ensure that it is satisfied.
// -------------------------------------------------------------------
else if ((pws->getCountOfChildContexts() == 3) AND
currentPlanIsAcceptable(planNumber,rppForMe))
{
ValueIdList leftChildOrder;
ValueIdList rightChildOrder;
ValueIdList orderedMJPreds;
NABoolean completelyCovered = FALSE;
// ---------------------------------------------------------------
// Cost limit exceeded? Erase the Context from the workspace.
// Erasing the Context does not decrement the count of Contexts
// considered so far.
// ---------------------------------------------------------------
if (NOT pws->isLatestContextWithinCostLimit() AND
NOT CURRSTMT_OPTDEFAULTS->OPHuseFailedPlanCost() )
pws->eraseLatestContextFromWorkSpace();
childContext = pws->getChildContext(1,1);
// ---------------------------------------------------------------
// If the Context that was created for the right child has an
// optimal solution whose cost is within the specified cost
// limit, create the corresponding Context for the left child.
// ---------------------------------------------------------------
if((childContext != NULL) AND childContext->hasOptimalSolution())
{
const PhysicalProperty*
sppForChild = childContext->getPhysicalPropertyForSolution();
// ---------------------------------------------------------------
// spp should have been synthesized for child's optimal plan.
// ---------------------------------------------------------------
CMPASSERT(sppForChild != NULL);
PartitioningFunction* childPartFunc =
sppForChild->getPartitioningFunction();
NABoolean rewriteForChild0 = TRUE;
ValueIdMap map(getEquiJoinExpressions());
PartitioningFunction* childPartFuncRewritten =
childPartFunc->copyAndRemap(map,
rewriteForChild0);
PartitioningRequirement* partReqForChild =
childPartFuncRewritten->makePartitioningRequirement();
RequirementGenerator rg (child(0),rppForMe);
// We have already applied the parent partitioning requirements
// to the right child, so no need to apply them to the left child.
rg.removeAllPartitioningRequirements();
// Now, add in the Join's partitioning requirement for the
// right child.
rg.addPartRequirement(partReqForChild);
// -----------------------------------------------------------
// Use the sort key of the solution for the right child for
// creating a required order, which is expressed in terms of
// the corresponding columns from the left child that appear
// in the merge join predicates.
// -----------------------------------------------------------
generateSortOrders (sppForChild->getSortKey(),
equiJoinPreds,
leftChildOrder,
rightChildOrder,
orderedMJPreds,
completelyCovered);
if (orderedMJPreds.entries() > 0)
{
// At least one merge join predicate on the sort key
// columns was found. This will always be true at this
// point, unless there was a constraint that limited
// the number of right child rows to at most one. If this
// occurs, the satisfied method will allow a sort key
// that does not satisfy the arrangement requirement that
// merge join generated, and so the sort key may not cover
// any of the equijoin predicates. We could allow a merge
// join plan in this case, but if the other child only
// returns one row then merge join is not a good idea anyway.
// Note that in this case we should have given up on generating
// any child contexts in the beginning of this method.
// If there was a required order or arrangement, then it
// is possible that the synthesized sort key we got back
// contained some right child join columns whose left child
// equivalents are not compatible with the required order
// or arrangement. If so, we must remove any of these
// join columns from the required order that we are going
// to ask for. Note that there must be at least one
// compatible join column or else we would have given up
// when generating the first context.
// Example: Required order: ABC Join cols: ABD
// Child requirement generated for right child: ordered by AB
// Right child synthesized sort key cols: ABD
// D is not compatible and so must not be used.
ValueIdList feasibleLeftChildOrder(leftChildOrder);
if (myContext->requiresOrder())
rg.makeSortKeyFeasible(feasibleLeftChildOrder);
if (feasibleLeftChildOrder.isEmpty() &&
CmpCommon::getDefault(COMP_BOOL_84) == DF_OFF)
return NULL;
// If we did drop some join columns, then we must regenerate
// "orderedMJPreds", "leftChildOrder", "rightChildOrder"
if (feasibleLeftChildOrder.entries() != leftChildOrder.entries())
{
leftChildOrder.clear();
rightChildOrder.clear();
orderedMJPreds.clear();
generateSortOrders (feasibleLeftChildOrder,
equiJoinPreds,
leftChildOrder,
rightChildOrder,
orderedMJPreds,
completelyCovered);
CMPASSERT(orderedMJPreds.entries() > 0);
}
// Add the ValueIdList returned in "leftChildOrder" as
// the required sort order for the left child
rg.addSortKey(leftChildOrder);
// Produce the requirements and make sure they are feasible.
if (rg.checkFeasibility())
{
// Produce the required physical properties.
rppForChild = rg.produceRequirement();
// Remember the merge join predicates in the RelExpr
setCandidateMJPreds (orderedMJPreds);
setCandidateLeftSortOrder (leftChildOrder);
setCandidateRightSortOrder (rightChildOrder);
// Need to save any equijoin predicates that were not
// used for orderedMJPreds. Store them in selectionPred(),
// if not for an outer join or a semijoin, or in
// joinPred(), otherwise.
if (orderedMJPreds.entries() != equiJoinPreds.entries())
{
ValueIdSet leftOverEquiJoinPreds = equiJoinPreds;
ValueIdSet ordMJPreds(orderedMJPreds);
leftOverEquiJoinPreds -= ordMJPreds;
// NB: to preserve the separation of equi- and nonequi- join
// predicates (see Join::separateEquiAndNonEquiJoinPrediates),
// we must not only save leftOverEquiJoinPreds into joinPred or
// selectionPred but also remove orderedMJPreds from them.
// Otherwise, duplicate equijoin predicates can cause
// MergeJoin::preCodeGen to GenAssert(!mjp.isEmpty()) because
// mjp.replaceVEGExpressions refuses to replace VEGExpr that
// it has already replaced.
if (isInnerNonSemiJoin())
{
selectionPred() -= ordMJPreds;
selectionPred() += leftOverEquiJoinPreds;
}
else
{
joinPred() -= ordMJPreds;
joinPred() += leftOverEquiJoinPreds;
}
}
} // end if merge join and parent requirements are compatible
} // end if merge join predicates were found
} // endif previous Context has an optimal solution
} // end if 4th child context
// -------------------------------------------------------------------
// Create a Context using the partitioning requirement that was
// generated for the current plan.
// -------------------------------------------------------------------
if (rppForChild != NULL)
{
// ---------------------------------------------------------------
// Compute the cost limit to be applied to the child.
// ---------------------------------------------------------------
CostLimit* costLimit = computeCostLimit(myContext,pws);
// ---------------------------------------------------------------
// Get a Context for optimizing the child.
// Search for an existing Context in the CascadesGroup to which
// the child belongs that requires the same properties as those
// in rppForChild. Reuse it, if found. Otherwise, create a new
// Context that contains rppForChild as the required physical
// properties.
// ----------------------------------------------------------------
result = shareContext(childIndex, rppForChild,
myContext->getInputPhysicalProperty(),
costLimit,
myContext, myContext->getInputLogProp());
if ( NOT (pws->isLatestContextWithinCostLimit() OR
result->hasSolution() )
)
result = NULL;
if(CURRSTMT_OPTDEFAULTS->isMergeJoinControlEnabled() AND result)
{
result->ignoredRules() = *(GlobalRuleSet->applicableRules());
result->ignoredRules() -= SortEnforcerRuleNumber;
}
}
else
result = NULL;
// -------------------------------------------------------------------
// Remember the cases for which a Context could not be generated,
// or store the context that was generated.
// -------------------------------------------------------------------
pws->storeChildContext(childIndex, planNumber, result);
pws->incPlanChildCount();
if ( CURRSTMT_OPTDEFAULTS->optimizerPruning() AND
( pws->getPlanChildCount() == getArity() ) AND
CURRSTMT_OPTDEFAULTS->OPHexitMJcrContChiLoop()
)
{
pws->resetAllChildrenContextsConsidered();
break;
}
} // end while loop
if ( pws->getCountOfChildContexts() == childPlansToConsider )
pws->setAllChildrenContextsConsidered();
return result;
} // MergeJoin::createContextForAChild()
//<pb>
NABoolean MergeJoin::findOptimalSolution(Context* myContext,
PlanWorkSpace* pws)
{
NABoolean hasOptSol;
// Plan # is only an output param, initialize it to an impossible value.
Lng32 planNumber = -1;
hasOptSol = pws->findOptimalSolution(planNumber);
// Reset data members to reflect that of plan 1 (not plan 0) when picked.
if(planNumber == 1)
{
setOrderedMJPreds(getCandidateMJPreds());
setLeftSortOrder(getCandidateLeftSortOrder());
setRightSortOrder(getCandidateRightSortOrder());
}
return hasOptSol;
} // MergeJoin::findOptimalSolution()
NABoolean MergeJoin::currentPlanIsAcceptable(Lng32 planNo,
const ReqdPhysicalProperty* const rppForMe) const
{
// This is probably not the best place to check it, but it works for
// now. We are trying to force a potentially hanging plan to fail.
// See solution 10-051219-3501.
if ( deadLockIsPossible_)
return FALSE;
// ---------------------------------------------------------------------
// Check whether the user wants to enforce a particular plan type.
// ---------------------------------------------------------------------
// If nothing is being forced, return TRUE now.
if (rppForMe->getMustMatch() == NULL)
return TRUE;
// Check for the correct forced plan type.
JoinForceWildCard::forcedPlanEnum forcePlanToken =
getParallelJoinPlanToEnforce(rppForMe);
switch (forcePlanToken)
{
case JoinForceWildCard::FORCED_PLAN0:
if (planNo != 0)
return FALSE;
break;
case JoinForceWildCard::FORCED_PLAN1:
if (planNo != 1)
return FALSE;
break;
case JoinForceWildCard::FORCED_TYPE1:
// All Merge Joins are Type 1 - break out so we'll return TRUE
break;
case JoinForceWildCard::ANY_PLAN:
// Any plan satisfies this - break out so we'll return TRUE
break;
default:
return FALSE; // must be some option that Merge Join doesn't support
}
// If we get here, the plan must have passed all the checks.
return TRUE;
} // MergeJoin::currentPlanIsAcceptable()
//<pb>
//==============================================================================
// Synthesize physical properties for merge join operator's current plan
// extracted from a specified context.
//
// Input:
// myContext -- specified context containing this operator's current plan.
//
// planNumber -- plan's number within the plan workspace. Used for
// synthesizing partitioning functions.
//
// Output:
// none
//
// Return:
// Pointer to this operator's synthesized physical properties.
//
//==============================================================================
PhysicalProperty*
MergeJoin::synthPhysicalProperty(const Context* myContext,
const Lng32 planNumber,
PlanWorkSpace *pws)
{
const PhysicalProperty* const sppOfLeftChild =
myContext->getPhysicalPropertyOfSolutionForChild(0);
const PhysicalProperty* const sppOfRightChild =
myContext->getPhysicalPropertyOfSolutionForChild(1);
// This is to prevent possible deadlock for parallel merge join.
// If both children got repartitioned and didn't use Sort then
// force this plan to fail. See solution 10-051219-3501
if ( (CmpCommon::getDefault(MERGE_JOIN_WITH_POSSIBLE_DEADLOCK) == DF_OFF) AND
(sppOfLeftChild->getSortOrderType() == ESP_NO_SORT_SOT) AND
(sppOfLeftChild->getDataSourceEnum() == SOURCE_ESP_DEPENDENT) AND
(sppOfRightChild->getSortOrderType() == ESP_NO_SORT_SOT) AND
(sppOfRightChild->getDataSourceEnum() == SOURCE_ESP_DEPENDENT)
)
{
deadLockIsPossible_ = TRUE;
}
// ---------------------------------------------------------------------
// Call the default implementation (RelExpr::synthPhysicalProperty())
// to synthesize the properties on the number of cpus.
// ---------------------------------------------------------------------
PhysicalProperty* sppTemp = RelExpr::synthPhysicalProperty(myContext,
planNumber,
pws);
// ----------------------------------------------------------------
// Synthesize the partitioning function from the first child of the
// winning plan. Merge Joins have two potential plans--see member
// function MergeJoin::createContextForAChild(). In plan 0, the
// left child is first; in plan 1, the right child is first.
// ----------------------------------------------------------------
CMPASSERT(planNumber == 0 || planNumber == 1);
PartitioningFunction* myPartFunc;
const SearchKey* myPartSearchKey;
if (planNumber == 0)
{
myPartFunc = sppOfLeftChild->getPartitioningFunction();
myPartSearchKey = sppOfLeftChild->getPartSearchKey();
}
else
{
myPartFunc = sppOfRightChild->getPartitioningFunction();
myPartSearchKey = sppOfRightChild->getPartSearchKey();
}
// ---------------------------------------------------------------------
// Parallel merge join plans are set up such that
// they maintain the partitioning of the left child table.
// ---------------------------------------------------------------------
PhysicalProperty* sppForMe =
new(CmpCommon::statementHeap()) PhysicalProperty(
sppOfLeftChild->getSortKey(),
sppOfLeftChild->getSortOrderType(),
sppOfLeftChild->getDp2SortOrderPartFunc(),
myPartFunc,
sppOfLeftChild->getPlanExecutionLocation(),//||opt synth from both kids
combineDataSources(
sppOfLeftChild->getDataSourceEnum(),
sppOfRightChild->getDataSourceEnum()),
sppOfLeftChild->getIndexDesc(),
myPartSearchKey);
sppForMe->setCurrentCountOfCPUs(sppTemp->getCurrentCountOfCPUs());
// remove anything that's not covered by the group attributes
sppForMe->enforceCoverageByGroupAttributes (getGroupAttr()) ;
delete sppTemp;
return sppForMe;
} // MergeJoin::synthPhysicalProperty()
//<pb>
// -----------------------------------------------------------------------
// member functions for class HashJoin
// -----------------------------------------------------------------------
// ---------------------------------------------------------------------
// Performs mapping on the partitioning function, from the
// hash join to the designated child.
// ---------------------------------------------------------------------
PartitioningFunction* HashJoin::mapPartitioningFunction(
const PartitioningFunction* partFunc,
NABoolean rewriteForChild0)
{
ValueIdMap map(getEquiJoinExpressions());
PartitioningFunction* newPartFunc =
partFunc->copyAndRemap(map,rewriteForChild0);
SkewedDataPartitioningFunction* oldSKpf = NULL;
SkewedDataPartitioningFunction* newSKpf = NULL;
if ( rewriteForChild0 == FALSE /* map for child 1 */ AND
(oldSKpf=(SkewedDataPartitioningFunction*)
(partFunc->castToSkewedDataPartitioningFunction())) AND
(newSKpf=(SkewedDataPartitioningFunction*)
(newPartFunc->castToSkewedDataPartitioningFunction()))
)
{
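// When the plan distributes the outer (child 0) side's skewed rows
// uniformly, the matching inner (child 1) rows cannot be partitioned
// on the join key; mark them for BROADCAST so that each join instance
// sees every potential match.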
if ( oldSKpf->getSkewProperty().isUniformDistributed() ) {
skewProperty newSK(oldSKpf->getSkewProperty());
newSK.setIndicator(skewProperty::BROADCAST);
newSKpf->setSkewProperty(newSK);
}
} else {
// map for child 0. Do nothing
}
return newPartFunc;
} // end HashJoin::mapPartitioningFunction()
NABoolean HashJoin::isBigMemoryOperator(const PlanWorkSpace* pws,
const Lng32 planNumber)
{
double dummy;
return isBigMemoryOperatorSetRatio( pws->getContext(), planNumber, dummy);
}
NABoolean HashJoin::isBigMemoryOperatorSetRatio(const Context* context,
const Lng32 planNumber,
double & ratio)
{
double memoryLimitPerCPU = CURRSTMT_OPTDEFAULTS->getMemoryLimitPerCPU();
// ---------------------------------------------------------------------
// Not given any memory constraints, the HJ would like to have enough
// memory to hold the whole inner table and auxiliary hash structures.
// Each row is stored with its hash key and a chain pointer (8 bytes in
// total).
// ---------------------------------------------------------------------
const ReqdPhysicalProperty* rppForMe = context->getReqdPhysicalProperty();
// Start off assuming that the operator will use all available CPUs.
Lng32 cpuCount = rppForMe->getCountOfAvailableCPUs();
PartitioningRequirement* partReq = rppForMe->getPartitioningRequirement();
// This check ensures that a plan exists before calling getPhysicalProperty().
PhysicalProperty* spp = NULL;
if ( context->getPlan())
spp = context->getPlan()->getPhysicalProperty();
Lng32 numOfStreams;
// If the physical properties are available, then this means we
// are on the way back up the tree. Get the actual level of
// parallelism from the spp to determine if the number of cpus we
// are using are less than the maximum number available.
if (spp != NULL)
{
PartitioningFunction* partFunc = spp->getPartitioningFunction();
numOfStreams = partFunc->getCountOfPartitions();
if (numOfStreams < cpuCount)
cpuCount = numOfStreams;
}
else
if ((partReq != NULL) AND
(partReq->getCountOfPartitions() != ANY_NUMBER_OF_PARTITIONS))
{
// If there is a partitioning requirement, then this may limit
// the number of CPUs that can be used.
numOfStreams = partReq->getCountOfPartitions();
if (numOfStreams < cpuCount)
cpuCount = numOfStreams;
}
EstLogPropSharedPtr inLogProp = context->getInputLogProp();
const double probeCount =
MAXOF(1.,inLogProp->getResultCardinality().value());
const double innerRowCount =
child(1).outputLogProp(inLogProp)->getResultCardinality().value();
double rowsPerCpu;
if (planNumber != 0) // Type-1 Hash Join?
{
rowsPerCpu = MAXOF(1.,(innerRowCount / cpuCount));
}
else // Type-2 Hash Join
{
// Each ESP must build a hash table of all the rows.
rowsPerCpu = MAXOF(1.,innerRowCount);
}
const double rowsPerCpuPerProbe = MAXOF(1.,(rowsPerCpu / probeCount));
const Lng32 innerRowLength =
child(1).getGroupAttr()->getCharacteristicOutputs().getRowLength();
const Lng32 extInnerRowLength = innerRowLength + 8;
const double fileSizePerCpu =
((rowsPerCpuPerProbe * extInnerRowLength) / 1024.);
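// Illustrative example (assumed values): innerRowCount = 1,000,000,
// cpuCount = 4, probeCount = 1, innerRowLength = 92. Then
// extInnerRowLength = 100, rowsPerCpuPerProbe = 250,000 and
// fileSizePerCpu = (250,000 * 100) / 1024 ~= 24,414 KB for a
// Type-1 plan.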
if (spp != NULL &&
CmpCommon::getDefault(COMP_BOOL_51) == DF_ON
)
{
CurrentFragmentBigMemoryProperty * bigMemoryProperty =
new (CmpCommon::statementHeap())
CurrentFragmentBigMemoryProperty();
spp->setBigMemoryEstimationProperty(bigMemoryProperty);
bigMemoryProperty->setCurrentFileSize(fileSizePerCpu);
bigMemoryProperty->setOperatorType(getOperatorType());
for (Int32 i=0; i<=getArity(); i++)
{
const PhysicalProperty *childSpp =
context->getPhysicalPropertyOfSolutionForChild(i);
if (childSpp != NULL)
{
CurrentFragmentBigMemoryProperty * memProp =
(CurrentFragmentBigMemoryProperty *)
((PhysicalProperty *)childSpp)->getBigMemoryEstimationProperty();
if (memProp != NULL)
{
double childCumulativeMemSize = memProp->getCumulativeFileSize();
bigMemoryProperty->incrementCumulativeMemSize(childCumulativeMemSize);
memoryLimitPerCPU -= childCumulativeMemSize;
}
}
}
}
if (memoryLimitPerCPU < 1)
memoryLimitPerCPU =1;
ratio = fileSizePerCpu/memoryLimitPerCPU;
return (fileSizePerCpu >= memoryLimitPerCPU);
}
// <pb>
NABoolean
HashJoin::isSkewBusterFeasible(SkewedValueList** skList, Lng32 countOfPipelines,
ValueId& vidOfEquiPredWithSkew)
{
CMPASSERT(skList != NULL);
double threshold =
(ActiveSchemaDB()->getDefaults().getAsDouble(SKEW_SENSITIVITY_THRESHOLD)) / countOfPipelines;
// Disable skew buster if the threshold is less than 0; a value of -1 is
// the setting used to turn it off.
if ( threshold < 0 )
return FALSE;
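// Illustrative example (assumed values): SKEW_SENSITIVITY_THRESHOLD =
// 0.1 with 10 pipelines gives threshold = 0.01, i.e. any single value
// carrying more than 1% of the rows is treated as skewed.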
// skew buster is not allowed for Full Outer Join, since it involves
// broadcasting the inner rows.
if (isFullOuterJoin())
return FALSE;
ValueIdSet joinPreds = getEquiJoinPredicates();
if ( joinPreds.entries() > 1 ) {
double mc_threshold =
(ActiveSchemaDB()->getDefaults().getAsDouble(MC_SKEW_SENSITIVITY_THRESHOLD))
/ countOfPipelines ;
if ( mc_threshold < 0 )
return FALSE;
if ( !multiColumnjoinPredOKforSB(joinPreds) )
return FALSE;
vidOfEquiPredWithSkew = NULL_VALUE_ID;
// First try to detect the true MC skews directly.
NABoolean ok = CmpCommon::getDefault(HJ_NEW_MCSB_PLAN) == DF_ON &&
childNodeContainMultiColumnSkew(0, joinPreds, mc_threshold,
countOfPipelines, skList);
if ( ok )
return TRUE;
else
// If that fails, we go the old way: guessing the MC skews from the
// SC skews that exist at the participating columns of the multi-column
// join predicate.
return childNodeContainMultiColumnSkew(0, joinPreds, mc_threshold,
threshold, countOfPipelines,
skList, vidOfEquiPredWithSkew);
}
// single column SB
return ( singleColumnjoinPredOKforSB(joinPreds) &&
childNodeContainSkew(0, joinPreds, threshold, skList)
);
}
void HashJoin::addNullToSkewedList(SkewedValueList** skList)
{
DCMPASSERT(skList != NULL);
// If this is not a NOT IN subquery, return.
if(!getIsNotInSubqTransform())
{
return;
}
ValueIdList child0ColList = getEquiJoinExprFromChild0();
// get the first column id and use it as skew list identifier
// same as in skew buster
ValueId colVid = child0ColList[0];
// Create a new skew value list for not in subq if not already created
// by skewbuster routines
if (*skList == NULL)
{
*skList = new (STMTHEAP) SkewedValueList(colVid.getType().newCopy(STMTHEAP), STMTHEAP, 1);
}
if (!(*skList)->getIsNullInList())
{
// add null value to the list
EncodedValue encNullValue;
encNullValue.setValueToNull();
(*skList)->insertInOrder(encNullValue);
(*skList)->setIsNullInList(TRUE);
}
} // HashJoin::addNullToSkewedList
// -----------------------------------------------------------------------
// HashJoin::costMethod()
// Obtain a pointer to a CostMethod object providing access
// to the cost estimation functions for nodes of this class.
// -----------------------------------------------------------------------
CostMethod*
HashJoin::costMethod() const
{
static THREAD_P CostMethodHashJoin *m = NULL;
if (m == NULL)
m = new (GetCliGlobals()->exCollHeap()) CostMethodHashJoin;
return m;
} // HashJoin::costMethod()
//<pb>
// -----------------------------------------------------------------------
// HashJoin::createContextForAChild() can create up to 3 sets of contexts for
// its children.
// -----------------------------------------------------------------------
Context* HashJoin::createContextForAChild(Context* myContext,
PlanWorkSpace* pws,
Lng32& childIndex)
{
// ---------------------------------------------------------------------
// Hash Join generates at most 3 context pairs. The first pair is
// either a non-parallel plan or Type 2 parallel plan. The second
// pair is a matching partitions parallel plan,
// where we generate the left child context first. The third pair is
// a parallel matching partitions plan where we generate the right
// child context first.
// The reason we try matching partitions plans both ways is to try
// and avoid having to repartition both tables. If we only try one
// way and the first table must repartition, then the second table
// must be repartitioned if it is going to be able to match the hash
// repartitioning function that the first table synthesized. If we
// were to try the other way and the first table this time did not
// need to repartition, then we would only have to range repartition
// the second table.
// ---------------------------------------------------------------------
Context* result = NULL;
Lng32 planNumber = 0;
Context* childContext = NULL;
const ReqdPhysicalProperty* rppForMe =
myContext->getReqdPhysicalProperty();
const PartitioningRequirement* partReqForMe =
rppForMe->getPartitioningRequirement();
ReqdPhysicalProperty* rppForChild = NULL;
Lng32 childNumPartsRequirement = ANY_NUMBER_OF_PARTITIONS;
float childNumPartsAllowedDeviation = 0.0;
NABoolean numOfESPsForced = FALSE;
// ---------------------------------------------------------------------
// Compute the number of child plans to consider.
// ---------------------------------------------------------------------
Lng32 childPlansToConsider = 6;
Lng32 baseTableThreshold = ActiveSchemaDB()->getDefaults().getAsLong(COMP_INT_19);
// Part of the fix to genesis case 10-061222-1068, soln 10-061222-1347.
// Our goal here is to avoid arithmetic overflow in the computation of
// innerChildSize, which can otherwise lead to replicate-broadcasting so
// much data that the hash join spills, with very poor performance.
const CostScalar innerChildSize =
child(1).getGroupAttr()->getResultCardinalityForEmptyInput()
* child(1).getGroupAttr()->getRecordLength();
// parallelHeuristic3 (if TRUE) will prevent two extra pairs of contexts
// by checking whether parallelism was worth trying after the first pair.
// If not, it will set childPlansToConsider to 2 instead of 6. By doing
// this we avoid creating at least one extra parallel context for each
// child. It will change parallelismIsOK for the workspace to FALSE when
// okToAttemptESPParallelism() returns FALSE and heuristic3 is ON.
// Only repartitioning plans are allowed for Full Outer Join. That is
// because, broadcast is not compatible with it. Hence, no need to cut
// down on the number of plans to consider. Keep childPlansToConsider at 6.
if ((NOT isFullOuterJoin())
AND (
(rppForMe->getCountOfPipelines() == 1) OR
getEquiJoinPredicates().isEmpty() OR
( (partReqForMe != NULL) AND
( partReqForMe->isRequirementExactlyOne() OR
partReqForMe->isRequirementReplicateNoBroadcast()
)
) OR
( CURRSTMT_OPTDEFAULTS->parallelHeuristic3() AND
(pws->getParallelismIsOK() == FALSE)
) OR
( isOrderedCrossProduct() AND
myContext->requiresOrder()
) OR
( // New heuristics to limit repartitioning of small right child.
// If right child has very few rows, for example, dimension
// table with local predicate resulting in one row only, then
// repartitioning of this child will cause few (or one) join ESP
// active. So we could consider only type2 join in this case
// Now limited to star filter join but could be generalized
// by setting COMP_BOOL_68 to ON
( (getSource() == Join::STAR_FILTER_JOIN) OR
(CmpCommon::getDefault(COMP_BOOL_68) == DF_ON)
) AND
// To avoid complex subqueries due to cardinality uncertainties
( child(1).getGroupAttr()->getNumBaseTables() <= baseTableThreshold) AND
// the child is small - defined by COMP_INT_7
( innerChildSize < CostScalar(ActiveSchemaDB()->getDefaults().getAsLong(COMP_INT_7))) AND
( rppForMe->getMustMatch() == NULL )
)
) // AND
) // (NOT isFullOuterJoin())
{
// We don't want to repartition the inner child/table; rather, we
// want to broadcast it. Do this only if:
// the child/table size is < COMP_INT_70
// or
// inner child/table size > COMP_INT_70 and the join
// column(s) from the left child are skewed.
// The decision to broadcast and not repartition is based on
// the size of the inner table (i.e. the table to be broadcast)
// We can define it visually as the range below
//
// comp_int_70 comp_int_7
// ------------------|---------------------|--------------------
// Range 1 Range 2 Range 3
//
// * If inner table size is in range 1 i.e. < comp_int_70 then repartitioning
// plan is not tried, the table is unconditionally broadcast
// * If inner table size is in range 2 i.e. > comp_int_70 and < comp_int_7
// then repartition plan is not tried i.e. only broadcast plan is tried
// if the join column from the left child is skewed.
// * If inner table size is in range 3 i.e. > comp_int_7 then repartitioning
// plan is tried.
if(innerChildSize <= ActiveSchemaDB()->getDefaults().getAsLong(COMP_INT_70))
{
// size of inner child is < conditional broadcast threshold (COMP_INT_70)
// therefore disallow the repartitioning plan
// By disallowing the repartitioning plan we force only the broadcast
// plan to be considered
childPlansToConsider = 2;
}
else
{
// size of inner child is > conditional broadcast threshold (COMP_INT_70)
// therefore disallow the repartitioning plan only if the join is
// on a skewed set of columns
// By disallowing the repartitioning plan we force only the broadcast
// plan to be considered
CostScalar skewFactor(1);
EncodedValue mostFreqVal(UNINIT_ENCODEDVALUE);
ValueIdSet joinPreds = getEquiJoinPredicates();
// iterate over all the join predicates between the left and the right
// In each iteration:
// * The skew factor is returned for the join column from the left child
// * The skew factor is combined with the skew factor from other predicates.
// We keep a running multiplication of skew factors from the different
// predicates to combine the skew factors for all the join predicates
// being applied at this join. This is based on the assumption of independence
// between columns if there is more than one join predicate. Multicolumn stats
// could perhaps be used to get a better skew estimate in case we are
// joining on more than one column, but this is simpler and should be
// good enough for a simple heuristic like this.
for(ValueId vid = joinPreds.init(); joinPreds.next(vid); joinPreds.advance(vid))
{
// get the skewFactor and combine it
skewFactor*=child(0).getGroupAttr()->getSkewnessFactor(vid,mostFreqVal);
}
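// Illustrative example (independence assumption): if two equijoin
// predicates have individual skew factors of 0.5 and 0.3, the combined
// skew factor is 0.5 * 0.3 = 0.15.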
// If the join is on a skewed set of columns from the left child.
// Skew threshold is defined by COMP_FLOAT_3
//
// Don't try repartitioning plan
if( (skewFactor > (ActiveSchemaDB()->getDefaults()).getAsDouble(COMP_FLOAT_3)) ||
// not in subquery --> set childPlansToConsider = 2 --> only plan0 is considered
getIsNotInSubqTransform())
childPlansToConsider = 2;
}
}
else if ((NOT isFullOuterJoin()) AND
(partReqForMe != NULL) AND
partReqForMe->isRequirementFullySpecified())
{
childPlansToConsider = 4;
}
// Force serial plans for full outer join.
if (isFullOuterJoin() AND
CmpCommon::getDefault(COMP_BOOL_197) == DF_ON )
childPlansToConsider = 2;
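// NOTE (illustrative): each plan consumes two child contexts (planNumber =
// context count / 2 in the loop below), so childPlansToConsider = 2 limits
// consideration to plan 0 only, 4 to plans 0 and 1, and 6 to all three plans.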
NABoolean considerMixedHashJoin = FALSE;
NABoolean considerHashJoinPlan2 = TRUE;
NABoolean broadcastOneRow = FALSE;
NABoolean notInLeftChildOptimization = FALSE;
// ---------------------------------------------------------------------
// The creation of the next Context for a child depends upon the
// the number of child Contexts that have been created in earlier
// invocations of this method.
// ---------------------------------------------------------------------
while ( (pws->getCountOfChildContexts() < childPlansToConsider)
AND (rppForChild == NULL) )
{
// If we stay in this loop because we didn't generate some
// child contexts, we need to reset the child plan count when
// it gets to be as large as the arity, because otherwise we
// would advance the child plan count past the arity.
if (pws->getPlanChildCount() >= getArity())
pws->resetPlanChildCount();
planNumber = pws->getCountOfChildContexts() / 2;
switch (pws->getCountOfChildContexts())
{
case 0:
childIndex = 0;
break;
case 1:
childIndex = 1;
break;
case 2:
childIndex = 0;
break;
case 3:
childIndex = 1;
break;
case 4:
childIndex = 1;
break;
case 5:
childIndex = 0;
break;
}
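// Mapping of child contexts to (child, plan), two contexts per plan:
// contexts 0 and 1 -> plan 0 (left child first, then right);
// contexts 2 and 3 -> plan 1 (left child first, then right);
// contexts 4 and 5 -> plan 2 (right child first, then left).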
SkewedValueList* skList = NULL;
considerMixedHashJoin = FALSE;
considerHashJoinPlan2 = TRUE;
broadcastOneRow = FALSE;
notInLeftChildOptimization = FALSE;
ValueId vidOfEquiPredWithSkew = NULL_VALUE_ID;
// Here, we consult our histogram data to identify potential skew values
// for the left side, when ESP parallel plans (plan 1 and 2) are
// sought for. In the 1st phase of the Mixed Hybrid HashJoin Project,
// only plan 1 is augmented to deal with skewed values. But we
// turn on 'considerMixedHashJoin' anyway here so that we can use it
// to turn off plan 2 generation when the skewness is found.
if (planNumber == 1 OR planNumber == 2)
{
if( isSkewBusterFeasible(&skList, MAXOF(1, rppForMe->getCountOfPipelines()),
vidOfEquiPredWithSkew) == TRUE )
{
considerMixedHashJoin = TRUE;
if ( CmpCommon::getDefault(COMP_BOOL_101) == DF_OFF )
considerHashJoinPlan2 = FALSE;
}
// After adding the null value, the next call to isSkewBusterFeasible will
// return TRUE even if there are no skewed values. Note that this has to do
// with an executor optimization
// for anti-semijoins used for NOT IN. If the subquery has at least one NULL value,
// then the entire NOT IN predicate returns NULL. Handling this requires special
// logic in the executor. When we have a parallel TYPE1 anti-semijoin, then we
// use SkewBuster to broadcast all inner NULL values to all the joins, to be able
// to handle this case correctly.
if (getIsNotInSubqTransform())
{
//add null to the skewed list
addNullToSkewedList(&skList);
// is broadcast of one row required
broadcastOneRow = getRequireOneBroadcast();
considerMixedHashJoin = TRUE;
// if this is a notin subq then we don't want to consider plan2
considerHashJoinPlan2 = FALSE;
if ( CmpCommon::getDefault(NOT_IN_SKEW_BUSTER_OPTIMIZATION) == DF_ON)
{
if (skList->hasOnlyNonSkewedNull() &&
skList->getNAType()->getTypeQualifier() != NA_CHARACTER_TYPE)
// no real skewed values. null is the only item in the list and is not skewed
// in this case we don't really need to uniform-distribute rows on the right side
// all we need is the broadcast null (and one row if applicable) on the right side
// NOTE: This will cause a regular "hash2" function to be used on the left side
// and a "h2-br" function on the right. We have to guarantee that those produce
// the same hash value for every non-NULL column value. This is NOT true for
// character columns, since those get converted to VARCHAR in
// SkewedDataPartitioningFunction::createPartitioningExpression(), which does
// not happen in TableHashPartitioningFunction::createPartitioningExpression()
{
considerMixedHashJoin = FALSE;
notInLeftChildOptimization = TRUE;
}
}
}
}
if ((pws->getCountOfChildContexts() == 0) AND
currentPlanIsAcceptable(planNumber,rppForMe))
{
if ( NOT CURRSTMT_OPTDEFAULTS->optimizerPruning() OR
pws->isLatestContextWithinCostLimit() OR
NOT CURRSTMT_OPTDEFAULTS->OPHpruneWhenCLExceeded() )
{
RequirementGenerator rg(child(0), rppForMe);
if (isOrderedCrossProduct())
{
ValueIdList reqdOrder1;
ValueIdSet reqdArr1;
// If there is a required sort order and/or arrangement, then
// split off any part of the requirement that is for the
// right child and only pass on the portion of the requirement
// that is for the left child. Pass back the requirements for
// the right child in case they are needed.
splitSortReqsForLeftChild(rppForMe, rg, reqdOrder1, reqdArr1);
}
// If we want to try ESP parallelism, go for it, only if
// its NOT isFullOuterJoin. Note we may still
// not get it if the parent's requirement does not allow it.
// If we don't go for ESP parallelism, then we will specify either
// the parent's required number of partitions, or we will specify
// that we don't care how many partitions we get - i.e.
// ANY_NUMBER_OF_PARTITIONS.
if (isFullOuterJoin())
{
// If it's full outer join, then force the requirement
// to be one partition - (serial).
rg.addNumOfPartitions(1);
}
else if (okToAttemptESPParallelism(myContext,
pws,
childNumPartsRequirement,
childNumPartsAllowedDeviation,
numOfESPsForced))
{
if (NOT numOfESPsForced)
rg.makeNumOfPartsFeasible(childNumPartsRequirement,
&childNumPartsAllowedDeviation);
rg.addNumOfPartitions(childNumPartsRequirement,
childNumPartsAllowedDeviation);
} // end if ok to try parallelism
else
{
if (CURRSTMT_OPTDEFAULTS->parallelHeuristic3())
pws->setParallelismIsOK(FALSE);
}
// Produce the requirements and make sure they are feasible.
if (rg.checkFeasibility())
{
// Produce the required physical properties.
rppForChild = rg.produceRequirement();
}
} // NOT CURRSTMT_OPTDEFAULTS->optimizerPruning() OR ..
} // endif (pws->getCountOfChildContexts() == 0)
else if ((pws->getCountOfChildContexts() == 1) AND
currentPlanIsAcceptable(planNumber,rppForMe))
{
// -----------------------------------------------------------------
// Cost limit exceeded? Erase the Context from the workspace.
// Erasing the Context does not decrement the count of Contexts
// considered so far.
// -----------------------------------------------------------------
if (pws->getLatestChildContext() AND
NOT pws->isLatestContextWithinCostLimit() AND
NOT CURRSTMT_OPTDEFAULTS->OPHuseFailedPlanCost() )
pws->eraseLatestContextFromWorkSpace();
childContext = pws->getChildContext(0,0);
// -----------------------------------------------------------------
// Make sure a plan has been produced by the latest context.
// -----------------------------------------------------------------
if((childContext != NULL) AND childContext->hasOptimalSolution())
{
NABoolean considerReplicateBroadcast = TRUE; // unless revoked below
const PhysicalProperty*
sppForChild = childContext->getPhysicalPropertyForSolution();
// ---------------------------------------------------------------
// spp should have been synthesized for child's optimal plan.
// ---------------------------------------------------------------
CMPASSERT(sppForChild != NULL);
PartitioningFunction* childPartFunc =
sppForChild->getPartitioningFunction();
PartitioningRequirement* partReqForChild;
// If the partitioning function of the left child was only
// one partition, then pass a requirement for that to the right,
// since no parallelism is possible.
// BEGIN TEMPORARY CODE FIX COMMENT
// If there was a parent requirement and it was replicate no
// broadcast, then we must require replicate no broadcast from
// the right child as well, because the executor currently
// cannot handle broadcast replication under a nested join in
// certain cases. See Materialize::createContextForAChild for details.
// END TEMPORARY CODE FIX COMMENT
// Otherwise, pass a broadcast replication req. to the right child.
if (childPartFunc->isASinglePartitionPartitioningFunction())
partReqForChild = childPartFunc->makePartitioningRequirement();
else if ((partReqForMe != NULL) AND // TEMP CODE FIX
partReqForMe->isRequirementReplicateNoBroadcast()) // TEMP
partReqForChild = (PartitioningRequirement *)partReqForMe; // TEMP
else { // replicate broadcast inner
CostScalar innerMaxSize =
child(1).getGroupAttr()->getResultMaxCardinalityForEmptyInput()
* child(1).getGroupAttr()->getRecordLength();
CostScalar outerSize =
child(0).getGroupAttr()->getResultCardinalityForEmptyInput()
* child(0).getGroupAttr()->getRecordLength();
// disallow replicate broadcast only if:
// 1) we are an optional zigzag join, and
// 2) inner can be dangerously large
if (isJoinForZigZag() && innerMaxSize >=
outerSize * childPartFunc->getCountOfPartitions())
{
considerReplicateBroadcast = FALSE;
}
if (CmpCommon::getDefault(COMP_BOOL_82) == DF_ON) {
// Use the node map of the left child's partitioning function.
partReqForChild = new (CmpCommon::statementHeap() )
RequireReplicateViaBroadcast(childPartFunc, TRUE);
}
else {
partReqForChild = new (CmpCommon::statementHeap() )
RequireReplicateViaBroadcast
(childPartFunc->getCountOfPartitions());
// Use the node map of the left child's partitioning function.
//
NodeMap *myNodeMap =
childPartFunc->getNodeMap()->copy(CmpCommon::statementHeap());
partReqForChild->
castToFullySpecifiedPartitioningRequirement()->
getPartitioningFunction()->replaceNodeMap(myNodeMap);
}
}
RequirementGenerator rg (child(1),rppForMe);
if (myContext->requiresOrder())
{
// the ordering of an ordered hash join is provided by the left child
// for cross products we need the next two methods to prevent a sort key from
// being added due to sort requirements,
rg.removeSortKey();
rg.removeArrangement();
if (isOrderedCrossProduct())
{
ValueIdList reqdOrder0;
ValueIdSet reqdArr0;
// If there is a required sort order and/or arrangement, then
// split off any part of the requirement that is for the
// left child and only pass on the portion of the requirement
// that is for the right child. Pass back the requirements for
// the left child in case they are needed.
splitSortReqsForRightChild(rppForMe, rg, reqdOrder0, reqdArr0);
}
}
// Remove any parent partitioning requirements, since we
// have already enforced this on the left child.
rg.removeAllPartitioningRequirements();
// Now, add in the Join's partitioning requirement for the
// right child.
rg.addPartRequirement(partReqForChild);
// Produce the requirements and make sure they are feasible.
if (rg.checkFeasibility()
&& considerReplicateBroadcast)
{
// Produce the required physical properties.
rppForChild = rg.produceRequirement();
}
} // end if child0 had an optimal solution
} // end if 2nd child context
// -----------------------------------------------------------------
// The next plan, (child0, plan1) and (child1, plan1), attempts to
// create matching partitions. (child1, plan1) constructs a
// partitioning key from the values that are produced by child0 and
// are referenced in the equijoin predicates.
// -----------------------------------------------------------------
else if ((pws->getCountOfChildContexts() == 2) AND
currentPlanIsAcceptable(planNumber,rppForMe))
{
if ( NOT CURRSTMT_OPTDEFAULTS->optimizerPruning() OR
pws->isLatestContextWithinCostLimit() OR
NOT CURRSTMT_OPTDEFAULTS->OPHpruneWhenCLExceeded() )
{
RequirementGenerator rg(child(0), rppForMe);
ValueIdSet partitioningKey;
if ( considerMixedHashJoin == TRUE &&
getEquiJoinExprFromChild0().entries() > 1 ) {
if ( vidOfEquiPredWithSkew != NULL_VALUE_ID ) {
// For multi-column SB, use one of the
// skewed partition columns from the equi predicates
// in the partition key.
ValueId vref =
getEquiJoinExprFromChild0().
extractVEGRefForEquiPredicate(vidOfEquiPredWithSkew);
// Expect to get the valueId of a VegRef for a join predicate.
if (vref != NULL_VALUE_ID)
partitioningKey.insert(vref);
else
partitioningKey.insertList(getEquiJoinExprFromChild0());
} else
partitioningKey.insertList(getEquiJoinExprFromChild0());
} else
// For regular HJ or single-column SB, use the only equi predicate
// as the partition column.
partitioningKey.insertList(getEquiJoinExprFromChild0());
rg.addPartitioningKey(partitioningKey);
// --------------------------------------------------------------------
// If this is a CPU or memory-intensive operator then add a
// requirement for a maximum number of partitions, unless
// that requirement would conflict with our parent's requirement.
//
// --------------------------------------------------------------------
if (okToAttemptESPParallelism(myContext,
pws,
childNumPartsRequirement,
childNumPartsAllowedDeviation,
numOfESPsForced))
{
if (NOT numOfESPsForced)
rg.makeNumOfPartsFeasible(childNumPartsRequirement,
&childNumPartsAllowedDeviation);
rg.addNumOfPartitions(childNumPartsRequirement,
childNumPartsAllowedDeviation);
if ( considerMixedHashJoin == TRUE )
rg.addSkewRequirement(skewProperty::UNIFORM_DISTRIBUTE, skList, broadcastOneRow);
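// (Illustrative) UNIFORM_DISTRIBUTE makes the left child spread rows
// carrying the skewed values randomly; the counterpart right-child plan
// maps this to a BROADCAST skew property (see the use of
// mapPartitioningFunction in the 4th child context below).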
} // end if ok to try parallelism
// Produce the requirements and make sure they are feasible.
if (rg.checkFeasibility())
{
// Produce the required physical properties.
rppForChild = rg.produceRequirement();
}
}
} // end if 3rd child context
// -----------------------------------------------------------------
// (child1, plan1) is the counterpart plan for (child0, plan1).
// -----------------------------------------------------------------
else if ((pws->getCountOfChildContexts() == 3) AND
currentPlanIsAcceptable(planNumber,rppForMe))
{
// -------------------------------------------------------------
// Cost limit exceeded? Erase the Context (child0, plan1).
// Erasing the Context does not decrement the count of Contexts
// considered so far.
// -------------------------------------------------------------
if (pws->getLatestChildContext() AND
NOT pws->isLatestContextWithinCostLimit() AND
NOT CURRSTMT_OPTDEFAULTS->OPHuseFailedPlanCost()
)
pws->eraseLatestContextFromWorkSpace();
const Context* const childContext = pws->getChildContext(0,1);
// -----------------------------------------------------------------
// Make sure a plan has been produced by the latest context.
// -----------------------------------------------------------------
if((childContext != NULL) AND childContext->hasOptimalSolution())
{
const PhysicalProperty*
sppForChild = childContext->getPhysicalPropertyForSolution();
// ---------------------------------------------------------------
// spp should have been synthesized for child's optimal plan.
// ---------------------------------------------------------------
CMPASSERT(sppForChild != NULL);
PartitioningFunction* childPartFunc =
sppForChild->getPartitioningFunction();
NABoolean rewriteForChild0 = FALSE;
PartitioningFunction* childPartFuncRewritten =
mapPartitioningFunction(childPartFunc, rewriteForChild0);
RequirementGenerator rg (child(1),rppForMe);
// ignore single part func case since run-time skew does not exist.
if ( notInLeftChildOptimization &&
!childPartFuncRewritten->isASinglePartitionPartitioningFunction())
{
skewProperty skp (skewProperty::BROADCAST, skList);
skp.setBroadcastOneRow(broadcastOneRow);
//rg.addSkewRequirement(skewProperty::BROADCAST, skList, broadcastOneRow);
childPartFuncRewritten = new SkewedDataPartitioningFunction(childPartFuncRewritten, skp);
}
PartitioningRequirement* partReqForChild =
childPartFuncRewritten->makePartitioningRequirement();
//RequirementGenerator rg (child(1),rppForMe);
if (myContext->requiresOrder())
{
// the ordering of an ordered hash join is provided by the left child
rg.removeSortKey();
rg.removeArrangement();
}
// Remove any parent partitioning requirements, since we
// have already enforced this on the left child.
rg.removeAllPartitioningRequirements();
// Double check the broadcasting skewed data requirement for the right child
// if my left child will produce randomly distributed skewed values,
// and the partfunc for the right child can do so and both partfuncs
// are of the same type. The last check is necessary to avoid
// hash2-random_distribute on left side and some other non-hash2
// broadcast on right side.
//
// The above called HashJoin::mapPartitioningFunction() should perform
// the mapping from UNIFORM DISTRIBUTED to BROADCAST.
const SkewedDataPartitioningFunction* skpf = NULL;
if (childPartFunc->getPartitioningFunctionType() ==
childPartFuncRewritten->getPartitioningFunctionType() AND
(skpf=childPartFunc->castToSkewedDataPartitioningFunction()) AND
skpf->getSkewProperty().isUniformDistributed()
)
{
const SkewedDataPartitioningFunction* otherSKpf =
(SkewedDataPartitioningFunction*)childPartFuncRewritten;
CMPASSERT(
otherSKpf->getPartialPartitioningFunction()->canHandleSkew()
AND
otherSKpf->getSkewProperty().isBroadcasted()
);
//rg.addSkewRequirement(skewProperty::BROADCAST, skList);
}
// Now, add in the Join's partitioning requirement for the
// right child.
rg.addPartRequirement(partReqForChild);
// Produce the requirements and make sure they are feasible.
if (rg.checkFeasibility())
{
// Produce the required physical properties.
rppForChild = rg.produceRequirement();
}
} // end if child0 had an optimal solution
} // end if 4th child context
// -----------------------------------------------------------------
// The next plan, (child0, plan2) and (child1, plan2), attempt to
// create matching partitions. They are created iff an equijoin
// predicate exists. It differs from the previous plan in that a
// new partitioning requirement is created and applied to child1
// first, instead of to child0. (child0, plan2) constructs a
// partitioning key from the values that are produced by child1 and
// are referenced in the equijoin predicates.
// -----------------------------------------------------------------
else if ((pws->getCountOfChildContexts() == 4) AND
currentPlanIsAcceptable(planNumber,rppForMe) AND
considerHashJoinPlan2 == TRUE)
{
if ( NOT CURRSTMT_OPTDEFAULTS->optimizerPruning() OR
pws->isLatestContextWithinCostLimit() OR
NOT CURRSTMT_OPTDEFAULTS->OPHpruneWhenCLExceeded() )
{
RequirementGenerator rg(child(1), rppForMe);
if (myContext->requiresOrder())
{
// the ordering of an ordered hash join is provided by the left child
rg.removeSortKey();
rg.removeArrangement();
}
// We must insist that the right child match the parent partitioning
// requirements, because we are dealing with the right child first.
// The right child will satisfy the parent somehow (if possible) and
// the requirement we get from the right child will then be given to
// the left child, and so the parent requirement will be enforced
// on the left child in this way.
// So, we don't remove the parent requirements.
ValueIdSet partitioningKey;
partitioningKey.insertList(getEquiJoinExprFromChild1());
rg.addPartitioningKey(partitioningKey);
// --------------------------------------------------------------------
// If this is a CPU or memory-intensive operator then add a
// requirement for a maximum number of partitions, unless
// that requirement would conflict with our parent's requirement.
//
// --------------------------------------------------------------------
if (okToAttemptESPParallelism(myContext,
pws,
childNumPartsRequirement,
childNumPartsAllowedDeviation,
numOfESPsForced))
{
if (NOT numOfESPsForced)
rg.makeNumOfPartsFeasible(childNumPartsRequirement,
&childNumPartsAllowedDeviation);
rg.addNumOfPartitions(childNumPartsRequirement,
childNumPartsAllowedDeviation);
} // end if ok to try parallelism
// Produce the requirements and make sure they are feasible.
if (rg.checkFeasibility())
{
// Produce the required physical properties.
rppForChild = rg.produceRequirement();
}
}
} // end if 5th child context
// -----------------------------------------------------------------
// (child0, plan2) is the counterpart plan for (child1, plan2).
// -----------------------------------------------------------------
else if ((pws->getCountOfChildContexts() == 5) AND
currentPlanIsAcceptable(planNumber,rppForMe) AND
considerHashJoinPlan2 == TRUE
)
{
// -------------------------------------------------------------
// Cost limit exceeded? Erase the Context (child1, plan2).
// Erasing the Context does not decrement the count of Contexts
// considered so far.
// -------------------------------------------------------------
if (pws->getLatestChildContext() AND
NOT pws->isLatestContextWithinCostLimit() AND
NOT CURRSTMT_OPTDEFAULTS->OPHuseFailedPlanCost()
)
pws->eraseLatestContextFromWorkSpace();
const Context* const childContext = pws->getChildContext(1,2);
// -----------------------------------------------------------------
// Make sure a plan has been produced by the latest context.
// -----------------------------------------------------------------
if((childContext != NULL) AND childContext->hasOptimalSolution())
{
const PhysicalProperty*
sppForChild = childContext->getPhysicalPropertyForSolution();
// ---------------------------------------------------------------
// spp should have been synthesized for child's optimal plan.
// ---------------------------------------------------------------
CMPASSERT(sppForChild != NULL);
PartitioningFunction* childPartFunc =
sppForChild->getPartitioningFunction();
NABoolean rewriteForChild0 = TRUE;
ValueIdMap map(getEquiJoinExpressions());
PartitioningFunction* childPartFuncRewritten =
childPartFunc->copyAndRemap(map,
rewriteForChild0);
PartitioningRequirement* partReqForChild =
childPartFuncRewritten->makePartitioningRequirement();
RequirementGenerator rg (child(0),rppForMe);
// We have already applied the parent partitioning requirements
// to the right child, so no need to apply them to the left child.
rg.removeAllPartitioningRequirements();
// Now, add in the Join's partitioning requirement for the
// left child.
rg.addPartRequirement(partReqForChild);
// Produce the requirements and make sure they are feasible.
if (rg.checkFeasibility())
{
// Produce the required physical properties.
rppForChild = rg.produceRequirement();
}
} // end if child1 had an optimal solution
} // end if 6th child context
// -------------------------------------------------------------------
// Create a Context using the partitioning requirement that was
// generated for the current plan.
// -------------------------------------------------------------------
if (rppForChild != NULL)
{
// ---------------------------------------------------------------
// Compute the cost limit to be applied to the child.
// ---------------------------------------------------------------
CostLimit* costLimit = computeCostLimit(myContext,pws);
EstLogPropSharedPtr inputLogPropForChild;
if (childIndex == 1 AND isReuse())
{
// -----------------------------------------------------------
// Reusing logic from the Materialize node to calculate
// input logical properties for the child (see comment in
// Materialize::createContextForAChild)
// -----------------------------------------------------------
inputLogPropForChild =
child(1).getGroupAttr()->materializeInputLogProp(
myContext->getInputLogProp(),&multipleCalls_);
}
else
inputLogPropForChild = myContext->getInputLogProp();
// ---------------------------------------------------------------
// Get a Context for optimizing the child.
// Search for an existing Context in the CascadesGroup to which
// the child belongs that requires the same properties as those
// in rppForChild. Reuse it, if found. Otherwise, create a new
// Context that contains rppForChild as the required physical
// properties.
// ----------------------------------------------------------------
result = shareContext(childIndex, rppForChild,
myContext->getInputPhysicalProperty(),
costLimit,
myContext, inputLogPropForChild );
NABoolean pruneFixOff = (CmpCommon::getDefault(OPTIMIZER_PRUNING_FIX_1) == DF_OFF);
if ( (NOT (pws->isLatestContextWithinCostLimit() OR
result->hasSolution() )) AND
(CURRSTMT_OPTDEFAULTS->OPHpruneWhenCLExceeded() OR pruneFixOff)
)
result = NULL;
}
else
result = NULL;
// -------------------------------------------------------------------
// Remember the cases for which a Context could not be generated,
// or store the context that was generated.
// -------------------------------------------------------------------
pws->storeChildContext(childIndex, planNumber, result);
pws->incPlanChildCount();
// To prevent mixing children for knownChildrenCost we want to
// exit when the plan failed for the right child, or for the left child
// (and therefore for the right one as well). The only case where we stay
// in this loop is when the left child failed and we want to store a NULL
// context for both children, advancing in this loop without creating an
// extra CreatePlanTask, which was probably the initial intention.
if ( CURRSTMT_OPTDEFAULTS->optimizerPruning() AND
( pws->getPlanChildCount() == getArity() ) AND
CURRSTMT_OPTDEFAULTS->OPHexitHJcrContChiLoop()
)
{
pws->resetAllChildrenContextsConsidered();
break;
}
} // end while loop
if ( pws->getCountOfChildContexts() == childPlansToConsider )
pws->setAllChildrenContextsConsidered();
return result;
} // HashJoin::createContextForAChild()
NABoolean HashJoin::currentPlanIsAcceptable(Lng32 planNo,
const ReqdPhysicalProperty* const rppForMe) const
{
// ---------------------------------------------------------------------
// Check whether the user wants to enforce a particular plan type.
// ---------------------------------------------------------------------
// If nothing is being forced, return TRUE now.
if (rppForMe->getMustMatch() == NULL)
return TRUE;
// Check for the correct forced plan type.
JoinForceWildCard::forcedPlanEnum forcePlanToken =
getParallelJoinPlanToEnforce(rppForMe);
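// Plan numbers used below (inferred from the FORCED_TYPE1/TYPE2 cases):
// plan 0 is the type-2 (broadcast) plan; plans 1 and 2 are the type-1
// (matching partitions) plans.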
switch (forcePlanToken)
{
case JoinForceWildCard::FORCED_PLAN0:
if (planNo != 0)
return FALSE;
break;
case JoinForceWildCard::FORCED_PLAN1:
if (planNo != 1)
return FALSE;
break;
case JoinForceWildCard::FORCED_PLAN2:
if (planNo != 2)
return FALSE;
break;
case JoinForceWildCard::FORCED_TYPE1:
if (planNo == 0)
return FALSE;
break;
case JoinForceWildCard::FORCED_TYPE2:
if (planNo != 0)
return FALSE;
break;
case JoinForceWildCard::ANY_PLAN:
// Any plan satisfies this - break out so we'll return TRUE
break;
default:
return FALSE; // must be some option that doesn't apply to Hash Join
}
// If we get here, the plan must have passed all the checks.
return TRUE;
} // HashJoin::currentPlanIsAcceptable()
NABoolean HashJoin::okToAttemptESPParallelism (
const Context* myContext, /*IN*/
PlanWorkSpace* pws, /*IN*/
Lng32& numOfESPs, /*OUT*/
float& allowedDeviation, /*OUT*/
NABoolean& numOfESPsForced /*OUT*/)
{
const ReqdPhysicalProperty* rppForMe =
myContext->getReqdPhysicalProperty();
NABoolean result = FALSE;
DefaultToken parallelControlSettings =
getParallelControlSettings(rppForMe,
numOfESPs,
allowedDeviation,
numOfESPsForced);
if (parallelControlSettings == DF_OFF)
{
result = FALSE;
}
else if ( (parallelControlSettings == DF_MAXIMUM) AND
CURRSTMT_OPTDEFAULTS->maxParallelismIsFeasible()
)
{
numOfESPs = rppForMe->getCountOfPipelines();
// currently, numberOfPartitionsDeviation_ is set to 0 in
// OptDefaults when ATTEMPT_ESP_PARALLELISM is 'MAXIMUM'
allowedDeviation = CURRSTMT_OPTDEFAULTS->numberOfPartitionsDeviation();
// allow deviation by default
if (CmpCommon::getDefault(COMP_BOOL_63) == DF_OFF)
{
EstLogPropSharedPtr inLogProp = myContext->getInputLogProp();
EstLogPropSharedPtr child0OutputLogProp = child(0).outputLogProp(inLogProp);
const CostScalar child0RowCount =
(child0OutputLogProp->getResultCardinality()).minCsOne();
if ( child0RowCount.getCeiling() <
MINOF(numOfESPs,CURRSTMT_OPTDEFAULTS->numberOfRowsParallelThreshold())
)
{
// Fewer outer table rows than pipelines - allow one or more parts
allowedDeviation = 1.0;
}
}
result = TRUE;
}
else if (parallelControlSettings == DF_ON)
{
// Either user wants to try ESP parallelism for all operators,
// or they are forcing the number of ESPs for this operator.
// Set the result to TRUE. If the number of ESPs is not being forced,
// set the number of ESPs that should be used to the maximum number.
// Set the allowable deviation to either the default or, for type2
// plans where no default was specified, a percentage that allows any
// number of partitions from 2 to the maximum number. i.e. allow
// the natural partitioning of the child as long as the child is
// partitioned and does not have more partitions than max pipelines.
// NEW HEURISTIC: If there are fewer outer table rows than the number
// of pipelines, then set the deviation to allow any level of natural
// partitioning, including one. This is because we don't want to
// repartition so few rows to get more parallelism, since we would
// end up with a lot of ESPs doing nothing.
if (NOT numOfESPsForced)
{
// Determine the number of outer table rows
EstLogPropSharedPtr inLogProp = myContext->getInputLogProp();
EstLogPropSharedPtr child0OutputLogProp = child(0).outputLogProp(inLogProp);
const CostScalar child0RowCount =
(child0OutputLogProp->getResultCardinality()).minCsOne();
numOfESPs = rppForMe->getCountOfPipelines();
if (child0RowCount.getCeiling() < numOfESPs)
{
// Fewer outer table rows than pipelines - allow one or more parts
allowedDeviation = 1.0;
}
else
{
if (pws->getCountOfChildContexts() == 0)
{
// Type 2 plan.
if ( CURRSTMT_OPTDEFAULTS->deviationType2JoinsSystem() )
{
// ------------------------------------------------------
// A value for NUM_OF_PARTS_DEVIATION_TYPE2_JOINS exists.
// ------------------------------------------------------
allowedDeviation = CURRSTMT_OPTDEFAULTS->numOfPartsDeviationType2Joins();
}
else
{
// Need to make 2 the minimum number of parts to support. Use
// 1.99 to protect against rounding errors.
allowedDeviation =
((float)numOfESPs - 1.99f) / (float)numOfESPs;
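// For example (illustrative): with numOfESPs = 8 the deviation is
// (8 - 1.99) / 8 ~= 0.75, and 8 * (1 - 0.75) ~= 2, so any number of
// partitions >= 2 satisfies the requirement.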
}
}
else
{
// Type 1 plan.
allowedDeviation = CURRSTMT_OPTDEFAULTS->numberOfPartitionsDeviation();
}
} // end if fewer outer table rows than pipelines
} // end if number of ESPs not forced
result = TRUE;
}
else
{
// Otherwise, the user must have specified "SYSTEM" for the
// ATTEMPT_ESP_PARALLELISM default. This means it is up to the
// optimizer to decide.
// Return TRUE if the size of the inner table in KB exceeds the
// memory limit for BMOs. Also return TRUE if
// the # of rows returned by child(0) exceeds the threshold from the
// defaults table. The recommended number of ESPs is also computed
// to be 1 process per <threshold> number of rows. This is then
// used to indicate the MINIMUM number of ESPs that will be
// acceptable. This is done by setting the allowable deviation
// to a percentage of the maximum number of partitions such
// that the recommended number of partitions is the lowest
// number allowed. We make the recommended number of partitions
// a minimum instead of a hard requirement because we don't
// want to be forced to repartition the child just to get "less"
// parallelism.
EstLogPropSharedPtr inLogProp = myContext->getInputLogProp();
EstLogPropSharedPtr child0OutputLogProp = child(0).outputLogProp(inLogProp);
CostScalar rowCount =
(child0OutputLogProp->getResultCardinality()).minCsOne();
// This is to test better parallelism taking into account
// not only child0 but also this operator cardinality
if(CmpCommon::getDefault(COMP_BOOL_125) == DF_ON)
{
rowCount += (getGroupAttr()->outputLogProp(inLogProp)->
getResultCardinality()).minCsOne();
}
EstLogPropSharedPtr child1OutputLogProp = child(1).outputLogProp(inLogProp);
const CostScalar child1RowCount =
(child1OutputLogProp->getResultCardinality()).minCsOne();
const Lng32 child1RowLength =
child(1).getGroupAttr()->getCharacteristicOutputs().getRowLength();
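// The external row length below adds 8 bytes per row, presumably to
// account for per-row overhead (an assumption; only the constant is
// visible here).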
const Lng32 extChild1RowLength = child1RowLength + 8;
const CostScalar child1SizeInKB =
((child1RowCount * extChild1RowLength) / 1024.);
const CostScalar numberOfRowsThreshold =
CURRSTMT_OPTDEFAULTS->numberOfRowsParallelThreshold();
const CostScalar bigMemoryLimit = CURRSTMT_OPTDEFAULTS->getMemoryLimitPerCPU();
// First check for the inner table exceeding the memory limit,
// since for a HJ it is more important to have the number of ESPs
// used based on the memory requirements of the inner table than
// the number of rows of the outer table.
CostScalar parThreshold = child1SizeInKB / bigMemoryLimit;
// If we consider only child1 to define the level of parallelism
// then deviation could be too close to 1 and we can have not enough
// parallelism from child0 point of view
parThreshold = MAXOF(parThreshold, rowCount / numberOfRowsThreshold);
if (parThreshold.isGreaterThanOne())
{
numOfESPs = rppForMe->getCountOfPipelines();
allowedDeviation = (float) MAXOF(1.001 -
ceil(parThreshold.value() / numOfESPs), 0);
result = TRUE;
}
else // no parallelism needed
{
result = FALSE;
}
} // end if the user let the optimizer decide
return result;
} // HashJoin::okToAttemptESPParallelism()
//<pb>
//==============================================================================
// Starting with a cost limit from a specified hash join operator's context,
// produce a new cost limit for a hash join operator's child by accumulating the
// hash join operator's preliminary cost and directly subtracting out the
// elapsed time of the known children cost. Both the preliminary cost and the
// known children cost come from the hash join operator's plan workspace.
//
//
// Input:
// myContext -- specified context hash join operator.
//
// pws -- specified plan workspace for hash join operator.
//
// Output:
// none
//
// Return:
// Copy of accumulated cost limit. NULL if context contains no cost limit.
//
//==============================================================================
CostLimit*
HashJoin::computeCostLimit(const Context* myContext,
PlanWorkSpace* pws)
{
//----------------------------------------------------------------------------
// Context contains no cost limit. Interpret this as an infinite cost limit.
// Returning NULL indicates an infinite cost limit.
//----------------------------------------------------------------------------
if (myContext->getCostLimit() == NULL)
{
return NULL;
}
//---------------------------------------------------
// Create copy of cost limit from specified context.
//---------------------------------------------------
CostLimit* costLimit = myContext->getCostLimit()->copy();
//---------------------------------------------------------------------
// Accumulate this operator's preliminary cost into the ancestor cost.
//---------------------------------------------------------------------
Cost* tempCost = pws->getOperatorCost();
costLimit->ancestorAccum(*tempCost, myContext->getReqdPhysicalProperty());
//-----------------------------------------------------------------------
// For hash joins, we know that the right child must complete before
// the left child can begin, so we can directly subtract the elapsed time
// of one child's cost from the cost limit given to the other child.
//-----------------------------------------------------------------------
const ReqdPhysicalProperty* rpp = myContext->getReqdPhysicalProperty();
tempCost = pws->getKnownChildrenCost();
if (tempCost != 0)
{
ElapsedTime et = tempCost->convertToElapsedTime(rpp);
costLimit->unilaterallyReduce(et);
}
//---------------------------------------------------------------------------
// Use best plan so far for this operator to try and further reduce the cost
// limit.
//---------------------------------------------------------------------------
tempCost = pws->getBestRollUpCostSoFar();
if (tempCost != NULL)
{
costLimit->tryToReduce(*tempCost,rpp);
}
return costLimit;
} // HashJoin::computeCostLimit()
//<pb>
//==============================================================================
// Synthesize physical properties for hash join operator's current plan
// extracted from a specified context.
//
// Input:
// myContext -- specified context containing this operator's current plan.
//
// planNumber -- plan's number within the plan workspace. Used for
// synthesizing partitioning functions.
//
// Output:
// none
//
// Return:
// Pointer to this operator's synthesized physical properties.
//
//==============================================================================
PhysicalProperty*
HashJoin::synthPhysicalProperty(const Context* myContext,
const Lng32 planNumber,
PlanWorkSpace *pws)
{
if(isNoOverflow()){
CMPASSERT( (getOperatorType() == REL_ORDERED_HASH_JOIN) OR
(getOperatorType() == REL_ORDERED_HASH_SEMIJOIN) OR
(getOperatorType() == REL_ORDERED_HASH_ANTI_SEMIJOIN) OR
(getOperatorType() == REL_LEFT_ORDERED_HASH_JOIN) OR
(getOperatorType() == REL_HYBRID_HASH_JOIN AND
isOrderedCrossProduct())); // hybrid_hash_join used to implement a
// cross product should not overflow.
}
else
CMPASSERT( (getOperatorType() == REL_HYBRID_HASH_JOIN) OR
(getOperatorType() == REL_HYBRID_HASH_SEMIJOIN) OR
(getOperatorType() == REL_HYBRID_HASH_ANTI_SEMIJOIN) OR
(getOperatorType() == REL_LEFT_HYBRID_HASH_JOIN) OR
(getOperatorType() == REL_FULL_HYBRID_HASH_JOIN));
const PhysicalProperty* const sppOfLeftChild =
myContext->getPhysicalPropertyOfSolutionForChild(0);
const PhysicalProperty* const sppOfRightChild =
myContext->getPhysicalPropertyOfSolutionForChild(1);
ValueIdList emptySortKey;
// ---------------------------------------------------------------------
// Call the default implementation (RelExpr::synthPhysicalProperty())
// to synthesize the properties on the number of cpus.
// ---------------------------------------------------------------------
PhysicalProperty* sppTemp = RelExpr::synthPhysicalProperty(myContext,
planNumber,
pws);
// ----------------------------------------------------------------
// Synthesize the partitioning function from the first child of the
// winning plan. Hash Joins have three potential plans--see member
// function HashJoin::createContextForAChild(). In plans 0 and 1,
// the left child is first; in plan 2, the right child is first.
// ----------------------------------------------------------------
CMPASSERT(planNumber == 0 || planNumber == 1 || planNumber == 2);
PartitioningFunction* myPartFunc;
const SearchKey* myPartSearchKey;
if (planNumber == 0 || planNumber == 1)
{
myPartFunc = sppOfLeftChild->getPartitioningFunction();
myPartSearchKey = sppOfLeftChild->getPartSearchKey();
}
else
{
myPartFunc = sppOfRightChild->getPartitioningFunction();
myPartSearchKey = sppOfRightChild->getPartSearchKey();
}
// Can not strip off skewness property from the (hash2) partfunc, because
// otherwise the hash join output can be joined with another
// normal hash2 part func. This is incorrect because only
// hash2WithBroadcast or replicateViaBroadcast is compatible with
// hash2RandomDistribute part func. Other combinations will lead to
// incorrect join results.
PhysicalProperty* sppForMe;
NABoolean canAppendRightColumns = FALSE;
NABoolean childSortOrderTypesAreSame = FALSE;
if (isOrderedCrossProduct()) {
// right columns can be appended to give (left child order, right child order)
// only if left child rows are unique.
// see comment in NestedJoin::synthPhysicalProperties for an explanation.
GroupAttributes *leftGA = child(0).getGroupAttr();
ValueIdSet leftSortCols;
// make the list of sort cols into a ValueIdSet
leftSortCols.insertList(sppOfLeftChild->getSortKey());
// check for uniqueness of the sort columns
if (leftGA->isUnique(leftSortCols))
{
canAppendRightColumns = TRUE;
// The child sort order types are the same if they are equal and both
// children's dp2SortOrderPartFunc are the same (as in both being
// NULL), or they are both not null but they are equivalent.
if ((sppOfLeftChild->getSortOrderType() ==
sppOfRightChild->getSortOrderType()) AND
((sppOfLeftChild->getDp2SortOrderPartFunc() ==
sppOfRightChild->getDp2SortOrderPartFunc()) OR
((sppOfLeftChild->getDp2SortOrderPartFunc() != NULL) AND
(sppOfRightChild->getDp2SortOrderPartFunc() != NULL) AND
(sppOfLeftChild->getDp2SortOrderPartFunc()->
comparePartFuncToFunc(
*sppOfRightChild->getDp2SortOrderPartFunc()) == SAME)
)
)
)
childSortOrderTypesAreSame = TRUE;
}
if (NOT (canAppendRightColumns AND
childSortOrderTypesAreSame))
{
// even though this node seemed to be an appropriate choice
// to be an order preserving cross product in HashJoinRule::nextSubstitute,
// we now find, due to (a) non-uniqueness of the left child
// or (b) the child sort orders not being the same, that this cross
// product cannot be order preserving.
// So we are unsetting the flag so that the right join type will be shown by explain.
// noOverflow is set to FALSE because a non-cross-product HHJ has noOverflow set
// to FALSE; no ordering is promised.
setIsOrderedCrossProduct(FALSE);
setNoOverflow(FALSE);
}
}
if(isNoOverflow())
{
// ---------------------------------------------------------------------
// the ordered join delivers its result sorted (with respect to the left child)
// and preserves the partitioning of the left child table
// ---------------------------------------------------------------------
SortOrderTypeEnum newSortOrderType = sppOfLeftChild->getSortOrderType();
ValueIdList newSortKey;
newSortKey.insert(sppOfLeftChild->getSortKey());
// if this Hash Join is implementing a order preserving cross product
// then set the physical properties to have (left child order, right child order).
if (canAppendRightColumns && childSortOrderTypesAreSame)
{
const ValueIdList & rightSortKey = sppOfRightChild->getSortKey();
for (Lng32 i = 0; i < (Lng32)rightSortKey.entries(); i++)
newSortKey.insert(rightSortKey[i]);
}
PartitioningFunction *
newDP2SortOrderPartFunc = sppOfLeftChild->getDp2SortOrderPartFunc();
sppForMe =
new(CmpCommon::statementHeap()) PhysicalProperty(
newSortKey,
newSortOrderType, // sort order type of the left child
newDP2SortOrderPartFunc,
myPartFunc,
sppOfLeftChild->getPlanExecutionLocation(), //||opt synth fr both kids
combineDataSources(
sppOfLeftChild->getDataSourceEnum(),
sppOfRightChild->getDataSourceEnum()),
sppOfLeftChild->getIndexDesc(),
myPartSearchKey);
}
else
{
// ---------------------------------------------------------------------
// the hybrid join delivers its result unsorted and preserves the
// partitioning of the left child table
// ---------------------------------------------------------------------
sppForMe =
new(CmpCommon::statementHeap()) PhysicalProperty(
emptySortKey,
NO_SOT, // no sort key, so no sort order type either
NULL, // no dp2SortOrderPartFunc either
myPartFunc,
sppOfLeftChild->getPlanExecutionLocation(), //||opt synth fr both kids
combineDataSources(
sppOfLeftChild->getDataSourceEnum(),
sppOfRightChild->getDataSourceEnum()),
sppOfLeftChild->getIndexDesc(),
myPartSearchKey);
}
sppForMe->setCurrentCountOfCPUs(sppTemp->getCurrentCountOfCPUs());
// remove anything that's not covered by the group attributes
// (NB: no real need to call this method, since the sortKey is empty,
// and that's the only thing that this method checks currently)
sppForMe->enforceCoverageByGroupAttributes (getGroupAttr()) ;
// transfer the onePartitionAccess flag to join.
// We check the flag in HashJoin::computeOperatorPriority().
setInnerAccessOnePartition(sppOfRightChild->getAccessOnePartition());
delete sppTemp;
return sppForMe;
} // HashJoin::synthPhysicalProperty()
//<pb>
// -----------------------------------------------------------------------
// member functions for class Union
// -----------------------------------------------------------------------
NABoolean Union::rppAreCompatibleWithOperator
(const ReqdPhysicalProperty* const rppForMe) const
{
if ( rppForMe->executeInDP2() )
{
if ( !CURRSTMT_OPTDEFAULTS->pushDownDP2Requested() )
return FALSE;
// no push-down union if the containing CS is not pushed down
if ( isinBlockStmt() == TRUE )
{
if ( NOT PushDownCSRequirement::isInstanceOf(
rppForMe->getPushDownRequirement()
)
)
return FALSE;
} else {
// no push-down union if it is not the one introduced
// for the IUD log table associated with a MV.
if ( getInliningInfo().isUsedForMvLogging() == FALSE )
return FALSE;
}
}
// ---------------------------------------------------------------------
// If a partitioning requirement is specified, and it specifies some
// required partitioning key columns, then ensure that the required
// partitioning keys are contained in the output columns of the Union.
// ---------------------------------------------------------------------
if (rppForMe->requiresPartitioning() AND
NOT rppForMe->
getPartitioningRequirement()->getPartitioningKey().isEmpty() AND
NOT getUnionForIF())
{
// -----------------------------------------------------------------
// Construct a ValueIdSet of expressions that are produced as
// output by the Union.
// -----------------------------------------------------------------
ValueIdSet partKeyForUnion(colMapTable());
// -----------------------------------------------------------------
// If the (potential) partitioning key for the Union is contained
// in the partitioning key for the partitioning requirement, then
// the Union is capable of satisfying that requirement.
// -----------------------------------------------------------------
return partKeyForUnion.contains
(rppForMe->getPartitioningRequirement()
->getPartitioningKey());
} // endif (rppForMe->requiresPartitioning())
return TRUE;
} // Union::rppAreCompatibleWithOperator
// -----------------------------------------------------------------------
// member functions for class MergeUnion
// -----------------------------------------------------------------------
//<pb>
// -----------------------------------------------------------------------
// MergeUnion::costMethod()
// Obtain a pointer to a CostMethod object providing access
// to the cost estimation functions for nodes of this class.
// -----------------------------------------------------------------------
CostMethod*
MergeUnion::costMethod() const
{
static THREAD_P CostMethodMergeUnion *m = NULL;
if (m == NULL)
m = new (GetCliGlobals()->exCollHeap()) CostMethodMergeUnion();
return m;
} // MergeUnion::costMethod()
// ---------------------------------------------------------------------
// Performs mapping on the partitioning function, from the
// union to the designated child.
// ---------------------------------------------------------------------
PartitioningFunction* MergeUnion::mapPartitioningFunction(
const PartitioningFunction* partFunc,
NABoolean rewriteForChild0)
{
NABoolean mapItUp = FALSE;
if (rewriteForChild0)
return partFunc->copyAndRemap(getMap(0),mapItUp);
else
return partFunc->copyAndRemap(getMap(1),mapItUp);
} // end MergeUnion::mapPartitioningFunction()
//<pb>
// -----------------------------------------------------------------------
// Given an input synthesized child sort key, a required arrangement,
// and a input required sort order (if any), generate a new sort key
// that has all the expressions in the required sort key and required
// arrangement, and only those expressions, in an order that matches
// the child sort key. Return this new sort key, which can then be used
// as a sort key requirement for the other child of the union or as the
// synthesized union sort key after being mapped.
//
// One reason why we need this method: If the required arrangement contains
// a simple arithmetic expression, like "b+1", it is possible that the
// sort key that is synthesized will be "b". So, when we try to map the
// sort key to the other side of the union, to use it as a requirement,
// we will not map the "b" to "b+1", which is what we want. We will either
// fail to map "b" or, if the select list of the union contained "b" in
// addition to "b+1", we will map "b" to "b" - even though "b" may not
// have been in the required arrangement. This would cause us to require
// something from the other side of the union that was not in
// the original required arrangement. This could lead to incorrect results.
// This is because even though the one side of the union selected
// "b+1" and "b", and they happen to be equivalent as far as sort order
// is concerned, there is no guarantee that their positional equivalents
// on the other side of the union will have equivalent sort orders.
// For example, take the following query:
//
// Select y,sum(z)
// from (select b+1,b
// from t005t1
// union all
// select c,a+1
// from t005t1) as t(y,z)
// group by y;
//
// Notice:
// "b+1" on the left side of the union maps to "c" on the right side.
// "b" on the left side of the union maps to "a+1" on the right side.
//
// "c" and "a+1" DO NOT have equivalent sort orders. If we map the
// sort key we get from the left side to "b", even though "b+1" is
// what was required, we will then map "b" from the left side to
// "a+1" on the right side - even though we need to require the
// right side to be ordered on "c" ! THIS WILL LEAD TO WRONG ANSWERS!
//
// So, this is why we need to make sure that the sort key
// from one side of the union contains only Value Id's from the
// required sort key and arrangement (as translated to be in terms of
// the Value Id's from that side of the union), before we can use the
// sort key as a requirement for the other side of the union or as the
// synthesized sort key for the union itself.
//
// NOTE: One thing this method does not handle, and the union mapping
// in general cannot handle, is if the user selects the exact same
// column more than once, such as
// "select b,b,b from t union select x,y,z from t2".
// The mapping wouldn't know which "b" to map to. Also, the
// extra "b"'s would always disappear from the required arrangement
// for the left child, since a required arrangement is a ValueIdSet,
// and a ValueIdSet does not allow duplicates. So, we would never
// decide upon an order for the extra "b"'s, and so there would be
// no required order for "y" and "z" on the other side of the union,
// for the plan where we talk to the left child first. This will
// yield incorrect results. To solve this, we must refuse to
// synthesize any sort keys, partitioning keys, etc. for the union,
// because we cannot get it right. One way to do this would be to
// disallow entries in the union map for any duplicate select list
// entries. This would cause the mapping for these expressions to
// always fail and so the children could never satisfy any
// requirements that require mapping these expressions.
// We don't do this now - this is a bug.
// -----------------------------------------------------------------------
void MergeUnion::synthUnionSortKeyFromChild (
const ValueIdList &childSortKey, /* input */
const ValueIdSet &reqArrangement, /* input */
ValueIdList &unionSortKey /* input,output */
) const
{
// Make a copy of the req arrangement so we can delete from it
// every time we add an entry to the union sort key. This way we
// can make sure all required arrangement columns are present in
// the union sort key we produce.
ValueIdSet reqArrangementCopy = reqArrangement;
// Translate the input unionSortKey, which represents any required
// sort key, into a set, with any INVERSE nodes (i.e. DESC order)
// stripped off. This is so we can make sure any required
// arrangement expression is not part of the required sort key,
// as we do not want any duplicate expressions in the union sort
// key we produce. Note that we don't get the simplified form of
// the required sort key cols, because we want to check the
// unsimplified form of the required arrangement expression against
// the unsimplified form of the required sort key. For example,
// even though b+1 is already in the required sort key, we would
// still want to include "b" if it was in the required arrangement,
// because these two do not represent the same piece of data outside
// of the union or on the other side of the union. But, if "b+1"
// is in both the req. sort key and req. arrangement, then clearly
// this is the same piece of data and we don't want to include it again.
//
ValueIdSet reqSortKeyCols;
ValueId noInverseVid;
for (CollIndex j = 0; j < unionSortKey.entries(); j++)
{
noInverseVid = unionSortKey[j].getItemExpr()->
removeInverseOrder()->getValueId();
reqSortKeyCols += noInverseVid;
}
ValueId sortKeySvid, reqVid, reqSvid;
CollIndex i = 0;
NABoolean done = FALSE;
OrderComparison order1,order2;
ValueIdSet childSortKeyProcessed;
// Loop until we've processed all the sort key entries, or until
// we find a sort key entry that is not in the required arrangement.
while (!done && i < childSortKey.entries())
{
done = TRUE; // Assume we won't find a match
sortKeySvid = childSortKey[i].getItemExpr()->
simplifyOrderExpr(&order1)->getValueId();
// The following 'if' assures that duplicated simplified sort key
// expressions are not processed.
//
// Here is an example of childSortKey with duplicates.
// The index I on T(a), where T(a,b) has both a and b as primary key column
// will generate child sort key list [a, a, b]. The list is generated
// inside IndexDesc::IndexDesc() under the name orderOfPartitioningKeyValues_.
//
// As another example, I1 on T(a desc).
//
// Duplications require an extra sort requirement for the other side; this is bad
// from a performance point of view. Worse, the method finalizeUnionSortKey()
// requires the size of unionSortKey to be the same as the reqArrangement.
// So childSortKey with duplicates will lead to an assertion failure there.
//
// We can remove duplicate simplified expressions because they do not
// provide any useful information; as in the following code,
// reqSvid (= sortKeySvid, simplified) becomes part of the union sort key.
//
// The logic is added to fix Solution 10-020913-1676.
if ( NOT childSortKeyProcessed.contains(sortKeySvid) )
{
childSortKeyProcessed.insert(sortKeySvid);
// Get the value id for the simplified form of the sort key expression.
// For each sort key column, loop over all required arrangement expr.s
// There may be multiple arrangement expressions that match the same
// sort key entry, e.g. b+1 and b+2 will both match a sort key entry of b
for (reqVid = reqArrangement.init();
reqArrangement.next(reqVid);
reqArrangement.advance(reqVid))
{
// Get the value id for the simplified form of the required
// arrangement expression.
reqSvid = reqVid.getItemExpr()->
simplifyOrderExpr(&order2)->getValueId();
if (sortKeySvid == reqSvid)
{
// Remove the req. arrangement expression from the copy
reqArrangementCopy.remove(reqVid);
done = FALSE; //found this sort key entry in the req. arrangement
// Add the required arrangement expression if it is not part
// of the required sort key.
if (NOT reqSortKeyCols.contains(reqVid))
{
// If the sort key expression and the required arrangement
// expression do not specify the same order, then
// generate an INVERSE expression on top of the req. arr. expr.
if (order1 != order2)
{
InverseOrder * inverseExpr =
new(CmpCommon::statementHeap())
InverseOrder (reqVid.getItemExpr());
inverseExpr->synthTypeAndValueId();
// Add the INVERSE expression plus it's child (which is the
// val Id from the required arrangement) to the new sort key
unionSortKey.insert(inverseExpr->getValueId());
}
else
{
// Add the value Id from the requirement to the new sort key.
unionSortKey.insert(reqVid);
}
} // end if not a duplicate entry
} // end if found sort key entry in the req. arrangement
} // end for all req. arrangement entries
} // end if not contained in childSortKeyProcessed
i++;
} // end while more sort key entries to process
// If there are any required arrangement columns left, this means
// we did not find them in the synthesized child sort key. This
// means they must have been equal to a constant, so just tack any
// remaining arrangement expressions with ASC order to the end of
// the union sort key if they are not part of the required sort key.
for (reqVid = reqArrangementCopy.init();
reqArrangementCopy.next(reqVid);
reqArrangementCopy.advance(reqVid))
{
// Add the required arrangement expression if it is not part
// of the required sort key.
if (NOT reqSortKeyCols.contains(reqVid))
{
unionSortKey.insert(reqVid);
}
} // end for all req. arrangement entries
} // MergeUnion::synthUnionSortKeyFromChild()
// This help function tries to make the Union sort key the same
// length as the arrangement requirement of Union cols. The key
// can be shorter when some of the Union columns are duplicated,
// as t2.i2 in the query
//
// SELECT T2.i2, T2.i2, T2.i2 FROM D2 T2
// UNION
// SELECT -39 , T0.i2, t0.i2 FROM D1 T0 ;
//
// Because the Union sort key is generated based on the first
// optimized child of the Union, and is used in derivation of
// the sort order requirement for the un-optimized child, it is
// important for the Union sort key to maintain all the arrangement
// (sort) requirements from the parent of the Union. Otherwise, the
// output from the other child may not be properly sorted, making it
// impossible to completely remove the duplicated Unioned rows.
//
// The task is accomplished by appending to the Union sort key those
// ValueIds in the arrangement set that are missing from the key.
//
// This function is added to fix CR 10-010502-2586 (MX0412
// With partitioned tables get assertion failure in OptPhysRelExpr).
//
// Args:
// knownSortKey: the sort key already generated
// knownSide: on which side the sort key is generated (0 left, 1 right)
//    arrCols: the arrangement requirement on columns
// unionSortKey: the union sort key to extend if necessary.
//
NABoolean
MergeUnion::finalizeUnionSortKey(const ValueIdList& knownSortKey,
Lng32 knownSide,
const ValueIdSet& arrCols,
ValueIdList& unionSortKey)
{
// Before finalizing the sortKey, we shall remove any duplicate
// entries that are already present in the unionSortKey
// This takes care of queries like
// SELECT T0.i2 , T0.i2 , T0.i2 FROM d1 T0
// UNION
// VALUES( '1', '2', '3')
// ORDER BY 1, 2 ;
// If there are any duplicate entries coming from the arrangement
// requirement, those will still be retained. Sol: 10-030708-7691
  // We search using a simplified version of the Union sort key, in
  // which all inverse nodes (on top of a sort order) are removed.
ValueIdList simplifiedUnionSortKey = unionSortKey.removeInverseOrder();
ValueIdSet copyUnionSortKey(simplifiedUnionSortKey); // to avoid any duplicates
ValueIdList sortKeyWithoutDuplicates;
for (CollIndex i = 0; i < simplifiedUnionSortKey.entries(); i++)
{
ValueId vid = simplifiedUnionSortKey[i];
if ( copyUnionSortKey.contains(vid))
{
      // It has not yet been added to the new key without duplicates, so
      // add it there and remove all its entries from the copy, to make
      // sure it is not added again.
sortKeyWithoutDuplicates.insert(unionSortKey[i]);
copyUnionSortKey -= vid;
}
}
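  // Every entry in copyUnionSortKey came from simplifiedUnionSortKey,
  // so the loop above must have consumed all of them; hence the
  // assertion below.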
DCMPASSERT(copyUnionSortKey.entries() == 0);
// copy back the result in UnionSortKey for further processing
unionSortKey = sortKeyWithoutDuplicates;
  // For the time being, until Sol: 10-030812-8714 is fixed, we
  // disable this piece of code when the sorting order is different
  // from the arrangement. This is done by returning FALSE, which is
  // checked at the time we create the context for the child.
if (unionSortKey.entries() != arrCols.entries() )
return FALSE;
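  // Note: because of the early return above, the key-extension logic
  // below is currently unreachable; it is retained so that it can be
  // re-enabled once Sol: 10-030812-8714 is fixed.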
if ( unionSortKey.entries() < arrCols.entries() ) {
CollIndex index = unionSortKey.entries();
for (ValueId vid = arrCols.init(); arrCols.next(vid);
arrCols.advance(vid))
{
if ( NOT simplifiedUnionSortKey.contains(vid) ) {
// duplications of source vids, which means duplicated
// columns in the Union columns. We need to insert such vids
// into the UnionSortKey. But before that, we need to determine
// the ASC or DESC order for each such vid.
//
// Added to fix CR 10-010814-4545 - assertion failure
// (unionSortKey.entries() == arrCols.entries())
// First get the source valueId corresponding to the vid.
ValueId sourceVid =
((ValueIdUnion*)vid.getItemExpr())->getSource(knownSide);
// If the source vid is in the known sort key (directly),
// then the vid corresponds to an ASC order. Otherwise, the vid
// is hidden below an InverseOrder node, and hence the DESC
// order. Note the vid can only have one sort order.
//
if ( knownSortKey.contains(sourceVid) ) // ASC
{
unionSortKey.insertAt(index++,vid);
} else { // DESC
// fabricate a new InverseOrder node.
ItemExpr *inverseCol = new(CmpCommon::statementHeap())
InverseOrder(vid.getItemExpr());
inverseCol->synthTypeAndValueId();
unionSortKey.insertAt(index++, inverseCol->getValueId());
}
}
}
}
CMPASSERT(unionSortKey.entries() == arrCols.entries());
return TRUE;
}
//<pb>
Context* MergeUnion::createContextForAChild(Context* myContext,
PlanWorkSpace* pws,
Lng32& childIndex)
{
// ---------------------------------------------------------------------
// Merge Union generates at most 2 context pairs. The first is
// either a non-parallel plan or a matching partitions parallel plan,
// where we generate the left child context first. The second pair is
// either a non-parallel plan or a matching partitions parallel plan,
// where we generate the right child context first.
// The reason we try matching partitions plans both ways is to try
// and avoid having to repartition both tables. If we only try one
// way and the first table must repartition, then the second table
// must repartition if it is going to be able to match the hash
// repartitioning function that the first table synthesized. If we
// were to try the other way and the first table this time did not
// need to repartition, then we would only have to range repartition
// the second table.
// The reason we might try non-parallel plans both ways is
// because if there is a required arrangement, then the
// first child tried only has to match the required arrangement,
// but the second child must match a required order.
// This might force us to sort the second child
// since it is harder to match an order than an arrangement.
// The second child might be large and thus more expensive to
  // sort than the first child, so we might want to try it both ways.
// Note that there is no fundamental requirement that both sides
// of a union must be partitioned the same way to execute them in
// parallel. But, if we were to allow the two children of a union
// to exhibit different partitioning functions, what would the
// partitioning function for the union be? Until we solve this
// problem, we must insist that both sides of the union always
// exhibit the same partitioning function.
// ---------------------------------------------------------------------
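  // In terms of child contexts, the creation order is therefore:
  //   context 0: plan 0, left  child (gets the required arrangement)
  //   context 1: plan 0, right child (gets the order chosen by the left)
  //   context 2: plan 1, right child (gets the required arrangement)
  //   context 3: plan 1, left  child (gets the order chosen by the right)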
Context* result = NULL;
Lng32 planNumber = 0;
Context* childContext = NULL;
const ReqdPhysicalProperty* rppForMe =
myContext->getReqdPhysicalProperty();
PartitioningRequirement* partReqForMe =
rppForMe->getPartitioningRequirement();
const ReqdPhysicalProperty* rppForChild = NULL;
Lng32 childNumPartsRequirement = ANY_NUMBER_OF_PARTITIONS;
float childNumPartsAllowedDeviation = 0.0;
NABoolean numOfESPsForced = FALSE;
SortOrderTypeEnum childSortOrderTypeReq = NO_SOT;
PartitioningRequirement* dp2SortOrderPartReq = NULL;
// ---------------------------------------------------------------------
// Compute the number of child plans to consider.
// ---------------------------------------------------------------------
Lng32 childPlansToConsider = 4;
NABoolean mustTryBothChildrenFirst = FALSE;
// If there is a required arrangement of more than one column,
// we need to generate two different plans where we alternate
// which child to try first, in order to guarantee that we only
  // sort if absolutely necessary and, if we do sort, we sort the
  // smallest child. The crux of the matter is that it is easier to satisfy
// an arrangement than a sort order, and whoever goes first only
// has to satisfy that arrangement, not a sort
// order.
if ((rppForMe->getArrangedCols() != NULL) AND
(rppForMe->getArrangedCols()->entries() > 1))
mustTryBothChildrenFirst = TRUE;
// If we don't need to try two plans (four child plans) for sort
// purposes and we don't need to try two for parallel purposes,
// then indicate we will only try one plan (two child plans).
if (NOT mustTryBothChildrenFirst AND
((rppForMe->getCountOfPipelines() == 1) OR
((partReqForMe != NULL) AND
(partReqForMe->isRequirementExactlyOne() OR
partReqForMe->isRequirementReplicateNoBroadcast()))
)
)
childPlansToConsider = 2;
// ---------------------------------------------------------------------
// The creation of the next Context for a child depends upon the
// the number of child Contexts that have been created in earlier
// invocations of this method.
// ---------------------------------------------------------------------
while ((pws->getCountOfChildContexts() < childPlansToConsider) AND
(rppForChild == NULL))
{
// If we stay in this loop because we didn't generate some
// child contexts, we need to reset the child plan count when
    // it gets to be as large as the arity, because otherwise we
// would advance the child plan count past the arity.
if (pws->getPlanChildCount() >= getArity())
pws->resetPlanChildCount();
// -------------------------------------------------------------------
// Create the 1st Context for left child:
// -------------------------------------------------------------------
if (pws->getCountOfChildContexts() == 0)
{
childIndex = 0;
planNumber = 0;
RequirementGenerator rg(child(0),rppForMe);
// If there was a required order and/or arrangement, then we
// need to remove it, and replace it with a required order
// and/or arrangement that is expressed in terms of the child's
// ValueIds. This is because the requirement will be in terms
// of ValueIdUnions for the Union.
if (myContext->requiresOrder())
{
rg.removeSortKey();
rg.removeArrangement();
// Sort Order type was removed by the calls above, so
// get it again from the rpp
childSortOrderTypeReq = rppForMe->getSortOrderTypeReq();
// Convert any DP2_OR_ESP_NO_SORT requirement to ESP_NO_SORT,
// to avoid the possibility of one union child choosing a
// DP2 sort order type and the other union child choosing a
// ESP_NO_SORT sort order type, which are incompatible for union.
if (childSortOrderTypeReq == DP2_OR_ESP_NO_SORT_SOT)
childSortOrderTypeReq = ESP_NO_SORT_SOT;
else if (rppForMe->getDp2SortOrderPartReq() != NULL)
{
// Need to map any dp2SortOrderPartReq
NABoolean mapItUp = FALSE;
dp2SortOrderPartReq = rppForMe->getDp2SortOrderPartReq()->
copyAndRemap(getMap(childIndex),
mapItUp);
}
}
if (rppForMe->getArrangedCols() != NULL)
{
ValueIdSet newArrangedCols;
getMap(childIndex).rewriteValueIdSetDown(*rppForMe->getArrangedCols(),
newArrangedCols);
rg.addArrangement(newArrangedCols,
childSortOrderTypeReq,
dp2SortOrderPartReq);
}
if (rppForMe->getSortKey() != NULL)
{
ValueIdList newSortKey;
getMap(childIndex).rewriteValueIdListDown(*rppForMe->getSortKey(),
newSortKey);
rg.addSortKey(newSortKey,
childSortOrderTypeReq,
dp2SortOrderPartReq);
}
// If there is a required partitioning function, then we need
// to remove it, and replace it with a partitioning function
// that is expressed in terms of the child's ValueIds.
if (rppForMe->requiresPartitioning())
{
// remove the original requirement
rg.removeAllPartitioningRequirements();
// Rewrite parent req. part func part key in terms of the
// values that appear below this Union.
NABoolean mapItUp = FALSE;
PartitioningRequirement* parentPartReq =
rppForMe->getPartitioningRequirement();
PartitioningRequirement* parentPartReqRewritten =
parentPartReq->copyAndRemap(getMap(childIndex),mapItUp);
// add in the new, rewritten partitioning requirement
rg.addPartRequirement(parentPartReqRewritten);
}
// --------------------------------------------------------------------
// If this is a CPU or memory-intensive operator then add a
// requirement for a maximum number of partitions, unless
// that requirement would conflict with our parent's requirement.
//
// --------------------------------------------------------------------
if (okToAttemptESPParallelism(myContext,
pws,
childNumPartsRequirement,
childNumPartsAllowedDeviation,
numOfESPsForced))
{
if (NOT numOfESPsForced)
rg.makeNumOfPartsFeasible(childNumPartsRequirement,
&childNumPartsAllowedDeviation);
rg.addNumOfPartitions(childNumPartsRequirement,
childNumPartsAllowedDeviation);
} // end if ok to try parallelism
// Produce the requirements and make sure they are feasible.
if (rg.checkFeasibility())
{
// Produce the required physical properties.
rppForChild = rg.produceRequirement();
}
} // end if 1st child context
// -------------------------------------------------------------------
// Create 1st Context for right child:
// Any required order matches the order that is produced by the
// optimal solution for my left child.
//
// -------------------------------------------------------------------
else if (pws->getCountOfChildContexts() == 1)
{
childIndex = 1;
planNumber = 0;
// ---------------------------------------------------------------
// Cost limit exceeded? Erase the Context from the workspace.
// Erasing the Context does not decrement the count of Contexts
// considered so far.
// ---------------------------------------------------------------
if (NOT pws->isLatestContextWithinCostLimit())
pws->eraseLatestContextFromWorkSpace();
childContext = pws->getChildContext(0,0);
// ---------------------------------------------------------------
// If the Context that was created for the left child has an
// optimal solution whose cost is within the specified cost
// limit, create the corresponding Context for the right child.
// ---------------------------------------------------------------
if((childContext != NULL) AND childContext->hasOptimalSolution())
{
const PhysicalProperty*
sppForChild = childContext->getPhysicalPropertyForSolution();
// ---------------------------------------------------------------
// spp should have been synthesized for child's optimal plan.
// ---------------------------------------------------------------
CMPASSERT(sppForChild != NULL);
PartitioningFunction* leftPartFunc =
sppForChild->getPartitioningFunction();
// Take the synthesized partitioning function from the left
// child, translate it to be in terms of the ValueIdUnions of
// the union, then translate this to be in terms of the valueids
// of the right child.
NABoolean mapItUp = TRUE;
PartitioningFunction* unionPartFunc =
leftPartFunc->copyAndRemap(getMap(0),mapItUp);
mapItUp = FALSE;
PartitioningFunction* rightPartFunc =
unionPartFunc->copyAndRemap(getMap(1),mapItUp);
// Now transform this partitioning function into a partitioning
// requirement.
PartitioningRequirement* partReqForRight =
rightPartFunc->makePartitioningRequirement();
// Try a new requirement (require right child of the UNION to be
// hash2 partitioned with the same number of partitions as the
// left child) when the partitioning key is not covered by the
// characteristic output AND if both children are hash2 partitioned
// with the same number of partitions. Please note that, in this
// scenario, a parallel UNION plan is considered only for hash2
// partitioned children and NOT for hash (hash1) and range
// partitioned children.
if ((CURRSTMT_OPTDEFAULTS->isSideTreeInsert() == FALSE) &&
CmpCommon::getDefault(COMP_BOOL_22) == DF_ON &&
leftPartFunc->castToHash2PartitioningFunction() &&
!(child(0).getGroupAttr()->getCharacteristicOutputs().contains(
leftPartFunc->getPartialPartitioningKey())))
{
// Get the number of partitions from the left child.
Lng32 numOfPartitions = leftPartFunc->getCountOfPartitions();
// Create a new requirement for the right child and require a
// hash2 partitioning only (TRUE).
partReqForRight =
new (CmpCommon::statementHeap())
RequireApproximatelyNPartitions(0.0, numOfPartitions, TRUE);
}
RequirementGenerator rg (child(1),rppForMe);
// Remove any parent partitioning requirements, since we have
// already enforced this on the left child.
rg.removeAllPartitioningRequirements();
if ( getUnionForIF() && sppForChild->getPushDownProperty() )
{
// Add the push-down requirement based on left child's push
// down property.
//
// There is no need to enforce sorting order/partitioning etc.
// requirements.
rg.addPushDownRequirement(
sppForChild->getPushDownProperty()->makeRequirement());
} else {
// Now, add in the Union's partitioning requirement for the
// right child.
rg.addPartRequirement(partReqForRight);
// If there is a required order and/or arrangement, then they
// must be removed and replaced with requirements that are
// in terms of the child's valueids.
if (myContext->requiresOrder())
{
rg.removeSortKey();
rg.removeArrangement();
// Sort Order type was removed by the calls above, so
// get it again from the rpp
childSortOrderTypeReq = rppForMe->getSortOrderTypeReq();
// Convert any DP2_OR_ESP_NO_SORT requirement to ESP_NO_SORT,
// to avoid the possibility of one union child choosing a
// DP2 sort order type and the other union child choosing a
// ESP_NO_SORT sort order type, which are incompatible for union.
if (childSortOrderTypeReq == DP2_OR_ESP_NO_SORT_SOT)
childSortOrderTypeReq = ESP_NO_SORT_SOT;
else if (rppForMe->getDp2SortOrderPartReq() != NULL)
{
// Need to map any dp2SortOrderPartReq
NABoolean mapItUp = FALSE;
dp2SortOrderPartReq = rppForMe->getDp2SortOrderPartReq()->
copyAndRemap(getMap(childIndex),
mapItUp);
}
}
// If there was a required arrangement, then the left child
// chose a particular order. We need to take the sort key
// from the left child and translate it into the corresponding
// valueIds for the right child. We then require this order
// of the right child.
if (rppForMe->getArrangedCols() != NULL)
{
// First, we need to make sure the sort key from the left child
// contains all the expressions from the required arrangement
// and any sort key it was given, and only those expressions.
// (Taking inverse nodes into account, of course).
ValueIdSet reqArrangementForChild;
getMap(0).rewriteValueIdSetDown(*rppForMe->getArrangedCols(),
reqArrangementForChild);
ValueIdList leftSortKey;
// Is there also a required sort key?
if (rppForMe->getSortKey() != NULL)
{
// Initialize the left child sort key to the required sort key cols.
getMap(0).rewriteValueIdListDown(*rppForMe->getSortKey(),
leftSortKey);
}
synthUnionSortKeyFromChild(sppForChild->getSortKey(),
reqArrangementForChild,
leftSortKey);
ValueIdList unionSortKey;
// map left child sort key cols to their output ValueIdUnion
// equivalents
getMap(0).rewriteValueIdListUp(unionSortKey,
leftSortKey);
// Temporary fix - if the sorting order is different than the arrangement
// force the sort above Union. See comment in finalizeUnionSortKey
if (finalizeUnionSortKey(leftSortKey, 0, *rppForMe->getArrangedCols(),
unionSortKey) == FALSE)
            return NULL;
if (unionSortKey.entries() > 0) // is usually true
{
ValueIdList rightSortKey;
if (alternateRightChildOrderExpr().isEmpty())
{
// map output ValueIdUnions to their equivalent right
// child ValueIds
getMap(childIndex).rewriteValueIdListDown(unionSortKey,
rightSortKey);
}
else
{
//MV specific optimization refer to MVRefreshBuilder.cpp
// MultiTxnMavBuilder::buildUnionBetweenRangeAndIudBlocks()
getMap(childIndex).rewriteValueIdListDown(alternateRightChildOrderExpr(),
rightSortKey);
}
// Add the sort key from the left child, now translated
// to be in terms of the right child valueid's, as the
// required sort order for the right child.
rg.addSortKey(rightSortKey,
childSortOrderTypeReq,
dp2SortOrderPartReq);
}
}
// If there was only a required order then the left child must
// have picked this order, so we don't need to look
// at the sort key for the left child. Just translate
// the required order into the valueIds for the right child.
else if (rppForMe->getSortKey() != NULL)
{
ValueIdList newSortKey;
//++MV
if (alternateRightChildOrderExpr().isEmpty())
{
getMap(childIndex).rewriteValueIdListDown(*rppForMe->getSortKey(),
newSortKey);
}
else
{
//MV specific optimization refer to MVRefreshBuilder.cpp
// MultiTxnMavBuilder::buildUnionBetweenRangeAndIudBlocks()
getMap(childIndex).rewriteValueIdListDown(alternateRightChildOrderExpr(),
newSortKey);
}
rg.addSortKey(newSortKey,
childSortOrderTypeReq,
dp2SortOrderPartReq);
}
}
// Produce the requirements and make sure they are feasible.
if (rg.checkFeasibility())
{
// Produce the required physical properties.
rppForChild = rg.produceRequirement();
}
} // endif previous Context has an optimal solution
} // end if 2nd child context
// -------------------------------------------------------------------
// Create 2nd Context for the right child
// -------------------------------------------------------------------
else if (pws->getCountOfChildContexts() == 2)
{
childIndex = 1;
planNumber = 1;
RequirementGenerator rg(child(1),rppForMe);
// If there was a required order and/or arrangement, then we
// need to remove it, and replace it with a required order
// and/or arrangement that is expressed in terms of the child's
// ValueIds. This is because the requirement will be in terms
// of ValueIdUnions for the Union.
if (myContext->requiresOrder())
{
rg.removeSortKey();
rg.removeArrangement();
// Sort Order type was removed by the calls above, so
// get it again from the rpp
childSortOrderTypeReq = rppForMe->getSortOrderTypeReq();
// Convert any DP2_OR_ESP_NO_SORT requirement to ESP_NO_SORT,
// to avoid the possibility of one union child choosing a
// DP2 sort order type and the other union child choosing a
// ESP_NO_SORT sort order type, which are incompatible for union.
if (childSortOrderTypeReq == DP2_OR_ESP_NO_SORT_SOT)
childSortOrderTypeReq = ESP_NO_SORT_SOT;
else if (rppForMe->getDp2SortOrderPartReq() != NULL)
{
// Need to map any dp2SortOrderPartReq
NABoolean mapItUp = FALSE;
dp2SortOrderPartReq = rppForMe->getDp2SortOrderPartReq()->
copyAndRemap(getMap(childIndex),
mapItUp);
}
}
if (rppForMe->getArrangedCols() != NULL)
{
ValueIdSet newArrangedCols;
getMap(childIndex).rewriteValueIdSetDown(*rppForMe->getArrangedCols(),
newArrangedCols);
rg.addArrangement(newArrangedCols,
childSortOrderTypeReq,
dp2SortOrderPartReq);
}
if (rppForMe->getSortKey() != NULL)
{
ValueIdList newSortKey;
getMap(childIndex).rewriteValueIdListDown(*rppForMe->getSortKey(),
newSortKey);
rg.addSortKey(newSortKey,
childSortOrderTypeReq,
dp2SortOrderPartReq);
}
// If there is a required partitioning function, then we need
// to remove it, and replace it with a partitioning function
// that is expressed in terms of the child's ValueIds.
if (rppForMe->requiresPartitioning())
{
// remove the original requirement
rg.removeAllPartitioningRequirements();
// Rewrite parent req. part func part key in terms of the
// values that appear below this Union.
NABoolean mapItUp = FALSE;
PartitioningRequirement* parentPartReq =
rppForMe->getPartitioningRequirement();
PartitioningRequirement* parentPartReqRewritten =
parentPartReq->copyAndRemap(getMap(childIndex),mapItUp);
// add in the new, rewritten partitioning requirement
rg.addPartRequirement(parentPartReqRewritten);
}
// --------------------------------------------------------------------
// If this is a CPU or memory-intensive operator then add a
// requirement for a maximum number of partitions, unless
// that requirement would conflict with our parent's requirement.
//
// --------------------------------------------------------------------
if (okToAttemptESPParallelism(myContext,
pws,
childNumPartsRequirement,
childNumPartsAllowedDeviation,
numOfESPsForced))
{
if (NOT numOfESPsForced)
rg.makeNumOfPartsFeasible(childNumPartsRequirement,
&childNumPartsAllowedDeviation);
rg.addNumOfPartitions(childNumPartsRequirement,
childNumPartsAllowedDeviation);
} // end if ok to try parallelism
// Produce the requirements and make sure they are feasible.
if (rg.checkFeasibility())
{
// Produce the required physical properties.
rppForChild = rg.produceRequirement();
}
} // end if 3rd child context
// -------------------------------------------------------------------
// Create 2nd Context for the left child:
// Force the left child to have the same order as the
// right child. If there is a required order in my
// myContext, ensure that it is satisfied.
// -------------------------------------------------------------------
else if (pws->getCountOfChildContexts() == 3)
{
childIndex = 0;
planNumber = 1;
// ---------------------------------------------------------------
// Cost limit exceeded? Erase the Context from the workspace.
// Erasing the Context does not decrement the count of Contexts
// considered so far.
// ---------------------------------------------------------------
if (NOT pws->isLatestContextWithinCostLimit())
pws->eraseLatestContextFromWorkSpace();
childContext = pws->getChildContext(1,1);
// ---------------------------------------------------------------
// If the Context that was created for the right child has an
// optimal solution whose cost is within the specified cost
// limit, create the corresponding Context for the left child.
// ---------------------------------------------------------------
if((childContext != NULL) AND childContext->hasOptimalSolution())
{
const PhysicalProperty*
sppForChild = childContext->getPhysicalPropertyForSolution();
// ---------------------------------------------------------------
// spp should have been synthesized for child's optimal plan.
// ---------------------------------------------------------------
CMPASSERT(sppForChild != NULL);
PartitioningFunction* rightPartFunc =
sppForChild->getPartitioningFunction();
// Take the synthesized partitioning function from the right
// child, translate it to be in terms of the ValueIdUnions of
// the union, then translate this to be in terms of the valueids
// of the left child.
NABoolean mapItUp = TRUE;
PartitioningFunction* unionPartFunc =
rightPartFunc->copyAndRemap(getMap(1),mapItUp);
mapItUp = FALSE;
PartitioningFunction* leftPartFunc =
unionPartFunc->copyAndRemap(getMap(0),mapItUp);
// Now translate the partitioning function into a requirement
PartitioningRequirement* partReqForLeft =
leftPartFunc->makePartitioningRequirement();
// Try a new requirement (require left child of the UNION to be
// hash2 partitioned with the same number of partitions as the
// right child) when the partitioning key is not covered by the
// characteristic output AND if both children are hash2 partitioned
// with the same number of partitions. Please note that, in this
// scenario, a parallel UNION plan is considered only for hash2
// partitioned children and NOT for hash (hash1) and range
// partitioned children.
if ((CURRSTMT_OPTDEFAULTS->isSideTreeInsert() == FALSE) &&
CmpCommon::getDefault(COMP_BOOL_22) == DF_ON &&
rightPartFunc->castToHash2PartitioningFunction() &&
!(child(1).getGroupAttr()->getCharacteristicOutputs().contains(
rightPartFunc->getPartialPartitioningKey())))
{
// Get the number of partitions from the right child.
Lng32 numOfPartitions = rightPartFunc->getCountOfPartitions();
// Create a new requirement for the left child and require a
// hash2 partitioning only (TRUE).
partReqForLeft =
new (CmpCommon::statementHeap())
RequireApproximatelyNPartitions(0.0, numOfPartitions, TRUE);
}
RequirementGenerator rg (child(0),rppForMe);
// We have already applied the parent partitioning requirements
// to the right child, so no need to apply them to the left child.
rg.removeAllPartitioningRequirements();
// Now, add in the Union's partitioning requirement for the
// left child.
rg.addPartRequirement(partReqForLeft);
// If there is a required order and/or arrangement, then they
// must be removed and replaced with requirements that are
// in terms of the child's valueids.
if (myContext->requiresOrder())
{
rg.removeSortKey();
rg.removeArrangement();
// Sort Order type was removed by the calls above, so
// get it again from the rpp
childSortOrderTypeReq = rppForMe->getSortOrderTypeReq();
// Convert any DP2_OR_ESP_NO_SORT requirement to ESP_NO_SORT,
// to avoid the possibility of one union child choosing a
// DP2 sort order type and the other union child choosing a
// ESP_NO_SORT sort order type, which are incompatible for union.
if (childSortOrderTypeReq == DP2_OR_ESP_NO_SORT_SOT)
childSortOrderTypeReq = ESP_NO_SORT_SOT;
else if (rppForMe->getDp2SortOrderPartReq() != NULL)
{
// Need to map any dp2SortOrderPartReq
NABoolean mapItUp = FALSE;
dp2SortOrderPartReq = rppForMe->getDp2SortOrderPartReq()->
copyAndRemap(getMap(childIndex),
mapItUp);
}
}
// If there was a required arrangement, then the right child
// chose a particular order. We need to take the sort key
// from the right child and translate it into the corresponding
// valueIds for the left child. We then require this order
// of the left child.
if (rppForMe->getArrangedCols() != NULL)
{
// First, we need to make sure the sort key from the right child
// contains all the expressions from the required arrangement
// and any sort key it was given, and only those expressions.
// (Taking inverse nodes into account, of course).
ValueIdSet reqArrangementForChild;
getMap(1).rewriteValueIdSetDown(*rppForMe->getArrangedCols(),
reqArrangementForChild);
ValueIdList rightSortKey;
// Is there also a required sort key?
if (rppForMe->getSortKey() != NULL)
{
// Initialize the right child sort key to the required sort key cols.
getMap(1).rewriteValueIdListDown(*rppForMe->getSortKey(),
rightSortKey);
}
synthUnionSortKeyFromChild(sppForChild->getSortKey(),
reqArrangementForChild,
rightSortKey);
ValueIdList unionSortKey;
// map right child sort key cols to their output ValueIdUnion
// equivalents
getMap(1).rewriteValueIdListUp(unionSortKey,
rightSortKey);
// Temporary fix - if the sorting order is different than the arrangement
// force the sort above Union - See comment in finalizeUnionSortKey
if (finalizeUnionSortKey(rightSortKey, 1, *rppForMe->getArrangedCols(),
unionSortKey) == FALSE)
return NULL;
if (unionSortKey.entries() > 0) // is usually true
{
ValueIdList leftSortKey;
// map output ValueIdUnions to their equivalent left
// child ValueIds
getMap(childIndex).rewriteValueIdListDown(unionSortKey,
leftSortKey);
// Add the sort key from the right child, now translated
// to be in terms of the left child valueid's, as the
// required sort order for the left child.
rg.addSortKey(leftSortKey,
childSortOrderTypeReq,
dp2SortOrderPartReq);
}
}
// If there was only a required order then the right child must
// have picked this order, so we don't need to look
// at the sort key for the right child. Just translate
// the required order into the valueIds for the left child.
else if (rppForMe->getSortKey() != NULL)
{
ValueIdList newSortKey;
//++MV
if (alternateRightChildOrderExpr().isEmpty())
{
getMap(childIndex).rewriteValueIdListDown(*rppForMe->getSortKey(),
newSortKey);
}
else
{
// MV specific optimization refer to MVRefreshBuilder.cpp
// MultiTxnMavBuilder::buildUnionBetweenRangeAndIudBlocks()
getMap(childIndex).rewriteValueIdListDown(alternateRightChildOrderExpr(),
newSortKey);
}
rg.addSortKey(newSortKey,
childSortOrderTypeReq,
dp2SortOrderPartReq);
}
// Produce the requirements and make sure they are feasible.
if (rg.checkFeasibility())
{
// Produce the required physical properties.
rppForChild = rg.produceRequirement();
}
} // endif previous Context has an optimal solution
} // end if 4th child context
// -------------------------------------------------------------------
// Create a Context using the partitioning requirement that was
// generated for the current plan.
// -------------------------------------------------------------------
if (rppForChild != NULL)
{
// ---------------------------------------------------------------
// Compute the cost limit to be applied to the child.
// ---------------------------------------------------------------
CostLimit* costLimit = computeCostLimit(myContext,pws);
// ---------------------------------------------------------------
// Get a Context for optimizing the child.
// Search for an existing Context in the CascadesGroup to which
// the child belongs that requires the same properties as those
// in rppForChild. Reuse it, if found. Otherwise, create a new
// Context that contains rppForChild as the required physical
// properties.
// ----------------------------------------------------------------
result = shareContext(childIndex, rppForChild,
myContext->getInputPhysicalProperty(),
costLimit,
myContext, myContext->getInputLogProp());
}
else
result = NULL;
// -------------------------------------------------------------------
// Remember the cases for which a Context could not be generated,
// or store the context that was generated.
// -------------------------------------------------------------------
pws->storeChildContext(childIndex, planNumber, result);
pws->incPlanChildCount();
} // end while loop
return result;
} // MergeUnion::createContextForAChild()
//<pb>
NABoolean MergeUnion::findOptimalSolution(Context* myContext,
PlanWorkSpace* pws)
{
// Plan # is only an output param, initialize it to an impossible value.
Lng32 planNumber = -1;
NABoolean hasOptSol = pws->findOptimalSolution(planNumber);
if (hasOptSol)
{
// Set the sort order in the union node from the sort key
// in the synthesized physical properties. The union sort key
// is also set when the union physical properties are synthesized,
// but this is only valid if spp is synthesized AFTER the
// optimal plan is chosen. We must make sure that the union
// sort key reflects the sort key of the union plan that was
    // actually chosen, not just the last one to have its
// properties synthesized.
CascadesPlan* myOptimalPlan = myContext->getPlan();
PhysicalProperty* myOptimalSPP = myOptimalPlan->getPhysicalProperty();
// Make sure SPP is not null. Since we always synthesize properties
// early now so we can use them during costing, this should always
// be true.
if (myOptimalSPP != NULL)
setSortOrder(myOptimalSPP->getSortKey());
}
return hasOptSol;
} // MergeUnion::findOptimalSolution()
//<pb>
//==============================================================================
// Synthesize physical properties for merge union operator's current plan
// extracted from a specified context.
//
// Input:
// myContext -- specified context containing this operator's current plan.
//
// planNumber -- plan's number within the plan workspace. Used for
// synthesizing partitioning functions.
//
// Output:
// none
//
// Return:
// Pointer to this operator's synthesized physical properties.
//
//==============================================================================
PhysicalProperty*
MergeUnion::synthPhysicalProperty(const Context* myContext,
const Lng32 planNumber,
PlanWorkSpace *pws)
{
const ReqdPhysicalProperty* rppForMe =
myContext->getReqdPhysicalProperty();
const PhysicalProperty* const sppOfLeftChild =
myContext->getPhysicalPropertyOfSolutionForChild(0);
const PhysicalProperty* const sppOfRightChild =
myContext->getPhysicalPropertyOfSolutionForChild(1);
SortOrderTypeEnum unionSortOrderType = NO_SOT;
PartitioningFunction* unionDp2SortOrderPartFunc = NULL;
// ----------------------------------------------------------------
// Merge Unions have two potential plans--see member
// function MergeUnion::createContextForAChild(). In plan 0, the
// left child is first; in plan 1, the right child is first.
// We utilize this information when synthesizing the union sort
// key if there was a required arrangement. We also utilize this
// information when synthesizing the union partitioning function.
// ----------------------------------------------------------------
CMPASSERT(planNumber == 0 || planNumber == 1);
if (rppForMe->getArrangedCols() != NULL)
{
ValueIdList unionSortKey;
// We must synthesize the union sort key from the child that was
// optimized first. This is because sometimes, the required
// arrangement expressions are equivalent to each other on
// one side of the union, but not the other side. Any duplicate
// arrangement expressions are only reflected in the child
// sort key once, so when we map the child sort key columns
// back to the union sort key the order of the duplicate
// arrangement expressions in the union sort key is arbitrary,
// so long as they do not appear before the corresponding
// child sort key entry. If we optimized the child with the
// duplicate arrangement expressions first, then the order
// that this child chooses will be the order of the other
// child, and thus the union. But if we optimized the child
// with no duplicate arrangement expressions first, then this
// child may have chosen a quite different order for these
// arrangement expressions. Thus the union sort key is
// based on the child sort key of the child that was
// optimized first.
if (planNumber == 0)
{
// Get the union sort key from the left child's sort key.
ValueIdSet reqArrangementForChild;
getMap(0).rewriteValueIdSetDown(*rppForMe->getArrangedCols(),
reqArrangementForChild);
ValueIdList leftSortKey;
// Is there also a required sort key?
if (rppForMe->getSortKey() != NULL)
{
// Initialize the left child sort key to the required sort key cols.
getMap(0).rewriteValueIdListDown(*rppForMe->getSortKey(),
leftSortKey);
}
synthUnionSortKeyFromChild(sppOfLeftChild->getSortKey(),
reqArrangementForChild,
leftSortKey);
// map left child sort key cols to their output ValueIdUnion equivalents
getMap(0).rewriteValueIdListUp(unionSortKey,
leftSortKey);
}
else
{
// Get the union sort key from the right child's sort key.
ValueIdSet reqArrangementForChild;
getMap(1).rewriteValueIdSetDown(*rppForMe->getArrangedCols(),
reqArrangementForChild);
ValueIdList rightSortKey;
// Is there also a required sort key?
if (rppForMe->getSortKey() != NULL)
{
// Initialize the right child sort key to the required sort key cols.
getMap(1).rewriteValueIdListDown(*rppForMe->getSortKey(),
rightSortKey);
}
synthUnionSortKeyFromChild(sppOfRightChild->getSortKey(),
reqArrangementForChild,
rightSortKey);
// map right child sort key cols to their output ValueIdUnion equivalents
getMap(1).rewriteValueIdListUp(unionSortKey,
rightSortKey);
}
// remember the child sort order in the union node
setSortOrder(unionSortKey);
} // end if there was a required arrangement
else if (rppForMe->getSortKey() != NULL)
{
// If there wasn't a required arrangement but there was a required
// order, then remember the required order in the union node.
setSortOrder(*rppForMe->getSortKey());
}
// ---------------------------------------------------------------------
// Call the default implementation (RelExpr::synthPhysicalProperty())
// to synthesize the properties on the number of cpus.
// ---------------------------------------------------------------------
PhysicalProperty* sppTemp = RelExpr::synthPhysicalProperty(myContext, -1, pws);
PartitioningFunction* myPartFunc;
PartitioningFunction* leftPartFunc;
PartitioningFunction* rightPartFunc;
const SearchKey* myPartSearchKey;
// if any child has random partitioning then union has random partitioning
leftPartFunc = sppOfLeftChild->getPartitioningFunction();
rightPartFunc = sppOfRightChild->getPartitioningFunction();
if (leftPartFunc && leftPartFunc->isARandomPartitioningFunction())
{
myPartFunc = leftPartFunc;
myPartSearchKey = sppOfLeftChild->getPartSearchKey();
unionSortOrderType = NO_SOT;
unionDp2SortOrderPartFunc = NULL;
}
else if (rightPartFunc && rightPartFunc->isARandomPartitioningFunction())
{
myPartFunc = rightPartFunc;
myPartSearchKey = sppOfRightChild->getPartSearchKey();
unionSortOrderType = NO_SOT;
unionDp2SortOrderPartFunc = NULL;
}
else // neither child uses random partitioning.
{
CMPASSERT(leftPartFunc != NULL AND rightPartFunc != NULL);
// check if union children have the same partitioning
// Rewrite partitioning key in terms of values that are above Union
NABoolean mapItUp = TRUE;
leftPartFunc = leftPartFunc->copyAndRemap(getMap(0), mapItUp);
rightPartFunc = rightPartFunc->copyAndRemap(getMap(1), mapItUp);
const PushDownRequirement* pdreq = rppForMe->getPushDownRequirement();
NABoolean comparable = ( leftPartFunc->
comparePartFuncToFunc(*rightPartFunc) == SAME )
OR
                           (pdreq && pdreq->castToPushDownCSRequirement());
    // do the union children have the same partitioning?
if (comparable)
{
        // yes, the children have the same partitioning, or this is the
        // special case of pushing down CS; reflect it as the union's
        // partitioning scheme.
if (planNumber == 0)
{
myPartFunc = leftPartFunc;
myPartSearchKey = sppOfLeftChild->getPartSearchKey();
}
else
{
myPartFunc = rightPartFunc;
myPartSearchKey = sppOfRightChild->getPartSearchKey();
}
}
else // union children have incompatible partitioning
{
// union partitioning is random and has:
// no sort, no arrangement, no partitioning key predicates, nothing
ValueIdSet partKey;
ItemExpr *randNum =
new(CmpCommon::statementHeap()) RandomNum(NULL, TRUE);
randNum->synthTypeAndValueId();
partKey.insert(randNum->getValueId());
myPartFunc = new(CmpCommon::statementHeap()) Hash2PartitioningFunction
(partKey, partKey, leftPartFunc->getCountOfPartitions(),
const_cast<NodeMap*>(leftPartFunc->getNodeMap()));
myPartFunc->createPartitioningKeyPredicates();
myPartSearchKey = NULL;
unionSortOrderType = NO_SOT;
unionDp2SortOrderPartFunc = NULL;
}
}
if (NOT sortOrder_.isEmpty())
{
// It's possible one or both children did not synthesize a sort
// key at all, if all sort key columns were equal to constants.
// If both did not synthesize a sort key, then arbitrarily set the
// sort order type to ESP_NO_SORT. If only one child did not synthesize
// a sort key, then synthesize the sort order type from the other child.
NABoolean leftChildNotOrdered =
sppOfLeftChild->getSortOrderType() == NO_SOT OR
NOT myContext->getPlan()->getContextForChild(0)->requiresOrder();
NABoolean rightChildNotOrdered =
sppOfRightChild->getSortOrderType() == NO_SOT OR
NOT myContext->getPlan()->getContextForChild(1)->requiresOrder();
if ( leftChildNotOrdered AND rightChildNotOrdered )
unionSortOrderType = ESP_NO_SORT_SOT;
else if ( leftChildNotOrdered )
unionSortOrderType = sppOfRightChild->getSortOrderType();
else if ( rightChildNotOrdered )
unionSortOrderType = sppOfLeftChild->getSortOrderType();
else if (sppOfLeftChild->getSortOrderType() !=
sppOfRightChild->getSortOrderType())
{
// The sort order types of the children are not the same,
// so we must choose a sort order type for the union.
// This should only happen if the required sort order type was ESP.
// Set the sort order type to ESP_VIA_SORT in this case since
// we did do a sort in producing at least part of the sort key.
CMPASSERT(rppForMe->getSortOrderTypeReq() == ESP_SOT);
// Shouldn't be any synthesized dp2SortOrderPartFunc in this case.
CMPASSERT(sppOfLeftChild->getDp2SortOrderPartFunc() == NULL);
// Only synthesized sort order types that satisfy an ESP sort
// order type requirement are ESP_NO_SORT and ESP_VIA_SORT.
CMPASSERT(((sppOfLeftChild->getSortOrderType() == ESP_NO_SORT_SOT) OR
(sppOfLeftChild->getSortOrderType() == ESP_VIA_SORT_SOT)) AND
((sppOfRightChild->getSortOrderType() == ESP_NO_SORT_SOT) OR
(sppOfRightChild->getSortOrderType() == ESP_VIA_SORT_SOT)));
// Finally, set the sort order type
unionSortOrderType = ESP_VIA_SORT_SOT;
} // end if sort order types of the children were not the same
else
{
// Sort order types of the children are the same, so
// arbitrarily get it from the left child.
unionSortOrderType = sppOfLeftChild->getSortOrderType();
if (sppOfLeftChild->getDp2SortOrderPartFunc() != NULL)
{
NABoolean mapItUp = TRUE;
unionDp2SortOrderPartFunc =
sppOfLeftChild->getDp2SortOrderPartFunc()->copyAndRemap(getMap(0),
mapItUp);
}
}
} // end if sort order is not empty
//++MV
// MV specific optimization refer to MVRefreshBuilder.cpp
// MultiTxnMavBuilder::buildUnionBetweenRangeAndIudBlocks()
ValueIdList unionSortKey;
if (alternateRightChildOrderExpr().isEmpty())
{
unionSortKey = sortOrder_;
}
else
{
// Because we replaced the order of the right child with the
// alternate order we now need to pretend that the union
// is doing the requested order which we can get from the left child
// map left child sort key cols to their output ValueIdUnion equivalents
getMap(0).rewriteValueIdListUp(unionSortKey,
sppOfLeftChild->getSortKey());
}
PhysicalProperty* sppForMe =
new (CmpCommon::statementHeap()) PhysicalProperty(
unionSortKey,
unionSortOrderType,
unionDp2SortOrderPartFunc,
myPartFunc,
sppOfLeftChild->getPlanExecutionLocation(), //||opt
// synth from both child props
combineDataSources(sppOfLeftChild->getDataSourceEnum(),
sppOfRightChild->getDataSourceEnum()),
sppOfLeftChild->getIndexDesc(),
myPartSearchKey,
sppOfLeftChild->getPushDownProperty());
//--MV
sppForMe->setCurrentCountOfCPUs(sppTemp->getCurrentCountOfCPUs());
// remove anything that's not covered by the group attributes
sppForMe->enforceCoverageByGroupAttributes (getGroupAttr()) ;
delete sppTemp;
return sppForMe;
} // MergeUnion::synthPhysicalProperty()
//<pb>
// -----------------------------------------------------------------------
// Helper function for the physical exprs that derive from GroupByAgg
// -----------------------------------------------------------------------
// -----------------------------------------------------------------------
// GroupByAgg::rppAreCompatibleWithOperator()
// The following method is used by GroupBySplitRule::topMatch(),
// SortGroupBy::topMatch(), HashGroupBy::topMatch() and
// PhysShortCutGroupBy::topMatch() to determine whether it is worthwhile
// to fire those rules.
// -----------------------------------------------------------------------
NABoolean GroupByAgg::rppAreCompatibleWithOperator
(const ReqdPhysicalProperty* const rppForMe) const
{
PartitioningRequirement* partReq = rppForMe->getPartitioningRequirement();
// ---------------------------------------------------------------------
// The groupbys are labeled as partial leaf etc. to see from which
// part of the groupby split rule they were generated, if at all.
  // Use these labels for suppressing unwanted configurations.
// ---------------------------------------------------------------------
if (rppForMe->executeInDP2())
{
      // allow an in-CS full GroupBy when pushed down to DP2, if a
// push-down requirement is available.
if ( isinBlockStmt() )
{
// do not allow push-down if no push-down req is available.
// i.e., the aggr must be below CS which can generate push-down.
// This applies to all forms of groupbys.
const PushDownRequirement* pdr = rppForMe->getPushDownRequirement();
if ( pdr == NULL OR NOT PushDownCSRequirement::isInstanceOf(pdr) )
return FALSE;
}
// we only want leaf groupbys to execute in DP2
if (isAPartialGroupByRoot() OR
isAPartialGroupByNonLeaf())
return FALSE;
}
else
{
// allowing a partialGBLeaf in ESP is a good idea if:
// Parallelism is possible.
// The partial GB Leaf has no partitioning requirement, so there
// must be an exchange in between the partial GB Leaf and the
// partial GB Root. This requirement exists because we do not
// want the partial GB Leaf and partial GB Root to be in the
// same process.
// The partial GB Leaf has not been pushed below a TSJ. If the
// partial GB Leaf has been pushed below a TSJ, it is probably
// best for us to wait until it gets to DP2.
NABoolean partialGBLeafInESPOK =
(rppForMe->getCountOfPipelines() > 1) AND
(partReq == NULL) AND
NOT gbAggPushedBelowTSJ();
// we don't want leaf groupbys to execute outside of DP2
if (isAPartialGroupByLeaf() AND
NOT partialGBLeafInESPOK)
return FALSE; // partial GB leaf in ESP not allowed
}
// ---------------------------------------------------------------------
// Compute the number of pipelines that are available for executing
// the plan for this GroupBy.
// ---------------------------------------------------------------------
Lng32 numberOfPipelines = rppForMe->getCountOfPipelines();
// ---------------------------------------------------------------------
// A partial groupby that is a non leaf performs an intermediate
// consolidation of partial groups/aggregates. The heuristic that
// is applied below requires such an intermediate consolidator to
// employ parallel execution (numberOfPipelines > 1) or to produce
// multiple partitions. The intention is to prevent such an
// intermediate consolidator from becoming a bottleneck, which is
// an inhibitor for scalability.
// ---------------------------------------------------------------------
if (isAPartialGroupByNonLeaf() AND (numberOfPipelines == 1))
return FALSE;
// ---------------------------------------------------------------------
// Ensure that the parent partitioning requirement is compatible
// with the GB.
// ---------------------------------------------------------------------
if (partReq != NULL)
{
if (partReq->isRequirementFullySpecified())
{
if (groupExpr().isEmpty())
{
        // Scalar aggregates cannot execute with ESP parallelism,
// except as part of a ReplicateNoBroadcast.
if (NOT(partReq->isRequirementExactlyOne() OR
partReq->isRequirementReplicateNoBroadcast()))
return FALSE;
}
else
{
// If there is a group by expression, then the required
// partitioning key columns must be a subset of the gb cols.
// Contains will always return TRUE if the required part key
// is the empty set.
if (NOT groupExpr().contains(partReq->getPartitioningKey()))
return FALSE;
}
}
else // fuzzy requirement
{
CMPASSERT(partReq->isRequirementApproximatelyN());
Lng32 reqPartCount = partReq->getCountOfPartitions();
if (groupExpr().isEmpty())
{
// Scalar aggregates cannot execute in parallel, so the
// requirement must allow a single partition part func.
#pragma nowarn(1506) // warning elimination
if ((reqPartCount != ANY_NUMBER_OF_PARTITIONS) AND
((reqPartCount -
(reqPartCount * partReq->castToRequireApproximatelyNPartitions()->
getAllowedDeviation())) >
EXACTLY_ONE_PARTITION)
)
return FALSE;
#pragma warn(1506) // warning elimination
}
else if (partReq->getPartitioningKey().entries() > 0)
{
ValueIdSet myPartKey = groupExpr();
myPartKey.intersectSet(partReq->getPartitioningKey());
// If the required partitioning key columns and the GB cols
// are disjoint, then the requirement must allow a single
// partition part func, because this will be the only way
// to satisfy both the requirement and the GB cols.
#pragma nowarn(1506) // warning elimination
if (myPartKey.isEmpty() AND
(reqPartCount != ANY_NUMBER_OF_PARTITIONS) AND
((reqPartCount -
(reqPartCount * partReq->castToRequireApproximatelyNPartitions()->
getAllowedDeviation())) >
EXACTLY_ONE_PARTITION))
return FALSE;
#pragma warn(1506) // warning elimination
}
} // end if fuzzy requirement
} // end if there was a partitioning requirement for the GB
return TRUE;
} // GroupByAgg::rppAreCompatibleWithOperator()
//<pb>
Context* GroupByAgg::createContextForAChild(Context* myContext,
PlanWorkSpace* pws,
Lng32& childIndex)
{
// ---------------------------------------------------------------------
// If one Context has been generated for each child, return NULL
// to signal completion.
// ---------------------------------------------------------------------
if (pws->getCountOfChildContexts() == getArity())
return NULL;
childIndex = 0;
Lng32 planNumber = 0;
const ReqdPhysicalProperty* rppForMe = myContext->getReqdPhysicalProperty();
Lng32 childNumPartsRequirement = ANY_NUMBER_OF_PARTITIONS;
float childNumPartsAllowedDeviation = 0.0;
NABoolean numOfESPsForced = FALSE;
RequirementGenerator rg(child(0),rppForMe);
PartitioningRequirement* preq = rppForMe->getPartitioningRequirement();
// ---------------------------------------------------------------------
// If this is not a partial groupby then the child must be partitioned
// on the groupby columns (even if there are none!).
// By adding an empty part key when there are no GB cols, this will
// force a scalar aggregate to not execute in parallel.
//
// For pushed down groupbys, do not add the groupExpr() if it is empty.
// Otherwise, we will get a singlePartFunc, which will lead to
// no-match for partitioned-tables down in the tree.
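  // For example, a scalar aggregate such as SELECT MAX(x) FROM t has an
  // empty groupExpr(); adding it as an (empty) partitioning key is what
  // forces such a plan down to a single partition.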
// ---------------------------------------------------------------------
if ( (isNotAPartialGroupBy() OR isAPartialGroupByRoot()) )
{
if ( NOT rppForMe->executeInDP2() OR NOT isinBlockStmt() OR
( ( NOT groupExpr().isEmpty() ) AND
( ( rppForMe->getCountOfPipelines() > 1 ) OR
( CmpCommon::getDefault(COMP_BOOL_36) == DF_OFF )
)
)
)
rg.addPartitioningKey(groupExpr());
}
// ---------------------------------------------------------------------
// Add the order or arrangement requirements needed for this groupby
// ---------------------------------------------------------------------
addArrangementAndOrderRequirements(rg);
// ---------------------------------------------------------------------
// If this is a CPU or memory-intensive operator or if we could benefit
// from parallelism by performing multiple sort groupbys on different
// ranges at the same time, then add a requirement for a maximum number
// of partitions, unless that requirement would conflict with our
// parent's requirement.
//
// Don't add a required number of partitions if we must execute in DP2,
// because you can't change the number of DP2s.
// Also don't specify a required number of partitions for a scalar
// aggregate, because a scalar aggregate cannot execute in parallel.
// ---------------------------------------------------------------------
if ( !isAPartialGroupByLeaf1() && !isAPartialGroupByLeaf2() &&
okToAttemptESPParallelism(myContext,
pws,
childNumPartsRequirement,
childNumPartsAllowedDeviation,
numOfESPsForced) AND
NOT rppForMe->executeInDP2() AND
NOT groupExpr().isEmpty())
{
if (NOT numOfESPsForced)
rg.makeNumOfPartsFeasible(childNumPartsRequirement,
&childNumPartsAllowedDeviation);
rg.addNumOfPartitions(childNumPartsRequirement,
childNumPartsAllowedDeviation);
    // Cannot allow a skewBuster hash join to co-exist with a full groupby
    // or a partial groupby root in the same ESPs, because a group
// (in a non-skewbuster plan) can be broken into pieces across
// different ESPs.
// The BR case (i.e. the right child of a skew buster join is a full
// groupby) is dealt with in RelExpr::rppRequiresEnforcer().
//
// The UD case (i.e., the left child of a skew buster join is a full
// groupby) is dealt with through the partitioning key requirement
// (i.e., the partitioning keys added by statement
// rg.addPartitioningKey(groupExpr()) above will NEVER be satisfied
// by a SkewedDataPartitioningFunction).
//
// A full groupBy node is prevented from being the immediate parent of
// the skew-buster join due to the reason similar to the UD case.
} // end if ok to try parallelism
// ---------------------------------------------------------------------
// Done adding all the requirements together, now see whether it worked
// and give up if it is not possible to satisfy them
// ---------------------------------------------------------------------
if (NOT rg.checkFeasibility())
return NULL;
// ---------------------------------------------------------------------
// Compute the cost limit to be applied to the child.
// ---------------------------------------------------------------------
CostLimit* costLimit = computeCostLimit(myContext, pws);
// ---------------------------------------------------------------------
// Get a Context for optimizing the child.
// Search for an existing Context in the CascadesGroup to which the
// child belongs that requires the same properties as those in
// rppForChild. Reuse it, if found. Otherwise, create a new Context
// that contains rppForChild as the required physical properties..
// ---------------------------------------------------------------------
Context* result = shareContext(
childIndex,
rg.produceRequirement(),
myContext->getInputPhysicalProperty(),
costLimit,
myContext,
myContext->getInputLogProp());
// try to create a nice context for the child of groupBy below the ROOT
// that requires repartitioning for grouping. The check for the parent
  // might need to be more elaborate; for now it's just a flag in the
  // corresponding group.
if ( (CmpCommon::getDefault(COMP_BOOL_33) == DF_ON) AND
(result->getReqdPhysicalProperty()->getCountOfPipelines() > 1) AND
(NOT groupExpr().isEmpty()) AND
(isNotAPartialGroupBy() OR isAPartialGroupByRoot()) AND
(*CURRSTMT_OPTGLOBALS->memo)[myContext->getGroupId()]->isBelowRoot()
)
{
result->setNice(TRUE);
}
// ---------------------------------------------------------------------
// Store the Context for the child in the PlanWorkSpace.
// ---------------------------------------------------------------------
pws->storeChildContext(childIndex, planNumber, result);
return result;
} // GroupByAgg::createContextForAChild()
//<pb>
void GroupByAgg::addArrangementAndOrderRequirements(
RequirementGenerator &rg)
{
// the default implementation is to do nothing
// (works fine for hash groupby)
}
//<pb>
// -----------------------------------------------------------------------
// member functions for class SortGroupBy
// -----------------------------------------------------------------------
// -----------------------------------------------------------------------
// SortGroupBy::costMethod()
// Obtain a pointer to a CostMethod object providing access
// to the cost estimation functions for nodes of this class.
// -----------------------------------------------------------------------
CostMethod*
SortGroupBy::costMethod() const
{
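  // Lazily allocate a single cost method object per thread, on the CLI
  // globals heap (rather than the statement heap) so that it outlives
  // any single statement.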
static THREAD_P CostMethodSortGroupBy *m = NULL;
if (m == NULL)
m = new (GetCliGlobals()->exCollHeap()) CostMethodSortGroupBy();
return m;
} // SortGroupBy::costMethod()
//<pb>
void SortGroupBy::addArrangementAndOrderRequirements(
RequirementGenerator &rg)
{
// A sort groupby needs an arrangement by the grouping
  // columns; it needs nothing if there are no grouping
// columns.
// Once we have "partial" arrangement requirements we
// could indicate those for partial groupbys.
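  // Illustration (hypothetical query): for GROUP BY a, b the child must
  // deliver rows clustered on the set {a, b}; any sort order covering
  // both columns (a,b or b,a) satisfies the arrangement, which is a
  // weaker requirement than a specific sort key.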
if (NOT groupExpr().isEmpty())
{
// Shouldn't/Can't add a sort order type requirement
// if we are in DP2
if (rg.getStartRequirements()->executeInDP2())
rg.addArrangement(groupExpr(),NO_SOT);
else
rg.addArrangement(groupExpr(),ESP_SOT);
}
}
//<pb>
//==============================================================================
// Synthesize physical properties for Sort-group-by operator's current plan
// extracted from a specified context.
//
// Input:
// myContext -- specified context containing this operator's current plan.
//
// planNumber -- plan's number within the plan workspace. Used optionally for
// synthesizing partitioning functions but unused in this
// derived version of synthPhysicalProperty().
//
// Output:
// none
//
// Return:
// Pointer to this operator's synthesized physical properties.
//
//==============================================================================
PhysicalProperty*
SortGroupBy::synthPhysicalProperty(const Context* myContext,
const Lng32 planNumber,
PlanWorkSpace *pws)
{
const PhysicalProperty* const sppOfChild =
myContext->getPhysicalPropertyOfSolutionForChild(0);
// ---------------------------------------------------------------------
// Call the default implementation (RelExpr::synthPhysicalProperty())
// to synthesize the properties on the number of cpus.
// ---------------------------------------------------------------------
PhysicalProperty* sppTemp = RelExpr::synthPhysicalProperty(myContext,
planNumber,
pws);
PhysicalProperty* sppForMe =
new (CmpCommon::statementHeap())
PhysicalProperty(sppOfChild->getSortKey(),
sppOfChild->getSortOrderType(),
sppOfChild->getDp2SortOrderPartFunc(),
sppOfChild->getPartitioningFunction(),
sppOfChild->getPlanExecutionLocation(),
sppOfChild->getDataSourceEnum(),
sppOfChild->getIndexDesc(),
sppOfChild->getPartSearchKey(),
sppOfChild->getPushDownProperty());
sppForMe->setCurrentCountOfCPUs(sppTemp->getCurrentCountOfCPUs());
// aggregate stores entire result before it gets sent on
if (groupExpr().isEmpty())
sppForMe->setDataSourceEnum(SOURCE_TRANSIENT_TABLE);
// remove anything that's not covered by the group attributes
sppForMe->enforceCoverageByGroupAttributes (getGroupAttr()) ;
DP2CostDataThatDependsOnSPP * dp2CostInfo = (DP2CostDataThatDependsOnSPP *)
sppOfChild->getDP2CostThatDependsOnSPP();
if (sppForMe->executeInDP2())
sppForMe->setDP2CostThatDependsOnSPP( dp2CostInfo);
delete sppTemp;
return sppForMe;
} // SortGroupBy::synthPhysicalProperty()
//<pb>
// -----------------------------------------------------------------------
// member functions for class PhysShortCutGroupBy
// -----------------------------------------------------------------------
// -----------------------------------------------------------------------
// PhysShortCutGroupBy::costMethod()
// Obtain a pointer to a CostMethod object providing access
// to the cost estimation functions for nodes of this class.
// -----------------------------------------------------------------------
CostMethod*
PhysShortCutGroupBy::costMethod() const
{
static THREAD_P CostMethodShortCutGroupBy *m = NULL;
if (m == NULL)
m = new (GetCliGlobals()->exCollHeap()) CostMethodShortCutGroupBy();
return m;
} // PhysShortCutGroupBy::costMethod()
//<pb>
void PhysShortCutGroupBy::addArrangementAndOrderRequirements(
RequirementGenerator &rg)
{
  // The ShortCutGroupBy requires an order instead of an arrangement;
  // right now it only uses a single value id.
  // ShortCutGroupBy handles AnyTrue exprs as well as min-max expressions.
  // opt_for_min indicates "x > anytrue y", and as a result we want y
  // sorted ascending; this also works for min(y), so opt_for_min is
  // TRUE for min(y) as well.
  // In the same way opt_for_max works with "x < anytrue y" and max(y),
  // and we ask for an inverse order on y (i.e., descending order).
ValueIdList sk;
if (opt_for_min_)
{
// the order of rhs becomes the optimization goal of the child
sk.insert(rhs_anytrue_->getValueId());
}
else if (opt_for_max_)
{
// the order of the inverse of the rhs becomes the optimization goal
ItemExpr *inverseCol =
new(CmpCommon::statementHeap()) InverseOrder(rhs_anytrue_);
inverseCol->synthTypeAndValueId();
sk.insert(inverseCol->getValueId());
}
const ValueIdSet &aggrs = aggregateExpr();
CMPASSERT(groupExpr().isEmpty());
CMPASSERT(NOT aggrs.isEmpty());
ValueId aggr_valueid = aggrs.init();
// "next" is probably called here for its side effect because we know
// "aggrs.next()" returns true (ie, aggrs is not empty).
// coverity[unchecked_value]
aggrs.next(aggr_valueid);
ItemExpr *item_expr = aggr_valueid.getItemExpr();
OperatorTypeEnum op = item_expr->getOperatorType();
if(op==ITM_ANY_TRUE)
{
// Shouldn't/Can't add a sort order type requirement
// if we are in DP2
if (rg.getStartRequirements()->executeInDP2())
rg.addSortKey(sk,NO_SOT);
else
{
if (CmpCommon::getDefault(COSTING_SHORTCUT_GROUPBY_FIX) != DF_ON)
rg.addSortKey(sk,ESP_SOT);
else
rg.addSortKey(sk,ESP_NO_SORT_SOT);
}
}
else
{
// Shouldn't/Can't add a sort order type requirement
// if we are in DP2
if (rg.getStartRequirements()->executeInDP2())
rg.addSortKey(sk,NO_SOT);
else
rg.addSortKey(sk,ESP_NO_SORT_SOT);
}
}
//<pb>
//==============================================================================
// Synthesize physical properties for Any-true-group-by operator's current plan
// extracted from a specified context.
//
// Input:
// myContext -- specified context containing this operator's current plan.
//
// planNumber -- plan's number within the plan workspace. Used optionally for
// synthesizing partitioning functions but unused in this
// derived version of synthPhysicalProperty().
//
// Output:
// none
//
// Return:
// Pointer to this operator's synthesized physical properties.
//
//==============================================================================
PhysicalProperty*
PhysShortCutGroupBy::synthPhysicalProperty(const Context* myContext,
const Lng32 planNumber,
PlanWorkSpace *pws)
{
ValueIdList emptySortKey;
const PhysicalProperty* sppOfChild = myContext->
getPhysicalPropertyOfSolutionForChild(0);
// ---------------------------------------------------------------------
// Call the default implementation (RelExpr::synthPhysicalProperty())
// to synthesize the properties on the number of cpus.
// ---------------------------------------------------------------------
PhysicalProperty* sppTemp = RelExpr::synthPhysicalProperty(myContext,
planNumber,
pws);
PhysicalProperty* sppForMe =
new (CmpCommon::statementHeap())
PhysicalProperty(emptySortKey,
NO_SOT, // no sort key, so no sort order type either
NULL, // no dp2SortOrderPartFunc either
sppOfChild->getPartitioningFunction(),
sppOfChild->getPlanExecutionLocation(),
sppOfChild->getDataSourceEnum(),
sppOfChild->getIndexDesc(),
sppOfChild->getPartSearchKey(),
sppOfChild->getPushDownProperty());
sppForMe->setCurrentCountOfCPUs(sppTemp->getCurrentCountOfCPUs());
// aggregate stores entire result before it gets sent on
if (groupExpr().isEmpty())
sppForMe->setDataSourceEnum(SOURCE_TRANSIENT_TABLE);
DP2CostDataThatDependsOnSPP * dp2CostInfo = (DP2CostDataThatDependsOnSPP *)
sppOfChild->getDP2CostThatDependsOnSPP();
if (sppForMe->executeInDP2())
sppForMe->setDP2CostThatDependsOnSPP( dp2CostInfo);
// remove anything that's not covered by the group attributes
sppForMe->enforceCoverageByGroupAttributes (getGroupAttr()) ;
delete sppTemp;
return sppForMe;
} // PhysShortCutGroupBy::synthPhysicalProperty()
//<pb>
// -----------------------------------------------------------------------
// member functions for class HashGroupBy
// -----------------------------------------------------------------------
NABoolean HashGroupBy::isBigMemoryOperator(const PlanWorkSpace* pws,
const Lng32 planNumber)
{
const Context* context = pws->getContext();
double memoryLimitPerCPU = CURRSTMT_OPTDEFAULTS->getMemoryLimitPerCPU();
  // ---------------------------------------------------------------------
  // With no memory constraints, an HGB operator could use as much memory
  // as needed to store all the groups plus any aggregates. The hash
  // key and the chain pointer add 8 more bytes for each group.
  // ---------------------------------------------------------------------
const ReqdPhysicalProperty* rppForMe = context->getReqdPhysicalProperty();
// Start off assuming that the operator will use all available CPUs.
Lng32 cpuCount = rppForMe->getCountOfAvailableCPUs();
PartitioningRequirement* partReq = rppForMe->getPartitioningRequirement();
const PhysicalProperty* spp = context->getPlan()->getPhysicalProperty();
Lng32 numOfStreams;
// If the physical properties are available, then this means we
// are on the way back up the tree. Get the actual level of
// parallelism from the spp to determine if the number of cpus we
// are using are less than the maximum number available.
if (spp != NULL)
{
PartitioningFunction* partFunc = spp->getPartitioningFunction();
numOfStreams = partFunc->getCountOfPartitions();
if (numOfStreams < cpuCount)
cpuCount = numOfStreams;
}
else
if ((partReq != NULL) AND
(partReq->getCountOfPartitions() != ANY_NUMBER_OF_PARTITIONS))
{
// If there is a partitioning requirement, then this may limit
// the number of CPUs that can be used.
numOfStreams = partReq->getCountOfPartitions();
if (numOfStreams < cpuCount)
cpuCount = numOfStreams;
}
EstLogPropSharedPtr inLogProp = context->getInputLogProp();
const double probeCount =
MAXOF(1.,inLogProp->getResultCardinality().value());
const double myGroupCount = getGroupAttr()->
intermedOutputLogProp(inLogProp)->getResultCardinality().value();
const double rowsPerCpu = MAXOF(1.,(myGroupCount / cpuCount));
const double rowsPerCpuPerProbe = MAXOF(1.,(rowsPerCpu / probeCount));
const Lng32 myRowLength =
getGroupAttr()->getCharacteristicOutputs().getRowLength();
const Lng32 extRowLength = myRowLength + 8;
const double fileSizePerCpu = ((rowsPerCpuPerProbe * extRowLength) / 1024.);
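  // Note (assumption for illustration): the division by 1024. expresses
  // fileSizePerCpu in KB; the comparison at the end of this method
  // presumes memoryLimitPerCPU uses the same units.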
if (spp != NULL &&
CmpCommon::getDefault(COMP_BOOL_51) == DF_ON
)
{
CurrentFragmentBigMemoryProperty * bigMemoryProperty =
new (CmpCommon::statementHeap())
CurrentFragmentBigMemoryProperty();
((PhysicalProperty *)spp)->
setBigMemoryEstimationProperty(bigMemoryProperty);
bigMemoryProperty->setCurrentFileSize(fileSizePerCpu);
bigMemoryProperty->setOperatorType(getOperatorType());
// get cumulative file size of the fragment; get the child spp??
const PhysicalProperty *childSpp =
context->getPhysicalPropertyOfSolutionForChild(0);
if (childSpp != NULL)
{
CurrentFragmentBigMemoryProperty * memProp =
(CurrentFragmentBigMemoryProperty *)
((PhysicalProperty *)childSpp)->getBigMemoryEstimationProperty();
if (memProp != NULL)
{
double childCumulativeMemSize = memProp->getCumulativeFileSize();
bigMemoryProperty->incrementCumulativeMemSize(childCumulativeMemSize);
memoryLimitPerCPU -= childCumulativeMemSize;
}
}
}
return (fileSizePerCpu >= memoryLimitPerCPU);
} // HashGroupBy::isBigMemoryOperator
//<pb>
// -----------------------------------------------------------------------
// HashGroupBy::costMethod()
// Obtain a pointer to a CostMethod object providing access
// to the cost estimation functions for nodes of this class.
// -----------------------------------------------------------------------
CostMethod*
HashGroupBy::costMethod() const
{
static THREAD_P CostMethodHashGroupBy *m = NULL;
if (m == NULL)
m = new (GetCliGlobals()->exCollHeap()) CostMethodHashGroupBy();
return m;
} // HashGroupBy::costMethod()
//<pb>
//==============================================================================
// Synthesize physical properties for Hash-group-by operator's current plan
// extracted from a specified context.
//
// Input:
// myContext -- specified context containing this operator's current plan.
//
// planNumber -- plan's number within the plan workspace. Used optionally for
// synthesizing partitioning functions but unused in this
// derived version of synthPhysicalProperty().
//
// Output:
// none
//
// Return:
// Pointer to this operator's synthesized physical properties.
//
//==============================================================================
PhysicalProperty*
HashGroupBy::synthPhysicalProperty(const Context* myContext,
const Lng32 planNumber,
PlanWorkSpace *pws)
{
ValueIdList emptySortKey;
const PhysicalProperty* sppOfChild =
myContext->getPhysicalPropertyOfSolutionForChild(0);
// ---------------------------------------------------------------------
// Call the default implementation (RelExpr::synthPhysicalProperty())
// to synthesize the properties on the number of cpus.
// ---------------------------------------------------------------------
PhysicalProperty* sppTemp = RelExpr::synthPhysicalProperty(myContext,
planNumber,
pws);
// ---------------------------------------------------------------------
// The output of a hash groupby is not sorted.
// It produces a hash partitioned set of rows (groups).
// ---------------------------------------------------------------------
PhysicalProperty* sppForMe =
new (CmpCommon::statementHeap())
PhysicalProperty(emptySortKey,
NO_SOT,
NULL,
sppOfChild->getPartitioningFunction(),
sppOfChild->getPlanExecutionLocation(),
sppOfChild->getDataSourceEnum(),
sppOfChild->getIndexDesc(),
sppOfChild->getPartSearchKey());
sppForMe->setCurrentCountOfCPUs(sppTemp->getCurrentCountOfCPUs());
// aggregate stores entire result before it gets sent on,
// non-partial hash groupby also stores entire result
if (groupExpr().isEmpty() OR
isNotAPartialGroupBy() OR isAPartialGroupByRoot())
sppForMe->setDataSourceEnum(SOURCE_TRANSIENT_TABLE);
// remove anything that's not covered by the group attributes
// (NB: no real need to call this method, since the sortKey is empty, and that's
// the only thing that this method checks currently)
sppForMe->enforceCoverageByGroupAttributes (getGroupAttr()) ;
DP2CostDataThatDependsOnSPP * dp2CostInfo = (DP2CostDataThatDependsOnSPP *)
sppOfChild->getDP2CostThatDependsOnSPP();
if (sppForMe->executeInDP2())
sppForMe->setDP2CostThatDependsOnSPP( dp2CostInfo);
delete sppTemp;
return sppForMe;
} // HashGroupBy::synthPhysicalProperty()
//<pb>
// -----------------------------------------------------------------------
// Member functions for class RelRoot
// -----------------------------------------------------------------------
// -----------------------------------------------------------------------
// RelRoot::costMethod()
// Obtain a pointer to a CostMethod object providing access
// to the cost estimation functions for nodes of this class.
// -----------------------------------------------------------------------
CostMethod*
RelRoot::costMethod() const
{
static THREAD_P CostMethodRelRoot *m = NULL;
if (m == NULL)
m = new (GetCliGlobals()->exCollHeap()) CostMethodRelRoot();
return m;
} // RelRoot::costMethod()
//<pb>
Context* RelRoot::createContextForAChild(Context* myContext,
PlanWorkSpace* pws,
Lng32& childIndex)
{
childIndex = 0;
Lng32 planNumber = pws->getCountOfChildContexts();
const ReqdPhysicalProperty* rppForMe =
myContext->getReqdPhysicalProperty();
const ReqdPhysicalProperty* rppForChild;
PartitioningRequirement* partReq;
PlanExecutionEnum loc;
ValueIdList* sortKey = NULL;
SortOrderTypeEnum sortOrderTypeReq = NO_SOT;
// Get the value from the defaults table that specifies whether
// the optimizer should attempt ESP parallelism.
NADefaults &defs = ActiveSchemaDB()->getDefaults();
DefaultToken attESPPara = CURRSTMT_OPTDEFAULTS->attemptESPParallelism();
  // Heuristics for nice context: we want to detect the group that is just
  // below the root. For example, if a group by is just below the root, we
  // want it to create a nice context (with repartitioning on grouping
  // columns) that will not be propagated below the top join.
(*CURRSTMT_OPTGLOBALS->memo)[child(0).getGroupId()]->setBelowRoot(TRUE);
// ---------------------------------------------------------------------
// If we've created all the necessary child contexts, we're done.
// ---------------------------------------------------------------------
if (((planNumber == 1)
AND ((attESPPara != DF_SYSTEM)
#ifndef NDEBUG
OR getenv("SINGLE_ROOT_CONTEXT") != NULL
#endif
)) OR
(planNumber > 1))
return NULL;
// ---------------------------------------------------------------------
// Decide how many cpus are available. There are four default table
// entries that have an influence on this value:
//
// - ESP parallelism can be switched off altogether by setting
// the ATTEMPT_ESP_PARALLELISM defaults table entry to "OFF".
// - The PARALLEL_NUM_ESPS default attribute is either the keyword "SYSTEM",
// or the positive number of ESPs to use.
// - If PARALLEL_NUM_ESPS is "SYSTEM",
// then we get the number of nodes in the cluster and multiply it by
// the number of processors per (SMP) node by reading the
// DEF_NUM_NODES_IN_ACTIVE_CLUSTERS and DEF_NUM_SMP_CPUS entries.
// - If PARALLEL_NUM_ESPS is a positive number, ensure that it does not
// exceed the number of available CPUs in the system.
// ---------------------------------------------------------------------
Lng32 countOfCPUs = DEFAULT_SINGLETON;
Lng32 pipelinesPerCPU = DEFAULT_SINGLETON;
// Regardless of how the ATTEMPT_ESP_PARALLELISM CQD is set, some operations
// like LRU always execute under ESPs for partitioned tables.
// These are indicated by mustUseESPs() == TRUE.
// Note that some conditions below may still override this.
if ( ((CURRSTMT_OPTDEFAULTS->attemptESPParallelism() != DF_OFF) OR mustUseESPs())
// QSTUFF
  // we don't support parallel execution in the get_next
// protocol yet - but for streams there should not be
// a problem.
&& NOT getGroupAttr()->isEmbeddedUpdateOrDelete()
&& NOT getGroupAttr()->isStream()
&& NOT getTriggersList()
&& NOT containsOnStatementMV()
// QSTUFF
)
{
// Get the maximum number of processes per cpu for a given operator
// from the defaults table.
pipelinesPerCPU = 1;
// -------------------------------------------------------------------
// Extract number of cpus per node and number of nodes in cluster from
// defaults table.
// -------------------------------------------------------------------
NABoolean canAdjustDoP = TRUE;
NABoolean isASON = FALSE;
NABoolean fakeEnv = FALSE;
countOfCPUs = defs.getTotalNumOfESPsInCluster(fakeEnv);
// Do not enable the Adaptive Segmentation functionality for
// parallel label (DDL) operations or for a parallel extract
// operation, or fast loading into traf tables. This will
// prevent AS from reducing the max degree of
// parallelism for these operations.
OperatorTypeEnum childOpType = child(0).getLogExpr()->getOperatorType();
// Decide if it is a fast trafodion load query
NABoolean isFastLoadIntoTrafodion = FALSE;
if ( childOpType == REL_UNARY_INSERT ) {
RelExpr* c0 = child(0).getLogExpr();
Insert* ins = (Insert*)c0;
isFastLoadIntoTrafodion = ins->getIsTrafLoadPrep();
}
if ((CmpCommon::getDefault(ASG_FEATURE) == DF_ON) &&
(childOpType != REL_EXE_UTIL) &&
(numExtractStreams_ == 0) &&
!isFastLoadIntoTrafodion &&
!mustUseESPs())
{
countOfCPUs =
CURRSTMT_OPTDEFAULTS->getMaximumDegreeOfParallelism();
// Adaptive segmentation is ON
isASON = TRUE;
}
  // Get the value as a token code; no errmsg if it is not a keyword.
if (CmpCommon::getDefault(PARALLEL_NUM_ESPS, 0) != DF_SYSTEM)
{
// -------------------------------------------------------------------
// A value for PARALLEL_NUM_ESPS exists. Use it for the count of cpus
// but don't exceed the number of cpus available in the cluster.
// On SQ, include the number of ESPs allowed per cpu.
// -------------------------------------------------------------------
canAdjustDoP = FALSE;
}
// final adjustment to countOfCPUs and pipelinesPerCPU - special cases
//
// Check CQD EXE_PARALLEL_PURGEDATA and do the #esp = #partition parallel
// plan when the CQD is not set to OFF. See ExeUtilFastDelete::bindNode()
// on legal values for the CQD.
RelExpr* x = child(0).getGroupAttr()->getLogExprForSynthesis();
if (CmpCommon::getDefault(EXE_PARALLEL_PURGEDATA) != DF_OFF)
{
if ( x && x->getOperatorType() == REL_UNARY_DELETE &&
((Delete*)x)->isFastDelete() // fast delete is turned on
// for DELETE USING PURGEDATA FROM <t>
)
{
PartitioningFunction *pf = ((Delete*)x)->getScanIndexDesc()
->getPartitioningFunction();
const NodeMap* np;
if ( pf && (np = pf->getNodeMap()) && np->getNumEntries() > 1 ) {
// set countOfCPUs to the number of partitions
UInt32 partns = np->getNumEntries();
countOfCPUs = partns;
pipelinesPerCPU = 1;
CURRSTMT_OPTDEFAULTS->setRequiredESPs(partns);
CURRSTMT_OPTDEFAULTS->setRequiredScanDescForFastDelete(
((Delete*)x)->getScanIndexDesc());
canAdjustDoP = FALSE;
}
}
}
// for multi-commit DELETE (expressed as DELETE WITH MULTI COMMIT FROM <t>)
if ( containsLRU() )
{
RelExpr* x = child(0).getGroupAttr()->getLogExprForSynthesis();
if ( x && x->getOperatorType() == REL_EXE_UTIL )
{
PartitioningFunction *pf = ((ExeUtilExpr*)x)->getUtilTableDesc()
->getClusteringIndex()
->getPartitioningFunction();
const NodeMap* np;
UInt32 partns = 1;
if ( pf && (np = pf->getNodeMap()) ) {
// set countOfCPUs to the number of partitions
partns = np->getNumEntries();
}
countOfCPUs = partns;
pipelinesPerCPU = 1;
CURRSTMT_OPTDEFAULTS->setRequiredESPs(partns);
canAdjustDoP = FALSE;
}
}
if (isFastLoadIntoTrafodion)
{
Insert *ins = (Insert*)(x->castToRelExpr());
PartitioningFunction *pf = ins->getTableDesc()
->getClusteringIndex()
->getPartitioningFunction();
const NodeMap* np;
Lng32 partns = 1;
if ( pf && (np = pf->getNodeMap()) )
{
partns = np->getNumEntries();
}
if (partns>1)
{
countOfCPUs = partns;
pipelinesPerCPU = 1;
CURRSTMT_OPTDEFAULTS->setRequiredESPs(partns);
canAdjustDoP = FALSE;
}
}
  // for side-tree INSERT (expressed as INSERT USING SIDEINSERTS INTO <t>
  // <source> )
  // Set countOfCPUs to the number of partitions of the inner table
  // if the # of partitions is not a multiple of countOfCPUs
  // (e.g., 8 vs 34). The key is to avoid having more than one TSJ ESP
  // send rows to the same inner table partition.
NADefaults &defs = ActiveSchemaDB()->getDefaults();
Lng32 ocrControl = defs.getAsLong(OCR_FOR_SIDETREE_INSERT);
// setting for OCR_FOR_SIDETREE_INSERT:
  // 0: OCR repartition is disabled
  // 1: OCR repartition is enabled and will be done if the DoP cannot
  //    divide the # of partitions of the inner table evenly
  // 2: OCR repartition is enabled and will always be done
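  // Worked example (hypothetical numbers): with countOfCPUs = 8 and an
  // inner table of 34 partitions, 34 % 8 != 0, so under ocrControl == 1
  // the DoP is reset to 34, giving each inner partition exactly one
  // TSJ ESP.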
if ( ocrControl > 0 && x && x->getOperatorType() == REL_UNARY_INSERT )
{
Insert *ins = (Insert*)(x->castToRelExpr());
if ( ins->getInsertType() == Insert::VSBB_LOAD )
{
PartitioningFunction *pf = ins->getTableDesc()
->getClusteringIndex()
->getPartitioningFunction();
const NodeMap* np;
Lng32 partns = 1;
if ( pf && (np = pf->getNodeMap()) ) {
partns = np->getNumEntries();
}
if (
( ocrControl == 1 &&
(
((countOfCPUs < partns) && (partns % countOfCPUs != 0)) ||
(countOfCPUs > partns)
)
) ||
ocrControl == 2
)
{
countOfCPUs = partns;
pipelinesPerCPU = 1;
CURRSTMT_OPTDEFAULTS->setRequiredESPs(partns);
canAdjustDoP = FALSE;
}
}
}
Lng32 minBytesPerESP = defs.getAsLong(HBASE_MIN_BYTES_PER_ESP_PARTITION);
// To be replaced later by a different CQD
if ( CmpCommon::getDefault(HBASE_RANGE_PARTITIONING) == DF_OFF ||
isASON ||
mustUseESPs())
canAdjustDoP = FALSE;
// Adjust DoP based on table size, if possible
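  // Worked example (hypothetical numbers): a largest table of 1 GB with
  // HBASE_MIN_BYTES_PER_ESP_PARTITION = 64 MB yields
  // esps = ceiling(2^30 / (64 * 2^20)) = 16, so a countOfCPUs of 32
  // would be lowered to 16, while a countOfCPUs of 8 would be kept.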
if ( canAdjustDoP ) {
QueryAnalysis* qAnalysis = CmpCommon::statement()->getQueryAnalysis();
TableAnalysis * tAnalysis = qAnalysis->getLargestTable();
if ( tAnalysis ) {
CostScalar tableSize = tAnalysis->getCardinalityOfBaseTable() *
tAnalysis->getRecordSizeOfBaseTable() ;
CostScalar espsInCS = tableSize / CostScalar(minBytesPerESP);
Lng32 esps = (Lng32)(espsInCS.getCeiling().getValue());
if ( esps < 0 )
esps = countOfCPUs;
else {
if ( esps < 1 )
countOfCPUs = 1;
else {
if ( esps < countOfCPUs )
countOfCPUs = esps;
}
}
pipelinesPerCPU = 1;
}
}
// --------------------------------------
// Ensure cpu count is a positive number.
// --------------------------------------
countOfCPUs = MIN_ONE(countOfCPUs);
#ifdef _DEBUG
if ((CmpCommon::getDefault( NSK_DBG ) == DF_ON) &&
(CmpCommon::getDefault( NSK_DBG_GENERIC ) == DF_ON))
{
CURRCONTEXT_OPTDEBUG->stream() << endl << "countOfCPUs= " << countOfCPUs << endl;
}
#endif
} // end if parallel execution is enabled
  // Set up the range partition flag in OptDefaults when the query
  // is a count query
  /*
  if ( child(0)->getOperatorType() == REL_GROUPBY ) {
    CURRSTMT_OPTDEFAULTS->setDoRangePartitionForHbase(TRUE);
  }
  */
// Create a "teaser" context if the user is letting the optimizer
// decide heuristically whether parallelism is a good idea or not
// for each operator.
if ((planNumber == 0) AND
(attESPPara == DF_SYSTEM)
#ifndef NDEBUG
// set this env var to disable the "teaser" context
AND getenv("SINGLE_ROOT_CONTEXT") == NULL
#endif
)
{
// Start off with a context that is less stringent than what the
// root node needs. Don't impose any partitioning or location
// requirements other than a requirement to be outside of
// DP2. Why do we do this: we hope that the optimal solution
// will actually execute in the master and that it has one
// partition. If that is the case, the second context (see a few
// lines below) will cause no work, it will just take the
// optimal solution of the first context. The effect is that we
// do much less work, since we save all the partitioning
// enforcers. This trick works only if the optimal solution is
// not parallel in the node below the root, but that is the case
// where optimization cost matters most.
// RelRoot::currentPlanIsAcceptable makes sure that we don't
// use the solution of this context.
partReq = NULL;
loc = EXECUTE_IN_MASTER_OR_ESP;
}
else
{
// The real context: the root node executes in the master
// executor, always wants 1 data stream
if (CmpCommon::getDefault(COMP_BOOL_82) == DF_ON) {
partReq=new(CmpCommon::statementHeap())RequireExactlyOnePartition(TRUE);
} else {
partReq = new(CmpCommon::statementHeap())RequireExactlyOnePartition();
}
loc = EXECUTE_IN_MASTER;
}
// get the order by list
ValueIdList orderByList(reqdOrder_);
// Are there any required ordering columns?
if (orderByList.entries() > 0)
{
// get input constant values
ValueIdSet inputVals = getGroupAttr()->getCharacteristicInputs();
// remove any input constant values from the required ordering,
// since ordering by a constant is a no-op
orderByList.removeCoveredExprs(inputVals);
// Will need to remember the simplified valueId's of the expressions
// in the order by list.
ValueIdList simpleOrderByList;
CollIndex i = 0;
while (i < orderByList.entries())
{
// Need to check if we have seen the simplified version
// of this valueId before. If we have, we need to remove it,
// because we don't want or need to ask to order on an
// expression that has the same simplified valueId as an
// expression we have already seen in the list.
ValueId svid = orderByList[i].getItemExpr()->
simplifyOrderExpr()->getValueId();
if (simpleOrderByList.contains(svid))
orderByList.removeAt(i);
else
{
// If we haven't seen it before, then remember it.
simpleOrderByList.insert(svid);
// Only increment the array counter "i" if we didn't remove
// the current entry, because if we did remove the current
// entry, "i" will already point to the next entry in the list!
i++;
}
} // end while more required order columns
} // end if there was a required order
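  // Net effect of the loop above, on a hypothetical query: for
  // ORDER BY a, a+1, b the expressions a and a+1 may simplify to the
  // same ValueId (they imply the same row order), in which case the
  // second entry is dropped and the effective required order is (a, b).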
if (orderByList.entries() > 0) // got a required ordering ?
{
sortKey = new(CmpCommon::statementHeap()) ValueIdList(orderByList);
sortOrderTypeReq = ESP_SOT;
}
// ---------------------------------------------------------------------
// A root node is never partitioned and never excludes properties.
// It requires an order if the query had an order by clause.
// ---------------------------------------------------------------------
if (rppForMe)
rppForChild = new (CmpCommon::statementHeap())
ReqdPhysicalProperty(
*rppForMe,
NULL, // no arrangement of rows required
sortKey, // user-specified sort order
sortOrderTypeReq,
NULL, // never a dp2SortOrderPartReq at the root
partReq,
loc,
countOfCPUs,
pipelinesPerCPU);
else
rppForChild = new (CmpCommon::statementHeap())
ReqdPhysicalProperty(
NULL, // no arrangement of rows required
sortKey, // user-specified sort order
sortOrderTypeReq,
NULL, // never a dp2SortOrderPartReq at the root
FALSE, // no logical order or arrangement
partReq,
NULL, // no logical part. requirement
loc,
countOfCPUs,
pipelinesPerCPU,
CURRSTMT_OPTDEFAULTS->getDefaultCostWeight(),
CURRSTMT_OPTDEFAULTS->getDefaultPerformanceGoal(),
NULL, NULL);
// Support for CONTROL QUERY SHAPE
if (reqdShape_ AND reqdShape_->isCutOp())
reqdShape_ = NULL;
// ---------------------------------------------------------------------
// Compute the cost limit to be applied to the child.
// ---------------------------------------------------------------------
CostLimit* costLimit = computeCostLimit(myContext, pws);
// ---------------------------------------------------------------------
// Get a Context for optimizing the child.
// Search for an existing Context in the CascadesGroup to which the
// child belongs that requires the same properties as those in
// rppForChild. Reuse it, if found. Otherwise, create a new Context
// that contains rppForChild as the required physical properties.
// The child context's solution has to match the required shape
// specified in a CONTROL QUERY SHAPE statement, if applicable.
// ---------------------------------------------------------------------
Context* result = shareContext(childIndex,
rppForChild,
myContext->getInputPhysicalProperty(),
costLimit,
myContext,
myContext->getInputLogProp(),
reqdShape_);
// ---------------------------------------------------------------------
// Store the Context for the child in the PlanWorkSpace.
// ---------------------------------------------------------------------
pws->storeChildContext(childIndex, planNumber, result);
return result;
} // RelRoot::createContextForAChild()
NABoolean RelRoot::currentPlanIsAcceptable(Lng32 planNo,
const ReqdPhysicalProperty* const rppForMe) const
{
DefaultToken attESPPara = CURRSTMT_OPTDEFAULTS->attemptESPParallelism();
// Don't consider plan 0 when ATTEMPT_ESP_PARALLELISM is set
// to level SYSTEM, it has the wrong requirements!
// See RelRoot::createContextForAChild for details
if ((planNo == 0) AND
(attESPPara == DF_SYSTEM)
#ifndef NDEBUG
// set this env var to disable the "teaser" context
AND getenv("SINGLE_ROOT_CONTEXT") == NULL
#endif
)
return FALSE;
else
return TRUE;
} // RelRoot::currentPlanIsAcceptable()
//<pb>
//==============================================================================
// Synthesize physical properties for RelRoot operator's current plan
// extracted from a specified context.
//
// Input:
// myContext -- specified context containing this operator's current plan.
//
// planNumber -- plan's number within the plan workspace. Used optionally for
// synthesizing partitioning functions but unused in this
// derived version of synthPhysicalProperty().
//
// Output:
// none
//
// Return:
// Pointer to this operator's synthesized physical properties.
//
//==============================================================================
PhysicalProperty*
RelRoot::synthPhysicalProperty(const Context* myContext,
const Lng32 planNumber,
PlanWorkSpace *pws)
{
const PhysicalProperty* sppForChild =
myContext->getPhysicalPropertyOfSolutionForChild(0);
// ---------------------------------------------------------------------
// Call the default implementation (RelExpr::synthPhysicalProperty())
// to synthesize the properties on the number of cpus.
// ---------------------------------------------------------------------
PhysicalProperty* sppTemp = RelExpr::synthPhysicalProperty(myContext,
planNumber,
pws);
PhysicalProperty* sppForMe =
new (CmpCommon::statementHeap())
PhysicalProperty(NULL,
EXECUTE_IN_MASTER,
sppForChild->getDataSourceEnum());
sppForMe->setCurrentCountOfCPUs(sppTemp->getCurrentCountOfCPUs());
// remove anything that's not covered by the group attributes
sppForMe->enforceCoverageByGroupAttributes (getGroupAttr()) ;
delete sppTemp;
return sppForMe;
} // RelRoot::synthPhysicalProperty()
//<pb>
// -----------------------------------------------------------------------
// Member functions for class MapValueIds
// -----------------------------------------------------------------------
// -----------------------------------------------------------------------
// MapValueIds::costMethod()
// Obtain a pointer to a CostMethod object providing access
// to the cost estimation functions for nodes of this class.
// -----------------------------------------------------------------------
CostMethod*
MapValueIds::costMethod() const
{
static THREAD_P CostMethodFixedCostPerRow *m = NULL;
if (m == NULL)
m = new (GetCliGlobals()->exCollHeap())
CostMethodFixedCostPerRow( 0.001 // constant cost for the node
, 0.0 // cost per child row
, 0.0 // cost per output row
);
return m;
}
// ---------------------------------------------------------------------
// Performs mapping on the partitioning function, from the
// MapValueIds node to the child.
// ---------------------------------------------------------------------
PartitioningFunction* MapValueIds::mapPartitioningFunction(
const PartitioningFunction* partFunc,
NABoolean rewriteForChild0)
{
NABoolean mapItUp = FALSE;
return partFunc->copyAndRemap(map_,mapItUp);
} // end MapValueIds::mapPartitioningFunction()
//<pb>
Context* MapValueIds::createContextForAChild(Context* myContext,
PlanWorkSpace* pws,
Lng32& childIndex)
{
childIndex = 0;
Lng32 planNumber = 0;
const ReqdPhysicalProperty* rppForMe =
myContext->getReqdPhysicalProperty();
const ReqdPhysicalProperty* rppForChild;
ValueIdSet * arrangedColsReqForChild = NULL;
ValueIdList * sortKeyReqForChild = NULL;
PartitioningRequirement* partReqForMe =
rppForMe->getPartitioningRequirement();
PartitioningRequirement* partReqForChild = partReqForMe;
PartitioningRequirement* dp2SortOrderPartReqForMe =
rppForMe->getDp2SortOrderPartReq();
PartitioningRequirement* dp2SortOrderPartReqForChild =
dp2SortOrderPartReqForMe;
// ---------------------------------------------------------------------
// If one Context has been generated for each child, return NULL
// to signal completion.
// ---------------------------------------------------------------------
if (pws->getCountOfChildContexts() == getArity())
return NULL;
// ---------------------------------------------------------------------
// now map all components of the required props that do use value ids
// ---------------------------------------------------------------------
if (rppForMe->getArrangedCols() != NULL)
{
arrangedColsReqForChild = new(CmpCommon::statementHeap()) ValueIdSet();
map_.rewriteValueIdSetDown(*rppForMe->getArrangedCols(),
*arrangedColsReqForChild);
}
if (rppForMe->getSortKey() != NULL)
{
sortKeyReqForChild = new(CmpCommon::statementHeap()) ValueIdList();
map_.rewriteValueIdListDown(*rppForMe->getSortKey(),
*sortKeyReqForChild);
}
if ((partReqForMe != NULL) AND
(NOT partReqForMe->getPartitioningKey().isEmpty()))
{
// -------------------------------------------------------------------
// Rewrite the partitioning key in terms of the values that appear
// below this MapValueIds.
// -------------------------------------------------------------------
NABoolean mapItUp = FALSE;
partReqForChild =
partReqForMe->copyAndRemap(map_, mapItUp);
}
if ((dp2SortOrderPartReqForMe != NULL) AND
(NOT dp2SortOrderPartReqForMe->getPartitioningKey().isEmpty()))
{
// -------------------------------------------------------------------
// Rewrite the partitioning key in terms of the values that appear
// below this MapValueIds.
// -------------------------------------------------------------------
NABoolean mapItUp = FALSE;
dp2SortOrderPartReqForChild =
dp2SortOrderPartReqForMe->copyAndRemap(map_, mapItUp);
}
rppForChild = new (CmpCommon::statementHeap())
ReqdPhysicalProperty(*rppForMe,
arrangedColsReqForChild,
sortKeyReqForChild,
rppForMe->getSortOrderTypeReq(),
dp2SortOrderPartReqForChild,
partReqForChild);
// ---------------------------------------------------------------------
// Compute the cost limit to be applied to the child.
// ---------------------------------------------------------------------
CostLimit* costLimit = computeCostLimit(myContext, pws);
// ---------------------------------------------------------------------
// Get a Context for optimizing the child.
// Search for an existing Context in the CascadesGroup to which the
// child belongs that requires the same properties as those in
// rppForChild. Reuse it, if found. Otherwise, create a new Context
  // that contains rppForChild as the required physical properties.
// ---------------------------------------------------------------------
Context* result = shareContext(childIndex, rppForChild,
myContext->getInputPhysicalProperty(),
costLimit,
myContext, myContext->getInputLogProp());
// ---------------------------------------------------------------------
// Store the Context for the child in the PlanWorkSpace.
// ---------------------------------------------------------------------
pws->storeChildContext(childIndex, planNumber, result);
return result;
} // MapValueIds::createContextForAChild()
//<pb>
//==============================================================================
// Synthesize physical properties for MapValueIds operator's current plan
// extracted from a specified context.
//
// Input:
// myContext -- specified context containing this operator's current plan.
//
// planNumber -- plan's number within the plan workspace. Used optionally for
// synthesizing partitioning functions but unused in this
// derived version of synthPhysicalProperty().
//
// Output:
// none
//
// Return:
// Pointer to this operator's synthesized physical properties.
//
//==============================================================================
PhysicalProperty*
MapValueIds::synthPhysicalProperty(const Context* myContext,
const Lng32 planNumber,
PlanWorkSpace *pws)
{
const PhysicalProperty* const sppOfChild =
myContext->getPhysicalPropertyOfSolutionForChild(0);
PartitioningFunction* actualPartFunc;
// ---------------------------------------------------------------------
// Rewrite the partitioning keys in terms of the values that appear
// above this MapValueIds.
// ---------------------------------------------------------------------
NABoolean mapItUp = TRUE;
ValueIdList newSortKey;
PartitioningFunction* oldDp2SortOrderPartFunc =
sppOfChild->getDp2SortOrderPartFunc();
PartitioningFunction* newDp2SortOrderPartFunc =
oldDp2SortOrderPartFunc;
// ---------------------------------------------------------------------
// map the child value ids to the output value ids
// ---------------------------------------------------------------------
map_.rewriteValueIdListUp(newSortKey,sppOfChild->getSortKey());
if ((oldDp2SortOrderPartFunc != NULL) AND
(NOT oldDp2SortOrderPartFunc->getPartitioningKey().isEmpty()))
{
newDp2SortOrderPartFunc =
oldDp2SortOrderPartFunc->copyAndRemap(map_,mapItUp);
}
actualPartFunc =
sppOfChild->getPartitioningFunction()->copyAndRemap(map_, mapItUp);
// ---------------------------------------------------------------------
// Call the default implementation (RelExpr::synthPhysicalProperty())
// to synthesize the properties on the number of cpus.
// ---------------------------------------------------------------------
PhysicalProperty* sppTemp = RelExpr::synthPhysicalProperty(myContext,
planNumber,
pws);
PhysicalProperty* sppForMe =
new (CmpCommon::statementHeap())
PhysicalProperty(*sppOfChild,
newSortKey,
sppOfChild->getSortOrderType(),
newDp2SortOrderPartFunc,
actualPartFunc);
sppForMe->setCurrentCountOfCPUs(sppTemp->getCurrentCountOfCPUs());
// remove anything that's not covered by the group attributes
sppForMe->enforceCoverageByGroupAttributes (getGroupAttr()) ;
delete sppTemp;
return sppForMe;
} // MapValueIds::synthPhysicalProperty()
//<pb>
// -----------------------------------------------------------------------
// Helper methods for leaf operators
// -----------------------------------------------------------------------
// -----------------------------------------------------------------------
// Helper method for synthDP2PhysicalProperty.
// -----------------------------------------------------------------------
static void
computeDP2CostDataThatDependsOnSPP(
PartitioningFunction &physicalPartFunc //in/out
,DP2CostDataThatDependsOnSPP &dp2CostInfo //out
,const IndexDesc& indexDesc
,const ScanKey& partKey
,GroupAttributes &scanGroupAttr
,const Context& myContext
,NAMemory *heap
,const RelExpr& scan
)
{
// -----------------------------------------------------------------------
// Estimate CPUs executing DP2s:
// -----------------------------------------------------------------------
NADefaults &defs = ActiveSchemaDB()->getDefaults();
NABoolean isHbaseTable = indexDesc.getPrimaryTableDesc()->getNATable()->isHbaseTable();
NABoolean fakeEnv = FALSE; // do not care
CostScalar totalCPUsExecutingDP2s = defs.getTotalNumOfESPsInCluster(fakeEnv);
if(!isHbaseTable)
{
// seabed api doesn't return audit count
totalCPUsExecutingDP2s--; // do not count the system volume
totalCPUsExecutingDP2s = MAXOF(totalCPUsExecutingDP2s,1.);
}
CostScalar activePartitions =
((NodeMap *)(physicalPartFunc.getNodeMap()))->getNumActivePartitions();
// Assume at least one DP2 volume even if node map indicates otherwise.
Lng32 numOfDP2Volumes =
#pragma nowarn(1506) // warning elimination
MIN_ONE(((NodeMap *)(physicalPartFunc.getNodeMap()))->getNumOfDP2Volumes());
#pragma warn(1506) // warning elimination
// The number of cpus executing DP2's cannot be more than the number
// of active partitions :
Lng32 cpusExecutingDP2s =
MINOF((Lng32)totalCPUsExecutingDP2s.getValue(),
(Lng32)activePartitions.getValue());
// The number of cpus executing DP2's cannot be more than the number
// of DP2 volumes:
if (!isHbaseTable)
cpusExecutingDP2s = MINOF(cpusExecutingDP2s, numOfDP2Volumes);
dp2CostInfo.setCountOfCPUsExecutingDP2s(cpusExecutingDP2s);
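  // Illustration (hypothetical numbers): with 16 CPUs in the cluster
  // (15 after excluding the system volume for non-HBase tables),
  // 8 active partitions and 4 DP2 volumes, a non-HBase table gets
  // cpusExecutingDP2s = MINOF(MINOF(15, 8), 4) = 4.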
// -----------------------------------------------------------------------
  // Set a default estimate for the repeat count; then refine this
  // estimate below if possible.
// -----------------------------------------------------------------------
dp2CostInfo.setRepeatCountForOperatorsInDP2(
(myContext.getInputLogProp()->getResultCardinality()).minCsOne());
// check if we are doing updates
if (scan.getOperator().match(REL_ANY_LEAF_GEN_UPDATE) ||
scan.getOperator().match(REL_ANY_UNARY_GEN_UPDATE) )
dp2CostInfo.setRepeatCountState
(DP2CostDataThatDependsOnSPP::UPDATE_OPERATION);
// only do this code if we have more than one partition:
if (physicalPartFunc.getNodeMap()->getNumEntries() > 0)
{
// ------------------------------------------------------------
// only detect AP for range partitioning (for now)
// Also, only do this for Scans (not for update, nor insert, nor
// delete)
// ------------------------------------------------------------
if ( physicalPartFunc.isARangePartitioningFunction() AND
( (scan.getOperatorType() == REL_FILE_SCAN) OR
(scan.getOperatorType() == REL_HBASE_ACCESS)
)
)
{
// ------------------------------------------------------------
// ESTIMATE ACTIVE PARTITIONS:
// and only estimate if there actually are any predicates
// over the leading part. key column
// ------------------------------------------------------------
// Get the key columns from the partKey since the
// part key in the part. func. contains veg references.
const ValueIdList& partKeyList = partKey.getKeyColumns();
ColumnOrderList keyPredsByCol(partKeyList);
// This only works for the case of single disjunct.
// This is ok as far as the partkey is a search key.
// $$$ When we add support for Mdam to the PA this
// $$$ will need to change to support several
// $$$ disjuncts. See AP doc for algorithm.
CMPASSERT(partKey.getKeyDisjunctEntries() == 1);
partKey.getKeyPredicatesByColumn(keyPredsByCol,0);
      // But we only care about the leading column now...
// $$$ Revisit when multicol. hists. are added
const ValueId& leadingColumn = partKeyList[0];
ValueIdSet keyPredsForLeadingColumn =
keyPredsByCol.getPredicatesForColumn(leadingColumn);
// Note that there can be more than one predicate for
// the leading column (as in a range)
// but if there are no predicates then we just return
// (default estimates were set above)
if (keyPredsForLeadingColumn.isEmpty())
return;
// Obtain a new node map to decorate:
NodeMap *newNodeMapPtr =
physicalPartFunc.getNodeMap()->copy(heap);
// Run estimation algorithm:
CostScalar probes =
myContext.getInputLogProp()->getResultCardinality();
RangePartitioningFunction *prpfPtr =
(RangePartitioningFunction *) &physicalPartFunc;
// ------------------------------------------------------------
// Obtain distribution for leading key of part. column:
// ------------------------------------------------------------
Histograms leadColHist(heap);
const ColStatDescList &primaryTableCSDL =
indexDesc.getPrimaryTableDesc()->getTableColStats();
// This code is based on method
// void
// IndexDescHistograms::appendHistogramForColumnPosition(
// const CollIndex& columnPosition)
// on ScanOptimizer.h/cpp
// However here the situation is a lot simpler
// because we don't need to synchronize histograms
CollIndex i;
// AQ02-009 ATOMIC TEST SUITE Costing Anomaly Fix
ColStatsSharedPtr colStats;
if (primaryTableCSDL.getColStatDescIndexForColumn(i, // out
leadingColumn))
{
leadColHist.append(primaryTableCSDL[i]);
colStats = primaryTableCSDL[i]->getColStats();
}
else
{
// CMPABORT; // there must exist a CSD for every part. col!
// Deletes don't work because of the
// Delete.IndexDesc/Scan.IndexDesc->DeleteCursor(
// Delete.IndexDesc, Scan.PartKey) problem
return;
}
if( colStats->isOrigFakeHist() )
return;
// Are any histograms fake?
// AQ02-009 ATOMIC TEST SUITE Costing Anomaly Fix
      // PROBE COUNT is estimated wrongly if the predicate's search key is
      // beyond the range of the lower and upper bounds of the search key's
      // values in the table.
const ColStatDescList & colStatDescList = myContext.getInputLogProp()->getColStats();
for ( i = 0; i < colStatDescList.entries(); i++ )
{
if ( colStatDescList[i]->getColStats()->isOrigFakeHist() )
{
return;
}
}
// ------------------------------------------------------------
// Apply predicates for leadingColumn:
// ------------------------------------------------------------
const SelectivityHint * selHint = indexDesc.getPrimaryTableDesc()->getSelectivityHint();
const CardinalityHint * cardHint = indexDesc.getPrimaryTableDesc()->getCardinalityHint();
OperatorTypeEnum opType = ITM_FIRST_ITEM_OP;
if ( (scan.getOperatorType() == REL_FILE_SCAN) OR
(scan.getOperatorType() == REL_HBASE_ACCESS))
opType = REL_SCAN;
if ( (probes.isGreaterThanZero())
AND
(myContext.getInputLogProp()->getColStats().entries() > 0) )
{
// NJ case:
leadColHist.applyPredicatesWhenMultipleProbes(
keyPredsForLeadingColumn
,*(myContext.getInputLogProp())
,scanGroupAttr.getCharacteristicInputs()
,FALSE // MDAM irrelevant here
,selHint
,cardHint
,NULL
,opType
);
}
else
{
leadColHist.applyPredicates(keyPredsForLeadingColumn, scan, selHint, cardHint, opType) ;
if (leadColHist.getRowCount() == 1)
{
newNodeMapPtr->setNumActivePartitions(1);
}
}
// At this point, leadColHist has the modified histograms
// ------------------------------------------------------------
// Now create the partition histograms:
// ------------------------------------------------------------
PartitionKeyDistribution partKeyDist(
*prpfPtr
,leadColHist.getColStatDescList()
);
DCMPASSERT(partKeyDist.isValid());
      // We cannot proceed with an invalid part key dist, but
      // it's not a good idea to abort; a bad plan is better
      // than no plan
if (NOT partKeyDist.isValid())
{
return;
}
// ------------------------------------------------------------
// Traverse the part. histogram to find out the AP,
// set the part. state accordingly
// ------------------------------------------------------------
CollIndex numParts = partKeyDist.getNumPartitions();
DCMPASSERT(numParts
==
newNodeMapPtr->getNumEntries());
for (i=0; i < numParts; i++)
{
// NB: we used to only count those with >1 rows; however,
// for most cases, a rowcount that's at all non-zero means
// that the partition is active.
if (partKeyDist.getRowsForPartition(i) > 0.)
newNodeMapPtr->setPartitionState(i,
NodeMapEntry::ACTIVE);
else
newNodeMapPtr->setPartitionState(i,
NodeMapEntry::NOT_ACTIVE);
}
//--------------------------------------------------------------------
      // If the key predicates for the leading column include an equality
      // predicate on a host variable or parameter, then set the estimated
      // maximum number of active partitions at runtime to the max partition
      // factor of the partition key distribution.
      // Currently this variable, if set, is used only for costing of scans.
// ------------------------------------------------------------------
if (keyPredsForLeadingColumn.referencesAHostvariableorParam())
newNodeMapPtr->setEstNumActivePartitionsAtRuntime(partKeyDist.getMaxPartitionFactor());
// done!, replace new map in existing part func:
physicalPartFunc.replaceNodeMap(newNodeMapPtr);
// -------------------------------------------------------------------
// Estimate the RC
// -------------------------------------------------------------------
// Find out the highest column covered by an equijoin
Lng32 highestColumn = 0;
const ValueIdSet &inputValues =
scanGroupAttr.getCharacteristicInputs();
const ValueIdSet& operatorValues =
indexDesc.getIndexKey();
ValueId firstPredId;
for (i=0; i < partKeyList.entries(); i++)
{
ValueId currentColumn = partKeyList[i];
ValueIdSet keyPredsForRC =
keyPredsByCol.getPredicatesForColumn(currentColumn);
if (keyPredsForRC.isEmpty())
break;
// find a predicate in the set that is an equijoin pred:
NABoolean found = FALSE;
for (ValueId predId = keyPredsForRC.init();
NOT found AND keyPredsForRC.next(predId);
keyPredsForRC.advance(predId))
{
// does it cover the column?
ItemExpr *predIEPtr = predId.getItemExpr();
if (predIEPtr->
isANestedJoinPredicate(inputValues, operatorValues))
{
      // is it an equijoin?
if (predIEPtr->getOperatorType() == ITM_VEG_PREDICATE
OR
predIEPtr->getOperatorType() == ITM_EQUAL)
{
if (i==0)
{
// save first equijoin for the estimation
// of affected partitions
firstPredId = predId;
}
highestColumn++;
found = TRUE;
} // if equijoin
} // if nested join pred
} // for every pred for current column
if (NOT found)
{
// we found a column not covered, do not
// continue
break;
}
} // for every column
dp2CostInfo.setHighestLeadingPartitionColumnCovered(highestColumn);
if (highestColumn > 0)
{
// Obtain distribution for leading key of part. column:
// Reuse the partKeyHist:
// the synthesis for AP:
Lng32 affectedPartitions =
#pragma nowarn(1506) // warning elimination
newNodeMapPtr->getNumActivePartitions();
#pragma warn(1506) // warning elimination
// Now estimate the RC:
// minRC would be the RC if all the columns were
// covered
CostScalar minRC = (probes/affectedPartitions).getCeiling();
// If not all partitions are covered, minRC must be
// multiplied by a "fanout". The "fanout" represents
// the number of partitions each probe goes to.
// our first estimate for the fanout is the maximum
// number of partitions sharing the same boundary
// value for the first column:
CostScalar fanout = partKeyDist.getMaxPartitionFactor();
// But if more columns of the partition key are covered
// then the fanout must decrease. Compute that using
// formula below which interpolates given the number
// of columns in the key and the highest column covered.
// Note that the fanout factor
// becomes one when all columns are covered,
// and it becomes "fanout" when only the first column is covered
// note that if we are here then highestColumn >= 1
CostScalar numPartKeyColumns = partKeyList.entries();
CostScalar fanoutRatio =
CostScalar(highestColumn-1)/(numPartKeyColumns-1);
CostScalar fanoutFactor =
(fanout * (csOne - fanoutRatio)).minCsOne();
CostScalar RC = ((minRC * fanoutFactor));
dp2CostInfo.setRepeatCountForOperatorsInDP2(RC.minCsOne());
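      // Worked example (hypothetical numbers): 1000 probes over 100
      // affected partitions gives minRC = 10; with 4 part-key columns,
      // highestColumn = 2 and fanout = 8, fanoutRatio = (2-1)/(4-1) = 1/3
      // and fanoutFactor = 8 * (2/3) = 5.33, so RC = 10 * 5.33 = 53.3.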
} // if at least the first col. is covered by an equijoin
} // if we have range partitioning
else
if ( ( physicalPartFunc.isATableHashPartitioningFunction() AND
(scan.getOperatorType() == REL_FILE_SCAN)
) OR
( indexDesc.isPartitioned() AND
(scan.getOperatorType() == REL_HBASE_ACCESS) AND
(CmpCommon::getDefault(NCM_HBASE_COSTING) == DF_ON))
)
{
// ------------------------------------------------------------
// The inner table is hash-partitioned.
// For details on the logic within this IF block, please read
// "Support for hash-partitioned tables in method
// computeDP2CostDataThatDependsOnSPP" by Sunil Sharma.
// ------------------------------------------------------------
// Obtain access to the node-map
NodeMap *NodeMapPtr = (NodeMap *) physicalPartFunc.getNodeMap();
const ValueIdSet &inputValues =
scanGroupAttr.getCharacteristicInputs();
const ValueIdSet& operatorValues =
indexDesc.getIndexKey();
//-------------------------------------------------------------------
    // Determine the number of partkey columns covered by a nested-join pred.
// or by a constant expr.
// Determine the number of partkey columns covered by constant
// expressions alone.
//-------------------------------------------------------------------
// Get the key columns from the partKey since the
// part key in the part. func. contains VEG references.
const ValueIdList& partKeyList = partKey.getKeyColumns();
ColumnOrderList keyPredsByCol(partKeyList);
// This only works for the case of single disjunct.
// This is ok as far as the partkey is a search key.
// $$$ When we add support for Mdam to the PA this
// $$$ will need to change to support several
// $$$ disjuncts.
CMPASSERT(partKey.getKeyDisjunctEntries() == 1);
// populate keyPredsByCol with predicates
partKey.getKeyPredicatesByColumn(keyPredsByCol);
ULng32 keyColsCoveredByNJPredOrConst = 0;
ULng32 keyColsCoveredByConst = 0;
// iterate over all partition-key columns
ValueId firstPredId;
for (CollIndex i=0; i < partKeyList.entries(); i++)
{
ValueId currentColumn = partKeyList[i];
ValueIdSet keyPredsForCurCol =
keyPredsByCol.getPredicatesForColumn(currentColumn);
if (keyPredsForCurCol.isEmpty())
{
break;
}
// find a predicate in the set that is a nested-join pred or
// involves a constant expression.
// find a predicate in the set that involves a constant.
NABoolean foundNJPredOrConstPred = FALSE;
NABoolean foundConstPred = FALSE;
for (ValueId predId = keyPredsForCurCol.init();
keyPredsForCurCol.next(predId);
keyPredsForCurCol.advance(predId))
{
ItemExpr *predIEPtr = predId.getItemExpr();
if ((predIEPtr->getOperatorType() == ITM_VEG_PREDICATE)
OR
(predIEPtr->getOperatorType() == ITM_EQUAL))
{
// This pred is an equi-join pred or a constant pred.
// If this pred is an equi-join pred, then ensure that
// it links the current/inner
          // table with the outer composite, i.e., that it is a
// nested-join pred.
// if needed, determine whether the pred is a constant pred
NABoolean isAConstPred = FALSE;
if ((NOT foundConstPred) OR (NOT foundNJPredOrConstPred))
{
// does the pred cover the column with a constant expression
// (i.e. a constant value, a host-var or a param)?
// is the predicate a VEG-predicate?
if (predIEPtr->getOperatorType() == ITM_VEG_PREDICATE)
{
const VEG * predVEG=
((VEGPredicate*)predIEPtr)->getVEG();
// Now, get all members of the VEG group
const ValueIdSet & VEGGroup=predVEG->getAllValues();
if (VEGGroup.referencesAConstExpr())
{
if (NOT foundConstPred)
{
keyColsCoveredByConst ++;
foundConstPred = TRUE;
}
isAConstPred = TRUE;
}
}
else
{
// Pred uses binary relational operator ITM_EQUAL.
// (It's not a VEG predicate but an ItemExpr.)
const ItemExpr *leftExpr = predIEPtr->child(0);
const ItemExpr *rightExpr = predIEPtr->child(1);
          // Check if the other operand of
          // the equality condition is a non-strict constant.
          // A strict constant is something like cos(1), CAST(1),
          // whereas cos(?p), CAST(?p) can be considered a constant
          // in the non-strict sense since they remain
          // constant for a given execution of a query.
if ( leftExpr->doesExprEvaluateToConstant(FALSE) OR
rightExpr->doesExprEvaluateToConstant(FALSE) )
{
if (NOT foundConstPred)
{
keyColsCoveredByConst ++;
foundConstPred = TRUE;
}
isAConstPred = TRUE;
}
}
}
if ((NOT foundNJPredOrConstPred)
&& (isAConstPred ||
(predIEPtr->
isANestedJoinPredicate(inputValues, operatorValues))))
{
keyColsCoveredByNJPredOrConst ++;
foundNJPredOrConstPred = TRUE;
}
if (foundNJPredOrConstPred && foundConstPred)
// we're done with this partitioning key column;
// end the iteration over its predicates
break;
}
}
}
CollIndex numParts = 1;
if (scan.getOperatorType() == REL_HBASE_ACCESS)
numParts = indexDesc.getPartitioningFunction()->getCountOfPartitions();
else
numParts = physicalPartFunc.getCountOfPartitions();
// The order of the IF conditions in the following statement is
// CRITICAL. It is possible that the partitioning-key list may be
// fully covered by constant expressions and also by some combination
// of equijoin predicates and constant expressions. If so, the first
// condition takes precedence during query execution in that if it's
// true, then all probes are routed to one specific partition. This
// precedence is reflected in the ordering of the IF conditions below.
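// A hypothetical illustration (not from the original code): with a
// partition key (A,B) and predicates A = 5 AND B = 7 AND B = t2.x,
// both keyColsCoveredByConst and keyColsCoveredByNJPredOrConst equal
// partKeyList.entries(), so the constant-coverage branch below wins
// and all probes are routed to a single partition.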
if (keyColsCoveredByConst == partKeyList.entries())
{
// All inner table partitioning key columns are covered by
// constant expr. Hence, all outer probes go to one specific
// inner table partition, which is the only active partition.
dp2CostInfo.setRepeatCountForOperatorsInDP2(
(myContext.getInputLogProp()->
getResultCardinality()).minCsOne()
);
dp2CostInfo.setRepeatCountState
(DP2CostDataThatDependsOnSPP::KEYCOLS_COVERED_BY_CONST);
// If all partition-key columns are covered by constant values
// (not host-vars or params), then we can determine the one
// active partition by computing the hash-partitioning function
// (using the constant-folding technique) and can set the active
// partition bitmap appropriately. However, my investigation shows
// that this bitmap is used only for range-partitioned tables.
// Moreover, query-caching (in ODBC/JDBC & in MXCMP) replaces
// constant values in equality predicates with param. Hence, the
// likelihood that all partition-key columns are covered by
// constants is low. These factors reduce the value of or the
// need for accurately setting the active bitmap in the nodemap
// for hash-partitioned tables. The bitmap will be left as-is.
// Indicate in the node map that only one partition will be
// accessed during plan evaluation.
NodeMapPtr->setEstNumActivePartitionsAtRuntime(1);
}
else if (keyColsCoveredByNJPredOrConst == partKeyList.entries())
{
// All inner table partitioning key columns are covered by
// equijoin predicates or constant expressions.
// Hence, each outer probe goes to a single
// inner table partition.
if (CURRSTMT_OPTDEFAULTS->incorporateSkewInCosting() AND
physicalPartFunc.isATableHashPartitioningFunction())
{
CostScalar probesAtBusyStream =
myContext.getInputLogProp()->getCardOfBusiestStream(
&physicalPartFunc,
numParts,
&scanGroupAttr,
numParts,
TRUE);
dp2CostInfo.setProbesAtBusiestStream(probesAtBusyStream);
}
dp2CostInfo.setRepeatCountForOperatorsInDP2
(
(myContext.getInputLogProp()->getResultCardinality()
/numParts).getCeiling().minCsOne()
);
dp2CostInfo.setRepeatCountState
(DP2CostDataThatDependsOnSPP::KEYCOLS_COVERED_BY_PROBE_COLS_CONST);
// indicate in the node map that all partitions will be
// accessed during plan evaluation.
NodeMapPtr->setEstNumActivePartitionsAtRuntime(numParts);
}
else
{
// The default applies: Each outer probe goes to all inner
// partitions.
dp2CostInfo.setRepeatCountForOperatorsInDP2(
(myContext.getInputLogProp()->
getResultCardinality()).minCsOne()
);
dp2CostInfo.setRepeatCountState(
DP2CostDataThatDependsOnSPP::KEYCOLS_NOT_COVERED);
// indicate in the node map that all partitions will be
// accessed during plan evaluation.
NodeMapPtr->setEstNumActivePartitionsAtRuntime
(numParts);
};
}; // inner table is hash-partitioned
}// If we have more than one partition
} // computeDP2CostDataThatDependsOnSPP()
//<pb>
// -----------------------------------------------------------------------
// Helper method for DP2 cursor operators (scan, cursor ins/upd/del)
//
// This method interprets the partitioning requirements, the type of
// the operator, and the physical partitioning function of the file
// and comes up with a synthesized partitioning function. We are using
// a standalone procedure because at this time there is no common
// base class (other than RelExpr) among DP2 scan and DP2 updates.
//
// This method decides things that relate to the DP2 operator and to
// the DP2 exchange above it. Decisions are sent to the DP2 exchange
// via a special "LogPhysPartitioningFunction" object that is only
// used in DP2.
//
// Here is what this method decides:
//
// - the "logical partitioning function", meaning the top partitioning
// function of the DP2 exchange above,
// - the type of logical partitioning used (for documentation see class
// LogPhysPartitioningFunction in file PartFunc.h),
// - how many PAs (total among all client processes) will be used and
// whether a PAPA node will be used in the executor process(es),
// - how many executor processes will be used (equal to the number
// of logical partitions, unless we do load balancing).
//
// There are certain constraints with special situations, such as
// VSBB inserts and merging of sorted streams, which may require one PA
// per DP2 partition in each executor process.
// -----------------------------------------------------------------------
PhysicalProperty * RelExpr::synthDP2PhysicalProperty(
const Context* myContext,
const ValueIdList& sortOrder,
const IndexDesc* indexDesc,
const SearchKey* partSearchKey)
{
// my required phys props (always non-NULL)
const ReqdPhysicalProperty* rppForMe =
myContext->getReqdPhysicalProperty();
// the result
PhysicalProperty *sppForMe;
// variables that help to come up with my partitioning function
const LogicalPartitioningRequirement *lpr =
rppForMe->getLogicalPartRequirement();
PartitioningRequirement * logPartReq = NULL;
Lng32 numPAs = ANY_NUMBER_OF_PARTITIONS;
Lng32 numEsps = 1;
NABoolean usePapa = FALSE;
NABoolean shouldUseSynchronousAccess = FALSE;
NABoolean mergeOfSortedStreams = FALSE;
NABoolean numPAsForced = FALSE;
PlanExecutionEnum location = EXECUTE_IN_DP2;
// get the maximum number of access nodes per process that can be allowed from
// MAX_ACCESS_NODES_PER_ESP. This is an absolute value
// that should be based on file system and executor buffer size
// restrictions, message system restrictions, etc.
Int32 maxPAsPerProcess =
(Int32) getDefaultAsLong(MAX_ACCESS_NODES_PER_ESP);
LogPhysPartitioningFunction::logPartType logPartType;
PartitioningFunction * physicalPartFunc =
indexDesc->getPartitioningFunction();
ValueIdList physicalClusteringKey =
indexDesc->getOrderOfKeyValues();
PartitioningFunction * logicalPartFunc = NULL;
PartitioningFunction * logPhysPartFunc = NULL;
// convert a non-existent part func to a single part func
if (physicalPartFunc == NULL)
{
NodeMap* nodeMap = indexDesc->getNAFileSet()
->getPartitioningFunction()
->getNodeMap()
->copy(CmpCommon::statementHeap());
physicalPartFunc = new(CmpCommon::statementHeap())
SinglePartitionPartitioningFunction(nodeMap);
}
// -----------------------------------------------------------------------
// Vector to put all costing data that is computed at synthesis time
// Make it a local variable for now. If we ever reach the end of
// this routine create a variable from the heap, initialize it with this,
// and then set the sppForMe slot.
// -----------------------------------------------------------------------
DP2CostDataThatDependsOnSPP dp2CostInfo;
// ---------------------------------------------------------------------
// Estimate the number of active partitions and other costing
// data that depends on SPP:
// ---------------------------------------------------------------------
computeDP2CostDataThatDependsOnSPP(*physicalPartFunc // in/out
,dp2CostInfo //out
,*indexDesc // in
,*partSearchKey // in
,*getGroupAttr() //in
,*myContext // in
,CmpCommon::statementHeap() // in
, *this
);
Lng32 currentCountOfCPUs = dp2CostInfo.getCountOfCPUsExecutingDP2s();
// ---------------------------------------------------------------------
// determine the logical partitioning type
// ---------------------------------------------------------------------
if (lpr)
{
logPartReq = lpr->getLogReq();
logPartType = lpr->getLogPartTypeReq();
numPAs = lpr->getNumClientsReq();
usePapa = lpr->getMustUsePapa();
if ( lpr->getNumClientsReq() == 1 &&
lpr->isNumberOfPAsForced() &&
(CmpCommon::getDefault(ATTEMPT_ASYNCHRONOUS_ACCESS) == DF_ON)
)
{
// The number of PAs is being forced through CQS. This should be the
// number of PA nodes over all the partitions.
numPAs= MINOF( currentCountOfCPUs,
physicalPartFunc->getCountOfPartitions()
);
// the following is needed to make sure that subsequent calls
// to shouldUseSynchronousAccess() do get the right number of PAs;
// this should ideally be set in Exchange::processCQS(), but there
// the number of partitions is not available
LogicalPartitioningRequirement *pr=(LogicalPartitioningRequirement *)lpr;
pr->setNumClientsReq(numPAs);
}
// TEMPORARY CODE
// The code cannot handle parallelism for vertically partitioned
// tables right now. Also, the code cannot handle parallelism for
// tables with float columns in the partitioning key.
// So, if someone wants parallelism they are out of luck until
// the problems are fixed.
if ((physicalPartFunc->isARoundRobinPartitioningFunction() OR
physicalPartFunc->partKeyContainsFloatColumn()) AND
((logPartReq == NULL) OR
NOT (logPartReq->isRequirementExactlyOne() OR
logPartReq->isRequirementReplicateNoBroadcast())))
return NULL;
// The synthesized partitioning function needs to satisfy the
// partitioning requirements in the context. This would be an
// argument for considering these requirements when calling
// realize() below. However, the result of realize() is not
// the synthesized partitioning function and therefore doesn't
// have to satisfy those requirements!
//
// This is a tricky problem, but for now we just hope that not
// too many physical partitioning requirements will be generated
// by DP2 operators.
NABoolean considerPhysicalReqs = FALSE;
// try to realize the logical partitioning requirement in the way
// that is most similar to the physical partitioning function
if (logPartReq)
{
logicalPartFunc = logPartReq->realize(
myContext,
considerPhysicalReqs,
physicalPartFunc->makePartitioningRequirement());
logicalPartFunc->createPartitioningKeyPredicates();
}
// -----------------------------------------------------------------
// Determine the type of logical partitioning.
// -----------------------------------------------------------------
// check for partition grouping first
if (logicalPartFunc == NULL OR
logicalPartFunc->isAGroupingOf(*physicalPartFunc) OR
(logPartReq AND logPartReq->castToRequireReplicateNoBroadcast()))
{
if (logPartType ==
LogPhysPartitioningFunction::ANY_LOGICAL_PARTITIONING)
{
// Partition grouping is the preferred way of doing things
// when there is no specific requirement or when the
// requirement allows it or when we are replicating data.
logPartType = LogPhysPartitioningFunction::PA_PARTITION_GROUPING;
// =====================
}
}
else if (logPartType ==
LogPhysPartitioningFunction::PA_PARTITION_GROUPING)
{
// trying to force grouping for a case where grouping doesn't work
return NULL;
}
// If we didn't pick partition grouping, try logical subpartitioning
if (logPartType !=
LogPhysPartitioningFunction::PA_PARTITION_GROUPING AND
logicalPartFunc AND
logicalPartFunc->canProducePartitioningKeyPredicates())
{
// check whether it would be beneficial to apply the partitioning
// key predicates to the scan node
ValueIdSet pkp = logicalPartFunc->getPartitioningKeyPredicates();
const RangePartitioningFunction *rpfFromRequirement;
if (logPartReq->castToRequireRange())
rpfFromRequirement = logPartReq->castToRequireRange()->
getPartitioningFunction()->castToRangePartitioningFunction();
else
rpfFromRequirement = NULL;
// Test whether the partitioning key columns of a required
// range partitioning scheme (if any) are a leading prefix
// of and in the same order as the physical clustering
// key columns, and either the physical partitioning
// function is a SinglePartitionPartitioningFunction, or
// the physical partitioning function is a range partitioning
// function and the partitioning key columns are a leading
// prefix of the clustering key columns.
// Choose logical subpartitioning if this is indeed the case.
// Note that we could some day allow an INVERSE_ORDER
// result, too, because the partitioning key predicates
// would work on the inverse order just as well.
if (rpfFromRequirement AND
(physicalClusteringKey.satisfiesReqdOrder(
rpfFromRequirement->getOrderOfKeyValues()) == SAME_ORDER) AND
(physicalPartFunc->isASinglePartitionPartitioningFunction() OR
(physicalPartFunc->isARangePartitioningFunction() AND
(physicalClusteringKey.satisfiesReqdOrder(
physicalPartFunc->
castToRangePartitioningFunction()->
getOrderOfKeyValues()) == SAME_ORDER)))
)
{
logPartType =
LogPhysPartitioningFunction::LOGICAL_SUBPARTITIONING;
// =======================
}
else if (logPartType ==
LogPhysPartitioningFunction::LOGICAL_SUBPARTITIONING)
{
// trying to force subpartitioning where it doesn't work
return NULL;
}
else
{
// should check whether applying the part key preds pkp
// results in a good key pred selectivity ||opt
//logPartType =
//LogPhysPartitioningFunction::HORIZONTAL_PARTITION_SLICING;
// ============================
}
}
if (logPartType ==
LogPhysPartitioningFunction::ANY_LOGICAL_PARTITIONING)
{
// nothing worked, give up and hope for a double exchange
return NULL;
}
}
else
{
// no logical partitioning requirement, choose a simple scheme
logPartType = LogPhysPartitioningFunction::PA_PARTITION_GROUPING;
// =====================
}
// ---------------------------------------------------------------------
// at this point we have chosen logPartType, now determine the
// number of PA clients to be used and whether to use a PAPA
// ---------------------------------------------------------------------
// see if user wants us to base the number of PAs on the number of
// active partitions
NABoolean baseNumPAsOnAP = FALSE;
if (CmpCommon::getDefault(BASE_NUM_PAS_ON_ACTIVE_PARTS) == DF_ON)
baseNumPAsOnAP = TRUE;
// Get the number of active partitions - used to limit the # of PAs
// if the above defaults entry says it is ok.
CostScalar activePartitions =
((NodeMap *)(physicalPartFunc->getNodeMap()))->getNumActivePartitions();
// -----------------------------------------------------------------------
// Determine number of PAs to use
// -----------------------------------------------------------------------
// Calculating the number of PA nodes is a difficult task. Here are some
// of the factors that should influence the decision:
//
// - the number of physical partitions (no need to use more PAs than
// physical partitions in a PAPA node),
// - the selectivity of the partitioning key predicates (influences number
// of active physical partitions),
// - the distribution of DP2s over the available CPUs or nodes (influences
// the degree of DP2 parallelism that we can get),
// - the logPartType (influences whether the ESPs go after the same
// DP2s or have distinct set of DP2s) and the number of ESPs if the
// ESPs access the same partitions (HORIZONTAL_PARTITION_SLICING),
// - whether there is a required order in the DP2 exchange (requires
// a sufficient number of PAs to do the merge), whether it matches the
// clustering key and/or partitioning key, and the ratio of rows
// accessed in DP2 vs. rows returned by the DP2 exchange (low
// selectivity queries with sort can still benefit from parallelism).
if ((numPAs != ANY_NUMBER_OF_PARTITIONS) OR
(CmpCommon::getDefault(ATTEMPT_ASYNCHRONOUS_ACCESS) == DF_OFF))
{
// The number of PAs is being forced via C.Q. Shape, or
// synchronous access is being forced via C.Q. Default.
// If the number of PAs was not specified via C.Q. Shape,
// then we must be forcing synchronous access.
if (numPAs == ANY_NUMBER_OF_PARTITIONS)
numPAs = 1;
else // number of PAs were forced
numPAsForced = TRUE;
// If the user is forcing any amount of synchronous access
// (numPAs < number of physical partitions),
// then this could lead to incorrect results if there is a
// required order or arrangement, the table is partitioned,
// and the table is not range partitioned on the required
// order or arrangement columns. Call the shouldUseSynchronousAccess
// method. If synchronous access is ok this method will
// return TRUE, note that this method knows if synch. access
// is being forced. If this method returns false
// then we must give up now.
if (numPAs < physicalPartFunc->getCountOfPartitions())
{
if (physicalPartFunc->shouldUseSynchronousAccess(
rppForMe,myContext->getInputLogProp(),getGroupAttr()))
shouldUseSynchronousAccess = TRUE;
else
return NULL;
}
} // end if forcing number of PAs or asynchronous access
else if (physicalPartFunc->shouldUseSynchronousAccess(
rppForMe,myContext->getInputLogProp(),getGroupAttr()))
{
shouldUseSynchronousAccess = TRUE;
// For synchronous access, set the numPAs to 1. This is really
// the number of PAs we need PER PROCESS. But, since numPAs
// reflects the total number of PAs for all processes, it needs
// to be scaled up by the number of processes. This will be
// done after we compute the number of ESPs.
numPAs = 1;
}
else // # of PAs not forced and no synchronous access
{
// Set the number of PAs to the number of active partitions if
// allowed by the defaults table.
// Don't limit the # of PAs by the number of DP2 volumes (yet).
// We must be very careful about limiting the number of PAs if
// a merge of sorted streams is required. A merge of sorted streams
// cannot allow any kind of synchronous access, or the wrong
// answer might be returned. So, if we are going to do a
// merge of sorted streams, there must be one PA for every
// TRULY active partition. Since we can't guarantee the active
// partition estimate, set the number of PAs to the number of
// physical partitions if a merge of sorted streams is necessary.
if (NOT baseNumPAsOnAP OR
(rppForMe->getLogicalOrderOrArrangementFlag() AND
NOT physicalPartFunc->isASinglePartitionPartitioningFunction()))
numPAs = physicalPartFunc->getCountOfPartitions();
else
numPAs = (Lng32)activePartitions.getValue();
}
// Now that we know if synchronous access will be done, see if
// we will need to do a merge of sorted streams.
// We will need a merge of sorted streams if there is a logical
// order or arrangement requirement, the required order does
// not require a DP2 sort order type (no merge of sorted streams
// is necessary to satisfy a DP2 sort order type requirement),
// and we are not accessing all partitions synchronously.
mergeOfSortedStreams =
rppForMe->getLogicalOrderOrArrangementFlag() AND
(rppForMe->getDp2SortOrderPartReq() == NULL) AND
(numPAs != 1);
// -----------------------------------------------------------------------
// Determine the number of processes (ESPs or master) used. That number
// must be identical to the number of partitions in the logical
// partitioning function (if any).
// -----------------------------------------------------------------------
if ( logicalPartFunc AND logPartReq AND
(logPartReq->getCountOfPartitions() != ANY_NUMBER_OF_PARTITIONS)
)
{
// the logical partitioning function, which was derived from
// the logical partitioning requirement, has specified the
// number of ESPs
numEsps =
( (CmpCommon::getDefault(COMP_BOOL_129) == DF_ON) AND
(CURRSTMT_OPTDEFAULTS->attemptESPParallelism() == DF_SYSTEM) AND
logPartReq->isRequirementApproximatelyN()
) ? logPartReq->castToRequireApproximatelyNPartitions()->
getCountOfPartitionsLowBound()
: logicalPartFunc->getCountOfPartitions();
}
else if (numPAs == 1)
{
// If numPAs is 1, there must be only one partition,
// or synchronous access is being used. So, it wouldn't
// do us any good to have more than one ESP.
numEsps = 1;
logicalPartFunc = new(CmpCommon::statementHeap())
SinglePartitionPartitioningFunction();
logicalPartFunc->createPartitioningKeyPredicates();
}
else
{
// The DP2 exchange above us has not had any partitioning
// requirement or has not specified a number of partitions in
// its logical partitioning requirement. Produce a logical
// partitioning function in a way that we get a reasonable
// number of PAs per ESP (or in the master). Make the
// logical partitioning function by scaling the
// physical partitioning function or the previously realized
// partitioning function to numEsps partitions.
// initialize numEsps to the number of PAs
numEsps = numPAs;
NABoolean numOfESPsForced = FALSE; // Not used
float allowedDeviation = 0.0; // Not used
if (NOT okToAttemptESPParallelism(myContext,
NULL, //don't need/have pws
numEsps,
allowedDeviation,
numOfESPsForced) OR
(numEsps == 1)
// TEMPORARY CODE - until vert. part tables support parallelism
OR physicalPartFunc->isARoundRobinPartitioningFunction()
// TEMPORARY CODE - until the executor can handle float columns
// in the partitioning key
OR physicalPartFunc->partKeyContainsFloatColumn()
)
{
numEsps = 1;
logicalPartFunc = new(CmpCommon::statementHeap())
SinglePartitionPartitioningFunction();
}
else
{
if (logicalPartFunc == NULL)
logicalPartFunc = physicalPartFunc->copy();
else
// logicalPartFunc was realized from the physical part function
logicalPartFunc = logicalPartFunc->copy();
// Now group the number of partitions down to a reasonable #
Lng32 scaleNumOfParts = numEsps;
// First, need to see if we need to base the parallelism on
// the number of active partitions.
Lng32 numActivePartitions = 1;
if ((CmpCommon::getDefault(BASE_NUM_PAS_ON_ACTIVE_PARTS)
== DF_ON) AND
(logicalPartFunc->castToRangePartitioningFunction() != NULL))
{
CostScalar activePartitions =
((NodeMap *)(logicalPartFunc->getNodeMap()))->
getNumActivePartitions();
numActivePartitions = (Lng32)activePartitions.getValue();
// If we are grouping based on the number of active partitions,
// and there won't be enough active partitions to go around,
// then reduce the number of groups to the number of active
// partitions. This will ensure that the scaling will work
// and that we don't end up with more ESPs than active partitions.
if (scaleNumOfParts > numActivePartitions)
scaleNumOfParts = numActivePartitions;
}
// Actually do the grouping now
logicalPartFunc =
logicalPartFunc->scaleNumberOfPartitions(scaleNumOfParts);
// Was scaling able to do its job?
if (scaleNumOfParts != numEsps) // No
{
// Scaling failed, so use the # of parts
// in the logical function, if there aren't too many partitions.
if (logicalPartFunc->getCountOfPartitions() <=
rppForMe->getCountOfPipelines())
numEsps = logicalPartFunc->getCountOfPartitions();
else // No choice left but no parallelism
{
numEsps = 1;
logicalPartFunc = new(CmpCommon::statementHeap())
SinglePartitionPartitioningFunction();
}
} // end if scaling failed
}
logicalPartFunc->createPartitioningKeyPredicates();
}
// We better have at least one ESP (or master)
CMPASSERT(numEsps >= 1);
// We must have a logical partitioning function at this point
CMPASSERT(logicalPartFunc != NULL);
// ---------------------------------------------------------------------
// Perform adjustments of the number of PAs that are based on the
// number of ESPs.
// ---------------------------------------------------------------------
// Can't/Don't need to adjust the number of PAs if the # is being forced
// or if synchronous access is being done, or if there is only one
// logical partition.
if (NOT numPAsForced AND NOT shouldUseSynchronousAccess AND numEsps > 1)
{
Lng32 maxPartsPerGroup;
if (logicalPartFunc->isAReplicateNoBroadcastPartitioningFunction())
{
// Does the REP-N really replicate to all partitions or is there
// some grouping going on. If it does replicate, then each
// instance may access all partitions via PAs. If there is
// grouping, then each instance will access a non-overlapping
// group of partitions. So in this case the total number of PAs
// across all instances is just the total number of partitions.
NABoolean grouping = FALSE;
const RequireReplicateNoBroadcast *rnbReq =
logPartReq->castToRequireReplicateNoBroadcast();
if(rnbReq)
{
const PartitioningFunction *parentPartFunc = rnbReq->getParentPartFunc();
Lng32 factor;
if(parentPartFunc)
grouping = parentPartFunc->isAGroupingOf(*physicalPartFunc, &factor);
}
// This is a Type-2 join, so all logical partitions might need
// to access all physical partitions. So, each ESP needs to have all
// the PAs, so we must multiply the number of PAs by the number of ESPs.
// Only do this if it's not a unique scan or if we must do a merge of
// sorted streams.
// If there is grouping going on, then we do not need to multiply.
if (((mergeOfSortedStreams) ||
(CmpCommon::getDefault(COMP_BOOL_67) == DF_OFF)) && !grouping)
numPAs = numPAs * numEsps;
}
else if (logicalPartFunc->isAGroupingOf(*physicalPartFunc,&maxPartsPerGroup)
AND (logPartType ==
LogPhysPartitioningFunction::PA_PARTITION_GROUPING))
{
// Partition grouping is being done.
// Compute the maximum number of physical partitions that would result
// if the groups were formed based on a uniform distribution of all
// physical partitions.
Lng32 maxPartsPerUniformGroup =
(physicalPartFunc->getCountOfPartitions() + numEsps - 1) / numEsps;
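// E.g. (hypothetical numbers): 13 physical partitions and 4 ESPs give
// maxPartsPerUniformGroup = (13 + 4 - 1) / 4 = 4, the integer ceiling
// of 13/4.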
if (mergeOfSortedStreams)
{
// Since a merge of sorted streams is being done, each group
// (ESP) will need to have as many PAs as the group that has
// the most partitions, to ensure that the ESP with the most
// partitions gets as many PAs as it has partitions.
// If this exceeds the maximum # of PAs allowed for a process,
// and this grouping was the result of a call to
// scaleNumberOfPartitions that was performed here and was
// based on an active partition distribution, see if a uniform
// grouping of the partitions would work. If so, call
// scaleNumberOfPartitions again, passing a parameter that indicates
// a uniform distribution of all physical partitions must be used.
if ((maxPartsPerGroup > maxPAsPerProcess) AND
((logPartReq == NULL) OR logPartReq->isRequirementFuzzy()) AND
(maxPartsPerUniformGroup <= maxPAsPerProcess))
{
logicalPartFunc = logicalPartFunc->copy(); // just in case
logicalPartFunc->scaleNumberOfPartitions(numEsps,
UNIFORM_PHYSICAL_PARTITION_GROUPING);
logicalPartFunc->createPartitioningKeyPredicates();
}
else
numPAs = maxPartsPerGroup * numEsps;
}
else
{
// IF this grouping was the result of a call to scaleNumberOfPartitions
// performed here, AND was based on an active partition distribution,
// AND there are enough inactive partitions to make it worthwhile
// to allocate some extra PAs to handle them, then do that.
Lng32 roundedUpNumOfPAs =
((numPAs + numEsps - 1) / numEsps) * numEsps;
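// E.g. (hypothetical numbers): numPAs = 10 and numEsps = 4 give
// roundedUpNumOfPAs = ceil(10/4) * 4 = 12; if the table has more than
// 12 partitions, one extra PA per ESP is allocated below.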
if (((logPartReq == NULL) OR logPartReq->isRequirementFuzzy()) AND
(maxPartsPerUniformGroup != maxPartsPerGroup) AND
(roundedUpNumOfPAs < physicalPartFunc->getCountOfPartitions()))
numPAs += numEsps;
}
} // end if partition grouping
else if (mergeOfSortedStreams)
{
// logical subpartitioning + merge of sorted streams
// Because it is possible that ALL the physical partitions could end
// up in one of the logical partitions (due to skew), the only safe
// number of PAs in this case is to have (numPAs * numEsps) PAs.
// Perhaps eventually the executor will implement allocate-PA-on-demand
// and then the optimizer won't have to figure out how many PAs
// are needed.
numPAs = numPAs * numEsps;
}
} // end if # of PAs not forced, no synch. access, > 1 logical part
// ---------------------------------------------------------------------
// determine use of PAPA and make numPAs a multiple of the number of ESPs
// ---------------------------------------------------------------------
if (numPAs < numEsps)
{
// Synchronous access, or there just weren't enough partitions to
// go around.
numPAs = numEsps;
}
else if ((numPAs % numEsps) != 0)
{
// numPAs is not a multiple of numEsps. Round up if we are doing
// PA partition grouping, round down if we are doing logical
// subpartitioning. Note that if we round up, we will have
// some PAs in some ESPs that are not doing any work. But,
// this will help to minimize the effect of any imbalance,
// because even though some ESPs must access more partitions
// than others, they will access all partitions asynchronously.
if (logPartType ==
LogPhysPartitioningFunction::PA_PARTITION_GROUPING)
{
// Increase numPAs by numEsps-1 so that the truncation
// below will result in rounding the # of PAs up.
numPAs += numEsps - 1;
numPAs = (numPAs / numEsps) * numEsps;
}
else // logical subpartitioning
{
// Round down. We round down because otherwise, we would have
// multiple ESPs trying to access a portion of the same disk.
numPAs = (numPAs / numEsps) * numEsps;
}
} // end if numPAs is not a multiple of numEsps
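// A hypothetical illustration of the rounding above: with numPAs = 10
// and numEsps = 4, PA partition grouping rounds up to 12 PAs (3 per
// ESP), while logical subpartitioning rounds down to 8 PAs (2 per ESP).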
if ((numPAs / numEsps) > maxPAsPerProcess)
{
// We have exceeded the maximum number of PAs per process, so we must
// reduce the number to the limit. This will result in synchronous
// access to some partitions. We will not be able to preserve the order
// via a merge of sorted streams, so if that's required give up now.
if (mergeOfSortedStreams)
return NULL;
// Reduce the number of PAs so that the number of PAs per
// process will be below the limit.
numPAs = maxPAsPerProcess * numEsps;
}
usePapa = (usePapa OR numPAs > numEsps);
// The number of cpus executing DP2's cannot be more than the number
// of PAs. In other words, cannot be using more cpus than there are
// streams.
currentCountOfCPUs = MINOF(currentCountOfCPUs,numPAs);
// ---------------------------------------------------------------------
// create a partitioning function for the scan node
// ---------------------------------------------------------------------
if (logPartType == LogPhysPartitioningFunction::PA_PARTITION_GROUPING AND
physicalPartFunc->isASinglePartitionPartitioningFunction() AND
numPAs == 1 AND NOT usePapa)
{
// Shortcut, no parallelism, no PAPA, and no logical
// partitioning at all. For this case ONLY we create a single
// partition partitioning function. The DP2 exchange node above
// will recognize this special case.
logPhysPartFunc = new(CmpCommon::statementHeap())
SinglePartitionPartitioningFunction(
physicalPartFunc->getNodeMap()
->copy(CmpCommon::statementHeap())
);
logPhysPartFunc->createPartitioningKeyPredicates(); // just to be nice
}
else
{
// Make the logphys partitioning function which describes the way
// in which we will actually perform the query. Each partition of the
// logphys part function will correspond to one DP2 session.
LogPhysPartitioningFunction *lpf = new(CmpCommon::statementHeap())
LogPhysPartitioningFunction(
logicalPartFunc,
physicalPartFunc,
logPartType,
numPAs,
usePapa,
shouldUseSynchronousAccess);
// lpf becomes const once added to synthLogProp, so calculate all
// the info here.
lpf->createPartitioningKeyPredicates();
logPhysPartFunc = lpf;
// Make a new partSearchKey with the partitioning key preds of
// the partitioning function, if there are any. Note that ignoring
// the part key preds will result in a wrong answer if we use
// PA_PARTITION_GROUPING, since the PA node is the node responsible
// for the grouping. If it doesn't select a subgroup of partitions,
// too much data may be returned. For now we only consider a
// search key for the PA node, MDAM to be implemented later.
// MDAM will be useful for combining user-specified part key preds
// with logicalPartFunc->getPartitioningKeyPredicates().
if (NOT (logicalPartFunc->getPartitioningKeyPredicates().isEmpty() &&
logicalPartFunc->usesFSForPartitionSelection()))
{
// the key preds have the group's char. inputs and the
// partition input variables available
ValueIdSet
availInputs(getGroupAttr()->getCharacteristicInputs());
ValueIdSet dummy;
SearchKey *newPartSearchKey =
logicalPartFunc->createSearchKey(indexDesc, availInputs, dummy);
if(newPartSearchKey)
{
partSearchKey = newPartSearchKey;
// again, for PA_PARTITION_GROUPING we have to interpret
// all of the partitioning key predicates!
CMPASSERT((partSearchKey->getKeyPredicates() ==
logicalPartFunc->getPartitioningKeyPredicates()) OR
(logPartType !=
LogPhysPartitioningFunction::PA_PARTITION_GROUPING));
}
}
}
// location is in master or ESP for Hive and HBase tables
if ((indexDesc->getPrimaryTableDesc()->getNATable()->isHiveTable()) ||
(indexDesc->getPrimaryTableDesc()->getNATable()->isHbaseTable()))
location = EXECUTE_IN_MASTER_AND_ESP;
// Should never be a sort order type requirement in DP2
CMPASSERT(rppForMe->getSortOrderTypeReq() == NO_SOT);
PartitioningFunction* dp2SortOrderPartFunc = NULL;
// Synthesize the dp2SortOrderPartFunc if the sort key is not empty
if (NOT sortOrder.isEmpty())
dp2SortOrderPartFunc = physicalPartFunc;
PushDownProperty* pushDownProperty = NULL;
const PushDownRequirement* pdr = rppForMe->getPushDownRequirement();
if ( pdr ) {
// Depending on the pushdown requirement, generate a colocation
// or a CS push-down property.
if ( PushDownCSRequirement::isInstanceOf(pdr) )
pushDownProperty = new (CmpCommon::statementHeap())
PushDownCSProperty(physicalPartFunc, partSearchKey);
else {
if ( PushDownColocationRequirement::isInstanceOf(pdr) )
pushDownProperty = new (CmpCommon::statementHeap())
PushDownColocationProperty(physicalPartFunc->getNodeMap());
else
CMPASSERT(1==0);
}
}
// ---------------------------------------------------------------------
// create a physical property object
// ---------------------------------------------------------------------
sppForMe = new (CmpCommon::statementHeap()) PhysicalProperty(
sortOrder,
NO_SOT, // Don't synthesize a sort order type until the exchange node
dp2SortOrderPartFunc,
logPhysPartFunc,
location,
SOURCE_PERSISTENT_TABLE,
indexDesc,
partSearchKey,
pushDownProperty);
DP2CostDataThatDependsOnSPP *dp2CostInfoPtr =
new HEAP DP2CostDataThatDependsOnSPP(dp2CostInfo);
sppForMe->setDP2CostThatDependsOnSPP(dp2CostInfoPtr);
// ---------------------------------------------------------------------
// Store more information about the decisions made in the synthesized
// property
// ---------------------------------------------------------------------
sppForMe->setCurrentCountOfCPUs(currentCountOfCPUs);
return sppForMe;
} // RelExpr::synthDP2PhysicalProperty()
//<pb>
// -----------------------------------------------------------------------
// FileScan::synthHiveScanPhysicalProperty()
// Synthesize physical property for a Hive table scan node,
// running in the master or an ESP
// -----------------------------------------------------------------------
PhysicalProperty * FileScan::synthHiveScanPhysicalProperty(
const Context *context,
const Lng32 planNumber,
ValueIdList &sortOrderVEG)
{
PhysicalProperty *sppForMe = NULL;
PartitioningFunction *myPartFunc = NULL;
// my required phys props (always non-NULL)
const ReqdPhysicalProperty* rppForMe = context->getReqdPhysicalProperty();
PartitioningRequirement * partReq = rppForMe->getPartitioningRequirement();
PlanExecutionEnum location = EXECUTE_IN_MASTER_AND_ESP;
PartitioningFunction * ixDescPartFunc = indexDesc_->getPartitioningFunction();
Lng32 numESPs = 1;
// CQDs related to # of ESPs for a Hive table scan
double bytesPerESP = getDefaultAsDouble(HIVE_MIN_BYTES_PER_ESP_PARTITION);
Lng32 maxESPs = getDefaultAsLong(HIVE_MAX_ESPS);
Lng32 numESPsPerDataNode = getDefaultAsLong(HIVE_NUM_ESPS_PER_DATANODE);
Lng32 numSQNodes = HHDFSMasterHostList::getNumSQNodes();
// minimum # of ESPs required by the parent
Lng32 minESPs = (partReq ? partReq->getCountOfPartitions() : 1);
if (partReq && partReq->castToRequireApproximatelyNPartitions())
minESPs = partReq->castToRequireApproximatelyNPartitions()->
getCountOfPartitionsLowBound();
NABoolean requiredESPsFixed =
partReq && partReq->castToFullySpecifiedPartitioningRequirement();
const HHDFSTableStats *tableStats = hiveSearchKey_->getHDFSTableStats();
// stats for partitions/buckets selected by predicates
HHDFSStatsBase selectedStats;
hiveSearchKey_->accumulateSelectedStats(selectedStats);
// limit the number of ESPs to HIVE_NUM_ESPS_PER_DATANODE * nodes
maxESPs = MAXOF(MINOF(numSQNodes*numESPsPerDataNode, maxESPs),1);
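// E.g. (hypothetical CQD settings): 4 SQ nodes, HIVE_NUM_ESPS_PER_DATANODE = 2
// and HIVE_MAX_ESPS = 32 give maxESPs = MAXOF(MINOF(4*2, 32), 1) = 8.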
// check for ATTEMPT_ESP_PARALLELISM CQD
if (CURRSTMT_OPTDEFAULTS->attemptESPParallelism() == DF_OFF)
maxESPs = 1;
NABoolean useLocality = NodeMap::useLocalityForHiveScanInfo();
// Take the smallest # of ESPs in the allowed range as a start
numESPs = MINOF(minESPs, maxESPs);
// We can adjust #ESPs only when the required ESPs is not fully specified
// from the parent.
if ( !requiredESPsFixed ) {
// following are soft adjustments to numESPs, within the allowed range
double numESPsBasedOnTotalSize = 1;
// adjust numESPs based on the HIVE_MIN_BYTES_PER_ESP_PARTITION CQD
if (bytesPerESP > 1.01)
numESPsBasedOnTotalSize = selectedStats.getTotalSize()/(bytesPerESP-1.0);
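// E.g. (hypothetical numbers): ~10 GB of selected data with
// HIVE_MIN_BYTES_PER_ESP_PARTITION = 512 MB suggests roughly 20 ESPs,
// which is then bounded by maxESPs below.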
if (numESPsBasedOnTotalSize >= maxESPs)
numESPs = maxESPs;
else
numESPs = MAXOF(numESPs, (Int32) ceil(numESPsBasedOnTotalSize));
// if we use locality, generously increase # of ESPs to cover all the nodes
if (useLocality &&
maxESPs >= numSQNodes &&
(numESPs > numSQNodes / 2 ||
numESPs > numSQNodes - 10))
numESPs = MAXOF(numESPs, numSQNodes);
}
if (numESPs > 1)
{
// Try to make the # of ESPs a factor, the same or a multiple
// of the # of SQ nodes to avoid an imbalance. If we use locality,
// make the # of ESPs a multiple of the # of nodes for now.
double allowedDev = 1.0 + ActiveSchemaDB()->getDefaults().getAsDouble(
HIVE_NUM_ESPS_ROUND_DEVIATION)/100.0;
Lng32 maxRoundedESPs = MINOF((Lng32) (numESPs * allowedDev), maxESPs);
Lng32 minRoundedESPs = MAXOF((Lng32) (numESPs / allowedDev), minESPs);
Lng32 delta = 0;
// starting with numESPs, search in ever larger
// circles until we find a "nice" number
NABoolean done = FALSE;
while (! done)
{
Lng32 numOutOfRange = 0;
// try i=+1 and i=-1 in this order
for (Lng32 i=1; i<2 && !done; i=((i==1) ? -1 : 2))
{
// our candidate number, c is numESPs +/- delta
Lng32 c = numESPs + i*delta;
if (c >= minRoundedESPs && c <= maxRoundedESPs)
{
NABoolean canUse = TRUE;
if ( partReq &&
partReq->castToRequireApproximatelyNPartitions() &&
!( ((RequireApproximatelyNPartitions*)partReq)->
isPartitionCountWithinRange(c) ) )
canUse = FALSE;
// let's check if we like this number c
// - same as or factor of # of SQ nodes
// - multiple of # of SQ nodes
// - multiple of # of SQ nodes + factor of # of SQ nodes
if ((c % numSQNodes == 0 ||
(! useLocality &&
(numSQNodes % c == 0 ||
(numSQNodes % (c % numSQNodes) == 0 && (c % numSQNodes > 1))))) && canUse )
{
// pick this candidate
numESPs = c;
done = TRUE;
}
}
else
if (++numOutOfRange >= 2)
done = TRUE; // exceeded both limits, leave numESPs unchanged
} // for
// widen the circle by 1
delta++;
} // end while loop to try getting a "nice" number
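// A hypothetical walk of this search: with numESPs = 10, numSQNodes = 8
// and useLocality TRUE, the candidates tried are 10, 11, 9, 12, 8 (in
// ever-widening circles); 8 is the first multiple of the node count, so
// numESPs becomes 8, provided 8 is within [minRoundedESPs, maxRoundedESPs].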
} // end if numESPs > 1
NodeMap* myNodeMap = NULL;
if (numESPs > 1)
{
// create a HASH2 partitioning function with numESPs partitions
// and a RandomNum as the partitioning key (i.e. no usable part key)
const HHDFSTableStats *tableStats = hiveSearchKey_->getHDFSTableStats();
myNodeMap = new(CmpCommon::statementHeap())
NodeMap(CmpCommon::statementHeap(),
numESPs,
NodeMapEntry::ACTIVE, NodeMap::HIVE);
PartitioningFunction* pf = getTableDesc()
->getClusteringIndex() ->getPartitioningFunction();
NABoolean useHash2Only =
CmpCommon::getDefault(HIVE_USE_HASH2_AS_PARTFUNCION) == DF_ON;
if ( useHash2Only ||
tableStats->getNumOfConsistentBuckets() == 0 || pf==NULL )
{
ItemExpr *randNum = new(CmpCommon::statementHeap()) RandomNum(NULL, TRUE);
randNum->synthTypeAndValueId();
ValueIdSet partKey;
partKey.insert(randNum->getValueId());
ValueIdList partKeyList(partKey);
myPartFunc = new(CmpCommon::statementHeap())
Hash2PartitioningFunction(partKey,
partKeyList,
numESPs,
myNodeMap);
} else {
ValueIdSet partKey = pf->getPartitioningKey();
ValueIdList partKeyList(partKey);
myPartFunc = new(CmpCommon::statementHeap())
HivePartitioningFunction(partKey,
partKeyList,
numESPs,
myNodeMap);
}
myPartFunc->createPartitioningKeyPredicates();
}
else
{
myNodeMap = new(CmpCommon::statementHeap())
NodeMap(CmpCommon::statementHeap(),
1,
NodeMapEntry::ACTIVE, NodeMap::HIVE);
myPartFunc = new(CmpCommon::statementHeap())
SinglePartitionPartitioningFunction(myNodeMap);
}
// create a very simple physical property for now, no sort order
// and no partitioning key for now
sppForMe = new(CmpCommon::statementHeap()) PhysicalProperty(myPartFunc,
location);
return sppForMe;
}
RangePartitionBoundaries * createRangePartitionBoundariesFromStats
(const IndexDesc* idesc,
HistogramSharedPtr& hist,
Lng32 numberOfPartitions,
const NAColumnArray & partColArray,
const ValueIdList& partitioningKeyColumnsOrder,
const Int32 statsColsCount,
NAMemory* heap);
//
// Parameter partns:
//
// On input: the desired # of partitions
// On output: the final scaled-to # of partitions
//
RangePartitioningFunction*
FileScan::createRangePartFuncForHbaseTableUsingStats(
Int32& partns,
const ValueIdSet& partitioningKeyColumns,
const ValueIdList& partitioningKeyColumnsList,
const ValueIdList& partitioningKeyColumnsOrder
)
{
Int32 bytesPerESP = getDefaultAsLong(HBASE_MIN_BYTES_PER_ESP_PARTITION);
NABoolean useMCSplit = (CmpCommon::getDefault(HBASE_RANGE_PARTITIONING_MC_SPLIT) == DF_ON);
// If the partition key has more than one column but MC-stats-based
// partitioning is disabled, then give up.
if ( partns == 1 ||
(!useMCSplit && (partitioningKeyColumns.entries() != 1 )))
return NULL;
// do not split the SMD or UMD table for now.
if ( indexDesc_->getPrimaryTableDesc()->getNATable()->isSMDTable() ||
indexDesc_->getPrimaryTableDesc()->getNATable()->isUMDTable() )
return NULL;
// Now consider the stats
const ColStatDescList &primaryTableCSDL = indexDesc_->getPrimaryTableDesc()->getTableColStats();
// Get the key columns from the partKey since the
// part key in the part. func. contains veg references.
ValueId leadingColumn;
NAColumnArray partKeyColArray;
// if we have a partitioning key, take its first column,
// otherwise take the first column of the clustering key
if (indexDesc_->getPartitioningKey().entries() > 0)
{
leadingColumn = indexDesc_->getPartitioningKey()[0];
for (CollIndex j =0; j < partitioningKeyColumns.entries(); j++)
{
partKeyColArray.insert(
indexDesc_-> getNAFileSet()->getPartitioningKeyColumns()[j]
);
partKeyColArray.setAscending(j,
indexDesc_->getNAFileSet()->getPartitioningKeyColumns().
isAscending(j)
);
}
} else {
leadingColumn = indexDesc_-> getIndexKey()[0];
for (CollIndex j =0; j < partitioningKeyColumns.entries(); j++)
{
partKeyColArray.insert(
indexDesc_-> getNAFileSet()->getIndexKeyColumns()[j]
);
partKeyColArray.setAscending(j,
indexDesc_-> getNAFileSet()->getIndexKeyColumns().
isAscending(j)
);
}
}
// Char types with a non-ISO88591 charset are currently not supported by the splitting logic.
for (CollIndex i = 0; i < partKeyColArray.entries(); i ++)
{
const NAType* nt = partKeyColArray.getColumn(i)->getType();
if ((nt->getTypeQualifier() == NA_CHARACTER_TYPE) && (((CharType*)nt)->getCharSet() != CharInfo::ISO88591))
return NULL;
}
CollIndex i;
if (primaryTableCSDL.getColStatDescIndexForColumn(i, // out
leadingColumn,
partKeyColArray))
{
ColStatsSharedPtr colStats = primaryTableCSDL[i]->getColStats();
const NAFileSet* fset = indexDesc_->getNAFileSet();
Lng32 recLength = fset->getRecordLength();
if ( (colStats->getRowcount() * recLength) < CostScalar(bytesPerESP) )
return NULL;
if ( !colStats->isOrigFakeHist() )
{
// Find a new set of boundary values which will evenly divide
// the whole table into partns partitions.
HistogramSharedPtr hist = NULL;
if (colStats->getStatColumns().entries() > 1)
hist = colStats->transformOnIntervalsForMC(partns);
else
hist = colStats->transformOnIntervals(partns);
RangePartitionBoundaries * rpb =
createRangePartitionBoundariesFromStats(
indexDesc_,
hist,
partns,
partKeyColArray,
partitioningKeyColumnsOrder,
colStats->getStatColumns().entries(),
STMTHEAP);
if ( !rpb )
return NULL;
// Finally create a new range partitioned partition function,
// with node map set to NULL.
RangePartitioningFunction* newPartFunc = new (STMTHEAP)
RangePartitioningFunction(
partitioningKeyColumns,
partitioningKeyColumnsList,
partitioningKeyColumnsOrder,
rpb, NULL, STMTHEAP);
newPartFunc->createPartitioningKeyPredicates();
return newPartFunc;
}
}
// no stats for the leading key columns, or faked stats. Give up.
return NULL;
}
// -----------------------------------------------------------------------
// FileScan::synthHbaseScanPhysicalProperty()
// Synthesize physical property for an HBase table scan node,
// running in the master or an ESP
// -----------------------------------------------------------------------
PhysicalProperty * FileScan::synthHbaseScanPhysicalProperty(
const Context *context,
const Lng32 planNumber,
ValueIdList &sortOrderVEG)
{
// my required phys props (always non-NULL)
const ReqdPhysicalProperty* rppForMe = context->getReqdPhysicalProperty();
PartitioningRequirement * partReq = rppForMe->getPartitioningRequirement();
PartitioningFunction* myPartFunc = NULL;
NABoolean partnsScaled = FALSE;
Lng32 oldPartns = 0;
Lng32 numESPs = 1;
PartitioningFunction * ixDescPartFunc = NULL;
// Nothing we can do if the requirement is a single partition func
if ( partReq && partReq->castToRequireExactlyOnePartition() ) {
myPartFunc = new (CmpCommon::statementHeap())
SinglePartitionPartitioningFunction();
} else {
//////////////////////////////////////
// Perform the scaling
//////////////////////////////////////
ixDescPartFunc = indexDesc_->getPartitioningFunction();
//////////////////////////////////////
// Compute the desirable #ESPs first
//////////////////////////////////////
// minimum # of ESPs required by the parent
Lng32 minESPs = (partReq ?
partReq->getCountOfPartitions() :
CURRSTMT_OPTDEFAULTS->getMaximumDegreeOfParallelism());
if (partReq && partReq->castToRequireApproximatelyNPartitions())
minESPs = partReq->castToRequireApproximatelyNPartitions()->
getCountOfPartitionsLowBound();
Lng32 maxESPs = 1;
NADefaults &defs = ActiveSchemaDB()->getDefaults();
// check for ATTEMPT_ESP_PARALLELISM CQD
if ( !(CURRSTMT_OPTDEFAULTS->attemptESPParallelism() == DF_OFF) ) {
// CQDs related to # of ESPs for a HBase table scan
maxESPs = getDefaultAsLong(HBASE_MAX_ESPS);
Int32 numOfPartitions = -1;
if ( ixDescPartFunc )
numOfPartitions = ixDescPartFunc->getCountOfPartitions();
if ( maxESPs == 0 && minESPs <= numOfPartitions ) {
minESPs = maxESPs = numOfPartitions;
} else {
NABoolean fakeEnv = FALSE;
CollIndex totalESPsAllowed = defs.getTotalNumOfESPsInCluster(fakeEnv);
if ( !fakeEnv ) {
// limit the number of ESPs to max(totalESPsAllowed, HBASE_MAX_ESPS)
maxESPs = MAXOF(MINOF(totalESPsAllowed, maxESPs),1);
if (!partReq && minESPs == 1) {
minESPs = rppForMe->getCountOfPipelines();
if (ixDescPartFunc && (CmpCommon::getDefault(LIMIT_HBASE_SCAN_DOP) == DF_ON)) {
minESPs = MINOF(minESPs, ixDescPartFunc->getCountOfPartitions());
}
}
if ( getDefaultAsLong(AFFINITY_VALUE) != -2 && ixDescPartFunc ) {
Int32 numOfUniqueNodes =
ixDescPartFunc->getNodeMap()->getNumberOfUniqueNodes();
// The # of ESPs reading from HBase tables is capped by
// the # of unique nodes or region servers.
if ( numOfUniqueNodes > 0 )
minESPs = MINOF(minESPs, numOfUniqueNodes);
}
}
else {
maxESPs = totalESPsAllowed;
}
}
}
numESPs = MINOF(minESPs, maxESPs);
NABoolean performStatsSplit =
(CmpCommon::getDefault(HBASE_STATS_PARTITIONING) != DF_OFF &&
!(ixDescPartFunc && ixDescPartFunc->isAHash2PartitioningFunction()));
if (partReq && partReq->castToRequireReplicateNoBroadcast()) {
myPartFunc =
partReq->castToRequireReplicateNoBroadcast()->
getPartitioningFunction()->copy();
}
else if ( ixDescPartFunc )
{
myPartFunc = ixDescPartFunc->copy();
oldPartns = myPartFunc->getCountOfPartitions();
Lng32 partns = numESPs;
const RangePartitioningFunction* myPartFuncAsRange = NULL;
if ( (myPartFuncAsRange=myPartFunc->castToRangePartitioningFunction()) )
{
RangePartitioningFunction* newPartFunc = NULL;
if (performStatsSplit)
newPartFunc =
createRangePartFuncForHbaseTableUsingStats(partns,
myPartFuncAsRange->getPartitioningKey(),
myPartFuncAsRange->getKeyColumnList(),
myPartFuncAsRange->getOrderOfKeyValues()
);
if ( newPartFunc )
myPartFunc = newPartFunc;
else
myPartFunc->scaleNumberOfPartitions(partns);
} else {
myPartFunc->scaleNumberOfPartitions(partns);
}
partnsScaled = (oldPartns != partns);
} else {
// A NULL ixDescPartFunc implies the table is single partitioned
// (1 region only).
if ( performStatsSplit ) {
Lng32 partns = numESPs;
const ValueIdList& keyColumnList = indexDesc_->getPartitioningKey();
const ValueIdList& orderOfKeyValues = indexDesc_->getOrderOfKeyValues();
ValueIdSet keyColumnSet(keyColumnList);
RangePartitioningFunction* newPartFunc =
createRangePartFuncForHbaseTableUsingStats(partns,
keyColumnSet,
keyColumnList,
orderOfKeyValues);
if ( newPartFunc ) {
myPartFunc = newPartFunc;
// setup partKeys_
ValueIdSet externalInputs = getGroupAttr()->getCharacteristicInputs();
ValueIdSet dummySet;
// Create and set the Searchkey for the partitioning key:
partKeys_ = new (CmpCommon::statementHeap())
SearchKey(keyColumnList,
orderOfKeyValues,
externalInputs,
NOT getReverseScan(),
selectionPred(),
*disjunctsPtr_,
dummySet, // needed by interface but not used here
indexDesc_
);
partnsScaled = TRUE;
} else
myPartFunc = new(CmpCommon::statementHeap())
SinglePartitionPartitioningFunction();
} else
myPartFunc = new(CmpCommon::statementHeap())
SinglePartitionPartitioningFunction();
}
}
if (myPartFunc->getNodeMap() == NULL || partnsScaled ) {
NodeMap* myNodeMap = new(CmpCommon::statementHeap())
NodeMap(CmpCommon::statementHeap(),
myPartFunc->getCountOfPartitions(),
NodeMapEntry::ACTIVE,
NodeMap::HBASE);
myPartFunc->replaceNodeMap(myNodeMap);
}
// colocated ESP logic
if ( (CmpCommon::getDefault(TRAF_ALLOW_ESP_COLOCATION) == DF_ON) AND
ixDescPartFunc ) {
// get region nodeMap which has regions nodeIds populated
NodeMap* myNodeMap = (NodeMap*) myPartFunc->getNodeMap();
const NodeMap* regNodeMap = ixDescPartFunc->getNodeMap();
Int32 m= myNodeMap->getNumEntries();
Int32 n = regNodeMap->getNumEntries();
// m : n allocation strategy where m < n using most popular node num
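// Hypothetical example: m = 2 ESPs over n = 6 regions gives
// regionsPerEsp = 3; ESP 0 is placed on the most popular node among
// regions [0,3), ESP 1 on the most popular node among regions [3,6).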
if (m < n) {
Lng32 regionsPerEsp = n / m;
Lng32 beginPos = 0;
for (Lng32 index = 0; (index < m && beginPos < n); index++) {
Lng32 endPos = beginPos + regionsPerEsp;
Lng32 popularNodeId =
regNodeMap->getPopularNodeNumber(beginPos, endPos);
myNodeMap->setNodeNumber(index, popularNodeId);
beginPos = endPos;
}
myNodeMap->smooth(gpClusterInfo->numOfSMPs());
} else if (m == n) { // 1:1 allocation strategy
for (Lng32 index = 0; index < n; index++) {
myNodeMap->setNodeNumber(index, regNodeMap->getNodeNumber(index));
}
}
}
PhysicalProperty * sppForMe =
new(CmpCommon::statementHeap())
PhysicalProperty(sortOrderVEG,
ESP_NO_SORT_SOT,
NULL, /* no dp2 part func*/
myPartFunc,
EXECUTE_IN_MASTER_AND_ESP,
SOURCE_VIRTUAL_TABLE);
// remove anything that's not covered by the group attributes
sppForMe->enforceCoverageByGroupAttributes (getGroupAttr()) ;
// -----------------------------------------------------------------------
// Vector to put all costing data that is computed at synthesis time
// Make it a local variable for now. If we ever reach the end of
// this routine create a variable from the heap, initialize it with this,
// and then set the sppForMe slot.
// -----------------------------------------------------------------------
DP2CostDataThatDependsOnSPP dp2CostInfo;
// ---------------------------------------------------------------------
// Estimate the number of active partitions and other costing
// data that depends on SPP:
// ---------------------------------------------------------------------
computeDP2CostDataThatDependsOnSPP(*myPartFunc // in/out
,dp2CostInfo //out
,*indexDesc_ // in
,*partKeys_ // in
,*getGroupAttr() //in
,*context // in
,CmpCommon::statementHeap() // in
, *this
);
DP2CostDataThatDependsOnSPP *dp2CostInfoPtr =
new HEAP DP2CostDataThatDependsOnSPP(dp2CostInfo);
sppForMe->setDP2CostThatDependsOnSPP(dp2CostInfoPtr);
sppForMe->setCurrentCountOfCPUs(dp2CostInfo.getCountOfCPUsExecutingDP2s());
return sppForMe ;
}
//<pb>
// -----------------------------------------------------------------------
// FileScan::costMethod()
// Obtain a pointer to a CostMethod object providing access
// to the cost estimation functions for nodes of this class.
// -----------------------------------------------------------------------
CostMethod*
FileScan::costMethod() const
{
static THREAD_P CostMethodFileScan *m = NULL;
if (m == NULL)
m = new (GetCliGlobals()->exCollHeap()) CostMethodFileScan();
return m;
} // FileScan::costMethod()
//<pb>
//==============================================================================
// Synthesize physical properties for FileScan operator's current plan
// extracted from a specified context.
//
// Input:
// myContext -- specified context containing this operator's current plan.
//
// planNumber -- plan's number within the plan workspace. Used optionally for
// synthesizing partitioning functions but unused in this
// derived version of synthPhysicalProperty().
//
// Output:
// none
//
// Return:
// Pointer to this operator's synthesized physical properties.
//
//==============================================================================
PhysicalProperty*
FileScan::synthPhysicalProperty(const Context* myContext,
const Lng32 planNumber,
PlanWorkSpace *pws)
{
PhysicalProperty *sppForMe;
// synthesized order
ValueIdList sortOrderVEG = NULL;
sortOrderVEG = indexDesc_->getOrderOfKeyValues();
// ---------------------------------------------------------------------
// Remove from the sortOrder those columns that are equal to constants
// or input values. Also, remove from sortOrder those columns that are
// not in the characteristic outputs.
// It is possible that the characteristic outputs
// will not contain the base columns, but rather expressions
// involving the base columns. For example, if the user specified
// "select b+1 from t order by 1;", and "b" is the primary key,
// then the characteristic output will only contain the valueId for the
// expression "b+1" - it will not contain the value id for "b". So,
// even though "b" is the primary key, we will fail to find it in the
// characteristic outputs and thus we will not synthesize the sort key.
// To solve this, we first check for the base column in the
// characteristic outputs. If we find it, great. But if we don't, we
// need to see if the base column is included in the SIMPLIFIED form
// of the characteristic outputs. If it is, then we need to change
// the sort key to include the valueId for the expression "b+1" instead
// of "b". This is because we cannot synthesize anything in the sort
// key that is not in the characteristic outputs, but if something
// is sorted by "b" then it is surely sorted by "b+1". We have
// coined the expression "complify" to represent this operation.
// ---------------------------------------------------------------------
sortOrderVEG.removeCoveredExprs(getGroupAttr()->getCharacteristicInputs());
sortOrderVEG.complifyAndRemoveUncoveredSuffix(
getGroupAttr()->getCharacteristicOutputs()) ;
// ---------------------------------------------------------------------
// if this is a reverse scan, apply an inversion function to
// each of the ordering columns
// ---------------------------------------------------------------------
if (getReverseScan())
{
ItemExpr *inverseCol;
for (Lng32 i = 0; i < (Lng32)sortOrderVEG.entries(); i++)
{
ItemExpr *ix = sortOrderVEG[i].getItemExpr();
if (ix->getOperatorType() == ITM_INVERSE)
{
// remove the inverse operator, the reverse scan
// cancels it out
inverseCol = ix->child(0);
}
else
{
// add an inverse operator on top
inverseCol = new(CmpCommon::statementHeap())
InverseOrder(ix);
inverseCol->synthTypeAndValueId();
}
sortOrderVEG[i] = inverseCol->getValueId();
}
}
if (isHiveTable())
return synthHiveScanPhysicalProperty(myContext, planNumber, sortOrderVEG);
else if (isHbaseTable())
return synthHbaseScanPhysicalProperty(myContext, planNumber, sortOrderVEG);
// ---------------------------------------------------------------------
// call a static helper method shared between scan and ins/upd/del
// ---------------------------------------------------------------------
if ((sppForMe = synthDP2PhysicalProperty(myContext,
sortOrderVEG,
indexDesc_,
partKeys_)) == NULL)
return NULL;
// ---------------------------------------------------------------------
// Apply partitioning key predicates if necessary
// ---------------------------------------------------------------------
if (sppForMe->getPartitioningFunction()->
castToLogPhysPartitioningFunction())
{
LogPhysPartitioningFunction *logPhysPartFunc =
(LogPhysPartitioningFunction *) // cast away const
sppForMe->getPartitioningFunction()->
castToLogPhysPartitioningFunction();
LogPhysPartitioningFunction::logPartType logPartType =
logPhysPartFunc->getLogPartType();
if (logPartType ==
LogPhysPartitioningFunction::LOGICAL_SUBPARTITIONING OR
logPartType ==
LogPhysPartitioningFunction::HORIZONTAL_PARTITION_SLICING)
{
logPhysPartFunc->createPartitioningKeyPredicates();
CMPASSERT(FALSE);
// also need to apply the part key preds and pick up the PIVs in
// FileScan::preCodeGen if we ever use this
}
}
// try to determine if the scan will take place only with one partition.
// The condition for this to happen is that a local predicate T.A = const
// exists and the table T is partitioned on A.
if ( partKeys_ ) {
// get the part key predicate. For local predicate 'T.A = 12',
// the predicate = { vegRef { T.A, '12' } }
const ValueIdSet& pkPredicate = partKeys_->getKeyPredicates();
// get the partitioning key of the indexDesc_. If T is partitioned
// on T.A, then pkList is [ T.A ]
const ValueIdList & pkList = indexDesc_->getPartitioningKey();
// Below we check whether every element of the partitioning key of
// the current indexDesc_ is covered by an equality local predicate.
UInt32 size = pkList.entries();
NABoolean accessSinglePartition = size > 0;
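// accessSinglePartition starts out TRUE only for a non-empty
// partitioning key, and is reset to FALSE as soon as one key column
// lacks an equality local predicate.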
for (UInt32 i=0; i < size; i++)
{
if ( ! pkPredicate.containsAsEquiLocalPred(pkList[i]) ) {
accessSinglePartition = FALSE;
break;
}
}
sppForMe->setAccessOnePartition(accessSinglePartition);
}
return sppForMe;
} // FileScan::synthPhysicalProperty()
const PartitioningFunction * FileScan::getPartFunc() const
{
return getPhysicalProperty()->getPartitioningFunction();
}
void FileScan::addPartKeyPredsToSelectionPreds(
const ValueIdSet& partKeyPreds,
const ValueIdSet& pivs)
{
selectionPred() += partKeyPreds;
}
NABoolean FileScan::okToAttemptESPParallelism (
const Context* myContext, /*IN*/
PlanWorkSpace* pws, /*IN*/
Lng32& numOfESPs, /*IN,OUT*/
float& allowedDeviation, /*OUT*/
NABoolean& numOfESPsForced /*OUT*/)
{
const ReqdPhysicalProperty* rppForMe =
myContext->getReqdPhysicalProperty();
NABoolean result = FALSE;
DefaultToken parallelControlSettings =
getParallelControlSettings(rppForMe,
numOfESPs,
allowedDeviation,
numOfESPsForced);
if (parallelControlSettings == DF_OFF)
{
result = FALSE;
}
else if ((CmpCommon::getDefault(COMP_BOOL_64) == DF_ON) AND
(parallelControlSettings == DF_MAXIMUM) AND
CURRSTMT_OPTDEFAULTS->maxParallelismIsFeasible()
)
{
numOfESPs = rppForMe->getCountOfPipelines();
allowedDeviation = 0.0; // not used by leaves
result = TRUE;
}
else if (parallelControlSettings == DF_ON)
{
// Currently, forcing of the number of ESPs for a leaf
// is not supported. So, numOfESPsForced should always be FALSE.
if (NOT numOfESPsForced)
{
const Int32 optimalNumPAsPerEsp =
(Int32) getDefaultAsLong(PARTITION_ACCESS_NODES_PER_ESP);
// divide number of PAs by number of PAs per ESP and round up to
// the next highest number of ESPs if there is a remainder
numOfESPs = (numOfESPs+optimalNumPAsPerEsp-1) /
optimalNumPAsPerEsp;
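// Illustrative numbers: 10 PAs with 4 PAs per ESP gives
// (10+4-1)/4 = 3 ESPs.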
// Can't have more ESPs than the maximum
numOfESPs = MINOF(numOfESPs,rppForMe->getCountOfPipelines());
allowedDeviation = 0.0; // not used by leaves
}
result = TRUE;
}
else
{
// Otherwise, the user must have specified "SYSTEM" for the
// ATTEMPT_ESP_PARALLELISM default. This means it is up to the
// optimizer to decide.
// Return TRUE if the number of rows returned
// by child(0) exceeds the threshold from the defaults
// table. The recommended number of ESPs is also computed
// to be 1 process per <threshold> number of rows.
EstLogPropSharedPtr inLogProp = myContext->getInputLogProp();
EstLogPropSharedPtr outputLogProp = getGroupAttr()->outputLogProp(inLogProp);
const CostScalar rowCount =
(outputLogProp->getResultCardinality()).minCsOne();
const CostScalar numberOfRowsThreshold =
CURRSTMT_OPTDEFAULTS->numberOfRowsParallelThreshold();
numOfESPs = rppForMe->getCountOfPipelines();
if ( (rowCount > numberOfRowsThreshold) AND
(CmpCommon::getDefault(COMP_BOOL_128) == DF_ON)
)
{
Lng32 optimalNumOfESPs = MINOF(numOfESPs,
(Lng32)(rowCount / numberOfRowsThreshold).value());
// make numOfESPs as available level of parallelism
// 16*N, 8*N, 4*N,..., N,1 where N is the number of segments
Lng32 i = CURRSTMT_OPTDEFAULTS->getMaximumDegreeOfParallelism();
Lng32 MinParallelism =
MAXOF(CURRSTMT_OPTDEFAULTS->getMinimumESPParallelism(),optimalNumOfESPs);
while(i > MinParallelism)
i/=2;
numOfESPs = (i<MinParallelism) ? i*=2 : i;
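// Illustrative: with a maximum degree of parallelism of 32 and
// MinParallelism = 5, i halves 32 -> 16 -> 8 -> 4 and exits the loop;
// since 4 < MinParallelism, numOfESPs is doubled back up to 8.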
allowedDeviation = 0.0; // not used by scan
result = TRUE;
}
else
{
result = FALSE;
}
} // end if the user let the optimizer decide
return result;
} // FileScan::okToAttemptESPParallelism()
//<pb>
// -----------------------------------------------------------------------
// member functions for class DP2Scan
// -----------------------------------------------------------------------
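// costMethod() lazily allocates one cost model object per thread
// (THREAD_P) on the CLI globals heap and reuses it on later calls.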
CostMethod*
DP2Scan::costMethod() const
{
static THREAD_P CostMethodDP2Scan *m = NULL;
if (m == NULL)
m = new (GetCliGlobals()->exCollHeap()) CostMethodDP2Scan();
return m;
} // DP2Scan::costMethod()
//<pb>
//==============================================================================
// Synthesize physical properties for Describe operator's current plan
// extracted from a specified context.
//
// Input:
// myContext -- specified context containing this operator's current plan.
//
// planNumber -- plan's number within the plan workspace. Used optionally for
// synthesizing partitioning functions but unused in this
// derived version of synthPhysicalProperty().
//
// Output:
// none
//
// Return:
// Pointer to this operator's synthesized physical properties.
//
//==============================================================================
PhysicalProperty*
Describe::synthPhysicalProperty(const Context* myContext,
const Lng32 planNumber,
PlanWorkSpace *pws)
{
//----------------------------------------------------------
// Create a node map with a single, active, wild-card entry.
//----------------------------------------------------------
NodeMap* myNodeMap = new(CmpCommon::statementHeap())
NodeMap(CmpCommon::statementHeap(),
1,
NodeMapEntry::ACTIVE);
//------------------------------------------------------------
// Synthesize a partitioning function with a single partition.
//------------------------------------------------------------
PartitioningFunction* myPartFunc =
new(CmpCommon::statementHeap())
SinglePartitionPartitioningFunction(myNodeMap);
PhysicalProperty * sppForMe =
new(CmpCommon::statementHeap())
PhysicalProperty(myPartFunc,
EXECUTE_IN_MASTER,
SOURCE_VIRTUAL_TABLE);
// remove anything that's not covered by the group attributes
sppForMe->enforceCoverageByGroupAttributes (getGroupAttr()) ;
return sppForMe;
} // Describe::synthPhysicalProperty()
//<pb>
//==============================================================================
// Synthesize physical properties for GenericUtilExpr operator's current plan
// extracted from a specified context.
//
// Input:
// myContext -- specified context containing this operator's current plan.
//
// planNumber -- plan's number within the plan workspace. Used optionally for
// synthesizing partitioning functions but unused in this
// derived version of synthPhysicalProperty().
//
// Output:
// none
//
// Return:
// Pointer to this operator's synthesized physical properties.
//
//==============================================================================
PhysicalProperty*
GenericUtilExpr::synthPhysicalProperty(const Context* myContext,
const Lng32 planNumber,
PlanWorkSpace *pws)
{
//----------------------------------------------------------
// Create a node map with a single, active, wild-card entry.
//----------------------------------------------------------
NodeMap* myNodeMap = new(CmpCommon::statementHeap())
NodeMap(CmpCommon::statementHeap(),
1,
NodeMapEntry::ACTIVE);
//------------------------------------------------------------
// Synthesize a partitioning function with a single partition.
//------------------------------------------------------------
PartitioningFunction* myPartFunc =
new(CmpCommon::statementHeap())
SinglePartitionPartitioningFunction(myNodeMap);
PhysicalProperty * sppForMe =
new(CmpCommon::statementHeap())
PhysicalProperty(myPartFunc,
EXECUTE_IN_MASTER,
SOURCE_VIRTUAL_TABLE);
// remove anything that's not covered by the group attributes
sppForMe->enforceCoverageByGroupAttributes (getGroupAttr()) ;
return sppForMe;
} // GenericUtilExpr::synthPhysicalProperty()
//<pb>
//==============================================================================
// Synthesize physical properties for FirstN operator's current plan
// extracted from a specified context.
// FirstN operator has the same properties as its child.
//
// Input:
// myContext -- specified context containing this operator's current plan.
//
// planNumber -- plan's number within the plan workspace. Used optionally for
// synthesizing partitioning functions but unused in this
// derived version of synthPhysicalProperty().
//
// Output:
// none
//
// Return:
// Pointer to this operator's synthesized physical properties.
//
//==============================================================================
PhysicalProperty*
FirstN::synthPhysicalProperty(const Context* context,
const Lng32 planNumber,
PlanWorkSpace *pws)
{
// ---------------------------------------------------------------------
// Simply propagate the child's physical property.
// ---------------------------------------------------------------------
const PhysicalProperty* const sppOfTheChild =
context->getPhysicalPropertyOfSolutionForChild(0);
PhysicalProperty* sppForMe = new (CmpCommon::statementHeap())
PhysicalProperty (*sppOfTheChild);
if (canExecuteInDp2())
sppForMe->setLocation(EXECUTE_IN_DP2);
else
sppForMe->setLocation(EXECUTE_IN_MASTER);
// remove anything that's not covered by the group attributes
sppForMe->enforceCoverageByGroupAttributes (getGroupAttr()) ;
return sppForMe;
}
//<pb>
//==============================================================================
// Synthesize physical properties for RelTransaction operator's current plan
// extracted from a specified context.
//
// Input:
// myContext -- specified context containing this operator's current plan.
//
// planNumber -- plan's number within the plan workspace. Used optionally for
// synthesizing partitioning functions but unused in this
// derived version of synthPhysicalProperty().
//
// Output:
// none
//
// Return:
// Pointer to this operator's synthesized physical properties.
//
//==============================================================================
PhysicalProperty*
RelTransaction::synthPhysicalProperty(const Context* myContext,
const Lng32 planNumber,
PlanWorkSpace *pws)
{
//----------------------------------------------------------
// Create a node map with a single, active, wild-card entry.
//----------------------------------------------------------
NodeMap* myNodeMap = new(CmpCommon::statementHeap())
NodeMap(CmpCommon::statementHeap(),
1,
NodeMapEntry::ACTIVE);
//------------------------------------------------------------
// Synthesize a partitioning function with a single partition.
//------------------------------------------------------------
PartitioningFunction* myPartFunc =
new(CmpCommon::statementHeap())
SinglePartitionPartitioningFunction(myNodeMap);
PhysicalProperty * sppForMe =
new(CmpCommon::statementHeap())
PhysicalProperty(myPartFunc,
EXECUTE_IN_MASTER,
SOURCE_VIRTUAL_TABLE);
// remove anything that's not covered by the group attributes
sppForMe->enforceCoverageByGroupAttributes (getGroupAttr()) ;
return sppForMe;
} // RelTransaction::synthPhysicalProperty()
//============================================================================
// Synthesize physical properties for RelSetTimeout operator's current plan
// extracted from a specified context.
//
// Input:
// myContext -- specified context containing this operator's current plan.
//
// planNumber -- plan's number within the plan workspace. Used optionally for
// synthesizing partitioning functions but unused in this
// derived version of synthPhysicalProperty().
//
// Output:
// none
//
// Return:
// Pointer to this operator's synthesized physical properties.
//
//============================================================================
PhysicalProperty*
RelSetTimeout::synthPhysicalProperty(const Context* myContext,
const Lng32 planNumber,
PlanWorkSpace *pws)
{
//----------------------------------------------------------
// Create a node map with a single, active, wild-card entry.
//----------------------------------------------------------
NodeMap* myNodeMap = new(CmpCommon::statementHeap())
NodeMap(CmpCommon::statementHeap(),
1,
NodeMapEntry::ACTIVE);
//------------------------------------------------------------
// Synthesize a partitioning function with a single partition.
//------------------------------------------------------------
PartitioningFunction* myPartFunc =
new(CmpCommon::statementHeap())
SinglePartitionPartitioningFunction(myNodeMap);
PhysicalProperty * sppForMe =
new(CmpCommon::statementHeap())
PhysicalProperty(myPartFunc,
EXECUTE_IN_MASTER,
SOURCE_VIRTUAL_TABLE);
// remove anything that's not covered by the group attributes
sppForMe->enforceCoverageByGroupAttributes (getGroupAttr()) ;
return sppForMe;
} // RelSetTimeout::synthPhysicalProperty()
//==============================================================================
// Synthesize physical properties for RelLock operator's current plan
// extracted from a specified context.
//
// Input:
// myContext -- specified context containing this operator's current plan.
//
// planNumber -- plan's number within the plan workspace. Used optionally for
// synthesizing partitioning functions but unused in this
// derived version of synthPhysicalProperty().
//
// Output:
// none
//
// Return:
// Pointer to this operator's synthesized physical properties.
//
//==============================================================================
PhysicalProperty*
RelLock::synthPhysicalProperty(const Context* myContext,
const Lng32 planNumber,
PlanWorkSpace *pws)
{
PartitioningFunction *myPartFunc = NULL;
const ReqdPhysicalProperty* rpp = myContext->getReqdPhysicalProperty();
if (rpp->getCountOfAvailableCPUs() <= 1)
parallelExecution_ = FALSE;
if (parallelExecution_)
{
myPartFunc =
tabIds_[0]->getClusteringIndex()->getPartitioningFunction();
if (!myPartFunc)
parallelExecution_ = FALSE;
else
{
PhysicalProperty * sppForMe = new(CmpCommon::statementHeap())
PhysicalProperty(myPartFunc,
EXECUTE_IN_ESP,
SOURCE_VIRTUAL_TABLE);
// remove anything that's not covered by the group attributes
sppForMe->enforceCoverageByGroupAttributes(getGroupAttr());
return sppForMe;
}
}
//----------------------------------------------------------
// Create a node map with a single, active, wild-card entry.
//----------------------------------------------------------
NodeMap* myNodeMap = new(CmpCommon::statementHeap())
NodeMap(CmpCommon::statementHeap(),
1,
NodeMapEntry::ACTIVE);
//------------------------------------------------------------
// Synthesize a partitioning function with a single partition.
//------------------------------------------------------------
myPartFunc =
new(CmpCommon::statementHeap())
SinglePartitionPartitioningFunction(myNodeMap);
PhysicalProperty * sppForMe =
new(CmpCommon::statementHeap())
PhysicalProperty(myPartFunc,
EXECUTE_IN_MASTER,
SOURCE_VIRTUAL_TABLE);
// remove anything that's not covered by the group attributes
sppForMe->enforceCoverageByGroupAttributes (getGroupAttr()) ;
return sppForMe;
} // RelLock::synthPhysicalProperty()
//<pb>
//==============================================================================
// Synthesize physical properties for ControlAbstractClass operator's current
// plan extracted from a specified context.
//
// Input:
// myContext -- specified context containing this operator's current plan.
//
// planNumber -- plan's number within the plan workspace. Used optionally for
// synthesizing partitioning functions but unused in this
// derived version of synthPhysicalProperty().
//
// Output:
// none
//
// Return:
// Pointer to this operator's synthesized physical properties.
//
//==============================================================================
PhysicalProperty*
ControlAbstractClass::synthPhysicalProperty(const Context* myContext,
const Lng32 planNumber,
PlanWorkSpace *pws)
{
//----------------------------------------------------------
// Create a node map with a single, active, wild-card entry.
//----------------------------------------------------------
NodeMap* myNodeMap = new(CmpCommon::statementHeap())
NodeMap(CmpCommon::statementHeap(),
1,
NodeMapEntry::ACTIVE);
//------------------------------------------------------------
// Synthesize a partitioning function with a single partition.
//------------------------------------------------------------
PartitioningFunction* myPartFunc =
new(CmpCommon::statementHeap())
SinglePartitionPartitioningFunction(myNodeMap);
PhysicalProperty * sppForMe =
new(CmpCommon::statementHeap())
PhysicalProperty(myPartFunc,
EXECUTE_IN_MASTER,
SOURCE_VIRTUAL_TABLE);
// remove anything that's not covered by the group attributes
sppForMe->enforceCoverageByGroupAttributes (getGroupAttr()) ;
return sppForMe;
} // ControlAbstractClass::synthPhysicalProperty()
//<pb>
// -----------------------------------------------------------------------
// methods for class Tuple
// -----------------------------------------------------------------------
// -----------------------------------------------------------------------
// Tuple::costMethod()
// Obtain a pointer to a CostMethod object providing access
// to the cost estimation functions for nodes of this class.
// -----------------------------------------------------------------------
CostMethod*
Tuple::costMethod() const
{
static THREAD_P CostMethodTuple *m = NULL;
if (m == NULL)
m = new (GetCliGlobals()->exCollHeap()) CostMethodTuple();
return m;
} // Tuple::costMethod()
//<pb>
//==============================================================================
// Synthesize physical properties for Tuple operator's current plan
// extracted from a specified context.
//
// Input:
// myContext -- specified context containing this operator's current plan.
//
// planNumber -- plan's number within the plan workspace. Used optionally for
// synthesizing partitioning functions but unused in this
// derived version of synthPhysicalProperty().
//
// Output:
// none
//
// Return:
// Pointer to this operator's synthesized physical properties.
//
// Note this is a close copy of the synthPhysicalProperty for
// IsolatedScalarUDF, so if we change anything here we should evaluate
// whether that method should change as well.
//
//==============================================================================
#pragma nowarn(262) // warning elimination
PhysicalProperty*
Tuple::synthPhysicalProperty(const Context* myContext,
const Lng32 planNumber,
PlanWorkSpace *pws)
{
CMPASSERT(myContext != NULL);
PlanExecutionEnum planExecutionLocation = EXECUTE_IN_MASTER_AND_ESP;
PartitioningRequirement *myPartReq = NULL;
PartitioningFunction *myPartFunc = NULL;
const ReqdPhysicalProperty* rppForMe = 0;
rppForMe = myContext->getReqdPhysicalProperty();
// -----------------------------------------------------------------
// decide on my partitioning function
// -----------------------------------------------------------------
myPartReq = rppForMe->getPartitioningRequirement();
if (myPartReq ) {
myPartFunc = myPartReq->realize(myContext);
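// A tuple above DP2 cannot produce a multi-partition hash, table-hash,
// range, or round-robin distribution, so such a realized requirement
// is refused below.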
if ( NOT rppForMe->executeInDP2() &&
(myPartFunc -> castToHashPartitioningFunction() ||
myPartFunc -> castToTableHashPartitioningFunction() ||
myPartFunc -> castToRangePartitioningFunction() ||
myPartFunc -> castToRoundRobinPartitioningFunction()
) && myPartFunc -> getCountOfPartitions() > 1
)
return NULL;
} else {
// If the tuple is in DP2, use the partition function in
// pushdown property (if any).
if ( rppForMe->executeInDP2() )
{
const PushDownRequirement* pdq = rppForMe->getPushDownRequirement();
if ( pdq AND PushDownCSRequirement::isInstanceOf(pdq) )
myPartFunc = (PartitioningFunction*)
((pdq->castToPushDownCSRequirement())->getPartFunc());
}
if ( myPartFunc == NULL )
myPartFunc = new(CmpCommon::statementHeap())
SinglePartitionPartitioningFunction();
}
// Make sure the Tuple node only produces its tuple when it is
// part of the partition to be produced. Do this by applying the
// partitioning key predicates which filter out the data of that
// partition.
if (myPartFunc->canProducePartitioningKeyPredicates())
{
myPartFunc->createPartitioningKeyPredicates();
if ( NOT rppForMe->executeInDP2() )
{
// for single partition and replication the predicates will be empty
if (NOT myPartFunc->getPartitioningKeyPredicates().isEmpty())
{
// ||opt we would need to add the partitioning key
// predicates, but I see in the executor that no
// predicates are evaluated on the tuple node. This may
// be an unrelated bug or a problem for this case. For now
// refuse to generate any partitioning scheme that has
// partitioning key predicates
return NULL;
}
}
}
else
{
// A leaf node can only produce a partitioning function if
// it can apply its partitioning key predicates. If we can't
// produce the data we better return NULL physical props.
return NULL;
}
// If partitioning function has no node map already, create a new node map
// having wild-card entries for each partition.
if (myPartFunc->getNodeMap() == 0)
{
NodeMap* myNodeMap = new(CmpCommon::statementHeap())
NodeMap(CmpCommon::statementHeap(),
myPartFunc->getCountOfPartitions(),
NodeMapEntry::ACTIVE);
myPartFunc->replaceNodeMap(myNodeMap);
}
const PushDownRequirement* pdr = rppForMe->getPushDownRequirement();
// Disable inserts under exchange if the compound statement is not
// under the same exchange.
if ( isinBlockStmt() AND rppForMe->executeInDP2() AND
NOT PushDownCSRequirement::isInstanceOf(pdr)
)
return 0;
if ( pdr )
{
// If the tuple is the first statement (left most node) or
// immediately below an EXCHANGE, do not allow it.
if (pdr->isEmpty()== TRUE)
return 0;
planExecutionLocation = EXECUTE_IN_DP2;
// Any leaf node inside DP2 should produce a physical property.
// Furthermore, we need a logphy partfunc here so that it can
// be compared with that of the INSERTed node.
if (! myPartFunc->isASinglePartitionPartitioningFunction())
{
LogicalPartitioningRequirement *lpr;
lpr = rppForMe->getLogicalPartRequirement();
PartitioningRequirement *logPartReq = NULL;
Lng32 numPAs = ANY_NUMBER_OF_PARTITIONS;
Lng32 numEsps = 1;
NABoolean usePapa = FALSE;
NABoolean shouldUseSynchronousAccess = FALSE;
LogPhysPartitioningFunction::logPartType logPartType;
PartitioningFunction * logicalPartFunc = NULL;
if (!lpr)
{
lpr = new(CmpCommon::statementHeap())
LogicalPartitioningRequirement(
rppForMe->getPartitioningRequirement());
logPartType = LogPhysPartitioningFunction::ANY_LOGICAL_PARTITIONING;
numPAs = ANY_NUMBER_OF_PARTITIONS;
usePapa = FALSE;
}
else
{
logPartType = lpr->getLogPartTypeReq();
numPAs = lpr->getNumClientsReq();
usePapa = lpr->getMustUsePapa();
}
logPartReq = lpr->getLogReq();
if (logPartReq)
{
logicalPartFunc = logPartReq->realize(
myContext,
FALSE,
myPartFunc->makePartitioningRequirement());
myPartFunc = new(CmpCommon::statementHeap())
LogPhysPartitioningFunction(
logicalPartFunc, // logical
myPartFunc, // physical
logPartType,
numPAs,
usePapa,
shouldUseSynchronousAccess);
myPartFunc->createPartitioningKeyPredicates();
}
// coverity thinks "lpr" is leaked here. "lpr" is eventually freed by
// CmpCommon::statementHeap's destructor. It is dangerous to free
// "lpr" here because myPartFunc indirectly holds a pointer to "lpr".
// coverity[leaked_storage]
}
}
PhysicalProperty* sppForMe =
new (CmpCommon::statementHeap())
PhysicalProperty(myPartFunc,
planExecutionLocation,
SOURCE_TUPLE);
PushDownProperty* pushDownProperty = 0;
if ( pdr && !pdr->isEmpty() )
{
const PushDownCSRequirement*
pdcsr = pdr->castToPushDownCSRequirement();
if ( pdcsr )
{
// generate a CS push-down property.
pushDownProperty = new (CmpCommon::statementHeap())
PushDownCSProperty(pdcsr->getPartFunc(), pdcsr->getSearchKey());
} else {
const PushDownColocationRequirement*
pdclr = pdr->castToPushDownColocationRequirement();
// generate a colocation push-down property.
if ( pdclr )
{
pushDownProperty = new (CmpCommon::statementHeap())
PushDownColocationProperty(pdclr->getNodeMap());
} else
CMPASSERT(1==0);
}
}
sppForMe->setPushDownProperty(pushDownProperty);
// -----------------------------------------------------------------------
// Estimate the number of cpus executing copies of this
// instance:
// -----------------------------------------------------------------------
Lng32 countOfCPUs = 1;
Lng32 countOfStreams = 1;
if (myPartFunc != NULL)
countOfStreams = myPartFunc->getCountOfPartitions();
Lng32 countOfAvailableCPUs = 1;
if (myContext->getReqdPhysicalProperty())
countOfAvailableCPUs =
myContext->getReqdPhysicalProperty()->getCountOfAvailableCPUs();
// The number of CPUs is limited by the number of streams
countOfCPUs = (countOfStreams < countOfAvailableCPUs ?
countOfStreams : countOfAvailableCPUs);
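// e.g., 8 streams but only 4 available CPUs gives countOfCPUs = 4,
// while 2 streams and 4 available CPUs gives 2.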
CMPASSERT(countOfCPUs >= 1);
// A Tuple operator does not normally execute in DP2. Still, it is
// a leaf operator. So, set the currentCountOfCPUs in the spp for
// tuple to tuple's count of cpus. This way, if any of tuple's
// parents decide to base their degree of parallelism on their
// children they can get their child's degree of parallelism
// from the currentCountOfCPUs field of their child's spp, regardless
// of whether the child is really a DP2 operator or not.
sppForMe->setCurrentCountOfCPUs(countOfCPUs);
// remove anything that's not covered by the group attributes
sppForMe->enforceCoverageByGroupAttributes (getGroupAttr()) ;
return sppForMe;
} // Tuple::synthPhysicalProperty()
#pragma warn(262) // warning elimination
//<pb>
//==============================================================================
// Synthesize physical properties for InsertCursor operator's current plan
// extracted from a specified context.
//
// Input:
// myContext -- specified context containing this operator's current plan.
//
// planNumber -- plan's number within the plan workspace. Used optionally for
// synthesizing partitioning functions but unused in this
// derived version of synthPhysicalProperty().
//
// Output:
// none
//
// Return:
// Pointer to this operator's synthesized physical properties.
//
//==============================================================================
PhysicalProperty*
InsertCursor::synthPhysicalProperty(const Context* myContext,
const Lng32 planNumber,
PlanWorkSpace *pws)
{
ValueIdList emptySortKey;
PhysicalProperty * sppForMe = synthDP2PhysicalProperty(myContext,
emptySortKey,
getIndexDesc(),
getPartKey());
// remove anything that's not covered by the group attributes
if ( sppForMe )
sppForMe->enforceCoverageByGroupAttributes (getGroupAttr()) ;
return sppForMe ;
} // InsertCursor::synthPhysicalProperty()
//<pb>
//==============================================================================
// Synthesize physical properties for UpdateCursor operator's current plan
// extracted from a specified context.
//
// Input:
// myContext -- specified context containing this operator's current plan.
//
// planNumber -- plan's number within the plan workspace. Used optionally for
// synthesizing partitioning functions but unused in this
// derived version of synthPhysicalProperty().
//
// Output:
// none
//
// Return:
// Pointer to this operator's synthesized physical properties.
//
//==============================================================================
PhysicalProperty*
UpdateCursor::synthPhysicalProperty(const Context* myContext,
const Lng32 planNumber,
PlanWorkSpace *pws)
{
ValueIdList emptySortKey;
PhysicalProperty * sppForMe = synthDP2PhysicalProperty(myContext,
emptySortKey,
getIndexDesc(),
getPartKey());
// remove anything that's not covered by the group attributes
if ( sppForMe )
sppForMe->enforceCoverageByGroupAttributes (getGroupAttr()) ;
return sppForMe ;
} // UpdateCursor::synthPhysicalProperty()
//<pb>
//==============================================================================
// Synthesize physical properties for DeleteCursor operator's current plan
// extracted from a specified context.
//
// Input:
// myContext -- specified context containing this operator's current plan.
//
// planNumber -- plan's number within the plan workspace. Used optionally for
// synthesizing partitioning functions but unused in this
// derived version of synthPhysicalProperty().
//
// Output:
// none
//
// Return:
// Pointer to this operator's synthesized physical properties.
//
//==============================================================================
PhysicalProperty*
DeleteCursor::synthPhysicalProperty(const Context* myContext,
const Lng32 planNumber,
PlanWorkSpace *pws)
{
ValueIdList emptySortKey;
PhysicalProperty * sppForMe = synthDP2PhysicalProperty(myContext,
emptySortKey,
getIndexDesc(),
getPartKey());
// remove anything that's not covered by the group attributes
if ( sppForMe )
sppForMe->enforceCoverageByGroupAttributes (getGroupAttr()) ;
return sppForMe ;
} // DeleteCursor::synthPhysicalProperty()
NABoolean GenericUpdate::okToAttemptESPParallelism (
const Context* myContext, /*IN*/
PlanWorkSpace* pws, /*IN*/
Lng32& numOfESPs, /*IN,OUT*/
float& allowedDeviation, /*OUT*/
NABoolean& numOfESPsForced /*OUT*/)
{
const ReqdPhysicalProperty* rppForMe =
myContext->getReqdPhysicalProperty();
NABoolean result = FALSE;
DefaultToken parallelControlSettings =
getParallelControlSettings(rppForMe,
numOfESPs,
allowedDeviation,
numOfESPsForced);
if (isMerge())
{
result = FALSE;
}
else
if (parallelControlSettings == DF_OFF)
{
result = FALSE;
}
else if ((CmpCommon::getDefault(COMP_BOOL_65) == DF_ON) AND
(parallelControlSettings == DF_MAXIMUM) AND
CURRSTMT_OPTDEFAULTS->maxParallelismIsFeasible()
)
{
numOfESPs = rppForMe->getCountOfPipelines();
allowedDeviation = 0.0; // not used by leaves
result = TRUE;
}
else if (parallelControlSettings == DF_ON)
{
// Currently, forcing of the number of ESPs for a leaf
// is not supported. So, numOfESPsForced should always be FALSE.
if (NOT numOfESPsForced)
{
const Int32 optimalNumPAsPerEsp =
(Int32) getDefaultAsLong(PARTITION_ACCESS_NODES_PER_ESP);
// divide number of PAs by number of PAs per ESP and round up to
// the next highest number of ESPs if there is a remainder
numOfESPs = (numOfESPs+optimalNumPAsPerEsp-1) /
optimalNumPAsPerEsp;
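// (same PA-per-ESP round-up as in FileScan::okToAttemptESPParallelism)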
// Can't have more ESPs than the maximum
numOfESPs = MINOF(numOfESPs,rppForMe->getCountOfPipelines());
allowedDeviation = 0.0; // not used by leaves
}
result = TRUE;
}
else
{
// Otherwise, the user must have specified "SYSTEM" for the
// ATTEMPT_ESP_PARALLELISM default. This means it is up to the
// optimizer to decide.
// Return TRUE if the number of rows returned
// by child(0) exceeds the threshold from the defaults
// table. The recommended number of ESPs is also computed
// to be 1 process per <threshold> number of rows.
EstLogPropSharedPtr inLogProp = myContext->getInputLogProp();
EstLogPropSharedPtr outputLogProp = getGroupAttr()->outputLogProp(inLogProp);
const CostScalar rowCount =
(outputLogProp->getResultCardinality()).minCsOne();
const CostScalar numberOfRowsThreshold =
CURRSTMT_OPTDEFAULTS->numberOfRowsParallelThreshold();
if (rowCount > numberOfRowsThreshold)
{
double optimalNumOfESPsDbl =
ceil((rowCount / numberOfRowsThreshold).value());
// Don't need / can't have more ESPs than PAs
numOfESPs = (Lng32) MINOF(numOfESPs,optimalNumOfESPsDbl);
// Can't have more ESPs than the maximum
numOfESPs = MINOF(numOfESPs,rppForMe->getCountOfPipelines());
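// Illustrative numbers: rowCount = 25,000 with a 5,000-row threshold
// gives ceil(5.0) = 5 candidate ESPs, further clamped above by the
// PA count and the number of pipelines.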
allowedDeviation = 0.0; // not used
result = TRUE;
}
else
{
result = FALSE;
}
} // end if the user let the optimizer decide
return result;
} // GenericUpdate::okToAttemptESPParallelism()
//<pb>
//==============================================================================
// Synthesize physical properties for Explain operator's current plan
// extracted from a specified context.
//
// Input:
// myContext -- specified context containing this operator's current plan.
//
// planNumber -- plan's number within the plan workspace. Used optionally for
// synthesizing partitioning functions but unused in this
// derived version of synthPhysicalProperty().
//
// Output:
// none
//
// Return:
// Pointer to this operator's synthesized physical properties.
//
//==============================================================================
PhysicalProperty*
ExplainFunc::synthPhysicalProperty(const Context* myContext,
const Lng32 planNumber,
PlanWorkSpace *pws)
{
//----------------------------------------------------------
// Create a node map with a single, active, wild-card entry.
//----------------------------------------------------------
NodeMap* myNodeMap = new(CmpCommon::statementHeap())
NodeMap(CmpCommon::statementHeap(),
1,
NodeMapEntry::ACTIVE);
//------------------------------------------------------------
// Synthesize a partitioning function with a single partition.
//------------------------------------------------------------
PartitioningFunction* myPartFunc =
new(CmpCommon::statementHeap())
SinglePartitionPartitioningFunction(myNodeMap);
PhysicalProperty * sppForMe =
new(CmpCommon::statementHeap())
PhysicalProperty(myPartFunc,
EXECUTE_IN_MASTER,
SOURCE_VIRTUAL_TABLE);
// remove anything that's not covered by the group attributes
sppForMe->enforceCoverageByGroupAttributes (getGroupAttr()) ;
return sppForMe ;
} // ExplainFunc::synthPhysicalProperty()
PhysicalProperty*
StatisticsFunc::synthPhysicalProperty(const Context* myContext,
const Lng32 planNumber,
PlanWorkSpace *pws)
{
//----------------------------------------------------------
// Create a node map with a single, active, wild-card entry.
//----------------------------------------------------------
NodeMap* myNodeMap = new(CmpCommon::statementHeap())
NodeMap(CmpCommon::statementHeap(),
1,
NodeMapEntry::ACTIVE);
//------------------------------------------------------------
// Synthesize a partitioning function with a single partition.
//------------------------------------------------------------
PartitioningFunction* myPartFunc =
new(CmpCommon::statementHeap())
SinglePartitionPartitioningFunction(myNodeMap);
PhysicalProperty * sppForMe =
new(CmpCommon::statementHeap())
PhysicalProperty(myPartFunc,
EXECUTE_IN_MASTER,
SOURCE_VIRTUAL_TABLE);
// remove anything that's not covered by the group attributes
sppForMe->enforceCoverageByGroupAttributes (getGroupAttr()) ;
return sppForMe ;
} // StatisticsFunc::synthPhysicalProperty()
PhysicalProperty *
PhysicalSPProxyFunc::synthPhysicalProperty(const Context *myContext,
const Lng32 planNumber,
PlanWorkSpace *pws)
{
//----------------------------------------------------------
// Create a node map with a single, active, wild-card entry.
//----------------------------------------------------------
NodeMap* myNodeMap = new(CmpCommon::statementHeap())
NodeMap(CmpCommon::statementHeap(), 1, NodeMapEntry::ACTIVE);
//------------------------------------------------------------
// Synthesize a partitioning function with a single partition.
//------------------------------------------------------------
PartitioningFunction *myPartFunc = new (CmpCommon::statementHeap())
SinglePartitionPartitioningFunction(myNodeMap);
PhysicalProperty *sppForMe = new (CmpCommon::statementHeap())
PhysicalProperty(myPartFunc, EXECUTE_IN_MASTER, SOURCE_VIRTUAL_TABLE);
// remove anything that's not covered by the group attributes
sppForMe->enforceCoverageByGroupAttributes(getGroupAttr());
return sppForMe;
} // PhysicalSPProxyFunc::synthPhysicalProperty()
PhysicalProperty *
PhysicalExtractSource::synthPhysicalProperty(const Context *myContext,
const Lng32 planNumber,
PlanWorkSpace *pws)
{
//----------------------------------------------------------
// Create a node map with a single, active, wild-card entry.
//----------------------------------------------------------
NodeMap* myNodeMap = new(CmpCommon::statementHeap())
NodeMap(CmpCommon::statementHeap(), 1, NodeMapEntry::ACTIVE);
//------------------------------------------------------------
// Synthesize a partitioning function with a single partition.
//------------------------------------------------------------
PartitioningFunction *myPartFunc = new (CmpCommon::statementHeap())
SinglePartitionPartitioningFunction(myNodeMap);
PhysicalProperty *sppForMe = new (CmpCommon::statementHeap())
PhysicalProperty(myPartFunc, EXECUTE_IN_ESP, SOURCE_VIRTUAL_TABLE);
// remove anything that's not covered by the group attributes
sppForMe->enforceCoverageByGroupAttributes(getGroupAttr());
return sppForMe;
} // PhysicalExtractSource::synthPhysicalProperty()
// Transpose::costMethod()
// Obtain a pointer to a CostMethod object providing access
// to the cost estimation functions for nodes of this type.
CostMethod*
PhysTranspose::costMethod() const
{
static THREAD_P CostMethodTranspose *m = NULL;
if (m == NULL)
m = new (GetCliGlobals()->exCollHeap()) CostMethodTranspose();
return m;
} // PhysTranspose::costMethod()
//<pb>
//==============================================================================
// Synthesize physical properties for Transpose operator's current plan
// extracted from a specified context.
//
// Input:
// myContext -- specified context containing this operator's current plan.
//
// planNumber -- plan's number within the plan workspace. Used optionally for
// synthesizing partitioning functions but unused in this
// derived version of synthPhysicalProperty().
//
// Output:
// none
//
// Return:
// Pointer to this operator's synthesized physical properties.
//
//==============================================================================
PhysicalProperty*
PhysTranspose::synthPhysicalProperty(const Context *context,
const Lng32 planNumber,
PlanWorkSpace *pws)
{
// for now, simply propagate the physical property
PhysicalProperty *sppForMe = new(CmpCommon::statementHeap())
PhysicalProperty(*context->getPhysicalPropertyOfSolutionForChild(0));
// remove anything that's not covered by the group attributes
sppForMe->enforceCoverageByGroupAttributes (getGroupAttr()) ;
return sppForMe ;
} // PhysTranspose::synthPhysicalProperty()
//<pb>
//==============================================================================
// Synthesize physical properties for Stored Procedure operator's current plan
// extracted from a specified context.
//
// Input:
// myContext -- specified context containing this operator's current plan.
//
// planNumber -- plan's number within the plan workspace. Used optionally for
// synthesizing partitioning functions but unused in this
// derived version of synthPhysicalProperty().
//
// Output:
// none
//
// Return:
// Pointer to this operator's synthesized physical properties.
//
//==============================================================================
PhysicalProperty *
RelInternalSP::synthPhysicalProperty(const Context * myContext,
const Lng32 planNumber,
PlanWorkSpace *pws)
{
//----------------------------------------------------------
// Create a node map with a single, active, wild-card entry.
//----------------------------------------------------------
NodeMap* myNodeMap = new(CmpCommon::statementHeap())
NodeMap(CmpCommon::statementHeap(),
1,
NodeMapEntry::ACTIVE);
//------------------------------------------------------------
// Synthesize a partitioning function with a single partition.
//------------------------------------------------------------
PartitioningFunction* myPartFunc =
new(CmpCommon::statementHeap())
SinglePartitionPartitioningFunction(myNodeMap);
PhysicalProperty * sppForMe =
new(CmpCommon::statementHeap()) PhysicalProperty(
myPartFunc,
EXECUTE_IN_MASTER,
SOURCE_VIRTUAL_TABLE);
// remove anything that's not covered by the group attributes
sppForMe->enforceCoverageByGroupAttributes (getGroupAttr()) ;
return sppForMe ;
} // RelInternalSP::synthPhysicalProperty()
// -----------------------------------------------------------------------
// Obtain a pointer to a CostMethod object providing access
// to the cost estimation functions for nodes of type RelInternalSP.
// -----------------------------------------------------------------------
CostMethod *
RelInternalSP::costMethod() const
{
static THREAD_P CostMethodStoredProc *m = NULL;
if (m == NULL)
m = new (GetCliGlobals()->exCollHeap()) CostMethodStoredProc();
return m;
} // RelInternalSP::costMethod()
//<pb>
PhysicalProperty*
HbaseDelete::synthPhysicalProperty(const Context* myContext,
const Lng32 planNumber,
PlanWorkSpace *pws)
{
//----------------------------------------------------------
// Create a node map with a single, active, wild-card entry.
//----------------------------------------------------------
NodeMap* myNodeMap = new(CmpCommon::statementHeap())
NodeMap(CmpCommon::statementHeap(),
1,
NodeMapEntry::ACTIVE);
//------------------------------------------------------------
// Synthesize a partitioning function with a single partition.
//------------------------------------------------------------
PartitioningFunction* myPartFunc =
new(CmpCommon::statementHeap())
SinglePartitionPartitioningFunction(myNodeMap);
PhysicalProperty * sppForMe =
new(CmpCommon::statementHeap())
PhysicalProperty(myPartFunc,
EXECUTE_IN_MASTER_AND_ESP,
SOURCE_VIRTUAL_TABLE);
// remove anything that's not covered by the group attributes
sppForMe->enforceCoverageByGroupAttributes (getGroupAttr()) ;
return sppForMe ;
} // HbaseDelete::synthPhysicalProperty()
PhysicalProperty*
HbaseUpdate::synthPhysicalProperty(const Context* myContext,
const Lng32 planNumber,
PlanWorkSpace *pws)
{
//----------------------------------------------------------
// Create a node map with a single, active, wild-card entry.
//----------------------------------------------------------
NodeMap* myNodeMap = new(CmpCommon::statementHeap())
NodeMap(CmpCommon::statementHeap(),
1,
NodeMapEntry::ACTIVE);
//------------------------------------------------------------
// Synthesize a partitioning function with a single partition.
//------------------------------------------------------------
PartitioningFunction* myPartFunc =
new(CmpCommon::statementHeap())
SinglePartitionPartitioningFunction(myNodeMap);
PhysicalProperty * sppForMe =
new(CmpCommon::statementHeap())
PhysicalProperty(myPartFunc,
EXECUTE_IN_MASTER_AND_ESP,
SOURCE_VIRTUAL_TABLE);
// remove anything that's not covered by the group attributes
sppForMe->enforceCoverageByGroupAttributes (getGroupAttr()) ;
return sppForMe ;
} // HbaseUpdate::synthPhysicalProperty()
PhysicalProperty*
HiveInsert::synthPhysicalProperty(const Context* myContext,
const Lng32 planNumber,
PlanWorkSpace *pws)
{
const ReqdPhysicalProperty* rppForMe =
myContext->getReqdPhysicalProperty();
PartitioningRequirement * partReq = rppForMe->getPartitioningRequirement();
// variables that help to come up with my partitioning function
const LogicalPartitioningRequirement *lpr =
rppForMe->getLogicalPartRequirement();
PartitioningRequirement * logPartReq = NULL;
PlanExecutionEnum location = EXECUTE_IN_MASTER_AND_ESP;
PartitioningFunction* myPartFunc;
if ( partReq->isRequirementFullySpecified() ) {
FullySpecifiedPartitioningRequirement* fpr =
(FullySpecifiedPartitioningRequirement*)
(partReq->castToFullySpecifiedPartitioningRequirement());
myPartFunc = fpr -> getPartitioningFunction();
} else
myPartFunc = getIndexDesc()->getPartitioningFunction();
PhysicalProperty *pp = new(CmpCommon::statementHeap())
PhysicalProperty(myPartFunc, location);
return pp;
} // HiveInsert::synthPhysicalProperty()
CostMethod *
HbaseInsert::costMethod() const
{
static THREAD_P CostMethodHbaseInsert *m = NULL;
if (m == NULL)
m = new (GetCliGlobals()->exCollHeap()) CostMethodHbaseInsert();
return m;
} // HbaseInsert::costMethod()
PhysicalProperty*
HbaseInsert::synthPhysicalProperty(const Context* myContext,
const Lng32 planNumber,
PlanWorkSpace *pws)
{
const ReqdPhysicalProperty* rppForMe =
myContext->getReqdPhysicalProperty();
PartitioningRequirement* partReqForMe =
rppForMe->getPartitioningRequirement();
//----------------------------------------------------------
// Create a node map with a single, active, wild-card entry.
//----------------------------------------------------------
NodeMap* myNodeMap = new(CmpCommon::statementHeap())
NodeMap(CmpCommon::statementHeap(),
1,
NodeMapEntry::ACTIVE);
//------------------------------------------------------------
// Synthesize a partitioning function with a single partition.
//------------------------------------------------------------
PartitioningFunction* myPartFunc = NULL;
if (partReqForMe &&
partReqForMe->castToRequireReplicateNoBroadcast())
myPartFunc =
partReqForMe->castToRequireReplicateNoBroadcast()->
getPartitioningFunction()->copy();
else
myPartFunc = new(CmpCommon::statementHeap())
SinglePartitionPartitioningFunction(myNodeMap);
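// i.e., under a replicate-no-broadcast requirement the insert echoes
// that partitioning function back; otherwise it synthesizes a single
// partition over the wild-card node map created above.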
PhysicalProperty * sppForMe =
new(CmpCommon::statementHeap())
PhysicalProperty(myPartFunc,
EXECUTE_IN_MASTER_AND_ESP,
SOURCE_VIRTUAL_TABLE);
// remove anything that's not covered by the group attributes
sppForMe->enforceCoverageByGroupAttributes (getGroupAttr()) ;
return sppForMe ;
} // HbaseInsert::synthPhysicalProperty()
// -----------------------------------------------------------------------
// member functions for class PhyPack
// -----------------------------------------------------------------------
CostMethod* PhyPack::costMethod() const
{
// Zero costs for now.
static THREAD_P CostMethodFixedCostPerRow *m = NULL;
if (m == NULL)
m = new (GetCliGlobals()->exCollHeap()) CostMethodFixedCostPerRow(0.,0.,0.);
return m;
}
//<pb>
//==============================================================================
// Synthesize physical properties for Pack operator's current plan
// extracted from a specified context.
//
// Input:
// myContext -- specified context containing this operator's current plan.
//
// planNumber -- plan's number within the plan workspace. Used optionally for
// synthesizing partitioning functions but unused in this
// derived version of synthPhysicalProperty().
//
// Output:
// none
//
// Return:
// Pointer to this operator's synthesized physical properties.
//
//==============================================================================
PhysicalProperty*
PhyPack::synthPhysicalProperty(const Context* context,
const Lng32 planNumber,
PlanWorkSpace *pws)
{
/*
PlanExecutionEnum planExecutionLocation;
// Get child's properties
PhysicalProperty const *sppOfChild =
context->getPhysicalPropertyOfSolutionForChild(0);
// Execute in DP2 if required
if ( context->getReqdPhysicalProperty()->executeInDP2()
)
{
planExecutionLocation = EXECUTE_IN_DP2;
}
else {
planExecutionLocation = sppOfChild->getPlanExecutionLocation();
}
PhysicalProperty* sppForMe =
new (CmpCommon::statementHeap())
PhysicalProperty(sppOfChild->getSortKey(),
sppOfChild->getSortOrderType(),
sppOfChild->getDp2SortOrderPartFunc(),
sppOfChild->getPartitioningFunction(),
planExecutionLocation,
sppOfChild->getDataSourceEnum(),
sppOfChild->getIndexDesc(),
sppOfChild->getPartSearchKey()
);
*/
// ---------------------------------------------------------------------
// Simply propagate the child's physical property for now. After packing,
// ordering and related properties should no longer hold, though.
// ---------------------------------------------------------------------
const PhysicalProperty* const sppOfTheChild =
context->getPhysicalPropertyOfSolutionForChild(0);
PhysicalProperty* sppForMe = new (CmpCommon::statementHeap())
PhysicalProperty (*sppOfTheChild);
// remove anything that's not covered by the group attributes
sppForMe->enforceCoverageByGroupAttributes (getGroupAttr()) ;
return sppForMe;
}
// -----------------------------------------------------------------------
// PhyCompoundStmt::costMethod()
// Obtain a pointer to a CostMethod object providing access
// to the cost estimation functions for nodes of this class.
// -----------------------------------------------------------------------
CostMethod*
PhysCompoundStmt::costMethod() const
{
static THREAD_P CostMethodCompoundStmt *m = NULL;
if (m == NULL)
m = new (GetCliGlobals()->exCollHeap()) CostMethodCompoundStmt();
return m;
}
PhysicalProperty* PhysCompoundStmt::synthPhysicalProperty(const Context* context,
const Lng32 /*unused*/,
PlanWorkSpace *pws)
{
const PhysicalProperty* const sppOfLeftChild =
context->getPhysicalPropertyOfSolutionForChild(0);
// ---------------------------------------------------------------------
// Call the default implementation (RelExpr::synthPhysicalProperty())
// to synthesize the properties on the number of cpus.
// ---------------------------------------------------------------------
PhysicalProperty* sppTemp = RelExpr::synthPhysicalProperty(context,0, pws);
// ---------------------------------------------------------------------
// The result of a compound statement has the sort order of the left
// child. The nested join maintains the partitioning of the left child
// as well.
// ---------------------------------------------------------------------
PhysicalProperty* sppForMe =
new (CmpCommon::statementHeap())
PhysicalProperty(sppOfLeftChild->getSortKey(),
sppOfLeftChild->getSortOrderType(),
sppOfLeftChild->getDp2SortOrderPartFunc(),
sppOfLeftChild->getPartitioningFunction(),
sppOfLeftChild->getPlanExecutionLocation(),
sppOfLeftChild->getDataSourceEnum(),
sppOfLeftChild->getIndexDesc(),
sppOfLeftChild->getPartSearchKey(),
sppOfLeftChild->getPushDownProperty());
sppForMe->setCurrentCountOfCPUs(sppTemp->getCurrentCountOfCPUs());
// remove anything that's not covered by the group attributes
sppForMe->enforceCoverageByGroupAttributes (getGroupAttr()) ;
delete sppTemp;
return sppForMe;
} // CompoundStmt::synthPhysicalProperty()
//<pb>
Context* CompoundStmt::createContextForAChild(Context* myContext,
PlanWorkSpace* pws,
Lng32& childIndex)
{
// ---------------------------------------------------------------------
// If one Context has been generated for each child, return NULL
// to signal completion.
// ---------------------------------------------------------------------
if (pws->getCountOfChildContexts() == getArity())
return NULL;
childIndex = pws->getCountOfChildContexts();
Lng32 planNumber = 0;
Context* childContext = NULL;
const ReqdPhysicalProperty* rppForMe = myContext->getReqdPhysicalProperty();
RequirementGenerator rg(child(childIndex),rppForMe);
if ( rppForMe->getPushDownRequirement() == NULL AND
rppForMe->executeInDP2() AND
childIndex == 0
)
{
// This must be the top CS and we are optimizing the left child.
// Add a null-state required push-down property.
rg.addPushDownRequirement(
new (CmpCommon::statementHeap()) PushDownCSRequirement()
);
}
if (
childIndex == 1 // for right child
AND
// a plan has been produced by latest context
(childContext = pws->getChildContext(0, 0)) != NULL
//(pws->getLatestChildIndex(), pws->getLatestPlan())) != NULL
AND
childContext->hasOptimalSolution()
)
{
// clean the rg.
rg.removeAllPartitioningRequirements();
// Since we do not impose the sort key or arrangement of the
// left child on the right child, we have to remove them from the
// requirement.
rg.removeSortKey();
rg.removeArrangement();
const PhysicalProperty*
sppForChild = childContext->getPhysicalPropertyForSolution();
CMPASSERT(sppForChild != NULL);
if ( rppForMe -> executeInDP2() )
{
CMPASSERT(sppForChild->getPushDownProperty() != NULL);
// Add the push-down requirement if the left child echos back
// the push down requirement.
rg.addPushDownRequirement(
sppForChild->getPushDownProperty()->makeRequirement());
} else {
// ---------------------------------------------------------------
// spp should have been synthesized for child's optimal plan.
// ---------------------------------------------------------------
PartitioningFunction* childPartFunc =
sppForChild->getPartitioningFunction();
PartitioningRequirement* partReqForChild =
childPartFunc->makePartitioningRequirement();
CMPASSERT(partReqForChild->
castToFullySpecifiedPartitioningRequirement());
// for above DP2 cases, we need the part req.
//
// Note ESP parallelism would not work here as partKey is not mapped.
//
rg.addPartRequirement(partReqForChild);
}
}
// ---------------------------------------------------------------------
// Done adding all the requirements together, now see whether it worked
// and give up if it is not possible to satisfy them
// ---------------------------------------------------------------------
if (NOT rg.checkFeasibility())
return NULL;
// ---------------------------------------------------------------------
// Compute the cost limit to be applied to the child.
// ---------------------------------------------------------------------
CostLimit* costLimit = computeCostLimit(myContext, pws);
// ---------------------------------------------------------------------
// Get a Context for optimizing the child.
// Search for an existing Context in the CascadesGroup to which the
// child belongs that requires the same properties as those in
// rppForChild. Reuse it, if found. Otherwise, create a new Context
// that contains rppForChild as the required physical properties.
// ---------------------------------------------------------------------
Context* result = shareContext(
childIndex,
rg.produceRequirement(),
myContext->getInputPhysicalProperty(),
costLimit,
myContext,
myContext->getInputLogProp());
// ---------------------------------------------------------------------
// Store the Context for the child in the PlanWorkSpace.
// ---------------------------------------------------------------------
pws->storeChildContext(childIndex, planNumber, result);
return result;
} // CompoundStmt::createContextForAChild()
Context* Pack::createContextForAChild(Context* myContext,
PlanWorkSpace* pws,
Lng32& childIndex)
{
// ---------------------------------------------------------------------
// If one Context has been generated for each child, return NULL
// to signal completion.
// ---------------------------------------------------------------------
if (pws->getCountOfChildContexts() == getArity())
return NULL;
childIndex = 0;
Lng32 planNumber = 0;
const ReqdPhysicalProperty* rppForMe = myContext->getReqdPhysicalProperty();
RequirementGenerator rg(child(0),rppForMe);
// ---------------------------------------------------------------------
// Add the order requirements needed for this RelPack node
// ---------------------------------------------------------------------
// Shouldn't/Can't add a sort order type requirement
// if we are in DP2
if (rppForMe->executeInDP2())
rg.addSortKey(requiredOrder(),NO_SOT);
else
rg.addSortKey(requiredOrder(),ESP_SOT);
// rg.addLocationRequirement(EXECUTE_IN_ESP);
// Cannot execute in parallel
//
// Do not impose single part requirement if executed in DP2.
if ( NOT rppForMe->executeInDP2() OR NOT isinBlockStmt() )
rg.addNumOfPartitions(1);
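// (the single-partition requirement is skipped only when the Pack
// executes in DP2 inside a compound statement)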
// ---------------------------------------------------------------------
// Done adding all the requirements together, now see whether it worked
// and give up if it is not possible to satisfy them
// ---------------------------------------------------------------------
if (NOT rg.checkFeasibility())
return NULL;
// ---------------------------------------------------------------------
// Compute the cost limit to be applied to the child.
// ---------------------------------------------------------------------
CostLimit* costLimit = computeCostLimit(myContext, pws);
// ---------------------------------------------------------------------
// Get a Context for optimizing the child.
// Search for an existing Context in the CascadesGroup to which the
// child belongs that requires the same properties as those in
// rppForChild. Reuse it, if found. Otherwise, create a new Context
// that contains rppForChild as the required physical properties.
// ---------------------------------------------------------------------
Context* result = shareContext(
childIndex,
rg.produceRequirement(),
myContext->getInputPhysicalProperty(),
costLimit,
myContext,
myContext->getInputLogProp());
// ---------------------------------------------------------------------
// Store the Context for the child in the PlanWorkSpace.
// ---------------------------------------------------------------------
pws->storeChildContext(childIndex, planNumber, result);
return result;
} // Pack::createContextForAChild()
// -----------------------------------------------------------------------
// IsolatedScalarUDF::costMethod()
// Obtain a pointer to a CostMethod object providing access
// to the cost estimation functions for nodes of this class.
// -----------------------------------------------------------------------
CostMethod*
IsolatedScalarUDF::costMethod() const
{
static THREAD_P CostMethodIsolatedScalarUDF *m = NULL;
if (m == NULL)
m = new (GetCliGlobals()->exCollHeap()) CostMethodIsolatedScalarUDF();
return m;
}
// -----------------------------------------------------------------------
// PhysicalIsolatedScalarUDF::costMethod()
// Obtain a pointer to a CostMethod object providing access
// to the cost estimation functions for nodes of this class.
// -----------------------------------------------------------------------
CostMethod*
PhysicalIsolatedScalarUDF::costMethod() const
{
static THREAD_P CostMethodIsolatedScalarUDF *m = NULL;
if (m == NULL)
m = new (GetCliGlobals()->exCollHeap()) CostMethodIsolatedScalarUDF();
return m;
}
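// Editorial note, not in the original source: both costMethod()
// implementations above use the same lazy-singleton idiom, a THREAD_P
// (thread-local) pointer allocated once on the CLI globals heap, so each
// compiler thread builds exactly one CostMethodIsolatedScalarUDF object
// and reuses it for every node of that class it costs.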
//==============================================================================
// Synthesize physical properties for IsolatedScalarUDF operator's current plan
// extracted from a specified context.
//
// Input:
// myContext -- specified context containing this operator's current plan.
//
// planNumber -- plan's number within the plan workspace. Used optionally for
// synthesizing partitioning functions but unused in this
// derived version of synthPhysicalProperty().
//
// Output:
// none
//
// Return:
// Pointer to this operator's synthesized physical properties.
//
// Note this is a close copy of the synthPhysicalProperties for Tuple,
// so if we change anything here we should evaluate if the Tuple method
// should change as well.
//
//==============================================================================
#pragma nowarn(262) // warning elimination
PhysicalProperty*
IsolatedScalarUDF::synthPhysicalProperty(const Context* myContext,
const Lng32 planNumber,
PlanWorkSpace *pws)
{
CMPASSERT(myContext != NULL);
PartitioningRequirement *myPartReq = NULL;
PartitioningFunction *myPartFunc = NULL;
const ReqdPhysicalProperty* rppForMe = myContext->getReqdPhysicalProperty();
// -----------------------------------------------------------------
// decide on my partitioning function
// -----------------------------------------------------------------
myPartReq = rppForMe->getPartitioningRequirement();
if (myPartReq ) {
myPartFunc = myPartReq->realize(myContext);
CMPASSERT(myPartFunc != NULL);
// we cannot execute in DP2 nor can we do some of these partitioning
// schemes.
if ( rppForMe->executeInDP2() ||
(NOT rppForMe->executeInDP2() &&
(myPartFunc -> castToHashPartitioningFunction() ||
myPartFunc -> castToTableHashPartitioningFunction() ||
myPartFunc -> castToRangePartitioningFunction() ||
myPartFunc -> castToRoundRobinPartitioningFunction()
) && myPartFunc -> getCountOfPartitions() > 1
)
)
return NULL;
} else {
myPartFunc = new(CmpCommon::statementHeap())
SinglePartitionPartitioningFunction();
}
// Make sure the IsolatedScalarUDF node only produces its output when it is
// part of the partition to be produced. Do this by applying the
// partitioning key predicates which filter out the data of that
// partition.
if (myPartFunc->canProducePartitioningKeyPredicates())
{
myPartFunc->createPartitioningKeyPredicates();
if ( NOT rppForMe->executeInDP2() )
{
// for single partition and replication the predicates will be empty
if (NOT myPartFunc->getPartitioningKeyPredicates().isEmpty())
{
// ||opt we would need to add the partitioning key
// predicates, but I see in the executor that no
// predicates are evaluated on the IsolatedScalarUDF node. This may
// be an unrelated bug or a problem for this case. For now
// refuse to generate any partitioning scheme that has
// partitioning key predicates
return NULL;
}
}
}
else
{
// A leaf node can only produce a partitioning function if
// it can apply its partitioning key predicates. If we can't
// produce the data we better return NULL physical props.
return NULL;
}
// If partitioning function has no node map already, create a new node map
// having wild-card entries for each partition.
if (myPartFunc->getNodeMap() == 0)
{
NodeMap* myNodeMap = new(CmpCommon::statementHeap())
NodeMap(CmpCommon::statementHeap(),
myPartFunc->getCountOfPartitions(),
NodeMapEntry::ACTIVE);
myPartFunc->replaceNodeMap(myNodeMap);
}
const PushDownRequirement* pdr = rppForMe->getPushDownRequirement();
// Disable inserts under exchange if the compound statement is not
// under the same exchange.
if ( isinBlockStmt() AND rppForMe->executeInDP2() AND
NOT PushDownCSRequirement::isInstanceOf(pdr)
)
return 0;
if ( pdr )
{
// If the IsolatedScalarUDF is the first statement (left most node) or
// immediately below an EXCHANGE, do not allow it.
if (pdr->isEmpty()== TRUE)
return 0;
}
// XXX Verify that AP is what ANY will translate to
NABoolean canRunInParallel = (getRoutineDesc()->getEffectiveNARoutine()->
getParallelism() == "AP");
PlanExecutionEnum planExecutionLocation = canRunInParallel ?
EXECUTE_IN_MASTER_AND_ESP :
EXECUTE_IN_MASTER;
PhysicalProperty* sppForMe =
new (CmpCommon::statementHeap())
PhysicalProperty(myPartFunc,
planExecutionLocation,
SOURCE_TUPLE);
PushDownProperty* pushDownProperty = 0;
if ( pdr && !pdr->isEmpty() )
{
const PushDownCSRequirement*
pdcsr = pdr->castToPushDownCSRequirement();
if ( pdcsr )
{
// generate a CS push-down property.
pushDownProperty = new (CmpCommon::statementHeap())
PushDownCSProperty(pdcsr->getPartFunc(), pdcsr->getSearchKey());
} else {
const PushDownColocationRequirement*
pdclr = pdr->castToPushDownColocationRequirement();
// generate a colocation push-down property.
if ( pdclr )
{
pushDownProperty = new (CmpCommon::statementHeap())
PushDownColocationProperty(pdclr->getNodeMap());
} else
CMPASSERT(1==0);
}
}
sppForMe->setPushDownProperty(pushDownProperty);
// -----------------------------------------------------------------------
// Estimate the number of cpus executing copies of this
// instance:
// -----------------------------------------------------------------------
Lng32 countOfCPUs = 1;
Lng32 countOfStreams = 1;
if (myPartFunc != NULL)
countOfStreams = myPartFunc->getCountOfPartitions();
Lng32 countOfAvailableCPUs = 1;
if (myContext->getReqdPhysicalProperty())
countOfAvailableCPUs =
myContext->getReqdPhysicalProperty()->getCountOfAvailableCPUs();
// The number of CPUs is limited by the number of streams
countOfCPUs = (countOfStreams < countOfAvailableCPUs ?
countOfStreams : countOfAvailableCPUs);
CMPASSERT(countOfCPUs >= 1);
// An IsolatedScalarUDF operator does not execute in DP2. Still, it is
// a leaf operator. So, set the currentCountOfCPUs in the spp for
// IsolatedScalarUDF to IsolatedScalarUDF's count of cpus. This way, if
// any of IsolatedScalarUDF's parents decide to base their degree of
// parallelism on their children they can get their child's degree of
// parallelism from the currentCountOfCPUs field of their child's spp,
// regardless of whether the child is really a DP2 operator or not.
sppForMe->setCurrentCountOfCPUs(countOfCPUs);
// remove anything that's not covered by the group attributes
sppForMe->enforceCoverageByGroupAttributes (getGroupAttr()) ;
return sppForMe;
} // IsolatedScalarUDF::synthPhysicalProperty()
#pragma warn(262) // warning elimination
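// Editorial worked example for the CPU-count logic above (all values
// assumed): if the realized partitioning function has 8 partitions, then
// countOfStreams == 8; if the required physical properties report 4
// available CPUs, then countOfCPUs = MIN(8, 4) = 4, and that value is
// recorded via setCurrentCountOfCPUs() for parents that derive their
// degree of parallelism from this child's synthesized properties.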
PhysicalProperty *CallSP::synthPhysicalProperty(const Context* context,
const Lng32 /*unused*/,
PlanWorkSpace *pws)
{
//----------------------------------------------------------
// Create a node map with a single, active, wild-card entry.
//----------------------------------------------------------
NodeMap* myNodeMap = new(CmpCommon::statementHeap())
NodeMap(CmpCommon::statementHeap(),
1,
NodeMapEntry::ACTIVE);
//------------------------------------------------------------
// Synthesize a partitioning function with a single partition.
//------------------------------------------------------------
PartitioningFunction* myPartFunc =
new(CmpCommon::statementHeap())
SinglePartitionPartitioningFunction(myNodeMap);
PhysicalProperty * sppForMe =
new(CmpCommon::statementHeap()) PhysicalProperty(
myPartFunc,
EXECUTE_IN_MASTER,
SOURCE_VIRTUAL_TABLE);
// remove anything that's not covered by the group attributes
sppForMe->enforceCoverageByGroupAttributes (getGroupAttr()) ;
return sppForMe ;
} // CallSP::synthPhysicalProperty()
DefaultToken TableMappingUDF::getParallelControlSettings (
const ReqdPhysicalProperty* const rppForMe, /*IN*/
Lng32& numOfESPs, /*OUT*/
float& allowedDeviation, /*OUT*/
NABoolean& numOfESPsForced /*OUT*/) const
{
return RelExpr::getParallelControlSettings(rppForMe,
numOfESPs, allowedDeviation, numOfESPsForced );
};
NABoolean TableMappingUDF::okToAttemptESPParallelism (
const Context* myContext, /*IN*/
PlanWorkSpace* pws, /*IN*/
Lng32& numOfESPs, /*OUT*/
float& allowedDeviation, /*OUT*/
NABoolean& numOfESPsForced /*OUT*/)
{
const ReqdPhysicalProperty* rppForMe = myContext->getReqdPhysicalProperty();
// call the base class method
NABoolean result = RelExpr::okToAttemptESPParallelism(myContext,
pws, numOfESPs, allowedDeviation, numOfESPsForced);
Lng32 reqdNumOfPartitions = (rppForMe->requiresPartitioning() ?
rppForMe->getCountOfPartitions() :
ANY_NUMBER_OF_PARTITIONS);
int udfDoP = 0;
// also ask the UDF what DoP it would like
NABoolean status = dllInteraction_->degreeOfParallelism(
this, (TMUDFPlanWorkSpace *) pws, udfDoP);
if (udfDoP != 0 && udfDoP != numOfESPs && !numOfESPsForced)
{
// the UDF cares about parallelism and it did not
// return the same DoP as suggested by the base class method
// (and we are not forcing the # of ESPs)
DefaultToken parallelControlSetting =
CURRSTMT_OPTDEFAULTS->attemptESPParallelism();
Lng32 maxDoP = CURRSTMT_OPTDEFAULTS->getMaximumDegreeOfParallelism();
switch (udfDoP)
{
case tmudr::UDRPlanInfo::MAX_DEGREE_OF_PARALLELISM:
// UDF desires a DoP of maxDoP
if (result)
udfDoP = maxDoP;
break;
case tmudr::UDRPlanInfo::ONE_INSTANCE_PER_NODE:
// override base class implementation and CQDs
parallelControlSetting = DF_ON;
numOfESPs =
udfDoP =
maxDoP = gpClusterInfo->numOfSMPs();
numOfESPsForced = TRUE;
allowedDeviation = 0.0;
result = TRUE;
break;
case 1:
// UDF wants serial execution
numOfESPs = 1;
numOfESPsForced = TRUE;
result = FALSE;
break;
case tmudr::UDRPlanInfo::DEFAULT_DEGREE_OF_PARALLELISM:
udfDoP = reqdNumOfPartitions;
break;
default:
// leave all values unchanged
break;
}
if (result)
// try to reconcile the two different DoPs
// - if parallelism is OFF, ignore UDF parallelism
switch (parallelControlSetting)
{
case DF_OFF:
// this overrides the UDF method
if (!numOfESPsForced)
{
result = FALSE;
numOfESPs = 1;
}
break;
case DF_SYSTEM:
case DF_ON:
case DF_MAXIMUM:
default:
{
if (!numOfESPsForced)
if (parallelControlSetting == DF_SYSTEM &&
reqdNumOfPartitions != ANY_NUMBER_OF_PARTITIONS ||
udfDoP == ANY_NUMBER_OF_PARTITIONS)
{
// if CQD is SYSTEM and parent requires
// a degree of ||ism, go with that
numOfESPs = rppForMe->getCountOfPipelines();
allowedDeviation = 1.0;
}
else
{
// allow udfDoP, up to 4 * max. degree of parallelism
numOfESPs = MINOF(
udfDoP,
4*maxDoP);
if (numOfESPs == udfDoP)
// if we chose the exact DoP requested by the UDF, then
// stick with the number the UDF specified, no deviation
allowedDeviation = 0.0;
}
}
break;
}
}
return result;
}
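// Editorial worked example (values assumed): suppose the base class
// suggests numOfESPs = 8, the UDF answers udfDoP = 50, maxDoP = 16, the
// CQD is DF_ON, and the ESP count is not forced. The default case leaves
// udfDoP at 50, and the reconciliation step picks
//   numOfESPs = MINOF(50, 4 * 16) = 50,
// which equals udfDoP, so allowedDeviation is pinned to 0.0 and the plan
// must use exactly the degree of parallelism the UDF asked for.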
PartitioningFunction* TableMappingUDF::mapPartitioningFunction(
const PartitioningFunction* partFunc,
NABoolean rewriteForChild0)
{
return RelExpr::mapPartitioningFunction(partFunc, rewriteForChild0);
};
NABoolean TableMappingUDF::isBigMemoryOperator(const PlanWorkSpace* pws,
const Lng32 /*planNumber*/)
{
NABoolean result = FALSE;
const Context* context = pws->getContext();
const TMUDFPlanWorkSpace *udfPWS = static_cast<const TMUDFPlanWorkSpace *>(pws);
int udfWriterDop = tmudr::UDRPlanInfo::ANY_DEGREE_OF_PARALLELISM;
if (udfPWS->getUDRPlanInfo())
udfWriterDop = udfPWS->getUDRPlanInfo()->getDesiredDegreeOfParallelism();
if (udfWriterDop > 0)
{
// the UDF writer specified a desired degree of parallelism,
// this means that the UDF needs parallelism, so it is a BMO
result = TRUE;
}
else
{
// values <= 0 indicate special instructions for the DoP,
// defined as enum values in file ../sqludr/sqludr.h
switch (udfWriterDop)
{
case tmudr::UDRPlanInfo::MAX_DEGREE_OF_PARALLELISM:
case tmudr::UDRPlanInfo::ONE_INSTANCE_PER_NODE:
result = TRUE;
break;
default:
// leave result at FALSE
break;
}
}
return result;
};
// -----------------------------------------------------------------------
// PhysicalTableMappingUDF::costMethod()
// Obtain a pointer to a CostMethod object providing access
// to the cost estimation functions for nodes of this class.
// -----------------------------------------------------------------------
CostMethod* PhysicalTableMappingUDF::costMethod() const
{
static THREAD_P CostMethodTableMappingUDF *m = NULL;
if (m == NULL)
m = new (GetCliGlobals()->exCollHeap()) CostMethodTableMappingUDF();
return m;
}
PlanWorkSpace * PhysicalTableMappingUDF::allocateWorkSpace() const
{
PlanWorkSpace *result =
new(CmpCommon::statementHeap()) TMUDFPlanWorkSpace(getArity());
return result;
}
Context* PhysicalTableMappingUDF::createContextForAChild(Context* myContext,
PlanWorkSpace* pws,
Lng32& childIndex)
{
// ---------------------------------------------------------------------
// If one Context has been generated for each child, return NULL
// to signal completion. This will also take care of 0 child case.
// ---------------------------------------------------------------------
childIndex = pws->getCountOfChildContexts();
if (childIndex == getArity())
return NULL;
Lng32 planNumber = 0;
const ReqdPhysicalProperty* rppForMe = myContext->getReqdPhysicalProperty();
Lng32 childNumPartsRequirement = ANY_NUMBER_OF_PARTITIONS;
float childNumPartsAllowedDeviation = 0.0;
NABoolean numOfESPsForced = FALSE;
RequirementGenerator rg(child(childIndex),rppForMe);
TableMappingUDFChildInfo * childInfo = getChildInfo(childIndex);
TMUDFInputPartReq childPartReqType = childInfo->getPartitionType();
PartitioningRequirement* partReqForChild = NULL;
NABoolean useAParallelPlan = okToAttemptESPParallelism(
myContext,
pws,
childNumPartsRequirement,
childNumPartsAllowedDeviation,
numOfESPsForced);
// add PARTITION BY as a required partitioning key
if (useAParallelPlan)
if (childPartReqType == SPECIFIED_PARTITIONING)
{
// if some specified partitioning is to be required from the child
// then the required partitioning columns should be mentioned
CMPASSERT(NOT childInfo->getPartitionBy().isEmpty());
rg.addPartitioningKey(childInfo->getPartitionBy());
}
else if(childPartReqType == REPLICATE_PARTITIONING)
{
// get the number of replicas
// for right now just get whatever number of streams the parent requires
Lng32 countOfPartitions = childNumPartsRequirement;
if(rppForMe->getPartitioningRequirement() &&
(countOfPartitions < rppForMe->getCountOfPartitions()))
countOfPartitions = rppForMe->getCountOfPartitions();
if(countOfPartitions > 1)
partReqForChild = new (CmpCommon::statementHeap() )
RequireReplicateViaBroadcast(countOfPartitions);
else
partReqForChild = new(CmpCommon::statementHeap())
RequireExactlyOnePartition();
rg.addPartRequirement(partReqForChild);
}
// Since we treat a TMUDF like a MapReduce operator, we ensure
// that the TMUDF sees all values of a particular partition
// together. We do that by requesting an arrangement by the
// PARTITION BY columns, if any are specified. This applies
// to parallel and serial plans.
rg.addArrangement(childInfo->getPartitionBy(),ESP_SOT);
// add ORDER BY as a required order
if (NOT childInfo->getOrderBy().isEmpty())
{
ValueIdList sortKey(getChildInfo(0)->getPartitionBy());
for (Int32 i=0;i<(Int32)getChildInfo(0)->getOrderBy().entries();i++)
sortKey.insert(getChildInfo(0)->getOrderBy()[i]);
rg.addSortKey(sortKey, ESP_SOT);
}
// add requirement for the degree of parallelism
if (useAParallelPlan)
{
if (NOT numOfESPsForced)
rg.makeNumOfPartsFeasible(childNumPartsRequirement,
&childNumPartsAllowedDeviation);
rg.addNumOfPartitions(childNumPartsRequirement,
childNumPartsAllowedDeviation);
}
else
rg.addNumOfPartitions(1);
// ---------------------------------------------------------------------
// Done adding all the requirements together, now see whether it worked
// and give up if it is not possible to satisfy them
// ---------------------------------------------------------------------
if (NOT rg.checkFeasibility())
{
// remember this so that we can give an appropriate error in case
// we fail to produce a plan
char reason[250];
snprintf(reason,
sizeof(reason),
"%s, use %d parallel streams in context %s",
getUserTableName().getCorrNameAsString().data(),
childNumPartsRequirement,
myContext->getRPPString().data());
CmpCommon::statement()->setTMUDFRefusedRequirements(reason);
return NULL;
}
// ---------------------------------------------------------------------
// Compute the cost limit to be applied to the child.
// ---------------------------------------------------------------------
CostLimit* costLimit = computeCostLimit(myContext, pws);
// ---------------------------------------------------------------------
// Get a Context for optimizing the child.
// Search for an existing Context in the CascadesGroup to which the
// child belongs that requires the same properties as those in
// rppForChild. Reuse it, if found. Otherwise, create a new Context
// that contains rppForChild as the required physical properties.
// ---------------------------------------------------------------------
Context* result = shareContext(
childIndex,
rg.produceRequirement(),
myContext->getInputPhysicalProperty(),
costLimit,
myContext,
myContext->getInputLogProp());
// ---------------------------------------------------------------------
// Store the Context for the child in the PlanWorkSpace.
// ---------------------------------------------------------------------
pws->storeChildContext(childIndex, planNumber, result);
return result;
};
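// Editorial sketch of the refusal bookkeeping above (names assumed): if
// feasibility fails for a table-mapping UDF named MY_UDF while 8 parallel
// streams were requested, the recorded reason reads roughly
//   "MY_UDF, use 8 parallel streams in context <required-property string>"
// and setTMUDFRefusedRequirements() keeps it so the compiler can emit a
// meaningful error if no plan is produced at all.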
PhysicalProperty* PhysicalTableMappingUDF::synthPhysicalProperty(
const Context* myContext,
const Lng32 planNumber,
PlanWorkSpace *pws)
{
PartitioningFunction* myPartFunc = NULL;
Int32 arity = getArity();
Lng32 numOfESPs = 0;
NABoolean createSinglePartFunc = FALSE;
NABoolean createRandomPartFunc = FALSE;
if (arity == 0)
{
// for a TMUDF with no table inputs, call okToAttemptESPParallelism()
// here to determine the DoP. In the other case where we had table
// inputs, we did that already in
// PhysicalTableMappingUDF::createContextForAChild()
float allowedDeviation = 0.0;
NABoolean numOfESPsForced = FALSE;
NABoolean useAParallelPlan = okToAttemptESPParallelism(
myContext,
pws,
numOfESPs,
allowedDeviation,
numOfESPsForced);
if (useAParallelPlan && numOfESPs > 1)
createRandomPartFunc = TRUE;
else
createSinglePartFunc = TRUE;
}
else
{
const PhysicalProperty * sppOfChild;
const PartitioningFunction *childPartFunc;
Int32 childToUse = 0;
NABoolean foundChildToUse = FALSE;
// find a child that is not replicated, the first such
// child will determine our partitioning function
while (!foundChildToUse && childToUse < arity)
{
sppOfChild =
myContext->getPhysicalPropertyOfSolutionForChild(childToUse);
childPartFunc =
sppOfChild->getPartitioningFunction();
if (childPartFunc &&
!childPartFunc->isAReplicationPartitioningFunction())
foundChildToUse = TRUE;
else
childToUse++;
}
if (!foundChildToUse ||
childPartFunc->isASinglePartitionPartitioningFunction())
{
createSinglePartFunc = TRUE;
}
else
{
// Check whether the partitioning key of the child is visible
// in our characteristic outputs. If so, then we can map the
// first child's partitioning function to our own. Otherwise,
// we will generate a HASH2 part func on a random number - in
// other words that's a partitioning function with a known
// number of partitions but an unknown partitioning key.
ValueIdSet passThruCols(udfOutputToChildInputMap_.getBottomValues());
if (passThruCols.contains(childPartFunc->getPartitioningKey()))
{
// use a copy of the child's part func, with the key
// columns remapped to our corresponding pass-through
// output columns
myPartFunc = childPartFunc->copyAndRemap(
udfOutputToChildInputMap_, TRUE);
}
else
{
// child's partitioning key is not visible to the
// parent, create a part func w/o a usable partitioning key
numOfESPs = childPartFunc->getCountOfPartitions();
createRandomPartFunc = TRUE;
}
} // found a partitioned child
} // arity > 0
// a couple of common cases
if (createSinglePartFunc)
{
//----------------------------------------------------------
// Create a node map with a single, active, wild-card entry.
//----------------------------------------------------------
NodeMap* myNodeMap = new(CmpCommon::statementHeap())
NodeMap(CmpCommon::statementHeap(),
1,
NodeMapEntry::ACTIVE);
myPartFunc = new(CmpCommon::statementHeap())
SinglePartitionPartitioningFunction(myNodeMap);
}
else if (createRandomPartFunc)
{
//-----------------------------------------------------------
// Create a node map with numOfESPs active, wild-card entries
//-----------------------------------------------------------
NodeMap* myNodeMap = new(CmpCommon::statementHeap())
NodeMap(CmpCommon::statementHeap(),
numOfESPs,
NodeMapEntry::ACTIVE);
// set node numbers in the entries, if we need to have one
// ESP per node
if (numOfESPs == gpClusterInfo->numOfSMPs())
for (int i=0; i<numOfESPs; i++)
myNodeMap->getNodeMapEntry(i)->setNodeNumber(i);
ValueIdSet partKey;
ItemExpr *randNum =
new(CmpCommon::statementHeap()) RandomNum(NULL, TRUE);
randNum->synthTypeAndValueId();
partKey.insert(randNum->getValueId());
myPartFunc = new(CmpCommon::statementHeap())
Hash2PartitioningFunction (partKey,
partKey,
numOfESPs,
myNodeMap);
myPartFunc->createPartitioningKeyPredicates();
}
PhysicalProperty * sppForMe =
new(CmpCommon::statementHeap()) PhysicalProperty(
myPartFunc,
EXECUTE_IN_MASTER_AND_ESP,
SOURCE_VIRTUAL_TABLE);
sppForMe->setUDRPlanInfo(
static_cast<TMUDFPlanWorkSpace *>(pws)->getUDRPlanInfo());
// remove anything that's not covered by the group attributes
sppForMe->enforceCoverageByGroupAttributes (getGroupAttr());
return sppForMe ;
}
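// Editorial design note, not in the original source: when the child's
// partitioning key is not visible among the UDF's pass-through outputs,
// the method above deliberately builds a Hash2 function keyed on a
// RandomNum expression, i.e. a partitioning function with a known
// partition count but no usable key, so parent operators cannot assume
// co-location on any real column.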
//***********************************************************************
//
//
//***********************************************************************
NABoolean RelExpr::isBigMemoryOperator(const PlanWorkSpace *pws,
const Lng32)
{
const Context* context = pws->getContext();
const PhysicalProperty* spp = context->getPlan()->getPhysicalProperty();
if (spp == NULL || CmpCommon::getDefault(COMP_BOOL_51) != DF_ON)
return FALSE;
CurrentFragmentBigMemoryProperty * bigMemoryProperty =
new (CmpCommon::statementHeap())
CurrentFragmentBigMemoryProperty();
((PhysicalProperty*) spp)->setBigMemoryEstimationProperty(bigMemoryProperty);
bigMemoryProperty->setOperatorType(getOperatorType());
if (getOperatorType() == REL_EXCHANGE)
{
bigMemoryProperty->setCurrentFileSize(0);
bigMemoryProperty->incrementCumulativeMemSize(0);
}
else
{
//get cumulative file size of the fragment; get the child spp??
for (Int32 i=0; i<getArity(); i++)
{
const PhysicalProperty *childSpp =
context->getPhysicalPropertyOfSolutionForChild(i);
if (childSpp != NULL)
{
CurrentFragmentBigMemoryProperty * memProp =
(CurrentFragmentBigMemoryProperty *)
((PhysicalProperty *)childSpp)->getBigMemoryEstimationProperty();
if (memProp != NULL)
{
double childCumulativeMemSize = memProp->getCumulativeFileSize();
bigMemoryProperty->incrementCumulativeMemSize(childCumulativeMemSize);
}
}
}
}
return FALSE;
}
PhysicalProperty*
ControlRunningQuery::synthPhysicalProperty(const Context* myContext,
const Lng32 planNumber,
PlanWorkSpace *pws)
{
//----------------------------------------------------------
// Create a node map with a single, active, wild-card entry.
//----------------------------------------------------------
NodeMap* myNodeMap = new(CmpCommon::statementHeap())
NodeMap(CmpCommon::statementHeap(),
1,
NodeMapEntry::ACTIVE);
//------------------------------------------------------------
// Synthesize a partitioning function with a single partition.
//------------------------------------------------------------
PartitioningFunction* myPartFunc =
new(CmpCommon::statementHeap())
SinglePartitionPartitioningFunction(myNodeMap);
PhysicalProperty * sppForMe =
new(CmpCommon::statementHeap())
PhysicalProperty(myPartFunc,
EXECUTE_IN_MASTER,
SOURCE_VIRTUAL_TABLE);
// remove anything that's not covered by the group attributes
sppForMe->enforceCoverageByGroupAttributes (getGroupAttr()) ;
return sppForMe ;
} // ControlRunningQuery::synthPhysicalProperty()
| 1 | 7,769 | maybe say "returns the default cost method that returns an object of cost 1". | apache-trafodion | cpp |
@@ -293,6 +293,19 @@ func (r *DefaultRuleRenderer) endpointIptablesChain(
},
})
+ rules = append(rules, Rule{
+ Match: Match().ProtocolNum(ProtoUDP).
+ DestPorts(uint16(r.Config.VXLANPort)).
+ VXLANVNI(uint32(r.Config.VXLANVNI)),
+ Action: DropAction{},
+ Comment: "Drop VXLAN encapped packets originating in pods",
+ })
+ rules = append(rules, Rule{
+ Match: Match().ProtocolNum(ProtoIPIP),
+ Action: DropAction{},
+ Comment: "Drop IPinIP encapped packets originating in pods",
+ })
+
if len(policyNames) > 0 {
// Clear the "pass" mark. If a policy sets that mark, we'll skip the rest of the policies and
// continue processing the profiles, if there are any. | 1 | // Copyright (c) 2016-2018 Tigera, Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package rules
import (
log "github.com/sirupsen/logrus"
"github.com/projectcalico/felix/hashutils"
. "github.com/projectcalico/felix/iptables"
"github.com/projectcalico/felix/proto"
)
func (r *DefaultRuleRenderer) WorkloadEndpointToIptablesChains(
ifaceName string,
epMarkMapper EndpointMarkMapper,
adminUp bool,
ingressPolicies []string,
egressPolicies []string,
profileIDs []string,
) []*Chain {
result := []*Chain{}
result = append(result,
// Chain for traffic _to_ the endpoint.
r.endpointIptablesChain(
ingressPolicies,
profileIDs,
ifaceName,
PolicyInboundPfx,
ProfileInboundPfx,
WorkloadToEndpointPfx,
"", // No fail-safe chains for workloads.
chainTypeNormal,
adminUp,
r.filterAllowAction, // Workload endpoint chains are only used in the filter table
),
// Chain for traffic _from_ the endpoint.
r.endpointIptablesChain(
egressPolicies,
profileIDs,
ifaceName,
PolicyOutboundPfx,
ProfileOutboundPfx,
WorkloadFromEndpointPfx,
"", // No fail-safe chains for workloads.
chainTypeNormal,
adminUp,
r.filterAllowAction, // Workload endpoint chains are only used in the filter table
),
)
if r.KubeIPVSSupportEnabled {
// Chain for setting endpoint mark of an endpoint.
result = append(result,
r.endpointSetMarkChain(
ifaceName,
epMarkMapper,
SetEndPointMarkPfx,
),
)
}
return result
}
func (r *DefaultRuleRenderer) HostEndpointToFilterChains(
ifaceName string,
epMarkMapper EndpointMarkMapper,
ingressPolicyNames []string,
egressPolicyNames []string,
ingressForwardPolicyNames []string,
egressForwardPolicyNames []string,
profileIDs []string,
) []*Chain {
log.WithField("ifaceName", ifaceName).Debug("Rendering filter host endpoint chain.")
result := []*Chain{}
result = append(result,
// Chain for output traffic _to_ the endpoint.
r.endpointIptablesChain(
egressPolicyNames,
profileIDs,
ifaceName,
PolicyOutboundPfx,
ProfileOutboundPfx,
HostToEndpointPfx,
ChainFailsafeOut,
chainTypeNormal,
true, // Host endpoints are always admin up.
r.filterAllowAction,
),
// Chain for input traffic _from_ the endpoint.
r.endpointIptablesChain(
ingressPolicyNames,
profileIDs,
ifaceName,
PolicyInboundPfx,
ProfileInboundPfx,
HostFromEndpointPfx,
ChainFailsafeIn,
chainTypeNormal,
true, // Host endpoints are always admin up.
r.filterAllowAction,
),
// Chain for forward traffic _to_ the endpoint.
r.endpointIptablesChain(
egressForwardPolicyNames,
profileIDs,
ifaceName,
PolicyOutboundPfx,
ProfileOutboundPfx,
HostToEndpointForwardPfx,
"", // No fail-safe chains for forward traffic.
chainTypeForward,
true, // Host endpoints are always admin up.
r.filterAllowAction,
),
// Chain for forward traffic _from_ the endpoint.
r.endpointIptablesChain(
ingressForwardPolicyNames,
profileIDs,
ifaceName,
PolicyInboundPfx,
ProfileInboundPfx,
HostFromEndpointForwardPfx,
"", // No fail-safe chains for forward traffic.
chainTypeForward,
true, // Host endpoints are always admin up.
r.filterAllowAction,
),
)
if r.KubeIPVSSupportEnabled {
// Chain for setting endpoint mark of an endpoint.
result = append(result,
r.endpointSetMarkChain(
ifaceName,
epMarkMapper,
SetEndPointMarkPfx,
),
)
}
return result
}
func (r *DefaultRuleRenderer) HostEndpointToRawChains(
ifaceName string,
ingressPolicyNames []string,
egressPolicyNames []string,
) []*Chain {
log.WithField("ifaceName", ifaceName).Debug("Rendering raw (untracked) host endpoint chain.")
return []*Chain{
// Chain for traffic _to_ the endpoint.
r.endpointIptablesChain(
egressPolicyNames,
nil, // We don't render profiles into the raw table.
ifaceName,
PolicyOutboundPfx,
ProfileOutboundPfx,
HostToEndpointPfx,
ChainFailsafeOut,
chainTypeUntracked,
true, // Host endpoints are always admin up.
AcceptAction{},
),
// Chain for traffic _from_ the endpoint.
r.endpointIptablesChain(
ingressPolicyNames,
nil, // We don't render profiles into the raw table.
ifaceName,
PolicyInboundPfx,
ProfileInboundPfx,
HostFromEndpointPfx,
ChainFailsafeIn,
chainTypeUntracked,
true, // Host endpoints are always admin up.
AcceptAction{},
),
}
}
func (r *DefaultRuleRenderer) HostEndpointToMangleChains(
ifaceName string,
preDNATPolicyNames []string,
) []*Chain {
log.WithField("ifaceName", ifaceName).Debug("Rendering pre-DNAT host endpoint chain.")
return []*Chain{
// Chain for traffic _from_ the endpoint. Pre-DNAT policy does not apply to
// outgoing traffic through a host endpoint.
r.endpointIptablesChain(
preDNATPolicyNames,
nil, // We don't render profiles into the raw table.
ifaceName,
PolicyInboundPfx,
ProfileInboundPfx,
HostFromEndpointPfx,
ChainFailsafeIn,
chainTypePreDNAT,
true, // Host endpoints are always admin up.
r.mangleAllowAction,
),
}
}
type endpointChainType int
const (
chainTypeNormal endpointChainType = iota
chainTypeUntracked
chainTypePreDNAT
chainTypeForward
)
func (r *DefaultRuleRenderer) endpointSetMarkChain(
name string,
epMarkMapper EndpointMarkMapper,
endpointPrefix string,
) *Chain {
rules := []Rule{}
chainName := EndpointChainName(endpointPrefix, name)
if endPointMark, err := epMarkMapper.GetEndpointMark(name); err == nil {
// Set endpoint mark.
rules = append(rules, Rule{
Action: SetMaskedMarkAction{
Mark: endPointMark,
Mask: epMarkMapper.GetMask()},
})
}
return &Chain{
Name: chainName,
Rules: rules,
}
}
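// Editorial sketch, not in the original source: for an endpoint mark of
// 0x100 under a mask of 0xff00, the chain built above holds one rule that
// should render to something roughly like
//
//	-A <set-mark-chain-for-iface> -j MARK --set-mark 0x100/0xff00
//
// The chain name, mark and mask values are assumed for illustration; the
// exact flag spelling is owned by the iptables Rule/Action types.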
func (r *DefaultRuleRenderer) endpointIptablesChain(
policyNames []string,
profileIds []string,
name string,
policyPrefix PolicyChainNamePrefix,
profilePrefix ProfileChainNamePrefix,
endpointPrefix string,
failsafeChain string,
chainType endpointChainType,
adminUp bool,
allowAction Action,
) *Chain {
rules := []Rule{}
chainName := EndpointChainName(endpointPrefix, name)
if !adminUp {
// Endpoint is admin-down, drop all traffic to/from it.
rules = append(rules, Rule{
Match: Match(),
Action: DropAction{},
Comment: "Endpoint admin disabled",
})
return &Chain{
Name: chainName,
Rules: rules,
}
}
if chainType != chainTypeUntracked {
// Tracked chain: install conntrack rules, which implement our stateful connections.
// This allows return traffic associated with a previously-permitted request.
rules = r.appendConntrackRules(rules, allowAction)
}
// First set up failsafes.
if failsafeChain != "" {
rules = append(rules, Rule{
Action: JumpAction{Target: failsafeChain},
})
}
// Start by ensuring that the accept mark bit is clear; policies set that bit to indicate
// that they accepted the packet.
rules = append(rules, Rule{
Action: ClearMarkAction{
Mark: r.IptablesMarkAccept,
},
})
if len(policyNames) > 0 {
// Clear the "pass" mark. If a policy sets that mark, we'll skip the rest of the policies and
// continue processing the profiles, if there are any.
rules = append(rules, Rule{
Comment: "Start of policies",
Action: ClearMarkAction{
Mark: r.IptablesMarkPass,
},
})
// Then, jump to each policy in turn.
for _, polID := range policyNames {
polChainName := PolicyChainName(
policyPrefix,
&proto.PolicyID{Name: polID},
)
// If a previous policy didn't set the "pass" mark, jump to the policy.
rules = append(rules, Rule{
Match: Match().MarkClear(r.IptablesMarkPass),
Action: JumpAction{Target: polChainName},
})
// If policy marked packet as accepted, it returns, setting the accept
// mark bit.
if chainType == chainTypeUntracked {
// For an untracked policy, map allow to "NOTRACK and ALLOW".
rules = append(rules, Rule{
Match: Match().MarkSingleBitSet(r.IptablesMarkAccept),
Action: NoTrackAction{},
})
}
// If accept bit is set, return from this chain. We don't immediately
// accept because there may be other policy still to apply.
rules = append(rules, Rule{
Match: Match().MarkSingleBitSet(r.IptablesMarkAccept),
Action: ReturnAction{},
Comment: "Return if policy accepted",
})
}
if chainType == chainTypeNormal || chainType == chainTypeForward {
// When rendering normal and forward rules, if no policy marked the packet as "pass", drop the
// packet.
//
// For untracked and pre-DNAT rules, we don't do that because there may be
// normal rules still to be applied to the packet in the filter table.
rules = append(rules, Rule{
Match: Match().MarkClear(r.IptablesMarkPass),
Action: DropAction{},
Comment: "Drop if no policies passed packet",
})
}
} else if chainType == chainTypeForward {
// Forwarded traffic is allowed when there are no policies with
// applyOnForward that apply to this endpoint (and in this direction).
rules = append(rules, Rule{
Action: SetMarkAction{Mark: r.IptablesMarkAccept},
Comment: "Allow forwarded traffic by default",
})
rules = append(rules, Rule{
Action: ReturnAction{},
Comment: "Return for accepted forward traffic",
})
}
if chainType == chainTypeNormal {
// Then, jump to each profile in turn.
for _, profileID := range profileIds {
profChainName := ProfileChainName(profilePrefix, &proto.ProfileID{Name: profileID})
rules = append(rules,
Rule{Action: JumpAction{Target: profChainName}},
// If policy marked packet as accepted, it returns, setting the
// accept mark bit. If that is set, return from this chain.
Rule{
Match: Match().MarkSingleBitSet(r.IptablesMarkAccept),
Action: ReturnAction{},
Comment: "Return if profile accepted",
})
}
// When rendering normal rules, if no profile marked the packet as accepted, drop
// the packet.
//
// For untracked rules, we don't do that because there may be tracked rules
// still to be applied to the packet in the filter table.
rules = append(rules, Rule{
Match: Match(),
Action: DropAction{},
Comment: "Drop if no profiles matched",
})
}
return &Chain{
Name: chainName,
Rules: rules,
}
}
func (r *DefaultRuleRenderer) appendConntrackRules(rules []Rule, allowAction Action) []Rule {
// Allow return packets for established connections.
if allowAction != (AcceptAction{}) {
// If we've been asked to return instead of accept the packet immediately,
// make sure we flag the packet as allowed.
rules = append(rules,
Rule{
Match: Match().ConntrackState("RELATED,ESTABLISHED"),
Action: SetMarkAction{Mark: r.IptablesMarkAccept},
},
)
}
rules = append(rules,
Rule{
Match: Match().ConntrackState("RELATED,ESTABLISHED"),
Action: allowAction,
},
)
if !r.Config.DisableConntrackInvalid {
// Drop packets that aren't either a valid handshake or part of an established
// connection.
rules = append(rules, Rule{
Match: Match().ConntrackState("INVALID"),
Action: DropAction{},
})
}
return rules
}
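// Editorial worked example (rendering assumed): with allowAction set to
// ReturnAction{} and conntrack-INVALID dropping enabled, the rules
// appended above should behave approximately like
//
//	-m conntrack --ctstate RELATED,ESTABLISHED -j MARK --set-mark <accept-bit>
//	-m conntrack --ctstate RELATED,ESTABLISHED -j RETURN
//	-m conntrack --ctstate INVALID -j DROP
//
// so return traffic is flagged as accepted and short-circuited, while
// invalid packets are dropped outright.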
func EndpointChainName(prefix string, ifaceName string) string {
return hashutils.GetLengthLimitedID(
prefix,
ifaceName,
MaxChainNameLength,
)
}
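// Editorial usage sketch (prefix value assumed): EndpointChainName("cali-tw-",
// "eth0") simply yields "cali-tw-eth0", while an interface name long enough to
// push the result past MaxChainNameLength is expected to come back
// hash-shortened by GetLengthLimitedID so the chain name still fits the
// iptables chain-name length limit.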
| 1 | 16,987 | I believe these rules will be enforced both (1) on egress from a local workload, and (2) on ingress **to** a local workload. Right? I understand that we definitely want (1), but do we really want to enforce (2) as well? | projectcalico-felix | c |
@@ -125,7 +125,9 @@ public class SalesforceNetworkPlugin extends ForcePlugin {
try {
// Not a 2xx status
if (!response.isSuccess()) {
- callbackContext.error(response.asString());
+ JSONObject errorObj = new JSONObject();
+ errorObj.putOpt("response", response.fullResponseAsJSONObject());
+ callbackContext.error(errorObj.toString());
}
// Binary response
else if (returnBinary) { | 1 | /*
* Copyright (c) 2016-present, salesforce.com, inc.
* All rights reserved.
* Redistribution and use of this software in source and binary forms, with or
* without modification, are permitted provided that the following conditions
* are met:
* - Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* - Neither the name of salesforce.com, inc. nor the names of its contributors
* may be used to endorse or promote products derived from this software without
* specific prior written permission of salesforce.com, inc.
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
package com.salesforce.androidsdk.phonegap.plugin;
import android.text.TextUtils;
import android.util.Base64;
import com.salesforce.androidsdk.phonegap.app.SalesforceHybridSDKManager;
import com.salesforce.androidsdk.phonegap.ui.SalesforceDroidGapActivity;
import com.salesforce.androidsdk.phonegap.util.SalesforceHybridLogger;
import com.salesforce.androidsdk.rest.RestClient;
import com.salesforce.androidsdk.rest.RestRequest;
import com.salesforce.androidsdk.rest.RestResponse;
import org.apache.cordova.CallbackContext;
import org.json.JSONArray;
import org.json.JSONException;
import org.json.JSONObject;
import java.io.File;
import java.io.IOException;
import java.io.UnsupportedEncodingException;
import java.net.URI;
import java.net.URISyntaxException;
import java.net.URLEncoder;
import java.util.HashMap;
import java.util.Iterator;
import java.util.Map;
import okhttp3.MediaType;
import okhttp3.MultipartBody;
import okhttp3.RequestBody;
/**
* PhoneGap plugin for native networking.
*
* @author bhariharan
*/
public class SalesforceNetworkPlugin extends ForcePlugin {
private static final String TAG = "SalesforceNetworkPlugin";
private static final String METHOD_KEY = "method";
private static final String END_POINT_KEY = "endPoint";
private static final String PATH_KEY = "path";
private static final String QUERY_PARAMS_KEY = "queryParams";
private static final String HEADER_PARAMS_KEY = "headerParams";
private static final String FILE_PARAMS_KEY = "fileParams";
private static final String FILE_MIME_TYPE_KEY = "fileMimeType";
private static final String FILE_URL_KEY = "fileUrl";
private static final String FILE_NAME_KEY = "fileName";
private static final String RETURN_BINARY = "returnBinary";
private static final String ENCODED_BODY = "encodedBody";
private static final String CONTENT_TYPE = "contentType";
private static final String DOES_NOT_REQUIRE_AUTHENTICATION = "doesNotRequireAuthentication";
/**
* Supported plugin actions that the client can take.
*/
enum Action {
pgSendRequest
}
@Override
public boolean execute(String actionStr, JavaScriptPluginVersion jsVersion, JSONArray args,
CallbackContext callbackContext) {
Action action;
try {
action = Action.valueOf(actionStr);
switch(action) {
case pgSendRequest:
sendRequest(args, callbackContext);
return true;
default:
return false;
}
} catch (IllegalArgumentException e) {
return false;
}
}
/**
* Native implementation for "sendRequest" action.
*
* @param callbackContext Used when calling back into Javascript.
*/
protected void sendRequest(JSONArray args, final CallbackContext callbackContext) {
try {
final RestRequest request = prepareRestRequest(args);
final boolean returnBinary = ((JSONObject) args.get(0)).optBoolean(RETURN_BINARY, false);
final boolean doesNotRequireAuth = ((JSONObject) args.get(0)).optBoolean(DOES_NOT_REQUIRE_AUTHENTICATION, false);
// Sends the request.
final RestClient restClient = getRestClient(doesNotRequireAuth);
if (restClient == null) {
return;
}
restClient.sendAsync(request, new RestClient.AsyncRequestCallback() {
@Override
public void onSuccess(RestRequest request, RestResponse response) {
try {
// Not a 2xx status
if (!response.isSuccess()) {
callbackContext.error(response.asString());
}
// Binary response
else if (returnBinary) {
JSONObject result = new JSONObject();
result.put(CONTENT_TYPE, response.getContentType());
result.put(ENCODED_BODY, Base64.encodeToString(response.asBytes(), Base64.DEFAULT));
callbackContext.success(result);
}
// Some response
else if (response.asBytes().length > 0) {
// Is it a JSONObject?
final JSONObject responseAsJSONObject = parseResponseAsJSONObject(response);
if (responseAsJSONObject != null) {
callbackContext.success(responseAsJSONObject);
return;
}
// Is it a JSONArray?
final JSONArray responseAsJSONArray = parseResponseAsJSONArray(response);
if (responseAsJSONArray != null) {
callbackContext.success(responseAsJSONArray);
return;
}
// Otherwise return as string
callbackContext.success(response.asString());
}
// No response
else {
callbackContext.success();
}
} catch (Exception e) {
SalesforceHybridLogger.e(TAG, "Error while parsing response", e);
onError(e);
}
}
@Override
public void onError(Exception exception) {
callbackContext.error(exception.getMessage());
}
});
} catch (Exception exception) {
callbackContext.error(exception.getMessage());
}
}
private JSONObject parseResponseAsJSONObject(RestResponse response) throws IOException {
try {
return response.asJSONObject();
}
catch (JSONException e) {
// Not a JSON object
return null;
}
}
private JSONArray parseResponseAsJSONArray(RestResponse response) throws IOException {
try {
return response.asJSONArray();
}
catch (JSONException e) {
// Not a JSON array
return null;
}
}
private RestRequest prepareRestRequest(JSONArray args) throws UnsupportedEncodingException,
URISyntaxException, JSONException {
final JSONObject arg0 = args.optJSONObject(0);
if (arg0 != null) {
final RestRequest.RestMethod method = RestRequest.RestMethod.valueOf(arg0.optString(METHOD_KEY));
final String endPoint = arg0.optString(END_POINT_KEY);
final String path = arg0.optString(PATH_KEY);
final String queryParamString = arg0.optString(QUERY_PARAMS_KEY);
JSONObject queryParams = new JSONObject();
if (!TextUtils.isEmpty(queryParamString)) {
queryParams = new JSONObject(queryParamString);
}
final JSONObject headerParams = arg0.optJSONObject(HEADER_PARAMS_KEY);
final Iterator<String> headerKeys = headerParams.keys();
final Map<String, String> additionalHeaders = new HashMap<>();
if (headerKeys != null) {
while (headerKeys.hasNext()) {
final String headerKeyStr = headerKeys.next();
if (!TextUtils.isEmpty(headerKeyStr)) {
additionalHeaders.put(headerKeyStr, headerParams.optString(headerKeyStr));
}
}
}
final JSONObject fileParams = arg0.optJSONObject(FILE_PARAMS_KEY);
// Prepares the request.
String urlParams = "";
RequestBody requestBody = null;
if (method == RestRequest.RestMethod.DELETE || method == RestRequest.RestMethod.GET
|| method == RestRequest.RestMethod.HEAD) {
urlParams = buildQueryString(queryParams);
} else {
requestBody = buildRequestBody(queryParams, fileParams);
}
final String separator = urlParams.isEmpty()
? ""
: path.contains("?")
? (path.endsWith("&") ? "" : "&")
: "?";
return new RestRequest(method, endPoint + path + separator + urlParams,
requestBody, additionalHeaders);
}
return null;
}
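// Editorial worked example for the separator logic above (values assumed):
// with path "/services/data/v99.0/query?q=abc" (contains '?' and does not
// end in '&') and non-empty urlParams "limit=10&", separator resolves to
// "&", giving ".../query?q=abc&limit=10&". A path without '?' would get
// separator "?", and empty urlParams always yield an empty separator.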
private RestClient getRestClient(boolean doesNotRequireAuth) {
final SalesforceDroidGapActivity currentActivity = (SalesforceDroidGapActivity) cordova.getActivity();
if (currentActivity == null) {
return null;
}
if (doesNotRequireAuth) {
return currentActivity.buildClientManager().peekUnauthenticatedRestClient();
}
return currentActivity.getRestClient();
}
private static String buildQueryString(JSONObject params) throws UnsupportedEncodingException {
if (params == null || params.length() == 0) {
return "";
}
final StringBuilder sb = new StringBuilder();
final Iterator<String> keys = params.keys();
if (keys != null) {
while (keys.hasNext()) {
final String keyStr = keys.next();
if (!TextUtils.isEmpty(keyStr)) {
sb.append(keyStr).append("=").append(URLEncoder.encode(params.optString(keyStr),
RestRequest.UTF_8)).append("&");
}
}
}
return sb.toString();
}
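// Editorial worked example (input assumed, key order not guaranteed by
// JSONObject): given {"q": "SELECT Id FROM Account", "limit": "5"}, the
// loop above produces "q=SELECT+Id+FROM+Account&limit=5&". Values are
// URL-encoded (spaces become '+'), keys are appended as-is, and every
// pair, including the last, is followed by a trailing '&'.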
private static RequestBody buildRequestBody(JSONObject params, JSONObject fileParams) throws URISyntaxException {
if (fileParams == null || fileParams.length() == 0) {
return RequestBody.create(RestRequest.MEDIA_TYPE_JSON, params.toString());
} else {
final MultipartBody.Builder builder = new MultipartBody.Builder().setType(MultipartBody.FORM);
final Iterator<String> keys = params.keys();
if (keys != null) {
while (keys.hasNext()) {
final String keyStr = keys.next();
if (!TextUtils.isEmpty(keyStr)) {
builder.addFormDataPart(keyStr, params.optString(keyStr));
}
}
}
/*
* File params expected to be of the form:
* {<fileParamNameInPost>: {fileMimeType:<someMimeType>, fileUrl:<fileUrl>, fileName:<fileNameForPost>}}.
*/
final Iterator<String> fileKeys = fileParams.keys();
if (fileKeys != null) {
while (fileKeys.hasNext()) {
final String fileKeyStr = fileKeys.next();
if (!TextUtils.isEmpty(fileKeyStr)) {
final JSONObject fileParam = fileParams.optJSONObject(fileKeyStr);
if (fileParam != null) {
final String mimeType = fileParam.optString(FILE_MIME_TYPE_KEY);
final String name = fileParam.optString(FILE_NAME_KEY);
final URI url = new URI(fileParam.optString(FILE_URL_KEY));
final File file = new File(url);
final MediaType mediaType = MediaType.parse(mimeType);
builder.addFormDataPart(fileKeyStr, name, RequestBody.create(mediaType, file));
}
}
}
}
return builder.build();
}
}
}
| 1 | 17,801 | Use `response.asJsonObject()` instead. Also, use `put()` instead of `putOpt()`, `null` as a value is fine. | forcedotcom-SalesforceMobileSDK-Android | java |
@@ -113,7 +113,7 @@ describe('PasswordEditor', () => {
const editorHolder = $('.handsontableInputHolder');
const editor = editorHolder.find('.handsontableInput');
- expect(parseInt(editorHolder.css('z-index'), 10)).toBeGreaterThan(0);
+ expect(editorHolder.is(':visible')).toBe(true);
editor.val('Edgar');
| 1 | describe('PasswordEditor', () => {
const id = 'testContainer';
beforeEach(function() {
this.$container = $(`<div id="${id}" style="width: 300px; height: 300px;"></div>`).appendTo('body');
});
afterEach(function() {
if (this.$container) {
destroy();
this.$container.remove();
}
});
it('should display editor as password field', () => {
handsontable({
data: [
['Joe'],
['Timothy'],
['Margaret'],
['Jerry']
],
columns: [
{
editor: Handsontable.editors.PasswordEditor
}
]
});
selectCell(0, 0);
keyDown('enter');
const editor = $('.handsontableInput');
expect(editor.is(':visible')).toBe(true);
expect(editor.is(':password')).toBe(true);
});
it('should set passwordEditor using \'password\' alias', () => {
handsontable({
data: [
['Joe'],
['Timothy'],
['Margaret'],
['Jerry']
],
columns: [
{
editor: 'password'
}
]
});
selectCell(0, 0);
keyDown('enter');
const editor = $('.handsontableInput');
expect(editor.is(':visible')).toBe(true);
expect(editor.is(':password')).toBe(true);
});
it('should set passwordEditor using column type \'password\' ', () => {
handsontable({
data: [
['Joe'],
['Timothy'],
['Margaret'],
['Jerry']
],
columns: [
{
type: 'password'
}
]
});
selectCell(0, 0);
keyDown('enter');
const editorHolder = $('.handsontableInputHolder');
const editor = editorHolder.find('.handsontableInput');
expect(editorHolder.is(':visible')).toBe(true);
expect(editor.is(':password')).toBe(true);
});
it('should save values typed in passwordEditor', () => {
handsontable({
data: [
['Joe'],
['Timothy'],
['Margaret'],
['Jerry']
],
columns: [
{
editor: 'password'
}
]
});
selectCell(0, 0);
expect(getDataAtCell(0, 0)).toMatch('Joe');
expect(getRenderedValue(0, 0)).toMatch('Joe');
keyDown('enter');
const editorHolder = $('.handsontableInputHolder');
const editor = editorHolder.find('.handsontableInput');
expect(parseInt(editorHolder.css('z-index'), 10)).toBeGreaterThan(0);
editor.val('Edgar');
selectCell(1, 0); // closes editor and saves current value
expect(editorHolder.css('z-index')).toBe('-1');
expect(getDataAtCell(0, 0)).toMatch('Edgar');
expect(getRenderedValue(0, 0)).toMatch('Edgar');
});
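// Editorial note, an assumption about the editor internals: the z-index
// assertions in the test above treat the input holder's stacking order as
// a visibility proxy, i.e. a positive z-index while the editor is open and
// -1 once selectCell() commits the value and closes it.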
// The input element must not lose focus while new characters are entered; losing it breaks IME editor functionality for Asian users.
it('should not lose the focus on input element while inserting new characters (#839)', async() => {
let blured = false;
const listener = () => {
blured = true;
};
const hot = handsontable({
data: [
['Joe'],
['Timothy'],
['Margaret'],
['Jerry']
],
columns: [
{ data: 'id', type: 'password' },
],
});
selectCell(0, 0);
keyDownUp('enter');
hot.getActiveEditor().TEXTAREA.addEventListener('blur', listener);
await sleep(200);
hot.getActiveEditor().TEXTAREA.value = '1';
keyDownUp('1'.charCodeAt(0));
hot.getActiveEditor().TEXTAREA.value = '12';
keyDownUp('2'.charCodeAt(0));
hot.getActiveEditor().TEXTAREA.value = '123';
keyDownUp('3'.charCodeAt(0));
expect(blured).toBeFalsy();
hot.getActiveEditor().TEXTAREA.removeEventListener('blur', listener);
});
describe('IME support', () => {
it('should focus editable element after selecting the cell', async() => {
handsontable({
type: 'password',
});
selectCell(0, 0, 0, 0, true, false);
await sleep(10);
expect(document.activeElement).toBe(getActiveEditor().TEXTAREA);
});
});
});
| 1 | 14,925 | Are you sure? We've changed it during an introduction of the IME support. | handsontable-handsontable | js |
@@ -46,7 +46,7 @@ class QueryBuilder
$queryBuilder->leftJoin('entity.'.$sortFieldParts[0], $sortFieldParts[0]);
}
- if (!empty($dqlFilter)) {
+ if (null !== $dqlFilter) {
$queryBuilder->andWhere($dqlFilter);
}
| 1 | <?php
namespace EasyCorp\Bundle\EasyAdminBundle\Search;
use Doctrine\Bundle\DoctrineBundle\Registry;
use Doctrine\ORM\EntityManager;
use Doctrine\ORM\QueryBuilder as DoctrineQueryBuilder;
/**
* @author Javier Eguiluz <[email protected]>
*/
class QueryBuilder
{
/** @var Registry */
private $doctrine;
public function __construct(Registry $doctrine)
{
$this->doctrine = $doctrine;
}
/**
* Creates the query builder used to get all the records displayed by the
* "list" view.
*
* @param array $entityConfig
* @param string|null $sortField
* @param string|null $sortDirection
* @param string|null $dqlFilter
*
* @return DoctrineQueryBuilder
*/
public function createListQueryBuilder(array $entityConfig, $sortField = null, $sortDirection = null, $dqlFilter = null)
{
/* @var EntityManager $em */
$em = $this->doctrine->getManagerForClass($entityConfig['class']);
/* @var DoctrineQueryBuilder $queryBuilder */
$queryBuilder = $em->createQueryBuilder()
->select('entity')
->from($entityConfig['class'], 'entity')
;
$isSortedByDoctrineAssociation = false !== strpos($sortField, '.');
if ($isSortedByDoctrineAssociation) {
$sortFieldParts = explode('.', $sortField);
$queryBuilder->leftJoin('entity.'.$sortFieldParts[0], $sortFieldParts[0]);
}
if (!empty($dqlFilter)) {
$queryBuilder->andWhere($dqlFilter);
}
if (null !== $sortField) {
$queryBuilder->orderBy(sprintf('%s%s', $isSortedByDoctrineAssociation ? '' : 'entity.', $sortField), $sortDirection);
}
return $queryBuilder;
}
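// Editorial sketch (entity and filter assumed): for
// $entityConfig['class'] = 'App\Entity\Product',
// $sortField = 'category.name', $sortDirection = 'ASC' and
// $dqlFilter = 'entity.enabled = true', the builder above yields DQL
// roughly equivalent to:
// SELECT entity FROM App\Entity\Product entity
// LEFT JOIN entity.category category
// WHERE entity.enabled = true
// ORDER BY category.name ASC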
/**
* Creates the query builder used to get the results of the search query
* performed by the user in the "search" view.
*
* @param array $entityConfig
* @param string $searchQuery
* @param string|null $sortField
* @param string|null $sortDirection
* @param string|null $dqlFilter
*
* @return DoctrineQueryBuilder
*/
public function createSearchQueryBuilder(array $entityConfig, $searchQuery, $sortField = null, $sortDirection = null, $dqlFilter = null)
{
/* @var EntityManager $em */
$em = $this->doctrine->getManagerForClass($entityConfig['class']);
/* @var DoctrineQueryBuilder $queryBuilder */
$queryBuilder = $em->createQueryBuilder()
->select('entity')
->from($entityConfig['class'], 'entity')
;
$isSearchQueryNumeric = is_numeric($searchQuery);
$isSearchQuerySmallInteger = (\is_int($searchQuery) || ctype_digit($searchQuery)) && $searchQuery >= -32768 && $searchQuery <= 32767;
$isSearchQueryInteger = (\is_int($searchQuery) || ctype_digit($searchQuery)) && $searchQuery >= -2147483648 && $searchQuery <= 2147483647;
$isSearchQueryUuid = 1 === preg_match('/^[0-9a-f]{8}-[0-9a-f]{4}-[1-5][0-9a-f]{3}-[89ab][0-9a-f]{3}-[0-9a-f]{12}$/i', $searchQuery);
$lowerSearchQuery = mb_strtolower($searchQuery);
$queryParameters = array();
$entitiesAlreadyJoined = array();
foreach ($entityConfig['search']['fields'] as $fieldName => $metadata) {
$entityName = 'entity';
if (false !== strpos($fieldName, '.')) {
list($associatedEntityName, $associatedFieldName) = explode('.', $fieldName);
if (!\in_array($associatedEntityName, $entitiesAlreadyJoined)) {
$queryBuilder->leftJoin('entity.'.$associatedEntityName, $associatedEntityName);
$entitiesAlreadyJoined[] = $associatedEntityName;
}
$entityName = $associatedEntityName;
$fieldName = $associatedFieldName;
}
$isSmallIntegerField = 'smallint' === $metadata['dataType'];
$isIntegerField = 'integer' === $metadata['dataType'];
$isNumericField = \in_array($metadata['dataType'], array('number', 'bigint', 'decimal', 'float'));
$isTextField = \in_array($metadata['dataType'], array('string', 'text'));
$isGuidField = 'guid' === $metadata['dataType'];
// this complex condition is needed to avoid issues on PostgreSQL databases
if (
$isSmallIntegerField && $isSearchQuerySmallInteger ||
$isIntegerField && $isSearchQueryInteger ||
$isNumericField && $isSearchQueryNumeric
) {
$queryBuilder->orWhere(sprintf('%s.%s = :numeric_query', $entityName, $fieldName));
// adding '0' turns the string into a numeric value
$queryParameters['numeric_query'] = 0 + $searchQuery;
} elseif ($isGuidField && $isSearchQueryUuid) {
$queryBuilder->orWhere(sprintf('%s.%s = :uuid_query', $entityName, $fieldName));
$queryParameters['uuid_query'] = $searchQuery;
} elseif ($isTextField) {
$queryBuilder->orWhere(sprintf('LOWER(%s.%s) LIKE :fuzzy_query', $entityName, $fieldName));
$queryParameters['fuzzy_query'] = '%'.$lowerSearchQuery.'%';
$queryBuilder->orWhere(sprintf('LOWER(%s.%s) IN (:words_query)', $entityName, $fieldName));
$queryParameters['words_query'] = explode(' ', $lowerSearchQuery);
}
}
if (0 !== \count($queryParameters)) {
$queryBuilder->setParameters($queryParameters);
}
if (!empty($dqlFilter)) {
$queryBuilder->andWhere($dqlFilter);
}
$isSortedByDoctrineAssociation = false !== strpos($sortField, '.');
if ($isSortedByDoctrineAssociation) {
list($associatedEntityName, $associatedFieldName) = explode('.', $sortField);
if (!\in_array($associatedEntityName, $entitiesAlreadyJoined)) {
$queryBuilder->leftJoin('entity.'.$associatedEntityName, $associatedEntityName);
$entitiesAlreadyJoined[] = $associatedEntityName;
}
}
if (null !== $sortField) {
$queryBuilder->orderBy(sprintf('%s%s', $isSortedByDoctrineAssociation ? '' : 'entity.', $sortField), $sortDirection ?: 'DESC');
}
return $queryBuilder;
}
}
| 1 | 11,332 | I think here we want the use of `empty()` to take care of empty strings. If you put `dql_filter: ''` in your YAML config ... this will add `->andWhere('')` and it will fail, right? | EasyCorp-EasyAdminBundle | php |
@@ -170,14 +170,12 @@ func (p *Agent) Start(ctx context.Context) error {
p2pMsgLatency.WithLabelValues("broadcast", strconv.Itoa(int(broadcast.MsgType)), status).Observe(float64(latency))
}()
if err = proto.Unmarshal(data, &broadcast); err != nil {
- err = errors.Wrap(err, "error when marshaling broadcast message")
- return
+ return errors.Wrap(err, "error when marshaling broadcast message")
}
// Skip the broadcast message if it's from the node itself
rawmsg, ok := p2p.GetBroadcastMsg(ctx)
if !ok {
- err = errors.New("error when asserting broadcast msg context")
- return
+ return errors.New("error when asserting broadcast msg context")
}
peerID = rawmsg.GetFrom().Pretty()
if p.host.HostIdentity() == peerID { | 1 | // Copyright (c) 2019 IoTeX Foundation
// This is an alpha (internal) release and is not suitable for production. This source code is provided 'as is' and no
// warranties are given as to title or non-infringement, merchantability or fitness for purpose and, to the extent
// permitted by law, all liability for your use of the code is disclaimed. This source code is governed by Apache
// License 2.0 that can be found in the LICENSE file.
package p2p
import (
"context"
"encoding/hex"
"fmt"
"io"
"strconv"
"strings"
"time"
"github.com/golang/protobuf/ptypes"
"github.com/libp2p/go-libp2p-core/peer"
"github.com/multiformats/go-multiaddr"
"github.com/pkg/errors"
"github.com/prometheus/client_golang/prometheus"
"go.uber.org/zap"
"google.golang.org/protobuf/proto"
"github.com/iotexproject/go-p2p"
"github.com/iotexproject/go-pkgs/hash"
goproto "github.com/iotexproject/iotex-proto/golang"
"github.com/iotexproject/iotex-proto/golang/iotexrpc"
"github.com/iotexproject/iotex-core/pkg/log"
"github.com/iotexproject/iotex-core/pkg/routine"
)
const (
successStr = "success"
failureStr = "failure"
)
var (
p2pMsgCounter = prometheus.NewCounterVec(
prometheus.CounterOpts{
Name: "iotex_p2p_message_counter",
Help: "P2P message stats",
},
[]string{"protocol", "message", "direction", "peer", "status"},
)
p2pMsgLatency = prometheus.NewHistogramVec(
prometheus.HistogramOpts{
Name: "iotex_p2p_message_latency",
Help: "message latency",
Buckets: prometheus.LinearBuckets(0, 10, 200),
},
[]string{"protocol", "message", "status"},
)
// ErrAgentNotStarted is the error returned when p2p agent has not been started
ErrAgentNotStarted = errors.New("p2p agent has not been started")
)
func init() {
prometheus.MustRegister(p2pMsgCounter)
prometheus.MustRegister(p2pMsgLatency)
}
const (
// TODO: the topic could be fine tuned
broadcastTopic = "broadcast"
unicastTopic = "unicast"
numDialRetries = 8
dialRetryInterval = 2 * time.Second
)
type (
// HandleBroadcastInbound handles broadcast message when agent listens it from the network
HandleBroadcastInbound func(context.Context, uint32, string, proto.Message)
// HandleUnicastInboundAsync handles unicast message when agent listens it from the network
HandleUnicastInboundAsync func(context.Context, uint32, peer.AddrInfo, proto.Message)
// Network is the config of p2p
Network struct {
Host string `yaml:"host"`
Port int `yaml:"port"`
ExternalHost string `yaml:"externalHost"`
ExternalPort int `yaml:"externalPort"`
BootstrapNodes []string `yaml:"bootstrapNodes"`
MasterKey string `yaml:"masterKey"` // master key will be PrivateKey if not set.
// RelayType is the type of P2P network relay. By default, the value is empty, meaning disabled. Two relay types
// are supported: active, nat.
RelayType string `yaml:"relayType"`
ReconnectInterval time.Duration `yaml:"reconnectInterval"`
RateLimit p2p.RateLimitConfig `yaml:"rateLimit"`
EnableRateLimit bool `yaml:"enableRateLimit"`
PrivateNetworkPSK string `yaml:"privateNetworkPSK"`
}
// Agent is the agent to help the blockchain node connect into the P2P networks and send/receive messages
Agent struct {
cfg Network
topicSuffix string
broadcastInboundHandler HandleBroadcastInbound
unicastInboundAsyncHandler HandleUnicastInboundAsync
host *p2p.Host
reconnectTimeout time.Duration
reconnectTask *routine.RecurringTask
qosMetrics *Qos
}
)
// NewAgent instantiates a local P2P agent instance
func NewAgent(cfg Network, genesisHash hash.Hash256, broadcastHandler HandleBroadcastInbound, unicastHandler HandleUnicastInboundAsync) *Agent {
log.L().Info("p2p agent", log.Hex("topicSuffix", genesisHash[22:]))
return &Agent{
cfg: cfg,
		// Make sure the honest node only cares about messages related to the chain with the same genesis
topicSuffix: hex.EncodeToString(genesisHash[22:]), // last 10 bytes of genesis hash
broadcastInboundHandler: broadcastHandler,
unicastInboundAsyncHandler: unicastHandler,
reconnectTimeout: cfg.ReconnectInterval,
qosMetrics: NewQoS(time.Now(), 2*cfg.ReconnectInterval),
}
}
// Start connects into P2P network
func (p *Agent) Start(ctx context.Context) error {
ready := make(chan interface{})
p2p.SetLogger(log.L())
opts := []p2p.Option{
p2p.HostName(p.cfg.Host),
p2p.Port(p.cfg.Port),
p2p.Gossip(),
p2p.SecureIO(),
p2p.MasterKey(p.cfg.MasterKey),
p2p.PrivateNetworkPSK(p.cfg.PrivateNetworkPSK),
}
if p.cfg.EnableRateLimit {
opts = append(opts, p2p.WithRateLimit(p.cfg.RateLimit))
}
if p.cfg.ExternalHost != "" {
opts = append(opts, p2p.ExternalHostName(p.cfg.ExternalHost))
opts = append(opts, p2p.ExternalPort(p.cfg.ExternalPort))
}
if p.cfg.RelayType != "" {
opts = append(opts, p2p.WithRelay(p.cfg.RelayType))
}
host, err := p2p.NewHost(ctx, opts...)
if err != nil {
return errors.Wrap(err, "error when instantiating Agent host")
}
if err := host.AddBroadcastPubSub(broadcastTopic+p.topicSuffix, func(ctx context.Context, data []byte) (err error) {
// Blocking handling the broadcast message until the agent is started
<-ready
var (
peerID string
broadcast iotexrpc.BroadcastMsg
latency int64
)
skip := false
defer func() {
// Skip accounting if the broadcast message is not handled
if skip {
return
}
status := successStr
if err != nil {
status = failureStr
}
p2pMsgCounter.WithLabelValues("broadcast", strconv.Itoa(int(broadcast.MsgType)), "in", peerID, status).Inc()
p2pMsgLatency.WithLabelValues("broadcast", strconv.Itoa(int(broadcast.MsgType)), status).Observe(float64(latency))
}()
if err = proto.Unmarshal(data, &broadcast); err != nil {
err = errors.Wrap(err, "error when marshaling broadcast message")
return
}
// Skip the broadcast message if it's from the node itself
rawmsg, ok := p2p.GetBroadcastMsg(ctx)
if !ok {
err = errors.New("error when asserting broadcast msg context")
return
}
peerID = rawmsg.GetFrom().Pretty()
if p.host.HostIdentity() == peerID {
skip = true
return
}
t, _ := ptypes.Timestamp(broadcast.GetTimestamp())
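		// elapsed time since the sender's timestamp, converted to milliseconds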
latency = time.Since(t).Nanoseconds() / time.Millisecond.Nanoseconds()
msg, err := goproto.TypifyRPCMsg(broadcast.MsgType, broadcast.MsgBody)
if err != nil {
err = errors.Wrap(err, "error when typifying broadcast message")
return
}
p.broadcastInboundHandler(ctx, broadcast.ChainId, peerID, msg)
p.qosMetrics.updateRecvBroadcast(time.Now())
return
}); err != nil {
return errors.Wrap(err, "error when adding broadcast pubsub")
}
if err := host.AddUnicastPubSub(unicastTopic+p.topicSuffix, func(ctx context.Context, _ io.Writer, data []byte) (err error) {
// Blocking handling the unicast message until the agent is started
<-ready
var (
unicast iotexrpc.UnicastMsg
peerID string
latency int64
)
defer func() {
status := successStr
if err != nil {
status = failureStr
}
p2pMsgCounter.WithLabelValues("unicast", strconv.Itoa(int(unicast.MsgType)), "in", peerID, status).Inc()
p2pMsgLatency.WithLabelValues("unicast", strconv.Itoa(int(unicast.MsgType)), status).Observe(float64(latency))
}()
if err = proto.Unmarshal(data, &unicast); err != nil {
err = errors.Wrap(err, "error when marshaling unicast message")
return
}
msg, err := goproto.TypifyRPCMsg(unicast.MsgType, unicast.MsgBody)
if err != nil {
err = errors.Wrap(err, "error when typifying unicast message")
return
}
t, _ := ptypes.Timestamp(unicast.GetTimestamp())
latency = time.Since(t).Nanoseconds() / time.Millisecond.Nanoseconds()
stream, ok := p2p.GetUnicastStream(ctx)
if !ok {
err = errors.Wrap(err, "error when get unicast stream")
return
}
remote := stream.Conn().RemotePeer()
peerID = remote.Pretty()
peerInfo := peer.AddrInfo{
ID: remote,
Addrs: []multiaddr.Multiaddr{stream.Conn().RemoteMultiaddr()},
}
p.unicastInboundAsyncHandler(ctx, unicast.ChainId, peerInfo, msg)
p.qosMetrics.updateRecvUnicast(peerID, time.Now())
return
}); err != nil {
return errors.Wrap(err, "error when adding unicast pubsub")
}
// connect to bootstrap nodes
p.host = host
if err = p.connect(ctx); err != nil {
return err
}
p.host.JoinOverlay(ctx)
close(ready)
// check network connectivity every 60 blocks, and reconnect in case of disconnection
p.reconnectTask = routine.NewRecurringTask(p.reconnect, p.reconnectTimeout)
return p.reconnectTask.Start(ctx)
}
// Stop disconnects from P2P network
func (p *Agent) Stop(ctx context.Context) error {
if p.host == nil {
return ErrAgentNotStarted
}
log.L().Info("p2p is shutting down.", zap.Error(ctx.Err()))
if err := p.reconnectTask.Stop(ctx); err != nil {
return err
}
if err := p.host.Close(); err != nil {
return errors.Wrap(err, "error when closing Agent host")
}
return nil
}
// BroadcastOutbound sends a broadcast message to the whole network
func (p *Agent) BroadcastOutbound(ctx context.Context, msg proto.Message) (err error) {
host := p.host
if host == nil {
return ErrAgentNotStarted
}
var msgType iotexrpc.MessageType
var msgBody []byte
defer func() {
status := successStr
if err != nil {
status = failureStr
}
p2pMsgCounter.WithLabelValues(
"broadcast",
strconv.Itoa(int(msgType)),
"out",
host.HostIdentity(),
status,
).Inc()
}()
msgType, msgBody, err = convertAppMsg(msg)
if err != nil {
return
}
p2pCtx, ok := GetContext(ctx)
if !ok {
err = errors.New("P2P context doesn't exist")
return
}
broadcast := iotexrpc.BroadcastMsg{
ChainId: p2pCtx.ChainID,
PeerId: host.HostIdentity(),
MsgType: msgType,
MsgBody: msgBody,
Timestamp: ptypes.TimestampNow(),
}
data, err := proto.Marshal(&broadcast)
if err != nil {
err = errors.Wrap(err, "error when marshaling broadcast message")
return
}
t := time.Now()
if err = host.Broadcast(ctx, broadcastTopic+p.topicSuffix, data); err != nil {
err = errors.Wrap(err, "error when sending broadcast message")
p.qosMetrics.updateSendBroadcast(t, false)
return
}
p.qosMetrics.updateSendBroadcast(t, true)
return
}
// UnicastOutbound sends a unicast message to the given address
func (p *Agent) UnicastOutbound(ctx context.Context, peer peer.AddrInfo, msg proto.Message) (err error) {
host := p.host
if host == nil {
return ErrAgentNotStarted
}
var (
peerName = peer.ID.Pretty()
msgType iotexrpc.MessageType
msgBody []byte
)
defer func() {
status := successStr
if err != nil {
status = failureStr
}
p2pMsgCounter.WithLabelValues("unicast", strconv.Itoa(int(msgType)), "out", peer.ID.Pretty(), status).Inc()
}()
msgType, msgBody, err = convertAppMsg(msg)
if err != nil {
return
}
p2pCtx, ok := GetContext(ctx)
if !ok {
err = errors.New("P2P context doesn't exist")
return
}
unicast := iotexrpc.UnicastMsg{
ChainId: p2pCtx.ChainID,
PeerId: host.HostIdentity(),
MsgType: msgType,
MsgBody: msgBody,
Timestamp: ptypes.TimestampNow(),
}
data, err := proto.Marshal(&unicast)
if err != nil {
err = errors.Wrap(err, "error when marshaling unicast message")
return
}
t := time.Now()
if err = host.Unicast(ctx, peer, unicastTopic+p.topicSuffix, data); err != nil {
err = errors.Wrap(err, "error when sending unicast message")
p.qosMetrics.updateSendUnicast(peerName, t, false)
return
}
p.qosMetrics.updateSendUnicast(peerName, t, true)
return
}
// Info returns agents' peer info.
func (p *Agent) Info() (peer.AddrInfo, error) {
if p.host == nil {
return peer.AddrInfo{}, ErrAgentNotStarted
}
return p.host.Info(), nil
}
// Self returns the self network address
func (p *Agent) Self() ([]multiaddr.Multiaddr, error) {
if p.host == nil {
return nil, ErrAgentNotStarted
}
return p.host.Addresses(), nil
}
// Neighbors returns the neighbors' peer info
func (p *Agent) Neighbors(ctx context.Context) ([]peer.AddrInfo, error) {
if p.host == nil {
return nil, ErrAgentNotStarted
}
return p.host.Neighbors(ctx), nil
}
// QosMetrics returns the Qos metrics
func (p *Agent) QosMetrics() *Qos {
return p.qosMetrics
}
// connect connects to bootstrap nodes
func (p *Agent) connect(ctx context.Context) error {
if len(p.cfg.BootstrapNodes) == 0 {
return nil
}
var tryNum, errNum, connNum, desiredConnNum int
conn := make(chan struct{}, len(p.cfg.BootstrapNodes))
connErrChan := make(chan error, len(p.cfg.BootstrapNodes))
	// try to connect to all bootstrap nodes except itself.
for _, bootstrapNode := range p.cfg.BootstrapNodes {
bootAddr := multiaddr.StringCast(bootstrapNode)
if strings.Contains(bootAddr.String(), p.host.HostIdentity()) {
continue
}
tryNum++
go func() {
if err := exponentialRetry(
func() error { return p.host.ConnectWithMultiaddr(ctx, bootAddr) },
dialRetryInterval,
numDialRetries,
); err != nil {
err := errors.Wrap(err, fmt.Sprintf("error when connecting bootstrap node %s", bootAddr.String()))
connErrChan <- err
return
}
conn <- struct{}{}
log.L().Info("Connected bootstrap node.", zap.String("address", bootAddr.String()))
}()
}
// wait until half+1 bootnodes get connected
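	// (integer division: e.g. 5 bootstrap nodes require 3 successful connections, 4 nodes require 3)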
desiredConnNum = len(p.cfg.BootstrapNodes)/2 + 1
for {
select {
case err := <-connErrChan:
log.L().Info("Connection failed.", zap.Error(err))
errNum++
if errNum == tryNum {
return errors.New("failed to connect to any bootstrap node")
}
case <-conn:
connNum++
}
// can add more condition later
if connNum >= desiredConnNum {
break
}
}
return nil
}
func (p *Agent) reconnect() {
if p.qosMetrics.lostConnection() {
log.L().Info("Network lost, try re-connecting.")
p.host.ClearBlocklist()
p.connect(context.Background())
}
}
func convertAppMsg(msg proto.Message) (iotexrpc.MessageType, []byte, error) {
msgType, err := goproto.GetTypeFromRPCMsg(msg)
if err != nil {
return 0, nil, errors.Wrap(err, "error when converting application message to proto")
}
msgBody, err := proto.Marshal(msg)
if err != nil {
return 0, nil, errors.Wrap(err, "error when marshaling application message")
}
return msgType, msgBody, nil
}
func exponentialRetry(f func() error, retryInterval time.Duration, numRetries int) (err error) {
for i := 0; i < numRetries; i++ {
if err = f(); err == nil {
return
}
log.L().Error("Error happens, will retry.", zap.Error(err))
time.Sleep(retryInterval)
retryInterval *= 2
}
return
}
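// Aside (a sketch, not part of the original file): exponentialRetry above, like
// the pubsub handlers, relies on Go's named return values. With a result named
// `(err error)`, a bare `return` is equivalent to `return err`, so assigning to
// err and then returning bare is well-defined:
//
//	func fetch() (err error) {
//		err = errors.New("boom")
//		return // identical to: return err
//	}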
| 1 | 23,598 | this is golang's named return, err is defined, `return` is equivalent to `return err` the current code has no problem | iotexproject-iotex-core | go |
@@ -202,7 +202,7 @@ ostree_gpg_verify_result_get_all (OstreeGpgVerifyResult *result,
* ostree_gpg_verify_result_describe:
* @result: an #OstreeGpgVerifyResult
* @signature_index: which signature to describe
- * @output_buffer: a #GString to hold the description
+ * @output_buffer: (out): a #GString to hold the description
* @line_prefix: (allow-none): optional line prefix string
* @flags: flags to adjust the description format
* | 1 | /*
* Copyright (C) 2015 Red Hat, Inc.
* Copyright (C) 2019 Collabora Ltd.
*
* SPDX-License-Identifier: LGPL-2.0+
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, write to the
* Free Software Foundation, Inc., 59 Temple Place - Suite 330,
* Boston, MA 02111-1307, USA.
*/
#include "config.h"
#include "libglnx.h"
#include "ostree-gpg-verify-result.h"
/**
* SECTION: ostree-gpg-verify-result
* @title: GPG signature verification results
* @short_description: Dummy functions for detached GPG signatures
*
 * This file contains dummy functions for GPG signature checks to
* provide API backward compatibility.
*/
#ifndef OSTREE_DISABLE_GPGME
#error This file should not be compiled if GPG support is enabled
#endif
/**
* OstreeGpgVerifyResult:
*
* Private instance structure.
*/
struct OstreeGpgVerifyResult {
GObject parent;
};
typedef struct {
GObjectClass parent_class;
} OstreeGpgVerifyResultClass;
static void ostree_gpg_verify_result_initable_iface_init (GInitableIface *iface);
G_DEFINE_TYPE_WITH_CODE (OstreeGpgVerifyResult,
ostree_gpg_verify_result,
G_TYPE_OBJECT,
G_IMPLEMENT_INTERFACE (G_TYPE_INITABLE,
ostree_gpg_verify_result_initable_iface_init))
static void
ostree_gpg_verify_result_class_init (OstreeGpgVerifyResultClass *class)
{
}
static void
ostree_gpg_verify_result_initable_iface_init (GInitableIface *iface)
{
}
static void
ostree_gpg_verify_result_init (OstreeGpgVerifyResult *result)
{
}
/**
* ostree_gpg_verify_result_count_all:
* @result: an #OstreeGpgVerifyResult
*
* Counts all the signatures in @result.
*
* Returns: signature count
*/
guint
ostree_gpg_verify_result_count_all (OstreeGpgVerifyResult *result)
{
g_critical ("%s: GPG feature is disabled in a build time", __FUNCTION__);
return 0;
}
/**
* ostree_gpg_verify_result_count_valid:
* @result: an #OstreeGpgVerifyResult
*
* Counts only the valid signatures in @result.
*
* Returns: valid signature count
*/
guint
ostree_gpg_verify_result_count_valid (OstreeGpgVerifyResult *result)
{
g_critical ("%s: GPG feature is disabled in a build time", __FUNCTION__);
return 0;
}
/**
* ostree_gpg_verify_result_lookup:
* @result: an #OstreeGpgVerifyResult
* @key_id: a GPG key ID or fingerprint
* @out_signature_index: (out): return location for the index of the signature
* signed by @key_id, or %NULL
*
* Searches @result for a signature signed by @key_id. If a match is found,
* the function returns %TRUE and sets @out_signature_index so that further
* signature details can be obtained through ostree_gpg_verify_result_get().
* If no match is found, the function returns %FALSE and leaves
* @out_signature_index unchanged.
*
* Returns: %TRUE on success, %FALSE on failure
**/
gboolean
ostree_gpg_verify_result_lookup (OstreeGpgVerifyResult *result,
const gchar *key_id,
guint *out_signature_index)
{
g_critical ("%s: GPG feature is disabled in a build time", __FUNCTION__);
return FALSE;
}
/**
* ostree_gpg_verify_result_get:
* @result: an #OstreeGpgVerifyResult
* @signature_index: which signature to get attributes from
* @attrs: (array length=n_attrs): Array of requested attributes
* @n_attrs: Length of the @attrs array
*
* Builds a #GVariant tuple of requested attributes for the GPG signature at
* @signature_index in @result. See the #OstreeGpgSignatureAttr description
* for the #GVariantType of each available attribute.
*
* It is a programmer error to request an invalid #OstreeGpgSignatureAttr or
* an invalid @signature_index. Use ostree_gpg_verify_result_count_all() to
* find the number of signatures in @result.
*
* Returns: a new, floating, #GVariant tuple
**/
GVariant *
ostree_gpg_verify_result_get (OstreeGpgVerifyResult *result,
guint signature_index,
OstreeGpgSignatureAttr *attrs,
guint n_attrs)
{
g_critical ("%s: GPG feature is disabled in a build time", __FUNCTION__);
return NULL;
}
/**
* ostree_gpg_verify_result_get_all:
* @result: an #OstreeGpgVerifyResult
* @signature_index: which signature to get attributes from
*
* Builds a #GVariant tuple of all available attributes for the GPG signature
* at @signature_index in @result.
*
* The child values in the returned #GVariant tuple are ordered to match the
* #OstreeGpgSignatureAttr enumeration, which means the enum values can be
* used as index values in functions like g_variant_get_child(). See the
* #OstreeGpgSignatureAttr description for the #GVariantType of each
* available attribute.
*
* <note>
* <para>
* The #OstreeGpgSignatureAttr enumeration may be extended in the future
* with new attributes, which would affect the #GVariant tuple returned by
* this function. While the position and type of current child values in
* the #GVariant tuple will not change, to avoid backward-compatibility
* issues <emphasis>please do not depend on the tuple's overall size or
* type signature</emphasis>.
* </para>
* </note>
*
* It is a programmer error to request an invalid @signature_index. Use
* ostree_gpg_verify_result_count_all() to find the number of signatures in
* @result.
*
* Returns: a new, floating, #GVariant tuple
**/
GVariant *
ostree_gpg_verify_result_get_all (OstreeGpgVerifyResult *result,
guint signature_index)
{
g_return_val_if_fail (OSTREE_IS_GPG_VERIFY_RESULT (result), NULL);
g_critical ("%s: GPG feature is disabled in a build time", __FUNCTION__);
return NULL;
}
/**
* ostree_gpg_verify_result_describe:
* @result: an #OstreeGpgVerifyResult
* @signature_index: which signature to describe
* @output_buffer: a #GString to hold the description
* @line_prefix: (allow-none): optional line prefix string
* @flags: flags to adjust the description format
*
* Appends a brief, human-readable description of the GPG signature at
* @signature_index in @result to the @output_buffer. The description
* spans multiple lines. A @line_prefix string, if given, will precede
* each line of the description.
*
* The @flags argument is reserved for future variations to the description
* format. Currently must be 0.
*
* It is a programmer error to request an invalid @signature_index. Use
* ostree_gpg_verify_result_count_all() to find the number of signatures in
* @result.
*/
void
ostree_gpg_verify_result_describe (OstreeGpgVerifyResult *result,
guint signature_index,
GString *output_buffer,
const gchar *line_prefix,
OstreeGpgSignatureFormatFlags flags)
{
g_autoptr(GVariant) variant = NULL;
g_return_if_fail (OSTREE_IS_GPG_VERIFY_RESULT (result));
g_critical ("%s: GPG feature is disabled in a build time", __FUNCTION__);
variant = ostree_gpg_verify_result_get_all (result, signature_index);
ostree_gpg_verify_result_describe_variant (variant, output_buffer, line_prefix, flags);
}
/**
* ostree_gpg_verify_result_describe_variant:
* @variant: a #GVariant from ostree_gpg_verify_result_get_all()
* @output_buffer: a #GString to hold the description
* @line_prefix: (allow-none): optional line prefix string
* @flags: flags to adjust the description format
*
* Similar to ostree_gpg_verify_result_describe() but takes a #GVariant of
* all attributes for a GPG signature instead of an #OstreeGpgVerifyResult
* and signature index.
*
* The @variant <emphasis>MUST</emphasis> have been created by
* ostree_gpg_verify_result_get_all().
*/
void
ostree_gpg_verify_result_describe_variant (GVariant *variant,
GString *output_buffer,
const gchar *line_prefix,
OstreeGpgSignatureFormatFlags flags)
{
const char *type_string;
g_return_if_fail (variant != NULL);
g_return_if_fail (output_buffer != NULL);
/* Verify the variant's type string. This code is
* not prepared to handle just any random GVariant. */
type_string = g_variant_get_type_string (variant);
g_return_if_fail (strcmp (type_string, "(bbbbbsxxsssssxx)") == 0);
g_string_append (output_buffer,
"GPG feature is disabled in a build time\n");
g_critical ("%s: GPG feature is disabled in a build time", __FUNCTION__);
}
/**
* ostree_gpg_verify_result_require_valid_signature:
* @result: (nullable): an #OstreeGpgVerifyResult
* @error: A #GError
*
* Checks if the result contains at least one signature from the
* trusted keyring. You can call this function immediately after
* ostree_repo_verify_summary() or ostree_repo_verify_commit_ext() -
* it will handle the %NULL @result and filled @error too.
*
* Returns: %TRUE if @result was not %NULL and had at least one
* signature from trusted keyring, otherwise %FALSE
*
* Since: 2016.6
*/
gboolean
ostree_gpg_verify_result_require_valid_signature (OstreeGpgVerifyResult *result,
GError **error)
{
if (result == NULL)
return FALSE;
g_set_error (error, G_IO_ERROR, G_IO_ERROR_NOT_SUPPORTED,
"'%s': GPG feature is disabled in a build time",
__FUNCTION__);
return FALSE;
}
G_DEFINE_QUARK (OstreeGpgError, ostree_gpg_error)
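/*
 * Aside (a sketch, not part of the original file): @output_buffer in the
 * describe functions is a caller-allocated buffer that the callee appends to,
 * closer to Rust's `&mut String` than to an (out) return value:
 *
 *   GString *buf = g_string_new (NULL);
 *   ostree_gpg_verify_result_describe (result, 0, buf, "> ", 0);
 *   g_print ("%s", buf->str);
 *   g_string_free (buf, TRUE);
 */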
| 1 | 17,222 | I don't think this is right; in Rust terms it's like a `&mut String`, in Java `StringBuilder` - it's not a return value from the function which is what `(out)` is for. | ostreedev-ostree | c |
@@ -7,9 +7,9 @@ from ..registry import LOSSES
def _expand_binary_labels(labels, label_weights, label_channels):
bin_labels = labels.new_full((labels.size(0), label_channels), 0)
- inds = torch.nonzero(labels >= 1).squeeze()
+ inds = torch.nonzero((labels >= 0) & (labels < label_channels)).squeeze()
if inds.numel() > 0:
- bin_labels[inds, labels[inds] - 1] = 1
+ bin_labels[inds, labels[inds]] = 1
bin_label_weights = label_weights.view(-1, 1).expand(
label_weights.size(0), label_channels)
return bin_labels, bin_label_weights | 1 | import torch
import torch.nn as nn
import torch.nn.functional as F
from ..registry import LOSSES
def _expand_binary_labels(labels, label_weights, label_channels):
bin_labels = labels.new_full((labels.size(0), label_channels), 0)
inds = torch.nonzero(labels >= 1).squeeze()
if inds.numel() > 0:
bin_labels[inds, labels[inds] - 1] = 1
bin_label_weights = label_weights.view(-1, 1).expand(
label_weights.size(0), label_channels)
return bin_labels, bin_label_weights
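# Behavioral sketch (not in the original file): with labels = tensor([2, 0]) and
# label_channels = 4, the code above one-hot-encodes only labels >= 1 into
# column label - 1 (label 2 -> column 1, label 0 dropped); the patched condition
# (0 <= label < label_channels) with bin_labels[inds, labels[inds]] = 1 instead
# maps label 2 -> column 2 and label 0 -> column 0.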
# TODO: code refactoring to make it consistent with other losses
@LOSSES.register_module
class GHMC(nn.Module):
"""GHM Classification Loss.
Details of the theorem can be viewed in the paper
"Gradient Harmonized Single-stage Detector".
https://arxiv.org/abs/1811.05181
Args:
bins (int): Number of the unit regions for distribution calculation.
momentum (float): The parameter for moving average.
use_sigmoid (bool): Can only be true for BCE based loss now.
loss_weight (float): The weight of the total GHM-C loss.
"""
def __init__(self, bins=10, momentum=0, use_sigmoid=True, loss_weight=1.0):
super(GHMC, self).__init__()
self.bins = bins
self.momentum = momentum
edges = torch.arange(bins + 1).float() / bins
self.register_buffer('edges', edges)
self.edges[-1] += 1e-6
if momentum > 0:
acc_sum = torch.zeros(bins)
self.register_buffer('acc_sum', acc_sum)
self.use_sigmoid = use_sigmoid
if not self.use_sigmoid:
raise NotImplementedError
self.loss_weight = loss_weight
def forward(self, pred, target, label_weight, *args, **kwargs):
"""Calculate the GHM-C loss.
Args:
pred (float tensor of size [batch_num, class_num]):
The direct prediction of classification fc layer.
target (float tensor of size [batch_num, class_num]):
Binary class target for each sample.
label_weight (float tensor of size [batch_num, class_num]):
the value is 1 if the sample is valid and 0 if ignored.
Returns:
The gradient harmonized loss.
"""
# the target should be binary class label
if pred.dim() != target.dim():
target, label_weight = _expand_binary_labels(
target, label_weight, pred.size(-1))
target, label_weight = target.float(), label_weight.float()
edges = self.edges
mmt = self.momentum
weights = torch.zeros_like(pred)
# gradient length
g = torch.abs(pred.sigmoid().detach() - target)
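        # for sigmoid + BCE, |sigmoid(pred) - target| equals the magnitude of
        # the loss gradient w.r.t. the logit, so g lies in [0, 1]; edges[-1] was
        # nudged by 1e-6 above so that g == 1 still falls into the last bin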
valid = label_weight > 0
tot = max(valid.float().sum().item(), 1.0)
n = 0 # n valid bins
for i in range(self.bins):
inds = (g >= edges[i]) & (g < edges[i + 1]) & valid
num_in_bin = inds.sum().item()
if num_in_bin > 0:
if mmt > 0:
self.acc_sum[i] = mmt * self.acc_sum[i] \
+ (1 - mmt) * num_in_bin
weights[inds] = tot / self.acc_sum[i]
else:
weights[inds] = tot / num_in_bin
n += 1
if n > 0:
weights = weights / n
loss = F.binary_cross_entropy_with_logits(
pred, target, weights, reduction='sum') / tot
return loss * self.loss_weight
# TODO: code refactoring to make it consistent with other losses
@LOSSES.register_module
class GHMR(nn.Module):
"""GHM Regression Loss.
Details of the theorem can be viewed in the paper
"Gradient Harmonized Single-stage Detector"
https://arxiv.org/abs/1811.05181
Args:
mu (float): The parameter for the Authentic Smooth L1 loss.
bins (int): Number of the unit regions for distribution calculation.
momentum (float): The parameter for moving average.
loss_weight (float): The weight of the total GHM-R loss.
"""
def __init__(self, mu=0.02, bins=10, momentum=0, loss_weight=1.0):
super(GHMR, self).__init__()
self.mu = mu
self.bins = bins
edges = torch.arange(bins + 1).float() / bins
self.register_buffer('edges', edges)
self.edges[-1] = 1e3
self.momentum = momentum
if momentum > 0:
acc_sum = torch.zeros(bins)
self.register_buffer('acc_sum', acc_sum)
self.loss_weight = loss_weight
# TODO: support reduction parameter
def forward(self, pred, target, label_weight, avg_factor=None):
"""Calculate the GHM-R loss.
Args:
pred (float tensor of size [batch_num, 4 (* class_num)]):
The prediction of box regression layer. Channel number can be 4
or 4 * class_num depending on whether it is class-agnostic.
target (float tensor of size [batch_num, 4 (* class_num)]):
The target regression values with the same size of pred.
label_weight (float tensor of size [batch_num, 4 (* class_num)]):
The weight of each sample, 0 if ignored.
Returns:
The gradient harmonized loss.
"""
mu = self.mu
edges = self.edges
mmt = self.momentum
# ASL1 loss
diff = pred - target
loss = torch.sqrt(diff * diff + mu * mu) - mu
# gradient length
g = torch.abs(diff / torch.sqrt(mu * mu + diff * diff)).detach()
weights = torch.zeros_like(g)
valid = label_weight > 0
tot = max(label_weight.float().sum().item(), 1.0)
n = 0 # n: valid bins
for i in range(self.bins):
inds = (g >= edges[i]) & (g < edges[i + 1]) & valid
num_in_bin = inds.sum().item()
if num_in_bin > 0:
n += 1
if mmt > 0:
self.acc_sum[i] = mmt * self.acc_sum[i] \
+ (1 - mmt) * num_in_bin
weights[inds] = tot / self.acc_sum[i]
else:
weights[inds] = tot / num_in_bin
if n > 0:
weights /= n
loss = loss * weights
loss = loss.sum() / tot
return loss * self.loss_weight
| 1 | 18,965 | If the label is not binary, should we rename this function? | open-mmlab-mmdetection | py |
@@ -421,10 +421,12 @@ class JMeterExecutor(ScenarioExecutor, WidgetProvider, FileLister):
:param file_list:
:return: etree
"""
+ cur_path = r"${__BeanShell(import org.apache.jmeter.services.FileServer; FileServer.getFileServer()" \
+ r".getBaseDir();)}${__BeanShell(File.separator,)}"
for file_path in file_list:
file_path_elements = jmx.xpath('//stringProp[text()="%s"]' % file_path)
for file_path_element in file_path_elements:
- file_path_element.text = os.path.basename(file_path)
+ file_path_element.text = cur_path + os.path.basename(file_path)
def __get_resource_files_from_jmx(self, jmx):
""" | 1 | """
Module holds all stuff regarding JMeter tool usage
Copyright 2015 BlazeMeter Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from collections import Counter, namedtuple
import os
import platform
import subprocess
import time
import signal
import traceback
import logging
from subprocess import CalledProcessError
import six
import shutil
from distutils.version import LooseVersion
from cssselect import GenericTranslator
import urwid
from bzt.engine import ScenarioExecutor, Scenario, FileLister
from bzt.modules.console import WidgetProvider
from bzt.modules.aggregator import ConsolidatingAggregator, ResultsReader, DataPoint, KPISet
from bzt.utils import shell_exec, ensure_is_dict, humanize_time, dehumanize_time, BetterDict, \
guess_csv_delimiter, unzip, download_progress_hook
try:
from lxml import etree
except ImportError:
try:
import cElementTree as etree
except ImportError:
import elementtree.ElementTree as etree
try:
from urllib import URLopener
except ImportError:
from urllib.request import URLopener
exe_suffix = ".bat" if platform.system() == 'Windows' else ""
class JMeterExecutor(ScenarioExecutor, WidgetProvider, FileLister):
"""
JMeter executor module
"""
JMETER_DOWNLOAD_LINK = "http://apache.claz.org/jmeter/binaries/apache-jmeter-{version}.zip"
JMETER_VER = "2.13"
PLUGINS_DOWNLOAD_TPL = "http://jmeter-plugins.org/files/JMeterPlugins-{plugin}-1.2.1.zip"
def __init__(self):
super(JMeterExecutor, self).__init__()
self.original_jmx = None
self.modified_jmx = None
self.jmeter_log = None
self.properties_file = None
self.kpi_jtl = None
self.errors_jtl = None
self.process = None
self.start_time = None
self.end_time = None
self.retcode = None
self.reader = None
self.widget = None
self.distributed_servers = []
def prepare(self):
"""
Preparation for JMeter involves either getting existing JMX
and modifying it, or generating new JMX from input data. Then,
original JMX is modified to contain JTL writing classes with
required settings and have workload as suggested by Provisioning
:raise ValueError:
"""
self.jmeter_log = self.engine.create_artifact("jmeter", ".log")
self.__check_jmeter()
self.distributed_servers = self.execution.get('distributed', self.distributed_servers)
scenario = self.get_scenario()
self.resource_files()
if Scenario.SCRIPT in scenario:
self.original_jmx = self.__get_script()
self.engine.existing_artifact(self.original_jmx)
elif "requests" in scenario:
self.original_jmx = self.__jmx_from_requests()
else:
raise ValueError("There must be a JMX file to run JMeter")
load = self.get_load()
self.modified_jmx = self.__get_modified_jmx(self.original_jmx, load)
props = self.settings.get("properties")
props_local = scenario.get("properties")
props.merge(props_local)
props['user.classpath'] = self.engine.artifacts_dir
if props:
self.log.debug("Additional properties: %s", props)
props_file = self.engine.create_artifact("jmeter-bzt", ".properties")
with open(props_file, 'w') as fds:
for key, val in six.iteritems(props):
fds.write("%s=%s\n" % (key, val))
self.properties_file = props_file
self.reader = JTLReader(self.kpi_jtl, self.log, self.errors_jtl)
if isinstance(self.engine.aggregator, ConsolidatingAggregator):
self.engine.aggregator.add_underling(self.reader)
# TODO: weighted requests
def startup(self):
"""
Should start JMeter as fast as possible.
"""
cmdline = [self.settings.get("path")] # default is set when prepared
if not self.settings.get("gui", False):
cmdline += ["-n"]
cmdline += ["-t", self.modified_jmx]
if self.jmeter_log:
cmdline += ["-j", self.jmeter_log]
if self.properties_file:
cmdline += ["-p", self.properties_file]
if self.distributed_servers:
cmdline += ['-R%s' % ','.join(self.distributed_servers)]
self.start_time = time.time()
try:
self.process = shell_exec(cmdline, stderr=None)
except OSError as exc:
self.log.error("Failed to start JMeter: %s", traceback.format_exc())
self.log.error("Failed command: %s", cmdline)
raise RuntimeError("Failed to start JMeter: %s" % exc)
def check(self):
"""
        Checks if JMeter has finished and raises an error on non-zero exit code.
        :return: bool
        :raise RuntimeError:
"""
if self.widget:
self.widget.update()
self.retcode = self.process.poll()
if self.retcode is not None:
if self.retcode != 0:
self.log.info("JMeter exit code: %s", self.retcode)
raise RuntimeError("JMeter exited with non-zero code")
return True
return False
def shutdown(self):
"""
If JMeter is still running - let's stop it.
"""
# TODO: print JMeter's stdout/stderr on empty JTL
while self.process and self.process.poll() is None:
# TODO: find a way to have graceful shutdown, then kill
self.log.info("Terminating jmeter PID: %s", self.process.pid)
time.sleep(1)
try:
if platform.system() == 'Windows':
os.kill(self.process.pid, signal.SIGTERM)
else:
os.killpg(self.process.pid, signal.SIGTERM)
except OSError as exc:
self.log.debug("Failed to terminate jmeter: %s", exc)
if self.start_time:
self.end_time = time.time()
self.log.debug("JMeter worked for %s seconds", self.end_time - self.start_time)
if self.kpi_jtl:
if not os.path.exists(self.kpi_jtl) or not os.path.getsize(self.kpi_jtl):
msg = "Empty results JTL, most likely JMeter failed: %s"
raise RuntimeWarning(msg % self.kpi_jtl)
def __apply_ramp_up(self, jmx, ramp_up):
rampup_sel = "stringProp[name='ThreadGroup.ramp_time']"
xpath = GenericTranslator().css_to_xpath(rampup_sel)
for group in jmx.enabled_thread_groups():
prop = group.xpath(xpath)
prop[0].text = str(ramp_up)
def __apply_duration(self, jmx, duration):
sched_sel = "[name='ThreadGroup.scheduler']"
sched_xpath = GenericTranslator().css_to_xpath(sched_sel)
dur_sel = "[name='ThreadGroup.duration']"
dur_xpath = GenericTranslator().css_to_xpath(dur_sel)
for group in jmx.enabled_thread_groups():
group.xpath(sched_xpath)[0].text = 'true'
group.xpath(dur_xpath)[0].text = str(int(duration))
def __apply_iterations(self, jmx, iterations):
sel = "elementProp>[name='LoopController.loops']"
xpath = GenericTranslator().css_to_xpath(sel)
flag_sel = "elementProp>[name='LoopController.continue_forever']"
flag_xpath = GenericTranslator().css_to_xpath(flag_sel)
for group in jmx.enabled_thread_groups():
bprop = group.xpath(flag_xpath)
if not iterations:
bprop[0].text = 'true'
else:
bprop[0].text = 'false'
sprop = group.xpath(xpath)
if not iterations:
sprop[0].text = str(-1)
else:
sprop[0].text = str(iterations)
def __apply_concurrency(self, jmx, concurrency):
tnum_sel = "stringProp[name='ThreadGroup.num_threads']"
tnum_xpath = GenericTranslator().css_to_xpath(tnum_sel)
orig_sum = 0.0
for group in jmx.enabled_thread_groups():
othreads = group.xpath(tnum_xpath)
orig_sum += int(othreads[0].text)
self.log.debug("Original threads: %s", orig_sum)
leftover = concurrency
for group in jmx.enabled_thread_groups():
othreads = group.xpath(tnum_xpath)
orig = int(othreads[0].text)
new = int(round(concurrency * orig / orig_sum))
leftover -= new
othreads[0].text = str(new)
if leftover < 0:
msg = "Had to add %s more threads to maintain thread group proportion"
self.log.warning(msg, -leftover)
elif leftover > 0:
msg = "%s threads left undistributed due to thread group proportion"
self.log.warning(msg, leftover)
def __add_shaper(self, jmx, load):
"""
Adds shaper
:param jmx:
:param load: namedtuple("LoadSpec",
('concurrency', "throughput", 'ramp_up', 'hold', 'iterations', 'duration'))
:return:
"""
if load.throughput and load.duration:
etree_shaper = jmx.get_rps_shaper()
if load.ramp_up:
jmx.add_rps_shaper_schedule(etree_shaper, 1, load.throughput, load.ramp_up)
if load.hold:
jmx.add_rps_shaper_schedule(etree_shaper, load.throughput, load.throughput, load.hold)
jmx.append(JMeterScenarioBuilder.TEST_PLAN_SEL, etree_shaper)
jmx.append(JMeterScenarioBuilder.TEST_PLAN_SEL, etree.Element("hashTree"))
def __disable_listeners(self, jmx):
sel = 'stringProp[name=filename]'
xpath = GenericTranslator().css_to_xpath(sel)
listeners = jmx.get('ResultCollector')
for listener in listeners:
file_setting = listener.xpath(xpath)
if not file_setting or not file_setting[0].text:
listener.set("enabled", "false")
def __get_modified_jmx(self, original, load):
"""
add two listeners to test plan:
- to collect basic stats for KPIs
- to collect detailed errors info
:return: path to artifact
"""
self.log.debug("Load: %s", load)
jmx = JMX(original)
resource_files_from_jmx = self.__get_resource_files_from_jmx(jmx)
resource_files_from_requests = self.__get_resource_files_from_requests()
self.__copy_resources_to_artifacts_dir(resource_files_from_jmx)
self.__copy_resources_to_artifacts_dir(resource_files_from_requests)
if resource_files_from_jmx:
self.__modify_resources_paths_in_jmx(jmx.tree, resource_files_from_jmx)
if self.get_scenario().get("disable-listeners", True):
self.__disable_listeners(jmx)
user_def_vars = self.get_scenario().get("variables")
if user_def_vars:
jmx.append(JMeterScenarioBuilder.TEST_PLAN_SEL, jmx.add_user_def_vars_elements(user_def_vars))
jmx.append(JMeterScenarioBuilder.TEST_PLAN_SEL, etree.Element("hashTree"))
self.__apply_modifications(jmx)
if load.duration and load.iterations:
msg = "You have specified both iterations count"
msg += " and ramp-up/hold duration times, so test will end"
msg += " on what runs out first"
self.log.warning(msg)
if load.concurrency:
self.__apply_concurrency(jmx, load.concurrency)
if load.ramp_up is not None:
self.__apply_ramp_up(jmx, int(load.ramp_up))
if load.iterations is not None:
self.__apply_iterations(jmx, int(load.iterations))
if load.duration:
self.__apply_duration(jmx, int(load.duration))
if load.throughput:
self.__add_shaper(jmx, load)
self.kpi_jtl = self.engine.create_artifact("kpi", ".jtl")
kpil = jmx.new_kpi_listener(self.kpi_jtl)
jmx.append(JMeterScenarioBuilder.TEST_PLAN_SEL, kpil)
jmx.append(JMeterScenarioBuilder.TEST_PLAN_SEL, etree.Element("hashTree"))
# NOTE: maybe have option not to write it, since it consumes drive space
# TODO: option to enable full trace JTL for all requests
self.errors_jtl = self.engine.create_artifact("errors", ".jtl")
errs = jmx.new_errors_listener(self.errors_jtl)
jmx.append(JMeterScenarioBuilder.TEST_PLAN_SEL, errs)
jmx.append(JMeterScenarioBuilder.TEST_PLAN_SEL, etree.Element("hashTree"))
prefix = "modified_" + os.path.basename(original)
filename = self.engine.create_artifact(prefix, ".jmx")
jmx.save(filename)
return filename
def __jmx_from_requests(self):
filename = self.engine.create_artifact("requests", ".jmx")
jmx = JMeterScenarioBuilder()
jmx.scenario = self.get_scenario()
jmx.save(filename)
return filename
def get_widget(self):
"""
Add progress widget to console screen sidebar
:return:
"""
if not self.widget:
self.widget = JMeterWidget(self)
return self.widget
def resource_files(self):
"""
Get list of resource files, copy resource files to artifacts dir, modify jmx
"""
resource_files = []
# get all resource files from requests
files_from_requests = self.__get_resource_files_from_requests()
script = self.__get_script()
if script:
jmx = JMX(script)
resource_files_from_jmx = self.__get_resource_files_from_jmx(jmx)
if resource_files_from_jmx:
self.__modify_resources_paths_in_jmx(jmx.tree, resource_files_from_jmx)
script_name, script_ext = os.path.splitext(script)
script_name = os.path.basename(script_name)
# create modified jmx script in artifacts dir
modified_script = self.engine.create_artifact(script_name, script_ext)
jmx.save(modified_script)
script = modified_script
resource_files.extend(resource_files_from_jmx)
resource_files.extend(files_from_requests)
# copy files to artifacts dir
self.__copy_resources_to_artifacts_dir(resource_files)
if script:
resource_files.append(script)
return [os.path.basename(file_path) for file_path in resource_files] # return list of file names
def __copy_resources_to_artifacts_dir(self, resource_files_list):
"""
:param resource_files_list:
:return:
"""
for resource_file in resource_files_list:
if os.path.exists(resource_file):
try:
shutil.copy(resource_file, self.engine.artifacts_dir)
                except BaseException:
                    self.log.warning("Cannot copy file: %s", resource_file)
else:
self.log.warning("File not found: %s" % resource_file)
def __modify_resources_paths_in_jmx(self, jmx, file_list):
"""
:param jmx:
:param file_list:
:return: etree
"""
for file_path in file_list:
file_path_elements = jmx.xpath('//stringProp[text()="%s"]' % file_path)
for file_path_element in file_path_elements:
file_path_element.text = os.path.basename(file_path)
def __get_resource_files_from_jmx(self, jmx):
"""
:return: (file list)
"""
resource_files = []
search_patterns = ["File.path", "filename", "BeanShellSampler.filename"]
for pattern in search_patterns:
resource_elements = jmx.tree.findall(".//stringProp[@name='%s']" % pattern)
for resource_element in resource_elements:
                # check that none of the parents are disabled
parent = resource_element.getparent()
parent_disabled = False
                while parent is not None:  # walk up through the ancestors
if parent.get('enabled') == 'false':
parent_disabled = True
break
parent = parent.getparent()
if resource_element.text and parent_disabled is False:
resource_files.append(resource_element.text)
return resource_files
def __get_resource_files_from_requests(self):
"""
Get post-body files from requests
:return file list:
"""
post_body_files = []
scenario = self.get_scenario()
data_sources = scenario.data.get('data-sources')
if data_sources:
for data_source in data_sources:
if isinstance(data_source, six.text_type):
post_body_files.append(data_source)
requests = scenario.data.get("requests")
if requests:
for req in requests:
if isinstance(req, dict):
post_body_path = req.get('body-file')
if post_body_path:
post_body_files.append(post_body_path)
return post_body_files
def __get_script(self):
scenario = self.get_scenario()
if Scenario.SCRIPT not in scenario:
return None
scen = ensure_is_dict(scenario, Scenario.SCRIPT, "path")
fname = scen["path"]
if fname is not None:
return self.engine.find_file(fname)
else:
return None
def __apply_modifications(self, jmx):
"""
:type jmx: JMX
"""
modifs = self.get_scenario().get("modifications")
for action, items in six.iteritems(modifs):
if action in ('disable', 'enable'):
if not isinstance(items, list):
modifs[action] = [items]
items = modifs[action]
for name in items:
jmx.set_enabled("[testname='%s']" % name, True if action == 'enable' else False)
elif action == 'set-prop':
for path, text in six.iteritems(items):
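                    # e.g. a path of "My TG>ThreadGroup.num_threads" yields the
                    # selector [testname='My TG']>[name='ThreadGroup.num_threads']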
parts = path.split('>')
if len(parts) < 2:
raise ValueError("Property selector must have at least 2 levels")
sel = "[testname='%s']" % parts[0]
for add in parts[1:]:
sel += ">[name='%s']" % add
jmx.set_text(sel, text)
else:
raise ValueError("Unsupported JMX modification action: %s" % action)
def __jmeter_check(self, jmeter):
"""
Try to execute JMeter
"""
self.log.debug("Trying jmeter: %s > %s", jmeter, self.jmeter_log)
jmout = subprocess.check_output([jmeter, '-j', self.jmeter_log, '--version'], stderr=subprocess.STDOUT)
self.log.debug("JMeter check: %s", jmout)
def __check_jmeter(self):
"""
Checks if JMeter is available, otherwise download and install it.
"""
jmeter = self.settings.get("path", "~/jmeter-taurus/bin/jmeter" + exe_suffix)
jmeter = os.path.abspath(os.path.expanduser(jmeter))
self.settings['path'] = jmeter # set back after expanding ~
try:
self.__jmeter_check(jmeter)
return
except (OSError, CalledProcessError):
self.log.debug("Failed to run JMeter: %s", traceback.format_exc())
try:
jout = subprocess.check_output(["java", '-version'], stderr=subprocess.STDOUT)
self.log.debug("Java check: %s", jout)
except BaseException:
self.log.warning("Failed to run java: %s", traceback.format_exc())
raise RuntimeError("The 'java' is not operable or not available. Consider installing it")
self.settings['path'] = self.__install_jmeter(jmeter)
self.__jmeter_check(self.settings['path'])
def __install_jmeter(self, path):
"""
Installs JMeter and plugins.
JMeter version, download links (templates) for JMeter and plugins may be set in config:
for JMeter: "download-link":"http://domain/resource-{version}.zip"
for plugins: "plugins-download-link": "http://domain/resource-{plugins}.zip"
JMeter version: "version":"1.2.3"
"""
# normalize path
dest = os.path.dirname(os.path.dirname(os.path.expanduser(path)))
if not dest:
dest = "jmeter-taurus"
dest = os.path.abspath(dest)
jmeter = os.path.join(dest, "bin", "jmeter" + exe_suffix)
try:
self.__jmeter_check(jmeter)
return jmeter
except OSError:
self.log.info("Will try to install JMeter into %s", dest)
downloader = URLopener()
jmeter_dist = self.engine.create_artifact("jmeter-dist", ".zip")
jmeter_download_link = self.settings.get("download-link", JMeterExecutor.JMETER_DOWNLOAD_LINK)
jmeter_version = self.settings.get("version", JMeterExecutor.JMETER_VER)
jmeter_download_link = jmeter_download_link.format(version=jmeter_version)
self.log.info("Downloading %s", jmeter_download_link)
try:
downloader.retrieve(jmeter_download_link, jmeter_dist, download_progress_hook)
except BaseException as exc:
self.log.error("Error while downloading %s", jmeter_download_link)
raise exc
self.log.info("Unzipping %s to %s", jmeter_dist, dest)
unzip(jmeter_dist, dest, 'apache-jmeter-' + jmeter_version)
# NOTE: should we remove this file in test environment? or not?
os.remove(jmeter_dist)
# set exec permissions
os.chmod(jmeter, 0o755)
# NOTE: other files like shutdown.sh might also be needed later
# install plugins
for set_name in ("Standard", "Extras", "ExtrasLibs", "WebDriver"):
plugin_dist = self.engine.create_artifact("jmeter-plugin-%s" % set_name, ".zip")
plugin_download_link = self.settings.get("plugins-download-link", JMeterExecutor.PLUGINS_DOWNLOAD_TPL)
plugin_download_link = plugin_download_link.format(plugin=set_name)
self.log.info("Downloading %s", plugin_download_link)
# TODO: fix socket timeout timer (tcp connection timeout too long)
try:
downloader.retrieve(plugin_download_link, plugin_dist, download_progress_hook)
except BaseException as e:
self.log.error("Error while downloading %s", plugin_download_link)
raise e
self.log.info("Unzipping %s", plugin_dist)
unzip(plugin_dist, dest)
os.remove(plugin_dist)
self.__remove_old_jar_versions(os.path.join(dest, 'lib'))
self.log.info("Installed JMeter and Plugins successfully")
return jmeter
def __remove_old_jar_versions(self, path):
"""
Remove old jars
"""
jarlib = namedtuple("jarlib", ("file_name", "lib_name"))
jars = [fname for fname in os.listdir(path) if '-' in fname and os.path.isfile(os.path.join(path, fname))]
jar_libs = [jarlib(file_name=jar, lib_name='-'.join(jar.split('-')[:-1])) for jar in jars]
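        # keep only the newest copy of each library; full file names compare as
        # loose versions, e.g. "cmdrunner-2.10.jar" outranks "cmdrunner-2.0.jar"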
duplicated_libraries = []
for jar_lib_obj in jar_libs:
similar_packages = [LooseVersion(x.file_name) for x in
filter(lambda _: _.lib_name == jar_lib_obj.lib_name, jar_libs)]
if len(similar_packages) > 1:
right_version = max(similar_packages)
similar_packages.remove(right_version)
duplicated_libraries.extend(filter(lambda _: _ not in duplicated_libraries, similar_packages))
for old_lib in duplicated_libraries:
os.remove(os.path.join(path, old_lib.vstring))
self.log.debug("Old jar removed %s" % old_lib.vstring)
class JMX(object):
"""
A class to manipulate and generate JMX test plans for JMeter
:param original: path to existing JMX to load. If it is None, then creates
empty test plan
"""
TEST_PLAN_SEL = "jmeterTestPlan>hashTree>hashTree"
THR_GROUP_SEL = TEST_PLAN_SEL + ">hashTree[type=tg]"
FIELD_RESP_CODE = "http-code"
FIELD_HEADERS = "headers"
FIELD_BODY = "body"
def __init__(self, original=None):
self.log = logging.getLogger(self.__class__.__name__)
if original:
self.load(original)
else:
root = etree.Element("jmeterTestPlan")
self.tree = etree.ElementTree(root)
test_plan = etree.Element("TestPlan", guiclass="TestPlanGui",
testname="BZT Generated Test Plan",
testclass="TestPlan")
htree = etree.Element("hashTree")
htree.append(test_plan)
htree.append(etree.Element("hashTree"))
self.append("jmeterTestPlan", htree)
element_prop = self._get_arguments_panel(
"TestPlan.user_defined_variables")
self.append("jmeterTestPlan>hashTree>TestPlan", element_prop)
def load(self, original):
"""
Load existing JMX file
:param original: JMX file path
:raise RuntimeError: in case of XML parsing error
"""
try:
self.tree = etree.ElementTree()
self.tree.parse(original)
except BaseException as exc:
self.log.debug("XML parsing error: %s", traceback.format_exc())
data = (original, exc)
raise RuntimeError("XML parsing failed for file %s: %s" % data)
def get(self, selector):
"""
Returns tree elements by CSS selector
:type selector: str
:return:
"""
expression = GenericTranslator().css_to_xpath(selector)
nodes = self.tree.xpath(expression)
return nodes
def append(self, selector, node):
"""
        Add node to the container specified by selector. If multiple nodes
        match the selector, the first of them is used as the container.
:param selector: CSS selector for container
:param node: Element instance to add
:raise RuntimeError: if container was not found
"""
container = self.get(selector)
if not len(container):
msg = "Failed to find TestPlan node in file: %s"
raise RuntimeError(msg % selector)
container[0].append(node)
def save(self, filename):
"""
Save JMX into file
:param filename:
"""
self.log.debug("Saving JMX to: %s", filename)
with open(filename, "wb") as fhd:
# self.log.debug("\n%s", etree.tostring(self.tree))
self.tree.write(fhd, pretty_print=True, encoding="UTF-8", xml_declaration=True)
def enabled_thread_groups(self):
"""
Get thread groups that are enabled
"""
tgroups = self.get('jmeterTestPlan>hashTree>hashTree>ThreadGroup')
for group in tgroups:
if group.get("enabled") != 'false':
yield group
@staticmethod
def _flag(flag_name, bool_value):
"""
Generates element for JMX flag node
:param flag_name:
:param bool_value:
:return:
"""
elm = etree.Element(flag_name)
elm.text = "true" if bool_value else "false"
return elm
@staticmethod
def __jtl_writer(filename, label, flags):
"""
Generates JTL writer
:param filename:
:return:
"""
jtl = etree.Element("stringProp", {"name": "filename"})
jtl.text = filename
name = etree.Element("name")
name.text = "saveConfig"
value = etree.Element("value")
value.set("class", "SampleSaveConfiguration")
for key, val in six.iteritems(flags):
value.append(JMX._flag(key, val))
obj_prop = etree.Element("objProp")
obj_prop.append(name)
obj_prop.append(value)
kpi_listener = etree.Element("ResultCollector",
testname=label,
testclass="ResultCollector",
guiclass="SimpleDataWriter")
kpi_listener.append(jtl)
kpi_listener.append(obj_prop)
return kpi_listener
@staticmethod
def new_kpi_listener(filename):
"""
Generates listener for writing basic KPI data in CSV format
:param filename:
:return:
"""
flags = {
"xml": False,
"fieldNames": True,
"time": True,
"timestamp": True,
"latency": True,
"success": True,
"label": True,
"code": True,
"message": True,
"threadName": False,
"dataType": False,
"encoding": False,
"assertions": False,
"subresults": False,
"responseData": False,
"samplerData": False,
"responseHeaders": False,
"requestHeaders": False,
"responseDataOnError": False,
"saveAssertionResultsFailureMessage": False,
"bytes": False,
"threadCounts": True,
"url": False
}
return JMX.__jtl_writer(filename, "KPI Writer", flags)
@staticmethod
def new_errors_listener(filename):
"""
:type filename: str
:return:
"""
flags = {
"xml": True,
"fieldNames": True,
"time": True,
"timestamp": True,
"latency": True,
"success": True,
"label": True,
"code": True,
"message": True,
"threadName": True,
"dataType": True,
"encoding": True,
"assertions": True,
"subresults": True,
"responseData": True,
"samplerData": True,
"responseHeaders": True,
"requestHeaders": True,
"responseDataOnError": True,
"saveAssertionResultsFailureMessage": True,
"bytes": True,
"threadCounts": True,
"url": True
}
writer = JMX.__jtl_writer(filename, "Errors Writer", flags)
writer.append(JMX._bool_prop("ResultCollector.error_logging", True))
return writer
@staticmethod
def _get_arguments_panel(name):
"""
Generates ArgumentsPanel node
:param name:
:return:
"""
return etree.Element("elementProp",
name=name,
elementType="Arguments",
guiclass="ArgumentsPanel",
testclass="Arguments")
@staticmethod
def _get_http_request(url, label, method, timeout, body, keepalive):
"""
Generates HTTP request
:type timeout: float
:type method: str
:type label: str
:type url: str
:rtype: lxml.etree.Element
"""
proxy = etree.Element("HTTPSamplerProxy", guiclass="HttpTestSampleGui",
testclass="HTTPSamplerProxy")
proxy.set("testname", label)
args = JMX._get_arguments_panel("HTTPsampler.Arguments")
        # a plain string body is posted raw; a dict body becomes form arguments
if isinstance(body, six.string_types):
proxy.append(JMX._bool_prop("HTTPSampler.postBodyRaw", True))
coll_prop = JMX._collection_prop("Arguments.arguments")
header = JMX._element_prop("elementProp", "HTTPArgument")
header.append(JMX._string_prop("Argument.value", body))
coll_prop.append(header)
args.append(coll_prop)
proxy.append(args)
elif isinstance(body, dict):
http_args_coll_prop = JMX._collection_prop("Arguments.arguments")
for arg_name, arg_value in body.items():
http_element_prop = JMX._element_prop(arg_name, "HTTPArgument")
http_element_prop.append(JMX._bool_prop("HTTPArgument.always_encode", False))
http_element_prop.append(JMX._string_prop("Argument.value", arg_value))
http_element_prop.append(JMX._string_prop("Argument.name", arg_name))
http_args_coll_prop.append(http_element_prop)
args.append(http_args_coll_prop)
proxy.append(args)
proxy.append(JMX._string_prop("HTTPSampler.path", url))
proxy.append(JMX._string_prop("HTTPSampler.method", method))
proxy.append(JMX._bool_prop("HTTPSampler.use_keepalive", keepalive))
if timeout is not None:
proxy.append(JMX._string_prop("HTTPSampler.connect_timeout", timeout))
proxy.append(JMX._string_prop("HTTPSampler.response_timeout", timeout))
return proxy
@staticmethod
def _element_prop(name, element_type):
"""
Generates element property node
:param name:
:param element_type:
:return:
"""
res = etree.Element("elementProp", name=name, elementType=element_type)
return res
@staticmethod
def _collection_prop(name):
"""
Adds Collection prop
:param name:
:return:
"""
res = etree.Element("collectionProp", name=name)
return res
@staticmethod
def _string_prop(name, value):
"""
Generates string property node
:param name:
:param value:
:return:
"""
res = etree.Element("stringProp", name=name)
res.text = str(value)
return res
@staticmethod
def _long_prop(name, value):
"""
Generates long property node
:param name:
:param value:
:return:
"""
res = etree.Element("longProp", name=name)
res.text = str(value)
return res
@staticmethod
def _bool_prop(name, value):
"""
Generates boolean property
:param name:
:param value:
:return:
"""
res = etree.Element("boolProp", name=name)
res.text = 'true' if value else 'false'
return res
@staticmethod
def _get_thread_group(concurrency=None, rampup=None, iterations=None):
"""
Generates ThreadGroup with 1 thread and 1 loop
:param iterations:
:param rampup:
:param concurrency:
:return:
"""
trg = etree.Element("ThreadGroup", guiclass="ThreadGroupGui",
testclass="ThreadGroup", testname="TG")
loop = etree.Element("elementProp",
name="ThreadGroup.main_controller",
elementType="LoopController",
guiclass="LoopControlPanel",
testclass="LoopController")
loop.append(JMX._bool_prop("LoopController.continue_forever", False))
if not iterations:
iterations = 1
loop.append(JMX._string_prop("LoopController.loops", iterations))
trg.append(loop)
if not concurrency:
concurrency = 1
trg.append(JMX._string_prop("ThreadGroup.num_threads", concurrency))
if not rampup:
rampup = ""
trg.append(JMX._string_prop("ThreadGroup.ramp_time", rampup))
trg.append(JMX._string_prop("ThreadGroup.start_time", ""))
trg.append(JMX._string_prop("ThreadGroup.end_time", ""))
trg.append(JMX._bool_prop("ThreadGroup.scheduler", False))
trg.append(JMX._long_prop("ThreadGroup.duration", 0))
return trg
def get_rps_shaper(self):
"""
:return: etree.Element
"""
throughput_timer_element = etree.Element("kg.apc.jmeter.timers.VariableThroughputTimer",
guiclass="kg.apc.jmeter.timers.VariableThroughputTimerGui",
testclass="kg.apc.jmeter.timers.VariableThroughputTimer",
testname="jp@gc - Throughput Shaping Timer",
enabled="true")
shaper_load_prof = self._collection_prop("load_profile")
throughput_timer_element.append(shaper_load_prof)
return throughput_timer_element
def add_rps_shaper_schedule(self, shaper_etree, start_rps, end_rps, duration):
shaper_collection = shaper_etree.find(".//collectionProp[@name='load_profile']")
coll_prop = self._collection_prop("1817389797")
start_rps_prop = self._string_prop("49", int(start_rps))
end_rps_prop = self._string_prop("1567", int(end_rps))
duration_prop = self._string_prop("53", int(duration))
coll_prop.append(start_rps_prop)
coll_prop.append(end_rps_prop)
coll_prop.append(duration_prop)
shaper_collection.append(coll_prop)
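        # The numeric prop names above ("1817389797", "49", "1567", "53") look
        # like JMeter-generated hash names; this builder assumes the shaping
        # timer reads the three values positionally as (start RPS, end RPS,
        # duration). Hypothetical usage -- ramp 1 -> 100 RPS over 60s, then
        # hold 100 RPS for 300s:
        #   shaper = jmx.get_rps_shaper()
        #   jmx.add_rps_shaper_schedule(shaper, 1, 100, 60)
        #   jmx.add_rps_shaper_schedule(shaper, 100, 100, 300)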
def add_user_def_vars_elements(self, udv_dict):
"""
:param udv_dict:
:return:
"""
udv_element = etree.Element("Arguments", guiclass="ArgumentsPanel", testclass="Arguments",
testname="my_defined_vars")
udv_collection_prop = self._collection_prop("Arguments.arguments")
for var_name, var_value in udv_dict.items():
udv_element_prop = self._element_prop(var_name, "Argument")
udv_arg_name_prop = self._string_prop("Argument.name", var_name)
udv_arg_value_prop = self._string_prop("Argument.value", var_value)
udv_arg_desc_prop = self._string_prop("Argument.desc", "")
udv_arg_meta_prop = self._string_prop("Argument.metadata", "=")
udv_element_prop.append(udv_arg_name_prop)
udv_element_prop.append(udv_arg_value_prop)
udv_element_prop.append(udv_arg_desc_prop)
udv_element_prop.append(udv_arg_meta_prop)
udv_collection_prop.append(udv_element_prop)
udv_element.append(udv_collection_prop)
return udv_element
@staticmethod
def _get_header_mgr(hdict):
"""
:type hdict: dict[str,str]
:rtype: lxml.etree.Element
"""
mgr = etree.Element("HeaderManager", guiclass="HeaderPanel", testclass="HeaderManager", testname="Headers")
coll_prop = etree.Element("collectionProp", name="HeaderManager.headers")
for hname, hval in six.iteritems(hdict):
header = etree.Element("elementProp", name="", elementType="Header")
header.append(JMX._string_prop("Header.name", hname))
header.append(JMX._string_prop("Header.value", hval))
coll_prop.append(header)
mgr.append(coll_prop)
return mgr
@staticmethod
def _get_cache_mgr():
"""
:rtype: lxml.etree.Element
"""
mgr = etree.Element("CacheManager", guiclass="CacheManagerGui", testclass="CacheManager", testname="Cache")
return mgr
@staticmethod
def _get_cookie_mgr():
"""
:rtype: lxml.etree.Element
"""
mgr = etree.Element("CookieManager", guiclass="CookiePanel", testclass="CookieManager", testname="Cookies")
return mgr
@staticmethod
def _get_http_defaults(default_domain_name, default_port, timeout, retrieve_resources, concurrent_pool_size=4):
"""
:type timeout: int
:rtype: lxml.etree.Element
"""
cfg = etree.Element("ConfigTestElement", guiclass="HttpDefaultsGui",
testclass="ConfigTestElement", testname="Defaults")
params = etree.Element("elementProp",
name="HTTPsampler.Arguments",
elementType="Arguments",
guiclass="HTTPArgumentsPanel",
testclass="Arguments", testname="user_defined")
cfg.append(params)
if retrieve_resources:
cfg.append(JMX._bool_prop("HTTPSampler.image_parser", True))
cfg.append(JMX._bool_prop("HTTPSampler.concurrentDwn", True))
if concurrent_pool_size:
cfg.append(JMX._string_prop("HTTPSampler.concurrentPool", concurrent_pool_size))
if default_domain_name:
cfg.append(JMX._string_prop("HTTPSampler.domain", default_domain_name))
if default_port:
cfg.append(JMX._string_prop("HTTPSampler.port", default_port))
if timeout:
cfg.append(JMX._string_prop("HTTPSampler.connect_timeout", timeout))
cfg.append(JMX._string_prop("HTTPSampler.response_timeout", timeout))
return cfg
@staticmethod
def _get_dur_assertion(timeout):
"""
:type timeout: int
:return:
"""
element = etree.Element("DurationAssertion", guiclass="DurationAssertionGui",
testclass="DurationAssertion", testname="Timeout Check")
element.append(JMX._string_prop("DurationAssertion.duration", timeout))
return element
@staticmethod
def _get_constant_timer(delay):
"""
:type delay: int
:rtype: lxml.etree.Element
"""
element = etree.Element("ConstantTimer", guiclass="ConstantTimerGui",
testclass="ConstantTimer", testname="Think-Time")
element.append(JMX._string_prop("ConstantTimer.delay", delay))
return element
@staticmethod
def _get_extractor(varname, regexp, template, match_no, default):
"""
:type varname: str
:type regexp: str
:type template: str
:type match_no: int
:type default: str
:rtype: lxml.etree.Element
"""
element = etree.Element("RegexExtractor", guiclass="RegexExtractorGui",
testclass="RegexExtractor", testname="Get %s" % varname)
element.append(JMX._string_prop("RegexExtractor.refname", varname))
element.append(JMX._string_prop("RegexExtractor.regex", regexp))
element.append(JMX._string_prop("RegexExtractor.template", template))
element.append(JMX._string_prop("RegexExtractor.match_number", match_no))
element.append(JMX._string_prop("RegexExtractor.default", default))
return element
@staticmethod
def _get_json_extractor(varname, jsonpath, default):
"""
:type varname: str
:type default: str
:rtype: lxml.etree.Element
"""
package = "com.atlantbh.jmeter.plugins.jsonutils.jsonpathextractor"
element = etree.Element("%s.JSONPathExtractor" % package,
guiclass="%s.gui.JSONPathExtractorGui" % package,
testclass="%s.JSONPathExtractor" % package,
testname="Get %s" % varname)
element.append(JMX._string_prop("VAR", varname))
element.append(JMX._string_prop("JSONPATH", jsonpath))
element.append(JMX._string_prop("DEFAULT", default))
return element
@staticmethod
def _get_json_path_assertion(jsonpath, expected_value, json_validation, expect_null, invert):
"""
:type jsonpath: str
:type expected_value: str
:type json_validation: bool
:type expect_null: bool
:return: lxml.etree.Element
"""
package = "com.atlantbh.jmeter.plugins.jsonutils.jsonpathassertion"
element = etree.Element("%s.JSONPathAssertion" % package,
guiclass="%s.gui.JSONPathAssertionGui" % package,
testclass="%s.JSONPathAssertion" % package,
testname="JSon path assertion")
element.append(JMX._string_prop("JSON_PATH", jsonpath))
element.append(JMX._string_prop("EXPECTED_VALUE", expected_value))
element.append(JMX._bool_prop("JSONVALIDATION", json_validation))
element.append(JMX._bool_prop("EXPECT_NULL", expect_null))
element.append(JMX._bool_prop("INVERT", invert))
return element
@staticmethod
def _get_resp_assertion(field, contains, is_regexp, is_invert):
"""
:type field: str
:type contains: list[str]
:type is_regexp: bool
:type is_invert: bool
:rtype: lxml.etree.Element
"""
tname = "Assert %s has %s" % ("not" if is_invert else "", [str(x) for x in contains])
element = etree.Element("ResponseAssertion", guiclass="AssertionGui",
testclass="ResponseAssertion", testname=tname)
if field == JMX.FIELD_HEADERS:
fld = "Assertion.response_headers"
elif field == JMX.FIELD_RESP_CODE:
fld = "Assertion.response_code"
else:
fld = "Assertion.response_data"
if is_regexp:
if is_invert:
mtype = 6 # not contains
else:
mtype = 2 # contains
else:
if is_invert:
mtype = 20 # not substring
else:
mtype = 16 # substring
element.append(JMX._string_prop("Assertion.test_field", fld))
element.append(JMX._string_prop("Assertion.test_type", mtype))
coll_prop = etree.Element("collectionProp", name="Asserion.test_strings")
for string in contains:
coll_prop.append(JMX._string_prop("", string))
element.append(coll_prop)
return element
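        # The mtype values above follow JMeter's Assertion.test_type bitmask
        # (an assumption based on JMeter conventions): 2 = contains (regexp)
        # and 16 = substring, with bit 4 meaning "not", which yields 6 and 20
        # for the inverted variants.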
@staticmethod
def _get_csv_config(path, delimiter, is_quoted, is_recycle):
"""
:type path: str
:type delimiter: str
:type is_quoted: bool
:type is_recycle: bool
:return:
"""
element = etree.Element("CSVDataSet", guiclass="TestBeanGUI",
testclass="CSVDataSet", testname="CSV %s" % os.path.basename(path))
element.append(JMX._string_prop("filename", path))
element.append(JMX._string_prop("delimiter", delimiter))
element.append(JMX._bool_prop("quotedData", is_quoted))
element.append(JMX._bool_prop("recycle", is_recycle))
return element
def set_enabled(self, sel, state):
"""
Toggle items by selector
:type sel: str
:type state: bool
"""
items = self.get(sel)
for item in items:
item.set("enabled", 'true' if state else 'false')
def set_text(self, sel, text):
"""
Set text value
:type sel: str
:type text: str
"""
items = self.get(sel)
for item in items:
item.text = text
class JTLReader(ResultsReader):
"""
Class to read KPI JTL
:type errors_reader: JTLErrorsReader
"""
def __init__(self, filename, parent_logger, errors_filename):
super(JTLReader, self).__init__()
self.log = parent_logger.getChild(self.__class__.__name__)
self.filename = filename
self.fds = None
self.indexes = {}
self.partial_buffer = ""
self.delimiter = ","
self.offset = 0
self.errors_reader = JTLErrorsReader(errors_filename, parent_logger)
def _read(self, last_pass=False):
"""
Generator method that returns next portion of data
:type last_pass: bool
"""
self.errors_reader.read_file(last_pass)
while not self.fds and not self.__open_fds():
self.log.debug("No data to start reading yet")
yield None
self.log.debug("Reading JTL [%s]: %s", os.path.getsize(self.filename), self.filename)
        self.fds.seek(self.offset)  # without this we get stuck reads on Mac
if last_pass:
lines = self.fds.readlines() # unlimited
else:
lines = self.fds.readlines(1024 * 1024) # 1MB limit to read
self.offset = self.fds.tell()
self.log.debug("Read lines: %s / %s bytes", len(lines), len(''.join(lines)))
for line in lines:
if not line.endswith("\n"):
self.partial_buffer += line
continue
line = "%s%s" % (self.partial_buffer, line)
self.partial_buffer = ""
if not self.indexes:
self.delimiter = guess_csv_delimiter(line)
columns = line.strip().split(self.delimiter)
for idx, field in enumerate(columns):
self.indexes[field] = idx
self.log.debug("Analyzed header line: %s", self.indexes)
continue
fields = line.strip().split(self.delimiter)
label = fields[self.indexes["label"]]
concur = int(fields[self.indexes["allThreads"]])
rtm = int(fields[self.indexes["elapsed"]]) / 1000.0
ltc = int(fields[self.indexes["Latency"]]) / 1000.0
if "Connect" in self.indexes:
cnn = int(fields[self.indexes["Connect"]]) / 1000.0
                if cnn < ltc:  # this is generally a bad idea...
ltc -= cnn # fixing latency included into connect time
else:
cnn = None
rcd = fields[self.indexes["responseCode"]]
if rcd.endswith('Exception'):
rcd = rcd.split('.')[-1]
if fields[self.indexes["success"]] != "true":
error = fields[self.indexes["responseMessage"]]
else:
error = None
tstmp = int(int(fields[self.indexes["timeStamp"]]) / 1000)
yield tstmp, label, concur, rtm, cnn, ltc, rcd, error
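        # Minimal consumption sketch (hypothetical wiring; in practice the
        # results aggregator drives this generator):
        #   reader = JTLReader("kpi.jtl", logging.getLogger(''), "errors.jtl")
        #   for sample in reader._read(last_pass=True):
        #       if sample:
        #           tstmp, label, concur, rtm, cnn, ltc, rcd, error = sample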
def __open_fds(self):
"""
Opens JTL file for reading
"""
if not os.path.isfile(self.filename):
self.log.debug("File not appeared yet: %s", self.filename)
return False
fsize = os.path.getsize(self.filename)
if not fsize:
self.log.debug("File is empty: %s", self.filename)
return False
if fsize <= self.offset:
self.log.debug("Waiting file to grow larget than %s, current: %s", self.offset, fsize)
return False
self.log.debug("Opening file: %s", self.filename)
self.fds = open(self.filename)
self.fds.seek(self.offset)
return True
def __del__(self):
if self.fds:
logging.debug("Closing file descriptor for %s", self.filename)
self.fds.close()
def _calculate_datapoints(self, final_pass=False):
for point in super(JTLReader, self)._calculate_datapoints(final_pass):
data = self.errors_reader.get_data(point[DataPoint.TIMESTAMP])
for label, label_data in six.iteritems(point[DataPoint.CURRENT]):
if label in data:
label_data[KPISet.ERRORS] = data[label]
else:
label_data[KPISet.ERRORS] = {}
yield point
class JTLErrorsReader(object):
"""
Reader for errors.jtl, which is in XML max-verbose format
:type filename: str
:type parent_logger: logging.Logger
"""
assertionMessage = GenericTranslator().css_to_xpath("assertionResult>failureMessage")
url_xpath = GenericTranslator().css_to_xpath("java\\.net\\.URL")
def __init__(self, filename, parent_logger):
# http://stackoverflow.com/questions/9809469/python-sax-to-lxml-for-80gb-xml/9814580#9814580
super(JTLErrorsReader, self).__init__()
self.log = parent_logger.getChild(self.__class__.__name__)
self.parser = etree.XMLPullParser(events=('end',))
# context = etree.iterparse(self.fds, events=('end',))
self.offset = 0
self.filename = filename
self.fds = None
self.buffer = BetterDict()
def __del__(self):
if self.fds:
self.log.debug("Closing file descriptor for %s", self.filename)
self.fds.close()
def read_file(self, final_pass=False):
"""
Read the next part of the file
:type final_pass: bool
:return:
"""
if not self.fds:
if os.path.exists(self.filename):
self.log.debug("Opening %s", self.filename)
self.fds = open(self.filename) # NOTE: maybe we have the same mac problem with seek() needed
else:
self.log.debug("File not exists: %s", self.filename)
return
self.fds.seek(self.offset)
self.parser.feed(self.fds.read(1024 * 1024)) # "Huge input lookup" error without capping :)
self.offset = self.fds.tell()
for action, elem in self.parser.read_events():
if elem.getparent() is None or elem.getparent().tag != 'testResults':
continue
if elem.items():
self.__extract_standard(elem)
else:
self.__extract_nonstandard(elem)
# cleanup processed from the memory
elem.clear()
while elem.getprevious() is not None:
del elem.getparent()[0]
def get_data(self, max_ts):
"""
Get accumulated errors data up to specified timestamp
:param max_ts:
:return:
"""
result = BetterDict()
for ts in sorted(self.buffer.keys()):
if ts > max_ts:
break
labels = self.buffer.pop(ts)
for label, label_data in six.iteritems(labels):
res = result.get(label, [])
for err_item in label_data:
KPISet.inc_list(res, ('msg', err_item['msg']), err_item)
return result
def __extract_standard(self, elem):
ts = int(elem.get("ts")) / 1000
label = elem.get("lb")
message = elem.get("rm")
rc = elem.get("rc")
urls = elem.xpath(self.url_xpath)
if urls:
url = Counter({urls[0].text: 1})
else:
url = Counter()
errtype = KPISet.ERRTYPE_ERROR
massert = elem.xpath(self.assertionMessage)
if len(massert):
errtype = KPISet.ERRTYPE_ASSERT
message = massert[0].text
err_item = KPISet.error_item_skel(message, rc, 1, errtype, url)
KPISet.inc_list(self.buffer.get(ts).get(label, []), ("msg", message), err_item)
KPISet.inc_list(self.buffer.get(ts).get('', []), ("msg", message), err_item)
def __extract_nonstandard(self, elem):
        ts = int(self.__get_child(elem, 'timeStamp')) / 1000  # NOTE: could this sometimes be EndTime?
label = self.__get_child(elem, "label")
message = self.__get_child(elem, "responseMessage")
rc = self.__get_child(elem, "responseCode")
urls = elem.xpath(self.url_xpath)
if urls:
url = Counter({urls[0].text: 1})
else:
url = Counter()
errtype = KPISet.ERRTYPE_ERROR
massert = elem.xpath(self.assertionMessage)
if len(massert):
errtype = KPISet.ERRTYPE_ASSERT
message = massert[0].text
err_item = KPISet.error_item_skel(message, rc, 1, errtype, url)
KPISet.inc_list(self.buffer.get(ts).get(label, []), ("msg", message), err_item)
KPISet.inc_list(self.buffer.get(ts).get('', []), ("msg", message), err_item)
def __get_child(self, elem, tag):
for child in elem:
if child.tag == tag:
return child.text
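        # NOTE: implicitly returns None when no matching child tag is found.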
class JMeterWidget(urwid.Pile):
"""
Progress sidebar widget
:type executor: bzt.modules.jmeter.JMeterExecutor
"""
def __init__(self, executor):
self.executor = executor
self.dur = executor.get_load().duration
widgets = []
if self.executor.original_jmx:
self.script_name = urwid.Text("Script: %s" % os.path.basename(self.executor.original_jmx))
widgets.append(self.script_name)
if self.dur:
self.progress = urwid.ProgressBar('pb-en', 'pb-dis', done=self.dur)
else:
self.progress = urwid.Text("Running...")
widgets.append(self.progress)
self.elapsed = urwid.Text("Elapsed: N/A")
self.eta = urwid.Text("ETA: N/A", align=urwid.RIGHT)
widgets.append(urwid.Columns([self.elapsed, self.eta]))
super(JMeterWidget, self).__init__(widgets)
def update(self):
"""
Refresh widget values
"""
if self.executor.start_time:
elapsed = time.time() - self.executor.start_time
self.elapsed.set_text("Elapsed: %s" % humanize_time(elapsed))
if self.dur:
eta = self.dur - elapsed
if eta >= 0:
self.eta.set_text("ETA: %s" % humanize_time(eta))
else:
over = elapsed - self.dur
self.eta.set_text("Overtime: %s" % humanize_time(over))
else:
self.eta.set_text("")
if isinstance(self.progress, urwid.ProgressBar):
self.progress.set_completion(elapsed)
self._invalidate()
class JMeterScenarioBuilder(JMX):
"""
Helper to build JMeter test plan from Scenario
:param original: inherited from JMX
"""
def __init__(self, original=None):
super(JMeterScenarioBuilder, self).__init__(original)
self.scenario = Scenario()
def __add_managers(self):
headers = self.scenario.get_headers()
if headers:
self.append(self.TEST_PLAN_SEL, self._get_header_mgr(headers))
self.append(self.TEST_PLAN_SEL, etree.Element("hashTree"))
if self.scenario.get("store-cache", True):
self.append(self.TEST_PLAN_SEL, self._get_cache_mgr())
self.append(self.TEST_PLAN_SEL, etree.Element("hashTree"))
if self.scenario.get("store-cookie", True):
self.append(self.TEST_PLAN_SEL, self._get_cookie_mgr())
self.append(self.TEST_PLAN_SEL, etree.Element("hashTree"))
def __add_defaults(self):
"""
:return:
"""
default_domain = self.scenario.get("default-domain", None)
default_port = self.scenario.get("default-port", None)
retrieve_resources = self.scenario.get("retrieve-resources", True)
concurrent_pool_size = self.scenario.get("concurrent-pool-size", 4)
timeout = self.scenario.get("timeout", None)
timeout = int(1000 * dehumanize_time(timeout))
self.append(self.TEST_PLAN_SEL, self._get_http_defaults(default_domain, default_port, timeout,
retrieve_resources, concurrent_pool_size))
self.append(self.TEST_PLAN_SEL, etree.Element("hashTree"))
def __add_think_time(self, children, request):
global_ttime = self.scenario.get("think-time", None)
if request.think_time is not None:
ttime = int(1000 * dehumanize_time(request.think_time))
elif global_ttime is not None:
ttime = int(1000 * dehumanize_time(global_ttime))
else:
ttime = None
if ttime is not None:
children.append(JMX._get_constant_timer(ttime))
children.append(etree.Element("hashTree"))
def __add_extractors(self, children, request):
extractors = request.config.get("extract-regexp", BetterDict())
for varname in extractors:
cfg = ensure_is_dict(extractors, varname, "regexp")
extractor = JMX._get_extractor(varname, cfg['regexp'], '$%s$' % cfg.get('template', 1),
cfg.get('match-no', 1), cfg.get('default', 'NOT_FOUND'))
children.append(extractor)
children.append(etree.Element("hashTree"))
jextractors = request.config.get("extract-jsonpath", BetterDict())
for varname in jextractors:
cfg = ensure_is_dict(jextractors, varname, "jsonpath")
children.append(JMX._get_json_extractor(
varname,
cfg['jsonpath'],
cfg.get('default', 'NOT_FOUND'))
)
children.append(etree.Element("hashTree"))
def __add_assertions(self, children, request):
assertions = request.config.get("assert", [])
for idx, assertion in enumerate(assertions):
assertion = ensure_is_dict(assertions, idx, "contains")
if not isinstance(assertion['contains'], list):
assertion['contains'] = [assertion['contains']]
children.append(JMX._get_resp_assertion(
assertion.get("subject", self.FIELD_BODY),
assertion['contains'],
assertion.get('regexp', True),
assertion.get('not', False)
))
children.append(etree.Element("hashTree"))
jpath_assertions = request.config.get("assert-jsonpath", [])
for idx, assertion in enumerate(jpath_assertions):
assertion = ensure_is_dict(jpath_assertions, idx, "jsonpath")
children.append(JMX._get_json_path_assertion(
assertion['jsonpath'],
assertion.get('expected-value', ''),
assertion.get('validate', False),
assertion.get('expect-null', False),
assertion.get('invert', False),
))
children.append(etree.Element("hashTree"))
def __add_requests(self):
global_timeout = self.scenario.get("timeout", None)
global_keepalive = self.scenario.get("keepalive", True)
for request in self.scenario.get_requests():
if request.timeout is not None:
timeout = int(1000 * dehumanize_time(request.timeout))
elif global_timeout is not None:
timeout = int(1000 * dehumanize_time(global_timeout))
else:
timeout = None
http = JMX._get_http_request(request.url, request.label, request.method, timeout, request.body,
global_keepalive)
self.append(self.THR_GROUP_SEL, http)
children = etree.Element("hashTree")
self.append(self.THR_GROUP_SEL, children)
if request.headers:
children.append(JMX._get_header_mgr(request.headers))
children.append(etree.Element("hashTree"))
self.__add_think_time(children, request)
self.__add_assertions(children, request)
if timeout is not None:
children.append(JMX._get_dur_assertion(timeout))
children.append(etree.Element("hashTree"))
self.__add_extractors(children, request)
def __generate(self):
"""
Generate the test plan
"""
# NOTE: set realistic dns-cache and JVM prop by default?
self.__add_managers()
self.__add_defaults()
self.__add_datasources()
thread_group = JMX._get_thread_group(1, 0, 1)
self.append(self.TEST_PLAN_SEL, thread_group)
self.append(self.TEST_PLAN_SEL, etree.Element("hashTree", type="tg")) # arbitrary trick with our own attribute
self.__add_requests()
self.__add_results_tree()
def save(self, filename):
"""
Generate test plan and save
:type filename: str
"""
        # NOTE: bad design, as repeated saves will duplicate generated elements
self.__generate()
super(JMeterScenarioBuilder, self).save(filename)
def __add_results_tree(self):
dbg_tree = etree.Element("ResultCollector",
testname="View Results Tree",
testclass="ResultCollector",
guiclass="ViewResultsFullVisualizer")
self.append(self.TEST_PLAN_SEL, dbg_tree)
self.append(self.TEST_PLAN_SEL, etree.Element("hashTree"))
def __add_datasources(self):
sources = self.scenario.get("data-sources", [])
for idx, source in enumerate(sources):
source = ensure_is_dict(sources, idx, "path")
delimiter = source.get("delimiter", self.__guess_delimiter(source['path']))
self.append(self.TEST_PLAN_SEL, JMX._get_csv_config(
os.path.abspath(source['path']), delimiter,
source.get("quoted", False), source.get("loop", True)
))
self.append(self.TEST_PLAN_SEL, etree.Element("hashTree"))
def __guess_delimiter(self, path):
with open(path) as fhd:
header = fhd.read(4096) # 4KB is enough for header
return guess_csv_delimiter(header)
 | 1 | 13,468 | This is a very, very bad idea because of its performance impact | Blazemeter-taurus | py |
@@ -6,6 +6,7 @@ import (
"database/sql"
"encoding/json"
"fmt"
+ "github.com/sonm-io/core/proto"
"math/big"
"net"
"sync" | 1 | package dwh
import (
"context"
"crypto/ecdsa"
"database/sql"
"encoding/json"
"fmt"
"math/big"
"net"
"sync"
"time"
"github.com/grpc-ecosystem/go-grpc-prometheus"
_ "github.com/mattn/go-sqlite3"
log "github.com/noxiouz/zapctx/ctxlog"
"github.com/pkg/errors"
"github.com/sonm-io/core/blockchain"
"github.com/sonm-io/core/proto"
"github.com/sonm-io/core/util"
"github.com/sonm-io/core/util/rest"
"github.com/sonm-io/core/util/xgrpc"
"go.uber.org/zap"
"golang.org/x/sync/errgroup"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/credentials"
"google.golang.org/grpc/status"
)
type DWH struct {
logger *zap.Logger
mu sync.RWMutex
ctx context.Context
cfg *DWHConfig
key *ecdsa.PrivateKey
cancel context.CancelFunc
grpc *grpc.Server
http *rest.Server
db *sql.DB
creds credentials.TransportCredentials
certRotator util.HitlessCertRotator
blockchain blockchain.API
storage *sqlStorage
lastEvent *blockchain.Event
stats *sonm.DWHStatsReply
}
func NewDWH(ctx context.Context, cfg *DWHConfig, key *ecdsa.PrivateKey) (*DWH, error) {
ctx, cancel := context.WithCancel(ctx)
w := &DWH{
ctx: ctx,
cancel: cancel,
cfg: cfg,
key: key,
logger: log.GetLogger(ctx),
}
return w, nil
}
func (m *DWH) Serve() error {
m.logger.Info("starting with backend", zap.String("endpoint", m.cfg.Storage.Endpoint))
var err error
m.db, err = sql.Open("postgres", m.cfg.Storage.Endpoint)
if err != nil {
m.Stop()
return err
}
bch, err := blockchain.NewAPI(m.ctx, blockchain.WithConfig(m.cfg.Blockchain))
if err != nil {
m.Stop()
return fmt.Errorf("failed to create NewAPI: %v", err)
}
m.blockchain = bch
numBenchmarks, err := m.blockchain.Market().GetNumBenchmarks(m.ctx)
if err != nil {
return fmt.Errorf("failed to GetNumBenchmarks: %v", err)
}
m.storage = newPostgresStorage(numBenchmarks)
wg := errgroup.Group{}
wg.Go(m.serveGRPC)
wg.Go(m.serveHTTP)
wg.Go(m.monitorStatistics)
return wg.Wait()
}
func (m *DWH) Stop() {
m.mu.Lock()
defer m.mu.Unlock()
m.stop()
}
func (m *DWH) stop() {
if m.cancel != nil {
m.cancel()
}
if m.db != nil {
m.db.Close()
}
if m.grpc != nil {
m.grpc.Stop()
}
if m.http != nil {
m.http.Close()
}
}
func (m *DWH) serveGRPC() error {
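	// Listener and server setup run inside a locked closure so that Stop
	// (which also takes m.mu) cannot race with initialization; the blocking
	// Serve call below runs outside the lock.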
lis, err := func() (net.Listener, error) {
m.mu.Lock()
defer m.mu.Unlock()
certRotator, TLSConfig, err := util.NewHitlessCertRotator(m.ctx, m.key)
if err != nil {
return nil, err
}
m.certRotator = certRotator
m.creds = util.NewTLS(TLSConfig)
m.grpc = xgrpc.NewServer(
m.logger,
xgrpc.Credentials(m.creds),
xgrpc.DefaultTraceInterceptor(),
xgrpc.UnaryServerInterceptor(m.unaryInterceptor),
)
sonm.RegisterDWHServer(m.grpc, m)
grpc_prometheus.Register(m.grpc)
lis, err := net.Listen("tcp", m.cfg.GRPCListenAddr)
if err != nil {
return nil, fmt.Errorf("failed to listen on %s: %v", m.cfg.GRPCListenAddr, err)
}
return lis, nil
}()
if err != nil {
return err
}
return m.grpc.Serve(lis)
}
func (m *DWH) serveHTTP() error {
lis, err := func() (net.Listener, error) {
m.mu.Lock()
defer m.mu.Unlock()
options := []rest.Option{rest.WithLog(m.logger)}
lis, err := net.Listen("tcp", m.cfg.HTTPListenAddr)
if err != nil {
return nil, fmt.Errorf("failed to create http listener: %v", err)
}
srv := rest.NewServer(options...)
err = srv.RegisterService((*sonm.DWHServer)(nil), m)
if err != nil {
return nil, fmt.Errorf("failed to RegisterService: %v", err)
}
m.http = srv
return lis, err
}()
if err != nil {
return err
}
return m.http.Serve(lis)
}
func (m *DWH) monitorNumBenchmarks() error {
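	// NOTE: this watcher is not part of the errgroup in Serve above, so
	// nothing in this file starts it; Serve only runs serveGRPC, serveHTTP
	// and monitorStatistics.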
lastBlock, err := m.blockchain.Events().GetLastBlock(m.ctx)
if err != nil {
return err
}
filter := m.blockchain.Events().GetMarketFilter(big.NewInt(0).SetUint64(lastBlock))
events, err := m.blockchain.Events().GetEvents(m.ctx, filter)
if err != nil {
return err
}
for {
event, ok := <-events
if !ok {
return errors.New("events channel closed")
}
if _, ok := event.Data.(*blockchain.NumBenchmarksUpdatedData); ok {
if m.storage, err = setupDB(m.ctx, m.db, m.blockchain); err != nil {
return fmt.Errorf("failed to setupDB after NumBenchmarksUpdated event: %v", err)
}
if err := m.storage.CreateIndices(m.db); err != nil {
return fmt.Errorf("failed to CreateIndices (onNumBenchmarksUpdated): %v", err)
}
}
}
}
func (m *DWH) monitorStatistics() error {
tk := util.NewImmediateTicker(time.Second)
for {
select {
case <-tk.C:
func() {
conn := newSimpleConn(m.db)
defer conn.Finish()
if stats, err := m.storage.getStats(conn); err != nil {
m.logger.Warn("failed to getStats", zap.Error(err))
} else {
m.mu.Lock()
m.stats = stats
m.mu.Unlock()
}
}()
case <-m.ctx.Done():
return errors.New("monitorStatistics: context cancelled")
}
}
}
// unaryInterceptor RLocks DWH for all incoming requests. This is needed because some events (e.g.,
// NumBenchmarksUpdated) can alter `m.storage` state.
func (m *DWH) unaryInterceptor(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo,
handler grpc.UnaryHandler) (resp interface{}, err error) {
m.mu.RLock()
defer m.mu.RUnlock()
return handler(ctx, req)
}
func (m *DWH) GetDeals(ctx context.Context, request *sonm.DealsRequest) (*sonm.DWHDealsReply, error) {
conn := newSimpleConn(m.db)
defer conn.Finish()
deals, count, err := m.storage.GetDeals(conn, request)
if err != nil {
m.logger.Warn("failed to GetDeals", zap.Error(err), zap.Any("request", *request))
return nil, status.Error(codes.NotFound, "failed to GetDeals")
}
return &sonm.DWHDealsReply{Deals: deals, Count: count}, nil
}
func (m *DWH) GetDealDetails(ctx context.Context, request *sonm.BigInt) (*sonm.DWHDeal, error) {
conn := newSimpleConn(m.db)
defer conn.Finish()
out, err := m.storage.GetDealByID(conn, request.Unwrap())
if err != nil {
m.logger.Warn("failed to GetDealDetails", zap.Error(err), zap.Any("request", *request))
return nil, status.Error(codes.NotFound, "failed to GetDealDetails")
}
return out, nil
}
func (m *DWH) GetDealConditions(ctx context.Context, request *sonm.DealConditionsRequest) (*sonm.DealConditionsReply, error) {
conn := newSimpleConn(m.db)
defer conn.Finish()
dealConditions, count, err := m.storage.GetDealConditions(conn, request)
if err != nil {
m.logger.Warn("failed to GetDealConditions", zap.Error(err), zap.Any("request", *request))
return nil, status.Error(codes.NotFound, "failed to GetDealConditions")
}
return &sonm.DealConditionsReply{Conditions: dealConditions, Count: count}, nil
}
func (m *DWH) GetOrders(ctx context.Context, request *sonm.OrdersRequest) (*sonm.DWHOrdersReply, error) {
conn := newSimpleConn(m.db)
defer conn.Finish()
orders, count, err := m.storage.GetOrders(conn, request)
if err != nil {
m.logger.Warn("failed to GetOrders", zap.Error(err), zap.Any("request", *request))
return nil, status.Error(codes.NotFound, "failed to GetOrders")
}
return &sonm.DWHOrdersReply{Orders: orders, Count: count}, nil
}
func (m *DWH) GetMatchingOrders(ctx context.Context, request *sonm.MatchingOrdersRequest) (*sonm.DWHOrdersReply, error) {
conn := newSimpleConn(m.db)
defer conn.Finish()
orders, count, err := m.storage.GetMatchingOrders(conn, request)
if err != nil {
m.logger.Warn("failed to GetMatchingOrders", zap.Error(err), zap.Any("request", *request))
return nil, status.Error(codes.NotFound, "failed to GetMatchingOrders")
}
return &sonm.DWHOrdersReply{Orders: orders, Count: count}, nil
}
func (m *DWH) GetOrderDetails(ctx context.Context, request *sonm.BigInt) (*sonm.DWHOrder, error) {
conn := newSimpleConn(m.db)
defer conn.Finish()
out, err := m.storage.GetOrderByID(conn, request.Unwrap())
if err != nil {
m.logger.Warn("failed to GetOrderDetails", zap.Error(err), zap.Any("request", *request))
return nil, fmt.Errorf("failed to GetOrderDetails: %v", err)
}
return out, nil
}
func (m *DWH) GetProfiles(ctx context.Context, request *sonm.ProfilesRequest) (*sonm.ProfilesReply, error) {
conn := newSimpleConn(m.db)
defer conn.Finish()
profiles, count, err := m.storage.GetProfiles(conn, request)
if err != nil {
m.logger.Warn("failed to GetProfiles", zap.Error(err), zap.Any("request", *request))
return nil, status.Error(codes.NotFound, "failed to GetProfiles")
}
return &sonm.ProfilesReply{Profiles: profiles, Count: count}, nil
}
func (m *DWH) GetProfileInfo(ctx context.Context, request *sonm.EthID) (*sonm.Profile, error) {
conn := newSimpleConn(m.db)
defer conn.Finish()
out, err := m.storage.GetProfileByID(conn, request.GetId().Unwrap())
if err != nil {
m.logger.Warn("failed to GetProfileInfo", zap.Error(err), zap.Any("request", *request))
return nil, status.Error(codes.NotFound, "failed to GetProfileInfo")
}
certs, err := m.storage.GetCertificates(conn, request.GetId().Unwrap())
if err != nil {
return nil, fmt.Errorf("failed to GetCertificates: %v", err)
}
certsEncoded, err := json.Marshal(certs)
if err != nil {
return nil, fmt.Errorf("failed to marshal %s certificates: %v", request.GetId().Unwrap(), err)
}
out.Certificates = string(certsEncoded)
return out, nil
}
func (m *DWH) GetBlacklist(ctx context.Context, request *sonm.BlacklistRequest) (*sonm.BlacklistReply, error) {
conn := newSimpleConn(m.db)
defer conn.Finish()
out, err := m.storage.GetBlacklist(conn, request)
if err != nil {
m.logger.Warn("failed to GetBlacklist", zap.Error(err), zap.Any("request", *request))
return nil, status.Error(codes.NotFound, "failed to GetBlacklist")
}
return out, nil
}
func (m *DWH) GetBlacklistsContainingUser(ctx context.Context, r *sonm.BlacklistRequest) (*sonm.BlacklistsContainingUserReply, error) {
conn := newSimpleConn(m.db)
defer conn.Finish()
out, err := m.storage.GetBlacklistsContainingUser(conn, r)
if err != nil {
m.logger.Warn("failed to GetBlacklistsContainingUser", zap.Error(err), zap.Any("request", *r))
		return nil, status.Error(codes.NotFound, "failed to GetBlacklistsContainingUser")
}
return out, nil
}
func (m *DWH) GetValidators(ctx context.Context, request *sonm.ValidatorsRequest) (*sonm.ValidatorsReply, error) {
conn := newSimpleConn(m.db)
defer conn.Finish()
validators, count, err := m.storage.GetValidators(conn, request)
if err != nil {
m.logger.Warn("failed to GetValidators", zap.Error(err), zap.Any("request", *request))
return nil, status.Error(codes.NotFound, "failed to GetValidators")
}
return &sonm.ValidatorsReply{Validators: validators, Count: count}, nil
}
func (m *DWH) GetDealChangeRequests(ctx context.Context, dealID *sonm.BigInt) (*sonm.DealChangeRequestsReply, error) {
return m.GetChangeRequests(ctx, &sonm.ChangeRequestsRequest{
DealID: dealID,
OnlyActive: true,
})
}
func (m *DWH) GetChangeRequests(ctx context.Context, request *sonm.ChangeRequestsRequest) (*sonm.DealChangeRequestsReply, error) {
conn := newSimpleConn(m.db)
defer conn.Finish()
out, err := m.storage.GetDealChangeRequestsByDealID(conn, request.DealID.Unwrap(), request.OnlyActive)
if err != nil {
m.logger.Error("failed to GetDealChangeRequests", zap.Error(err), zap.Any("request", *request))
return nil, status.Error(codes.NotFound, "failed to GetDealChangeRequests")
}
return &sonm.DealChangeRequestsReply{
Requests: out,
}, nil
}
func (m *DWH) GetWorkers(ctx context.Context, request *sonm.WorkersRequest) (*sonm.WorkersReply, error) {
conn := newSimpleConn(m.db)
defer conn.Finish()
workers, count, err := m.storage.GetWorkers(conn, request)
if err != nil {
m.logger.Error("failed to GetWorkers", zap.Error(err), zap.Any("request", *request))
return nil, status.Error(codes.NotFound, "failed to GetWorkers")
}
return &sonm.WorkersReply{Workers: workers, Count: count}, nil
}
func (m *DWH) GetStats(ctx context.Context, request *sonm.Empty) (*sonm.DWHStatsReply, error) {
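	// Returns the snapshot cached by monitorStatistics, which writes it under
	// the full lock; on the gRPC path this read is guarded by the RLock taken
	// in unaryInterceptor.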
return m.stats, nil
}
func (m *DWH) GetOrdersByIDs(ctx context.Context, request *sonm.OrdersByIDsRequest) (*sonm.DWHOrdersReply, error) {
conn := newSimpleConn(m.db)
defer conn.Finish()
orders, count, err := m.storage.GetOrdersByIDs(conn, request)
if err != nil {
m.logger.Warn("failed to GetOrdersByIDs", zap.Error(err), zap.Any("request", *request))
		return nil, status.Error(codes.NotFound, "failed to GetOrdersByIDs")
}
return &sonm.DWHOrdersReply{
Orders: orders,
Count: count,
}, nil
}
| 1 | 7,833 | WHY U NOT SORT IMPORTS? | sonm-io-core | go |
@@ -0,0 +1,11 @@
+module SignInRequestHelpers
+ def sign_in_as(user)
+ post(
+ "/session",
+ session: {
+ email: user.email,
+ password: user.password,
+ },
+ )
+ end
+end | 1 | 1 | 16,741 | Put a comma after the last item of a multiline hash. | thoughtbot-upcase | rb |
|
@@ -12,7 +12,7 @@ __version__ = param.Version(release=(1,7,0), fpath=__file__,
commit="$Format:%h$", reponame='holoviews')
from .core import archive # noqa (API import)
-from .core.dimension import OrderedDict, Dimension # noqa (API import)
+from .core.dimension import OrderedDict, Dimension, Dimensioned # noqa (API import)
from .core.boundingregion import BoundingBox # noqa (API import)
from .core.options import (Options, Store, Cycle, # noqa (API import)
Palette, StoreOptions) | 1 | from __future__ import print_function, absolute_import
import os, sys, pydoc
import numpy as np # noqa (API import)
_cwd = os.path.abspath(os.path.split(__file__)[0])
sys.path.insert(0, os.path.join(_cwd, '..', 'param'))
import param
__version__ = param.Version(release=(1,7,0), fpath=__file__,
commit="$Format:%h$", reponame='holoviews')
from .core import archive # noqa (API import)
from .core.dimension import OrderedDict, Dimension # noqa (API import)
from .core.boundingregion import BoundingBox # noqa (API import)
from .core.options import (Options, Store, Cycle, # noqa (API import)
Palette, StoreOptions)
from .core.layout import * # noqa (API import)
from .core.element import * # noqa (API import)
from .core.overlay import * # noqa (API import)
from .core.tree import * # noqa (API import)
from .core.spaces import (HoloMap, Callable, DynamicMap, # noqa (API import)
GridSpace, GridMatrix)
from .interface import * # noqa (API import)
from .operation import Operation # noqa (API import)
from .operation import ElementOperation # noqa (Deprecated API import)
from .element import * # noqa (API import)
from .element import __all__ as elements_list
from . import util # noqa (API import)
# Suppress warnings generated by NumPy in matplotlib
# Expected to be fixed in next matplotlib release
import warnings
warnings.filterwarnings("ignore",
message="elementwise comparison failed; returning scalar instead")
try:
import IPython # noqa (API import)
from .ipython import notebook_extension
except ImportError as e:
class notebook_extension(param.ParameterizedFunction):
def __call__(self, *args, **opts):
raise Exception("IPython notebook not available")
# A single holoviews.rc file may be executed if found.
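# Illustrative rc file (hypothetical contents -- any Python placed in the rc
# file is executed verbatim), e.g. in ~/.holoviews.rc:
#   import holoviews
#   holoviews.archive.auto()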
for rcfile in [os.environ.get("HOLOVIEWSRC", ''),
"~/.holoviews.rc",
"~/.config/holoviews/holoviews.rc"]:
try:
filename = os.path.expanduser(rcfile)
with open(filename) as f:
code = compile(f.read(), filename, 'exec')
try:
exec(code)
except Exception as e:
print("Warning: Could not load %r [%r]" % (filename, str(e)))
break
except IOError:
pass
def help(obj, visualization=True, ansi=True, backend=None,
recursive=False, pattern=None):
"""
Extended version of the built-in help that supports parameterized
functions and objects. A pattern (regular expression) may be used to
filter the output and if recursive is set to True, documentation for
the supplied object is shown. Note that the recursive option will
only work with an object instance and not a class.
If ansi is set to False, all ANSI color
codes are stripped out.
"""
backend = backend if backend else Store.current_backend
info = Store.info(obj, ansi=ansi, backend=backend, visualization=visualization,
recursive=recursive, pattern=pattern, elements=elements_list)
msg = ( "\nTo view the visualization options applicable to this "
"object or class, use:\n\n"
" holoviews.help(obj, visualization=True)\n\n")
if info:
print((msg if visualization is False else '') + info)
else:
pydoc.help(obj)
| 1 | 17,859 | How come we need ``Dimensioned`` in the top-level namespace? | holoviz-holoviews | py |
@@ -75,9 +75,9 @@ import net.runelite.client.util.Text;
import net.runelite.client.util.WildcardMatcher;
@PluginDescriptor(
- name = "NPC Indicators",
- description = "Highlight NPCs on-screen and/or on the minimap",
- tags = {"highlight", "minimap", "npcs", "overlay", "respawn", "tags"}
+ name = "NPC Indicators",
+ description = "Highlight NPCs on-screen and/or on the minimap",
+ tags = {"highlight", "minimap", "npcs", "overlay", "respawn", "tags"}
)
@Slf4j
@Singleton | 1 | /*
* Copyright (c) 2018, James Swindle <[email protected]>
* Copyright (c) 2018, Adam <[email protected]>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
package net.runelite.client.plugins.npchighlight;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.collect.ImmutableSet;
import com.google.inject.Provides;
import java.awt.Color;
import java.time.Instant;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Set;
import javax.inject.Inject;
import javax.inject.Singleton;
import lombok.AccessLevel;
import lombok.Getter;
import lombok.Setter;
import lombok.extern.slf4j.Slf4j;
import net.runelite.api.Client;
import net.runelite.api.GameState;
import net.runelite.api.GraphicID;
import net.runelite.api.GraphicsObject;
import net.runelite.api.MenuAction;
import static net.runelite.api.MenuAction.MENU_ACTION_DEPRIORITIZE_OFFSET;
import net.runelite.api.MenuEntry;
import net.runelite.api.NPC;
import net.runelite.api.coords.WorldPoint;
import net.runelite.api.events.ConfigChanged;
import net.runelite.api.events.FocusChanged;
import net.runelite.api.events.GameStateChanged;
import net.runelite.api.events.GameTick;
import net.runelite.api.events.GraphicsObjectCreated;
import net.runelite.api.events.MenuEntryAdded;
import net.runelite.api.events.MenuOptionClicked;
import net.runelite.api.events.NpcDefinitionChanged;
import net.runelite.api.events.NpcDespawned;
import net.runelite.api.events.NpcSpawned;
import net.runelite.client.callback.ClientThread;
import net.runelite.client.config.ConfigManager;
import net.runelite.client.eventbus.EventBus;
import net.runelite.client.input.KeyManager;
import net.runelite.client.plugins.Plugin;
import net.runelite.client.plugins.PluginDescriptor;
import net.runelite.client.ui.overlay.OverlayManager;
import net.runelite.client.util.Text;
import net.runelite.client.util.WildcardMatcher;
@PluginDescriptor(
name = "NPC Indicators",
description = "Highlight NPCs on-screen and/or on the minimap",
tags = {"highlight", "minimap", "npcs", "overlay", "respawn", "tags"}
)
@Slf4j
@Singleton
public class NpcIndicatorsPlugin extends Plugin
{
private static final int MAX_ACTOR_VIEW_RANGE = 15;
// Option added to NPC menu
private static final String TAG = "Tag";
private static final String UNTAG = "Untag";
private static final Set<MenuAction> NPC_MENU_ACTIONS = ImmutableSet.of(MenuAction.NPC_FIRST_OPTION, MenuAction.NPC_SECOND_OPTION,
MenuAction.NPC_THIRD_OPTION, MenuAction.NPC_FOURTH_OPTION, MenuAction.NPC_FIFTH_OPTION);
@Inject
private Client client;
@Inject
private NpcIndicatorsConfig config;
@Inject
private OverlayManager overlayManager;
@Inject
private NpcSceneOverlay npcSceneOverlay;
@Inject
private NpcMinimapOverlay npcMinimapOverlay;
@Inject
private NpcIndicatorsInput inputListener;
@Inject
private KeyManager keyManager;
@Inject
private ClientThread clientThread;
@Inject
private EventBus eventbus;
@Setter(AccessLevel.PACKAGE)
private boolean hotKeyPressed = false;
/**
* NPCs to highlight
*/
@Getter(AccessLevel.PACKAGE)
private final Set<NPC> highlightedNpcs = new HashSet<>();
/**
* Dead NPCs that should be displayed with a respawn indicator if the config is on.
*/
@Getter(AccessLevel.PACKAGE)
private final Map<Integer, MemorizedNpc> deadNpcsToDisplay = new HashMap<>();
/**
* The time when the last game tick event ran.
*/
@Getter(AccessLevel.PACKAGE)
private Instant lastTickUpdate;
/**
* Tagged NPCs that have died at some point, which are memorized to
* remember when and where they will respawn
*/
private final Map<Integer, MemorizedNpc> memorizedNpcs = new HashMap<>();
/**
* Highlight strings from the configuration
*/
private List<String> highlights = new ArrayList<>();
/**
* NPC ids marked with the Tag option
*/
private final Set<Integer> npcTags = new HashSet<>();
/**
* Tagged NPCs that spawned this tick, which need to be verified that
* they actually spawned and didn't just walk into view range.
*/
private final List<NPC> spawnedNpcsThisTick = new ArrayList<>();
/**
* Tagged NPCs that despawned this tick, which need to be verified that
	 * they actually despawned and didn't just walk out of view range.
*/
private final List<NPC> despawnedNpcsThisTick = new ArrayList<>();
/**
* World locations of graphics object which indicate that an
* NPC teleported that were played this tick.
*/
private final Set<WorldPoint> teleportGraphicsObjectSpawnedThisTick = new HashSet<>();
/**
* The players location on the last game tick.
*/
private WorldPoint lastPlayerLocation;
/**
* When hopping worlds, NPCs can spawn without them actually respawning,
* so we would not want to mark it as a real spawn in those cases.
*/
private boolean skipNextSpawnCheck = false;
@Getter(AccessLevel.PACKAGE)
private RenderStyle renderStyle;
@Setter(AccessLevel.PACKAGE)
private String getNpcToHighlight;
@Getter(AccessLevel.PACKAGE)
private Color getHighlightColor;
@Getter(AccessLevel.PACKAGE)
private boolean drawNames;
@Getter(AccessLevel.PACKAGE)
private boolean drawMinimapNames;
@Getter(AccessLevel.PACKAGE)
private boolean highlightMenuNames;
@Getter(AccessLevel.PACKAGE)
private boolean showRespawnTimer;
@Provides
NpcIndicatorsConfig provideConfig(ConfigManager configManager)
{
return configManager.getConfig(NpcIndicatorsConfig.class);
}
@Override
protected void startUp() throws Exception
{
updateConfig();
addSubscriptions();
overlayManager.add(npcSceneOverlay);
overlayManager.add(npcMinimapOverlay);
keyManager.registerKeyListener(inputListener);
highlights = getHighlights();
clientThread.invoke(() ->
{
skipNextSpawnCheck = true;
rebuildAllNpcs();
});
}
@Override
protected void shutDown() throws Exception
{
eventbus.unregister(this);
overlayManager.remove(npcSceneOverlay);
overlayManager.remove(npcMinimapOverlay);
deadNpcsToDisplay.clear();
memorizedNpcs.clear();
spawnedNpcsThisTick.clear();
despawnedNpcsThisTick.clear();
teleportGraphicsObjectSpawnedThisTick.clear();
npcTags.clear();
highlightedNpcs.clear();
keyManager.unregisterKeyListener(inputListener);
}
private void addSubscriptions()
{
eventbus.subscribe(ConfigChanged.class, this, this::onConfigChanged);
eventbus.subscribe(GameStateChanged.class, this, this::onGameStateChanged);
eventbus.subscribe(FocusChanged.class, this, this::onFocusChanged);
eventbus.subscribe(MenuEntryAdded.class, this, this::onMenuEntryAdded);
eventbus.subscribe(MenuOptionClicked.class, this, this::onMenuOptionClicked);
eventbus.subscribe(NpcSpawned.class, this, this::onNpcSpawned);
eventbus.subscribe(NpcDefinitionChanged.class, this, this::onNpcDefinitionChanged);
eventbus.subscribe(NpcDespawned.class, this, this::onNpcDespawned);
eventbus.subscribe(GraphicsObjectCreated.class, this, this::onGraphicsObjectCreated);
eventbus.subscribe(GameTick.class, this, this::onGameTick);
}
private void onGameStateChanged(GameStateChanged event)
{
if (event.getGameState() == GameState.LOGIN_SCREEN ||
event.getGameState() == GameState.HOPPING)
{
highlightedNpcs.clear();
deadNpcsToDisplay.clear();
memorizedNpcs.forEach((id, npc) -> npc.setDiedOnTick(-1));
lastPlayerLocation = null;
skipNextSpawnCheck = true;
}
}
private void onConfigChanged(ConfigChanged configChanged)
{
if (!configChanged.getGroup().equals("npcindicators"))
{
return;
}
updateConfig();
highlights = getHighlights();
rebuildAllNpcs();
}
private void onFocusChanged(FocusChanged focusChanged)
{
if (!focusChanged.isFocused())
{
hotKeyPressed = false;
}
}
private void onMenuEntryAdded(MenuEntryAdded event)
{
MenuEntry[] menuEntries = client.getMenuEntries();
String target = event.getTarget();
int type = event.getType();
if (type >= MENU_ACTION_DEPRIORITIZE_OFFSET)
{
type -= MENU_ACTION_DEPRIORITIZE_OFFSET;
}
if (this.highlightMenuNames &&
NPC_MENU_ACTIONS.contains(MenuAction.of(type)) &&
highlightedNpcs.stream().anyMatch(npc -> npc.getIndex() == event.getIdentifier()))
{
final MenuEntry menuEntry = menuEntries[menuEntries.length - 1];
menuEntry.setTarget(target);
client.setMenuEntries(menuEntries);
}
else if (hotKeyPressed && type == MenuAction.EXAMINE_NPC.getId())
{
// Add tag option
menuEntries = Arrays.copyOf(menuEntries, menuEntries.length + 1);
final MenuEntry tagEntry = menuEntries[menuEntries.length - 1] = new MenuEntry();
tagEntry.setOption(npcTags.contains(event.getIdentifier()) ? UNTAG : TAG);
tagEntry.setTarget(event.getTarget());
tagEntry.setParam0(event.getActionParam0());
tagEntry.setParam1(event.getActionParam1());
tagEntry.setIdentifier(event.getIdentifier());
tagEntry.setType(MenuAction.RUNELITE.getId());
client.setMenuEntries(menuEntries);
}
}
private void onMenuOptionClicked(MenuOptionClicked click)
{
if (click.getMenuAction() != MenuAction.RUNELITE
|| (!click.getOption().equals(TAG)
&& !click.getOption().equals(UNTAG)))
{
return;
}
final int id = click.getIdentifier();
final boolean removed = npcTags.remove(id);
final NPC[] cachedNPCs = client.getCachedNPCs();
final NPC npc = cachedNPCs[id];
if (npc == null || npc.getName() == null)
{
return;
}
if (removed)
{
MemorizedNpc mn = memorizedNpcs.get(npc.getIndex());
if (mn != null && isNpcMemorizationUnnecessary(mn))
{
memorizedNpcs.remove(npc.getIndex());
rebuildAllNpcs();
}
}
else
{
npcTags.add(id);
rebuildAllNpcs();
}
click.consume();
}
private void onNpcSpawned(NpcSpawned npcSpawned)
{
NPC npc = npcSpawned.getNpc();
highlightNpcIfMatch(npc);
if (memorizedNpcs.containsKey(npc.getIndex()))
{
spawnedNpcsThisTick.add(npc);
}
}
private void onNpcDefinitionChanged(NpcDefinitionChanged event)
{
NPC npc = event.getNpc();
highlightNpcIfMatch(npc);
MemorizedNpc mn = memorizedNpcs.get(npc.getIndex());
if (mn != null)
{
String npcName = npc.getName();
if (npcName != null)
{
mn.getNpcNames().add(npcName);
}
}
}
private void onNpcDespawned(NpcDespawned npcDespawned)
{
final NPC npc = npcDespawned.getNpc();
if (memorizedNpcs.containsKey(npc.getIndex()))
{
despawnedNpcsThisTick.add(npc);
}
highlightedNpcs.remove(npc);
}
private void onGraphicsObjectCreated(GraphicsObjectCreated event)
{
final GraphicsObject go = event.getGraphicsObject();
if (go.getId() == GraphicID.GREY_BUBBLE_TELEPORT)
{
teleportGraphicsObjectSpawnedThisTick.add(WorldPoint.fromLocal(client, go.getLocation()));
}
}
private void onGameTick(GameTick event)
{
removeOldHighlightedRespawns();
validateSpawnedNpcs();
lastTickUpdate = Instant.now();
lastPlayerLocation = client.getLocalPlayer().getWorldLocation();
}
private static boolean isInViewRange(WorldPoint wp1, WorldPoint wp2)
{
int distance = wp1.distanceTo(wp2);
return distance < MAX_ACTOR_VIEW_RANGE;
}
private static WorldPoint getWorldLocationBehind(NPC npc)
{
final int orientation = npc.getOrientation() / 256;
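		// getOrientation() reports facing in 1/2048ths of a circle with 0
		// being south (see the case labels below), so dividing by 256 buckets
		// it into the eight compass sectors.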
int dx = 0, dy = 0;
switch (orientation)
{
case 0: // South
dy = -1;
break;
case 1: // Southwest
dx = -1;
dy = -1;
break;
case 2: // West
dx = -1;
break;
case 3: // Northwest
dx = -1;
dy = 1;
break;
case 4: // North
dy = 1;
break;
case 5: // Northeast
dx = 1;
dy = 1;
break;
case 6: // East
dx = 1;
break;
case 7: // Southeast
dx = 1;
dy = -1;
break;
}
final WorldPoint currWP = npc.getWorldLocation();
return new WorldPoint(currWP.getX() - dx, currWP.getY() - dy, currWP.getPlane());
}
private void highlightNpcIfMatch(final NPC npc)
{
if (npcTags.contains(npc.getIndex()))
{
memorizeNpc(npc);
highlightedNpcs.add(npc);
return;
}
final String npcName = npc.getName();
if (npcName != null)
{
for (String highlight : highlights)
{
if (WildcardMatcher.matches(highlight, npcName))
{
memorizeNpc(npc);
highlightedNpcs.add(npc);
return;
}
}
}
highlightedNpcs.remove(npc);
}
private void memorizeNpc(NPC npc)
{
final int npcIndex = npc.getIndex();
memorizedNpcs.putIfAbsent(npcIndex, new MemorizedNpc(npc));
}
private boolean isNpcMemorizationUnnecessary(final MemorizedNpc mn)
{
if (npcTags.contains(mn.getNpcIndex()))
{
return false;
}
for (String npcName : mn.getNpcNames())
{
for (String highlight : highlights)
{
if (WildcardMatcher.matches(highlight, npcName))
{
return false;
}
}
}
return true;
}
private void removeOldHighlightedRespawns()
{
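		// getTickCount() + 1 matches validateSpawnedNpcs(), which records
		// death ticks before the client tick counter has updated.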
deadNpcsToDisplay.values().removeIf(x -> x.getDiedOnTick() + x.getRespawnTime() <= client.getTickCount() + 1);
}
@VisibleForTesting
List<String> getHighlights()
{
final String configNpcs = this.getNpcToHighlight.toLowerCase();
if (configNpcs.isEmpty())
{
return Collections.emptyList();
}
return Text.fromCSV(configNpcs);
}
private void rebuildAllNpcs()
{
highlightedNpcs.clear();
if (client.getGameState() != GameState.LOGGED_IN &&
client.getGameState() != GameState.LOADING)
{
// NPCs are still in the client after logging out,
// but we don't want to highlight those.
return;
}
Iterator<Map.Entry<Integer, MemorizedNpc>> it = memorizedNpcs.entrySet().iterator();
while (it.hasNext())
{
MemorizedNpc mn = it.next().getValue();
if (isNpcMemorizationUnnecessary(mn))
{
deadNpcsToDisplay.remove(mn.getNpcIndex());
it.remove();
}
}
for (NPC npc : client.getNpcs())
{
highlightNpcIfMatch(npc);
}
}
private void validateSpawnedNpcs()
{
if (skipNextSpawnCheck)
{
skipNextSpawnCheck = false;
}
else
{
for (NPC npc : despawnedNpcsThisTick)
{
if (!teleportGraphicsObjectSpawnedThisTick.isEmpty() && teleportGraphicsObjectSpawnedThisTick.contains(npc.getWorldLocation()))
{
// NPC teleported away, so we don't want to add the respawn timer
continue;
}
if (isInViewRange(client.getLocalPlayer().getWorldLocation(), npc.getWorldLocation()))
{
final MemorizedNpc mn = memorizedNpcs.get(npc.getIndex());
if (mn != null)
{
mn.setDiedOnTick(client.getTickCount() + 1); // This runs before tickCounter updates, so we add 1
if (!mn.getPossibleRespawnLocations().isEmpty())
{
log.debug("Starting {} tick countdown for {}", mn.getRespawnTime(), mn.getNpcNames().iterator().next());
deadNpcsToDisplay.put(mn.getNpcIndex(), mn);
}
}
}
}
for (NPC npc : spawnedNpcsThisTick)
{
if (!teleportGraphicsObjectSpawnedThisTick.isEmpty() &&
(teleportGraphicsObjectSpawnedThisTick.contains(npc.getWorldLocation()) ||
teleportGraphicsObjectSpawnedThisTick.contains(getWorldLocationBehind(npc))))
{
// NPC teleported here, so we don't want to update the respawn timer
continue;
}
if (lastPlayerLocation != null && isInViewRange(lastPlayerLocation, npc.getWorldLocation()))
{
final MemorizedNpc mn = memorizedNpcs.get(npc.getIndex());
if (mn.getDiedOnTick() != -1)
{
final int respawnTime = client.getTickCount() + 1 - mn.getDiedOnTick();
// By killing a monster and leaving the area before seeing it again, an erroneously lengthy
// respawn time can be recorded. Thus, if the respawn time is already set and is greater than
// the observed time, assume that the lower observed respawn time is correct.
if (mn.getRespawnTime() == -1 || respawnTime < mn.getRespawnTime())
{
mn.setRespawnTime(respawnTime);
}
mn.setDiedOnTick(-1);
}
final WorldPoint npcLocation = npc.getWorldLocation();
// An NPC can move in the same tick as it spawns, so we also have
// to consider whatever tile is behind the npc
final WorldPoint possibleOtherNpcLocation = getWorldLocationBehind(npc);
mn.getPossibleRespawnLocations().removeIf(x ->
x.distanceTo(npcLocation) != 0 && x.distanceTo(possibleOtherNpcLocation) != 0);
if (mn.getPossibleRespawnLocations().isEmpty())
{
mn.getPossibleRespawnLocations().add(npcLocation);
mn.getPossibleRespawnLocations().add(possibleOtherNpcLocation);
}
}
}
}
spawnedNpcsThisTick.clear();
despawnedNpcsThisTick.clear();
teleportGraphicsObjectSpawnedThisTick.clear();
}
private void updateConfig()
{
this.renderStyle = config.renderStyle();
this.getNpcToHighlight = config.getNpcToHighlight();
this.getHighlightColor = config.getHighlightColor();
this.drawNames = config.drawNames();
this.drawMinimapNames = config.drawMinimapNames();
this.highlightMenuNames = config.highlightMenuNames();
this.showRespawnTimer = config.showRespawnTimer();
}
}
 | 1 | 15,284 | Excess whitespace throughout the plugin. | open-osrs-runelite | java |
@@ -138,7 +138,7 @@ void EdgeBasedGraphFactory::InsertEdgeBasedNode(const NodeID node_u, const NodeI
NodeID current_edge_source_coordinate_id = node_u;
// traverse arrays from start and end respectively
- for (const auto i : util::irange(0UL, geometry_size))
+ for (const auto i : util::irange(std::size_t{ 0 }, geometry_size))
{
BOOST_ASSERT(current_edge_source_coordinate_id ==
m_compressed_edge_container.GetBucketReference( | 1 | #include "extractor/edge_based_edge.hpp"
#include "extractor/edge_based_graph_factory.hpp"
#include "util/coordinate.hpp"
#include "util/coordinate_calculation.hpp"
#include "util/percent.hpp"
#include "util/integer_range.hpp"
#include "util/lua_util.hpp"
#include "util/simple_logger.hpp"
#include "util/timing_util.hpp"
#include "util/exception.hpp"
#include <boost/assert.hpp>
#include <boost/numeric/conversion/cast.hpp>
#include <algorithm>
#include <cmath>
#include <fstream>
#include <iomanip>
#include <limits>
#include <sstream>
#include <string>
namespace osrm
{
namespace extractor
{
// configuration of turn classification
const bool constexpr INVERT = true;
const bool constexpr RESOLVE_TO_RIGHT = true;
const bool constexpr RESOLVE_TO_LEFT = false;
// what angle is interpreted as going straight
const double constexpr STRAIGHT_ANGLE = 180.;
// if a turn deviates this much from going straight, it will be kept straight
const double constexpr MAXIMAL_ALLOWED_NO_TURN_DEVIATION = 2.;
// angle that lies between two nearly indistinguishable roads
const double constexpr NARROW_TURN_ANGLE = 25.;
// angle difference that can be classified as straight, if it's the only narrow turn
const double constexpr FUZZY_STRAIGHT_ANGLE = 15.;
const double constexpr DISTINCTION_RATIO = 2;
// Configuration to find representative candidate for turn angle calculations
const double constexpr MINIMAL_SEGMENT_LENGTH = 1.;
const double constexpr DESIRED_SEGMENT_LENGTH = 10.;
EdgeBasedGraphFactory::EdgeBasedGraphFactory(
std::shared_ptr<util::NodeBasedDynamicGraph> node_based_graph,
const CompressedEdgeContainer &compressed_edge_container,
const std::unordered_set<NodeID> &barrier_nodes,
const std::unordered_set<NodeID> &traffic_lights,
std::shared_ptr<const RestrictionMap> restriction_map,
const std::vector<QueryNode> &node_info_list,
SpeedProfileProperties speed_profile)
: m_max_edge_id(0), m_node_info_list(node_info_list),
m_node_based_graph(std::move(node_based_graph)),
m_restriction_map(std::move(restriction_map)), m_barrier_nodes(barrier_nodes),
m_traffic_lights(traffic_lights), m_compressed_edge_container(compressed_edge_container),
speed_profile(std::move(speed_profile))
{
}
void EdgeBasedGraphFactory::GetEdgeBasedEdges(
util::DeallocatingVector<EdgeBasedEdge> &output_edge_list)
{
BOOST_ASSERT_MSG(0 == output_edge_list.size(), "Vector is not empty");
using std::swap; // Koenig swap
swap(m_edge_based_edge_list, output_edge_list);
}
void EdgeBasedGraphFactory::GetEdgeBasedNodes(std::vector<EdgeBasedNode> &nodes)
{
#ifndef NDEBUG
for (const EdgeBasedNode &node : m_edge_based_node_list)
{
BOOST_ASSERT(m_node_info_list.at(node.u).lat != INT_MAX);
BOOST_ASSERT(m_node_info_list.at(node.u).lon != INT_MAX);
BOOST_ASSERT(m_node_info_list.at(node.v).lon != INT_MAX);
BOOST_ASSERT(m_node_info_list.at(node.v).lat != INT_MAX);
}
#endif
using std::swap; // Koenig swap
swap(nodes, m_edge_based_node_list);
}
void EdgeBasedGraphFactory::GetStartPointMarkers(std::vector<bool> &node_is_startpoint)
{
using std::swap; // Koenig swap
swap(m_edge_based_node_is_startpoint, node_is_startpoint);
}
void EdgeBasedGraphFactory::GetEdgeBasedNodeWeights(std::vector<EdgeWeight> &output_node_weights)
{
using std::swap; // Koenig swap
swap(m_edge_based_node_weights, output_node_weights);
}
unsigned EdgeBasedGraphFactory::GetHighestEdgeID() { return m_max_edge_id; }
void EdgeBasedGraphFactory::InsertEdgeBasedNode(const NodeID node_u, const NodeID node_v)
{
// merge edges together into one EdgeBasedNode
BOOST_ASSERT(node_u != SPECIAL_NODEID);
BOOST_ASSERT(node_v != SPECIAL_NODEID);
    // find forward edge id and fetch its edge data
const EdgeID edge_id_1 = m_node_based_graph->FindEdge(node_u, node_v);
BOOST_ASSERT(edge_id_1 != SPECIAL_EDGEID);
const EdgeData &forward_data = m_node_based_graph->GetEdgeData(edge_id_1);
    // find reverse edge id and fetch its edge data
const EdgeID edge_id_2 = m_node_based_graph->FindEdge(node_v, node_u);
BOOST_ASSERT(edge_id_2 != SPECIAL_EDGEID);
const EdgeData &reverse_data = m_node_based_graph->GetEdgeData(edge_id_2);
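    // neither direction received an edge id during renumbering, so this
    // segment is not routable and produces no edge-based node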
if (forward_data.edge_id == SPECIAL_NODEID && reverse_data.edge_id == SPECIAL_NODEID)
{
return;
}
if (forward_data.edge_id != SPECIAL_NODEID && reverse_data.edge_id == SPECIAL_NODEID)
m_edge_based_node_weights[forward_data.edge_id] = INVALID_EDGE_WEIGHT;
BOOST_ASSERT(m_compressed_edge_container.HasEntryForID(edge_id_1) ==
m_compressed_edge_container.HasEntryForID(edge_id_2));
BOOST_ASSERT(m_compressed_edge_container.HasEntryForID(edge_id_1));
BOOST_ASSERT(m_compressed_edge_container.HasEntryForID(edge_id_2));
const auto &forward_geometry = m_compressed_edge_container.GetBucketReference(edge_id_1);
BOOST_ASSERT(forward_geometry.size() ==
m_compressed_edge_container.GetBucketReference(edge_id_2).size());
const auto geometry_size = forward_geometry.size();
// There should always be some geometry
BOOST_ASSERT(0 != geometry_size);
NodeID current_edge_source_coordinate_id = node_u;
// traverse arrays from start and end respectively
for (const auto i : util::irange(0UL, geometry_size))
{
BOOST_ASSERT(current_edge_source_coordinate_id ==
m_compressed_edge_container.GetBucketReference(
edge_id_2)[geometry_size - 1 - i].node_id);
const NodeID current_edge_target_coordinate_id = forward_geometry[i].node_id;
BOOST_ASSERT(current_edge_target_coordinate_id != current_edge_source_coordinate_id);
// build edges
m_edge_based_node_list.emplace_back(
forward_data.edge_id, reverse_data.edge_id, current_edge_source_coordinate_id,
current_edge_target_coordinate_id, forward_data.name_id,
m_compressed_edge_container.GetPositionForID(edge_id_1),
m_compressed_edge_container.GetPositionForID(edge_id_2), false, INVALID_COMPONENTID, i,
forward_data.travel_mode, reverse_data.travel_mode);
m_edge_based_node_is_startpoint.push_back(forward_data.startpoint ||
reverse_data.startpoint);
current_edge_source_coordinate_id = current_edge_target_coordinate_id;
}
BOOST_ASSERT(current_edge_source_coordinate_id == node_v);
}
void EdgeBasedGraphFactory::FlushVectorToStream(
std::ofstream &edge_data_file, std::vector<OriginalEdgeData> &original_edge_data_vector) const
{
if (original_edge_data_vector.empty())
{
return;
}
edge_data_file.write((char *)&(original_edge_data_vector[0]),
original_edge_data_vector.size() * sizeof(OriginalEdgeData));
original_edge_data_vector.clear();
}
void EdgeBasedGraphFactory::Run(const std::string &original_edge_data_filename,
lua_State *lua_state,
const std::string &edge_segment_lookup_filename,
const std::string &edge_penalty_filename,
const bool generate_edge_lookup)
{
TIMER_START(renumber);
m_max_edge_id = RenumberEdges() - 1;
TIMER_STOP(renumber);
TIMER_START(generate_nodes);
m_edge_based_node_weights.reserve(m_max_edge_id + 1);
GenerateEdgeExpandedNodes();
TIMER_STOP(generate_nodes);
TIMER_START(generate_edges);
GenerateEdgeExpandedEdges(original_edge_data_filename, lua_state, edge_segment_lookup_filename,
edge_penalty_filename, generate_edge_lookup);
TIMER_STOP(generate_edges);
util::SimpleLogger().Write() << "Timing statistics for edge-expanded graph:";
util::SimpleLogger().Write() << "Renumbering edges: " << TIMER_SEC(renumber) << "s";
util::SimpleLogger().Write() << "Generating nodes: " << TIMER_SEC(generate_nodes) << "s";
util::SimpleLogger().Write() << "Generating edges: " << TIMER_SEC(generate_edges) << "s";
}
/// Renumbers all _forward_ edges and sets the edge_id.
/// A specific numbering is not important. Any unique ID will do.
/// Returns the number of edge based nodes.
unsigned EdgeBasedGraphFactory::RenumberEdges()
{
// renumber edge based node of outgoing edges
unsigned numbered_edges_count = 0;
for (const auto current_node : util::irange(0u, m_node_based_graph->GetNumberOfNodes()))
{
for (const auto current_edge : m_node_based_graph->GetAdjacentEdgeRange(current_node))
{
EdgeData &edge_data = m_node_based_graph->GetEdgeData(current_edge);
// only number incoming edges
if (edge_data.reversed)
{
continue;
}
            // oneway streets always require this self-loop; other streets only need
            // it if a u-turn plus traversal of the street takes longer than the loop
m_edge_based_node_weights.push_back(edge_data.distance + speed_profile.u_turn_penalty);
BOOST_ASSERT(numbered_edges_count < m_node_based_graph->GetNumberOfEdges());
edge_data.edge_id = numbered_edges_count;
++numbered_edges_count;
BOOST_ASSERT(SPECIAL_NODEID != edge_data.edge_id);
}
}
return numbered_edges_count;
}
/// Creates the nodes in the edge expanded graph from edges in the node-based graph.
void EdgeBasedGraphFactory::GenerateEdgeExpandedNodes()
{
util::Percent progress(m_node_based_graph->GetNumberOfNodes());
// loop over all edges and generate new set of nodes
for (const auto node_u : util::irange(0u, m_node_based_graph->GetNumberOfNodes()))
{
BOOST_ASSERT(node_u != SPECIAL_NODEID);
BOOST_ASSERT(node_u < m_node_based_graph->GetNumberOfNodes());
progress.printStatus(node_u);
for (EdgeID e1 : m_node_based_graph->GetAdjacentEdgeRange(node_u))
{
const EdgeData &edge_data = m_node_based_graph->GetEdgeData(e1);
BOOST_ASSERT(e1 != SPECIAL_EDGEID);
const NodeID node_v = m_node_based_graph->GetTarget(e1);
BOOST_ASSERT(SPECIAL_NODEID != node_v);
// pick only every other edge, since we have every edge as an outgoing
            // and incoming edge
if (node_u > node_v)
{
continue;
}
BOOST_ASSERT(node_u < node_v);
            // if we found a non-forward edge, reverse the endpoints and try again
if (edge_data.edge_id == SPECIAL_NODEID)
{
InsertEdgeBasedNode(node_v, node_u);
}
else
{
InsertEdgeBasedNode(node_u, node_v);
}
}
}
BOOST_ASSERT(m_edge_based_node_list.size() == m_edge_based_node_is_startpoint.size());
BOOST_ASSERT(m_max_edge_id + 1 == m_edge_based_node_weights.size());
util::SimpleLogger().Write() << "Generated " << m_edge_based_node_list.size()
<< " nodes in edge-expanded graph";
}
/// Actually it also generates OriginalEdgeData and serializes them...
void EdgeBasedGraphFactory::GenerateEdgeExpandedEdges(
const std::string &original_edge_data_filename,
lua_State *lua_state,
const std::string &edge_segment_lookup_filename,
const std::string &edge_fixed_penalties_filename,
const bool generate_edge_lookup)
{
util::SimpleLogger().Write() << "generating edge-expanded edges";
std::size_t node_based_edge_counter = 0;
std::size_t original_edges_counter = 0;
restricted_turns_counter = 0;
skipped_uturns_counter = 0;
skipped_barrier_turns_counter = 0;
std::ofstream edge_data_file(original_edge_data_filename.c_str(), std::ios::binary);
std::ofstream edge_segment_file;
std::ofstream edge_penalty_file;
if (generate_edge_lookup)
{
edge_segment_file.open(edge_segment_lookup_filename.c_str(), std::ios::binary);
edge_penalty_file.open(edge_fixed_penalties_filename.c_str(), std::ios::binary);
}
// Writes a dummy value at the front that is updated later with the total length
const unsigned length_prefix_empty_space{0};
edge_data_file.write(reinterpret_cast<const char *>(&length_prefix_empty_space),
sizeof(length_prefix_empty_space));
std::vector<OriginalEdgeData> original_edge_data_vector;
original_edge_data_vector.reserve(1024 * 1024);
// Loop over all turns and generate new set of edges.
    // Three nested loops look super-linear, but we are dealing with a (kind of)
// linear number of turns only.
util::Percent progress(m_node_based_graph->GetNumberOfNodes());
for (const auto node_u : util::irange(0u, m_node_based_graph->GetNumberOfNodes()))
{
// progress.printStatus(node_u);
for (const EdgeID edge_form_u : m_node_based_graph->GetAdjacentEdgeRange(node_u))
{
if (m_node_based_graph->GetEdgeData(edge_form_u).reversed)
{
continue;
}
++node_based_edge_counter;
auto turn_candidates = getTurnCandidates(node_u, edge_form_u);
turn_candidates = optimizeCandidates(edge_form_u, turn_candidates);
turn_candidates = suppressTurns(edge_form_u, turn_candidates);
const NodeID node_v = m_node_based_graph->GetTarget(edge_form_u);
for (const auto turn : turn_candidates)
{
if (!turn.valid)
continue;
const double turn_angle = turn.angle;
// only add an edge if turn is not prohibited
const EdgeData &edge_data1 = m_node_based_graph->GetEdgeData(edge_form_u);
const EdgeData &edge_data2 = m_node_based_graph->GetEdgeData(turn.eid);
BOOST_ASSERT(edge_data1.edge_id != edge_data2.edge_id);
BOOST_ASSERT(!edge_data1.reversed);
BOOST_ASSERT(!edge_data2.reversed);
// the following is the core of the loop.
unsigned distance = edge_data1.distance;
if (m_traffic_lights.find(node_v) != m_traffic_lights.end())
{
distance += speed_profile.traffic_signal_penalty;
}
const int turn_penalty = GetTurnPenalty(turn_angle, lua_state);
const TurnInstruction turn_instruction = turn.instruction;
if (turn_instruction == TurnInstruction::UTurn)
{
distance += speed_profile.u_turn_penalty;
}
distance += turn_penalty;
BOOST_ASSERT(m_compressed_edge_container.HasEntryForID(edge_form_u));
original_edge_data_vector.emplace_back(
m_compressed_edge_container.GetPositionForID(edge_form_u), edge_data1.name_id,
turn_instruction, edge_data1.travel_mode);
++original_edges_counter;
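                // flush in chunks so the in-memory buffer stays bounded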
if (original_edge_data_vector.size() > 1024 * 1024 * 10)
{
FlushVectorToStream(edge_data_file, original_edge_data_vector);
}
BOOST_ASSERT(SPECIAL_NODEID != edge_data1.edge_id);
BOOST_ASSERT(SPECIAL_NODEID != edge_data2.edge_id);
// NOTE: potential overflow here if we hit 2^32 routable edges
BOOST_ASSERT(m_edge_based_edge_list.size() <= std::numeric_limits<NodeID>::max());
m_edge_based_edge_list.emplace_back(edge_data1.edge_id, edge_data2.edge_id,
m_edge_based_edge_list.size(), distance, true,
false);
// Here is where we write out the mapping between the edge-expanded edges, and
// the node-based edges that are originally used to calculate the `distance`
// for the edge-expanded edges. About 40 lines back, there is:
//
// unsigned distance = edge_data1.distance;
//
// This tells us that the weight for an edge-expanded-edge is based on the weight
// of the *source* node-based edge. Therefore, we will look up the individual
// segments of the source node-based edge, and write out a mapping between
// those and the edge-based-edge ID.
// External programs can then use this mapping to quickly perform
// updates to the edge-expanded-edge based directly on its ID.
if (generate_edge_lookup)
{
unsigned fixed_penalty = distance - edge_data1.distance;
edge_penalty_file.write(reinterpret_cast<const char *>(&fixed_penalty),
sizeof(fixed_penalty));
const auto node_based_edges =
m_compressed_edge_container.GetBucketReference(edge_form_u);
NodeID previous = node_u;
const unsigned node_count = node_based_edges.size() + 1;
edge_segment_file.write(reinterpret_cast<const char *>(&node_count),
sizeof(node_count));
const QueryNode &first_node = m_node_info_list[previous];
edge_segment_file.write(reinterpret_cast<const char *>(&first_node.node_id),
sizeof(first_node.node_id));
for (auto target_node : node_based_edges)
{
const QueryNode &from = m_node_info_list[previous];
const QueryNode &to = m_node_info_list[target_node.node_id];
const double segment_length =
util::coordinate_calculation::greatCircleDistance(from.lat, from.lon,
to.lat, to.lon);
edge_segment_file.write(reinterpret_cast<const char *>(&to.node_id),
sizeof(to.node_id));
edge_segment_file.write(reinterpret_cast<const char *>(&segment_length),
sizeof(segment_length));
edge_segment_file.write(reinterpret_cast<const char *>(&target_node.weight),
sizeof(target_node.weight));
previous = target_node.node_id;
}
}
}
}
}
FlushVectorToStream(edge_data_file, original_edge_data_vector);
// Finally jump back to the empty space at the beginning and write length prefix
edge_data_file.seekp(std::ios::beg);
const auto length_prefix = boost::numeric_cast<unsigned>(original_edges_counter);
static_assert(sizeof(length_prefix_empty_space) == sizeof(length_prefix), "type mismatch");
edge_data_file.write(reinterpret_cast<const char *>(&length_prefix), sizeof(length_prefix));
util::SimpleLogger().Write() << "Generated " << m_edge_based_node_list.size()
<< " edge based nodes";
util::SimpleLogger().Write() << "Node-based graph contains " << node_based_edge_counter
<< " edges";
util::SimpleLogger().Write() << "Edge-expanded graph ...";
util::SimpleLogger().Write() << " contains " << m_edge_based_edge_list.size() << " edges";
util::SimpleLogger().Write() << " skips " << restricted_turns_counter << " turns, "
"defined by "
<< m_restriction_map->size() << " restrictions";
util::SimpleLogger().Write() << " skips " << skipped_uturns_counter << " U turns";
util::SimpleLogger().Write() << " skips " << skipped_barrier_turns_counter
<< " turns over barriers";
}
// requires sorted candidates
std::vector<EdgeBasedGraphFactory::TurnCandidate>
EdgeBasedGraphFactory::optimizeCandidates(NodeID via_eid,
std::vector<TurnCandidate> turn_candidates) const
{
BOOST_ASSERT_MSG(std::is_sorted(turn_candidates.begin(), turn_candidates.end(),
[](const TurnCandidate &left, const TurnCandidate &right)
{
return left.angle < right.angle;
}),
"Turn Candidates not sorted by angle.");
if (turn_candidates.size() <= 1)
return turn_candidates;
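    // index helpers that treat the angle-sorted candidates as a circular buffer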
const auto getLeft = [&turn_candidates](std::size_t index)
{
return (index + 1) % turn_candidates.size();
};
const auto getRight = [&turn_candidates](std::size_t index)
{
return (index + turn_candidates.size() - 1) % turn_candidates.size();
};
// handle availability of multiple u-turns (e.g. street with separated small parking roads)
if (turn_candidates[0].instruction == TurnInstruction::UTurn && turn_candidates[0].angle == 0)
{
if (turn_candidates[getLeft(0)].instruction == TurnInstruction::UTurn)
turn_candidates[getLeft(0)].instruction = TurnInstruction::TurnSharpLeft;
if (turn_candidates[getRight(0)].instruction == TurnInstruction::UTurn)
turn_candidates[getRight(0)].instruction = TurnInstruction::TurnSharpRight;
}
const auto keepStraight = [](double angle)
{
return std::abs(angle - 180) < 5;
};
for (std::size_t turn_index = 0; turn_index < turn_candidates.size(); ++turn_index)
{
auto &turn = turn_candidates[turn_index];
if (turn.instruction > TurnInstruction::TurnSlightLeft ||
turn.instruction == TurnInstruction::UTurn)
continue;
auto &left = turn_candidates[getLeft(turn_index)];
if (turn.angle == left.angle)
{
util::SimpleLogger().Write(logDEBUG)
<< "[warning] conflicting turn angles, identical road duplicated? "
<< m_node_info_list[m_node_based_graph->GetTarget(via_eid)].lat << " "
<< m_node_info_list[m_node_based_graph->GetTarget(via_eid)].lon << std::endl;
}
if (isConflict(turn.instruction, left.instruction))
{
// begin of a conflicting region
std::size_t conflict_begin = turn_index;
std::size_t conflict_end = getLeft(turn_index);
std::size_t conflict_size = 2;
while (
isConflict(turn_candidates[getLeft(conflict_end)].instruction, turn.instruction) &&
conflict_size < turn_candidates.size())
{
conflict_end = getLeft(conflict_end);
++conflict_size;
}
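            // advance past the conflicting region; if it wrapped around the end
            // of the vector, terminate the scan over this candidate set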
turn_index = (conflict_end < conflict_begin) ? turn_candidates.size() : conflict_end;
if (conflict_size > 3)
{
// check if some turns are invalid to find out about good handling
}
auto &instruction_left_of_end = turn_candidates[getLeft(conflict_end)].instruction;
auto &instruction_right_of_begin =
turn_candidates[getRight(conflict_begin)].instruction;
auto &candidate_at_end = turn_candidates[conflict_end];
auto &candidate_at_begin = turn_candidates[conflict_begin];
if (conflict_size == 2)
{
if (turn.instruction == TurnInstruction::GoStraight)
{
if (instruction_left_of_end != TurnInstruction::TurnSlightLeft &&
instruction_right_of_begin != TurnInstruction::TurnSlightRight)
{
std::int32_t resolved_count = 0;
// uses side-effects in resolve
if (!keepStraight(candidate_at_end.angle) &&
!resolve(candidate_at_end.instruction, instruction_left_of_end,
RESOLVE_TO_LEFT))
util::SimpleLogger().Write(logDEBUG)
<< "[warning] failed to resolve conflict";
else
++resolved_count;
// uses side-effects in resolve
if (!keepStraight(candidate_at_begin.angle) &&
!resolve(candidate_at_begin.instruction, instruction_right_of_begin,
RESOLVE_TO_RIGHT))
util::SimpleLogger().Write(logDEBUG)
<< "[warning] failed to resolve conflict";
else
++resolved_count;
if (resolved_count >= 1 &&
(!keepStraight(candidate_at_begin.angle) ||
!keepStraight(candidate_at_end.angle))) // should always be the
// case, theoretically
continue;
}
}
if (candidate_at_begin.confidence < candidate_at_end.confidence)
{ // if right shift is cheaper, or only option
if (resolve(candidate_at_begin.instruction, instruction_right_of_begin,
RESOLVE_TO_RIGHT))
continue;
else if (resolve(candidate_at_end.instruction, instruction_left_of_end,
RESOLVE_TO_LEFT))
continue;
}
else
{
if (resolve(candidate_at_end.instruction, instruction_left_of_end,
RESOLVE_TO_LEFT))
continue;
else if (resolve(candidate_at_begin.instruction, instruction_right_of_begin,
RESOLVE_TO_RIGHT))
continue;
}
if (isSlightTurn(turn.instruction) || isSharpTurn(turn.instruction))
{
auto resolve_direction =
(turn.instruction == TurnInstruction::TurnSlightRight ||
turn.instruction == TurnInstruction::TurnSharpLeft)
? RESOLVE_TO_RIGHT
: RESOLVE_TO_LEFT;
if (resolve_direction == RESOLVE_TO_RIGHT &&
resolveTransitive(
candidate_at_begin.instruction, instruction_right_of_begin,
turn_candidates[getRight(getRight(conflict_begin))].instruction,
RESOLVE_TO_RIGHT))
continue;
else if (resolve_direction == RESOLVE_TO_LEFT &&
resolveTransitive(
candidate_at_end.instruction, instruction_left_of_end,
turn_candidates[getLeft(getLeft(conflict_end))].instruction,
RESOLVE_TO_LEFT))
continue;
}
}
else if (conflict_size >= 3)
{
// a conflict of size larger than three cannot be handled with the current
// model.
// Handle it as best as possible and keep the rest of the conflicting turns
if (conflict_size > 3)
{
NodeID conflict_location = m_node_based_graph->GetTarget(via_eid);
util::SimpleLogger().Write(logDEBUG)
<< "[warning] found conflict larget than size three at "
<< m_node_info_list[conflict_location].lat << ", "
<< m_node_info_list[conflict_location].lon;
}
if (!resolve(candidate_at_begin.instruction, instruction_right_of_begin,
RESOLVE_TO_RIGHT))
{
if (isSlightTurn(turn.instruction))
resolveTransitive(
candidate_at_begin.instruction, instruction_right_of_begin,
turn_candidates[getRight(getRight(conflict_begin))].instruction,
RESOLVE_TO_RIGHT);
else if (isSharpTurn(turn.instruction))
resolveTransitive(
candidate_at_end.instruction, instruction_left_of_end,
turn_candidates[getLeft(getLeft(conflict_end))].instruction,
RESOLVE_TO_LEFT);
}
if (!resolve(candidate_at_end.instruction, instruction_left_of_end,
RESOLVE_TO_LEFT))
{
if (isSlightTurn(turn.instruction))
resolveTransitive(
candidate_at_end.instruction, instruction_left_of_end,
turn_candidates[getLeft(getLeft(conflict_end))].instruction,
RESOLVE_TO_LEFT);
else if (isSharpTurn(turn.instruction))
resolveTransitive(
candidate_at_begin.instruction, instruction_right_of_begin,
turn_candidates[getRight(getRight(conflict_begin))].instruction,
RESOLVE_TO_RIGHT);
}
}
}
}
return turn_candidates;
}
bool EdgeBasedGraphFactory::isObviousChoice(EdgeID via_eid,
std::size_t turn_index,
const std::vector<TurnCandidate> &turn_candidates) const
{
const auto getLeft = [&turn_candidates](std::size_t index)
{
return (index + 1) % turn_candidates.size();
};
const auto getRight = [&turn_candidates](std::size_t index)
{
return (index + turn_candidates.size() - 1) % turn_candidates.size();
};
const auto &candidate = turn_candidates[turn_index];
const EdgeData &in_data = m_node_based_graph->GetEdgeData(via_eid);
const EdgeData &out_data = m_node_based_graph->GetEdgeData(candidate.eid);
const auto &candidate_to_the_left = turn_candidates[getLeft(turn_index)];
const auto &candidate_to_the_right = turn_candidates[getRight(turn_index)];
const auto hasValidRatio =
[](const TurnCandidate &left, const TurnCandidate ¢er, const TurnCandidate &right)
{
auto angle_left = (left.angle > 180) ? angularDeviation(left.angle, STRAIGHT_ANGLE) : 180;
auto angle_right =
(right.angle < 180) ? angularDeviation(right.angle, STRAIGHT_ANGLE) : 180;
auto self_angle = angularDeviation(center.angle, STRAIGHT_ANGLE);
return angularDeviation(center.angle, STRAIGHT_ANGLE) < NARROW_TURN_ANGLE &&
((center.angle < STRAIGHT_ANGLE)
? (angle_right > self_angle && angle_left / self_angle > DISTINCTION_RATIO)
: (angle_left > self_angle && angle_right / self_angle > DISTINCTION_RATIO));
};
// only valid turn
return turn_candidates.size() == 1 ||
// only non u-turn
(turn_candidates.size() == 2 &&
candidate_to_the_left.instruction == TurnInstruction::UTurn) || // nearly straight turn
angularDeviation(candidate.angle, STRAIGHT_ANGLE) < MAXIMAL_ALLOWED_NO_TURN_DEVIATION ||
hasValidRatio(candidate_to_the_left, candidate, candidate_to_the_right) ||
(in_data.name_id != 0 && in_data.name_id == out_data.name_id &&
angularDeviation(candidate.angle, STRAIGHT_ANGLE) < NARROW_TURN_ANGLE / 2);
}
std::vector<EdgeBasedGraphFactory::TurnCandidate>
EdgeBasedGraphFactory::suppressTurns(EdgeID via_eid,
std::vector<TurnCandidate> turn_candidates) const
{
// remove invalid candidates
BOOST_ASSERT_MSG(std::is_sorted(turn_candidates.begin(), turn_candidates.end(),
[](const TurnCandidate &left, const TurnCandidate &right)
{
return left.angle < right.angle;
}),
"Turn Candidates not sorted by angle.");
const auto end_valid = std::remove_if(turn_candidates.begin(), turn_candidates.end(),
[](const TurnCandidate &candidate)
{
return !candidate.valid;
});
turn_candidates.erase(end_valid, turn_candidates.end());
const auto getLeft = [&turn_candidates](std::size_t index)
{
return (index + 1) % turn_candidates.size();
};
const auto getRight = [&turn_candidates](std::size_t index)
{
return (index + turn_candidates.size() - 1) % turn_candidates.size();
};
const EdgeData &in_data = m_node_based_graph->GetEdgeData(via_eid);
bool has_obvious_with_same_name = false;
double obvious_with_same_name_angle = 0;
for (std::size_t turn_index = 0; turn_index < turn_candidates.size(); ++turn_index)
{
if (m_node_based_graph->GetEdgeData(turn_candidates[turn_index].eid).name_id ==
in_data.name_id &&
isObviousChoice(via_eid, turn_index, turn_candidates))
{
has_obvious_with_same_name = true;
obvious_with_same_name_angle = turn_candidates[turn_index].angle;
break;
}
}
for (std::size_t turn_index = 0; turn_index < turn_candidates.size(); ++turn_index)
{
auto &candidate = turn_candidates[turn_index];
const EdgeData &out_data = m_node_based_graph->GetEdgeData(candidate.eid);
if (candidate.valid && candidate.instruction != TurnInstruction::UTurn)
{
// TODO road category would be useful to indicate obviousness of turn
// check if turn can be omitted or at least changed
const auto &left = turn_candidates[getLeft(turn_index)];
const auto &right = turn_candidates[getRight(turn_index)];
// make very slight instructions straight, if they are the only valid choice going with
// at most a slight turn
if (candidate.instruction < TurnInstruction::ReachViaLocation &&
(!isSlightTurn(getTurnDirection(left.angle)) || !left.valid) &&
(!isSlightTurn(getTurnDirection(right.angle)) || !right.valid) &&
angularDeviation(candidate.angle, STRAIGHT_ANGLE) < FUZZY_STRAIGHT_ANGLE)
candidate.instruction = TurnInstruction::GoStraight;
// TODO this smaller comparison for turns is DANGEROUS, has to be revised if turn
// instructions change
if (candidate.instruction < TurnInstruction::ReachViaLocation)
{
if (in_data.travel_mode ==
out_data.travel_mode) // make sure to always announce mode changes
{
if (isObviousChoice(via_eid, turn_index, turn_candidates))
{
if (in_data.name_id == out_data.name_id) // same road
{
candidate.instruction = TurnInstruction::NoTurn;
}
else if (!has_obvious_with_same_name)
{
// TODO discuss, we might want to keep the current name of the turn. But
// this would mean emitting a turn when you just keep on a road
candidate.instruction = TurnInstruction::NameChanges;
}
else if (candidate.angle < obvious_with_same_name_angle)
candidate.instruction = TurnInstruction::TurnSlightRight;
else
candidate.instruction = TurnInstruction::TurnSlightLeft;
}
else if (candidate.instruction == TurnInstruction::GoStraight &&
has_obvious_with_same_name)
{
if (candidate.angle < obvious_with_same_name_angle)
candidate.instruction = TurnInstruction::TurnSlightRight;
else
candidate.instruction = TurnInstruction::TurnSlightLeft;
}
}
}
}
}
return turn_candidates;
}
std::vector<EdgeBasedGraphFactory::TurnCandidate>
EdgeBasedGraphFactory::getTurnCandidates(NodeID from_node, EdgeID via_eid)
{
std::vector<TurnCandidate> turn_candidates;
const NodeID turn_node = m_node_based_graph->GetTarget(via_eid);
const NodeID only_restriction_to_node =
m_restriction_map->CheckForEmanatingIsOnlyTurn(from_node, turn_node);
const bool is_barrier_node = m_barrier_nodes.find(turn_node) != m_barrier_nodes.end();
for (const EdgeID onto_edge : m_node_based_graph->GetAdjacentEdgeRange(turn_node))
{
bool turn_is_valid = true;
if (m_node_based_graph->GetEdgeData(onto_edge).reversed)
{
turn_is_valid = false;
}
const NodeID to_node = m_node_based_graph->GetTarget(onto_edge);
if (turn_is_valid && (only_restriction_to_node != SPECIAL_NODEID) &&
(to_node != only_restriction_to_node))
{
// We are at an only_-restriction but not at the right turn.
++restricted_turns_counter;
turn_is_valid = false;
}
if (turn_is_valid)
{
if (is_barrier_node)
{
if (from_node != to_node)
{
++skipped_barrier_turns_counter;
turn_is_valid = false;
}
}
else
{
if (from_node == to_node && m_node_based_graph->GetOutDegree(turn_node) > 1)
{
                    auto number_of_emitting_bidirectional_edges = 0;
                    for (auto edge : m_node_based_graph->GetAdjacentEdgeRange(turn_node))
                    {
                        auto target = m_node_based_graph->GetTarget(edge);
                        auto reverse_edge = m_node_based_graph->FindEdge(target, turn_node);
                        if (!m_node_based_graph->GetEdgeData(reverse_edge).reversed)
                        {
                            ++number_of_emitting_bidirectional_edges;
                        }
                    }
                    if (number_of_emitting_bidirectional_edges > 1)
{
++skipped_uturns_counter;
turn_is_valid = false;
}
}
}
}
// only add an edge if turn is not a U-turn except when it is
// at the end of a dead-end street
if (m_restriction_map->CheckIfTurnIsRestricted(from_node, turn_node, to_node) &&
(only_restriction_to_node == SPECIAL_NODEID) && (to_node != only_restriction_to_node))
{
            // The turn is forbidden by an explicit turn restriction.
++restricted_turns_counter;
turn_is_valid = false;
}
// unpack first node of second segment if packed
const auto first_coordinate =
getRepresentativeCoordinate(from_node, turn_node, via_eid, INVERT);
const auto third_coordinate =
getRepresentativeCoordinate(turn_node, to_node, onto_edge, !INVERT);
const auto angle = util::coordinate_calculation::computeAngle(
first_coordinate, m_node_info_list[turn_node], third_coordinate);
const auto turn = AnalyzeTurn(from_node, via_eid, turn_node, onto_edge, to_node, angle);
auto confidence = getTurnConfidence(angle, turn);
if (!turn_is_valid)
confidence *= 0.8; // makes invalid turns more likely to be resolved in conflicts
turn_candidates.push_back({onto_edge, turn_is_valid, angle, turn, confidence});
}
const auto ByAngle = [](const TurnCandidate &first, const TurnCandidate second)
{
return first.angle < second.angle;
};
std::sort(std::begin(turn_candidates), std::end(turn_candidates), ByAngle);
const auto getLeft = [&](std::size_t index)
{
return (index + 1) % turn_candidates.size();
};
const auto getRight = [&](std::size_t index)
{
return (index + turn_candidates.size() - 1) % turn_candidates.size();
};
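    // an invalid candidate that is angularly indistinguishable from a valid
    // neighbour adds no information and can be dropped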
const auto isInvalidEquivalent = [&](std::size_t this_turn, std::size_t valid_turn)
{
if (!turn_candidates[valid_turn].valid || turn_candidates[this_turn].valid)
return false;
return angularDeviation(turn_candidates[this_turn].angle,
turn_candidates[valid_turn].angle) < NARROW_TURN_ANGLE;
};
for (std::size_t index = 0; index < turn_candidates.size(); ++index)
{
if (isInvalidEquivalent(index, getRight(index)) ||
isInvalidEquivalent(index, getLeft(index)))
{
turn_candidates.erase(turn_candidates.begin() + index);
--index;
}
}
return turn_candidates;
}
int EdgeBasedGraphFactory::GetTurnPenalty(double angle, lua_State *lua_state) const
{
if (speed_profile.has_turn_penalty_function)
{
try
{
// call lua profile to compute turn penalty
double penalty =
luabind::call_function<double>(lua_state, "turn_function", 180. - angle);
return static_cast<int>(penalty);
}
catch (const luabind::error &er)
{
util::SimpleLogger().Write(logWARNING) << er.what();
}
}
return 0;
}
// node_u -- (edge_1) --> node_v -- (edge_2) --> node_w
TurnInstruction EdgeBasedGraphFactory::AnalyzeTurn(const NodeID node_u,
const EdgeID edge1,
const NodeID node_v,
const EdgeID edge2,
const NodeID node_w,
const double angle) const
{
const EdgeData &data1 = m_node_based_graph->GetEdgeData(edge1);
const EdgeData &data2 = m_node_based_graph->GetEdgeData(edge2);
if (node_u == node_w)
{
return TurnInstruction::UTurn;
}
    // roundabouts need to be handled explicitly
if (data1.roundabout && data2.roundabout)
{
// Is a turn possible? If yes, we stay on the roundabout!
if (1 == m_node_based_graph->GetDirectedOutDegree(node_v))
{
// No turn possible.
return TurnInstruction::NoTurn;
}
return TurnInstruction::StayOnRoundAbout;
}
// Does turn start or end on roundabout?
if (data1.roundabout || data2.roundabout)
{
// We are entering the roundabout
if ((!data1.roundabout) && data2.roundabout)
{
return TurnInstruction::EnterRoundAbout;
}
// We are leaving the roundabout
if (data1.roundabout && (!data2.roundabout))
{
return TurnInstruction::LeaveRoundAbout;
}
}
// assign a designated turn angle instruction purely based on the angle
return getTurnDirection(angle);
}
QueryNode EdgeBasedGraphFactory::getRepresentativeCoordinate(const NodeID src,
const NodeID tgt,
const EdgeID via_eid,
bool INVERTED) const
{
if (m_compressed_edge_container.HasEntryForID(via_eid))
{
util::FixedPointCoordinate prev = util::FixedPointCoordinate(
m_node_info_list[INVERTED ? tgt : src].lat,
m_node_info_list[INVERTED ? tgt : src].lon),
cur;
        // walk along the edge for roughly the first DESIRED_SEGMENT_LENGTH meters
const auto &geometry = m_compressed_edge_container.GetBucketReference(via_eid);
double dist = 0;
double this_dist = 0;
NodeID prev_id = INVERTED ? tgt : src;
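        // of the two bracketing nodes, prefer the one whose distance along the
        // edge is closer to DESIRED_SEGMENT_LENGTH, while avoiding segments
        // shorter than MINIMAL_SEGMENT_LENGTH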
const auto selectBestCandidate =
[this](const NodeID current, const double current_distance, const NodeID previous,
const double previous_distance)
{
if (current_distance < DESIRED_SEGMENT_LENGTH ||
current_distance - DESIRED_SEGMENT_LENGTH <
DESIRED_SEGMENT_LENGTH - previous_distance ||
previous_distance < MINIMAL_SEGMENT_LENGTH)
{
return m_node_info_list[current];
}
else
{
return m_node_info_list[previous];
}
};
if (INVERTED)
{
for (auto itr = geometry.rbegin(), end = geometry.rend(); itr != end; ++itr)
{
const auto compressed_node = *itr;
cur = util::FixedPointCoordinate(m_node_info_list[compressed_node.node_id].lat,
m_node_info_list[compressed_node.node_id].lon);
this_dist = util::coordinate_calculation::haversineDistance(prev, cur);
if (dist + this_dist > DESIRED_SEGMENT_LENGTH)
{
return selectBestCandidate(compressed_node.node_id, dist + this_dist, prev_id,
dist);
}
dist += this_dist;
prev = cur;
prev_id = compressed_node.node_id;
}
cur = util::FixedPointCoordinate(m_node_info_list[src].lat, m_node_info_list[src].lon);
this_dist = util::coordinate_calculation::haversineDistance(prev, cur);
return selectBestCandidate(src, dist + this_dist, prev_id, dist);
}
else
{
for (auto itr = geometry.begin(), end = geometry.end(); itr != end; ++itr)
{
const auto compressed_node = *itr;
cur = util::FixedPointCoordinate(m_node_info_list[compressed_node.node_id].lat,
m_node_info_list[compressed_node.node_id].lon);
this_dist = util::coordinate_calculation::haversineDistance(prev, cur);
if (dist + this_dist > DESIRED_SEGMENT_LENGTH)
{
return selectBestCandidate(compressed_node.node_id, dist + this_dist, prev_id,
dist);
}
dist += this_dist;
prev = cur;
prev_id = compressed_node.node_id;
}
cur = util::FixedPointCoordinate(m_node_info_list[tgt].lat, m_node_info_list[tgt].lon);
this_dist = util::coordinate_calculation::haversineDistance(prev, cur);
return selectBestCandidate(tgt, dist + this_dist, prev_id, dist);
}
}
// default: If the edge is very short, or we do not have a compressed geometry
return m_node_info_list[INVERTED ? src : tgt];
}
} // namespace extractor
} // namespace osrm
| 1 | 15,701 | What is the problem here? I feel we used this in a lot of places. | Project-OSRM-osrm-backend | cpp |
@@ -109,8 +109,7 @@ class RPN(BaseDetector):
for proposals, meta in zip(proposal_list, img_metas):
proposals[:, :4] /= proposals.new_tensor(meta['scale_factor'])
- # TODO: remove this restriction
- return proposal_list[0].cpu().numpy()
+ return [proposal.cpu().numpy() for proposal in proposal_list]
def aug_test(self, imgs, img_metas, rescale=False):
"""Test function with test time augmentation. | 1 | import mmcv
from mmdet.core import bbox_mapping, tensor2imgs
from ..builder import DETECTORS, build_backbone, build_head, build_neck
from .base import BaseDetector
@DETECTORS.register_module()
class RPN(BaseDetector):
"""Implementation of Region Proposal Network."""
def __init__(self,
backbone,
neck,
rpn_head,
train_cfg,
test_cfg,
pretrained=None):
super(RPN, self).__init__()
self.backbone = build_backbone(backbone)
self.neck = build_neck(neck) if neck is not None else None
rpn_train_cfg = train_cfg.rpn if train_cfg is not None else None
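        # propagate the stage-specific train/test settings into the head config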
rpn_head.update(train_cfg=rpn_train_cfg)
rpn_head.update(test_cfg=test_cfg.rpn)
self.rpn_head = build_head(rpn_head)
self.train_cfg = train_cfg
self.test_cfg = test_cfg
self.init_weights(pretrained=pretrained)
def init_weights(self, pretrained=None):
"""Initialize the weights in detector.
Args:
pretrained (str, optional): Path to pre-trained weights.
Defaults to None.
"""
super(RPN, self).init_weights(pretrained)
self.backbone.init_weights(pretrained=pretrained)
if self.with_neck:
self.neck.init_weights()
self.rpn_head.init_weights()
def extract_feat(self, img):
"""Extract features.
Args:
img (torch.Tensor): Image tensor with shape (n, c, h ,w).
Returns:
list[torch.Tensor]: Multi-level features that may have
different resolutions.
"""
x = self.backbone(img)
if self.with_neck:
x = self.neck(x)
return x
def forward_dummy(self, img):
"""Dummy forward function."""
x = self.extract_feat(img)
rpn_outs = self.rpn_head(x)
return rpn_outs
def forward_train(self,
img,
img_metas,
gt_bboxes=None,
gt_bboxes_ignore=None):
"""
Args:
img (Tensor): Input images of shape (N, C, H, W).
Typically these should be mean centered and std scaled.
img_metas (list[dict]): A List of image info dict where each dict
has: 'img_shape', 'scale_factor', 'flip', and may also contain
'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.
For details on the values of these keys see
:class:`mmdet.datasets.pipelines.Collect`.
gt_bboxes (list[Tensor]): Each item are the truth boxes for each
image in [tl_x, tl_y, br_x, br_y] format.
gt_bboxes_ignore (None | list[Tensor]): Specify which bounding
boxes can be ignored when computing the loss.
Returns:
dict[str, Tensor]: A dictionary of loss components.
"""
if self.train_cfg.rpn.get('debug', False):
self.rpn_head.debug_imgs = tensor2imgs(img)
x = self.extract_feat(img)
losses = self.rpn_head.forward_train(x, img_metas, gt_bboxes, None,
gt_bboxes_ignore)
return losses
def simple_test(self, img, img_metas, rescale=False):
"""Test function without test time augmentation.
Args:
            img (torch.Tensor): Input image tensor with shape (n, c, h, w).
img_metas (list[dict]): List of image information.
rescale (bool, optional): Whether to rescale the results.
Defaults to False.
Returns:
np.ndarray: proposals
"""
x = self.extract_feat(img)
proposal_list = self.rpn_head.simple_test_rpn(x, img_metas)
if rescale:
for proposals, meta in zip(proposal_list, img_metas):
proposals[:, :4] /= proposals.new_tensor(meta['scale_factor'])
# TODO: remove this restriction
return proposal_list[0].cpu().numpy()
def aug_test(self, imgs, img_metas, rescale=False):
"""Test function with test time augmentation.
Args:
imgs (list[torch.Tensor]): List of multiple images
img_metas (list[dict]): List of image information.
rescale (bool, optional): Whether to rescale the results.
Defaults to False.
Returns:
np.ndarray: proposals
"""
proposal_list = self.rpn_head.aug_test_rpn(
self.extract_feats(imgs), img_metas)
if not rescale:
for proposals, img_meta in zip(proposal_list, img_metas[0]):
img_shape = img_meta['img_shape']
scale_factor = img_meta['scale_factor']
flip = img_meta['flip']
flip_direction = img_meta['flip_direction']
proposals[:, :4] = bbox_mapping(proposals[:, :4], img_shape,
scale_factor, flip,
flip_direction)
# TODO: remove this restriction
return proposal_list[0].cpu().numpy()
def show_result(self, data, result, dataset=None, top_k=20):
"""Show RPN proposals on the image.
        Although the batch size is usually assumed to be 1, this method
        iterates over every image in the batch.
"""
img_tensor = data['img'][0]
img_metas = data['img_metas'][0].data[0]
imgs = tensor2imgs(img_tensor, **img_metas[0]['img_norm_cfg'])
assert len(imgs) == len(img_metas)
for img, img_meta in zip(imgs, img_metas):
h, w, _ = img_meta['img_shape']
img_show = img[:h, :w, :]
mmcv.imshow_bboxes(img_show, result, top_k=top_k)
| 1 | 21,069 | Update the docstring. | open-mmlab-mmdetection | py |
@@ -7,10 +7,11 @@
package api
import (
- "errors"
"testing"
"time"
+ "github.com/pkg/errors"
+
"github.com/golang/mock/gomock"
"github.com/stretchr/testify/require"
| 1 | // Copyright (c) 2019 IoTeX Foundation
// This is an alpha (internal) release and is not suitable for production. This source code is provided 'as is' and no
// warranties are given as to title or non-infringement, merchantability or fitness for purpose and, to the extent
// permitted by law, all liability for your use of the code is disclaimed. This source code is governed by Apache
// License 2.0 that can be found in the LICENSE file.
package api
import (
"errors"
"testing"
"time"
"github.com/golang/mock/gomock"
"github.com/stretchr/testify/require"
"github.com/iotexproject/iotex-core/action"
"github.com/iotexproject/iotex-core/blockchain/block"
"github.com/iotexproject/iotex-core/test/identityset"
"github.com/iotexproject/iotex-core/test/mock/mock_apiserver"
)
var (
	errorSend = errors.New("send error")
)
func TestBlockListener(t *testing.T) {
ctrl := gomock.NewController(t)
defer ctrl.Finish()
errChan := make(chan error, 10)
server := mock_apiserver.NewMockStreamBlocksServer(ctrl)
responder := NewBlockListener(server, errChan)
receipts := []*action.Receipt{
{
BlockHeight: 1,
},
{
BlockHeight: 2,
},
}
builder := block.NewTestingBuilder().
SetHeight(1).
SetVersion(111).
SetTimeStamp(time.Now()).
SetReceipts(receipts)
testBlock, err := builder.SignAndBuild(identityset.PrivateKey(0))
require.NoError(t, err)
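	// The first send succeeds; the second simulates a stream send failure,
	// which the responder also surfaces on its error channel.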
server.EXPECT().Send(gomock.Any()).Return(nil).Times(1)
require.NoError(t, responder.Respond(&testBlock))
server.EXPECT().Send(gomock.Any()).Return(errorSend).Times(1)
require.Equal(t, errorSend, responder.Respond(&testBlock))
responder.Exit()
require.Equal(t, errorSend, <-errChan)
require.NoError(t, <-errChan)
}
| 1 | 22,820 | delete the empty line "github.com/pkg/errors" should be grouped with other third party packages same for the rest | iotexproject-iotex-core | go |
@@ -155,7 +155,7 @@ public class RemoteWebDriver implements WebDriver, JavascriptExecutor,
}
private void init(Capabilities capabilities) {
- capabilities = capabilities == null ? new ImmutableCapabilities() : capabilities;
+ this.capabilities = capabilities == null ? new ImmutableCapabilities() : capabilities;
logger.addHandler(LoggingHandler.getInstance());
| 1 | // Licensed to the Software Freedom Conservancy (SFC) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The SFC licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.openqa.selenium.remote;
import static java.util.concurrent.TimeUnit.SECONDS;
import static org.openqa.selenium.remote.CapabilityType.LOGGING_PREFS;
import static org.openqa.selenium.remote.CapabilityType.PLATFORM;
import static org.openqa.selenium.remote.CapabilityType.PLATFORM_NAME;
import static org.openqa.selenium.remote.CapabilityType.SUPPORTS_JAVASCRIPT;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.ImmutableSet;
import org.openqa.selenium.Alert;
import org.openqa.selenium.Beta;
import org.openqa.selenium.By;
import org.openqa.selenium.Capabilities;
import org.openqa.selenium.Cookie;
import org.openqa.selenium.Dimension;
import org.openqa.selenium.HasCapabilities;
import org.openqa.selenium.ImmutableCapabilities;
import org.openqa.selenium.JavascriptExecutor;
import org.openqa.selenium.MutableCapabilities;
import org.openqa.selenium.NoSuchElementException;
import org.openqa.selenium.NoSuchFrameException;
import org.openqa.selenium.NoSuchWindowException;
import org.openqa.selenium.OutputType;
import org.openqa.selenium.Platform;
import org.openqa.selenium.Point;
import org.openqa.selenium.SearchContext;
import org.openqa.selenium.TakesScreenshot;
import org.openqa.selenium.WebDriver;
import org.openqa.selenium.WebDriverException;
import org.openqa.selenium.WebElement;
import org.openqa.selenium.WindowType;
import org.openqa.selenium.interactions.HasInputDevices;
import org.openqa.selenium.interactions.Interactive;
import org.openqa.selenium.interactions.Keyboard;
import org.openqa.selenium.interactions.Mouse;
import org.openqa.selenium.interactions.Sequence;
import org.openqa.selenium.internal.FindsByClassName;
import org.openqa.selenium.internal.FindsByCssSelector;
import org.openqa.selenium.internal.FindsById;
import org.openqa.selenium.internal.FindsByLinkText;
import org.openqa.selenium.internal.FindsByName;
import org.openqa.selenium.internal.FindsByTagName;
import org.openqa.selenium.internal.FindsByXPath;
import org.openqa.selenium.logging.LocalLogs;
import org.openqa.selenium.logging.LogType;
import org.openqa.selenium.logging.LoggingHandler;
import org.openqa.selenium.logging.LoggingPreferences;
import org.openqa.selenium.logging.Logs;
import org.openqa.selenium.logging.NeedsLocalLogs;
import org.openqa.selenium.remote.internal.WebElementToJsonConverter;
import org.openqa.selenium.virtualauthenticator.Credential;
import org.openqa.selenium.virtualauthenticator.HasVirtualAuthenticator;
import org.openqa.selenium.virtualauthenticator.VirtualAuthenticator;
import org.openqa.selenium.virtualauthenticator.VirtualAuthenticatorOptions;
import java.net.URL;
import java.util.Base64;
import java.util.Collection;
import java.util.Collections;
import java.util.Date;
import java.util.HashSet;
import java.util.LinkedHashSet;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.Set;
import java.util.concurrent.TimeUnit;
import java.util.logging.Level;
import java.util.logging.Logger;
import java.util.stream.Collectors;
import java.util.stream.Stream;
@Augmentable
public class RemoteWebDriver implements WebDriver, JavascriptExecutor,
FindsById, FindsByClassName, FindsByLinkText, FindsByName,
FindsByCssSelector, FindsByTagName, FindsByXPath,
HasInputDevices, HasCapabilities, Interactive, TakesScreenshot,
HasVirtualAuthenticator {
// TODO(dawagner): This static logger should be unified with the per-instance localLogs
private static final Logger logger = Logger.getLogger(RemoteWebDriver.class.getName());
private Level level = Level.FINE;
private ErrorHandler errorHandler = new ErrorHandler();
private CommandExecutor executor;
private Capabilities capabilities;
private SessionId sessionId;
private FileDetector fileDetector = new UselessFileDetector();
private ExecuteMethod executeMethod;
private JsonToWebElementConverter converter;
private RemoteKeyboard keyboard;
private RemoteMouse mouse;
private Logs remoteLogs;
private LocalLogs localLogs;
// For cglib
protected RemoteWebDriver() {
init(new ImmutableCapabilities());
}
public RemoteWebDriver(Capabilities capabilities) {
this(new HttpCommandExecutor(null), capabilities);
}
public RemoteWebDriver(CommandExecutor executor, Capabilities capabilities) {
this.executor = executor;
init(capabilities);
if (executor instanceof NeedsLocalLogs) {
((NeedsLocalLogs)executor).setLocalLogs(localLogs);
}
try {
startSession(capabilities);
} catch (RuntimeException e) {
try {
quit();
} catch (Exception ignored) {
// Ignore the clean-up exception. We'll propagate the original failure.
}
throw e;
}
}
public RemoteWebDriver(URL remoteAddress, Capabilities capabilities) {
this(new HttpCommandExecutor(remoteAddress), capabilities);
}
@Beta
public static RemoteWebDriverBuilder builder() {
return new RemoteWebDriverBuilder();
}
private void init(Capabilities capabilities) {
capabilities = capabilities == null ? new ImmutableCapabilities() : capabilities;
logger.addHandler(LoggingHandler.getInstance());
converter = new JsonToWebElementConverter(this);
executeMethod = new RemoteExecuteMethod(this);
keyboard = new RemoteKeyboard(executeMethod);
mouse = new RemoteMouse(executeMethod);
ImmutableSet.Builder<String> builder = new ImmutableSet.Builder<>();
boolean isProfilingEnabled = capabilities.is(CapabilityType.ENABLE_PROFILING_CAPABILITY);
if (isProfilingEnabled) {
builder.add(LogType.PROFILER);
}
LoggingPreferences mergedLoggingPrefs = new LoggingPreferences();
mergedLoggingPrefs.addPreferences((LoggingPreferences) capabilities.getCapability(LOGGING_PREFS));
if (!mergedLoggingPrefs.getEnabledLogTypes().contains(LogType.CLIENT) ||
mergedLoggingPrefs.getLevel(LogType.CLIENT) != Level.OFF) {
builder.add(LogType.CLIENT);
}
Set<String> logTypesToInclude = builder.build();
LocalLogs performanceLogger = LocalLogs.getStoringLoggerInstance(logTypesToInclude);
LocalLogs clientLogs = LocalLogs.getHandlerBasedLoggerInstance(LoggingHandler.getInstance(),
logTypesToInclude);
localLogs = LocalLogs.getCombinedLogsHolder(clientLogs, performanceLogger);
remoteLogs = new RemoteLogs(executeMethod, localLogs);
}
/**
* Set the file detector to be used when sending keyboard input. By default, this is set to a file
* detector that does nothing.
*
* @param detector The detector to use. Must not be null.
* @see FileDetector
* @see LocalFileDetector
* @see UselessFileDetector
*/
public void setFileDetector(FileDetector detector) {
if (detector == null) {
throw new WebDriverException("You may not set a file detector that is null");
}
fileDetector = detector;
}
public SessionId getSessionId() {
return sessionId;
}
protected void setSessionId(String opaqueKey) {
sessionId = new SessionId(opaqueKey);
}
protected void startSession(Capabilities capabilities) {
Response response = execute(DriverCommand.NEW_SESSION(capabilities));
Map<String, Object> rawCapabilities = (Map<String, Object>) response.getValue();
MutableCapabilities returnedCapabilities = new MutableCapabilities();
for (Map.Entry<String, Object> entry : rawCapabilities.entrySet()) {
// Handle the platform later
if (PLATFORM.equals(entry.getKey()) || PLATFORM_NAME.equals(entry.getKey())) {
continue;
}
returnedCapabilities.setCapability(entry.getKey(), entry.getValue());
}
String platformString = (String) rawCapabilities.getOrDefault(PLATFORM,
rawCapabilities.get(PLATFORM_NAME));
Platform platform;
try {
if (platformString == null || "".equals(platformString)) {
platform = Platform.ANY;
} else {
platform = Platform.fromString(platformString);
}
} catch (WebDriverException e) {
// The server probably responded with a name matching the os.name
// system property. Try to recover and parse this.
platform = Platform.extractFromSysProperty(platformString);
}
returnedCapabilities.setCapability(PLATFORM, platform);
returnedCapabilities.setCapability(PLATFORM_NAME, platform);
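    // Normalize the javascript capability: servers may report it as a string
    // or a boolean, and it defaults to true when absent.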
if (rawCapabilities.containsKey(SUPPORTS_JAVASCRIPT)) {
Object raw = rawCapabilities.get(SUPPORTS_JAVASCRIPT);
if (raw instanceof String) {
returnedCapabilities.setCapability(SUPPORTS_JAVASCRIPT, Boolean.parseBoolean((String) raw));
} else if (raw instanceof Boolean) {
returnedCapabilities.setCapability(SUPPORTS_JAVASCRIPT, ((Boolean) raw).booleanValue());
}
} else {
returnedCapabilities.setCapability(SUPPORTS_JAVASCRIPT, true);
}
this.capabilities = returnedCapabilities;
sessionId = new SessionId(response.getSessionId());
}
public ErrorHandler getErrorHandler() {
return errorHandler;
}
public void setErrorHandler(ErrorHandler handler) {
this.errorHandler = handler;
}
public CommandExecutor getCommandExecutor() {
return executor;
}
protected void setCommandExecutor(CommandExecutor executor) {
this.executor = executor;
}
@Override
public Capabilities getCapabilities() {
return capabilities;
}
@Override
public void get(String url) {
execute(DriverCommand.GET(url));
}
@Override
public String getTitle() {
Response response = execute(DriverCommand.GET_TITLE);
Object value = response.getValue();
return value == null ? "" : value.toString();
}
@Override
public String getCurrentUrl() {
Response response = execute(DriverCommand.GET_CURRENT_URL);
if (response == null || response.getValue() == null) {
throw new WebDriverException("Remote browser did not respond to getCurrentUrl");
}
return response.getValue().toString();
}
@Override
public <X> X getScreenshotAs(OutputType<X> outputType) throws WebDriverException {
Response response = execute(DriverCommand.SCREENSHOT);
Object result = response.getValue();
if (result instanceof String) {
String base64EncodedPng = (String) result;
return outputType.convertFromBase64Png(base64EncodedPng);
} else if (result instanceof byte[]) {
String base64EncodedPng = new String((byte[]) result);
return outputType.convertFromBase64Png(base64EncodedPng);
} else {
throw new RuntimeException(String.format("Unexpected result for %s command: %s",
DriverCommand.SCREENSHOT,
result == null ? "null" : result.getClass().getName() + " instance"));
}
}
@Override
public List<WebElement> findElements(By by) {
return by.findElements(this);
}
@Override
public WebElement findElement(By by) {
return by.findElement(this);
}
protected WebElement findElement(String by, String using) {
if (using == null) {
throw new IllegalArgumentException("Cannot find elements when the selector is null.");
}
Response response = execute(DriverCommand.FIND_ELEMENT(by, using));
Object value = response.getValue();
if (value == null) { // see https://github.com/SeleniumHQ/selenium/issues/5809
throw new NoSuchElementException(String.format("Cannot locate an element using %s=%s", by, using));
}
WebElement element;
try {
element = (WebElement) value;
} catch (ClassCastException ex) {
throw new WebDriverException("Returned value cannot be converted to WebElement: " + value, ex);
}
setFoundBy(this, element, by, using);
return element;
}
protected void setFoundBy(SearchContext context, WebElement element, String by, String using) {
if (element instanceof RemoteWebElement) {
RemoteWebElement remoteElement = (RemoteWebElement) element;
remoteElement.setFoundBy(context, by, using);
remoteElement.setFileDetector(getFileDetector());
}
}
@SuppressWarnings("unchecked")
protected List<WebElement> findElements(String by, String using) {
if (using == null) {
throw new IllegalArgumentException("Cannot find elements when the selector is null.");
}
Response response = execute(DriverCommand.FIND_ELEMENTS(by, using));
Object value = response.getValue();
if (value == null) { // see https://github.com/SeleniumHQ/selenium/issues/4555
return Collections.emptyList();
}
List<WebElement> allElements;
try {
allElements = (List<WebElement>) value;
} catch (ClassCastException ex) {
throw new WebDriverException("Returned value cannot be converted to List<WebElement>: " + value, ex);
}
for (WebElement element : allElements) {
setFoundBy(this, element, by, using);
}
return allElements;
}
@Override
public WebElement findElementById(String using) {
return findElement("id", using);
}
@Override
public List<WebElement> findElementsById(String using) {
return findElements("id", using);
}
@Override
public WebElement findElementByLinkText(String using) {
return findElement("link text", using);
}
@Override
public List<WebElement> findElementsByLinkText(String using) {
return findElements("link text", using);
}
@Override
public WebElement findElementByPartialLinkText(String using) {
return findElement("partial link text", using);
}
@Override
public List<WebElement> findElementsByPartialLinkText(String using) {
return findElements("partial link text", using);
}
@Override
public WebElement findElementByTagName(String using) {
return findElement("tag name", using);
}
@Override
public List<WebElement> findElementsByTagName(String using) {
return findElements("tag name", using);
}
@Override
public WebElement findElementByName(String using) {
return findElement("name", using);
}
@Override
public List<WebElement> findElementsByName(String using) {
return findElements("name", using);
}
@Override
public WebElement findElementByClassName(String using) {
return findElement("class name", using);
}
@Override
public List<WebElement> findElementsByClassName(String using) {
return findElements("class name", using);
}
@Override
public WebElement findElementByCssSelector(String using) {
return findElement("css selector", using);
}
@Override
public List<WebElement> findElementsByCssSelector(String using) {
return findElements("css selector", using);
}
@Override
public WebElement findElementByXPath(String using) {
return findElement("xpath", using);
}
@Override
public List<WebElement> findElementsByXPath(String using) {
return findElements("xpath", using);
}
// Misc
@Override
public String getPageSource() {
return (String) execute(DriverCommand.GET_PAGE_SOURCE).getValue();
}
@Override
public void close() {
execute(DriverCommand.CLOSE);
}
@Override
public void quit() {
// no-op if session id is null. We're only going to make ourselves unhappy
if (sessionId == null) {
return;
}
try {
execute(DriverCommand.QUIT);
} finally {
sessionId = null;
}
}
@Override
@SuppressWarnings({"unchecked"})
public Set<String> getWindowHandles() {
Response response = execute(DriverCommand.GET_WINDOW_HANDLES);
Object value = response.getValue();
try {
List<String> returnedValues = (List<String>) value;
return new LinkedHashSet<>(returnedValues);
} catch (ClassCastException ex) {
throw new WebDriverException(
"Returned value cannot be converted to List<String>: " + value, ex);
}
}
@Override
public String getWindowHandle() {
return String.valueOf(execute(DriverCommand.GET_CURRENT_WINDOW_HANDLE).getValue());
}
@Override
public Object executeScript(String script, Object... args) {
if (!isJavascriptEnabled()) {
throw new UnsupportedOperationException(
"You must be using an underlying instance of WebDriver that supports executing javascript");
}
// Escape the quote marks
script = script.replaceAll("\"", "\\\"");
List<Object> convertedArgs = Stream.of(args).map(new WebElementToJsonConverter()).collect(
Collectors.toList());
return execute(DriverCommand.EXECUTE_SCRIPT(script, convertedArgs)).getValue();
}
@Override
public Object executeAsyncScript(String script, Object... args) {
if (!isJavascriptEnabled()) {
throw new UnsupportedOperationException("You must be using an underlying instance of " +
"WebDriver that supports executing javascript");
}
// Escape the quote marks
script = script.replaceAll("\"", "\\\"");
List<Object> convertedArgs = Stream.of(args).map(new WebElementToJsonConverter()).collect(
Collectors.toList());
return execute(DriverCommand.EXECUTE_ASYNC_SCRIPT(script, convertedArgs)).getValue();
}
private boolean isJavascriptEnabled() {
return capabilities.is(SUPPORTS_JAVASCRIPT);
}
@Override
public TargetLocator switchTo() {
return new RemoteTargetLocator();
}
@Override
public Navigation navigate() {
return new RemoteNavigation();
}
@Override
public Options manage() {
return new RemoteWebDriverOptions();
}
protected void setElementConverter(JsonToWebElementConverter converter) {
this.converter = Objects.requireNonNull(converter, "Element converter must not be null");
}
protected JsonToWebElementConverter getElementConverter() {
return converter;
}
/**
* Sets the RemoteWebDriver's client log level.
*
* @param level The log level to use.
*/
public void setLogLevel(Level level) {
this.level = level;
}
Response execute(CommandPayload payload) {
Command command = new Command(sessionId, payload);
Response response;
long start = System.currentTimeMillis();
String currentName = Thread.currentThread().getName();
Thread.currentThread().setName(
String.format("Forwarding %s on session %s to remote", command.getName(), sessionId));
try {
log(sessionId, command.getName(), command, When.BEFORE);
response = executor.execute(command);
log(sessionId, command.getName(), response, When.AFTER);
if (response == null) {
return null;
}
// Unwrap the response value by converting any JSON objects of the form
// {"ELEMENT": id} to RemoteWebElements.
Object value = getElementConverter().apply(response.getValue());
response.setValue(value);
} catch (WebDriverException e) {
throw e;
} catch (Exception e) {
log(sessionId, command.getName(), command, When.EXCEPTION);
String errorMessage = "Error communicating with the remote browser. " +
"It may have died.";
if (command.getName().equals(DriverCommand.NEW_SESSION)) {
errorMessage = "Could not start a new session. Possible causes are " +
"invalid address of the remote server or browser start-up failure.";
}
UnreachableBrowserException ube = new UnreachableBrowserException(errorMessage, e);
if (getSessionId() != null) {
ube.addInfo(WebDriverException.SESSION_ID, getSessionId().toString());
}
if (getCapabilities() != null) {
ube.addInfo("Capabilities", getCapabilities().toString());
}
throw ube;
} finally {
Thread.currentThread().setName(currentName);
}
try {
errorHandler.throwIfResponseFailed(response, System.currentTimeMillis() - start);
} catch (WebDriverException ex) {
if (command.getParameters() != null && command.getParameters().containsKey("using") && command.getParameters().containsKey("value")) {
ex.addInfo(
"*** Element info",
String.format(
"{Using=%s, value=%s}",
command.getParameters().get("using"),
command.getParameters().get("value")));
}
ex.addInfo(WebDriverException.DRIVER_INFO, this.getClass().getName());
if (getSessionId() != null) {
ex.addInfo(WebDriverException.SESSION_ID, getSessionId().toString());
}
if (getCapabilities() != null) {
ex.addInfo("Capabilities", getCapabilities().toString());
}
throw ex;
}
return response;
}
protected Response execute(String driverCommand, Map<String, ?> parameters) {
return execute(new CommandPayload(driverCommand, parameters));
}
protected Response execute(String command) {
return execute(command, ImmutableMap.of());
}
protected ExecuteMethod getExecuteMethod() {
return executeMethod;
}
@Override
public void perform(Collection<Sequence> actions) {
execute(DriverCommand.ACTIONS(actions));
}
@Override
public void resetInputState() {
execute(DriverCommand.CLEAR_ACTIONS_STATE);
}
@Override
public Keyboard getKeyboard() {
return keyboard;
}
@Override
public Mouse getMouse() {
return mouse;
}
@Override
public VirtualAuthenticator addVirtualAuthenticator(VirtualAuthenticatorOptions options) {
String authenticatorId = (String)
execute(DriverCommand.ADD_VIRTUAL_AUTHENTICATOR, options.toMap()).getValue();
return new RemoteVirtualAuthenticator(authenticatorId);
}
@Override
public void removeVirtualAuthenticator(VirtualAuthenticator authenticator) {
execute(DriverCommand.REMOVE_VIRTUAL_AUTHENTICATOR,
ImmutableMap.of("authenticatorId", authenticator.getId()));
}
/**
* Override this to be notified at key points in the execution of a command.
*
* @param sessionId the session id.
* @param commandName the command that is being executed.
* @param toLog any data that might be interesting.
* @param when verb tense of "Execute" to prefix message
*/
protected void log(SessionId sessionId, String commandName, Object toLog, When when) {
if (!logger.isLoggable(level)) {
return;
}
String text = String.valueOf(toLog);
if (commandName.equals(DriverCommand.EXECUTE_SCRIPT)
|| commandName.equals(DriverCommand.EXECUTE_ASYNC_SCRIPT)) {
if (text.length() > 100 && Boolean.getBoolean("webdriver.remote.shorten_log_messages")) {
text = text.substring(0, 100) + "...";
}
}
switch(when) {
case BEFORE:
logger.log(level, "Executing: " + commandName + " " + text);
break;
case AFTER:
logger.log(level, "Executed: " + text);
break;
case EXCEPTION:
logger.log(level, "Exception: " + text);
break;
default:
logger.log(level, text);
break;
}
}
public FileDetector getFileDetector() {
return fileDetector;
}
protected class RemoteWebDriverOptions implements Options {
@Override
@Beta
public Logs logs() {
return remoteLogs;
}
@Override
public void addCookie(Cookie cookie) {
cookie.validate();
execute(DriverCommand.ADD_COOKIE(cookie));
}
@Override
public void deleteCookieNamed(String name) {
execute(DriverCommand.DELETE_COOKIE(name));
}
@Override
public void deleteCookie(Cookie cookie) {
deleteCookieNamed(cookie.getName());
}
@Override
public void deleteAllCookies() {
execute(DriverCommand.DELETE_ALL_COOKIES);
}
@Override
@SuppressWarnings({"unchecked"})
public Set<Cookie> getCookies() {
Object returned = execute(DriverCommand.GET_ALL_COOKIES).getValue();
Set<Cookie> toReturn = new HashSet<>();
if (!(returned instanceof Collection)) {
return toReturn;
}
((Collection<?>) returned).stream()
.map(o -> (Map<String, Object>) o)
.map(rawCookie -> {
// JSON object keys are defined in
// https://w3c.github.io/webdriver/#dfn-table-for-cookie-conversion.
Cookie.Builder builder =
new Cookie.Builder((String) rawCookie.get("name"), (String) rawCookie.get("value"))
.path((String) rawCookie.get("path"))
.domain((String) rawCookie.get("domain"))
.isSecure(rawCookie.containsKey("secure") && (Boolean) rawCookie.get("secure"))
.isHttpOnly(
rawCookie.containsKey("httpOnly") && (Boolean) rawCookie.get("httpOnly"))
.sameSite((String) rawCookie.get("samesite"));
Number expiryNum = (Number) rawCookie.get("expiry");
builder.expiresOn(expiryNum == null ? null : new Date(SECONDS.toMillis(expiryNum.longValue())));
return builder.build();
})
.forEach(toReturn::add);
return toReturn;
}
@Override
public Cookie getCookieNamed(String name) {
Set<Cookie> allCookies = getCookies();
for (Cookie cookie : allCookies) {
if (cookie.getName().equals(name)) {
return cookie;
}
}
return null;
}
@Override
public Timeouts timeouts() {
return new RemoteTimeouts();
}
@Override
public ImeHandler ime() {
return new RemoteInputMethodManager();
}
@Override
@Beta
public Window window() {
return new RemoteWindow();
}
protected class RemoteInputMethodManager implements WebDriver.ImeHandler {
@Override
@SuppressWarnings("unchecked")
public List<String> getAvailableEngines() {
Response response = execute(DriverCommand.IME_GET_AVAILABLE_ENGINES);
return (List<String>) response.getValue();
}
@Override
public String getActiveEngine() {
Response response = execute(DriverCommand.IME_GET_ACTIVE_ENGINE);
return (String) response.getValue();
}
@Override
public boolean isActivated() {
Response response = execute(DriverCommand.IME_IS_ACTIVATED);
return (Boolean) response.getValue();
}
@Override
public void deactivate() {
execute(DriverCommand.IME_DEACTIVATE);
}
@Override
public void activateEngine(String engine) {
execute(DriverCommand.IME_ACTIVATE_ENGINE(engine));
}
} // RemoteInputMethodManager class
protected class RemoteTimeouts implements Timeouts {
@Override
public Timeouts implicitlyWait(long time, TimeUnit unit) {
execute(DriverCommand.SET_IMPLICIT_WAIT_TIMEOUT(time, unit));
return this;
}
@Override
public Timeouts setScriptTimeout(long time, TimeUnit unit) {
execute(DriverCommand.SET_SCRIPT_TIMEOUT(time, unit));
return this;
}
@Override
public Timeouts pageLoadTimeout(long time, TimeUnit unit) {
execute(DriverCommand.SET_PAGE_LOAD_TIMEOUT(time, unit));
return this;
}
} // timeouts class.
@Beta
protected class RemoteWindow implements Window {
@Override
public void setSize(Dimension targetSize) {
execute(DriverCommand.SET_CURRENT_WINDOW_SIZE(targetSize));
}
@Override
public void setPosition(Point targetPosition) {
execute(DriverCommand.SET_CURRENT_WINDOW_POSITION(targetPosition));
}
@Override
@SuppressWarnings({"unchecked"})
public Dimension getSize() {
Response response = execute(DriverCommand.GET_CURRENT_WINDOW_SIZE);
Map<String, Object> rawSize = (Map<String, Object>) response.getValue();
int width = ((Number) rawSize.get("width")).intValue();
int height = ((Number) rawSize.get("height")).intValue();
return new Dimension(width, height);
}
Map<String, Object> rawPoint;
@Override
@SuppressWarnings("unchecked")
public Point getPosition() {
Response response = execute(DriverCommand.GET_CURRENT_WINDOW_POSITION());
rawPoint = (Map<String, Object>) response.getValue();
int x = ((Number) rawPoint.get("x")).intValue();
int y = ((Number) rawPoint.get("y")).intValue();
return new Point(x, y);
}
@Override
public void maximize() {
execute(DriverCommand.MAXIMIZE_CURRENT_WINDOW);
}
@Override
public void minimize() {
execute(DriverCommand.MINIMIZE_CURRENT_WINDOW);
}
@Override
public void fullscreen() {
execute(DriverCommand.FULLSCREEN_CURRENT_WINDOW);
}
}
}
private class RemoteNavigation implements Navigation {
@Override
public void back() {
execute(DriverCommand.GO_BACK);
}
@Override
public void forward() {
execute(DriverCommand.GO_FORWARD);
}
@Override
public void to(String url) {
get(url);
}
@Override
public void to(URL url) {
get(String.valueOf(url));
}
@Override
public void refresh() {
execute(DriverCommand.REFRESH);
}
}
protected class RemoteTargetLocator implements TargetLocator {
@Override
public WebDriver frame(int frameIndex) {
execute(DriverCommand.SWITCH_TO_FRAME(frameIndex));
return RemoteWebDriver.this;
}
@Override
public WebDriver frame(String frameName) {
String name = frameName.replaceAll("(['\"\\\\#.:;,!?+<>=~*^$|%&@`{}\\-/\\[\\]\\(\\)])", "\\\\$1");
List<WebElement> frameElements = RemoteWebDriver.this.findElements(
By.cssSelector("frame[name='" + name + "'],iframe[name='" + name + "']"));
if (frameElements.size() == 0) {
frameElements = RemoteWebDriver.this.findElements(
By.cssSelector("frame#" + name + ",iframe#" + name));
}
if (frameElements.size() == 0) {
throw new NoSuchFrameException("No frame element found by name or id " + frameName);
}
return frame(frameElements.get(0));
}
@Override
public WebDriver frame(WebElement frameElement) {
Object elementAsJson = new WebElementToJsonConverter().apply(frameElement);
execute(DriverCommand.SWITCH_TO_FRAME(elementAsJson));
return RemoteWebDriver.this;
}
@Override
public WebDriver parentFrame() {
execute(DriverCommand.SWITCH_TO_PARENT_FRAME);
return RemoteWebDriver.this;
}
@Override
public WebDriver window(String windowHandleOrName) {
try {
execute(DriverCommand.SWITCH_TO_WINDOW(windowHandleOrName));
return RemoteWebDriver.this;
} catch (NoSuchWindowException nsw) {
// simulate search by name
String original = getWindowHandle();
for (String handle : getWindowHandles()) {
switchTo().window(handle);
if (windowHandleOrName.equals(executeScript("return window.name"))) {
return RemoteWebDriver.this; // found by name
}
}
switchTo().window(original);
throw nsw;
}
}
@Override
public WebDriver newWindow(WindowType typeHint) {
String original = getWindowHandle();
try {
Response response = execute(DriverCommand.SWITCH_TO_NEW_WINDOW(typeHint));
String newWindowHandle = ((Map<String, Object>) response.getValue()).get("handle").toString();
switchTo().window(newWindowHandle);
return RemoteWebDriver.this;
} catch (WebDriverException ex) {
switchTo().window(original);
throw ex;
}
}
@Override
public WebDriver defaultContent() {
execute(DriverCommand.SWITCH_TO_FRAME(null));
return RemoteWebDriver.this;
}
@Override
public WebElement activeElement() {
Response response = execute(DriverCommand.GET_ACTIVE_ELEMENT);
return (WebElement) response.getValue();
}
@Override
public Alert alert() {
execute(DriverCommand.GET_ALERT_TEXT);
return new RemoteAlert();
}
}
private class RemoteAlert implements Alert {
public RemoteAlert() {
}
@Override
public void dismiss() {
execute(DriverCommand.DISMISS_ALERT);
}
@Override
public void accept() {
execute(DriverCommand.ACCEPT_ALERT);
}
@Override
public String getText() {
return (String) execute(DriverCommand.GET_ALERT_TEXT).getValue();
}
/**
* @param keysToSend character sequence to send to the alert
*
* @throws IllegalArgumentException if keysToSend is null
*/
@Override
public void sendKeys(String keysToSend) {
      if (keysToSend == null) {
        throw new IllegalArgumentException("Keys to send should be a non-null CharSequence");
      }
execute(DriverCommand.SET_ALERT_VALUE(keysToSend));
}
}
private class RemoteVirtualAuthenticator implements VirtualAuthenticator {
private final String id;
public RemoteVirtualAuthenticator(final String id) {
this.id = Objects.requireNonNull(id);
}
@Override
public String getId() {
return id;
}
@Override
public void addCredential(Credential credential) {
execute(DriverCommand.ADD_CREDENTIAL,
new ImmutableMap.Builder<String, Object>()
.putAll(credential.toMap())
.put("authenticatorId", id)
.build());
}
@Override
public List<Credential> getCredentials() {
List<Map<String, Object>> response = (List<Map<String, Object>>)
execute(DriverCommand.GET_CREDENTIALS, ImmutableMap.of("authenticatorId", id)).getValue();
return response.stream().map(Credential::fromMap).collect(Collectors.toList());
}
@Override
public void removeCredential(byte[] credentialId) {
removeCredential(Base64.getUrlEncoder().encodeToString(credentialId));
}
@Override
public void removeCredential(String credentialId) {
execute(DriverCommand.REMOVE_CREDENTIAL,
ImmutableMap.of("authenticatorId", id, "credentialId", credentialId)).getValue();
}
@Override
public void removeAllCredentials() {
execute(DriverCommand.REMOVE_ALL_CREDENTIALS, ImmutableMap.of("authenticatorId", id));
}
@Override
public void setUserVerified(boolean verified) {
execute(DriverCommand.SET_USER_VERIFIED,
ImmutableMap.of("authenticatorId", id, "isUserVerified", verified));
}
}
public enum When {
BEFORE,
AFTER,
EXCEPTION
}
@Override
public String toString() {
Capabilities caps = getCapabilities();
if (caps == null) {
return super.toString();
}
// w3c name first
Object platform = caps.getCapability(PLATFORM_NAME);
if (!(platform instanceof String)) {
platform = caps.getCapability(PLATFORM);
}
if (platform == null) {
platform = "unknown";
}
return String.format(
"%s: %s on %s (%s)",
getClass().getSimpleName(),
caps.getBrowserName(),
platform,
getSessionId());
}
}
 | 1 | 17,454 | Assigning capabilities to the field here is not the best idea. Semantically this field contains the capabilities returned by the browser after session start. So here we should instead amend the capabilities and return them from the `init` method, to be passed later to the `startSession` method (which will assign the capabilities returned by the browser to the field). | SeleniumHQ-selenium | rb
@@ -110,8 +110,6 @@ module RSpec::Core
end
end
- alias_method :abort, :finish
-
def stop
@duration = (RSpec::Core::Time.now - @start).to_f if @start
notify :stop | 1 | module RSpec::Core
class Reporter
NOTIFICATIONS = %W[start message example_group_started example_group_finished example_started
example_passed example_failed example_pending start_dump dump_pending
dump_failures dump_summary seed close stop deprecation deprecation_summary].map(&:to_sym)
def initialize(configuration, *formatters)
@configuration = configuration
@listeners = Hash.new { |h,k| h[k] = [] }
formatters.each do |formatter|
register_listener(formatter, *NOTIFICATIONS)
end
@example_count = @failure_count = @pending_count = 0
@duration = @start = nil
end
# @api
    # @param [Object] An object that wishes to be notified of reporter events
    # @param [Array] Array of symbols representing the events a listener wishes to subscribe to
    #
    # Registers a listener to a list of notifications. The reporter will send notification of
    # events to all registered listeners.
def register_listener(listener, *notifications)
notifications.each do |notification|
@listeners[notification.to_sym] << listener if listener.respond_to?(notification)
end
true
end
def registered_listeners(notification)
@listeners[notification]
end
# @api
    # @overload report(count, &block)
# @param [Integer] count the number of examples being run
# @param [Block] block yields itself for further reporting.
#
# Initializes the report run and yields itself for further reporting. The
# block is required, so that the reporter can manage cleaning up after the
# run.
#
# @example
#
# reporter.report(group.examples.size) do |r|
# example_groups.map {|g| g.run(r) }
# end
#
def report(expected_example_count)
start(expected_example_count)
begin
yield self
ensure
finish
end
end
def start(expected_example_count)
@start = RSpec::Core::Time.now
notify :start, expected_example_count
end
def message(message)
notify :message, message
end
def example_group_started(group)
notify :example_group_started, group unless group.descendant_filtered_examples.empty?
end
def example_group_finished(group)
notify :example_group_finished, group unless group.descendant_filtered_examples.empty?
end
def example_started(example)
@example_count += 1
notify :example_started, example
end
def example_passed(example)
notify :example_passed, example
end
def example_failed(example)
@failure_count += 1
notify :example_failed, example
end
def example_pending(example)
@pending_count += 1
notify :example_pending, example
end
def deprecation(message)
notify :deprecation, message
end
def finish
begin
stop
notify :start_dump
notify :dump_pending
notify :dump_failures
notify :dump_summary, @duration, @example_count, @failure_count, @pending_count
notify :deprecation_summary
notify :seed, @configuration.seed if seed_used?
ensure
notify :close
end
end
alias_method :abort, :finish
def stop
@duration = (RSpec::Core::Time.now - @start).to_f if @start
notify :stop
end
def notify(event, *args, &block)
registered_listeners(event).each do |formatter|
formatter.send(event, *args, &block)
end
end
private
def seed_used?
@configuration.seed && @configuration.seed_used?
end
end
end
| 1 | 10,921 | Is this a breaking change, or is the API private? | rspec-rspec-core | rb |
@@ -0,0 +1,4 @@
+import pandas as pd
+
+test_data = pd.read_json("./dumps/courseData.json");
+test_data = {k: val.groupby('pk')['fields'].apply(list).apply(lambda x: x[0]).to_dict() for k, val in test_data.groupby("model")}; | 1 | 1 | 7,649 | I don't think we need this dependency in this project right now | kantord-LibreLingo | py |
|
@@ -30,8 +30,9 @@ const (
// FanoutName is the name used for the fanout container.
FanoutName = "fanout"
// RetryName is the name used for the retry container.
- RetryName = "retry"
- BrokerCellLabelKey = "brokerCell"
+ RetryName = "retry"
+ BrokerCellLabelKey = "brokerCell"
+ BrokerSystemNamespace = "cloud-run-events"
)
var ( | 1 | /*
Copyright 2020 Google LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package resources
import (
"fmt"
"knative.dev/pkg/kmeta"
intv1alpha1 "github.com/google/knative-gcp/pkg/apis/intevents/v1alpha1"
)
const (
// IngressName is the name used for the ingress container.
IngressName = "ingress"
// FanoutName is the name used for the fanout container.
FanoutName = "fanout"
// RetryName is the name used for the retry container.
RetryName = "retry"
BrokerCellLabelKey = "brokerCell"
)
var (
optionalSecretVolume = true
)
// Args are the common arguments to create a Broker's data plane Deployment.
type Args struct {
ComponentName string
BrokerCell *intv1alpha1.BrokerCell
Image string
ServiceAccountName string
MetricsPort int
AllowIstioSidecar bool
}
// IngressArgs are the arguments to create a Broker's ingress Deployment.
type IngressArgs struct {
Args
Port int
}
// FanoutArgs are the arguments to create a Broker's fanout Deployment.
type FanoutArgs struct {
Args
}
// RetryArgs are the arguments to create a Broker's retry Deployment.
type RetryArgs struct {
Args
}
// AutoscalingArgs are the arguments to create HPA for deployments.
type AutoscalingArgs struct {
ComponentName string
BrokerCell *intv1alpha1.BrokerCell
AvgCPUUtilization int32
AvgMemoryUsage string
MaxReplicas int32
MinReplicas int32
}
// Labels generates the labels present on all resources representing the
// component of the given BrokerCell.
func Labels(brokerCellName, componentName string) map[string]string {
cl := CommonLabels(brokerCellName)
cl["role"] = componentName
return cl
}
func CommonLabels(brokerCellName string) map[string]string {
return map[string]string{
"app": "cloud-run-events",
BrokerCellLabelKey: brokerCellName,
}
}
// Name creates a name for the component (ingress/fanout/retry).
func Name(brokerCellName, componentName string) string {
return kmeta.ChildName(fmt.Sprintf("%s-brokercell-", brokerCellName), componentName)
}
 | 1 | 17,294 | I don't think this is necessary since `CommonLabels` is a public func and every component name is also a public constant. | google-knative-gcp | go
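A minimal Go sketch of the reviewer's point, for illustration only: since `CommonLabels`, `Labels`, and `Name` are exported alongside the component-name constants, a consumer can build selectors and resource names without the new `BrokerSystemNamespace` constant. The import path and the "default" BrokerCell name are assumptions, not taken from the record above.

package main

import (
	"fmt"

	// Assumed import path for the resources package shown above.
	"github.com/google/knative-gcp/pkg/reconciler/brokercell/resources"
)

func main() {
	// Component names and label helpers are already exported, so callers
	// can derive selectors without a hard-coded namespace constant.
	labels := resources.Labels("default", resources.IngressName)
	fmt.Println(labels["app"], labels[resources.BrokerCellLabelKey], labels["role"])

	// Deployment/service names come from the same public helper.
	fmt.Println(resources.Name("default", resources.IngressName))
}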
@@ -155,14 +155,15 @@ module Bolt
"`task.py`) and the extension is case sensitive. When a target's name is `localhost`, "\
"Ruby tasks run with the Bolt Ruby interpreter by default.",
additionalProperties: {
- type: String,
+ type: [String, Array],
_plugin: false
},
propertyNames: {
pattern: "^.?[a-zA-Z0-9]+$"
},
_plugin: true,
- _example: { "rb" => "/usr/bin/ruby" }
+ _example: { "rb" => ["/usr/bin/ruby", "-r", "puppet"],
+ ".py" => "/usr/bin/python3" }
},
"job-poll-interval" => {
type: Integer, | 1 | # frozen_string_literal: true
module Bolt
class Config
module Transport
module Options
LOGIN_SHELLS = %w[sh bash zsh dash ksh powershell].freeze
# Definitions used to validate config options.
# https://github.com/puppetlabs/bolt/blob/main/schemas/README.md
TRANSPORT_OPTIONS = {
"basic-auth-only" => {
type: [TrueClass, FalseClass],
description: "Whether to force basic authentication. This option is only available when using SSL.",
_plugin: true,
_default: false,
_example: true
},
"batch-mode" => {
type: [TrueClass, FalseClass],
description: "Whether to disable password querying. When set to `false`, SSH will fall back to "\
"prompting for a password if key authentication fails. This might cause Bolt to hang. "\
"To prevent Bolt from hanging, you can configure `ssh-command` to use an SSH utility "\
"such as sshpass that supports providing a password non-interactively. For more "\
"information, see [Providing a password non-interactively using "\
"`native-ssh`](troubleshooting.md#providing-a-password-non-interactively-using-native-ssh).",
_plugin: true,
_default: true,
_example: false
},
"bundled-ruby" => {
description: "Whether to use the Ruby bundled with Bolt packages for local targets.",
type: [TrueClass, FalseClass],
_plugin: false,
_example: true,
_default: true
},
"cacert" => {
type: String,
description: "The path to the CA certificate.",
_plugin: true,
_example: "~/.puppetlabs/puppet/cert.pem"
},
"cleanup" => {
type: [TrueClass, FalseClass],
description: "Whether to clean up temporary files created on targets. When running commands on a target, "\
"Bolt might create temporary files. After completing the command, these files are "\
"automatically deleted. This value can be set to 'false' if you wish to leave these "\
"temporary files on the target.",
_plugin: true,
_default: true,
_example: false
},
"connect-timeout" => {
type: Integer,
description: "How long to wait in seconds when establishing connections. Set this value higher if you "\
"frequently encounter connection timeout errors when running Bolt.",
minimum: 1,
_plugin: true,
_default: 10,
_example: 15
},
"copy-command" => {
type: [Array, String],
description: "The command to use when copying files using native SSH. Bolt runs `<copy-command> <src> "\
"<dest>`. This option is used when you need support for features or algorithms that are not "\
"supported by the net-ssh Ruby library. **This option is experimental.** You can read more "\
"about this option in [Native SSH transport](experimental_features.md#native-ssh-transport).",
items: {
type: String
},
_plugin: true,
_default: %w[scp -r],
_example: %w[scp -r -F ~/ssh-config/myconf]
},
"disconnect-timeout" => {
type: Integer,
description: "How long to wait in seconds before force-closing a connection.",
minimum: 1,
_plugin: true,
_default: 5,
_example: 10
},
"encryption-algorithms" => {
type: Array,
description: "A list of encryption algorithms to use when establishing a connection "\
"to a target. Supported algorithms are defined by the Ruby net-ssh library and can be "\
"viewed [here](https://github.com/net-ssh/net-ssh#supported-algorithms). All supported, "\
"non-deprecated algorithms are available by default when this option is not used. To "\
"reference all default algorithms using this option, add 'defaults' to the list of "\
"supported algorithms.",
uniqueItems: true,
items: {
type: String
},
_plugin: true,
_example: %w[defaults idea-cbc]
},
"extensions" => {
type: Array,
description: "A list of file extensions that are accepted for scripts or tasks on "\
"Windows. Scripts with these file extensions rely on the target's file "\
"type association to run. For example, if Python is installed on the "\
"system, a `.py` script runs with `python.exe`. The extensions `.ps1`, "\
"`.rb`, and `.pp` are always allowed and run via hard-coded "\
"executables.",
uniqueItems: true,
items: {
type: String
},
_plugin: true,
_example: [".sh"]
},
"file-protocol" => {
type: String,
description: "Which file transfer protocol to use. Either `winrm` or `smb`. Using `smb` is "\
"recommended for large file transfers.",
enum: %w[smb winrm],
_plugin: true,
_default: "winrm",
_example: "smb"
},
"host" => {
type: String,
description: "The target's hostname.",
_plugin: true,
_example: "docker_host_production"
},
"host-key-algorithms" => {
type: Array,
description: "A list of host key algorithms to use when establishing a connection "\
"to a target. Supported algorithms are defined by the Ruby net-ssh library and can be "\
"viewed [here](https://github.com/net-ssh/net-ssh#supported-algorithms). All supported, "\
"non-deprecated algorithms are available by default when this option is not used. To "\
"reference all default algorithms using this option, add 'defaults' to the list of "\
"supported algorithms.",
uniqueItems: true,
items: {
type: String
},
_plugin: true,
_example: %w[defaults ssh-dss]
},
"host-key-check" => {
type: [TrueClass, FalseClass],
description: "Whether to perform host key validation when connecting.",
_plugin: true,
_example: false
},
"interpreters" => {
type: Hash,
description: "A map of an extension name to the absolute path of an executable, enabling you to "\
"override the shebang defined in a task executable. The extension can optionally be "\
"specified with the `.` character (`.py` and `py` both map to a task executable "\
"`task.py`) and the extension is case sensitive. When a target's name is `localhost`, "\
"Ruby tasks run with the Bolt Ruby interpreter by default.",
additionalProperties: {
type: String,
_plugin: false
},
propertyNames: {
pattern: "^.?[a-zA-Z0-9]+$"
},
_plugin: true,
_example: { "rb" => "/usr/bin/ruby" }
},
"job-poll-interval" => {
type: Integer,
description: "The interval, in seconds, to poll orchestrator for job status.",
minimum: 1,
_plugin: true,
_example: 2
},
"job-poll-timeout" => {
type: Integer,
description: "The time, in seconds, to wait for orchestrator job status.",
minimum: 1,
_plugin: true,
_example: 2000
},
"kex-algorithms" => {
type: Array,
description: "A list of key exchange algorithms to use when establishing a connection "\
"to a target. Supported algorithms are defined by the Ruby net-ssh library and can be "\
"viewed [here](https://github.com/net-ssh/net-ssh#supported-algorithms). All supported, "\
"non-deprecated algorithms are available by default when this option is not used. To "\
"reference all default algorithms using this option, add 'defaults' to the list of "\
"supported algorithms.",
uniqueItems: true,
items: {
type: String
},
_plugin: true,
_example: %w[defaults diffie-hellman-group1-sha1]
},
"load-config" => {
type: [TrueClass, FalseClass],
description: "Whether to load system SSH configuration from '~/.ssh/config' and '/etc/ssh_config'.",
_plugin: true,
_default: true,
_example: false
},
"login-shell" => {
type: String,
description: "Which login shell Bolt should expect on the target. Supported shells are " \
"#{LOGIN_SHELLS.join(', ')}. **This option is experimental.**",
enum: LOGIN_SHELLS,
_plugin: true,
_default: "bash",
_example: "powershell"
},
"mac-algorithms" => {
type: Array,
description: "List of message authentication code algorithms to use when establishing a connection "\
"to a target. Supported algorithms are defined by the Ruby net-ssh library and can be "\
"viewed [here](https://github.com/net-ssh/net-ssh#supported-algorithms). All supported, "\
"non-deprecated algorithms are available by default when this option is not used. To "\
"reference all default algorithms using this option, add 'defaults' to the list of "\
"supported algorithms.",
uniqueItems: true,
items: {
type: String
},
_plugin: true,
_example: %w[defaults hmac-md5]
},
"native-ssh" => {
type: [TrueClass, FalseClass],
description: "This enables the native SSH transport, which shells out to SSH instead of using the "\
"net-ssh Ruby library",
_default: false,
_example: true
},
"password" => {
type: String,
description: "The password to use to login.",
_plugin: true,
_example: "hunter2!"
},
"port" => {
type: Integer,
description: "The port to use when connecting to the target.",
minimum: 0,
_plugin: true,
_example: 22
},
"private-key" => {
type: [Hash, String],
description: "Either the path to the private key file to use for authentication, or "\
"a hash with the key `key-data` and the contents of the private key. Note that "\
"the key cannot be encrypted if using the `key-data` hash.",
required: ["key-data"],
properties: {
"key-data" => {
description: "The contents of the private key.",
type: String
}
},
_plugin: true,
_example: "~/.ssh/id_rsa"
},
"proxyjump" => {
type: String,
description: "A jump host to proxy connections through, and an optional user to connect with.",
format: "uri",
_plugin: true,
_example: "jump.example.com"
},
"read-timeout" => {
type: Integer,
description: "How long to wait in seconds when making requests to the Orchestrator.",
minimum: 1,
_plugin: true,
_example: 15
},
"realm" => {
type: String,
description: "The Kerberos realm (Active Directory domain) to authenticate against.",
_plugin: true,
_example: "BOLT.PRODUCTION"
},
"remote" => {
type: String,
description: "The LXD remote host to use.",
_default: "local",
_plugin: false,
_example: 'myremote'
},
"run-as" => {
type: String,
description: "The user to run commands as after login. The run-as user must be different than the "\
"login user.",
_plugin: true,
_example: "root"
},
"run-as-command" => {
type: Array,
description: "The command to elevate permissions. Bolt appends the user and command strings to the "\
"configured `run-as-command` before running it on the target. This command must not require "\
" aninteractive password prompt, and the `sudo-password` option is ignored when "\
"`run-as-command` is specified. The `run-as-command` must be specified as an array.",
items: {
type: String
},
_plugin: true,
_example: ["sudo", "-nkSEu"]
},
"run-on" => {
type: String,
description: "The proxy target that the task executes on.",
format: "uri",
_plugin: true,
_default: "localhost",
_example: "proxy_target"
},
"script-dir" => {
type: String,
description: "The subdirectory of the tmpdir to use in place of a randomized "\
"subdirectory for uploading and executing temporary files on the "\
"target. It's expected that this directory already exists as a subdir "\
"of tmpdir, which is either configured or defaults to `/tmp`.",
_plugin: true,
_example: "bolt_scripts"
},
"service-url" => {
type: String,
description: "The URL of the host used for API requests.",
format: "uri",
_plugin: true,
_example: "https://api.example.com:8143"
},
"shell-command" => {
type: String,
description: "A shell command to wrap any Docker exec commands in, such as `bash -lc`.",
_plugin: true,
_example: "bash -lc"
},
"smb-port" => {
type: Integer,
description: "The port to use when connecting to the target when file-protocol is set to 'smb'.",
minimum: 0,
_plugin: true,
_example: 445
},
"ssh-command" => {
type: [Array, String],
description: "The command and options to use when SSHing. This option is used when you need support for "\
"features or algorithms that are not supported by the net-ssh Ruby library. **This option "\
"is experimental.** You can read more about this option in [Native SSH "\
"transport](experimental_features.md#native-ssh-transport).",
items: {
type: String
},
_plugin: true,
_default: 'ssh',
_example: %w[ssh -o [email protected]]
},
"ssl" => {
type: [TrueClass, FalseClass],
description: "Whether to use secure https connections for WinRM.",
_plugin: true,
_default: true,
_example: false
},
"ssl-verify" => {
type: [TrueClass, FalseClass],
description: "Whether to verify that the target's certificate matches the cacert.",
_plugin: true,
_default: true,
_example: false
},
"sudo-executable" => {
type: String,
description: "The executable to use when escalating to the configured `run-as` user. This is useful "\
"when you want to escalate using the configured `sudo-password`, since `run-as-command` "\
"does not use `sudo-password` or support prompting. The command executed on the target "\
"is `<sudo-executable> -S -u <user> -p custom_bolt_prompt <command>`. **This option is "\
"experimental.**",
_plugin: true,
_example: "dzdo"
},
"sudo-password" => {
type: String,
description: "The password to use when changing users via `run-as`.",
_plugin: true,
_example: "p@$$w0rd!"
},
"task-environment" => {
type: String,
description: "The environment the orchestrator loads task code from.",
_plugin: true,
_default: "production",
_example: "development"
},
"tmpdir" => {
type: String,
description: "The directory to upload and execute temporary files on the target.",
_plugin: true,
_example: "/tmp/bolt"
},
"token-file" => {
type: String,
description: "The path to the token file.",
_plugin: true,
_example: "~/.puppetlabs/puppet/token.pem"
},
"tty" => {
type: [TrueClass, FalseClass],
description: "Whether to enable tty on exec commands.",
_plugin: true,
_example: true
},
"user" => {
type: String,
description: "The user name to login as.",
_plugin: true,
_example: "bolt"
}
}.freeze
RUN_AS_OPTIONS = %w[
run-as
run-as-command
sudo-executable
sudo-password
].freeze
end
end
end
end
| 1 | 19,247 | The `_example` field should be updated to include an interpreter with an array value. | puppetlabs-bolt | rb |
@@ -532,12 +532,14 @@ var supportedKeyTypes = map[string]acme.KeyType{
// Map of supported protocols.
// HTTP/2 only supports TLS 1.2 and higher.
-var supportedProtocols = map[string]uint16{
+var SupportedProtocols = map[string]uint16{
"tls1.0": tls.VersionTLS10,
"tls1.1": tls.VersionTLS11,
"tls1.2": tls.VersionTLS12,
}
+// NOTE: if updating the above map, also update sslProtocolToStringMap in caddyhttp/fastcgi/fastcgi.go
+
// Map of supported ciphers, used only for parsing config.
//
// Note that, at time of writing, HTTP/2 blacklists 276 cipher suites, | 1 | // Copyright 2015 Light Code Labs, LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package caddytls
import (
"crypto/tls"
"crypto/x509"
"fmt"
"io/ioutil"
"net/url"
"strings"
"github.com/codahale/aesnicheck"
"github.com/mholt/caddy"
"github.com/xenolf/lego/acme"
)
// Config describes how TLS should be configured and used.
type Config struct {
// The hostname or class of hostnames this config is
// designated for; can contain wildcard characters
// according to RFC 6125 §6.4.3 - this field MUST
// be set in order for things to work as expected
Hostname string
// Whether TLS is enabled
Enabled bool
// Minimum and maximum protocol versions to allow
ProtocolMinVersion uint16
ProtocolMaxVersion uint16
// The list of cipher suites; first should be
// TLS_FALLBACK_SCSV to prevent degrade attacks
Ciphers []uint16
// Whether to prefer server cipher suites
PreferServerCipherSuites bool
// The list of preferred curves
CurvePreferences []tls.CurveID
// Client authentication policy
ClientAuth tls.ClientAuthType
// List of client CA certificates to allow, if
// client authentication is enabled
ClientCerts []string
// Manual means user provides own certs and keys
Manual bool
// Managed means config qualifies for implicit,
// automatic, managed TLS; as opposed to the user
// providing and managing the certificate manually
Managed bool
// OnDemand means the class of hostnames this
// config applies to may obtain and manage
// certificates at handshake-time (as opposed
// to pre-loaded at startup); OnDemand certs
// will be managed the same way as preloaded
// ones, however, if an OnDemand cert fails to
// renew, it is removed from the in-memory
// cache; if this is true, Managed must
// necessarily be true
OnDemand bool
// SelfSigned means that this hostname is
// served with a self-signed certificate
// that we generated in memory for convenience
SelfSigned bool
// The endpoint of the directory for the ACME
// CA we are to use
CAUrl string
// The host (ONLY the host, not port) to listen
// on if necessary to start a listener to solve
// an ACME challenge
ListenHost string
// The alternate port (ONLY port, not host) to
// use for the ACME HTTP challenge; if non-empty,
// this port will be used instead of
// HTTPChallengePort to spin up a listener for
// the HTTP challenge
AltHTTPPort string
// The alternate port (ONLY port, not host)
// to use for the ACME TLS-SNI challenge.
// The system must forward TLSSNIChallengePort
// to this port for challenge to succeed
AltTLSSNIPort string
// The string identifier of the DNS provider
// to use when solving the ACME DNS challenge
DNSProvider string
// The email address to use when creating or
// using an ACME account (fun fact: if this
// is set to "off" then this config will not
// qualify for managed TLS)
ACMEEmail string
// The type of key to use when generating
// certificates
KeyType acme.KeyType
// The storage creator; use StorageFor() to get a guaranteed
// non-nil Storage instance. Note, Caddy may call this frequently
// so implementors are encouraged to cache any heavy instantiations.
StorageProvider string
// The state needed to operate on-demand TLS
OnDemandState OnDemandState
// Add the must staple TLS extension to the CSR generated by lego/acme
MustStaple bool
// The list of protocols to choose from for Application Layer
// Protocol Negotiation (ALPN).
ALPN []string
// The map of hostname to certificate hash. This is used to complete
// handshakes and serve the right certificate given the SNI.
Certificates map[string]string
certCache *certificateCache // pointer to the Instance's certificate store
tlsConfig *tls.Config // the final tls.Config created with buildStandardTLSConfig()
}
// OnDemandState contains some state relevant for providing
// on-demand TLS.
type OnDemandState struct {
// The number of certificates that have been issued on-demand
// by this config. It is only safe to modify this count atomically.
// If it reaches MaxObtain, on-demand issuances must fail.
ObtainedCount int32
// Set from max_certs in tls config, it specifies the
// maximum number of certificates that can be issued.
MaxObtain int32
// The url to call to check if an on-demand tls certificate should
// be issued. If a request to the URL fails or returns a non 2xx
// status on-demand issuances must fail.
AskURL *url.URL
}
// NewConfig returns a new Config with a pointer to the instance's
// certificate cache. You will usually need to set Other fields on
// the returned Config for successful practical use.
func NewConfig(inst *caddy.Instance) *Config {
inst.StorageMu.RLock()
certCache, ok := inst.Storage[CertCacheInstStorageKey].(*certificateCache)
inst.StorageMu.RUnlock()
if !ok || certCache == nil {
certCache = &certificateCache{cache: make(map[string]Certificate)}
inst.StorageMu.Lock()
inst.Storage[CertCacheInstStorageKey] = certCache
inst.StorageMu.Unlock()
}
cfg := new(Config)
cfg.Certificates = make(map[string]string)
cfg.certCache = certCache
return cfg
}
// ObtainCert obtains a certificate for name using c, as long
// as a certificate does not already exist in storage for that
// name. The name must qualify and c must be flagged as Managed.
// This function is a no-op if storage already has a certificate
// for name.
//
// It only obtains and stores certificates (and their keys),
// it does not load them into memory. If allowPrompts is true,
// the user may be shown a prompt.
func (c *Config) ObtainCert(name string, allowPrompts bool) error {
if !c.Managed || !HostQualifies(name) {
return nil
}
storage, err := c.StorageFor(c.CAUrl)
if err != nil {
return err
}
siteExists, err := storage.SiteExists(name)
if err != nil {
return err
}
if siteExists {
return nil
}
if c.ACMEEmail == "" {
c.ACMEEmail = getEmail(storage, allowPrompts)
}
client, err := newACMEClient(c, allowPrompts)
if err != nil {
return err
}
return client.Obtain(name)
}
// RenewCert renews the certificate for name using c. It stows the
// renewed certificate and its assets in storage if successful.
func (c *Config) RenewCert(name string, allowPrompts bool) error {
client, err := newACMEClient(c, allowPrompts)
if err != nil {
return err
}
return client.Renew(name)
}
// StorageFor obtains a TLS Storage instance for the given CA URL which should
// be unique for every different ACME CA. If a StorageCreator is set on this
// Config, it will be used. Otherwise the default file storage implementation
// is used. When the error is nil, this is guaranteed to return a non-nil
// Storage instance.
func (c *Config) StorageFor(caURL string) (Storage, error) {
// Validate CA URL
if caURL == "" {
caURL = DefaultCAUrl
}
if caURL == "" {
return nil, fmt.Errorf("cannot create storage without CA URL")
}
caURL = strings.ToLower(caURL)
// scheme required or host will be parsed as path (as of Go 1.6)
if !strings.Contains(caURL, "://") {
caURL = "https://" + caURL
}
u, err := url.Parse(caURL)
if err != nil {
return nil, fmt.Errorf("%s: unable to parse CA URL: %v", caURL, err)
}
if u.Host == "" {
return nil, fmt.Errorf("%s: no host in CA URL", caURL)
}
// Create the storage based on the URL
var s Storage
if c.StorageProvider == "" {
c.StorageProvider = "file"
}
creator, ok := storageProviders[c.StorageProvider]
if !ok {
return nil, fmt.Errorf("%s: Unknown storage: %v", caURL, c.StorageProvider)
}
s, err = creator(u)
if err != nil {
return nil, fmt.Errorf("%s: unable to create custom storage '%v': %v", caURL, c.StorageProvider, err)
}
return s, nil
}
// buildStandardTLSConfig converts cfg (*caddytls.Config) to a *tls.Config
// and stores it in cfg so it can be used in servers. If TLS is disabled,
// no tls.Config is created.
func (c *Config) buildStandardTLSConfig() error {
if !c.Enabled {
return nil
}
config := new(tls.Config)
ciphersAdded := make(map[uint16]struct{})
curvesAdded := make(map[tls.CurveID]struct{})
// add cipher suites
for _, ciph := range c.Ciphers {
if _, ok := ciphersAdded[ciph]; !ok {
ciphersAdded[ciph] = struct{}{}
config.CipherSuites = append(config.CipherSuites, ciph)
}
}
config.PreferServerCipherSuites = c.PreferServerCipherSuites
// add curve preferences
for _, curv := range c.CurvePreferences {
if _, ok := curvesAdded[curv]; !ok {
curvesAdded[curv] = struct{}{}
config.CurvePreferences = append(config.CurvePreferences, curv)
}
}
config.MinVersion = c.ProtocolMinVersion
config.MaxVersion = c.ProtocolMaxVersion
config.ClientAuth = c.ClientAuth
config.NextProtos = c.ALPN
config.GetCertificate = c.GetCertificate
// set up client authentication if enabled
if config.ClientAuth != tls.NoClientCert {
pool := x509.NewCertPool()
clientCertsAdded := make(map[string]struct{})
for _, caFile := range c.ClientCerts {
// don't add cert to pool more than once
if _, ok := clientCertsAdded[caFile]; ok {
continue
}
clientCertsAdded[caFile] = struct{}{}
// Any client with a certificate from this CA will be allowed to connect
caCrt, err := ioutil.ReadFile(caFile)
if err != nil {
return err
}
if !pool.AppendCertsFromPEM(caCrt) {
return fmt.Errorf("error loading client certificate '%s': no certificates were successfully parsed", caFile)
}
}
config.ClientCAs = pool
}
// default cipher suites
if len(config.CipherSuites) == 0 {
config.CipherSuites = getPreferredDefaultCiphers()
}
// for security, ensure TLS_FALLBACK_SCSV is always included first
if len(config.CipherSuites) == 0 || config.CipherSuites[0] != tls.TLS_FALLBACK_SCSV {
config.CipherSuites = append([]uint16{tls.TLS_FALLBACK_SCSV}, config.CipherSuites...)
}
// store the resulting new tls.Config
c.tlsConfig = config
return nil
}
// MakeTLSConfig makes a tls.Config from configs. The returned
// tls.Config is programmed to load the matching caddytls.Config
// based on the hostname in SNI, but that's all. This is used
// to create a single TLS configuration for a listener (a group
// of sites).
func MakeTLSConfig(configs []*Config) (*tls.Config, error) {
if len(configs) == 0 {
return nil, nil
}
configMap := make(configGroup)
for i, cfg := range configs {
if cfg == nil {
// avoid nil pointer dereference below this loop
configs[i] = new(Config)
continue
}
// can't serve TLS and non-TLS on same port
if i > 0 && cfg.Enabled != configs[i-1].Enabled {
thisConfProto, lastConfProto := "not TLS", "not TLS"
if cfg.Enabled {
thisConfProto = "TLS"
}
if configs[i-1].Enabled {
lastConfProto = "TLS"
}
return nil, fmt.Errorf("cannot multiplex %s (%s) and %s (%s) on same listener",
configs[i-1].Hostname, lastConfProto, cfg.Hostname, thisConfProto)
}
// convert this caddytls.Config into a tls.Config
if err := cfg.buildStandardTLSConfig(); err != nil {
return nil, err
}
// if an existing config with this hostname was already
// configured, then they must be identical (or at least
// compatible), otherwise that is a configuration error
if otherConfig, ok := configMap[cfg.Hostname]; ok {
if err := assertConfigsCompatible(cfg, otherConfig); err != nil {
return nil, fmt.Errorf("incompabile TLS configurations for the same SNI "+
"name (%s) on the same listener: %v",
cfg.Hostname, err)
}
}
// key this config by its hostname (overwrites
// configs with the same hostname pattern; should
// be OK since we already asserted they are roughly
// the same); during TLS handshakes, configs are
// loaded based on the hostname pattern, according
// to client's SNI
configMap[cfg.Hostname] = cfg
}
// Is TLS disabled? By now, we know that all
// configs agree whether it is or not, so we
// can just look at the first one. If so,
// we're done here.
if len(configs) == 0 || !configs[0].Enabled {
return nil, nil
}
return &tls.Config{
GetConfigForClient: configMap.GetConfigForClient,
}, nil
}
// assertConfigsCompatible returns an error if the two Configs
// do not have the same (or roughly compatible) configurations.
// If one of the tlsConfig pointers on either Config is nil,
// an error will be returned. If both are nil, no error.
func assertConfigsCompatible(cfg1, cfg2 *Config) error {
c1, c2 := cfg1.tlsConfig, cfg2.tlsConfig
if (c1 == nil && c2 != nil) || (c1 != nil && c2 == nil) {
return fmt.Errorf("one config is not made")
}
if c1 == nil && c2 == nil {
return nil
}
if len(c1.CipherSuites) != len(c2.CipherSuites) {
return fmt.Errorf("different number of allowed cipher suites")
}
for i, ciph := range c1.CipherSuites {
if c2.CipherSuites[i] != ciph {
return fmt.Errorf("different cipher suites or different order")
}
}
if len(c1.CurvePreferences) != len(c2.CurvePreferences) {
return fmt.Errorf("different number of allowed cipher suites")
}
for i, curve := range c1.CurvePreferences {
if c2.CurvePreferences[i] != curve {
return fmt.Errorf("different curve preferences or different order")
}
}
if len(c1.NextProtos) != len(c2.NextProtos) {
return fmt.Errorf("different number of ALPN (NextProtos) values")
}
for i, proto := range c1.NextProtos {
if c2.NextProtos[i] != proto {
return fmt.Errorf("different ALPN (NextProtos) values or different order")
}
}
if c1.PreferServerCipherSuites != c2.PreferServerCipherSuites {
return fmt.Errorf("one prefers server cipher suites, the other does not")
}
if c1.MinVersion != c2.MinVersion {
return fmt.Errorf("minimum TLS version mismatch")
}
if c1.MaxVersion != c2.MaxVersion {
return fmt.Errorf("maximum TLS version mismatch")
}
if c1.ClientAuth != c2.ClientAuth {
return fmt.Errorf("client authentication policy mismatch")
}
return nil
}
// ConfigGetter gets a Config keyed by key.
type ConfigGetter func(c *caddy.Controller) *Config
var configGetters = make(map[string]ConfigGetter)
// RegisterConfigGetter registers fn as the way to get a
// Config for server type serverType.
func RegisterConfigGetter(serverType string, fn ConfigGetter) {
configGetters[serverType] = fn
}
// SetDefaultTLSParams sets the default TLS cipher suites, protocol versions,
// and server preferences of a server.Config if they were not previously set
// (it does not overwrite; only fills in missing values).
func SetDefaultTLSParams(config *Config) {
// If no ciphers provided, use default list
if len(config.Ciphers) == 0 {
config.Ciphers = getPreferredDefaultCiphers()
}
// Not a cipher suite, but still important for mitigating protocol downgrade attacks
// (prepend since having it at end breaks http2 due to non-h2-approved suites before it)
config.Ciphers = append([]uint16{tls.TLS_FALLBACK_SCSV}, config.Ciphers...)
// If no curves provided, use default list
if len(config.CurvePreferences) == 0 {
config.CurvePreferences = defaultCurves
}
// Set default protocol min and max versions - must balance compatibility and security
if config.ProtocolMinVersion == 0 {
config.ProtocolMinVersion = tls.VersionTLS12
}
if config.ProtocolMaxVersion == 0 {
config.ProtocolMaxVersion = tls.VersionTLS12
}
// Prefer server cipher suites
config.PreferServerCipherSuites = true
}
// Map of supported key types
var supportedKeyTypes = map[string]acme.KeyType{
"P384": acme.EC384,
"P256": acme.EC256,
"RSA8192": acme.RSA8192,
"RSA4096": acme.RSA4096,
"RSA2048": acme.RSA2048,
}
// Map of supported protocols.
// HTTP/2 only supports TLS 1.2 and higher.
var supportedProtocols = map[string]uint16{
"tls1.0": tls.VersionTLS10,
"tls1.1": tls.VersionTLS11,
"tls1.2": tls.VersionTLS12,
}
// Map of supported ciphers, used only for parsing config.
//
// Note that, at time of writing, HTTP/2 blacklists 276 cipher suites,
// including all but four of the suites below (the four GCM suites).
// See https://http2.github.io/http2-spec/#BadCipherSuites
//
// TLS_FALLBACK_SCSV is not in this list because we manually ensure
// it is always added (even though it is not technically a cipher suite).
//
// This map, like any map, is NOT ORDERED. Do not range over this map.
var supportedCiphersMap = map[string]uint16{
"ECDHE-ECDSA-AES256-GCM-SHA384": tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,
"ECDHE-RSA-AES256-GCM-SHA384": tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,
"ECDHE-ECDSA-AES128-GCM-SHA256": tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
"ECDHE-RSA-AES128-GCM-SHA256": tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
"ECDHE-ECDSA-WITH-CHACHA20-POLY1305": tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,
"ECDHE-RSA-WITH-CHACHA20-POLY1305": tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,
"ECDHE-RSA-AES256-CBC-SHA": tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,
"ECDHE-RSA-AES128-CBC-SHA": tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,
"ECDHE-ECDSA-AES256-CBC-SHA": tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,
"ECDHE-ECDSA-AES128-CBC-SHA": tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,
"RSA-AES256-CBC-SHA": tls.TLS_RSA_WITH_AES_256_CBC_SHA,
"RSA-AES128-CBC-SHA": tls.TLS_RSA_WITH_AES_128_CBC_SHA,
"ECDHE-RSA-3DES-EDE-CBC-SHA": tls.TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA,
"RSA-3DES-EDE-CBC-SHA": tls.TLS_RSA_WITH_3DES_EDE_CBC_SHA,
}
// List of all the ciphers we want to use by default
var defaultCiphers = []uint16{
tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,
tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,
tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,
tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,
tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,
tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,
tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,
tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,
tls.TLS_RSA_WITH_AES_256_CBC_SHA,
tls.TLS_RSA_WITH_AES_128_CBC_SHA,
}
// List of ciphers we should prefer if native AESNI support is missing
var defaultCiphersNonAESNI = []uint16{
tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,
tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,
tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,
tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,
tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,
tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,
tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,
tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,
tls.TLS_RSA_WITH_AES_256_CBC_SHA,
tls.TLS_RSA_WITH_AES_128_CBC_SHA,
}
// getPreferredDefaultCiphers returns an appropriate cipher suite to use, depending on
// the hardware support available for AES-NI.
//
// See https://github.com/mholt/caddy/issues/1674
func getPreferredDefaultCiphers() []uint16 {
if aesnicheck.HasAESNI() {
return defaultCiphers
}
// Return a cipher suite that prefers ChaCha20
return defaultCiphersNonAESNI
}
// Map of supported curves
// https://golang.org/pkg/crypto/tls/#CurveID
var supportedCurvesMap = map[string]tls.CurveID{
"X25519": tls.X25519,
"P256": tls.CurveP256,
"P384": tls.CurveP384,
"P521": tls.CurveP521,
}
// List of all the curves we want to use by default.
//
// This list should only include curves which are fast by design (e.g. X25519)
// and those for which an optimized assembly implementation exists (e.g. P256).
// The latter ones can be found here: https://github.com/golang/go/tree/master/src/crypto/elliptic
var defaultCurves = []tls.CurveID{
tls.X25519,
tls.CurveP256,
}
const (
// HTTPChallengePort is the officially designated port for
// the HTTP challenge according to the ACME spec.
HTTPChallengePort = "80"
// TLSSNIChallengePort is the officially designated port for
// the TLS-SNI challenge according to the ACME spec.
TLSSNIChallengePort = "443"
// DefaultHTTPAlternatePort is the port on which the ACME
// client will open a listener and solve the HTTP challenge.
// If this alternate port is used instead of the default
// port, then whatever is listening on the default port must
// be capable of proxying or forwarding the request to this
// alternate port.
DefaultHTTPAlternatePort = "5033"
// CertCacheInstStorageKey is the name of the key for
// accessing the certificate storage on the *caddy.Instance.
CertCacheInstStorageKey = "tls_cert_cache"
)
| 1 | 11,995 | Put this in the godoc of SupportedProtocols instead. Preferably we would just use one map though. Why not just use this one? | caddyserver-caddy | go |
@@ -59,13 +59,12 @@ func (container *CronContainer) StopStatsCron() {
}
// newCronContainer creates a CronContainer object.
-func newCronContainer(dockerID *string, name *string, dockerGraphPath string) *CronContainer {
+func newCronContainer(dockerID *string, dockerGraphPath string) *CronContainer {
statePath := filepath.Join(dockerGraphPath, DockerExecDriverPath, *dockerID)
container := &CronContainer{
containerMetadata: &ContainerMetadata{
DockerID: dockerID,
- Name: name,
},
statePath: statePath,
} | 1 | // Copyright 2014-2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License"). You may
// not use this file except in compliance with the License. A copy of the
// License is located at
//
// http://aws.amazon.com/apache2.0/
//
// or in the "license" file accompanying this file. This file is distributed
// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
// express or implied. See the License for the specific language governing
// permissions and limitations under the License.
package stats
import (
"path/filepath"
"time"
"github.com/docker/libcontainer"
"golang.org/x/net/context"
)
const (
// DockerExecDriverPath points to the docker exec driver path.
DockerExecDriverPath = "execdriver/native"
// SleepBetweenUsageDataCollection is the sleep duration between collecting usage data for a container.
SleepBetweenUsageDataCollection = 500 * time.Millisecond
// ContainerStatsBufferLength is the number of usage metrics stored in memory for a container. It is calculated as
// the number of usage metrics gathered per second (2, given the 500ms collection interval) * 60 * the duration in minutes to store the data for (10)
ContainerStatsBufferLength = 1200
)
// ContainerStatsCollector defines methods to get container stats. This interface is defined to
// make testing easier.
type ContainerStatsCollector interface {
getContainerStats(container *CronContainer) (*ContainerStats, error)
}
// LibcontainerStatsCollector implements ContainerStatsCollector.
type LibcontainerStatsCollector struct{}
// StartStatsCron starts a goroutine to periodically pull usage data for the container.
func (container *CronContainer) StartStatsCron() {
// Create the queue to store utilization data from cgroup fs.
container.statsQueue = NewQueue(ContainerStatsBufferLength)
// Create the context to handle deletion of the container from the manager.
// The manager can cancel the cronStats goroutine by calling the StopStatsCron method.
container.ctx, container.cancel = context.WithCancel(context.Background())
go container.cronStats()
}
// StopStatsCron stops the periodic collection of usage data for the container.
func (container *CronContainer) StopStatsCron() {
container.cancel()
}
// newCronContainer creates a CronContainer object.
func newCronContainer(dockerID *string, name *string, dockerGraphPath string) *CronContainer {
statePath := filepath.Join(dockerGraphPath, DockerExecDriverPath, *dockerID)
container := &CronContainer{
containerMetadata: &ContainerMetadata{
DockerID: dockerID,
Name: name,
},
statePath: statePath,
}
container.statsCollector = &LibcontainerStatsCollector{}
return container
}
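// Illustrative lifecycle sketch (not part of the original file): how a
// CronContainer is expected to be driven, assuming the caller owns the
// start/stop calls (the graph path below is an assumed example):
//
//   c := newCronContainer(&dockerID, &name, "/var/lib/docker")
//   c.StartStatsCron() // begins periodic collection into statsQueue
//   // ... container runs ...
//   c.StopStatsCron()  // cancels the context; cronStats returns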
// cronStats periodically pulls usage data for the container from cgroup fs.
func (container *CronContainer) cronStats() {
for {
select {
case <-container.ctx.Done():
return
default:
stats, err := container.statsCollector.getContainerStats(container)
if err != nil {
log.Debug("Error getting stats", "error", err, "contianer", container)
} else {
container.statsQueue.Add(stats)
}
time.Sleep(SleepBetweenUsageDataCollection)
}
}
}
// getContainerStats reads usage data of a container from the cgroup fs.
func (collector *LibcontainerStatsCollector) getContainerStats(container *CronContainer) (*ContainerStats, error) {
state, err := libcontainer.GetState(container.statePath)
if err != nil {
// The state file is not created immediately when a container starts.
// Bubble up the error.
return nil, err
}
// libcontainer.GetStats ignores the config argument. So, don't bother providing one.
containerStats, err := libcontainer.GetStats(nil, state)
if err != nil && !isNetworkStatsError(err) {
log.Error("Error getting libcontainer stats", "err", err)
return nil, err
}
cs := toContainerStats(*containerStats)
return cs, nil
}
| 1 | 13,483 | I'd feel a little safer if the first argument were a string, not a *string unless there's a particular reason for making it a pointer. | aws-amazon-ecs-agent | go |
@@ -22,6 +22,7 @@ import static org.junit.Assert.assertEquals;
import org.junit.Before;
import org.junit.Test;
+import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;
import java.util.Arrays;
| 1 | // Licensed to the Software Freedom Conservancy (SFC) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The SFC licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package com.thoughtworks.selenium;
import static org.junit.Assert.assertEquals;
import org.junit.Before;
import org.junit.Test;
import java.lang.reflect.Method;
import java.util.Arrays;
public class CSVTest {
Method CSV;
@Before
public void setUp() {
Method[] methods = HttpCommandProcessor.class.getDeclaredMethods();
for (int i = 0; i < methods.length; i++) {
if ("parseCSV".equals(methods[i].getName())) {
Method csvMethod = methods[i];
csvMethod.setAccessible(true);
CSV = csvMethod;
break;
}
}
}
public String[] parseCSV(String input, String[] expected) {
System.out.print(input + ": ");
String[] output;
try {
output = (String[]) CSV.invoke(null, input);
} catch (Exception e) {
throw new RuntimeException(e);
}
System.out.println(Arrays.asList(output).toString());
compareStringArrays(expected, output);
return output;
}
@Test
public void testSimple() {
String input = "1,2,3";
String[] expected = new String[] {"1", "2", "3"};
parseCSV(input, expected);
}
@Test
public void testBackSlash() {
String input = "1,2\\,3,4"; // Java-escaped, but not CSV-escaped
String[] expected = new String[] {"1", "2,3", "4"}; // backslash should disappear in output
parseCSV(input, expected);
}
@Test
public void testRandomSingleBackSlash() {
String input = "1,\\2,3"; // Java-escaped, but not CSV-escaped
String[] expected = new String[] {"1", "2", "3"}; // backslash should disappear
parseCSV(input, expected);
}
@Test
public void testDoubleBackSlashBeforeComma() {
String input = "1,2\\\\,3"; // Java-escaped and CSV-escaped
String[] expected = new String[] {"1", "2\\", "3"}; // one backslash should disappear in output
parseCSV(input, expected);
}
@Test
public void testRandomDoubleBackSlash() {
String input = "1,\\\\2,3"; // Java-escaped, and CSV-escaped
String[] expected = new String[] {"1", "\\2", "3"}; // one backslash should disappear in output
parseCSV(input, expected);
}
@Test
public void testTripleBackSlashBeforeComma() {
String input = "1,2\\\\\\,3,4"; // Java-escaped, and CSV-escaped
String[] expected = new String[] {"1", "2\\,3", "4"}; // one backslash should disappear in
// output
parseCSV(input, expected);
}
@Test
public void test4BackSlashesBeforeComma() {
String input = "1,2\\\\\\\\,3"; // Java-escaped, and CSV-escaped
String[] expected = new String[] {"1", "2\\\\", "3"}; // two backslashes should disappear in
// output
parseCSV(input, expected);
}
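// Illustrative addition (not in the original test class): a case mixing both
// escape forms, assuming the backslash semantics exercised above (a single
// backslash escapes the following character and is removed; a double
// backslash yields a literal backslash).
@Test
public void testMixedEscapes() {
String input = "a\\,b,c\\\\,d"; // Java-escaped: actual input is a\,b,c\\,d
String[] expected = new String[] {"a,b", "c\\", "d"};
parseCSV(input, expected);
}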
public void compareStringArrays(String[] expected, String[] actual) {
assertEquals("Wrong number of elements", expected.length, actual.length);
for (int i = 0; i < expected.length; i++) {
assertEquals(expected[i], actual[i]);
}
}
}
| 1 | 19,390 | Can you please revert changes to files in the `thoughtworks` package? This is legacy code and we will eventually phase out RC. | SeleniumHQ-selenium | rb |
@@ -19,7 +19,10 @@ package org.openqa.selenium.grid.graphql;
import graphql.schema.DataFetcher;
import graphql.schema.DataFetchingEnvironment;
+
+import org.openqa.selenium.SessionNotCreatedException;
import org.openqa.selenium.grid.distributor.Distributor;
+import org.openqa.selenium.grid.sessionmap.SessionMap;
import org.openqa.selenium.internal.Require;
import java.net.URI; | 1 | // Licensed to the Software Freedom Conservancy (SFC) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The SFC licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.openqa.selenium.grid.graphql;
import graphql.schema.DataFetcher;
import graphql.schema.DataFetchingEnvironment;
import org.openqa.selenium.grid.distributor.Distributor;
import org.openqa.selenium.internal.Require;
import java.net.URI;
public class GridData implements DataFetcher {
private final Distributor distributor;
private final URI publicUri;
public GridData(Distributor distributor, URI publicUri) {
this.distributor = Require.nonNull("Distributor", distributor);
this.publicUri = Require.nonNull("Grid's public URI", publicUri);
}
@Override
public Object get(DataFetchingEnvironment environment) {
return new Grid(distributor, publicUri);
}
}
| 1 | 17,779 | You can safely revert changes to this file. | SeleniumHQ-selenium | py |
@@ -127,7 +127,6 @@ class AnchorHead(nn.Module):
def loss_single(self, cls_score, bbox_pred, labels, label_weights,
bbox_targets, bbox_weights, num_total_samples, cfg):
- # classification loss
labels = labels.reshape(-1)
label_weights = label_weights.reshape(-1)
cls_score = cls_score.permute(0, 2, 3, 1).reshape( | 1 | from __future__ import division
import numpy as np
import torch
import torch.nn as nn
from mmcv.cnn import normal_init
from mmdet.core import (AnchorGenerator, anchor_target, delta2bbox,
multi_apply, weighted_cross_entropy, weighted_smoothl1,
weighted_binary_cross_entropy,
weighted_sigmoid_focal_loss, multiclass_nms)
from ..registry import HEADS
@HEADS.register_module
class AnchorHead(nn.Module):
"""Anchor-based head (RPN, RetinaNet, SSD, etc.).
Args:
in_channels (int): Number of channels in the input feature map.
feat_channels (int): Number of channels of the feature map.
anchor_scales (Iterable): Anchor scales.
anchor_ratios (Iterable): Anchor aspect ratios.
anchor_strides (Iterable): Anchor strides.
anchor_base_sizes (Iterable): Anchor base sizes.
target_means (Iterable): Mean values of regression targets.
target_stds (Iterable): Std values of regression targets.
use_sigmoid_cls (bool): Whether to use sigmoid loss for classification.
(softmax by default)
use_focal_loss (bool): Whether to use focal loss for classification.
""" # noqa: W605
def __init__(self,
num_classes,
in_channels,
feat_channels=256,
anchor_scales=[8, 16, 32],
anchor_ratios=[0.5, 1.0, 2.0],
anchor_strides=[4, 8, 16, 32, 64],
anchor_base_sizes=None,
target_means=(.0, .0, .0, .0),
target_stds=(1.0, 1.0, 1.0, 1.0),
use_sigmoid_cls=False,
use_focal_loss=False):
super(AnchorHead, self).__init__()
self.in_channels = in_channels
self.num_classes = num_classes
self.feat_channels = feat_channels
self.anchor_scales = anchor_scales
self.anchor_ratios = anchor_ratios
self.anchor_strides = anchor_strides
self.anchor_base_sizes = list(
anchor_strides) if anchor_base_sizes is None else anchor_base_sizes
self.target_means = target_means
self.target_stds = target_stds
self.use_sigmoid_cls = use_sigmoid_cls
self.use_focal_loss = use_focal_loss
self.anchor_generators = []
for anchor_base in self.anchor_base_sizes:
self.anchor_generators.append(
AnchorGenerator(anchor_base, anchor_scales, anchor_ratios))
self.num_anchors = len(self.anchor_ratios) * len(self.anchor_scales)
if self.use_sigmoid_cls:
self.cls_out_channels = self.num_classes - 1
else:
self.cls_out_channels = self.num_classes
self._init_layers()
def _init_layers(self):
self.conv_cls = nn.Conv2d(self.feat_channels,
self.num_anchors * self.cls_out_channels, 1)
self.conv_reg = nn.Conv2d(self.feat_channels, self.num_anchors * 4, 1)
def init_weights(self):
normal_init(self.conv_cls, std=0.01)
normal_init(self.conv_reg, std=0.01)
def forward_single(self, x):
cls_score = self.conv_cls(x)
bbox_pred = self.conv_reg(x)
return cls_score, bbox_pred
def forward(self, feats):
return multi_apply(self.forward_single, feats)
def get_anchors(self, featmap_sizes, img_metas):
"""Get anchors according to feature map sizes.
Args:
featmap_sizes (list[tuple]): Multi-level feature map sizes.
img_metas (list[dict]): Image meta info.
Returns:
tuple: anchors of each image, valid flags of each image
"""
num_imgs = len(img_metas)
num_levels = len(featmap_sizes)
# since feature map sizes of all images are the same, we only compute
# anchors once
multi_level_anchors = []
for i in range(num_levels):
anchors = self.anchor_generators[i].grid_anchors(
featmap_sizes[i], self.anchor_strides[i])
multi_level_anchors.append(anchors)
anchor_list = [multi_level_anchors for _ in range(num_imgs)]
# for each image, we compute valid flags of multi level anchors
valid_flag_list = []
for img_id, img_meta in enumerate(img_metas):
multi_level_flags = []
for i in range(num_levels):
anchor_stride = self.anchor_strides[i]
feat_h, feat_w = featmap_sizes[i]
h, w, _ = img_meta['pad_shape']
valid_feat_h = min(int(np.ceil(h / anchor_stride)), feat_h)
valid_feat_w = min(int(np.ceil(w / anchor_stride)), feat_w)
flags = self.anchor_generators[i].valid_flags(
(feat_h, feat_w), (valid_feat_h, valid_feat_w))
multi_level_flags.append(flags)
valid_flag_list.append(multi_level_flags)
return anchor_list, valid_flag_list
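# Illustrative sketch (not part of the original file): assuming two feature
# levels of sizes (100, 152) and (50, 76) and the default 3 scales x 3 ratios
# (9 anchors per location), get_anchors returns per-image lists:
#
#   anchor_list, valid_flag_list = self.get_anchors(
#       [(100, 152), (50, 76)], img_metas)
#   # anchor_list[img_id][lvl].shape == (H_lvl * W_lvl * 9, 4)
#   # valid_flag_list[img_id][lvl].shape == (H_lvl * W_lvl * 9,)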
def loss_single(self, cls_score, bbox_pred, labels, label_weights,
bbox_targets, bbox_weights, num_total_samples, cfg):
# classification loss
labels = labels.reshape(-1)
label_weights = label_weights.reshape(-1)
cls_score = cls_score.permute(0, 2, 3, 1).reshape(
-1, self.cls_out_channels)
if self.use_sigmoid_cls:
if self.use_focal_loss:
cls_criterion = weighted_sigmoid_focal_loss
else:
cls_criterion = weighted_binary_cross_entropy
else:
if self.use_focal_loss:
raise NotImplementedError
else:
cls_criterion = weighted_cross_entropy
if self.use_focal_loss:
loss_cls = cls_criterion(
cls_score,
labels,
label_weights,
gamma=cfg.gamma,
alpha=cfg.alpha,
avg_factor=num_total_samples)
else:
loss_cls = cls_criterion(
cls_score, labels, label_weights, avg_factor=num_total_samples)
# regression loss
bbox_targets = bbox_targets.reshape(-1, 4)
bbox_weights = bbox_weights.reshape(-1, 4)
bbox_pred = bbox_pred.permute(0, 2, 3, 1).reshape(-1, 4)
loss_reg = weighted_smoothl1(
bbox_pred,
bbox_targets,
bbox_weights,
beta=cfg.smoothl1_beta,
avg_factor=num_total_samples)
return loss_cls, loss_reg
def loss(self,
cls_scores,
bbox_preds,
gt_bboxes,
gt_labels,
img_metas,
cfg,
gt_bboxes_ignore=None):
featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]
assert len(featmap_sizes) == len(self.anchor_generators)
anchor_list, valid_flag_list = self.get_anchors(
featmap_sizes, img_metas)
sampling = not self.use_focal_loss
label_channels = self.cls_out_channels if self.use_sigmoid_cls else 1
cls_reg_targets = anchor_target(
anchor_list,
valid_flag_list,
gt_bboxes,
img_metas,
self.target_means,
self.target_stds,
cfg,
gt_bboxes_ignore_list=gt_bboxes_ignore,
gt_labels_list=gt_labels,
label_channels=label_channels,
sampling=sampling)
if cls_reg_targets is None:
return None
(labels_list, label_weights_list, bbox_targets_list, bbox_weights_list,
num_total_pos, num_total_neg) = cls_reg_targets
num_total_samples = (num_total_pos if self.use_focal_loss else
num_total_pos + num_total_neg)
losses_cls, losses_reg = multi_apply(
self.loss_single,
cls_scores,
bbox_preds,
labels_list,
label_weights_list,
bbox_targets_list,
bbox_weights_list,
num_total_samples=num_total_samples,
cfg=cfg)
return dict(loss_cls=losses_cls, loss_reg=losses_reg)
def get_bboxes(self, cls_scores, bbox_preds, img_metas, cfg,
rescale=False):
assert len(cls_scores) == len(bbox_preds)
num_levels = len(cls_scores)
mlvl_anchors = [
self.anchor_generators[i].grid_anchors(cls_scores[i].size()[-2:],
self.anchor_strides[i])
for i in range(num_levels)
]
result_list = []
for img_id in range(len(img_metas)):
cls_score_list = [
cls_scores[i][img_id].detach() for i in range(num_levels)
]
bbox_pred_list = [
bbox_preds[i][img_id].detach() for i in range(num_levels)
]
img_shape = img_metas[img_id]['img_shape']
scale_factor = img_metas[img_id]['scale_factor']
proposals = self.get_bboxes_single(cls_score_list, bbox_pred_list,
mlvl_anchors, img_shape,
scale_factor, cfg, rescale)
result_list.append(proposals)
return result_list
def get_bboxes_single(self,
cls_scores,
bbox_preds,
mlvl_anchors,
img_shape,
scale_factor,
cfg,
rescale=False):
assert len(cls_scores) == len(bbox_preds) == len(mlvl_anchors)
mlvl_bboxes = []
mlvl_scores = []
for cls_score, bbox_pred, anchors in zip(cls_scores, bbox_preds,
mlvl_anchors):
assert cls_score.size()[-2:] == bbox_pred.size()[-2:]
cls_score = cls_score.permute(1, 2, 0).reshape(
-1, self.cls_out_channels)
if self.use_sigmoid_cls:
scores = cls_score.sigmoid()
else:
scores = cls_score.softmax(-1)
bbox_pred = bbox_pred.permute(1, 2, 0).reshape(-1, 4)
nms_pre = cfg.get('nms_pre', -1)
if nms_pre > 0 and scores.shape[0] > nms_pre:
if self.use_sigmoid_cls:
max_scores, _ = scores.max(dim=1)
else:
max_scores, _ = scores[:, 1:].max(dim=1)
_, topk_inds = max_scores.topk(nms_pre)
anchors = anchors[topk_inds, :]
bbox_pred = bbox_pred[topk_inds, :]
scores = scores[topk_inds, :]
bboxes = delta2bbox(anchors, bbox_pred, self.target_means,
self.target_stds, img_shape)
mlvl_bboxes.append(bboxes)
mlvl_scores.append(scores)
mlvl_bboxes = torch.cat(mlvl_bboxes)
if rescale:
mlvl_bboxes /= mlvl_bboxes.new_tensor(scale_factor)
mlvl_scores = torch.cat(mlvl_scores)
if self.use_sigmoid_cls:
padding = mlvl_scores.new_zeros(mlvl_scores.shape[0], 1)
mlvl_scores = torch.cat([padding, mlvl_scores], dim=1)
det_bboxes, det_labels = multiclass_nms(
mlvl_bboxes, mlvl_scores, cfg.score_thr, cfg.nms, cfg.max_per_img)
return det_bboxes, det_labels
| 1 | 17,191 | This deletion is unnecessary. | open-mmlab-mmdetection | py |
@@ -302,7 +302,10 @@ class Notification extends Component {
mdc-layout-grid__cell
mdc-layout-grid__cell--span-1
">
- <img className="googlesitekit-publisher-win__small-image" alt="" src={ smallImage } />
+ {
+ typeof smallImage === 'string'
+ ? <img className="googlesitekit-publisher-win__small-image" alt="" src={ smallImage } /> : smallImage
+ }
</div>
}
| 1 | /**
* Notification component.
*
* Site Kit by Google, Copyright 2019 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* External dependencies
*/
import PropTypes from 'prop-types';
import classnames from 'classnames';
import { map } from 'lodash';
/**
* WordPress dependencies
*/
import { Component, Fragment, createRef, isValidElement } from '@wordpress/element';
/**
* Internal dependencies
*/
import GoogleLogoIcon from '../../../svg/logo-g.svg';
import { sanitizeHTML } from '../../util/sanitize';
import { setCache, getCache, deleteCache } from '../data/cache';
import DataBlock from '../data-block';
import Button from '../button';
import Warning from '../notifications/warning';
import Error from '../notifications/error';
import Link from '../Link';
import ModuleIcon from '../ModuleIcon';
class Notification extends Component {
constructor( props ) {
super( props );
this.state = {
isClosed: false,
};
this.cardRef = createRef();
this.handleDismiss = this.handleDismiss.bind( this );
this.handleCTAClick = this.handleCTAClick.bind( this );
if ( 0 < this.props.dismissExpires ) {
this.expireDismiss();
}
if ( this.props.showOnce ) {
setCache( `notification::displayed::${ this.props.id }`, new Date() );
}
}
async handleDismiss( e ) {
e.persist();
e.preventDefault();
const { onDismiss } = this.props;
if ( onDismiss ) {
await onDismiss( e );
}
this.dismiss();
}
dismiss() {
const card = this.cardRef.current;
this.setState( {
isClosed: true,
} );
setTimeout( () => {
setCache( `notification::dismissed::${ this.props.id }`, new Date() );
card.style.display = 'none';
const event = new Event( 'notificationDismissed' );
document.dispatchEvent( event );
}, 350 );
}
async handleCTAClick( e ) {
e.persist();
const { isDismissable, onCTAClick } = this.props;
if ( onCTAClick ) {
await onCTAClick( e );
}
if ( isDismissable ) {
this.dismiss();
}
}
expireDismiss() {
const {
id,
dismissExpires,
} = this.props;
const dismissed = getCache( `notification::dismissed::${ id }` );
if ( dismissed ) {
const expiration = new Date( dismissed );
expiration.setSeconds( expiration.getSeconds() + parseInt( dismissExpires, 10 ) );
if ( expiration < new Date() ) {
deleteCache( `notification::dismissed::${ id }` );
}
}
}
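// Illustrative timeline (not part of the original component): with
// dismissExpires={ 3600 }, a notification dismissed at 10:00 has its
// `notification::dismissed::<id>` cache entry deleted by expireDismiss()
// on the first construction after 11:00, so it can be shown again.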
render() {
const { isClosed } = this.state;
const {
children,
id,
title,
description,
blockData,
winImage,
smallImage,
format,
learnMoreURL,
learnMoreDescription,
learnMoreLabel,
ctaLink,
ctaLabel,
ctaTarget,
type,
dismiss,
isDismissable,
logo,
module,
moduleName,
pageIndex,
anchorLink,
anchorLinkLabel,
} = this.props;
if ( getCache( `notification::dismissed::${ id }` ) ) {
return null;
}
const closedClass = isClosed ? 'is-closed' : 'is-open';
const inlineLayout = 'large' === format && 'win-stats-increase' === type;
let layout = 'mdc-layout-grid__cell--span-12';
if ( 'large' === format ) {
layout = 'mdc-layout-grid__cell--order-2-phone ' +
'mdc-layout-grid__cell--order-1-tablet ' +
'mdc-layout-grid__cell--span-6-tablet ' +
'mdc-layout-grid__cell--span-8-desktop ';
if ( inlineLayout ) {
layout = 'mdc-layout-grid__cell--order-2-phone ' +
'mdc-layout-grid__cell--order-1-tablet ' +
'mdc-layout-grid__cell--span-5-tablet ' +
'mdc-layout-grid__cell--span-8-desktop ';
}
} else if ( 'small' === format ) {
layout = 'mdc-layout-grid__cell--span-11-desktop ' +
'mdc-layout-grid__cell--span-7-tablet ' +
'mdc-layout-grid__cell--span-3-phone';
}
let icon;
if ( 'win-warning' === type ) {
icon = <Warning />;
} else if ( 'win-error' === type ) {
icon = <Error />;
} else {
icon = '';
}
const dataBlockMarkup = (
<Fragment>
{ blockData &&
<div className="mdc-layout-grid__inner">
{
map( blockData, ( block, i ) => {
return (
<div
key={ i }
className={ classnames(
'mdc-layout-grid__cell',
{
'mdc-layout-grid__cell--span-5-desktop': inlineLayout,
'mdc-layout-grid__cell--span-4-desktop': ! inlineLayout,
}
) }
>
<div className="googlesitekit-publisher-win__stats">
<DataBlock { ...block } />
</div>
</div>
);
} )
}
</div>
}
</Fragment>
);
const inlineMarkup = (
<Fragment>
{ title &&
<h3 className="googlesitekit-heading-2 googlesitekit-publisher-win__title">
{ title }
</h3>
}
{ anchorLink && anchorLinkLabel &&
<p className="googlesitekit-publisher-win__link">
<Link href={ anchorLink }>
{ anchorLinkLabel }
</Link>
</p>
}
{ description &&
<div className="googlesitekit-publisher-win__desc">
<p>
{ isValidElement( description ) ? description : (
<span dangerouslySetInnerHTML={ sanitizeHTML( description, {
ALLOWED_TAGS: [ 'strong', 'em', 'br', 'a' ],
ALLOWED_ATTR: [ 'href' ],
} ) } />
) }
{ learnMoreLabel &&
<Fragment>
{ ' ' }
<Link href={ learnMoreURL } external inherit>
{ learnMoreLabel }
</Link>
{ learnMoreDescription }
</Fragment>
}
{ pageIndex &&
<span className="googlesitekit-publisher-win__detect">{ pageIndex }</span>
}
</p>
</div>
}
{ children }
</Fragment>
);
const logoSVG = module
? <ModuleIcon slug={ module } height={ 19 } width={ 19 } />
: <GoogleLogoIcon height="34" width="32" />;
return (
<section
ref={ this.cardRef }
className={ classnames(
'googlesitekit-publisher-win',
{
[ `googlesitekit-publisher-win--${ format }` ]: format,
[ `googlesitekit-publisher-win--${ type }` ]: type,
[ `googlesitekit-publisher-win--${ closedClass }` ]: closedClass,
}
) }
>
<div className="mdc-layout-grid">
<div className="mdc-layout-grid__inner">
{ logo &&
<div className={ classnames(
'mdc-layout-grid__cell',
'mdc-layout-grid__cell--span-12',
{
'mdc-layout-grid__cell--order-2-phone': inlineLayout,
'mdc-layout-grid__cell--order-1-tablet': inlineLayout,
}
) }>
<div className="googlesitekit-publisher-win__logo">
{ logoSVG }
</div>
{ moduleName &&
<div className="googlesitekit-publisher-win__module-name">
{ moduleName }
</div>
}
</div>
}
{ smallImage &&
<div className="
mdc-layout-grid__cell
mdc-layout-grid__cell--span-1
">
<img className="googlesitekit-publisher-win__small-image" alt="" src={ smallImage } />
</div>
}
<div className={ classnames(
'mdc-layout-grid__cell',
layout
) } >
{ inlineLayout ? (
<div className="mdc-layout-grid__inner">
<div className="mdc-layout-grid__cell mdc-layout-grid__cell--span-5-desktop mdc-layout-grid__cell--span-8-tablet">
{ inlineMarkup }
</div>
<div className="mdc-layout-grid__cell mdc-layout-grid__cell--span-7-desktop mdc-layout-grid__cell--span-8-tablet mdc-layout-grid__cell--align-bottom">
{ dataBlockMarkup }
</div>
</div>
) : (
<Fragment>
{ inlineMarkup }
{ dataBlockMarkup }
</Fragment>
) }
{ ctaLink &&
<Button
href={ ctaLink }
target={ ctaTarget }
onClick={ this.handleCTAClick }
>
{ ctaLabel }
</Button>
}
{ isDismissable && dismiss &&
<Link onClick={ this.handleDismiss }>
{ dismiss }
</Link>
}
</div>
{ winImage &&
<div className="
mdc-layout-grid__cell
mdc-layout-grid__cell--order-1-phone
mdc-layout-grid__cell--order-2-tablet
mdc-layout-grid__cell--span-2-tablet
mdc-layout-grid__cell--span-4-desktop
">
<div className="googlesitekit-publisher-win__image-large">
<img alt="" src={ winImage } />
</div>
</div>
}
{ ( 'win-error' === type || 'win-warning' === type ) &&
<div className="
mdc-layout-grid__cell
mdc-layout-grid__cell--span-1
">
<div className="googlesitekit-publisher-win__icons">
{ icon }
</div>
</div>
}
</div>
</div>
</section>
);
}
}
Notification.propTypes = {
id: PropTypes.string.isRequired,
title: PropTypes.string.isRequired,
description: PropTypes.node,
learnMoreURL: PropTypes.string,
learnMoreDescription: PropTypes.string,
learnMoreLabel: PropTypes.string,
blockData: PropTypes.array,
winImage: PropTypes.string,
smallImage: PropTypes.string,
format: PropTypes.string,
ctaLink: PropTypes.string,
ctaLabel: PropTypes.string,
type: PropTypes.string,
dismiss: PropTypes.string,
isDismissable: PropTypes.bool,
logo: PropTypes.bool,
module: PropTypes.string,
moduleName: PropTypes.string,
pageIndex: PropTypes.string,
dismissExpires: PropTypes.number,
showOnce: PropTypes.bool,
onCTAClick: PropTypes.func,
onDismiss: PropTypes.func,
anchorLink: PropTypes.string,
anchorLinkLabel: PropTypes.string,
};
Notification.defaultProps = {
isDismissable: true,
dismissExpires: 0,
showOnce: false,
};
export default Notification;
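// Illustrative usage sketch (not part of the original file): a dismissable
// notification whose dismissal expires after one day, relying on the
// cache-backed dismissal tracking above. All prop values here are examples.
//
// <Notification
//     id="example-win"
//     title="Congratulations!"
//     description="Your site was successfully set up."
//     dismiss="OK, got it!"
//     dismissExpires={ 86400 } // one day, in seconds
// />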
| 1 | 34,072 | I'm not a huge fan of this overloading of the prop; accepting very different types of arguments for props like this can be confusing, and wasn't part of the IB. I understand wanting to import the SVG directly, but the IB suggests using `${global._googlesitekitLegacyData.admin.assetsRoot}${userInputSuccessImage}` as the `src`/`smallImage` value here. It'd be great to refactor this to accept components directly in the future, but let's do that all-at-once rather than this hybrid approach that makes the API less clear. | google-site-kit-wp | js |
@@ -21,10 +21,7 @@ import java.io.InputStream;
import java.io.StringWriter;
import java.lang.invoke.MethodHandles;
import java.nio.charset.StandardCharsets;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
+import java.util.*;
import java.util.concurrent.TimeUnit;
import org.apache.solr.cloud.ZkController; | 1 | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.solr.schema;
import java.io.IOException;
import java.io.InputStream;
import java.io.StringWriter;
import java.lang.invoke.MethodHandles;
import java.nio.charset.StandardCharsets;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.TimeUnit;
import org.apache.solr.cloud.ZkController;
import org.apache.solr.cloud.ZkSolrResourceLoader;
import org.apache.solr.common.SolrException;
import org.apache.solr.common.cloud.SolrZkClient;
import org.apache.solr.common.util.TimeSource;
import org.apache.solr.core.CoreDescriptor;
import org.apache.solr.core.SolrCore;
import org.apache.solr.core.SolrResourceLoader;
import org.apache.solr.request.SolrQueryRequest;
import org.apache.solr.rest.BaseSolrResource;
import org.apache.solr.common.util.CommandOperation;
import org.apache.solr.util.TimeOut;
import org.apache.zookeeper.KeeperException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.xml.sax.InputSource;
import static java.util.Collections.singleton;
import static java.util.Collections.singletonList;
import static java.util.Collections.singletonMap;
import static org.apache.solr.schema.FieldType.CLASS_NAME;
import static org.apache.solr.schema.IndexSchema.DESTINATION;
import static org.apache.solr.schema.IndexSchema.MAX_CHARS;
import static org.apache.solr.schema.IndexSchema.NAME;
import static org.apache.solr.schema.IndexSchema.SOURCE;
import static org.apache.solr.schema.IndexSchema.TYPE;
/**
* A utility class to manipulate schema using the bulk mode.
* This class takes in all the commands and processes them completely.
* It is an all or nothing operation.
*/
public class SchemaManager {
private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
final SolrQueryRequest req;
ManagedIndexSchema managedIndexSchema;
int timeout;
public SchemaManager(SolrQueryRequest req){
this.req = req;
//The default timeout is 10 minutes when no BaseSolrResource.UPDATE_TIMEOUT_SECS is specified
timeout = req.getParams().getInt(BaseSolrResource.UPDATE_TIMEOUT_SECS, 600);
//If BaseSolrResource.UPDATE_TIMEOUT_SECS is 0 or -1, then we'll try for 10 mins (the default timeout)
if (timeout < 1) {
timeout = 600;
}
}
/**
* Takes in a JSON command set and executes the commands. It tries to capture as many errors
* as possible instead of failing at the first error it encounters.
* @return List of errors. If the List is empty then the operation was successful.
*/
@SuppressWarnings({"rawtypes"})
public List performOperations() throws Exception {
List<CommandOperation> ops = req.getCommands(false);
List errs = CommandOperation.captureErrors(ops);
if (!errs.isEmpty()) return errs;
IndexSchema schema = req.getCore().getLatestSchema();
if (schema instanceof ManagedIndexSchema && schema.isMutable()) {
return doOperations(ops);
} else {
return singletonList(singletonMap(CommandOperation.ERR_MSGS, "schema is not editable"));
}
}
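// Illustrative payload sketch (not part of the original file): the kind of
// bulk command set performOperations consumes, assuming the usual /schema
// endpoint JSON shape and the operation names registered in OpType below:
//
//   {
//     "add-field-type": { "name": "myTextType", "class": "solr.TextField" },
//     "add-field":      { "name": "myField", "type": "myTextType" },
//     "delete-field":   { "name": "obsoleteField" }
//   }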
@SuppressWarnings({"rawtypes"})
private List doOperations(List<CommandOperation> operations) throws InterruptedException, IOException, KeeperException {
TimeOut timeOut = new TimeOut(timeout, TimeUnit.SECONDS, TimeSource.NANO_TIME);
SolrCore core = req.getCore();
String errorMsg = "Unable to persist managed schema. ";
List errors = Collections.emptyList();
int latestVersion = -1;
synchronized (req.getSchema().getSchemaUpdateLock()) {
while (!timeOut.hasTimedOut()) {
managedIndexSchema = getFreshManagedSchema(req.getCore());
for (CommandOperation op : operations) {
OpType opType = OpType.get(op.name);
if (opType != null) {
opType.perform(op, this);
} else {
op.addError("No such operation : " + op.name);
}
}
errors = CommandOperation.captureErrors(operations);
if (!errors.isEmpty()) break;
SolrResourceLoader loader = req.getCore().getResourceLoader();
if (loader instanceof ZkSolrResourceLoader) {
ZkSolrResourceLoader zkLoader = (ZkSolrResourceLoader) loader;
StringWriter sw = new StringWriter();
try {
managedIndexSchema.persist(sw);
} catch (IOException e) {
throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "unable to serialize schema");
//unlikely
}
try {
latestVersion = ZkController.persistConfigResourceToZooKeeper
(zkLoader, managedIndexSchema.getSchemaZkVersion(), managedIndexSchema.getResourceName(),
sw.toString().getBytes(StandardCharsets.UTF_8), true);
req.getCore().getCoreContainer().reload(req.getCore().getName());
break;
} catch (ZkController.ResourceModifiedInZkException e) {
log.info("Schema was modified by another node. Retrying..");
}
} else {
try {
//only for non cloud stuff
managedIndexSchema.persistManagedSchema(false);
core.setLatestSchema(managedIndexSchema);
core.getCoreContainer().reload(core.getName());
} catch (SolrException e) {
log.warn(errorMsg);
errors = singletonList(errorMsg + e.getMessage());
}
break;
}
}
}
if (req.getCore().getResourceLoader() instanceof ZkSolrResourceLoader) {
// Don't block further schema updates while waiting for a pending update to propagate to other replicas.
// This reduces the likelihood of a (time-limited) distributed deadlock during concurrent schema updates.
waitForOtherReplicasToUpdate(timeOut, latestVersion);
}
if (errors.isEmpty() && timeOut.hasTimedOut()) {
log.warn("{} Timed out", errorMsg);
errors = singletonList(errorMsg + "Timed out.");
}
return errors;
}
private void waitForOtherReplicasToUpdate(TimeOut timeOut, int latestVersion) {
SolrCore core = req.getCore();
CoreDescriptor cd = core.getCoreDescriptor();
String collection = cd.getCollectionName();
if (collection != null) {
if (timeOut.hasTimedOut()) {
throw new SolrException(SolrException.ErrorCode.SERVER_ERROR,
"Not enough time left to update replicas. However, the schema is updated already.");
}
ManagedIndexSchema.waitForSchemaZkVersionAgreement(collection, cd.getCloudDescriptor().getCoreNodeName(),
latestVersion, core.getCoreContainer().getZkController(), (int) timeOut.timeLeft(TimeUnit.SECONDS));
}
}
public enum OpType {
ADD_FIELD_TYPE("add-field-type") {
@Override public boolean perform(CommandOperation op, SchemaManager mgr) {
String name = op.getStr(NAME);
String className = op.getStr(CLASS_NAME);
if (op.hasError())
return false;
try {
FieldType fieldType = mgr.managedIndexSchema.newFieldType(name, className, op.getDataMap());
mgr.managedIndexSchema = mgr.managedIndexSchema.addFieldTypes(singletonList(fieldType), false);
return true;
} catch (Exception e) {
op.addError(getErrorStr(e));
return false;
}
}
},
ADD_COPY_FIELD("add-copy-field") {
@Override public boolean perform(CommandOperation op, SchemaManager mgr) {
String src = op.getStr(SOURCE);
List<String> dests = op.getStrs(DESTINATION);
int maxChars = CopyField.UNLIMITED; // If maxChars is not specified, there is no limit on copied chars
String maxCharsStr = op.getStr(MAX_CHARS, null);
if (null != maxCharsStr) {
try {
maxChars = Integer.parseInt(maxCharsStr);
} catch (NumberFormatException e) {
op.addError("Exception parsing " + MAX_CHARS + " '" + maxCharsStr + "': " + getErrorStr(e));
}
if (maxChars < 0) {
op.addError(MAX_CHARS + " '" + maxCharsStr + "' is negative.");
}
}
if (op.hasError())
return false;
if ( ! op.getValuesExcluding(SOURCE, DESTINATION, MAX_CHARS).isEmpty()) {
op.addError("Only the '" + SOURCE + "', '" + DESTINATION + "' and '" + MAX_CHARS
+ "' params are allowed with the 'add-copy-field' operation");
return false;
}
try {
mgr.managedIndexSchema = mgr.managedIndexSchema.addCopyFields(src, dests, maxChars);
return true;
} catch (Exception e) {
op.addError(getErrorStr(e));
return false;
}
}
},
ADD_FIELD("add-field") {
@Override public boolean perform(CommandOperation op, SchemaManager mgr) {
String name = op.getStr(NAME);
String type = op.getStr(TYPE);
if (op.hasError())
return false;
try {
SchemaField field = mgr.managedIndexSchema.newField(name, type, op.getValuesExcluding(NAME, TYPE));
mgr.managedIndexSchema
= mgr.managedIndexSchema.addFields(singletonList(field), Collections.emptyMap(), false);
return true;
} catch (Exception e) {
op.addError(getErrorStr(e));
return false;
}
}
},
ADD_DYNAMIC_FIELD("add-dynamic-field") {
@Override public boolean perform(CommandOperation op, SchemaManager mgr) {
String name = op.getStr(NAME);
String type = op.getStr(TYPE);
if (op.hasError())
return false;
try {
SchemaField field = mgr.managedIndexSchema.newDynamicField(name, type, op.getValuesExcluding(NAME, TYPE));
mgr.managedIndexSchema
= mgr.managedIndexSchema.addDynamicFields(singletonList(field), Collections.emptyMap(), false);
return true;
} catch (Exception e) {
op.addError(getErrorStr(e));
return false;
}
}
},
DELETE_FIELD_TYPE("delete-field-type") {
@Override public boolean perform(CommandOperation op, SchemaManager mgr) {
String name = op.getStr(NAME);
if (op.hasError())
return false;
if ( ! op.getValuesExcluding(NAME).isEmpty()) {
op.addError("Only the '" + NAME + "' param is allowed with the 'delete-field-type' operation");
return false;
}
try {
mgr.managedIndexSchema = mgr.managedIndexSchema.deleteFieldTypes(singleton(name));
return true;
} catch (Exception e) {
op.addError(getErrorStr(e));
return false;
}
}
},
DELETE_COPY_FIELD("delete-copy-field") {
@Override public boolean perform(CommandOperation op, SchemaManager mgr) {
String source = op.getStr(SOURCE);
List<String> dests = op.getStrs(DESTINATION);
if (op.hasError())
return false;
if ( ! op.getValuesExcluding(SOURCE, DESTINATION).isEmpty()) {
op.addError("Only the '" + SOURCE + "' and '" + DESTINATION
+ "' params are allowed with the 'delete-copy-field' operation");
return false;
}
try {
mgr.managedIndexSchema = mgr.managedIndexSchema.deleteCopyFields(singletonMap(source, dests));
return true;
} catch (Exception e) {
op.addError(getErrorStr(e));
return false;
}
}
},
DELETE_FIELD("delete-field") {
@Override public boolean perform(CommandOperation op, SchemaManager mgr) {
String name = op.getStr(NAME);
if (op.hasError())
return false;
if ( ! op.getValuesExcluding(NAME).isEmpty()) {
op.addError("Only the '" + NAME + "' param is allowed with the 'delete-field' operation");
return false;
}
try {
mgr.managedIndexSchema = mgr.managedIndexSchema.deleteFields(singleton(name));
return true;
} catch (Exception e) {
op.addError(getErrorStr(e));
return false;
}
}
},
DELETE_DYNAMIC_FIELD("delete-dynamic-field") {
@Override public boolean perform(CommandOperation op, SchemaManager mgr) {
String name = op.getStr(NAME);
if (op.hasError())
return false;
if ( ! op.getValuesExcluding(NAME).isEmpty()) {
op.addError("Only the '" + NAME + "' param is allowed with the 'delete-dynamic-field' operation");
return false;
}
try {
mgr.managedIndexSchema = mgr.managedIndexSchema.deleteDynamicFields(singleton(name));
return true;
} catch (Exception e) {
op.addError(getErrorStr(e));
return false;
}
}
},
REPLACE_FIELD_TYPE("replace-field-type") {
@Override public boolean perform(CommandOperation op, SchemaManager mgr) {
String name = op.getStr(NAME);
String className = op.getStr(CLASS_NAME);
if (op.hasError())
return false;
try {
mgr.managedIndexSchema = mgr.managedIndexSchema.replaceFieldType(name, className, op.getDataMap());
return true;
} catch (Exception e) {
op.addError(getErrorStr(e));
return false;
}
}
},
REPLACE_FIELD("replace-field") {
@Override public boolean perform(CommandOperation op, SchemaManager mgr) {
String name = op.getStr(NAME);
String type = op.getStr(TYPE);
if (op.hasError())
return false;
FieldType ft = mgr.managedIndexSchema.getFieldTypeByName(type);
if (ft == null) {
op.addError("No such field type '" + type + "'");
return false;
}
try {
mgr.managedIndexSchema = mgr.managedIndexSchema.replaceField(name, ft, op.getValuesExcluding(NAME, TYPE));
return true;
} catch (Exception e) {
op.addError(getErrorStr(e));
return false;
}
}
},
REPLACE_DYNAMIC_FIELD("replace-dynamic-field") {
@Override public boolean perform(CommandOperation op, SchemaManager mgr) {
String name = op.getStr(NAME);
String type = op.getStr(TYPE);
if (op.hasError())
return false;
FieldType ft = mgr.managedIndexSchema.getFieldTypeByName(type);
if (ft == null) {
op.addError("No such field type '" + type + "'");
return false;
}
try {
mgr.managedIndexSchema = mgr.managedIndexSchema.replaceDynamicField(name, ft, op.getValuesExcluding(NAME, TYPE));
return true;
} catch (Exception e) {
op.addError(getErrorStr(e));
return false;
}
}
};
public abstract boolean perform(CommandOperation op, SchemaManager mgr);
public static OpType get(String label) {
return Nested.OP_TYPES.get(label);
}
private static class Nested { // Initializes contained static map before any enum ctor
static final Map<String,OpType> OP_TYPES = new HashMap<>();
}
private OpType(String label) {
Nested.OP_TYPES.put(label, this);
}
}
public static String getErrorStr(Exception e) {
StringBuilder sb = new StringBuilder();
Throwable cause = e;
for (int i = 0 ; i < 5 ; i++) {
sb.append(cause.getMessage()).append("\n");
if (cause.getCause() == null || cause.getCause() == cause) break;
cause = cause.getCause();
}
return sb.toString();
}
private ManagedIndexSchema getFreshManagedSchema(SolrCore core) throws IOException,
KeeperException, InterruptedException {
SolrResourceLoader resourceLoader = core.getResourceLoader();
String name = core.getLatestSchema().getResourceName();
if (resourceLoader instanceof ZkSolrResourceLoader) {
final ZkSolrResourceLoader zkLoader = (ZkSolrResourceLoader)resourceLoader;
SolrZkClient zkClient = zkLoader.getZkController().getZkClient();
try {
if (!zkClient.exists(zkLoader.getConfigSetZkPath() + "/" + name, true)) {
String backupName = name + ManagedIndexSchemaFactory.UPGRADED_SCHEMA_EXTENSION;
if (!zkClient.exists(zkLoader.getConfigSetZkPath() + "/" + backupName, true)) {
log.warn("Unable to retrieve fresh managed schema, neither {} nor {} exist.", name, backupName);
// use current schema
return (ManagedIndexSchema) core.getLatestSchema();
} else {
name = backupName;
}
}
} catch (Exception e) {
log.warn("Unable to retrieve fresh managed schema {}", name, e);
// use current schema
return (ManagedIndexSchema) core.getLatestSchema();
}
InputStream in = resourceLoader.openResource(name);
if (in instanceof ZkSolrResourceLoader.ZkByteArrayInputStream) {
int version = ((ZkSolrResourceLoader.ZkByteArrayInputStream) in).getStat().getVersion();
log.info("managed schema loaded . version : {} ", version);
return new ManagedIndexSchema(core.getSolrConfig(), name, new InputSource(in), true, name, version,
core.getLatestSchema().getSchemaUpdateLock());
} else {
return (ManagedIndexSchema) core.getLatestSchema();
}
} else {
return (ManagedIndexSchema) core.getLatestSchema();
}
}
}
| 1 | 36,363 | I've had this happen recently with IntelliJ, was this intentional? | apache-lucene-solr | java |
@@ -22,9 +22,10 @@
#include <string.h>
#include <string>
#include <vector>
-#include <map>
+#include <unordered_map>
#include "vulkan/vulkan.h"
#include "vk_format_utils.h"
+#include "hash_util.h"
struct VULKAN_FORMAT_INFO {
size_t size; | 1 | /* Copyright (c) 2015-2016 The Khronos Group Inc.
* Copyright (c) 2015-2016 Valve Corporation
* Copyright (c) 2015-2016 LunarG, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Author: Mark Lobodzinski <[email protected]>
* Author: Dave Houlton <[email protected]>
*
*/
#include <string.h>
#include <string>
#include <vector>
#include <map>
#include "vulkan/vulkan.h"
#include "vk_format_utils.h"
struct VULKAN_FORMAT_INFO {
size_t size;
uint32_t channel_count;
VkFormatCompatibilityClass format_class;
};
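// Illustrative lookup sketch (not part of the original file): given the table
// below, per-texel properties of an uncompressed format can be read as:
//
//   auto it = vk_format_table.find(VK_FORMAT_R8G8B8A8_UNORM);
//   if (it != vk_format_table.end()) {
//       size_t bytes = it->second.size;               // 4
//       uint32_t channels = it->second.channel_count; // 4
//   }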
// Disable auto-formatting for this large table
// clang-format off
// Set up data structure with number of bytes and number of channels for each Vulkan format
const std::map<VkFormat, VULKAN_FORMAT_INFO> vk_format_table = {
{VK_FORMAT_UNDEFINED, {0, 0, VK_FORMAT_COMPATIBILITY_CLASS_NONE_BIT }},
{VK_FORMAT_R4G4_UNORM_PACK8, {1, 2, VK_FORMAT_COMPATIBILITY_CLASS_8_BIT}},
{VK_FORMAT_R4G4B4A4_UNORM_PACK16, {2, 4, VK_FORMAT_COMPATIBILITY_CLASS_16_BIT}},
{VK_FORMAT_B4G4R4A4_UNORM_PACK16, {2, 4, VK_FORMAT_COMPATIBILITY_CLASS_16_BIT}},
{VK_FORMAT_R5G6B5_UNORM_PACK16, {2, 3, VK_FORMAT_COMPATIBILITY_CLASS_16_BIT}},
{VK_FORMAT_B5G6R5_UNORM_PACK16, {2, 3, VK_FORMAT_COMPATIBILITY_CLASS_16_BIT}},
{VK_FORMAT_R5G5B5A1_UNORM_PACK16, {2, 4, VK_FORMAT_COMPATIBILITY_CLASS_16_BIT}},
{VK_FORMAT_B5G5R5A1_UNORM_PACK16, {2, 4, VK_FORMAT_COMPATIBILITY_CLASS_16_BIT}},
{VK_FORMAT_A1R5G5B5_UNORM_PACK16, {2, 4, VK_FORMAT_COMPATIBILITY_CLASS_16_BIT}},
{VK_FORMAT_R8_UNORM, {1, 1, VK_FORMAT_COMPATIBILITY_CLASS_8_BIT}},
{VK_FORMAT_R8_SNORM, {1, 1, VK_FORMAT_COMPATIBILITY_CLASS_8_BIT}},
{VK_FORMAT_R8_USCALED, {1, 1, VK_FORMAT_COMPATIBILITY_CLASS_8_BIT}},
{VK_FORMAT_R8_SSCALED, {1, 1, VK_FORMAT_COMPATIBILITY_CLASS_8_BIT}},
{VK_FORMAT_R8_UINT, {1, 1, VK_FORMAT_COMPATIBILITY_CLASS_8_BIT}},
{VK_FORMAT_R8_SINT, {1, 1, VK_FORMAT_COMPATIBILITY_CLASS_8_BIT}},
{VK_FORMAT_R8_SRGB, {1, 1, VK_FORMAT_COMPATIBILITY_CLASS_8_BIT}},
{VK_FORMAT_R8G8_UNORM, {2, 2, VK_FORMAT_COMPATIBILITY_CLASS_16_BIT}},
{VK_FORMAT_R8G8_SNORM, {2, 2, VK_FORMAT_COMPATIBILITY_CLASS_16_BIT}},
{VK_FORMAT_R8G8_USCALED, {2, 2, VK_FORMAT_COMPATIBILITY_CLASS_16_BIT}},
{VK_FORMAT_R8G8_SSCALED, {2, 2, VK_FORMAT_COMPATIBILITY_CLASS_16_BIT}},
{VK_FORMAT_R8G8_UINT, {2, 2, VK_FORMAT_COMPATIBILITY_CLASS_16_BIT}},
{VK_FORMAT_R8G8_SINT, {2, 2, VK_FORMAT_COMPATIBILITY_CLASS_16_BIT}},
{VK_FORMAT_R8G8_SRGB, {2, 2, VK_FORMAT_COMPATIBILITY_CLASS_16_BIT}},
{VK_FORMAT_R8G8B8_UNORM, {3, 3, VK_FORMAT_COMPATIBILITY_CLASS_24_BIT}},
{VK_FORMAT_R8G8B8_SNORM, {3, 3, VK_FORMAT_COMPATIBILITY_CLASS_24_BIT}},
{VK_FORMAT_R8G8B8_USCALED, {3, 3, VK_FORMAT_COMPATIBILITY_CLASS_24_BIT}},
{VK_FORMAT_R8G8B8_SSCALED, {3, 3, VK_FORMAT_COMPATIBILITY_CLASS_24_BIT}},
{VK_FORMAT_R8G8B8_UINT, {3, 3, VK_FORMAT_COMPATIBILITY_CLASS_24_BIT}},
{VK_FORMAT_R8G8B8_SINT, {3, 3, VK_FORMAT_COMPATIBILITY_CLASS_24_BIT}},
{VK_FORMAT_R8G8B8_SRGB, {3, 3, VK_FORMAT_COMPATIBILITY_CLASS_24_BIT}},
{VK_FORMAT_B8G8R8_UNORM, {3, 3, VK_FORMAT_COMPATIBILITY_CLASS_24_BIT}},
{VK_FORMAT_B8G8R8_SNORM, {3, 3, VK_FORMAT_COMPATIBILITY_CLASS_24_BIT}},
{VK_FORMAT_B8G8R8_USCALED, {3, 3, VK_FORMAT_COMPATIBILITY_CLASS_24_BIT}},
{VK_FORMAT_B8G8R8_SSCALED, {3, 3, VK_FORMAT_COMPATIBILITY_CLASS_24_BIT}},
{VK_FORMAT_B8G8R8_UINT, {3, 3, VK_FORMAT_COMPATIBILITY_CLASS_24_BIT}},
{VK_FORMAT_B8G8R8_SINT, {3, 3, VK_FORMAT_COMPATIBILITY_CLASS_24_BIT}},
{VK_FORMAT_B8G8R8_SRGB, {3, 3, VK_FORMAT_COMPATIBILITY_CLASS_24_BIT}},
{VK_FORMAT_R8G8B8A8_UNORM, {4, 4, VK_FORMAT_COMPATIBILITY_CLASS_32_BIT}},
{VK_FORMAT_R8G8B8A8_SNORM, {4, 4, VK_FORMAT_COMPATIBILITY_CLASS_32_BIT}},
{VK_FORMAT_R8G8B8A8_USCALED, {4, 4, VK_FORMAT_COMPATIBILITY_CLASS_32_BIT}},
{VK_FORMAT_R8G8B8A8_SSCALED, {4, 4, VK_FORMAT_COMPATIBILITY_CLASS_32_BIT}},
{VK_FORMAT_R8G8B8A8_UINT, {4, 4, VK_FORMAT_COMPATIBILITY_CLASS_32_BIT}},
{VK_FORMAT_R8G8B8A8_SINT, {4, 4, VK_FORMAT_COMPATIBILITY_CLASS_32_BIT}},
{VK_FORMAT_R8G8B8A8_SRGB, {4, 4, VK_FORMAT_COMPATIBILITY_CLASS_32_BIT}},
{VK_FORMAT_B8G8R8A8_UNORM, {4, 4, VK_FORMAT_COMPATIBILITY_CLASS_32_BIT}},
{VK_FORMAT_B8G8R8A8_SNORM, {4, 4, VK_FORMAT_COMPATIBILITY_CLASS_32_BIT}},
{VK_FORMAT_B8G8R8A8_USCALED, {4, 4, VK_FORMAT_COMPATIBILITY_CLASS_32_BIT}},
{VK_FORMAT_B8G8R8A8_SSCALED, {4, 4, VK_FORMAT_COMPATIBILITY_CLASS_32_BIT}},
{VK_FORMAT_B8G8R8A8_UINT, {4, 4, VK_FORMAT_COMPATIBILITY_CLASS_32_BIT}},
{VK_FORMAT_B8G8R8A8_SINT, {4, 4, VK_FORMAT_COMPATIBILITY_CLASS_32_BIT}},
{VK_FORMAT_B8G8R8A8_SRGB, {4, 4, VK_FORMAT_COMPATIBILITY_CLASS_32_BIT}},
{VK_FORMAT_A8B8G8R8_UNORM_PACK32, {4, 4, VK_FORMAT_COMPATIBILITY_CLASS_32_BIT}},
{VK_FORMAT_A8B8G8R8_SNORM_PACK32, {4, 4, VK_FORMAT_COMPATIBILITY_CLASS_32_BIT}},
{VK_FORMAT_A8B8G8R8_USCALED_PACK32, {4, 4, VK_FORMAT_COMPATIBILITY_CLASS_32_BIT}},
{VK_FORMAT_A8B8G8R8_SSCALED_PACK32, {4, 4, VK_FORMAT_COMPATIBILITY_CLASS_32_BIT}},
{VK_FORMAT_A8B8G8R8_UINT_PACK32, {4, 4, VK_FORMAT_COMPATIBILITY_CLASS_32_BIT}},
{VK_FORMAT_A8B8G8R8_SINT_PACK32, {4, 4, VK_FORMAT_COMPATIBILITY_CLASS_32_BIT}},
{VK_FORMAT_A8B8G8R8_SRGB_PACK32, {4, 4, VK_FORMAT_COMPATIBILITY_CLASS_32_BIT}},
{VK_FORMAT_A2R10G10B10_UNORM_PACK32, {4, 4, VK_FORMAT_COMPATIBILITY_CLASS_32_BIT}},
{VK_FORMAT_A2R10G10B10_SNORM_PACK32, {4, 4, VK_FORMAT_COMPATIBILITY_CLASS_32_BIT}},
{VK_FORMAT_A2R10G10B10_USCALED_PACK32, {4, 4, VK_FORMAT_COMPATIBILITY_CLASS_32_BIT}},
{VK_FORMAT_A2R10G10B10_SSCALED_PACK32, {4, 4, VK_FORMAT_COMPATIBILITY_CLASS_32_BIT}},
{VK_FORMAT_A2R10G10B10_UINT_PACK32, {4, 4, VK_FORMAT_COMPATIBILITY_CLASS_32_BIT}},
{VK_FORMAT_A2R10G10B10_SINT_PACK32, {4, 4, VK_FORMAT_COMPATIBILITY_CLASS_32_BIT}},
{VK_FORMAT_A2B10G10R10_UNORM_PACK32, {4, 4, VK_FORMAT_COMPATIBILITY_CLASS_32_BIT}},
{VK_FORMAT_A2B10G10R10_SNORM_PACK32, {4, 4, VK_FORMAT_COMPATIBILITY_CLASS_32_BIT}},
{VK_FORMAT_A2B10G10R10_USCALED_PACK32, {4, 4, VK_FORMAT_COMPATIBILITY_CLASS_32_BIT}},
{VK_FORMAT_A2B10G10R10_SSCALED_PACK32, {4, 4, VK_FORMAT_COMPATIBILITY_CLASS_32_BIT}},
{VK_FORMAT_A2B10G10R10_UINT_PACK32, {4, 4, VK_FORMAT_COMPATIBILITY_CLASS_32_BIT}},
{VK_FORMAT_A2B10G10R10_SINT_PACK32, {4, 4, VK_FORMAT_COMPATIBILITY_CLASS_32_BIT}},
{VK_FORMAT_R16_UNORM, {2, 1, VK_FORMAT_COMPATIBILITY_CLASS_16_BIT}},
{VK_FORMAT_R16_SNORM, {2, 1, VK_FORMAT_COMPATIBILITY_CLASS_16_BIT}},
{VK_FORMAT_R16_USCALED, {2, 1, VK_FORMAT_COMPATIBILITY_CLASS_16_BIT}},
{VK_FORMAT_R16_SSCALED, {2, 1, VK_FORMAT_COMPATIBILITY_CLASS_16_BIT}},
{VK_FORMAT_R16_UINT, {2, 1, VK_FORMAT_COMPATIBILITY_CLASS_16_BIT}},
{VK_FORMAT_R16_SINT, {2, 1, VK_FORMAT_COMPATIBILITY_CLASS_16_BIT}},
{VK_FORMAT_R16_SFLOAT, {2, 1, VK_FORMAT_COMPATIBILITY_CLASS_16_BIT}},
{VK_FORMAT_R16G16_UNORM, {4, 2, VK_FORMAT_COMPATIBILITY_CLASS_32_BIT}},
{VK_FORMAT_R16G16_SNORM, {4, 2, VK_FORMAT_COMPATIBILITY_CLASS_32_BIT}},
{VK_FORMAT_R16G16_USCALED, {4, 2, VK_FORMAT_COMPATIBILITY_CLASS_32_BIT}},
{VK_FORMAT_R16G16_SSCALED, {4, 2, VK_FORMAT_COMPATIBILITY_CLASS_32_BIT}},
{VK_FORMAT_R16G16_UINT, {4, 2, VK_FORMAT_COMPATIBILITY_CLASS_32_BIT}},
{VK_FORMAT_R16G16_SINT, {4, 2, VK_FORMAT_COMPATIBILITY_CLASS_32_BIT}},
{VK_FORMAT_R16G16_SFLOAT, {4, 2, VK_FORMAT_COMPATIBILITY_CLASS_32_BIT}},
{VK_FORMAT_R16G16B16_UNORM, {6, 3, VK_FORMAT_COMPATIBILITY_CLASS_48_BIT}},
{VK_FORMAT_R16G16B16_SNORM, {6, 3, VK_FORMAT_COMPATIBILITY_CLASS_48_BIT}},
{VK_FORMAT_R16G16B16_USCALED, {6, 3, VK_FORMAT_COMPATIBILITY_CLASS_48_BIT}},
{VK_FORMAT_R16G16B16_SSCALED, {6, 3, VK_FORMAT_COMPATIBILITY_CLASS_48_BIT}},
{VK_FORMAT_R16G16B16_UINT, {6, 3, VK_FORMAT_COMPATIBILITY_CLASS_48_BIT}},
{VK_FORMAT_R16G16B16_SINT, {6, 3, VK_FORMAT_COMPATIBILITY_CLASS_48_BIT}},
{VK_FORMAT_R16G16B16_SFLOAT, {6, 3, VK_FORMAT_COMPATIBILITY_CLASS_48_BIT}},
{VK_FORMAT_R16G16B16A16_UNORM, {8, 4, VK_FORMAT_COMPATIBILITY_CLASS_64_BIT}},
{VK_FORMAT_R16G16B16A16_SNORM, {8, 4, VK_FORMAT_COMPATIBILITY_CLASS_64_BIT}},
{VK_FORMAT_R16G16B16A16_USCALED, {8, 4, VK_FORMAT_COMPATIBILITY_CLASS_64_BIT}},
{VK_FORMAT_R16G16B16A16_SSCALED, {8, 4, VK_FORMAT_COMPATIBILITY_CLASS_64_BIT}},
{VK_FORMAT_R16G16B16A16_UINT, {8, 4, VK_FORMAT_COMPATIBILITY_CLASS_64_BIT}},
{VK_FORMAT_R16G16B16A16_SINT, {8, 4, VK_FORMAT_COMPATIBILITY_CLASS_64_BIT}},
{VK_FORMAT_R16G16B16A16_SFLOAT, {8, 4, VK_FORMAT_COMPATIBILITY_CLASS_64_BIT}},
{VK_FORMAT_R32_UINT, {4, 1, VK_FORMAT_COMPATIBILITY_CLASS_32_BIT}},
{VK_FORMAT_R32_SINT, {4, 1, VK_FORMAT_COMPATIBILITY_CLASS_32_BIT}},
{VK_FORMAT_R32_SFLOAT, {4, 1, VK_FORMAT_COMPATIBILITY_CLASS_32_BIT}},
{VK_FORMAT_R32G32_UINT, {8, 2, VK_FORMAT_COMPATIBILITY_CLASS_64_BIT}},
{VK_FORMAT_R32G32_SINT, {8, 2, VK_FORMAT_COMPATIBILITY_CLASS_64_BIT}},
{VK_FORMAT_R32G32_SFLOAT, {8, 2, VK_FORMAT_COMPATIBILITY_CLASS_64_BIT}},
{VK_FORMAT_R32G32B32_UINT, {12, 3, VK_FORMAT_COMPATIBILITY_CLASS_96_BIT}},
{VK_FORMAT_R32G32B32_SINT, {12, 3, VK_FORMAT_COMPATIBILITY_CLASS_96_BIT}},
{VK_FORMAT_R32G32B32_SFLOAT, {12, 3, VK_FORMAT_COMPATIBILITY_CLASS_96_BIT}},
{VK_FORMAT_R32G32B32A32_UINT, {16, 4, VK_FORMAT_COMPATIBILITY_CLASS_128_BIT}},
{VK_FORMAT_R32G32B32A32_SINT, {16, 4, VK_FORMAT_COMPATIBILITY_CLASS_128_BIT}},
{VK_FORMAT_R32G32B32A32_SFLOAT, {16, 4, VK_FORMAT_COMPATIBILITY_CLASS_128_BIT}},
{VK_FORMAT_R64_UINT, {8, 1, VK_FORMAT_COMPATIBILITY_CLASS_64_BIT}},
{VK_FORMAT_R64_SINT, {8, 1, VK_FORMAT_COMPATIBILITY_CLASS_64_BIT}},
{VK_FORMAT_R64_SFLOAT, {8, 1, VK_FORMAT_COMPATIBILITY_CLASS_64_BIT}},
{VK_FORMAT_R64G64_UINT, {16, 2, VK_FORMAT_COMPATIBILITY_CLASS_128_BIT}},
{VK_FORMAT_R64G64_SINT, {16, 2, VK_FORMAT_COMPATIBILITY_CLASS_128_BIT}},
{VK_FORMAT_R64G64_SFLOAT, {16, 2, VK_FORMAT_COMPATIBILITY_CLASS_128_BIT}},
{VK_FORMAT_R64G64B64_UINT, {24, 3, VK_FORMAT_COMPATIBILITY_CLASS_192_BIT}},
{VK_FORMAT_R64G64B64_SINT, {24, 3, VK_FORMAT_COMPATIBILITY_CLASS_192_BIT}},
{VK_FORMAT_R64G64B64_SFLOAT, {24, 3, VK_FORMAT_COMPATIBILITY_CLASS_192_BIT}},
{VK_FORMAT_R64G64B64A64_UINT, {32, 4, VK_FORMAT_COMPATIBILITY_CLASS_256_BIT}},
{VK_FORMAT_R64G64B64A64_SINT, {32, 4, VK_FORMAT_COMPATIBILITY_CLASS_256_BIT}},
{VK_FORMAT_R64G64B64A64_SFLOAT, {32, 4, VK_FORMAT_COMPATIBILITY_CLASS_256_BIT}},
{VK_FORMAT_B10G11R11_UFLOAT_PACK32, {4, 3, VK_FORMAT_COMPATIBILITY_CLASS_32_BIT}},
{VK_FORMAT_E5B9G9R9_UFLOAT_PACK32, {4, 3, VK_FORMAT_COMPATIBILITY_CLASS_32_BIT}},
{VK_FORMAT_D16_UNORM, {2, 1, VK_FORMAT_COMPATIBILITY_CLASS_NONE_BIT}},
{VK_FORMAT_X8_D24_UNORM_PACK32, {4, 1, VK_FORMAT_COMPATIBILITY_CLASS_NONE_BIT}},
{VK_FORMAT_D32_SFLOAT, {4, 1, VK_FORMAT_COMPATIBILITY_CLASS_NONE_BIT}},
{VK_FORMAT_S8_UINT, {1, 1, VK_FORMAT_COMPATIBILITY_CLASS_NONE_BIT}},
{VK_FORMAT_D16_UNORM_S8_UINT, {3, 2, VK_FORMAT_COMPATIBILITY_CLASS_NONE_BIT}},
{VK_FORMAT_D24_UNORM_S8_UINT, {4, 2, VK_FORMAT_COMPATIBILITY_CLASS_NONE_BIT}},
{VK_FORMAT_D32_SFLOAT_S8_UINT, {8, 2, VK_FORMAT_COMPATIBILITY_CLASS_NONE_BIT}},
{VK_FORMAT_BC1_RGB_UNORM_BLOCK, {8, 4, VK_FORMAT_COMPATIBILITY_CLASS_BC1_RGB_BIT}},
{VK_FORMAT_BC1_RGB_SRGB_BLOCK, {8, 4, VK_FORMAT_COMPATIBILITY_CLASS_BC1_RGB_BIT}},
{VK_FORMAT_BC1_RGBA_UNORM_BLOCK, {8, 4, VK_FORMAT_COMPATIBILITY_CLASS_BC1_RGBA_BIT}},
{VK_FORMAT_BC1_RGBA_SRGB_BLOCK, {8, 4, VK_FORMAT_COMPATIBILITY_CLASS_BC1_RGBA_BIT}},
{VK_FORMAT_BC2_UNORM_BLOCK, {16, 4, VK_FORMAT_COMPATIBILITY_CLASS_BC2_BIT}},
{VK_FORMAT_BC2_SRGB_BLOCK, {16, 4, VK_FORMAT_COMPATIBILITY_CLASS_BC2_BIT}},
{VK_FORMAT_BC3_UNORM_BLOCK, {16, 4, VK_FORMAT_COMPATIBILITY_CLASS_BC3_BIT}},
{VK_FORMAT_BC3_SRGB_BLOCK, {16, 4, VK_FORMAT_COMPATIBILITY_CLASS_BC3_BIT}},
{VK_FORMAT_BC4_UNORM_BLOCK, {8, 4, VK_FORMAT_COMPATIBILITY_CLASS_BC4_BIT}},
{VK_FORMAT_BC4_SNORM_BLOCK, {8, 4, VK_FORMAT_COMPATIBILITY_CLASS_BC4_BIT}},
{VK_FORMAT_BC5_UNORM_BLOCK, {16, 4, VK_FORMAT_COMPATIBILITY_CLASS_BC5_BIT}},
{VK_FORMAT_BC5_SNORM_BLOCK, {16, 4, VK_FORMAT_COMPATIBILITY_CLASS_BC5_BIT}},
{VK_FORMAT_BC6H_UFLOAT_BLOCK, {16, 4, VK_FORMAT_COMPATIBILITY_CLASS_BC6H_BIT}},
{VK_FORMAT_BC6H_SFLOAT_BLOCK, {16, 4, VK_FORMAT_COMPATIBILITY_CLASS_BC6H_BIT}},
{VK_FORMAT_BC7_UNORM_BLOCK, {16, 4, VK_FORMAT_COMPATIBILITY_CLASS_BC7_BIT}},
{VK_FORMAT_BC7_SRGB_BLOCK, {16, 4, VK_FORMAT_COMPATIBILITY_CLASS_BC7_BIT}},
{VK_FORMAT_ETC2_R8G8B8_UNORM_BLOCK, {8, 3, VK_FORMAT_COMPATIBILITY_CLASS_ETC2_RGB_BIT}},
{VK_FORMAT_ETC2_R8G8B8_SRGB_BLOCK, {8, 3, VK_FORMAT_COMPATIBILITY_CLASS_ETC2_RGB_BIT}},
{VK_FORMAT_ETC2_R8G8B8A1_UNORM_BLOCK, {8, 4, VK_FORMAT_COMPATIBILITY_CLASS_ETC2_RGBA_BIT}},
{VK_FORMAT_ETC2_R8G8B8A1_SRGB_BLOCK, {8, 4, VK_FORMAT_COMPATIBILITY_CLASS_ETC2_RGBA_BIT}},
{VK_FORMAT_ETC2_R8G8B8A8_UNORM_BLOCK, {16, 4, VK_FORMAT_COMPATIBILITY_CLASS_ETC2_EAC_RGBA_BIT}},
{VK_FORMAT_ETC2_R8G8B8A8_SRGB_BLOCK, {16, 4, VK_FORMAT_COMPATIBILITY_CLASS_ETC2_EAC_RGBA_BIT}},
{VK_FORMAT_EAC_R11_UNORM_BLOCK, {8, 1, VK_FORMAT_COMPATIBILITY_CLASS_EAC_R_BIT}},
{VK_FORMAT_EAC_R11_SNORM_BLOCK, {8, 1, VK_FORMAT_COMPATIBILITY_CLASS_EAC_R_BIT}},
{VK_FORMAT_EAC_R11G11_UNORM_BLOCK, {16, 2, VK_FORMAT_COMPATIBILITY_CLASS_EAC_RG_BIT}},
{VK_FORMAT_EAC_R11G11_SNORM_BLOCK, {16, 2, VK_FORMAT_COMPATIBILITY_CLASS_EAC_RG_BIT}},
{VK_FORMAT_ASTC_4x4_UNORM_BLOCK, {16, 4, VK_FORMAT_COMPATIBILITY_CLASS_ASTC_4X4_BIT}},
{VK_FORMAT_ASTC_4x4_SRGB_BLOCK, {16, 4, VK_FORMAT_COMPATIBILITY_CLASS_ASTC_4X4_BIT}},
{VK_FORMAT_ASTC_5x4_UNORM_BLOCK, {16, 4, VK_FORMAT_COMPATIBILITY_CLASS_ASTC_5X4_BIT}},
{VK_FORMAT_ASTC_5x4_SRGB_BLOCK, {16, 4, VK_FORMAT_COMPATIBILITY_CLASS_ASTC_5X4_BIT}},
{VK_FORMAT_ASTC_5x5_UNORM_BLOCK, {16, 4, VK_FORMAT_COMPATIBILITY_CLASS_ASTC_5X5_BIT}},
{VK_FORMAT_ASTC_5x5_SRGB_BLOCK, {16, 4, VK_FORMAT_COMPATIBILITY_CLASS_ASTC_5X5_BIT}},
{VK_FORMAT_ASTC_6x5_UNORM_BLOCK, {16, 4, VK_FORMAT_COMPATIBILITY_CLASS_ASTC_6X5_BIT}},
{VK_FORMAT_ASTC_6x5_SRGB_BLOCK, {16, 4, VK_FORMAT_COMPATIBILITY_CLASS_ASTC_6X5_BIT}},
{VK_FORMAT_ASTC_6x6_UNORM_BLOCK, {16, 4, VK_FORMAT_COMPATIBILITY_CLASS_ASTC_6X6_BIT}},
{VK_FORMAT_ASTC_6x6_SRGB_BLOCK, {16, 4, VK_FORMAT_COMPATIBILITY_CLASS_ASTC_6X6_BIT}},
{VK_FORMAT_ASTC_8x5_UNORM_BLOCK, {16, 4, VK_FORMAT_COMPATIBILITY_CLASS_ASTC_8X5_BIT}},
{VK_FORMAT_ASTC_8x5_SRGB_BLOCK, {16, 4, VK_FORMAT_COMPATIBILITY_CLASS_ASTC_8X5_BIT}},
{VK_FORMAT_ASTC_8x6_UNORM_BLOCK, {16, 4, VK_FORMAT_COMPATIBILITY_CLASS_ASTC_8X6_BIT}},
{VK_FORMAT_ASTC_8x6_SRGB_BLOCK, {16, 4, VK_FORMAT_COMPATIBILITY_CLASS_ASTC_8X6_BIT}},
{VK_FORMAT_ASTC_8x8_UNORM_BLOCK, {16, 4, VK_FORMAT_COMPATIBILITY_CLASS_ASTC_8X8_BIT}},
{VK_FORMAT_ASTC_8x8_SRGB_BLOCK, {16, 4, VK_FORMAT_COMPATIBILITY_CLASS_ASTC_8X8_BIT}},
{VK_FORMAT_ASTC_10x5_UNORM_BLOCK, {16, 4, VK_FORMAT_COMPATIBILITY_CLASS_ASTC_10X5_BIT}},
{VK_FORMAT_ASTC_10x5_SRGB_BLOCK, {16, 4, VK_FORMAT_COMPATIBILITY_CLASS_ASTC_10X5_BIT}},
{VK_FORMAT_ASTC_10x6_UNORM_BLOCK, {16, 4, VK_FORMAT_COMPATIBILITY_CLASS_ASTC_10X6_BIT}},
{VK_FORMAT_ASTC_10x6_SRGB_BLOCK, {16, 4, VK_FORMAT_COMPATIBILITY_CLASS_ASTC_10X6_BIT}},
{VK_FORMAT_ASTC_10x8_UNORM_BLOCK, {16, 4, VK_FORMAT_COMPATIBILITY_CLASS_ASTC_10X8_BIT}},
{VK_FORMAT_ASTC_10x8_SRGB_BLOCK, {16, 4, VK_FORMAT_COMPATIBILITY_CLASS_ASTC_10X8_BIT}},
{VK_FORMAT_ASTC_10x10_UNORM_BLOCK, {16, 4, VK_FORMAT_COMPATIBILITY_CLASS_ASTC_10X10_BIT}},
{VK_FORMAT_ASTC_10x10_SRGB_BLOCK, {16, 4, VK_FORMAT_COMPATIBILITY_CLASS_ASTC_10X10_BIT}},
{VK_FORMAT_ASTC_12x10_UNORM_BLOCK, {16, 4, VK_FORMAT_COMPATIBILITY_CLASS_ASTC_12X10_BIT}},
{VK_FORMAT_ASTC_12x10_SRGB_BLOCK, {16, 4, VK_FORMAT_COMPATIBILITY_CLASS_ASTC_12X10_BIT}},
{VK_FORMAT_ASTC_12x12_UNORM_BLOCK, {16, 4, VK_FORMAT_COMPATIBILITY_CLASS_ASTC_12X12_BIT}},
{VK_FORMAT_ASTC_12x12_SRGB_BLOCK, {16, 4, VK_FORMAT_COMPATIBILITY_CLASS_ASTC_12X12_BIT}},
{VK_FORMAT_PVRTC1_2BPP_UNORM_BLOCK_IMG, {8, 4, VK_FORMAT_COMPATIBILITY_CLASS_PVRTC1_2BPP_BIT}},
{VK_FORMAT_PVRTC1_4BPP_UNORM_BLOCK_IMG, {8, 4, VK_FORMAT_COMPATIBILITY_CLASS_PVRTC1_4BPP_BIT}},
{VK_FORMAT_PVRTC2_2BPP_UNORM_BLOCK_IMG, {8, 4, VK_FORMAT_COMPATIBILITY_CLASS_PVRTC2_2BPP_BIT}},
{VK_FORMAT_PVRTC2_4BPP_UNORM_BLOCK_IMG, {8, 4, VK_FORMAT_COMPATIBILITY_CLASS_PVRTC2_4BPP_BIT}},
{VK_FORMAT_PVRTC1_2BPP_SRGB_BLOCK_IMG, {8, 4, VK_FORMAT_COMPATIBILITY_CLASS_PVRTC1_2BPP_BIT}},
{VK_FORMAT_PVRTC1_4BPP_SRGB_BLOCK_IMG, {8, 4, VK_FORMAT_COMPATIBILITY_CLASS_PVRTC1_4BPP_BIT}},
{VK_FORMAT_PVRTC2_2BPP_SRGB_BLOCK_IMG, {8, 4, VK_FORMAT_COMPATIBILITY_CLASS_PVRTC2_2BPP_BIT}},
{VK_FORMAT_PVRTC2_4BPP_SRGB_BLOCK_IMG, {8, 4, VK_FORMAT_COMPATIBILITY_CLASS_PVRTC2_4BPP_BIT}},
/* KHR_sampler_YCbCr_conversion */
{VK_FORMAT_G8B8G8R8_422_UNORM_KHR, {4, 4, VK_FORMAT_COMPATIBILITY_CLASS_32BIT_G8B8G8R8}},
{VK_FORMAT_B8G8R8G8_422_UNORM_KHR, {4, 4, VK_FORMAT_COMPATIBILITY_CLASS_32BIT_B8G8R8G8}},
{VK_FORMAT_R10X6G10X6B10X6A10X6_UNORM_4PACK16_KHR, {8, 4, VK_FORMAT_COMPATIBILITY_CLASS_64BIT_R10G10B10A10}},
{VK_FORMAT_G10X6B10X6G10X6R10X6_422_UNORM_4PACK16_KHR, {8, 4, VK_FORMAT_COMPATIBILITY_CLASS_64BIT_G10B10G10R10}},
{VK_FORMAT_B10X6G10X6R10X6G10X6_422_UNORM_4PACK16_KHR, {8, 4, VK_FORMAT_COMPATIBILITY_CLASS_64BIT_B10G10R10G10}},
{VK_FORMAT_R12X4G12X4B12X4A12X4_UNORM_4PACK16_KHR, {8, 4, VK_FORMAT_COMPATIBILITY_CLASS_64BIT_R12G12B12A12}},
{VK_FORMAT_G12X4B12X4G12X4R12X4_422_UNORM_4PACK16_KHR, {8, 4, VK_FORMAT_COMPATIBILITY_CLASS_64BIT_G12B12G12R12}},
{VK_FORMAT_B12X4G12X4R12X4G12X4_422_UNORM_4PACK16_KHR, {8, 4, VK_FORMAT_COMPATIBILITY_CLASS_64BIT_B12G12R12G12}},
{VK_FORMAT_G16B16G16R16_422_UNORM_KHR, {8, 4, VK_FORMAT_COMPATIBILITY_CLASS_64BIT_G16B16G16R16}},
{VK_FORMAT_B16G16R16G16_422_UNORM_KHR, {8, 4, VK_FORMAT_COMPATIBILITY_CLASS_64BIT_B16G16R16G16}}
#if 0 // TBD - Figure out what 'size' means for multi-planar formats
{VK_FORMAT_G8_B8_R8_3PLANE_420_UNORM_KHR, {0, 3, VK_FORMAT_COMPATIBILITY_CLASS_8BIT_3PLANE_420}},
{VK_FORMAT_G8_B8R8_2PLANE_420_UNORM_KHR, {0, 3, VK_FORMAT_COMPATIBILITY_CLASS_8BIT_2PLANE_420}},
{VK_FORMAT_G8_B8_R8_3PLANE_422_UNORM_KHR, {0, 3, VK_FORMAT_COMPATIBILITY_CLASS_8BIT_3PLANE_422}},
{VK_FORMAT_G8_B8R8_2PLANE_422_UNORM_KHR, {0, 3, VK_FORMAT_COMPATIBILITY_CLASS_8BIT_2PLANE_422}},
{VK_FORMAT_G8_B8_R8_3PLANE_444_UNORM_KHR, {0, 3, VK_FORMAT_COMPATIBILITY_CLASS_8BIT_3PLANE_444}},
{VK_FORMAT_G10X6_B10X6_R10X6_3PLANE_420_UNORM_3PACK16_KHR, {0, 3, VK_FORMAT_COMPATIBILITY_CLASS_10BIT_3PLANE_420}},
{VK_FORMAT_G10X6_B10X6R10X6_2PLANE_420_UNORM_3PACK16_KHR, {0, 3, VK_FORMAT_COMPATIBILITY_CLASS_10BIT_2PLANE_420}},
{VK_FORMAT_G10X6_B10X6_R10X6_3PLANE_422_UNORM_3PACK16_KHR, {0, 3, VK_FORMAT_COMPATIBILITY_CLASS_10BIT_3PLANE_422}},
{VK_FORMAT_G10X6_B10X6R10X6_2PLANE_422_UNORM_3PACK16_KHR, {0, 3, VK_FORMAT_COMPATIBILITY_CLASS_10BIT_2PLANE_422}},
{VK_FORMAT_G10X6_B10X6_R10X6_3PLANE_444_UNORM_3PACK16_KHR, {0, 3, VK_FORMAT_COMPATIBILITY_CLASS_10BIT_3PLANE_444}},
{VK_FORMAT_G12X4_B12X4_R12X4_3PLANE_420_UNORM_3PACK16_KHR, {0, 3, VK_FORMAT_COMPATIBILITY_CLASS_12BIT_3PLANE_420}},
{VK_FORMAT_G12X4_B12X4R12X4_2PLANE_420_UNORM_3PACK16_KHR, {0, 3, VK_FORMAT_COMPATIBILITY_CLASS_12BIT_2PLANE_420}},
{VK_FORMAT_G12X4_B12X4_R12X4_3PLANE_422_UNORM_3PACK16_KHR, {0, 3, VK_FORMAT_COMPATIBILITY_CLASS_12BIT_3PLANE_422}},
{VK_FORMAT_G12X4_B12X4R12X4_2PLANE_422_UNORM_3PACK16_KHR, {0, 3, VK_FORMAT_COMPATIBILITY_CLASS_12BIT_2PLANE_422}},
{VK_FORMAT_G12X4_B12X4_R12X4_3PLANE_444_UNORM_3PACK16_KHR, {0, 3, VK_FORMAT_COMPATIBILITY_CLASS_12BIT_3PLANE_444}},
{VK_FORMAT_G16_B16_R16_3PLANE_420_UNORM_KHR, {0, 3, VK_FORMAT_COMPATIBILITY_CLASS_16BIT_3PLANE_420}},
{VK_FORMAT_G16_B16R16_2PLANE_420_UNORM_KHR, {0, 3, VK_FORMAT_COMPATIBILITY_CLASS_16BIT_2PLANE_420}},
{VK_FORMAT_G16_B16_R16_3PLANE_422_UNORM_KHR, {0, 3, VK_FORMAT_COMPATIBILITY_CLASS_16BIT_3PLANE_422}},
{VK_FORMAT_G16_B16R16_2PLANE_422_UNORM_KHR, {0, 3, VK_FORMAT_COMPATIBILITY_CLASS_16BIT_2PLANE_422}},
{VK_FORMAT_G16_B16_R16_3PLANE_444_UNORM_KHR, {0, 3, VK_FORMAT_COMPATIBILITY_CLASS_16BIT_3PLANE_444}}
#endif
};
// Re-enable formatting
// clang-format on
// Return true if format is an ETC2 or EAC compressed texture format
VK_LAYER_EXPORT bool FormatIsCompressed_ETC2_EAC(VkFormat format) {
bool found = false;
switch (format) {
case VK_FORMAT_ETC2_R8G8B8_UNORM_BLOCK:
case VK_FORMAT_ETC2_R8G8B8_SRGB_BLOCK:
case VK_FORMAT_ETC2_R8G8B8A1_UNORM_BLOCK:
case VK_FORMAT_ETC2_R8G8B8A1_SRGB_BLOCK:
case VK_FORMAT_ETC2_R8G8B8A8_UNORM_BLOCK:
case VK_FORMAT_ETC2_R8G8B8A8_SRGB_BLOCK:
case VK_FORMAT_EAC_R11_UNORM_BLOCK:
case VK_FORMAT_EAC_R11_SNORM_BLOCK:
case VK_FORMAT_EAC_R11G11_UNORM_BLOCK:
case VK_FORMAT_EAC_R11G11_SNORM_BLOCK:
found = true;
break;
default:
break;
}
return found;
}
// Return true if format is an ASTC compressed texture format
VK_LAYER_EXPORT bool FormatIsCompressed_ASTC_LDR(VkFormat format) {
bool found = false;
switch (format) {
case VK_FORMAT_ASTC_4x4_UNORM_BLOCK:
case VK_FORMAT_ASTC_4x4_SRGB_BLOCK:
case VK_FORMAT_ASTC_5x4_UNORM_BLOCK:
case VK_FORMAT_ASTC_5x4_SRGB_BLOCK:
case VK_FORMAT_ASTC_5x5_UNORM_BLOCK:
case VK_FORMAT_ASTC_5x5_SRGB_BLOCK:
case VK_FORMAT_ASTC_6x5_UNORM_BLOCK:
case VK_FORMAT_ASTC_6x5_SRGB_BLOCK:
case VK_FORMAT_ASTC_6x6_UNORM_BLOCK:
case VK_FORMAT_ASTC_6x6_SRGB_BLOCK:
case VK_FORMAT_ASTC_8x5_UNORM_BLOCK:
case VK_FORMAT_ASTC_8x5_SRGB_BLOCK:
case VK_FORMAT_ASTC_8x6_UNORM_BLOCK:
case VK_FORMAT_ASTC_8x6_SRGB_BLOCK:
case VK_FORMAT_ASTC_8x8_UNORM_BLOCK:
case VK_FORMAT_ASTC_8x8_SRGB_BLOCK:
case VK_FORMAT_ASTC_10x5_UNORM_BLOCK:
case VK_FORMAT_ASTC_10x5_SRGB_BLOCK:
case VK_FORMAT_ASTC_10x6_UNORM_BLOCK:
case VK_FORMAT_ASTC_10x6_SRGB_BLOCK:
case VK_FORMAT_ASTC_10x8_UNORM_BLOCK:
case VK_FORMAT_ASTC_10x8_SRGB_BLOCK:
case VK_FORMAT_ASTC_10x10_UNORM_BLOCK:
case VK_FORMAT_ASTC_10x10_SRGB_BLOCK:
case VK_FORMAT_ASTC_12x10_UNORM_BLOCK:
case VK_FORMAT_ASTC_12x10_SRGB_BLOCK:
case VK_FORMAT_ASTC_12x12_UNORM_BLOCK:
case VK_FORMAT_ASTC_12x12_SRGB_BLOCK:
found = true;
break;
default:
break;
}
return found;
}
// Return true if format is a BC compressed texture format
VK_LAYER_EXPORT bool FormatIsCompressed_BC(VkFormat format) {
bool found = false;
switch (format) {
case VK_FORMAT_BC1_RGB_UNORM_BLOCK:
case VK_FORMAT_BC1_RGB_SRGB_BLOCK:
case VK_FORMAT_BC1_RGBA_UNORM_BLOCK:
case VK_FORMAT_BC1_RGBA_SRGB_BLOCK:
case VK_FORMAT_BC2_UNORM_BLOCK:
case VK_FORMAT_BC2_SRGB_BLOCK:
case VK_FORMAT_BC3_UNORM_BLOCK:
case VK_FORMAT_BC3_SRGB_BLOCK:
case VK_FORMAT_BC4_UNORM_BLOCK:
case VK_FORMAT_BC4_SNORM_BLOCK:
case VK_FORMAT_BC5_UNORM_BLOCK:
case VK_FORMAT_BC5_SNORM_BLOCK:
case VK_FORMAT_BC6H_UFLOAT_BLOCK:
case VK_FORMAT_BC6H_SFLOAT_BLOCK:
case VK_FORMAT_BC7_UNORM_BLOCK:
case VK_FORMAT_BC7_SRGB_BLOCK:
found = true;
break;
default:
break;
}
return found;
}
// Return true if format is a PVRTC compressed texture format
VK_LAYER_EXPORT bool FormatIsCompressed_PVRTC(VkFormat format) {
bool found = false;
switch (format) {
case VK_FORMAT_PVRTC1_2BPP_UNORM_BLOCK_IMG:
case VK_FORMAT_PVRTC1_4BPP_UNORM_BLOCK_IMG:
case VK_FORMAT_PVRTC2_2BPP_UNORM_BLOCK_IMG:
case VK_FORMAT_PVRTC2_4BPP_UNORM_BLOCK_IMG:
case VK_FORMAT_PVRTC1_2BPP_SRGB_BLOCK_IMG:
case VK_FORMAT_PVRTC1_4BPP_SRGB_BLOCK_IMG:
case VK_FORMAT_PVRTC2_2BPP_SRGB_BLOCK_IMG:
case VK_FORMAT_PVRTC2_4BPP_SRGB_BLOCK_IMG:
found = true;
break;
default:
break;
}
return found;
}
// Single-plane "_422" formats are treated as 2x1 compressed (for copies)
VK_LAYER_EXPORT bool FormatIsSinglePlane_422(VkFormat format) {
bool found = false;
switch (format) {
case VK_FORMAT_G8B8G8R8_422_UNORM_KHR:
case VK_FORMAT_B8G8R8G8_422_UNORM_KHR:
case VK_FORMAT_G10X6B10X6G10X6R10X6_422_UNORM_4PACK16_KHR:
case VK_FORMAT_B10X6G10X6R10X6G10X6_422_UNORM_4PACK16_KHR:
case VK_FORMAT_G12X4B12X4G12X4R12X4_422_UNORM_4PACK16_KHR:
case VK_FORMAT_B12X4G12X4R12X4G12X4_422_UNORM_4PACK16_KHR:
case VK_FORMAT_G16B16G16R16_422_UNORM_KHR:
case VK_FORMAT_B16G16R16G16_422_UNORM_KHR:
found = true;
break;
default:
break;
}
return found;
}
// Return true if format is compressed
VK_LAYER_EXPORT bool FormatIsCompressed(VkFormat format) {
return (FormatIsCompressed_ASTC_LDR(format) || FormatIsCompressed_BC(format) || FormatIsCompressed_ETC2_EAC(format) ||
FormatIsCompressed_PVRTC(format));
}
// Return true if format is packed
VK_LAYER_EXPORT bool FormatIsPacked(VkFormat format) {
bool found = false;
switch (format) {
case VK_FORMAT_R4G4_UNORM_PACK8:
case VK_FORMAT_R4G4B4A4_UNORM_PACK16:
case VK_FORMAT_B4G4R4A4_UNORM_PACK16:
case VK_FORMAT_R5G6B5_UNORM_PACK16:
case VK_FORMAT_B5G6R5_UNORM_PACK16:
case VK_FORMAT_R5G5B5A1_UNORM_PACK16:
case VK_FORMAT_B5G5R5A1_UNORM_PACK16:
case VK_FORMAT_A1R5G5B5_UNORM_PACK16:
case VK_FORMAT_A8B8G8R8_UNORM_PACK32:
case VK_FORMAT_A8B8G8R8_SNORM_PACK32:
case VK_FORMAT_A8B8G8R8_USCALED_PACK32:
case VK_FORMAT_A8B8G8R8_SSCALED_PACK32:
case VK_FORMAT_A8B8G8R8_UINT_PACK32:
case VK_FORMAT_A8B8G8R8_SINT_PACK32:
case VK_FORMAT_A8B8G8R8_SRGB_PACK32:
case VK_FORMAT_A2R10G10B10_UNORM_PACK32:
case VK_FORMAT_A2R10G10B10_SNORM_PACK32:
case VK_FORMAT_A2R10G10B10_USCALED_PACK32:
case VK_FORMAT_A2R10G10B10_SSCALED_PACK32:
case VK_FORMAT_A2R10G10B10_UINT_PACK32:
case VK_FORMAT_A2R10G10B10_SINT_PACK32:
case VK_FORMAT_A2B10G10R10_UNORM_PACK32:
case VK_FORMAT_A2B10G10R10_SNORM_PACK32:
case VK_FORMAT_A2B10G10R10_USCALED_PACK32:
case VK_FORMAT_A2B10G10R10_SSCALED_PACK32:
case VK_FORMAT_A2B10G10R10_UINT_PACK32:
case VK_FORMAT_A2B10G10R10_SINT_PACK32:
case VK_FORMAT_B10G11R11_UFLOAT_PACK32:
case VK_FORMAT_E5B9G9R9_UFLOAT_PACK32:
case VK_FORMAT_X8_D24_UNORM_PACK32:
found = true;
break;
default:
break;
}
return found;
}
// Return true if format is a depth or stencil format
VK_LAYER_EXPORT bool FormatIsDepthOrStencil(VkFormat format) {
return (FormatIsDepthAndStencil(format) || FormatIsDepthOnly(format) || FormatIsStencilOnly(format));
}
// Return true if format contains depth and stencil information
VK_LAYER_EXPORT bool FormatIsDepthAndStencil(VkFormat format) {
bool is_ds = false;
switch (format) {
case VK_FORMAT_D16_UNORM_S8_UINT:
case VK_FORMAT_D24_UNORM_S8_UINT:
case VK_FORMAT_D32_SFLOAT_S8_UINT:
is_ds = true;
break;
default:
break;
}
return is_ds;
}
// Return true if format is a stencil-only format
VK_LAYER_EXPORT bool FormatIsStencilOnly(VkFormat format) { return (format == VK_FORMAT_S8_UINT); }
// Return true if format is a depth-only format
VK_LAYER_EXPORT bool FormatIsDepthOnly(VkFormat format) {
bool is_depth = false;
switch (format) {
case VK_FORMAT_D16_UNORM:
case VK_FORMAT_X8_D24_UNORM_PACK32:
case VK_FORMAT_D32_SFLOAT:
is_depth = true;
break;
default:
break;
}
return is_depth;
}
// Return true if format is of type NORM
VK_LAYER_EXPORT bool FormatIsNorm(VkFormat format) {
bool is_norm = false;
switch (format) {
case VK_FORMAT_R4G4_UNORM_PACK8:
case VK_FORMAT_R4G4B4A4_UNORM_PACK16:
case VK_FORMAT_R5G6B5_UNORM_PACK16:
case VK_FORMAT_R5G5B5A1_UNORM_PACK16:
case VK_FORMAT_A1R5G5B5_UNORM_PACK16:
case VK_FORMAT_R8_UNORM:
case VK_FORMAT_R8_SNORM:
case VK_FORMAT_R8G8_UNORM:
case VK_FORMAT_R8G8_SNORM:
case VK_FORMAT_R8G8B8_UNORM:
case VK_FORMAT_R8G8B8_SNORM:
case VK_FORMAT_R8G8B8A8_UNORM:
case VK_FORMAT_R8G8B8A8_SNORM:
case VK_FORMAT_A8B8G8R8_UNORM_PACK32:
case VK_FORMAT_A8B8G8R8_SNORM_PACK32:
case VK_FORMAT_A2B10G10R10_UNORM_PACK32:
case VK_FORMAT_A2B10G10R10_SNORM_PACK32:
case VK_FORMAT_R16_UNORM:
case VK_FORMAT_R16_SNORM:
case VK_FORMAT_R16G16_UNORM:
case VK_FORMAT_R16G16_SNORM:
case VK_FORMAT_R16G16B16_UNORM:
case VK_FORMAT_R16G16B16_SNORM:
case VK_FORMAT_R16G16B16A16_UNORM:
case VK_FORMAT_R16G16B16A16_SNORM:
case VK_FORMAT_BC1_RGB_UNORM_BLOCK:
case VK_FORMAT_BC2_UNORM_BLOCK:
case VK_FORMAT_BC3_UNORM_BLOCK:
case VK_FORMAT_BC4_UNORM_BLOCK:
case VK_FORMAT_BC4_SNORM_BLOCK:
case VK_FORMAT_BC5_UNORM_BLOCK:
case VK_FORMAT_BC5_SNORM_BLOCK:
case VK_FORMAT_BC7_UNORM_BLOCK:
case VK_FORMAT_ETC2_R8G8B8_UNORM_BLOCK:
case VK_FORMAT_ETC2_R8G8B8A1_UNORM_BLOCK:
case VK_FORMAT_ETC2_R8G8B8A8_UNORM_BLOCK:
case VK_FORMAT_EAC_R11_UNORM_BLOCK:
case VK_FORMAT_EAC_R11_SNORM_BLOCK:
case VK_FORMAT_EAC_R11G11_UNORM_BLOCK:
case VK_FORMAT_EAC_R11G11_SNORM_BLOCK:
case VK_FORMAT_ASTC_4x4_UNORM_BLOCK:
case VK_FORMAT_ASTC_5x4_UNORM_BLOCK:
case VK_FORMAT_ASTC_5x5_UNORM_BLOCK:
case VK_FORMAT_ASTC_6x5_UNORM_BLOCK:
case VK_FORMAT_ASTC_6x6_UNORM_BLOCK:
case VK_FORMAT_ASTC_8x5_UNORM_BLOCK:
case VK_FORMAT_ASTC_8x6_UNORM_BLOCK:
case VK_FORMAT_ASTC_8x8_UNORM_BLOCK:
case VK_FORMAT_ASTC_10x5_UNORM_BLOCK:
case VK_FORMAT_ASTC_10x6_UNORM_BLOCK:
case VK_FORMAT_ASTC_10x8_UNORM_BLOCK:
case VK_FORMAT_ASTC_10x10_UNORM_BLOCK:
case VK_FORMAT_ASTC_12x10_UNORM_BLOCK:
case VK_FORMAT_ASTC_12x12_UNORM_BLOCK:
case VK_FORMAT_B5G6R5_UNORM_PACK16:
case VK_FORMAT_B8G8R8_UNORM:
case VK_FORMAT_B8G8R8_SNORM:
case VK_FORMAT_B8G8R8A8_UNORM:
case VK_FORMAT_B8G8R8A8_SNORM:
case VK_FORMAT_A2R10G10B10_UNORM_PACK32:
case VK_FORMAT_A2R10G10B10_SNORM_PACK32:
is_norm = true;
break;
default:
break;
}
return is_norm;
}
// Return true if format is of type UNORM
VK_LAYER_EXPORT bool FormatIsUNorm(VkFormat format) {
bool is_unorm = false;
switch (format) {
case VK_FORMAT_R4G4_UNORM_PACK8:
case VK_FORMAT_R4G4B4A4_UNORM_PACK16:
case VK_FORMAT_R5G6B5_UNORM_PACK16:
case VK_FORMAT_R5G5B5A1_UNORM_PACK16:
case VK_FORMAT_A1R5G5B5_UNORM_PACK16:
case VK_FORMAT_R8_UNORM:
case VK_FORMAT_R8G8_UNORM:
case VK_FORMAT_R8G8B8_UNORM:
case VK_FORMAT_R8G8B8A8_UNORM:
case VK_FORMAT_A8B8G8R8_UNORM_PACK32:
case VK_FORMAT_A2B10G10R10_UNORM_PACK32:
case VK_FORMAT_R16_UNORM:
case VK_FORMAT_R16G16_UNORM:
case VK_FORMAT_R16G16B16_UNORM:
case VK_FORMAT_R16G16B16A16_UNORM:
case VK_FORMAT_BC1_RGB_UNORM_BLOCK:
case VK_FORMAT_BC2_UNORM_BLOCK:
case VK_FORMAT_BC3_UNORM_BLOCK:
case VK_FORMAT_BC4_UNORM_BLOCK:
case VK_FORMAT_BC5_UNORM_BLOCK:
case VK_FORMAT_BC7_UNORM_BLOCK:
case VK_FORMAT_ETC2_R8G8B8_UNORM_BLOCK:
case VK_FORMAT_ETC2_R8G8B8A1_UNORM_BLOCK:
case VK_FORMAT_ETC2_R8G8B8A8_UNORM_BLOCK:
case VK_FORMAT_EAC_R11_UNORM_BLOCK:
case VK_FORMAT_EAC_R11G11_UNORM_BLOCK:
case VK_FORMAT_ASTC_4x4_UNORM_BLOCK:
case VK_FORMAT_ASTC_5x4_UNORM_BLOCK:
case VK_FORMAT_ASTC_5x5_UNORM_BLOCK:
case VK_FORMAT_ASTC_6x5_UNORM_BLOCK:
case VK_FORMAT_ASTC_6x6_UNORM_BLOCK:
case VK_FORMAT_ASTC_8x5_UNORM_BLOCK:
case VK_FORMAT_ASTC_8x6_UNORM_BLOCK:
case VK_FORMAT_ASTC_8x8_UNORM_BLOCK:
case VK_FORMAT_ASTC_10x5_UNORM_BLOCK:
case VK_FORMAT_ASTC_10x6_UNORM_BLOCK:
case VK_FORMAT_ASTC_10x8_UNORM_BLOCK:
case VK_FORMAT_ASTC_10x10_UNORM_BLOCK:
case VK_FORMAT_ASTC_12x10_UNORM_BLOCK:
case VK_FORMAT_ASTC_12x12_UNORM_BLOCK:
case VK_FORMAT_B5G6R5_UNORM_PACK16:
case VK_FORMAT_B8G8R8_UNORM:
case VK_FORMAT_B8G8R8A8_UNORM:
case VK_FORMAT_A2R10G10B10_UNORM_PACK32:
is_unorm = true;
break;
default:
break;
}
return is_unorm;
}
// Return true if format is of type SNORM
VK_LAYER_EXPORT bool FormatIsSNorm(VkFormat format) {
bool is_snorm = false;
switch (format) {
case VK_FORMAT_R8_SNORM:
case VK_FORMAT_R8G8_SNORM:
case VK_FORMAT_R8G8B8_SNORM:
case VK_FORMAT_R8G8B8A8_SNORM:
case VK_FORMAT_A8B8G8R8_SNORM_PACK32:
case VK_FORMAT_A2B10G10R10_SNORM_PACK32:
case VK_FORMAT_R16_SNORM:
case VK_FORMAT_R16G16_SNORM:
case VK_FORMAT_R16G16B16_SNORM:
case VK_FORMAT_R16G16B16A16_SNORM:
case VK_FORMAT_BC4_SNORM_BLOCK:
case VK_FORMAT_BC5_SNORM_BLOCK:
case VK_FORMAT_EAC_R11_SNORM_BLOCK:
case VK_FORMAT_EAC_R11G11_SNORM_BLOCK:
case VK_FORMAT_B8G8R8_SNORM:
case VK_FORMAT_B8G8R8A8_SNORM:
case VK_FORMAT_A2R10G10B10_SNORM_PACK32:
is_snorm = true;
break;
default:
break;
}
return is_snorm;
}
// Return true if format is an integer format
VK_LAYER_EXPORT bool FormatIsInt(VkFormat format) { return (FormatIsSInt(format) || FormatIsUInt(format)); }
// Return true if format is an unsigned integer format
VK_LAYER_EXPORT bool FormatIsUInt(VkFormat format) {
bool is_uint = false;
switch (format) {
case VK_FORMAT_R8_UINT:
case VK_FORMAT_S8_UINT:
case VK_FORMAT_R8G8_UINT:
case VK_FORMAT_R8G8B8_UINT:
case VK_FORMAT_R8G8B8A8_UINT:
case VK_FORMAT_A8B8G8R8_UINT_PACK32:
case VK_FORMAT_A2B10G10R10_UINT_PACK32:
case VK_FORMAT_R16_UINT:
case VK_FORMAT_R16G16_UINT:
case VK_FORMAT_R16G16B16_UINT:
case VK_FORMAT_R16G16B16A16_UINT:
case VK_FORMAT_R32_UINT:
case VK_FORMAT_R32G32_UINT:
case VK_FORMAT_R32G32B32_UINT:
case VK_FORMAT_R32G32B32A32_UINT:
case VK_FORMAT_R64_UINT:
case VK_FORMAT_R64G64_UINT:
case VK_FORMAT_R64G64B64_UINT:
case VK_FORMAT_R64G64B64A64_UINT:
case VK_FORMAT_B8G8R8_UINT:
case VK_FORMAT_B8G8R8A8_UINT:
case VK_FORMAT_A2R10G10B10_UINT_PACK32:
is_uint = true;
break;
default:
break;
}
return is_uint;
}
// Return true if format is a signed integer format
VK_LAYER_EXPORT bool FormatIsSInt(VkFormat format) {
bool is_sint = false;
switch (format) {
case VK_FORMAT_R8_SINT:
case VK_FORMAT_R8G8_SINT:
case VK_FORMAT_R8G8B8_SINT:
case VK_FORMAT_R8G8B8A8_SINT:
case VK_FORMAT_A8B8G8R8_SINT_PACK32:
case VK_FORMAT_A2B10G10R10_SINT_PACK32:
case VK_FORMAT_R16_SINT:
case VK_FORMAT_R16G16_SINT:
case VK_FORMAT_R16G16B16_SINT:
case VK_FORMAT_R16G16B16A16_SINT:
case VK_FORMAT_R32_SINT:
case VK_FORMAT_R32G32_SINT:
case VK_FORMAT_R32G32B32_SINT:
case VK_FORMAT_R32G32B32A32_SINT:
case VK_FORMAT_R64_SINT:
case VK_FORMAT_R64G64_SINT:
case VK_FORMAT_R64G64B64_SINT:
case VK_FORMAT_R64G64B64A64_SINT:
case VK_FORMAT_B8G8R8_SINT:
case VK_FORMAT_B8G8R8A8_SINT:
case VK_FORMAT_A2R10G10B10_SINT_PACK32:
is_sint = true;
break;
default:
break;
}
return is_sint;
}
// Return true if format is a floating-point format
VK_LAYER_EXPORT bool FormatIsFloat(VkFormat format) {
bool is_float = false;
switch (format) {
case VK_FORMAT_R16_SFLOAT:
case VK_FORMAT_R16G16_SFLOAT:
case VK_FORMAT_R16G16B16_SFLOAT:
case VK_FORMAT_R16G16B16A16_SFLOAT:
case VK_FORMAT_R32_SFLOAT:
case VK_FORMAT_R32G32_SFLOAT:
case VK_FORMAT_R32G32B32_SFLOAT:
case VK_FORMAT_R32G32B32A32_SFLOAT:
case VK_FORMAT_R64_SFLOAT:
case VK_FORMAT_R64G64_SFLOAT:
case VK_FORMAT_R64G64B64_SFLOAT:
case VK_FORMAT_R64G64B64A64_SFLOAT:
case VK_FORMAT_B10G11R11_UFLOAT_PACK32:
case VK_FORMAT_E5B9G9R9_UFLOAT_PACK32:
case VK_FORMAT_BC6H_UFLOAT_BLOCK:
case VK_FORMAT_BC6H_SFLOAT_BLOCK:
is_float = true;
break;
default:
break;
}
return is_float;
}
// Return true if format is in the SRGB colorspace
VK_LAYER_EXPORT bool FormatIsSRGB(VkFormat format) {
bool is_srgb = false;
switch (format) {
case VK_FORMAT_R8_SRGB:
case VK_FORMAT_R8G8_SRGB:
case VK_FORMAT_R8G8B8_SRGB:
case VK_FORMAT_R8G8B8A8_SRGB:
case VK_FORMAT_A8B8G8R8_SRGB_PACK32:
case VK_FORMAT_BC1_RGB_SRGB_BLOCK:
case VK_FORMAT_BC2_SRGB_BLOCK:
case VK_FORMAT_BC3_SRGB_BLOCK:
case VK_FORMAT_BC7_SRGB_BLOCK:
case VK_FORMAT_ETC2_R8G8B8_SRGB_BLOCK:
case VK_FORMAT_ETC2_R8G8B8A1_SRGB_BLOCK:
case VK_FORMAT_ETC2_R8G8B8A8_SRGB_BLOCK:
case VK_FORMAT_ASTC_4x4_SRGB_BLOCK:
case VK_FORMAT_ASTC_5x4_SRGB_BLOCK:
case VK_FORMAT_ASTC_5x5_SRGB_BLOCK:
case VK_FORMAT_ASTC_6x5_SRGB_BLOCK:
case VK_FORMAT_ASTC_6x6_SRGB_BLOCK:
case VK_FORMAT_ASTC_8x5_SRGB_BLOCK:
case VK_FORMAT_ASTC_8x6_SRGB_BLOCK:
case VK_FORMAT_ASTC_8x8_SRGB_BLOCK:
case VK_FORMAT_ASTC_10x5_SRGB_BLOCK:
case VK_FORMAT_ASTC_10x6_SRGB_BLOCK:
case VK_FORMAT_ASTC_10x8_SRGB_BLOCK:
case VK_FORMAT_ASTC_10x10_SRGB_BLOCK:
case VK_FORMAT_ASTC_12x10_SRGB_BLOCK:
case VK_FORMAT_ASTC_12x12_SRGB_BLOCK:
case VK_FORMAT_B8G8R8_SRGB:
case VK_FORMAT_B8G8R8A8_SRGB:
is_srgb = true;
break;
default:
break;
}
return is_srgb;
}
// Return true if format is a USCALED format
VK_LAYER_EXPORT bool FormatIsUScaled(VkFormat format) {
bool is_uscaled = false;
switch (format) {
case VK_FORMAT_R8_USCALED:
case VK_FORMAT_R8G8_USCALED:
case VK_FORMAT_R8G8B8_USCALED:
case VK_FORMAT_B8G8R8_USCALED:
case VK_FORMAT_R8G8B8A8_USCALED:
case VK_FORMAT_B8G8R8A8_USCALED:
case VK_FORMAT_A8B8G8R8_USCALED_PACK32:
case VK_FORMAT_A2R10G10B10_USCALED_PACK32:
case VK_FORMAT_A2B10G10R10_USCALED_PACK32:
case VK_FORMAT_R16_USCALED:
case VK_FORMAT_R16G16_USCALED:
case VK_FORMAT_R16G16B16_USCALED:
case VK_FORMAT_R16G16B16A16_USCALED:
is_uscaled = true;
break;
default:
break;
}
return is_uscaled;
}
// Return true if format is a SSCALED format
VK_LAYER_EXPORT bool FormatIsSScaled(VkFormat format) {
bool is_sscaled = false;
switch (format) {
case VK_FORMAT_R8_SSCALED:
case VK_FORMAT_R8G8_SSCALED:
case VK_FORMAT_R8G8B8_SSCALED:
case VK_FORMAT_B8G8R8_SSCALED:
case VK_FORMAT_R8G8B8A8_SSCALED:
case VK_FORMAT_B8G8R8A8_SSCALED:
case VK_FORMAT_A8B8G8R8_SSCALED_PACK32:
case VK_FORMAT_A2R10G10B10_SSCALED_PACK32:
case VK_FORMAT_A2B10G10R10_SSCALED_PACK32:
case VK_FORMAT_R16_SSCALED:
case VK_FORMAT_R16G16_SSCALED:
case VK_FORMAT_R16G16B16_SSCALED:
case VK_FORMAT_R16G16B16A16_SSCALED:
is_sscaled = true;
break;
default:
break;
}
return is_sscaled;
}
// Return the compressed texel block extent for block-compressed formats
VK_LAYER_EXPORT VkExtent3D FormatCompressedTexelBlockExtent(VkFormat format) {
VkExtent3D block_size = {1, 1, 1};
switch (format) {
case VK_FORMAT_BC1_RGB_UNORM_BLOCK:
case VK_FORMAT_BC1_RGB_SRGB_BLOCK:
case VK_FORMAT_BC1_RGBA_UNORM_BLOCK:
case VK_FORMAT_BC1_RGBA_SRGB_BLOCK:
case VK_FORMAT_BC2_UNORM_BLOCK:
case VK_FORMAT_BC2_SRGB_BLOCK:
case VK_FORMAT_BC3_UNORM_BLOCK:
case VK_FORMAT_BC3_SRGB_BLOCK:
case VK_FORMAT_BC4_UNORM_BLOCK:
case VK_FORMAT_BC4_SNORM_BLOCK:
case VK_FORMAT_BC5_UNORM_BLOCK:
case VK_FORMAT_BC5_SNORM_BLOCK:
case VK_FORMAT_BC6H_UFLOAT_BLOCK:
case VK_FORMAT_BC6H_SFLOAT_BLOCK:
case VK_FORMAT_BC7_UNORM_BLOCK:
case VK_FORMAT_BC7_SRGB_BLOCK:
case VK_FORMAT_ETC2_R8G8B8_UNORM_BLOCK:
case VK_FORMAT_ETC2_R8G8B8_SRGB_BLOCK:
case VK_FORMAT_ETC2_R8G8B8A1_UNORM_BLOCK:
case VK_FORMAT_ETC2_R8G8B8A1_SRGB_BLOCK:
case VK_FORMAT_ETC2_R8G8B8A8_UNORM_BLOCK:
case VK_FORMAT_ETC2_R8G8B8A8_SRGB_BLOCK:
case VK_FORMAT_EAC_R11_UNORM_BLOCK:
case VK_FORMAT_EAC_R11_SNORM_BLOCK:
case VK_FORMAT_EAC_R11G11_UNORM_BLOCK:
case VK_FORMAT_EAC_R11G11_SNORM_BLOCK:
case VK_FORMAT_ASTC_4x4_UNORM_BLOCK:
case VK_FORMAT_ASTC_4x4_SRGB_BLOCK:
block_size = {4, 4, 1};
break;
case VK_FORMAT_ASTC_5x4_UNORM_BLOCK:
case VK_FORMAT_ASTC_5x4_SRGB_BLOCK:
block_size = {5, 4, 1};
break;
case VK_FORMAT_ASTC_5x5_UNORM_BLOCK:
case VK_FORMAT_ASTC_5x5_SRGB_BLOCK:
block_size = {5, 5, 1};
break;
case VK_FORMAT_ASTC_6x5_UNORM_BLOCK:
case VK_FORMAT_ASTC_6x5_SRGB_BLOCK:
block_size = {6, 5, 1};
break;
case VK_FORMAT_ASTC_6x6_UNORM_BLOCK:
case VK_FORMAT_ASTC_6x6_SRGB_BLOCK:
block_size = {6, 6, 1};
break;
case VK_FORMAT_ASTC_8x5_UNORM_BLOCK:
case VK_FORMAT_ASTC_8x5_SRGB_BLOCK:
block_size = {8, 5, 1};
break;
case VK_FORMAT_ASTC_8x6_UNORM_BLOCK:
case VK_FORMAT_ASTC_8x6_SRGB_BLOCK:
block_size = {8, 6, 1};
break;
case VK_FORMAT_ASTC_8x8_UNORM_BLOCK:
case VK_FORMAT_ASTC_8x8_SRGB_BLOCK:
block_size = {8, 8, 1};
break;
case VK_FORMAT_ASTC_10x5_UNORM_BLOCK:
case VK_FORMAT_ASTC_10x5_SRGB_BLOCK:
block_size = {10, 5, 1};
break;
case VK_FORMAT_ASTC_10x6_UNORM_BLOCK:
case VK_FORMAT_ASTC_10x6_SRGB_BLOCK:
block_size = {10, 6, 1};
break;
case VK_FORMAT_ASTC_10x8_UNORM_BLOCK:
case VK_FORMAT_ASTC_10x8_SRGB_BLOCK:
block_size = {10, 8, 1};
break;
case VK_FORMAT_ASTC_10x10_UNORM_BLOCK:
case VK_FORMAT_ASTC_10x10_SRGB_BLOCK:
block_size = {10, 10, 1};
break;
case VK_FORMAT_ASTC_12x10_UNORM_BLOCK:
case VK_FORMAT_ASTC_12x10_SRGB_BLOCK:
block_size = {12, 10, 1};
break;
case VK_FORMAT_ASTC_12x12_UNORM_BLOCK:
case VK_FORMAT_ASTC_12x12_SRGB_BLOCK:
block_size = {12, 12, 1};
break;
case VK_FORMAT_PVRTC1_2BPP_UNORM_BLOCK_IMG:
case VK_FORMAT_PVRTC2_2BPP_UNORM_BLOCK_IMG:
case VK_FORMAT_PVRTC1_2BPP_SRGB_BLOCK_IMG:
case VK_FORMAT_PVRTC2_2BPP_SRGB_BLOCK_IMG:
block_size = {8, 4, 1};
break;
case VK_FORMAT_PVRTC1_4BPP_UNORM_BLOCK_IMG:
case VK_FORMAT_PVRTC2_4BPP_UNORM_BLOCK_IMG:
case VK_FORMAT_PVRTC1_4BPP_SRGB_BLOCK_IMG:
case VK_FORMAT_PVRTC2_4BPP_SRGB_BLOCK_IMG:
block_size = {4, 4, 1};
break;
// With KHR_sampler_ycbcr_conversion, these formats are treated as 2x1 compressed (for copies)
case VK_FORMAT_G8B8G8R8_422_UNORM_KHR:
case VK_FORMAT_B8G8R8G8_422_UNORM_KHR:
case VK_FORMAT_G10X6B10X6G10X6R10X6_422_UNORM_4PACK16_KHR:
case VK_FORMAT_B10X6G10X6R10X6G10X6_422_UNORM_4PACK16_KHR:
case VK_FORMAT_G12X4B12X4G12X4R12X4_422_UNORM_4PACK16_KHR:
case VK_FORMAT_B12X4G12X4R12X4G12X4_422_UNORM_4PACK16_KHR:
case VK_FORMAT_G16B16G16R16_422_UNORM_KHR:
case VK_FORMAT_B16G16R16G16_422_UNORM_KHR:
block_size = {2, 1, 1};
break;
default:
break;
}
return block_size;
}
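// Illustrative sketch (hypothetical helper, not part of this layer's API):
// callers typically ceil-divide an image extent by the texel block extent to
// count compressed blocks, e.g. a 10x10 BC1 image spans 3x3 blocks of 4x4 texels.
static inline VkExtent3D FormatTexelBlockCount(VkExtent3D image_extent, VkFormat format) {
    const VkExtent3D block = FormatCompressedTexelBlockExtent(format);
    return {(image_extent.width + block.width - 1) / block.width,
            (image_extent.height + block.height - 1) / block.height,
            (image_extent.depth + block.depth - 1) / block.depth};
}
// Return the number of planes of the specified format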
VK_LAYER_EXPORT uint32_t FormatPlaneCount(VkFormat format) {
switch (format) {
case VK_FORMAT_G8_B8_R8_3PLANE_420_UNORM_KHR:
case VK_FORMAT_G8_B8_R8_3PLANE_422_UNORM_KHR:
case VK_FORMAT_G8_B8_R8_3PLANE_444_UNORM_KHR:
case VK_FORMAT_G10X6_B10X6_R10X6_3PLANE_420_UNORM_3PACK16_KHR:
case VK_FORMAT_G10X6_B10X6_R10X6_3PLANE_422_UNORM_3PACK16_KHR:
case VK_FORMAT_G10X6_B10X6_R10X6_3PLANE_444_UNORM_3PACK16_KHR:
case VK_FORMAT_G12X4_B12X4_R12X4_3PLANE_420_UNORM_3PACK16_KHR:
case VK_FORMAT_G12X4_B12X4_R12X4_3PLANE_422_UNORM_3PACK16_KHR:
case VK_FORMAT_G12X4_B12X4_R12X4_3PLANE_444_UNORM_3PACK16_KHR:
case VK_FORMAT_G16_B16_R16_3PLANE_420_UNORM_KHR:
case VK_FORMAT_G16_B16_R16_3PLANE_422_UNORM_KHR:
case VK_FORMAT_G16_B16_R16_3PLANE_444_UNORM_KHR:
            return 3;
case VK_FORMAT_G8_B8R8_2PLANE_420_UNORM_KHR:
case VK_FORMAT_G8_B8R8_2PLANE_422_UNORM_KHR:
case VK_FORMAT_G10X6_B10X6R10X6_2PLANE_420_UNORM_3PACK16_KHR:
case VK_FORMAT_G10X6_B10X6R10X6_2PLANE_422_UNORM_3PACK16_KHR:
case VK_FORMAT_G12X4_B12X4R12X4_2PLANE_420_UNORM_3PACK16_KHR:
case VK_FORMAT_G12X4_B12X4R12X4_2PLANE_422_UNORM_3PACK16_KHR:
case VK_FORMAT_G16_B16R16_2PLANE_420_UNORM_KHR:
case VK_FORMAT_G16_B16R16_2PLANE_422_UNORM_KHR:
            return 2;
default:
            return 1;
}
}
// Return format class of the specified format
VK_LAYER_EXPORT VkFormatCompatibilityClass FormatCompatibilityClass(VkFormat format) {
auto item = vk_format_table.find(format);
if (item != vk_format_table.end()) {
return item->second.format_class;
}
return VK_FORMAT_COMPATIBILITY_CLASS_NONE_BIT;
}
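// Example: per the table above, VK_FORMAT_A2B10G10R10_UINT_PACK32 and
// VK_FORMAT_R32_UINT share VK_FORMAT_COMPATIBILITY_CLASS_32_BIT, so a
// VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT image of one may have views of the other.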
// Return size, in bytes, of a pixel of the specified format
VK_LAYER_EXPORT size_t FormatSize(VkFormat format) {
auto item = vk_format_table.find(format);
if (item != vk_format_table.end()) {
return item->second.size;
}
return 0;
}
// Return the number of channels for a given format
uint32_t FormatChannelCount(VkFormat format) {
auto item = vk_format_table.find(format);
if (item != vk_format_table.end()) {
return item->second.channel_count;
}
return 0;
}
// Perform a modulo operation that tolerates a zero divisor (returns 0 instead of faulting)
VK_LAYER_EXPORT VkDeviceSize SafeModulo(VkDeviceSize dividend, VkDeviceSize divisor) {
VkDeviceSize result = 0;
if (divisor != 0) {
result = dividend % divisor;
}
return result;
}
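// Example: SafeModulo(10, 4) == 2, while SafeModulo(10, 0) == 0 rather than
// triggering a divide-by-zero, which is convenient when validating offsets
// against possibly-zero alignment granularities.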
struct VULKAN_PER_PLANE_COMPATIBILITY {
uint32_t width_divisor;
uint32_t height_divisor;
VkFormat compatible_format;
};
struct VULKAN_MULTIPLANE_COMPATIBILITY {
VULKAN_PER_PLANE_COMPATIBILITY per_plane[VK_MULTIPLANE_FORMAT_MAX_PLANES];
};
// Source: Vulkan spec Table 45. Plane Format Compatibility Table
// clang-format off
const std::map<VkFormat, VULKAN_MULTIPLANE_COMPATIBILITY> vk_multiplane_compatibility_map {
{ VK_FORMAT_G8_B8_R8_3PLANE_420_UNORM_KHR, { { { 1, 1, VK_FORMAT_R8_UNORM },
{ 2, 2, VK_FORMAT_R8_UNORM },
{ 2, 2, VK_FORMAT_R8_UNORM } } } },
{ VK_FORMAT_G8_B8R8_2PLANE_420_UNORM_KHR, { { { 1, 1, VK_FORMAT_R8_UNORM },
{ 2, 2, VK_FORMAT_R8G8_UNORM },
{ 1, 1, VK_FORMAT_UNDEFINED } } } },
{ VK_FORMAT_G8_B8_R8_3PLANE_422_UNORM_KHR, { { { 1, 1, VK_FORMAT_R8_UNORM },
{ 2, 1, VK_FORMAT_R8_UNORM },
{ 2, 1, VK_FORMAT_R8_UNORM } } } },
{ VK_FORMAT_G8_B8R8_2PLANE_422_UNORM_KHR, { { { 1, 1, VK_FORMAT_R8_UNORM },
{ 2, 1, VK_FORMAT_R8G8_UNORM },
{ 1, 1, VK_FORMAT_UNDEFINED } } } },
{ VK_FORMAT_G8_B8_R8_3PLANE_444_UNORM_KHR, { { { 1, 1, VK_FORMAT_R8_UNORM },
{ 1, 1, VK_FORMAT_R8_UNORM },
{ 1, 1, VK_FORMAT_R8_UNORM } } } },
{ VK_FORMAT_G10X6_B10X6_R10X6_3PLANE_420_UNORM_3PACK16_KHR, { { { 1, 1, VK_FORMAT_R10X6_UNORM_PACK16_KHR },
{ 2, 2, VK_FORMAT_R10X6_UNORM_PACK16_KHR },
{ 2, 2, VK_FORMAT_R10X6_UNORM_PACK16_KHR } } } },
{ VK_FORMAT_G10X6_B10X6R10X6_2PLANE_420_UNORM_3PACK16_KHR, { { { 1, 1, VK_FORMAT_R10X6_UNORM_PACK16_KHR },
{ 2, 2, VK_FORMAT_R10X6G10X6_UNORM_2PACK16_KHR },
{ 1, 1, VK_FORMAT_UNDEFINED } } } },
{ VK_FORMAT_G10X6_B10X6_R10X6_3PLANE_422_UNORM_3PACK16_KHR, { { { 1, 1, VK_FORMAT_R10X6_UNORM_PACK16_KHR },
{ 2, 1, VK_FORMAT_R10X6_UNORM_PACK16_KHR },
{ 2, 1, VK_FORMAT_R10X6_UNORM_PACK16_KHR } } } },
{ VK_FORMAT_G10X6_B10X6R10X6_2PLANE_422_UNORM_3PACK16_KHR, { { { 1, 1, VK_FORMAT_R10X6_UNORM_PACK16_KHR },
{ 2, 1, VK_FORMAT_R10X6G10X6_UNORM_2PACK16_KHR },
{ 1, 1, VK_FORMAT_UNDEFINED } } } },
{ VK_FORMAT_G10X6_B10X6_R10X6_3PLANE_444_UNORM_3PACK16_KHR, { { { 1, 1, VK_FORMAT_R10X6_UNORM_PACK16_KHR },
{ 1, 1, VK_FORMAT_R10X6_UNORM_PACK16_KHR },
{ 1, 1, VK_FORMAT_R10X6_UNORM_PACK16_KHR } } } },
{ VK_FORMAT_G12X4_B12X4_R12X4_3PLANE_420_UNORM_3PACK16_KHR, { { { 1, 1, VK_FORMAT_R12X4_UNORM_PACK16_KHR },
{ 2, 2, VK_FORMAT_R12X4_UNORM_PACK16_KHR },
{ 2, 2, VK_FORMAT_R12X4_UNORM_PACK16_KHR } } } },
{ VK_FORMAT_G12X4_B12X4R12X4_2PLANE_420_UNORM_3PACK16_KHR, { { { 1, 1, VK_FORMAT_R12X4_UNORM_PACK16_KHR },
{ 2, 2, VK_FORMAT_R12X4G12X4_UNORM_2PACK16_KHR },
{ 1, 1, VK_FORMAT_UNDEFINED } } } },
{ VK_FORMAT_G12X4_B12X4_R12X4_3PLANE_422_UNORM_3PACK16_KHR, { { { 1, 1, VK_FORMAT_R12X4_UNORM_PACK16_KHR },
{ 2, 1, VK_FORMAT_R12X4_UNORM_PACK16_KHR },
{ 2, 1, VK_FORMAT_R12X4_UNORM_PACK16_KHR } } } },
{ VK_FORMAT_G12X4_B12X4R12X4_2PLANE_422_UNORM_3PACK16_KHR, { { { 1, 1, VK_FORMAT_R12X4_UNORM_PACK16_KHR },
{ 2, 1, VK_FORMAT_R12X4G12X4_UNORM_2PACK16_KHR },
{ 1, 1, VK_FORMAT_UNDEFINED } } } },
{ VK_FORMAT_G12X4_B12X4_R12X4_3PLANE_444_UNORM_3PACK16_KHR, { { { 1, 1, VK_FORMAT_R12X4_UNORM_PACK16_KHR },
{ 1, 1, VK_FORMAT_R12X4_UNORM_PACK16_KHR },
{ 1, 1, VK_FORMAT_R12X4_UNORM_PACK16_KHR } } } },
{ VK_FORMAT_G16_B16_R16_3PLANE_420_UNORM_KHR, { { { 1, 1, VK_FORMAT_R16_UNORM },
{ 2, 2, VK_FORMAT_R16_UNORM },
{ 2, 2, VK_FORMAT_R16_UNORM } } } },
{ VK_FORMAT_G16_B16R16_2PLANE_420_UNORM_KHR, { { { 1, 1, VK_FORMAT_R16_UNORM },
{ 2, 2, VK_FORMAT_R16G16_UNORM },
{ 1, 1, VK_FORMAT_UNDEFINED } } } },
{ VK_FORMAT_G16_B16_R16_3PLANE_422_UNORM_KHR, { { { 1, 1, VK_FORMAT_R16_UNORM },
{ 2, 1, VK_FORMAT_R16_UNORM },
{ 2, 1, VK_FORMAT_R16_UNORM } } } },
{ VK_FORMAT_G16_B16R16_2PLANE_422_UNORM_KHR, { { { 1, 1, VK_FORMAT_R16_UNORM },
{ 2, 1, VK_FORMAT_R16G16_UNORM },
{ 1, 1, VK_FORMAT_UNDEFINED } } } },
{ VK_FORMAT_G16_B16_R16_3PLANE_444_UNORM_KHR, { { { 1, 1, VK_FORMAT_R16_UNORM },
{ 1, 1, VK_FORMAT_R16_UNORM },
{ 1, 1, VK_FORMAT_R16_UNORM } } } }
};
// clang-format on
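// Return the single-plane format compatible with the given plane of a multi-planar format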
VK_LAYER_EXPORT VkFormat FindMultiplaneCompatibleFormat(VkFormat mp_fmt, uint32_t plane) {
auto it = vk_multiplane_compatibility_map.find(mp_fmt);
if ((it == vk_multiplane_compatibility_map.end()) || (plane >= VK_MULTIPLANE_FORMAT_MAX_PLANES)) {
return VK_FORMAT_UNDEFINED;
}
return it->second.per_plane[plane].compatible_format;
}
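// Example, reading the compatibility map above: for
// VK_FORMAT_G8_B8R8_2PLANE_420_UNORM_KHR, plane 0 resolves to
// VK_FORMAT_R8_UNORM at full resolution, plane 1 to VK_FORMAT_R8G8_UNORM at
// half width and height, and plane 2 to VK_FORMAT_UNDEFINED (only two planes exist).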
// Return alignment, in bytes, of data for the specified format
VK_LAYER_EXPORT size_t FormatAlignment(VkFormat format) {
if (FormatIsPacked(format)) {
return FormatSize(format);
    } else {
        // Guard against unknown formats, for which FormatChannelCount() returns 0
        const uint32_t channels = FormatChannelCount(format);
        return (channels == 0) ? 0 : FormatSize(format) / channels;
    }
} | 1 | 8,705 | Order(1) vs Order(logN) -- not sure it's measurable at the ~200 element size of the table. All of the published comparisons start at ~1000 elements. | KhronosGroup-Vulkan-ValidationLayers | cpp |
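A quick way to sanity-check the review comment above: the sketch below is a hypothetical micro-benchmark (not part of the repository) timing one million lookups against ~200-entry std::map and std::unordered_map tables, mirroring the format table's size; at this scale the O(log N) vs O(1) difference is typically lost in cache effects and timer noise.

#include <chrono>
#include <cstdio>
#include <map>
#include <unordered_map>

// Time `queries` successful lookups against a ~200-entry map; returns nanoseconds.
template <typename Map>
static long long LookupNanos(const Map &table, int queries) {
    const auto start = std::chrono::steady_clock::now();
    long long sink = 0;
    for (int i = 0; i < queries; ++i) {
        sink += table.find(i % 200)->second;  // every key 0..199 is present
    }
    const auto elapsed = std::chrono::steady_clock::now() - start;
    std::printf("(sink=%lld)\n", sink);  // keep the loop from being optimized away
    return std::chrono::duration_cast<std::chrono::nanoseconds>(elapsed).count();
}

int main() {
    std::map<int, int> ordered;
    std::unordered_map<int, int> hashed;
    for (int i = 0; i < 200; ++i) {
        ordered[i] = i;
        hashed[i] = i;
    }
    std::printf("std::map:           %lld ns\n", LookupNanos(ordered, 1000000));
    std::printf("std::unordered_map: %lld ns\n", LookupNanos(hashed, 1000000));
    return 0;
}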
@@ -33,6 +33,7 @@ import (
"github.com/mysteriumnetwork/node/core/connection"
"github.com/mysteriumnetwork/node/core/ip"
"github.com/mysteriumnetwork/node/core/location"
+ location_factory "github.com/mysteriumnetwork/node/core/location/factory"
"github.com/mysteriumnetwork/node/core/node"
"github.com/mysteriumnetwork/node/core/port"
"github.com/mysteriumnetwork/node/core/service" | 1 | /*
* Copyright (C) 2018 The "MysteriumNetwork/node" Authors.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package cmd
import (
"time"
log "github.com/cihub/seelog"
"github.com/ethereum/go-ethereum/accounts/keystore"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/ethclient"
"github.com/mysteriumnetwork/node/blockchain"
"github.com/mysteriumnetwork/node/communication"
nats_dialog "github.com/mysteriumnetwork/node/communication/nats/dialog"
nats_discovery "github.com/mysteriumnetwork/node/communication/nats/discovery"
consumer_session "github.com/mysteriumnetwork/node/consumer/session"
"github.com/mysteriumnetwork/node/consumer/statistics"
"github.com/mysteriumnetwork/node/core/connection"
"github.com/mysteriumnetwork/node/core/ip"
"github.com/mysteriumnetwork/node/core/location"
"github.com/mysteriumnetwork/node/core/node"
"github.com/mysteriumnetwork/node/core/port"
"github.com/mysteriumnetwork/node/core/service"
"github.com/mysteriumnetwork/node/core/storage/boltdb"
"github.com/mysteriumnetwork/node/core/storage/boltdb/migrations/history"
"github.com/mysteriumnetwork/node/eventbus"
"github.com/mysteriumnetwork/node/identity"
identity_registry "github.com/mysteriumnetwork/node/identity/registry"
"github.com/mysteriumnetwork/node/logconfig"
"github.com/mysteriumnetwork/node/market"
market_metrics "github.com/mysteriumnetwork/node/market/metrics"
"github.com/mysteriumnetwork/node/market/metrics/oracle"
"github.com/mysteriumnetwork/node/market/mysterium"
"github.com/mysteriumnetwork/node/metadata"
"github.com/mysteriumnetwork/node/metrics"
"github.com/mysteriumnetwork/node/money"
"github.com/mysteriumnetwork/node/nat"
"github.com/mysteriumnetwork/node/nat/event"
"github.com/mysteriumnetwork/node/nat/mapping"
"github.com/mysteriumnetwork/node/nat/traversal"
"github.com/mysteriumnetwork/node/nat/traversal/config"
"github.com/mysteriumnetwork/node/nat/upnp"
"github.com/mysteriumnetwork/node/services"
service_noop "github.com/mysteriumnetwork/node/services/noop"
"github.com/mysteriumnetwork/node/services/openvpn"
service_openvpn "github.com/mysteriumnetwork/node/services/openvpn"
"github.com/mysteriumnetwork/node/services/openvpn/discovery/dto"
"github.com/mysteriumnetwork/node/session"
"github.com/mysteriumnetwork/node/session/balance"
session_payment "github.com/mysteriumnetwork/node/session/payment"
payment_factory "github.com/mysteriumnetwork/node/session/payment/factory"
payments_noop "github.com/mysteriumnetwork/node/session/payment/noop"
"github.com/mysteriumnetwork/node/session/promise"
"github.com/mysteriumnetwork/node/session/promise/validators"
"github.com/mysteriumnetwork/node/tequilapi"
tequilapi_endpoints "github.com/mysteriumnetwork/node/tequilapi/endpoints"
"github.com/mysteriumnetwork/node/utils"
)
// Storage stores persistent objects for future usage
type Storage interface {
Store(issuer string, data interface{}) error
Delete(issuer string, data interface{}) error
Update(bucket string, object interface{}) error
GetAllFrom(bucket string, data interface{}) error
GetOneByField(bucket string, fieldName string, key interface{}, to interface{}) error
GetLast(bucket string, to interface{}) error
GetBuckets() []string
Close() error
}
// NatPinger is responsible for pinging NAT holes
type NatPinger interface {
PingProvider(ip string, port int, stop <-chan struct{}) error
PingTarget(*traversal.Params)
BindConsumerPort(port int)
BindServicePort(serviceType services.ServiceType, port int)
Start()
Stop()
}
// NatEventTracker is responsible for tracking NAT events
type NatEventTracker interface {
ConsumeNATEvent(event event.Event)
LastEvent() *event.Event
WaitForEvent() event.Event
}
// NatEventSender is responsible for sending NAT events to metrics server
type NatEventSender interface {
ConsumeNATEvent(event event.Event)
}
// NATStatusTracker tracks status of NAT traversal by consuming NAT events
type NATStatusTracker interface {
Status() nat.Status
ConsumeNATEvent(event event.Event)
}
// CacheResolver caches the location resolution results
type CacheResolver interface {
location.Resolver
HandleConnectionEvent(connection.StateEvent)
}
// Dependencies is a DI container for top-level components, reused in several places
type Dependencies struct {
Node *node.Node
NetworkDefinition metadata.NetworkDefinition
MysteriumAPI *mysterium.MysteriumAPI
MysteriumMorqaClient market_metrics.QualityOracle
EtherClient *ethclient.Client
NATService nat.NATService
Storage Storage
Keystore *keystore.KeyStore
PromiseStorage *promise.Storage
IdentityManager identity.Manager
SignerFactory identity.SignerFactory
IdentityRegistry identity_registry.IdentityRegistry
IdentityRegistration identity_registry.RegistrationDataProvider
IPResolver ip.Resolver
LocationResolver CacheResolver
StatisticsTracker *statistics.SessionStatisticsTracker
StatisticsReporter *statistics.SessionStatisticsReporter
SessionStorage *consumer_session.Storage
EventBus eventbus.EventBus
ConnectionManager connection.Manager
ConnectionRegistry *connection.Registry
ServicesManager *service.Manager
ServiceRegistry *service.Registry
ServiceSessionStorage *session.StorageMemory
NATPinger NatPinger
NATTracker NatEventTracker
NATEventSender NatEventSender
NATStatusTracker NATStatusTracker
PortPool *port.Pool
MetricsSender *metrics.Sender
}
// Bootstrap initializes all container dependencies
func (di *Dependencies) Bootstrap(nodeOptions node.Options) error {
logconfig.Bootstrap()
nats_discovery.Bootstrap()
log.Infof("Starting Mysterium Node (%s)", metadata.VersionAsString())
if err := nodeOptions.Directories.Check(); err != nil {
return err
}
if err := nodeOptions.Openvpn.Check(); err != nil {
return err
}
if err := di.bootstrapNetworkComponents(nodeOptions.OptionsNetwork); err != nil {
return err
}
if err := di.bootstrapStorage(nodeOptions.Directories.Storage); err != nil {
return err
}
di.bootstrapEventBus()
di.bootstrapIdentityComponents(nodeOptions)
if err := di.bootstrapLocationComponents(nodeOptions.Location, nodeOptions.Directories.Config); err != nil {
return err
}
di.bootstrapMetrics(nodeOptions)
di.PortPool = port.NewPool()
go upnp.ReportNetworkGateways()
di.bootstrapNATComponents(nodeOptions)
di.bootstrapServices(nodeOptions)
di.bootstrapNodeComponents(nodeOptions)
di.registerConnections(nodeOptions)
err := di.subscribeEventConsumers()
if err != nil {
return err
}
if err := di.Node.Start(); err != nil {
return err
}
return nil
}
func (di *Dependencies) registerOpenvpnConnection(nodeOptions node.Options) {
service_openvpn.Bootstrap()
connectionFactory := service_openvpn.NewProcessBasedConnectionFactory(
// TODO instead of passing binary path here, Openvpn from node options could represent abstract vpn factory itself
nodeOptions.Openvpn.BinaryPath(),
nodeOptions.Directories.Config,
nodeOptions.Directories.Runtime,
di.SignerFactory,
di.IPResolver,
di.NATPinger,
)
di.ConnectionRegistry.Register(service_openvpn.ServiceType, connectionFactory)
}
func (di *Dependencies) registerNoopConnection() {
service_noop.Bootstrap()
di.ConnectionRegistry.Register(service_noop.ServiceType, service_noop.NewConnectionCreator())
}
// Shutdown stops container
func (di *Dependencies) Shutdown() (err error) {
var errs []error
defer func() {
for i := range errs {
log.Error("Dependencies shutdown failed: ", errs[i])
if err == nil {
err = errs[i]
}
}
}()
if di.ServicesManager != nil {
if err := di.ServicesManager.Kill(); err != nil {
errs = append(errs, err)
}
}
if di.NATService != nil {
if err := di.NATService.Disable(); err != nil {
errs = append(errs, err)
}
}
if di.Node != nil {
if err := di.Node.Kill(); err != nil {
errs = append(errs, err)
}
}
if di.Storage != nil {
if err := di.Storage.Close(); err != nil {
errs = append(errs, err)
}
}
log.Flush()
return nil
}
func (di *Dependencies) bootstrapStorage(path string) error {
localStorage, err := boltdb.NewStorage(path)
if err != nil {
return err
}
migrator := boltdb.NewMigrator(localStorage)
err = migrator.RunMigrations(history.Sequence)
if err != nil {
return err
}
di.Storage = localStorage
return nil
}
func (di *Dependencies) subscribeEventConsumers() error {
// state events
err := di.EventBus.Subscribe(connection.SessionEventTopic, di.StatisticsTracker.ConsumeSessionEvent)
if err != nil {
return err
}
err = di.EventBus.Subscribe(connection.SessionEventTopic, di.StatisticsReporter.ConsumeSessionEvent)
if err != nil {
return err
}
err = di.EventBus.Subscribe(connection.SessionEventTopic, di.SessionStorage.ConsumeSessionEvent)
if err != nil {
return err
}
// statistics events
err = di.EventBus.Subscribe(connection.StatisticsEventTopic, di.StatisticsTracker.ConsumeStatisticsEvent)
if err != nil {
return err
}
err = di.EventBus.SubscribeAsync(connection.StateEventTopic, di.LocationResolver.HandleConnectionEvent)
if err != nil {
return err
}
// NAT events
err = di.EventBus.Subscribe(event.Topic, di.NATEventSender.ConsumeNATEvent)
if err != nil {
return err
}
err = di.EventBus.Subscribe(event.Topic, di.NATTracker.ConsumeNATEvent)
if err != nil {
return err
}
return di.EventBus.Subscribe(event.Topic, di.NATStatusTracker.ConsumeNATEvent)
}
func (di *Dependencies) bootstrapNodeComponents(nodeOptions node.Options) {
dialogFactory := func(consumerID, providerID identity.Identity, contact market.Contact) (communication.Dialog, error) {
dialogEstablisher := nats_dialog.NewDialogEstablisher(consumerID, di.SignerFactory(consumerID))
return dialogEstablisher.EstablishDialog(providerID, contact)
}
di.StatisticsTracker = statistics.NewSessionStatisticsTracker(time.Now)
di.StatisticsReporter = statistics.NewSessionStatisticsReporter(
di.StatisticsTracker,
di.MysteriumAPI,
di.SignerFactory,
di.LocationResolver,
time.Minute,
)
di.SessionStorage = consumer_session.NewSessionStorage(di.Storage, di.StatisticsTracker)
di.PromiseStorage = promise.NewStorage(di.Storage)
di.ConnectionRegistry = connection.NewRegistry()
di.ConnectionManager = connection.NewManager(
dialogFactory,
payment_factory.PaymentIssuerFactoryFunc(nodeOptions, di.SignerFactory),
di.ConnectionRegistry.CreateConnection,
di.EventBus,
di.IPResolver,
)
router := tequilapi.NewAPIRouter()
tequilapi_endpoints.AddRouteForStop(router, utils.SoftKiller(di.Shutdown))
tequilapi_endpoints.AddRoutesForIdentities(router, di.IdentityManager)
tequilapi_endpoints.AddRoutesForConnection(router, di.ConnectionManager, di.IPResolver, di.StatisticsTracker, di.MysteriumAPI)
tequilapi_endpoints.AddRoutesForConnectionSessions(router, di.SessionStorage)
tequilapi_endpoints.AddRoutesForConnectionLocation(router, di.ConnectionManager, di.LocationResolver)
tequilapi_endpoints.AddRoutesForLocation(router, di.LocationResolver)
tequilapi_endpoints.AddRoutesForProposals(router, di.MysteriumAPI, di.MysteriumMorqaClient)
tequilapi_endpoints.AddRoutesForService(router, di.ServicesManager, serviceTypesRequestParser, nodeOptions.AccessPolicyEndpointAddress)
tequilapi_endpoints.AddRoutesForServiceSessions(router, di.ServiceSessionStorage)
tequilapi_endpoints.AddRoutesForPayout(router, di.IdentityManager, di.SignerFactory, di.MysteriumAPI)
tequilapi_endpoints.AddRoutesForAccessPolicies(router, nodeOptions.AccessPolicyEndpointAddress)
tequilapi_endpoints.AddRoutesForNAT(router, di.NATStatusTracker.Status)
identity_registry.AddIdentityRegistrationEndpoint(router, di.IdentityRegistration, di.IdentityRegistry)
corsPolicy := tequilapi.NewMysteriumCorsPolicy()
httpAPIServer := tequilapi.NewServer(nodeOptions.TequilapiAddress, nodeOptions.TequilapiPort, router, corsPolicy)
di.Node = node.NewNode(di.ConnectionManager, httpAPIServer, di.LocationResolver, di.MetricsSender, di.NATPinger)
}
func newSessionManagerFactory(
proposal market.ServiceProposal,
sessionStorage *session.StorageMemory,
promiseStorage session_payment.PromiseStorage,
nodeOptions node.Options,
natPingerChan func(*traversal.Params),
natTracker NatEventTracker,
serviceID string,
) session.ManagerFactory {
return func(dialog communication.Dialog) *session.Manager {
providerBalanceTrackerFactory := func(consumerID, receiverID, issuerID identity.Identity) (session.BalanceTracker, error) {
// We want backwards compatibility for openvpn on desktop providers, so no payments for them.
// Splitting this as a separate case just for that reason.
// TODO: remove this one day.
if proposal.ServiceType == openvpn.ServiceType {
return payments_noop.NewSessionBalance(), nil
}
timeTracker := session.NewTracker(time.Now)
// TODO: set the time and proper payment info
payment := dto.PaymentPerTime{
Price: money.Money{
Currency: money.CurrencyMyst,
Amount: uint64(0),
},
Duration: time.Minute,
}
amountCalc := session.AmountCalc{PaymentDef: payment}
sender := balance.NewBalanceSender(dialog)
promiseChan := make(chan promise.Message, 1)
listener := promise.NewListener(promiseChan)
err := dialog.Receive(listener.GetConsumer())
if err != nil {
return nil, err
}
// TODO: the ints and times here need to be passed in as well, or defined as constants
tracker := balance.NewBalanceTracker(&timeTracker, amountCalc, 0)
validator := validators.NewIssuedPromiseValidator(consumerID, receiverID, issuerID)
return session_payment.NewSessionBalance(sender, tracker, promiseChan, payment_factory.BalanceSendPeriod, payment_factory.PromiseWaitTimeout, validator, promiseStorage, consumerID, receiverID, issuerID), nil
}
return session.NewManager(
proposal,
session.GenerateUUID,
sessionStorage,
providerBalanceTrackerFactory,
natPingerChan,
natTracker,
serviceID,
)
}
}
// bootstrapNetworkComponents decides on the network definition, combining testnet/localnet flags with possible overrides
func (di *Dependencies) bootstrapNetworkComponents(options node.OptionsNetwork) (err error) {
network := metadata.DefaultNetwork
switch {
case options.Testnet:
network = metadata.TestnetDefinition
case options.Localnet:
network = metadata.LocalnetDefinition
}
	// Override defined values one by one from options
if options.DiscoveryAPIAddress != metadata.DefaultNetwork.DiscoveryAPIAddress {
network.DiscoveryAPIAddress = options.DiscoveryAPIAddress
}
if options.BrokerAddress != metadata.DefaultNetwork.BrokerAddress {
network.BrokerAddress = options.BrokerAddress
}
normalizedAddress := common.HexToAddress(options.EtherPaymentsAddress)
if normalizedAddress != metadata.DefaultNetwork.PaymentsContractAddress {
network.PaymentsContractAddress = normalizedAddress
}
if options.EtherClientRPC != metadata.DefaultNetwork.EtherClientRPC {
network.EtherClientRPC = options.EtherClientRPC
}
di.NetworkDefinition = network
di.MysteriumAPI = mysterium.NewClient(network.DiscoveryAPIAddress)
di.MysteriumMorqaClient = oracle.NewMorqaClient(network.QualityOracle)
log.Info("Using Eth endpoint: ", network.EtherClientRPC)
if di.EtherClient, err = blockchain.NewClient(network.EtherClientRPC); err != nil {
return err
}
log.Info("Using Eth contract at address: ", network.PaymentsContractAddress.String())
if options.ExperimentIdentityCheck {
if di.IdentityRegistry, err = identity_registry.NewIdentityRegistryContract(di.EtherClient, network.PaymentsContractAddress); err != nil {
return err
}
} else {
di.IdentityRegistry = &identity_registry.FakeRegistry{Registered: true, RegistrationEventExists: true}
}
return nil
}
func (di *Dependencies) bootstrapEventBus() {
di.EventBus = eventbus.New()
}
func (di *Dependencies) bootstrapIdentityComponents(options node.Options) {
di.Keystore = identity.NewKeystoreFilesystem(options.Directories.Keystore, options.Keystore.UseLightweight)
di.IdentityManager = identity.NewIdentityManager(di.Keystore)
di.SignerFactory = func(id identity.Identity) identity.Signer {
return identity.NewSigner(di.Keystore, id)
}
di.IdentityRegistration = identity_registry.NewRegistrationDataProvider(di.Keystore)
}
func (di *Dependencies) bootstrapLocationComponents(options node.OptionsLocation, configDirectory string) (err error) {
di.IPResolver = ip.NewResolver(options.IPDetectorURL)
	resolver, err := location.CreateLocationResolver(di.IPResolver, options.Country, options.City, options.Type, options.NodeType, options.Address, options.ExternalDb, configDirectory)
if err != nil {
return err
}
di.LocationResolver = location.NewCache(resolver, time.Minute*5)
return nil
}
func (di *Dependencies) bootstrapMetrics(options node.Options) {
appVersion := metadata.VersionAsString()
di.MetricsSender = metrics.NewSender(options.DisableMetrics, options.MetricsAddress, appVersion)
}
func (di *Dependencies) bootstrapNATComponents(options node.Options) {
di.NATTracker = event.NewTracker()
if options.ExperimentNATPunching {
di.NATPinger = traversal.NewPingerFactory(
di.NATTracker,
config.NewConfigParser(),
traversal.NewNATProxy(),
di.PortPool,
mapping.StageName,
di.EventBus,
)
} else {
di.NATPinger = &traversal.NoopPinger{}
}
di.NATEventSender = event.NewSender(di.MetricsSender, di.IPResolver.GetPublicIP)
var lastStageName string
if options.ExperimentNATPunching {
lastStageName = traversal.StageName
} else {
lastStageName = mapping.StageName
}
di.NATStatusTracker = nat.NewStatusTracker(lastStageName)
}
| 1 | 14,209 | I don't know about this aliasing and 'factory'. Previous version was rather straightforward: `location.CreateLocationResolver`. Perhaps `location.CreateResolver` would be even better? What do we actually gain here from moving DI to a separate sub-package? | mysteriumnetwork-node | go |
@@ -47,6 +47,14 @@ void Engine::SetCallBack(std::function<void(const void *, std::string,
{
}
+static void engineThrowUp(const std::string &engineType,
+ const std::string &func)
+{
+ throw std::invalid_argument(
+        "ERROR: Engine base class " + func + "() called. " + engineType +
+ " child class is not implementing this function\n");
+}
+
// should these functions throw an exception?
void Engine::Write(Variable<char> & /*variable*/, const char * /*values*/) {}
void Engine::Write(Variable<unsigned char> & /*variable*/, | 1 | /*
* Distributed under the OSI-approved Apache License, Version 2.0. See
* accompanying file Copyright.txt for details.
*
* Engine.cpp
*
* Created on: Dec 19, 2016
* Author: wfg
*/
#include "Engine.h"
#include <ios> //std::ios_base::failure
#include "adios2/ADIOSMPI.h"
#include "adios2/core/Support.h"
#include "adios2/core/adiosFunctions.h"
namespace adios
{
Engine::Engine(ADIOS &adios, const std::string engineType,
const std::string &name, const std::string accessMode,
MPI_Comm mpiComm, const Method &method,
const std::string endMessage)
: m_MPIComm(mpiComm), m_EngineType(engineType), m_Name(name),
m_AccessMode(accessMode), m_Method(method), m_ADIOS(adios),
m_DebugMode(m_Method.m_DebugMode), m_EndMessage(endMessage)
{
    if (m_DebugMode)
{
if (m_MPIComm == MPI_COMM_NULL)
{
throw std::ios_base::failure(
"ERROR: engine communicator is MPI_COMM_NULL,"
" in call to ADIOS Open or Constructor\n");
}
}
MPI_Comm_rank(m_MPIComm, &m_RankMPI);
MPI_Comm_size(m_MPIComm, &m_SizeMPI);
}
void Engine::SetCallBack(std::function<void(const void *, std::string,
std::string, std::string, Dims)>
callback)
{
}
// should these functions throw an exception?
void Engine::Write(Variable<char> & /*variable*/, const char * /*values*/) {}
void Engine::Write(Variable<unsigned char> & /*variable*/,
const unsigned char * /*values*/)
{
}
void Engine::Write(Variable<short> & /*variable*/, const short * /*values*/) {}
void Engine::Write(Variable<unsigned short> & /*variable*/,
const unsigned short * /*values*/)
{
}
void Engine::Write(Variable<int> & /*variable*/, const int * /*values*/) {}
void Engine::Write(Variable<unsigned int> & /*variable*/,
const unsigned int * /*values*/)
{
}
void Engine::Write(Variable<long int> & /*variable*/,
const long int * /*values*/)
{
}
void Engine::Write(Variable<unsigned long int> & /*variable*/,
const unsigned long int * /*values*/)
{
}
void Engine::Write(Variable<long long int> & /*variable*/,
const long long int * /*values*/)
{
}
void Engine::Write(Variable<unsigned long long int> & /*variable*/,
const unsigned long long int * /*values*/)
{
}
void Engine::Write(Variable<float> & /*variable*/, const float * /*values*/) {}
void Engine::Write(Variable<double> & /*variable*/, const double * /*values*/)
{
}
void Engine::Write(Variable<long double> & /*variable*/,
const long double * /*values*/)
{
}
void Engine::Write(Variable<std::complex<float>> & /*variable*/,
const std::complex<float> * /*values*/)
{
}
void Engine::Write(Variable<std::complex<double>> & /*variable*/,
const std::complex<double> * /*values*/)
{
}
void Engine::Write(Variable<std::complex<long double>> & /*variable*/,
const std::complex<long double> * /*values*/)
{
}
void Engine::Write(VariableCompound & /*variable*/, const void * /*values*/) {}
void Engine::Write(const std::string & /*variableName*/,
const char * /*values*/)
{
}
void Engine::Write(const std::string & /*variableName*/,
const unsigned char * /*values*/)
{
}
void Engine::Write(const std::string & /*variableName*/,
const short * /*values*/)
{
}
void Engine::Write(const std::string & /*variableName*/,
const unsigned short * /*values*/)
{
}
void Engine::Write(const std::string & /*variableName*/, const int * /*values*/)
{
}
void Engine::Write(const std::string & /*variableName*/,
const unsigned int * /*values*/)
{
}
void Engine::Write(const std::string & /*variableName*/,
const long int * /*values*/)
{
}
void Engine::Write(const std::string & /*variableName*/,
const unsigned long int * /*values*/)
{
}
void Engine::Write(const std::string & /*variableName*/,
const long long int * /*values*/)
{
}
void Engine::Write(const std::string & /*variableName*/,
const unsigned long long int * /*values*/)
{
}
void Engine::Write(const std::string & /*variableName*/,
const float * /*values*/)
{
}
void Engine::Write(const std::string & /*variableName*/,
const double * /*values*/)
{
}
void Engine::Write(const std::string & /*variableName*/,
const long double * /*values*/)
{
}
void Engine::Write(const std::string & /*variableName*/,
const std::complex<float> * /*values*/)
{
}
void Engine::Write(const std::string & /*variableName*/,
const std::complex<double> * /*values*/)
{
}
void Engine::Write(const std::string & /*variableName*/,
const std::complex<long double> * /*values*/)
{
}
void Engine::Write(const std::string & /*variableName*/,
const void * /*values*/)
{
}
void Engine::Advance(float /*timeout_sec*/) {}
void Engine::Advance(AdvanceMode /*mode*/, float /*timeout_sec*/) {}
void Engine::AdvanceAsync(
AdvanceMode /*mode*/,
std::function<void(std::shared_ptr<adios::Engine>)> /*callback*/)
{
}
void Engine::Close(const int /*transportIndex*/) {}
// READ
Variable<void> *Engine::InquireVariable(const std::string & /*name*/,
const bool /*readIn*/)
{
return nullptr;
}
Variable<char> *Engine::InquireVariableChar(const std::string & /*name*/,
const bool /*readIn*/)
{
return nullptr;
}
Variable<unsigned char> *
Engine::InquireVariableUChar(const std::string & /*name*/,
const bool /*readIn*/)
{
return nullptr;
}
Variable<short> *Engine::InquireVariableShort(const std::string & /*name*/,
const bool /*readIn*/)
{
return nullptr;
}
Variable<unsigned short> *
Engine::InquireVariableUShort(const std::string & /*name*/,
const bool /*readIn*/)
{
return nullptr;
}
Variable<int> *Engine::InquireVariableInt(const std::string & /*name*/,
const bool /*readIn*/)
{
return nullptr;
}
Variable<unsigned int> *
Engine::InquireVariableUInt(const std::string & /*name*/, const bool /*readIn*/)
{
return nullptr;
}
Variable<long int> *Engine::InquireVariableLInt(const std::string & /*name*/,
const bool /*readIn*/)
{
return nullptr;
}
Variable<unsigned long int> *
Engine::InquireVariableULInt(const std::string & /*name*/,
const bool /*readIn*/)
{
return nullptr;
}
Variable<long long int> *
Engine::InquireVariableLLInt(const std::string & /*name*/,
const bool /*readIn*/)
{
return nullptr;
}
Variable<unsigned long long int> *
Engine::InquireVariableULLInt(const std::string & /*name*/,
const bool /*readIn*/)
{
return nullptr;
}
Variable<float> *Engine::InquireVariableFloat(const std::string & /*name*/,
const bool /*readIn*/)
{
return nullptr;
}
Variable<double> *Engine::InquireVariableDouble(const std::string & /*name*/,
const bool /*readIn*/)
{
return nullptr;
}
Variable<long double> *
Engine::InquireVariableLDouble(const std::string & /*name*/,
const bool /*readIn*/)
{
return nullptr;
}
Variable<std::complex<float>> *
Engine::InquireVariableCFloat(const std::string & /*name*/,
const bool /*readIn*/)
{
return nullptr;
}
Variable<std::complex<double>> *
Engine::InquireVariableCDouble(const std::string & /*name*/,
const bool /*readIn*/)
{
return nullptr;
}
Variable<std::complex<long double>> *
Engine::InquireVariableCLDouble(const std::string & /*name*/,
const bool /*readIn*/)
{
return nullptr;
}
VariableCompound *Engine::InquireVariableCompound(const std::string & /*name*/,
const bool /*readIn*/)
{
return nullptr;
}
void Engine::Read(Variable<double> & /*variable*/, const double * /*values*/) {}
void Engine::ScheduleRead(Variable<double> & /*variable*/, double * /*values*/)
{
}
void Engine::ScheduleRead(const std::string /*variableName*/,
double * /*values*/)
{
}
void Engine::Release() {}
void Engine::PerformReads(PerformReadMode /*mode*/){};
// PROTECTED
void Engine::Init() {}
void Engine::InitParameters() {}
void Engine::InitTransports() {}
void Engine::CheckParameter(
const std::map<std::string, std::string>::const_iterator itParameter,
const std::map<std::string, std::string> ¶meters,
const std::string parameterName, const std::string hint) const
{
if (itParameter == parameters.end())
{
{
throw std::invalid_argument("ERROR: parameter name " +
parameterName + " not found " + hint);
}
}
}
bool Engine::TransportNamesUniqueness() const
{
auto lf_CheckTransportsType =
[&](const std::set<std::string> &specificType) -> bool
{
std::set<std::string> transportNames;
for (const auto ¶meters : m_Method.m_TransportParameters)
{
auto itTransport = parameters.find("transport");
if (m_DebugMode == true)
{
if (itTransport == parameters.end())
{
throw std::invalid_argument("ERROR: transport not defined "
"in Method input to Engine " +
m_Name);
}
}
const std::string type(itTransport->second);
if (specificType.count(type) == 1) // file transports type
{
std::string name(m_Name);
auto itName = parameters.find("name");
if (itName != parameters.end())
{
name = itName->second;
}
if (transportNames.count(name) == 0)
{
transportNames.insert(name);
}
else
{
return false;
}
}
}
return true;
};
return lf_CheckTransportsType(Support::FileTransports);
}
void Engine::CheckTransportIndex(const int transportIndex)
{
if (m_DebugMode == true)
{
if (transportIndex >= static_cast<int>(m_Transports.size()) ||
transportIndex < -1)
{
throw std::invalid_argument(
"ERROR: transport index " + std::to_string(transportIndex) +
" is out of range, in call to " + m_Name + "Close \n");
}
}
}
} // end namespace adios
| 1 | 11,539 | Use `UpperCamelCase` for function names | ornladios-ADIOS2 | cpp |
@@ -232,9 +232,12 @@ SchemaDate.prototype.cast = function(value) {
if (value instanceof Number || typeof value === 'number') {
date = new Date(value);
+ } else if (typeof value === 'string' && !isNaN(Number(value)) && (Number(value) >= 275761 || Number(value) < 0)) {
+ // string representation of milliseconds take this path
+ date = new Date(Number(value));
} else if (typeof value.valueOf === 'function') {
- // support for moment.js. This is also the path strings will take because strings
- // have a `valueOf()`
+ // support for moment.js. This is also the path string representation of years
+ // will take because strings have a `valueOf()`
date = new Date(value.valueOf());
} else {
// fallback | 1 | /*!
* Module requirements.
*/
var MongooseError = require('../error');
var utils = require('../utils');
var SchemaType = require('../schematype');
var CastError = SchemaType.CastError;
/**
* Date SchemaType constructor.
*
* @param {String} key
* @param {Object} options
* @inherits SchemaType
* @api public
*/
function SchemaDate(key, options) {
SchemaType.call(this, key, options, 'Date');
}
/**
* This schema type's name, to defend against minifiers that mangle
* function names.
*
* @api public
*/
SchemaDate.schemaName = 'Date';
/*!
* Inherits from SchemaType.
*/
SchemaDate.prototype = Object.create(SchemaType.prototype);
SchemaDate.prototype.constructor = SchemaDate;
/**
* Declares a TTL index (rounded to the nearest second) for _Date_ types only.
*
* This sets the `expireAfterSeconds` index option available in MongoDB >= 2.1.2.
* This index type is only compatible with Date types.
*
* ####Example:
*
* // expire in 24 hours
* new Schema({ createdAt: { type: Date, expires: 60*60*24 }});
*
* `expires` utilizes the `ms` module from [guille](https://github.com/guille/) allowing us to use a friendlier syntax:
*
* ####Example:
*
* // expire in 24 hours
* new Schema({ createdAt: { type: Date, expires: '24h' }});
*
* // expire in 1.5 hours
* new Schema({ createdAt: { type: Date, expires: '1.5h' }});
*
* // expire in 7 days
* var schema = new Schema({ createdAt: Date });
* schema.path('createdAt').expires('7d');
*
* @param {Number|String} when
* @added 3.0.0
* @return {SchemaType} this
* @api public
*/
SchemaDate.prototype.expires = function(when) {
if (!this._index || this._index.constructor.name !== 'Object') {
this._index = {};
}
this._index.expires = when;
utils.expires(this._index);
return this;
};
/**
* Check if the given value satisfies a required validator. To satisfy
* a required validator, the given value must be an instance of `Date`.
*
* @param {Any} value
* @param {Document} doc
* @return {Boolean}
* @api public
*/
SchemaDate.prototype.checkRequired = function(value) {
return value instanceof Date;
};
/**
* Sets a minimum date validator.
*
* ####Example:
*
* var s = new Schema({ d: { type: Date, min: Date('1970-01-01') })
* var M = db.model('M', s)
* var m = new M({ d: Date('1969-12-31') })
* m.save(function (err) {
* console.error(err) // validator error
* m.d = Date('2014-12-08');
* m.save() // success
* })
*
* // custom error messages
* // We can also use the special {MIN} token which will be replaced with the invalid value
* var min = [Date('1970-01-01'), 'The value of path `{PATH}` ({VALUE}) is beneath the limit ({MIN}).'];
* var schema = new Schema({ d: { type: Date, min: min })
* var M = mongoose.model('M', schema);
* var s= new M({ d: Date('1969-12-31') });
* s.validate(function (err) {
* console.log(String(err)) // ValidationError: The value of path `d` (1969-12-31) is before the limit (1970-01-01).
* })
*
* @param {Date} value minimum date
* @param {String} [message] optional custom error message
* @return {SchemaType} this
* @see Customized Error Messages #error_messages_MongooseError-messages
* @api public
*/
SchemaDate.prototype.min = function(value, message) {
if (this.minValidator) {
this.validators = this.validators.filter(function(v) {
return v.validator !== this.minValidator;
}, this);
}
if (value) {
var msg = message || MongooseError.messages.Date.min;
msg = msg.replace(/{MIN}/, (value === Date.now ? 'Date.now()' : this.cast(value).toString()));
var _this = this;
this.validators.push({
validator: this.minValidator = function(val) {
var min = (value === Date.now ? value() : _this.cast(value));
return val === null || val.valueOf() >= min.valueOf();
},
message: msg,
type: 'min',
min: value
});
}
return this;
};
/**
* Sets a maximum date validator.
*
* ####Example:
*
* var s = new Schema({ d: { type: Date, max: Date('2014-01-01') })
* var M = db.model('M', s)
* var m = new M({ d: Date('2014-12-08') })
* m.save(function (err) {
* console.error(err) // validator error
* m.d = Date('2013-12-31');
* m.save() // success
* })
*
* // custom error messages
* // We can also use the special {MAX} token which will be replaced with the invalid value
* var max = [Date('2014-01-01'), 'The value of path `{PATH}` ({VALUE}) exceeds the limit ({MAX}).'];
* var schema = new Schema({ d: { type: Date, max: max })
* var M = mongoose.model('M', schema);
* var s= new M({ d: Date('2014-12-08') });
* s.validate(function (err) {
* console.log(String(err)) // ValidationError: The value of path `d` (2014-12-08) exceeds the limit (2014-01-01).
* })
*
* @param {Date} maximum date
* @param {String} [message] optional custom error message
* @return {SchemaType} this
* @see Customized Error Messages #error_messages_MongooseError-messages
* @api public
*/
SchemaDate.prototype.max = function(value, message) {
if (this.maxValidator) {
this.validators = this.validators.filter(function(v) {
return v.validator !== this.maxValidator;
}, this);
}
if (value) {
var msg = message || MongooseError.messages.Date.max;
msg = msg.replace(/{MAX}/, (value === Date.now ? 'Date.now()' : this.cast(value).toString()));
var _this = this;
this.validators.push({
validator: this.maxValidator = function(val) {
var max = (value === Date.now ? value() : _this.cast(value));
return val === null || val.valueOf() <= max.valueOf();
},
message: msg,
type: 'max',
max: value
});
}
return this;
};
/**
* Casts to date
*
* @param {Object} value to cast
* @api private
*/
SchemaDate.prototype.cast = function(value) {
// If null or undefined
if (value === null || value === void 0 || value === '') {
return null;
}
if (value instanceof Date) {
if (isNaN(value.valueOf())) {
throw new CastError('date', value, this.path);
}
return value;
}
var date;
if (typeof value === 'boolean') {
throw new CastError('date', value, this.path);
}
if (value instanceof Number || typeof value === 'number') {
date = new Date(value);
} else if (typeof value.valueOf === 'function') {
// support for moment.js. This is also the path strings will take because strings
// have a `valueOf()`
date = new Date(value.valueOf());
} else {
// fallback
date = new Date(value);
}
if (!isNaN(date.valueOf())) {
return date;
}
throw new CastError('date', value, this.path);
};
/*!
* Date Query casting.
*
* @api private
*/
function handleSingle(val) {
return this.cast(val);
}
SchemaDate.prototype.$conditionalHandlers =
utils.options(SchemaType.prototype.$conditionalHandlers, {
$gt: handleSingle,
$gte: handleSingle,
$lt: handleSingle,
$lte: handleSingle
});
/**
* Casts contents for queries.
*
* @param {String} $conditional
* @param {any} [value]
* @api private
*/
SchemaDate.prototype.castForQuery = function($conditional, val) {
var handler;
if (arguments.length !== 2) {
return this._castForQuery($conditional);
}
handler = this.$conditionalHandlers[$conditional];
if (!handler) {
throw new Error('Can\'t use ' + $conditional + ' with Date.');
}
return handler.call(this, val);
};
/*!
* Module exports.
*/
module.exports = SchemaDate;
| 1 | 13,757 | I'm not 100% sold on this idea but I like it in general. Nice compromise between using the 'Date' constructor where possible and falling back to the pre #5880 behavior when it makes sense. However, instead of `Number(value) < 0`, let's do `Number(value) < MIN_YEAR` because `new Date('-2017')` is perfectly valid in JS and I don't want to break the date behavior. | Automattic-mongoose | js |
@@ -138,8 +138,7 @@ public abstract class BaseHttpClusterStateProvider implements ClusterStateProvid
Set<String> liveNodes = new HashSet((List<String>)(cluster.get("live_nodes")));
this.liveNodes = liveNodes;
liveNodesTimestamp = System.nanoTime();
- //TODO SOLR-11877 we don't know the znode path; CLUSTER_STATE is probably wrong leading to bad stateFormat
- ClusterState cs = ClusterState.load(znodeVersion, collectionsMap, liveNodes, ZkStateReader.CLUSTER_STATE);
+ ClusterState cs = ClusterState.createFromData(znodeVersion, collectionsMap, liveNodes);
if (clusterProperties != null) {
Map<String, Object> properties = (Map<String, Object>) cluster.get("properties");
if (properties != null) { | 1 | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.solr.client.solrj.impl;
import java.io.IOException;
import java.lang.invoke.MethodHandles;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.TimeUnit;
import org.apache.solr.client.solrj.SolrClient;
import org.apache.solr.client.solrj.SolrServerException;
import org.apache.solr.client.solrj.request.CollectionAdminRequest;
import org.apache.solr.client.solrj.request.QueryRequest;
import org.apache.solr.client.solrj.response.CollectionAdminResponse;
import org.apache.solr.common.cloud.Aliases;
import org.apache.solr.common.cloud.ClusterState;
import org.apache.solr.common.cloud.ZkStateReader;
import org.apache.solr.common.params.ModifiableSolrParams;
import org.apache.solr.common.util.NamedList;
import org.apache.solr.common.util.SimpleOrderedMap;
import org.apache.solr.common.util.Utils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import static org.apache.solr.client.solrj.impl.BaseHttpSolrClient.*;
public abstract class BaseHttpClusterStateProvider implements ClusterStateProvider {
private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
private String urlScheme;
volatile Set<String> liveNodes;
long liveNodesTimestamp = 0;
volatile Map<String, List<String>> aliases;
volatile Map<String, Map<String, String>> aliasProperties;
long aliasesTimestamp = 0;
private int cacheTimeout = 5; // the liveNodes and aliases cache will be invalidated after 5 secs
public void init(List<String> solrUrls) throws Exception {
for (String solrUrl: solrUrls) {
urlScheme = solrUrl.startsWith("https")? "https": "http";
try (SolrClient initialClient = getSolrClient(solrUrl)) {
this.liveNodes = fetchLiveNodes(initialClient);
liveNodesTimestamp = System.nanoTime();
break;
} catch (SolrServerException | IOException e) {
log.warn("Attempt to fetch cluster state from {} failed.", solrUrl, e);
}
}
if (this.liveNodes == null || this.liveNodes.isEmpty()) {
throw new RuntimeException("Tried fetching live_nodes using Solr URLs provided, i.e. " + solrUrls + ". However, "
+ "succeeded in obtaining the cluster state from none of them."
+ "If you think your Solr cluster is up and is accessible,"
+ " you could try re-creating a new CloudSolrClient using working"
+ " solrUrl(s) or zkHost(s).");
}
}
protected abstract SolrClient getSolrClient(String baseUrl);
@Override
public ClusterState.CollectionRef getState(String collection) {
for (String nodeName: liveNodes) {
String baseUrl = Utils.getBaseUrlForNodeName(nodeName, urlScheme);
try (SolrClient client = getSolrClient(baseUrl)) {
ClusterState cs = fetchClusterState(client, collection, null);
return cs.getCollectionRef(collection);
} catch (SolrServerException | IOException e) {
log.warn("Attempt to fetch cluster state from {} failed."
, Utils.getBaseUrlForNodeName(nodeName, urlScheme), e);
} catch (RemoteSolrException e) {
if ("NOT_FOUND".equals(e.getMetadata("CLUSTERSTATUS"))) {
return null;
}
log.warn("Attempt to fetch cluster state from {} failed.", baseUrl, e);
} catch (NotACollectionException e) {
// Cluster state for the given collection was not found, could be an alias.
// Lets fetch/update our aliases:
getAliases(true);
return null;
}
}
throw new RuntimeException("Tried fetching cluster state using the node names we knew of, i.e. " + liveNodes +". However, "
+ "succeeded in obtaining the cluster state from none of them."
+ "If you think your Solr cluster is up and is accessible,"
+ " you could try re-creating a new CloudSolrClient using working"
+ " solrUrl(s) or zkHost(s).");
}
@SuppressWarnings({"rawtypes", "unchecked"})
private ClusterState fetchClusterState(SolrClient client, String collection, Map<String, Object> clusterProperties) throws SolrServerException, IOException, NotACollectionException {
ModifiableSolrParams params = new ModifiableSolrParams();
if (collection != null) {
params.set("collection", collection);
}
params.set("action", "CLUSTERSTATUS");
QueryRequest request = new QueryRequest(params);
request.setPath("/admin/collections");
NamedList cluster = (SimpleOrderedMap) client.request(request).get("cluster");
Map<String, Object> collectionsMap;
if (collection != null) {
collectionsMap = Collections.singletonMap(collection,
((NamedList) cluster.get("collections")).get(collection));
} else {
collectionsMap = ((NamedList)cluster.get("collections")).asMap(10);
}
int znodeVersion;
Map<String, Object> collFromStatus = (Map<String, Object>) (collectionsMap).get(collection);
if (collection != null && collFromStatus == null) {
throw new NotACollectionException(); // probably an alias
}
if (collection != null) { // can be null if alias
znodeVersion = (int) collFromStatus.get("znodeVersion");
} else {
znodeVersion = -1;
}
Set<String> liveNodes = new HashSet((List<String>)(cluster.get("live_nodes")));
this.liveNodes = liveNodes;
liveNodesTimestamp = System.nanoTime();
//TODO SOLR-11877 we don't know the znode path; CLUSTER_STATE is probably wrong leading to bad stateFormat
ClusterState cs = ClusterState.load(znodeVersion, collectionsMap, liveNodes, ZkStateReader.CLUSTER_STATE);
if (clusterProperties != null) {
Map<String, Object> properties = (Map<String, Object>) cluster.get("properties");
if (properties != null) {
clusterProperties.putAll(properties);
}
}
return cs;
}
@Override
public Set<String> getLiveNodes() {
if (liveNodes == null) {
throw new RuntimeException("We don't know of any live_nodes to fetch the"
+ " latest live_nodes information from. "
+ "If you think your Solr cluster is up and is accessible,"
+ " you could try re-creating a new CloudSolrClient using working"
+ " solrUrl(s) or zkHost(s).");
}
if (TimeUnit.SECONDS.convert((System.nanoTime() - liveNodesTimestamp), TimeUnit.NANOSECONDS) > getCacheTimeout()) {
for (String nodeName: liveNodes) {
String baseUrl = Utils.getBaseUrlForNodeName(nodeName, urlScheme);
try (SolrClient client = getSolrClient(baseUrl)) {
Set<String> liveNodes = fetchLiveNodes(client);
this.liveNodes = (liveNodes);
liveNodesTimestamp = System.nanoTime();
return liveNodes;
} catch (Exception e) {
log.warn("Attempt to fetch cluster state from {} failed.", baseUrl, e);
}
}
throw new RuntimeException("Tried fetching live_nodes using all the node names we knew of, i.e. " + liveNodes +". However, "
+ "succeeded in obtaining the cluster state from none of them."
+ "If you think your Solr cluster is up and is accessible,"
+ " you could try re-creating a new CloudSolrClient using working"
+ " solrUrl(s) or zkHost(s).");
} else {
return liveNodes; // cached copy is fresh enough
}
}
private static Set<String> fetchLiveNodes(SolrClient client) throws Exception {
ModifiableSolrParams params = new ModifiableSolrParams();
params.set("action", "CLUSTERSTATUS");
QueryRequest request = new QueryRequest(params);
request.setPath("/admin/collections");
NamedList cluster = (SimpleOrderedMap) client.request(request).get("cluster");
return (Set<String>) new HashSet((List<String>)(cluster.get("live_nodes")));
}
@Override
public List<String> resolveAlias(String aliasName) {
return resolveAlias(aliasName, false);
}
public List<String> resolveAlias(String aliasName, boolean forceFetch) {
return Aliases.resolveAliasesGivenAliasMap(getAliases(forceFetch), aliasName);
}
@Override
public String resolveSimpleAlias(String aliasName) throws IllegalArgumentException {
return Aliases.resolveSimpleAliasGivenAliasMap(getAliases(false), aliasName);
}
private Map<String, List<String>> getAliases(boolean forceFetch) {
if (this.liveNodes == null) {
throw new RuntimeException("We don't know of any live_nodes to fetch the"
+ " latest aliases information from. "
+ "If you think your Solr cluster is up and is accessible,"
+ " you could try re-creating a new CloudSolrClient using working"
+ " solrUrl(s) or zkHost(s).");
}
if (forceFetch || this.aliases == null ||
TimeUnit.SECONDS.convert((System.nanoTime() - aliasesTimestamp), TimeUnit.NANOSECONDS) > getCacheTimeout()) {
for (String nodeName: liveNodes) {
String baseUrl = Utils.getBaseUrlForNodeName(nodeName, urlScheme);
try (SolrClient client = getSolrClient(baseUrl)) {
CollectionAdminResponse response = new CollectionAdminRequest.ListAliases().process(client);
this.aliases = response.getAliasesAsLists();
this.aliasProperties = response.getAliasProperties(); // side-effect
this.aliasesTimestamp = System.nanoTime();
return Collections.unmodifiableMap(this.aliases);
} catch (SolrServerException | RemoteSolrException | IOException e) {
// Situation where we're hitting an older Solr which doesn't have LISTALIASES
if (e instanceof RemoteSolrException && ((RemoteSolrException)e).code()==400) {
log.warn("LISTALIASES not found, possibly using older Solr server. Aliases won't work {}"
,"unless you re-create the CloudSolrClient using zkHost(s) or upgrade Solr server"
, e);
this.aliases = Collections.emptyMap();
this.aliasProperties = Collections.emptyMap();
this.aliasesTimestamp = System.nanoTime();
return aliases;
}
log.warn("Attempt to fetch cluster state from {} failed.", baseUrl, e);
}
}
throw new RuntimeException("Tried fetching aliases using all the node names we knew of, i.e. " + liveNodes +". However, "
+ "succeeded in obtaining the cluster state from none of them."
+ "If you think your Solr cluster is up and is accessible,"
+ " you could try re-creating a new CloudSolrClient using a working"
+ " solrUrl or zkHost.");
} else {
return Collections.unmodifiableMap(this.aliases); // cached copy is fresh enough
}
}
@Override
public Map<String, String> getAliasProperties(String alias) {
getAliases(false);
return Collections.unmodifiableMap(aliasProperties.getOrDefault(alias, Collections.emptyMap()));
}
@Override
public ClusterState getClusterState() throws IOException {
for (String nodeName: liveNodes) {
String baseUrl = Utils.getBaseUrlForNodeName(nodeName, urlScheme);
try (SolrClient client = getSolrClient(baseUrl)) {
return fetchClusterState(client, null, null);
} catch (SolrServerException | BaseHttpSolrClient.RemoteSolrException | IOException e) {
log.warn("Attempt to fetch cluster state from {} failed.", baseUrl, e);
} catch (NotACollectionException e) {
// not possible! (we passed in null for collection so it can't be an alias)
throw new RuntimeException("null should never cause NotACollectionException in " +
"fetchClusterState() Please report this as a bug!");
}
}
throw new RuntimeException("Tried fetching cluster state using the node names we knew of, i.e. " + liveNodes +". However, "
+ "succeeded in obtaining the cluster state from none of them."
+ "If you think your Solr cluster is up and is accessible,"
+ " you could try re-creating a new CloudSolrClient using working"
+ " solrUrl(s) or zkHost(s).");
}
@Override
public Map<String, Object> getClusterProperties() {
for (String nodeName : liveNodes) {
String baseUrl = Utils.getBaseUrlForNodeName(nodeName, urlScheme);
try (SolrClient client = getSolrClient(baseUrl)) {
Map<String, Object> clusterProperties = new HashMap<>();
fetchClusterState(client, null, clusterProperties);
return clusterProperties;
} catch (SolrServerException | BaseHttpSolrClient.RemoteSolrException | IOException e) {
log.warn("Attempt to fetch cluster state from {} failed.", baseUrl, e);
} catch (NotACollectionException e) {
// not possible! (we passed in null for collection so it can't be an alias)
throw new RuntimeException("null should never cause NotACollectionException in " +
"fetchClusterState() Please report this as a bug!");
}
}
throw new RuntimeException("Tried fetching cluster state using the node names we knew of, i.e. " + liveNodes + ". However, "
+ "succeeded in obtaining the cluster state from none of them."
+ "If you think your Solr cluster is up and is accessible,"
+ " you could try re-creating a new CloudSolrClient using working"
+ " solrUrl(s) or zkHost(s).");
}
@Override
public String getPolicyNameByCollection(String coll) {
throw new UnsupportedOperationException("Fetching cluster properties not supported"
+ " using the HttpClusterStateProvider. "
+ "ZkClientClusterStateProvider can be used for this."); // TODO
}
@Override
public Object getClusterProperty(String propertyName) {
if (propertyName.equals(ZkStateReader.URL_SCHEME)) {
return this.urlScheme;
}
return getClusterProperties().get(propertyName);
}
@Override
public void connect() {}
public int getCacheTimeout() {
return cacheTimeout;
}
public void setCacheTimeout(int cacheTimeout) {
this.cacheTimeout = cacheTimeout;
}
// This exception is not meant to escape this class it should be caught and wrapped.
private class NotACollectionException extends Exception {
}
}
| 1 | 34,394 | Remember to close SOLR-11877 after this | apache-lucene-solr | java |
@@ -1124,7 +1124,7 @@ void Identifier::_exportToJSON(JSONFormatter *formatter) const {
//! @cond Doxygen_Suppress
static bool isIgnoredChar(char ch) {
return ch == ' ' || ch == '_' || ch == '-' || ch == '/' || ch == '(' ||
- ch == ')' || ch == '.' || ch == '&' || ch == ',';
+ ch == ')' || ch == '.' || ch == '&';
}
//! @endcond
| 1 | /******************************************************************************
*
* Project: PROJ
* Purpose: ISO19111:2019 implementation
* Author: Even Rouault <even dot rouault at spatialys dot com>
*
******************************************************************************
* Copyright (c) 2018, Even Rouault <even dot rouault at spatialys dot com>
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included
* in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
****************************************************************************/
#ifndef FROM_PROJ_CPP
#define FROM_PROJ_CPP
#endif
#include "proj/metadata.hpp"
#include "proj/common.hpp"
#include "proj/io.hpp"
#include "proj/util.hpp"
#include "proj/internal/internal.hpp"
#include "proj/internal/io_internal.hpp"
#include "proj_json_streaming_writer.hpp"
#include <algorithm>
#include <memory>
#include <string>
#include <vector>
using namespace NS_PROJ::internal;
using namespace NS_PROJ::io;
using namespace NS_PROJ::util;
#if 0
namespace dropbox{ namespace oxygen {
template<> nn<std::shared_ptr<NS_PROJ::metadata::Citation>>::~nn() = default;
template<> nn<NS_PROJ::metadata::ExtentPtr>::~nn() = default;
template<> nn<NS_PROJ::metadata::GeographicBoundingBoxPtr>::~nn() = default;
template<> nn<NS_PROJ::metadata::GeographicExtentPtr>::~nn() = default;
template<> nn<NS_PROJ::metadata::VerticalExtentPtr>::~nn() = default;
template<> nn<NS_PROJ::metadata::TemporalExtentPtr>::~nn() = default;
template<> nn<NS_PROJ::metadata::IdentifierPtr>::~nn() = default;
template<> nn<NS_PROJ::metadata::PositionalAccuracyPtr>::~nn() = default;
}}
#endif
NS_PROJ_START
namespace metadata {
// ---------------------------------------------------------------------------
//! @cond Doxygen_Suppress
struct Citation::Private {
optional<std::string> title{};
};
//! @endcond
// ---------------------------------------------------------------------------
//! @cond Doxygen_Suppress
Citation::Citation() : d(internal::make_unique<Private>()) {}
//! @endcond
// ---------------------------------------------------------------------------
/** \brief Constructs a citation by its title. */
Citation::Citation(const std::string &titleIn)
: d(internal::make_unique<Private>()) {
d->title = titleIn;
}
// ---------------------------------------------------------------------------
//! @cond Doxygen_Suppress
Citation::Citation(const Citation &other)
: d(internal::make_unique<Private>(*(other.d))) {}
// ---------------------------------------------------------------------------
Citation::~Citation() = default;
// ---------------------------------------------------------------------------
Citation &Citation::operator=(const Citation &other) {
if (this != &other) {
*d = *other.d;
}
return *this;
}
//! @endcond
// ---------------------------------------------------------------------------
/** \brief Returns the name by which the cited resource is known. */
const optional<std::string> &Citation::title() PROJ_PURE_DEFN {
return d->title;
}
// ---------------------------------------------------------------------------
//! @cond Doxygen_Suppress
struct GeographicExtent::Private {};
//! @endcond
// ---------------------------------------------------------------------------
GeographicExtent::GeographicExtent() : d(internal::make_unique<Private>()) {}
// ---------------------------------------------------------------------------
//! @cond Doxygen_Suppress
GeographicExtent::~GeographicExtent() = default;
//! @endcond
// ---------------------------------------------------------------------------
//! @cond Doxygen_Suppress
struct GeographicBoundingBox::Private {
double west_{};
double south_{};
double east_{};
double north_{};
Private(double west, double south, double east, double north)
: west_(west), south_(south), east_(east), north_(north) {}
bool intersects(const Private &other) const;
std::unique_ptr<Private> intersection(const Private &other) const;
};
//! @endcond
// ---------------------------------------------------------------------------
GeographicBoundingBox::GeographicBoundingBox(double west, double south,
double east, double north)
: GeographicExtent(),
d(internal::make_unique<Private>(west, south, east, north)) {}
// ---------------------------------------------------------------------------
//! @cond Doxygen_Suppress
GeographicBoundingBox::~GeographicBoundingBox() = default;
//! @endcond
// ---------------------------------------------------------------------------
/** \brief Returns the western-most coordinate of the limit of the dataset
* extent.
*
* The unit is degrees.
*
 * If eastBoundLongitude() < westBoundLongitude(), then the bounding box
 * crosses the anti-meridian.
*/
double GeographicBoundingBox::westBoundLongitude() PROJ_PURE_DEFN {
return d->west_;
}
// ---------------------------------------------------------------------------
/** \brief Returns the southern-most coordinate of the limit of the dataset
* extent.
*
* The unit is degrees.
*/
double GeographicBoundingBox::southBoundLatitude() PROJ_PURE_DEFN {
return d->south_;
}
// ---------------------------------------------------------------------------
/** \brief Returns the eastern-most coordinate of the limit of the dataset
* extent.
*
* The unit is degrees.
*
 * If eastBoundLongitude() < westBoundLongitude(), then the bounding box
 * crosses the anti-meridian.
*/
double GeographicBoundingBox::eastBoundLongitude() PROJ_PURE_DEFN {
return d->east_;
}
// ---------------------------------------------------------------------------
/** \brief Returns the northern-most coordinate of the limit of the dataset
* extent.
*
* The unit is degrees.
*/
double GeographicBoundingBox::northBoundLatitude() PROJ_PURE_DEFN {
return d->north_;
}
// ---------------------------------------------------------------------------
/** \brief Instantiate a GeographicBoundingBox.
*
* If east < west, then the bounding box crosses the anti-meridian.
*
* @param west Western-most coordinate of the limit of the dataset extent (in
* degrees).
* @param south Southern-most coordinate of the limit of the dataset extent (in
* degrees).
* @param east Eastern-most coordinate of the limit of the dataset extent (in
* degrees).
* @param north Northern-most coordinate of the limit of the dataset extent (in
* degrees).
* @return a new GeographicBoundingBox.
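 *
 * A minimal usage sketch (values are illustrative only):
 * \code
 * // Box around western Europe, in degrees.
 * auto bbox = GeographicBoundingBox::create(-10.0, 35.0, 20.0, 60.0);
 * // With west > east the box crosses the anti-meridian (Fiji area).
 * auto fiji = GeographicBoundingBox::create(176.0, -21.0, -178.0, -15.0);
 * \endcode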
*/
GeographicBoundingBoxNNPtr GeographicBoundingBox::create(double west,
double south,
double east,
double north) {
return GeographicBoundingBox::nn_make_shared<GeographicBoundingBox>(
west, south, east, north);
}
// ---------------------------------------------------------------------------
//! @cond Doxygen_Suppress
bool GeographicBoundingBox::_isEquivalentTo(
const util::IComparable *other, util::IComparable::Criterion,
const io::DatabaseContextPtr &) const {
auto otherExtent = dynamic_cast<const GeographicBoundingBox *>(other);
if (!otherExtent)
return false;
return d->west_ == otherExtent->d->west_ &&
d->south_ == otherExtent->d->south_ &&
d->east_ == otherExtent->d->east_ &&
d->north_ == otherExtent->d->north_;
}
//! @endcond
// ---------------------------------------------------------------------------
bool GeographicBoundingBox::contains(const GeographicExtentNNPtr &other) const {
auto otherExtent = dynamic_cast<const GeographicBoundingBox *>(other.get());
if (!otherExtent) {
return false;
}
const double W = d->west_;
const double E = d->east_;
const double N = d->north_;
const double S = d->south_;
const double oW = otherExtent->d->west_;
const double oE = otherExtent->d->east_;
const double oN = otherExtent->d->north_;
const double oS = otherExtent->d->south_;
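    // Shorthand: (W, S, E, N) is this box, (oW, oS, oE, oN) the other one.
    // A box whose west bound exceeds its east bound crosses the antimeridian.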
if (!(S <= oS && N >= oN)) {
return false;
}
if (W == -180.0 && E == 180.0) {
return true;
}
if (oW == -180.0 && oE == 180.0) {
return false;
}
    // Normal bounding box?
if (W < E) {
if (oW < oE) {
return W <= oW && E >= oE;
} else {
return false;
}
    // No: crossing the antimeridian
} else {
if (oW < oE) {
if (oW >= W) {
return true;
} else if (oE <= E) {
return true;
} else {
return false;
}
} else {
return W <= oW && E >= oE;
}
}
}
// ---------------------------------------------------------------------------
//! @cond Doxygen_Suppress
bool GeographicBoundingBox::Private::intersects(const Private &other) const {
const double W = west_;
const double E = east_;
const double N = north_;
const double S = south_;
const double oW = other.west_;
const double oE = other.east_;
const double oN = other.north_;
const double oS = other.south_;
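    // Quick reject: the two boxes are disjoint in latitude.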
if (N < oS || S > oN) {
return false;
}
if (W == -180.0 && E == 180.0 && oW > oE) {
return true;
}
if (oW == -180.0 && oE == 180.0 && W > E) {
return true;
}
    // Normal bounding box?
if (W <= E) {
if (oW < oE) {
if (std::max(W, oW) < std::min(E, oE)) {
return true;
}
return false;
}
return intersects(Private(oW, oS, 180.0, oN)) ||
intersects(Private(-180.0, oS, oE, oN));
        // No: crossing the antimeridian
} else {
if (oW <= oE) {
return other.intersects(*this);
}
return true;
}
}
//! @endcond
bool GeographicBoundingBox::intersects(
const GeographicExtentNNPtr &other) const {
auto otherExtent = dynamic_cast<const GeographicBoundingBox *>(other.get());
if (!otherExtent) {
return false;
}
return d->intersects(*(otherExtent->d));
}
// ---------------------------------------------------------------------------
GeographicExtentPtr
GeographicBoundingBox::intersection(const GeographicExtentNNPtr &other) const {
auto otherExtent = dynamic_cast<const GeographicBoundingBox *>(other.get());
if (!otherExtent) {
return nullptr;
}
auto ret = d->intersection(*(otherExtent->d));
if (ret) {
auto bbox = GeographicBoundingBox::create(ret->west_, ret->south_,
ret->east_, ret->north_);
return bbox.as_nullable();
}
return nullptr;
}
//! @cond Doxygen_Suppress
std::unique_ptr<GeographicBoundingBox::Private>
GeographicBoundingBox::Private::intersection(const Private &otherExtent) const {
const double W = west_;
const double E = east_;
const double N = north_;
const double S = south_;
const double oW = otherExtent.west_;
const double oE = otherExtent.east_;
const double oN = otherExtent.north_;
const double oS = otherExtent.south_;
if (N < oS || S > oN) {
return nullptr;
}
if (W == -180.0 && E == 180.0 && oW > oE) {
return internal::make_unique<Private>(oW, std::max(S, oS), oE,
std::min(N, oN));
}
if (oW == -180.0 && oE == 180.0 && W > E) {
return internal::make_unique<Private>(W, std::max(S, oS), E,
std::min(N, oN));
}
    // Normal bounding box?
if (W <= E) {
if (oW < oE) {
auto res = internal::make_unique<Private>(
std::max(W, oW), std::max(S, oS), std::min(E, oE),
std::min(N, oN));
if (res->west_ < res->east_) {
return res;
}
return nullptr;
}
        // Return the larger of the two parts of the multipolygon
auto inter1 = intersection(Private(oW, oS, 180.0, oN));
auto inter2 = intersection(Private(-180.0, oS, oE, oN));
if (!inter1) {
return inter2;
}
if (!inter2) {
return inter1;
}
if (inter1->east_ - inter1->west_ > inter2->east_ - inter2->west_) {
return inter1;
}
return inter2;
        // No: crossing the antimeridian
} else {
if (oW <= oE) {
return otherExtent.intersection(*this);
}
return internal::make_unique<Private>(std::max(W, oW), std::max(S, oS),
std::min(E, oE), std::min(N, oN));
}
}
//! @endcond
// ---------------------------------------------------------------------------
//! @cond Doxygen_Suppress
struct VerticalExtent::Private {
double minimum_{};
double maximum_{};
common::UnitOfMeasureNNPtr unit_;
Private(double minimum, double maximum,
const common::UnitOfMeasureNNPtr &unit)
: minimum_(minimum), maximum_(maximum), unit_(unit) {}
};
//! @endcond
// ---------------------------------------------------------------------------
VerticalExtent::VerticalExtent(double minimumIn, double maximumIn,
const common::UnitOfMeasureNNPtr &unitIn)
: d(internal::make_unique<Private>(minimumIn, maximumIn, unitIn)) {}
// ---------------------------------------------------------------------------
//! @cond Doxygen_Suppress
VerticalExtent::~VerticalExtent() = default;
//! @endcond
// ---------------------------------------------------------------------------
/** \brief Returns the minimum of the vertical extent.
*/
double VerticalExtent::minimumValue() PROJ_PURE_DEFN { return d->minimum_; }
// ---------------------------------------------------------------------------
/** \brief Returns the maximum of the vertical extent.
*/
double VerticalExtent::maximumValue() PROJ_PURE_DEFN { return d->maximum_; }
// ---------------------------------------------------------------------------
/** \brief Returns the unit of the vertical extent.
*/
common::UnitOfMeasureNNPtr &VerticalExtent::unit() PROJ_PURE_DEFN {
return d->unit_;
}
// ---------------------------------------------------------------------------
/** \brief Instantiate a VerticalExtent.
*
* @param minimumIn minimum.
* @param maximumIn maximum.
* @param unitIn unit.
* @return a new VerticalExtent.
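 *
 * A usage sketch; note that the way the unit pointer is built below is an
 * assumption for illustration, not the only possible construction:
 * \code
 * auto vertExtent = VerticalExtent::create(
 *     100.0, 2000.0,
 *     util::nn_make_shared<common::UnitOfMeasure>(
 *         common::UnitOfMeasure::METRE));
 * \endcode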
*/
VerticalExtentNNPtr
VerticalExtent::create(double minimumIn, double maximumIn,
const common::UnitOfMeasureNNPtr &unitIn) {
return VerticalExtent::nn_make_shared<VerticalExtent>(minimumIn, maximumIn,
unitIn);
}
// ---------------------------------------------------------------------------
//! @cond Doxygen_Suppress
bool VerticalExtent::_isEquivalentTo(const util::IComparable *other,
util::IComparable::Criterion,
const io::DatabaseContextPtr &) const {
auto otherExtent = dynamic_cast<const VerticalExtent *>(other);
if (!otherExtent)
return false;
return d->minimum_ == otherExtent->d->minimum_ &&
d->maximum_ == otherExtent->d->maximum_ &&
d->unit_ == otherExtent->d->unit_;
}
//! @endcond
// ---------------------------------------------------------------------------
/** \brief Returns whether this extent contains the other one.
*/
bool VerticalExtent::contains(const VerticalExtentNNPtr &other) const {
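    // Convert both extents to SI units so that extents expressed in
    // different units compare correctly.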
const double thisUnitToSI = d->unit_->conversionToSI();
const double otherUnitToSI = other->d->unit_->conversionToSI();
return d->minimum_ * thisUnitToSI <= other->d->minimum_ * otherUnitToSI &&
d->maximum_ * thisUnitToSI >= other->d->maximum_ * otherUnitToSI;
}
// ---------------------------------------------------------------------------
/** \brief Returns whether this extent intersects the other one.
*/
bool VerticalExtent::intersects(const VerticalExtentNNPtr &other) const {
const double thisUnitToSI = d->unit_->conversionToSI();
const double otherUnitToSI = other->d->unit_->conversionToSI();
return d->minimum_ * thisUnitToSI <= other->d->maximum_ * otherUnitToSI &&
d->maximum_ * thisUnitToSI >= other->d->minimum_ * otherUnitToSI;
}
// ---------------------------------------------------------------------------
//! @cond Doxygen_Suppress
struct TemporalExtent::Private {
std::string start_{};
std::string stop_{};
Private(const std::string &start, const std::string &stop)
: start_(start), stop_(stop) {}
};
//! @endcond
// ---------------------------------------------------------------------------
TemporalExtent::TemporalExtent(const std::string &startIn,
const std::string &stopIn)
: d(internal::make_unique<Private>(startIn, stopIn)) {}
// ---------------------------------------------------------------------------
//! @cond Doxygen_Suppress
TemporalExtent::~TemporalExtent() = default;
//! @endcond
// ---------------------------------------------------------------------------
/** \brief Returns the start of the temporal extent.
*/
const std::string &TemporalExtent::start() PROJ_PURE_DEFN { return d->start_; }
// ---------------------------------------------------------------------------
/** \brief Returns the end of the temporal extent.
*/
const std::string &TemporalExtent::stop() PROJ_PURE_DEFN { return d->stop_; }
// ---------------------------------------------------------------------------
/** \brief Instantiate a TemporalExtent.
*
* @param start start.
* @param stop stop.
* @return a new TemporalExtent.
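 *
 * A minimal usage sketch (dates are arbitrary ISO 8601 strings):
 * \code
 * auto temporalExtent = TemporalExtent::create("2000-01-01", "2020-12-31");
 * \endcode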
*/
TemporalExtentNNPtr TemporalExtent::create(const std::string &start,
const std::string &stop) {
return TemporalExtent::nn_make_shared<TemporalExtent>(start, stop);
}
// ---------------------------------------------------------------------------
//! @cond Doxygen_Suppress
bool TemporalExtent::_isEquivalentTo(const util::IComparable *other,
util::IComparable::Criterion,
const io::DatabaseContextPtr &) const {
auto otherExtent = dynamic_cast<const TemporalExtent *>(other);
if (!otherExtent)
return false;
return start() == otherExtent->start() && stop() == otherExtent->stop();
}
//! @endcond
// ---------------------------------------------------------------------------
/** \brief Returns whether this extent contains the other one.
*/
bool TemporalExtent::contains(const TemporalExtentNNPtr &other) const {
return start() <= other->start() && stop() >= other->stop();
}
// ---------------------------------------------------------------------------
/** \brief Returns whether this extent intersects the other one.
*/
bool TemporalExtent::intersects(const TemporalExtentNNPtr &other) const {
return start() <= other->stop() && stop() >= other->start();
}
// ---------------------------------------------------------------------------
//! @cond Doxygen_Suppress
struct Extent::Private {
optional<std::string> description_{};
std::vector<GeographicExtentNNPtr> geographicElements_{};
std::vector<VerticalExtentNNPtr> verticalElements_{};
std::vector<TemporalExtentNNPtr> temporalElements_{};
};
//! @endcond
// ---------------------------------------------------------------------------
//! @cond Doxygen_Suppress
Extent::Extent() : d(internal::make_unique<Private>()) {}
// ---------------------------------------------------------------------------
Extent::Extent(const Extent &other)
: d(internal::make_unique<Private>(*other.d)) {}
// ---------------------------------------------------------------------------
Extent::~Extent() = default;
//! @endcond
// ---------------------------------------------------------------------------
/** Return a textual description of the extent.
*
* @return the description, or empty.
*/
const optional<std::string> &Extent::description() PROJ_PURE_DEFN {
return d->description_;
}
// ---------------------------------------------------------------------------
/** Return the geographic element(s) of the extent
*
* @return the geographic element(s), or empty.
*/
const std::vector<GeographicExtentNNPtr> &
Extent::geographicElements() PROJ_PURE_DEFN {
return d->geographicElements_;
}
// ---------------------------------------------------------------------------
/** Return the vertical element(s) of the extent
*
* @return the vertical element(s), or empty.
*/
const std::vector<VerticalExtentNNPtr> &
Extent::verticalElements() PROJ_PURE_DEFN {
return d->verticalElements_;
}
// ---------------------------------------------------------------------------
/** Return the temporal element(s) of the extent
*
* @return the temporal element(s), or empty.
*/
const std::vector<TemporalExtentNNPtr> &
Extent::temporalElements() PROJ_PURE_DEFN {
return d->temporalElements_;
}
// ---------------------------------------------------------------------------
/** \brief Instantiate an Extent.
*
* @param descriptionIn Textual description, or empty.
* @param geographicElementsIn Geographic element(s), or empty.
* @param verticalElementsIn Vertical element(s), or empty.
* @param temporalElementsIn Temporal element(s), or empty.
* @return a new Extent.
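 *
 * A usage sketch composing a single geographic element (values illustrative;
 * the explicit cast mirrors the pattern used in createFromBBOX()):
 * \code
 * std::vector<GeographicExtentNNPtr> geogElts{
 *     nn_static_pointer_cast<GeographicExtent>(
 *         GeographicBoundingBox::create(-10.0, 35.0, 20.0, 60.0))};
 * auto extent = Extent::create(util::optional<std::string>("Western Europe"),
 *                              geogElts, {}, {});
 * \endcode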
*/
ExtentNNPtr
Extent::create(const optional<std::string> &descriptionIn,
const std::vector<GeographicExtentNNPtr> &geographicElementsIn,
const std::vector<VerticalExtentNNPtr> &verticalElementsIn,
const std::vector<TemporalExtentNNPtr> &temporalElementsIn) {
auto extent = Extent::nn_make_shared<Extent>();
extent->assignSelf(extent);
extent->d->description_ = descriptionIn;
extent->d->geographicElements_ = geographicElementsIn;
extent->d->verticalElements_ = verticalElementsIn;
extent->d->temporalElements_ = temporalElementsIn;
return extent;
}
// ---------------------------------------------------------------------------
/** \brief Instantiate an Extent from a bounding box
*
* @param west Western-most coordinate of the limit of the dataset extent (in
* degrees).
* @param south Southern-most coordinate of the limit of the dataset extent (in
* degrees).
* @param east Eastern-most coordinate of the limit of the dataset extent (in
* degrees).
* @param north Northern-most coordinate of the limit of the dataset extent (in
* degrees).
* @param descriptionIn Textual description, or empty.
* @return a new Extent.
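 *
 * A minimal usage sketch (coordinates are illustrative):
 * \code
 * auto extent = Extent::createFromBBOX(
 *     -10.0, 35.0, 20.0, 60.0, util::optional<std::string>("Western Europe"));
 * \endcode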
*/
ExtentNNPtr
Extent::createFromBBOX(double west, double south, double east, double north,
const util::optional<std::string> &descriptionIn) {
return create(
descriptionIn,
std::vector<GeographicExtentNNPtr>{
nn_static_pointer_cast<GeographicExtent>(
GeographicBoundingBox::create(west, south, east, north))},
std::vector<VerticalExtentNNPtr>(), std::vector<TemporalExtentNNPtr>());
}
// ---------------------------------------------------------------------------
//! @cond Doxygen_Suppress
bool Extent::_isEquivalentTo(const util::IComparable *other,
util::IComparable::Criterion criterion,
const io::DatabaseContextPtr &dbContext) const {
auto otherExtent = dynamic_cast<const Extent *>(other);
bool ret =
(otherExtent &&
description().has_value() == otherExtent->description().has_value() &&
*description() == *otherExtent->description() &&
d->geographicElements_.size() ==
otherExtent->d->geographicElements_.size() &&
d->verticalElements_.size() ==
otherExtent->d->verticalElements_.size() &&
d->temporalElements_.size() ==
otherExtent->d->temporalElements_.size());
if (ret) {
for (size_t i = 0; ret && i < d->geographicElements_.size(); ++i) {
ret = d->geographicElements_[i]->_isEquivalentTo(
otherExtent->d->geographicElements_[i].get(), criterion,
dbContext);
}
for (size_t i = 0; ret && i < d->verticalElements_.size(); ++i) {
ret = d->verticalElements_[i]->_isEquivalentTo(
otherExtent->d->verticalElements_[i].get(), criterion,
dbContext);
}
for (size_t i = 0; ret && i < d->temporalElements_.size(); ++i) {
ret = d->temporalElements_[i]->_isEquivalentTo(
otherExtent->d->temporalElements_[i].get(), criterion,
dbContext);
}
}
return ret;
}
//! @endcond
// ---------------------------------------------------------------------------
/** \brief Returns whether this extent contains the other one.
*
 * Behavior is only well specified if each sub-extent category has at most
 * one element.
*/
bool Extent::contains(const ExtentNNPtr &other) const {
bool res = true;
if (d->geographicElements_.size() == 1 &&
other->d->geographicElements_.size() == 1) {
res = d->geographicElements_[0]->contains(
other->d->geographicElements_[0]);
}
if (res && d->verticalElements_.size() == 1 &&
other->d->verticalElements_.size() == 1) {
res = d->verticalElements_[0]->contains(other->d->verticalElements_[0]);
}
if (res && d->temporalElements_.size() == 1 &&
other->d->temporalElements_.size() == 1) {
res = d->temporalElements_[0]->contains(other->d->temporalElements_[0]);
}
return res;
}
// ---------------------------------------------------------------------------
/** \brief Returns whether this extent intersects the other one.
*
 * Behavior is only well specified if each sub-extent category has at most
 * one element.
*/
bool Extent::intersects(const ExtentNNPtr &other) const {
bool res = true;
if (d->geographicElements_.size() == 1 &&
other->d->geographicElements_.size() == 1) {
res = d->geographicElements_[0]->intersects(
other->d->geographicElements_[0]);
}
if (res && d->verticalElements_.size() == 1 &&
other->d->verticalElements_.size() == 1) {
res =
d->verticalElements_[0]->intersects(other->d->verticalElements_[0]);
}
if (res && d->temporalElements_.size() == 1 &&
other->d->temporalElements_.size() == 1) {
res =
d->temporalElements_[0]->intersects(other->d->temporalElements_[0]);
}
return res;
}
// ---------------------------------------------------------------------------
/** \brief Returns the intersection of this extent with another one.
*
 * Behavior is only well specified if there is a single GeographicExtent
 * in each object.
* Returns nullptr otherwise.
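 *
 * Illustrative sketch (names and values are for the example only):
 * \code
 * auto a = Extent::createFromBBOX(-10.0, 35.0, 5.0, 45.0);
 * auto b = Extent::createFromBBOX(0.0, 40.0, 20.0, 50.0);
 * // Non-null: single geographic element covering (0, 40, 5, 45).
 * auto inter = a->intersection(b);
 * \endcode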
*/
ExtentPtr Extent::intersection(const ExtentNNPtr &other) const {
if (d->geographicElements_.size() == 1 &&
other->d->geographicElements_.size() == 1) {
if (contains(other)) {
return other.as_nullable();
}
auto self = util::nn_static_pointer_cast<Extent>(shared_from_this());
if (other->contains(self)) {
return self.as_nullable();
}
auto geogIntersection = d->geographicElements_[0]->intersection(
other->d->geographicElements_[0]);
if (geogIntersection) {
return create(util::optional<std::string>(),
std::vector<GeographicExtentNNPtr>{
NN_NO_CHECK(geogIntersection)},
std::vector<VerticalExtentNNPtr>{},
std::vector<TemporalExtentNNPtr>{});
}
}
return nullptr;
}
// ---------------------------------------------------------------------------
//! @cond Doxygen_Suppress
struct Identifier::Private {
optional<Citation> authority_{};
std::string code_{};
optional<std::string> codeSpace_{};
optional<std::string> version_{};
optional<std::string> description_{};
optional<std::string> uri_{};
Private() = default;
Private(const std::string &codeIn, const PropertyMap &properties)
: code_(codeIn) {
setProperties(properties);
}
private:
// cppcheck-suppress functionStatic
void setProperties(const PropertyMap &properties);
};
// ---------------------------------------------------------------------------
void Identifier::Private::setProperties(
const PropertyMap &properties) // throw(InvalidValueTypeException)
{
{
const auto pVal = properties.get(AUTHORITY_KEY);
if (pVal) {
if (auto genVal = dynamic_cast<const BoxedValue *>(pVal->get())) {
if (genVal->type() == BoxedValue::Type::STRING) {
authority_ = Citation(genVal->stringValue());
} else {
throw InvalidValueTypeException("Invalid value type for " +
AUTHORITY_KEY);
}
} else {
if (auto citation =
dynamic_cast<const Citation *>(pVal->get())) {
authority_ = *citation;
} else {
throw InvalidValueTypeException("Invalid value type for " +
AUTHORITY_KEY);
}
}
}
}
{
const auto pVal = properties.get(CODE_KEY);
if (pVal) {
if (auto genVal = dynamic_cast<const BoxedValue *>(pVal->get())) {
if (genVal->type() == BoxedValue::Type::INTEGER) {
code_ = toString(genVal->integerValue());
} else if (genVal->type() == BoxedValue::Type::STRING) {
code_ = genVal->stringValue();
} else {
throw InvalidValueTypeException("Invalid value type for " +
CODE_KEY);
}
} else {
throw InvalidValueTypeException("Invalid value type for " +
CODE_KEY);
}
}
}
properties.getStringValue(CODESPACE_KEY, codeSpace_);
properties.getStringValue(VERSION_KEY, version_);
properties.getStringValue(DESCRIPTION_KEY, description_);
properties.getStringValue(URI_KEY, uri_);
}
//! @endcond
// ---------------------------------------------------------------------------
Identifier::Identifier(const std::string &codeIn,
const util::PropertyMap &properties)
: d(internal::make_unique<Private>(codeIn, properties)) {}
// ---------------------------------------------------------------------------
//! @cond Doxygen_Suppress
// ---------------------------------------------------------------------------
Identifier::Identifier() : d(internal::make_unique<Private>()) {}
// ---------------------------------------------------------------------------
Identifier::Identifier(const Identifier &other)
: d(internal::make_unique<Private>(*(other.d))) {}
// ---------------------------------------------------------------------------
Identifier::~Identifier() = default;
//! @endcond
// ---------------------------------------------------------------------------
/** \brief Instantiate an Identifier.
*
* @param codeIn Alphanumeric value identifying an instance in the codespace
* @param properties See \ref general_properties.
* Generally, the Identifier::CODESPACE_KEY should be set.
* @return a new Identifier.
*/
IdentifierNNPtr Identifier::create(const std::string &codeIn,
const PropertyMap &properties) {
return Identifier::nn_make_shared<Identifier>(codeIn, properties);
}
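// ---------------------------------------------------------------------------
// Illustrative usage sketch (not part of the original source): a typical
// EPSG-style identifier combines a code with CODESPACE_KEY, assuming the
// string overload of util::PropertyMap::set():
//
//     auto id = Identifier::create(
//         "4326", util::PropertyMap().set(Identifier::CODESPACE_KEY, "EPSG"));
//     // id->code() == "4326" and *id->codeSpace() == "EPSG"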
// ---------------------------------------------------------------------------
//! @cond Doxygen_Suppress
IdentifierNNPtr
Identifier::createFromDescription(const std::string &descriptionIn) {
auto id = Identifier::nn_make_shared<Identifier>();
id->d->description_ = descriptionIn;
return id;
}
//! @endcond
// ---------------------------------------------------------------------------
/** \brief Return a citation for the organization responsible for definition and
* maintenance of the code.
*
* @return the citation for the authority, or empty.
*/
const optional<Citation> &Identifier::authority() PROJ_PURE_DEFN {
return d->authority_;
}
// ---------------------------------------------------------------------------
/** \brief Return the alphanumeric value identifying an instance in the
* codespace.
*
* e.g. "4326" (for EPSG:4326 WGS 84 GeographicCRS)
*
* @return the code.
*/
const std::string &Identifier::code() PROJ_PURE_DEFN { return d->code_; }
// ---------------------------------------------------------------------------
/** \brief Return the organization responsible for definition and maintenance of
* the code.
*
 * e.g. "EPSG"
*
* @return the authority codespace, or empty.
*/
const optional<std::string> &Identifier::codeSpace() PROJ_PURE_DEFN {
return d->codeSpace_;
}
// ---------------------------------------------------------------------------
/** \brief Return the version identifier for the namespace.
*
* When appropriate, the edition is identified by the effective date, coded
* using ISO 8601 date format.
*
* @return the version or empty.
*/
const optional<std::string> &Identifier::version() PROJ_PURE_DEFN {
return d->version_;
}
// ---------------------------------------------------------------------------
/** \brief Return the natural language description of the meaning of the code
* value.
*
* @return the description or empty.
*/
const optional<std::string> &Identifier::description() PROJ_PURE_DEFN {
return d->description_;
}
// ---------------------------------------------------------------------------
/** \brief Return the URI of the identifier.
*
* @return the URI or empty.
*/
const optional<std::string> &Identifier::uri() PROJ_PURE_DEFN {
return d->uri_;
}
// ---------------------------------------------------------------------------
//! @cond Doxygen_Suppress
void Identifier::_exportToWKT(WKTFormatter *formatter) const {
const bool isWKT2 = formatter->version() == WKTFormatter::Version::WKT2;
const std::string &l_code = code();
const std::string &l_codeSpace = *codeSpace();
if (!l_codeSpace.empty() && !l_code.empty()) {
if (isWKT2) {
formatter->startNode(WKTConstants::ID, false);
formatter->addQuotedString(l_codeSpace);
try {
(void)std::stoi(l_code);
formatter->add(l_code);
} catch (const std::exception &) {
formatter->addQuotedString(l_code);
}
if (version().has_value()) {
auto l_version = *(version());
try {
(void)c_locale_stod(l_version);
formatter->add(l_version);
} catch (const std::exception &) {
formatter->addQuotedString(l_version);
}
}
if (authority().has_value() &&
*(authority()->title()) != l_codeSpace) {
formatter->startNode(WKTConstants::CITATION, false);
formatter->addQuotedString(*(authority()->title()));
formatter->endNode();
}
if (uri().has_value()) {
formatter->startNode(WKTConstants::URI, false);
formatter->addQuotedString(*(uri()));
formatter->endNode();
}
formatter->endNode();
} else {
formatter->startNode(WKTConstants::AUTHORITY, false);
formatter->addQuotedString(l_codeSpace);
formatter->addQuotedString(l_code);
formatter->endNode();
}
}
}
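// ---------------------------------------------------------------------------
// Example output (illustrative): for an EPSG:4326 identifier the function
// above emits
//     WKT2:  ID["EPSG",4326]
//     WKT1:  AUTHORITY["EPSG","4326"]
// (the WKT2 code is left unquoted only when std::stoi() accepts it).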
// ---------------------------------------------------------------------------
void Identifier::_exportToJSON(JSONFormatter *formatter) const {
const std::string &l_code = code();
const std::string &l_codeSpace = *codeSpace();
if (!l_codeSpace.empty() && !l_code.empty()) {
auto writer = formatter->writer();
auto objContext(formatter->MakeObjectContext(nullptr, false));
writer->AddObjKey("authority");
writer->Add(l_codeSpace);
writer->AddObjKey("code");
try {
writer->Add(std::stoi(l_code));
} catch (const std::exception &) {
writer->Add(l_code);
}
}
}
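// ---------------------------------------------------------------------------
// Example output (illustrative): the same EPSG:4326 identifier serializes as
//     { "authority": "EPSG", "code": 4326 }
// with the code written as a JSON number whenever std::stoi() accepts it.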
//! @endcond
// ---------------------------------------------------------------------------
//! @cond Doxygen_Suppress
static bool isIgnoredChar(char ch) {
return ch == ' ' || ch == '_' || ch == '-' || ch == '/' || ch == '(' ||
ch == ')' || ch == '.' || ch == '&' || ch == ',';
}
//! @endcond
// ---------------------------------------------------------------------------
//! @cond Doxygen_Suppress
static const struct utf8_to_lower {
const char *utf8;
char ascii;
} map_utf8_to_lower[] = {
    {"\xc3\xa1", 'a'}, // a acute
    {"\xc3\xa4", 'a'}, // a diaeresis (trema)
    {"\xc4\x9b", 'e'}, // e caron (reverse circumflex)
    {"\xc3\xa8", 'e'}, // e grave
    {"\xc3\xa9", 'e'}, // e acute
    {"\xc3\xab", 'e'}, // e diaeresis (trema)
    {"\xc3\xad", 'i'}, // i acute
    {"\xc3\xb4", 'o'}, // o circumflex
    {"\xc3\xb6", 'o'}, // o diaeresis (trema)
    {"\xc3\xa7", 'c'}, // c cedilla
};
static const struct utf8_to_lower *get_ascii_replacement(const char *c_str) {
for (const auto &pair : map_utf8_to_lower) {
if (*c_str == pair.utf8[0] &&
strncmp(c_str, pair.utf8, strlen(pair.utf8)) == 0) {
return &pair;
}
}
return nullptr;
}
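// ---------------------------------------------------------------------------
// Worked example (illustrative): "\xc3\xa9" is the two-byte UTF-8 encoding of
// U+00E9 ('e' acute). get_ascii_replacement() first compares the lead byte
// 0xc3, then the full sequence, and returns the entry folding it to plain
// 'e', so "R\xc3\xa9seau" and "Reseau" canonicalize identically.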
//! @endcond
// ---------------------------------------------------------------------------
//! @cond Doxygen_Suppress
std::string Identifier::canonicalizeName(const std::string &str) {
std::string res;
const char *c_str = str.c_str();
for (size_t i = 0; c_str[i] != 0; ++i) {
const auto ch = c_str[i];
if (ch == ' ' && c_str[i + 1] == '+' && c_str[i + 2] == ' ') {
i += 2;
continue;
}
if (ch == '1' && !res.empty() &&
!(res.back() >= '0' && res.back() <= '9') && c_str[i + 1] == '9' &&
c_str[i + 2] >= '0' && c_str[i + 2] <= '9') {
++i;
continue;
}
if (static_cast<unsigned char>(ch) > 127) {
const auto *replacement = get_ascii_replacement(c_str + i);
if (replacement) {
res.push_back(replacement->ascii);
i += strlen(replacement->utf8) - 1;
continue;
}
}
if (!isIgnoredChar(ch)) {
res.push_back(static_cast<char>(::tolower(ch)));
}
}
return res;
}
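// ---------------------------------------------------------------------------
// Worked examples (illustrative) of the rules above:
//     canonicalizeName("NAD 1983")                 -> "nad83"  (century dropped)
//     canonicalizeName("WGS 84 / Pseudo-Mercator") -> "wgs84pseudomercator"
//     canonicalizeName("a + b")                    -> "ab"     (" + " skipped)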
//! @endcond
// ---------------------------------------------------------------------------
/** \brief Returns whether two names are considered equivalent.
*
 * Two names are considered equivalent after removing any space, underscore,
 * dash, slash, parenthesis, dot, ampersand or comma character from them and
 * comparing case-insensitively. Common accented characters are folded to
 * ASCII, " + " separators are skipped, and the century prefix of years
 * (e.g. "1983" vs "83") is ignored.
*/
bool Identifier::isEquivalentName(const char *a, const char *b) noexcept {
size_t i = 0;
size_t j = 0;
char lastValidA = 0;
char lastValidB = 0;
while (a[i] != 0 && b[j] != 0) {
char aCh = a[i];
char bCh = b[j];
if (aCh == ' ' && a[i + 1] == '+' && a[i + 2] == ' ') {
i += 3;
continue;
}
if (bCh == ' ' && b[j + 1] == '+' && b[j + 2] == ' ') {
j += 3;
continue;
}
if (isIgnoredChar(aCh)) {
++i;
continue;
}
if (isIgnoredChar(bCh)) {
++j;
continue;
}
if (aCh == '1' && !(lastValidA >= '0' && lastValidA <= '9') &&
a[i + 1] == '9' && a[i + 2] >= '0' && a[i + 2] <= '9') {
i += 2;
lastValidA = '9';
continue;
}
if (bCh == '1' && !(lastValidB >= '0' && lastValidB <= '9') &&
b[j + 1] == '9' && b[j + 2] >= '0' && b[j + 2] <= '9') {
j += 2;
lastValidB = '9';
continue;
}
if (static_cast<unsigned char>(aCh) > 127) {
const auto *replacement = get_ascii_replacement(a + i);
if (replacement) {
aCh = replacement->ascii;
i += strlen(replacement->utf8) - 1;
}
}
if (static_cast<unsigned char>(bCh) > 127) {
const auto *replacement = get_ascii_replacement(b + j);
if (replacement) {
bCh = replacement->ascii;
j += strlen(replacement->utf8) - 1;
}
}
if (::tolower(aCh) != ::tolower(bCh)) {
return false;
}
lastValidA = aCh;
lastValidB = bCh;
++i;
++j;
}
while (a[i] != 0 && isIgnoredChar(a[i])) {
++i;
}
while (b[j] != 0 && isIgnoredChar(b[j])) {
++j;
}
return a[i] == b[j];
}
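// ---------------------------------------------------------------------------
// Worked examples (illustrative) of the equivalence rules above:
//     isEquivalentName("WGS 84", "WGS_84")        -> true  (ignored chars)
//     isEquivalentName("NAD83", "NAD 1983")       -> true  (century dropped)
//     isEquivalentName("Reseau", "R\xc3\xa9seau") -> true  (accent folded)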
// ---------------------------------------------------------------------------
//! @cond Doxygen_Suppress
struct PositionalAccuracy::Private {
std::string value_{};
};
//! @endcond
// ---------------------------------------------------------------------------
PositionalAccuracy::PositionalAccuracy(const std::string &valueIn)
: d(internal::make_unique<Private>()) {
d->value_ = valueIn;
}
// ---------------------------------------------------------------------------
//! @cond Doxygen_Suppress
PositionalAccuracy::~PositionalAccuracy() = default;
//! @endcond
// ---------------------------------------------------------------------------
/** \brief Return the value of the positional accuracy.
*/
const std::string &PositionalAccuracy::value() PROJ_PURE_DEFN {
return d->value_;
}
// ---------------------------------------------------------------------------
/** \brief Instantiate a PositionalAccuracy.
*
* @param valueIn positional accuracy value.
* @return a new PositionalAccuracy.
*/
PositionalAccuracyNNPtr PositionalAccuracy::create(const std::string &valueIn) {
return PositionalAccuracy::nn_make_shared<PositionalAccuracy>(valueIn);
}
} // namespace metadata
NS_PROJ_END
| 1 | 12,405 | this change should be reverted | OSGeo-PROJ | cpp |
@@ -186,9 +186,15 @@ type mockedIdentityRegistry struct {
anyIdentityRegistered bool
}
+// IsRegistered mock
func (mir *mockedIdentityRegistry) IsRegistered(address common.Address) (bool, error) {
return mir.anyIdentityRegistered, nil
}
+// WaitForRegistrationEvent mock
+func (mir *mockedIdentityRegistry) WaitForRegistrationEvent(providerAddress common.Address, registeredEvent chan int, stopLoop chan int) {
+
+}
+
//check that we implemented mocked registry correctly
var _ registry.IdentityRegistry = &mockedIdentityRegistry{} | 1 | /*
* Copyright (C) 2017 The "MysteriumNetwork/node" Authors.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package dialog
import (
"errors"
"testing"
"time"
"github.com/ethereum/go-ethereum/common"
"github.com/mysterium/node/communication"
"github.com/mysterium/node/communication/nats"
"github.com/mysterium/node/communication/nats/discovery"
"github.com/mysterium/node/identity"
"github.com/mysterium/node/identity/registry"
"github.com/stretchr/testify/assert"
)
func TestDialogWaiter_Interface(t *testing.T) {
var _ communication.DialogWaiter = &dialogWaiter{}
}
func TestDialogWaiter_Factory(t *testing.T) {
address := discovery.NewAddress("custom", "nats://far-server:4222")
signer := &identity.SignerFake{}
waiter := NewDialogWaiter(address, signer, &mockedIdentityRegistry{})
assert.NotNil(t, waiter)
assert.Equal(t, address, waiter.myAddress)
assert.Equal(t, signer, waiter.mySigner)
}
func TestDialogWaiter_ServeDialogs(t *testing.T) {
peerID := identity.FromAddress("0x28bf83df144ab7a566bc8509d1fff5d5470bd4ea")
connection := nats.StartConnectionFake()
defer connection.Close()
signer := &identity.SignerFake{}
waiter, handler := dialogServe(connection, signer)
defer waiter.Stop()
dialogAsk(connection, `{
"payload": {"peer_id":"0x28bf83df144ab7a566bc8509d1fff5d5470bd4ea"},
"signature": "tl+WbYkJdXD5foaIP3bqVGFHfr6kdd5FzmJAmu1GdpINEnNR3bTto6wgEoke/Fpy4zsWOjrulDVfrc32f5ArTgA="
}`)
dialogInstance, err := dialogWait(handler)
defer dialogInstance.Close()
assert.NoError(t, err)
assert.NotNil(t, dialogInstance)
dialog, ok := dialogInstance.(*dialog)
assert.True(t, ok)
expectedCodec := NewCodecSecured(communication.NewCodecJSON(), signer, identity.NewVerifierIdentity(peerID))
assert.Equal(
t,
nats.NewSender(connection, expectedCodec, "my-topic.0x28bf83df144ab7a566bc8509d1fff5d5470bd4ea"),
dialog.Sender,
)
assert.Equal(
t,
nats.NewReceiver(connection, expectedCodec, "my-topic.0x28bf83df144ab7a566bc8509d1fff5d5470bd4ea"),
dialog.Receiver,
)
}
func TestDialogWaiter_ServeDialogsRejectInvalidSignature(t *testing.T) {
connection := nats.StartConnectionFake()
defer connection.Close()
signer := &identity.SignerFake{}
waiter, handler := dialogServe(connection, signer)
defer waiter.Stop()
dialogAsk(connection, `{
"payload": {"peer_id":"0x28bf83df144ab7a566bc8509d1fff5d5470bd4ea"},
"signature": "malformed"
}`)
dialogInstance, err := dialogWait(handler)
assert.EqualError(t, err, "dialog not received")
assert.Nil(t, dialogInstance)
}
func TestDialogWaiter_ServeDialogsRejectUnregisteredConsumers(t *testing.T) {
connection := nats.StartConnectionFake()
defer connection.Close()
signer := &identity.SignerFake{}
mockedRegistry := &mockedIdentityRegistry{
anyIdentityRegistered: false,
}
	mockedDialogHandler := &dialogHandler{
dialogReceived: make(chan communication.Dialog),
}
waiter := NewDialogWaiter(discovery.NewAddressWithConnection(connection, "test-topic"), signer, mockedRegistry)
	err := waiter.ServeDialogs(mockedDialogHandler)
assert.NoError(t, err)
msg, err := connection.Request("test-topic.dialog-create", []byte(`{
"payload": {"peer_id":"0x28bf83df144ab7a566bc8509d1fff5d5470bd4ea"},
"signature": "tl+WbYkJdXD5foaIP3bqVGFHfr6kdd5FzmJAmu1GdpINEnNR3bTto6wgEoke/Fpy4zsWOjrulDVfrc32f5ArTgA="
}`), 100*time.Millisecond)
assert.NoError(t, err)
assert.JSONEq(
t,
`{
"payload": {
"reason":400,
"reasonMessage":"Invalid identity"
},
"signature":"c2lnbmVkeyJyZWFzb24iOjQwMCwicmVhc29uTWVzc2FnZSI6IkludmFsaWQgaWRlbnRpdHkifQ=="
}`,
string(msg.Data),
)
}
func dialogServe(connection nats.Connection, mySigner identity.Signer) (waiter *dialogWaiter, handler *dialogHandler) {
myTopic := "my-topic"
waiter = &dialogWaiter{
myAddress: discovery.NewAddressWithConnection(connection, myTopic),
mySigner: mySigner,
identityRegistry: &mockedIdentityRegistry{
anyIdentityRegistered: true,
},
}
handler = &dialogHandler{
dialogReceived: make(chan communication.Dialog),
}
err := waiter.ServeDialogs(handler)
if err != nil {
panic(err)
}
return waiter, handler
}
func dialogAsk(connection nats.Connection, payload string) {
err := connection.Publish("my-topic.dialog-create", []byte(payload))
if err != nil {
panic(err)
}
}
func dialogWait(handler *dialogHandler) (communication.Dialog, error) {
select {
case dialog := <-handler.dialogReceived:
return dialog, nil
case <-time.After(10 * time.Millisecond):
return nil, errors.New("dialog not received")
}
}
type dialogHandler struct {
dialogReceived chan communication.Dialog
}
func (handler *dialogHandler) Handle(dialog communication.Dialog) error {
handler.dialogReceived <- dialog
return nil
}
type mockedIdentityRegistry struct {
anyIdentityRegistered bool
}
func (mir *mockedIdentityRegistry) IsRegistered(address common.Address) (bool, error) {
return mir.anyIdentityRegistered, nil
}
//check that we implemented mocked registry correctly
var _ registry.IdentityRegistry = &mockedIdentityRegistry{}
| 1 | 11,640 | This function signature is a bit complicated, some parameters are IN type (stopLoop which is modified from outside), others are OUT (registeredEvent channel which is modified inside function) I suggest the following signature -> SubscribeToRegistrationEvent(identityAddress) returns registeredEvent chan of type (RegisteredEvent not int), Unsubscribe func(), error (in case of any error). That way caller can always cance subscription if needed and check for any errors. Also - unsubscribe function hides any internal details how subscribtion is really implemented | mysteriumnetwork-node | go |
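A rough Go sketch of the subscription-style API shape suggested in the review
comment attached to this change (RegistrationEvent, contractRegistry and
SubscribeToRegistrationEvent are illustrative assumptions, not the project's
actual identifiers):

	// RegistrationEvent would carry registration details instead of a bare int.
	type RegistrationEvent struct{ /* block number, tx hash, ... */ }

	// SubscribeToRegistrationEvent returns a typed event channel plus an
	// Unsubscribe func, hiding how the subscription is implemented internally.
	func (registry *contractRegistry) SubscribeToRegistrationEvent(
		identityAddress common.Address,
	) (<-chan RegistrationEvent, func(), error) {
		events := make(chan RegistrationEvent, 1)
		done := make(chan struct{})
		// ... watch chain events for identityAddress and feed 'events',
		// stopping when 'done' is closed ...
		unsubscribe := func() { close(done) }
		return events, unsubscribe, nil
	}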
@@ -45,7 +45,7 @@ const options = {
query: ['src/**/*.png', 'src/**/*.jpg', 'src/**/*.gif', 'src/**/*.svg']
},
copy: {
- query: ['src/**/*.json', 'src/**/*.ico']
+ query: ['src/**/*.json', 'src/**/*.ico', 'src/**/*.wav']
},
injectBundle: {
query: 'src/index.html' | 1 | const { src, dest, series, parallel, watch } = require('gulp');
const browserSync = require('browser-sync').create();
const del = require('del');
const babel = require('gulp-babel');
const concat = require('gulp-concat');
const terser = require('gulp-terser');
const htmlmin = require('gulp-htmlmin');
const imagemin = require('gulp-imagemin');
const sourcemaps = require('gulp-sourcemaps');
const mode = require('gulp-mode')({
modes: ['development', 'production'],
default: 'development',
verbose: false
});
const stream = require('webpack-stream');
const inject = require('gulp-inject');
const postcss = require('gulp-postcss');
const sass = require('gulp-sass');
const gulpif = require('gulp-if');
const lazypipe = require('lazypipe');
sass.compiler = require('node-sass');
let config;
if (mode.production()) {
config = require('./webpack.prod.js');
} else {
config = require('./webpack.dev.js');
}
const options = {
javascript: {
query: ['src/**/*.js', '!src/bundle.js', '!src/standalone.js', '!src/scripts/apploader.js']
},
apploader: {
query: ['src/standalone.js', 'src/scripts/apploader.js']
},
css: {
query: ['src/**/*.css', 'src/**/*.scss']
},
html: {
query: ['src/**/*.html', '!src/index.html']
},
images: {
query: ['src/**/*.png', 'src/**/*.jpg', 'src/**/*.gif', 'src/**/*.svg']
},
copy: {
query: ['src/**/*.json', 'src/**/*.ico']
},
injectBundle: {
query: 'src/index.html'
}
};
function serve() {
browserSync.init({
server: {
baseDir: './dist'
},
port: 8080
});
const events = ['add', 'change'];
watch(options.javascript.query).on('all', function (event, path) {
if (events.includes(event)) {
javascript(path);
}
});
watch(options.apploader.query, apploader(true));
watch('src/bundle.js', webpack);
watch(options.css.query).on('all', function (event, path) {
if (events.includes(event)) {
css(path);
}
});
watch(options.html.query).on('all', function (event, path) {
if (events.includes(event)) {
html(path);
}
});
watch(options.images.query).on('all', function (event, path) {
if (events.includes(event)) {
images(path);
}
});
watch(options.copy.query).on('all', function (event, path) {
if (events.includes(event)) {
copy(path);
}
});
watch(options.injectBundle.query, injectBundle);
}
function clean() {
return del(['dist/']);
}
const pipelineJavascript = lazypipe()
.pipe(function () {
return mode.development(sourcemaps.init({ loadMaps: true }));
})
.pipe(function () {
return babel({
presets: [
['@babel/preset-env']
]
});
})
.pipe(function () {
return terser({
keep_fnames: true,
mangle: false
});
})
.pipe(function () {
return mode.development(sourcemaps.write('.'));
});
function javascript(query) {
return src(typeof query !== 'function' ? query : options.javascript.query, { base: './src/' })
.pipe(pipelineJavascript())
.pipe(dest('dist/'))
.pipe(browserSync.stream());
}
function apploader(standalone) {
function task() {
return src(options.apploader.query, { base: './src/' })
.pipe(gulpif(standalone, concat('scripts/apploader.js')))
.pipe(pipelineJavascript())
.pipe(dest('dist/'))
.pipe(browserSync.stream());
}
task.displayName = 'apploader';
return task;
}
function webpack() {
return stream(config)
.pipe(dest('dist/'))
.pipe(browserSync.stream());
}
function css(query) {
return src(typeof query !== 'function' ? query : options.css.query, { base: './src/' })
.pipe(mode.development(sourcemaps.init({ loadMaps: true })))
.pipe(sass().on('error', sass.logError))
.pipe(postcss())
.pipe(mode.development(sourcemaps.write('.')))
.pipe(dest('dist/'))
.pipe(browserSync.stream());
}
function html(query) {
return src(typeof query !== 'function' ? query : options.html.query, { base: './src/' })
.pipe(mode.production(htmlmin({ collapseWhitespace: true })))
.pipe(dest('dist/'))
.pipe(browserSync.stream());
}
function images(query) {
return src(typeof query !== 'function' ? query : options.images.query, { base: './src/' })
.pipe(mode.production(imagemin()))
.pipe(dest('dist/'))
.pipe(browserSync.stream());
}
function copy(query) {
return src(typeof query !== 'function' ? query : options.copy.query, { base: './src/' })
.pipe(dest('dist/'))
.pipe(browserSync.stream());
}
function copyIndex() {
return src(options.injectBundle.query, { base: './src/' })
.pipe(dest('dist/'))
.pipe(browserSync.stream());
}
function injectBundle() {
return src(options.injectBundle.query, { base: './src/' })
.pipe(inject(
src(['src/scripts/apploader.js'], { read: false }, { base: './src/' }), { relative: true }
))
.pipe(dest('dist/'))
.pipe(browserSync.stream());
}
function build(standalone) {
return series(clean, parallel(javascript, apploader(standalone), webpack, css, html, images, copy));
}
exports.default = series(build(false), copyIndex);
exports.standalone = series(build(true), injectBundle);
exports.serve = series(exports.standalone, serve);
| 1 | 14,154 | `.wav`?! cannot we use something slightly more modern and compressed instead? :) I dunno, like `.mp3` or `.aac` or `.ogg`... | jellyfin-jellyfin-web | js |
@@ -1602,6 +1602,17 @@ luaA_client_swap(lua_State *L)
*ref_swap = c;
luaA_class_emit_signal(L, &client_class, "list", 0);
+
+ luaA_object_push(L, swap);
+ lua_pushboolean(L, true);
+ luaA_object_emit_signal(L, -4, "swapped", 2);
+ lua_pop(L, 2);
+
+ luaA_object_push(L, swap);
+ luaA_object_push(L, c);
+ lua_pushboolean(L, false);
+ luaA_object_emit_signal(L, -3, "swapped", 2);
+ lua_pop(L, 3);
}
return 0; | 1 | /*
* client.c - client management
*
* Copyright © 2007-2009 Julien Danjou <[email protected]>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*
*/
/** awesome client API
*
 * In addition to the classes described here, one can also use signals as
 * described in @{signals} and X properties as described in @{xproperties}.
 *
 * Some signal names start with a dot. These dots are artefacts of the
 * documentation generation; you get the real signal name by removing the
 * leading dot.
*
* @author Julien Danjou <[email protected]>
* @copyright 2008-2009 Julien Danjou
* @release @AWESOME_VERSION@
* @classmod client
*/
#include "objects/client.h"
#include "common/atoms.h"
#include "common/xutil.h"
#include "event.h"
#include "ewmh.h"
#include "objects/drawable.h"
#include "objects/screen.h"
#include "objects/tag.h"
#include "property.h"
#include "spawn.h"
#include "systray.h"
#include "xwindow.h"
#include <xcb/xcb_atom.h>
#include <xcb/shape.h>
#include <cairo-xcb.h>
/** Client class.
*
* @table class
* @field focus The focused `client.object`.
*/
/** Client object.
*
* @field window The X window id.
* @field name The client title.
* @field skip_taskbar True if the client does not want to be in taskbar.
* @field type The window type (desktop, normal, dock, …).
* @field class The client class.
* @field instance The client instance.
* @field pid The client PID, if available.
* @field role The window role, if available.
* @field machine The machine client is running on.
* @field icon_name The client name when iconified.
* @field icon The client icon.
* @field screen Client screen.
 * @field hidden Define if the client must be hidden, i.e. never mapped,
 * invisible in the taskbar.
 * @field minimized Define if the client must be iconified, i.e. only visible
 * in the taskbar.
* @field size_hints_honor Honor size hints, i.e. respect size ratio.
* @field border_width The client border width.
* @field border_color The client border color.
* @field urgent The client urgent state.
* @field content An image representing the client window content (screenshot).
* @field opacity The client opacity between 0 and 1.
 * @field ontop The client is on top of every other window.
* @field above The client is above normal windows.
* @field below The client is below normal windows.
* @field fullscreen The client is fullscreen or not.
* @field maximized The client is maximized (horizontally and vertically) or not.
* @field maximized_horizontal The client is maximized horizontally or not.
* @field maximized_vertical The client is maximized vertically or not.
* @field transient_for The client the window is transient for.
* @field group_window Window identification unique to a group of windows.
* @field leader_window Identification unique to windows spawned by the same command.
* @field size_hints A table with size hints of the client: `user_position`,
* `user_size`, `program_position`, `program_size`, etc.
* @field sticky Set the client sticky, i.e. available on all tags.
* @field modal Indicate if the client is modal.
* @field focusable True if the client can receive the input focus.
* @field shape_bounding The client's bounding shape as set by awesome as a (native) cairo surface.
* @field shape_clip The client's clip shape as set by awesome as a (native) cairo surface.
* @field shape_client_bounding The client's bounding shape as set by the program as a (native) cairo surface.
* @field shape_client_clip The client's clip shape as set by the program as a (native) cairo surface.
 * @field startup_id The FreeDesktop startup notification id.
* @field valid If the client that this object refers to is still managed by awesome.
* @field first_tag The first tag of the client. Optimized form of `c:tags()[1]`.
* @table object
*/
/** Return client struts (reserved space at the edge of the screen).
*
* @param struts A table with new strut values, or none.
* @return A table with strut values.
* @function struts
*/
/** Get or set mouse button bindings for a client.
*
* @param buttons_table An array of mouse button bindings objects, or nothing.
* @return A table with all buttons.
* @function buttons
*/
/** Get the number of instances.
*
* @return The number of client objects alive.
* @function instances
*/
static area_t titlebar_get_area(client_t *c, client_titlebar_t bar);
static drawable_t *titlebar_get_drawable(lua_State *L, client_t *c, int cl_idx, client_titlebar_t bar);
static void client_resize_do(client_t *c, area_t geometry, bool force_notice);
/** Wipe a client, freeing its allocated resources.
 * \param c The client to wipe.
*/
static void
client_wipe(client_t *c)
{
key_array_wipe(&c->keys);
xcb_icccm_get_wm_protocols_reply_wipe(&c->protocols);
p_delete(&c->machine);
p_delete(&c->class);
p_delete(&c->instance);
p_delete(&c->icon_name);
p_delete(&c->alt_icon_name);
p_delete(&c->name);
p_delete(&c->alt_name);
p_delete(&c->startup_id);
if(c->icon)
cairo_surface_destroy(c->icon);
}
/** Change the client's urgency flag.
* \param L The Lua VM state.
* \param cidx The client index on the stack.
* \param urgent The new flag state.
*/
void
client_set_urgent(lua_State *L, int cidx, bool urgent)
{
client_t *c = luaA_checkudata(L, cidx, &client_class);
if(c->urgent != urgent)
{
xcb_get_property_cookie_t hints =
xcb_icccm_get_wm_hints_unchecked(globalconf.connection, c->window);
c->urgent = urgent;
/* update ICCCM hints */
xcb_icccm_wm_hints_t wmh;
xcb_icccm_get_wm_hints_reply(globalconf.connection, hints, &wmh, NULL);
if(urgent)
wmh.flags |= XCB_ICCCM_WM_HINT_X_URGENCY;
else
wmh.flags &= ~XCB_ICCCM_WM_HINT_X_URGENCY;
xcb_icccm_set_wm_hints(globalconf.connection, c->window, &wmh);
luaA_object_emit_signal(L, cidx, "property::urgent", 0);
}
}
#define DO_CLIENT_SET_PROPERTY(prop) \
void \
client_set_##prop(lua_State *L, int cidx, fieldtypeof(client_t, prop) value) \
{ \
client_t *c = luaA_checkudata(L, cidx, &client_class); \
if(c->prop != value) \
{ \
c->prop = value; \
luaA_object_emit_signal(L, cidx, "property::" #prop, 0); \
} \
}
DO_CLIENT_SET_PROPERTY(group_window)
DO_CLIENT_SET_PROPERTY(type)
DO_CLIENT_SET_PROPERTY(transient_for)
DO_CLIENT_SET_PROPERTY(pid)
DO_CLIENT_SET_PROPERTY(skip_taskbar)
#undef DO_CLIENT_SET_PROPERTY
#define DO_CLIENT_SET_STRING_PROPERTY2(prop, signal) \
void \
client_set_##prop(lua_State *L, int cidx, char *value) \
{ \
client_t *c = luaA_checkudata(L, cidx, &client_class); \
if (A_STREQ(c->prop, value)) \
{ \
p_delete(&value); \
return; \
} \
p_delete(&c->prop); \
c->prop = value; \
luaA_object_emit_signal(L, cidx, "property::" #signal, 0); \
}
#define DO_CLIENT_SET_STRING_PROPERTY(prop) \
DO_CLIENT_SET_STRING_PROPERTY2(prop, prop)
DO_CLIENT_SET_STRING_PROPERTY(name)
DO_CLIENT_SET_STRING_PROPERTY2(alt_name, name)
DO_CLIENT_SET_STRING_PROPERTY(icon_name)
DO_CLIENT_SET_STRING_PROPERTY2(alt_icon_name, icon_name)
DO_CLIENT_SET_STRING_PROPERTY(role)
DO_CLIENT_SET_STRING_PROPERTY(machine)
#undef DO_CLIENT_SET_STRING_PROPERTY
void
client_set_class_instance(lua_State *L, int cidx, const char *class, const char *instance)
{
client_t *c = luaA_checkudata(L, cidx, &client_class);
p_delete(&c->class);
p_delete(&c->instance);
c->class = a_strdup(class);
luaA_object_emit_signal(L, cidx, "property::class", 0);
c->instance = a_strdup(instance);
luaA_object_emit_signal(L, cidx, "property::instance", 0);
}
/** Returns true if a client is tagged with one of the selected tags.
 * \param c The client to check.
 * \return true if the client is on a selected tag or sticky, false otherwise.
*/
bool
client_on_selected_tags(client_t *c)
{
if(c->sticky)
return true;
foreach(tag, globalconf.tags)
if(tag_get_selected(*tag) && is_client_tagged(c, *tag))
return true;
return false;
}
/** Get a client by its window.
* \param w The client window to find.
* \return A client pointer if found, NULL otherwise.
*/
client_t *
client_getbywin(xcb_window_t w)
{
foreach(c, globalconf.clients)
if((*c)->window == w)
return *c;
return NULL;
}
/** Get a client by its frame window.
* \param w The client window to find.
* \return A client pointer if found, NULL otherwise.
*/
client_t *
client_getbyframewin(xcb_window_t w)
{
foreach(c, globalconf.clients)
if((*c)->frame_window == w)
return *c;
return NULL;
}
/** Unfocus a client (internal).
* \param c The client.
*/
static void
client_unfocus_internal(client_t *c)
{
lua_State *L = globalconf_get_lua_State();
globalconf.focus.client = NULL;
luaA_object_push(L, c);
luaA_object_emit_signal(L, -1, "unfocus", 0);
lua_pop(L, 1);
}
/** Unfocus a client.
* \param c The client.
*/
static void
client_unfocus(client_t *c)
{
client_unfocus_internal(c);
globalconf.focus.need_update = true;
}
/** Check if a client supports a protocol atom in WM_PROTOCOLS.
* \param c The client.
* \param atom The protocol atom to check for.
* \return True if client has the atom in protocol, false otherwise.
*/
bool
client_hasproto(client_t *c, xcb_atom_t atom)
{
for(uint32_t i = 0; i < c->protocols.atoms_len; i++)
if(c->protocols.atoms[i] == atom)
return true;
return false;
}
/** Prepare banning a client by running all needed lua events.
* \param c The client.
*/
void client_ban_unfocus(client_t *c)
{
/* Wait until the last moment to take away the focus from the window. */
if(globalconf.focus.client == c)
client_unfocus(c);
}
/** Ban client and move it out of the viewport.
* \param c The client.
*/
void
client_ban(client_t *c)
{
if(!c->isbanned)
{
xcb_unmap_window(globalconf.connection, c->frame_window);
c->isbanned = true;
client_ban_unfocus(c);
}
}
/** This is part of The Bob Marley Algorithm: we ignore enter and leave window
 * events in certain cases, like map/unmap or move, so we don't get spurious
 * events.
*/
void
client_ignore_enterleave_events(void)
{
foreach(c, globalconf.clients)
{
xcb_change_window_attributes(globalconf.connection,
(*c)->window,
XCB_CW_EVENT_MASK,
(const uint32_t []) { CLIENT_SELECT_INPUT_EVENT_MASK & ~(XCB_EVENT_MASK_ENTER_WINDOW | XCB_EVENT_MASK_LEAVE_WINDOW) });
xcb_change_window_attributes(globalconf.connection,
(*c)->frame_window,
XCB_CW_EVENT_MASK,
(const uint32_t []) { FRAME_SELECT_INPUT_EVENT_MASK & ~(XCB_EVENT_MASK_ENTER_WINDOW | XCB_EVENT_MASK_LEAVE_WINDOW) });
}
}
void
client_restore_enterleave_events(void)
{
foreach(c, globalconf.clients)
{
xcb_change_window_attributes(globalconf.connection,
(*c)->window,
XCB_CW_EVENT_MASK,
(const uint32_t []) { CLIENT_SELECT_INPUT_EVENT_MASK });
xcb_change_window_attributes(globalconf.connection,
(*c)->frame_window,
XCB_CW_EVENT_MASK,
(const uint32_t []) { FRAME_SELECT_INPUT_EVENT_MASK });
}
}
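/* Typical call pattern (illustrative): bracket any window operation that
 * would otherwise generate spurious crossing events, as client_resize_do()
 * does below:
 *
 *     client_ignore_enterleave_events();
 *     xcb_configure_window(...);   // move/resize the frame and client window
 *     client_restore_enterleave_events();
 */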
/** Record that a client got focus.
* \param c The client.
* \return true if the client focus changed, false otherwise.
*/
bool
client_focus_update(client_t *c)
{
lua_State *L = globalconf_get_lua_State();
if(globalconf.focus.client && globalconf.focus.client != c)
{
/* When we are called due to a FocusIn event (=old focused client
* already unfocused), we don't want to cause a SetInputFocus,
* because the client which has focus now could be using globally
* active input model (or 'no input').
*/
client_unfocus_internal(globalconf.focus.client);
}
bool focused_new = globalconf.focus.client != c;
globalconf.focus.client = c;
/* According to EWMH, we have to remove the urgent state from a client.
* This should be done also for the current/focused client (FS#1310). */
luaA_object_push(L, c);
client_set_urgent(L, -1, false);
if(focused_new)
luaA_object_emit_signal(L, -1, "focus", 0);
lua_pop(L, 1);
return focused_new;
}
/** Give focus to client, or to first client if client is NULL.
* \param c The client.
*/
void
client_focus(client_t *c)
{
/* We have to set focus on first client */
if(!c && globalconf.clients.len && !(c = globalconf.clients.tab[0]))
return;
if(client_focus_update(c))
globalconf.focus.need_update = true;
}
void
client_focus_refresh(void)
{
client_t *c = globalconf.focus.client;
xcb_window_t win = globalconf.focus.window_no_focus;
if(!globalconf.focus.need_update)
return;
globalconf.focus.need_update = false;
if(c && client_on_selected_tags(c))
{
/* Make sure this window is unbanned and e.g. not minimized */
client_unban(c);
/* Sets focus on window - using xcb_set_input_focus or WM_TAKE_FOCUS */
if(!c->nofocus)
win = c->window;
else
/* Move the focus away from whatever has it to make sure the
* previously focused client doesn't get any input in case
* WM_TAKE_FOCUS gets ignored.
*/
win = globalconf.focus.window_no_focus;
if(client_hasproto(c, WM_TAKE_FOCUS))
xwindow_takefocus(c->window);
}
/* If nothing has the focus or the currently focused client does not want
* us to focus it, this sets the focus to the root window. This makes sure
* the previously focused client actually gets unfocused. Alternatively, the
* new client gets the input focus.
*/
xcb_set_input_focus(globalconf.connection, XCB_INPUT_FOCUS_PARENT,
win, globalconf.timestamp);
}
static void
border_width_callback(client_t *c, uint16_t old_width, uint16_t new_width)
{
if(c->size_hints.flags & XCB_ICCCM_SIZE_HINT_P_WIN_GRAVITY)
{
area_t geometry = c->geometry;
int16_t diff = new_width - old_width;
int16_t diff_x = 0, diff_y = 0;
xwindow_translate_for_gravity(c->size_hints.win_gravity,
diff, diff, diff, diff,
&diff_x, &diff_y);
geometry.x += diff_x;
geometry.y += diff_y;
/* force_notice = true -> inform client about changes */
client_resize_do(c, geometry, true);
}
}
static void
client_update_properties(lua_State *L, int cidx, client_t *c)
{
/* get all hints */
xcb_get_property_cookie_t wm_normal_hints = property_get_wm_normal_hints(c);
xcb_get_property_cookie_t wm_hints = property_get_wm_hints(c);
xcb_get_property_cookie_t wm_transient_for = property_get_wm_transient_for(c);
xcb_get_property_cookie_t wm_client_leader = property_get_wm_client_leader(c);
xcb_get_property_cookie_t wm_client_machine = property_get_wm_client_machine(c);
xcb_get_property_cookie_t wm_window_role = property_get_wm_window_role(c);
xcb_get_property_cookie_t net_wm_pid = property_get_net_wm_pid(c);
xcb_get_property_cookie_t net_wm_icon = property_get_net_wm_icon(c);
xcb_get_property_cookie_t wm_name = property_get_wm_name(c);
xcb_get_property_cookie_t net_wm_name = property_get_net_wm_name(c);
xcb_get_property_cookie_t wm_icon_name = property_get_wm_icon_name(c);
xcb_get_property_cookie_t net_wm_icon_name = property_get_net_wm_icon_name(c);
xcb_get_property_cookie_t wm_class = property_get_wm_class(c);
xcb_get_property_cookie_t wm_protocols = property_get_wm_protocols(c);
xcb_get_property_cookie_t opacity = xwindow_get_opacity_unchecked(c->window);
/* update strut */
ewmh_process_client_strut(c);
/* Now process all replies */
property_update_wm_normal_hints(c, wm_normal_hints);
property_update_wm_hints(c, wm_hints);
property_update_wm_transient_for(c, wm_transient_for);
property_update_wm_client_leader(c, wm_client_leader);
property_update_wm_client_machine(c, wm_client_machine);
property_update_wm_window_role(c, wm_window_role);
property_update_net_wm_pid(c, net_wm_pid);
property_update_net_wm_icon(c, net_wm_icon);
property_update_wm_name(c, wm_name);
property_update_net_wm_name(c, net_wm_name);
property_update_wm_icon_name(c, wm_icon_name);
property_update_net_wm_icon_name(c, net_wm_icon_name);
property_update_wm_class(c, wm_class);
property_update_wm_protocols(c, wm_protocols);
window_set_opacity(L, cidx, xwindow_get_opacity_from_cookie(opacity));
}
/** Manage a new client.
* \param w The window.
* \param wgeom Window geometry.
* \param startup True if we are managing at startup time.
*/
void
client_manage(xcb_window_t w, xcb_get_geometry_reply_t *wgeom, xcb_get_window_attributes_reply_t *wattr)
{
lua_State *L = globalconf_get_lua_State();
const uint32_t select_input_val[] = { CLIENT_SELECT_INPUT_EVENT_MASK };
if(systray_iskdedockapp(w))
{
systray_request_handle(w, NULL);
return;
}
/* If this is a new client that just has been launched, then request its
* startup id. */
xcb_get_property_cookie_t startup_id_q = xcb_get_property(globalconf.connection, false,
w, _NET_STARTUP_ID,
XCB_GET_PROPERTY_TYPE_ANY, 0, UINT_MAX);
/* Make sure the window is automatically mapped if awesome exits or dies. */
xcb_change_save_set(globalconf.connection, XCB_SET_MODE_INSERT, w);
if (globalconf.have_shape)
xcb_shape_select_input(globalconf.connection, w, 1);
client_t *c = client_new(L);
xcb_screen_t *s = globalconf.screen;
c->border_width_callback = (void (*) (void *, uint16_t, uint16_t)) border_width_callback;
/* consider the window banned */
c->isbanned = true;
/* Store window and visual */
c->window = w;
c->visualtype = draw_find_visual(globalconf.screen, wattr->visual);
c->frame_window = xcb_generate_id(globalconf.connection);
xcb_create_window(globalconf.connection, globalconf.default_depth, c->frame_window, s->root,
wgeom->x, wgeom->y, wgeom->width, wgeom->height,
wgeom->border_width, XCB_COPY_FROM_PARENT, globalconf.visual->visual_id,
XCB_CW_BORDER_PIXEL | XCB_CW_BIT_GRAVITY | XCB_CW_WIN_GRAVITY
| XCB_CW_OVERRIDE_REDIRECT | XCB_CW_EVENT_MASK | XCB_CW_COLORMAP,
(const uint32_t [])
{
globalconf.screen->black_pixel,
XCB_GRAVITY_NORTH_WEST,
XCB_GRAVITY_NORTH_WEST,
1,
FRAME_SELECT_INPUT_EVENT_MASK,
globalconf.default_cmap
});
/* The client may already be mapped, thus we must be sure that we don't send
* ourselves an UnmapNotify due to the xcb_reparent_window().
*
* Grab the server to make sure we don't lose any events.
*/
uint32_t no_event[] = { 0 };
xcb_grab_server(globalconf.connection);
xcb_change_window_attributes(globalconf.connection,
globalconf.screen->root,
XCB_CW_EVENT_MASK,
no_event);
xcb_reparent_window(globalconf.connection, w, c->frame_window, 0, 0);
xcb_map_window(globalconf.connection, w);
xcb_change_window_attributes(globalconf.connection,
globalconf.screen->root,
XCB_CW_EVENT_MASK,
ROOT_WINDOW_EVENT_MASK);
xcb_ungrab_server(globalconf.connection);
/* Do this now so that we don't get any events for the above
* (Else, reparent could cause an UnmapNotify) */
xcb_change_window_attributes(globalconf.connection, w, XCB_CW_EVENT_MASK, select_input_val);
luaA_object_emit_signal(L, -1, "property::window", 0);
/* The frame window gets the border, not the real client window */
xcb_configure_window(globalconf.connection, w,
XCB_CONFIG_WINDOW_BORDER_WIDTH,
(uint32_t[]) { 0 });
/* Move this window to the bottom of the stack. Without this we would force
* other windows which will be above this one to redraw themselves because
* this window occludes them for a tiny moment. The next stack_refresh()
* will fix this up and move the window to its correct place. */
xcb_configure_window(globalconf.connection, c->frame_window,
XCB_CONFIG_WINDOW_STACK_MODE,
(uint32_t[]) { XCB_STACK_MODE_BELOW});
/* Duplicate client and push it in client list */
lua_pushvalue(L, -1);
client_array_push(&globalconf.clients, luaA_object_ref(L, -1));
/* Set the right screen */
screen_client_moveto(c, screen_getbycoord(wgeom->x, wgeom->y), false);
/* Store initial geometry and emits signals so we inform that geometry have
* been set. */
#define HANDLE_GEOM(attr) \
c->geometry.attr = wgeom->attr; \
luaA_object_emit_signal(L, -1, "property::" #attr, 0);
HANDLE_GEOM(x)
HANDLE_GEOM(y)
HANDLE_GEOM(width)
HANDLE_GEOM(height)
#undef HANDLE_GEOM
luaA_object_emit_signal(L, -1, "property::geometry", 0);
/* Set border width */
window_set_border_width(L, -1, wgeom->border_width);
/* we honor size hints by default */
c->size_hints_honor = true;
luaA_object_emit_signal(L, -1, "property::size_hints_honor", 0);
/* update all properties */
client_update_properties(L, -1, c);
/* Then check clients hints */
ewmh_client_check_hints(c);
/* Push client in stack */
stack_client_push(c);
/* Put the window in normal state. */
xwindow_set_state(c->window, XCB_ICCCM_WM_STATE_NORMAL);
/* Request our response */
xcb_get_property_reply_t *reply =
xcb_get_property_reply(globalconf.connection, startup_id_q, NULL);
/* Say spawn that a client has been started, with startup id as argument */
char *startup_id = xutil_get_text_property_from_reply(reply);
c->startup_id = startup_id;
p_delete(&reply);
spawn_start_notify(c, startup_id);
luaA_class_emit_signal(L, &client_class, "list", 0);
/* client is still on top of the stack; emit signal */
luaA_object_emit_signal(L, -1, "manage", 0);
/* pop client */
lua_pop(L, 1);
}
static void
client_remove_titlebar_geometry(client_t *c, area_t *geometry)
{
geometry->x += c->titlebar[CLIENT_TITLEBAR_LEFT].size;
geometry->y += c->titlebar[CLIENT_TITLEBAR_TOP].size;
geometry->width -= c->titlebar[CLIENT_TITLEBAR_LEFT].size;
geometry->width -= c->titlebar[CLIENT_TITLEBAR_RIGHT].size;
geometry->height -= c->titlebar[CLIENT_TITLEBAR_TOP].size;
geometry->height -= c->titlebar[CLIENT_TITLEBAR_BOTTOM].size;
}
static void
client_add_titlebar_geometry(client_t *c, area_t *geometry)
{
geometry->x -= c->titlebar[CLIENT_TITLEBAR_LEFT].size;
geometry->y -= c->titlebar[CLIENT_TITLEBAR_TOP].size;
geometry->width += c->titlebar[CLIENT_TITLEBAR_LEFT].size;
geometry->width += c->titlebar[CLIENT_TITLEBAR_RIGHT].size;
geometry->height += c->titlebar[CLIENT_TITLEBAR_TOP].size;
geometry->height += c->titlebar[CLIENT_TITLEBAR_BOTTOM].size;
}
/** Send a synthetic configure event to a window.
*/
void
client_send_configure(client_t *c)
{
area_t geometry = c->geometry;
if (!c->fullscreen)
client_remove_titlebar_geometry(c, &geometry);
xwindow_configure(c->window, geometry, c->border_width);
}
/** Apply size hints to the client's new geometry.
*/
static area_t
client_apply_size_hints(client_t *c, area_t geometry)
{
int32_t minw = 0, minh = 0;
int32_t basew = 0, baseh = 0, real_basew = 0, real_baseh = 0;
if (c->fullscreen)
return geometry;
/* Size hints are applied to the window without any decoration */
client_remove_titlebar_geometry(c, &geometry);
if(c->size_hints.flags & XCB_ICCCM_SIZE_HINT_BASE_SIZE)
{
basew = c->size_hints.base_width;
baseh = c->size_hints.base_height;
real_basew = basew;
real_baseh = baseh;
}
else if(c->size_hints.flags & XCB_ICCCM_SIZE_HINT_P_MIN_SIZE)
{
/* base size is substituted with min size if not specified */
basew = c->size_hints.min_width;
baseh = c->size_hints.min_height;
}
if(c->size_hints.flags & XCB_ICCCM_SIZE_HINT_P_MIN_SIZE)
{
minw = c->size_hints.min_width;
minh = c->size_hints.min_height;
}
else if(c->size_hints.flags & XCB_ICCCM_SIZE_HINT_BASE_SIZE)
{
/* min size is substituted with base size if not specified */
minw = c->size_hints.base_width;
minh = c->size_hints.base_height;
}
/* Handle the size aspect ratio */
if(c->size_hints.flags & XCB_ICCCM_SIZE_HINT_P_ASPECT
&& c->size_hints.min_aspect_den > 0
&& c->size_hints.max_aspect_den > 0
&& geometry.height > real_baseh
&& geometry.width > real_basew)
{
/* ICCCM mandates:
* If a base size is provided along with the aspect ratio fields, the base size should be subtracted from the
* window size prior to checking that the aspect ratio falls in range. If a base size is not provided, nothing
* should be subtracted from the window size. (The minimum size is not to be used in place of the base size for
* this purpose.)
*/
double dx = geometry.width - real_basew;
double dy = geometry.height - real_baseh;
double ratio = dx / dy;
double min = c->size_hints.min_aspect_num / (double) c->size_hints.min_aspect_den;
double max = c->size_hints.max_aspect_num / (double) c->size_hints.max_aspect_den;
if(max > 0 && min > 0 && ratio > 0)
{
if(ratio < min)
{
/* dx is lower than allowed, make dy lower to compensate this (+ 0.5 to force proper rounding). */
dy = dx / min + 0.5;
geometry.width = dx + real_basew;
geometry.height = dy + real_baseh;
} else if(ratio > max)
{
/* dx is too high, lower it (+0.5 for proper rounding) */
dx = dy * max + 0.5;
geometry.width = dx + real_basew;
geometry.height = dy + real_baseh;
}
}
}
/* Handle the minimum size */
geometry.width = MAX(geometry.width, minw);
geometry.height = MAX(geometry.height, minh);
/* Handle the maximum size */
if(c->size_hints.flags & XCB_ICCCM_SIZE_HINT_P_MAX_SIZE)
{
if(c->size_hints.max_width)
geometry.width = MIN(geometry.width, c->size_hints.max_width);
if(c->size_hints.max_height)
geometry.height = MIN(geometry.height, c->size_hints.max_height);
}
/* Handle the size increment */
if(c->size_hints.flags & (XCB_ICCCM_SIZE_HINT_P_RESIZE_INC | XCB_ICCCM_SIZE_HINT_BASE_SIZE)
&& c->size_hints.width_inc && c->size_hints.height_inc)
{
uint16_t t1 = geometry.width, t2 = geometry.height;
unsigned_subtract(t1, basew);
unsigned_subtract(t2, baseh);
geometry.width -= t1 % c->size_hints.width_inc;
geometry.height -= t2 % c->size_hints.height_inc;
}
client_add_titlebar_geometry(c, &geometry);
return geometry;
}
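/* Worked example (illustrative): with a zero base size, min aspect 4/3 and
 * max aspect 16/9, a requested 1000x1000 geometry has ratio 1.0 < 4/3, so the
 * height is recomputed as dx / min + 0.5 = 750.5, giving roughly 1000x750
 * before the min/max size and resize-increment adjustments are applied. */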
static void
client_resize_do(client_t *c, area_t geometry, bool force_notice)
{
lua_State *L = globalconf_get_lua_State();
bool send_notice = force_notice;
bool hide_titlebars = c->fullscreen;
screen_t *new_screen = c->screen;
if(!screen_coord_in_screen(new_screen, geometry.x, geometry.y))
new_screen = screen_getbycoord(geometry.x, geometry.y);
if(c->geometry.width == geometry.width
&& c->geometry.height == geometry.height)
send_notice = true;
/* Also store geometry including border */
area_t old_geometry = c->geometry;
c->geometry = geometry;
/* Ignore all spurious enter/leave notify events */
client_ignore_enterleave_events();
/* Configure the client for its new size */
area_t real_geometry = geometry;
if (!hide_titlebars)
{
real_geometry.x = c->titlebar[CLIENT_TITLEBAR_LEFT].size;
real_geometry.y = c->titlebar[CLIENT_TITLEBAR_TOP].size;
real_geometry.width -= c->titlebar[CLIENT_TITLEBAR_LEFT].size;
real_geometry.width -= c->titlebar[CLIENT_TITLEBAR_RIGHT].size;
real_geometry.height -= c->titlebar[CLIENT_TITLEBAR_TOP].size;
real_geometry.height -= c->titlebar[CLIENT_TITLEBAR_BOTTOM].size;
} else {
real_geometry.x = 0;
real_geometry.y = 0;
}
xcb_configure_window(globalconf.connection, c->frame_window,
XCB_CONFIG_WINDOW_X | XCB_CONFIG_WINDOW_Y | XCB_CONFIG_WINDOW_WIDTH | XCB_CONFIG_WINDOW_HEIGHT,
(uint32_t[]) { geometry.x, geometry.y, geometry.width, geometry.height });
xcb_configure_window(globalconf.connection, c->window,
XCB_CONFIG_WINDOW_X | XCB_CONFIG_WINDOW_Y | XCB_CONFIG_WINDOW_WIDTH | XCB_CONFIG_WINDOW_HEIGHT,
(uint32_t[]) { real_geometry.x, real_geometry.y, real_geometry.width, real_geometry.height });
if(send_notice)
/* We are moving without changing the size, see ICCCM 4.2.3 */
client_send_configure(c);
client_restore_enterleave_events();
luaA_object_push(L, c);
if (!AREA_EQUAL(old_geometry, geometry))
luaA_object_emit_signal(L, -1, "property::geometry", 0);
if (old_geometry.x != geometry.x)
luaA_object_emit_signal(L, -1, "property::x", 0);
if (old_geometry.y != geometry.y)
luaA_object_emit_signal(L, -1, "property::y", 0);
if (old_geometry.width != geometry.width)
luaA_object_emit_signal(L, -1, "property::width", 0);
if (old_geometry.height != geometry.height)
luaA_object_emit_signal(L, -1, "property::height", 0);
lua_pop(L, 1);
screen_client_moveto(c, new_screen, false);
/* Update all titlebars */
for (client_titlebar_t bar = CLIENT_TITLEBAR_TOP; bar < CLIENT_TITLEBAR_COUNT; bar++) {
if (c->titlebar[bar].drawable == NULL && c->titlebar[bar].size == 0)
continue;
luaA_object_push(L, c);
drawable_t *drawable = titlebar_get_drawable(L, c, -1, bar);
luaA_object_push_item(L, -1, drawable);
area_t area = titlebar_get_area(c, bar);
/* Convert to global coordinates */
area.x += geometry.x;
area.y += geometry.y;
if (hide_titlebars)
area.width = area.height = 0;
drawable_set_geometry(L, -1, area);
/* Pop the client and the drawable */
lua_pop(L, 2);
}
}
/** Resize client window.
* The sizes given as parameters are with borders!
* \param c Client to resize.
* \param geometry New window geometry.
* \param honor_hints Use size hints.
* \return true if an actual resize occurred.
*/
bool
client_resize(client_t *c, area_t geometry, bool honor_hints)
{
area_t area;
/* offscreen appearance fixes */
area = display_area_get();
if(geometry.x > area.width)
geometry.x = area.width - geometry.width;
if(geometry.y > area.height)
geometry.y = area.height - geometry.height;
if(geometry.x + geometry.width < 0)
geometry.x = 0;
if(geometry.y + geometry.height < 0)
geometry.y = 0;
if(geometry.width < c->titlebar[CLIENT_TITLEBAR_LEFT].size + c->titlebar[CLIENT_TITLEBAR_RIGHT].size)
return false;
if(geometry.height < c->titlebar[CLIENT_TITLEBAR_TOP].size + c->titlebar[CLIENT_TITLEBAR_BOTTOM].size)
return false;
if(geometry.width == 0 || geometry.height == 0)
return false;
if (honor_hints)
geometry = client_apply_size_hints(c, geometry);
if(c->geometry.x != geometry.x
|| c->geometry.y != geometry.y
|| c->geometry.width != geometry.width
|| c->geometry.height != geometry.height)
{
client_resize_do(c, geometry, false);
return true;
}
return false;
}
static void
client_emit_property_workarea_on_screen(lua_State *L, client_t *c)
{
luaA_object_push(L, c->screen);
luaA_object_emit_signal(L, -1, "property::workarea", 0);
lua_pop(L, 1);
}
/** Set a client minimized, or not.
* \param L The Lua VM state.
* \param cidx The client index.
* \param s Set or not the client minimized.
*/
void
client_set_minimized(lua_State *L, int cidx, bool s)
{
client_t *c = luaA_checkudata(L, cidx, &client_class);
if(c->minimized != s)
{
c->minimized = s;
banning_need_update();
if(s)
{
/* ICCCM: To transition from ICONIC to NORMAL state, the client
* should just map the window. Thus, iconic clients need to be
* unmapped, else the MapWindow request doesn't have any effect.
*/
xwindow_set_state(c->window, XCB_ICCCM_WM_STATE_ICONIC);
uint32_t no_event[] = { 0 };
const uint32_t client_select_input_val[] = { CLIENT_SELECT_INPUT_EVENT_MASK };
const uint32_t frame_select_input_val[] = { FRAME_SELECT_INPUT_EVENT_MASK };
xcb_grab_server(globalconf.connection);
xcb_change_window_attributes(globalconf.connection,
globalconf.screen->root,
XCB_CW_EVENT_MASK,
no_event);
xcb_change_window_attributes(globalconf.connection,
c->frame_window,
XCB_CW_EVENT_MASK,
no_event);
xcb_change_window_attributes(globalconf.connection,
c->window,
XCB_CW_EVENT_MASK,
no_event);
xcb_unmap_window(globalconf.connection, c->window);
xcb_change_window_attributes(globalconf.connection,
globalconf.screen->root,
XCB_CW_EVENT_MASK,
ROOT_WINDOW_EVENT_MASK);
xcb_change_window_attributes(globalconf.connection,
c->frame_window,
XCB_CW_EVENT_MASK,
frame_select_input_val);
xcb_change_window_attributes(globalconf.connection,
c->window,
XCB_CW_EVENT_MASK,
client_select_input_val);
xcb_ungrab_server(globalconf.connection);
}
else
{
xwindow_set_state(c->window, XCB_ICCCM_WM_STATE_NORMAL);
xcb_map_window(globalconf.connection, c->window);
}
if(strut_has_value(&c->strut))
client_emit_property_workarea_on_screen(L, c);
luaA_object_emit_signal(L, cidx, "property::minimized", 0);
}
}
/** Set a client hidden, or not.
* \param L The Lua VM state.
* \param cidx The client index.
* \param s Set or not the client hidden.
*/
static void
client_set_hidden(lua_State *L, int cidx, bool s)
{
client_t *c = luaA_checkudata(L, cidx, &client_class);
if(c->hidden != s)
{
c->hidden = s;
banning_need_update();
if(strut_has_value(&c->strut))
client_emit_property_workarea_on_screen(L, c);
luaA_object_emit_signal(L, cidx, "property::hidden", 0);
}
}
/** Set a client sticky, or not.
* \param L The Lua VM state.
* \param cidx The client index.
* \param s Set or not the client sticky.
*/
void
client_set_sticky(lua_State *L, int cidx, bool s)
{
client_t *c = luaA_checkudata(L, cidx, &client_class);
if(c->sticky != s)
{
c->sticky = s;
banning_need_update();
luaA_object_emit_signal(L, cidx, "property::sticky", 0);
}
}
/** Set a client focusable, or not.
* \param L The Lua VM state.
* \param cidx The client index.
* \param s Set or not the client's focusable property.
*/
static void
client_set_focusable(lua_State *L, int cidx, bool s)
{
client_t *c = luaA_checkudata(L, cidx, &client_class);
if(c->focusable != s || !c->focusable_set)
{
c->focusable = s;
c->focusable_set = true;
luaA_object_emit_signal(L, cidx, "property::focusable", 0);
}
}
/** Unset a client's focusable property and make it use the default again.
* \param L The Lua VM state.
* \param cidx The client index.
*/
static void
client_unset_focusable(lua_State *L, int cidx)
{
client_t *c = luaA_checkudata(L, cidx, &client_class);
if(c->focusable_set)
{
c->focusable_set = false;
luaA_object_emit_signal(L, cidx, "property::focusable", 0);
}
}
/** Set a client fullscreen, or not.
* \param L The Lua VM state.
* \param cidx The client index.
* \param s Set or not the client fullscreen.
*/
void
client_set_fullscreen(lua_State *L, int cidx, bool s)
{
client_t *c = luaA_checkudata(L, cidx, &client_class);
if(c->fullscreen != s)
{
/* become fullscreen! */
if(s)
{
/* You can only be part of one of the special layers. */
client_set_below(L, cidx, false);
client_set_above(L, cidx, false);
client_set_ontop(L, cidx, false);
}
    int abs_cidx = luaA_absindex(L, cidx);
lua_pushboolean(L, s);
c->fullscreen = s;
luaA_object_emit_signal(L, abs_cidx, "request::fullscreen", 1);
luaA_object_emit_signal(L, abs_cidx, "property::fullscreen", 0);
/* Force a client resize, so that titlebars get shown/hidden */
client_resize_do(c, c->geometry, true);
stack_windows();
}
}
/** Get a clients maximized state (horizontally and vertically).
* \param c The client.
* \return The maximized state.
*/
static int
client_get_maximized(client_t *c)
{
return c->maximized_horizontal && c->maximized_vertical;
}
/** Set a client horizontally|vertically maximized.
* \param L The Lua VM state.
* \param cidx The client index.
* \param s The maximized status.
*/
#define DO_FUNCTION_CLIENT_MAXIMIZED(type) \
void \
client_set_maximized_##type(lua_State *L, int cidx, bool s) \
{ \
client_t *c = luaA_checkudata(L, cidx, &client_class); \
if(c->maximized_##type != s) \
{ \
int abs_cidx = luaA_absindex(L, cidx); \
lua_pushboolean(L, s); \
int max_before = client_get_maximized(c); \
c->maximized_##type = s; \
luaA_object_emit_signal(L, abs_cidx, "request::maximized_" #type, 1); \
luaA_object_emit_signal(L, abs_cidx, "property::maximized_" #type, 0); \
if(max_before != client_get_maximized(c)) \
luaA_object_emit_signal(L, abs_cidx, "property::maximized", 0); \
stack_windows(); \
} \
}
DO_FUNCTION_CLIENT_MAXIMIZED(vertical)
DO_FUNCTION_CLIENT_MAXIMIZED(horizontal)
#undef DO_FUNCTION_CLIENT_MAXIMIZED
/** Set a client maximized (horizontally and vertically).
* \param L The Lua VM state.
* \param cidx The client index.
* \param s Set or not the client maximized attribute.
*/
void
client_set_maximized(lua_State *L, int cidx, bool s)
{
client_set_maximized_horizontal(L, cidx, s);
client_set_maximized_vertical(L, cidx, s);
}
/** Set a client above, or not.
* \param L The Lua VM state.
* \param cidx The client index.
* \param s Set or not the client above.
*/
void
client_set_above(lua_State *L, int cidx, bool s)
{
client_t *c = luaA_checkudata(L, cidx, &client_class);
if(c->above != s)
{
/* You can only be part of one of the special layers. */
if(s)
{
client_set_below(L, cidx, false);
client_set_ontop(L, cidx, false);
client_set_fullscreen(L, cidx, false);
}
c->above = s;
stack_windows();
luaA_object_emit_signal(L, cidx, "property::above", 0);
}
}
/** Set a client below, or not.
* \param L The Lua VM state.
* \param cidx The client index.
* \param s Set or not the client below.
*/
void
client_set_below(lua_State *L, int cidx, bool s)
{
client_t *c = luaA_checkudata(L, cidx, &client_class);
if(c->below != s)
{
/* You can only be part of one of the special layers. */
if(s)
{
client_set_above(L, cidx, false);
client_set_ontop(L, cidx, false);
client_set_fullscreen(L, cidx, false);
}
c->below = s;
stack_windows();
luaA_object_emit_signal(L, cidx, "property::below", 0);
}
}
/** Set a client modal, or not.
* \param L The Lua VM state.
* \param cidx The client index.
* \param s Set or not the client modal attribute.
*/
void
client_set_modal(lua_State *L, int cidx, bool s)
{
client_t *c = luaA_checkudata(L, cidx, &client_class);
if(c->modal != s)
{
c->modal = s;
stack_windows();
luaA_object_emit_signal(L, cidx, "property::modal", 0);
}
}
/** Set a client ontop, or not.
* \param L The Lua VM state.
* \param cidx The client index.
* \param s Set or not the client ontop attribute.
*/
void
client_set_ontop(lua_State *L, int cidx, bool s)
{
client_t *c = luaA_checkudata(L, cidx, &client_class);
if(c->ontop != s)
{
/* You can only be part of one of the special layers. */
if(s)
{
client_set_above(L, cidx, false);
client_set_below(L, cidx, false);
client_set_fullscreen(L, cidx, false);
}
c->ontop = s;
stack_windows();
luaA_object_emit_signal(L, cidx, "property::ontop", 0);
}
}
/** Unban a client and move it back into the viewport.
* \param c The client.
*/
void
client_unban(client_t *c)
{
lua_State *L = globalconf_get_lua_State();
if(c->isbanned)
{
xcb_map_window(globalconf.connection, c->frame_window);
c->isbanned = false;
/* An unbanned client shouldn't be minimized or hidden */
luaA_object_push(L, c);
client_set_minimized(L, -1, false);
client_set_hidden(L, -1, false);
lua_pop(L, 1);
if (globalconf.focus.client == c)
globalconf.focus.need_update = true;
}
}
/** Unmanage a client.
* \param c The client.
* \param window_valid Is the client's window still valid?
*/
void
client_unmanage(client_t *c, bool window_valid)
{
lua_State *L = globalconf_get_lua_State();
/* Reset transient_for attributes of windows that might be referring to us */
foreach(_tc, globalconf.clients)
{
client_t *tc = *_tc;
if(tc->transient_for == c)
tc->transient_for = NULL;
}
if(globalconf.focus.client == c)
client_unfocus(c);
/* remove client from global list and everywhere else */
foreach(elem, globalconf.clients)
if(*elem == c)
{
client_array_remove(&globalconf.clients, elem);
break;
}
stack_client_remove(c);
for(int i = 0; i < globalconf.tags.len; i++)
untag_client(c, globalconf.tags.tab[i]);
luaA_object_push(L, c);
luaA_object_emit_signal(L, -1, "unmanage", 0);
lua_pop(L, 1);
luaA_class_emit_signal(L, &client_class, "list", 0);
if(strut_has_value(&c->strut))
client_emit_property_workarea_on_screen(L, c);
/* Get rid of all titlebars */
for (client_titlebar_t bar = CLIENT_TITLEBAR_TOP; bar < CLIENT_TITLEBAR_COUNT; bar++) {
if (c->titlebar[bar].drawable == NULL)
continue;
if (globalconf.drawable_under_mouse == c->titlebar[bar].drawable) {
/* Leave drawable before we invalidate the client */
lua_pushnil(L);
event_drawable_under_mouse(L, -1);
lua_pop(L, 1);
}
/* Forget about the drawable */
luaA_object_push(L, c);
luaA_object_unref_item(L, -1, c->titlebar[bar].drawable);
c->titlebar[bar].drawable = NULL;
lua_pop(L, 1);
}
/* Clear our event mask so that we don't receive any events from now on,
* especially not for the following requests. */
if(window_valid)
xcb_change_window_attributes(globalconf.connection,
c->window,
XCB_CW_EVENT_MASK,
(const uint32_t []) { 0 });
xcb_change_window_attributes(globalconf.connection,
c->frame_window,
XCB_CW_EVENT_MASK,
(const uint32_t []) { 0 });
if(window_valid)
{
xcb_unmap_window(globalconf.connection, c->window);
xcb_reparent_window(globalconf.connection, c->window, globalconf.screen->root,
c->geometry.x, c->geometry.y);
}
/* Ignore all spurious enter/leave notify events */
client_ignore_enterleave_events();
xcb_destroy_window(globalconf.connection, c->frame_window);
client_restore_enterleave_events();
if(window_valid)
{
/* Remove this window from the save set since this shouldn't be made visible
* after a restart anymore. */
xcb_change_save_set(globalconf.connection, XCB_SET_MODE_DELETE, c->window);
if (globalconf.have_shape)
xcb_shape_select_input(globalconf.connection, c->window, 0);
/* Do this last to avoid races with clients. According to ICCCM, clients
 * aren't allowed to re-use the window until after this. */
xwindow_set_state(c->window, XCB_ICCCM_WM_STATE_WITHDRAWN);
}
/* set client as invalid */
c->window = XCB_NONE;
luaA_object_unref(L, c);
}
/** Kill a client via a WM_DELETE_WINDOW request or KillClient if not
* supported.
* \param c The client to kill.
*/
void
client_kill(client_t *c)
{
if(client_hasproto(c, WM_DELETE_WINDOW))
{
xcb_client_message_event_t ev;
/* Initialize all of event's fields first */
p_clear(&ev, 1);
ev.response_type = XCB_CLIENT_MESSAGE;
ev.window = c->window;
ev.format = 32;
ev.data.data32[1] = globalconf.timestamp;
ev.type = WM_PROTOCOLS;
ev.data.data32[0] = WM_DELETE_WINDOW;
xcb_send_event(globalconf.connection, false, c->window,
XCB_EVENT_MASK_NO_EVENT, (char *) &ev);
}
else
xcb_kill_client(globalconf.connection, c->window);
}
/** Get all clients into a table.
*
* @tparam[opt] integer screen A screen number to filter clients on.
* @tparam[opt] boolean stacked Return clients in stacking order? (ordered from
* top to bottom).
* @treturn table A table with clients.
* @function get
*/
static int
luaA_client_get(lua_State *L)
{
int i = 1;
screen_t *screen = NULL;
bool stacked = false;
if(!lua_isnoneornil(L, 1))
screen = luaA_checkscreen(L, 1);
if(!lua_isnoneornil(L, 2))
stacked = luaA_checkboolean(L, 2);
lua_newtable(L);
if(stacked)
{
foreach_reverse(c, globalconf.stack)
if(screen == NULL || (*c)->screen == screen)
{
luaA_object_push(L, *c);
lua_rawseti(L, -2, i++);
}
}
else
{
foreach(c, globalconf.clients)
if(screen == NULL || (*c)->screen == screen)
{
luaA_object_push(L, *c);
lua_rawseti(L, -2, i++);
}
}
return 1;
}
/** Check if a client is visible on its screen.
*
* @return A boolean value, true if the client is visible, false otherwise.
* @function isvisible
*/
static int
luaA_client_isvisible(lua_State *L)
{
client_t *c = luaA_checkudata(L, 1, &client_class);
lua_pushboolean(L, client_isvisible(c));
return 1;
}
/** Set a client icon.
 * \param c The client whose icon to set.
 * \param s The cairo surface to use as the icon, or NULL to unset it.
*/
void
client_set_icon(client_t *c, cairo_surface_t *s)
{
lua_State *L = globalconf_get_lua_State();
if (s)
s = draw_dup_image_surface(s);
if(c->icon)
cairo_surface_destroy(c->icon);
c->icon = s;
luaA_object_push(L, c);
luaA_object_emit_signal(L, -1, "property::icon", 0);
lua_pop(L, 1);
}
/** Set a client icon.
* \param c The client to change.
* \param icon A bitmap containing the icon.
* \param mask A mask for the bitmap (optional)
*/
void
client_set_icon_from_pixmaps(client_t *c, xcb_pixmap_t icon, xcb_pixmap_t mask)
{
xcb_get_geometry_cookie_t geom_icon_c, geom_mask_c;
xcb_get_geometry_reply_t *geom_icon_r, *geom_mask_r = NULL;
cairo_surface_t *s_icon, *result;
geom_icon_c = xcb_get_geometry_unchecked(globalconf.connection, icon);
if (mask)
geom_mask_c = xcb_get_geometry_unchecked(globalconf.connection, mask);
geom_icon_r = xcb_get_geometry_reply(globalconf.connection, geom_icon_c, NULL);
if (mask)
geom_mask_r = xcb_get_geometry_reply(globalconf.connection, geom_mask_c, NULL);
if (!geom_icon_r || (mask && !geom_mask_r))
goto out;
if ((geom_icon_r->depth != 1 && geom_icon_r->depth != globalconf.screen->root_depth)
|| (geom_mask_r && geom_mask_r->depth != 1))
{
warn("Got pixmaps with depth (%d, %d) while processing icon, but only depth 1 and %d are allowed",
geom_icon_r->depth, geom_mask_r ? geom_mask_r->depth : 0, globalconf.screen->root_depth);
goto out;
}
if (geom_icon_r->depth == 1)
s_icon = cairo_xcb_surface_create_for_bitmap(globalconf.connection,
globalconf.screen, icon, geom_icon_r->width, geom_icon_r->height);
else
s_icon = cairo_xcb_surface_create(globalconf.connection, icon, globalconf.default_visual,
geom_icon_r->width, geom_icon_r->height);
result = s_icon;
if (mask)
{
cairo_surface_t *s_mask;
cairo_t *cr;
result = cairo_surface_create_similar(s_icon, CAIRO_CONTENT_COLOR_ALPHA, geom_icon_r->width, geom_icon_r->height);
s_mask = cairo_xcb_surface_create_for_bitmap(globalconf.connection,
globalconf.screen, mask, geom_icon_r->width, geom_icon_r->height);
cr = cairo_create(result);
cairo_set_source_surface(cr, s_icon, 0, 0);
cairo_mask_surface(cr, s_mask, 0, 0);
cairo_surface_destroy(s_mask);
cairo_destroy(cr);
}
client_set_icon(c, result);
cairo_surface_destroy(result);
if (result != s_icon)
cairo_surface_destroy(s_icon);
out:
p_delete(&geom_icon_r);
p_delete(&geom_mask_r);
}
/** Kill a client.
*
* @function kill
*/
static int
luaA_client_kill(lua_State *L)
{
client_t *c = luaA_checkudata(L, 1, &client_class);
client_kill(c);
return 0;
}
/** Swap a client with another one in global client list.
* @client A client to swap with.
* @function swap
*/
static int
luaA_client_swap(lua_State *L)
{
client_t *c = luaA_checkudata(L, 1, &client_class);
client_t *swap = luaA_checkudata(L, 2, &client_class);
if(c != swap)
{
client_t **ref_c = NULL, **ref_swap = NULL;
foreach(item, globalconf.clients)
{
if(*item == c)
ref_c = item;
else if(*item == swap)
ref_swap = item;
if(ref_c && ref_swap)
break;
}
/* swap ! */
*ref_c = swap;
*ref_swap = c;
luaA_class_emit_signal(L, &client_class, "list", 0);
}
return 0;
}
/** Access or set the client tags.
*
* Use the `first_tag` field to access the first tag of a client directly.
*
* @tparam table tags_table A table with tags to set, or `nil` to get the
* current tags.
* @treturn table A table with all tags.
* @function tags
*/
static int
luaA_client_tags(lua_State *L)
{
client_t *c = luaA_checkudata(L, 1, &client_class);
int j = 0;
if(lua_gettop(L) == 2)
{
luaA_checktable(L, 2);
for(int i = 0; i < globalconf.tags.len; i++)
{
/* Only untag if we aren't going to add this tag again */
bool found = false;
lua_pushnil(L);
while(lua_next(L, 2))
{
tag_t *t = lua_touserdata(L, -1);
/* Pop the value from lua_next */
lua_pop(L, 1);
if (t != globalconf.tags.tab[i])
continue;
/* Pop the key from lua_next */
lua_pop(L, 1);
found = true;
break;
}
if(!found)
untag_client(c, globalconf.tags.tab[i]);
}
lua_pushnil(L);
while(lua_next(L, 2))
tag_client(L, c);
lua_pop(L, 1);
}
lua_newtable(L);
foreach(tag, globalconf.tags)
if(is_client_tagged(c, *tag))
{
luaA_object_push(L, *tag);
lua_rawseti(L, -2, ++j);
}
return 1;
}
/** Get the first tag of a client.
*/
static int
luaA_client_get_first_tag(lua_State *L)
{
client_t *c = luaA_checkudata(L, 1, &client_class);
foreach(tag, globalconf.tags)
if(is_client_tagged(c, *tag))
{
luaA_object_push(L, *tag);
return 1;
}
return 0;
}
/** Raise a client on top of others which are on the same layer.
*
* @function raise
*/
static int
luaA_client_raise(lua_State *L)
{
client_t *c = luaA_checkudata(L, 1, &client_class);
client_raise(c);
return 0;
}
/** Lower a client to the bottom of others which are on the same layer.
*
* @function lower
*/
static int
luaA_client_lower(lua_State *L)
{
client_t *c = luaA_checkudata(L, 1, &client_class);
stack_client_push(c);
/* Traverse all transient layers. */
for(client_t *tc = c->transient_for; tc; tc = tc->transient_for)
stack_client_push(tc);
return 0;
}
/** Stop managing a client.
*
* @function unmanage
*/
static int
luaA_client_unmanage(lua_State *L)
{
client_t *c = luaA_checkudata(L, 1, &client_class);
client_unmanage(c, true);
return 0;
}
static area_t
titlebar_get_area(client_t *c, client_titlebar_t bar)
{
area_t result = c->geometry;
result.x = result.y = 0;
// Let's try some ascii art:
// ---------------------------
// | Top |
// |-------------------------|
// |L| |R|
// |e| |i|
// |f| |g|
// |t| |h|
// | | |t|
// |-------------------------|
// | Bottom |
// ---------------------------
switch (bar) {
case CLIENT_TITLEBAR_BOTTOM:
result.y = c->geometry.height - c->titlebar[bar].size;
/* Fall through */
case CLIENT_TITLEBAR_TOP:
result.height = c->titlebar[bar].size;
break;
case CLIENT_TITLEBAR_RIGHT:
result.x = c->geometry.width - c->titlebar[bar].size;
/* Fall through */
case CLIENT_TITLEBAR_LEFT:
result.y = c->titlebar[CLIENT_TITLEBAR_TOP].size;
result.width = c->titlebar[bar].size;
result.height -= c->titlebar[CLIENT_TITLEBAR_TOP].size;
result.height -= c->titlebar[CLIENT_TITLEBAR_BOTTOM].size;
break;
default:
fatal("Unknown titlebar kind %d\n", (int) bar);
}
return result;
}
drawable_t *
client_get_drawable_offset(client_t *c, int *x, int *y)
{
for (client_titlebar_t bar = CLIENT_TITLEBAR_TOP; bar < CLIENT_TITLEBAR_COUNT; bar++) {
area_t area = titlebar_get_area(c, bar);
if (AREA_LEFT(area) > *x || AREA_RIGHT(area) <= *x)
continue;
if (AREA_TOP(area) > *y || AREA_BOTTOM(area) <= *y)
continue;
*x -= area.x;
*y -= area.y;
return c->titlebar[bar].drawable;
}
return NULL;
}
drawable_t *
client_get_drawable(client_t *c, int x, int y)
{
return client_get_drawable_offset(c, &x, &y);
}
static void
client_refresh_titlebar_partial(client_t *c, client_titlebar_t bar, int16_t x, int16_t y, uint16_t width, uint16_t height)
{
if(c->titlebar[bar].drawable == NULL
|| c->titlebar[bar].drawable->pixmap == XCB_NONE
|| !c->titlebar[bar].drawable->refreshed)
return;
/* Is the titlebar part of the area that should get redrawn? */
area_t area = titlebar_get_area(c, bar);
if (AREA_LEFT(area) >= x + width || AREA_RIGHT(area) <= x)
return;
if (AREA_TOP(area) >= y + height || AREA_BOTTOM(area) <= y)
return;
/* Redraw the affected parts */
cairo_surface_flush(c->titlebar[bar].drawable->surface);
xcb_copy_area(globalconf.connection, c->titlebar[bar].drawable->pixmap, c->frame_window,
globalconf.gc, x - area.x, y - area.y, x, y, width, height);
}
#define HANDLE_TITLEBAR_REFRESH(name, index) \
static void \
client_refresh_titlebar_ ## name(client_t *c) \
{ \
area_t area = titlebar_get_area(c, index); \
client_refresh_titlebar_partial(c, index, area.x, area.y, area.width, area.height); \
}
HANDLE_TITLEBAR_REFRESH(top, CLIENT_TITLEBAR_TOP)
HANDLE_TITLEBAR_REFRESH(right, CLIENT_TITLEBAR_RIGHT)
HANDLE_TITLEBAR_REFRESH(bottom, CLIENT_TITLEBAR_BOTTOM)
HANDLE_TITLEBAR_REFRESH(left, CLIENT_TITLEBAR_LEFT)
/**
* Refresh all titlebars that are in the specified rectangle
*/
void
client_refresh_partial(client_t *c, int16_t x, int16_t y, uint16_t width, uint16_t height)
{
for (client_titlebar_t bar = CLIENT_TITLEBAR_TOP; bar < CLIENT_TITLEBAR_COUNT; bar++) {
client_refresh_titlebar_partial(c, bar, x, y, width, height);
}
}
static drawable_t *
titlebar_get_drawable(lua_State *L, client_t *c, int cl_idx, client_titlebar_t bar)
{
if (c->titlebar[bar].drawable == NULL)
{
cl_idx = luaA_absindex(L, cl_idx);
switch (bar) {
case CLIENT_TITLEBAR_TOP:
drawable_allocator(L, (drawable_refresh_callback *) client_refresh_titlebar_top, c);
break;
case CLIENT_TITLEBAR_BOTTOM:
drawable_allocator(L, (drawable_refresh_callback *) client_refresh_titlebar_bottom, c);
break;
case CLIENT_TITLEBAR_RIGHT:
drawable_allocator(L, (drawable_refresh_callback *) client_refresh_titlebar_right, c);
break;
case CLIENT_TITLEBAR_LEFT:
drawable_allocator(L, (drawable_refresh_callback *) client_refresh_titlebar_left, c);
break;
default:
fatal("Unknown titlebar kind %d\n", (int) bar);
}
c->titlebar[bar].drawable = luaA_object_ref_item(L, cl_idx, -1);
}
return c->titlebar[bar].drawable;
}
static void
titlebar_resize(lua_State *L, int cidx, client_t *c, client_titlebar_t bar, int size)
{
const char *property_name;
if (size < 0)
return;
if (size == c->titlebar[bar].size)
return;
/* Now resize the client (and titlebars!) suitably (the client without
* titlebars should keep its current size!) */
area_t geometry = c->geometry;
int change = size - c->titlebar[bar].size;
int16_t diff_top = 0, diff_bottom = 0, diff_right = 0, diff_left = 0;
switch (bar) {
case CLIENT_TITLEBAR_TOP:
geometry.height += change;
diff_top = change;
property_name = "property::titlebar_top";
break;
case CLIENT_TITLEBAR_BOTTOM:
geometry.height += change;
diff_bottom = change;
property_name = "property::titlebar_bottom";
break;
case CLIENT_TITLEBAR_RIGHT:
geometry.width += change;
diff_right = change;
property_name = "property::titlebar_right";
break;
case CLIENT_TITLEBAR_LEFT:
geometry.width += change;
diff_left = change;
property_name = "property::titlebar_left";
break;
default:
fatal("Unknown titlebar kind %d\n", (int) bar);
}
if(c->size_hints.flags & XCB_ICCCM_SIZE_HINT_P_WIN_GRAVITY)
{
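        /* Honor the client's win_gravity: shift the window so its gravity
         * reference point stays fixed while the titlebar edges grow or shrink. */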
int16_t diff_x = 0, diff_y = 0;
xwindow_translate_for_gravity(c->size_hints.win_gravity,
diff_left, diff_top,
diff_right, diff_bottom,
&diff_x, &diff_y);
geometry.x += diff_x;
geometry.y += diff_y;
}
c->titlebar[bar].size = size;
client_resize_do(c, geometry, true);
luaA_object_emit_signal(L, cidx, property_name, 0);
}
#define HANDLE_TITLEBAR(name, index) \
static int \
luaA_client_titlebar_ ## name(lua_State *L) \
{ \
client_t *c = luaA_checkudata(L, 1, &client_class); \
\
if (lua_gettop(L) == 2) \
{ \
if (lua_isnil(L, 2)) \
titlebar_resize(L, 1, c, index, 0); \
else \
titlebar_resize(L, 1, c, index, luaL_checknumber(L, 2)); \
} \
\
luaA_object_push_item(L, 1, titlebar_get_drawable(L, c, 1, index)); \
lua_pushinteger(L, c->titlebar[index].size); \
return 2; \
}
HANDLE_TITLEBAR(top, CLIENT_TITLEBAR_TOP)
HANDLE_TITLEBAR(right, CLIENT_TITLEBAR_RIGHT)
HANDLE_TITLEBAR(bottom, CLIENT_TITLEBAR_BOTTOM)
HANDLE_TITLEBAR(left, CLIENT_TITLEBAR_LEFT)
/** Return or set client geometry.
*
* @tparam table|nil geo A table with new coordinates, or nil.
* @treturn table A table with client geometry and coordinates.
* @function geometry
*/
static int
luaA_client_geometry(lua_State *L)
{
client_t *c = luaA_checkudata(L, 1, &client_class);
if(lua_gettop(L) == 2 && !lua_isnil(L, 2))
{
area_t geometry;
luaA_checktable(L, 2);
geometry.x = luaA_getopt_number(L, 2, "x", c->geometry.x);
geometry.y = luaA_getopt_number(L, 2, "y", c->geometry.y);
if(client_isfixed(c))
{
geometry.width = c->geometry.width;
geometry.height = c->geometry.height;
}
else
{
geometry.width = luaA_getopt_number(L, 2, "width", c->geometry.width);
geometry.height = luaA_getopt_number(L, 2, "height", c->geometry.height);
}
client_resize(c, geometry, c->size_hints_honor);
}
return luaA_pusharea(L, c->geometry);
}
/** Apply size hints to a size.
*
* @param width Desired width of client
* @param height Desired height of client
* @return Actual width of client
* @return Actual height of client
* @function apply_size_hints
*/
static int
luaA_client_apply_size_hints(lua_State *L)
{
client_t *c = luaA_checkudata(L, 1, &client_class);
area_t geometry = c->geometry;
if(!client_isfixed(c))
{
geometry.width = luaL_checknumber(L, 2);
geometry.height = luaL_checknumber(L, 3);
}
if (c->size_hints_honor)
geometry = client_apply_size_hints(c, geometry);
lua_pushinteger(L, geometry.width);
lua_pushinteger(L, geometry.height);
return 2;
}
static int
luaA_client_set_screen(lua_State *L, client_t *c)
{
screen_client_moveto(c, luaA_checkscreen(L, -1), true);
return 0;
}
static int
luaA_client_set_hidden(lua_State *L, client_t *c)
{
client_set_hidden(L, -3, luaA_checkboolean(L, -1));
return 0;
}
static int
luaA_client_set_minimized(lua_State *L, client_t *c)
{
client_set_minimized(L, -3, luaA_checkboolean(L, -1));
return 0;
}
static int
luaA_client_set_fullscreen(lua_State *L, client_t *c)
{
client_set_fullscreen(L, -3, luaA_checkboolean(L, -1));
return 0;
}
static int
luaA_client_set_modal(lua_State *L, client_t *c)
{
client_set_modal(L, -3, luaA_checkboolean(L, -1));
return 0;
}
static int
luaA_client_set_maximized(lua_State *L, client_t *c)
{
client_set_maximized(L, -3, luaA_checkboolean(L, -1));
return 0;
}
static int
luaA_client_set_maximized_horizontal(lua_State *L, client_t *c)
{
client_set_maximized_horizontal(L, -3, luaA_checkboolean(L, -1));
return 0;
}
static int
luaA_client_set_maximized_vertical(lua_State *L, client_t *c)
{
client_set_maximized_vertical(L, -3, luaA_checkboolean(L, -1));
return 0;
}
static int
luaA_client_set_icon(lua_State *L, client_t *c)
{
cairo_surface_t *surf = NULL;
if(!lua_isnil(L, -1))
surf = (cairo_surface_t *)lua_touserdata(L, -1);
client_set_icon(c, surf);
return 0;
}
static int
luaA_client_set_focusable(lua_State *L, client_t *c)
{
if(lua_isnil(L, -1))
client_unset_focusable(L, -3);
else
client_set_focusable(L, -3, luaA_checkboolean(L, -1));
return 0;
}
static int
luaA_client_set_sticky(lua_State *L, client_t *c)
{
client_set_sticky(L, -3, luaA_checkboolean(L, -1));
return 0;
}
static int
luaA_client_set_size_hints_honor(lua_State *L, client_t *c)
{
c->size_hints_honor = luaA_checkboolean(L, -1);
luaA_object_emit_signal(L, -3, "property::size_hints_honor", 0);
return 0;
}
static int
luaA_client_set_ontop(lua_State *L, client_t *c)
{
client_set_ontop(L, -3, luaA_checkboolean(L, -1));
return 0;
}
static int
luaA_client_set_below(lua_State *L, client_t *c)
{
client_set_below(L, -3, luaA_checkboolean(L, -1));
return 0;
}
static int
luaA_client_set_above(lua_State *L, client_t *c)
{
client_set_above(L, -3, luaA_checkboolean(L, -1));
return 0;
}
static int
luaA_client_set_urgent(lua_State *L, client_t *c)
{
client_set_urgent(L, -3, luaA_checkboolean(L, -1));
return 0;
}
static int
luaA_client_set_skip_taskbar(lua_State *L, client_t *c)
{
client_set_skip_taskbar(L, -3, luaA_checkboolean(L, -1));
return 0;
}
static int
luaA_client_get_name(lua_State *L, client_t *c)
{
lua_pushstring(L, c->name ? c->name : c->alt_name);
return 1;
}
/** Set the client name.
* \param L The Lua VM state.
* \param client The client to name.
* \return The number of elements pushed on stack.
*/
static int
luaA_client_set_name(lua_State *L, client_t *c)
{
const char *name = luaL_checkstring(L, -1);
client_set_name(L, 1, a_strdup(name));
return 0;
}
static int
luaA_client_get_icon_name(lua_State *L, client_t *c)
{
lua_pushstring(L, c->icon_name ? c->icon_name : c->alt_icon_name);
return 1;
}
LUA_OBJECT_EXPORT_PROPERTY(client, client_t, class, lua_pushstring)
LUA_OBJECT_EXPORT_PROPERTY(client, client_t, instance, lua_pushstring)
LUA_OBJECT_EXPORT_PROPERTY(client, client_t, machine, lua_pushstring)
LUA_OBJECT_EXPORT_PROPERTY(client, client_t, role, lua_pushstring)
LUA_OBJECT_EXPORT_PROPERTY(client, client_t, transient_for, luaA_object_push)
LUA_OBJECT_EXPORT_PROPERTY(client, client_t, skip_taskbar, lua_pushboolean)
LUA_OBJECT_EXPORT_PROPERTY(client, client_t, leader_window, lua_pushinteger)
LUA_OBJECT_EXPORT_PROPERTY(client, client_t, group_window, lua_pushinteger)
LUA_OBJECT_EXPORT_PROPERTY(client, client_t, pid, lua_pushinteger)
LUA_OBJECT_EXPORT_PROPERTY(client, client_t, hidden, lua_pushboolean)
LUA_OBJECT_EXPORT_PROPERTY(client, client_t, minimized, lua_pushboolean)
LUA_OBJECT_EXPORT_PROPERTY(client, client_t, fullscreen, lua_pushboolean)
LUA_OBJECT_EXPORT_PROPERTY(client, client_t, modal, lua_pushboolean)
LUA_OBJECT_EXPORT_PROPERTY(client, client_t, ontop, lua_pushboolean)
LUA_OBJECT_EXPORT_PROPERTY(client, client_t, urgent, lua_pushboolean)
LUA_OBJECT_EXPORT_PROPERTY(client, client_t, above, lua_pushboolean)
LUA_OBJECT_EXPORT_PROPERTY(client, client_t, below, lua_pushboolean)
LUA_OBJECT_EXPORT_PROPERTY(client, client_t, sticky, lua_pushboolean)
LUA_OBJECT_EXPORT_PROPERTY(client, client_t, size_hints_honor, lua_pushboolean)
LUA_OBJECT_EXPORT_PROPERTY(client, client_t, maximized_horizontal, lua_pushboolean)
LUA_OBJECT_EXPORT_PROPERTY(client, client_t, maximized_vertical, lua_pushboolean)
LUA_OBJECT_EXPORT_PROPERTY(client, client_t, startup_id, lua_pushstring)
static int
luaA_client_get_maximized(lua_State *L, client_t *c)
{
lua_pushboolean(L, client_get_maximized(c));
return 1;
}
static int
luaA_client_get_content(lua_State *L, client_t *c)
{
cairo_surface_t *surface;
int width = c->geometry.width;
int height = c->geometry.height;
/* Just the client size without decorations */
width -= c->titlebar[CLIENT_TITLEBAR_LEFT].size + c->titlebar[CLIENT_TITLEBAR_RIGHT].size;
height -= c->titlebar[CLIENT_TITLEBAR_TOP].size + c->titlebar[CLIENT_TITLEBAR_BOTTOM].size;
surface = cairo_xcb_surface_create(globalconf.connection, c->window,
c->visualtype, width, height);
/* lua has to make sure to free the ref or we have a leak */
lua_pushlightuserdata(L, surface);
return 1;
}
static int
luaA_client_get_screen(lua_State *L, client_t *c)
{
if(!c->screen)
return 0;
lua_pushinteger(L, screen_get_index(c->screen));
return 1;
}
static int
luaA_client_get_icon(lua_State *L, client_t *c)
{
if(!c->icon)
return 0;
/* lua gets its own reference which it will have to destroy */
lua_pushlightuserdata(L, cairo_surface_reference(c->icon));
return 1;
}
static int
luaA_client_get_focusable(lua_State *L, client_t *c)
{
bool ret;
if (c->focusable_set)
ret = c->focusable;
    /* A client can be focused if it doesn't have the "nofocus" hint... */
else if (!c->nofocus)
ret = true;
else
/* ...or if it knows the WM_TAKE_FOCUS protocol */
ret = client_hasproto(c, WM_TAKE_FOCUS);
lua_pushboolean(L, ret);
return 1;
}
static int
luaA_client_get_size_hints(lua_State *L, client_t *c)
{
const char *u_or_p = NULL;
lua_createtable(L, 0, 1);
if(c->size_hints.flags & XCB_ICCCM_SIZE_HINT_US_POSITION)
u_or_p = "user_position";
else if(c->size_hints.flags & XCB_ICCCM_SIZE_HINT_P_POSITION)
u_or_p = "program_position";
if(u_or_p)
{
lua_createtable(L, 0, 2);
lua_pushinteger(L, c->size_hints.x);
lua_setfield(L, -2, "x");
lua_pushinteger(L, c->size_hints.y);
lua_setfield(L, -2, "y");
lua_setfield(L, -2, u_or_p);
u_or_p = NULL;
}
if(c->size_hints.flags & XCB_ICCCM_SIZE_HINT_US_SIZE)
u_or_p = "user_size";
else if(c->size_hints.flags & XCB_ICCCM_SIZE_HINT_P_SIZE)
u_or_p = "program_size";
if(u_or_p)
{
lua_createtable(L, 0, 2);
lua_pushinteger(L, c->size_hints.width);
lua_setfield(L, -2, "width");
lua_pushinteger(L, c->size_hints.height);
lua_setfield(L, -2, "height");
lua_setfield(L, -2, u_or_p);
}
if(c->size_hints.flags & XCB_ICCCM_SIZE_HINT_P_MIN_SIZE)
{
lua_pushinteger(L, c->size_hints.min_width);
lua_setfield(L, -2, "min_width");
lua_pushinteger(L, c->size_hints.min_height);
lua_setfield(L, -2, "min_height");
}
if(c->size_hints.flags & XCB_ICCCM_SIZE_HINT_P_MAX_SIZE)
{
lua_pushinteger(L, c->size_hints.max_width);
lua_setfield(L, -2, "max_width");
lua_pushinteger(L, c->size_hints.max_height);
lua_setfield(L, -2, "max_height");
}
if(c->size_hints.flags & XCB_ICCCM_SIZE_HINT_P_RESIZE_INC)
{
lua_pushinteger(L, c->size_hints.width_inc);
lua_setfield(L, -2, "width_inc");
lua_pushinteger(L, c->size_hints.height_inc);
lua_setfield(L, -2, "height_inc");
}
if(c->size_hints.flags & XCB_ICCCM_SIZE_HINT_P_ASPECT)
{
lua_pushinteger(L, c->size_hints.min_aspect_num);
lua_setfield(L, -2, "min_aspect_num");
lua_pushinteger(L, c->size_hints.min_aspect_den);
lua_setfield(L, -2, "min_aspect_den");
lua_pushinteger(L, c->size_hints.max_aspect_num);
lua_setfield(L, -2, "max_aspect_num");
lua_pushinteger(L, c->size_hints.max_aspect_den);
lua_setfield(L, -2, "max_aspect_den");
}
if(c->size_hints.flags & XCB_ICCCM_SIZE_HINT_BASE_SIZE)
{
lua_pushinteger(L, c->size_hints.base_width);
lua_setfield(L, -2, "base_width");
lua_pushinteger(L, c->size_hints.base_height);
lua_setfield(L, -2, "base_height");
}
if(c->size_hints.flags & XCB_ICCCM_SIZE_HINT_P_WIN_GRAVITY)
{
switch(c->size_hints.win_gravity)
{
default:
lua_pushliteral(L, "north_west");
break;
case XCB_GRAVITY_NORTH:
lua_pushliteral(L, "north");
break;
case XCB_GRAVITY_NORTH_EAST:
lua_pushliteral(L, "north_east");
break;
case XCB_GRAVITY_WEST:
lua_pushliteral(L, "west");
break;
case XCB_GRAVITY_CENTER:
lua_pushliteral(L, "center");
break;
case XCB_GRAVITY_EAST:
lua_pushliteral(L, "east");
break;
case XCB_GRAVITY_SOUTH_WEST:
lua_pushliteral(L, "south_west");
break;
case XCB_GRAVITY_SOUTH:
lua_pushliteral(L, "south");
break;
case XCB_GRAVITY_SOUTH_EAST:
lua_pushliteral(L, "south_east");
break;
case XCB_GRAVITY_STATIC:
lua_pushliteral(L, "static");
break;
}
lua_setfield(L, -2, "win_gravity");
}
return 1;
}
/** Get the client's child window bounding shape.
* \param L The Lua VM state.
* \param client The client object.
* \return The number of elements pushed on stack.
*/
static int
luaA_client_get_client_shape_bounding(lua_State *L, client_t *c)
{
cairo_surface_t *surf = xwindow_get_shape(c->window, XCB_SHAPE_SK_BOUNDING);
if (!surf)
return 0;
/* lua has to make sure to free the ref or we have a leak */
lua_pushlightuserdata(L, surf);
return 1;
}
/** Get the client's frame window bounding shape.
* \param L The Lua VM state.
* \param client The client object.
* \return The number of elements pushed on stack.
*/
static int
luaA_client_get_shape_bounding(lua_State *L, client_t *c)
{
cairo_surface_t *surf = xwindow_get_shape(c->frame_window, XCB_SHAPE_SK_BOUNDING);
if (!surf)
return 0;
/* lua has to make sure to free the ref or we have a leak */
lua_pushlightuserdata(L, surf);
return 1;
}
/** Set the client's frame window bounding shape.
* \param L The Lua VM state.
* \param client The client object.
* \return The number of elements pushed on stack.
*/
static int
luaA_client_set_shape_bounding(lua_State *L, client_t *c)
{
cairo_surface_t *surf = NULL;
if(!lua_isnil(L, -1))
surf = (cairo_surface_t *)lua_touserdata(L, -1);
xwindow_set_shape(c->frame_window,
c->geometry.width + (c->border_width * 2),
c->geometry.height + (c->border_width * 2),
XCB_SHAPE_SK_BOUNDING, surf, -c->border_width);
luaA_object_emit_signal(L, -3, "property::shape_bounding", 0);
return 0;
}
/** Get the client's child window clip shape.
* \param L The Lua VM state.
* \param client The client object.
* \return The number of elements pushed on stack.
*/
static int
luaA_client_get_client_shape_clip(lua_State *L, client_t *c)
{
cairo_surface_t *surf = xwindow_get_shape(c->window, XCB_SHAPE_SK_CLIP);
if (!surf)
return 0;
/* lua has to make sure to free the ref or we have a leak */
lua_pushlightuserdata(L, surf);
return 1;
}
/** Get the client's frame window clip shape.
* \param L The Lua VM state.
* \param client The client object.
* \return The number of elements pushed on stack.
*/
static int
luaA_client_get_shape_clip(lua_State *L, client_t *c)
{
cairo_surface_t *surf = xwindow_get_shape(c->frame_window, XCB_SHAPE_SK_CLIP);
if (!surf)
return 0;
/* lua has to make sure to free the ref or we have a leak */
lua_pushlightuserdata(L, surf);
return 1;
}
/** Set the client's frame window clip shape.
* \param L The Lua VM state.
* \param client The client object.
* \return The number of elements pushed on stack.
*/
static int
luaA_client_set_shape_clip(lua_State *L, client_t *c)
{
cairo_surface_t *surf = NULL;
if(!lua_isnil(L, -1))
surf = (cairo_surface_t *)lua_touserdata(L, -1);
xwindow_set_shape(c->frame_window, c->geometry.width, c->geometry.height,
XCB_SHAPE_SK_CLIP, surf, 0);
luaA_object_emit_signal(L, -3, "property::shape_clip", 0);
return 0;
}
/** Get or set keys bindings for a client.
*
* @param keys_table An array of key bindings objects, or nothing.
* @return A table with all keys.
* @function keys
*/
static int
luaA_client_keys(lua_State *L)
{
client_t *c = luaA_checkudata(L, 1, &client_class);
key_array_t *keys = &c->keys;
if(lua_gettop(L) == 2)
{
luaA_key_array_set(L, 1, 2, keys);
luaA_object_emit_signal(L, 1, "property::keys", 0);
xwindow_grabkeys(c->window, keys);
}
return luaA_key_array_get(L, 1, keys);
}
static int
client_tostring(lua_State *L, client_t *c)
{
char *name = c->name ? c->name : c->alt_name;
ssize_t len = a_strlen(name);
ssize_t limit = 20;
lua_pushlstring(L, name, MIN(len, limit));
if (len > limit)
lua_pushstring(L, "...");
return len > limit ? 2 : 1;
}
/* Client module.
* \param L The Lua VM state.
* \return The number of pushed elements.
*/
static int
luaA_client_module_index(lua_State *L)
{
const char *buf = luaL_checkstring(L, 2);
if (A_STREQ(buf, "focus"))
return luaA_object_push(L, globalconf.focus.client);
return 0;
}
/* Client module new index.
* \param L The Lua VM state.
* \return The number of pushed elements.
*/
static int
luaA_client_module_newindex(lua_State *L)
{
const char *buf = luaL_checkstring(L, 2);
client_t *c;
if (A_STREQ(buf, "focus"))
{
c = luaA_checkudataornil(L, 3, &client_class);
if (c)
client_focus(c);
else if (globalconf.focus.client)
client_unfocus(globalconf.focus.client);
}
return 0;
}
static bool
client_checker(client_t *c)
{
return c->window != XCB_NONE;
}
void
client_class_setup(lua_State *L)
{
static const struct luaL_Reg client_methods[] =
{
LUA_CLASS_METHODS(client)
{ "get", luaA_client_get },
{ "__index", luaA_client_module_index },
{ "__newindex", luaA_client_module_newindex },
{ NULL, NULL }
};
static const struct luaL_Reg client_meta[] =
{
LUA_OBJECT_META(client)
LUA_CLASS_META
{ "keys", luaA_client_keys },
{ "isvisible", luaA_client_isvisible },
{ "geometry", luaA_client_geometry },
{ "apply_size_hints", luaA_client_apply_size_hints },
{ "tags", luaA_client_tags },
{ "kill", luaA_client_kill },
{ "swap", luaA_client_swap },
{ "raise", luaA_client_raise },
{ "lower", luaA_client_lower },
{ "unmanage", luaA_client_unmanage },
{ "titlebar_top", luaA_client_titlebar_top },
{ "titlebar_right", luaA_client_titlebar_right },
{ "titlebar_bottom", luaA_client_titlebar_bottom },
{ "titlebar_left", luaA_client_titlebar_left },
{ NULL, NULL }
};
luaA_class_setup(L, &client_class, "client", &window_class,
(lua_class_allocator_t) client_new,
(lua_class_collector_t) client_wipe,
(lua_class_checker_t) client_checker,
luaA_class_index_miss_property, luaA_class_newindex_miss_property,
client_methods, client_meta);
luaA_class_set_tostring(&client_class, (lua_class_propfunc_t) client_tostring);
luaA_class_add_property(&client_class, "name",
(lua_class_propfunc_t) luaA_client_set_name,
(lua_class_propfunc_t) luaA_client_get_name,
(lua_class_propfunc_t) luaA_client_set_name);
luaA_class_add_property(&client_class, "transient_for",
NULL,
(lua_class_propfunc_t) luaA_client_get_transient_for,
NULL);
luaA_class_add_property(&client_class, "skip_taskbar",
(lua_class_propfunc_t) luaA_client_set_skip_taskbar,
(lua_class_propfunc_t) luaA_client_get_skip_taskbar,
(lua_class_propfunc_t) luaA_client_set_skip_taskbar);
luaA_class_add_property(&client_class, "content",
NULL,
(lua_class_propfunc_t) luaA_client_get_content,
NULL);
luaA_class_add_property(&client_class, "type",
NULL,
(lua_class_propfunc_t) luaA_window_get_type,
NULL);
luaA_class_add_property(&client_class, "class",
NULL,
(lua_class_propfunc_t) luaA_client_get_class,
NULL);
luaA_class_add_property(&client_class, "instance",
NULL,
(lua_class_propfunc_t) luaA_client_get_instance,
NULL);
luaA_class_add_property(&client_class, "role",
NULL,
(lua_class_propfunc_t) luaA_client_get_role,
NULL);
luaA_class_add_property(&client_class, "pid",
NULL,
(lua_class_propfunc_t) luaA_client_get_pid,
NULL);
luaA_class_add_property(&client_class, "leader_window",
NULL,
(lua_class_propfunc_t) luaA_client_get_leader_window,
NULL);
luaA_class_add_property(&client_class, "machine",
NULL,
(lua_class_propfunc_t) luaA_client_get_machine,
NULL);
luaA_class_add_property(&client_class, "icon_name",
NULL,
(lua_class_propfunc_t) luaA_client_get_icon_name,
NULL);
luaA_class_add_property(&client_class, "screen",
NULL,
(lua_class_propfunc_t) luaA_client_get_screen,
(lua_class_propfunc_t) luaA_client_set_screen);
luaA_class_add_property(&client_class, "hidden",
(lua_class_propfunc_t) luaA_client_set_hidden,
(lua_class_propfunc_t) luaA_client_get_hidden,
(lua_class_propfunc_t) luaA_client_set_hidden);
luaA_class_add_property(&client_class, "minimized",
(lua_class_propfunc_t) luaA_client_set_minimized,
(lua_class_propfunc_t) luaA_client_get_minimized,
(lua_class_propfunc_t) luaA_client_set_minimized);
luaA_class_add_property(&client_class, "fullscreen",
(lua_class_propfunc_t) luaA_client_set_fullscreen,
(lua_class_propfunc_t) luaA_client_get_fullscreen,
(lua_class_propfunc_t) luaA_client_set_fullscreen);
luaA_class_add_property(&client_class, "modal",
(lua_class_propfunc_t) luaA_client_set_modal,
(lua_class_propfunc_t) luaA_client_get_modal,
(lua_class_propfunc_t) luaA_client_set_modal);
luaA_class_add_property(&client_class, "group_window",
NULL,
(lua_class_propfunc_t) luaA_client_get_group_window,
NULL);
luaA_class_add_property(&client_class, "maximized",
(lua_class_propfunc_t) luaA_client_set_maximized,
(lua_class_propfunc_t) luaA_client_get_maximized,
(lua_class_propfunc_t) luaA_client_set_maximized);
luaA_class_add_property(&client_class, "maximized_horizontal",
(lua_class_propfunc_t) luaA_client_set_maximized_horizontal,
(lua_class_propfunc_t) luaA_client_get_maximized_horizontal,
(lua_class_propfunc_t) luaA_client_set_maximized_horizontal);
luaA_class_add_property(&client_class, "maximized_vertical",
(lua_class_propfunc_t) luaA_client_set_maximized_vertical,
(lua_class_propfunc_t) luaA_client_get_maximized_vertical,
(lua_class_propfunc_t) luaA_client_set_maximized_vertical);
luaA_class_add_property(&client_class, "icon",
(lua_class_propfunc_t) luaA_client_set_icon,
(lua_class_propfunc_t) luaA_client_get_icon,
(lua_class_propfunc_t) luaA_client_set_icon);
luaA_class_add_property(&client_class, "ontop",
(lua_class_propfunc_t) luaA_client_set_ontop,
(lua_class_propfunc_t) luaA_client_get_ontop,
(lua_class_propfunc_t) luaA_client_set_ontop);
luaA_class_add_property(&client_class, "above",
(lua_class_propfunc_t) luaA_client_set_above,
(lua_class_propfunc_t) luaA_client_get_above,
(lua_class_propfunc_t) luaA_client_set_above);
luaA_class_add_property(&client_class, "below",
(lua_class_propfunc_t) luaA_client_set_below,
(lua_class_propfunc_t) luaA_client_get_below,
(lua_class_propfunc_t) luaA_client_set_below);
luaA_class_add_property(&client_class, "sticky",
(lua_class_propfunc_t) luaA_client_set_sticky,
(lua_class_propfunc_t) luaA_client_get_sticky,
(lua_class_propfunc_t) luaA_client_set_sticky);
luaA_class_add_property(&client_class, "size_hints_honor",
(lua_class_propfunc_t) luaA_client_set_size_hints_honor,
(lua_class_propfunc_t) luaA_client_get_size_hints_honor,
(lua_class_propfunc_t) luaA_client_set_size_hints_honor);
luaA_class_add_property(&client_class, "urgent",
(lua_class_propfunc_t) luaA_client_set_urgent,
(lua_class_propfunc_t) luaA_client_get_urgent,
(lua_class_propfunc_t) luaA_client_set_urgent);
luaA_class_add_property(&client_class, "size_hints",
NULL,
(lua_class_propfunc_t) luaA_client_get_size_hints,
NULL);
luaA_class_add_property(&client_class, "focusable",
(lua_class_propfunc_t) luaA_client_set_focusable,
(lua_class_propfunc_t) luaA_client_get_focusable,
(lua_class_propfunc_t) luaA_client_set_focusable);
luaA_class_add_property(&client_class, "shape_bounding",
(lua_class_propfunc_t) luaA_client_set_shape_bounding,
(lua_class_propfunc_t) luaA_client_get_shape_bounding,
(lua_class_propfunc_t) luaA_client_set_shape_bounding);
luaA_class_add_property(&client_class, "shape_clip",
(lua_class_propfunc_t) luaA_client_set_shape_clip,
(lua_class_propfunc_t) luaA_client_get_shape_clip,
(lua_class_propfunc_t) luaA_client_set_shape_clip);
luaA_class_add_property(&client_class, "startup_id",
NULL,
(lua_class_propfunc_t) luaA_client_get_startup_id,
NULL);
luaA_class_add_property(&client_class, "client_shape_bounding",
NULL,
(lua_class_propfunc_t) luaA_client_get_client_shape_bounding,
NULL);
luaA_class_add_property(&client_class, "client_shape_clip",
NULL,
(lua_class_propfunc_t) luaA_client_get_client_shape_clip,
NULL);
luaA_class_add_property(&client_class, "first_tag",
NULL,
(lua_class_propfunc_t) luaA_client_get_first_tag,
NULL);
/** When a client gains focus.
* @signal .focus
*/
signal_add(&client_class.signals, "focus");
/** Before manage, after unmanage, and when clients swap.
* @signal .list
*/
signal_add(&client_class.signals, "list");
/**
* @signal .manage
*/
signal_add(&client_class.signals, "manage");
/**
* @signal button::press
*/
signal_add(&client_class.signals, "button::press");
/**
* @signal button::release
*/
signal_add(&client_class.signals, "button::release");
/**
* @signal mouse::enter
*/
signal_add(&client_class.signals, "mouse::enter");
/**
* @signal mouse::leave
*/
signal_add(&client_class.signals, "mouse::leave");
/**
* @signal mouse::move
*/
signal_add(&client_class.signals, "mouse::move");
/**
* @signal property::above
*/
signal_add(&client_class.signals, "property::above");
/**
* @signal property::below
*/
signal_add(&client_class.signals, "property::below");
/**
* @signal property::class
*/
signal_add(&client_class.signals, "property::class");
/**
* @signal property::focusable
*/
signal_add(&client_class.signals, "property::focusable");
/**
* @signal property::fullscreen
*/
signal_add(&client_class.signals, "property::fullscreen");
/**
* @signal property::geometry
*/
signal_add(&client_class.signals, "property::geometry");
/**
* @signal property::group_window
*/
signal_add(&client_class.signals, "property::group_window");
/**
* @signal property::height
*/
signal_add(&client_class.signals, "property::height");
/**
* @signal property::hidden
*/
signal_add(&client_class.signals, "property::hidden");
/**
* @signal property::icon
*/
signal_add(&client_class.signals, "property::icon");
/**
* @signal property::icon_name
*/
signal_add(&client_class.signals, "property::icon_name");
/**
* @signal property::instance
*/
signal_add(&client_class.signals, "property::instance");
/**
* @signal property::keys
*/
signal_add(&client_class.signals, "property::keys");
/**
* @signal property::machine
*/
signal_add(&client_class.signals, "property::machine");
/**
* @signal property::maximized
*/
signal_add(&client_class.signals, "property::maximized");
/**
* @signal property::maximized_horizontal
*/
signal_add(&client_class.signals, "property::maximized_horizontal");
/**
* @signal property::maximized_vertical
*/
signal_add(&client_class.signals, "property::maximized_vertical");
/**
* @signal property::minimized
*/
signal_add(&client_class.signals, "property::minimized");
/**
* @signal property::modal
*/
signal_add(&client_class.signals, "property::modal");
/**
* @signal property::name
*/
signal_add(&client_class.signals, "property::name");
/**
* @signal property::ontop
*/
signal_add(&client_class.signals, "property::ontop");
/**
* @signal property::pid
*/
signal_add(&client_class.signals, "property::pid");
/**
* @signal property::role
*/
signal_add(&client_class.signals, "property::role");
/**
* @signal property::screen
*/
signal_add(&client_class.signals, "property::screen");
/**
* @signal property::shape_bounding
*/
signal_add(&client_class.signals, "property::shape_bounding");
/**
* @signal property::shape_client_bounding
*/
signal_add(&client_class.signals, "property::shape_client_bounding");
/**
* @signal property::shape_client_clip
*/
signal_add(&client_class.signals, "property::shape_client_clip");
/**
* @signal property::shape_clip
*/
signal_add(&client_class.signals, "property::shape_clip");
/**
* @signal property::size_hints_honor
*/
signal_add(&client_class.signals, "property::size_hints_honor");
/**
* @signal property::skip_taskbar
*/
signal_add(&client_class.signals, "property::skip_taskbar");
/**
* @signal property::sticky
*/
signal_add(&client_class.signals, "property::sticky");
/**
* @signal property::struts
*/
signal_add(&client_class.signals, "property::struts");
/**
* @signal property::titlebar_bottom
*/
signal_add(&client_class.signals, "property::titlebar_bottom");
/**
* @signal property::titlebar_left
*/
signal_add(&client_class.signals, "property::titlebar_left");
/**
* @signal property::titlebar_right
*/
signal_add(&client_class.signals, "property::titlebar_right");
/**
* @signal property::titlebar_top
*/
signal_add(&client_class.signals, "property::titlebar_top");
/**
* @signal property::transient_for
*/
signal_add(&client_class.signals, "property::transient_for");
/**
* @signal property::type
*/
signal_add(&client_class.signals, "property::type");
/**
* @signal property::urgent
*/
signal_add(&client_class.signals, "property::urgent");
/**
* @signal property::width
*/
signal_add(&client_class.signals, "property::width");
/**
* @signal property::window
*/
signal_add(&client_class.signals, "property::window");
/**
* @signal property::x
*/
signal_add(&client_class.signals, "property::x");
/**
* @signal property::y
*/
signal_add(&client_class.signals, "property::y");
/** When a client should get activated (focused and/or raised).
*
* Default implementation: `awful.ewmh.activate`.
* @signal request::activate
* @tparam string context The context where this signal was used.
* @tparam[opt] table hints A table with additional hints:
* @tparam[opt=false] boolean hints.raise should the client be raised?
*/
signal_add(&client_class.signals, "request::activate");
/**
* @signal request::fullscreen
*/
signal_add(&client_class.signals, "request::fullscreen");
/**
* @signal request::maximized_horizontal
*/
signal_add(&client_class.signals, "request::maximized_horizontal");
/**
* @signal request::maximized_vertical
*/
signal_add(&client_class.signals, "request::maximized_vertical");
/**
* @signal request::tag
*/
signal_add(&client_class.signals, "request::tag");
/**
* @signal request::urgent
*/
signal_add(&client_class.signals, "request::urgent");
/** When a client gets tagged.
* @signal .tagged
* @tag t The tag object.
*/
signal_add(&client_class.signals, "tagged");
/** When a client gets unfocused.
* @signal .unfocus
*/
signal_add(&client_class.signals, "unfocus");
/**
* @signal .unmanage
*/
signal_add(&client_class.signals, "unmanage");
/** When a client gets untagged.
* @signal .untagged
* @tag t The tag object.
*/
signal_add(&client_class.signals, "untagged");
}
// vim: filetype=c:expandtab:shiftwidth=4:tabstop=8:softtabstop=4:textwidth=80
| 1 | 8,908 | Wouldn't a single signal call be enough? (without the boolean even) | awesomeWM-awesome | c |
@@ -17,6 +17,11 @@
<br/>
<%= note_event(@note.status, @note.closed_at, @note_comments.last.author) %>
<% end %>
+ <% if current_user && current_user != @note.author %>
+ <%= link_to new_report_url(reportable_id: @note.id, reportable_type: @note.class.name), :title => t('browse.note.report') do %>
+ ⚐
+ <% end %>
+ <% end %>
</div>
<% if @note_comments.find { |comment| comment.author.nil? } -%> | 1 | <% set_title(t('browse.note.title', :id => @note.id)) %>
<h2>
<a class="geolink" href="<%= root_path %>"><span class="icon close"></span></a>
<%= t "browse.note.#{@note.status}_title", :note_name => @note.id %>
</h2>
<div class="browse-section">
<h4><%= t('browse.note.description') %></h4>
<div class="note-description">
<%= h(@note_comments.first.body.to_html) %>
</div>
<div class="details" data-coordinates="<%= @note.lat %>,<%= @note.lon %>" data-status="<%= @note.status %>">
<%= note_event('open', @note.created_at, @note.author) %>
<% if @note.status == "closed" %>
<br/>
<%= note_event(@note.status, @note.closed_at, @note_comments.last.author) %>
<% end %>
</div>
<% if @note_comments.find { |comment| comment.author.nil? } -%>
<p class='warning'><%= t "javascripts.notes.show.anonymous_warning" %></p>
<% end -%>
<% if @note_comments.length > 1 %>
<div class='note-comments'>
<ul>
<% @note_comments[1..-1].each do |comment| %>
<li id="c<%= comment.id %>">
<small class='deemphasize'><%= note_event(comment.event, comment.created_at, comment.author) %></small>
<%= comment.body.to_html %>
</li>
<% end %>
</ul>
</div>
<% end %>
<% if @note.status == "open" %>
<form action="#">
<textarea class="comment" name="text" cols="40" rows="5"></textarea>
<div class="buttons clearfix">
<input type="submit" name="hide" value="<%= t('javascripts.notes.show.hide') %>" class="hide_unless_moderator deemphasize" data-note-id="<%= @note.id %>" data-method="DELETE" data-url="<%= note_url(@note, 'json') %>">
<input type="submit" name="close" value="<%= t('javascripts.notes.show.resolve') %>" class="hide_unless_logged_in" data-note-id="<%= @note.id %>" data-method="POST" data-url="<%= close_note_url(@note, 'json') %>">
<input type="submit" name="comment" value="<%= t('javascripts.notes.show.comment') %>" data-note-id="<%= @note.id %>" data-method="POST" data-url="<%= comment_note_url(@note, 'json') %>" disabled="1">
</div>
</form>
<% else %>
<form action="#">
<input type="hidden" name="text" value="">
<div class="buttons clearfix">
<input type="submit" name="hide" value="<%= t('javascripts.notes.show.hide') %>" class="hide_unless_moderator deemphasize" data-note-id="<%= @note.id %>" data-method="DELETE" data-url="<%= note_url(@note, 'json') %>">
<input type="submit" name="reopen" value="<%= t('javascripts.notes.show.reactivate') %>" class="hide_unless_logged_in" data-note-id="<%= @note.id %>" data-method="POST" data-url="<%= reopen_note_url(@note, 'json') %>">
</div>
</form>
<% end %>
</div>
| 1 | 10,838 | I suspect that this whole block, which is going to be repeated a number of times, should probably be in a helper. I guess it would need to be given the object and the title and could probably figure out everything else from that? | openstreetmap-openstreetmap-website | rb |
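One way to act on this review is a view helper that takes the reportable object and a link title and derives everything else. A minimal sketch, assuming standard Rails conventions; the name report_link and its placement in BrowseHelper are hypothetical and not part of the submitted patch:

module BrowseHelper
  # Render a flag link for reporting any reportable object; shown only to
  # logged-in users other than the object's author.
  def report_link(title, reportable)
    return unless current_user && current_user != reportable.author

    link_to "\u2690", # the flag glyph the patch embeds directly
            new_report_url(:reportable_id => reportable.id,
                           :reportable_type => reportable.class.name),
            :title => title
  end
end

The view would then shrink to a single <%= report_link t('browse.note.report'), @note %> call, so the visibility check and URL construction are written once rather than repeated in every template.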
@@ -218,6 +218,9 @@ class Document < AbstractBlock
# Public: Get the Reader associated with this document
attr_reader :reader
+ # Public: Get/Set the PathResolver instance used to resolve paths in this Document.
+ attr_reader :path_resolver
+
# Public: Get the Converter associated with this document
attr_reader :converter
| 1 | # encoding: UTF-8
module Asciidoctor
# Public: The Document class represents a parsed AsciiDoc document.
#
# Document is the root node of a parsed AsciiDoc document. It provides an
# abstract syntax tree (AST) that represents the structure of the AsciiDoc
# document from which the Document object was parsed.
#
# Although the constructor can be used to create an empty document object, more
# commonly, you'll load the document object from AsciiDoc source using the
# primary API methods, {Asciidoctor.load} or {Asciidoctor.load_file}. When
# using one of these APIs, you almost always want to set the safe mode to
# :safe (or :unsafe) to enable all of Asciidoctor's features.
#
# Asciidoctor.load '= Hello, AsciiDoc!', safe: :safe
# # => Asciidoctor::Document { doctype: "article", doctitle: "Hello, Asciidoc!", blocks: 0 }
#
# Instances of this class can be used to extract information from the document
# or alter its structure. As such, the Document object is most often used in
# extensions and by integrations.
#
# The most basic usage of the Document object is to retrieve the document's
# title.
#
# source = '= Document Title'
# document = Asciidoctor.load source, safe: :safe
# document.doctitle
# # => 'Document Title'
#
# If the document has no title, the {Document#doctitle} method returns the
# title of the first section. If that check falls through, you can have the
# method return a fallback value (the value of the untitled-label attribute).
#
# Asciidoctor.load('no doctitle', safe: :safe).doctitle use_fallback: true
# # => "Untitled"
#
# You can also use the Document object to access document attributes defined in
# the header, such as the author and doctype.
#
# source = '= Document Title
# Author Name
# :doctype: book'
# document = Asciidoctor.load source, safe: :safe
# document.author
# # => 'Author Name'
# document.doctype
# # => 'book'
#
# You can retrieve arbitrary document attributes defined in the header using
# {Document#attr} or check for the existence of one using {Document#attr?}:
#
# source = '= Asciidoctor
# :uri-project: https://asciidoctor.org'
# document = Asciidoctor.load source, safe: :safe
# document.attr 'uri-project'
# # => 'https://asciidoctor.org'
# document.attr? 'icons'
# # => false
#
# Starting at the Document object, you can begin walking the document tree using
# the {Document#blocks} method:
#
# source = 'paragraph contents
#
# [sidebar]
# sidebar contents'
# doc = Asciidoctor.load source, safe: :safe
# doc.blocks.map {|block| block.context }
# # => [:paragraph, :sidebar]
#
# You can discover block nodes at any depth in the tree using the
# {AbstractBlock#find_by} method.
#
# source = '****
# paragraph in sidebar
# ****'
# doc = Asciidoctor.load source, safe: :safe
# doc.find_by(context: :paragraph).map {|block| block.context }
# # => [:paragraph]
#
# Loading a document object is the first step in the conversion process. You
# can take the process to completion by calling the {Document#convert} method.
class Document < AbstractBlock
Footnote = ::Struct.new :index, :id, :text
class AttributeEntry
attr_reader :name, :value, :negate
def initialize name, value, negate = nil
@name = name
@value = value
@negate = negate.nil? ? value.nil? : negate
end
def save_to block_attributes
(block_attributes[:attribute_entries] ||= []) << self
self
end
end
  # Public: Parses and stores a partitioned title (i.e., title & subtitle).
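  #
  # Examples
  #
  #   # an illustrative sketch based on the constructor below; assumes the
  #   # default ':' separator
  #   title = Title.new 'Main Title: Subtitle'
  #   title.main      # => "Main Title"
  #   title.subtitle  # => "Subtitle"
  #   title.subtitle? # => true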
class Title
attr_reader :main
alias title main
attr_reader :subtitle
attr_reader :combined
def initialize val, opts = {}
# TODO separate sanitization by type (:cdata for HTML/XML, :plain_text for non-SGML, false for none)
if (@sanitized = opts[:sanitize]) && val.include?('<')
val = val.gsub(XmlSanitizeRx, '').squeeze(' ').strip
end
if (sep = opts[:separator] || ':').empty? || !val.include?(sep = %(#{sep} ))
@main = val
@subtitle = nil
else
@main, _, @subtitle = val.rpartition sep
end
@combined = val
end
def sanitized?
@sanitized
end
def subtitle?
@subtitle ? true : false
end
def to_s
@combined
end
end
  # Public: A read-only integer value indicating the level of security that
# should be enforced while processing this document. The value must be
# set in the Document constructor using the :safe option.
#
# A value of 0 (UNSAFE) disables any of the security features enforced
# by Asciidoctor (Ruby is still subject to its own restrictions).
#
# A value of 1 (SAFE) closely parallels safe mode in AsciiDoc. In particular,
# it prevents access to files which reside outside of the parent directory
# of the source file and disables any macro other than the include directive.
#
# A value of 10 (SERVER) disallows the document from setting attributes that
# would affect the conversion of the document, in addition to all the security
# features of SafeMode::SAFE. For instance, this value disallows changing the
# backend or the source-highlighter using an attribute defined in the source
# document. This is the most fundamental level of security for server-side
# deployments (hence the name).
#
# A value of 20 (SECURE) disallows the document from attempting to read files
  # from the file system and including their contents into the document,
  # in addition to all the security features of SafeMode::SERVER. In
# particular, it disallows use of the include::[] directive and the embedding of
# binary content (data uri), stylesheets and JavaScripts referenced by the
# document. (Asciidoctor and trusted extensions may still be allowed to embed
# trusted content into the document).
#
# Since Asciidoctor is aiming for wide adoption, 20 (SECURE) is the default
# value and is recommended for server-side deployments.
#
# A value of 100 (PARANOID) is planned to disallow the use of passthrough
# macros and prevents the document from setting any known attributes in
# addition to all the security features of SafeMode::SECURE. Please note that
# this level is not currently implemented (and therefore not enforced)!
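#
# Example (an illustrative sketch, not part of the original docs):
#
# doc = Asciidoctor.load 'content', safe: :server
# doc.safe
# # => 10
# doc.attr 'safe-mode-name'
# # => 'server'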
attr_reader :safe
# Public: Get the Boolean AsciiDoc compatibility mode
#
# Enabling this attribute activates the following syntax changes:
#
# * single quotes as constrained emphasis formatting marks
# * single backticks parsed as inline literal, formatted as monospace
# * single plus parsed as constrained, monospaced inline formatting
# * double plus parsed as unconstrained, monospaced inline formatting
#
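# For example (an illustrative sketch):
#
# doc = Asciidoctor.load 'text', attributes: { 'compat-mode' => '' }
# doc.compat_mode
# # => true
#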
attr_reader :compat_mode
# Public: Get the cached value of the backend attribute for this document
attr_reader :backend
# Public: Get the cached value of the doctype attribute for this document
attr_reader :doctype
# Public: Get or set the Boolean flag that indicates whether source map information should be tracked by the parser
attr_accessor :sourcemap
# Public: Get the document catalog Hash
attr_reader :catalog
# Public: Alias catalog property as references for backwards compatibility
alias references catalog
# Public: Get the Hash of document counters
attr_reader :counters
# Public: Get the level-0 Section
attr_reader :header
# Public: Get the String base directory for converting this document.
#
# Defaults to directory of the source file.
# If the source is a string, defaults to the current directory.
attr_reader :base_dir
# Public: Get the Hash of resolved options used to initialize this Document
attr_reader :options
# Public: Get the outfilesuffix defined at the end of the header.
attr_reader :outfilesuffix
# Public: Get a reference to the parent Document of this nested document.
attr_reader :parent_document
# Public: Get the Reader associated with this document
attr_reader :reader
# Public: Get the Converter associated with this document
attr_reader :converter
# Public: Get the activated Extensions::Registry associated with this document.
attr_reader :extensions
# Public: Initialize a {Document} object.
#
# data - The AsciiDoc source data as a String or String Array. (default: nil)
# options - A Hash of options to control processing (e.g., safe mode value (:safe), backend (:backend),
# header/footer toggle (:header_footer), custom attributes (:attributes)). (default: {})
#
# Duplication of the options Hash is handled in the enclosing API.
#
# Examples
#
# data = File.read filename
# doc = Asciidoctor::Document.new data
# puts doc.convert
def initialize data = nil, options = {}
super self, :document
if (parent_doc = options.delete :parent)
@parent_document = parent_doc
options[:base_dir] ||= parent_doc.base_dir
options[:catalog_assets] = true if parent_doc.options[:catalog_assets]
@catalog = parent_doc.catalog.inject({}) do |accum, (key, table)|
accum[key] = (key == :footnotes ? [] : table)
accum
end
# QUESTION should we support setting attribute in parent document from nested document?
# NOTE we must dup or else all the assignments to the overrides clobbers the real attributes
@attribute_overrides = attr_overrides = parent_doc.attributes.dup
parent_doctype = attr_overrides.delete 'doctype'
attr_overrides.delete 'compat-mode'
attr_overrides.delete 'toc'
attr_overrides.delete 'toc-placement'
attr_overrides.delete 'toc-position'
@safe = parent_doc.safe
@attributes['compat-mode'] = '' if (@compat_mode = parent_doc.compat_mode)
@sourcemap = parent_doc.sourcemap
@timings = nil
@converter = parent_doc.converter
initialize_extensions = false
@extensions = parent_doc.extensions
else
@parent_document = nil
@catalog = {
:ids => {},
:refs => {},
:footnotes => [],
:links => [],
:images => [],
:indexterms => [],
:callouts => Callouts.new,
:includes => {},
}
# copy attributes map and normalize keys
# attribute overrides are attributes that can only be set from the commandline
# a direct assignment effectively makes the attribute a constant
# a nil value or name with leading or trailing ! will result in the attribute being unassigned
@attribute_overrides = attr_overrides = {}
(options[:attributes] || {}).each do |key, val|
if key.end_with? '@'
if key.start_with? '!'
key, val = (key.slice 1, key.length), false
elsif key.end_with? '!@'
key, val = (key.slice 0, key.length - 2), false
else
key, val = key.chop, %(#{val}@)
end
elsif key.start_with? '!'
key, val = (key.slice 1, key.length), val == '@' ? false : nil
elsif key.end_with? '!'
key, val = key.chop, val == '@' ? false : nil
end
attr_overrides[key.downcase] = val
end
if (to_file = options[:to_file])
attr_overrides['outfilesuffix'] = ::File.extname to_file
end
# safely resolve the safe mode from const, int or string
if !(safe_mode = options[:safe])
@safe = SafeMode::SECURE
elsif ::Integer === safe_mode
# be permissive in case API user wants to define new levels
@safe = safe_mode
else
# NOTE: not using infix rescue for performance reasons, see https://github.com/jruby/jruby/issues/1816
begin
@safe = SafeMode.value_for_name safe_mode.to_s
rescue
@safe = SafeMode::SECURE
end
end
@compat_mode = attr_overrides.key? 'compat-mode'
@sourcemap = options[:sourcemap]
@timings = options.delete :timings
@converter = nil
initialize_extensions = defined? ::Asciidoctor::Extensions
@extensions = nil # initialized further down
end
@parsed = false
@header = nil
@counters = {}
@attributes_modified = ::Set.new
@docinfo_processor_extensions = {}
header_footer = (options[:header_footer] ||= false)
(@options = options).freeze
attrs = @attributes
#attrs['encoding'] = 'UTF-8'
attrs['sectids'] = ''
attrs['toc-placement'] = 'auto'
if header_footer
attrs['copycss'] = ''
# sync embedded attribute with :header_footer option value
attr_overrides['embedded'] = nil
else
attrs['notitle'] = ''
# sync embedded attribute with :header_footer option value
attr_overrides['embedded'] = ''
end
attrs['stylesheet'] = ''
attrs['webfonts'] = ''
attrs['prewrap'] = ''
attrs['attribute-undefined'] = Compliance.attribute_undefined
attrs['attribute-missing'] = Compliance.attribute_missing
attrs['iconfont-remote'] = ''
# language strings
# TODO load these based on language settings
attrs['caution-caption'] = 'Caution'
attrs['important-caption'] = 'Important'
attrs['note-caption'] = 'Note'
attrs['tip-caption'] = 'Tip'
attrs['warning-caption'] = 'Warning'
attrs['example-caption'] = 'Example'
attrs['figure-caption'] = 'Figure'
#attrs['listing-caption'] = 'Listing'
attrs['table-caption'] = 'Table'
attrs['toc-title'] = 'Table of Contents'
#attrs['preface-title'] = 'Preface'
attrs['section-refsig'] = 'Section'
attrs['part-refsig'] = 'Part'
attrs['chapter-refsig'] = 'Chapter'
attrs['appendix-caption'] = attrs['appendix-refsig'] = 'Appendix'
attrs['untitled-label'] = 'Untitled'
attrs['version-label'] = 'Version'
attrs['last-update-label'] = 'Last updated'
attr_overrides['asciidoctor'] = ''
attr_overrides['asciidoctor-version'] = VERSION
attr_overrides['safe-mode-name'] = (safe_mode_name = SafeMode.name_for_value @safe)
attr_overrides["safe-mode-#{safe_mode_name}"] = ''
attr_overrides['safe-mode-level'] = @safe
# the only way to set the max-include-depth attribute is via the API; default to 64 like AsciiDoc Python
attr_overrides['max-include-depth'] ||= 64
# the only way to set the allow-uri-read attribute is via the API; disabled by default
attr_overrides['allow-uri-read'] ||= nil
attr_overrides['user-home'] = USER_HOME
# legacy support for numbered attribute
attr_overrides['sectnums'] = attr_overrides.delete 'numbered' if attr_overrides.key? 'numbered'
# If the base_dir option is specified, it overrides docdir and is used as the root for relative
# paths. Otherwise, the base_dir is the directory of the source file (docdir), if set, otherwise
# the current directory.
if (base_dir_val = options[:base_dir])
@base_dir = (attr_overrides['docdir'] = ::File.expand_path base_dir_val)
elsif attr_overrides['docdir']
@base_dir = attr_overrides['docdir']
else
#logger.warn 'setting base_dir is recommended when working with string documents' unless nested?
@base_dir = attr_overrides['docdir'] = ::Dir.pwd
end
# allow common attributes backend and doctype to be set using options hash, coerce values to string
if (backend_val = options[:backend])
attr_overrides['backend'] = %(#{backend_val})
end
if (doctype_val = options[:doctype])
attr_overrides['doctype'] = %(#{doctype_val})
end
if @safe >= SafeMode::SERVER
# restrict document from setting copycss, source-highlighter and backend
attr_overrides['copycss'] ||= nil
attr_overrides['source-highlighter'] ||= nil
attr_overrides['backend'] ||= DEFAULT_BACKEND
# restrict document from seeing the docdir and trim docfile to relative path
if !parent_doc && attr_overrides.key?('docfile')
attr_overrides['docfile'] = attr_overrides['docfile'][(attr_overrides['docdir'].length + 1)..-1]
end
attr_overrides['docdir'] = ''
attr_overrides['user-home'] = '.'
if @safe >= SafeMode::SECURE
attr_overrides['max-attribute-value-size'] = 4096 unless attr_overrides.key? 'max-attribute-value-size'
# assign linkcss (preventing css embedding) unless explicitly disabled from the commandline or API
#attr_overrides['linkcss'] = (attr_overrides.fetch 'linkcss', '') || nil
attr_overrides['linkcss'] = '' unless attr_overrides.key? 'linkcss'
# restrict document from enabling icons
attr_overrides['icons'] ||= nil
end
end
# the only way to set the max-attribute-value-size attribute is via the API; disabled by default
@max_attribute_value_size = (size = (attr_overrides['max-attribute-value-size'] ||= nil)) ? size.to_i.abs : nil
attr_overrides.delete_if do |key, val|
if val
# a value ending in @ allows document to override value
if ::String === val && (val.end_with? '@')
val, verdict = val.chop, true
end
attrs[key] = val
else
# a nil or false value both unset the attribute; only a nil value locks it
attrs.delete key
verdict = val == false
end
verdict
end
if parent_doc
@backend = attrs['backend']
# reset doctype unless it matches the default value
unless (@doctype = attrs['doctype'] = parent_doctype) == DEFAULT_DOCTYPE
update_doctype_attributes DEFAULT_DOCTYPE
end
# don't need to do the extra processing within our own document
# FIXME line info isn't reported correctly within include files in nested document
@reader = Reader.new data, options[:cursor]
@source_location = @reader.cursor if @sourcemap
# Now parse the lines in the reader into blocks
# Eagerly parse (for now) since a subdocument is not a publicly accessible object
Parser.parse @reader, self
# should we call some sort of post-parse function?
restore_attributes
@parsed = true
else
# setup default backend and doctype
@backend = nil
if (attrs['backend'] ||= DEFAULT_BACKEND) == 'manpage'
@doctype = attrs['doctype'] = attr_overrides['doctype'] = 'manpage'
else
@doctype = (attrs['doctype'] ||= DEFAULT_DOCTYPE)
end
update_backend_attributes attrs['backend'], true
#attrs['indir'] = attrs['docdir']
#attrs['infile'] = attrs['docfile']
# dynamic intrinsic attribute values
# See https://reproducible-builds.org/specs/source-date-epoch/
# NOTE Opal can't call key? on ENV
now = ::ENV['SOURCE_DATE_EPOCH'] ? ::Time.at(Integer ::ENV['SOURCE_DATE_EPOCH']).utc : ::Time.now
if (localdate = attrs['localdate'])
localyear = (attrs['localyear'] ||= ((localdate.index '-') == 4 ? (localdate.slice 0, 4) : nil))
else
localdate = attrs['localdate'] = (now.strftime '%Y-%m-%d')
localyear = (attrs['localyear'] ||= now.year.to_s)
end
localtime = (attrs['localtime'] ||= begin
now.strftime '%H:%M:%S %Z'
rescue # Asciidoctor.js fails if timezone string has characters outside basic Latin (see asciidoctor.js#23)
now.strftime '%H:%M:%S %z'
end)
attrs['localdatetime'] ||= %(#{localdate} #{localtime})
# docdate, doctime and docdatetime should default to
# localdate, localtime and localdatetime if not otherwise set
attrs['docdate'] ||= localdate
attrs['docyear'] ||= localyear
attrs['doctime'] ||= localtime
attrs['docdatetime'] ||= %(#{localdate} #{localtime})
# fallback directories
attrs['stylesdir'] ||= '.'
attrs['iconsdir'] ||= %(#{attrs.fetch 'imagesdir', './images'}/icons)
if initialize_extensions
if (ext_registry = options[:extension_registry])
# QUESTION should we warn if the value type of this option is not a registry
if Extensions::Registry === ext_registry || (::RUBY_ENGINE_JRUBY &&
::AsciidoctorJ::Extensions::ExtensionRegistry === ext_registry)
@extensions = ext_registry.activate self
end
elsif ::Proc === (ext_block = options[:extensions])
@extensions = Extensions.create(&ext_block).activate self
elsif !Extensions.groups.empty?
@extensions = Extensions::Registry.new.activate self
end
end
@reader = PreprocessorReader.new self, data, (Reader::Cursor.new attrs['docfile'], @base_dir), :normalize => true
@source_location = @reader.cursor if @sourcemap
end
end
# Public: Parse the AsciiDoc source stored in the {Reader} into an abstract syntax tree.
#
# If the data parameter is not nil, create a new {PreprocessorReader} and assign it to the reader
# property of this object. Otherwise, continue with the reader that was created in {#initialize}.
# Pass the reader to {Parser.parse} to parse the source data into an abstract syntax tree.
#
# If parsing has already been performed, this method returns without performing any processing.
#
# data - The optional replacement AsciiDoc source data as a String or String Array. (default: nil)
#
# Returns this [Document]
def parse data = nil
if @parsed
self
else
doc = self
# create reader if data is provided (used when data is not known at the time the Document object is created)
if data
@reader = PreprocessorReader.new doc, data, (Reader::Cursor.new @attributes['docfile'], @base_dir), :normalize => true
@source_location = @reader.cursor if @sourcemap
end
if (exts = @parent_document ? nil : @extensions) && exts.preprocessors?
exts.preprocessors.each do |ext|
@reader = ext.process_method[doc, @reader] || @reader
end
end
# Now parse the lines in the reader into blocks
Parser.parse @reader, doc, :header_only => @options[:parse_header_only]
# should we call some sort of post-parse function?
restore_attributes
if exts && exts.tree_processors?
exts.tree_processors.each do |ext|
if (result = ext.process_method[doc]) && Document === result && result != doc
doc = result
end
end
end
@parsed = true
doc
end
end
# Public: Get the named counter and take the next number in the sequence.
#
# name - the String name of the counter
# seed - the initial value as a String or Integer
#
# returns the next number in the sequence for the specified counter
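#
# Example (illustrative only; the counter name is hypothetical):
#
# doc.counter 'figure-number', 1
# # => 1
# doc.counter 'figure-number'
# # => 2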
def counter name, seed = nil
return @parent_document.counter name, seed if @parent_document
if (attr_seed = !(attr_val = @attributes[name]).nil_or_empty?) && (@counters.key? name)
@attributes[name] = @counters[name] = (nextval attr_val)
elsif seed
@attributes[name] = @counters[name] = (seed == seed.to_i.to_s ? seed.to_i : seed)
else
@attributes[name] = @counters[name] = nextval(attr_seed ? attr_val : 0)
end
end
# Public: Increment the specified counter and store it in the block's attributes
#
# counter_name - the String name of the counter attribute
# block - the Block on which to save the counter
#
# returns the next number in the sequence for the specified counter
def increment_and_store_counter counter_name, block
((AttributeEntry.new counter_name, (counter counter_name)).save_to block.attributes).value
end
# Deprecated: Map old counter_increment method to increment_counter for backwards compatibility
alias counter_increment increment_and_store_counter
# Internal: Get the next value in the sequence.
#
# Handles both integer and character sequences.
#
# current - the value to increment as a String or Integer
#
# returns the next value in the sequence according to the current value's type
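#
# Examples (an illustrative sketch)
#
# nextval 1
# # => 2
# nextval 'a'
# # => 'b'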
def nextval(current)
if ::Integer === current
current + 1
else
intval = current.to_i
if intval.to_s != current.to_s
(current[0].ord + 1).chr
else
intval + 1
end
end
end
def register type, value
case type
when :ids # deprecated
id, reftext = value
@catalog[:ids][id] ||= reftext || ('[' + id + ']')
when :refs
id, ref, reftext = value
unless (refs = @catalog[:refs]).key? id
@catalog[:ids][id] = reftext || ('[' + id + ']')
refs[id] = ref
end
when :footnotes, :indexterms
@catalog[type] << value
else
@catalog[type] << value if @options[:catalog_assets]
end
end
def footnotes?
@catalog[:footnotes].empty? ? false : true
end
def footnotes
@catalog[:footnotes]
end
def callouts
@catalog[:callouts]
end
def nested?
@parent_document ? true : false
end
def embedded?
@attributes.key? 'embedded'
end
def extensions?
@extensions ? true : false
end
# Make the raw source for the Document available.
def source
@reader.source if @reader
end
# Make the raw source lines for the Document available.
def source_lines
@reader.source_lines if @reader
end
def basebackend? base
@attributes['basebackend'] == base
end
# Public: Return the doctitle as a String
#
# Returns the resolved doctitle as a [String] or nil if a doctitle cannot be resolved
def title
doctitle
end
# Public: Set the title on the document header
#
# Set the title of the document header to the specified value. If the header
# does not exist, it is first created.
#
# title - the String title to assign as the title of the document header
#
# Returns the new [String] title assigned to the document header
def title= title
unless (sect = @header)
(sect = (@header = Section.new self, 0)).sectname = 'header'
end
sect.title = title
end
# Public: Resolves the primary title for the document
#
# Searches the locations to find the first non-empty
# value:
#
# * document-level attribute named title
# * header title (known as the document title)
# * title of the first section
# * document-level attribute named untitled-label (if :use_fallback option is set)
#
# If no value can be resolved, nil is returned.
#
# If the :partition attribute is specified, the value is parsed into an Document::Title object.
# If the :sanitize attribute is specified, XML elements are removed from the value.
#
# TODO separate sanitization by type (:cdata for HTML/XML, :plain_text for non-SGML, false for none)
#
# Returns the resolved title as a [Title] if the :partition option is passed or a [String] if not
# or nil if no value can be resolved.
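#
# Example (a hedged sketch; the source document is made up):
#
# doc = Asciidoctor.load '= Main Title: Subtitle'
# (title = doc.doctitle partition: true).main
# # => 'Main Title'
# title.subtitle
# # => 'Subtitle'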
def doctitle opts = {}
unless (val = @attributes['title'])
if (sect = first_section)
val = sect.title
elsif !(opts[:use_fallback] && (val = @attributes['untitled-label']))
return
end
end
if (separator = opts[:partition])
Title.new val, opts.merge({ :separator => (separator == true ? @attributes['title-separator'] : separator) })
elsif opts[:sanitize] && val.include?('<')
val.gsub(XmlSanitizeRx, '').squeeze(' ').strip
else
val
end
end
alias name doctitle
# Public: Convenience method to retrieve the document attribute 'author'
#
# returns the full name of the author as a String
def author
@attributes['author']
end
# Public: Convenience method to retrieve the document attribute 'revdate'
#
# returns the date of last revision for the document as a String
def revdate
@attributes['revdate']
end
def notitle
!@attributes.key?('showtitle') && @attributes.key?('notitle')
end
def noheader
@attributes.key? 'noheader'
end
def nofooter
@attributes.key? 'nofooter'
end
def first_section
@header || @blocks.find {|e| e.context == :section }
end
def has_header?
@header ? true : false
end
alias header? has_header?
# Public: Append a content Block to this Document.
#
# If the child block is a Section, assign an index to it.
#
# block - The child Block to append to this parent Block
#
# Returns The parent Block
def << block
assign_numeral block if block.context == :section
super
end
# Internal: called after the header has been parsed and before the content
# will be parsed.
#--
# QUESTION should we invoke the TreeProcessors here, passing in a phase?
# QUESTION is finalize_header the right name?
def finalize_header unrooted_attributes, header_valid = true
clear_playback_attributes unrooted_attributes
save_attributes
unrooted_attributes['invalid-header'] = true unless header_valid
unrooted_attributes
end
# Internal: Branch the attributes so that the original state can be restored
# at a future time.
def save_attributes
# enable toc and sectnums (i.e., numbered) by default in DocBook backend
# NOTE the attributes_modified should go away once we have a proper attribute storage & tracking facility
if (attrs = @attributes)['basebackend'] == 'docbook'
attrs['toc'] = '' unless attribute_locked?('toc') || @attributes_modified.include?('toc')
attrs['sectnums'] = '' unless attribute_locked?('sectnums') || @attributes_modified.include?('sectnums')
end
unless attrs.key?('doctitle') || !(val = doctitle)
attrs['doctitle'] = val
end
# css-signature cannot be updated after header attributes are processed
@id = attrs['css-signature'] unless @id
toc_position_val = if (toc_val = (attrs.delete('toc2') ? 'left' : attrs['toc']))
# toc-placement allows us to separate position from using fitted slot vs macro
(toc_placement = attrs.fetch('toc-placement', 'macro')) && toc_placement != 'auto' ? toc_placement : attrs['toc-position']
else
nil
end
if toc_val && (!toc_val.empty? || !toc_position_val.nil_or_empty?)
default_toc_position = 'left'
# TODO rename toc2 to aside-toc
default_toc_class = 'toc2'
if !toc_position_val.nil_or_empty?
position = toc_position_val
elsif !toc_val.empty?
position = toc_val
else
position = default_toc_position
end
attrs['toc'] = ''
attrs['toc-placement'] = 'auto'
case position
when 'left', '<', '&lt;'
attrs['toc-position'] = 'left'
when 'right', '>', '&gt;'
attrs['toc-position'] = 'right'
when 'top', '^'
attrs['toc-position'] = 'top'
when 'bottom', 'v'
attrs['toc-position'] = 'bottom'
when 'preamble', 'macro'
attrs['toc-position'] = 'content'
attrs['toc-placement'] = position
default_toc_class = nil
else
attrs.delete 'toc-position'
default_toc_class = nil
end
attrs['toc-class'] ||= default_toc_class if default_toc_class
end
if (@compat_mode = attrs.key? 'compat-mode')
attrs['source-language'] = attrs['language'] if attrs.key? 'language'
end
# NOTE pin the outfilesuffix after the header is parsed
@outfilesuffix = attrs['outfilesuffix']
@header_attributes = attrs.dup
# unfreeze "flexible" attributes
unless @parent_document
FLEXIBLE_ATTRIBUTES.each do |name|
# turning a flexible attribute off should be permanent
# (we may need more config if that's not always the case)
if @attribute_overrides.key?(name) && @attribute_overrides[name]
@attribute_overrides.delete(name)
end
end
end
end
# Internal: Restore the attributes to the previously saved state (attributes in header)
def restore_attributes
@catalog[:callouts].rewind unless @parent_document
@attributes.replace @header_attributes
end
# Internal: Delete any attributes stored for playback
def clear_playback_attributes(attributes)
attributes.delete(:attribute_entries)
end
# Internal: Replay attribute assignments at the block level
def playback_attributes(block_attributes)
if block_attributes.key? :attribute_entries
block_attributes[:attribute_entries].each do |entry|
name = entry.name
if entry.negate
@attributes.delete name
@compat_mode = false if name == 'compat-mode'
else
@attributes[name] = entry.value
@compat_mode = true if name == 'compat-mode'
end
end
end
end
# Public: Set the specified attribute on the document if the name is not locked
#
# If the attribute is locked, false is returned. Otherwise, the value is
# assigned to the attribute name after first performing attribute
# substitutions on the value. If the attribute name is 'backend' or
# 'doctype', then the value of backend-related attributes are updated.
#
# name - the String attribute name
# value - the String attribute value; must not be nil (default: '')
#
# Returns the resolved value if the attribute was set or false if it was not because it's locked.
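#
# Example (hypothetical; assumes the 'icons' attribute is not locked):
#
# doc.set_attribute 'icons', 'font'
# # => 'font'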
def set_attribute name, value = ''
if attribute_locked? name
false
else
if @max_attribute_value_size
resolved_value = (apply_attribute_value_subs value).limit_bytesize @max_attribute_value_size
else
resolved_value = apply_attribute_value_subs value
end
case name
when 'backend'
update_backend_attributes resolved_value, (@attributes_modified.delete? 'htmlsyntax')
when 'doctype'
update_doctype_attributes resolved_value
else
@attributes[name] = resolved_value
end
@attributes_modified << name
resolved_value
end
end
# Public: Delete the specified attribute from the document if the name is not locked
#
# If the attribute is locked, false is returned. Otherwise, the attribute is deleted.
#
# name - the String attribute name
#
# returns true if the attribute was deleted, false if it was not because it's locked
def delete_attribute(name)
if attribute_locked?(name)
false
else
@attributes.delete(name)
@attributes_modified << name
true
end
end
# Public: Determine if the attribute has been locked by being assigned in document options
#
# key - The attribute key to check
#
# Returns true if the attribute is locked, false otherwise
def attribute_locked?(name)
@attribute_overrides.key?(name)
end
# Internal: Apply substitutions to the attribute value
#
# If the value is an inline passthrough macro (e.g., pass:<subs>[value]),
# apply the substitutions defined in <subs> to the value, or leave the value
# unmodified if no substitutions are specified. If the value is not an
# inline passthrough macro, apply header substitutions to the value.
#
# value - The String attribute value on which to perform substitutions
#
# Returns The String value with substitutions performed
def apply_attribute_value_subs value
if AttributeEntryPassMacroRx =~ value
$1 ? (apply_subs $2, (resolve_pass_subs $1)) : $2
else
apply_header_subs value
end
end
# Public: Update the backend attributes to reflect a change in the active backend.
#
# This method also handles updating the related doctype attributes if the
# doctype attribute is assigned at the time this method is called.
#
# Returns the resolved String backend if updated, nothing otherwise.
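#
# Example (an illustrative sketch; 'xhtml' resolves through the backend aliases):
#
# doc.update_backend_attributes 'xhtml'
# # => 'html5' (and sets the htmlsyntax attribute to 'xml')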
def update_backend_attributes new_backend, force = nil
if force || (new_backend && new_backend != @backend)
current_backend, current_basebackend, current_doctype = @backend, (attrs = @attributes)['basebackend'], @doctype
if new_backend.start_with? 'xhtml'
attrs['htmlsyntax'] = 'xml'
new_backend = new_backend.slice 1, new_backend.length
elsif new_backend.start_with? 'html'
attrs['htmlsyntax'] = 'html' unless attrs['htmlsyntax'] == 'xml'
end
if (resolved_backend = BACKEND_ALIASES[new_backend])
new_backend = resolved_backend
end
if current_doctype
if current_backend
attrs.delete %(backend-#{current_backend})
attrs.delete %(backend-#{current_backend}-doctype-#{current_doctype})
end
attrs[%(backend-#{new_backend}-doctype-#{current_doctype})] = ''
attrs[%(doctype-#{current_doctype})] = ''
elsif current_backend
attrs.delete %(backend-#{current_backend})
end
attrs[%(backend-#{new_backend})] = ''
@backend = attrs['backend'] = new_backend
# (re)initialize converter
if Converter::BackendInfo === (@converter = create_converter)
new_basebackend = @converter.basebackend
attrs['outfilesuffix'] = @converter.outfilesuffix unless attribute_locked? 'outfilesuffix'
new_filetype = @converter.filetype
elsif @converter
new_basebackend = new_backend.sub TrailingDigitsRx, ''
if (new_outfilesuffix = DEFAULT_EXTENSIONS[new_basebackend])
new_filetype = new_outfilesuffix.slice 1, new_outfilesuffix.length
else
new_outfilesuffix, new_basebackend, new_filetype = '.html', 'html', 'html'
end
attrs['outfilesuffix'] = new_outfilesuffix unless attribute_locked? 'outfilesuffix'
else
# NOTE ideally we shouldn't need the converter before the converter phase, but we do
raise ::NotImplementedError, %(asciidoctor: FAILED: missing converter for backend '#{new_backend}'. Processing aborted.)
end
if (current_filetype = attrs['filetype'])
attrs.delete %(filetype-#{current_filetype})
end
attrs['filetype'] = new_filetype
attrs[%(filetype-#{new_filetype})] = ''
if (page_width = DEFAULT_PAGE_WIDTHS[new_basebackend])
attrs['pagewidth'] = page_width
else
attrs.delete 'pagewidth'
end
if new_basebackend != current_basebackend
if current_doctype
if current_basebackend
attrs.delete %(basebackend-#{current_basebackend})
attrs.delete %(basebackend-#{current_basebackend}-doctype-#{current_doctype})
end
attrs[%(basebackend-#{new_basebackend}-doctype-#{current_doctype})] = ''
elsif current_basebackend
attrs.delete %(basebackend-#{current_basebackend})
end
attrs[%(basebackend-#{new_basebackend})] = ''
attrs['basebackend'] = new_basebackend
end
return new_backend
end
end
# TODO document me
#
# Returns the String doctype if updated, nothing otherwise.
def update_doctype_attributes new_doctype
if new_doctype && new_doctype != @doctype
current_backend, current_basebackend, current_doctype = @backend, (attrs = @attributes)['basebackend'], @doctype
if current_doctype
attrs.delete %(doctype-#{current_doctype})
if current_backend
attrs.delete %(backend-#{current_backend}-doctype-#{current_doctype})
attrs[%(backend-#{current_backend}-doctype-#{new_doctype})] = ''
end
if current_basebackend
attrs.delete %(basebackend-#{current_basebackend}-doctype-#{current_doctype})
attrs[%(basebackend-#{current_basebackend}-doctype-#{new_doctype})] = ''
end
else
attrs[%(backend-#{current_backend}-doctype-#{new_doctype})] = '' if current_backend
attrs[%(basebackend-#{current_basebackend}-doctype-#{new_doctype})] = '' if current_basebackend
end
attrs[%(doctype-#{new_doctype})] = ''
return @doctype = attrs['doctype'] = new_doctype
end
end
# TODO document me
def create_converter
converter_opts = {}
converter_opts[:htmlsyntax] = @attributes['htmlsyntax']
if (template_dir = @options[:template_dir])
template_dirs = [template_dir]
elsif (template_dirs = @options[:template_dirs])
template_dirs = Array template_dirs
end
if template_dirs
converter_opts[:template_dirs] = template_dirs
converter_opts[:template_cache] = @options.fetch :template_cache, true
converter_opts[:template_engine] = @options[:template_engine]
converter_opts[:template_engine_options] = @options[:template_engine_options]
converter_opts[:eruby] = @options[:eruby]
converter_opts[:safe] = @safe
end
if (converter = @options[:converter])
converter_factory = Converter::Factory.new ::Hash[backend, converter]
else
converter_factory = Converter::Factory.default false
end
# QUESTION should we honor the convert_opts?
# QUESTION should we pass through all options and attributes too?
#converter_opts.update opts
converter_factory.create backend, converter_opts
end
# Public: Convert the AsciiDoc document using the templates
# loaded by the Converter. If a :template_dir is not specified,
# or a template is missing, the converter will fall back to
# using the appropriate built-in template.
def convert opts = {}
@timings.start :convert if @timings
parse unless @parsed
unless @safe >= SafeMode::SERVER || opts.empty?
# QUESTION should we store these on the Document object?
@attributes.delete 'outfile' unless (@attributes['outfile'] = opts['outfile'])
@attributes.delete 'outdir' unless (@attributes['outdir'] = opts['outdir'])
end
# QUESTION should we add extensions that execute before conversion begins?
if doctype == 'inline'
if (block = @blocks[0] || @header)
if block.content_model == :compound || block.content_model == :empty
logger.warn 'no inline candidate; use the inline doctype to convert a single paragraph, verbatim, or raw block'
else
output = block.content
end
end
else
transform = ((opts.key? :header_footer) ? opts[:header_footer] : @options[:header_footer]) ? 'document' : 'embedded'
output = @converter.convert self, transform
end
unless @parent_document
if (exts = @extensions) && exts.postprocessors?
exts.postprocessors.each do |ext|
output = ext.process_method[self, output]
end
end
end
@timings.record :convert if @timings
output
end
# Alias render to convert to maintain backwards compatibility
alias render convert
# Public: Write the output to the specified file
#
# If the converter responds to :write, delegate the work of writing the file
# to that method. Otherwise, write the output to the specified file.
#
# Returns nothing
def write output, target
@timings.start :write if @timings
if Writer === @converter
@converter.write output, target
else
if target.respond_to? :write
unless output.nil_or_empty?
target.write output.chomp
# ensure there's a trailing endline
target.write LF
end
else
::IO.write target, output
end
if @backend == 'manpage' && ::String === target && (@converter.respond_to? :write_alternate_pages)
@converter.write_alternate_pages @attributes['mannames'], @attributes['manvolnum'], target
end
end
@timings.record :write if @timings
nil
end
=begin
def convert_to target, opts = {}
start = ::Time.now.to_f if (monitor = opts[:monitor])
output = (r = converter opts).convert
monitor[:convert] = ::Time.now.to_f - start if monitor
unless target.respond_to? :write
@attributes['outfile'] = target = ::File.expand_path target
@attributes['outdir'] = ::File.dirname target
end
start = ::Time.now.to_f if monitor
r.write output, target
monitor[:write] = ::Time.now.to_f - start if monitor
output
end
=end
def content
# NOTE per AsciiDoc-spec, remove the title before converting the body
@attributes.delete('title')
super
end
# Public: Read the docinfo file(s) for inclusion in the document template
#
# If the docinfo1 attribute is set, read the docinfo.ext file. If the docinfo
# attribute is set, read the doc-name.docinfo.ext file. If the docinfo2
# attribute is set, read both files in that order.
#
# location - The Symbol location of the docinfo (e.g., :head, :footer, etc). (default: :head)
# suffix - The suffix of the docinfo file(s). If not set, the extension
# will be set to the outfilesuffix. (default: nil)
#
# returns The contents of the docinfo file(s) or empty string if no files are
# found or the safe mode is secure or greater.
def docinfo location = :head, suffix = nil
if safe >= SafeMode::SECURE
''
else
content = []
qualifier = %(-#{location}) unless location == :head
suffix = @outfilesuffix unless suffix
if (docinfo = @attributes['docinfo']).nil_or_empty?
if @attributes.key? 'docinfo2'
docinfo = ['private', 'shared']
elsif @attributes.key? 'docinfo1'
docinfo = ['shared']
else
docinfo = docinfo ? ['private'] : nil
end
else
docinfo = docinfo.split(',').map {|it| it.strip }
end
if docinfo
docinfo_file, docinfo_dir, docinfo_subs = %(docinfo#{qualifier}#{suffix}), @attributes['docinfodir'], resolve_docinfo_subs
unless (docinfo & ['shared', %(shared-#{location})]).empty?
docinfo_path = normalize_system_path docinfo_file, docinfo_dir
# NOTE normalizing the lines is essential if we're performing substitutions
if (shd_content = (read_asset docinfo_path, :normalize => true))
content << (apply_subs shd_content, docinfo_subs)
end
end
unless @attributes['docname'].nil_or_empty? || (docinfo & ['private', %(private-#{location})]).empty?
docinfo_path = normalize_system_path %(#{@attributes['docname']}-#{docinfo_file}), docinfo_dir
# NOTE normalizing the lines is essential if we're performing substitutions
if (pvt_content = (read_asset docinfo_path, :normalize => true))
content << (apply_subs pvt_content, docinfo_subs)
end
end
end
# TODO allow document to control whether extension docinfo is contributed
if @extensions && (docinfo_processors? location)
content += @docinfo_processor_extensions[location].map {|ext| ext.process_method[self] }.compact
end
content * LF
end
end
# Internal: Resolve the list of comma-delimited subs to apply to docinfo files.
#
# Resolve the list of substitutions from the value of the docinfosubs
# document attribute, if specified. Otherwise, return an Array containing
# the Symbol :attributes.
#
# Returns an [Array] of substitution [Symbol]s
def resolve_docinfo_subs
(@attributes.key? 'docinfosubs') ? (resolve_subs @attributes['docinfosubs'], :block, nil, 'docinfo') : [:attributes]
end
def docinfo_processors?(location = :head)
if @docinfo_processor_extensions.key?(location)
# false means we already performed a lookup and didn't find any
@docinfo_processor_extensions[location] != false
elsif @extensions && @document.extensions.docinfo_processors?(location)
!!(@docinfo_processor_extensions[location] = @document.extensions.docinfo_processors(location))
else
@docinfo_processor_extensions[location] = false
end
end
def to_s
%(#<#{self.class}@#{object_id} {doctype: #{doctype.inspect}, doctitle: #{(@header != nil ? @header.title : nil).inspect}, blocks: #{@blocks.size}}>)
end
end
end
| 1 | 5,862 | Should we expose this attribute in the Asciidoctor.js API? | asciidoctor-asciidoctor | rb
@@ -147,7 +147,7 @@ public interface Multimap<K, V> extends Traversable<Tuple2<K, V>>, Function1<K,
@Override
default boolean contains(Tuple2<K, V> element) {
- return get(element._1).map(v -> Objects.equals(v, element._2)).getOrElse(false);
+ return get(element._1).map(v -> v.contains(element._2)).getOrElse(false);
}
/** | 1 | /* / \____ _ _ ____ ______ / \ ____ __ _______
* / / \/ \ / \/ \ / /\__\/ // \/ \ // /\__\ JΛVΛSLΛNG
* _/ / /\ \ \/ / /\ \\__\\ \ // /\ \ /\\/ \ /__\ \ Copyright 2014-2016 Javaslang, http://javaslang.io
* /___/\_/ \_/\____/\_/ \_/\__\/__/\__\_/ \_// \__/\_____/ Licensed under the Apache License, Version 2.0
*/
package javaslang.collection;
import javaslang.*;
import javaslang.control.Option;
import java.util.*;
import java.util.function.*;
/**
* An immutable {@code Multimap} interface.
*
* <p>
* Basic operations:
*
* <ul>
* <li>{@link #containsKey(Object)}</li>
* <li>{@link #containsValue(Object)}</li>
* <li>{@link #get(Object)}</li>
* <li>{@link #getContainerType()}</li>
* <li>{@link #keySet()}</li>
* <li>{@link #merge(Multimap)}</li>
* <li>{@link #merge(Multimap, BiFunction)}</li>
* <li>{@link #put(Object, Object)}</li>
* <li>{@link #put(Tuple2)}</li>
* <li>{@link #values()}</li>
* </ul>
*
* Conversion:
* <ul>
* <li>{@link #toJavaMap()}</li>
* </ul>
*
* Filtering:
*
* <ul>
* <li>{@link #filter(BiPredicate)}</li>
* <li>{@link #filterKeys(Predicate)}</li>
* <li>{@link #filterValues(Predicate)}</li>
* <li>{@link #remove(Object)}</li>
* <li>{@link #remove(Object, Object)}</li>
* <li>{@link #removeAll(BiPredicate)}</li>
* <li>{@link #removeAll(Iterable)}</li>
* <li>{@link #removeKeys(Predicate)}</li>
* <li>{@link #removeValues(Predicate)}</li>
* </ul>
*
* Iteration:
*
* <ul>
* <li>{@link #forEach(BiConsumer)}</li>
* <li>{@link #traverse(BiFunction)}</li>
* </ul>
*
* Transformation:
*
* <ul>
* <li>{@link #bimap(Function, Function)}</li>
* <li>{@link #flatMap(BiFunction)}</li>
* <li>{@link #map(BiFunction)}</li>
* <li>{@link #mapValues(Function)}</li>
* <li>{@link #transform(Function)}</li>
* <li>{@link #unzip(BiFunction)}</li>
* <li>{@link #unzip3(BiFunction)}</li>
* </ul>
*
* @param <K> Key type
* @param <V> Value type
* @author Ruslan Sennov
* @since 2.1.0
*/
public interface Multimap<K, V> extends Traversable<Tuple2<K, V>>, Function1<K, Traversable<V>>, Kind2<Multimap<?, ?>, K, V> {
long serialVersionUID = 1L;
@SuppressWarnings("unchecked")
enum ContainerType {
SET(
(Traversable<?> set, Object elem) -> ((Set<Object>) set).add(elem),
(Traversable<?> set, Object elem) -> ((Set<Object>) set).remove(elem)
),
SORTED_SET(
(Traversable<?> set, Object elem) -> ((Set<Object>) set).add(elem),
(Traversable<?> set, Object elem) -> ((Set<Object>) set).remove(elem)
),
SEQ(
(Traversable<?> seq, Object elem) -> ((List<Object>) seq).append(elem),
(Traversable<?> seq, Object elem) -> ((List<Object>) seq).remove(elem)
);
private final BiFunction<Traversable<?>, Object, Traversable<?>> add;
private final BiFunction<Traversable<?>, Object, Traversable<?>> remove;
ContainerType(BiFunction<Traversable<?>, Object, Traversable<?>> add,
BiFunction<Traversable<?>, Object, Traversable<?>> remove) {
this.add = add;
this.remove = remove;
}
<T> Traversable<T> add(Traversable<?> container, T elem) {
return (Traversable<T>) add.apply(container, elem);
}
<T> Traversable<T> remove(Traversable<?> container, T elem) {
return (Traversable<T>) remove.apply(container, elem);
}
}
/**
* Narrows a widened {@code Multimap<? extends K, ? extends V>} to {@code Multimap<K, V>}
* by performing a type-safe cast. This is eligible because immutable/read-only
* collections are covariant.
*
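* <p>
* An illustrative sketch (the {@code HashMultimap.withSeq()} factory is an
* assumption, not part of this interface):
*
* <pre>{@code
* Multimap<? extends CharSequence, ? extends Number> wide =
*     HashMultimap.withSeq().of("a", 1);
* Multimap<CharSequence, Number> narrowed = Multimap.narrow(wide);
* }</pre>
*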
* @param map A {@code Multimap}.
* @param <K> Key type
* @param <V> Value type
* @return the given {@code map} instance as narrowed type {@code Multimap<K, V>}.
*/
@SuppressWarnings("unchecked")
static <K, V> Multimap<K, V> narrow(Multimap<? extends K, ? extends V> map) {
return (Multimap<K, V>) map;
}
// -- non-static API
@Override
default Traversable<V> apply(K key) {
return get(key).getOrElseThrow(NoSuchElementException::new);
}
/**
* Maps this {@code Multimap} to a new {@code Multimap} with different component type by applying a function to its elements.
*
* @param <K2> key's component type of the multimap result
* @param <V2> value's component type of the multimap result
* @param keyMapper a {@code Function} that maps the keys of type {@code K} to keys of type {@code K2}
* @param valueMapper a {@code Function} that maps the values of type {@code V} to values of type {@code V2}
* @return a new {@code Multimap}
* @throws NullPointerException if {@code keyMapper} or {@code valueMapper} is null
*/
<K2, V2> Multimap<K2, V2> bimap(Function<? super K, ? extends K2> keyMapper, Function<? super V, ? extends V2> valueMapper);
@Override
default boolean contains(Tuple2<K, V> element) {
return get(element._1).map(v -> Objects.equals(v, element._2)).getOrElse(false);
}
/**
* Returns <code>true</code> if this multimap contains a mapping for the specified key.
*
* @param key key whose presence in this multimap is to be tested
* @return <code>true</code> if this multimap contains a mapping for the specified key
*/
boolean containsKey(K key);
/**
* Returns <code>true</code> if this multimap maps one or more keys to the
* specified value. This operation will require time linear in the map size.
*
* @param value value whose presence in this multimap is to be tested
* @return <code>true</code> if this multimap maps one or more keys to the
* specified value
*/
default boolean containsValue(V value) {
return iterator().map(Tuple2::_2).contains(value);
}
/**
* Returns a new Multimap consisting of all elements which satisfy the given predicate.
*
* @param predicate the predicate used to test elements
* @return a new Multimap
* @throws NullPointerException if {@code predicate} is null
*/
Multimap<K, V> filter(BiPredicate<? super K, ? super V> predicate);
/**
* Returns a new Multimap consisting of all elements with keys which satisfy the given predicate.
*
* @param predicate the predicate used to test keys of elements
* @return a new Multimap
* @throws NullPointerException if {@code predicate} is null
*/
Multimap<K, V> filterKeys(Predicate<? super K> predicate);
/**
* Returns a new Multimap consisting of all elements with values which satisfy the given predicate.
*
* @param predicate the predicate used to test values of elements
* @return a new Multimap
* @throws NullPointerException if {@code predicate} is null
*/
Multimap<K, V> filterValues(Predicate<? super V> predicate);
/**
* FlatMaps this {@code Multimap} to a new {@code Multimap} with different component type.
*
* @param mapper A mapper
* @param <K2> key's component type of the mapped {@code Multimap}
* @param <V2> value's component type of the mapped {@code Multimap}
* @return A new {@code Multimap}.
* @throws NullPointerException if {@code mapper} is null
*/
<K2, V2> Multimap<K2, V2> flatMap(BiFunction<? super K, ? super V, ? extends Iterable<Tuple2<K2, V2>>> mapper);
/**
* Performs an action on key, value pair.
*
* @param action A {@code BiConsumer}
* @throws NullPointerException if {@code action} is null
*/
default void forEach(BiConsumer<K, V> action) {
Objects.requireNonNull(action, "action is null");
for (Tuple2<K, V> t : this) {
action.accept(t._1, t._2);
}
}
/**
* Returns the {@code Some} of value to which the specified key
* is mapped, or {@code None} if this multimap contains no mapping for the key.
*
* @param key the key whose associated value is to be returned
* @return the {@code Some} of value to which the specified key
* is mapped, or {@code None} if this multimap contains no mapping
* for the key
*/
Option<Traversable<V>> get(K key);
/**
* Returns the value associated with a key, or a default value if the key is not contained in the multimap
*
* @param key the key
* @param defaultValue a default value
* @return the value associated with key if it exists, otherwise the result of the default value
*/
Traversable<V> getOrElse(K key, Traversable<? extends V> defaultValue);
/**
* Returns the type of the {@code Traversable} value container of this {@code Multimap}.
*
* @return an enum value representing the container type
*/
ContainerType getContainerType();
@Override
default boolean hasDefiniteSize() {
return true;
}
@Override
default boolean isTraversableAgain() {
return true;
}
@Override
Iterator<Tuple2<K, V>> iterator();
/**
* Returns the keys contained in this multimap.
*
* @return {@code Set} of the keys contained in this multimap.
*/
Set<K> keySet();
@Override
default int length() {
return size();
}
/**
* Maps the entries of this {@code Multimap} to form a new {@code Multimap}.
*
* @param <K2> key's component type of the multimap result
* @param <V2> value's component type of the multimap result
* @param mapper a {@code Function} that maps entries of type {@code (K, V)} to entries of type {@code (K2, V2)}
* @return a new {@code Multimap}
* @throws NullPointerException if {@code mapper} is null
*/
<K2, V2> Multimap<K2, V2> map(BiFunction<? super K, ? super V, Tuple2<K2, V2>> mapper);
/**
* Maps the {@code Multimap} entries to a sequence of values.
* <p>
* Please use {@link #map(BiFunction)} if the result has to be of type {@code Multimap}.
*
* @param mapper A mapper
* @param <U> Component type
* @return A sequence of mapped values.
*/
@SuppressWarnings("unchecked")
@Override
default <U> Seq<U> map(Function<? super Tuple2<K, V>, ? extends U> mapper) {
Objects.requireNonNull(mapper, "mapper is null");
// don't remove cast, doesn't compile in Eclipse without it
return (Seq<U>) iterator().map(mapper).toStream();
}
/**
* Maps the values of this {@code Multimap} while preserving the corresponding keys.
*
* @param <V2> the new value type
* @param valueMapper a {@code Function} that maps values of type {@code V} to values of type {@code V2}
* @return a new {@code Multimap}
* @throws NullPointerException if {@code valueMapper} is null
*/
<V2> Multimap<K, V2> mapValues(Function<? super V, ? extends V2> valueMapper);
/**
* Creates a new multimap by merging the entries of {@code this} multimap and {@code that} multimap.
* <p>
* If collisions occur, the value of {@code this} multimap is taken.
*
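* <p>
* An illustrative sketch (the {@code HashMultimap.withSeq()} factory is an
* assumption):
*
* <pre>{@code
* Multimap<String, Integer> a = HashMultimap.withSeq().of("k", 1);
* Multimap<String, Integer> b = HashMultimap.withSeq().of("k", 2);
* a.merge(b).get("k"); // => Some(List(1)), the value of a wins
* }</pre>
*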
* @param that the other multimap
* @return A merged multimap
* @throws NullPointerException if that multimap is null
*/
Multimap<K, V> merge(Multimap<? extends K, ? extends V> that);
/**
* Creates a new multimap by merging the entries of {@code this} multimap and {@code that} multimap.
* <p>
* Uses the specified collision resolution function if two keys are the same.
* The collision resolution function will always take the first argument from <code>this</code> multimap
* and the second from <code>that</code> multimap.
*
* @param <K2> key type of that Multimap
* @param <V2> value type of that Multimap
* @param that the other multimap
* @param collisionResolution the collision resolution function
* @return A merged multimap
* @throws NullPointerException if that multimap or the given collision resolution function is null
*/
<K2 extends K, V2 extends V> Multimap<K, V> merge(Multimap<K2, V2> that, BiFunction<Traversable<V>, Traversable<V2>, Traversable<V>> collisionResolution);
/**
* Associates the specified value with the specified key in this multimap.
* Because this is a multimap, the value is added to the container of values
* mapped to the key; existing values for the key are kept.
*
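* <p>
* An illustrative sketch (the {@code HashMultimap.withSeq()} factory is an
* assumption):
*
* <pre>{@code
* Multimap<String, Integer> mm = HashMultimap.withSeq().empty();
* mm = mm.put("a", 1).put("a", 2);
* mm.get("a"); // => Some(List(1, 2))
* }</pre>
*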
* @param key key with which the specified value is to be associated
* @param value value to be associated with the specified key
* @return A new Multimap containing these elements and that entry.
*/
Multimap<K, V> put(K key, V value);
/**
* Convenience method for {@code put(entry._1, entry._2)}.
*
* @param entry A Tuple2 containing the key and value
* @return A new Multimap containing these elements and that entry.
*/
Multimap<K, V> put(Tuple2<? extends K, ? extends V> entry);
/**
* Removes the mapping for a key from this multimap if it is present.
*
* @param key key whose mapping is to be removed from the multimap
* @return A new Multimap containing these elements without the entry
* specified by that key.
*/
Multimap<K, V> remove(K key);
/**
* Removes the key-value pair from this multimap if it is present.
*
* @param key key whose mapping is to be removed from the multimap
* @param value value whose mapping is to be removed from the multimap
* @return A new Multimap containing these elements without the entry
* specified by that key and value.
*/
Multimap<K, V> remove(K key, V value);
/**
* Returns a new Multimap consisting of all elements which do not satisfy the given predicate.
*
* @param predicate the predicate used to test elements
* @return a new Multimap
* @throws NullPointerException if {@code predicate} is null
*/
Multimap<K, V> removeAll(BiPredicate<? super K, ? super V> predicate);
/**
* Removes the mappings for the specified keys from this multimap if present.
*
* @param keys the keys whose mappings are to be removed from the multimap
* @return A new Multimap containing these elements without the entries
* specified by those keys.
*/
Multimap<K, V> removeAll(Iterable<? extends K> keys);
/**
* Returns a new Multimap consisting of all elements with keys which do not satisfy the given predicate.
*
* @param predicate the predicate used to test keys of elements
* @return a new Multimap
* @throws NullPointerException if {@code predicate} is null
*/
Multimap<K, V> removeKeys(Predicate<? super K> predicate);
/**
* Returns a new Multimap consisting of all elements with values which do not satisfy the given predicate.
*
* @param predicate the predicate used to test values of elements
* @return a new Multimap
* @throws NullPointerException if {@code predicate} is null
*/
Multimap<K, V> removeValues(Predicate<? super V> predicate);
@Override
default <U> Seq<U> scanLeft(U zero, BiFunction<? super U, ? super Tuple2<K, V>, ? extends U> operation) {
Objects.requireNonNull(operation, "operation is null");
return Collections.scanLeft(this, zero, operation, List.empty(), List::prepend, List::reverse);
}
@Override
default <U> Seq<U> scanRight(U zero, BiFunction<? super Tuple2<K, V>, ? super U, ? extends U> operation) {
Objects.requireNonNull(operation, "operation is null");
return Collections.scanRight(this, zero, operation, List.empty(), List::prepend, Function.identity());
}
@Override
int size();
@Override
default Spliterator<Tuple2<K, V>> spliterator() {
return Spliterators.spliterator(iterator(), length(), Spliterator.ORDERED | Spliterator.IMMUTABLE);
}
/**
* Converts this Javaslang {@code Multimap} to a {@code java.util.Map} while preserving characteristics
* like insertion order ({@code LinkedHashMultimap}) and sort order ({@code SortedMultimap}).
*
* @return a new {@code java.util.Map} instance
*/
java.util.Map<K, java.util.Collection<V>> toJavaMap();
/**
* Transforms this {@code Multimap}.
*
* @param f A transformation
* @param <U> Type of transformation result
* @return An instance of type {@code U}
* @throws NullPointerException if {@code f} is null
*/
default <U> U transform(Function<? super Multimap<K, V>, ? extends U> f) {
Objects.requireNonNull(f, "f is null");
return f.apply(this);
}
default <U> Seq<U> traverse(BiFunction<K, V, ? extends U> mapper) {
Objects.requireNonNull(mapper, "mapper is null");
return foldLeft(Vector.empty(), (acc, entry) -> acc.append(mapper.apply(entry._1, entry._2)));
}
default <T1, T2> Tuple2<Seq<T1>, Seq<T2>> unzip(BiFunction<? super K, ? super V, Tuple2<? extends T1, ? extends T2>> unzipper) {
Objects.requireNonNull(unzipper, "unzipper is null");
return unzip(entry -> unzipper.apply(entry._1, entry._2));
}
@Override
default <T1, T2> Tuple2<Seq<T1>, Seq<T2>> unzip(Function<? super Tuple2<K, V>, Tuple2<? extends T1, ? extends T2>> unzipper) {
Objects.requireNonNull(unzipper, "unzipper is null");
return iterator().unzip(unzipper).map(Stream::ofAll, Stream::ofAll);
}
default <T1, T2, T3> Tuple3<Seq<T1>, Seq<T2>, Seq<T3>> unzip3(BiFunction<? super K, ? super V, Tuple3<? extends T1, ? extends T2, ? extends T3>> unzipper) {
Objects.requireNonNull(unzipper, "unzipper is null");
return unzip3(entry -> unzipper.apply(entry._1, entry._2));
}
@Override
default <T1, T2, T3> Tuple3<Seq<T1>, Seq<T2>, Seq<T3>> unzip3(
Function<? super Tuple2<K, V>, Tuple3<? extends T1, ? extends T2, ? extends T3>> unzipper) {
Objects.requireNonNull(unzipper, "unzipper is null");
return iterator().unzip3(unzipper).map(Stream::ofAll, Stream::ofAll, Stream::ofAll);
}
Traversable<V> values();
@Override
default <U> Seq<Tuple2<Tuple2<K, V>, U>> zip(Iterable<? extends U> that) {
return zipWith(that, Tuple::of);
}
@Override
default <U> Seq<Tuple2<Tuple2<K, V>, U>> zipAll(Iterable<? extends U> that, Tuple2<K, V> thisElem, U thatElem) {
Objects.requireNonNull(that, "that is null");
return Stream.ofAll(iterator().zipAll(that, thisElem, thatElem));
}
@Override
default <U, R> Seq<R> zipWith(Iterable<? extends U> that, BiFunction<? super Tuple2<K, V>, ? super U, ? extends R> mapper) {
Objects.requireNonNull(that, "that is null");
Objects.requireNonNull(mapper, "mapper is null");
return Stream.ofAll(iterator().zipWith(that, mapper));
}
@Override
default Seq<Tuple2<Tuple2<K, V>, Integer>> zipWithIndex() {
return zipWithIndex(Tuple::of);
}
@Override
default <U> Seq<U> zipWithIndex(BiFunction<? super Tuple2<K, V>, ? super Integer, ? extends U> mapper) {
Objects.requireNonNull(mapper, "mapper is null");
return Stream.ofAll(iterator().zipWithIndex(mapper));
}
// -- Adjusted return types of Traversable methods
@Override
Multimap<K, V> distinct();
@Override
Multimap<K, V> distinctBy(Comparator<? super Tuple2<K, V>> comparator);
@Override
<U> Multimap<K, V> distinctBy(Function<? super Tuple2<K, V>, ? extends U> keyExtractor);
@Override
Multimap<K, V> drop(int n);
@Override
Multimap<K, V> dropRight(int n);
@Override
Multimap<K, V> dropUntil(Predicate<? super Tuple2<K, V>> predicate);
@Override
Multimap<K, V> dropWhile(Predicate<? super Tuple2<K, V>> predicate);
@Override
Multimap<K, V> filter(Predicate<? super Tuple2<K, V>> predicate);
/**
* Flat-maps this entries to a sequence of values.
* <p>
* Please use {@link #flatMap(BiFunction)} if the result should be a {@code Multimap}
*
* @param mapper A mapper
* @param <U> Component type
* @return A sequence of flat-mapped values.
*/
@SuppressWarnings("unchecked")
@Override
default <U> Seq<U> flatMap(Function<? super Tuple2<K, V>, ? extends Iterable<? extends U>> mapper) {
Objects.requireNonNull(mapper, "mapper is null");
// don't remove cast, doesn't compile in Eclipse without it
return (Seq<U>) iterator().flatMap(mapper).toStream();
}
@Override
default <U> U foldRight(U zero, BiFunction<? super Tuple2<K, V>, ? super U, ? extends U> f) {
Objects.requireNonNull(f, "f is null");
return iterator().foldRight(zero, f);
}
@Override
<C> Map<C, ? extends Multimap<K, V>> groupBy(Function<? super Tuple2<K, V>, ? extends C> classifier);
@Override
Iterator<? extends Multimap<K, V>> grouped(int size);
@Override
Multimap<K, V> init();
@Override
Option<? extends Multimap<K, V>> initOption();
@Override
Tuple2<? extends Multimap<K, V>, ? extends Multimap<K, V>> partition(Predicate<? super Tuple2<K, V>> predicate);
@Override
Multimap<K, V> peek(Consumer<? super Tuple2<K, V>> action);
@Override
Multimap<K, V> replace(Tuple2<K, V> currentElement, Tuple2<K, V> newElement);
@Override
Multimap<K, V> replaceAll(Tuple2<K, V> currentElement, Tuple2<K, V> newElement);
@Override
Multimap<K, V> retainAll(Iterable<? extends Tuple2<K, V>> elements);
@Override
Multimap<K, V> scan(Tuple2<K, V> zero,
BiFunction<? super Tuple2<K, V>, ? super Tuple2<K, V>, ? extends Tuple2<K, V>> operation);
@Override
Iterator<? extends Multimap<K, V>> sliding(int size);
@Override
Iterator<? extends Multimap<K, V>> sliding(int size, int step);
@Override
Tuple2<? extends Multimap<K, V>, ? extends Multimap<K, V>> span(Predicate<? super Tuple2<K, V>> predicate);
@Override
Multimap<K, V> tail();
@Override
Option<? extends Multimap<K, V>> tailOption();
@Override
Multimap<K, V> take(int n);
@Override
Multimap<K, V> takeRight(int n);
@Override
Multimap<K, V> takeUntil(Predicate<? super Tuple2<K, V>> predicate);
@Override
Multimap<K, V> takeWhile(Predicate<? super Tuple2<K, V>> predicate);
}
| 1 | 10,739 | I think `contains` on `Multimap` was broken - it should return true if the element's value is one of the values associated with the key. Is that right? | vavr-io-vavr | java
@@ -57,6 +57,7 @@ public class TiConfiguration implements Serializable {
private static final int DEF_KV_CLIENT_CONCURRENCY = 10;
private static final List<TiStoreType> DEF_ISOLATION_READ_ENGINES =
ImmutableList.of(TiStoreType.TiKV, TiStoreType.TiFlash);
+ private static final int DEF_PREWRITE_CONCURRENCY = 20;
private int timeout = DEF_TIMEOUT;
private TimeUnit timeoutUnit = DEF_TIMEOUT_UNIT; | 1 | /*
* Copyright 2017 PingCAP, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.pingcap.tikv;
import com.google.common.collect.ImmutableList;
import com.pingcap.tikv.pd.PDUtils;
import com.pingcap.tikv.region.TiStoreType;
import com.pingcap.tikv.types.Converter;
import java.io.Serializable;
import java.net.URI;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.Objects;
import java.util.concurrent.TimeUnit;
import org.joda.time.DateTimeZone;
import org.tikv.kvproto.Kvrpcpb.CommandPri;
import org.tikv.kvproto.Kvrpcpb.IsolationLevel;
public class TiConfiguration implements Serializable {
private static final DateTimeZone DEF_TIMEZONE = Converter.getLocalTimezone();
private static final int DEF_TIMEOUT = 10;
private static final TimeUnit DEF_TIMEOUT_UNIT = TimeUnit.MINUTES;
private static final int DEF_SCAN_BATCH_SIZE = 100;
private static final boolean DEF_IGNORE_TRUNCATE = true;
private static final boolean DEF_TRUNCATE_AS_WARNING = false;
  private static final int DEF_MAX_FRAME_SIZE = 268435456 * 2; // 2 * 256 MB = 512 MB
private static final int DEF_INDEX_SCAN_BATCH_SIZE = 20000;
private static final int DEF_REGION_SCAN_DOWNGRADE_THRESHOLD = 10000000;
// if keyRange size per request exceeds this limit, the request might be too large to be accepted
  // by TiKV (the maximum request size accepted by TiKV is around 1 MB)
private static final int MAX_REQUEST_KEY_RANGE_SIZE = 20000;
private static final int DEF_INDEX_SCAN_CONCURRENCY = 5;
private static final int DEF_TABLE_SCAN_CONCURRENCY = 512;
private static final CommandPri DEF_COMMAND_PRIORITY = CommandPri.Low;
private static final IsolationLevel DEF_ISOLATION_LEVEL = IsolationLevel.SI;
private static final boolean DEF_SHOW_ROWID = false;
private static final String DEF_DB_PREFIX = "";
private static final boolean DEF_WRITE_ENABLE = true;
private static final boolean DEF_WRITE_ALLOW_SPARK_SQL = false;
private static final boolean DEF_WRITE_WITHOUT_LOCK_TABLE = false;
private static final int DEF_TIKV_REGION_SPLIT_SIZE_IN_MB = 96;
private static final int DEF_PARTITION_PER_SPLIT = 10;
private static final int DEF_KV_CLIENT_CONCURRENCY = 10;
private static final List<TiStoreType> DEF_ISOLATION_READ_ENGINES =
ImmutableList.of(TiStoreType.TiKV, TiStoreType.TiFlash);
private int timeout = DEF_TIMEOUT;
private TimeUnit timeoutUnit = DEF_TIMEOUT_UNIT;
private boolean ignoreTruncate = DEF_IGNORE_TRUNCATE;
private boolean truncateAsWarning = DEF_TRUNCATE_AS_WARNING;
private int maxFrameSize = DEF_MAX_FRAME_SIZE;
private List<URI> pdAddrs = new ArrayList<>();
private int indexScanBatchSize = DEF_INDEX_SCAN_BATCH_SIZE;
private int downgradeThreshold = DEF_REGION_SCAN_DOWNGRADE_THRESHOLD;
private int indexScanConcurrency = DEF_INDEX_SCAN_CONCURRENCY;
private int tableScanConcurrency = DEF_TABLE_SCAN_CONCURRENCY;
private CommandPri commandPriority = DEF_COMMAND_PRIORITY;
private IsolationLevel isolationLevel = DEF_ISOLATION_LEVEL;
private int maxRequestKeyRangeSize = MAX_REQUEST_KEY_RANGE_SIZE;
private boolean showRowId = DEF_SHOW_ROWID;
private String dbPrefix = DEF_DB_PREFIX;
private boolean writeAllowSparkSQL = DEF_WRITE_ALLOW_SPARK_SQL;
private boolean writeEnable = DEF_WRITE_ENABLE;
private boolean writeWithoutLockTable = DEF_WRITE_WITHOUT_LOCK_TABLE;
private int tikvRegionSplitSizeInMB = DEF_TIKV_REGION_SPLIT_SIZE_IN_MB;
private int partitionPerSplit = DEF_PARTITION_PER_SPLIT;
private int kvClientConcurrency = DEF_KV_CLIENT_CONCURRENCY;
private List<TiStoreType> isolationReadEngines = DEF_ISOLATION_READ_ENGINES;
public static TiConfiguration createDefault(String pdAddrsStr) {
Objects.requireNonNull(pdAddrsStr, "pdAddrsStr is null");
TiConfiguration conf = new TiConfiguration();
conf.pdAddrs = strToURI(pdAddrsStr);
return conf;
}
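  // Usage sketch (illustrative; the PD address below is a placeholder):
  //   TiConfiguration conf = TiConfiguration.createDefault("127.0.0.1:2379");
  //   conf.setTimeout(60).setTimeoutUnit(TimeUnit.SECONDS);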
private static List<URI> strToURI(String addressStr) {
Objects.requireNonNull(addressStr);
String[] addrs = addressStr.split(",");
Arrays.sort(addrs);
return PDUtils.addrsToUrls(addrs);
}
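  // e.g. listToString(Arrays.asList(1, 2, 3)) returns "[1,2,3]"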
public static <E> String listToString(List<E> list) {
StringBuilder sb = new StringBuilder();
sb.append("[");
for (int i = 0; i < list.size(); i++) {
sb.append(list.get(i).toString());
if (i != list.size() - 1) {
sb.append(",");
}
}
sb.append("]");
return sb.toString();
}
public DateTimeZone getLocalTimeZone() {
return DEF_TIMEZONE;
}
public int getTimeout() {
return timeout;
}
public TiConfiguration setTimeout(int timeout) {
this.timeout = timeout;
return this;
}
public TimeUnit getTimeoutUnit() {
return timeoutUnit;
}
public TiConfiguration setTimeoutUnit(TimeUnit timeoutUnit) {
this.timeoutUnit = timeoutUnit;
return this;
}
public List<URI> getPdAddrs() {
return pdAddrs;
}
public String getPdAddrsString() {
return listToString(pdAddrs);
}
public int getScanBatchSize() {
return DEF_SCAN_BATCH_SIZE;
}
boolean isIgnoreTruncate() {
return ignoreTruncate;
}
public TiConfiguration setIgnoreTruncate(boolean ignoreTruncate) {
this.ignoreTruncate = ignoreTruncate;
return this;
}
boolean isTruncateAsWarning() {
return truncateAsWarning;
}
public TiConfiguration setTruncateAsWarning(boolean truncateAsWarning) {
this.truncateAsWarning = truncateAsWarning;
return this;
}
public int getMaxFrameSize() {
return maxFrameSize;
}
public TiConfiguration setMaxFrameSize(int maxFrameSize) {
this.maxFrameSize = maxFrameSize;
return this;
}
public int getIndexScanBatchSize() {
return indexScanBatchSize;
}
public void setIndexScanBatchSize(int indexScanBatchSize) {
this.indexScanBatchSize = indexScanBatchSize;
}
public int getIndexScanConcurrency() {
return indexScanConcurrency;
}
public void setIndexScanConcurrency(int indexScanConcurrency) {
this.indexScanConcurrency = indexScanConcurrency;
}
public int getTableScanConcurrency() {
return tableScanConcurrency;
}
public void setTableScanConcurrency(int tableScanConcurrency) {
this.tableScanConcurrency = tableScanConcurrency;
}
public CommandPri getCommandPriority() {
return commandPriority;
}
public void setCommandPriority(CommandPri commandPriority) {
this.commandPriority = commandPriority;
}
public IsolationLevel getIsolationLevel() {
return isolationLevel;
}
public void setIsolationLevel(IsolationLevel isolationLevel) {
this.isolationLevel = isolationLevel;
}
public int getMaxRequestKeyRangeSize() {
return maxRequestKeyRangeSize;
}
public void setMaxRequestKeyRangeSize(int maxRequestKeyRangeSize) {
if (maxRequestKeyRangeSize <= 0) {
throw new IllegalArgumentException("Key range size cannot be less than 1");
}
this.maxRequestKeyRangeSize = maxRequestKeyRangeSize;
}
public void setShowRowId(boolean flag) {
this.showRowId = flag;
}
public boolean ifShowRowId() {
return showRowId;
}
public String getDBPrefix() {
return dbPrefix;
}
public void setDBPrefix(String dbPrefix) {
this.dbPrefix = dbPrefix;
}
public boolean isWriteEnable() {
return writeEnable;
}
public void setWriteEnable(boolean writeEnable) {
this.writeEnable = writeEnable;
}
public boolean isWriteWithoutLockTable() {
return writeWithoutLockTable;
}
public void setWriteWithoutLockTable(boolean writeWithoutLockTable) {
this.writeWithoutLockTable = writeWithoutLockTable;
}
public boolean isWriteAllowSparkSQL() {
return writeAllowSparkSQL;
}
public void setWriteAllowSparkSQL(boolean writeAllowSparkSQL) {
this.writeAllowSparkSQL = writeAllowSparkSQL;
}
public int getTikvRegionSplitSizeInMB() {
return tikvRegionSplitSizeInMB;
}
public void setTikvRegionSplitSizeInMB(int tikvRegionSplitSizeInMB) {
this.tikvRegionSplitSizeInMB = tikvRegionSplitSizeInMB;
}
public int getDowngradeThreshold() {
return downgradeThreshold;
}
public void setDowngradeThreshold(int downgradeThreshold) {
this.downgradeThreshold = downgradeThreshold;
}
public int getPartitionPerSplit() {
return partitionPerSplit;
}
public void setPartitionPerSplit(int partitionPerSplit) {
this.partitionPerSplit = partitionPerSplit;
}
public List<TiStoreType> getIsolationReadEngines() {
return isolationReadEngines;
}
public void setIsolationReadEngines(List<TiStoreType> isolationReadEngines) {
this.isolationReadEngines = isolationReadEngines;
}
public int getKvClientConcurrency() {
return kvClientConcurrency;
}
public void setKvClientConcurrency(int kvClientConcurrency) {
this.kvClientConcurrency = kvClientConcurrency;
}
}
| 1 | 12598 | delete this line | pingcap-tispark | java
@@ -43,6 +43,7 @@ module Test
def self.setup_models
conn = ClientRequest.connection
+ return if conn.table_exists? "test_client_requests"
conn.create_table(:test_client_requests, force: true) do |t|
t.decimal :amount
t.string :project_title | 1 | module Test
def self.table_name_prefix
"test_"
end
class ClientRequest < ActiveRecord::Base
belongs_to :approving_official, class_name: User
def self.purchase_amount_column_name
:amount
end
include ClientDataMixin
include PurchaseCardMixin
def editable?
true
end
def name
project_title
end
def self.expense_type_options
[]
end
def total_price
amount
end
def self.permitted_params(params, _client_request_instance)
params.require(:test_client_request).permit(:project_title, :amount, :approving_official_id)
end
def initialize_steps
end
def public_identifier
"TEST-#{id}"
end
end
def self.setup_models
conn = ClientRequest.connection
conn.create_table(:test_client_requests, force: true) do |t|
t.decimal :amount
t.string :project_title
t.integer :approving_official_id
t.datetime :created_at
t.datetime :updated_at
end
end
def self.teardown_models
ClientRequest.connection.drop_table :test_client_requests
end
# We must defer loading the factory until we have defined our namespace,
# so call this explicitly to work around rails app load order.
require File.join(Rails.root, "spec/factories/test/client_request.rb")
end
| 1 | 17735 | Presumably we no longer need `force: true` here. | 18F-C2 | rb
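A sketch of the suggested follow-up (hypothetical: with the table_exists? guard in place, force: true can likely be dropped):
  def self.setup_models
    conn = ClientRequest.connection
    return if conn.table_exists?("test_client_requests")
    conn.create_table(:test_client_requests) do |t|
      t.decimal :amount
      t.string :project_title
      t.integer :approving_official_id
      t.datetime :created_at
      t.datetime :updated_at
    end
  end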
@@ -339,6 +339,12 @@ hipError_t hipHostAlloc(void** ptr, size_t sizeBytes, unsigned int flags) {
// width in bytes
hipError_t ihipMallocPitch(void** ptr, size_t* pitch, size_t width, size_t height, size_t depth) {
hipError_t hip_status = hipSuccess;
+ if(ptr==NULL || ptr==0)
+ {
+ hip_status=hipErrorInvalidValue;
+ return hip_status;
+ }
+
// hardcoded 128 bytes
*pitch = ((((int)width - 1) / 128) + 1) * 128;
const size_t sizeBytes = (*pitch) * height; | 1 | /*
Copyright (c) 2015 - present Advanced Micro Devices, Inc. All rights reserved.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
*/
#include <hc_am.hpp>
#include "hsa/hsa.h"
#include "hsa/hsa_ext_amd.h"
#include "hip/hip_runtime.h"
#include "hip_hcc_internal.h"
#include "trace_helper.h"
// Internal HIP APIS:
namespace hip_internal {
hipError_t memcpyAsync(void* dst, const void* src, size_t sizeBytes, hipMemcpyKind kind,
hipStream_t stream) {
hipError_t e = hipSuccess;
// Return success if number of bytes to copy is 0
if (sizeBytes == 0) return e;
stream = ihipSyncAndResolveStream(stream);
if ((dst == NULL) || (src == NULL)) {
e = hipErrorInvalidValue;
} else if (stream) {
try {
stream->locked_copyAsync(dst, src, sizeBytes, kind);
} catch (ihipException& ex) {
e = ex._code;
}
} else {
e = hipErrorInvalidValue;
}
return e;
}
// return 0 on success or -1 on error:
int sharePtr(void* ptr, ihipCtx_t* ctx, bool shareWithAll, unsigned hipFlags) {
int ret = 0;
auto device = ctx->getWriteableDevice();
#if USE_APP_PTR_FOR_CTX
hc::am_memtracker_update(ptr, device->_deviceId, hipFlags, ctx);
#else
hc::am_memtracker_update(ptr, device->_deviceId, hipFlags);
#endif
if (shareWithAll) {
hsa_status_t s = hsa_amd_agents_allow_access(g_deviceCnt + 1, g_allAgents, NULL, ptr);
tprintf(DB_MEM, " allow access to CPU + all %d GPUs (shareWithAll)\n", g_deviceCnt);
if (s != HSA_STATUS_SUCCESS) {
ret = -1;
}
} else {
int peerCnt = 0;
{
LockedAccessor_CtxCrit_t crit(ctx->criticalData());
            // peerCnt always includes self, so the trace below reports peerCnt - 1 other peers
peerCnt = crit->peerCnt();
tprintf(DB_MEM, " allow access to %d other peer(s)\n", peerCnt - 1);
if (peerCnt > 1) {
// printf ("peer self access\n");
                // TODO - remove me:
for (auto iter = crit->_peers.begin(); iter != crit->_peers.end(); iter++) {
tprintf(DB_MEM, " allow access to peer: %s%s\n", (*iter)->toString().c_str(),
(iter == crit->_peers.begin()) ? " (self)" : "");
};
hsa_status_t s =
hsa_amd_agents_allow_access(crit->peerCnt(), crit->peerAgents(), NULL, ptr);
if (s != HSA_STATUS_SUCCESS) {
ret = -1;
}
}
}
}
return ret;
}
// Allocate a new pointer with am_alloc and share with all valid peers.
// Returns null-ptr if a memory error occurs (either allocation or sharing)
void* allocAndSharePtr(const char* msg, size_t sizeBytes, ihipCtx_t* ctx, bool shareWithAll,
unsigned amFlags, unsigned hipFlags, size_t alignment) {
void* ptr = nullptr;
auto device = ctx->getWriteableDevice();
#if (__hcc_workweek__ >= 17332)
if (alignment != 0) {
ptr = hc::am_aligned_alloc(sizeBytes, device->_acc, amFlags, alignment);
} else
#endif
{
ptr = hc::am_alloc(sizeBytes, device->_acc, amFlags);
}
tprintf(DB_MEM, " alloc %s ptr:%p-%p size:%zu on dev:%d\n", msg, ptr,
static_cast<char*>(ptr) + sizeBytes, sizeBytes, device->_deviceId);
if (HIP_INIT_ALLOC != -1) {
        // TODO - don't call HIP API directly here:
hipMemset(ptr, HIP_INIT_ALLOC, sizeBytes);
}
if (ptr != nullptr) {
int r = sharePtr(ptr, ctx, shareWithAll, hipFlags);
if (r != 0) {
ptr = nullptr;
}
}
return ptr;
}
} // end namespace hip_internal
//-------------------------------------------------------------------------------------------------
//-------------------------------------------------------------------------------------------------
// Memory
//
//
//
// HIP uses several "app*" fields in the HC memory tracker to track state necessary for the HIP API.
//_appId : DeviceID. For device mem, this is device where the memory is physically allocated.
// For host or registered mem, this is the current device when the memory is allocated or
// registered. This device will have a GPUVM mapping for the host mem.
//
//_appAllocationFlags : These are flags provided by the user when allocation is performed. They are
//returned to user in hipHostGetFlags and other APIs.
// TODO - add more info here when available.
//
hipError_t hipPointerGetAttributes(hipPointerAttribute_t* attributes, const void* ptr) {
HIP_INIT_API(attributes, ptr);
hipError_t e = hipSuccess;
if ((attributes == nullptr) || (ptr == nullptr)) {
e = hipErrorInvalidValue;
} else {
hc::accelerator acc;
#if (__hcc_workweek__ >= 17332)
hc::AmPointerInfo amPointerInfo(NULL, NULL, NULL, 0, acc, 0, 0);
#else
hc::AmPointerInfo amPointerInfo(NULL, NULL, 0, acc, 0, 0);
#endif
am_status_t status = hc::am_memtracker_getinfo(&amPointerInfo, ptr);
if (status == AM_SUCCESS) {
attributes->memoryType =
amPointerInfo._isInDeviceMem ? hipMemoryTypeDevice : hipMemoryTypeHost;
attributes->hostPointer = amPointerInfo._hostPointer;
attributes->devicePointer = amPointerInfo._devicePointer;
attributes->isManaged = 0;
if (attributes->memoryType == hipMemoryTypeHost) {
attributes->hostPointer = (void*)ptr;
}
if (attributes->memoryType == hipMemoryTypeDevice) {
attributes->devicePointer = (void*)ptr;
}
attributes->allocationFlags = amPointerInfo._appAllocationFlags;
attributes->device = amPointerInfo._appId;
if (attributes->device < 0) {
e = hipErrorInvalidDevice;
}
} else {
attributes->memoryType = hipMemoryTypeDevice;
attributes->hostPointer = 0;
attributes->devicePointer = 0;
attributes->device = -1;
attributes->isManaged = 0;
attributes->allocationFlags = 0;
e = hipErrorUnknown; // TODO - should be hipErrorInvalidValue ?
}
}
return ihipLogStatus(e);
}
hipError_t hipHostGetDevicePointer(void** devicePointer, void* hostPointer, unsigned flags) {
HIP_INIT_API(devicePointer, hostPointer, flags);
hipError_t e = hipSuccess;
// Flags must be 0:
if ((flags != 0) || (devicePointer == nullptr) || (hostPointer == nullptr)) {
e = hipErrorInvalidValue;
} else {
hc::accelerator acc;
*devicePointer = NULL;
#if (__hcc_workweek__ >= 17332)
hc::AmPointerInfo amPointerInfo(NULL, NULL, NULL, 0, acc, 0, 0);
#else
hc::AmPointerInfo amPointerInfo(NULL, NULL, 0, acc, 0, 0);
#endif
am_status_t status = hc::am_memtracker_getinfo(&amPointerInfo, hostPointer);
if (status == AM_SUCCESS) {
*devicePointer =
static_cast<char*>(amPointerInfo._devicePointer) +
(static_cast<char*>(hostPointer) - static_cast<char*>(amPointerInfo._hostPointer));
tprintf(DB_MEM, " host_ptr=%p returned device_pointer=%p\n", hostPointer,
*devicePointer);
} else {
e = hipErrorMemoryAllocation;
}
}
return ihipLogStatus(e);
}
hipError_t hipMalloc(void** ptr, size_t sizeBytes) {
HIP_INIT_SPECIAL_API((TRACE_MEM), ptr, sizeBytes);
HIP_SET_DEVICE();
hipError_t hip_status = hipSuccess;
auto ctx = ihipGetTlsDefaultCtx();
// return NULL pointer when malloc size is 0
if (sizeBytes == 0) {
*ptr = NULL;
hip_status = hipSuccess;
} else if ((ctx == nullptr) || (ptr == nullptr)) {
hip_status = hipErrorInvalidValue;
} else {
auto device = ctx->getWriteableDevice();
*ptr = hip_internal::allocAndSharePtr("device_mem", sizeBytes, ctx, false /*shareWithAll*/,
0 /*amFlags*/, 0 /*hipFlags*/, 0);
if (sizeBytes && (*ptr == NULL)) {
hip_status = hipErrorMemoryAllocation;
}
}
return ihipLogStatus(hip_status);
}
hipError_t hipHostMalloc(void** ptr, size_t sizeBytes, unsigned int flags) {
HIP_INIT_SPECIAL_API((TRACE_MEM), ptr, sizeBytes, flags);
HIP_SET_DEVICE();
hipError_t hip_status = hipSuccess;
if (HIP_SYNC_HOST_ALLOC) {
hipDeviceSynchronize();
}
auto ctx = ihipGetTlsDefaultCtx();
if (sizeBytes == 0) {
hip_status = hipSuccess;
        // TODO - should a size of 0 return an error or be silently ignored?
} else if ((ctx == nullptr) || (ptr == nullptr)) {
hip_status = hipErrorInvalidValue;
} else {
unsigned trueFlags = flags;
if (flags == hipHostMallocDefault) {
// HCC/ROCM provide a modern system with unified memory and should set both of these
// flags by default:
trueFlags = hipHostMallocMapped | hipHostMallocPortable;
}
const unsigned supportedFlags = hipHostMallocPortable | hipHostMallocMapped |
hipHostMallocWriteCombined | hipHostMallocCoherent |
hipHostMallocNonCoherent;
const unsigned coherencyFlags = hipHostMallocCoherent | hipHostMallocNonCoherent;
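        // e.g. requesting hipHostMallocCoherent | hipHostMallocNonCoherent is contradictory and
        // rejected below, as is any flag bit outside supportedFlags.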
if ((flags & ~supportedFlags) || ((flags & coherencyFlags) == coherencyFlags)) {
*ptr = nullptr;
// can't specify unsupported flags, can't specify both Coherent + NonCoherent
hip_status = hipErrorInvalidValue;
} else {
auto device = ctx->getWriteableDevice();
unsigned amFlags = 0;
if (flags & hipHostMallocCoherent) {
amFlags = amHostCoherent;
} else if (flags & hipHostMallocNonCoherent) {
amFlags = amHostNonCoherent;
} else {
// depends on env variables:
amFlags = HIP_HOST_COHERENT ? amHostCoherent : amHostNonCoherent;
}
*ptr = hip_internal::allocAndSharePtr(
(amFlags & amHostCoherent) ? "finegrained_host" : "pinned_host", sizeBytes, ctx,
(trueFlags & hipHostMallocPortable) /*shareWithAll*/, amFlags, flags, 0);
if (sizeBytes && (*ptr == NULL)) {
hip_status = hipErrorMemoryAllocation;
}
}
}
if (HIP_SYNC_HOST_ALLOC) {
hipDeviceSynchronize();
}
return ihipLogStatus(hip_status);
}
// Deprecated function:
hipError_t hipMallocHost(void** ptr, size_t sizeBytes) { return hipHostMalloc(ptr, sizeBytes, 0); }
// Deprecated function:
hipError_t hipHostAlloc(void** ptr, size_t sizeBytes, unsigned int flags) {
return hipHostMalloc(ptr, sizeBytes, flags);
};
// width in bytes
hipError_t ihipMallocPitch(void** ptr, size_t* pitch, size_t width, size_t height, size_t depth) {
hipError_t hip_status = hipSuccess;
// hardcoded 128 bytes
*pitch = ((((int)width - 1) / 128) + 1) * 128;
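    // e.g. width = 300 bytes -> pitch = ((299 / 128) + 1) * 128 = 384 bytes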
const size_t sizeBytes = (*pitch) * height;
auto ctx = ihipGetTlsDefaultCtx();
if (ctx) {
hc::accelerator acc = ctx->getDevice()->_acc;
hsa_agent_t* agent = static_cast<hsa_agent_t*>(acc.get_hsa_agent());
size_t allocGranularity = 0;
hsa_amd_memory_pool_t* allocRegion =
static_cast<hsa_amd_memory_pool_t*>(acc.get_hsa_am_region());
hsa_amd_memory_pool_get_info(*allocRegion, HSA_AMD_MEMORY_POOL_INFO_RUNTIME_ALLOC_GRANULE,
&allocGranularity);
hsa_ext_image_descriptor_t imageDescriptor;
imageDescriptor.width = *pitch;
imageDescriptor.height = height;
imageDescriptor.depth = 0; // depth;
imageDescriptor.array_size = 0;
if (depth == 0)
imageDescriptor.geometry = HSA_EXT_IMAGE_GEOMETRY_2D;
else
imageDescriptor.geometry = HSA_EXT_IMAGE_GEOMETRY_3D;
imageDescriptor.format.channel_order = HSA_EXT_IMAGE_CHANNEL_ORDER_R;
imageDescriptor.format.channel_type = HSA_EXT_IMAGE_CHANNEL_TYPE_UNSIGNED_INT32;
hsa_access_permission_t permission = HSA_ACCESS_PERMISSION_RW;
hsa_ext_image_data_info_t imageInfo;
hsa_status_t status =
hsa_ext_image_data_get_info(*agent, &imageDescriptor, permission, &imageInfo);
size_t alignment = imageInfo.alignment <= allocGranularity ? 0 : imageInfo.alignment;
const unsigned am_flags = 0;
*ptr = hip_internal::allocAndSharePtr("device_pitch", sizeBytes, ctx,
false /*shareWithAll*/, am_flags, 0, alignment);
if (sizeBytes && (*ptr == NULL)) {
hip_status = hipErrorMemoryAllocation;
}
} else {
hip_status = hipErrorMemoryAllocation;
}
return hip_status;
}
// width in bytes
hipError_t hipMallocPitch(void** ptr, size_t* pitch, size_t width, size_t height) {
HIP_INIT_SPECIAL_API((TRACE_MEM), ptr, pitch, width, height);
HIP_SET_DEVICE();
hipError_t hip_status = hipSuccess;
if (width == 0 || height == 0) return ihipLogStatus(hipErrorUnknown);
hip_status = ihipMallocPitch(ptr, pitch, width, height, 0);
return ihipLogStatus(hip_status);
}
hipError_t hipMalloc3D(hipPitchedPtr* pitchedDevPtr, hipExtent extent) {
HIP_INIT_API(pitchedDevPtr, &extent);
HIP_SET_DEVICE();
hipError_t hip_status = hipSuccess;
if (extent.width == 0 || extent.height == 0) return ihipLogStatus(hipErrorUnknown);
if (!pitchedDevPtr) return ihipLogStatus(hipErrorInvalidValue);
void* ptr;
size_t pitch;
hip_status =
ihipMallocPitch(&pitchedDevPtr->ptr, &pitch, extent.width, extent.height, extent.depth);
if (hip_status == hipSuccess) {
pitchedDevPtr->pitch = pitch;
pitchedDevPtr->xsize = extent.width;
pitchedDevPtr->ysize = extent.height;
}
return ihipLogStatus(hip_status);
}
hipChannelFormatDesc hipCreateChannelDesc(int x, int y, int z, int w, hipChannelFormatKind f) {
hipChannelFormatDesc cd;
cd.x = x;
cd.y = y;
cd.z = z;
cd.w = w;
cd.f = f;
return cd;
}
extern void getChannelOrderAndType(const hipChannelFormatDesc& desc,
enum hipTextureReadMode readMode,
hsa_ext_image_channel_order_t* channelOrder,
hsa_ext_image_channel_type_t* channelType);
hipError_t hipArrayCreate(hipArray** array, const HIP_ARRAY_DESCRIPTOR* pAllocateArray) {
HIP_INIT_SPECIAL_API((TRACE_MEM), array, pAllocateArray);
HIP_SET_DEVICE();
hipError_t hip_status = hipSuccess;
if (pAllocateArray->width > 0) {
auto ctx = ihipGetTlsDefaultCtx();
*array = (hipArray*)malloc(sizeof(hipArray));
array[0]->drvDesc = *pAllocateArray;
array[0]->width = pAllocateArray->width;
array[0]->height = pAllocateArray->height;
array[0]->isDrv = true;
array[0]->textureType = hipTextureType2D;
void** ptr = &array[0]->data;
if (ctx) {
const unsigned am_flags = 0;
size_t size = pAllocateArray->width;
if (pAllocateArray->height > 0) {
size = size * pAllocateArray->height;
}
hsa_ext_image_channel_type_t channelType;
size_t allocSize = 0;
switch (pAllocateArray->format) {
case HIP_AD_FORMAT_UNSIGNED_INT8:
allocSize = size * sizeof(uint8_t);
channelType = HSA_EXT_IMAGE_CHANNEL_TYPE_UNSIGNED_INT8;
break;
case HIP_AD_FORMAT_UNSIGNED_INT16:
allocSize = size * sizeof(uint16_t);
channelType = HSA_EXT_IMAGE_CHANNEL_TYPE_UNSIGNED_INT16;
break;
case HIP_AD_FORMAT_UNSIGNED_INT32:
allocSize = size * sizeof(uint32_t);
channelType = HSA_EXT_IMAGE_CHANNEL_TYPE_UNSIGNED_INT32;
break;
case HIP_AD_FORMAT_SIGNED_INT8:
allocSize = size * sizeof(int8_t);
channelType = HSA_EXT_IMAGE_CHANNEL_TYPE_SIGNED_INT8;
break;
case HIP_AD_FORMAT_SIGNED_INT16:
allocSize = size * sizeof(int16_t);
channelType = HSA_EXT_IMAGE_CHANNEL_TYPE_SIGNED_INT16;
break;
case HIP_AD_FORMAT_SIGNED_INT32:
allocSize = size * sizeof(int32_t);
channelType = HSA_EXT_IMAGE_CHANNEL_TYPE_SIGNED_INT32;
break;
case HIP_AD_FORMAT_HALF:
allocSize = size * sizeof(int16_t);
channelType = HSA_EXT_IMAGE_CHANNEL_TYPE_HALF_FLOAT;
break;
case HIP_AD_FORMAT_FLOAT:
allocSize = size * sizeof(float);
channelType = HSA_EXT_IMAGE_CHANNEL_TYPE_FLOAT;
break;
default:
hip_status = hipErrorUnknown;
break;
}
hc::accelerator acc = ctx->getDevice()->_acc;
hsa_agent_t* agent = static_cast<hsa_agent_t*>(acc.get_hsa_agent());
size_t allocGranularity = 0;
hsa_amd_memory_pool_t* allocRegion =
static_cast<hsa_amd_memory_pool_t*>(acc.get_hsa_am_region());
hsa_amd_memory_pool_get_info(
*allocRegion, HSA_AMD_MEMORY_POOL_INFO_RUNTIME_ALLOC_GRANULE, &allocGranularity);
hsa_ext_image_descriptor_t imageDescriptor;
imageDescriptor.width = pAllocateArray->width;
imageDescriptor.height = pAllocateArray->height;
imageDescriptor.depth = 0;
imageDescriptor.array_size = 0;
imageDescriptor.geometry = HSA_EXT_IMAGE_GEOMETRY_2D;
hsa_ext_image_channel_order_t channelOrder;
if (pAllocateArray->numChannels == 4) {
channelOrder = HSA_EXT_IMAGE_CHANNEL_ORDER_RGBA;
} else if (pAllocateArray->numChannels == 2) {
channelOrder = HSA_EXT_IMAGE_CHANNEL_ORDER_RG;
} else if (pAllocateArray->numChannels == 1) {
channelOrder = HSA_EXT_IMAGE_CHANNEL_ORDER_R;
}
imageDescriptor.format.channel_order = channelOrder;
imageDescriptor.format.channel_type = channelType;
hsa_access_permission_t permission = HSA_ACCESS_PERMISSION_RW;
hsa_ext_image_data_info_t imageInfo;
hsa_status_t status =
hsa_ext_image_data_get_info(*agent, &imageDescriptor, permission, &imageInfo);
size_t alignment = imageInfo.alignment <= allocGranularity ? 0 : imageInfo.alignment;
*ptr = hip_internal::allocAndSharePtr("device_array", allocSize, ctx,
false /*shareWithAll*/, am_flags, 0, alignment);
if (size && (*ptr == NULL)) {
hip_status = hipErrorMemoryAllocation;
}
} else {
hip_status = hipErrorMemoryAllocation;
}
} else {
hip_status = hipErrorInvalidValue;
}
return ihipLogStatus(hip_status);
}
hipError_t hipMallocArray(hipArray** array, const hipChannelFormatDesc* desc, size_t width,
size_t height, unsigned int flags) {
HIP_INIT_SPECIAL_API((TRACE_MEM), array, desc, width, height, flags);
HIP_SET_DEVICE();
hipError_t hip_status = hipSuccess;
if (width > 0) {
auto ctx = ihipGetTlsDefaultCtx();
*array = (hipArray*)malloc(sizeof(hipArray));
array[0]->type = flags;
array[0]->width = width;
array[0]->height = height;
array[0]->depth = 1;
array[0]->desc = *desc;
array[0]->isDrv = false;
array[0]->textureType = hipTextureType2D;
void** ptr = &array[0]->data;
if (ctx) {
const unsigned am_flags = 0;
size_t size = width;
if (height > 0) {
size = size * height;
}
size_t allocSize = 0;
switch (desc->f) {
case hipChannelFormatKindSigned:
allocSize = size * sizeof(int);
break;
case hipChannelFormatKindUnsigned:
allocSize = size * sizeof(unsigned int);
break;
case hipChannelFormatKindFloat:
allocSize = size * sizeof(float);
break;
case hipChannelFormatKindNone:
allocSize = size * sizeof(size_t);
break;
default:
hip_status = hipErrorUnknown;
break;
}
hc::accelerator acc = ctx->getDevice()->_acc;
hsa_agent_t* agent = static_cast<hsa_agent_t*>(acc.get_hsa_agent());
size_t allocGranularity = 0;
hsa_amd_memory_pool_t* allocRegion =
static_cast<hsa_amd_memory_pool_t*>(acc.get_hsa_am_region());
hsa_amd_memory_pool_get_info(
*allocRegion, HSA_AMD_MEMORY_POOL_INFO_RUNTIME_ALLOC_GRANULE, &allocGranularity);
hsa_ext_image_descriptor_t imageDescriptor;
imageDescriptor.width = width;
imageDescriptor.height = height;
imageDescriptor.depth = 0;
imageDescriptor.array_size = 0;
switch (flags) {
case hipArrayLayered:
case hipArrayCubemap:
case hipArraySurfaceLoadStore:
case hipArrayTextureGather:
assert(0);
break;
case hipArrayDefault:
default:
imageDescriptor.geometry = HSA_EXT_IMAGE_GEOMETRY_2D;
break;
}
hsa_ext_image_channel_order_t channelOrder;
hsa_ext_image_channel_type_t channelType;
getChannelOrderAndType(*desc, hipReadModeElementType, &channelOrder, &channelType);
imageDescriptor.format.channel_order = channelOrder;
imageDescriptor.format.channel_type = channelType;
hsa_access_permission_t permission = HSA_ACCESS_PERMISSION_RW;
hsa_ext_image_data_info_t imageInfo;
hsa_status_t status =
hsa_ext_image_data_get_info(*agent, &imageDescriptor, permission, &imageInfo);
size_t alignment = imageInfo.alignment <= allocGranularity ? 0 : imageInfo.alignment;
*ptr = hip_internal::allocAndSharePtr("device_array", allocSize, ctx,
false /*shareWithAll*/, am_flags, 0, alignment);
if (size && (*ptr == NULL)) {
hip_status = hipErrorMemoryAllocation;
}
} else {
hip_status = hipErrorMemoryAllocation;
}
} else {
hip_status = hipErrorInvalidValue;
}
return ihipLogStatus(hip_status);
}
hipError_t hipArray3DCreate(hipArray_t* array, const HIP_ARRAY_DESCRIPTOR* pAllocateArray) {
HIP_INIT_SPECIAL_API((TRACE_MEM), array, pAllocateArray);
hipError_t hip_status = hipSuccess;
auto ctx = ihipGetTlsDefaultCtx();
*array = (hipArray*)malloc(sizeof(hipArray));
array[0]->type = pAllocateArray->flags;
array[0]->width = pAllocateArray->width;
array[0]->height = pAllocateArray->height;
array[0]->depth = pAllocateArray->depth;
array[0]->drvDesc = *pAllocateArray;
array[0]->isDrv = true;
array[0]->textureType = hipTextureType3D;
void** ptr = &array[0]->data;
if (ctx) {
const unsigned am_flags = 0;
const size_t size = pAllocateArray->width * pAllocateArray->height * pAllocateArray->depth;
size_t allocSize = 0;
hsa_ext_image_channel_type_t channelType;
switch (pAllocateArray->format) {
case HIP_AD_FORMAT_UNSIGNED_INT8:
allocSize = size * sizeof(uint8_t);
channelType = HSA_EXT_IMAGE_CHANNEL_TYPE_UNSIGNED_INT8;
break;
case HIP_AD_FORMAT_UNSIGNED_INT16:
allocSize = size * sizeof(uint16_t);
channelType = HSA_EXT_IMAGE_CHANNEL_TYPE_UNSIGNED_INT16;
break;
case HIP_AD_FORMAT_UNSIGNED_INT32:
allocSize = size * sizeof(uint32_t);
channelType = HSA_EXT_IMAGE_CHANNEL_TYPE_UNSIGNED_INT32;
break;
case HIP_AD_FORMAT_SIGNED_INT8:
allocSize = size * sizeof(int8_t);
channelType = HSA_EXT_IMAGE_CHANNEL_TYPE_SIGNED_INT8;
break;
case HIP_AD_FORMAT_SIGNED_INT16:
allocSize = size * sizeof(int16_t);
channelType = HSA_EXT_IMAGE_CHANNEL_TYPE_SIGNED_INT16;
break;
case HIP_AD_FORMAT_SIGNED_INT32:
allocSize = size * sizeof(int32_t);
channelType = HSA_EXT_IMAGE_CHANNEL_TYPE_SIGNED_INT32;
break;
case HIP_AD_FORMAT_HALF:
allocSize = size * sizeof(int16_t);
channelType = HSA_EXT_IMAGE_CHANNEL_TYPE_HALF_FLOAT;
break;
case HIP_AD_FORMAT_FLOAT:
allocSize = size * sizeof(float);
channelType = HSA_EXT_IMAGE_CHANNEL_TYPE_FLOAT;
break;
default:
hip_status = hipErrorUnknown;
break;
}
hc::accelerator acc = ctx->getDevice()->_acc;
hsa_agent_t* agent = static_cast<hsa_agent_t*>(acc.get_hsa_agent());
size_t allocGranularity = 0;
hsa_amd_memory_pool_t* allocRegion =
static_cast<hsa_amd_memory_pool_t*>(acc.get_hsa_am_region());
hsa_amd_memory_pool_get_info(*allocRegion, HSA_AMD_MEMORY_POOL_INFO_RUNTIME_ALLOC_GRANULE,
&allocGranularity);
hsa_ext_image_descriptor_t imageDescriptor;
imageDescriptor.width = pAllocateArray->width;
imageDescriptor.height = pAllocateArray->height;
imageDescriptor.depth = 0;
imageDescriptor.array_size = 0;
switch (pAllocateArray->flags) {
case hipArrayLayered:
imageDescriptor.geometry = HSA_EXT_IMAGE_GEOMETRY_2DA;
imageDescriptor.array_size = pAllocateArray->depth;
break;
case hipArraySurfaceLoadStore:
case hipArrayTextureGather:
case hipArrayDefault:
assert(0);
break;
case hipArrayCubemap:
default:
imageDescriptor.geometry = HSA_EXT_IMAGE_GEOMETRY_3D;
imageDescriptor.depth = pAllocateArray->depth;
break;
}
hsa_ext_image_channel_order_t channelOrder;
// getChannelOrderAndType(*desc, hipReadModeElementType, &channelOrder, &channelType);
if (pAllocateArray->numChannels == 4) {
channelOrder = HSA_EXT_IMAGE_CHANNEL_ORDER_RGBA;
} else if (pAllocateArray->numChannels == 2) {
channelOrder = HSA_EXT_IMAGE_CHANNEL_ORDER_RG;
} else if (pAllocateArray->numChannels == 1) {
channelOrder = HSA_EXT_IMAGE_CHANNEL_ORDER_R;
}
imageDescriptor.format.channel_order = channelOrder;
imageDescriptor.format.channel_type = channelType;
hsa_access_permission_t permission = HSA_ACCESS_PERMISSION_RW;
hsa_ext_image_data_info_t imageInfo;
hsa_status_t status =
hsa_ext_image_data_get_info(*agent, &imageDescriptor, permission, &imageInfo);
size_t alignment = imageInfo.alignment <= allocGranularity ? 0 : imageInfo.alignment;
*ptr = hip_internal::allocAndSharePtr("device_array", allocSize, ctx, false, am_flags, 0,
alignment);
if (size && (*ptr == NULL)) {
hip_status = hipErrorMemoryAllocation;
}
} else {
hip_status = hipErrorMemoryAllocation;
}
return ihipLogStatus(hip_status);
}
hipError_t hipMalloc3DArray(hipArray_t* array, const struct hipChannelFormatDesc* desc,
struct hipExtent extent, unsigned int flags) {
HIP_INIT_API(array, desc, &extent, flags);
HIP_SET_DEVICE();
hipError_t hip_status = hipSuccess;
auto ctx = ihipGetTlsDefaultCtx();
*array = (hipArray*)malloc(sizeof(hipArray));
array[0]->type = flags;
array[0]->width = extent.width;
array[0]->height = extent.height;
array[0]->depth = extent.depth;
array[0]->desc = *desc;
array[0]->isDrv = false;
array[0]->textureType = hipTextureType3D;
void** ptr = &array[0]->data;
if (ctx) {
const unsigned am_flags = 0;
const size_t size = extent.width * extent.height * extent.depth;
size_t allocSize = 0;
switch (desc->f) {
case hipChannelFormatKindSigned:
allocSize = size * sizeof(int);
break;
case hipChannelFormatKindUnsigned:
allocSize = size * sizeof(unsigned int);
break;
case hipChannelFormatKindFloat:
allocSize = size * sizeof(float);
break;
case hipChannelFormatKindNone:
allocSize = size * sizeof(size_t);
break;
default:
hip_status = hipErrorUnknown;
break;
}
hc::accelerator acc = ctx->getDevice()->_acc;
hsa_agent_t* agent = static_cast<hsa_agent_t*>(acc.get_hsa_agent());
size_t allocGranularity = 0;
hsa_amd_memory_pool_t* allocRegion =
static_cast<hsa_amd_memory_pool_t*>(acc.get_hsa_am_region());
hsa_amd_memory_pool_get_info(*allocRegion, HSA_AMD_MEMORY_POOL_INFO_RUNTIME_ALLOC_GRANULE,
&allocGranularity);
hsa_ext_image_descriptor_t imageDescriptor;
imageDescriptor.width = extent.width;
imageDescriptor.height = extent.height;
imageDescriptor.depth = 0;
imageDescriptor.array_size = 0;
switch (flags) {
case hipArrayLayered:
imageDescriptor.geometry = HSA_EXT_IMAGE_GEOMETRY_2DA;
imageDescriptor.array_size = extent.depth;
break;
case hipArraySurfaceLoadStore:
case hipArrayTextureGather:
case hipArrayDefault:
assert(0);
break;
case hipArrayCubemap:
default:
imageDescriptor.geometry = HSA_EXT_IMAGE_GEOMETRY_3D;
imageDescriptor.depth = extent.depth;
break;
}
hsa_ext_image_channel_order_t channelOrder;
hsa_ext_image_channel_type_t channelType;
getChannelOrderAndType(*desc, hipReadModeElementType, &channelOrder, &channelType);
imageDescriptor.format.channel_order = channelOrder;
imageDescriptor.format.channel_type = channelType;
hsa_access_permission_t permission = HSA_ACCESS_PERMISSION_RW;
hsa_ext_image_data_info_t imageInfo;
hsa_status_t status =
hsa_ext_image_data_get_info(*agent, &imageDescriptor, permission, &imageInfo);
size_t alignment = imageInfo.alignment <= allocGranularity ? 0 : imageInfo.alignment;
*ptr = hip_internal::allocAndSharePtr("device_array", allocSize, ctx, false, am_flags, 0,
alignment);
if (size && (*ptr == NULL)) {
hip_status = hipErrorMemoryAllocation;
}
} else {
hip_status = hipErrorMemoryAllocation;
}
return ihipLogStatus(hip_status);
}
hipError_t hipHostGetFlags(unsigned int* flagsPtr, void* hostPtr) {
HIP_INIT_API(flagsPtr, hostPtr);
hipError_t hip_status = hipSuccess;
hc::accelerator acc;
#if (__hcc_workweek__ >= 17332)
hc::AmPointerInfo amPointerInfo(NULL, NULL, NULL, 0, acc, 0, 0);
#else
hc::AmPointerInfo amPointerInfo(NULL, NULL, 0, acc, 0, 0);
#endif
am_status_t status = hc::am_memtracker_getinfo(&amPointerInfo, hostPtr);
if (status == AM_SUCCESS) {
*flagsPtr = amPointerInfo._appAllocationFlags;
if (*flagsPtr == 0) {
hip_status = hipErrorInvalidValue;
} else {
hip_status = hipSuccess;
}
tprintf(DB_MEM, " %s: host ptr=%p\n", __func__, hostPtr);
} else {
hip_status = hipErrorInvalidValue;
}
return ihipLogStatus(hip_status);
}
// TODO - need to fix several issues here related to P2P access, host memory fallback.
hipError_t hipHostRegister(void* hostPtr, size_t sizeBytes, unsigned int flags) {
HIP_INIT_API(hostPtr, sizeBytes, flags);
hipError_t hip_status = hipSuccess;
auto ctx = ihipGetTlsDefaultCtx();
if (hostPtr == NULL) {
return ihipLogStatus(hipErrorInvalidValue);
}
hc::accelerator acc;
#if (__hcc_workweek__ >= 17332)
hc::AmPointerInfo amPointerInfo(NULL, NULL, NULL, 0, acc, 0, 0);
#else
hc::AmPointerInfo amPointerInfo(NULL, NULL, 0, acc, 0, 0);
#endif
am_status_t am_status = hc::am_memtracker_getinfo(&amPointerInfo, hostPtr);
if (am_status == AM_SUCCESS) {
hip_status = hipErrorHostMemoryAlreadyRegistered;
} else {
// TODO-test : multi-gpu access to registered host memory.
if (ctx) {
if (flags == hipHostRegisterDefault || flags == hipHostRegisterPortable ||
flags == hipHostRegisterMapped) {
auto device = ctx->getWriteableDevice();
std::vector<hc::accelerator> vecAcc;
for (int i = 0; i < g_deviceCnt; i++) {
vecAcc.push_back(ihipGetDevice(i)->_acc);
}
am_status = hc::am_memory_host_lock(device->_acc, hostPtr, sizeBytes, &vecAcc[0],
vecAcc.size());
#if USE_APP_PTR_FOR_CTX
hc::am_memtracker_update(hostPtr, device->_deviceId, flags, ctx);
#else
hc::am_memtracker_update(hostPtr, device->_deviceId, flags);
#endif
tprintf(DB_MEM, " %s registered ptr=%p and allowed access to %zu peers\n", __func__,
hostPtr, vecAcc.size());
if (am_status == AM_SUCCESS) {
hip_status = hipSuccess;
} else {
hip_status = hipErrorMemoryAllocation;
}
} else {
hip_status = hipErrorInvalidValue;
}
}
}
return ihipLogStatus(hip_status);
}
hipError_t hipHostUnregister(void* hostPtr) {
HIP_INIT_API(hostPtr);
auto ctx = ihipGetTlsDefaultCtx();
hipError_t hip_status = hipSuccess;
if (hostPtr == NULL) {
hip_status = hipErrorInvalidValue;
} else {
auto device = ctx->getWriteableDevice();
am_status_t am_status = hc::am_memory_host_unlock(device->_acc, hostPtr);
tprintf(DB_MEM, " %s unregistered ptr=%p\n", __func__, hostPtr);
if (am_status != AM_SUCCESS) {
hip_status = hipErrorHostMemoryNotRegistered;
}
}
return ihipLogStatus(hip_status);
}
namespace {
inline hipDeviceptr_t agent_address_for_symbol(const char* symbolName) {
hipDeviceptr_t r = nullptr;
#if __hcc_workweek__ >= 17481
size_t byte_cnt = 0u;
hipModuleGetGlobal(&r, &byte_cnt, 0, symbolName);
#else
auto ctx = ihipGetTlsDefaultCtx();
auto acc = ctx->getDevice()->_acc;
r = acc.get_symbol_address(symbolName);
#endif
return r;
}
} // namespace
hipError_t hipMemcpyToSymbol(const void* symbolName, const void* src, size_t count, size_t offset,
hipMemcpyKind kind) {
HIP_INIT_SPECIAL_API((TRACE_MCMD), symbolName, src, count, offset, kind);
if (symbolName == nullptr) {
return ihipLogStatus(hipErrorInvalidSymbol);
}
auto ctx = ihipGetTlsDefaultCtx();
hc::accelerator acc = ctx->getDevice()->_acc;
hipDeviceptr_t dst = agent_address_for_symbol(static_cast<const char*>(symbolName));
tprintf(DB_MEM, " symbol '%s' resolved to address:%p\n", symbolName, dst);
if (dst == nullptr) {
return ihipLogStatus(hipErrorInvalidSymbol);
}
hipStream_t stream = ihipSyncAndResolveStream(hipStreamNull);
if (kind == hipMemcpyHostToDevice || kind == hipMemcpyDeviceToHost ||
kind == hipMemcpyDeviceToDevice || kind == hipMemcpyHostToHost) {
stream->lockedSymbolCopySync(acc, dst, (void*)src, count, offset, kind);
// acc.memcpy_symbol(dst, (void*)src, count+offset);
} else {
return ihipLogStatus(hipErrorInvalidValue);
}
return ihipLogStatus(hipSuccess);
}
hipError_t hipMemcpyFromSymbol(void* dst, const void* symbolName, size_t count, size_t offset,
hipMemcpyKind kind) {
HIP_INIT_SPECIAL_API((TRACE_MCMD), symbolName, dst, count, offset, kind);
if (symbolName == nullptr) {
return ihipLogStatus(hipErrorInvalidSymbol);
}
auto ctx = ihipGetTlsDefaultCtx();
hc::accelerator acc = ctx->getDevice()->_acc;
hipDeviceptr_t src = agent_address_for_symbol(static_cast<const char*>(symbolName));
    tprintf(DB_MEM, "  symbol '%s' resolved to address:%p\n", symbolName, src);
    if (src == nullptr || dst == nullptr) {
return ihipLogStatus(hipErrorInvalidSymbol);
}
hipStream_t stream = ihipSyncAndResolveStream(hipStreamNull);
if (kind == hipMemcpyHostToDevice || kind == hipMemcpyDeviceToHost ||
kind == hipMemcpyDeviceToDevice || kind == hipMemcpyHostToHost) {
stream->lockedSymbolCopySync(acc, dst, (void*)src, count, offset, kind);
} else {
return ihipLogStatus(hipErrorInvalidValue);
}
return ihipLogStatus(hipSuccess);
}
hipError_t hipMemcpyToSymbolAsync(const void* symbolName, const void* src, size_t count,
size_t offset, hipMemcpyKind kind, hipStream_t stream) {
HIP_INIT_SPECIAL_API((TRACE_MCMD), symbolName, src, count, offset, kind, stream);
if (symbolName == nullptr) {
return ihipLogStatus(hipErrorInvalidSymbol);
}
hipError_t e = hipSuccess;
auto ctx = ihipGetTlsDefaultCtx();
hc::accelerator acc = ctx->getDevice()->_acc;
hipDeviceptr_t dst = agent_address_for_symbol(static_cast<const char*>(symbolName));
tprintf(DB_MEM, " symbol '%s' resolved to address:%p\n", symbolName, dst);
if (dst == nullptr) {
return ihipLogStatus(hipErrorInvalidSymbol);
}
if (stream) {
try {
stream->lockedSymbolCopyAsync(acc, dst, (void*)src, count, offset, kind);
} catch (ihipException& ex) {
e = ex._code;
}
} else {
e = hipErrorInvalidValue;
}
return ihipLogStatus(e);
}
hipError_t hipMemcpyFromSymbolAsync(void* dst, const void* symbolName, size_t count, size_t offset,
hipMemcpyKind kind, hipStream_t stream) {
HIP_INIT_SPECIAL_API((TRACE_MCMD), symbolName, dst, count, offset, kind, stream);
if (symbolName == nullptr) {
return ihipLogStatus(hipErrorInvalidSymbol);
}
hipError_t e = hipSuccess;
auto ctx = ihipGetTlsDefaultCtx();
hc::accelerator acc = ctx->getDevice()->_acc;
hipDeviceptr_t src = agent_address_for_symbol(static_cast<const char*>(symbolName));
tprintf(DB_MEM, " symbol '%s' resolved to address:%p\n", symbolName, src);
if (src == nullptr || dst == nullptr) {
return ihipLogStatus(hipErrorInvalidSymbol);
}
stream = ihipSyncAndResolveStream(stream);
if (stream) {
try {
stream->lockedSymbolCopyAsync(acc, dst, src, count, offset, kind);
} catch (ihipException& ex) {
e = ex._code;
}
} else {
e = hipErrorInvalidValue;
}
return ihipLogStatus(e);
}
//---
hipError_t hipMemcpy(void* dst, const void* src, size_t sizeBytes, hipMemcpyKind kind) {
HIP_INIT_SPECIAL_API((TRACE_MCMD), dst, src, sizeBytes, kind);
hipStream_t stream = ihipSyncAndResolveStream(hipStreamNull);
hc::completion_future marker;
hipError_t e = hipSuccess;
try {
stream->locked_copySync(dst, src, sizeBytes, kind);
} catch (ihipException& ex) {
e = ex._code;
}
return ihipLogStatus(e);
}
hipError_t hipMemcpyHtoD(hipDeviceptr_t dst, void* src, size_t sizeBytes) {
HIP_INIT_SPECIAL_API((TRACE_MCMD), dst, src, sizeBytes);
hipStream_t stream = ihipSyncAndResolveStream(hipStreamNull);
hc::completion_future marker;
hipError_t e = hipSuccess;
try {
stream->locked_copySync((void*)dst, (void*)src, sizeBytes, hipMemcpyHostToDevice, false);
} catch (ihipException& ex) {
e = ex._code;
}
return ihipLogStatus(e);
}
hipError_t hipMemcpyDtoH(void* dst, hipDeviceptr_t src, size_t sizeBytes) {
HIP_INIT_SPECIAL_API((TRACE_MCMD), dst, src, sizeBytes);
hipStream_t stream = ihipSyncAndResolveStream(hipStreamNull);
hc::completion_future marker;
hipError_t e = hipSuccess;
try {
stream->locked_copySync((void*)dst, (void*)src, sizeBytes, hipMemcpyDeviceToHost, false);
} catch (ihipException& ex) {
e = ex._code;
}
return ihipLogStatus(e);
}
hipError_t hipMemcpyDtoD(hipDeviceptr_t dst, hipDeviceptr_t src, size_t sizeBytes) {
HIP_INIT_SPECIAL_API((TRACE_MCMD), dst, src, sizeBytes);
hipStream_t stream = ihipSyncAndResolveStream(hipStreamNull);
hc::completion_future marker;
hipError_t e = hipSuccess;
try {
stream->locked_copySync((void*)dst, (void*)src, sizeBytes, hipMemcpyDeviceToDevice, false);
} catch (ihipException& ex) {
e = ex._code;
}
return ihipLogStatus(e);
}
hipError_t hipMemcpyHtoH(void* dst, void* src, size_t sizeBytes) {
HIP_INIT_SPECIAL_API((TRACE_MCMD), dst, src, sizeBytes);
hipStream_t stream = ihipSyncAndResolveStream(hipStreamNull);
hc::completion_future marker;
hipError_t e = hipSuccess;
try {
stream->locked_copySync((void*)dst, (void*)src, sizeBytes, hipMemcpyHostToHost, false);
} catch (ihipException& ex) {
e = ex._code;
}
return ihipLogStatus(e);
}
hipError_t hipMemcpyAsync(void* dst, const void* src, size_t sizeBytes, hipMemcpyKind kind,
hipStream_t stream) {
HIP_INIT_SPECIAL_API((TRACE_MCMD), dst, src, sizeBytes, kind, stream);
return ihipLogStatus(hip_internal::memcpyAsync(dst, src, sizeBytes, kind, stream));
}
hipError_t hipMemcpyHtoDAsync(hipDeviceptr_t dst, void* src, size_t sizeBytes, hipStream_t stream) {
HIP_INIT_SPECIAL_API((TRACE_MCMD), dst, src, sizeBytes, stream);
return ihipLogStatus(
hip_internal::memcpyAsync(dst, src, sizeBytes, hipMemcpyHostToDevice, stream));
}
hipError_t hipMemcpyDtoDAsync(hipDeviceptr_t dst, hipDeviceptr_t src, size_t sizeBytes,
hipStream_t stream) {
HIP_INIT_SPECIAL_API((TRACE_MCMD), dst, src, sizeBytes, stream);
return ihipLogStatus(
hip_internal::memcpyAsync(dst, src, sizeBytes, hipMemcpyDeviceToDevice, stream));
}
hipError_t hipMemcpyDtoHAsync(void* dst, hipDeviceptr_t src, size_t sizeBytes, hipStream_t stream) {
HIP_INIT_SPECIAL_API((TRACE_MCMD), dst, src, sizeBytes, stream);
return ihipLogStatus(
hip_internal::memcpyAsync(dst, src, sizeBytes, hipMemcpyDeviceToHost, stream));
}
// TODO - review and optimize
hipError_t ihipMemcpy2D(void* dst, size_t dpitch, const void* src, size_t spitch, size_t width,
size_t height, hipMemcpyKind kind) {
if (width > dpitch || width > spitch) return hipErrorUnknown;
hipStream_t stream = ihipSyncAndResolveStream(hipStreamNull);
hc::completion_future marker;
hipError_t e = hipSuccess;
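    // Copy row by row: each of the `height` rows is `width` bytes, advancing by spitch in the
    // source and by dpitch in the destination.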
try {
for (int i = 0; i < height; ++i) {
stream->locked_copySync((unsigned char*)dst + i * dpitch,
(unsigned char*)src + i * spitch, width, kind);
}
} catch (ihipException& ex) {
e = ex._code;
}
return e;
}
hipError_t hipMemcpy2D(void* dst, size_t dpitch, const void* src, size_t spitch, size_t width,
size_t height, hipMemcpyKind kind) {
HIP_INIT_SPECIAL_API((TRACE_MCMD), dst, dpitch, src, spitch, width, height, kind);
hipError_t e = hipSuccess;
e = ihipMemcpy2D(dst, dpitch, src, spitch, width, height, kind);
return ihipLogStatus(e);
}
hipError_t hipMemcpyParam2D(const hip_Memcpy2D* pCopy) {
HIP_INIT_SPECIAL_API((TRACE_MCMD), pCopy);
hipError_t e = hipSuccess;
    if (pCopy == nullptr) {
        e = hipErrorInvalidValue;
    } else {
        e = ihipMemcpy2D(pCopy->dstArray->data, pCopy->widthInBytes, pCopy->srcHost,
                         pCopy->srcPitch, pCopy->widthInBytes, pCopy->height, hipMemcpyDefault);
    }
return ihipLogStatus(e);
}
hipError_t hipMemcpy2DAsync(void* dst, size_t dpitch, const void* src, size_t spitch, size_t width,
size_t height, hipMemcpyKind kind, hipStream_t stream) {
HIP_INIT_SPECIAL_API((TRACE_MCMD), dst, dpitch, src, spitch, width, height, kind, stream);
if (width > dpitch || width > spitch) return ihipLogStatus(hipErrorUnknown);
hipError_t e = hipSuccess;
try {
for (int i = 0; i < height; ++i) {
e = hip_internal::memcpyAsync((unsigned char*)dst + i * dpitch,
(unsigned char*)src + i * spitch, width, kind, stream);
}
} catch (ihipException& ex) {
e = ex._code;
}
return ihipLogStatus(e);
}
hipError_t hipMemcpy2DToArray(hipArray* dst, size_t wOffset, size_t hOffset, const void* src,
size_t spitch, size_t width, size_t height, hipMemcpyKind kind) {
HIP_INIT_SPECIAL_API((TRACE_MCMD), dst, wOffset, hOffset, src, spitch, width, height, kind);
hipStream_t stream = ihipSyncAndResolveStream(hipStreamNull);
hc::completion_future marker;
hipError_t e = hipSuccess;
size_t byteSize;
if (dst) {
switch (dst[0].desc.f) {
case hipChannelFormatKindSigned:
byteSize = sizeof(int);
break;
case hipChannelFormatKindUnsigned:
byteSize = sizeof(unsigned int);
break;
case hipChannelFormatKindFloat:
byteSize = sizeof(float);
break;
case hipChannelFormatKindNone:
byteSize = sizeof(size_t);
break;
default:
byteSize = 0;
break;
}
} else {
return ihipLogStatus(hipErrorUnknown);
}
if ((wOffset + width > (dst->width * byteSize)) || width > spitch) {
return ihipLogStatus(hipErrorUnknown);
}
size_t src_w = spitch;
size_t dst_w = (dst->width) * byteSize;
try {
for (int i = 0; i < height; ++i) {
stream->locked_copySync((unsigned char*)dst->data + i * dst_w,
(unsigned char*)src + i * src_w, width, kind);
}
} catch (ihipException& ex) {
e = ex._code;
}
return ihipLogStatus(e);
}
hipError_t hipMemcpyToArray(hipArray* dst, size_t wOffset, size_t hOffset, const void* src,
size_t count, hipMemcpyKind kind) {
HIP_INIT_SPECIAL_API((TRACE_MCMD), dst, wOffset, hOffset, src, count, kind);
hipStream_t stream = ihipSyncAndResolveStream(hipStreamNull);
hc::completion_future marker;
hipError_t e = hipSuccess;
try {
stream->locked_copySync((char*)dst->data + wOffset, src, count, kind);
} catch (ihipException& ex) {
e = ex._code;
}
return ihipLogStatus(e);
}
hipError_t hipMemcpyFromArray(void* dst, hipArray_const_t srcArray, size_t wOffset, size_t hOffset,
size_t count, hipMemcpyKind kind) {
HIP_INIT_SPECIAL_API((TRACE_MCMD), dst, srcArray, wOffset, hOffset, count, kind);
hipStream_t stream = ihipSyncAndResolveStream(hipStreamNull);
hc::completion_future marker;
hipError_t e = hipSuccess;
try {
stream->locked_copySync((char*)dst, (char*)srcArray->data + wOffset, count, kind);
} catch (ihipException& ex) {
e = ex._code;
}
return ihipLogStatus(e);
}
hipError_t hipMemcpyHtoA(hipArray* dstArray, size_t dstOffset, const void* srcHost, size_t count) {
HIP_INIT_SPECIAL_API((TRACE_MCMD), dstArray, dstOffset, srcHost, count);
hipStream_t stream = ihipSyncAndResolveStream(hipStreamNull);
hc::completion_future marker;
hipError_t e = hipSuccess;
try {
stream->locked_copySync((char*)dstArray->data + dstOffset, srcHost, count,
hipMemcpyHostToDevice);
} catch (ihipException& ex) {
e = ex._code;
}
return ihipLogStatus(e);
}
hipError_t hipMemcpyAtoH(void* dst, hipArray* srcArray, size_t srcOffset, size_t count) {
HIP_INIT_SPECIAL_API((TRACE_MCMD), dst, srcArray, srcOffset, count);
hipStream_t stream = ihipSyncAndResolveStream(hipStreamNull);
hc::completion_future marker;
hipError_t e = hipSuccess;
try {
stream->locked_copySync((char*)dst, (char*)srcArray->data + srcOffset, count,
hipMemcpyDeviceToHost);
} catch (ihipException& ex) {
e = ex._code;
}
return ihipLogStatus(e);
}
hipError_t hipMemcpy3D(const struct hipMemcpy3DParms* p) {
HIP_INIT_SPECIAL_API((TRACE_MCMD), p);
hipError_t e = hipSuccess;
if (p) {
size_t byteSize;
size_t depth;
size_t height;
size_t widthInBytes;
size_t dstWidthInbytes;
size_t srcPitch;
size_t dstPitch;
void* srcPtr;
void* dstPtr;
size_t ySize;
if (p->dstArray != nullptr) {
if (p->dstArray->isDrv == false) {
switch (p->dstArray->desc.f) {
case hipChannelFormatKindSigned:
byteSize = sizeof(int);
break;
case hipChannelFormatKindUnsigned:
byteSize = sizeof(unsigned int);
break;
case hipChannelFormatKindFloat:
byteSize = sizeof(float);
break;
case hipChannelFormatKindNone:
byteSize = sizeof(size_t);
break;
default:
byteSize = 0;
break;
}
depth = p->extent.depth;
height = p->extent.height;
widthInBytes = p->extent.width * byteSize;
srcPitch = p->srcPtr.pitch;
srcPtr = p->srcPtr.ptr;
ySize = p->srcPtr.ysize;
dstWidthInbytes = p->dstArray->width * byteSize;
dstPtr = p->dstArray->data;
} else {
depth = p->Depth;
height = p->Height;
widthInBytes = p->WidthInBytes;
dstWidthInbytes = p->dstArray->width * 4;
srcPitch = p->srcPitch;
srcPtr = (void*)p->srcHost;
ySize = p->srcHeight;
dstPtr = p->dstArray->data;
}
} else {
// Non array destination
depth = p->extent.depth;
height = p->extent.height;
widthInBytes = p->extent.width;
srcPitch = p->srcPtr.pitch;
srcPtr = p->srcPtr.ptr;
dstPtr = p->dstPtr.ptr;
ySize = p->srcPtr.ysize;
dstWidthInbytes = p->dstPtr.pitch;
}
hipStream_t stream = ihipSyncAndResolveStream(hipStreamNull);
hc::completion_future marker;
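        // Walk the extent slice by slice and row by row: source rows are srcPitch apart with
        // ySize rows per slice; destination rows are dstWidthInbytes apart with `height` rows
        // per slice.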
try {
for (int i = 0; i < depth; i++) {
for (int j = 0; j < height; j++) {
                    // TODO: handle the case where p->srcPos or p->dstPos is not 0.
unsigned char* src =
(unsigned char*)srcPtr + i * ySize * srcPitch + j * srcPitch;
unsigned char* dst =
(unsigned char*)dstPtr + i * height * dstWidthInbytes + j * dstWidthInbytes;
stream->locked_copySync(dst, src, widthInBytes, p->kind);
}
}
        } catch (ihipException& ex) {
e = ex._code;
}
} else {
e = hipErrorInvalidValue;
}
return ihipLogStatus(e);
}
namespace {
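// Grid-stride fill: each thread writes element idx, then advances by the total number of
// launched threads until n elements have been written, so any n is covered by any launch size.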
template <uint32_t block_dim, typename RandomAccessIterator, typename N, typename T>
__global__ void hip_fill_n(RandomAccessIterator f, N n, T value) {
const uint32_t grid_dim = gridDim.x * blockDim.x;
size_t idx = blockIdx.x * block_dim + threadIdx.x;
while (idx < n) {
__builtin_memcpy(reinterpret_cast<void*>(&f[idx]), reinterpret_cast<const void*>(&value),
sizeof(T));
idx += grid_dim;
}
}
template <typename T, typename std::enable_if<std::is_integral<T>{}>::type* = nullptr>
inline const T& clamp_integer(const T& x, const T& lower, const T& upper) {
assert(!(upper < lower));
return std::min(upper, std::max(x, lower));
}
} // namespace
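// Note: despite its name, the `sizeBytes` parameter below is forwarded to hip_fill_n as an
// element count of T; the dword fast path in ihipMemset divides the byte size by
// sizeof(uint32_t) accordingly.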
template <typename T>
void ihipMemsetKernel(hipStream_t stream, T* ptr, T val, size_t sizeBytes) {
static constexpr uint32_t block_dim = 256;
const uint32_t grid_dim = clamp_integer<size_t>(sizeBytes / block_dim, 1, UINT32_MAX);
hipLaunchKernelGGL(hip_fill_n<block_dim>, dim3(grid_dim), dim3{block_dim}, 0u, stream, ptr,
sizeBytes, std::move(val));
}
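// Byte memsets whose size is a multiple of 4 take a fast path below: the byte value is
// replicated into a 32-bit word (e.g. 0xAB -> 0xABABABAB) and written one dword per work-item.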
typedef enum ihipMemsetDataType {
    ihipMemsetDataTypeChar = 0,
    ihipMemsetDataTypeShort = 1,
    ihipMemsetDataTypeInt = 2
} ihipMemsetDataType;
hipError_t ihipMemset(void* dst, int value, size_t sizeBytes, hipStream_t stream,
                      enum ihipMemsetDataType copyDataType) {
    hipError_t e = hipSuccess;
    if (stream && (dst != NULL)) {
        if (copyDataType == ihipMemsetDataTypeChar) {
            if ((sizeBytes & 0x3) == 0) {
                // use a faster dword-per-workitem copy:
                try {
                    value = value & 0xff;
                    uint32_t value32 = (value << 24) | (value << 16) | (value << 8) | (value);
                    ihipMemsetKernel<uint32_t>(stream, static_cast<uint32_t*>(dst), value32,
                                               sizeBytes / sizeof(uint32_t));
                } catch (std::exception& ex) {
                    e = hipErrorInvalidValue;
                }
            } else {
                // use a slow byte-per-workitem copy:
                try {
                    ihipMemsetKernel<char>(stream, static_cast<char*>(dst), value, sizeBytes);
                } catch (std::exception& ex) {
                    e = hipErrorInvalidValue;
                }
            }
        } else {
            if (copyDataType == ihipMemsetDataTypeInt) {  // 4 bytes per value
                try {
                    ihipMemsetKernel<uint32_t>(stream, static_cast<uint32_t*>(dst), value,
                                               sizeBytes);
                } catch (std::exception& ex) {
                    e = hipErrorInvalidValue;
                }
            } else if (copyDataType == ihipMemsetDataTypeShort) {
                try {
                    value = value & 0xffff;
                    ihipMemsetKernel<uint16_t>(stream, static_cast<uint16_t*>(dst), value,
                                               sizeBytes);
                } catch (std::exception& ex) {
                    e = hipErrorInvalidValue;
                }
            }
        }
        if (HIP_API_BLOCKING) {
            tprintf(DB_SYNC, "%s LAUNCH_BLOCKING wait for hipMemsetAsync.\n",
                    ToString(stream).c_str());
            stream->locked_wait();
        }
    } else {
        e = hipErrorInvalidValue;
    }
    return e;
}
// TODO-sync: function is async unless target is pinned host memory - then these are fully sync.
hipError_t hipMemsetAsync(void* dst, int value, size_t sizeBytes, hipStream_t stream) {
HIP_INIT_SPECIAL_API((TRACE_MCMD), dst, value, sizeBytes, stream);
hipError_t e = hipSuccess;
stream = ihipSyncAndResolveStream(stream);
e = ihipMemset(dst, value, sizeBytes, stream, ihipMemsetDataTypeChar);
return ihipLogStatus(e);
};
hipError_t hipMemset(void* dst, int value, size_t sizeBytes) {
HIP_INIT_SPECIAL_API((TRACE_MCMD), dst, value, sizeBytes);
hipError_t e = hipSuccess;
hipStream_t stream = hipStreamNull;
stream = ihipSyncAndResolveStream(stream);
if (stream) {
e = ihipMemset(dst, value, sizeBytes, stream, ihipMemsetDataTypeChar);
stream->locked_wait();
} else {
e = hipErrorInvalidValue;
}
return ihipLogStatus(e);
}
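// Usage sketch (assumed caller code, not part of this file): hipMemset above
// waits on the null stream before returning, while hipMemsetAsync merely
// enqueues the fill:
//
//   hipMemset(devPtr, 0, nbytes);              // fill is complete on return
//   hipMemsetAsync(devPtr, 0, nbytes, stream); // complete only after
//   hipStreamSynchronize(stream);              // the stream is synchronized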
hipError_t hipMemset2D(void* dst, size_t pitch, int value, size_t width, size_t height) {
HIP_INIT_SPECIAL_API((TRACE_MCMD), dst, pitch, value, width, height);
hipError_t e = hipSuccess;
hipStream_t stream = hipStreamNull;
stream = ihipSyncAndResolveStream(stream);
if (stream) {
size_t sizeBytes = pitch * height;
e = ihipMemset(dst, value, sizeBytes, stream, ihipMemsetDataTypeChar);
stream->locked_wait();
} else {
e = hipErrorInvalidValue;
}
return ihipLogStatus(e);
}
hipError_t hipMemset2DAsync(void* dst, size_t pitch, int value, size_t width, size_t height, hipStream_t stream )
{
HIP_INIT_SPECIAL_API((TRACE_MCMD), dst, pitch, value, width, height, stream);
hipError_t e = hipSuccess;
stream = ihipSyncAndResolveStream(stream);
if (stream) {
size_t sizeBytes = pitch * height;
e = ihipMemset(dst, value, sizeBytes, stream, ihipMemsetDataTypeChar);
} else {
e = hipErrorInvalidValue;
}
return ihipLogStatus(e);
};
hipError_t hipMemsetD8(hipDeviceptr_t dst, unsigned char value, size_t sizeBytes) {
HIP_INIT_SPECIAL_API((TRACE_MCMD), dst, value, sizeBytes);
hipError_t e = hipSuccess;
hipStream_t stream = hipStreamNull;
stream = ihipSyncAndResolveStream(stream);
if (stream) {
e = ihipMemset(dst, value, sizeBytes, stream, ihipMemsetDataTypeChar);
stream->locked_wait();
} else {
e = hipErrorInvalidValue;
}
return ihipLogStatus(e);
}
hipError_t hipMemGetInfo(size_t* free, size_t* total) {
HIP_INIT_API(free, total);
hipError_t e = hipSuccess;
ihipCtx_t* ctx = ihipGetTlsDefaultCtx();
if (ctx) {
auto device = ctx->getWriteableDevice();
if (total) {
*total = device->_props.totalGlobalMem;
} else {
e = hipErrorInvalidValue;
}
if (free) {
// TODO - replace with kernel-level for reporting free memory:
size_t deviceMemSize, hostMemSize, userMemSize;
hc::am_memtracker_sizeinfo(device->_acc, &deviceMemSize, &hostMemSize, &userMemSize);
*free = device->_props.totalGlobalMem - deviceMemSize;
// Deduct the amount of memory from the free memory reported from the system
if (HIP_HIDDEN_FREE_MEM) *free -= (size_t)HIP_HIDDEN_FREE_MEM * 1024 * 1024;
} else {
e = hipErrorInvalidValue;
}
} else {
e = hipErrorInvalidDevice;
}
return ihipLogStatus(e);
}
hipError_t hipMemPtrGetInfo(void* ptr, size_t* size) {
HIP_INIT_API(ptr, size);
hipError_t e = hipSuccess;
if (ptr != nullptr && size != nullptr) {
hc::accelerator acc;
#if (__hcc_workweek__ >= 17332)
hc::AmPointerInfo amPointerInfo(NULL, NULL, NULL, 0, acc, 0, 0);
#else
hc::AmPointerInfo amPointerInfo(NULL, NULL, 0, acc, 0, 0);
#endif
am_status_t status = hc::am_memtracker_getinfo(&amPointerInfo, ptr);
if (status == AM_SUCCESS) {
*size = amPointerInfo._sizeBytes;
} else {
e = hipErrorInvalidValue;
}
} else {
e = hipErrorInvalidValue;
}
return ihipLogStatus(e);
}
hipError_t hipFree(void* ptr) {
HIP_INIT_SPECIAL_API((TRACE_MEM), ptr);
hipError_t hipStatus = hipErrorInvalidDevicePointer;
// Synchronize to ensure all work has finished.
ihipGetTlsDefaultCtx()->locked_waitAllStreams(); // ignores non-blocking streams, this waits
// for all activity to finish.
if (ptr) {
hc::accelerator acc;
#if (__hcc_workweek__ >= 17332)
hc::AmPointerInfo amPointerInfo(NULL, NULL, NULL, 0, acc, 0, 0);
#else
hc::AmPointerInfo amPointerInfo(NULL, NULL, 0, acc, 0, 0);
#endif
am_status_t status = hc::am_memtracker_getinfo(&amPointerInfo, ptr);
if (status == AM_SUCCESS) {
if (amPointerInfo._hostPointer == NULL) {
hc::am_free(ptr);
hipStatus = hipSuccess;
}
}
} else {
// free NULL pointer succeeds and is common technique to initialize runtime
hipStatus = hipSuccess;
}
return ihipLogStatus(hipStatus);
}
hipError_t hipHostFree(void* ptr) {
HIP_INIT_SPECIAL_API((TRACE_MEM), ptr);
// Synchronize to ensure all work has finished.
ihipGetTlsDefaultCtx()->locked_waitAllStreams(); // ignores non-blocking streams, this waits
// for all activity to finish.
hipError_t hipStatus = hipErrorInvalidValue;
if (ptr) {
hc::accelerator acc;
#if (__hcc_workweek__ >= 17332)
hc::AmPointerInfo amPointerInfo(NULL, NULL, NULL, 0, acc, 0, 0);
#else
hc::AmPointerInfo amPointerInfo(NULL, NULL, 0, acc, 0, 0);
#endif
am_status_t status = hc::am_memtracker_getinfo(&amPointerInfo, ptr);
if (status == AM_SUCCESS) {
if (amPointerInfo._hostPointer == ptr) {
hc::am_free(ptr);
hipStatus = hipSuccess;
}
}
} else {
// free NULL pointer succeeds and is common technique to initialize runtime
hipStatus = hipSuccess;
}
return ihipLogStatus(hipStatus);
};
// Deprecated:
hipError_t hipFreeHost(void* ptr) { return hipHostFree(ptr); }
hipError_t hipFreeArray(hipArray* array) {
HIP_INIT_SPECIAL_API((TRACE_MEM), array);
hipError_t hipStatus = hipErrorInvalidDevicePointer;
// Synchronize to ensure all work has finished.
ihipGetTlsDefaultCtx()->locked_waitAllStreams(); // ignores non-blocking streams, this waits
// for all activity to finish.
if (array->data) {
hc::accelerator acc;
#if (__hcc_workweek__ >= 17332)
hc::AmPointerInfo amPointerInfo(NULL, NULL, NULL, 0, acc, 0, 0);
#else
hc::AmPointerInfo amPointerInfo(NULL, NULL, 0, acc, 0, 0);
#endif
am_status_t status = hc::am_memtracker_getinfo(&amPointerInfo, array->data);
if (status == AM_SUCCESS) {
if (amPointerInfo._hostPointer == NULL) {
hc::am_free(array->data);
hipStatus = hipSuccess;
}
}
}
return ihipLogStatus(hipStatus);
}
hipError_t hipMemGetAddressRange(hipDeviceptr_t* pbase, size_t* psize, hipDeviceptr_t dptr) {
HIP_INIT_API(pbase, psize, dptr);
hipError_t hipStatus = hipSuccess;
hc::accelerator acc;
#if (__hcc_workweek__ >= 17332)
hc::AmPointerInfo amPointerInfo(NULL, NULL, NULL, 0, acc, 0, 0);
#else
hc::AmPointerInfo amPointerInfo(NULL, NULL, 0, acc, 0, 0);
#endif
am_status_t status = hc::am_memtracker_getinfo(&amPointerInfo, dptr);
if (status == AM_SUCCESS) {
*pbase = amPointerInfo._devicePointer;
*psize = amPointerInfo._sizeBytes;
} else
hipStatus = hipErrorInvalidDevicePointer;
return ihipLogStatus(hipStatus);
}
// TODO: IPC implementation:
hipError_t hipIpcGetMemHandle(hipIpcMemHandle_t* handle, void* devPtr) {
HIP_INIT_API(handle, devPtr);
hipError_t hipStatus = hipSuccess;
// Get the size of allocated pointer
size_t psize = 0u;
hc::accelerator acc;
if ((handle == NULL) || (devPtr == NULL)) {
hipStatus = hipErrorInvalidResourceHandle;
} else {
#if (__hcc_workweek__ >= 17332)
hc::AmPointerInfo amPointerInfo(NULL, NULL, NULL, 0, acc, 0, 0);
#else
hc::AmPointerInfo amPointerInfo(NULL, NULL, 0, acc, 0, 0);
#endif
am_status_t status = hc::am_memtracker_getinfo(&amPointerInfo, devPtr);
if (status == AM_SUCCESS) {
psize = (size_t)amPointerInfo._sizeBytes;
} else {
hipStatus = hipErrorInvalidResourceHandle;
}
ihipIpcMemHandle_t* iHandle = (ihipIpcMemHandle_t*)handle;
// Save the size of the pointer to hipIpcMemHandle
iHandle->psize = psize;
#if USE_IPC
// Create HSA ipc memory
hsa_status_t hsa_status =
hsa_amd_ipc_memory_create(devPtr, psize, (hsa_amd_ipc_memory_t*)&(iHandle->ipc_handle));
if (hsa_status != HSA_STATUS_SUCCESS) hipStatus = hipErrorMemoryAllocation;
#else
hipStatus = hipErrorRuntimeOther;
#endif
}
return ihipLogStatus(hipStatus);
}
hipError_t hipIpcOpenMemHandle(void** devPtr, hipIpcMemHandle_t handle, unsigned int flags) {
HIP_INIT_API(devPtr, &handle, flags);
hipError_t hipStatus = hipSuccess;
if (devPtr == NULL) {
hipStatus = hipErrorInvalidValue;
} else {
#if USE_IPC
// Get the current device agent.
hc::accelerator acc;
hsa_agent_t* agent = static_cast<hsa_agent_t*>(acc.get_hsa_agent());
if (!agent) return hipErrorInvalidResourceHandle;
ihipIpcMemHandle_t* iHandle = (ihipIpcMemHandle_t*)&handle;
// Attach ipc memory
auto ctx = ihipGetTlsDefaultCtx();
{
LockedAccessor_CtxCrit_t crit(ctx->criticalData());
        // peerCnt always stores self as well, so the attach below maps the memory for this agent and all its peers
hsa_status_t hsa_status = hsa_amd_ipc_memory_attach(
(hsa_amd_ipc_memory_t*)&(iHandle->ipc_handle), iHandle->psize, crit->peerCnt(),
crit->peerAgents(), devPtr);
if (hsa_status != HSA_STATUS_SUCCESS) hipStatus = hipErrorMapBufferObjectFailed;
}
#else
hipStatus = hipErrorRuntimeOther;
#endif
}
return ihipLogStatus(hipStatus);
}
hipError_t hipIpcCloseMemHandle(void* devPtr) {
HIP_INIT_API(devPtr);
hipError_t hipStatus = hipSuccess;
if (devPtr == NULL) {
hipStatus = hipErrorInvalidValue;
} else {
#if USE_IPC
hsa_status_t hsa_status = hsa_amd_ipc_memory_detach(devPtr);
if (hsa_status != HSA_STATUS_SUCCESS) return hipErrorInvalidResourceHandle;
#else
hipStatus = hipErrorRuntimeOther;
#endif
}
return ihipLogStatus(hipStatus);
}
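// Lifecycle sketch (hypothetical two-process usage, not part of this file):
//
//   // exporting process
//   hipMalloc(&ptr, size);
//   hipIpcMemHandle_t handle;
//   hipIpcGetMemHandle(&handle, ptr);        // ship 'handle' to the peer
//
//   // importing process
//   void* mapped;
//   hipIpcOpenMemHandle(&mapped, handle, 0); // flags are unused above
//   /* ... use 'mapped' ... */
//   hipIpcCloseMemHandle(mapped);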
// hipError_t hipIpcOpenEventHandle(hipEvent_t* event, hipIpcEventHandle_t handle){
// return hipSuccess;
// }
| 1 | 6,609 | HIP runtime is compiled using a C++ compiler. So comparison to both NULL as well as 0 does not make sense. Just comparing to NULL is sufficient. | ROCm-Developer-Tools-HIP | cpp |
@@ -0,0 +1,10 @@
+<%= t("mailer.welcome_mailer.welcome_notification.header") %>
+
+<%= t("mailer.welcome_mailer.welcome_notification.para1") %>
+
+<%= t("mailer.welcome_mailer.welcome_notification.para2", help_url: help_url('') ) %>
+
+<%= t("mailer.welcome_mailer.welcome_notification.para3", feedback_url: feedback_url ) %>
+
+<%= t("mailer.welcome_mailer.welcome_notification.signature") %>
+ | 1 | 1 | 16,624 | these urls are still in `a` tags so we should probably include these links separately for a non-HTML version | 18F-C2 | rb |
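A minimal sketch (hypothetical locale key, not part of the patch) of emitting the links on their own lines for this plain-text template, rather than inside anchor tags in the translation:

<%= t("mailer.welcome_mailer.welcome_notification.para2_text") %>
<%= help_url('') %>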
|
@@ -507,8 +507,18 @@ func (s *Server) configureAccounts() error {
if opts.SystemAccount != _EMPTY_ {
// Lock may be acquired in lookupAccount, so release to call lookupAccount.
s.mu.Unlock()
- _, err := s.lookupAccount(opts.SystemAccount)
+ acc, err := s.lookupAccount(opts.SystemAccount)
s.mu.Lock()
+ if err == nil && s.sys != nil && acc != s.sys.account {
+ // sys.account.clients (including internal client)/respmap/etc... are transferred separately
+ s.sys.account = acc
+ s.mu.Unlock()
+ // acquires server lock separately
+ s.addSystemAccountExports(acc)
+ // can't hold the lock as go routine reading it may be waiting for lock as well
+ s.sys.resetCh <- struct{}{}
+ s.mu.Lock()
+ }
if err != nil {
return fmt.Errorf("error resolving system account: %v", err)
} | 1 | // Copyright 2012-2019 The NATS Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package server
import (
"bytes"
"context"
"crypto/tls"
"encoding/json"
"flag"
"fmt"
"io/ioutil"
"math/rand"
"net"
"net/http"
"os"
"path/filepath"
"runtime"
"strconv"
"strings"
"sync"
"sync/atomic"
"time"
// Allow dynamic profiling.
_ "net/http/pprof"
"github.com/nats-io/jwt"
"github.com/nats-io/nats-server/v2/logger"
"github.com/nats-io/nkeys"
)
const (
// Time to wait before starting closing clients when in LD mode.
lameDuckModeDefaultInitialDelay = int64(10 * time.Second)
// Interval for the first PING for non client connections.
firstPingInterval = time.Second
// This is for the first ping for client connections.
firstClientPingInterval = 2 * time.Second
)
// Make this a variable so that we can change during tests
var lameDuckModeInitialDelay = int64(lameDuckModeDefaultInitialDelay)
// Info is the information sent to clients, routes, gateways, and leaf nodes,
// to help them understand information about this server.
type Info struct {
ID string `json:"server_id"`
Name string `json:"server_name"`
Version string `json:"version"`
Proto int `json:"proto"`
GitCommit string `json:"git_commit,omitempty"`
GoVersion string `json:"go"`
Host string `json:"host"`
Port int `json:"port"`
AuthRequired bool `json:"auth_required,omitempty"`
TLSRequired bool `json:"tls_required,omitempty"`
TLSVerify bool `json:"tls_verify,omitempty"`
MaxPayload int32 `json:"max_payload"`
IP string `json:"ip,omitempty"`
CID uint64 `json:"client_id,omitempty"`
ClientIP string `json:"client_ip,omitempty"`
Nonce string `json:"nonce,omitempty"`
Cluster string `json:"cluster,omitempty"`
ClientConnectURLs []string `json:"connect_urls,omitempty"` // Contains URLs a client can connect to.
// Route Specific
Import *SubjectPermission `json:"import,omitempty"`
Export *SubjectPermission `json:"export,omitempty"`
// Gateways Specific
Gateway string `json:"gateway,omitempty"` // Name of the origin Gateway (sent by gateway's INFO)
GatewayURLs []string `json:"gateway_urls,omitempty"` // Gateway URLs in the originating cluster (sent by gateway's INFO)
GatewayURL string `json:"gateway_url,omitempty"` // Gateway URL on that server (sent by route's INFO)
GatewayCmd byte `json:"gateway_cmd,omitempty"` // Command code for the receiving server to know what to do
GatewayCmdPayload []byte `json:"gateway_cmd_payload,omitempty"` // Command payload when needed
GatewayNRP bool `json:"gateway_nrp,omitempty"` // Uses new $GNR. prefix for mapped replies
// LeafNode Specific
LeafNodeURLs []string `json:"leafnode_urls,omitempty"` // LeafNode URLs that the server can reconnect to.
}
// Server is our main struct.
type Server struct {
gcid uint64
stats
mu sync.Mutex
kp nkeys.KeyPair
prand *rand.Rand
info Info
configFile string
optsMu sync.RWMutex
opts *Options
running bool
shutdown bool
listener net.Listener
gacc *Account
sys *internal
accounts sync.Map
tmpAccounts sync.Map // Temporarily stores accounts that are being built
activeAccounts int32
accResolver AccountResolver
clients map[uint64]*client
routes map[uint64]*client
routesByHash sync.Map
hash []byte
remotes map[string]*client
leafs map[uint64]*client
users map[string]*User
nkeys map[string]*NkeyUser
totalClients uint64
closed *closedRingBuffer
done chan bool
start time.Time
http net.Listener
httpHandler http.Handler
profiler net.Listener
httpReqStats map[string]uint64
routeListener net.Listener
routeInfo Info
routeInfoJSON []byte
leafNodeListener net.Listener
leafNodeInfo Info
leafNodeInfoJSON []byte
leafNodeOpts struct {
resolver netResolver
dialTimeout time.Duration
}
quitCh chan struct{}
shutdownComplete chan struct{}
// Tracking Go routines
grMu sync.Mutex
grTmpClients map[uint64]*client
grRunning bool
grWG sync.WaitGroup // to wait on various go routines
cproto int64 // number of clients supporting async INFO
configTime time.Time // last time config was loaded
logging struct {
sync.RWMutex
logger Logger
trace int32
debug int32
traceSysAcc int32
}
clientConnectURLs []string
// Used internally for quick look-ups.
clientConnectURLsMap map[string]struct{}
lastCURLsUpdate int64
// For Gateways
gatewayListener net.Listener // Accept listener
gateway *srvGateway
// Used by tests to check that http.Servers do
// not set any timeout.
monitoringServer *http.Server
profilingServer *http.Server
// LameDuck mode
ldm bool
ldmCh chan bool
// Trusted public operator keys.
trustedKeys []string
// We use this to minimize mem copies for request to monitoring
// endpoint /varz (when it comes from http).
varzMu sync.Mutex
varz *Varz
// This is set during a config reload if we detect that we have
// added/removed routes. The monitoring code then check that
// to know if it should update the cluster's URLs array.
varzUpdateRouteURLs bool
 // Keeps a sublist of subscriptions attached to leafnode connections
// for the $GNR.*.*.*.> subject so that a server can send back a mapped
// gateway reply.
gwLeafSubs *Sublist
// Used for expiration of mapped GW replies
gwrm struct {
w int32
ch chan time.Duration
m sync.Map
}
}
// Make sure all are 64bits for atomic use
type stats struct {
inMsgs int64
outMsgs int64
inBytes int64
outBytes int64
slowConsumers int64
}
// New will setup a new server struct after parsing the options.
// DEPRECATED: Use NewServer(opts)
func New(opts *Options) *Server {
s, _ := NewServer(opts)
return s
}
// NewServer will setup a new server struct after parsing the options.
// Could return an error if options can not be validated.
func NewServer(opts *Options) (*Server, error) {
setBaselineOptions(opts)
// Process TLS options, including whether we require client certificates.
tlsReq := opts.TLSConfig != nil
verify := (tlsReq && opts.TLSConfig.ClientAuth == tls.RequireAndVerifyClientCert)
// Created server's nkey identity.
kp, _ := nkeys.CreateServer()
pub, _ := kp.PublicKey()
serverName := pub
if opts.ServerName != "" {
serverName = opts.ServerName
}
 // Validate some options. This is here because we cannot assume that
 // the server will always be started with configuration parsing (which could
 // report issues). Its options can be (incorrectly) set by hand when the
 // server is embedded. If there is an error, return nil and the error.
if err := validateOptions(opts); err != nil {
return nil, err
}
info := Info{
ID: pub,
Version: VERSION,
Proto: PROTO,
GitCommit: gitCommit,
GoVersion: runtime.Version(),
Name: serverName,
Host: opts.Host,
Port: opts.Port,
AuthRequired: false,
TLSRequired: tlsReq,
TLSVerify: verify,
MaxPayload: opts.MaxPayload,
}
now := time.Now()
s := &Server{
kp: kp,
configFile: opts.ConfigFile,
info: info,
prand: rand.New(rand.NewSource(time.Now().UnixNano())),
opts: opts,
done: make(chan bool, 1),
start: now,
configTime: now,
gwLeafSubs: NewSublistWithCache(),
}
// Trusted root operator keys.
if !s.processTrustedKeys() {
return nil, fmt.Errorf("Error processing trusted operator keys")
}
s.mu.Lock()
defer s.mu.Unlock()
// Ensure that non-exported options (used in tests) are properly set.
s.setLeafNodeNonExportedOptions()
// Used internally for quick look-ups.
s.clientConnectURLsMap = make(map[string]struct{})
// Call this even if there is no gateway defined. It will
// initialize the structure so we don't have to check for
// it to be nil or not in various places in the code.
if err := s.newGateway(opts); err != nil {
return nil, err
}
if s.gateway.enabled {
s.info.Cluster = s.getGatewayName()
}
// This is normally done in the AcceptLoop, once the
// listener has been created (possibly with random port),
// but since some tests may expect the INFO to be properly
// set after New(), let's do it now.
s.setInfoHostPortAndGenerateJSON()
// For tracking clients
s.clients = make(map[uint64]*client)
// For tracking closed clients.
s.closed = newClosedRingBuffer(opts.MaxClosedClients)
// For tracking connections that are not yet registered
// in s.routes, but for which readLoop has started.
s.grTmpClients = make(map[uint64]*client)
// For tracking routes and their remote ids
s.routes = make(map[uint64]*client)
s.remotes = make(map[string]*client)
// For tracking leaf nodes.
s.leafs = make(map[uint64]*client)
// Used to kick out all go routines possibly waiting on server
// to shutdown.
s.quitCh = make(chan struct{})
// Closed when Shutdown() is complete. Allows WaitForShutdown() to block
// waiting for complete shutdown.
s.shutdownComplete = make(chan struct{})
// For tracking accounts
if err := s.configureAccounts(); err != nil {
return nil, err
}
 // If there is a URL account resolver, do a basic test to see if anyone is home.
// Do this after configureAccounts() which calls configureResolver(), which will
// set TLSConfig if specified.
if ar := opts.AccountResolver; ar != nil {
if ur, ok := ar.(*URLAccResolver); ok {
if _, err := ur.Fetch(""); err != nil {
return nil, err
}
}
}
// In local config mode, check that leafnode configuration
// refers to account that exist.
if len(opts.TrustedOperators) == 0 {
checkAccountExists := func(accName string) error {
if accName == _EMPTY_ {
return nil
}
if _, ok := s.accounts.Load(accName); !ok {
return fmt.Errorf("cannot find account %q specified in leafnode authorization", accName)
}
return nil
}
if err := checkAccountExists(opts.LeafNode.Account); err != nil {
return nil, err
}
for _, lu := range opts.LeafNode.Users {
if lu.Account == nil {
continue
}
if err := checkAccountExists(lu.Account.Name); err != nil {
return nil, err
}
}
for _, r := range opts.LeafNode.Remotes {
if r.LocalAccount == _EMPTY_ {
continue
}
if _, ok := s.accounts.Load(r.LocalAccount); !ok {
return nil, fmt.Errorf("no local account %q for remote leafnode", r.LocalAccount)
}
}
}
// Used to setup Authorization.
s.configureAuthorization()
// Start signal handler
s.handleSignals()
return s, nil
}
// ClientURL returns the URL used to connect clients. Helpful in testing
// when we designate a random client port (-1).
func (s *Server) ClientURL() string {
// FIXME(dlc) - should we add in user and pass if defined single?
opts := s.getOpts()
scheme := "nats://"
if opts.TLSConfig != nil {
scheme = "tls://"
}
return fmt.Sprintf("%s%s:%d", scheme, opts.Host, opts.Port)
}
func validateOptions(o *Options) error {
// Check that the trust configuration is correct.
if err := validateTrustedOperators(o); err != nil {
return err
}
// Check on leaf nodes which will require a system
// account when gateways are also configured.
if err := validateLeafNode(o); err != nil {
return err
}
// Check that authentication is properly configured.
if err := validateAuth(o); err != nil {
return err
}
// Check that gateway is properly configured. Returns no error
// if there is no gateway defined.
return validateGatewayOptions(o)
}
func (s *Server) getOpts() *Options {
s.optsMu.RLock()
opts := s.opts
s.optsMu.RUnlock()
return opts
}
func (s *Server) setOpts(opts *Options) {
s.optsMu.Lock()
s.opts = opts
s.optsMu.Unlock()
}
func (s *Server) globalAccount() *Account {
s.mu.Lock()
gacc := s.gacc
s.mu.Unlock()
return gacc
}
// Used to setup Accounts.
// Lock is held upon entry.
func (s *Server) configureAccounts() error {
// Create global account.
if s.gacc == nil {
s.gacc = NewAccount(globalAccountName)
s.registerAccountNoLock(s.gacc)
}
opts := s.opts
// Check opts and walk through them. We need to copy them here
// so that we do not keep a real one sitting in the options.
for _, acc := range s.opts.Accounts {
a := acc.shallowCopy()
acc.sl = nil
acc.clients = nil
s.registerAccountNoLock(a)
}
// Now that we have this we need to remap any referenced accounts in
// import or export maps to the new ones.
swapApproved := func(ea *exportAuth) {
for sub, a := range ea.approved {
var acc *Account
if v, ok := s.accounts.Load(a.Name); ok {
acc = v.(*Account)
}
ea.approved[sub] = acc
}
}
s.accounts.Range(func(k, v interface{}) bool {
acc := v.(*Account)
// Exports
for _, ea := range acc.exports.streams {
if ea != nil {
swapApproved(&ea.exportAuth)
}
}
for _, ea := range acc.exports.services {
if ea != nil {
swapApproved(&ea.exportAuth)
}
}
// Imports
for _, si := range acc.imports.streams {
if v, ok := s.accounts.Load(si.acc.Name); ok {
si.acc = v.(*Account)
}
}
for _, si := range acc.imports.services {
if v, ok := s.accounts.Load(si.acc.Name); ok {
si.acc = v.(*Account)
}
}
return true
})
// Check for configured account resolvers.
if err := s.configureResolver(); err != nil {
return err
}
// Set the system account if it was configured.
if opts.SystemAccount != _EMPTY_ {
// Lock may be acquired in lookupAccount, so release to call lookupAccount.
s.mu.Unlock()
_, err := s.lookupAccount(opts.SystemAccount)
s.mu.Lock()
if err != nil {
return fmt.Errorf("error resolving system account: %v", err)
}
}
return nil
}
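// Illustrative sketch (not part of this file) of the locking rule the patch
// above relies on when signaling s.sys.resetCh: never send on an unbuffered
// channel while holding a mutex the receiving goroutine also needs, or both
// sides can block forever (doWork is hypothetical):
//
//	var mu sync.Mutex
//	resetCh := make(chan struct{}) // unbuffered
//	go func() {
//		for {
//			mu.Lock() // receiver parked here while the sender holds mu...
//			doWork()
//			mu.Unlock()
//			<-resetCh // ...so it never reaches this receive
//		}
//	}()
//	mu.Lock()
//	resetCh <- struct{}{} // deadlock: send blocks, receiver blocks on mu
//	mu.Unlock()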
// Setup the account resolver. For memory resolver, make sure the JWTs are
// properly formed but do not enforce expiration etc.
func (s *Server) configureResolver() error {
opts := s.getOpts()
s.accResolver = opts.AccountResolver
if opts.AccountResolver != nil {
// For URL resolver, set the TLSConfig if specified.
if opts.AccountResolverTLSConfig != nil {
if ar, ok := opts.AccountResolver.(*URLAccResolver); ok {
if t, ok := ar.c.Transport.(*http.Transport); ok {
t.CloseIdleConnections()
t.TLSClientConfig = opts.AccountResolverTLSConfig.Clone()
}
}
}
if len(opts.resolverPreloads) > 0 {
if _, ok := s.accResolver.(*MemAccResolver); !ok {
return fmt.Errorf("resolver preloads only available for resolver type MEM")
}
for k, v := range opts.resolverPreloads {
_, err := jwt.DecodeAccountClaims(v)
if err != nil {
return fmt.Errorf("preload account error for %q: %v", k, err)
}
s.accResolver.Store(k, v)
}
}
}
return nil
}
// This will check preloads for validation issues.
func (s *Server) checkResolvePreloads() {
opts := s.getOpts()
// We can just check the read-only opts versions here, that way we do not need
// to grab server lock or access s.accResolver.
for k, v := range opts.resolverPreloads {
claims, err := jwt.DecodeAccountClaims(v)
if err != nil {
s.Errorf("Preloaded account [%s] not valid", k)
}
// Check if it is expired.
vr := jwt.CreateValidationResults()
claims.Validate(vr)
if vr.IsBlocking(true) {
s.Warnf("Account [%s] has validation issues:", k)
for _, v := range vr.Issues {
s.Warnf(" - %s", v.Description)
}
}
}
}
func (s *Server) generateRouteInfoJSON() {
// New proto wants a nonce.
var raw [nonceLen]byte
nonce := raw[:]
s.generateNonce(nonce)
s.routeInfo.Nonce = string(nonce)
b, _ := json.Marshal(s.routeInfo)
pcs := [][]byte{[]byte("INFO"), b, []byte(CR_LF)}
s.routeInfoJSON = bytes.Join(pcs, []byte(" "))
}
// isTrustedIssuer will check that the issuer is a trusted public key.
// This is used to make sure an account was signed by a trusted operator.
func (s *Server) isTrustedIssuer(issuer string) bool {
s.mu.Lock()
defer s.mu.Unlock()
// If we are not running in trusted mode and there is no issuer, that is ok.
if len(s.trustedKeys) == 0 && issuer == "" {
return true
}
for _, tk := range s.trustedKeys {
if tk == issuer {
return true
}
}
return false
}
// processTrustedKeys will process binary stamped and
// options-based trusted nkeys. Returns success.
func (s *Server) processTrustedKeys() bool {
if trustedKeys != "" && !s.initStampedTrustedKeys() {
return false
} else if s.opts.TrustedKeys != nil {
for _, key := range s.opts.TrustedKeys {
if !nkeys.IsValidPublicOperatorKey(key) {
return false
}
}
s.trustedKeys = s.opts.TrustedKeys
}
return true
}
// checkTrustedKeyString will check that the string is a valid array
// of public operator nkeys.
func checkTrustedKeyString(keys string) []string {
tks := strings.Fields(keys)
if len(tks) == 0 {
return nil
}
// Walk all the keys and make sure they are valid.
for _, key := range tks {
if !nkeys.IsValidPublicOperatorKey(key) {
return nil
}
}
return tks
}
// initStampedTrustedKeys will check the stamped trusted keys
// and will set the server field 'trustedKeys'. Returns whether
// it succeeded or not.
func (s *Server) initStampedTrustedKeys() bool {
// Check to see if we have an override in options, which will cause us to fail.
if len(s.opts.TrustedKeys) > 0 {
return false
}
tks := checkTrustedKeyString(trustedKeys)
if len(tks) == 0 {
return false
}
s.trustedKeys = tks
return true
}
// PrintAndDie is exported for access in other packages.
func PrintAndDie(msg string) {
fmt.Fprintln(os.Stderr, msg)
os.Exit(1)
}
// PrintServerAndExit will print our version and exit.
func PrintServerAndExit() {
fmt.Printf("nats-server: v%s\n", VERSION)
os.Exit(0)
}
// ProcessCommandLineArgs takes the command line arguments
// validating and setting flags for handling in case any
// sub command was present.
func ProcessCommandLineArgs(cmd *flag.FlagSet) (showVersion bool, showHelp bool, err error) {
if len(cmd.Args()) > 0 {
arg := cmd.Args()[0]
switch strings.ToLower(arg) {
case "version":
return true, false, nil
case "help":
return false, true, nil
default:
return false, false, fmt.Errorf("unrecognized command: %q", arg)
}
}
return false, false, nil
}
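// Usage sketch (assumed caller wiring, not part of this file):
//
//	fs := flag.NewFlagSet("nats-server", flag.ExitOnError)
//	fs.Parse(os.Args[1:])
//	showVersion, showHelp, err := ProcessCommandLineArgs(fs)
//	switch {
//	case err != nil:
//		PrintAndDie(err.Error()) // e.g. unrecognized command "bogus"
//	case showVersion:
//		PrintServerAndExit()
//	case showHelp:
//		// print usage and exit
//	}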
// Protected check on running state
func (s *Server) isRunning() bool {
s.mu.Lock()
running := s.running
s.mu.Unlock()
return running
}
func (s *Server) logPid() error {
pidStr := strconv.Itoa(os.Getpid())
return ioutil.WriteFile(s.getOpts().PidFile, []byte(pidStr), 0660)
}
// NewAccountsAllowed returns whether or not new accounts can be created on the fly.
func (s *Server) NewAccountsAllowed() bool {
s.mu.Lock()
defer s.mu.Unlock()
return s.opts.AllowNewAccounts
}
// numReservedAccounts will return the number of reserved accounts configured in the server.
// Currently this is 1 for the global default service.
func (s *Server) numReservedAccounts() int {
return 1
}
// NumActiveAccounts reports number of active accounts on this server.
func (s *Server) NumActiveAccounts() int32 {
return atomic.LoadInt32(&s.activeAccounts)
}
// incActiveAccounts() just adds one under lock.
func (s *Server) incActiveAccounts() {
atomic.AddInt32(&s.activeAccounts, 1)
}
// decActiveAccounts() just subtracts one under lock.
func (s *Server) decActiveAccounts() {
atomic.AddInt32(&s.activeAccounts, -1)
}
// This should be used for testing only. Will be slow since we have to
// range over all accounts in the sync.Map to count.
func (s *Server) numAccounts() int {
count := 0
s.mu.Lock()
s.accounts.Range(func(k, v interface{}) bool {
count++
return true
})
s.mu.Unlock()
return count
}
// NumLoadedAccounts returns the number of loaded accounts.
func (s *Server) NumLoadedAccounts() int {
return s.numAccounts()
}
// LookupOrRegisterAccount will return the given account if known or create a new entry.
func (s *Server) LookupOrRegisterAccount(name string) (account *Account, isNew bool) {
s.mu.Lock()
defer s.mu.Unlock()
if v, ok := s.accounts.Load(name); ok {
return v.(*Account), false
}
acc := NewAccount(name)
s.registerAccountNoLock(acc)
return acc, true
}
// RegisterAccount will register an account. The account must be new
// or this call will fail.
func (s *Server) RegisterAccount(name string) (*Account, error) {
s.mu.Lock()
defer s.mu.Unlock()
if _, ok := s.accounts.Load(name); ok {
return nil, ErrAccountExists
}
acc := NewAccount(name)
s.registerAccountNoLock(acc)
return acc, nil
}
// SetSystemAccount will set the internal system account.
// If root operators are present it will also check validity.
func (s *Server) SetSystemAccount(accName string) error {
// Lookup from sync.Map first.
if v, ok := s.accounts.Load(accName); ok {
return s.setSystemAccount(v.(*Account))
}
// If we are here we do not have local knowledge of this account.
// Do this one by hand to return more useful error.
ac, jwt, err := s.fetchAccountClaims(accName)
if err != nil {
return err
}
acc := s.buildInternalAccount(ac)
acc.claimJWT = jwt
 // Due to a possible race, we need to make sure that we are not
// registering twice.
if racc := s.registerAccount(acc); racc != nil {
return nil
}
return s.setSystemAccount(acc)
}
// SystemAccount returns the system account if set.
func (s *Server) SystemAccount() *Account {
s.mu.Lock()
defer s.mu.Unlock()
if s.sys != nil {
return s.sys.account
}
return nil
}
// For internal sends.
const internalSendQLen = 4096
// Assign a system account. Should only be called once.
// This sets up a server to send and receive messages from
// inside the server itself.
func (s *Server) setSystemAccount(acc *Account) error {
if acc == nil {
return ErrMissingAccount
}
// Don't try to fix this here.
if acc.IsExpired() {
return ErrAccountExpired
}
// If we are running with trusted keys for an operator
// make sure we check the account is legit.
if !s.isTrustedIssuer(acc.Issuer) {
return ErrAccountValidation
}
s.mu.Lock()
if s.sys != nil {
s.mu.Unlock()
return ErrAccountExists
}
// This is here in an attempt to quiet the race detector and not have to place
// locks on fast path for inbound messages and checking service imports.
acc.mu.Lock()
if acc.imports.services == nil {
acc.imports.services = make(map[string]*serviceImport)
}
acc.mu.Unlock()
now := time.Now()
s.sys = &internal{
account: acc,
client: &client{srv: s, kind: SYSTEM, opts: internalOpts, msubs: -1, mpay: -1, start: now, last: now},
seq: 1,
sid: 1,
servers: make(map[string]*serverUpdate),
subs: make(map[string]msgHandler),
replies: make(map[string]msgHandler),
sendq: make(chan *pubMsg, internalSendQLen),
statsz: eventsHBInterval,
orphMax: 5 * eventsHBInterval,
chkOrph: 3 * eventsHBInterval,
}
s.sys.client.initClient()
s.sys.client.echo = false
s.sys.wg.Add(1)
s.mu.Unlock()
// Register with the account.
s.sys.client.registerWithAccount(acc)
// Start our internal loop to serialize outbound messages.
// We do our own wg here since we will stop first during shutdown.
go s.internalSendLoop(&s.sys.wg)
// Start up our general subscriptions
s.initEventTracking()
// Track for dead remote servers.
s.wrapChk(s.startRemoteServerSweepTimer)()
// Send out statsz updates periodically.
s.wrapChk(s.startStatszTimer)()
// If we have existing accounts make sure we enable account tracking.
s.mu.Lock()
s.accounts.Range(func(k, v interface{}) bool {
acc := v.(*Account)
s.enableAccountTracking(acc)
return true
})
s.mu.Unlock()
return nil
}
func (s *Server) systemAccount() *Account {
var sacc *Account
s.mu.Lock()
if s.sys != nil {
sacc = s.sys.account
}
s.mu.Unlock()
return sacc
}
// Determine if accounts should track subscriptions for
// efficient propagation.
// Lock should be held on entry.
func (s *Server) shouldTrackSubscriptions() bool {
return (s.opts.Cluster.Port != 0 || s.opts.Gateway.Port != 0)
}
// Invokes registerAccountNoLock under the protection of the server lock.
// That is, server lock is acquired/released in this function.
// See registerAccountNoLock for comment on returned value.
func (s *Server) registerAccount(acc *Account) *Account {
s.mu.Lock()
racc := s.registerAccountNoLock(acc)
s.mu.Unlock()
return racc
}
// Helper to set the sublist based on preferences.
func (s *Server) setAccountSublist(acc *Account) {
if acc != nil && acc.sl == nil {
opts := s.getOpts()
if opts != nil && opts.NoSublistCache {
acc.sl = NewSublistNoCache()
} else {
acc.sl = NewSublistWithCache()
}
}
}
// Registers an account in the server.
// Due to some locking considerations, we may end up trying
// to register the same account twice. This function will
// then return the already registered account.
// Lock should be held on entry.
func (s *Server) registerAccountNoLock(acc *Account) *Account {
// We are under the server lock. Lookup from map, if present
// return existing account.
if a, _ := s.accounts.Load(acc.Name); a != nil {
s.tmpAccounts.Delete(acc.Name)
return a.(*Account)
}
// Finish account setup and store.
s.setAccountSublist(acc)
if acc.maxnae == 0 {
acc.maxnae = DEFAULT_MAX_ACCOUNT_AE_RESPONSE_MAPS
}
if acc.maxaettl == 0 {
acc.maxaettl = DEFAULT_TTL_AE_RESPONSE_MAP
}
if acc.maxnrm == 0 {
acc.maxnrm = DEFAULT_MAX_ACCOUNT_INTERNAL_RESPONSE_MAPS
}
if acc.clients == nil {
acc.clients = make(map[*client]*client)
}
// If we are capable of routing we will track subscription
// information for efficient interest propagation.
// During config reload, it is possible that account was
// already created (global account), so use locking and
// make sure we create only if needed.
acc.mu.Lock()
// TODO(dlc)- Double check that we need this for GWs.
if acc.rm == nil && s.opts != nil && s.shouldTrackSubscriptions() {
acc.rm = make(map[string]int32)
acc.lqws = make(map[string]int32)
}
acc.srv = s
acc.mu.Unlock()
s.accounts.Store(acc.Name, acc)
s.tmpAccounts.Delete(acc.Name)
s.enableAccountTracking(acc)
return nil
}
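// Illustrative note (not in the original source): callers treat a non-nil
// return as "lost the race, adopt the already-registered account":
//
//	acc := NewAccount(name)
//	if racc := s.registerAccount(acc); racc != nil {
//		acc = racc // this name was registered first by another goroutine
//	}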
// lookupAccount is a function to return the account structure
// associated with an account name.
// Lock MUST NOT be held upon entry.
func (s *Server) lookupAccount(name string) (*Account, error) {
var acc *Account
if v, ok := s.accounts.Load(name); ok {
acc = v.(*Account)
} else if v, ok := s.tmpAccounts.Load(name); ok {
acc = v.(*Account)
}
if acc != nil {
// If we are expired and we have a resolver, then
// return the latest information from the resolver.
if acc.IsExpired() {
s.Debugf("Requested account [%s] has expired", name)
if s.AccountResolver() != nil {
if err := s.updateAccount(acc); err != nil {
// This error could mask expired, so just return expired here.
return nil, ErrAccountExpired
}
} else {
return nil, ErrAccountExpired
}
}
return acc, nil
}
// If we have a resolver see if it can fetch the account.
if s.AccountResolver() == nil {
return nil, ErrMissingAccount
}
return s.fetchAccount(name)
}
// LookupAccount is a public function to return the account structure
// associated with name.
func (s *Server) LookupAccount(name string) (*Account, error) {
return s.lookupAccount(name)
}
// This will fetch new claims and if found update the account with new claims.
// Lock MUST NOT be held upon entry.
func (s *Server) updateAccount(acc *Account) error {
// TODO(dlc) - Make configurable
if time.Since(acc.updated) < time.Second {
s.Debugf("Requested account update for [%s] ignored, too soon", acc.Name)
return ErrAccountResolverUpdateTooSoon
}
claimJWT, err := s.fetchRawAccountClaims(acc.Name)
if err != nil {
return err
}
return s.updateAccountWithClaimJWT(acc, claimJWT)
}
// updateAccountWithClaimJWT will check and apply the claim update.
// Lock MUST NOT be held upon entry.
func (s *Server) updateAccountWithClaimJWT(acc *Account, claimJWT string) error {
if acc == nil {
return ErrMissingAccount
}
acc.updated = time.Now()
if acc.claimJWT != "" && acc.claimJWT == claimJWT {
s.Debugf("Requested account update for [%s], same claims detected", acc.Name)
return ErrAccountResolverSameClaims
}
accClaims, _, err := s.verifyAccountClaims(claimJWT)
if err == nil && accClaims != nil {
acc.claimJWT = claimJWT
s.updateAccountClaims(acc, accClaims)
return nil
}
return err
}
// fetchRawAccountClaims will grab raw account claims iff we have a resolver.
// Lock is NOT held upon entry.
func (s *Server) fetchRawAccountClaims(name string) (string, error) {
accResolver := s.AccountResolver()
if accResolver == nil {
return "", ErrNoAccountResolver
}
// Need to do actual Fetch
start := time.Now()
claimJWT, err := accResolver.Fetch(name)
fetchTime := time.Since(start)
if fetchTime > time.Second {
s.Warnf("Account [%s] fetch took %v", name, fetchTime)
} else {
s.Debugf("Account [%s] fetch took %v", name, fetchTime)
}
if err != nil {
s.Warnf("Account fetch failed: %v", err)
return "", err
}
return claimJWT, nil
}
// fetchAccountClaims will attempt to fetch new claims if a resolver is present.
// Lock is NOT held upon entry.
func (s *Server) fetchAccountClaims(name string) (*jwt.AccountClaims, string, error) {
claimJWT, err := s.fetchRawAccountClaims(name)
if err != nil {
return nil, _EMPTY_, err
}
return s.verifyAccountClaims(claimJWT)
}
// verifyAccountClaims will decode and validate any account claims.
func (s *Server) verifyAccountClaims(claimJWT string) (*jwt.AccountClaims, string, error) {
accClaims, err := jwt.DecodeAccountClaims(claimJWT)
if err != nil {
return nil, _EMPTY_, err
}
vr := jwt.CreateValidationResults()
accClaims.Validate(vr)
if vr.IsBlocking(true) {
return nil, _EMPTY_, ErrAccountValidation
}
return accClaims, claimJWT, nil
}
// This will fetch an account from a resolver if defined.
// Lock is NOT held upon entry.
func (s *Server) fetchAccount(name string) (*Account, error) {
accClaims, claimJWT, err := s.fetchAccountClaims(name)
if accClaims != nil {
acc := s.buildInternalAccount(accClaims)
acc.claimJWT = claimJWT
  // Due to a possible race, if registerAccount() returns a non-nil
  // account, it means the same account was already
// registered and we should use this one.
if racc := s.registerAccount(acc); racc != nil {
// Update with the new claims in case they are new.
// Following call will return ErrAccountResolverSameClaims
// if claims are the same.
err = s.updateAccountWithClaimJWT(racc, claimJWT)
if err != nil && err != ErrAccountResolverSameClaims {
return nil, err
}
return racc, nil
}
return acc, nil
}
return nil, err
}
// Start up the server, this will block.
// Start via a Go routine if needed.
func (s *Server) Start() {
s.Noticef("Starting nats-server version %s", VERSION)
s.Debugf("Go build version %s", s.info.GoVersion)
gc := gitCommit
if gc == "" {
gc = "not set"
}
s.Noticef("Git commit [%s]", gc)
 // Check for insecure configurations.
s.checkAuthforWarnings()
// Avoid RACE between Start() and Shutdown()
s.mu.Lock()
s.running = true
s.mu.Unlock()
s.grMu.Lock()
s.grRunning = true
s.grMu.Unlock()
// Snapshot server options.
opts := s.getOpts()
hasOperators := len(opts.TrustedOperators) > 0
if hasOperators {
s.Noticef("Trusted Operators")
}
for _, opc := range opts.TrustedOperators {
s.Noticef(" System : %q", opc.Audience)
s.Noticef(" Operator: %q", opc.Name)
s.Noticef(" Issued : %v", time.Unix(opc.IssuedAt, 0))
s.Noticef(" Expires : %v", time.Unix(opc.Expires, 0))
}
if hasOperators && opts.SystemAccount == _EMPTY_ {
s.Warnf("Trusted Operators should utilize a System Account")
}
// If we have a memory resolver, check the accounts here for validation exceptions.
// This allows them to be logged right away vs when they are accessed via a client.
if hasOperators && len(opts.resolverPreloads) > 0 {
s.checkResolvePreloads()
}
// Log the pid to a file
if opts.PidFile != _EMPTY_ {
if err := s.logPid(); err != nil {
PrintAndDie(fmt.Sprintf("Could not write pidfile: %v\n", err))
}
}
// Start monitoring if needed
if err := s.StartMonitoring(); err != nil {
s.Fatalf("Can't start monitoring: %v", err)
return
}
// Setup system account which will start the eventing stack.
if sa := opts.SystemAccount; sa != _EMPTY_ {
if err := s.SetSystemAccount(sa); err != nil {
s.Fatalf("Can't set system account: %v", err)
return
}
}
// Start expiration of mapped GW replies, regardless if
// this server is configured with gateway or not.
s.startGWReplyMapExpiration()
// Start up gateway if needed. Do this before starting the routes, because
// we want to resolve the gateway host:port so that this information can
// be sent to other routes.
if opts.Gateway.Port != 0 {
s.startGateways()
}
// Start up listen if we want to accept leaf node connections.
if opts.LeafNode.Port != 0 {
// Spin up the accept loop if needed
ch := make(chan struct{})
go s.leafNodeAcceptLoop(ch)
  // This ensures that we have resolved or assigned the advertise
// address for the leafnode listener. We need that in StartRouting().
<-ch
}
// Solicit remote servers for leaf node connections.
if len(opts.LeafNode.Remotes) > 0 {
s.solicitLeafNodeRemotes(opts.LeafNode.Remotes)
}
// The Routing routine needs to wait for the client listen
// port to be opened and potential ephemeral port selected.
clientListenReady := make(chan struct{})
// Start up routing as well if needed.
if opts.Cluster.Port != 0 {
s.startGoRoutine(func() {
s.StartRouting(clientListenReady)
})
}
// Pprof http endpoint for the profiler.
if opts.ProfPort != 0 {
s.StartProfiler()
}
if opts.PortsFileDir != _EMPTY_ {
s.logPorts()
}
// Wait for clients.
s.AcceptLoop(clientListenReady)
}
// Shutdown will shutdown the server instance by kicking out the AcceptLoop
// and closing all associated clients.
func (s *Server) Shutdown() {
// Shutdown the eventing system as needed.
// This is done first to send out any messages for
// account status. We will also clean up any
// eventing items associated with accounts.
s.shutdownEventing()
s.mu.Lock()
// Prevent issues with multiple calls.
if s.shutdown {
s.mu.Unlock()
return
}
s.Noticef("Initiating Shutdown...")
opts := s.getOpts()
s.shutdown = true
s.running = false
s.grMu.Lock()
s.grRunning = false
s.grMu.Unlock()
conns := make(map[uint64]*client)
// Copy off the clients
for i, c := range s.clients {
conns[i] = c
}
// Copy off the connections that are not yet registered
// in s.routes, but for which the readLoop has started
s.grMu.Lock()
for i, c := range s.grTmpClients {
conns[i] = c
}
s.grMu.Unlock()
// Copy off the routes
for i, r := range s.routes {
conns[i] = r
}
// Copy off the gateways
s.getAllGatewayConnections(conns)
// Copy off the leaf nodes
for i, c := range s.leafs {
conns[i] = c
}
// Number of done channel responses we expect.
doneExpected := 0
// Kick client AcceptLoop()
if s.listener != nil {
doneExpected++
s.listener.Close()
s.listener = nil
}
// Kick leafnodes AcceptLoop()
if s.leafNodeListener != nil {
doneExpected++
s.leafNodeListener.Close()
s.leafNodeListener = nil
}
// Kick route AcceptLoop()
if s.routeListener != nil {
doneExpected++
s.routeListener.Close()
s.routeListener = nil
}
// Kick Gateway AcceptLoop()
if s.gatewayListener != nil {
doneExpected++
s.gatewayListener.Close()
s.gatewayListener = nil
}
 // Kick HTTP monitoring if it's running
if s.http != nil {
doneExpected++
s.http.Close()
s.http = nil
}
 // Kick Profiling if it's running
if s.profiler != nil {
doneExpected++
s.profiler.Close()
}
s.mu.Unlock()
// Release go routines that wait on that channel
close(s.quitCh)
// Close client and route connections
for _, c := range conns {
c.setNoReconnect()
c.closeConnection(ServerShutdown)
}
// Block until the accept loops exit
for doneExpected > 0 {
<-s.done
doneExpected--
}
// Wait for go routines to be done.
s.grWG.Wait()
if opts.PortsFileDir != _EMPTY_ {
s.deletePortsFile(opts.PortsFileDir)
}
s.Noticef("Server Exiting..")
// Close logger if applicable. It allows tests on Windows
// to be able to do proper cleanup (delete log file).
s.logging.RLock()
log := s.logging.logger
s.logging.RUnlock()
if log != nil {
if l, ok := log.(*logger.Logger); ok {
l.Close()
}
}
// Notify that the shutdown is complete
close(s.shutdownComplete)
}
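// Illustrative note (not in the original source): each accept/serve loop that
// Shutdown kicks above sends exactly one value on s.done when it exits (see
// AcceptLoop and the monitoring/profiling goroutines), so draining
// doneExpected receives is what guarantees the listeners are fully torn down.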
// WaitForShutdown will block until the server has been fully shutdown.
func (s *Server) WaitForShutdown() {
<-s.shutdownComplete
}
// AcceptLoop is exported for easier testing.
func (s *Server) AcceptLoop(clr chan struct{}) {
// If we were to exit before the listener is setup properly,
// make sure we close the channel.
defer func() {
if clr != nil {
close(clr)
}
}()
// Snapshot server options.
opts := s.getOpts()
hp := net.JoinHostPort(opts.Host, strconv.Itoa(opts.Port))
l, e := net.Listen("tcp", hp)
if e != nil {
s.Fatalf("Error listening on port: %s, %q", hp, e)
return
}
s.Noticef("Listening for client connections on %s",
net.JoinHostPort(opts.Host, strconv.Itoa(l.Addr().(*net.TCPAddr).Port)))
// Alert of TLS enabled.
if opts.TLSConfig != nil {
s.Noticef("TLS required for client connections")
}
s.Noticef("Server id is %s", s.info.ID)
s.Noticef("Server is ready")
// Setup state that can enable shutdown
s.mu.Lock()
s.listener = l
 // If server was started with RANDOM_PORT (-1), opts.Port would be equal
 // to 0 at the beginning of this function. So we need to get the actual port
if opts.Port == 0 {
// Write resolved port back to options.
opts.Port = l.Addr().(*net.TCPAddr).Port
}
// Now that port has been set (if it was set to RANDOM), set the
// server's info Host/Port with either values from Options or
// ClientAdvertise. Also generate the JSON byte array.
if err := s.setInfoHostPortAndGenerateJSON(); err != nil {
s.Fatalf("Error setting server INFO with ClientAdvertise value of %s, err=%v", s.opts.ClientAdvertise, err)
s.mu.Unlock()
return
}
// Keep track of client connect URLs. We may need them later.
s.clientConnectURLs = s.getClientConnectURLs()
s.mu.Unlock()
// Let the caller know that we are ready
close(clr)
clr = nil
tmpDelay := ACCEPT_MIN_SLEEP
for s.isRunning() {
conn, err := l.Accept()
if err != nil {
if s.isLameDuckMode() {
// Signal that we are not accepting new clients
s.ldmCh <- true
// Now wait for the Shutdown...
<-s.quitCh
return
}
tmpDelay = s.acceptError("Client", err, tmpDelay)
continue
}
tmpDelay = ACCEPT_MIN_SLEEP
s.startGoRoutine(func() {
s.createClient(conn)
s.grWG.Done()
})
}
s.done <- true
}
// This function sets the server's info Host/Port based on server Options.
// Note that this function may be called during config reload, this is why
// Host/Port may be reset to original Options if the ClientAdvertise option
// is not set (since it may have previously been).
// The function then generates the server infoJSON.
func (s *Server) setInfoHostPortAndGenerateJSON() error {
// When this function is called, opts.Port is set to the actual listen
// port (if option was originally set to RANDOM), even during a config
// reload. So use of s.opts.Port is safe.
if s.opts.ClientAdvertise != "" {
h, p, err := parseHostPort(s.opts.ClientAdvertise, s.opts.Port)
if err != nil {
return err
}
s.info.Host = h
s.info.Port = p
} else {
s.info.Host = s.opts.Host
s.info.Port = s.opts.Port
}
return nil
}
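// Example (hypothetical values, not from this file): with opts.Port = 4222,
// ClientAdvertise = "nats.example.com" presumably advertises
// "nats.example.com:4222" (parseHostPort falling back to the listen port),
// while ClientAdvertise = "nats.example.com:5222" advertises that port.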
// StartProfiler is called to enable dynamic profiling.
func (s *Server) StartProfiler() {
// Snapshot server options.
opts := s.getOpts()
port := opts.ProfPort
// Check for Random Port
if port == -1 {
port = 0
}
hp := net.JoinHostPort(opts.Host, strconv.Itoa(port))
l, err := net.Listen("tcp", hp)
s.Noticef("profiling port: %d", l.Addr().(*net.TCPAddr).Port)
if err != nil {
s.Fatalf("error starting profiler: %s", err)
}
srv := &http.Server{
Addr: hp,
Handler: http.DefaultServeMux,
MaxHeaderBytes: 1 << 20,
}
s.mu.Lock()
s.profiler = l
s.profilingServer = srv
s.mu.Unlock()
// Enable blocking profile
runtime.SetBlockProfileRate(1)
go func() {
// if this errors out, it's probably because the server is being shutdown
err := srv.Serve(l)
if err != nil {
s.mu.Lock()
shutdown := s.shutdown
s.mu.Unlock()
if !shutdown {
s.Fatalf("error starting profiler: %s", err)
}
}
srv.Close()
s.done <- true
}()
}
// StartHTTPMonitoring will enable the HTTP monitoring port.
// DEPRECATED: Should use StartMonitoring.
func (s *Server) StartHTTPMonitoring() {
s.startMonitoring(false)
}
// StartHTTPSMonitoring will enable the HTTPS monitoring port.
// DEPRECATED: Should use StartMonitoring.
func (s *Server) StartHTTPSMonitoring() {
s.startMonitoring(true)
}
// StartMonitoring starts the HTTP or HTTPS server if needed.
func (s *Server) StartMonitoring() error {
// Snapshot server options.
opts := s.getOpts()
// Specifying both HTTP and HTTPS ports is a misconfiguration
if opts.HTTPPort != 0 && opts.HTTPSPort != 0 {
return fmt.Errorf("can't specify both HTTP (%v) and HTTPs (%v) ports", opts.HTTPPort, opts.HTTPSPort)
}
var err error
if opts.HTTPPort != 0 {
err = s.startMonitoring(false)
} else if opts.HTTPSPort != 0 {
if opts.TLSConfig == nil {
return fmt.Errorf("TLS cert and key required for HTTPS")
}
err = s.startMonitoring(true)
}
return err
}
// HTTP endpoints
const (
RootPath = "/"
VarzPath = "/varz"
ConnzPath = "/connz"
RoutezPath = "/routez"
GatewayzPath = "/gatewayz"
LeafzPath = "/leafz"
SubszPath = "/subsz"
StackszPath = "/stacksz"
)
// Start the monitoring server
func (s *Server) startMonitoring(secure bool) error {
// Snapshot server options.
opts := s.getOpts()
// Used to track HTTP requests
s.httpReqStats = map[string]uint64{
RootPath: 0,
VarzPath: 0,
ConnzPath: 0,
RoutezPath: 0,
GatewayzPath: 0,
SubszPath: 0,
}
var (
hp string
err error
httpListener net.Listener
port int
)
monitorProtocol := "http"
if secure {
monitorProtocol += "s"
port = opts.HTTPSPort
if port == -1 {
port = 0
}
hp = net.JoinHostPort(opts.HTTPHost, strconv.Itoa(port))
config := opts.TLSConfig.Clone()
config.ClientAuth = tls.NoClientCert
httpListener, err = tls.Listen("tcp", hp, config)
} else {
port = opts.HTTPPort
if port == -1 {
port = 0
}
hp = net.JoinHostPort(opts.HTTPHost, strconv.Itoa(port))
httpListener, err = net.Listen("tcp", hp)
}
if err != nil {
return fmt.Errorf("can't listen to the monitor port: %v", err)
}
s.Noticef("Starting %s monitor on %s", monitorProtocol,
net.JoinHostPort(opts.HTTPHost, strconv.Itoa(httpListener.Addr().(*net.TCPAddr).Port)))
mux := http.NewServeMux()
// Root
mux.HandleFunc(RootPath, s.HandleRoot)
// Varz
mux.HandleFunc(VarzPath, s.HandleVarz)
// Connz
mux.HandleFunc(ConnzPath, s.HandleConnz)
// Routez
mux.HandleFunc(RoutezPath, s.HandleRoutez)
// Gatewayz
mux.HandleFunc(GatewayzPath, s.HandleGatewayz)
// Leafz
mux.HandleFunc(LeafzPath, s.HandleLeafz)
// Subz
mux.HandleFunc(SubszPath, s.HandleSubsz)
// Subz alias for backwards compatibility
mux.HandleFunc("/subscriptionsz", s.HandleSubsz)
// Stacksz
mux.HandleFunc(StackszPath, s.HandleStacksz)
// Do not set a WriteTimeout because it could cause cURL/browser
 // to return an empty response or be unable to display the page if the
// server needs more time to build the response.
srv := &http.Server{
Addr: hp,
Handler: mux,
MaxHeaderBytes: 1 << 20,
}
s.mu.Lock()
s.http = httpListener
s.httpHandler = mux
s.monitoringServer = srv
s.mu.Unlock()
go func() {
if err := srv.Serve(httpListener); err != nil {
s.mu.Lock()
shutdown := s.shutdown
s.mu.Unlock()
if !shutdown {
s.Fatalf("Error starting monitor on %q: %v", hp, err)
}
}
srv.Close()
srv.Handler = nil
s.mu.Lock()
s.httpHandler = nil
s.mu.Unlock()
s.done <- true
}()
return nil
}
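// Usage sketch (hypothetical host/port, not part of this file): once the
// monitor is listening, the endpoints registered above can be queried
// directly, e.g.
//
//	curl http://127.0.0.1:8222/varz
//	curl http://127.0.0.1:8222/connz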
// HTTPHandler returns the http.Handler object used to handle monitoring
// endpoints. It will return nil if the server is not configured for
// monitoring, or if the server has not been started yet (Server.Start()).
func (s *Server) HTTPHandler() http.Handler {
s.mu.Lock()
defer s.mu.Unlock()
return s.httpHandler
}
// Perform a conditional deep copy due to reference nature of ClientConnectURLs.
// If updates are made to Info, this function should be consulted and updated.
// Assume lock is held.
func (s *Server) copyInfo() Info {
info := s.info
if info.ClientConnectURLs != nil {
info.ClientConnectURLs = make([]string, len(s.info.ClientConnectURLs))
copy(info.ClientConnectURLs, s.info.ClientConnectURLs)
}
if s.nonceRequired() {
// Nonce handling
var raw [nonceLen]byte
nonce := raw[:]
s.generateNonce(nonce)
info.Nonce = string(nonce)
}
return info
}
func (s *Server) createClient(conn net.Conn) *client {
// Snapshot server options.
opts := s.getOpts()
maxPay := int32(opts.MaxPayload)
maxSubs := int32(opts.MaxSubs)
// For system, maxSubs of 0 means unlimited, so re-adjust here.
if maxSubs == 0 {
maxSubs = -1
}
now := time.Now()
c := &client{srv: s, nc: conn, opts: defaultOpts, mpay: maxPay, msubs: maxSubs, start: now, last: now}
c.registerWithAccount(s.globalAccount())
// Grab JSON info string
s.mu.Lock()
info := s.copyInfo()
c.nonce = []byte(info.Nonce)
s.totalClients++
s.mu.Unlock()
// Grab lock
c.mu.Lock()
if info.AuthRequired {
c.flags.set(expectConnect)
}
// Initialize
c.initClient()
c.Debugf("Client connection created")
// Send our information.
// Need to be sent in place since writeLoop cannot be started until
// TLS handshake is done (if applicable).
c.sendProtoNow(c.generateClientInfoJSON(info))
// Unlock to register
c.mu.Unlock()
// Register with the server.
s.mu.Lock()
// If server is not running, Shutdown() may have already gathered the
// list of connections to close. It won't contain this one, so we need
// to bail out now otherwise the readLoop started down there would not
// be interrupted. Skip also if in lame duck mode.
if !s.running || s.ldm {
s.mu.Unlock()
return c
}
// If there is a max connections specified, check that adding
// this new client would not push us over the max
if opts.MaxConn > 0 && len(s.clients) >= opts.MaxConn {
s.mu.Unlock()
c.maxConnExceeded()
return nil
}
s.clients[c.cid] = c
s.mu.Unlock()
// Re-Grab lock
c.mu.Lock()
// Check for TLS
if info.TLSRequired {
c.Debugf("Starting TLS client connection handshake")
c.nc = tls.Server(c.nc, opts.TLSConfig)
conn := c.nc.(*tls.Conn)
// Setup the timeout
ttl := secondsToDuration(opts.TLSTimeout)
time.AfterFunc(ttl, func() { tlsTimeout(c, conn) })
conn.SetReadDeadline(time.Now().Add(ttl))
// Force handshake
c.mu.Unlock()
if err := conn.Handshake(); err != nil {
c.Errorf("TLS handshake error: %v", err)
c.closeConnection(TLSHandshakeError)
return nil
}
// Reset the read deadline
conn.SetReadDeadline(time.Time{})
// Re-Grab lock
c.mu.Lock()
// Indicate that handshake is complete (used in monitoring)
c.flags.set(handshakeComplete)
}
// The connection may have been closed
if c.isClosed() {
c.mu.Unlock()
// If it was due to TLS timeout, teardownConn() has already been called.
// Otherwise, if connection was marked as closed while sending the INFO,
// we need to call teardownConn() directly here.
if !info.TLSRequired {
c.teardownConn()
}
return c
}
// Check for Auth. We schedule this timer after the TLS handshake to avoid
// the race where the timer fires during the handshake and causes the
// server to write bad data to the socket. See issue #432.
if info.AuthRequired {
c.setAuthTimer(secondsToDuration(opts.AuthTimeout))
}
// Do final client initialization
// Set the Ping timer. Will be reset once connect was received.
c.setPingTimer()
// Spin up the read loop.
s.startGoRoutine(func() { c.readLoop() })
// Spin up the write loop.
s.startGoRoutine(func() { c.writeLoop() })
if info.TLSRequired {
c.Debugf("TLS handshake complete")
cs := c.nc.(*tls.Conn).ConnectionState()
c.Debugf("TLS version %s, cipher suite %s", tlsVersion(cs.Version), tlsCipher(cs.CipherSuite))
}
c.mu.Unlock()
return c
}
// This will save off a closed client in a ring buffer such that
// /connz can inspect it. Useful for debugging, etc.
func (s *Server) saveClosedClient(c *client, nc net.Conn, reason ClosedState) {
now := time.Now()
s.accountDisconnectEvent(c, now, reason.String())
c.mu.Lock()
cc := &closedClient{}
cc.fill(c, nc, now)
cc.Stop = &now
cc.Reason = reason.String()
// Do subs, do not place by default in main ConnInfo
if len(c.subs) > 0 {
cc.subs = make([]SubDetail, 0, len(c.subs))
for _, sub := range c.subs {
cc.subs = append(cc.subs, newSubDetail(sub))
}
}
// Hold user as well.
cc.user = c.opts.Username
// Hold account name if not the global account.
if c.acc != nil && c.acc.Name != globalAccountName {
cc.acc = c.acc.Name
}
c.mu.Unlock()
// Place in the ring buffer
s.mu.Lock()
if s.closed != nil {
s.closed.append(cc)
}
s.mu.Unlock()
}
// Adds the given array of urls to the server's INFO.ClientConnectURLs
// array. The server INFO JSON is regenerated.
// Note that a check is made to ensure that given URLs are not
// already present. So the INFO JSON is regenerated only if new URLs
// were added.
// If there was a change, an INFO protocol is sent to registered clients
// that support async INFO protocols.
func (s *Server) addClientConnectURLsAndSendINFOToClients(urls []string) {
s.updateServerINFOAndSendINFOToClients(urls, true)
}
// Removes the given array of urls from the server's INFO.ClientConnectURLs
// array. The server INFO JSON is regenerated if needed.
// If there was a change, an INFO protocol is sent to registered clients
// that support async INFO protocols.
func (s *Server) removeClientConnectURLsAndSendINFOToClients(urls []string) {
s.updateServerINFOAndSendINFOToClients(urls, false)
}
// Updates the server's Info object with the given array of URLs and re-generate
// the infoJSON byte array, then send an (async) INFO protocol to clients that
// support it.
func (s *Server) updateServerINFOAndSendINFOToClients(urls []string, add bool) {
s.mu.Lock()
defer s.mu.Unlock()
// Will be set to true if we alter the server's Info object.
wasUpdated := false
remove := !add
for _, url := range urls {
_, present := s.clientConnectURLsMap[url]
if add && !present {
s.clientConnectURLsMap[url] = struct{}{}
wasUpdated = true
} else if remove && present {
delete(s.clientConnectURLsMap, url)
wasUpdated = true
}
}
if wasUpdated {
// Recreate the info.ClientConnectURLs array from the map
s.info.ClientConnectURLs = s.info.ClientConnectURLs[:0]
// Add this server's client connect URLs first...
s.info.ClientConnectURLs = append(s.info.ClientConnectURLs, s.clientConnectURLs...)
for url := range s.clientConnectURLsMap {
s.info.ClientConnectURLs = append(s.info.ClientConnectURLs, url)
}
// Update the time of this update
s.lastCURLsUpdate = time.Now().UnixNano()
// Send to all registered clients that support async INFO protocols.
s.sendAsyncInfoToClients()
}
}
// Handle closing down a connection when the handshake has timed out.
func tlsTimeout(c *client, conn *tls.Conn) {
c.mu.Lock()
closed := c.isClosed()
c.mu.Unlock()
// Check if already closed
if closed {
return
}
cs := conn.ConnectionState()
if !cs.HandshakeComplete {
c.Errorf("TLS handshake timeout")
c.sendErr("Secure Connection - TLS Required")
c.closeConnection(TLSHandshakeError)
}
}
// Seems silly we have to write these
func tlsVersion(ver uint16) string {
switch ver {
case tls.VersionTLS10:
return "1.0"
case tls.VersionTLS11:
return "1.1"
case tls.VersionTLS12:
return "1.2"
case tls.VersionTLS13:
return "1.3"
}
return fmt.Sprintf("Unknown [0x%x]", ver)
}
// We use hex here so we don't need multiple versions
func tlsCipher(cs uint16) string {
name, present := cipherMapByID[cs]
if present {
return name
}
return fmt.Sprintf("Unknown [0x%x]", cs)
}
// Remove a client or route from our internal accounting.
func (s *Server) removeClient(c *client) {
// kind is immutable, so can check without lock
switch c.kind {
case CLIENT:
c.mu.Lock()
cid := c.cid
updateProtoInfoCount := false
if c.kind == CLIENT && c.opts.Protocol >= ClientProtoInfo {
updateProtoInfoCount = true
}
c.mu.Unlock()
s.mu.Lock()
delete(s.clients, cid)
if updateProtoInfoCount {
s.cproto--
}
s.mu.Unlock()
case ROUTER:
s.removeRoute(c)
case GATEWAY:
s.removeRemoteGatewayConnection(c)
case LEAF:
s.removeLeafNodeConnection(c)
}
}
func (s *Server) removeFromTempClients(cid uint64) {
s.grMu.Lock()
delete(s.grTmpClients, cid)
s.grMu.Unlock()
}
func (s *Server) addToTempClients(cid uint64, c *client) bool {
added := false
s.grMu.Lock()
if s.grRunning {
s.grTmpClients[cid] = c
added = true
}
s.grMu.Unlock()
return added
}
/////////////////////////////////////////////////////////////////
// These are some helpers for accounting in functional tests.
/////////////////////////////////////////////////////////////////
// NumRoutes will report the number of registered routes.
func (s *Server) NumRoutes() int {
s.mu.Lock()
nr := len(s.routes)
s.mu.Unlock()
return nr
}
// NumRemotes will report the number of registered remotes.
func (s *Server) NumRemotes() int {
s.mu.Lock()
defer s.mu.Unlock()
return len(s.remotes)
}
// NumLeafNodes will report the number of leaf node connections.
func (s *Server) NumLeafNodes() int {
s.mu.Lock()
defer s.mu.Unlock()
return len(s.leafs)
}
// NumClients will report the number of registered clients.
func (s *Server) NumClients() int {
s.mu.Lock()
defer s.mu.Unlock()
return len(s.clients)
}
// GetClient will return the client associated with cid.
func (s *Server) GetClient(cid uint64) *client {
return s.getClient(cid)
}
// getClient will return the client associated with cid.
func (s *Server) getClient(cid uint64) *client {
s.mu.Lock()
defer s.mu.Unlock()
return s.clients[cid]
}
// GetLeafNode returns the leafnode associated with the cid.
func (s *Server) GetLeafNode(cid uint64) *client {
s.mu.Lock()
defer s.mu.Unlock()
return s.leafs[cid]
}
// NumSubscriptions will report how many subscriptions are active.
func (s *Server) NumSubscriptions() uint32 {
s.mu.Lock()
defer s.mu.Unlock()
return s.numSubscriptions()
}
// numSubscriptions will report how many subscriptions are active.
// Lock should be held.
func (s *Server) numSubscriptions() uint32 {
var subs int
s.accounts.Range(func(k, v interface{}) bool {
acc := v.(*Account)
if acc.sl != nil {
subs += acc.TotalSubs()
}
return true
})
return uint32(subs)
}
// NumSlowConsumers will report the number of slow consumers.
func (s *Server) NumSlowConsumers() int64 {
return atomic.LoadInt64(&s.slowConsumers)
}
// ConfigTime will report the last time the server configuration was loaded.
func (s *Server) ConfigTime() time.Time {
s.mu.Lock()
defer s.mu.Unlock()
return s.configTime
}
// Addr will return the net.Addr object for the current listener.
func (s *Server) Addr() net.Addr {
s.mu.Lock()
defer s.mu.Unlock()
if s.listener == nil {
return nil
}
return s.listener.Addr()
}
// MonitorAddr will return the net.Addr object for the monitoring listener.
func (s *Server) MonitorAddr() *net.TCPAddr {
s.mu.Lock()
defer s.mu.Unlock()
if s.http == nil {
return nil
}
return s.http.Addr().(*net.TCPAddr)
}
// ClusterAddr returns the net.Addr object for the route listener.
func (s *Server) ClusterAddr() *net.TCPAddr {
s.mu.Lock()
defer s.mu.Unlock()
if s.routeListener == nil {
return nil
}
return s.routeListener.Addr().(*net.TCPAddr)
}
// ProfilerAddr returns the net.Addr object for the profiler listener.
func (s *Server) ProfilerAddr() *net.TCPAddr {
s.mu.Lock()
defer s.mu.Unlock()
if s.profiler == nil {
return nil
}
return s.profiler.Addr().(*net.TCPAddr)
}
// ReadyForConnections returns `true` if the server is ready to accept clients
// and, if routing is enabled, route connections. If after the duration
// `dur` the server is still not ready, returns `false`.
func (s *Server) ReadyForConnections(dur time.Duration) bool {
// Snapshot server options.
opts := s.getOpts()
end := time.Now().Add(dur)
for time.Now().Before(end) {
s.mu.Lock()
ok := s.listener != nil && (opts.Cluster.Port == 0 || s.routeListener != nil) && (opts.Gateway.Name == "" || s.gatewayListener != nil)
s.mu.Unlock()
if ok {
return true
}
time.Sleep(25 * time.Millisecond)
}
return false
}
// ID returns the server's ID
func (s *Server) ID() string {
s.mu.Lock()
defer s.mu.Unlock()
return s.info.ID
}
func (s *Server) startGoRoutine(f func()) {
s.grMu.Lock()
if s.grRunning {
s.grWG.Add(1)
go f()
}
s.grMu.Unlock()
}
func (s *Server) numClosedConns() int {
s.mu.Lock()
defer s.mu.Unlock()
return s.closed.len()
}
func (s *Server) totalClosedConns() uint64 {
s.mu.Lock()
defer s.mu.Unlock()
return s.closed.totalConns()
}
func (s *Server) closedClients() []*closedClient {
s.mu.Lock()
defer s.mu.Unlock()
return s.closed.closedClients()
}
// getClientConnectURLs returns suitable URLs for clients to connect to the listen
// port based on the server options' Host and Port. If the Host corresponds to
// "any" interfaces, this call returns the list of resolved IP addresses.
// If ClientAdvertise is set, returns the client advertise host and port.
// The server lock is assumed held on entry.
func (s *Server) getClientConnectURLs() []string {
// Snapshot server options.
opts := s.getOpts()
urls := make([]string, 0, 1)
// short circuit if client advertise is set
if opts.ClientAdvertise != "" {
// just use the info host/port. This is updated in s.New()
urls = append(urls, net.JoinHostPort(s.info.Host, strconv.Itoa(s.info.Port)))
} else {
sPort := strconv.Itoa(opts.Port)
_, ips, err := s.getNonLocalIPsIfHostIsIPAny(opts.Host, true)
for _, ip := range ips {
urls = append(urls, net.JoinHostPort(ip, sPort))
}
if err != nil || len(urls) == 0 {
// We are here if s.opts.Host is not "0.0.0.0" nor "::", or if for some
// reason we could not add any URL in the loop above.
// We had a case where a Windows VM was hosed and would have err == nil
// and not add any address in the array in the loop above, and we
// ended up returning 0.0.0.0, which is problematic for Windows clients.
// Check for 0.0.0.0 or :: specifically, and ignore if that's the case.
if opts.Host == "0.0.0.0" || opts.Host == "::" {
s.Errorf("Address %q can not be resolved properly", opts.Host)
} else {
urls = append(urls, net.JoinHostPort(opts.Host, sPort))
}
}
}
return urls
}
// Returns an array of non-local IPs if the provided host is
// 0.0.0.0 or ::. It returns only the first resolved IP if `all` is
// false.
// The boolean indicates whether the provided host was 0.0.0.0 (or ::),
// so that if the returned array is empty the caller can decide
// what to do next.
func (s *Server) getNonLocalIPsIfHostIsIPAny(host string, all bool) (bool, []string, error) {
ip := net.ParseIP(host)
// If this is not an IP, we are done
if ip == nil {
return false, nil, nil
}
// If this is not 0.0.0.0 or :: we have nothing to do.
if !ip.IsUnspecified() {
return false, nil, nil
}
s.Debugf("Get non local IPs for %q", host)
var ips []string
ifaces, _ := net.Interfaces()
for _, i := range ifaces {
addrs, _ := i.Addrs()
for _, addr := range addrs {
switch v := addr.(type) {
case *net.IPNet:
ip = v.IP
case *net.IPAddr:
ip = v.IP
}
ipStr := ip.String()
// Skip non global unicast addresses
if !ip.IsGlobalUnicast() || ip.IsUnspecified() {
ip = nil
continue
}
s.Debugf(" ip=%s", ipStr)
ips = append(ips, ipStr)
if !all {
break
}
}
}
return true, ips, nil
}
// if the ip is not specified, attempt to resolve it
func resolveHostPorts(addr net.Listener) []string {
hostPorts := make([]string, 0)
hp := addr.Addr().(*net.TCPAddr)
port := strconv.Itoa(hp.Port)
if hp.IP.IsUnspecified() {
var ip net.IP
ifaces, _ := net.Interfaces()
for _, i := range ifaces {
addrs, _ := i.Addrs()
for _, addr := range addrs {
switch v := addr.(type) {
case *net.IPNet:
ip = v.IP
hostPorts = append(hostPorts, net.JoinHostPort(ip.String(), port))
case *net.IPAddr:
ip = v.IP
hostPorts = append(hostPorts, net.JoinHostPort(ip.String(), port))
default:
continue
}
}
}
} else {
hostPorts = append(hostPorts, net.JoinHostPort(hp.IP.String(), port))
}
return hostPorts
}
// format the address of a net.Listener with a protocol
func formatURL(protocol string, addr net.Listener) []string {
hostports := resolveHostPorts(addr)
for i, hp := range hostports {
hostports[i] = fmt.Sprintf("%s://%s", protocol, hp)
}
return hostports
}
// Ports describes the URLs at which the server can be contacted
type Ports struct {
Nats []string `json:"nats,omitempty"`
Monitoring []string `json:"monitoring,omitempty"`
Cluster []string `json:"cluster,omitempty"`
Profile []string `json:"profile,omitempty"`
}
// PortsInfo attempts to resolve all the ports. If after maxWait the ports are not
// resolved, it returns nil. Otherwise it returns a Ports struct
// describing ports where the server can be contacted
func (s *Server) PortsInfo(maxWait time.Duration) *Ports {
if s.readyForListeners(maxWait) {
opts := s.getOpts()
s.mu.Lock()
info := s.copyInfo()
listener := s.listener
httpListener := s.http
clusterListener := s.routeListener
profileListener := s.profiler
s.mu.Unlock()
ports := Ports{}
if listener != nil {
natsProto := "nats"
if info.TLSRequired {
natsProto = "tls"
}
ports.Nats = formatURL(natsProto, listener)
}
if httpListener != nil {
monProto := "http"
if opts.HTTPSPort != 0 {
monProto = "https"
}
ports.Monitoring = formatURL(monProto, httpListener)
}
if clusterListener != nil {
clusterProto := "nats"
if opts.Cluster.TLSConfig != nil {
clusterProto = "tls"
}
ports.Cluster = formatURL(clusterProto, clusterListener)
}
if profileListener != nil {
ports.Profile = formatURL("http", profileListener)
}
return &ports
}
return nil
}
// Returns the portsFile. If a non-empty dirHint is provided, the dirHint
// path is used instead of the server option value
func (s *Server) portFile(dirHint string) string {
dirname := s.getOpts().PortsFileDir
if dirHint != "" {
dirname = dirHint
}
if dirname == _EMPTY_ {
return _EMPTY_
}
return filepath.Join(dirname, fmt.Sprintf("%s_%d.ports", filepath.Base(os.Args[0]), os.Getpid()))
}
// Delete the ports file. If a non-empty dirHint is provided, the dirHint
// path is used instead of the server option value
func (s *Server) deletePortsFile(hintDir string) {
portsFile := s.portFile(hintDir)
if portsFile != "" {
if err := os.Remove(portsFile); err != nil {
s.Errorf("Error cleaning up ports file %s: %v", portsFile, err)
}
}
}
// Writes a file with a serialized Ports to the specified ports_file_dir.
// The name of the file is `exename_pid.ports`, typically nats-server_pid.ports.
// If the ports file directory is not set, this function has no effect.
func (s *Server) logPorts() {
opts := s.getOpts()
portsFile := s.portFile(opts.PortsFileDir)
if portsFile != _EMPTY_ {
go func() {
info := s.PortsInfo(5 * time.Second)
if info == nil {
s.Errorf("Unable to resolve the ports in the specified time")
return
}
data, err := json.Marshal(info)
if err != nil {
s.Errorf("Error marshaling ports file: %v", err)
return
}
if err := ioutil.WriteFile(portsFile, data, 0666); err != nil {
s.Errorf("Error writing ports file (%s): %v", portsFile, err)
return
}
}()
}
}
// Waits until a calculated list of listeners is resolved or a timeout occurs.
func (s *Server) readyForListeners(dur time.Duration) bool {
end := time.Now().Add(dur)
for time.Now().Before(end) {
s.mu.Lock()
listeners := s.serviceListeners()
s.mu.Unlock()
if len(listeners) == 0 {
return false
}
ok := true
for _, l := range listeners {
if l == nil {
ok = false
break
}
}
if ok {
return true
}
select {
case <-s.quitCh:
return false
case <-time.After(25 * time.Millisecond):
// continue - unable to select from quit - we are still running
}
}
return false
}
// Returns a list of listeners that are intended for the process.
// If an entry is nil, that listener is yet to be resolved.
func (s *Server) serviceListeners() []net.Listener {
listeners := make([]net.Listener, 0)
opts := s.getOpts()
listeners = append(listeners, s.listener)
if opts.Cluster.Port != 0 {
listeners = append(listeners, s.routeListener)
}
if opts.HTTPPort != 0 || opts.HTTPSPort != 0 {
listeners = append(listeners, s.http)
}
if opts.ProfPort != 0 {
listeners = append(listeners, s.profiler)
}
return listeners
}
// Returns true if in lame duck mode.
func (s *Server) isLameDuckMode() bool {
s.mu.Lock()
defer s.mu.Unlock()
return s.ldm
}
// This function will close the client listener then close the clients
// at some interval to avoid a reconnecting storm.
func (s *Server) lameDuckMode() {
s.mu.Lock()
// Check if there is actually anything to do
if s.shutdown || s.ldm || s.listener == nil {
s.mu.Unlock()
return
}
s.Noticef("Entering lame duck mode, stop accepting new clients")
s.ldm = true
s.ldmCh = make(chan bool, 1)
s.listener.Close()
s.listener = nil
s.mu.Unlock()
// Wait for accept loop to be done to make sure that no new
// client can connect
<-s.ldmCh
s.mu.Lock()
// Need to recheck a few things
if s.shutdown || len(s.clients) == 0 {
s.mu.Unlock()
// If there is no client, we need to call Shutdown() to complete
// the LDMode. If server has been shutdown while lock was released,
// calling Shutdown() should be no-op.
s.Shutdown()
return
}
dur := int64(s.getOpts().LameDuckDuration)
dur -= atomic.LoadInt64(&lameDuckModeInitialDelay)
if dur <= 0 {
dur = int64(time.Second)
}
numClients := int64(len(s.clients))
batch := 1
// Sleep interval between each client connection close.
si := dur / numClients
if si < 1 {
// Should not happen (except in tests with a very small LD duration), but
// if there are too many clients, batch the closes and
// use a tiny sleep interval that will most likely just yield.
si = 1
batch = int(numClients / dur)
} else if si > int64(time.Second) {
// Conversely, there is no need to sleep too long between clients:
// no point in spreading, say, 10 clients over the full 2min duration.
// Sleep no more than 1sec.
si = int64(time.Second)
}
// Now capture all clients
clients := make([]*client, 0, len(s.clients))
for _, client := range s.clients {
clients = append(clients, client)
}
s.mu.Unlock()
t := time.NewTimer(time.Duration(atomic.LoadInt64(&lameDuckModeInitialDelay)))
// Delay the start of closing client connections in case
// we have several servers that we want to signal to enter LD mode
// and not have their clients reconnect to each other.
select {
case <-t.C:
s.Noticef("Closing existing clients")
case <-s.quitCh:
return
}
for i, client := range clients {
client.closeConnection(ServerShutdown)
if i == len(clients)-1 {
break
}
if batch == 1 || i%batch == 0 {
// We pick a random interval which will be at least si/2
v := rand.Int63n(si)
if v < si/2 {
v = si / 2
}
t.Reset(time.Duration(v))
// Sleep for given interval or bail out if kicked by Shutdown().
select {
case <-t.C:
case <-s.quitCh:
t.Stop()
return
}
}
}
s.Shutdown()
}
// If the given error is a net.Error and is temporary, sleeps for the given
// delay and doubles it, but caps it at ACCEPT_MAX_SLEEP. The sleep is
// interrupted if the server is shutdown.
// An error message is displayed depending on the type of error.
// Returns the new (or unchanged) delay.
func (s *Server) acceptError(acceptName string, err error, tmpDelay time.Duration) time.Duration {
if ne, ok := err.(net.Error); ok && ne.Temporary() {
s.Errorf("Temporary %s Accept Error(%v), sleeping %dms", acceptName, ne, tmpDelay/time.Millisecond)
select {
case <-time.After(tmpDelay):
case <-s.quitCh:
return tmpDelay
}
tmpDelay *= 2
if tmpDelay > ACCEPT_MAX_SLEEP {
tmpDelay = ACCEPT_MAX_SLEEP
}
} else if s.isRunning() {
s.Errorf("%s Accept error: %v", acceptName, err)
}
return tmpDelay
}
func (s *Server) getRandomIP(resolver netResolver, url string) (string, error) {
host, port, err := net.SplitHostPort(url)
if err != nil {
return "", err
}
// If already an IP, skip.
if net.ParseIP(host) != nil {
return url, nil
}
ips, err := resolver.LookupHost(context.Background(), host)
if err != nil {
return "", fmt.Errorf("lookup for host %q: %v", host, err)
}
var address string
if len(ips) == 0 {
s.Warnf("Unable to get IP for %s, will try with %s: %v", host, url, err)
address = url
} else {
var ip string
if len(ips) == 1 {
ip = ips[0]
} else {
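// Pick one of the resolved IPs at random.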
ip = ips[rand.Int31n(int32(len(ips)))]
}
// add the port
address = net.JoinHostPort(ip, port)
}
return address, nil
}
// Returns true for the first attempt and, depending on the nature
// of the attempt (first connect or a reconnect), whenever the number
// of attempts is a multiple of the configured report interval.
func (s *Server) shouldReportConnectErr(firstConnect bool, attempts int) bool {
opts := s.getOpts()
if firstConnect {
if attempts == 1 || attempts%opts.ConnectErrorReports == 0 {
return true
}
return false
}
if attempts == 1 || attempts%opts.ReconnectErrorReports == 0 {
return true
}
return false
}
// Invoked for route, leaf and gateway connections. Set the very first
// PING to a lower interval to capture the initial RTT.
// After that the PING interval will be set to the user defined value.
// Client lock should be held.
func (s *Server) setFirstPingTimer(c *client) {
opts := s.getOpts()
d := opts.PingInterval
if !opts.DisableShortFirstPing {
if c.kind != CLIENT {
if d > firstPingInterval {
d = firstPingInterval
}
} else if d > firstClientPingInterval {
d = firstClientPingInterval
}
}
// We randomize the first one by an offset up to 20%, e.g. 2m ~= max 24s.
addDelay := rand.Int63n(int64(d / 5))
d += time.Duration(addDelay)
c.ping.tmr = time.AfterFunc(d, c.processPingTimer)
}
| 1 | 10,546 | That's fine though, meaning that you can send to this channel under the server lock. The internalSendLoop will pick up the change when the server lock is released (if loop is blocked trying to grab the server lock). Even the way you do it here (releasing the lock, sending, then reacquiring) does not guarantee that the internalSendLoop will have time to refresh in that interval. | nats-io-nats-server | go |
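A minimal Go sketch of the pattern this review describes; internalSendLoop is the name used in the review, while srv, refreshCh, and update are illustrative, not the actual nats-server API. A buffered, non-blocking send lets the producer signal while still holding the lock, and the consumer only observes the new state once the lock is released.

package sketch

import "sync"

type srv struct {
	mu        sync.Mutex
	state     int
	refreshCh chan struct{} // make(chan struct{}, 1): capacity 1 buffers the signal
}

func (s *srv) update(v int) {
	s.mu.Lock()
	s.state = v
	// Safe while holding the lock: the buffered, non-blocking send cannot block.
	select {
	case s.refreshCh <- struct{}{}:
	default:
	}
	s.mu.Unlock()
}

func (s *srv) internalSendLoop() {
	for range s.refreshCh {
		// Blocks here until update() releases the lock, then reads fresh state.
		s.mu.Lock()
		_ = s.state
		s.mu.Unlock()
	}
}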
@@ -47,6 +47,7 @@ public interface CapabilityType {
String ELEMENT_SCROLL_BEHAVIOR = "elementScrollBehavior";
String HAS_TOUCHSCREEN = "hasTouchScreen";
String OVERLAPPING_CHECK_DISABLED = "overlappingCheckDisabled";
+ String ENABLE_DOWNLOADING = "chromium:enableDownloading";
String LOGGING_PREFS = "loggingPrefs";
| 1 | // Licensed to the Software Freedom Conservancy (SFC) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The SFC licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.openqa.selenium.remote;
/**
* Commonly seen remote webdriver capabilities.
*/
public interface CapabilityType {
String BROWSER_NAME = "browserName";
@Deprecated String PLATFORM = "platform";
String PLATFORM_NAME = "platformName";
String SUPPORTS_JAVASCRIPT = "javascriptEnabled";
String TAKES_SCREENSHOT = "takesScreenshot";
String VERSION = "version";
String BROWSER_VERSION = "browserVersion";
String SUPPORTS_ALERTS = "handlesAlerts";
String SUPPORTS_SQL_DATABASE = "databaseEnabled";
String SUPPORTS_LOCATION_CONTEXT = "locationContextEnabled";
String SUPPORTS_APPLICATION_CACHE = "applicationCacheEnabled";
String SUPPORTS_NETWORK_CONNECTION = "networkConnectionEnabled";
String SUPPORTS_FINDING_BY_CSS = "cssSelectorsEnabled";
String PROXY = "proxy";
String SUPPORTS_WEB_STORAGE = "webStorageEnabled";
String ROTATABLE = "rotatable";
String APPLICATION_NAME = "applicationName";
// Enable this capability to accept all SSL certs by default.
String ACCEPT_SSL_CERTS = "acceptSslCerts";
String ACCEPT_INSECURE_CERTS = "acceptInsecureCerts";
String HAS_NATIVE_EVENTS = "nativeEvents";
String UNEXPECTED_ALERT_BEHAVIOUR = "unexpectedAlertBehaviour";
String UNHANDLED_PROMPT_BEHAVIOUR = "unhandledPromptBehavior";
String ELEMENT_SCROLL_BEHAVIOR = "elementScrollBehavior";
String HAS_TOUCHSCREEN = "hasTouchScreen";
String OVERLAPPING_CHECK_DISABLED = "overlappingCheckDisabled";
String LOGGING_PREFS = "loggingPrefs";
String ENABLE_PROFILING_CAPABILITY = "webdriver.logging.profiler.enabled";
/**
* @deprecated Use PAGE_LOAD_STRATEGY instead
*/
@Deprecated
String PAGE_LOADING_STRATEGY = "pageLoadingStrategy";
String PAGE_LOAD_STRATEGY = "pageLoadStrategy";
/**
* Moved InternetExplorer specific CapabilityTypes into InternetExplorerDriver.java for consistency
*/
@Deprecated
String ENABLE_PERSISTENT_HOVERING = "enablePersistentHover";
interface ForSeleniumServer {
String AVOIDING_PROXY = "avoidProxy";
String ONLY_PROXYING_SELENIUM_TRAFFIC = "onlyProxySeleniumTraffic";
String PROXYING_EVERYTHING = "proxyEverything";
String PROXY_PAC = "proxy_pac";
String ENSURING_CLEAN_SESSION = "ensureCleanSession";
}
}
| 1 | 15,152 | The name `enableDownloading` implies this is a boolean capability. How about `downloadDir`? | SeleniumHQ-selenium | java |
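A sketch of the reviewer's suggested rename; the capability key string is an assumption for illustration, not an actual Selenium constant:

public interface CapabilityTypeSketch {
    // Hypothetical replacement for ENABLE_DOWNLOADING; the key value is assumed.
    String DOWNLOAD_DIR = "chromium:downloadDir";
}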
@@ -28,10 +28,9 @@ public interface ValidatorManager {
Map<String, ValidationReport> validate(Project project, File projectDir);
/**
- * The ValidatorManager should have a default validator which checks for the most essential
- * components of a project. The ValidatorManager should always load the default validator. This
- * method returns the default validator of this ValidatorManager.
+ * ValidatorManager will not have any default validator.
*/
+ @Deprecated
ProjectValidator getDefaultValidator();
/** | 1 | package azkaban.project.validator;
import azkaban.project.Project;
import azkaban.utils.Props;
import java.io.File;
import java.util.List;
import java.util.Map;
import org.apache.log4j.Logger;
/**
* ValidatorManager is responsible for loading the list of validators specified in the Azkaban
* validator configuration file. Once these validators are loaded, the ValidatorManager will use the
* registered validators to verify each uploaded project before persisting it.
*/
public interface ValidatorManager {
/**
* Load the validators using the given properties. Each validator is also given the specified
* logger to record any necessary message in the Azkaban log file.
*/
void loadValidators(Props props, Logger logger);
/**
* Validate the given project using the registered list of validators. This method returns a map
* of {@link ValidationReport} with the key being the validator's name and the value being the
* {@link ValidationReport} generated by that validator.
*/
Map<String, ValidationReport> validate(Project project, File projectDir);
/**
* The ValidatorManager should have a default validator which checks for the most essential
* components of a project. The ValidatorManager should always load the default validator. This
* method returns the default validator of this ValidatorManager.
*/
ProjectValidator getDefaultValidator();
/**
* Returns a list of String containing the name of each registered validators.
*/
List<String> getValidatorsInfo();
}
 | 1 | 14,782 | Found that one internal team is using this method. Will coordinate with them about the migration solution. | azkaban-azkaban | java
@@ -230,6 +230,18 @@ namespace Datadog.Trace.Configuration
/// </summary>
public const string DiagnosticSourceEnabled = "DD_DIAGNOSTIC_SOURCE_ENABLED";
+ /// <summary>
+ /// Configuration key for the application's HTTP server statuses that should mark spans as errors.
+ /// </summary>
+ /// <seealso cref="TracerSettings.HttpServerErrorCodes"/>
+ public const string HttpServerErrorCodes = "DD_HTTP_SERVER_ERROR_STATUSES";
+
+ /// <summary>
+ /// Configuration key for the application's HTTP client statuses that should mark spans as errors.
+ /// </summary>
+ /// <seealso cref="TracerSettings.HttpClientErrorCodes"/>
+ public const string HttpClientErrorCodes = "DD_HTTP_CLIENT_ERROR_STATUSES";
+
/// <summary>
/// String format patterns used to match integration-specific configuration keys.
/// </summary> | 1 | namespace Datadog.Trace.Configuration
{
/// <summary>
/// String constants for standard Datadog configuration keys.
/// </summary>
public static class ConfigurationKeys
{
/// <summary>
/// Configuration key for the path to the configuration file.
/// Can only be set with an environment variable
/// or in the <c>app.config</c>/<c>web.config</c> file.
/// </summary>
public const string ConfigurationFileName = "DD_TRACE_CONFIG_FILE";
/// <summary>
/// Configuration key for the application's environment. Sets the "env" tag on every <see cref="Span"/>.
/// </summary>
/// <seealso cref="TracerSettings.Environment"/>
public const string Environment = "DD_ENV";
/// <summary>
/// Configuration key for the application's default service name.
/// Used as the service name for top-level spans,
/// and used to determine service name of some child spans.
/// </summary>
/// <seealso cref="TracerSettings.ServiceName"/>
public const string ServiceName = "DD_SERVICE";
/// <summary>
/// Configuration key for the application's version. Sets the "version" tag on every <see cref="Span"/>.
/// </summary>
/// <seealso cref="TracerSettings.ServiceVersion"/>
public const string ServiceVersion = "DD_VERSION";
/// <summary>
/// Configuration key for enabling or disabling the Tracer.
/// Default value is true (enabled).
/// </summary>
/// <seealso cref="TracerSettings.TraceEnabled"/>
public const string TraceEnabled = "DD_TRACE_ENABLED";
/// <summary>
/// Configuration key for enabling or disabling the Tracer's debug mode.
/// Default is value is false (disabled).
/// </summary>
/// <seealso cref="TracerSettings.DebugEnabled"/>
public const string DebugEnabled = "DD_TRACE_DEBUG";
/// <summary>
/// Configuration key for a list of integrations to disable. All other integrations remain enabled.
/// Default is empty (all integrations are enabled).
/// Supports multiple values separated with semi-colons.
/// </summary>
/// <seealso cref="TracerSettings.DisabledIntegrationNames"/>
public const string DisabledIntegrations = "DD_DISABLED_INTEGRATIONS";
/// <summary>
/// Configuration key for the Agent host where the Tracer can send traces.
/// Overridden by <see cref="AgentUri"/> if present.
/// Default value is "localhost".
/// </summary>
/// <seealso cref="TracerSettings.AgentUri"/>
public const string AgentHost = "DD_AGENT_HOST";
/// <summary>
/// Configuration key for the Agent port where the Tracer can send traces.
/// Default value is 8126.
/// </summary>
/// <seealso cref="TracerSettings.AgentUri"/>
public const string AgentPort = "DD_TRACE_AGENT_PORT";
/// <summary>
/// Sibling setting for <see cref="AgentPort"/>.
/// Used to force a specific port binding for the Trace Agent.
/// Default value is 8126.
/// </summary>
/// <seealso cref="TracerSettings.AgentUri"/>
public const string TraceAgentPortKey = "DD_APM_RECEIVER_PORT";
/// <summary>
/// Configuration key for the Agent URL where the Tracer can send traces.
/// Overrides values in <see cref="AgentHost"/> and <see cref="AgentPort"/> if present.
/// Default value is "http://localhost:8126".
/// </summary>
/// <seealso cref="TracerSettings.AgentUri"/>
public const string AgentUri = "DD_TRACE_AGENT_URL";
/// <summary>
/// Configuration key for enabling or disabling default Analytics.
/// </summary>
/// <seealso cref="TracerSettings.AnalyticsEnabled"/>
public const string GlobalAnalyticsEnabled = "DD_TRACE_ANALYTICS_ENABLED";
/// <summary>
/// Configuration key for a list of tags to be applied globally to spans.
/// </summary>
/// <seealso cref="TracerSettings.GlobalTags"/>
public const string GlobalTags = "DD_TAGS";
/// <summary>
/// Configuration key for a map of header keys to tag names.
/// Automatically apply header values as tags on traces.
/// </summary>
/// <seealso cref="TracerSettings.HeaderTags"/>
public const string HeaderTags = "DD_TRACE_HEADER_TAGS";
/// <summary>
/// Configuration key for setting the size of the trace buffer
/// </summary>
public const string QueueSize = "DD_TRACE_QUEUE_SIZE";
/// <summary>
/// Configuration key for enabling or disabling the automatic injection
/// of correlation identifiers into the logging context.
/// </summary>
/// <seealso cref="TracerSettings.LogsInjectionEnabled"/>
public const string LogsInjectionEnabled = "DD_LOGS_INJECTION";
/// <summary>
/// Configuration key for setting the number of traces allowed
/// to be submitted per second.
/// </summary>
/// <seealso cref="TracerSettings.MaxTracesSubmittedPerSecond"/>
public const string MaxTracesSubmittedPerSecond = "DD_MAX_TRACES_PER_SECOND";
/// <summary>
/// Configuration key for enabling or disabling the diagnostic log at startup
/// </summary>
/// <seealso cref="TracerSettings.StartupDiagnosticLogEnabled"/>
public const string StartupDiagnosticLogEnabled = "DD_TRACE_STARTUP_LOGS";
/// <summary>
/// Configuration key for setting custom sampling rules based on regular expressions.
/// Semi-colon separated list of sampling rules.
/// The rule is matched in order of specification. The first match in a list is used.
///
/// Per entry:
/// The item "sample_rate" is required in decimal format.
/// The item "service" is optional in regular expression format, to match on service name.
/// The item "name" is optional in regular expression format, to match on operation name.
///
/// To give a rate of 50% to any traces in a service starting with the text "cart":
/// '[{"sample_rate":0.5, "service":"cart.*"}]'
///
/// To give a rate of 20% to any traces which have an operation name of "http.request":
/// '[{"sample_rate":0.2, "name":"http.request"}]'
///
/// To give a rate of 100% to any traces within a service named "background" and with an operation name of "sql.query":
/// '[{"sample_rate":1.0, "service":"background", "name":"sql.query"}]
///
/// To give a rate of 10% to all traces
/// '[{"sample_rate":0.1}]'
///
/// To configure multiple rules, separate by semi-colon and order from most specific to least specific:
/// '[{"sample_rate":0.5, "service":"cart.*"}, {"sample_rate":0.2, "name":"http.request"}, {"sample_rate":1.0, "service":"background", "name":"sql.query"}, {"sample_rate":0.1}]'
///
/// If no rules are specified, or none match, default internal sampling logic will be used.
/// </summary>
/// <seealso cref="TracerSettings.CustomSamplingRules"/>
public const string CustomSamplingRules = "DD_TRACE_SAMPLING_RULES";
/// <summary>
/// Configuration key for setting the global rate for the sampler.
/// </summary>
public const string GlobalSamplingRate = "DD_TRACE_SAMPLE_RATE";
/// <summary>
/// Configuration key for the DogStatsd port where the Tracer can send metrics.
/// Default value is 8125.
/// </summary>
public const string DogStatsdPort = "DD_DOGSTATSD_PORT";
/// <summary>
/// Configuration key for enabling or disabling internal metrics sent to DogStatsD.
/// Default value is <c>false</c> (disabled).
/// </summary>
public const string TracerMetricsEnabled = "DD_TRACE_METRICS_ENABLED";
/// <summary>
/// Configuration key for enabling or disabling runtime metrics sent to DogStatsD.
/// Default value is <c>false</c> (disabled).
/// </summary>
public const string RuntimeMetricsEnabled = "DD_RUNTIME_METRICS_ENABLED";
/// <summary>
/// Configuration key for setting the approximate maximum size,
/// in bytes, for Tracer log files.
/// Default value is 10 MB.
/// </summary>
public const string MaxLogFileSize = "DD_MAX_LOGFILE_SIZE";
/// <summary>
/// Configuration key for setting the path to the .NET Tracer native log file.
/// This also determines the output folder of the .NET Tracer managed log files.
/// Overridden by <see cref="LogDirectory"/> if present.
/// </summary>
public const string ProfilerLogPath = "DD_TRACE_LOG_PATH";
/// <summary>
/// Configuration key for setting the directory of the .NET Tracer logs.
/// Overrides the value in <see cref="ProfilerLogPath"/> if present.
/// Default value is "%ProgramData%"\Datadog .NET Tracer\logs\" on Windows
/// or "/var/log/datadog/dotnet/" on Linux.
/// </summary>
public const string LogDirectory = "DD_TRACE_LOG_DIRECTORY";
/// <summary>
/// Configuration key for when a standalone instance of the Trace Agent needs to be started.
/// </summary>
public const string TraceAgentPath = "DD_TRACE_AGENT_PATH";
/// <summary>
/// Configuration key for arguments to pass to the Trace Agent process.
/// </summary>
public const string TraceAgentArgs = "DD_TRACE_AGENT_ARGS";
/// <summary>
/// Configuration key for when a standalone instance of DogStatsD needs to be started.
/// </summary>
public const string DogStatsDPath = "DD_DOGSTATSD_PATH";
/// <summary>
/// Configuration key for arguments to pass to the DogStatsD process.
/// </summary>
public const string DogStatsDArgs = "DD_DOGSTATSD_ARGS";
/// <summary>
/// Configuration key for enabling or disabling the use of System.Diagnostics.DiagnosticSource.
/// Default value is <c>true</c> (enabled).
/// </summary>
public const string DiagnosticSourceEnabled = "DD_DIAGNOSTIC_SOURCE_ENABLED";
/// <summary>
/// String format patterns used to match integration-specific configuration keys.
/// </summary>
public static class Integrations
{
/// <summary>
/// Configuration key pattern for enabling or disabling an integration.
/// </summary>
public const string Enabled = "DD_TRACE_{0}_ENABLED";
/// <summary>
/// Configuration key pattern for enabling or disabling Analytics in an integration.
/// </summary>
public const string AnalyticsEnabled = "DD_TRACE_{0}_ANALYTICS_ENABLED";
/// <summary>
/// Configuration key pattern for setting Analytics sampling rate in an integration.
/// </summary>
public const string AnalyticsSampleRate = "DD_TRACE_{0}_ANALYTICS_SAMPLE_RATE";
}
/// <summary>
/// String constants for debug configuration keys.
/// </summary>
internal static class Debug
{
/// <summary>
/// Configuration key for forcing the automatic instrumentation to only use the mdToken method lookup mechanism.
/// </summary>
public const string ForceMdTokenLookup = "DD_TRACE_DEBUG_LOOKUP_MDTOKEN";
/// <summary>
/// Configuration key for forcing the automatic instrumentation to only use the fallback method lookup mechanism.
/// </summary>
public const string ForceFallbackLookup = "DD_TRACE_DEBUG_LOOKUP_FALLBACK";
}
}
}
| 1 | 18,554 | The field `HttpServerErrorCodes` should be called `HttpServerErrorStatuses` | DataDog-dd-trace-dotnet | .cs |
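A sketch of the field names the reviewer proposes; the configuration key strings are unchanged from the patch, and the wrapping class name is illustrative:

// Sketch of the proposed rename from the review.
public static class ConfigurationKeysSketch
{
    /// <summary>
    /// Configuration key for the HTTP server statuses that should mark spans as errors.
    /// </summary>
    public const string HttpServerErrorStatuses = "DD_HTTP_SERVER_ERROR_STATUSES";

    /// <summary>
    /// Configuration key for the HTTP client statuses that should mark spans as errors.
    /// </summary>
    public const string HttpClientErrorStatuses = "DD_HTTP_CLIENT_ERROR_STATUSES";
}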
@@ -35,6 +35,12 @@ MAXIMUM_LOOP_COUNT = 600
DEFAULT_BUCKET_FMT_V1 = 'gs://{}-data-{}'
DEFAULT_BUCKET_FMT_V2 = 'gs://{}-{}-data-{}'
+FORSETI_V1_RULE_FILES = [
+ 'bigquery_rules.yaml', 'blacklist_rules.yaml', 'bucket_rules.yaml',
+ 'cloudsql_rules.yaml', 'firewall_rules.yaml', 'forwarding_rules.yaml',
+ 'group_rules.yaml', 'iam_rules.yaml', 'iap_rules.yaml', 'ke_rules.yaml',
+ 'instance_network_interface_rules.yaml']
+
GCLOUD_MIN_VERSION = (180, 0, 0)
GCLOUD_VERSION_REGEX = r'Google Cloud SDK (.*)'
GCLOUD_ALPHA_REGEX = r'alpha.*' | 1 | # Copyright 2017 The Forseti Security Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Constants used for the setup of Forseti."""
import os
from enum import Enum
class FirewallRuleAction(Enum):
"""Firewall rule action object."""
ALLOW = 'ALLOW'
DENY = 'DENY'
class FirewallRuleDirection(Enum):
"""Firewall rule direction object."""
INGRESS = 'INGRESS'
EGRESS = 'EGRESS'
MAXIMUM_LOOP_COUNT = 600
DEFAULT_BUCKET_FMT_V1 = 'gs://{}-data-{}'
DEFAULT_BUCKET_FMT_V2 = 'gs://{}-{}-data-{}'
GCLOUD_MIN_VERSION = (180, 0, 0)
GCLOUD_VERSION_REGEX = r'Google Cloud SDK (.*)'
GCLOUD_ALPHA_REGEX = r'alpha.*'
SERVICE_ACCT_FMT = 'forseti-{}-{}-{}'
SERVICE_ACCT_EMAIL_FMT = '{}@{}.iam.gserviceaccount.com'
INPUT_DEPLOYMENT_TEMPLATE_FILENAME = {
'server': 'deploy-forseti-server.yaml.in',
'client': 'deploy-forseti-client.yaml.in'
}
INPUT_CONFIGURATION_TEMPLATE_FILENAME = {
'server': 'forseti_conf_server.yaml.in',
'client': 'forseti_conf_client.yaml.in'
}
NOTIFICATION_SENDER_EMAIL = '[email protected]'
RESOURCE_TYPE_ARGS_MAP = {
'organizations': ['organizations'],
'folders': ['alpha', 'resource-manager', 'folders'],
'projects': ['projects'],
'forseti_project': ['projects'],
'service_accounts': ['iam', 'service-accounts']
}
# Roles
GCP_READ_IAM_ROLES = [
'roles/browser',
'roles/compute.networkViewer',
'roles/iam.securityReviewer',
'roles/appengine.appViewer',
'roles/bigquery.dataViewer',
'roles/servicemanagement.quotaViewer',
'roles/cloudsql.viewer'
]
GCP_WRITE_IAM_ROLES = [
'roles/compute.securityAdmin'
]
PROJECT_IAM_ROLES_SERVER = [
'roles/storage.objectViewer',
'roles/storage.objectCreator',
'roles/cloudsql.client',
'roles/logging.logWriter'
]
PROJECT_IAM_ROLES_CLIENT = [
'roles/storage.objectViewer',
'roles/storage.objectCreator',
'roles/logging.logWriter'
]
SVC_ACCT_ROLES = [
'roles/iam.serviceAccountKeyAdmin'
]
# Required APIs
REQUIRED_APIS = [
{'name': 'Admin SDK',
'service': 'admin.googleapis.com'},
{'name': 'AppEngine Admin',
'service': 'appengine.googleapis.com'},
{'name': 'BigQuery',
'service': 'bigquery-json.googleapis.com'},
{'name': 'Cloud Billing',
'service': 'cloudbilling.googleapis.com'},
{'name': 'Cloud Resource Manager',
'service': 'cloudresourcemanager.googleapis.com'},
{'name': 'Cloud SQL',
'service': 'sql-component.googleapis.com'},
{'name': 'Cloud SQL Admin',
'service': 'sqladmin.googleapis.com'},
{'name': 'Compute Engine',
'service': 'compute.googleapis.com'},
{'name': 'Deployment Manager',
'service': 'deploymentmanager.googleapis.com'},
{'name': 'IAM',
'service': 'iam.googleapis.com'}
]
# Org Resource Types
RESOURCE_TYPES = ['organization', 'folder', 'project']
# Paths
ROOT_DIR_PATH = os.path.dirname(
os.path.dirname(
os.path.dirname(
os.path.dirname(
os.path.dirname(__file__)))))
RULES_DIR_PATH = os.path.abspath(
os.path.join(
ROOT_DIR_PATH, 'rules'))
FORSETI_SRC_PATH = os.path.join(
ROOT_DIR_PATH, 'google', 'cloud', 'forseti')
FORSETI_CONF_PATH = ('{bucket_name}/configs/{installer_type}/'
'forseti_conf_{installer_type}.yaml')
DEPLOYMENT_TEMPLATE_OUTPUT_PATH = '{}/deployment_templates/'
VERSIONFILE_REGEX = r'__version__ = \'(.*)\''
# Message templates
MESSAGE_GSUITE_DATA_COLLECTION = (
'To complete setup for G Suite Groups data collection, '
'follow the steps below:\n\n'
' 1. Click on: '
'https://console.cloud.google.com/iam-admin/serviceaccounts/'
'project?project={}&organizationId={}\n\n'
' 2. Locate the service account to enable '
'G Suite Groups collection:{}\n\n'
' 3. Select Edit and then the Enable G Suite Domain-wide '
'Delegation checkbox. Save.\n\n'
' 4. On the service account row, click View Client ID. '
'On the Client ID for Service account client panel that '
'appears, copy the Client ID value, which will be a large '
'number.\n\n'
' 5. Click on: '
'https://admin.google.com/ManageOauthClients\n\n'
' 6. In the Client Name box, paste the Client ID you '
'copied above.\n\n'
' 7. In the One or More API Scopes box, paste the '
'following scope:\n\n'
' https://www.googleapis.com/auth/admin.directory.'
'group.readonly,\n'
' https://www.googleapis.com/auth/admin.directory.'
'user.readonly\n\n'
' 8. Click Authorize\n\n'
'or refer to the guides:'
'http://forsetisecurity.org/docs/howto/configure/'
'gsuite-group-collection\n\n')
MESSAGE_SKIP_EMAIL = (
'If you would like to enable email notifications via '
'SendGrid, please refer to:\n\n'
' '
'http://forsetisecurity.org/docs/howto/configure/'
'email-notification\n\n')
MESSAGE_HAS_ROLE_SCRIPT = (
'Some roles could not be assigned to {} where you want '
'to grant Forseti access. A script `grant_forseti_roles.sh` '
'has been generated with the necessary commands to assign '
'those roles. Please run this script to assign the Forseti '
'roles so that Forseti will work properly.\n\n')
MESSAGE_ENABLE_GSUITE_GROUP = (
'If you want to enable G Suite Groups collection in '
'Forseti (for example, to use IAM Explain), follow '
'the steps in the guide below:\n\n'
' '
'http://forsetisecurity.org/docs/howto/configure/'
'gsuite-group-collection\n\n')
MESSAGE_ASK_GSUITE_SUPERADMIN_EMAIL = (
'\nTo read G Suite Groups data, for example, if you want to '
'use IAM Explain, please provide a G Suite super admin '
'email address. '
'This step is optional and can be configured later.')
MESSAGE_ASK_SENDGRID_API_KEY = (
'Forseti can send email notifications through SendGrid '
'via an API key. '
'This step is optional and can be configured later.')
MESSAGE_FORSETI_CONFIGURATION_ACCESS_LEVEL = (
'Forseti can be configured to access an '
'organization, folder, or project.')
MESSAGE_NO_CLOUD_SHELL = (
'Forseti highly recommends running this setup within '
'Cloud Shell. If you would like to run the setup '
'outside Cloud Shell, please be sure to do the '
'following:\n\n'
'1) Create a project.\n'
'2) Enable billing for the project.\n'
'3) Install gcloud and authenticate your account using '
'"gcloud auth login".\n'
'4) Set your project using '
'"gcloud config project set <PROJECT_ID>".\n'
'5) Run this setup again, with the --no-cloudshell flag, '
'i.e.\n\n python setup_forseti.py --no-cloudshell\n')
MESSAGE_FORSETI_CONFIGURATION_GENERATED = (
'A Forseti configuration file (configs/{installer_type}/'
'forseti_conf_{installer_type}_{datetimestamp}.yaml) '
'has been generated. If you wish to change your '
'Forseti configuration or rules, e.g. enabling G Suite '
'Groups collection, either download the conf file in '
'your bucket `{bucket_name}` or edit your local copy, then follow '
'the guide below to copy the files to Cloud Storage:\n\n'
' http://forsetisecurity.org/docs/howto/deploy/'
'gcp-deployment.html#move-configuration-to-gcs\n\n')
MESSAGE_FORSETI_CONFIGURATION_GENERATED_DRY_RUN = (
'A Forseti configuration file has been generated. '
'After you create your deployment, copy this file to '
'the bucket created in the deployment:\n\n'
' gsutil cp {} {}/configs/forseti_conf_server.yaml\n\n')
MESSAGE_DEPLOYMENT_HAD_ISSUES = (
'Your deployment had some issues. Please review the error '
'messages. If you need help, please either file an issue '
'on our Github Issues or email '
'[email protected].\n')
MESSAGE_FORSETI_BRANCH_DEPLOYED = (
'Forseti Security (branch/version: {}) has been '
'deployed to GCP.\n')
MESSAGE_DEPLOYMENT_TEMPLATE_LOCATION = (
'Your generated Deployment Manager template can be '
'found here:\n\n {}\n\n {}\n\n')
MESSAGE_VIEW_DEPLOYMENT_DETAILS = (
'You can view the details of your deployment in the '
'Cloud Console:\n\n '
'https://console.cloud.google.com/deployments/details/'
'{}?project={}&organizationId={}\n\n')
MESSAGE_GCLOUD_VERSION_MISMATCH = (
'You need the following gcloud setup:\n\n'
'gcloud version >= {}\n'
'gcloud alpha components\n\n'
'To install gcloud alpha components: '
'gcloud components install alpha\n\n'
'To update gcloud: gcloud components update\n')
MESSAGE_CREATE_ROLE_SCRIPT = (
'One or more roles could not be assigned. Writing a '
'script with the commands to assign those roles. Please '
'give this script to someone (like an admin) who can '
'assign these roles for you. If you do not assign these '
'roles, Forseti may not work properly!')
MESSAGE_BILLING_NOT_ENABLED = (
'\nIt seems that billing is not enabled for your project. '
'You can check whether billing has been enabled in the '
'Cloud Platform Console:\n\n'
' https://console.cloud.google.com/billing/linkedaccount?'
'project={}&organizationId={}\n\n'
'Once you have enabled billing, re-run this setup.\n')
MESSAGE_NO_ORGANIZATION = (
'You need to have an organization set up to use Forseti. '
'Refer to the following documentation for more information.\n\n'
'https://cloud.google.com/resource-manager/docs/'
'creating-managing-organization')
# Questions templates
QUESTION_ENABLE_WRITE_ACCESS = (
'Enable write access for Forseti? '
'This allows Forseti to make changes to policies '
'(e.g. for Enforcer) (y/n) ')
QUESTION_GSUITE_SUPERADMIN_EMAIL = (
'What is your organization\'s G Suite super admin email? '
'(press [enter] to skip) ')
QUESTION_SENDGRID_API_KEY = (
'What is your SendGrid API key? '
'(press [enter] to skip) ')
QUESTION_NOTIFICATION_RECIPIENT_EMAIL = (
'At what email address do you want to receive '
'notifications? (press [enter] to skip) ')
QUESTION_FORSETI_CONFIGURATION_ACCESS_LEVEL = (
'At what level do you want to enable Forseti '
'read (and optionally write) access? ')
QUESTION_ACCESS_TO_GRANT_ROLES = (
'Do you have access to grant Forseti IAM '
'roles on the target {}? (y/n) ')
QUESTION_CHOOSE_FOLDER = (
'To find the folder, go to Cloud Console:\n\n'
' https://console.cloud.google.com/'
'cloud-resource-manager?organizationId={}\n\n'
'Enter the folder id where you want '
'Forseti to crawl for data: ')
 | 1 | 28,848 | Can you please put each of these on a separate line? It will be easier to keep them sorted and to add or remove entries. | forseti-security-forseti-security | py
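One way to lay out the list from the patch as the reviewer asks, one file per line and sorted; purely illustrative:

FORSETI_V1_RULE_FILES = [
    'bigquery_rules.yaml',
    'blacklist_rules.yaml',
    'bucket_rules.yaml',
    'cloudsql_rules.yaml',
    'firewall_rules.yaml',
    'forwarding_rules.yaml',
    'group_rules.yaml',
    'iam_rules.yaml',
    'iap_rules.yaml',
    'instance_network_interface_rules.yaml',
    'ke_rules.yaml',
]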
@@ -170,10 +170,14 @@ public class ConfigSetsHandler extends RequestHandlerBase implements PermissionN
boolean overwritesExisting = zkClient.exists(configPathInZk, true);
- if (overwritesExisting && !req.getParams().getBool(ConfigSetParams.OVERWRITE, false)) {
- throw new SolrException(ErrorCode.BAD_REQUEST,
- "The configuration " + configSetName + " already exists in zookeeper");
- }
+ // Get upload parameters
+ String singleFilePath = req.getParams().get(ConfigSetParams.FILE_PATH, "");
+ boolean allowOverwrite = req.getParams().getBool(ConfigSetParams.OVERWRITE, false);
+ // Cleanup is not allowed while using singleFilePath upload
+ boolean cleanup = singleFilePath.isEmpty() && req.getParams().getBool(ConfigSetParams.CLEANUP, false);
+
+ // Create a node for the configuration in zookeeper
+ createBaseZnode(zkClient, overwritesExisting, isTrusted(req, coreContainer.getAuthenticationPlugin()), cleanup, configPathInZk);
Iterator<ContentStream> contentStreamsIterator = req.getContentStreams().iterator();
| 1 | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.solr.handler.admin;
import java.io.InputStream;
import java.lang.invoke.MethodHandles;
import java.nio.charset.StandardCharsets;
import java.util.Collections;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.TimeUnit;
import java.util.zip.ZipEntry;
import java.util.zip.ZipInputStream;
import org.apache.commons.io.IOUtils;
import org.apache.commons.lang3.StringUtils;
import org.apache.solr.client.solrj.SolrResponse;
import org.apache.solr.cloud.OverseerSolrResponse;
import org.apache.solr.cloud.OverseerSolrResponseSerializer;
import org.apache.solr.cloud.OverseerTaskQueue.QueueEvent;
import org.apache.solr.common.SolrException;
import org.apache.solr.common.SolrException.ErrorCode;
import org.apache.solr.common.cloud.SolrZkClient;
import org.apache.solr.common.cloud.ZkConfigManager;
import org.apache.solr.common.cloud.ZkMaintenanceUtils;
import org.apache.solr.common.cloud.ZkNodeProps;
import org.apache.solr.common.params.ConfigSetParams;
import org.apache.solr.common.params.ConfigSetParams.ConfigSetAction;
import org.apache.solr.common.params.SolrParams;
import org.apache.solr.common.util.ContentStream;
import org.apache.solr.common.util.NamedList;
import org.apache.solr.common.util.SimpleOrderedMap;
import org.apache.solr.common.util.Utils;
import org.apache.solr.core.CoreContainer;
import org.apache.solr.handler.RequestHandlerBase;
import org.apache.solr.request.SolrQueryRequest;
import org.apache.solr.response.SolrQueryResponse;
import org.apache.solr.security.AuthenticationPlugin;
import org.apache.solr.security.AuthorizationContext;
import org.apache.solr.security.PermissionNameProvider;
import org.apache.zookeeper.CreateMode;
import org.apache.zookeeper.KeeperException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import static org.apache.solr.cloud.Overseer.QUEUE_OPERATION;
import static org.apache.solr.cloud.OverseerConfigSetMessageHandler.BASE_CONFIGSET;
import static org.apache.solr.cloud.OverseerConfigSetMessageHandler.CONFIGSETS_ACTION_PREFIX;
import static org.apache.solr.cloud.OverseerConfigSetMessageHandler.PROPERTY_PREFIX;
import static org.apache.solr.common.params.CommonParams.NAME;
import static org.apache.solr.common.params.ConfigSetParams.ConfigSetAction.CREATE;
import static org.apache.solr.common.params.ConfigSetParams.ConfigSetAction.DELETE;
import static org.apache.solr.common.params.ConfigSetParams.ConfigSetAction.LIST;
/**
* A {@link org.apache.solr.request.SolrRequestHandler} for ConfigSets API requests.
*/
public class ConfigSetsHandler extends RequestHandlerBase implements PermissionNameProvider {
final public static Boolean DISABLE_CREATE_AUTH_CHECKS = Boolean.getBoolean("solr.disableConfigSetsCreateAuthChecks"); // this is for back compat only
final public static String DEFAULT_CONFIGSET_NAME = "_default";
final public static String AUTOCREATED_CONFIGSET_SUFFIX = ".AUTOCREATED";
private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
protected final CoreContainer coreContainer;
public static long DEFAULT_ZK_TIMEOUT = 300 * 1000;
/**
* Overloaded ctor to inject CoreContainer into the handler.
*
* @param coreContainer Core Container of the solr webapp installed.
*/
public ConfigSetsHandler(final CoreContainer coreContainer) {
this.coreContainer = coreContainer;
}
public static String getSuffixedNameForAutoGeneratedConfigSet(String configName) {
return configName + AUTOCREATED_CONFIGSET_SUFFIX;
}
public static boolean isAutoGeneratedConfigSet(String configName) {
return configName != null && configName.endsWith(AUTOCREATED_CONFIGSET_SUFFIX);
}
@Override
public void handleRequestBody(SolrQueryRequest req, SolrQueryResponse rsp) throws Exception {
checkErrors();
// Pick the action
SolrParams params = req.getParams();
String a = params.get(ConfigSetParams.ACTION);
if (a != null) {
ConfigSetAction action = ConfigSetAction.get(a);
if (action == null)
throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Unknown action: " + a);
if (action == ConfigSetAction.UPLOAD) {
handleConfigUploadRequest(req, rsp);
return;
}
invokeAction(req, rsp, action);
} else {
throw new SolrException(ErrorCode.BAD_REQUEST, "action is a required param");
}
rsp.setHttpCaching(false);
}
protected void checkErrors() {
if (coreContainer == null) {
throw new SolrException(ErrorCode.BAD_REQUEST,
"Core container instance missing");
}
// Make sure that the core is ZKAware
if (!coreContainer.isZooKeeperAware()) {
throw new SolrException(ErrorCode.BAD_REQUEST,
"Solr instance is not running in SolrCloud mode.");
}
}
void invokeAction(SolrQueryRequest req, SolrQueryResponse rsp, ConfigSetAction action) throws Exception {
ConfigSetOperation operation = ConfigSetOperation.get(action);
if (log.isInfoEnabled()) {
log.info("Invoked ConfigSet Action :{} with params {} ", action.toLower(), req.getParamString());
}
Map<String, Object> result = operation.call(req, rsp, this);
sendToZk(rsp, operation, result);
}
protected void sendToZk(SolrQueryResponse rsp, ConfigSetOperation operation, Map<String, Object> result)
throws KeeperException, InterruptedException {
if (result != null) {
// We need to differentiate between collection and configsets actions since they currently
// use the same underlying queue.
result.put(QUEUE_OPERATION, CONFIGSETS_ACTION_PREFIX + operation.action.toLower());
ZkNodeProps props = new ZkNodeProps(result);
handleResponse(operation.action.toLower(), props, rsp, DEFAULT_ZK_TIMEOUT);
}
}
private void handleConfigUploadRequest(SolrQueryRequest req, SolrQueryResponse rsp) throws Exception {
if (!"true".equals(System.getProperty("configset.upload.enabled", "true"))) {
throw new SolrException(ErrorCode.BAD_REQUEST,
"Configset upload feature is disabled. To enable this, start Solr with '-Dconfigset.upload.enabled=true'.");
}
String configSetName = req.getParams().get(NAME);
if (StringUtils.isBlank(configSetName)) {
throw new SolrException(ErrorCode.BAD_REQUEST,
"The configuration name should be provided in the \"name\" parameter");
}
SolrZkClient zkClient = coreContainer.getZkController().getZkClient();
String configPathInZk = ZkConfigManager.CONFIGS_ZKNODE + "/" + configSetName;
boolean overwritesExisting = zkClient.exists(configPathInZk, true);
if (overwritesExisting && !req.getParams().getBool(ConfigSetParams.OVERWRITE, false)) {
throw new SolrException(ErrorCode.BAD_REQUEST,
"The configuration " + configSetName + " already exists in zookeeper");
}
Iterator<ContentStream> contentStreamsIterator = req.getContentStreams().iterator();
if (!contentStreamsIterator.hasNext()) {
throw new SolrException(ErrorCode.BAD_REQUEST,
"No stream found for the config data to be uploaded");
}
InputStream inputStream = contentStreamsIterator.next().getStream();
// Create a node for the configuration in zookeeper
boolean cleanup = req.getParams().getBool(ConfigSetParams.CLEANUP, false);
Set<String> filesToDelete;
if (overwritesExisting && cleanup) {
filesToDelete = getAllConfigsetFiles(zkClient, configPathInZk);
} else {
filesToDelete = Collections.emptySet();
}
createBaseZnode(zkClient, overwritesExisting, isTrusted(req, coreContainer.getAuthenticationPlugin()), cleanup, configPathInZk);
ZipInputStream zis = new ZipInputStream(inputStream, StandardCharsets.UTF_8);
ZipEntry zipEntry = null;
while ((zipEntry = zis.getNextEntry()) != null) {
String filePathInZk = configPathInZk + "/" + zipEntry.getName();
if (filePathInZk.endsWith("/")) {
filesToDelete.remove(filePathInZk.substring(0, filePathInZk.length() - 1));
} else {
filesToDelete.remove(filePathInZk);
}
if (zipEntry.isDirectory()) {
zkClient.makePath(filePathInZk, false, true);
} else {
createZkNodeIfNotExistsAndSetData(zkClient, filePathInZk,
IOUtils.toByteArray(zis));
}
}
zis.close();
deleteUnusedFiles(zkClient, filesToDelete);
}
private void createBaseZnode(SolrZkClient zkClient, boolean overwritesExisting, boolean requestIsTrusted, boolean cleanup, String configPathInZk) throws KeeperException, InterruptedException {
byte[] baseZnodeData = ("{\"trusted\": " + Boolean.toString(requestIsTrusted) + "}").getBytes(StandardCharsets.UTF_8);
if (overwritesExisting) {
if (cleanup && requestIsTrusted) {
zkClient.setData(configPathInZk, baseZnodeData, true);
} else if (!requestIsTrusted) {
ensureOverwritingUntrustedConfigSet(zkClient, configPathInZk);
}
} else {
zkClient.makePath(configPathInZk, baseZnodeData, true);
}
}
private void deleteUnusedFiles(SolrZkClient zkClient, Set<String> filesToDelete) throws InterruptedException, KeeperException {
if (!filesToDelete.isEmpty()) {
if (log.isInfoEnabled()) {
log.info("Cleaning up {} unused files", filesToDelete.size());
}
if (log.isDebugEnabled()) {
log.debug("Cleaning up unused files: {}", filesToDelete);
}
for (String f : filesToDelete) {
try {
zkClient.delete(f, -1, true);
} catch (KeeperException.NoNodeException nne) {
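// Ignored: the node has already been deleted, which is the outcome this cleanup wants.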
}
}
}
}
private Set<String> getAllConfigsetFiles(SolrZkClient zkClient, String configPathInZk) throws KeeperException, InterruptedException {
final Set<String> files = new HashSet<>();
if (!configPathInZk.startsWith(ZkConfigManager.CONFIGS_ZKNODE + "/")) {
throw new IllegalArgumentException("\"" + configPathInZk + "\" not recognized as a configset path");
}
ZkMaintenanceUtils.traverseZkTree(zkClient, configPathInZk, ZkMaintenanceUtils.VISIT_ORDER.VISIT_POST, files::add);
files.remove(configPathInZk);
return files;
}
/*
* Fail if an untrusted request tries to update a trusted ConfigSet
*/
private void ensureOverwritingUntrustedConfigSet(SolrZkClient zkClient, String configSetZkPath) {
boolean isCurrentlyTrusted = isCurrentlyTrusted(zkClient, configSetZkPath);
if (isCurrentlyTrusted) {
throw new SolrException(ErrorCode.BAD_REQUEST, "Trying to make an untrusted ConfigSet update on a trusted configSet");
}
}
private static boolean isCurrentlyTrusted(SolrZkClient zkClient, String configSetZkPath) {
byte[] configSetNodeContent;
try {
configSetNodeContent = zkClient.getData(configSetZkPath, null, null, true);
if (configSetNodeContent == null || configSetNodeContent.length == 0) {
return true;
}
} catch (KeeperException e) {
throw new SolrException(ErrorCode.SERVER_ERROR, "Exception while fetching current configSet at " + configSetZkPath, e);
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
throw new SolrException(ErrorCode.SERVER_ERROR, "Interrupted while fetching current configSet at " + configSetZkPath, e);
}
@SuppressWarnings("unchecked")
Map<Object, Object> contentMap = (Map<Object, Object>) Utils.fromJSON(configSetNodeContent);
return (boolean) contentMap.getOrDefault("trusted", true);
}
static boolean isTrusted(SolrQueryRequest req, AuthenticationPlugin authPlugin) {
if (authPlugin != null && req.getUserPrincipal() != null) {
log.debug("Trusted configset request");
return true;
}
log.debug("Untrusted configset request");
return false;
}
private void createZkNodeIfNotExistsAndSetData(SolrZkClient zkClient,
String filePathInZk, byte[] data) throws Exception {
if (!zkClient.exists(filePathInZk, true)) {
zkClient.create(filePathInZk, data, CreateMode.PERSISTENT, true);
} else {
zkClient.setData(filePathInZk, data, true);
}
}
@SuppressWarnings({"unchecked"})
private void handleResponse(String operation, ZkNodeProps m,
SolrQueryResponse rsp, long timeout) throws KeeperException, InterruptedException {
long time = System.nanoTime();
QueueEvent event = coreContainer.getZkController()
.getOverseerConfigSetQueue()
.offer(Utils.toJSON(m), timeout);
if (event.getBytes() != null) {
SolrResponse response = OverseerSolrResponseSerializer.deserialize(event.getBytes());
rsp.getValues().addAll(response.getResponse());
@SuppressWarnings({"rawtypes"})
SimpleOrderedMap exp = (SimpleOrderedMap) response.getResponse().get("exception");
if (exp != null) {
Integer code = (Integer) exp.get("rspCode");
rsp.setException(new SolrException(code != null && code != -1 ? ErrorCode.getErrorCode(code) : ErrorCode.SERVER_ERROR, (String) exp.get("msg")));
}
} else {
if (System.nanoTime() - time >= TimeUnit.NANOSECONDS.convert(timeout, TimeUnit.MILLISECONDS)) {
throw new SolrException(ErrorCode.SERVER_ERROR, operation
+ " the configset timed out: " + timeout / 1000 + "s");
} else if (event.getWatchedEvent() != null) {
throw new SolrException(ErrorCode.SERVER_ERROR, operation
+ " the configset error [Watcher fired on path: "
+ event.getWatchedEvent().getPath() + " state: "
+ event.getWatchedEvent().getState() + " type "
+ event.getWatchedEvent().getType() + "]");
} else {
throw new SolrException(ErrorCode.SERVER_ERROR, operation
+ " the configset unknown case");
}
}
}
private static Map<String, Object> copyPropertiesWithPrefix(SolrParams params, Map<String, Object> props, String prefix) {
Iterator<String> iter = params.getParameterNamesIterator();
while (iter.hasNext()) {
String param = iter.next();
if (param.startsWith(prefix)) {
props.put(param, params.get(param));
}
}
// The configset created via an API should be mutable.
props.put("immutable", "false");
return props;
}
@Override
public String getDescription() {
return "Manage SolrCloud ConfigSets";
}
@Override
public Category getCategory() {
return Category.ADMIN;
}
public enum ConfigSetOperation {
CREATE_OP(CREATE) {
@Override
public Map<String, Object> call(SolrQueryRequest req, SolrQueryResponse rsp, ConfigSetsHandler h) throws Exception {
String baseConfigSetName = req.getParams().get(BASE_CONFIGSET, DEFAULT_CONFIGSET_NAME);
String newConfigSetName = req.getParams().get(NAME);
if (newConfigSetName == null || newConfigSetName.length() == 0) {
throw new SolrException(ErrorCode.BAD_REQUEST, "ConfigSet name not specified");
}
ZkConfigManager zkConfigManager = new ZkConfigManager(h.coreContainer.getZkController().getZkStateReader().getZkClient());
if (zkConfigManager.configExists(newConfigSetName)) {
throw new SolrException(ErrorCode.BAD_REQUEST, "ConfigSet already exists: " + newConfigSetName);
}
// check that the base config to copy from already exists
if (!zkConfigManager.configExists(baseConfigSetName)) {
throw new SolrException(ErrorCode.BAD_REQUEST,
"Base ConfigSet does not exist: " + baseConfigSetName);
}
Map<String, Object> props = CollectionsHandler.copy(req.getParams().required(), null, NAME);
props.put(BASE_CONFIGSET, baseConfigSetName);
if (!DISABLE_CREATE_AUTH_CHECKS &&
!isTrusted(req, h.coreContainer.getAuthenticationPlugin()) &&
isCurrentlyTrusted(h.coreContainer.getZkController().getZkClient(), ZkConfigManager.CONFIGS_ZKNODE + "/" + baseConfigSetName)) {
throw new SolrException(ErrorCode.UNAUTHORIZED, "Can't create a configset with an unauthenticated request from a trusted " + BASE_CONFIGSET);
}
return copyPropertiesWithPrefix(req.getParams(), props, PROPERTY_PREFIX + ".");
}
},
DELETE_OP(DELETE) {
@Override
public Map<String, Object> call(SolrQueryRequest req, SolrQueryResponse rsp, ConfigSetsHandler h) throws Exception {
return CollectionsHandler.copy(req.getParams().required(), null, NAME);
}
},
@SuppressWarnings({"unchecked"})
LIST_OP(LIST) {
@Override
public Map<String, Object> call(SolrQueryRequest req, SolrQueryResponse rsp, ConfigSetsHandler h) throws Exception {
NamedList<Object> results = new NamedList<>();
SolrZkClient zk = h.coreContainer.getZkController().getZkStateReader().getZkClient();
ZkConfigManager zkConfigManager = new ZkConfigManager(zk);
List<String> configSetsList = zkConfigManager.listConfigs();
results.add("configSets", configSetsList);
SolrResponse response = new OverseerSolrResponse(results);
rsp.getValues().addAll(response.getResponse());
return null;
}
};
ConfigSetAction action;
ConfigSetOperation(ConfigSetAction action) {
this.action = action;
}
public abstract Map<String, Object> call(SolrQueryRequest req, SolrQueryResponse rsp, ConfigSetsHandler h) throws Exception;
public static ConfigSetOperation get(ConfigSetAction action) {
for (ConfigSetOperation op : values()) {
if (op.action == action) return op;
}
throw new SolrException(ErrorCode.SERVER_ERROR, "No such action: " + action);
}
}
@Override
public Name getPermissionName(AuthorizationContext ctx) {
String a = ctx.getParams().get(ConfigSetParams.ACTION);
if (a != null) {
ConfigSetAction action = ConfigSetAction.get(a);
if (action == ConfigSetAction.CREATE || action == ConfigSetAction.DELETE || action == ConfigSetAction.UPLOAD) {
return Name.CONFIG_EDIT_PERM;
} else if (action == ConfigSetAction.LIST) {
return Name.CONFIG_READ_PERM;
}
}
return null;
}
}
| 1 | 37,731 | should we error instead of silently ignoring the `cleanup` param? it defaults to `false`, so someone must have explicitly set it to `true` | apache-lucene-solr | java |
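A possible follow-up to this review, sketched minimally: reject an explicit cleanup=true in handleConfigUploadRequest when there is no existing configset to clean, instead of silently ignoring it (the error wording here is illustrative, not the project's actual message):

boolean cleanup = req.getParams().getBool(ConfigSetParams.CLEANUP, false);
if (cleanup && !overwritesExisting) {
  // cleanup defaults to false, so reaching this branch means the caller set it
  // explicitly; fail fast rather than dropping the parameter on the floor.
  throw new SolrException(ErrorCode.BAD_REQUEST,
      "The 'cleanup' parameter may only be used when overwriting an existing configset");
}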
@@ -191,7 +191,7 @@ func NewReader(r io.ReaderAt, size int64) (*Reader, error) {
if len(archive.File) == 0 {
return nil, errors.New("archive is empty")
} else if fi := archive.File[0].FileInfo(); !fi.IsDir() {
- return nil, errors.New("archive root is not a directory")
+ return nil, fmt.Errorf("archive root directory missing: expected a 'root' directory but got %v", archive.File[0].Name)
}
root := archive.File[0].Name
pref, err := unitPrefix(root, archive.File) | 1 | /*
* Copyright 2018 The Kythe Authors. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// Package kzip implements the kzip compilation storage file format.
//
// The package exports two types of interest: A kzip.Reader can be used to read
// the contents of an existing kzip archive, and a kzip.Writer can be used to
// construct a new kzip archive.
//
// Reading an Archive:
//
// r, err := kzip.NewReader(file, size)
// ...
//
// // Look up a compilation record by its digest.
// unit, err := r.Lookup(unitDigest)
// ...
//
// // Scan all the compilation records stored.
// err := r.Scan(func(unit *kzip.Unit) error {
// if hasInterestingProperty(unit) {
// doStuffWith(unit)
// }
// return nil
// })
//
// // Open a reader for a stored file.
// rc, err := r.Open(fileDigest)
// ...
// defer rc.Close()
//
// // Read the complete contents of a stored file.
// bits, err := r.ReadAll(fileDigest)
// ...
//
// Writing an Archive:
//
// w, err := kzip.NewWriter(file)
// ...
//
// // Add a compilation record and (optional) index data.
// udigest, err := w.AddUnit(unit, nil)
// ...
//
// // Add file contents.
// fdigest, err := w.AddFile(file)
// ...
//
package kzip // import "kythe.io/kythe/go/platform/kzip"
import (
"archive/zip"
"bytes"
"context"
"crypto/sha256"
"encoding/hex"
"errors"
"fmt"
"io"
"io/ioutil"
"log"
"os"
"path"
"sort"
"strconv"
"strings"
"sync"
"time"
"kythe.io/kythe/go/platform/kcd/kythe"
"bitbucket.org/creachadair/stringset"
"golang.org/x/sync/errgroup"
"google.golang.org/protobuf/encoding/protojson"
"google.golang.org/protobuf/proto"
apb "kythe.io/kythe/proto/analysis_go_proto"
// These are common detail messages used by Kythe compilations, and
// required for JSON (un)marshaling to work.
_ "kythe.io/kythe/proto/buildinfo_go_proto"
_ "kythe.io/kythe/proto/cxx_go_proto"
_ "kythe.io/kythe/proto/filecontext_go_proto"
_ "kythe.io/kythe/proto/go_go_proto"
_ "kythe.io/kythe/proto/java_go_proto"
)
// Encoding describes how compilation units will be encoded when written to a kzip.
type Encoding int
const (
// EncodingJSON specifies to use JSON encoding
EncodingJSON Encoding = 1
// EncodingProto specifies to use Proto encoding
EncodingProto Encoding = 2
// EncodingAll specifies to encode using all known encodings
EncodingAll Encoding = EncodingJSON | EncodingProto
prefixJSON = "units"
prefixProto = "pbunits"
)
var (
// Use a constant file modification time in the kzip so file diffs only compare the contents,
// not when the kzips were created.
modifiedTime = time.Unix(0, 0)
)
// EncodingFor converts a string to an Encoding.
func EncodingFor(v string) (Encoding, error) {
v = strings.ToUpper(v)
switch v {
case "ALL":
return EncodingAll, nil
case "JSON":
return EncodingJSON, nil
case "PROTO":
return EncodingProto, nil
default:
return EncodingProto, fmt.Errorf("unknown encoding %s", v)
}
}
// String stringifies an Encoding
func (e Encoding) String() string {
switch e {
case EncodingAll:
return "All"
case EncodingJSON:
return "JSON"
case EncodingProto:
return "Proto"
default:
return "Encoding" + strconv.FormatInt(int64(e), 10)
}
}
// DefaultEncoding returns the default kzip encoding
func DefaultEncoding() Encoding {
if e := os.Getenv("KYTHE_KZIP_ENCODING"); e != "" {
enc, err := EncodingFor(e)
if err == nil {
return enc
}
log.Printf("Unknown kzip encoding: %s", e)
}
return EncodingProto
}
// A Reader permits reading and scanning compilation records and file contents
// stored in a .kzip archive. The Lookup and Scan methods are mutually safe for
// concurrent use by multiple goroutines.
type Reader struct {
zip *zip.Reader
// The archives written by this library always use "root/" for the root
// directory, but it's not required by the spec. Use whatever name the
// archive actually specifies in the leading directory.
root string
// The prefix used for the compilation unit directory; one of
// prefixJSON or prefixProto
unitsPrefix string
}
// NewReader constructs a new Reader that consumes zip data from r, whose total
// size in bytes is given.
func NewReader(r io.ReaderAt, size int64) (*Reader, error) {
archive, err := zip.NewReader(r, size)
if err != nil {
return nil, err
}
// Order the files in the archive by path, so we can binary search.
sort.Slice(archive.File, func(i, j int) bool {
return archive.File[i].Name < archive.File[j].Name
})
if len(archive.File) == 0 {
return nil, errors.New("archive is empty")
} else if fi := archive.File[0].FileInfo(); !fi.IsDir() {
return nil, errors.New("archive root is not a directory")
}
root := archive.File[0].Name
pref, err := unitPrefix(root, archive.File)
if err != nil {
return nil, err
}
return &Reader{
zip: archive,
root: root,
unitsPrefix: pref,
}, nil
}
func unitPrefix(root string, fs []*zip.File) (string, error) {
jsonDir := root + prefixJSON + "/"
protoDir := root + prefixProto + "/"
j := sort.Search(len(fs), func(i int) bool {
return fs[i].Name > jsonDir
})
hasJSON := j < len(fs) && strings.HasPrefix(fs[j].Name, jsonDir)
p := sort.Search(len(fs), func(i int) bool {
return fs[i].Name > protoDir
})
hasProto := p < len(fs) && strings.HasPrefix(fs[p].Name, protoDir)
if hasJSON && hasProto {
// validate that they have identical units based on hash
for p < len(fs) && j < len(fs) {
ispb := strings.HasPrefix(fs[p].Name, protoDir)
isjson := strings.HasPrefix(fs[j].Name, jsonDir)
if ispb != isjson {
return "", fmt.Errorf("both proto and JSON units found but are not identical")
}
if !ispb {
break
}
pdigest := strings.Split(fs[p].Name, "/")[2]
jdigest := strings.Split(fs[j].Name, "/")[2]
if pdigest != jdigest {
return "", fmt.Errorf("both proto and JSON units found but are not identical")
}
p++
j++
}
}
if hasProto {
return prefixProto, nil
}
return prefixJSON, nil
}
// Encoding exposes the file encoding being used to read compilation units.
func (r *Reader) Encoding() (Encoding, error) {
switch r.unitsPrefix {
case prefixJSON:
return EncodingJSON, nil
case prefixProto:
return EncodingProto, nil
}
return EncodingAll, fmt.Errorf("unknown encoding prefix: %v", r.unitsPrefix)
}
func (r *Reader) unitPath(digest string) string { return path.Join(r.root, r.unitsPrefix, digest) }
func (r *Reader) filePath(digest string) string { return path.Join(r.root, "files", digest) }
// ErrDigestNotFound is returned when a requested compilation unit or file
// digest is not found.
var ErrDigestNotFound = errors.New("digest not found")
// ErrUnitExists is returned by AddUnit when adding the same compilation
// multiple times.
var ErrUnitExists = errors.New("unit already exists")
func (r *Reader) readUnit(digest string, f *zip.File) (*Unit, error) {
rc, err := f.Open()
if err != nil {
return nil, err
}
rec := make([]byte, f.UncompressedSize64)
_, err = io.ReadFull(rc, rec)
rc.Close()
if err != nil {
return nil, err
}
var msg apb.IndexedCompilation
if r.unitsPrefix == prefixProto {
if err := proto.Unmarshal(rec, &msg); err != nil {
return nil, fmt.Errorf("error unmarshaling for %s: %s", digest, err)
}
} else if err := protojson.Unmarshal(rec, &msg); err != nil {
return nil, err
}
return &Unit{
Digest: digest,
Proto: msg.Unit,
Index: msg.Index,
}, nil
}
// firstIndex returns the first index in the archive's file list whose
// path starts with prefix, or -1 if no such index exists.
func (r *Reader) firstIndex(prefix string) int {
fs := r.zip.File
n := sort.Search(len(fs), func(i int) bool {
return fs[i].Name >= prefix
})
if n >= len(fs) {
return -1
}
if !strings.HasPrefix(fs[n].Name, prefix) {
return -1
}
return n
}
// Lookup returns the specified compilation from the archive, if it exists. If
// the requested digest is not in the archive, ErrDigestNotFound is returned.
func (r *Reader) Lookup(unitDigest string) (*Unit, error) {
needle := r.unitPath(unitDigest)
pos := r.firstIndex(needle)
if pos >= 0 {
if f := r.zip.File[pos]; f.Name == needle {
return r.readUnit(unitDigest, f)
}
}
return nil, ErrDigestNotFound
}
// A ScanOption configures the behavior of scanning a kzip file.
type ScanOption interface{ isScanOption() }
type readConcurrency int
func (readConcurrency) isScanOption() {}
// ReadConcurrency returns a ScanOption that configures the max concurrency of
// reading compilation units within a kzip archive.
func ReadConcurrency(n int) ScanOption {
return readConcurrency(n)
}
func (r *Reader) canonicalUnits() (string, []*zip.File) {
prefix := r.unitPath("") + "/"
pos := r.firstIndex(prefix)
if pos < 0 {
return "", nil
}
var res []*zip.File
for _, file := range r.zip.File[pos:] {
if !strings.HasPrefix(file.Name, prefix) {
break
}
if file.Name == prefix {
continue // tolerate an empty units directory entry
}
res = append(res, file)
}
return prefix, res
}
// Scan scans all the compilations stored in the archive, and invokes f for
// each compilation record. If f reports an error, the scan is terminated and
// that error is propagated to the caller of Scan. At most 1 invocation of f
// will occur at any one time.
func (r *Reader) Scan(f func(*Unit) error, opts ...ScanOption) error {
concurrency := 1
for _, opt := range opts {
switch opt := opt.(type) {
case readConcurrency:
if n := int(opt); n > 0 {
concurrency = n
}
default:
return fmt.Errorf("unknown ScanOption type: %T", opt)
}
}
prefix, fileUnits := r.canonicalUnits()
if len(fileUnits) == 0 {
return nil
}
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
g, ctx := errgroup.WithContext(ctx)
files := make(chan *zip.File)
g.Go(func() error {
defer close(files)
for _, file := range fileUnits {
select {
case <-ctx.Done():
return nil
case files <- file:
}
}
return nil
})
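// Fan out: a fixed pool of workers decodes units concurrently, while results
// funnel through a single channel so the caller's callback f still runs serially.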
units := make(chan *Unit)
var wg sync.WaitGroup
for i := 0; i < concurrency; i++ {
wg.Add(1)
g.Go(func() error {
defer wg.Done()
for file := range files {
digest := strings.TrimPrefix(file.Name, prefix)
unit, err := r.readUnit(digest, file)
if err != nil {
return err
}
select {
case <-ctx.Done():
return nil
case units <- unit:
}
}
return nil
})
}
go func() { wg.Wait(); close(units) }()
for unit := range units {
select {
case <-ctx.Done():
return g.Wait()
default:
if err := f(unit); err != nil {
return err
}
}
}
return g.Wait()
}
// Open opens a reader on the contents of the specified file digest. If the
// requested digest is not in the archive, ErrDigestNotFound is returned. The
// caller must close the reader when it is no longer needed.
func (r *Reader) Open(fileDigest string) (io.ReadCloser, error) {
needle := r.filePath(fileDigest)
if pos := r.firstIndex(needle); pos >= 0 {
if f := r.zip.File[pos]; f.Name == needle {
return f.Open()
}
}
return nil, ErrDigestNotFound
}
// ReadAll returns the complete contents of the file with the specified digest.
// It is a convenience wrapper for Open followed by ioutil.ReadAll.
func (r *Reader) ReadAll(fileDigest string) ([]byte, error) {
f, err := r.Open(fileDigest)
if err == nil {
defer f.Close()
return ioutil.ReadAll(f)
}
return nil, err
}
// A Unit represents a compilation record read from a kzip archive.
type Unit struct {
Digest string
Proto *apb.CompilationUnit
Index *apb.IndexedCompilation_Index
}
// A Writer permits construction of a .kzip archive.
type Writer struct {
mu sync.Mutex
zip *zip.Writer
fd stringset.Set // file digests already written
ud stringset.Set // unit digests already written
c io.Closer // a closer for the underlying writer (may be nil)
encoding Encoding // What encoding to use
}
// WriterOption describes options when creating a Writer
type WriterOption func(*Writer)
// WithEncoding sets the encoding to be used by a Writer
func WithEncoding(e Encoding) WriterOption {
return func(w *Writer) {
w.encoding = e
}
}
// NewWriter constructs a new empty Writer that delivers output to w. The
// AddUnit and AddFile methods are safe for use by concurrent goroutines.
func NewWriter(w io.Writer, options ...WriterOption) (*Writer, error) {
archive := zip.NewWriter(w)
// Create an entry for the root directory, which must be first.
root := &zip.FileHeader{
Name: "root/",
Comment: "kzip root directory",
Modified: modifiedTime,
}
root.SetMode(os.ModeDir | 0755)
if _, err := archive.CreateHeader(root); err != nil {
return nil, err
}
archive.SetComment("Kythe kzip archive")
kw := &Writer{
zip: archive,
fd: stringset.New(),
ud: stringset.New(),
encoding: DefaultEncoding(),
}
for _, opt := range options {
opt(kw)
}
return kw, nil
}
// NewWriteCloser behaves as NewWriter, but arranges that when the *Writer is
// closed it also closes wc.
func NewWriteCloser(wc io.WriteCloser, options ...WriterOption) (*Writer, error) {
w, err := NewWriter(wc, options...)
if err == nil {
w.c = wc
}
return w, err
}
// toJSON defines the encoding format for compilation messages.
var toJSON = &protojson.MarshalOptions{UseProtoNames: true}
// AddUnit adds a new compilation record to be added to the archive, returning
// the hex-encoded SHA256 digest of the unit's contents. It is legal for index
// to be nil, in which case no index terms will be added.
//
// If the same compilation is added multiple times, AddUnit returns the digest
// of the duplicated compilation along with ErrUnitExists to all callers after
// the first. The existing unit is not modified.
func (w *Writer) AddUnit(cu *apb.CompilationUnit, index *apb.IndexedCompilation_Index) (string, error) {
unit := kythe.Unit{Proto: cu}
unit.Canonicalize()
digest := unit.Digest()
w.mu.Lock()
defer w.mu.Unlock()
if w.ud.Contains(digest) {
return digest, ErrUnitExists
}
if w.encoding&EncodingJSON != 0 {
f, err := w.zip.CreateHeader(newFileHeader("root", prefixJSON, digest))
if err != nil {
return "", err
}
rec, err := toJSON.Marshal(&apb.IndexedCompilation{
Unit: unit.Proto,
Index: index,
})
if err != nil {
return "", err
}
if _, err := f.Write(rec); err != nil {
return "", err
}
}
if w.encoding&EncodingProto != 0 {
f, err := w.zip.CreateHeader(newFileHeader("root", prefixProto, digest))
if err != nil {
return "", err
}
rec, err := proto.Marshal(&apb.IndexedCompilation{
Unit: unit.Proto,
Index: index,
})
if err != nil {
return "", err
}
_, err = f.Write(rec)
if err != nil {
return "", err
}
}
w.ud.Add(digest)
return digest, nil
}
// AddFile copies the complete contents of r into the archive as a new file
// entry, returning the hex-encoded SHA256 digest of the file's contents.
func (w *Writer) AddFile(r io.Reader) (string, error) {
// Buffer the file contents and compute their digest.
// We have to do this ahead of time, because we have to provide the name of
// the file before we can start writing its contents.
var buf bytes.Buffer
hash := sha256.New()
if _, err := io.Copy(io.MultiWriter(hash, &buf), r); err != nil {
return "", err
}
digest := hex.EncodeToString(hash.Sum(nil))
w.mu.Lock()
defer w.mu.Unlock()
if w.fd.Contains(digest) {
return digest, nil // already written
}
f, err := w.zip.CreateHeader(newFileHeader("root", "files", digest))
if err != nil {
return "", err
}
if _, err := io.Copy(f, &buf); err != nil {
return "", err
}
w.fd.Add(digest)
return digest, nil
}
// Close closes the writer, flushing any remaining unwritten data out to the
// underlying zip file. It is safe to close w arbitrarily many times; all calls
// after the first will report nil.
func (w *Writer) Close() error {
w.mu.Lock()
defer w.mu.Unlock()
if w.zip != nil {
err := w.zip.Close()
w.zip = nil
if w.c != nil {
if cerr := w.c.Close(); err == nil {
return cerr
}
}
return err
}
return nil
}
func newFileHeader(parts ...string) *zip.FileHeader {
fh := &zip.FileHeader{Name: path.Join(parts...), Method: zip.Deflate}
fh.SetMode(0600)
fh.Modified = modifiedTime
return fh
}
// Scan is a convenience function that creates a *Reader from f and invokes its
// Scan method with the given callback. Each invocation of scan is passed the
// reader associated with f, along with the current compilation unit.
func Scan(f File, scan func(*Reader, *Unit) error, opts ...ScanOption) error {
size, err := f.Seek(0, io.SeekEnd)
if err != nil {
return fmt.Errorf("getting file size: %v", err)
}
r, err := NewReader(f, size)
if err != nil {
return err
}
return r.Scan(func(unit *Unit) error {
return scan(r, unit)
}, opts...)
}
// A File represents the file capabilities needed to scan a kzip file.
type File interface {
io.ReaderAt
io.Seeker
}
| 1 | 13,420 | There is no specific requirement that the root be called `root`, just that there is a single root directory and that it be the first entry in the zip. | kythe-kythe | go |
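Following the reviewer's reading of the spec, a minimal sketch of a name-agnostic check: it only verifies that the first zip entry is a directory and that every other entry lives under it, without assuming the directory is called "root" (the helper name validateRoot is made up for illustration):

// validateRoot returns the archive's single leading directory, whatever its
// name, or an error if the first entry is not a directory or some entry
// falls outside it. It assumes files is already sorted by name.
func validateRoot(files []*zip.File) (string, error) {
	if len(files) == 0 {
		return "", errors.New("archive is empty")
	}
	if !files[0].FileInfo().IsDir() {
		return "", fmt.Errorf("first archive entry %q is not a directory", files[0].Name)
	}
	root := files[0].Name
	for _, f := range files[1:] {
		if !strings.HasPrefix(f.Name, root) {
			return "", fmt.Errorf("entry %q is outside the root directory %q", f.Name, root)
		}
	}
	return root, nil
}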
@@ -1,6 +1,9 @@
<% unless ENV['DISABLE_SANDBOX_WARNING'] == 'true' %>
<%= render partial: 'shared/sandbox_warning' %>
<% end %>
+<% if !current_page?(me_path) && current_user && current_user.requires_profile_attention? %>
+ <%= render partial: "shared/user_profile_warning" %>
+<% end %>
<header>
<div class='container'>
<div id='header-identity'> | 1 | <% unless ENV['DISABLE_SANDBOX_WARNING'] == 'true' %>
<%= render partial: 'shared/sandbox_warning' %>
<% end %>
<header>
<div class='container'>
<div id='header-identity'>
<div id="communicart_logo">Communicart</div>
<h1>Approval Portal</h1>
</div>
<%= render 'layouts/header_nav' %>
</div>
</header>
<%- if return_to %>
<%= render partial: 'shared/breadcrumb_nav', locals: {text: "Back to #{return_to[:name]}", href: return_to[:path]} %>
<%- elsif display_return_to_proposal %>
<%= render partial: 'shared/breadcrumb_nav', locals: {text: "Back to Proposal", href: proposal_path(params[:id])} %>
<%- elsif display_return_to_proposals %>
<%= render partial: 'shared/breadcrumb_nav', locals: {text: "Back to main portal", href: proposals_path} %>
<% end %>
| 1 | 15,451 | perhaps we should encapsulate this logic in a helper method w a test? | 18F-C2 | rb |
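A minimal sketch of the suggested refactor, assuming a helper such as the one below (the name show_user_profile_warning? is illustrative) plus a matching helper spec:

# app/helpers/application_helper.rb
def show_user_profile_warning?
  !current_page?(me_path) && current_user && current_user.requires_profile_attention?
end

The view condition then collapses to <% if show_user_profile_warning? %> ... <% end %>, which is straightforward to unit-test without rendering the whole layout.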
@@ -0,0 +1,7 @@
+[ 'options_hash', 'defaults', 'command_line_parser', 'pe_version_scraper', 'parser' ].each do |file|
+ begin
+ require "beaker/options/#{file}"
+ rescue LoadError
+ require File.expand_path(File.join(File.dirname(__FILE__), 'options', file))
+ end
+end | 1 | 1 | 4,524 | Now that we're only using this repo as a Gem you shouldn't need to `require` an expanded local file path like below. | voxpupuli-beaker | rb |
|
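A sketch of the simplification the reviewer is pointing at, assuming the gem's lib directory is on the load path so the LoadError fallback is never needed:

%w[options_hash defaults command_line_parser pe_version_scraper parser].each do |file|
  require "beaker/options/#{file}"
end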
@@ -53,7 +53,8 @@ enum Timestamps implements Transform<Long, Integer> {
OffsetDateTime timestamp = Instant
.ofEpochSecond(timestampMicros / 1_000_000)
.atOffset(ZoneOffset.UTC);
- return (int) granularity.between(EPOCH, timestamp);
+ Integer year = Long.valueOf(granularity.between(EPOCH, timestamp)).intValue();
+ return year;
}
@Override | 1 | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.iceberg.transforms;
import java.time.Instant;
import java.time.OffsetDateTime;
import java.time.ZoneOffset;
import java.time.temporal.ChronoUnit;
import org.apache.iceberg.expressions.BoundPredicate;
import org.apache.iceberg.expressions.Expressions;
import org.apache.iceberg.expressions.UnboundPredicate;
import org.apache.iceberg.types.Type;
import org.apache.iceberg.types.Types;
import static org.apache.iceberg.expressions.Expression.Operation.IS_NULL;
import static org.apache.iceberg.expressions.Expression.Operation.NOT_NULL;
enum Timestamps implements Transform<Long, Integer> {
YEAR(ChronoUnit.YEARS, "year"),
MONTH(ChronoUnit.MONTHS, "month"),
DAY(ChronoUnit.DAYS, "day"),
HOUR(ChronoUnit.HOURS, "hour");
private static final OffsetDateTime EPOCH = Instant.ofEpochSecond(0).atOffset(ZoneOffset.UTC);
private final ChronoUnit granularity;
private final String name;
Timestamps(ChronoUnit granularity, String name) {
this.granularity = granularity;
this.name = name;
}
@Override
public Integer apply(Long timestampMicros) {
// discards fractional seconds, not needed for calculation
OffsetDateTime timestamp = Instant
.ofEpochSecond(timestampMicros / 1_000_000)
.atOffset(ZoneOffset.UTC);
return (int) granularity.between(EPOCH, timestamp);
}
@Override
public boolean canTransform(Type type) {
return type.typeId() == Type.TypeID.TIMESTAMP;
}
@Override
public Type getResultType(Type sourceType) {
return Types.IntegerType.get();
}
@Override
public UnboundPredicate<Integer> project(String fieldName, BoundPredicate<Long> pred) {
if (pred.op() == NOT_NULL || pred.op() == IS_NULL) {
return Expressions.predicate(pred.op(), fieldName);
}
return ProjectionUtil.truncateLong(fieldName, pred, this);
}
@Override
public UnboundPredicate<Integer> projectStrict(String fieldName, BoundPredicate<Long> predicate) {
return null;
}
@Override
public String toHumanString(Integer value) {
if (value == null) {
return "null";
}
switch (granularity) {
case YEARS:
return TransformUtil.humanYear(value);
case MONTHS:
return TransformUtil.humanMonth(value);
case DAYS:
return TransformUtil.humanDay(value);
case HOURS:
return TransformUtil.humanHour(value);
default:
throw new UnsupportedOperationException("Unsupported time unit: " + granularity);
}
}
@Override
public String toString() {
return name;
}
}
| 1 | 13,192 | This isn't necessarily a year. It may be months, days, or hours. Can we return `intValue()` directly instead? | apache-iceberg | java |
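A minimal sketch of the reviewer's suggestion: drop the misleading year name and return the narrowed value directly. Using Math.toIntExact here is an assumption on my part; unlike a plain cast it also fails loudly on overflow:

@Override
public Integer apply(Long timestampMicros) {
  // discards fractional seconds, not needed for calculation
  OffsetDateTime timestamp = Instant
      .ofEpochSecond(timestampMicros / 1_000_000)
      .atOffset(ZoneOffset.UTC);
  // granularity may be YEARS, MONTHS, DAYS, or HOURS, so avoid calling this a "year"
  return Math.toIntExact(granularity.between(EPOCH, timestamp));
}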
@@ -351,8 +351,6 @@ func (mtask *managedTask) waitEvent(stopWaiting <-chan struct{}) bool {
mtask.handleDesiredStatusChange(acsTransition.desiredStatus, acsTransition.seqnum)
return false
case dockerChange := <-mtask.dockerMessages:
- seelog.Infof("Managed task [%s]: got container [%s (Runtime ID: %s)] event: [%s]",
- mtask.Arn, dockerChange.container.Name, dockerChange.container.GetRuntimeID(), dockerChange.event.Status.String())
mtask.handleContainerChange(dockerChange)
return false
case resChange := <-mtask.resourceStateChangeEvent: | 1 | // Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License"). You may
// not use this file except in compliance with the License. A copy of the
// License is located at
//
// http://aws.amazon.com/apache2.0/
//
// or in the "license" file accompanying this file. This file is distributed
// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
// express or implied. See the License for the specific language governing
// permissions and limitations under the License.
package engine
import (
"context"
"io"
"strings"
"sync"
"time"
"github.com/aws/amazon-ecs-agent/agent/api"
apicontainer "github.com/aws/amazon-ecs-agent/agent/api/container"
apicontainerstatus "github.com/aws/amazon-ecs-agent/agent/api/container/status"
apierrors "github.com/aws/amazon-ecs-agent/agent/api/errors"
apitask "github.com/aws/amazon-ecs-agent/agent/api/task"
apitaskstatus "github.com/aws/amazon-ecs-agent/agent/api/task/status"
"github.com/aws/amazon-ecs-agent/agent/config"
"github.com/aws/amazon-ecs-agent/agent/credentials"
"github.com/aws/amazon-ecs-agent/agent/dockerclient/dockerapi"
"github.com/aws/amazon-ecs-agent/agent/ecscni"
"github.com/aws/amazon-ecs-agent/agent/engine/dependencygraph"
"github.com/aws/amazon-ecs-agent/agent/eventstream"
"github.com/aws/amazon-ecs-agent/agent/statechange"
"github.com/aws/amazon-ecs-agent/agent/statemanager"
"github.com/aws/amazon-ecs-agent/agent/taskresource"
resourcestatus "github.com/aws/amazon-ecs-agent/agent/taskresource/status"
"github.com/aws/amazon-ecs-agent/agent/utils/retry"
utilsync "github.com/aws/amazon-ecs-agent/agent/utils/sync"
"github.com/aws/amazon-ecs-agent/agent/utils/ttime"
"github.com/cihub/seelog"
"github.com/pkg/errors"
)
const (
// waitForPullCredentialsTimeout is the timeout agent trying to wait for pull
// credentials from acs, after the timeout it will check the credentials manager
// and start processing the task or start another round of waiting
waitForPullCredentialsTimeout = 1 * time.Minute
defaultTaskSteadyStatePollInterval = 5 * time.Minute
defaultTaskSteadyStatePollIntervalJitter = 30 * time.Second
transitionPollTime = 5 * time.Second
stoppedSentWaitInterval = 30 * time.Second
maxStoppedWaitTimes = 72 * time.Hour / stoppedSentWaitInterval
taskUnableToTransitionToStoppedReason = "TaskStateError: Agent could not progress task's state to stopped"
)
var (
_stoppedSentWaitInterval = stoppedSentWaitInterval
_maxStoppedWaitTimes = int(maxStoppedWaitTimes)
taskNotWaitForSteadyStateError = errors.New("managed task: steady state check context is nil")
)
type acsTaskUpdate struct {
apitaskstatus.TaskStatus
}
type dockerContainerChange struct {
container *apicontainer.Container
event dockerapi.DockerContainerChangeEvent
}
// resourceStateChange represents the required status change after resource transition
type resourceStateChange struct {
resource taskresource.TaskResource
nextState resourcestatus.ResourceStatus
err error
}
type acsTransition struct {
seqnum int64
desiredStatus apitaskstatus.TaskStatus
}
// containerTransition defines the struct for a container to transition
type containerTransition struct {
nextState apicontainerstatus.ContainerStatus
actionRequired bool
blockedOn *apicontainer.DependsOn
reason error
}
// resourceTransition defines the struct for a resource to transition.
type resourceTransition struct {
// nextState represents the next known status that the resource can move to
nextState resourcestatus.ResourceStatus
// status is the string value of nextState
status string
// actionRequired indicates if the transition function needs to be called for
// the transition to be complete
actionRequired bool
// reason represents the error blocks transition
reason error
}
// managedTask is a type that is meant to manage the lifecycle of a task.
// There should be only one managed task construct for a given task arn and the
// managed task should be the only thing to modify the task's known or desired statuses.
//
// The managedTask should run serially in a single goroutine in which it reads
// messages from the two given channels and acts upon them.
// This design is chosen to allow a safe level of isolation and avoid any race
// conditions around the state of a task.
// The data sources (e.g. docker, acs) that write to the task's channels may
// block and it is expected that the managedTask listen to those channels
// almost constantly.
// The general operation should be:
// 1) Listen to the channels
// 2) On an event, update the status of the task and containers (known/desired)
// 3) Figure out if any action needs to be done. If so, do it
// 4) GOTO 1
// Item '3' obviously might lead to some duration where you are not listening
// to the channels. However, this can be solved by kicking off '3' as a
// goroutine and then only communicating the result back via the channels
// (obviously once you kick off a goroutine you give up the right to write the
// task's statuses yourself)
type managedTask struct {
*apitask.Task
ctx context.Context
cancel context.CancelFunc
engine *DockerTaskEngine
cfg *config.Config
saver statemanager.Saver
credentialsManager credentials.Manager
cniClient ecscni.CNIClient
taskStopWG *utilsync.SequentialWaitGroup
acsMessages chan acsTransition
dockerMessages chan dockerContainerChange
resourceStateChangeEvent chan resourceStateChange
stateChangeEvents chan statechange.Event
containerChangeEventStream *eventstream.EventStream
// unexpectedStart is a once that controls stopping a container that
// unexpectedly started one time.
// This exists because a 'start' after a container is meant to be stopped is
// possible under some circumstances (e.g. a timeout). However, if it
// continues to 'start' when we aren't asking it to, let it go through in
// case it's a user trying to debug it or in case we're fighting with another
// thing managing the container.
unexpectedStart sync.Once
_time ttime.Time
_timeOnce sync.Once
// steadyStatePollInterval is the duration that a managed task waits
// once the task gets into steady state before polling the state of all of
// the task's containers to re-evaluate if the task is still in steady state
// This is set to defaultTaskSteadyStatePollInterval in production code.
// This can be used by tests that are looking to ensure that the steady state
// verification logic gets executed to set it to a low interval
steadyStatePollInterval time.Duration
steadyStatePollIntervalJitter time.Duration
}
// newManagedTask is a method on DockerTaskEngine to create a new managedTask.
// This method must only be called when the engine.processTasks write lock is
// already held.
func (engine *DockerTaskEngine) newManagedTask(task *apitask.Task) *managedTask {
ctx, cancel := context.WithCancel(engine.ctx)
t := &managedTask{
ctx: ctx,
cancel: cancel,
Task: task,
acsMessages: make(chan acsTransition),
dockerMessages: make(chan dockerContainerChange),
resourceStateChangeEvent: make(chan resourceStateChange),
engine: engine,
cfg: engine.cfg,
stateChangeEvents: engine.stateChangeEvents,
containerChangeEventStream: engine.containerChangeEventStream,
saver: engine.saver,
credentialsManager: engine.credentialsManager,
cniClient: engine.cniClient,
taskStopWG: engine.taskStopGroup,
steadyStatePollInterval: engine.taskSteadyStatePollInterval,
steadyStatePollIntervalJitter: engine.taskSteadyStatePollIntervalJitter,
}
engine.managedTasks[task.Arn] = t
return t
}
// overseeTask is the main goroutine of the managedTask. It runs an infinite
// loop of receiving messages and attempting to take action based on those
// messages.
func (mtask *managedTask) overseeTask() {
// Do a single UpdateStatus at the beginning to create the container
// `desiredStatus`es, which are a construct of the engine used only here,
// not present on the backend
mtask.UpdateStatus()
// If this was a 'state restore', send all unsent statuses
mtask.emitCurrentStatus()
// Wait for host resources required by this task to become available
mtask.waitForHostResources()
// Main infinite loop. This is where we receive messages and dispatch work.
for {
select {
case <-mtask.ctx.Done():
seelog.Infof("Managed task [%s]: parent context cancelled, exit", mtask.Arn)
return
default:
}
// If it's steadyState, just spin until we need to do work
for mtask.steadyState() {
mtask.waitSteady()
}
if !mtask.GetKnownStatus().Terminal() {
// If we aren't terminal and we aren't steady state, we should be
// able to move some containers along.
seelog.Infof("Managed task [%s]: task not steady state or terminal; progressing it",
mtask.Arn)
mtask.progressTask()
}
// If we reach this point, we've changed the task in some way.
// Conversely, for it to spin in steady state it will have to have been
// loaded in steady state or progressed through here, so saving here should
// be sufficient to capture state changes.
err := mtask.saver.Save()
if err != nil {
seelog.Warnf("Managed task [%s]: unable to checkpoint task's states to disk: %v",
mtask.Arn, err)
}
if mtask.GetKnownStatus().Terminal() {
break
}
}
// We only break out of the above if this task is known to be stopped. Do
// onetime cleanup here, including removing the task after a timeout
seelog.Infof("Managed task [%s]: task has reached stopped. Waiting for container cleanup", mtask.Arn)
mtask.cleanupCredentials()
if mtask.StopSequenceNumber != 0 {
seelog.Debugf("Managed task [%s]: marking done for this sequence: %d",
mtask.Arn, mtask.StopSequenceNumber)
mtask.taskStopWG.Done(mtask.StopSequenceNumber)
}
// TODO: make this idempotent on agent restart
go mtask.releaseIPInIPAM()
mtask.cleanupTask(mtask.cfg.TaskCleanupWaitDuration)
}
// emitCurrentStatus emits a container event for every container and a task
// event for the task
func (mtask *managedTask) emitCurrentStatus() {
for _, container := range mtask.Containers {
mtask.emitContainerEvent(mtask.Task, container, "")
}
mtask.emitTaskEvent(mtask.Task, "")
}
// waitForHostResources waits for host resources to become available to start
// the task. This involves waiting for previous stops to complete so the
// resources become free.
func (mtask *managedTask) waitForHostResources() {
if mtask.StartSequenceNumber == 0 {
// This is the first transition on this host. No need to wait
return
}
if mtask.GetDesiredStatus().Terminal() {
// Task's desired status is STOPPED. No need to wait in this case either
return
}
seelog.Infof("Managed task [%s]: waiting for any previous stops to complete. Sequence number: %d",
mtask.Arn, mtask.StartSequenceNumber)
othersStoppedCtx, cancel := context.WithCancel(mtask.ctx)
defer cancel()
go func() {
mtask.taskStopWG.Wait(mtask.StartSequenceNumber)
cancel()
}()
for !mtask.waitEvent(othersStoppedCtx.Done()) {
if mtask.GetDesiredStatus().Terminal() {
// If we end up here, that means we received a start then stop for this
// task before a task that was expected to stop before it could
// actually stop
break
}
}
seelog.Infof("Managed task [%s]: wait over; ready to move towards status: %s",
mtask.Arn, mtask.GetDesiredStatus().String())
}
// waitSteady waits for a task to leave steady-state by waiting for a new
// event, or a timeout.
func (mtask *managedTask) waitSteady() {
seelog.Infof("Managed task [%s]: task at steady state: %s", mtask.Arn, mtask.GetKnownStatus().String())
timeoutCtx, cancel := context.WithTimeout(mtask.ctx, retry.AddJitter(mtask.steadyStatePollInterval, mtask.steadyStatePollIntervalJitter))
defer cancel()
timedOut := mtask.waitEvent(timeoutCtx.Done())
if timedOut {
seelog.Debugf("Managed task [%s]: checking to make sure it's still at steadystate", mtask.Arn)
go mtask.engine.checkTaskState(mtask.Task)
}
}
// steadyState returns if the task is in a steady state. Steady state is when task's desired
// and known status are both RUNNING
func (mtask *managedTask) steadyState() bool {
select {
case <-mtask.ctx.Done():
seelog.Info("Context expired. No longer steady.")
return false
default:
taskKnownStatus := mtask.GetKnownStatus()
return taskKnownStatus == apitaskstatus.TaskRunning && taskKnownStatus >= mtask.GetDesiredStatus()
}
}
// cleanupCredentials removes credentials for a stopped task
func (mtask *managedTask) cleanupCredentials() {
taskCredentialsID := mtask.GetCredentialsID()
if taskCredentialsID != "" {
mtask.credentialsManager.RemoveCredentials(taskCredentialsID)
}
}
// waitEvent waits for any event to occur. If an event occurs, the appropriate
// handler is called. Generally the stopWaiting arg is the context's Done
// channel. When the Done channel is signalled by the context, waitEvent will
// return true.
func (mtask *managedTask) waitEvent(stopWaiting <-chan struct{}) bool {
seelog.Infof("Managed task [%s]: waiting for event for task", mtask.Arn)
select {
case acsTransition := <-mtask.acsMessages:
seelog.Infof("Managed task [%s]: got acs event", mtask.Arn)
mtask.handleDesiredStatusChange(acsTransition.desiredStatus, acsTransition.seqnum)
return false
case dockerChange := <-mtask.dockerMessages:
seelog.Infof("Managed task [%s]: got container [%s (Runtime ID: %s)] event: [%s]",
mtask.Arn, dockerChange.container.Name, dockerChange.container.GetRuntimeID(), dockerChange.event.Status.String())
mtask.handleContainerChange(dockerChange)
return false
case resChange := <-mtask.resourceStateChangeEvent:
res := resChange.resource
seelog.Infof("Managed task [%s]: got resource [%s] event: [%s]",
mtask.Arn, res.GetName(), res.StatusString(resChange.nextState))
mtask.handleResourceStateChange(resChange)
return false
case <-stopWaiting:
seelog.Infof("Managed task [%s]: no longer waiting", mtask.Arn)
return true
}
}
// handleDesiredStatusChange updates the desired status on the task. Updates
// only occur if the new desired status is "compatible" (farther along than the
// current desired state); "redundant" (less-than or equal desired states) are
// ignored and dropped.
func (mtask *managedTask) handleDesiredStatusChange(desiredStatus apitaskstatus.TaskStatus, seqnum int64) {
// Handle acs message changes this task's desired status to whatever
// acs says it should be if it is compatible
seelog.Infof("Managed task [%s]: new acs transition to: %s; sequence number: %d; task stop sequence number: %d",
mtask.Arn, desiredStatus.String(), seqnum, mtask.StopSequenceNumber)
if desiredStatus <= mtask.GetDesiredStatus() {
seelog.Infof("Managed task [%s]: redundant task transition from [%s] to [%s], ignoring",
mtask.Arn, mtask.GetDesiredStatus().String(), desiredStatus.String())
return
}
if desiredStatus == apitaskstatus.TaskStopped && seqnum != 0 && mtask.GetStopSequenceNumber() == 0 {
seelog.Infof("Managed task [%s]: task moving to stopped, adding to stopgroup with sequence number: %d",
mtask.Arn, seqnum)
mtask.SetStopSequenceNumber(seqnum)
mtask.taskStopWG.Add(seqnum, 1)
}
mtask.SetDesiredStatus(desiredStatus)
mtask.UpdateDesiredStatus()
}
// handleContainerChange updates a container's known status. If the message
// contains any interesting information (like exit codes or ports), they are
// propagated.
func (mtask *managedTask) handleContainerChange(containerChange dockerContainerChange) {
// locate the container
container := containerChange.container
found := mtask.isContainerFound(container)
if !found {
seelog.Criticalf("Managed task [%s]: state error; invoked with another task's container [%s]!",
mtask.Arn, container.Name)
return
}
event := containerChange.event
seelog.Infof("Managed task [%s]: handling container change [%v] for container [%s (Runtime ID: %s)]",
mtask.Arn, event, container.Name, container.GetRuntimeID())
// If this is a backwards transition stopped->running, the first time set it
// to be known running so it will be stopped. Subsequently ignore these backward transitions
containerKnownStatus := container.GetKnownStatus()
mtask.handleStoppedToRunningContainerTransition(event.Status, container)
if event.Status <= containerKnownStatus {
seelog.Infof("Managed task [%s]: redundant container state change. %s to %s, but already %s",
mtask.Arn, container.Name, event.Status.String(), containerKnownStatus.String())
// Only update container metadata when status stays RUNNING
if event.Status == containerKnownStatus && event.Status == apicontainerstatus.ContainerRunning {
updateContainerMetadata(&event.DockerContainerMetadata, container, mtask.Task)
}
return
}
// Update the container to be known
currentKnownStatus := containerKnownStatus
container.SetKnownStatus(event.Status)
updateContainerMetadata(&event.DockerContainerMetadata, container, mtask.Task)
if event.Error != nil {
proceedAnyway := mtask.handleEventError(containerChange, currentKnownStatus)
if !proceedAnyway {
return
}
}
mtask.RecordExecutionStoppedAt(container)
seelog.Debugf("Managed task [%s]: sending container change event to tcs, container: [%s(%s)], status: %s",
mtask.Arn, container.Name, event.DockerID, event.Status.String())
err := mtask.containerChangeEventStream.WriteToEventStream(event)
if err != nil {
seelog.Warnf("Managed task [%s]: failed to write container [%s] change event to tcs event stream: %v",
mtask.Arn, container.Name, err)
}
mtask.emitContainerEvent(mtask.Task, container, "")
if mtask.UpdateStatus() {
seelog.Infof("Managed task [%s]: container change also resulted in task change [%s (Runtime ID: %s)]: [%s]",
mtask.Arn, container.Name, container.GetRuntimeID(), mtask.GetDesiredStatus().String())
// If knownStatus changed, let it be known
var taskStateChangeReason string
if mtask.GetKnownStatus().Terminal() {
taskStateChangeReason = mtask.Task.GetTerminalReason()
}
mtask.emitTaskEvent(mtask.Task, taskStateChangeReason)
}
}
// handleResourceStateChange attempts to update resource's known status depending on
// the current status and errors during transition
func (mtask *managedTask) handleResourceStateChange(resChange resourceStateChange) {
// locate the resource
res := resChange.resource
if !mtask.isResourceFound(res) {
seelog.Criticalf("Managed task [%s]: state error; invoked with another task's resource [%s]",
mtask.Arn, res.GetName())
return
}
status := resChange.nextState
err := resChange.err
currentKnownStatus := res.GetKnownStatus()
if status <= currentKnownStatus {
seelog.Infof("Managed task [%s]: redundant resource state change. %s to %s, but already %s",
mtask.Arn, res.GetName(), res.StatusString(status), res.StatusString(currentKnownStatus))
return
}
defer mtask.engine.saver.Save()
// Set known status regardless of error so the applied status can be cleared. If there is error,
// the known status might be set again below (but that won't affect the applied status anymore).
// This follows how container state change is handled.
res.SetKnownStatus(status)
if err == nil {
return
}
if status == res.SteadyState() { // Failed to create resource.
seelog.Errorf("Managed task [%s]: failed to create task resource [%s]: %v", mtask.Arn, res.GetName(), err)
res.SetKnownStatus(currentKnownStatus) // Set status back to None.
seelog.Infof("Managed task [%s]: marking task desired status to STOPPED", mtask.Arn)
mtask.SetDesiredStatus(apitaskstatus.TaskStopped)
mtask.Task.SetTerminalReason(res.GetTerminalReason())
}
}
func (mtask *managedTask) emitResourceChange(change resourceStateChange) {
if mtask.ctx.Err() != nil {
seelog.Infof("Managed task [%s]: unable to emit resource state change due to closed context: %v",
mtask.Arn, mtask.ctx.Err())
}
mtask.resourceStateChangeEvent <- change
}
func (mtask *managedTask) emitTaskEvent(task *apitask.Task, reason string) {
event, err := api.NewTaskStateChangeEvent(task, reason)
if err != nil {
seelog.Debugf("Managed task [%s]: skipping emitting event for task [%s]: %v",
task.Arn, reason, err)
return
}
seelog.Infof("Managed task [%s]: sending task change event [%s]", mtask.Arn, event.String())
mtask.stateChangeEvents <- event
seelog.Infof("Managed task [%s]: sent task change event [%s]", mtask.Arn, event.String())
}
// emitContainerEvent passes a given event up through the containerEvents channel if necessary.
// It will omit events the backend would not process and will perform best-effort deduplication of events.
func (mtask *managedTask) emitContainerEvent(task *apitask.Task, cont *apicontainer.Container, reason string) {
event, err := api.NewContainerStateChangeEvent(task, cont, reason)
if err != nil {
seelog.Debugf("Managed task [%s]: skipping emitting event for container [%s]: %v",
task.Arn, cont.Name, err)
return
}
seelog.Infof("Managed task [%s]: sending container change event [%s]: %s",
mtask.Arn, cont.Name, event.String())
mtask.stateChangeEvents <- event
seelog.Infof("Managed task [%s]: sent container change event [%s]: %s",
mtask.Arn, cont.Name, event.String())
}
func (mtask *managedTask) emitDockerContainerChange(change dockerContainerChange) {
if mtask.ctx.Err() != nil {
seelog.Infof("Managed task [%s]: unable to emit docker container change due to closed context: %v",
mtask.Arn, mtask.ctx.Err())
}
mtask.dockerMessages <- change
}
func (mtask *managedTask) emitACSTransition(transition acsTransition) {
if mtask.ctx.Err() != nil {
seelog.Infof("Managed task [%s]: unable to emit acs transition due to closed context: %v",
mtask.Arn, mtask.ctx.Err())
}
mtask.acsMessages <- transition
}
func (mtask *managedTask) isContainerFound(container *apicontainer.Container) bool {
found := false
for _, c := range mtask.Containers {
if container == c {
found = true
break
}
}
return found
}
func (mtask *managedTask) isResourceFound(res taskresource.TaskResource) bool {
for _, r := range mtask.GetResources() {
if res.GetName() == r.GetName() {
return true
}
}
return false
}
// releaseIPInIPAM releases the IP address used by the task in awsvpc mode.
func (mtask *managedTask) releaseIPInIPAM() {
if !mtask.IsNetworkModeAWSVPC() {
return
}
seelog.Infof("Managed task [%s]: IPAM releasing ip for task eni", mtask.Arn)
cfg, err := mtask.BuildCNIConfig(true, &ecscni.Config{
MinSupportedCNIVersion: config.DefaultMinSupportedCNIVersion,
})
if err != nil {
seelog.Errorf("Managed task [%s]: failed to release ip; unable to build cni configuration: %v",
mtask.Arn, err)
return
}
err = mtask.cniClient.ReleaseIPResource(mtask.ctx, cfg, ipamCleanupTmeout)
if err != nil {
seelog.Errorf("Managed task [%s]: failed to release ip; IPAM error: %v",
mtask.Arn, err)
return
}
}
// handleStoppedToRunningContainerTransition detects a "backwards" container
// transition where a known-stopped container is found to be running again and
// handles it.
func (mtask *managedTask) handleStoppedToRunningContainerTransition(status apicontainerstatus.ContainerStatus, container *apicontainer.Container) {
containerKnownStatus := container.GetKnownStatus()
if status > containerKnownStatus {
// Event status is greater than container's known status.
// This is not a backward transition, return
return
}
if containerKnownStatus != apicontainerstatus.ContainerStopped {
// Container's known status is not STOPPED. Nothing to do here.
return
}
if !status.IsRunning() {
// Container's 'to' transition was to neither the RUNNING nor the
// RESOURCES_PROVISIONED state. Nothing to do in this case either
return
}
// If the container becomes running after we've stopped it (possibly
// because we got an error running it and it ran anyways), the first time
// update it to 'known running' so that it will be driven back to stopped
mtask.unexpectedStart.Do(func() {
seelog.Warnf("Managed task [%s]: stopped container [%s] came back; re-stopping it once",
mtask.Arn, container.Name)
go mtask.engine.transitionContainer(mtask.Task, container, apicontainerstatus.ContainerStopped)
// This will not proceed afterwards because status <= knownstatus below
})
}
// handleEventError handles a container change event error and decides whether
// we should proceed to transition the container
func (mtask *managedTask) handleEventError(containerChange dockerContainerChange, currentKnownStatus apicontainerstatus.ContainerStatus) bool {
container := containerChange.container
event := containerChange.event
if container.ApplyingError == nil {
container.ApplyingError = apierrors.NewNamedError(event.Error)
}
switch event.Status {
// event.Status is the desired container transition from container's known status
// (* -> event.Status)
case apicontainerstatus.ContainerPulled:
// If the agent pull behavior is always or once and we receive the error
// because the image pull failed, the task should fail. If we don't fail the
// task here, the cached image would probably be used to create the container,
// and we don't want to use a cached image in either of these cases.
if mtask.cfg.ImagePullBehavior == config.ImagePullAlwaysBehavior ||
mtask.cfg.ImagePullBehavior == config.ImagePullOnceBehavior {
seelog.Errorf("Managed task [%s]: error while pulling image %s for container %s , moving task to STOPPED: %v",
mtask.Arn, container.Image, container.Name, event.Error)
// The task should be stopped regardless of whether this container is
// essential or non-essential.
mtask.SetDesiredStatus(apitaskstatus.TaskStopped)
return false
}
// If the agent pull behavior is prefer-cached and we receive the error because
// the image pull failed and there is no cached image locally, we don't fail
// the task here; we let create container handle it instead.
// If the agent pull behavior is default, use local image cache directly,
// assuming it exists.
seelog.Errorf("Managed task [%s]: error while pulling container %s and image %s, will try to run anyway: %v",
mtask.Arn, container.Name, container.Image, event.Error)
// proceed anyway
return true
case apicontainerstatus.ContainerStopped:
// Container's desired transition was to 'STOPPED'
return mtask.handleContainerStoppedTransitionError(event, container, currentKnownStatus)
case apicontainerstatus.ContainerStatusNone:
fallthrough
case apicontainerstatus.ContainerCreated:
// No need to explicitly stop containers if this is a * -> NONE/CREATED transition
seelog.Warnf("Managed task [%s]: error creating container [%s]; marking its desired status as STOPPED: %v",
mtask.Arn, container.Name, event.Error)
container.SetKnownStatus(currentKnownStatus)
container.SetDesiredStatus(apicontainerstatus.ContainerStopped)
return false
default:
// If this is a * -> RUNNING / RESOURCES_PROVISIONED transition, we need to stop
// the container.
seelog.Warnf("Managed task [%s]: error starting/provisioning container[%s (Runtime ID: %s)]; marking its desired status as STOPPED: %v",
mtask.Arn, container.Name, container.GetRuntimeID(), event.Error)
container.SetKnownStatus(currentKnownStatus)
container.SetDesiredStatus(apicontainerstatus.ContainerStopped)
errorName := event.Error.ErrorName()
errorStr := event.Error.Error()
shouldForceStop := false
if errorName == dockerapi.DockerTimeoutErrorName || errorName == dockerapi.CannotInspectContainerErrorName {
// If there's an error with inspecting the container or in case of timeout error,
// we'll assume that the container has transitioned to RUNNING and issue
// a stop. See #1043 for details
shouldForceStop = true
} else if errorName == dockerapi.CannotStartContainerErrorName && strings.HasSuffix(errorStr, io.EOF.Error()) {
// If we get an EOF error from Docker when starting the container, we don't really know whether the
// container is started anyway. So issuing a stop here as well. See #1708.
shouldForceStop = true
}
if shouldForceStop {
seelog.Warnf("Managed task [%s]: forcing container [%s (Runtime ID: %s)] to stop",
mtask.Arn, container.Name, container.GetRuntimeID())
go mtask.engine.transitionContainer(mtask.Task, container, apicontainerstatus.ContainerStopped)
}
// Container known status not changed, no need for further processing
return false
}
}
// handleContainerStoppedTransitionError handles an error when transitioning a container to
// STOPPED. It returns a boolean indicating whether the task can continue with updating its
// state
func (mtask *managedTask) handleContainerStoppedTransitionError(event dockerapi.DockerContainerChangeEvent,
container *apicontainer.Container,
currentKnownStatus apicontainerstatus.ContainerStatus) bool {
// If we were trying to transition to stopped and had a timeout error
// from docker, reset the known status to the current status and return
// This ensures that we don't emit a containerstopped event; a
// terminal container event from docker event stream will instead be
// responsible for the transition. Alternatively, the steadyState check
// could also trigger the progress and have another go at stopping the
// container
if event.Error.ErrorName() == dockerapi.DockerTimeoutErrorName {
seelog.Infof("Managed task [%s]: '%s' error stopping container [%s (Runtime ID: %s)]. Ignoring state change: %v",
mtask.Arn, dockerapi.DockerTimeoutErrorName, container.Name, container.GetRuntimeID(), event.Error.Error())
container.SetKnownStatus(currentKnownStatus)
return false
}
// If docker returned a transient error while trying to stop a container,
// reset the known status to the current status and return
cannotStopContainerError, ok := event.Error.(cannotStopContainerError)
if ok && cannotStopContainerError.IsRetriableError() {
seelog.Infof("Managed task [%s]: error stopping the container [%s (Runtime ID: %s)]. Ignoring state change: %v",
mtask.Arn, container.Name, container.GetRuntimeID(), cannotStopContainerError.Error())
container.SetKnownStatus(currentKnownStatus)
return false
}
// If we were trying to transition to stopped and had an error, we
// clearly can't just continue trying to transition it to stopped
// again and again. In this case, assume it's stopped (or close
// enough) and get on with it
// This can happen in cases where the container we tried to stop
// was already stopped or did not exist at all.
seelog.Warnf("Managed task [%s]: 'docker stop' for container [%s] returned %s: %s",
mtask.Arn, container.Name, event.Error.ErrorName(), event.Error.Error())
container.SetKnownStatus(apicontainerstatus.ContainerStopped)
container.SetDesiredStatus(apicontainerstatus.ContainerStopped)
return true
}
// progressTask tries to step forwards all containers and resources that are able to be
// transitioned in the task's current state.
// It will continue listening to events from all channels while it does so, but
// none of those changes will be acted upon until this set of requests to
// docker completes.
// Container changes may also prompt the task status to change as well.
func (mtask *managedTask) progressTask() {
seelog.Debugf("Managed task [%s]: progressing containers and resources in task", mtask.Arn)
// buffer the channels to the max number of transitions so that writes will
// never block on these; if we exit early, in-flight transitions can still
// send, let their goroutines exit, and get GC'd eventually
resources := mtask.GetResources()
transitionChange := make(chan struct{}, len(mtask.Containers)+len(resources))
transitionChangeEntity := make(chan string, len(mtask.Containers)+len(resources))
// startResourceTransitions should always be called before startContainerTransitions,
// else it might result in a state where none of the containers can transition and
// task may be moved to stopped.
// anyResourceTransition is set to true when transition function needs to be called or
// known status can be changed
anyResourceTransition, resTransitions := mtask.startResourceTransitions(
func(resource taskresource.TaskResource, nextStatus resourcestatus.ResourceStatus) {
mtask.transitionResource(resource, nextStatus)
transitionChange <- struct{}{}
transitionChangeEntity <- resource.GetName()
})
anyContainerTransition, blockedDependencies, contTransitions, reasons := mtask.startContainerTransitions(
func(container *apicontainer.Container, nextStatus apicontainerstatus.ContainerStatus) {
mtask.engine.transitionContainer(mtask.Task, container, nextStatus)
transitionChange <- struct{}{}
transitionChangeEntity <- container.Name
})
atLeastOneTransitionStarted := anyResourceTransition || anyContainerTransition
blockedByOrderingDependencies := len(blockedDependencies) > 0
// If no transitions happened and we aren't blocked by ordering dependencies, then we are possibly in a state where
// it's impossible for containers to move forward. We will do an additional check to see if we are waiting for ACS
// execution credentials. If not, then we will abort the task progression.
if !atLeastOneTransitionStarted && !blockedByOrderingDependencies {
if !mtask.isWaitingForACSExecutionCredentials(reasons) {
mtask.handleContainersUnableToTransitionState()
}
return
}
// If no containers are starting and we are blocked on ordering dependencies, we should watch for the task to change
// over time. This will update the containers if they become healthy or stop, which makes it possible for the
// conditions "HEALTHY" and "SUCCESS" to succeed.
if !atLeastOneTransitionStarted && blockedByOrderingDependencies {
go mtask.engine.checkTaskState(mtask.Task)
ctx, cancel := context.WithTimeout(context.Background(), transitionPollTime)
defer cancel()
for timeout := mtask.waitEvent(ctx.Done()); !timeout; {
timeout = mtask.waitEvent(ctx.Done())
}
return
}
// combine the resource and container transitions
transitions := make(map[string]string)
for k, v := range resTransitions {
transitions[k] = v
}
for k, v := range contTransitions {
transitions[k] = v.String()
}
// We've kicked off one or more transitions, wait for them to
// complete, but keep reading events as we do. In fact, we have to keep
// reading events for the transitions to complete
mtask.waitForTransition(transitions, transitionChange, transitionChangeEntity)
// update the task status
if mtask.UpdateStatus() {
seelog.Infof("Managed task [%s]: container or resource change also resulted in task change", mtask.Arn)
// If knownStatus changed, let it be known
var taskStateChangeReason string
if mtask.GetKnownStatus().Terminal() {
taskStateChangeReason = mtask.Task.GetTerminalReason()
}
mtask.emitTaskEvent(mtask.Task, taskStateChangeReason)
}
}
// isWaitingForACSExecutionCredentials checks whether the reason a container
// can't be transitioned is that we are waiting for credentials, and if so starts waiting
func (mtask *managedTask) isWaitingForACSExecutionCredentials(reasons []error) bool {
for _, reason := range reasons {
if reason == dependencygraph.CredentialsNotResolvedErr {
seelog.Infof("Managed task [%s]: waiting for credentials to pull from ECR", mtask.Arn)
timeoutCtx, timeoutCancel := context.WithTimeout(mtask.ctx, waitForPullCredentialsTimeout)
defer timeoutCancel()
timedOut := mtask.waitEvent(timeoutCtx.Done())
if timedOut {
seelog.Infof("Managed task [%s]: timed out waiting for acs credentials message", mtask.Arn)
}
return true
}
}
return false
}
// startContainerTransitions steps through each container in the task and calls
// the passed transition function when a transition should occur.
func (mtask *managedTask) startContainerTransitions(transitionFunc containerTransitionFunc) (bool, map[string]apicontainer.DependsOn, map[string]apicontainerstatus.ContainerStatus, []error) {
anyCanTransition := false
var reasons []error
blocked := make(map[string]apicontainer.DependsOn)
transitions := make(map[string]apicontainerstatus.ContainerStatus)
for _, cont := range mtask.Containers {
transition := mtask.containerNextState(cont)
if transition.reason != nil {
// container can't be transitioned
reasons = append(reasons, transition.reason)
if transition.blockedOn != nil {
blocked[cont.Name] = *transition.blockedOn
}
continue
}
// If the container is already in a transition, skip
if transition.actionRequired && !cont.SetAppliedStatus(transition.nextState) {
// At least one container is able to be moved forwards, so we're not deadlocked
anyCanTransition = true
continue
}
// At least one container is able to be moved forwards, so we're not deadlocked
anyCanTransition = true
if !transition.actionRequired {
// We are updating the container status without calling any docker API; send
// it in a goroutine so that it won't block here before waitForTransition
// is called after this function. All the events sent to mtask.dockerMessages
// will be handled by handleContainerChange.
go func(cont *apicontainer.Container, status apicontainerstatus.ContainerStatus) {
mtask.dockerMessages <- dockerContainerChange{
container: cont,
event: dockerapi.DockerContainerChangeEvent{
Status: status,
},
}
}(cont, transition.nextState)
continue
}
transitions[cont.Name] = transition.nextState
go transitionFunc(cont, transition.nextState)
}
return anyCanTransition, blocked, transitions, reasons
}
// startResourceTransitions steps through each resource in the task and calls
// the passed transition function when a transition should occur
func (mtask *managedTask) startResourceTransitions(transitionFunc resourceTransitionFunc) (bool, map[string]string) {
anyCanTransition := false
transitions := make(map[string]string)
for _, res := range mtask.GetResources() {
knownStatus := res.GetKnownStatus()
desiredStatus := res.GetDesiredStatus()
if knownStatus >= desiredStatus {
seelog.Debugf("Managed task [%s]: resource [%s] has already transitioned to or beyond the desired status %s; current known is %s",
mtask.Arn, res.GetName(), res.StatusString(desiredStatus), res.StatusString(knownStatus))
continue
}
anyCanTransition = true
transition := mtask.resourceNextState(res)
// If the resource is already in a transition, skip
if transition.actionRequired && !res.SetAppliedStatus(transition.nextState) {
// At least one resource is able to be moved forwards, so we're not deadlocked
continue
}
if !transition.actionRequired {
// no action is required for the transition, just set the known status without
// calling any transition function
go mtask.emitResourceChange(resourceStateChange{
resource: res,
nextState: transition.nextState,
err: nil,
})
continue
}
// At least one resource is able to be moved forwards, so we're not deadlocked
transitions[res.GetName()] = transition.status
go transitionFunc(res, transition.nextState)
}
return anyCanTransition, transitions
}
// transitionResource calls applyResourceState, and then notifies the managed
// task of the change. transitionResource is called by progressTask
func (mtask *managedTask) transitionResource(resource taskresource.TaskResource,
to resourcestatus.ResourceStatus) {
err := mtask.applyResourceState(resource, to)
if mtask.engine.isTaskManaged(mtask.Arn) {
mtask.emitResourceChange(resourceStateChange{
resource: resource,
nextState: to,
err: err,
})
}
}
// applyResourceState moves the resource to the given state by calling the
// function defined in the transitionFunctionMap for the state
func (mtask *managedTask) applyResourceState(resource taskresource.TaskResource,
nextState resourcestatus.ResourceStatus) error {
resName := resource.GetName()
resStatus := resource.StatusString(nextState)
err := resource.ApplyTransition(nextState)
if err != nil {
seelog.Infof("Managed task [%s]: error transitioning resource [%s] to [%s]: %v",
mtask.Arn, resName, resStatus, err)
return err
}
seelog.Infof("Managed task [%s]: transitioned resource [%s] to [%s]",
mtask.Arn, resName, resStatus)
return nil
}
type containerTransitionFunc func(container *apicontainer.Container, nextStatus apicontainerstatus.ContainerStatus)
type resourceTransitionFunc func(resource taskresource.TaskResource, nextStatus resourcestatus.ResourceStatus)
// containerNextState determines the next state a container should go to.
// It returns a transition struct including the information:
// * container state it should transition to,
// * a bool indicating whether any action is required
// * an error indicating why this transition can't happen
//
// 'Stopped, false, ""' -> "You can move it to known stopped, but you don't have to call a transition function"
// 'Running, true, ""' -> "You can move it to running and you need to call the transition function"
// 'None, false, containerDependencyNotResolved' -> "This should not be moved; it has unresolved dependencies;"
//
// Next status is determined by whether the known and desired statuses are
// equal, the next numerically greater (iota-wise) status, and whether
// dependencies are fully resolved.
func (mtask *managedTask) containerNextState(container *apicontainer.Container) *containerTransition {
containerKnownStatus := container.GetKnownStatus()
containerDesiredStatus := container.GetDesiredStatus()
if containerKnownStatus == containerDesiredStatus {
seelog.Debugf("Managed task [%s]: container [%s (Runtime ID: %s)] at desired status: %s",
mtask.Arn, container.Name, container.GetRuntimeID(), containerDesiredStatus.String())
return &containerTransition{
nextState: apicontainerstatus.ContainerStatusNone,
actionRequired: false,
reason: dependencygraph.ContainerPastDesiredStatusErr,
}
}
if containerKnownStatus > containerDesiredStatus {
seelog.Debugf("Managed task [%s]: container [%s (Runtime ID: %s)] has already transitioned beyond desired status(%s): %s",
mtask.Arn, container.Name, container.GetRuntimeID(), containerKnownStatus.String(), containerDesiredStatus.String())
return &containerTransition{
nextState: apicontainerstatus.ContainerStatusNone,
actionRequired: false,
reason: dependencygraph.ContainerPastDesiredStatusErr,
}
}
if blocked, err := dependencygraph.DependenciesAreResolved(container, mtask.Containers,
mtask.Task.GetExecutionCredentialsID(), mtask.credentialsManager, mtask.GetResources()); err != nil {
seelog.Debugf("Managed task [%s]: can't apply state to container [%s (Runtime ID: %s)] yet due to unresolved dependencies: %v",
mtask.Arn, container.Name, container.GetRuntimeID(), err)
return &containerTransition{
nextState: apicontainerstatus.ContainerStatusNone,
actionRequired: false,
reason: err,
blockedOn: blocked,
}
}
var nextState apicontainerstatus.ContainerStatus
if container.DesiredTerminal() {
nextState = apicontainerstatus.ContainerStopped
// It's not enough to just check if the container is in steady state here;
// we should really check if its status is >= RUNNING and <= STOPPED
if !container.IsRunning() {
// If the container's AppliedStatus is running, it means the StartContainer
// api call has already been scheduled, we should not mark it as stopped
// directly, because when the stopped container comes back, we will end up
// with either:
// 1. The task is not cleaned up, the handleStoppedToRunningContainerTransition
// function will handle this case, but only once. If some other
// stopped containers come back, they will not be stopped by
// Agent.
// 2. The task has already been cleaned up, in this case any stopped container
// will not be stopped by Agent when they come back.
if container.GetAppliedStatus() == apicontainerstatus.ContainerRunning {
nextState = apicontainerstatus.ContainerStatusNone
}
return &containerTransition{
nextState: nextState,
actionRequired: false,
}
}
} else {
nextState = container.GetNextKnownStateProgression()
}
return &containerTransition{
nextState: nextState,
actionRequired: true,
}
}
// resourceNextState determines the next state a resource should go to.
// It returns a transition struct including the information:
// * resource state it should transition to,
// * string presentation of the resource state
// * a bool indicating whether any action is required
// * an error indicating why this transition can't happen
//
// Next status is determined by whether the known and desired statuses are
// equal, the next numerically greater (iota-wise) status, and whether
// dependencies are fully resolved.
func (mtask *managedTask) resourceNextState(resource taskresource.TaskResource) *resourceTransition {
resKnownStatus := resource.GetKnownStatus()
resDesiredStatus := resource.GetDesiredStatus()
if resKnownStatus >= resDesiredStatus {
seelog.Debugf("Managed task [%s]: task resource [%s] has already transitioned to or beyond desired status(%s): %s",
mtask.Arn, resource.GetName(), resource.StatusString(resDesiredStatus), resource.StatusString(resKnownStatus))
return &resourceTransition{
nextState: resourcestatus.ResourceStatusNone,
status: resource.StatusString(resourcestatus.ResourceStatusNone),
actionRequired: false,
reason: dependencygraph.ResourcePastDesiredStatusErr,
}
}
if err := dependencygraph.TaskResourceDependenciesAreResolved(resource, mtask.Containers); err != nil {
seelog.Debugf("Managed task [%s]: can't apply state to resource [%s] yet due to unresolved dependencies: %v",
mtask.Arn, resource.GetName(), err)
return &resourceTransition{
nextState: resourcestatus.ResourceStatusNone,
status: resource.StatusString(resourcestatus.ResourceStatusNone),
actionRequired: false,
reason: err,
}
}
var nextState resourcestatus.ResourceStatus
if resource.DesiredTerminal() {
nextState := resource.TerminalStatus()
return &resourceTransition{
nextState: nextState,
status: resource.StatusString(nextState),
actionRequired: false, // Resource cleanup is done while cleaning up task, so not doing anything here.
}
}
nextState = resource.NextKnownState()
return &resourceTransition{
nextState: nextState,
status: resource.StatusString(nextState),
actionRequired: true,
}
}
func (mtask *managedTask) handleContainersUnableToTransitionState() {
seelog.Criticalf("Managed task [%s]: task in a bad state; it's not steadystate but no containers want to transition",
mtask.Arn)
if mtask.GetDesiredStatus().Terminal() {
// Ack, really bad. We want it to stop but the containers don't think
// that's possible. let's just break out and hope for the best!
seelog.Criticalf("Managed task [%s]: The state is so bad that we're just giving up on it", mtask.Arn)
mtask.SetKnownStatus(apitaskstatus.TaskStopped)
mtask.emitTaskEvent(mtask.Task, taskUnableToTransitionToStoppedReason)
// TODO we should probably panic here
} else {
seelog.Criticalf("Managed task [%s]: moving task to stopped due to bad state", mtask.Arn)
mtask.handleDesiredStatusChange(apitaskstatus.TaskStopped, 0)
}
}
func (mtask *managedTask) waitForTransition(transitions map[string]string,
transition <-chan struct{},
transitionChangeEntity <-chan string) {
// There could be multiple transitions, but we just need to wait for one of them
// to ensure that there is at least one container or resource that can be processed in the next
// progressTask call. This is done by waiting for one transition/acs/docker message.
if !mtask.waitEvent(transition) {
seelog.Debugf("Managed task [%s]: received non-transition events", mtask.Arn)
return
}
transitionedEntity := <-transitionChangeEntity
seelog.Debugf("Managed task [%s]: transition for [%s] finished",
mtask.Arn, transitionedEntity)
delete(transitions, transitionedEntity)
seelog.Debugf("Managed task [%s]: still waiting for: %v", mtask.Arn, transitions)
}
func (mtask *managedTask) time() ttime.Time {
mtask._timeOnce.Do(func() {
if mtask._time == nil {
mtask._time = &ttime.DefaultTime{}
}
})
return mtask._time
}
func (mtask *managedTask) cleanupTask(taskStoppedDuration time.Duration) {
cleanupTimeDuration := mtask.GetKnownStatusTime().Add(taskStoppedDuration).Sub(ttime.Now())
cleanupTime := make(<-chan time.Time)
if cleanupTimeDuration < 0 {
seelog.Infof("Managed task [%s]: Cleanup Duration has been exceeded. Starting cleanup now ", mtask.Arn)
cleanupTime = mtask.time().After(time.Nanosecond)
} else {
cleanupTime = mtask.time().After(cleanupTimeDuration)
}
cleanupTimeBool := make(chan struct{})
go func() {
<-cleanupTime
close(cleanupTimeBool)
}()
// wait for the cleanup time to elapse, signalled by cleanupTimeBool
for !mtask.waitEvent(cleanupTimeBool) {
}
// wait for apitaskstatus.TaskStopped to be sent
ok := mtask.waitForStopReported()
if !ok {
seelog.Errorf("Managed task [%s]: aborting cleanup for task as it is not reported as stopped. SentStatus: %s",
mtask.Arn, mtask.GetSentStatus().String())
return
}
seelog.Infof("Managed task [%s]: cleaning up task's containers and data", mtask.Arn)
// For the duration of this, simply discard any task events; this ensures the
// speedy processing of other events for other tasks
// discard events while the task is being removed from engine state
go mtask.discardEvents()
mtask.engine.sweepTask(mtask.Task)
mtask.engine.deleteTask(mtask.Task)
// The last thing to do here is to cancel the context, which should cancel
// all outstanding go routines associated with this managed task.
mtask.cancel()
}
func (mtask *managedTask) discardEvents() {
for {
select {
case <-mtask.dockerMessages:
case <-mtask.acsMessages:
case <-mtask.resourceStateChangeEvent:
case <-mtask.ctx.Done():
// The task has been cancelled. No need to process any more
// events
close(mtask.dockerMessages)
close(mtask.acsMessages)
close(mtask.resourceStateChangeEvent)
return
}
}
}
// waitForStopReported will wait for the task to be reported stopped and return true, or will time-out and return false.
// Messages on the mtask.dockerMessages and mtask.acsMessages channels will be handled while this function is waiting.
func (mtask *managedTask) waitForStopReported() bool {
stoppedSentBool := make(chan struct{})
taskStopped := false
go func() {
for i := 0; i < _maxStoppedWaitTimes; i++ {
// ensure that we block until apitaskstatus.TaskStopped is actually sent
sentStatus := mtask.GetSentStatus()
if sentStatus >= apitaskstatus.TaskStopped {
taskStopped = true
break
}
seelog.Warnf("Managed task [%s]: blocking cleanup until the task has been reported stopped. SentStatus: %s (%d/%d)",
mtask.Arn, sentStatus.String(), i+1, _maxStoppedWaitTimes)
mtask._time.Sleep(_stoppedSentWaitInterval)
}
stoppedSentBool <- struct{}{}
close(stoppedSentBool)
}()
// wait for apitaskstatus.TaskStopped to be sent
for !mtask.waitEvent(stoppedSentBool) {
}
return taskStopped
}
| 1 | 24,362 | this is redundant because it gets logged immediately on entering the handleContainerChange function | aws-amazon-ecs-agent | go |
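The comment above refers to duplicate logging: handleContainerChange is part of the same file (not shown in this excerpt) and, per the reviewer, already logs every change on entry. A minimal Go sketch of that shape, with the body assumed rather than taken from the source:

```go
// Sketch only: handleContainerChange exists in the full managed task file but
// not in this excerpt; its entry logging here is inferred from the review.
func (mtask *managedTask) handleContainerChange(containerChange dockerContainerChange) {
	// Every emitted change is logged once on entry, which is what makes a
	// second log in the emitter redundant.
	seelog.Debugf("Managed task [%s]: handling container change [%v] for container [%s]",
		mtask.Arn, containerChange.event, containerChange.container.Name)
	// ... status bookkeeping for the container follows here ...
}
```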
@@ -126,7 +126,7 @@ func (e *Executor) reportRequiringApproval(ctx context.Context) {
var approvers []string
for _, v := range ds.GenericDeploymentConfig.DeploymentNotification.Mentions {
- if v.Event == "DEPLOYMENT_WAIT_APPROVAL" {
+ if e := "EVENT_" + v.Event; e == model.NotificationEventType_EVENT_DEPLOYMENT_WAIT_APPROVAL.String() {
approvers = v.Slack
}
} | 1 | // Copyright 2020 The PipeCD Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package waitapproval
import (
"context"
"time"
"go.uber.org/zap"
"github.com/pipe-cd/pipe/pkg/app/piped/executor"
"github.com/pipe-cd/pipe/pkg/model"
)
const (
approvedByKey = "ApprovedBy"
)
type Executor struct {
executor.Input
}
type registerer interface {
Register(stage model.Stage, f executor.Factory) error
}
// Register registers this executor factory into a given registerer.
func Register(r registerer) {
f := func(in executor.Input) executor.Executor {
return &Executor{
Input: in,
}
}
r.Register(model.StageWaitApproval, f)
}
// Execute waits until an approval is given by one of the specified users.
func (e *Executor) Execute(sig executor.StopSignal) model.StageStatus {
var (
originalStatus = e.Stage.Status
ctx = sig.Context()
ticker = time.NewTicker(5 * time.Second)
)
defer ticker.Stop()
timeout := e.StageConfig.WaitApprovalStageOptions.Timeout.Duration()
timer := time.NewTimer(timeout)
e.reportRequiringApproval(ctx)
e.LogPersister.Info("Waiting for an approval...")
for {
select {
case <-ticker.C:
if commander, ok := e.checkApproval(ctx); ok {
e.LogPersister.Infof("Got an approval from %s", commander)
return model.StageStatus_STAGE_SUCCESS
}
case s := <-sig.Ch():
switch s {
case executor.StopSignalCancel:
return model.StageStatus_STAGE_CANCELLED
case executor.StopSignalTerminate:
return originalStatus
default:
return model.StageStatus_STAGE_FAILURE
}
case <-timer.C:
e.LogPersister.Errorf("Timed out %v", timeout)
return model.StageStatus_STAGE_FAILURE
}
}
}
func (e *Executor) checkApproval(ctx context.Context) (string, bool) {
var approveCmd *model.ReportableCommand
commands := e.CommandLister.ListCommands()
for i, cmd := range commands {
if cmd.GetApproveStage() != nil {
approveCmd = &commands[i]
break
}
}
if approveCmd == nil {
return "", false
}
metadata := map[string]string{
approvedByKey: approveCmd.Commander,
}
if ori, ok := e.MetadataStore.GetStageMetadata(e.Stage.Id); ok {
for k, v := range ori {
metadata[k] = v
}
}
if err := e.MetadataStore.SetStageMetadata(ctx, e.Stage.Id, metadata); err != nil {
e.LogPersister.Errorf("Unabled to save approver information to deployment, %v", err)
return "", false
}
if err := approveCmd.Report(ctx, model.CommandStatus_COMMAND_SUCCEEDED, nil, nil); err != nil {
e.Logger.Error("failed to report handled command", zap.Error(err))
}
return approveCmd.Commander, true
}
func (e *Executor) reportRequiringApproval(ctx context.Context) {
ds, err := e.TargetDSP.GetReadOnly(ctx, e.LogPersister)
if err != nil {
e.LogPersister.Errorf("Failed to prepare running deploy source data (%v)", err)
return
}
var approvers []string
for _, v := range ds.GenericDeploymentConfig.DeploymentNotification.Mentions {
if v.Event == "DEPLOYMENT_WAIT_APPROVAL" {
approvers = v.Slack
}
}
e.Notifier.Notify(model.NotificationEvent{
Type: model.NotificationEventType_EVENT_DEPLOYMENT_WAIT_APPROVAL,
Metadata: &model.NotificationEventDeploymentWaitApproval{
Deployment: e.Deployment,
EnvName: e.EnvName,
MentionedAccounts: approvers,
},
})
}
| 1 | 20,539 | `ds.GenericDeploymentConfig.DeploymentNotification` in L128 is nullable. | pipe-cd-pipe | go |
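The patch above iterates over `ds.GenericDeploymentConfig.DeploymentNotification.Mentions` directly; since the reviewer flags `DeploymentNotification` as nullable, a minimal sketch of the guard it asks for (assuming the field is a pointer) could look like:

```go
// Hypothetical nil guard for the nullable field flagged in the review.
notification := ds.GenericDeploymentConfig.DeploymentNotification
if notification == nil {
	return // no notification config, so nobody to mention
}
var approvers []string
for _, v := range notification.Mentions {
	if e := "EVENT_" + v.Event; e == model.NotificationEventType_EVENT_DEPLOYMENT_WAIT_APPROVAL.String() {
		approvers = v.Slack
	}
}
```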
@@ -42,4 +42,8 @@ public class CliqueMiningTracker {
public boolean blockCreatedLocally(final BlockHeader header) {
return CliqueHelpers.getProposerOfBlock(header).equals(localAddress);
}
+
+ public ProtocolContext getProtocolContext() {
+ return protocolContext;
+ }
} | 1 | /*
* Copyright ConsenSys AG.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*
* SPDX-License-Identifier: Apache-2.0
*/
package org.hyperledger.besu.consensus.clique;
import org.hyperledger.besu.ethereum.ProtocolContext;
import org.hyperledger.besu.ethereum.core.Address;
import org.hyperledger.besu.ethereum.core.BlockHeader;
public class CliqueMiningTracker {
private final Address localAddress;
private final ProtocolContext protocolContext;
public CliqueMiningTracker(final Address localAddress, final ProtocolContext protocolContext) {
this.localAddress = localAddress;
this.protocolContext = protocolContext;
}
public boolean isProposerAfter(final BlockHeader header) {
final Address nextProposer =
CliqueHelpers.getProposerForBlockAfter(
header, protocolContext.getConsensusState(CliqueContext.class).getVoteTallyCache());
return localAddress.equals(nextProposer);
}
public boolean canMakeBlockNextRound(final BlockHeader header) {
return CliqueHelpers.addressIsAllowedToProduceNextBlock(localAddress, protocolContext, header);
}
public boolean blockCreatedLocally(final BlockHeader header) {
return CliqueHelpers.getProposerOfBlock(header).equals(localAddress);
}
}
| 1 | 22,860 | this shouldn't be exposed here - this class isn't a carriage for this - it's used internally to determine if/how we can mine. | hyperledger-besu | java
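A sketch of the alternative the review implies: keep `ProtocolContext` private and expose only the narrow query callers need. Go is used here to match the other sketches in this section, and every name below is a hypothetical stand-in for the Java original:

```go
package clique

// Stand-in types; the real ones live in Besu's Java codebase.
type address string
type blockHeader struct{ proposer address }
type protocolContext struct{} // consensus state elided

// proposerOfBlock is a placeholder for CliqueHelpers.getProposerOfBlock.
func proposerOfBlock(h blockHeader, _ *protocolContext) address { return h.proposer }

// cliqueMiningTracker answers questions about mining itself instead of
// handing its protocolContext out through a getter.
type cliqueMiningTracker struct {
	localAddress address
	protocolCtx  *protocolContext // never exposed
}

func (t *cliqueMiningTracker) blockCreatedLocally(h blockHeader) bool {
	return proposerOfBlock(h, t.protocolCtx) == t.localAddress
}
```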
@@ -265,13 +265,11 @@ public class FirefoxDriver extends RemoteWebDriver
@Override
public String installExtension(Path path) {
- Require.nonNull("Path", path);
return extensions.installExtension(path);
}
@Override
public void uninstallExtension(String extensionId) {
- Require.nonNull("Extension ID", extensionId);
extensions.uninstallExtension(extensionId);
}
| 1 | // Licensed to the Software Freedom Conservancy (SFC) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The SFC licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.openqa.selenium.firefox;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Maps;
import com.google.common.collect.Sets;
import org.openqa.selenium.Capabilities;
import org.openqa.selenium.ImmutableCapabilities;
import org.openqa.selenium.MutableCapabilities;
import org.openqa.selenium.OutputType;
import org.openqa.selenium.PersistentCapabilities;
import org.openqa.selenium.Proxy;
import org.openqa.selenium.WebDriverException;
import org.openqa.selenium.devtools.CdpEndpointFinder;
import org.openqa.selenium.devtools.CdpInfo;
import org.openqa.selenium.devtools.CdpVersionFinder;
import org.openqa.selenium.devtools.Connection;
import org.openqa.selenium.devtools.DevTools;
import org.openqa.selenium.devtools.DevToolsException;
import org.openqa.selenium.devtools.HasDevTools;
import org.openqa.selenium.devtools.noop.NoOpCdpInfo;
import org.openqa.selenium.html5.LocalStorage;
import org.openqa.selenium.html5.SessionStorage;
import org.openqa.selenium.html5.WebStorage;
import org.openqa.selenium.internal.Require;
import org.openqa.selenium.remote.CommandInfo;
import org.openqa.selenium.remote.FileDetector;
import org.openqa.selenium.remote.RemoteWebDriver;
import org.openqa.selenium.remote.html5.RemoteWebStorage;
import org.openqa.selenium.remote.http.ClientConfig;
import org.openqa.selenium.remote.http.HttpClient;
import org.openqa.selenium.remote.service.DriverCommandExecutor;
import org.openqa.selenium.remote.service.DriverService;
import java.net.URI;
import java.nio.file.Path;
import java.util.Map;
import java.util.Optional;
import java.util.ServiceLoader;
import java.util.Set;
import java.util.stream.StreamSupport;
import static org.openqa.selenium.remote.CapabilityType.PROXY;
/**
 * An implementation of the {@link WebDriver} interface that drives Firefox.
* <p>
* The best way to construct a {@code FirefoxDriver} with various options is to make use of the
* {@link FirefoxOptions}, like so:
*
* <pre>
* FirefoxOptions options = new FirefoxOptions()
* .addPreference("browser.startup.page", 1)
* .addPreference("browser.startup.homepage", "https://www.google.co.uk")
* .setAcceptInsecureCerts(true)
* .setHeadless(true);
* WebDriver driver = new FirefoxDriver(options);
* </pre>
*/
public class FirefoxDriver extends RemoteWebDriver
implements WebStorage, HasExtensions, HasFullPageScreenshot, HasContext, HasDevTools {
public static final class SystemProperty {
/**
* System property that defines the location of the Firefox executable file.
*/
public static final String BROWSER_BINARY = "webdriver.firefox.bin";
/**
* System property that defines the location of the file where Firefox log should be stored.
*/
public static final String BROWSER_LOGFILE = "webdriver.firefox.logfile";
/**
* System property that defines the additional library path (Linux only).
*/
public static final String BROWSER_LIBRARY_PATH = "webdriver.firefox.library.path";
/**
* System property that defines the profile that should be used as a template.
* When the driver starts, it will make a copy of the profile it is using,
* rather than using that profile directly.
*/
public static final String BROWSER_PROFILE = "webdriver.firefox.profile";
/**
* System property that defines the location of the webdriver.xpi browser extension to install
* in the browser. If not set, the prebuilt extension bundled with this class will be used.
*/
public static final String DRIVER_XPI_PROPERTY = "webdriver.firefox.driver";
/**
* Boolean system property that instructs FirefoxDriver to use Marionette backend,
 * overriding any capabilities specified by the user
*/
public static final String DRIVER_USE_MARIONETTE = "webdriver.firefox.marionette";
}
/**
* @deprecated Use {@link Capability#BINARY}
*/
@Deprecated
public static final String BINARY = Capability.BINARY;
/**
* @deprecated Use {@link Capability#PROFILE}
*/
@Deprecated
public static final String PROFILE = Capability.PROFILE;
/**
* @deprecated Use {@link Capability#MARIONETTE}
*/
@Deprecated
public static final String MARIONETTE = Capability.MARIONETTE;
public static final class Capability {
public static final String BINARY = "firefox_binary";
public static final String PROFILE = "firefox_profile";
public static final String MARIONETTE = "marionette";
}
private static class FirefoxDriverCommandExecutor extends DriverCommandExecutor {
public FirefoxDriverCommandExecutor(DriverService service) {
super(service, getExtraCommands());
}
private static Map<String, CommandInfo> getExtraCommands() {
return ImmutableMap.<String, CommandInfo>builder()
.putAll(new AddHasContext().getAdditionalCommands())
.putAll(new AddHasExtensions().getAdditionalCommands())
.putAll(new AddHasFullPageScreenshot().getAdditionalCommands())
.build();
}
}
private final Capabilities capabilities;
protected FirefoxBinary binary;
private final RemoteWebStorage webStorage;
private final HasExtensions extensions;
private final HasFullPageScreenshot fullPageScreenshot;
private final HasContext context;
private final Optional<URI> cdpUri;
private DevTools devTools;
public FirefoxDriver() {
this(new FirefoxOptions());
}
/**
* @deprecated Use {@link #FirefoxDriver(FirefoxOptions)}.
*/
@Deprecated
public FirefoxDriver(Capabilities desiredCapabilities) {
this(new FirefoxOptions(Require.nonNull("Capabilities", desiredCapabilities)));
}
/**
* @deprecated Use {@link #FirefoxDriver(FirefoxDriverService, FirefoxOptions)}.
*/
@Deprecated
public FirefoxDriver(FirefoxDriverService service, Capabilities desiredCapabilities) {
this(
Require.nonNull("Driver service", service),
new FirefoxOptions(desiredCapabilities));
}
public FirefoxDriver(FirefoxOptions options) {
this(toExecutor(options), options);
}
public FirefoxDriver(FirefoxDriverService service) {
this(service, new FirefoxOptions());
}
public FirefoxDriver(FirefoxDriverService service, FirefoxOptions options) {
this(new FirefoxDriverCommandExecutor(service), options);
}
private FirefoxDriver(FirefoxDriverCommandExecutor executor, FirefoxOptions options) {
super(executor, dropCapabilities(options));
webStorage = new RemoteWebStorage(getExecuteMethod());
extensions = new AddHasExtensions().getImplementation(getCapabilities(), getExecuteMethod());
fullPageScreenshot = new AddHasFullPageScreenshot().getImplementation(getCapabilities(), getExecuteMethod());
context = new AddHasContext().getImplementation(getCapabilities(), getExecuteMethod());
Capabilities capabilities = super.getCapabilities();
HttpClient.Factory clientFactory = HttpClient.Factory.createDefault();
Optional<URI> cdpUri = CdpEndpointFinder.getReportedUri("moz:debuggerAddress", capabilities)
.flatMap(reported -> CdpEndpointFinder.getCdpEndPoint(clientFactory, reported));
this.cdpUri = cdpUri;
this.capabilities = cdpUri.map(uri ->
new ImmutableCapabilities(
new PersistentCapabilities(capabilities)
.setCapability("se:cdp", uri.toString())
.setCapability("se:cdpVersion", "85")))
.orElse(new ImmutableCapabilities(capabilities));
}
private static FirefoxDriverCommandExecutor toExecutor(FirefoxOptions options) {
Require.nonNull("Options to construct executor from", options);
String sysProperty = System.getProperty(SystemProperty.DRIVER_USE_MARIONETTE);
boolean isLegacy = (sysProperty != null && ! Boolean.parseBoolean(sysProperty))
|| options.isLegacy();
FirefoxDriverService.Builder<?, ?> builder =
StreamSupport.stream(ServiceLoader.load(DriverService.Builder.class).spliterator(), false)
.filter(b -> b instanceof FirefoxDriverService.Builder)
.map(FirefoxDriverService.Builder.class::cast)
.filter(b -> b.isLegacy() == isLegacy)
.findFirst().orElseThrow(WebDriverException::new);
return new FirefoxDriverCommandExecutor(builder.withOptions(options).build());
}
@Override
public Capabilities getCapabilities() {
return capabilities;
}
@Override
public void setFileDetector(FileDetector detector) {
throw new WebDriverException(
"Setting the file detector only works on remote webdriver instances obtained " +
"via RemoteWebDriver");
}
@Override
public LocalStorage getLocalStorage() {
return webStorage.getLocalStorage();
}
@Override
public SessionStorage getSessionStorage() {
return webStorage.getSessionStorage();
}
private static boolean isLegacy(Capabilities desiredCapabilities) {
Boolean forceMarionette = forceMarionetteFromSystemProperty();
if (forceMarionette != null) {
return !forceMarionette;
}
Object marionette = desiredCapabilities.getCapability(Capability.MARIONETTE);
return marionette instanceof Boolean && ! (Boolean) marionette;
}
@Override
public String installExtension(Path path) {
Require.nonNull("Path", path);
return extensions.installExtension(path);
}
@Override
public void uninstallExtension(String extensionId) {
Require.nonNull("Extension ID", extensionId);
extensions.uninstallExtension(extensionId);
}
/**
* Capture the full page screenshot and store it in the specified location.
*
* @param <X> Return type for getFullPageScreenshotAs.
* @param outputType target type, @see OutputType
* @return Object in which is stored information about the screenshot.
* @throws WebDriverException on failure.
*/
@Override
public <X> X getFullPageScreenshotAs(OutputType<X> outputType) throws WebDriverException {
return fullPageScreenshot.getFullPageScreenshotAs(outputType);
}
@Override public void setContext(FirefoxCommandContext commandContext) {
context.setContext(commandContext);
}
private static Boolean forceMarionetteFromSystemProperty() {
String useMarionette = System.getProperty(SystemProperty.DRIVER_USE_MARIONETTE);
if (useMarionette == null) {
return null;
}
return Boolean.valueOf(useMarionette);
}
/**
* Drops capabilities that we shouldn't send over the wire.
*
 * Used for capabilities which aren't BeanToJson-convertible, and are only used by the local
* launcher.
*/
private static Capabilities dropCapabilities(Capabilities capabilities) {
if (capabilities == null) {
return new ImmutableCapabilities();
}
MutableCapabilities caps;
if (isLegacy(capabilities)) {
final Set<String> toRemove = Sets.newHashSet(Capability.BINARY, Capability.PROFILE);
caps = new MutableCapabilities(
Maps.filterKeys(capabilities.asMap(), key -> !toRemove.contains(key)));
} else {
caps = new MutableCapabilities(capabilities);
}
// Ensure that the proxy is in a state fit to be sent to the extension
Proxy proxy = Proxy.extractFrom(capabilities);
if (proxy != null) {
caps.setCapability(PROXY, proxy);
}
return caps;
}
@Override
public Optional<DevTools> maybeGetDevTools() {
if (devTools != null) {
return Optional.of(devTools);
}
if (!cdpUri.isPresent()) {
return Optional.empty();
}
URI wsUri = cdpUri.orElseThrow(() ->
new DevToolsException("This version of Firefox or geckodriver does not support CDP"));
HttpClient.Factory clientFactory = HttpClient.Factory.createDefault();
ClientConfig wsConfig = ClientConfig.defaultConfig().baseUri(wsUri);
HttpClient wsClient = clientFactory.createClient(wsConfig);
Connection connection = new Connection(wsClient, wsUri.toString());
CdpInfo cdpInfo = new CdpVersionFinder().match("85.0").orElseGet(NoOpCdpInfo::new);
devTools = new DevTools(cdpInfo::getDomains, connection);
return Optional.of(devTools);
}
@Override
public DevTools getDevTools() {
if (!cdpUri.isPresent()) {
throw new DevToolsException("This version of Firefox or geckodriver does not support CDP");
}
return maybeGetDevTools().orElseThrow(() -> new DevToolsException("Unable to initialize CDP connection"));
}
}
| 1 | 19,125 | It's fine to leave these checks in. It'll make the exception come from `FirefoxDriver`, and that's probably clearer to a user. | SeleniumHQ-selenium | rb |
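The reasoning behind keeping the checks is fail-fast validation at the public boundary, so the failure names the caller-facing type. A minimal Go rendering of that pattern (all names hypothetical):

```go
package driver

import "errors"

// extensions is a stand-in for the internal helper that does the real work.
type extensions struct{}

func (extensions) install(path string) (string, error) { return "extension-id", nil }

type firefoxDriver struct{ ext extensions }

// InstallExtension validates up front, so a bad argument fails here in the
// caller-facing type rather than deep inside the helper.
func (d *firefoxDriver) InstallExtension(path string) (string, error) {
	if path == "" {
		return "", errors.New("firefoxDriver: path must be non-empty")
	}
	return d.ext.install(path)
}
```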
@@ -60,8 +60,9 @@ RSpec.configure do |config|
# Add modules for helpers
config.include ControllerSpecHelper, type: :controller
config.include RequestSpecHelper, type: :request
- [:feature, :request].each do |type|
+ [:feature, :request, :model].each do |type|
config.include IntegrationSpecHelper, type: type
+ config.include EnvironmentSpecHelper, type: type
end
config.include FeatureSpecHelper, type: :feature
| 1 | # This file is copied to spec/ when you run 'rails generate rspec:install'
ENV["RAILS_ENV"] ||= 'test'
require 'spec_helper'
require File.expand_path("../../config/environment", __FILE__)
require 'rspec/rails'
# Add additional requires below this line. Rails is not loaded until this point!
require 'steps/user_steps'
require 'steps/approval_steps'
# Requires supporting ruby files with custom matchers and macros, etc, in
# spec/support/ and its subdirectories. Files matching `spec/**/*_spec.rb` are
# run as spec files by default. This means that files in spec/support that end
# in _spec.rb will both be required and run as specs, causing the specs to be
# run twice. It is recommended that you do not name files matching this glob to
# end with _spec.rb. You can configure this pattern with the --pattern
# option on the command line or in ~/.rspec, .rspec or `.rspec-local`.
#
# The following line is provided for convenience purposes. It has the downside
# of increasing the boot-up time by auto-requiring all files in the support
# directory. Alternatively, in the individual `*_spec.rb` files, manually
# require only the support files necessary.
#
Dir[Rails.root.join('spec/support/**/*.rb')].each { |f| require f }
Dir.glob('./spec/steps/**/*_steps.rb') { |f| load f, true }
# Checks for pending migrations before tests are run.
# If you are not using ActiveRecord, you can remove this line.
ActiveRecord::Migration.maintain_test_schema!
RSpec.configure do |config|
# Remove this line if you're not using ActiveRecord or ActiveRecord fixtures
config.fixture_path = "#{::Rails.root}/spec/support/fixtures"
# If you're not using ActiveRecord, or you'd prefer not to run each of your
# examples within a transaction, remove the following line or assign false
# instead of true.
config.use_transactional_fixtures = false
# RSpec Rails can automatically mix in different behaviours to your tests
# based on their file location, for example enabling you to call `get` and
# `post` in specs under `spec/controllers`.
#
# You can disable this behaviour by removing the line below, and instead
# explicitly tag your specs with their type, e.g.:
#
# RSpec.describe UsersController, :type => :controller do
# # ...
# end
#
# The different available types are documented in the features, such as in
# https://relishapp.com/rspec/rspec-rails/docs
config.infer_spec_type_from_file_location!
# Add modules for Turnip acceptance tests
config.include ApprovalSteps, type: :feature
config.include UserSteps, type: :feature
# Add modules for helpers
config.include ControllerSpecHelper, type: :controller
config.include RequestSpecHelper, type: :request
[:feature, :request].each do |type|
config.include IntegrationSpecHelper, type: type
end
config.include FeatureSpecHelper, type: :feature
# Much of the config here pieced together from
# http://stackoverflow.com/questions/8178120/capybara-with-js-true-causes-test-to-fail/28083267
config.before(:suite) do
DatabaseCleaner.strategy = :transaction
DatabaseCleaner.clean_with(:truncation)
end
config.before(:each) do
if Capybara.current_driver == :rack_test
DatabaseCleaner.strategy = :transaction
else
DatabaseCleaner.strategy = :truncation
end
DatabaseCleaner.start
end
config.after(:each) do
DatabaseCleaner.clean
ActionMailer::Base.deliveries.clear
OmniAuth.config.mock_auth[:myusa] = nil
end
Capybara.default_host = 'http://localhost:3000'
OmniAuth.config.test_mode = true
end
| 1 | 13,230 | We don't want to include the `IntegrationSpecHelper` for models...mind moving this line out of the loop to be `config.include EnvironmentSpecHelper, type: :model`? | 18F-C2 | rb |
@@ -25,6 +25,9 @@ func TestWriteDrupalConfig(t *testing.T) {
err = WriteDrupalConfig(drupalConfig, file.Name())
assert.NoError(t, err)
+ os.Chmod(dir, 0755)
+ os.Chmod(file.Name(), 0666)
+
err = os.RemoveAll(dir)
assert.NoError(t, err)
} | 1 | package config
import (
"testing"
"os"
"io/ioutil"
"github.com/drud/ddev/pkg/cms/model"
"github.com/drud/ddev/pkg/testcommon"
"github.com/stretchr/testify/assert"
)
func TestWriteDrupalConfig(t *testing.T) {
dir := testcommon.CreateTmpDir("example")
file, err := ioutil.TempFile(dir, "file")
assert.NoError(t, err)
err = os.Chmod(file.Name(), 0444)
assert.NoError(t, err)
drupalConfig := model.NewDrupalConfig()
err = WriteDrupalConfig(drupalConfig, file.Name())
assert.NoError(t, err)
err = os.RemoveAll(dir)
assert.NoError(t, err)
}
func TestWriteDrushConfig(t *testing.T) {
dir := testcommon.CreateTmpDir("example")
file, err := ioutil.TempFile(dir, "file")
assert.NoError(t, err)
err = os.Chmod(file.Name(), 0444)
assert.NoError(t, err)
drushConfig := model.NewDrushConfig()
err = WriteDrushConfig(drushConfig, file.Name())
assert.NoError(t, err)
err = os.RemoveAll(dir)
assert.NoError(t, err)
}
func TestWriteWordpressConfig(t *testing.T) {
dir := testcommon.CreateTmpDir("example")
file, err := ioutil.TempFile(dir, "file")
assert.NoError(t, err)
err = os.Chmod(file.Name(), 0444)
assert.NoError(t, err)
wpConfig := model.NewWordpressConfig()
err = WriteWordpressConfig(wpConfig, file.Name())
assert.NoError(t, err)
err = os.RemoveAll(dir)
assert.NoError(t, err)
}
| 1 | 10,884 | Please check the return on these. | drud-ddev | go |
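The flagged calls are the two `os.Chmod` lines added in the patch; the file already uses `assert.NoError` for every other fallible call, so a sketch of the requested change inside `TestWriteDrupalConfig` would be:

```go
// Check the Chmod returns before removing the directory, mirroring the
// assert.NoError pattern used throughout these tests.
err = os.Chmod(dir, 0755)
assert.NoError(t, err)
err = os.Chmod(file.Name(), 0666)
assert.NoError(t, err)

err = os.RemoveAll(dir)
assert.NoError(t, err)
```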
@@ -279,10 +279,14 @@ class Storage {
if (uplink == null) {
uplink = new Proxy({
url: file.url,
+ cache: true,
_autogenerated: true,
}, self.config);
}
- let savestream = self.local.add_tarball(name, filename);
+ let savestream = null;
+ if (uplink.config.cache) {
+ savestream = self.local.add_tarball(name, filename);
+ }
let on_open = function() {
// prevent it from being called twice
on_open = function() {}; | 1 | 'use strict';
const assert = require('assert');
const async = require('async');
const Error = require('http-errors');
const semver = require('semver');
const Stream = require('stream');
const Local = require('./local-storage');
const Logger = require('./logger');
const MyStreams = require('./streams');
const Proxy = require('./up-storage');
const Utils = require('./utils');
/**
* Implements Storage interface
* (same for storage.js, local-storage.js, up-storage.js).
*/
class Storage {
/**
*
* @param {*} config
*/
constructor(config) {
this.config = config;
// we support a number of uplinks, but only one local storage
// Proxy and Local classes should have similar API interfaces
this.uplinks = {};
for (let p in config.uplinks) {
if (Object.prototype.hasOwnProperty.call(config.uplinks, p)) {
// instance for each up-link definition
this.uplinks[p] = new Proxy(config.uplinks[p], config);
this.uplinks[p].upname = p;
}
}
// an instance for local storage
this.local = new Local(config);
this.logger = Logger.logger.child();
}
/**
* Add a {name} package to a system
Function checks if package with the same name is available from uplinks.
If it isn't, we create package locally
Used storages: local (write) && uplinks
* @param {*} name
* @param {*} metadata
* @param {*} callback
*/
add_package(name, metadata, callback) {
let self = this;
// NOTE:
// - when we checking package for existance, we ask ALL uplinks
// - when we publishing package, we only publish it to some of them
// so all requests are necessary
check_package_local(function(err) {
if (err) return callback(err);
check_package_remote(function(err) {
if (err) return callback(err);
publish_package(function(err) {
if (err) return callback(err);
callback();
});
});
});
/**
* Check whether the package is already a local package
* @param {*} cb the callback method
*/
function check_package_local(cb) {
self.local.get_package(name, {}, function(err, results) {
if (err && err.status !== 404) {
return cb(err);
}
if (results) {
return cb( Error[409]('this package is already present') );
}
cb();
});
}
/**
* Check whether a package exists in any of the uplinks.
* @param {*} cb the callback method
*/
function check_package_remote(cb) {
self._sync_package_with_uplinks(name, null, {}, function(err, results, err_results) {
// something weird
if (err && err.status !== 404) {
return cb(err);
}
// checking package
if (results) {
return cb( Error[409]('this package is already present') );
}
for (let i=0; i<err_results.length; i++) {
// checking error
// if uplink fails with a status other than 404, we report failure
if (err_results[i][0] != null) {
if (err_results[i][0].status !== 404) {
return cb( Error[503]('one of the uplinks is down, refuse to publish') );
}
}
}
return cb();
});
}
/**
* Add a package to the local database
* @param {*} cb callback method
*/
function publish_package(cb) {
self.local.add_package(name, metadata, callback);
}
}
/**
* Add a new version of package {name} to a system
Used storages: local (write)
* @param {*} name
* @param {*} version
* @param {*} metadata
* @param {*} tag
* @param {*} callback
*/
add_version(name, version, metadata, tag, callback) {
this.local.add_version(name, version, metadata, tag, callback);
}
/**
* Tags a package version with a provided tag
Used storages: local (write)
* @param {*} name
* @param {*} tag_hash
* @param {*} callback
*/
merge_tags(name, tag_hash, callback) {
this.local.merge_tags(name, tag_hash, callback);
}
/**
* Tags a package version with a provided tag
Used storages: local (write)
* @param {*} name
* @param {*} tag_hash
* @param {*} callback
*/
replace_tags(name, tag_hash, callback) {
this.local.replace_tags(name, tag_hash, callback);
}
/**
* Change an existing package (i.e. unpublish one version)
Function changes a package's info in local storage and on all uplinks with write access.
Used storages: local (write)
* @param {*} name
* @param {*} metadata
* @param {*} revision
* @param {*} callback
*/
change_package(name, metadata, revision, callback) {
this.local.change_package(name, metadata, revision, callback);
}
/**
* Remove a package from a system
Function removes a package from local storage
Used storages: local (write)
* @param {*} name
* @param {*} callback
*/
remove_package(name, callback) {
this.local.remove_package(name, callback);
}
/**
Remove a tarball from a system
Function removes a tarball from local storage.
The tarball in question should not be linked to by any existing
version, i.e. the package version should be unpublished first.
Used storages: local (write)
* @param {*} name
* @param {*} filename
* @param {*} revision
* @param {*} callback
*/
remove_tarball(name, filename, revision, callback) {
this.local.remove_tarball(name, filename, revision, callback);
}
/**
* Upload a tarball for {name} package
Function is synchronous and returns a WritableStream
Used storages: local (write)
* @param {*} name
* @param {*} filename
* @return {Stream}
*/
add_tarball(name, filename) {
return this.local.add_tarball(name, filename);
}
/**
Get a tarball from a storage for {name} package
Function is synchronous and returns a ReadableStream
Function tries to read the tarball locally; if that fails, it reads the package
information in order to figure out where we can get this tarball from
Used storages: local || uplink (just one)
* @param {*} name
* @param {*} filename
* @return {Stream}
*/
get_tarball(name, filename) {
let stream = MyStreams.readTarballStream();
stream.abort = function() {};
let self = this;
// if someone is requesting a tarball, it means that we should already have some
// information about it, so fetching package info is unnecessary
// trying local first
let rstream = self.local.get_tarball(name, filename);
let is_open = false;
rstream.on('error', function(err) {
if (is_open || err.status !== 404) {
return stream.emit('error', err);
}
// local reported 404
let err404 = err;
rstream.abort();
rstream = null; // gc
self.local.get_package(name, function(err, info) {
if (!err && info._distfiles && info._distfiles[filename] != null) {
// information about this file exists locally
serve_file(info._distfiles[filename]);
} else {
// we know nothing about this file, trying to get information elsewhere
self._sync_package_with_uplinks(name, info, {}, function(err, info) {
if (err) {
return stream.emit('error', err);
}
if (!info._distfiles || info._distfiles[filename] == null) {
return stream.emit('error', err404);
}
serve_file(info._distfiles[filename]);
});
}
});
});
rstream.on('content-length', function(v) {
stream.emit('content-length', v);
});
rstream.on('open', function() {
is_open = true;
rstream.pipe(stream);
});
return stream;
/**
* Fetch and cache local/remote packages.
* @param {Object} file define the package shape
*/
function serve_file(file) {
let uplink = null;
for (let p in self.uplinks) {
if (self.uplinks[p].can_fetch_url(file.url)) {
uplink = self.uplinks[p];
}
}
if (uplink == null) {
uplink = new Proxy({
url: file.url,
_autogenerated: true,
}, self.config);
}
let savestream = self.local.add_tarball(name, filename);
let on_open = function() {
// prevent it from being called twice
on_open = function() {};
let rstream2 = uplink.get_url(file.url);
rstream2.on('error', function(err) {
if (savestream) {
savestream.abort();
}
savestream = null;
stream.emit('error', err);
});
rstream2.on('end', function() {
if (savestream) {
savestream.done();
}
});
rstream2.on('content-length', function(v) {
stream.emit('content-length', v);
if (savestream) {
savestream.emit('content-length', v);
}
});
rstream2.pipe(stream);
if (savestream) {
rstream2.pipe(savestream);
}
};
savestream.on('open', function() {
on_open();
});
savestream.on('error', function(err) {
self.logger.warn( {err: err}
, 'error saving file: @{err.message}\n@{err.stack}' );
if (savestream) {
savestream.abort();
}
savestream = null;
on_open();
});
}
}
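// Illustrative sketch (not part of the original file; `storage` and `res`
// are assumed names): a typical consumer of get_tarball() is an HTTP
// download handler:
//
//   const tarball = storage.get_tarball('some-pkg', 'some-pkg-1.0.0.tgz');
//   tarball.on('content-length', (len) => res.header('Content-Length', len));
//   tarball.on('error', (err) => res.status(err.status || 500).end());
//   tarball.pipe(res);
//
// The stream is served from local storage first and transparently falls back
// to an uplink (caching the tarball via add_tarball) when it is only known remotely.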
/**
Retrieve a package metadata for {name} package
Function invokes local.get_package and uplink.get_package for every
uplink with proxy_access rights against {name} and combines results
into one json object
Used storages: local && uplink (proxy_access)
* @param {*} name
* @param {*} options
* @param {*} callback
*/
get_package(name, options, callback) {
if (typeof(options) === 'function') {
callback = options, options = {};
}
this.local.get_package(name, options, (err, data) => {
if (err && (!err.status || err.status >= 500)) {
// report internal errors right away
return callback(err);
}
this._sync_package_with_uplinks(name, data, options, function(err, result, uplink_errors) {
if (err) return callback(err);
const whitelist = ['_rev', 'name', 'versions', 'dist-tags', 'readme'];
for (let i in result) {
if (whitelist.indexOf(i) === -1) delete result[i];
}
Utils.normalize_dist_tags(result);
// npm can throw if this field doesn't exist
result._attachments = {};
callback(null, result, uplink_errors);
});
});
}
/**
Retrieve remote and local packages more recent than {startkey}
Function streams all packages from all uplinks first, and then
local packages.
Note that local packages could override registry ones just because
they appear in JSON last. That's a trade-off we make to avoid
memory issues.
Used storages: local && uplink (proxy_access)
* @param {*} startkey
* @param {*} options
* @return {Stream}
*/
search(startkey, options) {
let self = this;
// pass-through stream used to merge the search results
let stream = new Stream.PassThrough({objectMode: true});
async.eachSeries(Object.keys(this.uplinks), function(up_name, cb) {
// shortcut: if `local=1` is supplied, don't call uplinks
if (options.req.query.local !== undefined) {
return cb();
}
// search by keyword for each uplink
let lstream = self.uplinks[up_name].search(startkey, options);
// join streams
lstream.pipe(stream, {end: false});
lstream.on('error', function(err) {
self.logger.error({err: err}, 'uplink error: @{err.message}');
cb(), cb = function() {};
});
lstream.on('end', function() {
cb(), cb = function() {};
});
stream.abort = function() {
if (lstream.abort) {
lstream.abort();
}
cb(), cb = function() {};
};
},
// executed after all series
function() {
// attach a local search results
let lstream = self.local.search(startkey, options);
stream.abort = function() {
lstream.abort();
};
lstream.pipe(stream, {end: true});
lstream.on('error', function(err) {
self.logger.error({err: err}, 'search error: @{err.message}');
stream.end();
});
});
return stream;
}
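// Illustrative sketch (assumed names `storage`, `res`): search() returns an
// object-mode stream, so results can be collected as they arrive from the
// uplinks and, last of all, from local storage:
//
//   const results = [];
//   const s = storage.search(0, {req});
//   s.on('data', (pkg) => results.push(pkg));
//   s.on('end', () => res.json(results));
//
// Because local packages are piped last, a local package can override an
// uplink entry of the same name, as the docstring above notes.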
/**
* Retrieve the latest version of each locally published package.
* @param {*} callback
*/
get_local(callback) {
let self = this;
let locals = this.config.localList.get();
let packages = [];
const getPackage = function(i) {
self.local.get_package(locals[i], function(err, info) {
if (!err) {
let latest = info['dist-tags'].latest;
if (latest && info.versions[latest]) {
packages.push(info.versions[latest]);
} else {
self.logger.warn( {package: locals[i]}
, 'package @{package} does not have a "latest" tag?' );
}
}
if (i >= locals.length - 1) {
callback(null, packages);
} else {
getPackage(i + 1);
}
});
};
if (locals.length) {
getPackage(0);
} else {
callback(null, []);
}
}
/**
* Function fetches package information from uplinks and synchronizes it with local data
if the package is available locally, it MUST be provided in pkginfo
returns callback(err, result, uplink_errors)
* @param {*} name
* @param {*} pkginfo
* @param {*} options
* @param {*} callback
*/
_sync_package_with_uplinks(name, pkginfo, options, callback) {
let self = this;
let exists = false;
if (!pkginfo) {
exists = false;
pkginfo = {
'name': name,
'versions': {},
'dist-tags': {},
'_uplinks': {},
};
} else {
exists = true;
}
let uplinks = [];
for (let i in self.uplinks) {
if (self.config.can_proxy_to(name, i)) {
uplinks.push(self.uplinks[i]);
}
}
async.map(uplinks, function(up, cb) {
let _options = Object.assign({}, options);
if (Utils.is_object(pkginfo._uplinks[up.upname])) {
let fetched = pkginfo._uplinks[up.upname].fetched;
if (fetched && fetched > (Date.now() - up.maxage)) {
return cb();
}
_options.etag = pkginfo._uplinks[up.upname].etag;
}
up.get_package(name, _options, function(err, up_res, etag) {
if (err && err.status === 304) {
pkginfo._uplinks[up.upname].fetched = Date.now();
}
if (err || !up_res) {
return cb(null, [err || Error('no data')]);
}
try {
Utils.validate_metadata(up_res, name);
} catch(err) {
self.logger.error({
sub: 'out',
err: err,
}, 'package.json validating error @{!err.message}\n@{err.stack}');
return cb(null, [err]);
}
pkginfo._uplinks[up.upname] = {
etag: etag,
fetched: Date.now(),
};
for (let i in up_res.versions) {
if (Object.prototype.hasOwnProperty.call(up_res.versions, i)) {
// this won't be serialized to json,
// kinda like an ES6 Symbol
// FIXME: perhaps Symbol('_verdaccio_uplink') here?
Object.defineProperty(up_res.versions[i], '_verdaccio_uplink', {
value: up.upname,
enumerable: false,
configurable: false,
writable: true,
});
}
}
try {
Storage._merge_versions(pkginfo, up_res, self.config);
} catch(err) {
self.logger.error({
sub: 'out',
err: err,
}, 'package.json parsing error @{!err.message}\n@{err.stack}');
return cb(null, [err]);
}
// if we got to this point, assume that the correct package exists
// on the uplink
exists = true;
cb();
});
}, function(err, uplink_errors) {
assert(!err && Array.isArray(uplink_errors));
if (!exists) {
return callback( Error[404]('no such package available')
, null
, uplink_errors );
}
self.local.update_versions(name, pkginfo, function(err, pkginfo) {
if (err) return callback(err);
return callback(null, pkginfo, uplink_errors);
});
});
}
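// Sketch of the per-uplink bookkeeping kept in pkginfo._uplinks
// (values are illustrative):
//
//   pkginfo._uplinks = {
//     npmjs: {etag: 'W/"abc123"', fetched: 1500000000000},
//   };
//
// On the next sync the uplink is skipped entirely while `fetched` is newer
// than (Date.now() - up.maxage); otherwise the stored etag is sent so the
// uplink can reply 304, in which case only `fetched` is refreshed.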
/**
* Function gets a local info and an info from uplinks and tries to merge it
exported for unit tests only.
* @param {*} local
* @param {*} up
* @param {*} config
*/
static _merge_versions(local, up, config) {
// copy new versions to a cache
// NOTE: if a certain version was updated, we can't refresh it reliably
for (let i in up.versions) {
if (local.versions[i] == null) {
local.versions[i] = up.versions[i];
}
}
// refresh dist-tags
for (let i in up['dist-tags']) {
if (local['dist-tags'][i] !== up['dist-tags'][i]) {
if (!local['dist-tags'][i] || semver.lte(local['dist-tags'][i], up['dist-tags'][i])) {
local['dist-tags'][i] = up['dist-tags'][i];
}
if (i === 'latest' && local['dist-tags'][i] === up['dist-tags'][i]) {
// if the remote has a fresher package, we should borrow its readme
local.readme = up.readme;
}
}
}
}
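// Worked example (illustrative values): given
//   local['dist-tags'] = {latest: '1.0.0'}
//   up['dist-tags'] = {latest: '1.2.0'}
// semver.lte('1.0.0', '1.2.0') holds, so the local `latest` becomes '1.2.0'
// and, since the two `latest` tags now match, local.readme is replaced by
// up.readme. Versions already cached locally are never overwritten.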
}
module.exports = Storage;
| 1 | 16,908 | Do we need `==` for true? | verdaccio-verdaccio | js |
@@ -17,7 +17,7 @@ class BaseEMAHook(Hook):
momentum (float): The momentum used for updating ema parameter.
Ema's parameter are updated with the formula:
`ema_param = (1-momentum) * ema_param + momentum * cur_param`.
- Defaults to 0.0002.
+ Defaults to 0.0001.
skip_buffers (bool): Whether to skip the model buffers, such as
batchnorm running stats (running_mean, running_var), it does not
perform the ema operation. Default to False. | 1 | # Copyright (c) OpenMMLab. All rights reserved.
import math
from mmcv.parallel import is_module_wrapper
from mmcv.runner.hooks import HOOKS, Hook
class BaseEMAHook(Hook):
"""Exponential Moving Average Hook.
Use an Exponential Moving Average on all parameters of the model during
training. All parameters have an EMA backup, which is updated by the formula
below. EMAHook takes priority over EvalHook and CheckpointHook. Note that
the original model parameters are actually saved in the ema field after training.
Args:
momentum (float): The momentum used for updating ema parameter.
Ema's parameter are updated with the formula:
`ema_param = (1-momentum) * ema_param + momentum * cur_param`.
Defaults to 0.0002.
skip_buffers (bool): Whether to skip the model buffers, such as
batchnorm running stats (running_mean, running_var), it does not
perform the ema operation. Default to False.
interval (int): Update ema parameter every interval iteration.
Defaults to 1.
resume_from (str, optional): The checkpoint path. Defaults to None.
momentum_fun (func, optional): Function that adjusts the momentum
during early iterations (i.e. warmup) to help early training.
If None, `momentum` is used as a constant. Defaults to None.
"""
def __init__(self,
momentum=0.0002,
interval=1,
skip_buffers=False,
resume_from=None,
momentum_fun=None):
assert 0 < momentum < 1
self.momentum = momentum
self.skip_buffers = skip_buffers
self.interval = interval
self.checkpoint = resume_from
self.momentum_fun = momentum_fun
def before_run(self, runner):
"""To resume model with it's ema parameters more friendly.
Register ema parameter as ``named_buffer`` to model.
"""
model = runner.model
if is_module_wrapper(model):
model = model.module
self.param_ema_buffer = {}
if self.skip_buffers:
self.model_parameters = dict(model.named_parameters())
else:
self.model_parameters = model.state_dict()
for name, value in self.model_parameters.items():
# "." is not allowed in module's buffer name
buffer_name = f"ema_{name.replace('.', '_')}"
self.param_ema_buffer[name] = buffer_name
model.register_buffer(buffer_name, value.data.clone())
self.model_buffers = dict(model.named_buffers())
if self.checkpoint is not None:
runner.resume(self.checkpoint)
def get_momentum(self, runner):
return self.momentum_fun(runner.iter) if self.momentum_fun else \
self.momentum
def after_train_iter(self, runner):
"""Update ema parameter every self.interval iterations."""
if (runner.iter + 1) % self.interval != 0:
return
momentum = self.get_momentum(runner)
for name, parameter in self.model_parameters.items():
# exclude non-float buffers such as num_batches_tracked
if parameter.dtype.is_floating_point:
buffer_name = self.param_ema_buffer[name]
buffer_parameter = self.model_buffers[buffer_name]
buffer_parameter.mul_(1 - momentum).add_(
parameter.data, alpha=momentum)
def after_train_epoch(self, runner):
"""We load parameter values from ema backup to model before the
EvalHook."""
self._swap_ema_parameters()
def before_train_epoch(self, runner):
"""We recover model's parameter from ema backup after last epoch's
EvalHook."""
self._swap_ema_parameters()
def _swap_ema_parameters(self):
"""Swap the parameter of model with parameter in ema_buffer."""
for name, value in self.model_parameters.items():
temp = value.data.clone()
ema_buffer = self.model_buffers[self.param_ema_buffer[name]]
value.data.copy_(ema_buffer.data)
ema_buffer.data.copy_(temp)
@HOOKS.register_module()
class ExpMomentumEMAHook(BaseEMAHook):
"""EMAHook using exponential momentum strategy.
Args:
total_iter (int): The total number of iterations of EMA momentum.
Defaults to 2000.
"""
def __init__(self, total_iter=2000, **kwargs):
super(ExpMomentumEMAHook, self).__init__(**kwargs)
self.momentum_fun = lambda x: (1 - self.momentum) * math.exp(-(
1 + x) / total_iter) + self.momentum
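# Sketch of the resulting schedule (illustrative, momentum=0.0002,
# total_iter=2000):
#   momentum_fun(0) ~= 0.9998 * exp(-1/2000) + 0.0002 ~= 0.9995
#   momentum_fun(19999) ~= 0.9998 * exp(-10) + 0.0002 ~= 0.00025
# i.e. the EMA copies the raw weights almost exactly early in training and
# decays towards the constant momentum afterwards.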
@HOOKS.register_module()
class LinearMomentumEMAHook(BaseEMAHook):
"""EMAHook using linear momentum strategy.
Args:
warm_up (int): During the first warm_up steps, a smaller momentum may be
used so that the EMA parameters are updated more slowly. Defaults to 100.
"""
def __init__(self, warm_up=100, **kwargs):
super(LinearMomentumEMAHook, self).__init__(**kwargs)
self.momentum_fun = lambda x: min(self.momentum**self.interval,
(1 + x) / (warm_up + x))
| 1 | 26,376 | Changing the default value may cause BC-breaking. Suggest changing this value in config. | open-mmlab-mmdetection | py |
@@ -325,7 +325,7 @@ class SimpleConfig(Logger):
slider_pos = max(slider_pos, 0)
slider_pos = min(slider_pos, len(FEE_ETA_TARGETS))
if slider_pos < len(FEE_ETA_TARGETS):
- num_blocks = FEE_ETA_TARGETS[slider_pos]
+ num_blocks = FEE_ETA_TARGETS[int(slider_pos)]
fee = self.eta_target_to_fee(num_blocks)
else:
fee = self.eta_target_to_fee(1) | 1 | import json
import threading
import time
import os
import stat
import ssl
from decimal import Decimal
from typing import Union, Optional, Dict, Sequence, Tuple
from numbers import Real
from copy import deepcopy
from aiorpcx import NetAddress
from . import util
from . import constants
from .util import base_units, base_unit_name_to_decimal_point, decimal_point_to_base_unit_name, UnknownBaseUnit, DECIMAL_POINT_DEFAULT
from .util import format_satoshis, format_fee_satoshis
from .util import user_dir, make_dir, NoDynamicFeeEstimates, quantize_feerate
from .i18n import _
from .logging import get_logger, Logger
FEE_ETA_TARGETS = [25, 10, 5, 2]
FEE_DEPTH_TARGETS = [10000000, 5000000, 2000000, 1000000, 500000, 200000, 100000]
FEE_LN_ETA_TARGET = 2 # note: make sure the network is asking for estimates for this target
# satoshi per kbyte
FEERATE_MAX_DYNAMIC = 1500000
FEERATE_WARNING_HIGH_FEE = 600000
FEERATE_FALLBACK_STATIC_FEE = 150000
FEERATE_DEFAULT_RELAY = 1000
FEERATE_MAX_RELAY = 50000
FEERATE_STATIC_VALUES = [1000, 2000, 5000, 10000, 20000, 30000,
50000, 70000, 100000, 150000, 200000, 300000]
FEERATE_REGTEST_HARDCODED = 180000 # for eclair compat
FEE_RATIO_HIGH_WARNING = 0.05 # warn user if fee/amount for on-chain tx is higher than this
_logger = get_logger(__name__)
FINAL_CONFIG_VERSION = 3
class SimpleConfig(Logger):
"""
The SimpleConfig class is responsible for handling operations involving
configuration files.
There are two different sources of possible configuration values:
1. Command line options.
2. User configuration (in the user's config directory)
They are taken in order (1. overrides config options set in 2.)
"""
def __init__(self, options=None, read_user_config_function=None,
read_user_dir_function=None):
if options is None:
options = {}
Logger.__init__(self)
# This lock needs to be acquired for updating and reading the config in
# a thread-safe way.
self.lock = threading.RLock()
self.mempool_fees = None # type: Optional[Sequence[Tuple[Union[float, int], int]]]
self.fee_estimates = {}
self.fee_estimates_last_updated = {}
self.last_time_fee_estimates_requested = 0 # zero ensures immediate fees
# The following two functions are there for dependency injection when
# testing.
if read_user_config_function is None:
read_user_config_function = read_user_config
if read_user_dir_function is None:
self.user_dir = user_dir
else:
self.user_dir = read_user_dir_function
# The command line options
self.cmdline_options = deepcopy(options)
# don't allow to be set on CLI:
self.cmdline_options.pop('config_version', None)
# Set self.path and read the user config
self.user_config = {} # for self.get in electrum_path()
self.path = self.electrum_path()
self.user_config = read_user_config_function(self.path)
if not self.user_config:
# avoid new config getting upgraded
self.user_config = {'config_version': FINAL_CONFIG_VERSION}
self._not_modifiable_keys = set()
# config "upgrade" - CLI options
self.rename_config_keys(
self.cmdline_options, {'auto_cycle': 'auto_connect'}, True)
# config upgrade - user config
if self.requires_upgrade():
self.upgrade()
self._check_dependent_keys()
# units and formatting
self.decimal_point = self.get('decimal_point', DECIMAL_POINT_DEFAULT)
try:
decimal_point_to_base_unit_name(self.decimal_point)
except UnknownBaseUnit:
self.decimal_point = DECIMAL_POINT_DEFAULT
self.num_zeros = int(self.get('num_zeros', 0))
def electrum_path(self):
# Read electrum_path from command line
# Otherwise use the user's default data directory.
path = self.get('electrum_path')
if path is None:
path = self.user_dir()
make_dir(path, allow_symlink=False)
if self.get('testnet'):
path = os.path.join(path, 'testnet')
make_dir(path, allow_symlink=False)
elif self.get('regtest'):
path = os.path.join(path, 'regtest')
make_dir(path, allow_symlink=False)
elif self.get('simnet'):
path = os.path.join(path, 'simnet')
make_dir(path, allow_symlink=False)
self.logger.info(f"electrum directory {path}")
return path
def rename_config_keys(self, config, keypairs, deprecation_warning=False):
"""Migrate old key names to new ones"""
updated = False
for old_key, new_key in keypairs.items():
if old_key in config:
if new_key not in config:
config[new_key] = config[old_key]
if deprecation_warning:
self.logger.warning('Note that the {} variable has been deprecated. '
'You should use {} instead.'.format(old_key, new_key))
del config[old_key]
updated = True
return updated
def set_key(self, key, value, save=True):
if not self.is_modifiable(key):
self.logger.warning(f"not changing config key '{key}' set on the command line")
return
try:
json.dumps(key)
json.dumps(value)
except:
self.logger.info(f"json error: cannot save {repr(key)} ({repr(value)})")
return
self._set_key_in_user_config(key, value, save)
def _set_key_in_user_config(self, key, value, save=True):
with self.lock:
if value is not None:
self.user_config[key] = value
else:
self.user_config.pop(key, None)
if save:
self.save_user_config()
def get(self, key, default=None):
with self.lock:
out = self.cmdline_options.get(key)
if out is None:
out = self.user_config.get(key, default)
return out
def _check_dependent_keys(self) -> None:
if self.get('serverfingerprint'):
if not self.get('server'):
raise Exception("config key 'serverfingerprint' requires 'server' to also be set")
self.make_key_not_modifiable('server')
def requires_upgrade(self):
return self.get_config_version() < FINAL_CONFIG_VERSION
def upgrade(self):
with self.lock:
self.logger.info('upgrading config')
self.convert_version_2()
self.convert_version_3()
self.set_key('config_version', FINAL_CONFIG_VERSION, save=True)
def convert_version_2(self):
if not self._is_upgrade_method_needed(1, 1):
return
self.rename_config_keys(self.user_config, {'auto_cycle': 'auto_connect'})
try:
# change server string FROM host:port:proto TO host:port:s
server_str = self.user_config.get('server')
host, port, protocol = str(server_str).rsplit(':', 2)
assert protocol in ('s', 't')
int(port) # Throw if cannot be converted to int
server_str = '{}:{}:s'.format(host, port)
self._set_key_in_user_config('server', server_str)
except BaseException:
self._set_key_in_user_config('server', None)
self.set_key('config_version', 2)
def convert_version_3(self):
if not self._is_upgrade_method_needed(2, 2):
return
base_unit = self.user_config.get('base_unit')
if isinstance(base_unit, str):
self._set_key_in_user_config('base_unit', None)
map_ = {'btc':8, 'mbtc':5, 'ubtc':2, 'bits':2, 'sat':0}
decimal_point = map_.get(base_unit.lower())
self._set_key_in_user_config('decimal_point', decimal_point)
self.set_key('config_version', 3)
def _is_upgrade_method_needed(self, min_version, max_version):
cur_version = self.get_config_version()
if cur_version > max_version:
return False
elif cur_version < min_version:
raise Exception(
('config upgrade: unexpected version %d (should be %d-%d)'
% (cur_version, min_version, max_version)))
else:
return True
def get_config_version(self):
config_version = self.get('config_version', 1)
if config_version > FINAL_CONFIG_VERSION:
self.logger.warning('config version ({}) is higher than latest ({})'
.format(config_version, FINAL_CONFIG_VERSION))
return config_version
def is_modifiable(self, key) -> bool:
return (key not in self.cmdline_options
and key not in self._not_modifiable_keys)
def make_key_not_modifiable(self, key) -> None:
self._not_modifiable_keys.add(key)
def save_user_config(self):
if self.get('forget_config'):
return
if not self.path:
return
path = os.path.join(self.path, "config")
s = json.dumps(self.user_config, indent=4, sort_keys=True)
try:
with open(path, "w", encoding='utf-8') as f:
f.write(s)
os.chmod(path, stat.S_IREAD | stat.S_IWRITE)
except FileNotFoundError:
# datadir probably deleted while running...
if os.path.exists(self.path): # or maybe not?
raise
def get_wallet_path(self, *, use_gui_last_wallet=False):
"""Set the path of the wallet."""
# command line -w option
if self.get('wallet_path'):
return os.path.join(self.get('cwd', ''), self.get('wallet_path'))
if use_gui_last_wallet:
path = self.get('gui_last_wallet')
if path and os.path.exists(path):
return path
# default path
util.assert_datadir_available(self.path)
dirpath = os.path.join(self.path, "wallets")
make_dir(dirpath, allow_symlink=False)
new_path = os.path.join(self.path, "wallets", "default_wallet")
# default path in pre 1.9 versions
old_path = os.path.join(self.path, "electrum.dat")
if os.path.exists(old_path) and not os.path.exists(new_path):
os.rename(old_path, new_path)
return new_path
def remove_from_recently_open(self, filename):
recent = self.get('recently_open', [])
if filename in recent:
recent.remove(filename)
self.set_key('recently_open', recent)
def set_session_timeout(self, seconds):
self.logger.info(f"session timeout -> {seconds} seconds")
self.set_key('session_timeout', seconds)
def get_session_timeout(self):
return self.get('session_timeout', 300)
def save_last_wallet(self, wallet):
if self.get('wallet_path') is None:
path = wallet.storage.path
self.set_key('gui_last_wallet', path)
def impose_hard_limits_on_fee(func):
def get_fee_within_limits(self, *args, **kwargs):
fee = func(self, *args, **kwargs)
if fee is None:
return fee
fee = min(FEERATE_MAX_DYNAMIC, fee)
fee = max(FEERATE_DEFAULT_RELAY, fee)
return fee
return get_fee_within_limits
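# Illustrative effect of the decorator above: with the module constants
# FEERATE_DEFAULT_RELAY = 1000 and FEERATE_MAX_DYNAMIC = 1500000, any
# estimate is clamped into [1000, 1500000] sat/kbyte, e.g. a raw estimate
# of 250 becomes 1000 and a raw estimate of 2_000_000 becomes 1_500_000.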
def eta_to_fee(self, slider_pos) -> Optional[int]:
"""Returns fee in sat/kbyte."""
slider_pos = max(slider_pos, 0)
slider_pos = min(slider_pos, len(FEE_ETA_TARGETS))
if slider_pos < len(FEE_ETA_TARGETS):
num_blocks = FEE_ETA_TARGETS[slider_pos]
fee = self.eta_target_to_fee(num_blocks)
else:
fee = self.eta_target_to_fee(1)
return fee
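# Slider mapping sketch (FEE_ETA_TARGETS = [25, 10, 5, 2]):
# slider_pos 0 -> 25-block target, slider_pos 3 -> 2-block target, and
# slider_pos 4 (== len(FEE_ETA_TARGETS)) -> "next block" via eta_target_to_fee(1).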
@impose_hard_limits_on_fee
def eta_target_to_fee(self, num_blocks: int) -> Optional[int]:
"""Returns fee in sat/kbyte."""
if num_blocks == 1:
fee = self.fee_estimates.get(2)
if fee is not None:
fee += fee / 2
fee = int(fee)
else:
fee = self.fee_estimates.get(num_blocks)
if fee is not None:
fee = int(fee)
return fee
def fee_to_depth(self, target_fee: Real) -> Optional[int]:
"""For a given sat/vbyte fee, returns an estimate of how deep
it would be in the current mempool in vbytes.
Pessimistic == overestimates the depth.
"""
if self.mempool_fees is None:
return None
depth = 0
for fee, s in self.mempool_fees:
depth += s
if fee <= target_fee:
break
return depth
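# Worked example (illustrative histogram; fee in sat/vbyte, size in vbytes):
# with mempool_fees = [(50, 2_000_000), (10, 3_000_000), (2, 5_000_000)],
# fee_to_depth(10) accumulates 2_000_000 + 3_000_000 = 5_000_000, since the
# bucket at exactly the target fee is still counted (hence "pessimistic").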
def depth_to_fee(self, slider_pos) -> Optional[int]:
"""Returns fee in sat/kbyte."""
target = self.depth_target(slider_pos)
return self.depth_target_to_fee(target)
@impose_hard_limits_on_fee
def depth_target_to_fee(self, target: int) -> Optional[int]:
"""Returns fee in sat/kbyte.
target: desired mempool depth in vbytes
"""
if self.mempool_fees is None:
return None
depth = 0
for fee, s in self.mempool_fees:
depth += s
if depth > target:
break
else:
return 0
# add one sat/byte as currently that is
# the max precision of the histogram
# (well, in case of ElectrumX at least. not for electrs)
fee += 1
# convert to sat/kbyte
return int(fee * 1000)
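# Worked example with the same illustrative histogram as in fee_to_depth:
# with mempool_fees = [(50, 2_000_000), (10, 3_000_000), (2, 5_000_000)],
# depth_target_to_fee(4_000_000) stops once depth reaches 5_000_000 at
# fee 10, and returns (10 + 1) * 1000 = 11_000 sat/kbyte.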
def depth_target(self, slider_pos: int) -> int:
"""Returns mempool depth target in bytes for a fee slider position."""
slider_pos = max(slider_pos, 0)
slider_pos = min(slider_pos, len(FEE_DEPTH_TARGETS)-1)
return FEE_DEPTH_TARGETS[slider_pos]
def eta_target(self, slider_pos: int) -> int:
"""Returns 'num blocks' ETA target for a fee slider position."""
if slider_pos == len(FEE_ETA_TARGETS):
return 1
return FEE_ETA_TARGETS[slider_pos]
def fee_to_eta(self, fee_per_kb: int) -> int:
"""Returns 'num blocks' ETA estimate for given fee rate,
or -1 for low fee.
"""
import operator
lst = list(self.fee_estimates.items()) + [(1, self.eta_to_fee(len(FEE_ETA_TARGETS)))]
dist = map(lambda x: (x[0], abs(x[1] - fee_per_kb)), lst)
min_target, min_value = min(dist, key=operator.itemgetter(1))
if fee_per_kb < self.fee_estimates.get(FEE_ETA_TARGETS[0])/2:
min_target = -1
return min_target
def depth_tooltip(self, depth: Optional[int]) -> str:
"""Returns text tooltip for given mempool depth (in vbytes)."""
if depth is None:
return "unknown from tip"
return "%.1f MB from tip" % (depth/1_000_000)
def eta_tooltip(self, x):
if x < 0:
return _('Low fee')
elif x == 1:
return _('In the next block')
else:
return _('Within {} blocks').format(x)
def get_fee_status(self):
dyn = self.is_dynfee()
mempool = self.use_mempool_fees()
pos = self.get_depth_level() if mempool else self.get_fee_level()
fee_rate = self.fee_per_kb()
target, tooltip = self.get_fee_text(pos, dyn, mempool, fee_rate)
return tooltip + ' [%s]'%target if dyn else target + ' [Static]'
def get_fee_text(
self,
slider_pos: int,
dyn: bool,
mempool: bool,
fee_per_kb: Optional[int],
):
"""Returns (text, tooltip) where
text is what we target: static fee / num blocks to confirm in / mempool depth
tooltip is the corresponding estimate (e.g. num blocks for a static fee)
fee_rate is in sat/kbyte
"""
if fee_per_kb is None:
rate_str = 'unknown'
fee_per_byte = None
else:
fee_per_byte = fee_per_kb/1000
rate_str = format_fee_satoshis(fee_per_byte) + ' sat/byte'
if dyn:
if mempool:
depth = self.depth_target(slider_pos)
text = self.depth_tooltip(depth)
else:
eta = self.eta_target(slider_pos)
text = self.eta_tooltip(eta)
tooltip = rate_str
else: # using static fees
assert fee_per_kb is not None
assert fee_per_byte is not None
text = rate_str
if mempool and self.has_fee_mempool():
depth = self.fee_to_depth(fee_per_byte)
tooltip = self.depth_tooltip(depth)
elif not mempool and self.has_fee_etas():
eta = self.fee_to_eta(fee_per_kb)
tooltip = self.eta_tooltip(eta)
else:
tooltip = ''
return text, tooltip
def get_depth_level(self):
maxp = len(FEE_DEPTH_TARGETS) - 1
return min(maxp, self.get('depth_level', 2))
def get_fee_level(self):
maxp = len(FEE_ETA_TARGETS) # not (-1) to have "next block"
return min(maxp, self.get('fee_level', 2))
def get_fee_slider(self, dyn, mempool) -> Tuple[int, int, Optional[int]]:
if dyn:
if mempool:
pos = self.get_depth_level()
maxp = len(FEE_DEPTH_TARGETS) - 1
fee_rate = self.depth_to_fee(pos)
else:
pos = self.get_fee_level()
maxp = len(FEE_ETA_TARGETS) # not (-1) to have "next block"
fee_rate = self.eta_to_fee(pos)
else:
fee_rate = self.fee_per_kb(dyn=False)
pos = self.static_fee_index(fee_rate)
maxp = len(FEERATE_STATIC_VALUES) - 1
return maxp, pos, fee_rate
def static_fee(self, i):
return FEERATE_STATIC_VALUES[i]
def static_fee_index(self, value) -> int:
if value is None:
raise TypeError('static fee cannot be None')
dist = list(map(lambda x: abs(x - value), FEERATE_STATIC_VALUES))
return min(range(len(dist)), key=dist.__getitem__)
def has_fee_etas(self):
return len(self.fee_estimates) == 4
def has_fee_mempool(self) -> bool:
return self.mempool_fees is not None
def has_dynamic_fees_ready(self):
if self.use_mempool_fees():
return self.has_fee_mempool()
else:
return self.has_fee_etas()
def is_dynfee(self):
return bool(self.get('dynamic_fees', True))
def use_mempool_fees(self):
return bool(self.get('mempool_fees', False))
def _feerate_from_fractional_slider_position(self, fee_level: float, dyn: bool,
mempool: bool) -> Union[int, None]:
fee_level = max(fee_level, 0)
fee_level = min(fee_level, 1)
if dyn:
max_pos = (len(FEE_DEPTH_TARGETS) - 1) if mempool else len(FEE_ETA_TARGETS)
slider_pos = round(fee_level * max_pos)
fee_rate = self.depth_to_fee(slider_pos) if mempool else self.eta_to_fee(slider_pos)
else:
max_pos = len(FEERATE_STATIC_VALUES) - 1
slider_pos = round(fee_level * max_pos)
fee_rate = FEERATE_STATIC_VALUES[slider_pos]
return fee_rate
def fee_per_kb(self, dyn: bool=None, mempool: bool=None, fee_level: float=None) -> Optional[int]:
"""Returns sat/kvB fee to pay for a txn.
Note: might return None.
fee_level: float between 0.0 and 1.0, representing fee slider position
"""
if constants.net is constants.BitcoinRegtest:
return FEERATE_REGTEST_HARDCODED
if dyn is None:
dyn = self.is_dynfee()
if mempool is None:
mempool = self.use_mempool_fees()
if fee_level is not None:
return self._feerate_from_fractional_slider_position(fee_level, dyn, mempool)
# there is no fee_level specified; will use config.
# note: 'depth_level' and 'fee_level' in config are integer slider positions,
# unlike fee_level here, which (when given) is a float in [0.0, 1.0]
if dyn:
if mempool:
fee_rate = self.depth_to_fee(self.get_depth_level())
else:
fee_rate = self.eta_to_fee(self.get_fee_level())
else:
fee_rate = self.get('fee_per_kb', FEERATE_FALLBACK_STATIC_FEE)
if fee_rate is not None:
fee_rate = int(fee_rate)
return fee_rate
def fee_per_byte(self):
"""Returns sat/vB fee to pay for a txn.
Note: might return None.
"""
fee_per_kb = self.fee_per_kb()
return fee_per_kb / 1000 if fee_per_kb is not None else None
def estimate_fee(self, size: Union[int, float, Decimal], *,
allow_fallback_to_static_rates: bool = False) -> int:
fee_per_kb = self.fee_per_kb()
if fee_per_kb is None:
if allow_fallback_to_static_rates:
fee_per_kb = FEERATE_FALLBACK_STATIC_FEE
else:
raise NoDynamicFeeEstimates()
return self.estimate_fee_for_feerate(fee_per_kb, size)
@classmethod
def estimate_fee_for_feerate(cls, fee_per_kb: Union[int, float, Decimal],
size: Union[int, float, Decimal]) -> int:
size = Decimal(size)
fee_per_kb = Decimal(fee_per_kb)
fee_per_byte = fee_per_kb / 1000
# to be consistent with what is displayed in the GUI,
# the calculation needs to use the same precision:
fee_per_byte = quantize_feerate(fee_per_byte)
return round(fee_per_byte * size)
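# Worked example (illustrative numbers):
# estimate_fee_for_feerate(fee_per_kb=150_000, size=225) computes
# fee_per_byte = 150.0 sat/vB (unchanged by quantize_feerate here) and
# returns round(150.0 * 225) = 33_750 sat.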
def update_fee_estimates(self, key, value):
self.fee_estimates[key] = value
self.fee_estimates_last_updated[key] = time.time()
def is_fee_estimates_update_required(self):
"""Checks time since last requested and updated fee estimates.
Returns True if an update should be requested.
"""
now = time.time()
return now - self.last_time_fee_estimates_requested > 60
def requested_fee_estimates(self):
self.last_time_fee_estimates_requested = time.time()
def get_video_device(self):
device = self.get("video_device", "default")
if device == 'default':
device = ''
return device
def get_ssl_context(self):
ssl_keyfile = self.get('ssl_keyfile')
ssl_certfile = self.get('ssl_certfile')
if ssl_keyfile and ssl_certfile:
ssl_context = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)
ssl_context.load_cert_chain(ssl_certfile, ssl_keyfile)
return ssl_context
def get_ssl_domain(self):
from .paymentrequest import check_ssl_config
if self.get('ssl_keyfile') and self.get('ssl_certfile'):
SSL_identity = check_ssl_config(self)
else:
SSL_identity = None
return SSL_identity
def get_netaddress(self, key: str) -> Optional[NetAddress]:
text = self.get(key)
if text:
try:
return NetAddress.from_string(text)
except:
pass
def format_amount(self, x, is_diff=False, whitespaces=False):
return format_satoshis(
x,
num_zeros=self.num_zeros,
decimal_point=self.decimal_point,
is_diff=is_diff,
whitespaces=whitespaces,
)
def format_amount_and_units(self, amount):
return self.format_amount(amount) + ' '+ self.get_base_unit()
def format_fee_rate(self, fee_rate):
return format_fee_satoshis(fee_rate/1000, num_zeros=self.num_zeros) + ' sat/byte'
def get_base_unit(self):
return decimal_point_to_base_unit_name(self.decimal_point)
def set_base_unit(self, unit):
assert unit in base_units.keys()
self.decimal_point = base_unit_name_to_decimal_point(unit)
self.set_key('decimal_point', self.decimal_point, True)
def get_decimal_point(self):
return self.decimal_point
def read_user_config(path):
"""Parse and store the user config settings in electrum.conf into user_config[]."""
if not path:
return {}
config_path = os.path.join(path, "config")
if not os.path.exists(config_path):
return {}
try:
with open(config_path, "r", encoding='utf-8') as f:
data = f.read()
result = json.loads(data)
except:
_logger.warning(f"Cannot read config file. {config_path}")
return {}
if type(result) is not dict:
return {}
return result
| 1 | 13,981 | how does that happen? | spesmilo-electrum | py |
@@ -31,9 +31,9 @@
#
from __future__ import print_function
-import unittest
+import unittest, doctest
import os,sys
-
+from rdkit.six import exec_
from rdkit.six.moves import cPickle
from rdkit import rdBase | 1 | # $Id$
#
# Copyright (c) 2007-2014, Novartis Institutes for BioMedical Research Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Novartis Institutes for BioMedical Research Inc.
# nor the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
from __future__ import print_function
import unittest
import os,sys
from rdkit.six.moves import cPickle
from rdkit import rdBase
from rdkit import Chem
from rdkit.Chem import rdChemReactions
from rdkit import Geometry
from rdkit import RDConfig
def feq(v1,v2,tol2=1e-4):
return abs(v1-v2)<=tol2
def ptEq(pt1, pt2, tol=1e-4):
return feq(pt1.x,pt2.x,tol) and feq(pt1.y,pt2.y,tol) and feq(pt1.z,pt2.z,tol)
class TestCase(unittest.TestCase) :
def setUp(self):
self.dataDir = os.path.join(RDConfig.RDBaseDir,'Code','GraphMol','ChemReactions','testData')
def test1Basics(self):
rxna = rdChemReactions.ChemicalReaction()
# also tests empty copy constructor
for rxn in [rxna, rdChemReactions.ChemicalReaction(rxna)]:
self.assertTrue(rxn.GetNumReactantTemplates()==0)
self.assertTrue(rxn.GetNumProductTemplates()==0)
r1= Chem.MolFromSmarts('[C:1](=[O:2])O')
rxn.AddReactantTemplate(r1)
self.assertTrue(rxn.GetNumReactantTemplates()==1)
r1= Chem.MolFromSmarts('[N:3]')
rxn.AddReactantTemplate(r1)
self.assertTrue(rxn.GetNumReactantTemplates()==2)
r1= Chem.MolFromSmarts('[C:1](=[O:2])[N:3]')
rxn.AddProductTemplate(r1)
self.assertTrue(rxn.GetNumProductTemplates()==1)
reacts = (Chem.MolFromSmiles('C(=O)O'),Chem.MolFromSmiles('N'))
ps = rxn.RunReactants(reacts)
self.assertTrue(len(ps)==1)
self.assertTrue(len(ps[0])==1)
self.assertTrue(ps[0][0].GetNumAtoms()==3)
ps = rxn.RunReactants(list(reacts))
self.assertTrue(len(ps)==1)
self.assertTrue(len(ps[0])==1)
self.assertTrue(ps[0][0].GetNumAtoms()==3)
def test2DaylightParser(self):
rxna = rdChemReactions.ReactionFromSmarts('[C:1](=[O:2])O.[N:3]>>[C:1](=[O:2])[N:3]')
for rxn in [rxna, rdChemReactions.ChemicalReaction(rxna)]:
self.assertTrue(rxn)
self.assertTrue(rxn.GetNumReactantTemplates()==2)
self.assertTrue(rxn.GetNumProductTemplates()==1)
self.assertTrue(rxn._getImplicitPropertiesFlag())
reacts = (Chem.MolFromSmiles('C(=O)O'),Chem.MolFromSmiles('N'))
ps = rxn.RunReactants(reacts)
self.assertTrue(len(ps)==1)
self.assertTrue(len(ps[0])==1)
self.assertTrue(ps[0][0].GetNumAtoms()==3)
reacts = (Chem.MolFromSmiles('CC(=O)OC'),Chem.MolFromSmiles('CN'))
ps = rxn.RunReactants(reacts)
self.assertTrue(len(ps)==1)
self.assertTrue(len(ps[0])==1)
self.assertTrue(ps[0][0].GetNumAtoms()==5)
def test3MDLParsers(self):
fileN = os.path.join(self.dataDir,'AmideBond.rxn')
rxna = rdChemReactions.ReactionFromRxnFile(fileN)
for rxn in [rxna, rdChemReactions.ChemicalReaction(rxna)]:
self.assertTrue(rxn)
self.assertFalse(rxn._getImplicitPropertiesFlag())
self.assertTrue(rxn.GetNumReactantTemplates()==2)
self.assertTrue(rxn.GetNumProductTemplates()==1)
reacts = (Chem.MolFromSmiles('C(=O)O'),Chem.MolFromSmiles('N'))
ps = rxn.RunReactants(reacts)
self.assertTrue(len(ps)==1)
self.assertTrue(len(ps[0])==1)
self.assertTrue(ps[0][0].GetNumAtoms()==3)
with open(fileN, 'r') as rxnF:
rxnBlock = rxnF.read()
rxn = rdChemReactions.ReactionFromRxnBlock(rxnBlock)
self.assertTrue(rxn)
self.assertTrue(rxn.GetNumReactantTemplates()==2)
self.assertTrue(rxn.GetNumProductTemplates()==1)
reacts = (Chem.MolFromSmiles('C(=O)O'),Chem.MolFromSmiles('N'))
ps = rxn.RunReactants(reacts)
self.assertTrue(len(ps)==1)
self.assertTrue(len(ps[0])==1)
self.assertTrue(ps[0][0].GetNumAtoms()==3)
def test4ErrorHandling(self):
self.assertRaises(ValueError,lambda x='[C:1](=[O:2])Q.[N:3]>>[C:1](=[O:2])[N:3]':rdChemReactions.ReactionFromSmarts(x))
self.assertRaises(ValueError,lambda x='[C:1](=[O:2])O.[N:3]>>[C:1](=[O:2])[N:3]Q':rdChemReactions.ReactionFromSmarts(x))
self.assertRaises(ValueError,lambda x='[C:1](=[O:2])O.[N:3]>>[C:1](=[O:2])[N:3]>>CC':rdChemReactions.ReactionFromSmarts(x))
block="""$RXN
ISIS 082120061354
3 1
$MOL
-ISIS- 08210613542D
3 2 0 0 0 0 0 0 0 0999 V2000
-1.4340 -0.6042 0.0000 C 0 0 0 0 0 0 0 0 0 2 0 0
-0.8639 -0.9333 0.0000 O 0 0 0 0 0 0 0 0 0 0 0 0
-1.4340 0.0542 0.0000 O 0 0 0 0 0 0 0 0 0 1 0 0
1 2 1 0 0 0 0
1 3 2 0 0 0 0
M END
$MOL
-ISIS- 08210613542D
1 0 0 0 0 0 0 0 0 0999 V2000
2.2125 -0.7833 0.0000 N 0 0 0 0 0 0 0 0 0 3 0 0
M END
$MOL
-ISIS- 08210613542D
3 2 0 0 0 0 0 0 0 0999 V2000
9.5282 -0.8083 0.0000 N 0 0 0 0 0 0 0 0 0 3 0 0
8.9579 -0.4792 0.0000 C 0 0 0 0 0 0 0 0 0 2 0 0
8.9579 0.1792 0.0000 O 0 0 0 0 0 0 0 0 0 1 0 0
1 2 1 0 0 0 0
2 3 2 0 0 0 0
M END
"""
self.assertRaises(ValueError,lambda x=block:rdChemReactions.ReactionFromRxnBlock(x))
block="""$RXN
ISIS 082120061354
2 1
$MOL
-ISIS- 08210613542D
4 2 0 0 0 0 0 0 0 0999 V2000
-1.4340 -0.6042 0.0000 C 0 0 0 0 0 0 0 0 0 2 0 0
-0.8639 -0.9333 0.0000 O 0 0 0 0 0 0 0 0 0 0 0 0
-1.4340 0.0542 0.0000 O 0 0 0 0 0 0 0 0 0 1 0 0
1 2 1 0 0 0 0
1 3 2 0 0 0 0
M END
$MOL
-ISIS- 08210613542D
1 0 0 0 0 0 0 0 0 0999 V2000
2.2125 -0.7833 0.0000 N 0 0 0 0 0 0 0 0 0 3 0 0
M END
$MOL
-ISIS- 08210613542D
3 2 0 0 0 0 0 0 0 0999 V2000
9.5282 -0.8083 0.0000 N 0 0 0 0 0 0 0 0 0 3 0 0
8.9579 -0.4792 0.0000 C 0 0 0 0 0 0 0 0 0 2 0 0
8.9579 0.1792 0.0000 O 0 0 0 0 0 0 0 0 0 1 0 0
1 2 1 0 0 0 0
2 3 2 0 0 0 0
M END
"""
#self.assertRaises(ValueError,lambda x=block:rdChemReactions.ReactionFromRxnBlock(x))
block="""$RXN
ISIS 082120061354
2 1
$MOL
-ISIS- 08210613542D
3 2 0 0 0 0 0 0 0 0999 V2000
-1.4340 -0.6042 0.0000 C 0 0 0 0 0 0 0 0 0 2 0 0
-0.8639 -0.9333 0.0000 O 0 0 0 0 0 0 0 0 0 0 0 0
-1.4340 0.0542 0.0000 O 0 0 0 0 0 0 0 0 0 1 0 0
1 2 1 0 0 0 0
1 3 2 0 0 0 0
M END
$MOL
-ISIS- 08210613542D
1 0 0 0 0 0 0 0 0 0999 V2000
2.2125 -0.7833 0.0000 N 0 0 0 0 0 0 0 0 0 3 0 0
M END
$MOL
-ISIS- 08210613542D
3 1 0 0 0 0 0 0 0 0999 V2000
9.5282 -0.8083 0.0000 N 0 0 0 0 0 0 0 0 0 3 0 0
8.9579 -0.4792 0.0000 C 0 0 0 0 0 0 0 0 0 2 0 0
8.9579 0.1792 0.0000 O 0 0 0 0 0 0 0 0 0 1 0 0
1 2 1 0 0 0 0
2 3 2 0 0 0 0
M END
"""
#self.assertRaises(ValueError,lambda x=block:rdChemReactions.ReactionFromRxnBlock(x))
def test5Validation(self):
rxn = rdChemReactions.ReactionFromSmarts('[C:1](=[O:2])O.[N:3]>>[C:1](=[O:2])[N:3]')
self.assertTrue(rxn)
self.assertTrue(rxn.Validate()==(0,0))
rxn = rdChemReactions.ReactionFromSmarts('[C:1](=[O:1])O.[N:3]>>[C:1](=[O:2])[N:3]')
self.assertTrue(rxn)
self.assertTrue(rxn.Validate()==(1,1))
rxn = rdChemReactions.ReactionFromSmarts('[C:1](=[O:2])[O:4].[N:3]>>[C:1](=[O:2])[N:3]')
self.assertTrue(rxn)
self.assertTrue(rxn.Validate()==(1,0))
rxn = rdChemReactions.ReactionFromSmarts('[C:1](=[O:2])O.[N:3]>>[C:1](=[O:2])[N:3][C:5]')
self.assertTrue(rxn)
self.assertTrue(rxn.Validate()==(1,0))
def test6Exceptions(self):
rxn = rdChemReactions.ReactionFromSmarts('[C:1]Cl>>[C:1]')
self.assertTrue(rxn)
self.assertRaises(ValueError,lambda x=rxn:x.RunReactants(()))
self.assertRaises(ValueError,lambda x=rxn:x.RunReactants((Chem.MolFromSmiles('CC'),Chem.MolFromSmiles('C'))))
ps=rxn.RunReactants((Chem.MolFromSmiles('CCCl'),))
self.assertTrue(len(ps)==1)
self.assertTrue(len(ps[0])==1)
def _test7Leak(self):
rxn = rdChemReactions.ReactionFromSmarts('[C:1]Cl>>[C:1]')
self.assertTrue(rxn)
print('running: ')
for i in range(int(1e5)):
ps=rxn.RunReactants((Chem.MolFromSmiles('CCCl'),))
self.assertTrue(len(ps)==1)
self.assertTrue(len(ps[0])==1)
if not i%1000: print(i)
def test8Properties(self):
rxn = rdChemReactions.ReactionFromSmarts('[O:1]>>[O:1][3#0]')
self.assertTrue(rxn)
ps=rxn.RunReactants((Chem.MolFromSmiles('CO'),))
self.assertTrue(len(ps)==1)
self.assertTrue(len(ps[0])==1)
Chem.SanitizeMol(ps[0][0])
self.assertEqual(ps[0][0].GetAtomWithIdx(1).GetIsotope(),3)
def test9AromaticityTransfer(self):
# this was issue 2664121
mol = Chem.MolFromSmiles('c1ccc(C2C3(Cc4c(cccc4)C2)CCCC3)cc1')
rxn = rdChemReactions.ReactionFromSmarts('[A:1]1~[*:2]~[*:3]~[*:4]~[*:5]~[A:6]-;@1>>[*:1]~[*:2]~[*:3]~[*:4]~[*:5]~[*:6]')
products = rxn.RunReactants([mol])
self.assertEqual(len(products),6)
for p in products:
self.assertEqual(len(p),1)
Chem.SanitizeMol(p[0])
def test10DotSeparation(self):
# 08/05/14
# This test is changed due to a new behavior of the smarts
# reaction parser which now allows using parenthesis in products
# as well. original smiles: '[C:1]1[O:2][N:3]1>>[C:1]1[O:2].[N:3]1'
rxn = rdChemReactions.ReactionFromSmarts('[C:1]1[O:2][N:3]1>>([C:1]1[O:2].[N:3]1)')
mol = Chem.MolFromSmiles('C1ON1')
products = rxn.RunReactants([mol])
self.assertEqual(len(products),1)
for p in products:
self.assertEqual(len(p),1)
self.assertEqual(p[0].GetNumAtoms(),3)
self.assertEqual(p[0].GetNumBonds(),2)
def test11ImplicitProperties(self):
rxn = rdChemReactions.ReactionFromSmarts('[C:1]O>>[C:1]')
mol = Chem.MolFromSmiles('CCO')
products = rxn.RunReactants([mol])
self.assertEqual(len(products),1)
for p in products:
self.assertEqual(len(p),1)
self.assertEqual(Chem.MolToSmiles(p[0]),'CC')
mol2 = Chem.MolFromSmiles('C[CH-]O')
products = rxn.RunReactants([mol2])
self.assertEqual(len(products),1)
for p in products:
self.assertEqual(len(p),1)
self.assertEqual(Chem.MolToSmiles(p[0]),'[CH2-]C')
rxn._setImplicitPropertiesFlag(False)
products = rxn.RunReactants([mol])
self.assertEqual(len(products),1)
for p in products:
self.assertEqual(len(p),1)
self.assertEqual(Chem.MolToSmiles(p[0]),'CC')
products = rxn.RunReactants([mol2])
self.assertEqual(len(products),1)
for p in products:
self.assertEqual(len(p),1)
self.assertEqual(Chem.MolToSmiles(p[0]),'CC')
def test12Pickles(self):
# 08/05/14
# This test is changed due to a new behavior of the smarts
# reaction parser which now allows using parenthesis in products
# as well. original smiles: '[C:1]1[O:2][N:3]1>>[C:1]1[O:2].[N:3]1'
rxn = rdChemReactions.ReactionFromSmarts('[C:1]1[O:2][N:3]1>>([C:1]1[O:2].[N:3]1)')
pkl = cPickle.dumps(rxn)
rxn = cPickle.loads(pkl)
mol = Chem.MolFromSmiles('C1ON1')
products = rxn.RunReactants([mol])
self.assertEqual(len(products),1)
for p in products:
self.assertEqual(len(p),1)
self.assertEqual(p[0].GetNumAtoms(),3)
self.assertEqual(p[0].GetNumBonds(),2)
rxn = rdChemReactions.ChemicalReaction(rxn.ToBinary())
products = rxn.RunReactants([mol])
self.assertEqual(len(products),1)
for p in products:
self.assertEqual(len(p),1)
self.assertEqual(p[0].GetNumAtoms(),3)
self.assertEqual(p[0].GetNumBonds(),2)
def test13GetTemplates(self):
rxn = rdChemReactions.ReactionFromSmarts('[C:1]1[O:2][N:3]1>>[C:1][O:2].[N:3]')
r1 = rxn.GetReactantTemplate(0)
sma=Chem.MolToSmarts(r1)
self.assertEqual(sma,'[C:1]1-,:[O:2]-,:[N:3]-,:1')
p1 = rxn.GetProductTemplate(0)
sma=Chem.MolToSmarts(p1)
self.assertEqual(sma,'[C:1]-,:[O:2]')
p2 = rxn.GetProductTemplate(1)
sma=Chem.MolToSmarts(p2)
self.assertEqual(sma,'[N:3]')
self.assertRaises(ValueError,lambda :rxn.GetProductTemplate(2))
self.assertRaises(ValueError,lambda :rxn.GetReactantTemplate(1))
def test14Matchers(self):
rxn = rdChemReactions.ReactionFromSmarts('[C;!$(C(-O)-O):1](=[O:2])[O;H,-1].[N;!H0:3]>>[C:1](=[O:2])[N:3]')
self.assertTrue(rxn)
rxn.Initialize()
self.assertTrue(rxn.IsMoleculeReactant(Chem.MolFromSmiles('OC(=O)C')))
self.assertFalse(rxn.IsMoleculeReactant(Chem.MolFromSmiles('OC(=O)O')))
self.assertTrue(rxn.IsMoleculeReactant(Chem.MolFromSmiles('CNC')))
self.assertFalse(rxn.IsMoleculeReactant(Chem.MolFromSmiles('CN(C)C')))
self.assertTrue(rxn.IsMoleculeProduct(Chem.MolFromSmiles('NC(=O)C')))
self.assertTrue(rxn.IsMoleculeProduct(Chem.MolFromSmiles('CNC(=O)C')))
self.assertFalse(rxn.IsMoleculeProduct(Chem.MolFromSmiles('COC(=O)C')))
def test15Replacements(self):
rxn = rdChemReactions.ReactionFromSmarts('[{amine}:1]>>[*:1]-C',
replacements={'{amine}':'$([N;!H0;$(N-[#6]);!$(N-[!#6;!#1]);!$(N-C=[O,N,S])])'})
self.assertTrue(rxn)
rxn.Initialize()
reactants = (Chem.MolFromSmiles('CCN'),)
ps = rxn.RunReactants(reactants)
self.assertEqual(len(ps),1)
self.assertEqual(len(ps[0]),1)
self.assertEqual(ps[0][0].GetNumAtoms(),4)
def test16GetReactingAtoms(self):
rxn = rdChemReactions.ReactionFromSmarts("[O:1][C:2].[N:3]>>[N:1][C:2].[N:3]")
self.assertTrue(rxn)
rxn.Initialize()
rAs = rxn.GetReactingAtoms()
self.assertEqual(len(rAs),2)
self.assertEqual(len(rAs[0]),1)
self.assertEqual(len(rAs[1]),0)
rxn = rdChemReactions.ReactionFromSmarts("[O:1]C>>[O:1]C")
self.assertTrue(rxn)
rxn.Initialize()
rAs = rxn.GetReactingAtoms()
self.assertEqual(len(rAs),1)
self.assertEqual(len(rAs[0]),2)
rAs = rxn.GetReactingAtoms(True)
self.assertEqual(len(rAs),1)
self.assertEqual(len(rAs[0]),1)
def test17AddRecursiveQueriesToReaction(self):
rxn = rdChemReactions.ReactionFromSmarts("[C:1][O:2].[N:3]>>[C:1][N:2]")
self.assertTrue(rxn)
rxn.Initialize()
qs = {'aliphatic':Chem.MolFromSmiles('CC')}
rxn.GetReactantTemplate(0).GetAtomWithIdx(0).SetProp('query', 'aliphatic')
rxn.AddRecursiveQueriesToReaction(qs,'query')
q = rxn.GetReactantTemplate(0)
m = Chem.MolFromSmiles('CCOC')
self.assertTrue(m.HasSubstructMatch(q))
m = Chem.MolFromSmiles('CO')
self.assertFalse(m.HasSubstructMatch(q))
rxn = rdChemReactions.ReactionFromSmarts("[C:1][O:2].[N:3]>>[C:1][N:2]")
rxn.Initialize()
rxn.GetReactantTemplate(0).GetAtomWithIdx(0).SetProp('query', 'aliphatic')
labels = rxn.AddRecursiveQueriesToReaction(qs,'query', getLabels=True)
self.assertTrue(len(labels), 1)
def test17bAddRecursiveQueriesToReaction(self):
from rdkit.Chem import FilterCatalog
rxn = rdChemReactions.ReactionFromSmarts("[C:1][O:2].[N:3]>>[C:1][N:2]")
self.assertTrue(rxn)
rxn.Initialize()
rxn.GetReactantTemplate(0).GetAtomWithIdx(0).SetProp('query', 'carboxylicacid')
querydefs = {k.lower():v
for k,v in FilterCatalog.GetFlattenedFunctionalGroupHierarchy().items()}
self.assertTrue('CarboxylicAcid' in FilterCatalog.GetFlattenedFunctionalGroupHierarchy())
rxn.AddRecursiveQueriesToReaction(querydefs,
'query')
q = rxn.GetReactantTemplate(0)
m = Chem.MolFromSmiles('C(=O)[O-].N')
self.assertTrue(m.HasSubstructMatch(q))
m = Chem.MolFromSmiles('C.N')
self.assertFalse(m.HasSubstructMatch(q))
def test18GithubIssue16(self):
rxn = rdChemReactions.ReactionFromSmarts("[F:1]>>[Cl:1]")
self.assertTrue(rxn)
rxn.Initialize()
self.assertRaises(ValueError,lambda : rxn.RunReactants((None,)))
def test19RemoveUnmappedMoleculesToAgents(self):
rxn = rdChemReactions.ReactionFromSmarts("[C:1]=[O:2].[N:3].C(=O)O>[OH2].[Na].[Cl]>[N:3]~[C:1]=[O:2]")
self.failUnless(rxn)
rxn.Initialize()
self.failUnless(rxn.GetNumReactantTemplates()==3)
self.failUnless(rxn.GetNumProductTemplates()==1)
self.failUnless(rxn.GetNumAgentTemplates()==3)
rxn.RemoveUnmappedReactantTemplates()
rxn.RemoveUnmappedProductTemplates()
self.failUnless(rxn.GetNumReactantTemplates()==2)
self.failUnless(rxn.GetNumProductTemplates()==1)
self.failUnless(rxn.GetNumAgentTemplates()==4)
rxn = rdChemReactions.ReactionFromSmarts("[C:1]=[O:2].[N:3].C(=O)O>>[N:3]~[C:1]=[O:2].[OH2]")
self.failUnless(rxn)
rxn.Initialize()
self.failUnless(rxn.GetNumReactantTemplates()==3)
self.failUnless(rxn.GetNumProductTemplates()==2)
self.failUnless(rxn.GetNumAgentTemplates()==0)
agentList=[]
rxn.RemoveUnmappedReactantTemplates(moveToAgentTemplates=False, targetList=agentList)
rxn.RemoveUnmappedProductTemplates(targetList=agentList)
self.failUnless(rxn.GetNumReactantTemplates()==2)
self.failUnless(rxn.GetNumProductTemplates()==1)
self.failUnless(rxn.GetNumAgentTemplates()==1)
self.failUnless(len(agentList)==2)
def test20CheckCopyConstructedReactionAtomProps(self):
RLABEL = "_MolFileRLabel"
amine_rxn = '$RXN\n\n ISIS 090220091541\n\n 2 1\n$MOL\n\n -ISIS- 09020915412D\n\n 3 2 0 0 0 0 0 0 0 0999 V2000\n -2.9083 -0.4708 0.0000 R# 0 0 0 0 0 0 0 0 0 1 0 0\n -2.3995 -0.1771 0.0000 C 0 0 0 0 0 0 0 0 0 2 0 0\n -2.4042 0.4125 0.0000 O 0 0 0 0 0 0 0 0 0 0 0 0\n 1 2 1 0 0 0 0\n 2 3 2 0 0 0 0\nV 2 aldehyde\nM RGP 1 1 1\nM END\n$MOL\n\n -ISIS- 09020915412D\n\n 2 1 0 0 0 0 0 0 0 0999 V2000\n 2.8375 -0.2500 0.0000 R# 0 0 0 0 0 0 0 0 0 3 0 0\n 3.3463 0.0438 0.0000 N 0 0 0 0 0 0 0 0 0 4 0 0\n 1 2 1 0 0 0 0\nV 2 amine\nM RGP 1 1 2\nM END\n$MOL\n\n -ISIS- 09020915412D\n\n 4 3 0 0 0 0 0 0 0 0999 V2000\n 13.3088 0.9436 0.0000 C 0 0 0 0 0 0 0 0 0 2 0 0\n 13.8206 1.2321 0.0000 R# 0 0 0 0 0 0 0 0 0 1 0 0\n 13.3028 0.3561 0.0000 N 0 0 0 0 0 0 0 0 0 4 0 0\n 12.7911 0.0676 0.0000 R# 0 0 0 0 0 0 0 0 0 3 0 0\n 1 3 1 0 0 0 0\n 1 2 1 0 0 0 0\n 3 4 1 0 0 0 0\nM RGP 2 2 1 4 2\nM END\n'
rxn = rdChemReactions.ReactionFromRxnBlock(amine_rxn)
res = []
for atom in rxn.GetReactantTemplate(0).GetAtoms():
if atom.HasProp(RLABEL):
res.append(( atom.GetIdx(), atom.GetProp(RLABEL)))
rxn2 = rdChemReactions.ChemicalReaction(rxn)
res2 = []
for atom in rxn2.GetReactantTemplate(0).GetAtoms():
if atom.HasProp(RLABEL):
res2.append(( atom.GetIdx(), atom.GetProp(RLABEL)))
self.assertEquals(res,res2)
# currently ToBinary does not save atom props
# rxn2 = rdChemReactions.ChemicalReaction(rxn.ToBinary())
def test21CheckRawIters(self):
RLABEL = "_MolFileRLabel"
amine_rxn = '$RXN\n\n ISIS 090220091541\n\n 2 1\n$MOL\n\n -ISIS- 09020915412D\n\n 3 2 0 0 0 0 0 0 0 0999 V2000\n -2.9083 -0.4708 0.0000 R# 0 0 0 0 0 0 0 0 0 1 0 0\n -2.3995 -0.1771 0.0000 C 0 0 0 0 0 0 0 0 0 2 0 0\n -2.4042 0.4125 0.0000 O 0 0 0 0 0 0 0 0 0 0 0 0\n 1 2 1 0 0 0 0\n 2 3 2 0 0 0 0\nV 2 aldehyde\nM RGP 1 1 1\nM END\n$MOL\n\n -ISIS- 09020915412D\n\n 2 1 0 0 0 0 0 0 0 0999 V2000\n 2.8375 -0.2500 0.0000 R# 0 0 0 0 0 0 0 0 0 3 0 0\n 3.3463 0.0438 0.0000 N 0 0 0 0 0 0 0 0 0 4 0 0\n 1 2 1 0 0 0 0\nV 2 amine\nM RGP 1 1 2\nM END\n$MOL\n\n -ISIS- 09020915412D\n\n 4 3 0 0 0 0 0 0 0 0999 V2000\n 13.3088 0.9436 0.0000 C 0 0 0 0 0 0 0 0 0 2 0 0\n 13.8206 1.2321 0.0000 R# 0 0 0 0 0 0 0 0 0 1 0 0\n 13.3028 0.3561 0.0000 N 0 0 0 0 0 0 0 0 0 4 0 0\n 12.7911 0.0676 0.0000 R# 0 0 0 0 0 0 0 0 0 3 0 0\n 1 3 1 0 0 0 0\n 1 2 1 0 0 0 0\n 3 4 1 0 0 0 0\nM RGP 2 2 1 4 2\nM END\n'
rxn = rdChemReactions.ReactionFromRxnBlock(amine_rxn)
reactants = rxn.GetReactants()
self.assertEquals( len(reactants), rxn.GetNumReactantTemplates() )
products = rxn.GetProducts()
self.assertEquals( len(products), rxn.GetNumProductTemplates() )
agents = rxn.GetAgents()
self.assertEquals( len(agents), rxn.GetNumAgentTemplates() )
for i in range(rxn.GetNumReactantTemplates()):
p = rxn.GetReactantTemplate(i)
mb1 = Chem.MolToMolBlock(p)
mb2 = Chem.MolToMolBlock(reactants[i])
self.assertEquals(mb1, mb2)
def test22RunSingleReactant(self):
# from
# A Collection of Robust Organic Synthesis Reactions for In Silico Molecule Design
# Markus Hartenfeller,*, Martin Eberle, Peter Meier, Cristina Nieto-Oberhuber,
# Karl-Heinz Altmann, Gisbert Schneider, Edgar Jacoby, and Steffen Renner
# Novartis Institutes for BioMedical Research, Novartis Pharma AG, Forum 1,
# Novartis Campus, CH-4056 Basel, Switzerland Swiss Federal Institute of Technology (ETH)
# Zurich, Switzerland
smirks_thiourea = "[N;$(N-[#6]):3]=[C;$(C=S):1].[N;$(N[#6]);!$(N=*);!$([N-]);!$(N#*);!$([ND3]);!$([ND4]);!$(N[O,N]);!$(N[C,S]=[S,O,N]):2]>>[N:3]-[C:1]-[N+0:2]"
rxn = rdChemReactions.ReactionFromSmarts(smirks_thiourea)
reagents = [Chem.MolFromSmiles(x) for x in ['C=CCN=C=S', 'NCc1ncc(Cl)cc1Br']]
res = rxn.RunReactants(reagents)
self.assertTrue(res)
expected_result = [Chem.MolToSmiles(Chem.MolFromSmiles("C=CCNC(N)=S"))]
expected_result.sort()
sidechains_expected_result = [Chem.MolToSmiles(Chem.MolFromSmiles("[*:1]=S.[*:3]CC=C"), isomericSmiles=True)]
sidechains_nodummy_expected_result = [ [0,[3,],[1,]], [3,[1,],[2,]] ]
sidechains_nodummy = []
sidechains_expected_result.sort()
for addDummy in [True, False]:
res = rxn.RunReactant(reagents[0], 0)
assert res
result = []
sidechains = []
for match in res:
for mol in match:
result.append(Chem.MolToSmiles(mol,isomericSmiles=True))
sidechain = rdChemReactions.ReduceProductToSideChains(mol, addDummy)
sidechains.append(
Chem.MolToSmiles(sidechain, isomericSmiles=True))
if not addDummy:
for atom in sidechain.GetAtoms():
if atom.HasProp("_rgroupAtomMaps"):
sidechains_nodummy.append( [atom.GetIdx(),
eval(atom.GetProp("_rgroupAtomMaps")),
eval(atom.GetProp("_rgroupBonds")),
] )
result.sort()
sidechains.sort()
if addDummy:
self.assertEquals(result, expected_result)
self.assertEquals(sidechains, sidechains_expected_result)
else:
self.assertEquals(sidechains_nodummy, sidechains_nodummy_expected_result)
expected_result = [Chem.MolToSmiles(Chem.MolFromSmiles("NCNCc1ncc(Cl)cc1Br"))]
expected_result.sort()
sidechains_expected_result = [Chem.MolToSmiles(Chem.MolFromSmiles("[*:2]Cc1ncc(Cl)cc1Br"), isomericSmiles=True)]
sidechains_expected_result.sort()
res = rxn.RunReactant(reagents[1], 1)
result = []
sidechains = []
for match in res:
for mol in match:
result.append(Chem.MolToSmiles(mol,isomericSmiles=True))
sidechains.append(Chem.MolToSmiles(
rdChemReactions.ReduceProductToSideChains(mol),
isomericSmiles=True))
result.sort()
self.assertEquals(result, expected_result)
self.assertEquals(sidechains, sidechains_expected_result)
self.assertFalse(rxn.RunReactant(reagents[0], 1))
self.assertFalse(rxn.RunReactant(reagents[1], 0))
# try a broken ring based side-chain
sidechains_expected_result = ['c1ccc2c(c1)nc1n2CC[*:2]1']
reactant = Chem.MolFromSmiles('c1ccc2c(c1)nc1n2CCN1')
res = rxn.RunReactant(reactant, 1)
result = []
sidechains = []
for match in res:
for mol in match:
result.append(Chem.MolToSmiles(mol,isomericSmiles=True))
sidechains.append(Chem.MolToSmiles(
rdChemReactions.ReduceProductToSideChains(mol),
isomericSmiles=True))
sidechain = rdChemReactions.ReduceProductToSideChains(mol, addDummyAtoms=False)
self.assertEquals(sidechains, sidechains_expected_result)
def test23CheckNonProduct(self):
smirks_thiourea = "[N;$(N-[#6]):3]=[C;$(C=S):1].[N;$(N[#6]);!$(N=*);!$([N-]);!$(N#*);!$([ND3]);!$([ND4]);!$(N[O,N]);!$(N[C,S]=[S,O,N]):2]>>[N:3]-[C:1]-[N+0:2]"
rxn = rdChemReactions.ReactionFromSmarts(smirks_thiourea)
mol = Chem.MolFromSmiles("CCCCCCCC")
m = rdChemReactions.ReduceProductToSideChains(mol)
self.assertTrue(m.GetNumAtoms() == 0)
mol = Chem.AddHs(mol)
m = rdChemReactions.ReduceProductToSideChains(mol)
self.assertTrue(m.GetNumAtoms() == 0)
if __name__ == '__main__':
unittest.main()
| 1 | 15,036 | This is gross/clever. :-) | rdkit-rdkit | cpp |
@@ -1,6 +1,6 @@
require File.expand_path(File.dirname(__FILE__) + '/test_helper.rb')
-class TestZhCnLocale < Test::Unit::TestCase
+class TestZhLocale < Test::Unit::TestCase
def setup
Faker::Config.locale = 'zh-CN'
end | 1 | require File.expand_path(File.dirname(__FILE__) + '/test_helper.rb')
class TestZhCnLocale < Test::Unit::TestCase
def setup
Faker::Config.locale = 'zh-CN'
end
def teardown
Faker::Config.locale = nil
end
def test_ch_methods
assert Faker::Address.postcode.is_a? String
assert Faker::Address.state.is_a? String
assert Faker::Address.state_abbr.is_a? String
assert Faker::Address.city_prefix.is_a? String
assert Faker::Address.city_suffix.is_a? String
assert Faker::Address.city.is_a? String
assert Faker::Address.street_name.is_a? String
assert Faker::Name.last_name.is_a? String
assert Faker::Name.first_name.is_a? String
assert Faker::Name.name.is_a? String
assert Faker::University.prefix.is_a? String
assert Faker::University.suffix.is_a? String
assert Faker::University.name.is_a? String
end
end | 1 | 8,396 | Take a look at the name of this file. Definitely copy and | faker-ruby-faker | rb |
@@ -163,12 +163,15 @@ func NewVolumeInfo(URL string, volname string, namespace string) (volInfo *Volum
if resp.StatusCode == 500 {
fmt.Printf("Volume: %s not found at namespace: %q\n", volname, namespace)
err = util.InternalServerError
+ return
} else if resp.StatusCode == 503 {
fmt.Printf("maya apiservice not reachable at %q\n", mapiserver.GetURL())
err = util.ServerUnavailable
+ return
} else if resp.StatusCode == 404 {
fmt.Printf("Volume: %s not found at namespace: %q error: %s\n", volname, namespace, http.StatusText(resp.StatusCode))
err = util.PageNotFound
+ return
}
fmt.Printf("Received an error from maya apiservice: statuscode: %d", resp.StatusCode)
err = fmt.Errorf("Received an error from maya apiservice: statuscode: %d", resp.StatusCode) | 1 | /*
Copyright 2017 The OpenEBS Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package command
import (
"encoding/json"
"errors"
"flag"
"fmt"
"net/http"
"strings"
"time"
"github.com/openebs/maya/pkg/apis/openebs.io/v1alpha1"
"github.com/openebs/maya/pkg/client/mapiserver"
"github.com/openebs/maya/pkg/util"
"github.com/spf13/cobra"
)
// VolumeInfo stores the volume information
type VolumeInfo struct {
Volume v1alpha1.CASVolume
}
// CmdVolumeOptions stores information of volume being operated
type CmdVolumeOptions struct {
volName string
sourceVolumeName string
snapshotName string
size string
namespace string
json string
}
// CASType is engine type
type CASType string
const (
// VolumeAPIPath is the api path to get volume information
VolumeAPIPath = "/latest/volumes/"
controllerStatusOk = "running"
volumeStatusOK = "Running"
// JivaStorageEngine is constant for jiva engine
JivaStorageEngine CASType = "jiva"
// CstorStorageEngine is constant for cstor engine
CstorStorageEngine CASType = "cstor"
timeout = 5 * time.Second
)
// # Create a Volume:
// $ mayactl volume create --volname <vol> --size <size>
var (
volumeCommandHelpText = `
The following commands help in operating a Volume, such as create, list, and so on.
Usage: mayactl volume <subcommand> [options] [args]
Examples:
# List Volumes:
$ mayactl volume list
# Statistics of a Volume:
$ mayactl volume stats --volname <vol>
# Statistics of a Volume created in 'test' namespace:
$ mayactl volume stats --volname <vol> --namespace test
# Info of a Volume:
$ mayactl volume info --volname <vol>
# Info of a Volume created in 'test' namespace:
$ mayactl volume info --volname <vol> --namespace test
# Delete a Volume:
$ mayactl volume delete --volname <vol>
# Delete a Volume created in 'test' namespace:
$ mayactl volume delete --volname <vol> --namespace test
`
options = &CmdVolumeOptions{
namespace: "default",
}
)
// NewCmdVolume provides options for managing OpenEBS Volume
func NewCmdVolume() *cobra.Command {
cmd := &cobra.Command{
Use: "volume",
Short: "Provides operations related to a Volume",
Long: volumeCommandHelpText,
}
cmd.AddCommand(
// NewCmdVolumeCreate(),
NewCmdVolumesList(),
NewCmdVolumeDelete(),
NewCmdVolumeStats(),
NewCmdVolumeInfo(),
)
cmd.PersistentFlags().StringVarP(&options.namespace, "namespace", "n", options.namespace,
"namespace name, required if volume is not in the default namespace")
cmd.PersistentFlags().AddGoFlagSet(flag.CommandLine)
flag.CommandLine.Parse([]string{})
return cmd
}
// Validate verifies whether a volume name,source name or snapshot name is provided or not followed by
// stats command. It returns nil and proceeds to execute the command if there is
// no error and returns an error if it is missing.
func (c *CmdVolumeOptions) Validate(cmd *cobra.Command, snapshotnameverify, sourcenameverify, volnameverify bool) error {
if snapshotnameverify {
if len(c.snapshotName) == 0 {
return errors.New("--snapname is missing. Please provide a snapshotname")
}
}
if sourcenameverify {
if len(c.sourceVolumeName) == 0 {
return errors.New("--sourcevol is missing. Please specify a sourcevolumename")
}
}
if volnameverify {
if len(c.volName) == 0 {
return errors.New("--volname is missing. Please specify a unique volumename")
}
}
return nil
}
// NewVolumeInfo fetches and fills CASVolume structure from URL given to it
func NewVolumeInfo(URL string, volname string, namespace string) (volInfo *VolumeInfo, err error) {
url := URL
req, err := http.NewRequest("GET", url, nil)
if err != nil {
return
}
req.Header.Set("namespace", namespace)
c := &http.Client{
Timeout: timeout,
}
resp, err := c.Do(req)
if err != nil {
fmt.Printf("Can't get a response, error found: %v", err)
return
}
if resp != nil && resp.StatusCode != 200 {
if resp.StatusCode == 500 {
fmt.Printf("Volume: %s not found at namespace: %q\n", volname, namespace)
err = util.InternalServerError
} else if resp.StatusCode == 503 {
fmt.Printf("maya apiservice not reachable at %q\n", mapiserver.GetURL())
err = util.ServerUnavailable
} else if resp.StatusCode == 404 {
fmt.Printf("Volume: %s not found at namespace: %q error: %s\n", volname, namespace, http.StatusText(resp.StatusCode))
err = util.PageNotFound
}
fmt.Printf("Received an error from maya apiservice: statuscode: %d", resp.StatusCode)
err = fmt.Errorf("Received an error from maya apiservice: statuscode: %d", resp.StatusCode)
return
}
defer resp.Body.Close()
casVol := v1alpha1.CASVolume{}
err = json.NewDecoder(resp.Body).Decode(&casVol)
if err != nil {
fmt.Printf("Response decode failed: error '%+v'", err)
return
}
if casVol.Status.Reason == "pending" {
fmt.Println("VOLUME status Unknown to maya apiservice")
err = fmt.Errorf("VOLUME status Unknown to maya apiservice")
return
}
volInfo = &VolumeInfo{
Volume: casVol,
}
return
}
// GetCASType returns the CASType of the volume in lowercase
func (volInfo *VolumeInfo) GetCASType() string {
if len(volInfo.Volume.Spec.CasType) == 0 {
return string(JivaStorageEngine)
}
return strings.ToLower(volInfo.Volume.Spec.CasType)
}
// GetClusterIP returns the ClusterIP of the cluster
func (volInfo *VolumeInfo) GetClusterIP() string {
if val, ok := volInfo.Volume.ObjectMeta.Annotations["openebs.io/cluster-ips"]; ok {
return val
} else if val, ok := volInfo.Volume.ObjectMeta.Annotations["vsm.openebs.io/cluster-ips"]; ok {
return val
}
return ""
}
// GetControllerStatus returns the status of the volume controller
func (volInfo *VolumeInfo) GetControllerStatus() string {
if val, ok := volInfo.Volume.ObjectMeta.Annotations["openebs.io/controller-status"]; ok {
return val
} else if val, ok := volInfo.Volume.ObjectMeta.Annotations["vsm.openebs.io/controller-status"]; ok {
return val
}
return ""
}
// GetIQN returns the IQN of the volume
func (volInfo *VolumeInfo) GetIQN() string {
if len(volInfo.Volume.Spec.Iqn) > 0 {
return volInfo.Volume.Spec.Iqn
} else if val, ok := volInfo.Volume.ObjectMeta.Annotations["openebs.io/iqn"]; ok {
return val
} else if val, ok := volInfo.Volume.ObjectMeta.Annotations["vsm.openebs.io/iqn"]; ok {
return val
}
return ""
}
// GetVolumeName returns the volume name
func (volInfo *VolumeInfo) GetVolumeName() string {
return volInfo.Volume.ObjectMeta.Name
}
// GetTargetPortal returns the TargetPortal of the volume
func (volInfo *VolumeInfo) GetTargetPortal() string {
if len(volInfo.Volume.Spec.TargetPortal) > 0 {
return volInfo.Volume.Spec.TargetPortal
} else if val, ok := volInfo.Volume.ObjectMeta.Annotations["openebs.io/targetportals"]; ok {
return val
} else if val, ok := volInfo.Volume.ObjectMeta.Annotations["vsm.openebs.io/targetportals"]; ok {
return val
}
return ""
}
// GetVolumeSize returns the capacity of the volume
func (volInfo *VolumeInfo) GetVolumeSize() string {
if len(volInfo.Volume.Spec.Capacity) > 0 {
return volInfo.Volume.Spec.Capacity
} else if val, ok := volInfo.Volume.ObjectMeta.Annotations["openebs.io/volume-size"]; ok {
return val
} else if val, ok := volInfo.Volume.ObjectMeta.Annotations["vsm.openebs.io/volume-size"]; ok {
return val
}
return ""
}
// GetReplicaCount returns the volume replica count
func (volInfo *VolumeInfo) GetReplicaCount() string {
if len(volInfo.Volume.Spec.Replicas) > 0 {
return volInfo.Volume.Spec.Replicas
} else if val, ok := volInfo.Volume.ObjectMeta.Annotations["openebs.io/replica-count"]; ok {
return val
} else if val, ok := volInfo.Volume.ObjectMeta.Annotations["vsm.openebs.io/replica-count"]; ok {
return val
}
return ""
}
// GetReplicaStatus returns the replica status of the volume replica
func (volInfo *VolumeInfo) GetReplicaStatus() string {
if val, ok := volInfo.Volume.ObjectMeta.Annotations["openebs.io/replica-status"]; ok {
return val
} else if val, ok := volInfo.Volume.ObjectMeta.Annotations["vsm.openebs.io/replica-status"]; ok {
return val
}
return ""
}
// GetReplicaIP returns the IP of volume replica
func (volInfo *VolumeInfo) GetReplicaIP() string {
if val, ok := volInfo.Volume.ObjectMeta.Annotations["openebs.io/replica-ips"]; ok {
return val
} else if val, ok := volInfo.Volume.ObjectMeta.Annotations["vsm.openebs.io/replica-ips"]; ok {
return val
}
return ""
}
// GetStoragePool returns the name of the storage pool
func (volInfo *VolumeInfo) GetStoragePool() string {
if val, ok := volInfo.Volume.ObjectMeta.Annotations["openebs.io/pool-names"]; ok {
return val
}
return ""
}
// GetCVRName returns the name of the CVR
func (volInfo *VolumeInfo) GetCVRName() string {
if val, ok := volInfo.Volume.ObjectMeta.Annotations["openebs.io/cvr-names"]; ok {
return val
}
return ""
}
// GetNodeName returns the name of the node
func (volInfo *VolumeInfo) GetNodeName() string {
if val, ok := volInfo.Volume.ObjectMeta.Annotations["openebs.io/node-names"]; ok {
return val
}
return ""
}
| 1 | 9,366 | Can you check with @mahebbar how to work this error. Should be different from 404. | openebs-maya | go |
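The early returns in this hunk stop the specific errors (InternalServerError, ServerUnavailable, PageNotFound) from being overwritten by the generic fmt.Errorf that follows in the function, which also speaks to the review note that a 500 should read differently from a 404. As a hedged sketch only — the helper name is invented, and it assumes the fmt and util imports already present in this file — the mapping could be consolidated:

// Hypothetical helper, not project API: one place that maps each status
// code to its distinct error and never falls through to the generic case.
func errorForStatus(code int) error {
	switch code {
	case 500:
		return util.InternalServerError
	case 503:
		return util.ServerUnavailable
	case 404:
		return util.PageNotFound
	default:
		return fmt.Errorf("received an error from maya apiservice: statuscode: %d", code)
	}
}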
@@ -46,7 +46,7 @@ setup(
'spark': ['pyspark>=2.4.0'],
'mlflow': ['mlflow>=1.0'],
},
- python_requires='>=3.5,<3.8',
+ python_requires='>=3.5',
install_requires=[
'pandas>=0.23.2',
'pyarrow>=0.10', | 1 | #!/usr/bin/env python
#
# Copyright (C) 2019 Databricks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
from io import open
import sys
from setuptools import setup
from os import path
DESCRIPTION = "Koalas: pandas API on Apache Spark"
this_directory = path.abspath(path.dirname(__file__))
with open(path.join(this_directory, 'README.md'), encoding='utf-8') as f:
LONG_DESCRIPTION = f.read()
try:
exec(open('databricks/koalas/version.py').read())
except IOError:
print("Failed to load Koalas version file for packaging. You must be in Koalas root dir.",
file=sys.stderr)
sys.exit(-1)
VERSION = __version__ # noqa
setup(
name='koalas',
version=VERSION,
packages=['databricks', 'databricks.koalas', 'databricks.koalas.missing',
'databricks.koalas.spark', 'databricks.koalas.typedef',
'databricks.koalas.usage_logging'],
extras_require={
'spark': ['pyspark>=2.4.0'],
'mlflow': ['mlflow>=1.0'],
},
python_requires='>=3.5,<3.8',
install_requires=[
'pandas>=0.23.2',
'pyarrow>=0.10',
'numpy>=1.14',
'matplotlib>=3.0.0',
],
author="Databricks",
author_email="[email protected]",
license='http://www.apache.org/licenses/LICENSE-2.0',
url="https://github.com/databricks/koalas",
project_urls={
'Bug Tracker': 'https://github.com/databricks/koalas/issues',
'Documentation': 'https://koalas.readthedocs.io/',
'Source Code': 'https://github.com/databricks/koalas'
},
description=DESCRIPTION,
long_description=LONG_DESCRIPTION,
long_description_content_type='text/markdown',
classifiers=[
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
],
)
| 1 | 15,518 | Do we still need the upper bound `<3.9`? | databricks-koalas | py |
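For context on the version-bound question (the bound in this file is `<3.8`; the `<3.9` in the review presumably refers to a later revision): python_requires takes a PEP 440 specifier, so the ceiling is just one more clause, and dropping it lets pip install the package on future interpreters. Illustrative setup() keyword forms, not a recommendation:

python_requires='>=3.5,<3.8'  # with a ceiling: pip on Python 3.8+ refuses to install
python_requires='>=3.5'       # without: future interpreter versions are accepted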
@@ -229,7 +229,10 @@ function getPathsToCheck($f_paths): ?array
/** @var string */
$input_path = $input_paths[$i];
- if (realpath($input_path) === realpath(dirname(__DIR__) . DIRECTORY_SEPARATOR . 'psalm')
+ if (
+ realpath($input_path) === realpath(dirname(__DIR__, 3) . DIRECTORY_SEPARATOR . 'bin' . DIRECTORY_SEPARATOR . 'psalm')
+ || realpath($input_path) === realpath(dirname(__DIR__, 3) . DIRECTORY_SEPARATOR . 'bin' . DIRECTORY_SEPARATOR . 'psalter')
+ || realpath($input_path) === realpath(dirname(__DIR__) . DIRECTORY_SEPARATOR . 'psalm')
|| realpath($input_path) === realpath(dirname(__DIR__) . DIRECTORY_SEPARATOR . 'psalter')
|| realpath($input_path) === realpath(Phar::running(false))
) { | 1 | <?php
namespace Psalm;
use Composer\Autoload\ClassLoader;
use Phar;
use Psalm\Internal\Composer;
use function dirname;
use function strpos;
use function realpath;
use const DIRECTORY_SEPARATOR;
use function file_exists;
use function in_array;
use const PHP_EOL;
use function fwrite;
use const STDERR;
use function implode;
use function define;
use function json_decode;
use function file_get_contents;
use function is_array;
use function is_string;
use function count;
use function strlen;
use function substr;
use function stream_get_meta_data;
use const STDIN;
use function stream_set_blocking;
use function fgets;
use function preg_split;
use function trim;
use function is_dir;
use function preg_replace;
use function substr_replace;
use function file_put_contents;
use function ini_get;
use function preg_match;
use function strtoupper;
function requireAutoloaders(string $current_dir, bool $has_explicit_root, string $vendor_dir): ?ClassLoader
{
$autoload_roots = [$current_dir];
$psalm_dir = dirname(__DIR__);
/** @psalm-suppress UndefinedConstant */
$in_phar = Phar::running() || strpos(__NAMESPACE__, 'HumbugBox');
if ($in_phar) {
require_once(__DIR__ . '/../vendor/autoload.php');
// hack required for JsonMapper
require_once __DIR__ . '/../vendor/netresearch/jsonmapper/src/JsonMapper.php';
require_once __DIR__ . '/../vendor/netresearch/jsonmapper/src/JsonMapper/Exception.php';
}
if (realpath($psalm_dir) !== realpath($current_dir) && !$in_phar) {
$autoload_roots[] = $psalm_dir;
}
$autoload_files = [];
foreach ($autoload_roots as $autoload_root) {
$has_autoloader = false;
$nested_autoload_file = dirname($autoload_root, 2). DIRECTORY_SEPARATOR . 'autoload.php';
// note: don't realpath $nested_autoload_file, or phar version will fail
if (file_exists($nested_autoload_file)) {
if (!in_array($nested_autoload_file, $autoload_files, false)) {
$autoload_files[] = $nested_autoload_file;
}
$has_autoloader = true;
}
$vendor_autoload_file =
$autoload_root . DIRECTORY_SEPARATOR . $vendor_dir . DIRECTORY_SEPARATOR . 'autoload.php';
// note: don't realpath $vendor_autoload_file, or phar version will fail
if (file_exists($vendor_autoload_file)) {
if (!in_array($vendor_autoload_file, $autoload_files, false)) {
$autoload_files[] = $vendor_autoload_file;
}
$has_autoloader = true;
}
$composer_json_file = Composer::getJsonFilePath($autoload_root);
if (!$has_autoloader && file_exists($composer_json_file)) {
$error_message = 'Could not find any composer autoloaders in ' . $autoload_root;
if (!$has_explicit_root) {
$error_message .= PHP_EOL . 'Add a --root=[your/project/directory] flag '
. 'to specify a particular project to run Psalm on.';
}
fwrite(STDERR, $error_message . PHP_EOL);
exit(1);
}
}
$first_autoloader = null;
foreach ($autoload_files as $file) {
/**
* @psalm-suppress UnresolvableInclude
*
* @var mixed
*/
$autoloader = require_once $file;
if (!$first_autoloader
&& $autoloader instanceof ClassLoader
) {
$first_autoloader = $autoloader;
}
}
if ($first_autoloader === null && !$in_phar) {
if (!$autoload_files) {
fwrite(STDERR, 'Failed to find a valid Composer autoloader' . "\n");
} else {
fwrite(STDERR, 'Failed to find a valid Composer autoloader in ' . implode(', ', $autoload_files) . "\n");
}
fwrite(
STDERR,
'Please make sure you’ve run `composer install` in the current directory before using Psalm.' . "\n"
);
exit(1);
}
define('PSALM_VERSION', \PackageVersions\Versions::getVersion('vimeo/psalm'));
define('PHP_PARSER_VERSION', \PackageVersions\Versions::getVersion('nikic/php-parser'));
return $first_autoloader;
}
/**
* @psalm-suppress MixedArrayAccess
* @psalm-suppress MixedAssignment
* @psalm-suppress PossiblyUndefinedStringArrayOffset
*/
function getVendorDir(string $current_dir): string
{
$composer_json_path = Composer::getJsonFilePath($current_dir);
if (!file_exists($composer_json_path)) {
return 'vendor';
}
if (!$composer_json = json_decode(file_get_contents($composer_json_path), true)) {
fwrite(
STDERR,
'Invalid composer.json at ' . $composer_json_path . "\n"
);
exit(1);
}
if (is_array($composer_json)
&& isset($composer_json['config'])
&& is_array($composer_json['config'])
&& isset($composer_json['config']['vendor-dir'])
&& is_string($composer_json['config']['vendor-dir'])
) {
return $composer_json['config']['vendor-dir'];
}
return 'vendor';
}
/**
* @return list<string>
*/
function getArguments() : array
{
global $argv;
if (!$argv) {
return [];
}
$filtered_input_paths = [];
for ($i = 0, $iMax = count($argv); $i < $iMax; ++$i) {
$input_path = $argv[$i];
if (realpath($input_path) !== false) {
continue;
}
if ($input_path[0] === '-' && strlen($input_path) === 2) {
if ($input_path[1] === 'c' || $input_path[1] === 'f') {
++$i;
}
continue;
}
if ($input_path[0] === '-' && $input_path[2] === '=') {
continue;
}
$filtered_input_paths[] = $input_path;
}
return $filtered_input_paths;
}
/**
* @param string|array|null|false $f_paths
*
* @return list<string>|null
*/
function getPathsToCheck($f_paths): ?array
{
global $argv;
$paths_to_check = [];
if ($f_paths) {
$input_paths = is_array($f_paths) ? $f_paths : [$f_paths];
} else {
$input_paths = $argv ? $argv : null;
}
if ($input_paths) {
$filtered_input_paths = [];
for ($i = 0, $iMax = count($input_paths); $i < $iMax; ++$i) {
/** @var string */
$input_path = $input_paths[$i];
if (realpath($input_path) === realpath(dirname(__DIR__) . DIRECTORY_SEPARATOR . 'psalm')
|| realpath($input_path) === realpath(dirname(__DIR__) . DIRECTORY_SEPARATOR . 'psalter')
|| realpath($input_path) === realpath(Phar::running(false))
) {
continue;
}
if ($input_path[0] === '-' && strlen($input_path) === 2) {
if ($input_path[1] === 'c' || $input_path[1] === 'f') {
++$i;
}
continue;
}
if ($input_path[0] === '-' && $input_path[2] === '=') {
continue;
}
if (substr($input_path, 0, 2) === '--' && strlen($input_path) > 2) {
continue;
}
$filtered_input_paths[] = $input_path;
}
if ($filtered_input_paths === ['-']) {
$meta = stream_get_meta_data(STDIN);
stream_set_blocking(STDIN, false);
if ($stdin = fgets(STDIN)) {
$filtered_input_paths = preg_split('/\s+/', trim($stdin));
}
$blocked = $meta['blocked'];
stream_set_blocking(STDIN, $blocked);
}
foreach ($filtered_input_paths as $path_to_check) {
if ($path_to_check[0] === '-') {
fwrite(STDERR, 'Invalid usage, expecting psalm [options] [file...]' . PHP_EOL);
exit(1);
}
if (!file_exists($path_to_check)) {
fwrite(STDERR, 'Cannot locate ' . $path_to_check . PHP_EOL);
exit(1);
}
$path_to_check = realpath($path_to_check);
if (!$path_to_check) {
fwrite(STDERR, 'Error getting realpath for file' . PHP_EOL);
exit(1);
}
$paths_to_check[] = $path_to_check;
}
if (!$paths_to_check) {
$paths_to_check = null;
}
}
return $paths_to_check;
}
/**
* @psalm-pure
*/
function getPsalmHelpText(): string
{
return <<<HELP
Usage:
psalm [options] [file...]
Basic configuration:
-c, --config=psalm.xml
Path to a psalm.xml configuration file. Run psalm --init to create one.
--use-ini-defaults
Use PHP-provided ini defaults for memory and error display
--memory-limit=LIMIT
Use a specific memory limit. Cannot be combined with --use-ini-defaults
--disable-extension=[extension]
Used to disable certain extensions while Psalm is running.
--threads=INT
If greater than one, Psalm will run analysis on multiple threads, speeding things up.
--no-diff
Turns off Psalm’s diff mode, checks all files regardless of whether they’ve changed.
Surfacing issues:
--show-info[=BOOLEAN]
Show non-exception parser findings (defaults to false).
--show-snippet[=true]
Show code snippets with errors. Options are 'true' or 'false'
--find-dead-code[=auto]
--find-unused-code[=auto]
Look for unused code. Options are 'auto' or 'always'. If no value is specified, default is 'auto'
--find-unused-psalm-suppress
Finds all @psalm-suppress annotations that aren’t used
--find-references-to=[class|method|property]
Searches the codebase for references to the given fully-qualified class or method,
where method is in the format class::methodName
--no-suggestions
Hide suggestions
--taint-analysis
Run Psalm in taint analysis mode – see https://psalm.dev/docs/security_analysis for more info
--dump-taint-graph=OUTPUT_PATH
Output the taint graph using the DOT language – requires --taint-analysis
Issue baselines:
--set-baseline=PATH
Save all current error level issues to a file, to mark them as info in subsequent runs
Add --include-php-versions to also include a list of PHP extension versions
--use-baseline=PATH
Allows you to use a baseline other than the default baseline provided in your config
--ignore-baseline
Ignore the error baseline
--update-baseline
Update the baseline by removing fixed issues. This will not add new issues to the baseline
Add --include-php-versions to also include a list of PHP extension versions
Plugins:
--plugin=PATH
Executes a plugin, an alternative to using the Psalm config
Output:
-m, --monochrome
Enable monochrome output
--output-format=console
Changes the output format.
Available formats: compact, console, text, emacs, json, pylint, xml, checkstyle, junit, sonarqube, github,
phpstorm, codeclimate
--no-progress
Disable the progress indicator
--long-progress
Use a progress indicator suitable for Continuous Integration logs
--stats
Shows a breakdown of Psalm’s ability to infer types in the codebase
Reports:
--report=PATH
The path where to output report file. The output format is based on the file extension.
(Currently supported formats: ".json", ".xml", ".txt", ".emacs", ".pylint", ".console",
".sarif", "checkstyle.xml", "sonarqube.json", "codeclimate.json", "summary.json", "junit.xml")
--report-show-info[=BOOLEAN]
Whether the report should include non-errors in its output (defaults to true)
Caching:
--clear-cache
Clears all cache files that Psalm uses for this specific project
--clear-global-cache
Clears all cache files that Psalm uses for all projects
--no-cache
Runs Psalm without using cache
--no-reflection-cache
Runs Psalm without using cached representations of unchanged classes and files.
Useful if you want the afterClassLikeVisit plugin hook to run every time you visit a file.
--no-file-cache
Runs Psalm without using caching every single file for later diffing.
This reduces the space Psalm uses on disk and file I/O.
Miscellaneous:
-h, --help
Display this help message
-v, --version
Display the Psalm version
-i, --init [source_dir=src] [level=3]
Create a psalm config file in the current directory that points to [source_dir]
at the required level, from 1, most strict, to 8, most permissive.
--debug
Debug information
--debug-by-line
Debug information on a line-by-line level
--debug-emitted-issues
Print a php backtrace to stderr when emitting issues.
-r, --root
If running Psalm globally you’ll need to specify a project root. Defaults to cwd
--generate-json-map=PATH
Generate a map of node references and types in JSON format, saved to the given path.
--generate-stubs=PATH
Generate stubs for the project and dump the file in the given path
--shepherd[=host]
Send data to Shepherd, Psalm’s GitHub integration tool.
--alter
Run Psalter
--language-server
Run Psalm Language Server
HELP;
}
function initialiseConfig(
?string $path_to_config,
string $current_dir,
string $output_format,
?ClassLoader $first_autoloader,
bool $create_if_non_existent = false
): Config {
try {
if ($path_to_config) {
$config = Config::loadFromXMLFile($path_to_config, $current_dir);
} else {
try {
$config = Config::getConfigForPath($current_dir, $current_dir);
} catch (\Psalm\Exception\ConfigNotFoundException $e) {
if (!$create_if_non_existent) {
if (in_array($output_format, [\Psalm\Report::TYPE_CONSOLE, \Psalm\Report::TYPE_PHP_STORM])) {
fwrite(
STDERR,
'Could not locate a config XML file in path ' . $current_dir
. '. Have you run \'psalm --init\' ?' . PHP_EOL
);
exit(1);
}
throw $e;
}
$config = \Psalm\Config\Creator::createBareConfig(
$current_dir,
null,
\Psalm\getVendorDir($current_dir)
);
}
}
} catch (\Psalm\Exception\ConfigException $e) {
fwrite(
STDERR,
$e->getMessage() . PHP_EOL
);
exit(1);
}
$config->setComposerClassLoader($first_autoloader);
return $config;
}
function update_config_file(Config $config, string $config_file_path, string $baseline_path) : void
{
if ($config->error_baseline === $baseline_path) {
return;
}
$configFile = $config_file_path;
if (is_dir($config_file_path)) {
$configFile = Config::locateConfigFile($config_file_path);
}
if (!$configFile) {
fwrite(STDERR, "Don't forget to set errorBaseline=\"{$baseline_path}\" to your config.");
return;
}
$configFileContents = file_get_contents($configFile);
if ($config->error_baseline) {
$amendedConfigFileContents = preg_replace(
'/errorBaseline=".*?"/',
"errorBaseline=\"{$baseline_path}\"",
$configFileContents
);
} else {
$endPsalmOpenTag = strpos($configFileContents, '>', (int)strpos($configFileContents, '<psalm'));
if (!$endPsalmOpenTag) {
fwrite(STDERR, " Don't forget to set errorBaseline=\"{$baseline_path}\" in your config.");
return;
}
if ($configFileContents[$endPsalmOpenTag - 1] === "\n") {
$amendedConfigFileContents = substr_replace(
$configFileContents,
" errorBaseline=\"{$baseline_path}\"\n>",
$endPsalmOpenTag,
1
);
} else {
$amendedConfigFileContents = substr_replace(
$configFileContents,
" errorBaseline=\"{$baseline_path}\">",
$endPsalmOpenTag,
1
);
}
}
file_put_contents($configFile, $amendedConfigFileContents);
}
function get_path_to_config(array $options): ?string
{
$path_to_config = isset($options['c']) && is_string($options['c']) ? realpath($options['c']) : null;
if ($path_to_config === false) {
fwrite(STDERR, 'Could not resolve path to config ' . (string) ($options['c'] ?? '') . PHP_EOL);
exit(1);
}
return $path_to_config;
}
/**
* @psalm-pure
*/
function getMemoryLimitInBytes(): int
{
$limit = ini_get('memory_limit');
// for unlimited = -1
if ($limit < 0) {
return -1;
}
if (preg_match('/^(\d+)(\D?)$/', $limit, $matches)) {
$limit = (int)$matches[1];
switch (strtoupper($matches[2] ?? '')) {
case 'G':
$limit *= 1024 * 1024 * 1024;
break;
case 'M':
$limit *= 1024 * 1024;
break;
case 'K':
$limit *= 1024;
break;
}
}
return (int)$limit;
}
| 1 | 10,344 | Does this mean `vendor/bin/psalm` is not a symlink (or whatever equivalent Windows has for symlinks) on Windows? | vimeo-psalm | php |
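On the symlink question: Composer on Windows generates proxy .bat files in vendor/bin rather than symlinks, so realpath() of vendor/bin/psalm would not resolve back to this script — which is presumably why the hunk matches the bin/ paths explicitly. A hedged sketch of consolidating the growing list of comparisons (invented helper name; assumes the Phar import and DIRECTORY_SEPARATOR constant already at the top of this file):

// Hypothetical helper, not Psalm's actual code: the candidate list mirrors
// the five comparisons above, and the empty-string guard covers
// Phar::running(false) returning '' when not running inside a phar.
function isPsalmBinaryPath(string $input_path): bool
{
    $candidates = [
        dirname(__DIR__, 3) . DIRECTORY_SEPARATOR . 'bin' . DIRECTORY_SEPARATOR . 'psalm',
        dirname(__DIR__, 3) . DIRECTORY_SEPARATOR . 'bin' . DIRECTORY_SEPARATOR . 'psalter',
        dirname(__DIR__) . DIRECTORY_SEPARATOR . 'psalm',
        dirname(__DIR__) . DIRECTORY_SEPARATOR . 'psalter',
        Phar::running(false),
    ];
    foreach ($candidates as $candidate) {
        if ($candidate !== '' && realpath($input_path) === realpath($candidate)) {
            return true;
        }
    }
    return false;
}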
@@ -1,4 +1,4 @@
-package aws_test
+package aws
import (
"fmt" | 1 | package aws_test
import (
"fmt"
"os"
"testing"
"github.com/aws/aws-sdk-go/service/ec2"
"github.com/aws/aws-sdk-go/service/opsworks"
"github.com/libopenstorage/openstorage/pkg/storageops"
"github.com/libopenstorage/openstorage/pkg/storageops/aws"
"github.com/libopenstorage/openstorage/pkg/storageops/test"
uuid "github.com/satori/go.uuid"
)
const (
newDiskSizeInGB = 10
newDiskPrefix = "openstorage-test"
)
var diskName = fmt.Sprintf("%s-%s", newDiskPrefix, uuid.NewV4())
func TestAll(t *testing.T) {
drivers := make(map[string]storageops.Ops)
diskTemplates := make(map[string]map[string]interface{})
if d, err := aws.NewEnvClient(); err != aws.ErrAWSEnvNotAvailable {
volType := opsworks.VolumeTypeGp2
volSize := int64(newDiskSizeInGB)
zone := os.Getenv("AWS_ZONE")
ebsVol := &ec2.Volume{
AvailabilityZone: &zone,
VolumeType: &volType,
Size: &volSize,
}
drivers[d.Name()] = d
diskTemplates[d.Name()] = map[string]interface{}{
diskName: ebsVol,
}
} else {
fmt.Printf("skipping AWS tests as environment is not set...\n")
}
test.RunTest(drivers, diskTemplates, t)
}
| 1 | 6,540 | @lpabon having a separate package name `aws_test` allows to test the package as if the tester was an external package. If the test package name is the same as the package being tested, the test package can also use methods and variables not exposed to the eventual user. | libopenstorage-openstorage | go |
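The black-box testing point in this review is usually paired with the export_test.go idiom. A generic Go sketch, not code from this repository — newEnvClient is a made-up unexported name:

// file: aws/export_test.go — compiled only under `go test`, lives in
// package aws, and deliberately re-exports one internal for tests.
package aws

var NewEnvClientForTest = newEnvClient // hypothetical unexported constructor

// file: aws/aws_test.go — external test package: it sees only exported
// identifiers, so it exercises the package the way a real consumer would.
package aws_test

import (
	"testing"

	"github.com/libopenstorage/openstorage/pkg/storageops/aws"
)

func TestExported(t *testing.T) {
	_ = aws.NewEnvClientForTest // reachable only because of the shim above
}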
@@ -54,8 +54,6 @@ func RunEndToEndTest(ctx context.Context, t *testing.T, exp *otlpmetric.Exporter
instruments := map[string]data{
"test-int64-counter": {sdkapi.CounterInstrumentKind, number.Int64Kind, 1},
"test-float64-counter": {sdkapi.CounterInstrumentKind, number.Float64Kind, 1},
- "test-int64-histogram": {sdkapi.HistogramInstrumentKind, number.Int64Kind, 2},
- "test-float64-histogram": {sdkapi.HistogramInstrumentKind, number.Float64Kind, 2},
"test-int64-gaugeobserver": {sdkapi.GaugeObserverInstrumentKind, number.Int64Kind, 3},
"test-float64-gaugeobserver": {sdkapi.GaugeObserverInstrumentKind, number.Float64Kind, 3},
} | 1 | // Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package otlpmetrictest // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal/otlpmetrictest"
import (
"context"
"fmt"
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/exporters/otlp/otlpmetric"
"go.opentelemetry.io/otel/metric"
"go.opentelemetry.io/otel/metric/number"
"go.opentelemetry.io/otel/metric/sdkapi"
"go.opentelemetry.io/otel/sdk/export/metric/aggregation"
controller "go.opentelemetry.io/otel/sdk/metric/controller/basic"
processor "go.opentelemetry.io/otel/sdk/metric/processor/basic"
"go.opentelemetry.io/otel/sdk/metric/selector/simple"
metricpb "go.opentelemetry.io/proto/otlp/metrics/v1"
)
// RunEndToEndTest can be used by protocol driver tests to validate
// themselves.
func RunEndToEndTest(ctx context.Context, t *testing.T, exp *otlpmetric.Exporter, mcMetrics Collector) {
selector := simple.NewWithInexpensiveDistribution()
proc := processor.NewFactory(selector, aggregation.StatelessTemporalitySelector())
cont := controller.New(proc, controller.WithExporter(exp))
require.NoError(t, cont.Start(ctx))
meter := cont.Meter("test-meter")
labels := []attribute.KeyValue{attribute.Bool("test", true)}
type data struct {
iKind sdkapi.InstrumentKind
nKind number.Kind
val int64
}
instruments := map[string]data{
"test-int64-counter": {sdkapi.CounterInstrumentKind, number.Int64Kind, 1},
"test-float64-counter": {sdkapi.CounterInstrumentKind, number.Float64Kind, 1},
"test-int64-histogram": {sdkapi.HistogramInstrumentKind, number.Int64Kind, 2},
"test-float64-histogram": {sdkapi.HistogramInstrumentKind, number.Float64Kind, 2},
"test-int64-gaugeobserver": {sdkapi.GaugeObserverInstrumentKind, number.Int64Kind, 3},
"test-float64-gaugeobserver": {sdkapi.GaugeObserverInstrumentKind, number.Float64Kind, 3},
}
for name, data := range instruments {
data := data
switch data.iKind {
case sdkapi.CounterInstrumentKind:
switch data.nKind {
case number.Int64Kind:
metric.Must(meter).NewInt64Counter(name).Add(ctx, data.val, labels...)
case number.Float64Kind:
metric.Must(meter).NewFloat64Counter(name).Add(ctx, float64(data.val), labels...)
default:
assert.Failf(t, "unsupported number testing kind", data.nKind.String())
}
case sdkapi.HistogramInstrumentKind:
switch data.nKind {
case number.Int64Kind:
metric.Must(meter).NewInt64Histogram(name).Record(ctx, data.val, labels...)
case number.Float64Kind:
metric.Must(meter).NewFloat64Histogram(name).Record(ctx, float64(data.val), labels...)
default:
assert.Failf(t, "unsupported number testing kind", data.nKind.String())
}
case sdkapi.GaugeObserverInstrumentKind:
switch data.nKind {
case number.Int64Kind:
metric.Must(meter).NewInt64GaugeObserver(name,
func(_ context.Context, result metric.Int64ObserverResult) {
result.Observe(data.val, labels...)
},
)
case number.Float64Kind:
callback := func(v float64) metric.Float64ObserverFunc {
return metric.Float64ObserverFunc(func(_ context.Context, result metric.Float64ObserverResult) { result.Observe(v, labels...) })
}(float64(data.val))
metric.Must(meter).NewFloat64GaugeObserver(name, callback)
default:
assert.Failf(t, "unsupported number testing kind", data.nKind.String())
}
default:
assert.Failf(t, "unsupported metrics testing kind", data.iKind.String())
}
}
// Flush and close.
require.NoError(t, cont.Stop(ctx))
// Wait >2 cycles.
<-time.After(40 * time.Millisecond)
// Now shutdown the exporter
ctx, cancel := context.WithTimeout(ctx, 10*time.Second)
defer cancel()
if err := exp.Shutdown(ctx); err != nil {
t.Fatalf("failed to stop the exporter: %v", err)
}
// Shutdown the collector too so that we can begin
// verification checks of expected data back.
_ = mcMetrics.Stop()
metrics := mcMetrics.GetMetrics()
assert.Len(t, metrics, len(instruments), "not enough metrics exported")
seen := make(map[string]struct{}, len(instruments))
for _, m := range metrics {
data, ok := instruments[m.Name]
if !ok {
assert.Failf(t, "unknown metrics", m.Name)
continue
}
seen[m.Name] = struct{}{}
switch data.iKind {
case sdkapi.CounterInstrumentKind, sdkapi.GaugeObserverInstrumentKind:
var dp []*metricpb.NumberDataPoint
switch data.iKind {
case sdkapi.CounterInstrumentKind:
require.NotNil(t, m.GetSum())
dp = m.GetSum().GetDataPoints()
case sdkapi.GaugeObserverInstrumentKind:
require.NotNil(t, m.GetGauge())
dp = m.GetGauge().GetDataPoints()
}
if assert.Len(t, dp, 1) {
switch data.nKind {
case number.Int64Kind:
v := &metricpb.NumberDataPoint_AsInt{AsInt: data.val}
assert.Equal(t, v, dp[0].Value, "invalid value for %q", m.Name)
case number.Float64Kind:
v := &metricpb.NumberDataPoint_AsDouble{AsDouble: float64(data.val)}
assert.Equal(t, v, dp[0].Value, "invalid value for %q", m.Name)
}
}
case sdkapi.HistogramInstrumentKind:
require.NotNil(t, m.GetSummary())
if dp := m.GetSummary().DataPoints; assert.Len(t, dp, 1) {
count := dp[0].Count
assert.Equal(t, uint64(1), count, "invalid count for %q", m.Name)
assert.Equal(t, float64(data.val*int64(count)), dp[0].Sum, "invalid sum for %q (value %d)", m.Name, data.val)
}
default:
assert.Failf(t, "invalid metrics kind", data.iKind.String())
}
}
for i := range instruments {
if _, ok := seen[i]; !ok {
assert.Fail(t, fmt.Sprintf("no metric(s) exported for %q", i))
}
}
}
| 1 | 17,085 | The exporter should still be able to test these histogram instrument kinds, right? Is there another reason to remove these? | open-telemetry-opentelemetry-go | go |
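On whether histograms could stay in this test: one plausible motivation for removing them is that the assertion path above routes HistogramInstrumentKind through GetSummary(), which only holds while the selector aggregates them that way. If the selector instead emitted true OTLP histograms, the check might look like the sketch below — hedged, since it assumes the proto's Histogram accessors and reuses this file's imports (testing, require, assert, metricpb):

// Hypothetical assertion, not the project's code: validates a histogram
// exported as an OTLP Histogram rather than a Summary.
func checkHistogram(t *testing.T, m *metricpb.Metric, val int64) {
	require.NotNil(t, m.GetHistogram())
	if dp := m.GetHistogram().GetDataPoints(); assert.Len(t, dp, 1) {
		assert.Equal(t, uint64(1), dp[0].GetCount(), "invalid count for %q", m.Name)
		assert.Equal(t, float64(val), dp[0].GetSum(), "invalid sum for %q", m.Name)
	}
}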
@@ -274,6 +274,13 @@ func (eval *BlockEvaluator) Round() basics.Round {
return eval.block.Round()
}
+// ResetTxnBytes resets the number of bytes tracked by the BlockEvaluator to
+// zero. This is a specialized operation used by the transaction pool to
+// simulate the effect of putting pending transactions in multiple blocks.
+func (eval *BlockEvaluator) ResetTxnBytes(validateTxnBytes bool) {
+ eval.totalTxBytes = 0
+}
+
// Transaction tentatively adds a new transaction as part of this block evaluation.
// If the transaction cannot be added to the block without violating some constraints,
// an error is returned and the block evaluator state is unchanged. | 1 | // Copyright (C) 2019 Algorand, Inc.
// This file is part of go-algorand
//
// go-algorand is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as
// published by the Free Software Foundation, either version 3 of the
// License, or (at your option) any later version.
//
// go-algorand is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
package ledger
import (
"context"
"errors"
"fmt"
"github.com/algorand/go-algorand/config"
"github.com/algorand/go-algorand/crypto"
"github.com/algorand/go-algorand/data/basics"
"github.com/algorand/go-algorand/data/bookkeeping"
"github.com/algorand/go-algorand/data/committee"
"github.com/algorand/go-algorand/data/transactions"
"github.com/algorand/go-algorand/logging"
"github.com/algorand/go-algorand/protocol"
"github.com/algorand/go-algorand/util/execpool"
)
// ErrNoSpace indicates insufficient space for transaction in block
var ErrNoSpace = errors.New("block does not have space for transaction")
// evalAux is left after removing explicit reward claims,
// in case we need this infrastructure in the future.
type evalAux struct {
}
// VerifiedTxnCache captures the interface for a cache of previously
// verified transactions. This is expected to match the transaction
// pool object.
type VerifiedTxnCache interface {
Verified(txn transactions.SignedTxn) bool
}
type roundCowBase struct {
l ledgerForEvaluator
// The round number of the previous block, for looking up prior state.
rnd basics.Round
}
func (x *roundCowBase) lookup(addr basics.Address) (basics.AccountData, error) {
return x.l.lookupWithoutRewards(x.rnd, addr)
}
func (x *roundCowBase) isDup(firstValid basics.Round, txid transactions.Txid) (bool, error) {
return x.l.isDup(firstValid, x.rnd, txid)
}
// wrappers for roundCowState to satisfy the (current) transactions.Balances interface
func (cs *roundCowState) Get(addr basics.Address) (basics.BalanceRecord, error) {
acctdata, err := cs.lookup(addr)
if err != nil {
return basics.BalanceRecord{}, err
}
acctdata = acctdata.WithUpdatedRewards(cs.proto, cs.rewardsLevel())
return basics.BalanceRecord{Addr: addr, AccountData: acctdata}, nil
}
func (cs *roundCowState) Put(record basics.BalanceRecord) error {
olddata, err := cs.lookup(record.Addr)
if err != nil {
return err
}
cs.put(record.Addr, olddata, record.AccountData)
return nil
}
func (cs *roundCowState) Move(from basics.Address, to basics.Address, amt basics.MicroAlgos, fromRewards *basics.MicroAlgos, toRewards *basics.MicroAlgos) error {
rewardlvl := cs.rewardsLevel()
fromBal, err := cs.lookup(from)
if err != nil {
return err
}
fromBalNew := fromBal.WithUpdatedRewards(cs.proto, rewardlvl)
if fromRewards != nil {
var ot basics.OverflowTracker
newFromRewards := ot.AddA(*fromRewards, ot.SubA(fromBalNew.MicroAlgos, fromBal.MicroAlgos))
if ot.Overflowed {
return fmt.Errorf("overflowed tracking of fromRewards for account %v: %d + (%d - %d)", from, *fromRewards, fromBalNew.MicroAlgos, fromBal.MicroAlgos)
}
*fromRewards = newFromRewards
}
var overflowed bool
fromBalNew.MicroAlgos, overflowed = basics.OSubA(fromBalNew.MicroAlgos, amt)
if overflowed {
return fmt.Errorf("overspend (account %v, data %+v, tried to spend %v)", from, fromBal, amt)
}
cs.put(from, fromBal, fromBalNew)
toBal, err := cs.lookup(to)
if err != nil {
return err
}
toBalNew := toBal.WithUpdatedRewards(cs.proto, rewardlvl)
if toRewards != nil {
var ot basics.OverflowTracker
newToRewards := ot.AddA(*toRewards, ot.SubA(toBalNew.MicroAlgos, toBal.MicroAlgos))
if ot.Overflowed {
return fmt.Errorf("overflowed tracking of toRewards for account %v: %d + (%d - %d)", to, *toRewards, toBalNew.MicroAlgos, toBal.MicroAlgos)
}
*toRewards = newToRewards
}
toBalNew.MicroAlgos, overflowed = basics.OAddA(toBalNew.MicroAlgos, amt)
if overflowed {
return fmt.Errorf("balance overflow (account %v, data %+v, was going to receive %v)", to, toBal, amt)
}
cs.put(to, toBal, toBalNew)
return nil
}
func (cs *roundCowState) ConsensusParams() config.ConsensusParams {
return cs.proto
}
// BlockEvaluator represents an in-progress evaluation of a block
// against the ledger.
type BlockEvaluator struct {
state *roundCowState
aux *evalAux
validate bool
generate bool
txcache VerifiedTxnCache
prevHeader bookkeeping.BlockHeader // cached
proto config.ConsensusParams
genesisHash crypto.Digest
block bookkeeping.Block
totalTxBytes int
verificationPool execpool.BacklogPool
}
type ledgerForEvaluator interface {
GenesisHash() crypto.Digest
BlockHdr(basics.Round) (bookkeeping.BlockHeader, error)
Lookup(basics.Round, basics.Address) (basics.AccountData, error)
Totals(basics.Round) (AccountTotals, error)
isDup(basics.Round, basics.Round, transactions.Txid) (bool, error)
lookupWithoutRewards(basics.Round, basics.Address) (basics.AccountData, error)
}
// StartEvaluator creates a BlockEvaluator, given a ledger and a block header
// of the block that the caller is planning to evaluate.
func (l *Ledger) StartEvaluator(hdr bookkeeping.BlockHeader, txcache VerifiedTxnCache, executionPool execpool.BacklogPool) (*BlockEvaluator, error) {
return startEvaluator(l, hdr, nil, true, true, txcache, executionPool)
}
func startEvaluator(l ledgerForEvaluator, hdr bookkeeping.BlockHeader, aux *evalAux, validate bool, generate bool, txcache VerifiedTxnCache, executionPool execpool.BacklogPool) (*BlockEvaluator, error) {
proto, ok := config.Consensus[hdr.CurrentProtocol]
if !ok {
return nil, ProtocolError(hdr.CurrentProtocol)
}
if aux == nil {
aux = &evalAux{}
}
base := &roundCowBase{
l: l,
// round that lookups come from is previous block. We validate
// the block at this round below, so underflow will be caught.
// If we are not validating, we must have previously checked
// an agreement.Certificate attesting that hdr is valid.
rnd: hdr.Round - 1,
}
eval := &BlockEvaluator{
aux: aux,
validate: validate,
generate: generate,
txcache: txcache,
block: bookkeeping.Block{BlockHeader: hdr},
proto: proto,
genesisHash: l.GenesisHash(),
verificationPool: executionPool,
}
if hdr.Round > 0 {
var err error
eval.prevHeader, err = l.BlockHdr(base.rnd)
if err != nil {
return nil, fmt.Errorf("can't evaluate block %v without previous header: %v", hdr.Round, err)
}
}
prevTotals, err := l.Totals(eval.prevHeader.Round)
if err != nil {
return nil, err
}
poolAddr := eval.prevHeader.RewardsPool
incentivePoolData, err := l.Lookup(eval.prevHeader.Round, poolAddr)
if err != nil {
return nil, err
}
if generate {
if eval.proto.SupportGenesisHash {
eval.block.BlockHeader.GenesisHash = eval.genesisHash
}
eval.block.BlockHeader.RewardsState = eval.prevHeader.NextRewardsState(hdr.Round, proto, incentivePoolData.MicroAlgos, prevTotals.RewardUnits())
}
// set the eval state with the current header
eval.state = makeRoundCowState(base, eval.block.BlockHeader)
if validate {
err := eval.block.BlockHeader.PreCheck(eval.prevHeader)
if err != nil {
return nil, err
}
// Check that the rewards rate, level and residue match expected values
expectedRewardsState := eval.prevHeader.NextRewardsState(hdr.Round, proto, incentivePoolData.MicroAlgos, prevTotals.RewardUnits())
if eval.block.RewardsState != expectedRewardsState {
return nil, fmt.Errorf("bad rewards state: %+v != %+v", eval.block.RewardsState, expectedRewardsState)
}
// For backwards compatibility: introduce Genesis Hash value
if eval.proto.SupportGenesisHash && eval.block.BlockHeader.GenesisHash != eval.genesisHash {
return nil, fmt.Errorf("wrong genesis hash: %s != %s", eval.block.BlockHeader.GenesisHash, eval.genesisHash)
}
}
// Withdraw rewards from the incentive pool
var ot basics.OverflowTracker
rewardsPerUnit := ot.Sub(eval.block.BlockHeader.RewardsLevel, eval.prevHeader.RewardsLevel)
poolOld, err := eval.state.Get(poolAddr)
if err != nil {
return nil, err
}
poolNew := poolOld
poolNew.MicroAlgos = ot.SubA(poolOld.MicroAlgos, basics.MicroAlgos{Raw: ot.Mul(prevTotals.RewardUnits(), rewardsPerUnit)})
err = eval.state.Put(poolNew)
if err != nil {
return nil, err
}
// ensure that we have at least MinBalance after withdrawing rewards
ot.SubA(poolNew.MicroAlgos, basics.MicroAlgos{Raw: proto.MinBalance})
if ot.Overflowed {
// TODO this should never happen; should we panic here?
return nil, fmt.Errorf("overflowed subtracting rewards for block %v", hdr.Round)
}
return eval, nil
}
// Round returns the round number of the block being evaluated by the BlockEvaluator.
func (eval *BlockEvaluator) Round() basics.Round {
return eval.block.Round()
}
// Transaction tentatively adds a new transaction as part of this block evaluation.
// If the transaction cannot be added to the block without violating some constraints,
// an error is returned and the block evaluator state is unchanged.
func (eval *BlockEvaluator) Transaction(txn transactions.SignedTxn, ad *transactions.ApplyData) error {
var err error
var thisTxBytes int
cow := eval.state.child()
spec := transactions.SpecialAddresses{
FeeSink: eval.block.BlockHeader.FeeSink,
RewardsPool: eval.block.BlockHeader.RewardsPool,
}
if eval.validate {
// Transaction valid (not expired)?
err = txn.Txn.Alive(eval.block)
if err != nil {
return err
}
// Transaction already in the ledger?
dup, err := cow.isDup(txn.Txn.First(), txn.ID())
if err != nil {
return err
}
if dup {
return TransactionInLedgerError{txn.ID()}
}
// Well-formed on its own?
err = txn.Txn.WellFormed(spec, eval.proto)
if err != nil {
return fmt.Errorf("transaction %v: malformed: %v", txn.ID(), err)
}
// Properly signed?
if eval.txcache == nil || !eval.txcache.Verified(txn) {
err = txn.PoolVerify(spec, eval.proto, eval.verificationPool)
if err != nil {
return fmt.Errorf("transaction %v: failed to verify: %v", txn.ID(), err)
}
}
}
// Apply the transaction, updating the cow balances
applyData, err := txn.Txn.Apply(cow, spec)
if err != nil {
return fmt.Errorf("transaction %v: %v", txn.ID(), err)
}
// Validate applyData if we are validating an existing block.
// If we are validating and generating, we have no ApplyData yet.
if eval.validate && !eval.generate {
if ad == nil {
return fmt.Errorf("transaction %v: no applyData for validation", txn.ID())
}
if eval.proto.ApplyData {
if *ad != applyData {
return fmt.Errorf("transaction %v: applyData mismatch: %v != %v", txn.ID(), *ad, applyData)
}
} else {
if *ad != (transactions.ApplyData{}) {
return fmt.Errorf("transaction %v: applyData not supported", txn.ID())
}
}
}
// Check if the transaction fits in the block, now that we can encode it.
txib, err := eval.block.EncodeSignedTxn(txn, applyData)
if err != nil {
return err
}
if eval.validate {
thisTxBytes = len(protocol.Encode(txib))
if eval.totalTxBytes+thisTxBytes > eval.proto.MaxTxnBytesPerBlock {
return ErrNoSpace
}
}
// Check if any affected accounts dipped below MinBalance (unless they are
// completely zero, which means the account will be deleted.)
rewardlvl := cow.rewardsLevel()
for _, addr := range cow.modifiedAccounts() {
data, err := cow.lookup(addr)
if err != nil {
return err
}
// It's always OK to have the account move to an empty state,
// because the accounts DB can delete it. Otherwise, we will
// enforce MinBalance.
if data == (basics.AccountData{}) {
continue
}
// Skip FeeSink and RewardsPool MinBalance checks here.
// There's only two accounts, so space isn't an issue, and we don't
// expect them to have low balances, but if they do, it may cause
// surprises for every transaction.
if addr == spec.FeeSink || addr == spec.RewardsPool {
continue
}
dataNew := data.WithUpdatedRewards(eval.proto, rewardlvl)
if dataNew.MicroAlgos.Raw < eval.proto.MinBalance {
return fmt.Errorf("transaction %v: account %v balance %d below min %d",
txn.ID(), addr, dataNew.MicroAlgos.Raw, eval.proto.MinBalance)
}
}
// Remember this TXID (to detect duplicates)
cow.addTx(txn.ID())
eval.block.Payset = append(eval.block.Payset, txib)
eval.totalTxBytes += thisTxBytes
cow.commitToParent()
return nil
}
// Call "endOfBlock" after all the block's rewards and transactions are processed. Applies any deferred balance updates.
func (eval *BlockEvaluator) endOfBlock() error {
cow := eval.state.child()
if eval.generate {
eval.block.TxnRoot = eval.block.Payset.Commit(eval.proto.PaysetCommitFlat)
}
cow.commitToParent()
return nil
}
// FinalValidation does the validation that must happen after the block is built and all state updates are computed
func (eval *BlockEvaluator) finalValidation() error {
if eval.validate {
// check commitments
txnRoot := eval.block.Payset.Commit(eval.proto.PaysetCommitFlat)
if txnRoot != eval.block.TxnRoot {
return fmt.Errorf("txn root wrong: %v != %v", txnRoot, eval.block.TxnRoot)
}
}
return nil
}
// GenerateBlock produces a complete block from the BlockEvaluator. This is
// used during proposal to get an actual block that will be proposed, after
// feeding in tentative transactions into this block evaluator.
func (eval *BlockEvaluator) GenerateBlock() (*ValidatedBlock, error) {
if !eval.generate {
logging.Base().Panicf("GenerateBlock() called but generate is false")
}
err := eval.endOfBlock()
if err != nil {
return nil, err
}
err = eval.finalValidation()
if err != nil {
return nil, err
}
vb := ValidatedBlock{
blk: eval.block,
delta: eval.state.mods,
aux: *eval.aux,
}
return &vb, nil
}
func (l *Ledger) eval(ctx context.Context, blk bookkeeping.Block, aux *evalAux, validate bool, txcache VerifiedTxnCache, executionPool execpool.BacklogPool) (stateDelta, evalAux, error) {
eval, err := startEvaluator(l, blk.BlockHeader, aux, validate, false, txcache, executionPool)
if err != nil {
return stateDelta{}, evalAux{}, err
}
// TODO: batch tx sig verification: ingest blk.Payset and output a list of ValidatedTx
// Next, transactions
payset, err := blk.DecodePaysetWithAD()
if err != nil {
return stateDelta{}, evalAux{}, err
}
for _, txn := range payset {
select {
case <-ctx.Done():
return stateDelta{}, evalAux{}, ctx.Err()
default:
}
err = eval.Transaction(txn.SignedTxn, &txn.ApplyData)
if err != nil {
return stateDelta{}, evalAux{}, err
}
}
// Finally, process any pending end-of-block state changes
err = eval.endOfBlock()
if err != nil {
return stateDelta{}, evalAux{}, err
}
// If validating, do final block checks that depend on our new state
if validate {
err = eval.finalValidation()
if err != nil {
return stateDelta{}, evalAux{}, err
}
}
return eval.state.mods, *eval.aux, nil
}
// Validate uses the ledger to validate block blk as a candidate next block.
// It returns an error if blk is not the expected next block, or if blk is
// not a valid block (e.g., it has duplicate transactions, overspends some
// account, etc).
func (l *Ledger) Validate(ctx context.Context, blk bookkeeping.Block, txcache VerifiedTxnCache, executionPool execpool.BacklogPool) (*ValidatedBlock, error) {
delta, aux, err := l.eval(ctx, blk, nil, true, txcache, executionPool)
if err != nil {
return nil, err
}
vb := ValidatedBlock{
blk: blk,
delta: delta,
aux: aux,
}
return &vb, nil
}
// ValidatedBlock represents the result of a block validation. It can
// be used to efficiently add the block to the ledger, without repeating
// the work of applying the block's changes to the ledger state.
type ValidatedBlock struct {
blk bookkeeping.Block
delta stateDelta
aux evalAux
}
// Block returns the underlying Block for a ValidatedBlock.
func (vb ValidatedBlock) Block() bookkeeping.Block {
return vb.blk
}
// WithSeed returns a copy of the ValidatedBlock with a modified seed.
func (vb ValidatedBlock) WithSeed(s committee.Seed) ValidatedBlock {
newblock := vb.blk
newblock.BlockHeader.Seed = s
return ValidatedBlock{
blk: newblock,
delta: vb.delta,
aux: vb.aux,
}
}
| 1 | 35,766 | should this increment eval.Round somehow? | algorand-go-algorand | go |
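On the round question: ResetTxnBytes simulates starting a fresh block, but the evaluator's round stays fixed, so Alive() keeps checking FirstValid/LastValid against the original round. (Note the validateTxnBytes parameter in this hunk is unused as written.) A purely hypothetical sketch of what advancing the simulated round could look like — field names follow the struct above; this is not the project's actual change:

// Hypothetical variant only: clear the byte counter and pretend we moved
// to the next block, so validity-window checks match the block a pending
// transaction would actually land in.
func (eval *BlockEvaluator) ResetTxnBytes() {
	eval.totalTxBytes = 0
	eval.block.BlockHeader.Round++
}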
@@ -91,7 +91,7 @@ class Blacklight::Solr::Response < ActiveSupport::HashWithIndifferentAccess
value.each { |v| force_to_utf8(v) }
when String
if value.encoding != Encoding::UTF_8
- Rails.logger.warn "Found a non utf-8 value in Blacklight::Solr::Response. \"#{value}\" Encoding is #{value.encoding}"
+ Blacklight.logger.warn "Found a non utf-8 value in Blacklight::Solr::Response. \"#{value}\" Encoding is #{value.encoding}"
value.dup.force_encoding('UTF-8')
else
value | 1 | # frozen_string_literal: true
class Blacklight::Solr::Response < ActiveSupport::HashWithIndifferentAccess
extend ActiveSupport::Autoload
eager_autoload do
autoload :PaginationMethods
autoload :Response
autoload :Spelling
autoload :Facets
autoload :MoreLikeThis
autoload :GroupResponse
autoload :Group
end
include PaginationMethods
include Spelling
include Facets
include Response
include MoreLikeThis
attr_reader :request_params
attr_accessor :document_model, :blacklight_config
def initialize(data, request_params, options = {})
super(force_to_utf8(ActiveSupport::HashWithIndifferentAccess.new(data)))
@request_params = ActiveSupport::HashWithIndifferentAccess.new(request_params)
self.document_model = options[:solr_document_model] || options[:document_model] || SolrDocument
self.blacklight_config = options[:blacklight_config]
end
def header
self['responseHeader'] || {}
end
def params
header['params'] || request_params
end
def start
params[:start].to_i
end
def rows
params[:rows].to_i
end
def sort
params[:sort]
end
def documents
@documents ||= (response['docs'] || []).collect { |doc| document_model.new(doc, self) }
end
alias_method :docs, :documents
def grouped
@groups ||= self["grouped"].map do |field, group|
# grouped responses can either be grouped by:
# - field, where this key is the field name, and there will be a list
# of documents grouped by field value, or:
# - function, where the key is the function, and the documents will be
# further grouped by function value, or:
# - query, where the key is the query, and the matching documents will be
# in the doclist on THIS object
if group["groups"] # field or function
GroupResponse.new field, group, self
else # query
Group.new field, group, self
end
end
end
def group key
grouped.find { |x| x.key == key }
end
def grouped?
key? "grouped"
end
def export_formats
documents.map { |x| x.export_formats.keys }.flatten.uniq
end
private
def force_to_utf8(value)
case value
when Hash
value.each { |k, v| value[k] = force_to_utf8(v) }
when Array
value.each { |v| force_to_utf8(v) }
when String
if value.encoding != Encoding::UTF_8
Rails.logger.warn "Found a non utf-8 value in Blacklight::Solr::Response. \"#{value}\" Encoding is #{value.encoding}"
value.dup.force_encoding('UTF-8')
else
value
end
end
value
end
end
| 1 | 7,101 | Although this change looks good. Maybe there are other inconsistent cases too? | projectblacklight-blacklight | rb |
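The follow-up about other inconsistent cases is easy to answer mechanically — e.g. a sweep for remaining direct Rails.logger calls (illustrative command; directory names assumed):

grep -rn 'Rails.logger' app lib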
@@ -21,7 +21,7 @@ module ApplicationHelper
'/auth/github'
end
- def format_resources(resources)
+ def format_markdown(resources)
BlueCloth.new(resources).to_html.html_safe
end
| 1 | module ApplicationHelper
def body_class
qualified_controller_name = controller.controller_path.gsub('/','-')
"#{qualified_controller_name} #{qualified_controller_name}-#{controller.action_name}"
end
def google_map_link_to(address, *options, &block)
google_link = "http://maps.google.com/maps?f=q&q=#{CGI.escape(address)}&z=17&iwloc=A"
if block_given?
link_to(capture(&block), google_link, *options)
else
link_to address, google_link, *options
end
end
def keywords(keywords = nil)
keywords.presence || Topic.top.pluck(:name).join(', ')
end
def github_auth_path
'/auth/github'
end
def format_resources(resources)
BlueCloth.new(resources).to_html.html_safe
end
def partial_name(model)
File.basename(model.to_partial_path)
end
def forum_url(suffix=nil)
"https://forum.upcase.com/#{suffix}"
end
def blog_articles_url(topic)
"http://robots.thoughtbot.com/tags/#{topic.slug}"
end
def current_user_has_access_to?(feature)
current_user && current_user.has_access_to?(feature)
end
def show_upgrade_to_annual_cta?
current_user_is_subscription_owner? &&
current_user_is_eligible_for_annual_upgrade?
end
end
| 1 | 13,406 | This is a way better method name. | thoughtbot-upcase | rb |
@@ -1,7 +1,5 @@
class NewLanguageConfirmationsController < ApplicationController
def index
- redirect_to welcome_to_upcase_path(
- confirmation: true, language_selected: params[:language],
- ), notice: "Thanks for signing up. We will be in touch!"
+ redirect_to root_path, notice: t("marketing.show.language_flash")
end
end | 1 | class NewLanguageConfirmationsController < ApplicationController
def index
redirect_to welcome_to_upcase_path(
confirmation: true, language_selected: params[:language],
), notice: "Thanks for signing up. We will be in touch!"
end
end
| 1 | 18,286 | Prefer single-quoted strings when you don't need string interpolation or special symbols. | thoughtbot-upcase | rb |
@@ -1,3 +1,5 @@
+options = Array.isArray(options) ? options : [];
+
var invalid = [];
var attr, attrName, allowed, | 1 | var invalid = [];
var attr, attrName, allowed,
role = node.getAttribute('role'),
attrs = node.attributes;
if (!role) {
role = axe.commons.aria.implicitRole(node);
}
allowed = axe.commons.aria.allowedAttr(role);
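// For each attribute axe recognizes as an ARIA attribute, flag it when it is
// missing from the allowed list for the element's (explicit or implicit) role.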
if (role && allowed) {
for (var i = 0, l = attrs.length; i < l; i++) {
attr = attrs[i];
attrName = attr.name;
if (axe.commons.aria.validateAttr(attrName) && allowed.indexOf(attrName) === -1) {
invalid.push(attrName + '="' + attr.nodeValue + '"');
}
}
}
if (invalid.length) {
this.data(invalid);
return false;
}
return true; | 1 | 11,983 | I was wondering if this should allow per role specification, instead of (or in addition to) a generic "allowed everywhere". So you could do: `{ separator: ['aria-valuenow', 'aria-valuemin', 'aria-valuemax'] }`. You could still allow the array, and you could add a wildcard option for the "generic" case you've got now: `{ '*': ['always-allowed'] }`. | dequelabs-axe-core | js |
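// A minimal sketch of the per-role option shape suggested in the attached
// review note (hypothetical, not implemented by this check):
//   options = { '*': ['aria-label'], separator: ['aria-valuenow'] };
//   allowed = allowed.concat(options['*'] || [], options[role] || []);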
@@ -942,7 +942,8 @@ SQLVarChar::SQLVarChar(Lng32 maxLen,
FALSE, allowSQLnull, isUpShifted, isCaseInsensitive,
TRUE, cs, co, ce,
encoding, vcIndLen),
- clientDataType_(collHeap()) // Get heap from NABasicObject. Can't allocate on stack.
+ clientDataType_(collHeap()), // Get heap from NABasicObject. Can't allocate on stack.
+ wasHiveString_(FALSE)
{}
#pragma warn(1506) // warning elimination
| 1 | /**********************************************************************
// @@@ START COPYRIGHT @@@
//
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
//
// @@@ END COPYRIGHT @@@
**********************************************************************/
/* -*-C++-*-
**************************************************************************
*
* File: CharType.cpp
* Description: Character Type Implementation
* Created: 4/27/94
* Language: C++
*
*
**************************************************************************
*/
#include <stdarg.h>
#include "CharType.h"
#include "ComASSERT.h"
#include "unicode_char_set.h"
#include "wstr.h"
#include "CmpCommon.h"
#include "csconvert.h"
#include "NLSConversion.h"
#include "EncodedValue.h"
static const NAString LiteralCHAR("CHAR");
static const NAString LiteralVARCHAR("VARCHAR");
static const NAString LiteralBYTE("BYTE");
static const NAString LiteralVARBYTE("VARBYTE");
const NAString CharType::LiteralSchema("SCHEMA");
const NAString CharType::LiteralCatalog("CATALOG");
static const NAString LiteralLOB("LOB");
static const NAString LiteralBLOB("BLOB");
static const NAString LiteralCLOB("CLOB");
// constructor used for CHAR length semantics only
CharType::CharType( const NAString& adtName,
Lng32 maxLenInBytesOrNAWchars,
short maxBytesPerChar,
NABoolean nullTerminated,
NABoolean allowSQLnull,
NABoolean isUpShifted,
NABoolean isCaseInsensitive,
NABoolean varLenFlag,
CharInfo::CharSet cs,
CharInfo::Collation co,
CharInfo::Coercibility ce,
CharInfo::CharSet encoding,
Int32 vcIndLen // default is 0
)
: NAType( adtName
, NA_CHARACTER_TYPE
, (maxLenInBytesOrNAWchars + (nullTerminated ? 1 : 0)) * CharInfo::minBytesPerChar(cs)
, allowSQLnull
, allowSQLnull ? SQL_NULL_HDR_SIZE : 0
, varLenFlag
// computes length of VarCharLen field (0 or 2 or 4 bytes)
// if not passed in
, (varLenFlag ? ((vcIndLen > 0) ? vcIndLen :
(((maxLenInBytesOrNAWchars*CharInfo::minBytesPerChar(cs)) & 0xFFFF8000)
? SQL_VARCHAR_HDR_SIZE_4
: SQL_VARCHAR_HDR_SIZE))
: 0)
, CharInfo::minBytesPerChar(cs)
),
qualifier_ (CHARACTER_STRING_TYPE),
charLimitInUCS2or4chars_(maxLenInBytesOrNAWchars*CharInfo::minBytesPerChar(cs) /
CharInfo::maxBytesPerChar(cs)),
bytesPerChar_ (CharInfo::maxBytesPerChar(cs)),
nullTerminated_ (nullTerminated),
upshifted_ (isUpShifted),
caseinsensitive_(isCaseInsensitive),
charSet_ (cs),
collation_ (co),
coercibility_ (ce)
{
//LCOV_EXCL_START :rfi
ComASSERT(adtName == LiteralCHAR || adtName == LiteralVARCHAR
|| adtName == LiteralBYTE || adtName == LiteralVARBYTE
|| adtName == LiteralSchema || adtName == LiteralCatalog);
//LCOV_EXCL_STOP
if ( encoding == CharInfo::UnknownCharSet )
encodingCharSet_ = charSet_;
else
encodingCharSet_ = encoding;
}
// This constructor supports SJIS and UTF8
CharType::CharType( const NAString& adtName,
const CharLenInfo & maxLenInfo,
short maxBytesPerChar, // is maxBytesPerChar when cs is SJIS or UTF8
NABoolean nullTerminated,
NABoolean allowSQLnull,
NABoolean isUpShifted,
NABoolean isCaseInsensitive,
NABoolean varLenFlag,
CharInfo::CharSet cs,
CharInfo::Collation co,
CharInfo::Coercibility ce,
CharInfo::CharSet encoding
)
: NAType( adtName
, NA_CHARACTER_TYPE
, (maxLenInfo.getMaxLenInBytes() +
(nullTerminated ? CharInfo::minBytesPerChar(cs) : 0))
, allowSQLnull
, allowSQLnull ? SQL_NULL_HDR_SIZE : 0
, varLenFlag
, (varLenFlag ? ((maxLenInfo.getMaxLenInBytes() & 0xFFFF8000) ? SQL_VARCHAR_HDR_SIZE_4
: SQL_VARCHAR_HDR_SIZE)
: 0) // computes length of VarCharLen field (0 or 2 or 4 bytes)
, CharInfo::minBytesPerChar(cs)
),
qualifier_ (CHARACTER_STRING_TYPE),
charLimitInUCS2or4chars_ (maxLenInfo.getMaxLenInChars()),
bytesPerChar_ (maxBytesPerChar),
nullTerminated_ (nullTerminated),
upshifted_ (isUpShifted),
caseinsensitive_(isCaseInsensitive),
charSet_ (cs),
collation_ (co),
coercibility_ (ce)
{
//LCOV_EXCL_START :rfi
ComASSERT(adtName == LiteralCHAR || adtName == LiteralVARCHAR
|| adtName == LiteralBYTE || adtName == LiteralVARBYTE
|| adtName == LiteralSchema || adtName == LiteralCatalog);
//LCOV_EXCL_STOP
if ( encoding == CharInfo::UnknownCharSet )
encodingCharSet_ = charSet_;
else
encodingCharSet_ = encoding;
}
// -----------------------------------------------------------------------
NAString CharType::getCharSetName() const
{
return NAString("CHARACTER SET ") + CharInfo::getCharSetName(getCharSet());
}
NAString CharType::getCharSetAsPrefix(CharInfo::CharSet cs)
{
return NAString("_") + CharInfo::getCharSetName(cs);
}
NAString CharType::getCharSetAsPrefix() const
{
return getCharSetAsPrefix(getCharSet());
}
NAString CharType::getCollationName() const
{
return CharInfo::getCollationName(getCollation());
}
NAString CharType::getCollateClause(CharInfo::Collation co)
{
return NAString("COLLATE ") + CharInfo::getCollationName(co);
}
//LCOV_EXCL_START :rfi
NAString CharType::getCoercibilityText(CharInfo::Coercibility ce)
{
return NAString("(") + CharInfo::getCoercibilityText(ce) + " coercibility)";
}
//LCOV_EXCL_STOP
// -----------------------------------------------------------------------
// Return the SQL name for this type.
// Internal type qualifier to data type name mapping function
// Needed for supporting the SQL DESCRIBE command.
// -----------------------------------------------------------------------
NAString CharType::getSimpleTypeName() const
{
return isVaryingLen() ? LiteralVARCHAR : LiteralCHAR;
}
NAString CharType::getTypeSQLname(NABoolean terse) const
{
char lenbuf[30];
char* sp = lenbuf;
NAString rName((isVaryingLen() ||
DFS2REC::isAnyVarChar(getFSDatatype()))
? LiteralVARCHAR : LiteralCHAR);
// Convert the number that denotes the length for this datatype
if ( getCharSet() == CharInfo::UTF8 /* || ( getCharSet() == CharInfo::SJIS &&
getEncodingCharSet() == CharInfo::SJIS ) */ )
{
if (getStrCharLimit() * getBytesPerChar() == getNominalSize() )
{
Int32 charLen = getStrCharLimit();
if (charLen == 1)
sprintf(sp, "(1 CHAR) ");
else
sprintf(sp, "(%d CHARS) ", charLen);
}
else
{
Int32 byteLen = getNominalSize();
if (byteLen == 1)
sprintf(sp, "(1 BYTE) ");
else
sprintf(sp, "(%d BYTES) ", byteLen);
}
}
else
sprintf(sp, "(%d) ", getStrCharLimit());
rName += sp;
rName += getCharSetName();
if (isCaseinsensitive())
rName += " NOT CASESPECIFIC ";
if (! terse)
getTypeSQLnull(rName, terse);
return rName;
} // getTypeSQLname()
// This method is written based on the implementation contained in
// EncVal_encodeString(const char * str, Lng32 strLen, CharType *cType).
NAString* CharType::convertToString(double v, NAMemory * h) const
{
// From EncVal_encodeString(), a CHAR string is encoded into a
// double as follows.
// we only use the 52 fraction bits of the floating point double so that the round trip conversion
// from decimal to double and then from double to decimal results in exactly the original decimal.
// From Wiki "If a decimal string with at most 15 significant digits is converted to IEEE 754 double
// precision representation and then converted back to a string with the same number of significant digits,
// then the final string should match the original"
// =======================================================
// 8 bits of the first character
// hiResult += (ULng32) stringValues[0] << 12; // char 0
// hiResult += (ULng32) stringValues[1] << 4; // char 1
// hiResult += (ULng32) stringValues[2] >> 4; // 4 bits of char 2
// loResult += (ULng32) stringValues[2] << 28; // 4 bits of char 2
// loResult += (ULng32) stringValues[3] << 20; // char 3
// loResult += (ULng32) stringValues[4] << 12; // char 4
// loResult += (ULng32) stringValues[5] << 4; // char 5
// loResult += (ULng32) stringValues[6] >> 4; // 4 bits of char 6
// combine the two 32 bit integers to a floating point number
// (2**32 * hiResult + loResult)
// result = hiResult * .4294967296E10 + loResult;
// =======================================================
// Here we reverse the above steps to generate an 8-byte string.
ULng32 hiResult = 0;
ULng32 loResult = 0;
hiResult = ((UInt64)v) >> 32;
loResult = ((UInt64)v) % (UInt64)(.4294967296E10);
// including the leading and trailing single quotes
char bits[9];
bits[0] = bits[8] = '\'';
// hiResult:
// bit 20-13: char 0
// bit 12-5: char 1
// bit 4-1: high 6-bits of char 2
bits[1] = (hiResult >> 12) & 0xff;
bits[2] = (hiResult >> 4) & 0xff;
bits[3] = (hiResult << 4) & 0xf0;
// loResult:
// bit 32-29: low 4-bits of char 2
// bit 28-21: char 3
// bit 20-13: char 4
// bit 12-5: char 5
// bit 4-1: high 4-bits of char 6
bits[3] |= (loResult >> 28);
bits[4] = (loResult >> 20) & 0xff;
bits[5] = (loResult >> 12) & 0xff;
bits[6] = (loResult >> 4) & 0xff;
bits[7] = (loResult << 4) & 0xff;
// compute the actual string length as any character
// decoded could be a \0.
Int32 i = 0;
for ( i=1; i<=7; i++ )
if ( bits[i] == 0 ) {
bits[i] = '\'';
break;
}
return new (h) NAString(bits, i+1, h);
}
//----------------------------------------------
// Compute the collation and the coercibility.
// See Ansi 4.2.3, tables 2 and 3.
// See the ES for details.
//
// Returns FALSE for "syntax error" case,
// in which case newCO returns as UNKNOWN_COLLATION
// (and newCE as EXPLICIT (defensive programming: but we could change this)).
//
// Returns TRUE otherwise (legal Ansi cases),
// in which case newCO *may* return as UNKNOWN_COLLATION
// and newCE as NO_COLLATING_SEQUENCE.
//
// So, if being called just for type synthesis
// (CharType::synthesizeType, defined below),
// per Ansi 4.2.3 table TWO, you only need to weed out syntax errors --
// the function result FALSE is all you need to check.
//
// If being called for comparability checking
// (CharType::isComparable, defined in ../optimizer/SynthType.cpp),
// per Ansi 4.2.3 table THREE, you need to flag both
// a) syntax errors, and b) non-propagatable collations --
// both are covered by checking the newCO result UNKNOWN_COLLATION.
//
//----------------------------------------------
NABoolean
CharType::computeCoAndCo(const CharType& other,
CharInfo::Collation& newCO,
CharInfo::Coercibility& newCE) const
{
if ( getCoercibility() == CharInfo::COERCIBLE )
{
ComASSERT( getCollation() != CharInfo::UNKNOWN_COLLATION );
newCE = other.getCoercibility();
newCO = other.getCollation();
return TRUE;
}
if ( other.getCoercibility() == CharInfo::COERCIBLE )
{
ComASSERT( other.getCollation() != CharInfo::UNKNOWN_COLLATION );
newCE = getCoercibility();
newCO = getCollation();
return TRUE;
}
NABoolean sameCEdiffCO = ( getCoercibility() == other.getCoercibility() &&
getCollation() != other.getCollation() );
if ( getCoercibility() == CharInfo::IMPLICIT )
{
if ( sameCEdiffCO ) {
newCE = CharInfo::NO_COLLATING_SEQUENCE;
newCO = CharInfo::UNKNOWN_COLLATION;
} else {
newCE = other.getCoercibility();
newCO = other.getCollation();
}
return TRUE;
}
if ( getCoercibility() == CharInfo::EXPLICIT )
{
newCE = CharInfo::EXPLICIT;
if ( sameCEdiffCO ) {
newCO = CharInfo::UNKNOWN_COLLATION;
return FALSE; // syntax error
} else {
newCO = getCollation();
return TRUE;
}
}
if ( getCoercibility() == CharInfo::NO_COLLATING_SEQUENCE )
{
if ( other.getCoercibility() == CharInfo::EXPLICIT ) {
newCE = CharInfo::EXPLICIT;
newCO = other.getCollation();
} else {
newCE = CharInfo::NO_COLLATING_SEQUENCE;
newCO = CharInfo::UNKNOWN_COLLATION;
}
return TRUE;
}
ComASSERT(FALSE);
return FALSE;
}
// -----------------------------------------------------------------------
// Type synthesis for binary operators
// -----------------------------------------------------------------------
const NAType* CharType::synthesizeType(NATypeSynthRuleEnum synthRule,
const NAType& operand1,
const NAType& operand2,
CollHeap* h,
UInt32 *flags) const
{
//
// If the second operand's type synthesis rules have higher precedence than
// this operand's rules, use the second operand's rules.
//
if (operand2.getSynthesisPrecedence() > getSynthesisPrecedence())
return operand2.synthesizeType(synthRule, operand1, operand2, h, flags);
//
// If either operand is not character, the expression is invalid.
//
if (operand1.getTypeQualifier() != NA_CHARACTER_TYPE ||
operand2.getTypeQualifier() != NA_CHARACTER_TYPE)
return NULL;
const CharType& op1 = (CharType &) operand1;
const CharType& op2 = (CharType &) operand2;
//
// Charsets must be equal.
//
// Charset inference. If either is UnknownCharSet, we continue.
// The synthesized type with unknown charset attribute allows us to
// continue synthesization upwards. At some point in the process,
// we are able to determine the real charset. Then a push down process
// will drive the real charset downwards.
//
if ( op1.getCharSet() != op2.getCharSet() &&
op1.getCharSet() != CharInfo::UnknownCharSet &&
op2.getCharSet() != CharInfo::UnknownCharSet
)
return NULL;
Lng32 res_len_in_Chars;
Lng32 res_nominalSize;
CharInfo::Collation co;
CharInfo::Coercibility ce;
NABoolean caseinsensitive = FALSE;
NABoolean makeTypeVarchar = FALSE;
switch (synthRule) {
case SYNTH_RULE_UNION:
res_len_in_Chars = MAXOF( op1.getStrCharLimit(), op2.getStrCharLimit() );
res_nominalSize = MAXOF( op1.getNominalSize(), op2.getNominalSize());
if (!(DFS2REC::isAnyVarChar(op1.getFSDatatype()) ||
DFS2REC::isAnyVarChar(op2.getFSDatatype())))
{
if((flags && ((*flags & NAType::MAKE_RESULT_VARCHAR) != 0)) &&
( (op1.getStrCharLimit() != op2.getStrCharLimit()) ||
(op1.getNominalSize() != op2.getNominalSize()) ) )
{
makeTypeVarchar = TRUE;
}
}
// See Ansi SQL92 7.10 SR 10(b)(ii), which in turn refers to
// the rules of subclause 9.3 "set operation result data types",
// of which Ansi 9.3 SR 3(a) applies.
if (!op1.computeCoAndCo(op2, co, ce))
return NULL;
caseinsensitive = op1.isCaseinsensitive() AND
op2.isCaseinsensitive();
break;
case SYNTH_RULE_CONCAT:
res_len_in_Chars = op1.getStrCharLimit() + op2.getStrCharLimit();
res_nominalSize = op1.getNominalSize() + op2.getNominalSize();
// See Ansi 6.13 SR 4(b).
if (!op1.computeCoAndCo(op2, co, ce))
return NULL;
// caller sets case insensitive flag: Concat::syntheSiZeType()
break;
default:
return NULL;
}
NABoolean null = op1.supportsSQLnull() OR op2.supportsSQLnull();
NABoolean upshift = op1.isUpshifted() AND op2.isUpshifted();
CharLenInfo res_CharLenInfo(res_len_in_Chars,
res_nominalSize);
if (DFS2REC::isAnyVarChar(op1.getFSDatatype()) OR
DFS2REC::isAnyVarChar(op2.getFSDatatype()) OR
makeTypeVarchar)
return new(h) SQLVarChar(res_CharLenInfo, null, upshift, caseinsensitive,
op1.getCharSet(), co, ce);
else
return new(h) SQLChar(res_CharLenInfo, null, upshift, caseinsensitive, FALSE,
op1.getCharSet(), co, ce);
} // synthesizeType()
// ---------------------------------------------------------------------
// A method which tells if a conversion error can occur when converting
// a value of this type to the target type.
// ---------------------------------------------------------------------
NABoolean CharType::errorsCanOccur (const NAType& target, NABoolean lax) const
{
if (!NAType::errorsCanOccur(target) &&
getNominalSize() <= target.getNominalSize())
{return FALSE;}
else
{return TRUE;}
}
// ---------------------------------------------------------------------
// keyValue INPUT is the string representation of the current value, then
// keyValue is OUTPUT as the string rep of the very NEXT value, and RETURN TRUE.
// If we're already at the maximum value, keyValue returns empty, RETURN FALSE.
//
// We assume caller has removed trailing blanks if desired, and
// we do not pad out the returned value with minimal characters ('\0' chars).
// See Like::applyBeginEndKeys for the full treatment of trailing zeroes
// in the really correct way, for which this is just a helper function.
//
// ## We need to fix this for (a) multibyte characters, (b) collating seqs!
// ---------------------------------------------------------------------
NABoolean CharType::computeNextKeyValue(NAString &keyValue) const
{
ComASSERT(getBytesPerChar() == 1);
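// Example: "abz" becomes "ab{" (last byte bumped by one), while "ab\xFF"
// becomes "ac" (the maxed-out byte is dropped and the previous one bumped).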
#pragma nowarn(259) // warning elimination
for (size_t i = keyValue.length(); i--; i)
#pragma warn(259) // warning elimination
{
unsigned char c = keyValue[i];
if (c < UCHAR_MAX)
{
keyValue[i] = ++c; // NOT keyValue[i]++: NAString is signed char
break;
}
keyValue.remove(i);
}
#pragma nowarn(1506) // warning elimination
return keyValue.length();
#pragma warn(1506) // warning elimination
}
NABoolean CharType::computeNextKeyValue_UTF8(NAString &keyValue) const
{
ComASSERT(getBytesPerChar() == 4);
#pragma nowarn(259) // warning elimination
for (size_t i = keyValue.length(); i--; i)
#pragma warn(259) // warning elimination
{
unsigned char c = keyValue[i];
if ( (c & 0xC0) == 0x80 ) // If not first byte in a char,
continue; // keep going back by one byte at a time
unsigned int UCS4val = 0; // LocaleCharToUCS4 requires "unsigned int"
int charLenIn = LocaleCharToUCS4( &keyValue[i], 4, &UCS4val, cnv_UTF8 );
if ( (charLenIn > 0) && (UCS4val < 0x1FFFFF) ) // Less than max char ?
{
char tmpBuf[10] ;
UCS4val++;
int charLenOut = UCS4ToLocaleChar( &UCS4val, tmpBuf, 10, cnv_UTF8 );
tmpBuf[charLenOut] = '\0';
//
// Replace character with next character
//
keyValue.remove(i);
keyValue.insert(i, tmpBuf);
break;
}
else keyValue.remove(i);
}
#pragma nowarn(1506) // warning elimination
return keyValue.length();
#pragma warn(1506) // warning elimination
}
NABoolean CharType::computeNextKeyValue(NAWString &keyValue) const
{
ComASSERT(getBytesPerChar() == SQL_DBCHAR_SIZE);
NAWchar maxValue = (NAWchar)CharType::getMaxSingleCharacterValue();
#pragma nowarn(259) // warning elimination
for (size_t i = keyValue.length(); i--; i)
#pragma warn(259) // warning elimination
{
NAWchar c = keyValue[i];
#ifdef NA_LITTLE_ENDIAN
#ifdef IS_MP
if ( CharInfo::is_NCHAR_MP(getCharSet()) )
wc_swap_bytes(&c, 1);
#endif
#endif
if (c < maxValue)
{
c += 1;
#ifdef NA_LITTLE_ENDIAN
#ifdef IS_MP
if ( CharInfo::is_NCHAR_MP(getCharSet()) )
wc_swap_bytes(&c, 1);
#endif
#endif
keyValue[i] = c; // NOT keyValue[i]++: NAWString is signed char
break;
}
keyValue.remove(i);
}
#pragma nowarn(1506) // warning elimination
return keyValue.length();
#pragma warn(1506) // warning elimination
}
NABoolean CharType::isEncodingNeeded() const
{
switch (getCharSet()) {
#if 0 /* SJIS NOT SUPPORTED YET */
case CharInfo::SJIS:
if (getEncodingCharSet() == CharInfo::SJIS)
return FALSE; // treated as a stream of 8-bit bytes
else if (getEncodingCharSet() == CharInfo::UCS2) // this option is not supported - i.e., does not work yet
return TRUE; // encoding is needed because of byte-order
return FALSE; // make a guess
#endif
case CharInfo::ISO88591:
case CharInfo::UTF8:
return FALSE; // treated as a stream of 8-bit bytes
case CharInfo::UNICODE: // encoding is needed because of byte-order
return TRUE;
case CharInfo::KANJI_MP: // Since KANJI/KSC5601 data in MP is stored
case CharInfo::KSC5601_MP: // as typed in, there is no need to encode.
default:
return FALSE;
}
}
// -- Blank and min and max permissible values for a single character
// ## These too will need to be changed to handle different collating sequences
// ## and character sets and such ...
Lng32 CharType::getBlankCharacterValue() const
{
switch (getCharSet()) {
case CharInfo::UNICODE:
return unicode_char_set::space_char();
default:
return Lng32(' '); //##NCHAR: is this correct for Kanji etc?
}
}
Lng32 CharType::getMinSingleCharacterValue() const
{
return 0;
}
Lng32 CharType::getMaxSingleCharacterValue() const
{
//##NCHAR: This switches-within-switch-stmt is not optimally maintainable.
//## Best way to implement this would likely be to augment the
//## CharInfo.cpp charset-map-struct and the CollationInfo class...
switch (getCharSet()) {
#ifdef IS_MP
case CharInfo::KANJI_MP:
case CharInfo::KSC5601_MP:
return 0xffff; // return max NAWchar as KANJI/KSC's code points are not
// checked
break;
#endif
case CharInfo::ISO88591:
switch (getCollation()) {
case CharInfo::DefaultCollation: return UCHAR_MAX;
#if 0 /* SJIS NOT SUPPORTED YET */
case CharInfo::SJIS_COLLATION: return UCHAR_MAX; //##NCHAR: [note 1]
#endif
case CharInfo::CZECH_COLLATION:
return collationMaxChar[CollationInfo::getCollationParamsIndex(CharInfo::CZECH_COLLATION)];
case CharInfo::CZECH_COLLATION_CI:
return collationMaxChar[CollationInfo::getCollationParamsIndex(CharInfo::CZECH_COLLATION_CI)];
} // switch (getCollation())
break;
case CharInfo::UTF8:
return UCHAR_MAX;
break;
#if 0 /* SJIS NOT SUPPORTED YET */
case CharInfo::SJIS:
if (getEncodingCharSet() == CharInfo::SJIS)
return UCHAR_MAX;
#endif
// assume getEncodingCharSet() == CharInfo::UCS2 // this option is not supported - i.e., does not work yet
// fall through
case CharInfo::UNICODE:
switch (getCollation()) {
case CharInfo::DefaultCollation: return 0xffff; // max UCS-2 character
case CharInfo::SJIS_COLLATION: return 0xfcfc; // max SJIS 2-byte char
}
break;
} // switch (getCharSet())
//LCOV_EXCL_START :rfi - unhandled charset&collation combination
ComASSERT(FALSE);
return INT_MIN;
//LCOV_EXCL_STOP
}
NABoolean CharType::isCharSetAndCollationComboOK() const
{
if ( getCollation() == CharInfo::CZECH_COLLATION &&
charSet_ != CharInfo::ISO88591 )
return FALSE;
// An unknown collation is allowed if not used in any dyadic operation
// or comparison -- the parser just issues a warning for it.
return getCollation() == CharInfo::UNKNOWN_COLLATION ||
getMaxSingleCharacterValue() != INT_MIN;
}
// -----------------------------------------------------------------------
// Equality comparison
// -----------------------------------------------------------------------
NABoolean CharType::operator==(const NAType& other) const
{
return NAType::operator==(other) &&
caseinsensitive_ == ((CharType&)other).caseinsensitive_ &&
nullTerminated_ == ((CharType&)other).nullTerminated_ &&
upshifted_ == ((CharType&)other).upshifted_ &&
charSet_ == ((CharType&)other).charSet_ &&
collation_ == ((CharType&)other).collation_ &&
coercibility_ == ((CharType&)other).coercibility_ &&
encodingCharSet_ == ((CharType&)other).encodingCharSet_ ;
}
// -----------------------------------------------------------------------
// Check compatibility for string types.
// -----------------------------------------------------------------------
NABoolean CharType::isCompatible(const NAType& other, UInt32 * flags) const
{
return (NAType::isCompatible(other, flags) &&
getCharSet() == ((CharType&)other).getCharSet() &&
getCharSet() != CharInfo::UnknownCharSet);
}
// -----------------------------------------------------------------------
// Check compatibility for string types allowing UnknownCharSet
// compatible if either is an unknown character set.
// -----------------------------------------------------------------------
NABoolean CharType::isCompatibleAllowUnknownCharset(const NAType& other) const
{
return (NAType::isCompatible(other) &&
(getCharSet() == CharInfo::UnknownCharSet ||
((CharType&)other).getCharSet() == CharInfo::UnknownCharSet));
}
NABoolean CharType::createSQLLiteral(const char * buf,
NAString *&stringLiteral,
NABoolean &isNull,
CollHeap *h) const
{
if (NAType::createSQLLiteral(buf, stringLiteral, isNull, h))
return TRUE;
// Generate a string literal in a target charset (usually UTF8)
// of the form _<type charset>'<string in target charset>'
// TBD: - consider using hex literal instead of unprintable characters
NABoolean result = TRUE;
NAString *resultLiteral = new(h) NAString(getCharSetAsPrefix());
const char *valPtr = buf + getSQLnullHdrSize();
Int32 valLen = 0;
CharInfo::CharSet sourceCS = getCharSet();
char *tempBuf = NULL;
if (getVarLenHdrSize() == 0)
valLen = getNominalSize();
else
{
if (getVarLenHdrSize() == 2)
valLen = *((Int16 *) valPtr);
else if (getVarLenHdrSize() == 4)
valLen = *((Int32 *) valPtr);
else
ComASSERT(FALSE);
valPtr += getVarLenHdrSize();
}
*resultLiteral += "'";
switch (sourceCS)
{
case CharInfo::UTF8:
*resultLiteral += NAString(valPtr, valLen);
break;
case CharInfo::ISO88591:
{
// try it the easy way, for all ASCII chars
NABoolean allAscii = TRUE;
unsigned char *ucharBuf = (unsigned char *) valPtr;
for (Int32 i=0; i<valLen && allAscii; i++)
if (ucharBuf[i] > 127)
allAscii = FALSE;
if (allAscii)
{
*resultLiteral += NAString((char *) valPtr, valLen);
break;
}
}
// otherwise fall through to the next case
case CharInfo::UNICODE:
{
char *firstUntranslatedChar = NULL;
unsigned int outputLength = 0;
Int32 tempBufSize = CharInfo::getMaxConvertedLenInBytes(sourceCS,
valLen,
CharInfo::UTF8);
tempBuf = new(h) char[tempBufSize];
int retCode = LocaleToUTF8(cnv_version1,
valPtr,
valLen,
tempBuf,
tempBufSize,
convertCharsetEnum(sourceCS),
firstUntranslatedChar,
&outputLength);
if (retCode != 0)
result = FALSE;
*resultLiteral += NAString(tempBuf, outputLength);
}
break;
default:
// non-supported character set
assert(FALSE);
break;
} // end case
// check for any quotes (skip the first one) and duplicate them
size_t firstQuotePos = resultLiteral->first('\'');
size_t otherQuotePos = firstQuotePos + 1;
while ((otherQuotePos = resultLiteral->first('\'', otherQuotePos)) != NA_NPOS)
{
resultLiteral->replace(otherQuotePos, 1, "''");
otherQuotePos += 2;
}
*resultLiteral += "'";
if (tempBuf)
NADELETEBASIC(tempBuf, h);
stringLiteral = resultLiteral;
return result;
}
#pragma nowarn(1506) // warning elimination
SQLChar::SQLChar(Lng32 maxLen,
NABoolean allowSQLnull,
NABoolean isUpShifted,
NABoolean isCaseInsensitive,
NABoolean varLenFlag,
CharInfo::CharSet cs,
CharInfo::Collation co,
CharInfo::Coercibility ce,
CharInfo::CharSet encoding
)
: CharType(varLenFlag ? LiteralVARCHAR : LiteralCHAR,
maxLen, CharInfo::maxBytesPerChar(cs),
FALSE, allowSQLnull, isUpShifted, isCaseInsensitive,
varLenFlag, cs, co, ce,
encoding)
{}
#pragma warn(1506) // warning elimination
#pragma nowarn(1506) // warning elimination
SQLChar::SQLChar(const CharLenInfo & maxLenInfo,
NABoolean allowSQLnull,
NABoolean isUpShifted,
NABoolean isCaseInsensitive,
NABoolean varLenFlag,
CharInfo::CharSet cs,
CharInfo::Collation co,
CharInfo::Coercibility ce,
CharInfo::CharSet encoding
)
: CharType(varLenFlag ? LiteralVARCHAR : LiteralCHAR,
maxLenInfo, CharInfo::maxBytesPerChar(cs),
FALSE, allowSQLnull, isUpShifted, isCaseInsensitive,
varLenFlag, cs, co, ce,
encoding)
{}
#pragma warn(1506) // warning elimination
#pragma nowarn(1506) // warning elimination
SQLVarChar::SQLVarChar(Lng32 maxLen,
NABoolean allowSQLnull,
NABoolean isUpShifted,
NABoolean isCaseInsensitive,
CharInfo::CharSet cs,
CharInfo::Collation co,
CharInfo::Coercibility ce,
CharInfo::CharSet encoding,
Lng32 vcIndLen
)
: CharType(LiteralVARCHAR,
maxLen, CharInfo::maxBytesPerChar(cs),
FALSE, allowSQLnull, isUpShifted, isCaseInsensitive,
TRUE, cs, co, ce,
encoding, vcIndLen),
clientDataType_(collHeap()) // Get heap from NABasicObject. Can't allocate on stack.
{}
#pragma warn(1506) // warning elimination
#pragma nowarn(1506) // warning elimination
SQLVarChar::SQLVarChar(const CharLenInfo & maxLenInfo,
NABoolean allowSQLnull,
NABoolean isUpShifted,
NABoolean isCaseInsensitive,
CharInfo::CharSet cs,
CharInfo::Collation co,
CharInfo::Coercibility ce,
CharInfo::CharSet encoding
)
: CharType(LiteralVARCHAR,
maxLenInfo, CharInfo::maxBytesPerChar(cs),
FALSE, allowSQLnull, isUpShifted, isCaseInsensitive,
TRUE, cs, co, ce,
encoding),
clientDataType_(collHeap()) // Get heap from NABasicObject. Can't allocate on stack.
{}
// -----------------------------------------------------------------------
// The SQL/C Preprocessor rewrites a VARCHAR declaration (Ansi 19.4)
// as an ANSIVARCHAR datatype token (Tandem-internal extension),
// which is actually this ANSIChar datatype --
// FIXED char, with nul-terminator, *not* VAR char with a length prefix.
// In fact, the varLenFlag param is unused: lots of things break
// in cli + rfork if a CharType is both nul-terminated and has a vc-header.
// -----------------------------------------------------------------------
#pragma nowarn(1506) // warning elimination
ANSIChar::ANSIChar(Lng32 maxLen,
NABoolean allowSQLnull,
NABoolean isUpShifted,
NABoolean varLenFlag, // *unused*
CharInfo::CharSet cs,
CharInfo::Collation co,
CharInfo::Coercibility ce,
CharInfo::CharSet encoding
)
: CharType(LiteralCHAR,
maxLen, CharInfo::maxBytesPerChar(cs),
TRUE, allowSQLnull, isUpShifted, FALSE, FALSE, cs, co, ce,
encoding)
//##
//## : CharType(varLenFlag ? LiteralVARCHAR : LiteralCHAR,
//## maxLen, CharInfo::maxBytesPerChar(cs),
//## TRUE, allowSQLnull, isUpShifted, varLenFlag, cs, co, ce,
//## tokNCHARinParser)
{}
#pragma warn(1506) // warning elimination
short ANSIChar::getFSDatatype() const
{
switch ( getBytesPerChar() ) {
case 1:
return REC_BYTE_V_ANSI;
case 2:
#ifdef IS_MP
if ( CharInfo::is_NCHAR_MP(getCharSet()) )
return REC_BYTE_V_ANSI;
else
#endif
return REC_BYTE_V_ANSI_DOUBLE;
default:
//LCOV_EXCL_START :rfi - ANSIChar can be ISO88591 or UCS2 only
ComASSERT(FALSE);
return REC_BYTE_V_ANSI;
//LCOV_EXCL_STOP
}
}
short SQLChar::getFSDatatype() const
{
switch ( getBytesPerChar() ) {
case 1:
return REC_BYTE_F_ASCII;
case 2:
#if 0 /* NCHAR_MP and SJIS are NOT supported yet */
if ( CharInfo::is_NCHAR_MP(getCharSet()) || ( getCharSet() == CharInfo::SJIS &&
getEncodingCharSet() == CharInfo::SJIS ) )
return REC_BYTE_F_ASCII;
else
#endif
return REC_BYTE_F_DOUBLE;
case 4:
if (getCharSet() == CharInfo::UTF8)
return REC_BYTE_F_ASCII;
// fall through
default:
//LCOV_EXCL_START :rfi - no values other than 1,2, & 4 are supported by the code.
ComASSERT(FALSE);
return REC_BYTE_F_ASCII;
//LCOV_EXCL_STOP
}
}
short SQLVarChar::getFSDatatype() const
{
switch ( getBytesPerChar() ) {
case 1:
return REC_BYTE_V_ASCII;
case 2:
#if 0 /* NCHAR_MP and SJIS are NOT supported yet */
if ( CharInfo::is_NCHAR_MP(getCharSet()) || ( getCharSet() == CharInfo::SJIS &&
getEncodingCharSet() == CharInfo::SJIS ) )
return REC_BYTE_V_ASCII;
else
#endif
return REC_BYTE_V_DOUBLE;
case 4:
if (getCharSet() == CharInfo::UTF8)
return REC_BYTE_V_ASCII;
// fall through
default:
//LCOV_EXCL_START :rfi - no values other than 1, 2, & 4 are supported by the code.
ComASSERT(FALSE);
return REC_BYTE_V_ASCII;
//LCOV_EXCL_STOP
}
}
//LCOV_EXCL_START :rfi
short CharType::getFSDatatype() const
{
ComASSERT(FALSE);
return -1;
}
//LCOV_EXCL_STOP
Lng32 CharType::getPrecisionOrMaxNumChars() const
{
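// Only UTF-8 types carry a separate character limit; report it only when it
// is tighter than the storage size in bytes. A return of 0 means "no limit".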
if ( (charLimitInUCS2or4chars_ > 0) &&
(charSet_ == CharInfo::UTF8) &&
(charLimitInUCS2or4chars_ < getDataStorageSize() ) )
return charLimitInUCS2or4chars_ ;
return 0; // no limits for now
}
Lng32 CharType::getScaleOrCharset() const
{
return (Lng32) getCharSet();
}
void CharType::generateTextThenSetDisplayDataType ( CharInfo::CharSet cs // in
, NAString & ddt // in/out
)
{
if ( (cs NEQ CharInfo::UTF8 /* AND cs NEQ CharInfo::SJIS */) OR ddt.isNull() )
return; // only do this for CHARACTER SET SJIS and UTF8
ddt += "(";
UInt32 charLimit = getStrCharLimit();
UInt32 sizeInBytes = getNominalSize();
if ( charLimit * getBytesPerChar() == sizeInBytes )
{
ddt += LongToNAString(charLimit);
if (charLimit EQU 1)
ddt += " CHAR";
else
ddt += " CHARS";
}
else
{
ddt += LongToNAString(sizeInBytes);
if (sizeInBytes EQU 1)
ddt += " BYTE";
else
ddt += " BYTES";
}
ddt += ") CHARACTER SET ";
ddt += CharInfo::getCharSetName(cs);
setDisplayDataType(ddt.data());
}
// -----------------------------------------------------------------------
// Print function for debugging
// -----------------------------------------------------------------------
//LCOV_EXCL_START :rfi
void CharType::print(FILE *ofd, const char *indent)
{
#ifdef TRACING_ENABLED // NT_PORT ( bd 8/4/96 )
fprintf(ofd,"%s%s\n",indent,getTypeSQLname());
#endif
} // CharType::print()
//LCOV_EXCL_STOP
// -----------------------------------------------------------------------
// A method for generating the hash key.
// SQL builtin types should return getTypeSQLName()
// -----------------------------------------------------------------------
NAString* CharType::getKey(CollHeap* h) const
{
return new (h) NAString(getTypeSQLname(), h);
}
void CharType::minMaxRepresentableValue(void* bufPtr,
Lng32* bufLen,
NAString ** stringLiteral,
NABoolean isMax,
CollHeap* h) const
{
Int32 i;
Int32 vcLenHdrSize = getVarLenHdrSize();
char *valPtr = reinterpret_cast<char *>(bufPtr) + vcLenHdrSize;
Int32 valBufLen = getNominalSize();
Int32 valLen = valBufLen; // output length of min/max value
char minmax_char;
wchar_t minmax_wchar;
ComASSERT(*bufLen >= vcLenHdrSize + valBufLen);
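// Buffer layout: [optional varchar length header][value bytes]. The value is
// generated first; the header, if present, is back-filled with valLen below.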
switch (getCharSet())
{
case CharInfo::ISO88591:
case CharInfo::SJIS:
if (isMax)
minmax_char = (char)getMaxSingleCharacterValue();
else
minmax_char = (char)getMinSingleCharacterValue();
memset(valPtr, minmax_char, valBufLen);
break;
case CharInfo::UTF8:
if (isMax)
valLen = fillWithMaxUTF8Chars(valPtr,
valBufLen,
getPrecisionOrMaxNumChars());
else
valLen = fillWithMinUTF8Chars(valPtr,
valBufLen,
getPrecisionOrMaxNumChars());
break;
case CharInfo::UCS2:
if (isMax)
minmax_wchar = (wchar_t)getMaxSingleCharacterValue();
else
minmax_wchar = (wchar_t)getMinSingleCharacterValue();
#ifdef NA_LITTLE_ENDIAN
wc_swap_bytes(&minmax_wchar, 1);
#endif // NA_LITTLE_ENDIAN
valBufLen /= SQL_DBCHAR_SIZE;
for (i=0 ;i < valBufLen; i++)
((wchar_t *)valPtr)[i] = minmax_wchar;
break;
default:
ComASSERT(FALSE); //LCOV_EXCL_LINE :rfi -- no other CharSets are supported yet.
}
// copy the output value length into the varchar len header
if (vcLenHdrSize == sizeof(short))
{
short vc_len = (short) valLen;
str_cpy_all((char *)bufPtr, (char *)&vc_len, vcLenHdrSize);
}
else if (vcLenHdrSize == sizeof(Int32))
{
Int32 vc_len = (Int32) valLen;
str_cpy_all((char *)bufPtr, (char *)&vc_len, vcLenHdrSize);
}
else
ComASSERT(vcLenHdrSize == 0);
if (stringLiteral)
{
NABoolean isNull = FALSE;
NABoolean res = createSQLLiteral((const char *) bufPtr, *stringLiteral, isNull, h);
assert(res);
}
}
// -- Min and max permissible values for a CHAR string
// ## These too will need to be changed to handle different collating sequences
// ## and character sets and multibyte chars and such ...
void SQLChar::minRepresentableValue(void* bufPtr, Lng32* bufLen,
NAString ** stringLiteral,
CollHeap* h) const
{
minMaxRepresentableValue(bufPtr, bufLen, stringLiteral, FALSE, h);
}
void SQLChar::maxRepresentableValue(void* bufPtr, Lng32* bufLen,
NAString ** stringLiteral,
CollHeap* h) const
{
minMaxRepresentableValue(bufPtr, bufLen, stringLiteral, TRUE, h);
}
// encoding of the max char value
double SQLChar::getMaxValue() const
{
EncodedValue dummyVal(0.0);
double encodedval = dummyVal.minMaxValue(this, FALSE);
return encodedval;
}
// encoding of the min char value
double SQLChar::getMinValue() const
{
EncodedValue dummyVal(0.0);
double encodedval = dummyVal.minMaxValue(this, TRUE);
return encodedval;
}
//LCOV_EXCL_START : cnu -- As of 8/30/2011, needed to link successfully, but not actually called.
double SQLChar::encode (void *bufPtr) const
{
#if 1 /* Stub */
return 0;
#else
/* Following code moved into EncVal_Char_encode() in .../optimizer/EncodedValue.cpp */
/* since the optimizer was the only place that needed it. */
char *charBufPtr = (char *) bufPtr;
if (supportsSQLnull())
charBufPtr += getSQLnullHdrSize();
return encodeString(charBufPtr,getNominalSize());
#endif
}
//LCOV_EXCL_STOP : cnu -- As of 8/30/2011, needed to link successfully, but not actually called.
// -- Min and max permissible values for a VARCHAR string
// ## These too will need to be changed to handle different collating sequences
// ## and character sets and multibyte chars and such ...
void SQLVarChar::minRepresentableValue(void* bufPtr, Lng32* bufLen,
NAString ** stringLiteral,
CollHeap* h) const
{
minMaxRepresentableValue(bufPtr, bufLen, stringLiteral, FALSE, h);
}
void SQLVarChar::maxRepresentableValue(void* bufPtr, Lng32* bufLen,
NAString ** stringLiteral,
CollHeap* h) const
{
minMaxRepresentableValue(bufPtr, bufLen, stringLiteral, TRUE, h);
}
//LCOV_EXCL_START : cnu -- As of 8/30/2011, needed to link successfully, but not actually called.
double SQLVarChar::encode (void *bufPtr) const
{
#if 1 /* Stub */
return 0;
#else
/* Following code moved into EncVal_Char_encode() in .../optimizer/EncodedValue.cpp */
/* since the optimizer was the only place that needed it. */
char *charBufPtr = (char *) bufPtr;
if (supportsSQLnull())
charBufPtr += getSQLnullHdrSize();
// copy the actual length of the string into an aligned variable
short actualLen;
ComASSERT(sizeof(short) == getVarLenHdrSize());
str_cpy_all((char *) &actualLen,charBufPtr,sizeof(short));
return encodeString(&charBufPtr[getVarLenHdrSize()],actualLen);
#endif
}
//LCOV_EXCL_STOP : cnu -- As of 8/30/2011, needed to link successfully, but not actually called.
THREAD_P NABoolean pushDownRequiredByODBC = TRUE;
// This function finds the chartype to be pushed down among a set S of
// char types and a default char type.
// Arguments: 0th: the default char type;
// 1st: the first known char type;
// 2nd: the second known char type;
// ... ...
// It returns
// 0 if each chartype in S is of a known charset;
// the default chartype if all chartypes in S are of unknown charset;
// the first chartype in S with known charset;
//
const CharType*
CharType::findPushDownCharType(CharInfo::CharSet cs,
const CharType* first, ...)
{
if ( pushDownRequiredByODBC == FALSE )
return 0;
va_list ap;
va_start(ap, first);
const CharType* ctp = first;
const CharType* sampleCharType = 0;
if ( first == NULL )
return desiredCharType(cs);
Int32 total = 0;
Int32 ct = 0;
do
{
total++;
if ( ctp->getCharSet() != CharInfo::UnknownCharSet ) {
ct++;
if ( sampleCharType == 0 )
sampleCharType = ctp;
}
} while ( (ctp = (CharType*)va_arg(ap, void*)) );
va_end(ap);
if ( ct == total )
return 0;
if ( sampleCharType )
return sampleCharType;
else
return desiredCharType(cs);
}
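// Typical call shape (hypothetical types): the variadic list must be
// NULL-terminated, since the do/while loop above stops when va_arg yields 0:
//   const CharType* ct =
//       CharType::findPushDownCharType(CharInfo::UTF8, type1, type2, NULL);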
const CharType* CharType::desiredCharType(enum CharInfo::CharSet cs)
{
static SQLChar unicodeChar(0, TRUE, FALSE, FALSE, FALSE, CharInfo::UNICODE);
static SQLChar latin1Char(0, TRUE, FALSE, FALSE, FALSE, CharInfo::ISO88591);
static SQLChar sjisChar(0, TRUE, FALSE, FALSE, FALSE, CharInfo::SJIS /* ... old kludge ... CharInfo::ISO88591 */, CharInfo::DefaultCollation, CharInfo::COERCIBLE, CharInfo::SJIS/*encoding*/);
// static SQLChar sjisUnicodeChar(0, TRUE, FALSE, FALSE, FALSE, CharInfo::SJIS, CharInfo::DefaultCollation, CharInfo::COERCIBLE, CharInfo::UNICODE/*encoding*/);
static SQLChar utf8Char(0, TRUE, FALSE, FALSE, FALSE, CharInfo::UTF8 /* ... old kludge ... CharInfo::ISO88591 */, CharInfo::DefaultCollation, CharInfo::COERCIBLE, CharInfo::UTF8/*encoding*/);
switch ( cs ) {
case CharInfo::UNICODE:
return &unicodeChar;
case CharInfo::UTF8:
return &utf8Char;
#if 0 /* SJIS NOT SUPPORTED YET */
case CharInfo::SJIS:
// ??? if (getEncodingCharSet() == CharInfo::SJIS)
return &sjisChar;
// ??? else if (getEncodingCharSet() == CharInfo:UCS2)
// ??? return &sjisUnicodeChar;
// ??? else
// ??? return &sjisChar;
#endif
case CharInfo::ISO88591:
default:
return &latin1Char;
}
#pragma nowarn(203) // warning elimination
return 0;
#pragma warn(203) // warning elimination
}
// round length up to next bigger quantum step
static Lng32 quantize(Lng32 len)
{
static const Lng32 quanLen[] = { 8, 16, 32, 64, 128, 256, 512, 1024 };
Lng32 x, limit=sizeof(quanLen) / sizeof(quanLen[0]);
if (len <= 0) { return quanLen[0]; }
if (len >= quanLen[limit-1]) { return len; }
for (x=0; x<limit-1; x++) {
if (len <= quanLen[x]) { return quanLen[x]; }
}
return len;
}
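// Worked examples: quantize(5) == 8, quantize(100) == 128, and any length at
// or beyond the largest quantum comes back unchanged, e.g. quantize(2000) == 2000.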
// A virtual function to return a char equivalent of this type
NAType* SQLVarChar::equivalentCharType(NAMemory *h, NABoolean quantizeLen)
{
Lng32 len_in_chars ;
Lng32 len_in_bytes ;
len_in_chars = quantizeLen ? quantize(getStrCharLimit()) : getStrCharLimit();
len_in_bytes = quantizeLen ? quantize(getNominalSize()) : getNominalSize() ;
CharLenInfo CLInfo( len_in_chars, len_in_bytes );
return new(h) SQLChar(CLInfo, supportsSQLnull(), isUpshifted(),
isCaseinsensitive(), FALSE,
getCharSet(), getCollation(), getCoercibility());
}
// A virtual function to return a varchar equivalent of this type
NAType* SQLChar::equivalentVarCharType(NAMemory *h, NABoolean quantizeLen)
{
Lng32 len_in_chars ;
Lng32 len_in_bytes ;
len_in_chars = quantizeLen ? quantize(getStrCharLimit()) : getStrCharLimit();
len_in_bytes = quantizeLen ? quantize(getNominalSize()) : getNominalSize() ;
CharLenInfo CLInfo( len_in_chars, len_in_bytes );
return new(h) SQLVarChar(CLInfo, supportsSQLnull(), isUpshifted(),
isCaseinsensitive(),
getCharSet(), getCollation(), getCoercibility());
}
// A virtual function to return a varchar equivalent of this type
NAType* ANSIChar::equivalentVarCharType(NAMemory *h, NABoolean quantizeLen)
{
Lng32 len = quantizeLen ? quantize(getStrCharLimit()) : getStrCharLimit();
return new(h) SQLVarChar(len, supportsSQLnull(), isUpshifted(),
isCaseinsensitive(),
getCharSet(), getCollation(), getCoercibility());
}
short SQLLongVarChar::getFSDatatype() const
{
if ( getCharSet() == CharInfo::UNICODE )
return REC_BYTE_V_DOUBLE;
else
return REC_BYTE_V_ASCII_LONG;
}
short SQLLongVarChar::getTrueFSDatatype() const
{
return REC_BYTE_V_ASCII_LONG;
}
//////////////////////////////
// class SQLlob
//////////////////////////////
SQLlob::SQLlob(
NABuiltInTypeEnum ev,
Int64 lobLength,
LobsStorage ls,
NABoolean allowSQLnull,
NABoolean inlineIfPossible,
NABoolean externalFormat,
Lng32 extFormatLen
)
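// Note: both arms of the ternary below evaluate to LiteralLOB, so the type
// name is "LOB" for any ev; the BLOB/CLOB distinction lives in the subclasses.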
: NAType( (ev == NA_LOB_TYPE ? LiteralLOB : LiteralLOB)
, ev
, (externalFormat ? extFormatLen : 512)
, allowSQLnull
, allowSQLnull ? SQL_NULL_HDR_SIZE : 0
, TRUE
, SQL_VARCHAR_HDR_SIZE
),
inlineIfPossible_(inlineIfPossible),
externalFormat_(externalFormat),
extFormatLen_(extFormatLen),
lobStorage_(ls),
lobLength_(lobLength)
{
if (externalFormat_)
lobStorage_ = Lob_External_HDFS_File;
else
lobStorage_ = Lob_HDFS_File;
}
// ---------------------------------------------------------------------
// A method which tells if a conversion error can occur when converting
// a value of this type to the target type.
// ---------------------------------------------------------------------
NABoolean SQLlob::errorsCanOccur (const NAType& target, NABoolean lax) const
{
if (!NAType::errorsCanOccur(target))
{return FALSE;}
else
{return TRUE;}
}
NAString SQLlob::getTypeSQLname(NABoolean terse) const
{
NAString rName = "LOB";
getTypeSQLnull(rName, terse);
return rName;
} // getTypeSQLname()
/////////////////////////////////////
// class SQLBlob
/////////////////////////////////////
SQLBlob::SQLBlob(
Int64 blobLength,
LobsStorage lobStorage,
NABoolean allowSQLnull,
NABoolean inlineIfPossible,
NABoolean externalFormat,
Lng32 extFormatLen
)
: SQLlob(NA_LOB_TYPE,
blobLength,
lobStorage,
allowSQLnull,
inlineIfPossible,
externalFormat,
extFormatLen)
{
setCharSet(CharInfo::ISO88591);//lobhandle can only be in ISO format
}
NAType *SQLBlob::newCopy(NAMemory* h) const
{ return new(h) SQLBlob(*this,h); }
// -----------------------------------------------------------------------
// Type synthesis for binary operators
// -----------------------------------------------------------------------
const NAType* SQLBlob::synthesizeType(NATypeSynthRuleEnum synthRule,
const NAType& operand1,
const NAType& operand2,
CollHeap* h,
UInt32 *flags) const
{
if (operand1.getFSDatatype() != REC_BLOB ||
operand2.getFSDatatype() != REC_BLOB)
return NULL;
SQLBlob& op1 = (SQLBlob &) operand1;
SQLBlob& op2 = (SQLBlob &) operand2;
NABoolean null = op1.supportsSQLnull() OR op2.supportsSQLnull();
if (synthRule == SYNTH_RULE_UNION)
{
return new(h) SQLBlob(MAXOF(op1.getLobLength(), op2.getLobLength()),
op1.getLobStorage(),
null);
}
return NULL;
}
/////////////////////////////////////
// class SQLClob
/////////////////////////////////////
SQLClob::SQLClob(
Int64 clobLength,
LobsStorage lobStorage,
NABoolean allowSQLnull,
NABoolean inlineIfPossible,
NABoolean externalFormat,
Lng32 extFormatLen
)
: SQLlob(NA_LOB_TYPE,
clobLength,
lobStorage,
allowSQLnull,
inlineIfPossible,
externalFormat,
extFormatLen)
{
setCharSet(CharInfo::ISO88591); //lob handle can only be in this format
}
NAType *SQLClob::newCopy(NAMemory* h) const
{ return new(h) SQLClob(*this,h); }
// -----------------------------------------------------------------------
// Type synthesis for binary operators
// -----------------------------------------------------------------------
const NAType* SQLClob::synthesizeType(NATypeSynthRuleEnum synthRule,
const NAType& operand1,
const NAType& operand2,
CollHeap* h,
UInt32 *flags) const
{
if (operand1.getFSDatatype() != REC_CLOB ||
operand2.getFSDatatype() != REC_CLOB)
return NULL;
SQLClob& op1 = (SQLClob &) operand1;
SQLClob& op2 = (SQLClob &) operand2;
NABoolean null = op1.supportsSQLnull() OR op2.supportsSQLnull();
if (synthRule == SYNTH_RULE_UNION)
{
return new(h) SQLClob(MAXOF(op1.getLobLength(), op2.getLobLength()),
op1.getLobStorage(),
null);
}
return NULL;
}
| 1 | 12,310 | I'm confused by the name. The string was a Hive string but it isn't any longer? Why should we care? (I imagine I'll find the answer later... there is some different semantic that we want downstream...) | apache-trafodion | cpp |
@@ -31,12 +31,7 @@ import org.openqa.selenium.grid.sessionmap.SessionMap;
import org.openqa.selenium.grid.sessionmap.local.LocalSessionMap;
import org.openqa.selenium.netty.server.NettyServer;
import org.openqa.selenium.remote.SessionId;
-import org.openqa.selenium.remote.http.HttpClient;
-import org.openqa.selenium.remote.http.HttpHandler;
-import org.openqa.selenium.remote.http.HttpRequest;
-import org.openqa.selenium.remote.http.HttpResponse;
-import org.openqa.selenium.remote.http.TextMessage;
-import org.openqa.selenium.remote.http.WebSocket;
+import org.openqa.selenium.remote.http.*;
import org.openqa.selenium.remote.tracing.DefaultTestTracer;
import org.openqa.selenium.remote.tracing.Tracer;
| 1 | // Licensed to the Software Freedom Conservancy (SFC) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The SFC licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.openqa.selenium.grid.router;
import org.junit.Before;
import org.junit.Test;
import org.openqa.selenium.ImmutableCapabilities;
import org.openqa.selenium.events.EventBus;
import org.openqa.selenium.events.local.GuavaEventBus;
import org.openqa.selenium.grid.config.Config;
import org.openqa.selenium.grid.config.MapConfig;
import org.openqa.selenium.grid.data.Session;
import org.openqa.selenium.grid.server.BaseServerOptions;
import org.openqa.selenium.grid.server.Server;
import org.openqa.selenium.grid.sessionmap.SessionMap;
import org.openqa.selenium.grid.sessionmap.local.LocalSessionMap;
import org.openqa.selenium.netty.server.NettyServer;
import org.openqa.selenium.remote.SessionId;
import org.openqa.selenium.remote.http.HttpClient;
import org.openqa.selenium.remote.http.HttpHandler;
import org.openqa.selenium.remote.http.HttpRequest;
import org.openqa.selenium.remote.http.HttpResponse;
import org.openqa.selenium.remote.http.TextMessage;
import org.openqa.selenium.remote.http.WebSocket;
import org.openqa.selenium.remote.tracing.DefaultTestTracer;
import org.openqa.selenium.remote.tracing.Tracer;
import java.net.URISyntaxException;
import java.util.Map;
import java.util.Optional;
import java.util.UUID;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.atomic.AtomicReference;
import static java.util.concurrent.TimeUnit.SECONDS;
import static org.assertj.core.api.Assertions.assertThat;
import static org.openqa.selenium.remote.http.HttpMethod.GET;
public class ProxyCdpTest {
private final HttpHandler nullHandler = req -> new HttpResponse();
private final Config emptyConfig = new MapConfig(Map.of());
private Server<?> proxyServer;
private SessionMap sessions;
@Before
public void setUp() {
Tracer tracer = DefaultTestTracer.createTracer();
EventBus events = new GuavaEventBus();
sessions = new LocalSessionMap(tracer, events);
// Set up the proxy we'll be using
HttpClient.Factory clientFactory = HttpClient.Factory.createDefault();
ProxyCdpIntoGrid proxy = new ProxyCdpIntoGrid(clientFactory, sessions);
proxyServer = new NettyServer(new BaseServerOptions(emptyConfig), nullHandler, proxy).start();
}
@Test
public void shouldForwardTextMessageToServer() throws URISyntaxException, InterruptedException {
HttpClient.Factory clientFactory = HttpClient.Factory.createDefault();
// Create a backend server which will capture any incoming text message
AtomicReference<String> text = new AtomicReference<>();
CountDownLatch latch = new CountDownLatch(1);
Server<?> backend = createBackendServer(latch, text, "");
// Push a session that resolves to the backend server into the session map
SessionId id = new SessionId(UUID.randomUUID());
sessions.add(new Session(id, backend.getUrl().toURI(), new ImmutableCapabilities()));
// Now! Send a message. We expect it to eventually show up in the backend
WebSocket socket = clientFactory.createClient(proxyServer.getUrl())
.openSocket(new HttpRequest(GET, String.format("/session/%s/cdp", id)), new WebSocket.Listener(){});
socket.sendText("Cheese!");
assertThat(latch.await(5, SECONDS)).isTrue();
assertThat(text.get()).isEqualTo("Cheese!");
socket.close();
}
@Test
public void shouldForwardTextMessageFromServerToLocalEnd() throws URISyntaxException, InterruptedException {
HttpClient.Factory clientFactory = HttpClient.Factory.createDefault();
Server<?> backend = createBackendServer(new CountDownLatch(1), new AtomicReference<>(), "Asiago");
// Push a session that resolves to the backend server into the session map
SessionId id = new SessionId(UUID.randomUUID());
sessions.add(new Session(id, backend.getUrl().toURI(), new ImmutableCapabilities()));
// Now! Send a message and expect the backend's canned response to arrive back at the local end
CountDownLatch latch = new CountDownLatch(1);
AtomicReference<String> text = new AtomicReference<>();
WebSocket socket = clientFactory.createClient(proxyServer.getUrl())
.openSocket(new HttpRequest(GET, String.format("/session/%s/cdp", id)), new WebSocket.Listener() {
@Override
public void onText(CharSequence data) {
text.set(data.toString());
latch.countDown();
}
});
socket.sendText("Cheese!");
assertThat(latch.await(5, SECONDS)).isTrue();
assertThat(text.get()).isEqualTo("Asiago");
socket.close();
}
private Server<?> createBackendServer(CountDownLatch latch, AtomicReference<String> incomingRef, String response) {
return new NettyServer(
new BaseServerOptions(emptyConfig),
nullHandler,
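// This argument acts as the websocket handler factory here: every new socket
// gets a consumer that records the incoming text and echoes the canned response.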
(uri, sink) -> Optional.of(msg -> {
if (msg instanceof TextMessage) {
incomingRef.set(((TextMessage) msg).text());
sink.accept(new TextMessage(response));
latch.countDown();
}
}))
.start();
}
}
| 1 | 17,921 | Could you please leave the explicit imports? | SeleniumHQ-selenium | java |
@@ -23,7 +23,7 @@ import (
"github.com/iotexproject/iotex-core/pkg/hash"
"github.com/iotexproject/iotex-core/pkg/keypair"
"github.com/iotexproject/iotex-core/pkg/log"
- "github.com/iotexproject/iotex-core/proto"
+ iproto "github.com/iotexproject/iotex-core/proto"
"github.com/iotexproject/iotex-core/state/factory"
)
| 1 | // Copyright (c) 2018 IoTeX
// This is an alpha (internal) release and is not suitable for production. This source code is provided 'as is' and no
// warranties are given as to title or non-infringement, merchantability or fitness for purpose and, to the extent
// permitted by law, all liability for your use of the code is disclaimed. This source code is governed by Apache
// License 2.0 that can be found in the LICENSE file.
package block
import (
"bytes"
"time"
"github.com/golang/protobuf/proto"
"github.com/golang/protobuf/ptypes/timestamp"
"github.com/iotexproject/go-ethereum/crypto"
"github.com/pkg/errors"
"go.uber.org/zap"
"golang.org/x/crypto/blake2b"
"github.com/iotexproject/iotex-core/action"
"github.com/iotexproject/iotex-core/address"
"github.com/iotexproject/iotex-core/endorsement"
"github.com/iotexproject/iotex-core/pkg/hash"
"github.com/iotexproject/iotex-core/pkg/keypair"
"github.com/iotexproject/iotex-core/pkg/log"
"github.com/iotexproject/iotex-core/proto"
"github.com/iotexproject/iotex-core/state/factory"
)
// Block defines the struct of block
type Block struct {
Header
Footer
Actions []action.SealedEnvelope
SecretProposals []*action.SecretProposal
SecretWitness *action.SecretWitness
// TODO: move receipts out of block struct
Receipts []*action.Receipt
WorkingSet factory.WorkingSet
}
// ByteStream returns a byte stream of the block
func (b *Block) ByteStream() []byte {
stream := b.Header.ByteStream()
// Append the block signature, followed by the DKG ID, public key, and signature
stream = append(stream, b.Header.blockSig[:]...)
stream = append(stream, b.Header.dkgID[:]...)
stream = append(stream, b.Header.dkgPubkey[:]...)
stream = append(stream, b.Header.dkgBlockSig[:]...)
for _, act := range b.Actions {
stream = append(stream, act.ByteStream()...)
}
return stream
}
// ConvertToBlockHeaderPb converts BlockHeader to BlockHeaderPb
func (b *Block) ConvertToBlockHeaderPb() *iproto.BlockHeaderPb {
pbHeader := iproto.BlockHeaderPb{}
pbHeader.Version = b.Header.version
pbHeader.ChainID = b.Header.chainID
pbHeader.Height = b.Header.height
pbHeader.Timestamp = &timestamp.Timestamp{
Seconds: b.Header.Timestamp(),
}
pbHeader.PrevBlockHash = b.Header.prevBlockHash[:]
pbHeader.TxRoot = b.Header.txRoot[:]
pbHeader.StateRoot = b.Header.stateRoot[:]
pbHeader.DeltaStateDigest = b.Header.deltaStateDigest[:]
pbHeader.ReceiptRoot = b.Header.receiptRoot[:]
pbHeader.Signature = b.Header.blockSig[:]
pbHeader.Pubkey = keypair.PublicKeyToBytes(b.Header.pubkey)
pbHeader.DkgID = b.Header.dkgID[:]
pbHeader.DkgPubkey = b.Header.dkgPubkey[:]
pbHeader.DkgSignature = b.Header.dkgBlockSig[:]
return &pbHeader
}
// ConvertToBlockPb converts Block to BlockPb
func (b *Block) ConvertToBlockPb() *iproto.BlockPb {
actions := []*iproto.ActionPb{}
for _, act := range b.Actions {
actions = append(actions, act.Proto())
}
return &iproto.BlockPb{
Header: b.ConvertToBlockHeaderPb(),
Actions: actions,
Footer: b.ConvertToBlockFooterPb(),
}
}
// Serialize returns the serialized byte stream of the block
func (b *Block) Serialize() ([]byte, error) {
return proto.Marshal(b.ConvertToBlockPb())
}
// ConvertFromBlockHeaderPb converts BlockHeaderPb to BlockHeader
func (b *Block) ConvertFromBlockHeaderPb(pbBlock *iproto.BlockPb) {
b.Header = Header{}
b.Header.version = pbBlock.GetHeader().GetVersion()
b.Header.chainID = pbBlock.GetHeader().GetChainID()
b.Header.height = pbBlock.GetHeader().GetHeight()
b.Header.timestamp = pbBlock.GetHeader().GetTimestamp().GetSeconds()
copy(b.Header.prevBlockHash[:], pbBlock.GetHeader().GetPrevBlockHash())
copy(b.Header.txRoot[:], pbBlock.GetHeader().GetTxRoot())
copy(b.Header.stateRoot[:], pbBlock.GetHeader().GetStateRoot())
copy(b.Header.deltaStateDigest[:], pbBlock.GetHeader().GetDeltaStateDigest())
copy(b.Header.receiptRoot[:], pbBlock.GetHeader().GetReceiptRoot())
b.Header.blockSig = pbBlock.GetHeader().GetSignature()
b.Header.dkgID = pbBlock.GetHeader().GetDkgID()
b.Header.dkgPubkey = pbBlock.GetHeader().GetDkgPubkey()
b.Header.dkgBlockSig = pbBlock.GetHeader().GetDkgSignature()
pubKey, err := keypair.BytesToPublicKey(pbBlock.GetHeader().GetPubkey())
if err != nil {
log.L().Panic("Failed to unmarshal public key.", zap.Error(err))
}
b.Header.pubkey = pubKey
}
// ConvertFromBlockPb converts BlockPb to Block
func (b *Block) ConvertFromBlockPb(pbBlock *iproto.BlockPb) error {
b.ConvertFromBlockHeaderPb(pbBlock)
b.Actions = []action.SealedEnvelope{}
for _, actPb := range pbBlock.Actions {
act := action.SealedEnvelope{}
if err := act.LoadProto(actPb); err != nil {
return err
}
b.Actions = append(b.Actions, act)
// TODO handle SecretProposal and SecretWitness
}
return b.ConvertFromBlockFooterPb(pbBlock.GetFooter())
}
// Deserialize parses the byte stream into a Block
func (b *Block) Deserialize(buf []byte) error {
pbBlock := iproto.BlockPb{}
if err := proto.Unmarshal(buf, &pbBlock); err != nil {
return err
}
if err := b.ConvertFromBlockPb(&pbBlock); err != nil {
return err
}
b.WorkingSet = nil
	// verify that the merkle root matches after deserialization
txroot := b.CalculateTxRoot()
if !bytes.Equal(b.Header.txRoot[:], txroot[:]) {
return errors.New("Failed to match merkle root after deserialize")
}
return nil
}
// CalculateTxRoot returns the Merkle root of all txs and actions in this block.
func (b *Block) CalculateTxRoot() hash.Hash32B {
return calculateTxRoot(b.Actions)
}
// HashBlock returns the hash of this block (actually hash of block header)
func (b *Block) HashBlock() hash.Hash32B {
return blake2b.Sum256(b.Header.ByteStream())
}
// VerifyStateRoot verifies the state root in header
func (b *Block) VerifyStateRoot(root hash.Hash32B) error {
if b.Header.stateRoot != root {
return errors.Errorf(
"state root hash does not match, expected = %x, actual = %x",
b.Header.stateRoot,
root,
)
}
return nil
}
// VerifyDeltaStateDigest verifies the delta state digest in header
func (b *Block) VerifyDeltaStateDigest(digest hash.Hash32B) error {
if b.Header.deltaStateDigest != digest {
return errors.Errorf(
"delta state digest doesn't match, expected = %x, actual = %x",
b.Header.deltaStateDigest,
digest,
)
}
return nil
}
// VerifySignature verifies the signature saved in block header
func (b *Block) VerifySignature() bool {
blkHash := b.HashBlock()
if len(b.Header.blockSig) != action.SignatureLength {
return false
}
return crypto.VerifySignature(keypair.PublicKeyToBytes(b.Header.pubkey), blkHash[:],
b.Header.blockSig[:action.SignatureLength-1])
}
// VerifyReceiptRoot verifies the receipt root in header
func (b *Block) VerifyReceiptRoot(root hash.Hash32B) error {
if b.Header.receiptRoot != root {
return errors.New("receipt root hash does not match")
}
return nil
}
// ProducerAddress returns the address of the producer
func (b *Block) ProducerAddress() string {
pkHash := keypair.HashPubKey(b.Header.pubkey)
addr := address.New(pkHash[:])
return addr.Bech32()
}
// RunnableActions abstracts RunnableActions from a Block.
func (b *Block) RunnableActions() RunnableActions {
pkHash := keypair.HashPubKey(b.Header.pubkey)
addr := address.New(pkHash[:])
return RunnableActions{
blockHeight: b.Header.height,
blockTimeStamp: b.Header.timestamp,
blockProducerPubKey: b.Header.pubkey,
blockProducerAddr: addr.Bech32(),
actions: b.Actions,
txHash: b.txRoot,
}
}
// Finalize creates a footer for the block
func (b *Block) Finalize(set *endorsement.Set, ts time.Time) error {
if b.endorsements != nil {
return errors.New("the block has been finalized")
}
if set == nil {
return errors.New("endorsement set is nil")
}
commitEndorsements, err := set.SubSet(endorsement.COMMIT)
if err != nil {
return err
}
b.endorsements = commitEndorsements
b.commitTimestamp = ts.Unix()
return nil
}
// FooterLogger logs the endorsements in block footer
func (b *Block) FooterLogger(l *zap.Logger) *zap.Logger {
if b.endorsements == nil {
h := b.HashBlock()
return l.With(
log.Hex("blockHash", h[:]),
zap.Uint64("blockHeight", b.Height()),
zap.Int("numOfEndorsements", 0),
)
}
return b.endorsements.EndorsementsLogger(l)
}
| 1 | 14,913 | Why need alias? | iotexproject-iotex-core | go |
@@ -148,6 +148,10 @@ func (c testTLFJournalConfig) teamMembershipChecker() kbfsmd.TeamMembershipCheck
return nil
}
+func (c testTLFJournalConfig) tlfIDGetter() tlfIDGetter {
+ return nil
+}
+
func (c testTLFJournalConfig) diskLimitTimeout() time.Duration {
return c.dlTimeout
} | 1 | // Copyright 2016 Keybase Inc. All rights reserved.
// Use of this source code is governed by a BSD
// license that can be found in the LICENSE file.
package libkbfs
import (
"math"
"os"
"reflect"
"sync"
"testing"
"time"
"github.com/keybase/client/go/protocol/keybase1"
"github.com/keybase/kbfs/ioutil"
"github.com/keybase/kbfs/kbfsblock"
"github.com/keybase/kbfs/kbfscodec"
"github.com/keybase/kbfs/kbfscrypto"
"github.com/keybase/kbfs/kbfshash"
"github.com/keybase/kbfs/kbfsmd"
"github.com/keybase/kbfs/tlf"
"github.com/pkg/errors"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"golang.org/x/net/context"
)
// testBWDelegate is a delegate we pass to tlfJournal to get info
// about its state transitions.
type testBWDelegate struct {
t *testing.T
// Store a context so that the tlfJournal's background context
// will also obey the test timeout.
testCtx context.Context
stateCh chan bwState
shutdownCh chan struct{}
}
func (d testBWDelegate) GetBackgroundContext() context.Context {
return d.testCtx
}
func (d testBWDelegate) OnNewState(ctx context.Context, bws bwState) {
select {
case d.stateCh <- bws:
case <-ctx.Done():
assert.Fail(d.t, ctx.Err().Error())
}
}
func (d testBWDelegate) OnShutdown(ctx context.Context) {
select {
case d.shutdownCh <- struct{}{}:
case <-ctx.Done():
assert.Fail(d.t, ctx.Err().Error())
}
}
func (d testBWDelegate) requireNextState(
ctx context.Context, expectedState ...bwState) bwState {
select {
case bws := <-d.stateCh:
require.Contains(d.t, expectedState, bws)
return bws
case <-ctx.Done():
assert.Fail(d.t, ctx.Err().Error())
return bwIdle
}
}
// testTLFJournalConfig is the config we pass to the tlfJournal, and
// also contains some helper functions for testing.
type testTLFJournalConfig struct {
codecGetter
logMaker
t *testing.T
tlfID tlf.ID
splitter BlockSplitter
crypto CryptoLocal
bcache BlockCache
bops BlockOps
mdcache MDCache
ver kbfsmd.MetadataVer
reporter Reporter
uid keybase1.UID
verifyingKey kbfscrypto.VerifyingKey
ekg singleEncryptionKeyGetter
nug normalizedUsernameGetter
mdserver MDServer
dlTimeout time.Duration
}
func (c testTLFJournalConfig) BlockSplitter() BlockSplitter {
return c.splitter
}
func (c testTLFJournalConfig) Clock() Clock {
return wallClock{}
}
func (c testTLFJournalConfig) Crypto() Crypto {
return c.crypto
}
func (c testTLFJournalConfig) BlockCache() BlockCache {
return c.bcache
}
func (c testTLFJournalConfig) BlockOps() BlockOps {
return c.bops
}
func (c testTLFJournalConfig) MDCache() MDCache {
return c.mdcache
}
func (c testTLFJournalConfig) MetadataVersion() kbfsmd.MetadataVer {
return c.ver
}
func (c testTLFJournalConfig) Reporter() Reporter {
return c.reporter
}
func (c testTLFJournalConfig) cryptoPure() cryptoPure {
return c.crypto
}
func (c testTLFJournalConfig) encryptionKeyGetter() encryptionKeyGetter {
return c.ekg
}
func (c testTLFJournalConfig) mdDecryptionKeyGetter() mdDecryptionKeyGetter {
return c.ekg
}
func (c testTLFJournalConfig) usernameGetter() normalizedUsernameGetter {
return c.nug
}
func (c testTLFJournalConfig) MDServer() MDServer {
return c.mdserver
}
func (c testTLFJournalConfig) teamMembershipChecker() kbfsmd.TeamMembershipChecker {
// TODO: support team TLF tests.
return nil
}
func (c testTLFJournalConfig) diskLimitTimeout() time.Duration {
return c.dlTimeout
}
func (c testTLFJournalConfig) BGFlushDirOpBatchSize() int {
return 1
}
func (c testTLFJournalConfig) makeBlock(data []byte) (
kbfsblock.ID, kbfsblock.Context, kbfscrypto.BlockCryptKeyServerHalf) {
id, err := kbfsblock.MakePermanentID(data)
require.NoError(c.t, err)
bCtx := kbfsblock.MakeFirstContext(
c.uid.AsUserOrTeam(), keybase1.BlockType_DATA)
serverHalf, err := kbfscrypto.MakeRandomBlockCryptKeyServerHalf()
require.NoError(c.t, err)
return id, bCtx, serverHalf
}
func (c testTLFJournalConfig) makeMD(
revision kbfsmd.Revision, prevRoot kbfsmd.ID) *RootMetadata {
return makeMDForTest(c.t, c.ver, c.tlfID, revision, c.uid, c.crypto, prevRoot)
}
func (c testTLFJournalConfig) checkMD(rmds *RootMetadataSigned,
extra kbfsmd.ExtraMetadata, expectedRevision kbfsmd.Revision,
expectedPrevRoot kbfsmd.ID, expectedMergeStatus kbfsmd.MergeStatus,
expectedBranchID kbfsmd.BranchID) {
verifyingKey := c.crypto.SigningKeySigner.Key.GetVerifyingKey()
checkBRMD(c.t, c.uid, verifyingKey, c.Codec(),
rmds.MD, extra, expectedRevision, expectedPrevRoot,
expectedMergeStatus, expectedBranchID)
err := rmds.IsValidAndSigned(
context.Background(), c.Codec(), nil, extra)
require.NoError(c.t, err)
err = rmds.IsLastModifiedBy(c.uid, verifyingKey)
require.NoError(c.t, err)
}
func (c testTLFJournalConfig) checkRange(rmdses []rmdsWithExtra,
firstRevision kbfsmd.Revision, firstPrevRoot kbfsmd.ID,
mStatus kbfsmd.MergeStatus, bid kbfsmd.BranchID) {
c.checkMD(rmdses[0].rmds, rmdses[0].extra, firstRevision,
firstPrevRoot, mStatus, bid)
for i := 1; i < len(rmdses); i++ {
prevID, err := kbfsmd.MakeID(c.Codec(), rmdses[i-1].rmds.MD)
require.NoError(c.t, err)
c.checkMD(rmdses[i].rmds, rmdses[i].extra,
firstRevision+kbfsmd.Revision(i), prevID, mStatus, bid)
err = rmdses[i-1].rmds.MD.CheckValidSuccessor(
prevID, rmdses[i].rmds.MD)
require.NoError(c.t, err)
}
}
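// setupTLFJournalTest sets up a tlfJournal with a test config and a
// testBWDelegate, and drains the initial state transitions implied by
// the given background work status.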
func setupTLFJournalTest(
t *testing.T, ver kbfsmd.MetadataVer, bwStatus TLFJournalBackgroundWorkStatus) (
tempdir string, config *testTLFJournalConfig, ctx context.Context,
cancel context.CancelFunc, tlfJournal *tlfJournal,
delegate testBWDelegate) {
// Set up config and dependencies.
bsplitter := &BlockSplitterSimple{
64 * 1024, int(64 * 1024 / bpSize), 8 * 1024}
codec := kbfscodec.NewMsgpack()
signingKey := kbfscrypto.MakeFakeSigningKeyOrBust("client sign")
cryptPrivateKey := kbfscrypto.MakeFakeCryptPrivateKeyOrBust("client crypt private")
crypto := NewCryptoLocal(codec, signingKey, cryptPrivateKey)
uid := keybase1.MakeTestUID(1)
verifyingKey := signingKey.GetVerifyingKey()
ekg := singleEncryptionKeyGetter{kbfscrypto.MakeTLFCryptKey([32]byte{0x1})}
cig := singleCurrentSessionGetter{
SessionInfo{
Name: "fake_user",
UID: uid,
VerifyingKey: verifyingKey,
},
}
mdserver, err := NewMDServerMemory(newTestMDServerLocalConfig(t, cig))
require.NoError(t, err)
config = &testTLFJournalConfig{
newTestCodecGetter(), newTestLogMaker(t), t,
tlf.FakeID(1, tlf.Private), bsplitter, crypto,
nil, nil, NewMDCacheStandard(10), ver,
NewReporterSimple(newTestClockNow(), 10), uid, verifyingKey, ekg, nil,
mdserver, defaultDiskLimitMaxDelay + time.Second,
}
ctx, cancel = context.WithTimeout(
context.Background(), individualTestTimeout)
// Clean up the context if the rest of the setup fails.
setupSucceeded := false
defer func() {
if !setupSucceeded {
cancel()
}
}()
delegate = testBWDelegate{
t: t,
testCtx: ctx,
stateCh: make(chan bwState),
shutdownCh: make(chan struct{}),
}
tempdir, err = ioutil.TempDir(os.TempDir(), "tlf_journal")
require.NoError(t, err)
// Clean up the tempdir if anything in the rest of the setup
// fails.
defer func() {
if !setupSucceeded {
err := ioutil.RemoveAll(tempdir)
assert.NoError(t, err)
}
}()
delegateBlockServer := NewBlockServerMemory(config.MakeLogger(""))
diskLimitSemaphore := newSemaphoreDiskLimiter(
math.MaxInt64, math.MaxInt64, math.MaxInt64)
tlfJournal, err = makeTLFJournal(ctx, uid, verifyingKey,
tempdir, config.tlfID, uid.AsUserOrTeam(), config, delegateBlockServer,
bwStatus, delegate, nil, nil, diskLimitSemaphore)
require.NoError(t, err)
switch bwStatus {
case TLFJournalBackgroundWorkEnabled:
// Same as the single op case.
fallthrough
case TLFJournalSingleOpBackgroundWorkEnabled:
// Read the state changes triggered by the initial
// work signal.
delegate.requireNextState(ctx, bwIdle)
delegate.requireNextState(ctx, bwBusy)
delegate.requireNextState(ctx, bwIdle)
case TLFJournalBackgroundWorkPaused:
delegate.requireNextState(ctx, bwPaused)
default:
require.FailNow(t, "Unknown bwStatus %s", bwStatus)
}
setupSucceeded = true
return tempdir, config, ctx, cancel, tlfJournal, delegate
}
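// teardownTLFJournalTest shuts down the journal and its dependencies,
// verifies that no unexpected state changes arrive, and removes the
// temp dir.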
func teardownTLFJournalTest(
tempdir string, config *testTLFJournalConfig, ctx context.Context,
cancel context.CancelFunc, tlfJournal *tlfJournal,
delegate testBWDelegate) {
// Shutdown first so we don't get the Done() signal (from the
// cancel() call) spuriously.
tlfJournal.shutdown(ctx)
select {
case <-delegate.shutdownCh:
case <-ctx.Done():
assert.Fail(config.t, ctx.Err().Error())
}
cancel()
select {
case bws := <-delegate.stateCh:
assert.Fail(config.t, "Unexpected state %s", bws)
default:
}
config.mdserver.Shutdown()
tlfJournal.delegateBlockServer.Shutdown(ctx)
err := ioutil.RemoveAll(tempdir)
assert.NoError(config.t, err)
}
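// putOneMD puts a single initial MD revision into the journal.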
func putOneMD(ctx context.Context, config *testTLFJournalConfig,
tlfJournal *tlfJournal) {
md := config.makeMD(kbfsmd.RevisionInitial, kbfsmd.ID{})
_, err := tlfJournal.putMD(ctx, md, tlfJournal.key)
require.NoError(config.t, err)
}
// The tests below primarily test the background work thread's
// behavior.
func testTLFJournalBasic(t *testing.T, ver kbfsmd.MetadataVer) {
tempdir, config, ctx, cancel, tlfJournal, delegate :=
setupTLFJournalTest(t, ver, TLFJournalBackgroundWorkEnabled)
defer teardownTLFJournalTest(
tempdir, config, ctx, cancel, tlfJournal, delegate)
putOneMD(ctx, config, tlfJournal)
// Wait for it to be processed.
delegate.requireNextState(ctx, bwBusy)
delegate.requireNextState(ctx, bwIdle)
}
func testTLFJournalPauseResume(t *testing.T, ver kbfsmd.MetadataVer) {
tempdir, config, ctx, cancel, tlfJournal, delegate :=
setupTLFJournalTest(t, ver, TLFJournalBackgroundWorkEnabled)
defer teardownTLFJournalTest(
tempdir, config, ctx, cancel, tlfJournal, delegate)
tlfJournal.pauseBackgroundWork()
delegate.requireNextState(ctx, bwPaused)
putOneMD(ctx, config, tlfJournal)
// Unpause and wait for it to be processed.
tlfJournal.resumeBackgroundWork()
delegate.requireNextState(ctx, bwIdle)
delegate.requireNextState(ctx, bwBusy)
delegate.requireNextState(ctx, bwIdle)
}
func testTLFJournalPauseShutdown(t *testing.T, ver kbfsmd.MetadataVer) {
tempdir, config, ctx, cancel, tlfJournal, delegate :=
setupTLFJournalTest(t, ver, TLFJournalBackgroundWorkEnabled)
defer teardownTLFJournalTest(
tempdir, config, ctx, cancel, tlfJournal, delegate)
tlfJournal.pauseBackgroundWork()
delegate.requireNextState(ctx, bwPaused)
putOneMD(ctx, config, tlfJournal)
// Should still be able to shut down while paused.
}
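// hangingBlockServer is a BlockServer whose Put closes onPutCh and
// then hangs until the context is cancelled.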
type hangingBlockServer struct {
BlockServer
// Closed on put.
onPutCh chan struct{}
}
func (bs hangingBlockServer) Put(
ctx context.Context, tlfID tlf.ID, id kbfsblock.ID, context kbfsblock.Context,
buf []byte, serverHalf kbfscrypto.BlockCryptKeyServerHalf) error {
close(bs.onPutCh)
// Hang until the context is cancelled.
<-ctx.Done()
return ctx.Err()
}
func (bs hangingBlockServer) waitForPut(ctx context.Context, t *testing.T) {
select {
case <-bs.onPutCh:
case <-ctx.Done():
require.FailNow(t, ctx.Err().Error())
}
}
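// putBlock makes a block from the given data and puts it into the
// journal.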
func putBlock(ctx context.Context,
t *testing.T, config *testTLFJournalConfig,
tlfJournal *tlfJournal, data []byte) {
id, bCtx, serverHalf := config.makeBlock(data)
err := tlfJournal.putBlockData(ctx, id, bCtx, data, serverHalf)
require.NoError(t, err)
}
func testTLFJournalBlockOpBasic(t *testing.T, ver kbfsmd.MetadataVer) {
tempdir, config, ctx, cancel, tlfJournal, delegate :=
setupTLFJournalTest(t, ver, TLFJournalBackgroundWorkPaused)
defer teardownTLFJournalTest(
tempdir, config, ctx, cancel, tlfJournal, delegate)
putBlock(ctx, t, config, tlfJournal, []byte{1, 2, 3, 4})
numFlushed, rev, converted, err :=
tlfJournal.flushBlockEntries(ctx, firstValidJournalOrdinal+1)
require.NoError(t, err)
require.Equal(t, 1, numFlushed)
require.Equal(t, rev, kbfsmd.RevisionUninitialized)
require.False(t, converted)
}
func testTLFJournalBlockOpBusyPause(t *testing.T, ver kbfsmd.MetadataVer) {
tempdir, config, ctx, cancel, tlfJournal, delegate :=
setupTLFJournalTest(t, ver, TLFJournalBackgroundWorkEnabled)
defer teardownTLFJournalTest(
tempdir, config, ctx, cancel, tlfJournal, delegate)
bs := hangingBlockServer{tlfJournal.delegateBlockServer,
make(chan struct{})}
tlfJournal.delegateBlockServer = bs
putBlock(ctx, t, config, tlfJournal, []byte{1, 2, 3, 4})
bs.waitForPut(ctx, t)
delegate.requireNextState(ctx, bwBusy)
// Should still be able to pause while busy.
tlfJournal.pauseBackgroundWork()
delegate.requireNextState(ctx, bwPaused)
}
func testTLFJournalBlockOpBusyShutdown(t *testing.T, ver kbfsmd.MetadataVer) {
tempdir, config, ctx, cancel, tlfJournal, delegate :=
setupTLFJournalTest(t, ver, TLFJournalBackgroundWorkEnabled)
defer teardownTLFJournalTest(
tempdir, config, ctx, cancel, tlfJournal, delegate)
bs := hangingBlockServer{tlfJournal.delegateBlockServer,
make(chan struct{})}
tlfJournal.delegateBlockServer = bs
putBlock(ctx, t, config, tlfJournal, []byte{1, 2, 3, 4})
bs.waitForPut(ctx, t)
delegate.requireNextState(ctx, bwBusy)
// Should still be able to shut down while busy.
}
func testTLFJournalSecondBlockOpWhileBusy(t *testing.T, ver kbfsmd.MetadataVer) {
tempdir, config, ctx, cancel, tlfJournal, delegate :=
setupTLFJournalTest(t, ver, TLFJournalBackgroundWorkEnabled)
defer teardownTLFJournalTest(
tempdir, config, ctx, cancel, tlfJournal, delegate)
bs := hangingBlockServer{tlfJournal.delegateBlockServer,
make(chan struct{})}
tlfJournal.delegateBlockServer = bs
putBlock(ctx, t, config, tlfJournal, []byte{1, 2, 3, 4})
bs.waitForPut(ctx, t)
delegate.requireNextState(ctx, bwBusy)
// Should still be able to put a second block while busy.
putBlock(ctx, t, config, tlfJournal, []byte{1, 2, 3, 4, 5})
}
func testTLFJournalBlockOpDiskByteLimit(t *testing.T, ver kbfsmd.MetadataVer) {
tempdir, config, ctx, cancel, tlfJournal, delegate :=
setupTLFJournalTest(t, ver, TLFJournalBackgroundWorkPaused)
defer teardownTLFJournalTest(
tempdir, config, ctx, cancel, tlfJournal, delegate)
tlfJournal.diskLimiter.onJournalEnable(
ctx, math.MaxInt64-6, 0, 0, tlfJournal.uid.AsUserOrTeam())
putBlock(ctx, t, config, tlfJournal, []byte{1, 2, 3, 4})
errCh := make(chan error, 1)
go func() {
data2 := []byte{5, 6, 7}
id, bCtx, serverHalf := config.makeBlock(data2)
errCh <- tlfJournal.putBlockData(
ctx, id, bCtx, data2, serverHalf)
}()
numFlushed, rev, converted, err :=
tlfJournal.flushBlockEntries(ctx, firstValidJournalOrdinal+1)
require.NoError(t, err)
require.Equal(t, 1, numFlushed)
require.Equal(t, rev, kbfsmd.RevisionUninitialized)
require.False(t, converted)
// Fake an MD flush.
md := config.makeMD(kbfsmd.RevisionInitial, kbfsmd.ID{})
err = tlfJournal.doOnMDFlushAndRemoveFlushedMDEntry(
ctx, kbfsmd.ID{}, &RootMetadataSigned{RootMetadataSigned: kbfsmd.RootMetadataSigned{MD: md.bareMd}})
select {
case err := <-errCh:
require.NoError(t, err)
case <-ctx.Done():
t.Fatal(ctx.Err())
}
}
func testTLFJournalBlockOpDiskFileLimit(t *testing.T, ver kbfsmd.MetadataVer) {
tempdir, config, ctx, cancel, tlfJournal, delegate :=
setupTLFJournalTest(t, ver, TLFJournalBackgroundWorkPaused)
defer teardownTLFJournalTest(
tempdir, config, ctx, cancel, tlfJournal, delegate)
tlfJournal.diskLimiter.onJournalEnable(
ctx, 0, 0, math.MaxInt64-2*filesPerBlockMax+1,
tlfJournal.uid.AsUserOrTeam())
putBlock(ctx, t, config, tlfJournal, []byte{1, 2, 3, 4})
errCh := make(chan error, 1)
go func() {
data2 := []byte{5, 6, 7}
id, bCtx, serverHalf := config.makeBlock(data2)
errCh <- tlfJournal.putBlockData(
ctx, id, bCtx, data2, serverHalf)
}()
numFlushed, rev, converted, err :=
tlfJournal.flushBlockEntries(ctx, firstValidJournalOrdinal+1)
require.NoError(t, err)
require.Equal(t, 1, numFlushed)
require.Equal(t, rev, kbfsmd.RevisionUninitialized)
require.False(t, converted)
// Fake an MD flush.
md := config.makeMD(kbfsmd.RevisionInitial, kbfsmd.ID{})
err = tlfJournal.doOnMDFlushAndRemoveFlushedMDEntry(
ctx, kbfsmd.ID{}, &RootMetadataSigned{RootMetadataSigned: kbfsmd.RootMetadataSigned{MD: md.bareMd}})
select {
case err := <-errCh:
require.NoError(t, err)
case <-ctx.Done():
t.Fatal(ctx.Err())
}
}
func testTLFJournalBlockOpDiskQuotaLimit(t *testing.T, ver kbfsmd.MetadataVer) {
tempdir, config, ctx, cancel, tlfJournal, delegate :=
setupTLFJournalTest(t, ver, TLFJournalBackgroundWorkPaused)
defer teardownTLFJournalTest(
tempdir, config, ctx, cancel, tlfJournal, delegate)
tlfJournal.diskLimiter.onJournalEnable(
ctx, 0, math.MaxInt64-6, 0, tlfJournal.uid.AsUserOrTeam())
data1 := []byte{1, 2, 3, 4}
putBlock(ctx, t, config, tlfJournal, data1)
usedQuotaBytes, quotaBytes :=
tlfJournal.diskLimiter.getQuotaInfo(tlfJournal.uid.AsUserOrTeam())
require.Equal(t,
int64(math.MaxInt64-6)+int64(len(data1)), usedQuotaBytes)
require.Equal(t, int64(math.MaxInt64), quotaBytes)
data2 := []byte{5, 6, 7}
errCh := make(chan error, 1)
go func() {
id, bCtx, serverHalf := config.makeBlock(data2)
errCh <- tlfJournal.putBlockData(
ctx, id, bCtx, data2, serverHalf)
}()
numFlushed, rev, converted, err :=
tlfJournal.flushBlockEntries(ctx, firstValidJournalOrdinal+1)
require.NoError(t, err)
require.Equal(t, 1, numFlushed)
require.Equal(t, rev, kbfsmd.RevisionUninitialized)
require.False(t, converted)
select {
case err := <-errCh:
require.NoError(t, err)
case <-ctx.Done():
t.Fatal(ctx.Err())
}
usedQuotaBytes, quotaBytes =
tlfJournal.diskLimiter.getQuotaInfo(tlfJournal.uid.AsUserOrTeam())
require.Equal(t,
int64(math.MaxInt64-6)+int64(len(data2)), usedQuotaBytes)
require.Equal(t, int64(math.MaxInt64), quotaBytes)
}
func testTLFJournalBlockOpDiskQuotaLimitResolve(t *testing.T, ver kbfsmd.MetadataVer) {
tempdir, config, ctx, cancel, tlfJournal, delegate :=
setupTLFJournalTest(t, ver, TLFJournalBackgroundWorkPaused)
defer teardownTLFJournalTest(
tempdir, config, ctx, cancel, tlfJournal, delegate)
tlfJournal.diskLimiter.onJournalEnable(
ctx, 0, math.MaxInt64-6, 0, tlfJournal.uid.AsUserOrTeam())
data1 := []byte{1, 2, 3, 4}
id1, bCtx1, serverHalf1 := config.makeBlock(data1)
err := tlfJournal.putBlockData(ctx, id1, bCtx1, data1, serverHalf1)
require.NoError(t, err)
usedQuotaBytes, quotaBytes :=
tlfJournal.diskLimiter.getQuotaInfo(tlfJournal.uid.AsUserOrTeam())
require.Equal(t,
int64(math.MaxInt64-6)+int64(len(data1)), usedQuotaBytes)
require.Equal(t, int64(math.MaxInt64), quotaBytes)
data2 := []byte{5, 6, 7}
errCh := make(chan error, 1)
go func() {
id2, bCtx2, serverHalf2 := config.makeBlock(data2)
errCh <- tlfJournal.putBlockData(
ctx, id2, bCtx2, data2, serverHalf2)
}()
md1 := config.makeMD(kbfsmd.RevisionInitial, kbfsmd.ID{})
irmd, err := tlfJournal.putMD(ctx, md1, tlfJournal.key)
require.NoError(t, err)
mdID1 := irmd.mdID
err = tlfJournal.convertMDsToBranch(ctx)
require.NoError(t, err)
bid, err := tlfJournal.getBranchID()
require.NoError(t, err)
// Ignore the block instead of flushing it.
md2 := config.makeMD(kbfsmd.RevisionInitial+1, mdID1)
_, retry, err := tlfJournal.doResolveBranch(
ctx, bid, []kbfsblock.ID{id1}, md2,
unflushedPathMDInfo{}, unflushedPathsPerRevMap{}, tlfJournal.key)
require.NoError(t, err)
require.False(t, retry)
select {
case err := <-errCh:
require.NoError(t, err)
case <-ctx.Done():
t.Fatal(ctx.Err())
}
usedQuotaBytes, quotaBytes =
tlfJournal.diskLimiter.getQuotaInfo(tlfJournal.uid.AsUserOrTeam())
require.Equal(t,
int64(math.MaxInt64-6)+int64(len(data2)), usedQuotaBytes)
require.Equal(t, int64(math.MaxInt64), quotaBytes)
}
func testTLFJournalBlockOpDiskLimitDuplicate(t *testing.T, ver kbfsmd.MetadataVer) {
tempdir, config, ctx, cancel, tlfJournal, delegate :=
setupTLFJournalTest(t, ver, TLFJournalBackgroundWorkPaused)
defer teardownTLFJournalTest(
tempdir, config, ctx, cancel, tlfJournal, delegate)
tlfJournal.diskLimiter.onJournalEnable(
ctx, math.MaxInt64-8, 0, math.MaxInt64-2*filesPerBlockMax,
tlfJournal.uid.AsUserOrTeam())
data := []byte{1, 2, 3, 4}
id, bCtx, serverHalf := config.makeBlock(data)
err := tlfJournal.putBlockData(ctx, id, bCtx, data, serverHalf)
require.NoError(t, err)
// This should acquire some bytes and files, but then release
// them.
err = tlfJournal.putBlockData(ctx, id, bCtx, data, serverHalf)
require.NoError(t, err)
// If the above incorrectly does not release bytes or files,
// this will hang.
err = tlfJournal.putBlockData(ctx, id, bCtx, data, serverHalf)
require.NoError(t, err)
}
func testTLFJournalBlockOpDiskLimitCancel(t *testing.T, ver kbfsmd.MetadataVer) {
tempdir, config, ctx, cancel, tlfJournal, delegate :=
setupTLFJournalTest(t, ver, TLFJournalBackgroundWorkPaused)
defer teardownTLFJournalTest(
tempdir, config, ctx, cancel, tlfJournal, delegate)
tlfJournal.diskLimiter.onJournalEnable(
ctx, math.MaxInt64, 0, 0, tlfJournal.uid.AsUserOrTeam())
ctx2, cancel2 := context.WithCancel(ctx)
cancel2()
data := []byte{1, 2, 3, 4}
id, bCtx, serverHalf := config.makeBlock(data)
err := tlfJournal.putBlockData(ctx2, id, bCtx, data, serverHalf)
require.Equal(t, context.Canceled, errors.Cause(err))
}
func testTLFJournalBlockOpDiskLimitTimeout(t *testing.T, ver kbfsmd.MetadataVer) {
tempdir, config, ctx, cancel, tlfJournal, delegate :=
setupTLFJournalTest(t, ver, TLFJournalBackgroundWorkPaused)
defer teardownTLFJournalTest(
tempdir, config, ctx, cancel, tlfJournal, delegate)
tlfJournal.diskLimiter.onJournalEnable(
ctx, math.MaxInt64, 0, math.MaxInt64-1, tlfJournal.uid.AsUserOrTeam())
config.dlTimeout = 3 * time.Microsecond
data := []byte{1, 2, 3, 4}
id, bCtx, serverHalf := config.makeBlock(data)
err := tlfJournal.putBlockData(ctx, id, bCtx, data, serverHalf)
timeoutErr, ok := errors.Cause(err).(*ErrDiskLimitTimeout)
require.True(t, ok)
require.Error(t, timeoutErr.err)
timeoutErr.err = nil
require.Equal(t, ErrDiskLimitTimeout{
3 * time.Microsecond, int64(len(data)),
filesPerBlockMax, 0, 1, 0, 1, math.MaxInt64, math.MaxInt64, nil, false,
}, *timeoutErr)
}
func testTLFJournalBlockOpDiskLimitPutFailure(t *testing.T, ver kbfsmd.MetadataVer) {
tempdir, config, ctx, cancel, tlfJournal, delegate :=
setupTLFJournalTest(t, ver, TLFJournalBackgroundWorkPaused)
defer teardownTLFJournalTest(
tempdir, config, ctx, cancel, tlfJournal, delegate)
tlfJournal.diskLimiter.onJournalEnable(
ctx, math.MaxInt64-6, 0, math.MaxInt64-filesPerBlockMax,
tlfJournal.uid.AsUserOrTeam())
data := []byte{1, 2, 3, 4}
id, bCtx, serverHalf := config.makeBlock(data)
err := tlfJournal.putBlockData(ctx, id, bCtx, []byte{1}, serverHalf)
require.IsType(t, kbfshash.HashMismatchError{}, errors.Cause(err))
// If the above incorrectly does not release bytes or files from
// diskLimiter on error, this will hang.
err = tlfJournal.putBlockData(ctx, id, bCtx, data, serverHalf)
require.NoError(t, err)
}
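// hangingMDServer is an MDServer whose Put closes onPutCh and then
// hangs until the context is cancelled.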
type hangingMDServer struct {
MDServer
// Closed on put.
onPutCh chan struct{}
}
func (md hangingMDServer) Put(ctx context.Context, rmds *RootMetadataSigned,
_ kbfsmd.ExtraMetadata, _ *keybase1.LockContext, _ keybase1.MDPriority) error {
close(md.onPutCh)
// Hang until the context is cancelled.
<-ctx.Done()
return ctx.Err()
}
func (md hangingMDServer) waitForPut(ctx context.Context, t *testing.T) {
select {
case <-md.onPutCh:
case <-ctx.Done():
require.FailNow(t, ctx.Err().Error())
}
}
func testTLFJournalMDServerBusyPause(t *testing.T, ver kbfsmd.MetadataVer) {
tempdir, config, ctx, cancel, tlfJournal, delegate :=
setupTLFJournalTest(t, ver, TLFJournalBackgroundWorkEnabled)
defer teardownTLFJournalTest(
tempdir, config, ctx, cancel, tlfJournal, delegate)
mdserver := hangingMDServer{config.MDServer(), make(chan struct{})}
config.mdserver = mdserver
md := config.makeMD(kbfsmd.RevisionInitial, kbfsmd.ID{})
_, err := tlfJournal.putMD(ctx, md, tlfJournal.key)
require.NoError(t, err)
mdserver.waitForPut(ctx, t)
delegate.requireNextState(ctx, bwBusy)
// Should still be able to pause while busy.
tlfJournal.pauseBackgroundWork()
delegate.requireNextState(ctx, bwPaused)
}
func testTLFJournalMDServerBusyShutdown(t *testing.T, ver kbfsmd.MetadataVer) {
tempdir, config, ctx, cancel, tlfJournal, delegate :=
setupTLFJournalTest(t, ver, TLFJournalBackgroundWorkEnabled)
defer teardownTLFJournalTest(
tempdir, config, ctx, cancel, tlfJournal, delegate)
mdserver := hangingMDServer{config.MDServer(), make(chan struct{})}
config.mdserver = mdserver
md := config.makeMD(kbfsmd.RevisionInitial, kbfsmd.ID{})
_, err := tlfJournal.putMD(ctx, md, tlfJournal.key)
require.NoError(t, err)
mdserver.waitForPut(ctx, t)
delegate.requireNextState(ctx, bwBusy)
// Should still be able to shutdown while busy.
}
func testTLFJournalBlockOpWhileBusy(t *testing.T, ver kbfsmd.MetadataVer) {
tempdir, config, ctx, cancel, tlfJournal, delegate :=
setupTLFJournalTest(t, ver, TLFJournalBackgroundWorkEnabled)
defer teardownTLFJournalTest(
tempdir, config, ctx, cancel, tlfJournal, delegate)
mdserver := hangingMDServer{config.MDServer(), make(chan struct{})}
config.mdserver = mdserver
md := config.makeMD(kbfsmd.RevisionInitial, kbfsmd.ID{})
_, err := tlfJournal.putMD(ctx, md, tlfJournal.key)
require.NoError(t, err)
mdserver.waitForPut(ctx, t)
delegate.requireNextState(ctx, bwBusy)
// Should still be able to put a block while busy.
putBlock(ctx, t, config, tlfJournal, []byte{1, 2, 3, 4})
}
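// rmdsWithExtra pairs a signed MD object with its extra metadata.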
type rmdsWithExtra struct {
rmds *RootMetadataSigned
extra kbfsmd.ExtraMetadata
}
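// shimMDServer records the RMDSes passed to Put, and can be primed
// with a canned GetRange response or a one-shot Put error.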
type shimMDServer struct {
MDServer
rmdses []rmdsWithExtra
nextGetRange []*RootMetadataSigned
nextErr error
getForTLFCalled bool
}
func (s *shimMDServer) GetRange(
ctx context.Context, id tlf.ID, bid kbfsmd.BranchID, mStatus kbfsmd.MergeStatus,
start, stop kbfsmd.Revision, _ *keybase1.LockID) ([]*RootMetadataSigned, error) {
rmdses := s.nextGetRange
s.nextGetRange = nil
return rmdses, nil
}
func (s *shimMDServer) Put(ctx context.Context, rmds *RootMetadataSigned,
extra kbfsmd.ExtraMetadata, _ *keybase1.LockContext, _ keybase1.MDPriority) error {
if s.nextErr != nil {
err := s.nextErr
s.nextErr = nil
return err
}
s.rmdses = append(s.rmdses, rmdsWithExtra{rmds, extra})
// Pretend all cancels happen after the actual put.
select {
case <-ctx.Done():
return ctx.Err()
default:
}
return nil
}
func (s *shimMDServer) GetForTLF(
ctx context.Context, id tlf.ID, bid kbfsmd.BranchID, mStatus kbfsmd.MergeStatus, _ *keybase1.LockID) (
*RootMetadataSigned, error) {
s.getForTLFCalled = true
if len(s.rmdses) == 0 {
return nil, nil
}
return s.rmdses[len(s.rmdses)-1].rmds, nil
}
func (s *shimMDServer) IsConnected() bool {
return true
}
func (s *shimMDServer) Shutdown() {
}
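// requireJournalEntryCounts asserts the journal's current block and MD
// entry counts.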
func requireJournalEntryCounts(t *testing.T, j *tlfJournal,
expectedBlockEntryCount, expectedMDEntryCount uint64) {
blockEntryCount, mdEntryCount, err := j.getJournalEntryCounts()
require.NoError(t, err)
require.Equal(t, expectedBlockEntryCount, blockEntryCount)
require.Equal(t, expectedMDEntryCount, mdEntryCount)
}
// The tests below test tlfJournal's MD flushing behavior.
func testTLFJournalFlushMDBasic(t *testing.T, ver kbfsmd.MetadataVer) {
tempdir, config, ctx, cancel, tlfJournal, delegate :=
setupTLFJournalTest(t, ver, TLFJournalBackgroundWorkPaused)
defer teardownTLFJournalTest(
tempdir, config, ctx, cancel, tlfJournal, delegate)
firstRevision := kbfsmd.Revision(10)
firstPrevRoot := kbfsmd.FakeID(1)
mdCount := 10
prevRoot := firstPrevRoot
for i := 0; i < mdCount; i++ {
revision := firstRevision + kbfsmd.Revision(i)
md := config.makeMD(revision, prevRoot)
irmd, err := tlfJournal.putMD(ctx, md, tlfJournal.key)
require.NoError(t, err)
prevRoot = irmd.mdID
}
// Flush all entries.
var mdserver shimMDServer
config.mdserver = &mdserver
_, mdEnd, err := tlfJournal.getJournalEnds(ctx)
require.NoError(t, err)
for i := 0; i < mdCount; i++ {
flushed, err := tlfJournal.flushOneMDOp(ctx, mdEnd, defaultFlushContext())
require.NoError(t, err)
require.True(t, flushed)
}
flushed, err := tlfJournal.flushOneMDOp(ctx, mdEnd, defaultFlushContext())
require.NoError(t, err)
require.False(t, flushed)
requireJournalEntryCounts(t, tlfJournal, uint64(mdCount), 0)
testMDJournalGCd(t, tlfJournal.mdJournal)
// Check RMDSes on the server.
rmdses := mdserver.rmdses
require.Equal(t, mdCount, len(rmdses))
config.checkRange(
rmdses, firstRevision, firstPrevRoot, kbfsmd.Merged, kbfsmd.NullBranchID)
}
func testTLFJournalFlushMDConflict(t *testing.T, ver kbfsmd.MetadataVer) {
tempdir, config, ctx, cancel, tlfJournal, delegate :=
setupTLFJournalTest(t, ver, TLFJournalBackgroundWorkPaused)
defer teardownTLFJournalTest(
tempdir, config, ctx, cancel, tlfJournal, delegate)
firstRevision := kbfsmd.Revision(10)
firstPrevRoot := kbfsmd.FakeID(1)
mdCount := 10
prevRoot := firstPrevRoot
for i := 0; i < mdCount/2; i++ {
revision := firstRevision + kbfsmd.Revision(i)
md := config.makeMD(revision, prevRoot)
irmd, err := tlfJournal.putMD(ctx, md, tlfJournal.key)
require.NoError(t, err)
prevRoot = irmd.mdID
}
var mdserver shimMDServer
mdserver.nextErr = kbfsmd.ServerErrorConflictRevision{}
config.mdserver = &mdserver
_, mdEnd, err := tlfJournal.getJournalEnds(ctx)
require.NoError(t, err)
// Simulate a flush with a conflict error halfway through.
{
flushed, err := tlfJournal.flushOneMDOp(ctx, mdEnd, defaultFlushContext())
require.NoError(t, err)
require.False(t, flushed)
revision := firstRevision + kbfsmd.Revision(mdCount/2)
md := config.makeMD(revision, prevRoot)
_, err = tlfJournal.putMD(ctx, md, tlfJournal.key)
require.IsType(t, MDJournalConflictError{}, err)
md.SetUnmerged()
irmd, err := tlfJournal.putMD(ctx, md, tlfJournal.key)
require.NoError(t, err)
prevRoot = irmd.mdID
}
for i := mdCount/2 + 1; i < mdCount; i++ {
revision := firstRevision + kbfsmd.Revision(i)
md := config.makeMD(revision, prevRoot)
md.SetUnmerged()
irmd, err := tlfJournal.putMD(ctx, md, tlfJournal.key)
require.NoError(t, err)
prevRoot = irmd.mdID
}
// The journal won't flush anything while on a branch.
requireJournalEntryCounts(t, tlfJournal, uint64(mdCount), uint64(mdCount))
}
// orderedBlockServer and orderedMDServer append onto their shared
// puts slice when their Put() methods are called.
type orderedBlockServer struct {
BlockServer
lock *sync.Mutex
puts *[]interface{}
onceOnPut func()
}
func (s *orderedBlockServer) Put(
ctx context.Context, tlfID tlf.ID, id kbfsblock.ID, context kbfsblock.Context,
buf []byte, serverHalf kbfscrypto.BlockCryptKeyServerHalf) error {
s.lock.Lock()
defer s.lock.Unlock()
*s.puts = append(*s.puts, id)
if s.onceOnPut != nil {
s.onceOnPut()
s.onceOnPut = nil
}
return nil
}
func (s *orderedBlockServer) Shutdown(context.Context) {}
type orderedMDServer struct {
MDServer
lock *sync.Mutex
puts *[]interface{}
onceOnPut func() error
}
func (s *orderedMDServer) Put(
ctx context.Context, rmds *RootMetadataSigned, _ kbfsmd.ExtraMetadata,
_ *keybase1.LockContext, _ keybase1.MDPriority) error {
s.lock.Lock()
defer s.lock.Unlock()
*s.puts = append(*s.puts, rmds.MD.RevisionNumber())
if s.onceOnPut != nil {
err := s.onceOnPut()
s.onceOnPut = nil
if err != nil {
return err
}
}
return nil
}
func (s *orderedMDServer) Shutdown() {}
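// testTLFJournalGCd checks that the journal's on-disk directory and
// in-memory bookkeeping have been fully cleaned up, including the
// child block and MD journals.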
func testTLFJournalGCd(t *testing.T, tlfJournal *tlfJournal) {
// The root dir shouldn't exist.
_, err := ioutil.Stat(tlfJournal.dir)
require.True(t, ioutil.IsNotExist(err))
func() {
tlfJournal.journalLock.Lock()
defer tlfJournal.journalLock.Unlock()
unflushedPaths := tlfJournal.unflushedPaths.getUnflushedPaths()
require.Nil(t, unflushedPaths)
require.Equal(t, uint64(0), tlfJournal.unsquashedBytes)
require.Equal(t, 0, len(tlfJournal.flushingBlocks))
}()
requireJournalEntryCounts(t, tlfJournal, 0, 0)
// Check child journals.
testBlockJournalGCd(t, tlfJournal.blockJournal)
testMDJournalGCd(t, tlfJournal.mdJournal)
}
// testTLFJournalFlushOrdering tests that we respect the relative
// orderings of blocks and MD ops when flushing, i.e. if a block op
// was added to the block journal before an MD op was added to the MD
// journal, then that block op will be flushed before that MD op.
func testTLFJournalFlushOrdering(t *testing.T, ver kbfsmd.MetadataVer) {
tempdir, config, ctx, cancel, tlfJournal, delegate :=
setupTLFJournalTest(t, ver, TLFJournalBackgroundWorkPaused)
defer teardownTLFJournalTest(
tempdir, config, ctx, cancel, tlfJournal, delegate)
bid1, bCtx1, serverHalf1 := config.makeBlock([]byte{1})
bid2, bCtx2, serverHalf2 := config.makeBlock([]byte{2})
bid3, bCtx3, serverHalf3 := config.makeBlock([]byte{3})
md1 := config.makeMD(kbfsmd.Revision(10), kbfsmd.FakeID(1))
var lock sync.Mutex
var puts []interface{}
bserver := orderedBlockServer{
lock: &lock,
puts: &puts,
}
tlfJournal.delegateBlockServer.Shutdown(ctx)
tlfJournal.delegateBlockServer = &bserver
mdserver := orderedMDServer{
lock: &lock,
puts: &puts,
}
config.mdserver = &mdserver
// bid1 is-put-before kbfsmd.Revision(10).
err := tlfJournal.putBlockData(
ctx, bid1, bCtx1, []byte{1}, serverHalf1)
require.NoError(t, err)
irmd, err := tlfJournal.putMD(ctx, md1, tlfJournal.key)
require.NoError(t, err)
prevRoot := irmd.mdID
bserver.onceOnPut = func() {
// bid2 is-put-before kbfsmd.Revision(11).
err := tlfJournal.putBlockData(
ctx, bid2, bCtx2, []byte{2}, serverHalf2)
require.NoError(t, err)
md2 := config.makeMD(kbfsmd.Revision(11), prevRoot)
irmd, err := tlfJournal.putMD(ctx, md2, tlfJournal.key)
require.NoError(t, err)
prevRoot = irmd.mdID
}
mdserver.onceOnPut = func() error {
// bid3 is-put-before kbfsmd.Revision(12).
err := tlfJournal.putBlockData(
ctx, bid3, bCtx3, []byte{3}, serverHalf3)
require.NoError(t, err)
md3 := config.makeMD(kbfsmd.Revision(12), prevRoot)
irmd, err := tlfJournal.putMD(ctx, md3, tlfJournal.key)
require.NoError(t, err)
prevRoot = irmd.mdID
return nil
}
err = tlfJournal.flush(ctx)
require.NoError(t, err)
testTLFJournalGCd(t, tlfJournal)
// These two orderings depend on the exact flushing process,
// but there are other possible orderings which respect the
// above is-put-before constraints and also respect the
// kbfsmd.Revision ordering.
expectedPuts1 := []interface{}{
bid1, kbfsmd.Revision(10), bid2, bid3,
kbfsmd.Revision(11), kbfsmd.Revision(12),
}
// This is possible since block puts are done in parallel.
expectedPuts2 := []interface{}{
bid1, kbfsmd.Revision(10), bid3, bid2,
kbfsmd.Revision(11), kbfsmd.Revision(12),
}
require.True(t, reflect.DeepEqual(puts, expectedPuts1) ||
reflect.DeepEqual(puts, expectedPuts2),
"Expected %v or %v, got %v", expectedPuts1,
expectedPuts2, puts)
}
// testTLFJournalFlushOrderingAfterSquashAndCR tests that after a
// branch is squashed multiple times, and then hits a conflict, the
// blocks are flushed completely before the conflict-resolving MD.
func testTLFJournalFlushOrderingAfterSquashAndCR(
t *testing.T, ver kbfsmd.MetadataVer) {
tempdir, config, ctx, cancel, tlfJournal, delegate :=
setupTLFJournalTest(t, ver, TLFJournalBackgroundWorkPaused)
defer teardownTLFJournalTest(
tempdir, config, ctx, cancel, tlfJournal, delegate)
tlfJournal.forcedSquashByBytes = 20
firstRev := kbfsmd.Revision(10)
firstPrevRoot := kbfsmd.FakeID(1)
md1 := config.makeMD(firstRev, firstPrevRoot)
var lock sync.Mutex
var puts []interface{}
bserver := orderedBlockServer{
lock: &lock,
puts: &puts,
}
tlfJournal.delegateBlockServer.Shutdown(ctx)
tlfJournal.delegateBlockServer = &bserver
var mdserverShim shimMDServer
mdserver := orderedMDServer{
MDServer: &mdserverShim,
lock: &lock,
puts: &puts,
}
config.mdserver = &mdserver
// Put almost a full batch worth of block before revs 10 and 11.
blockEnd := uint64(maxJournalBlockFlushBatchSize - 1)
for i := uint64(0); i < blockEnd; i++ {
data := []byte{byte(i)}
bid, bCtx, serverHalf := config.makeBlock(data)
err := tlfJournal.putBlockData(ctx, bid, bCtx, data, serverHalf)
require.NoError(t, err)
}
irmd, err := tlfJournal.putMD(ctx, md1, tlfJournal.key)
require.NoError(t, err)
prevRoot := irmd.mdID
md2 := config.makeMD(firstRev+1, prevRoot)
require.NoError(t, err)
irmd, err = tlfJournal.putMD(ctx, md2, tlfJournal.key)
require.NoError(t, err)
prevRoot = irmd.mdID
// Squash revs 10 and 11. No blocks should actually be flushed
// yet.
err = tlfJournal.flush(ctx)
require.NoError(t, err)
require.Equal(
t, kbfsmd.PendingLocalSquashBranchID, tlfJournal.mdJournal.getBranchID())
requireJournalEntryCounts(t, tlfJournal, blockEnd+2, 2)
squashMD := config.makeMD(firstRev, firstPrevRoot)
irmd, err = tlfJournal.resolveBranch(ctx,
kbfsmd.PendingLocalSquashBranchID, []kbfsblock.ID{}, squashMD, tlfJournal.key)
require.NoError(t, err)
prevRoot = irmd.mdID
requireJournalEntryCounts(t, tlfJournal, blockEnd+3, 1)
// Another revision 11, with a squashable number of blocks to
// complete the initial batch.
for i := blockEnd; i < blockEnd+20; i++ {
data := []byte{byte(i)}
bid, bCtx, serverHalf := config.makeBlock(data)
err := tlfJournal.putBlockData(ctx, bid, bCtx, data, serverHalf)
require.NoError(t, err)
}
blockEnd += 20
md2 = config.makeMD(firstRev+1, prevRoot)
require.NoError(t, err)
irmd, err = tlfJournal.putMD(ctx, md2, tlfJournal.key)
require.NoError(t, err)
prevRoot = irmd.mdID
// Let it squash (avoiding a branch this time since there's only one MD).
err = tlfJournal.flush(ctx)
require.NoError(t, err)
require.Equal(t, kbfsmd.NullBranchID, tlfJournal.mdJournal.getBranchID())
requireJournalEntryCounts(t, tlfJournal, blockEnd+4, 2)
// Simulate an MD conflict and try to flush again. This will
// flush a full batch of blocks before hitting the conflict, as
// well as the marker for rev 10.
mdserver.onceOnPut = func() error {
return kbfsmd.ServerErrorConflictRevision{}
}
mergedBare := config.makeMD(md2.Revision(), firstPrevRoot).bareMd
mergedBare.SetSerializedPrivateMetadata([]byte{1})
rmds, err := SignBareRootMetadata(
ctx, config.Codec(), config.Crypto(), config.Crypto(),
mergedBare, time.Now())
require.NoError(t, err)
mdserverShim.nextGetRange = []*RootMetadataSigned{rmds}
err = tlfJournal.flush(ctx)
require.NoError(t, err)
branchID := tlfJournal.mdJournal.getBranchID()
require.NotEqual(t, kbfsmd.PendingLocalSquashBranchID, branchID)
require.NotEqual(t, kbfsmd.NullBranchID, branchID)
// Blocks: All the unflushed blocks, plus two unflushed rev markers.
requireJournalEntryCounts(
t, tlfJournal, blockEnd-maxJournalBlockFlushBatchSize+2, 2)
// More blocks that are part of the resolution.
blockEnd2 := blockEnd + maxJournalBlockFlushBatchSize + 2
for i := blockEnd; i < blockEnd2; i++ {
data := []byte{byte(i)}
bid, bCtx, serverHalf := config.makeBlock(data)
err := tlfJournal.putBlockData(ctx, bid, bCtx, data, serverHalf)
require.NoError(t, err)
}
// Use revision 11 (as if two revisions had been merged by another
// device).
resolveMD := config.makeMD(md2.Revision(), firstPrevRoot)
_, err = tlfJournal.resolveBranch(
ctx, branchID, []kbfsblock.ID{}, resolveMD, tlfJournal.key)
require.NoError(t, err)
// Blocks: the ones from the last check, plus the new blocks, plus
// the resolve rev marker.
requireJournalEntryCounts(
t, tlfJournal, blockEnd2-maxJournalBlockFlushBatchSize+3, 1)
// Flush everything remaining. All blocks should be flushed after
// `resolveMD`.
err = tlfJournal.flush(ctx)
require.NoError(t, err)
testTLFJournalGCd(t, tlfJournal)
require.Equal(t, resolveMD.Revision(), puts[len(puts)-1])
}
// testTLFJournalFlushInterleaving tests that we interleave block and
// MD ops while respecting the relative orderings of blocks and MD ops
// when flushing.
func testTLFJournalFlushInterleaving(t *testing.T, ver kbfsmd.MetadataVer) {
tempdir, config, ctx, cancel, tlfJournal, delegate :=
setupTLFJournalTest(t, ver, TLFJournalBackgroundWorkPaused)
defer teardownTLFJournalTest(
tempdir, config, ctx, cancel, tlfJournal, delegate)
var lock sync.Mutex
var puts []interface{}
bserver := orderedBlockServer{
lock: &lock,
puts: &puts,
}
tlfJournal.delegateBlockServer.Shutdown(ctx)
tlfJournal.delegateBlockServer = &bserver
var mdserverShim shimMDServer
mdserver := orderedMDServer{
MDServer: &mdserverShim,
lock: &lock,
puts: &puts,
}
config.mdserver = &mdserver
// Revision 1
var bids []kbfsblock.ID
rev1BlockEnd := maxJournalBlockFlushBatchSize * 2
for i := 0; i < rev1BlockEnd; i++ {
data := []byte{byte(i)}
bid, bCtx, serverHalf := config.makeBlock(data)
bids = append(bids, bid)
err := tlfJournal.putBlockData(ctx, bid, bCtx, data, serverHalf)
require.NoError(t, err)
}
md1 := config.makeMD(kbfsmd.Revision(10), kbfsmd.FakeID(1))
irmd, err := tlfJournal.putMD(ctx, md1, tlfJournal.key)
require.NoError(t, err)
prevRoot := irmd.mdID
// Revision 2
rev2BlockEnd := rev1BlockEnd + maxJournalBlockFlushBatchSize*2
for i := rev1BlockEnd; i < rev2BlockEnd; i++ {
data := []byte{byte(i)}
bid, bCtx, serverHalf := config.makeBlock(data)
bids = append(bids, bid)
err := tlfJournal.putBlockData(ctx, bid, bCtx, data, serverHalf)
require.NoError(t, err)
}
md2 := config.makeMD(kbfsmd.Revision(11), prevRoot)
irmd, err = tlfJournal.putMD(ctx, md2, tlfJournal.key)
require.NoError(t, err)
prevRoot = irmd.mdID
err = tlfJournal.flush(ctx)
require.NoError(t, err)
testTLFJournalGCd(t, tlfJournal)
// Make sure the flusher checks in between block flushes for
// conflicting MDs on the server.
require.True(t, mdserverShim.getForTLFCalled)
// Make sure that: before revision 1, all the rev1 blocks were
// put; rev2 comes last; some blocks are put between the two.
bidsSeen := make(map[kbfsblock.ID]bool)
md1Slot := 0
md2Slot := 0
for i, put := range puts {
if bid, ok := put.(kbfsblock.ID); ok {
t.Logf("Saw bid %s at %d", bid, i)
bidsSeen[bid] = true
continue
}
mdID, ok := put.(kbfsmd.Revision)
require.True(t, ok)
if mdID == md1.Revision() {
md1Slot = i
for j := 0; j < rev1BlockEnd; j++ {
t.Logf("Checking bid %s at %d", bids[j], i)
require.True(t, bidsSeen[bids[j]])
}
} else if mdID == md2.Revision() {
md2Slot = i
require.NotZero(t, md1Slot)
require.True(t, md1Slot+1 < i)
require.Equal(t, i, len(puts)-1)
}
}
require.NotZero(t, md1Slot)
require.NotZero(t, md2Slot)
}
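// testBranchChangeListener forwards branch-change notifications to a
// channel, so tests can wait for branch conversion to happen.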
type testBranchChangeListener struct {
c chan<- struct{}
}
func (tbcl testBranchChangeListener) onTLFBranchChange(_ tlf.ID, _ kbfsmd.BranchID) {
tbcl.c <- struct{}{}
}
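// testTLFJournalPauseBlocksAndConvertBranch stalls the first flushed
// block put, then puts enough MD revisions to trigger a local squash
// branch, returning what the caller needs to unstick and finish the
// flush.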
func testTLFJournalPauseBlocksAndConvertBranch(t *testing.T,
ctx context.Context, tlfJournal *tlfJournal, config *testTLFJournalConfig) (
firstRev kbfsmd.Revision, firstRoot kbfsmd.ID,
retUnpauseBlockPutCh chan<- struct{}, retErrCh <-chan error,
blocksLeftAfterFlush uint64, mdsLeftAfterFlush uint64) {
branchCh := make(chan struct{}, 1)
tlfJournal.onBranchChange = testBranchChangeListener{branchCh}
var lock sync.Mutex
var puts []interface{}
unpauseBlockPutCh := make(chan struct{})
bserver := orderedBlockServer{
lock: &lock,
puts: &puts,
onceOnPut: func() { <-unpauseBlockPutCh },
}
tlfJournal.delegateBlockServer.Shutdown(ctx)
tlfJournal.delegateBlockServer = &bserver
// Revision 1
var bids []kbfsblock.ID
rev1BlockEnd := maxJournalBlockFlushBatchSize * 2
for i := 0; i < rev1BlockEnd; i++ {
data := []byte{byte(i)}
bid, bCtx, serverHalf := config.makeBlock(data)
bids = append(bids, bid)
err := tlfJournal.putBlockData(ctx, bid, bCtx, data, serverHalf)
require.NoError(t, err)
}
firstRev = kbfsmd.Revision(10)
firstRoot = kbfsmd.FakeID(1)
md1 := config.makeMD(firstRev, firstRoot)
irmd, err := tlfJournal.putMD(ctx, md1, tlfJournal.key)
require.NoError(t, err)
prevRoot := irmd.mdID
rev := firstRev
// Now start the blocks flushing. One of the block puts will be
// stuck. During that time, put a lot more MD revisions, enough
// to trigger branch conversion. However, no pause should be
// called.
errCh := make(chan error, 1)
go func() {
errCh <- tlfJournal.flush(ctx)
}()
markers := uint64(1)
for i := 0; i < ForcedBranchSquashRevThreshold+1; i++ {
rev++
md := config.makeMD(rev, prevRoot)
irmd, err := tlfJournal.putMD(ctx, md, tlfJournal.key)
if isRevisionConflict(err) {
// Branch conversion is done, we can stop now.
break
}
require.NoError(t, err)
prevRoot = irmd.mdID
markers++
}
// Wait for the local squash branch to appear.
select {
case <-branchCh:
case <-ctx.Done():
t.Fatalf("Timeout while waiting for branch change")
}
return firstRev, firstRoot, unpauseBlockPutCh, errCh,
maxJournalBlockFlushBatchSize + markers, markers
}
// testTLFJournalConvertWhileFlushing tests that we can do branch
// conversion while blocks are still flushing.
func testTLFJournalConvertWhileFlushing(t *testing.T, ver kbfsmd.MetadataVer) {
tempdir, config, ctx, cancel, tlfJournal, delegate :=
setupTLFJournalTest(t, ver, TLFJournalBackgroundWorkPaused)
defer teardownTLFJournalTest(
tempdir, config, ctx, cancel, tlfJournal, delegate)
_, _, unpauseBlockPutCh, errCh, blocksLeftAfterFlush, mdsLeftAfterFlush :=
testTLFJournalPauseBlocksAndConvertBranch(t, ctx, tlfJournal, config)
// Now finish the block put, and let the flush finish. We should
// be on a local squash branch now.
unpauseBlockPutCh <- struct{}{}
err := <-errCh
require.NoError(t, err)
// Should be a full batch worth of blocks left, plus all the
// revision markers above. No squash has actually happened yet,
// so all the revisions should be there now, just on a branch.
requireJournalEntryCounts(
t, tlfJournal, blocksLeftAfterFlush, mdsLeftAfterFlush)
require.Equal(
t, kbfsmd.PendingLocalSquashBranchID, tlfJournal.mdJournal.getBranchID())
}
// testTLFJournalSquashWhileFlushing tests that we can do journal
// coalescing while blocks are still flushing.
func testTLFJournalSquashWhileFlushing(t *testing.T, ver kbfsmd.MetadataVer) {
tempdir, config, ctx, cancel, tlfJournal, delegate :=
setupTLFJournalTest(t, ver, TLFJournalBackgroundWorkPaused)
defer teardownTLFJournalTest(
tempdir, config, ctx, cancel, tlfJournal, delegate)
firstRev, firstPrevRoot, unpauseBlockPutCh, errCh,
blocksLeftAfterFlush, _ :=
testTLFJournalPauseBlocksAndConvertBranch(t, ctx, tlfJournal, config)
// While it's paused, resolve the branch.
resolveMD := config.makeMD(firstRev, firstPrevRoot)
_, err := tlfJournal.resolveBranch(ctx,
tlfJournal.mdJournal.getBranchID(), []kbfsblock.ID{}, resolveMD,
tlfJournal.key)
require.NoError(t, err)
requireJournalEntryCounts(
t, tlfJournal, blocksLeftAfterFlush+maxJournalBlockFlushBatchSize+1, 1)
// Now finish the block put, and let the flush finish. We
// shouldn't be on a branch anymore.
unpauseBlockPutCh <- struct{}{}
err = <-errCh
require.NoError(t, err)
// Since flush() never saw the branch in conflict, it will finish
// flushing everything.
testTLFJournalGCd(t, tlfJournal)
require.Equal(t, kbfsmd.NullBranchID, tlfJournal.mdJournal.getBranchID())
}
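// testImmediateBackOff is a backoff strategy that retries after a
// nanosecond, counting the retries and closing resetCh on Reset.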
type testImmediateBackOff struct {
numBackOffs int
resetCh chan<- struct{}
}
func (t *testImmediateBackOff) NextBackOff() time.Duration {
t.numBackOffs++
return 1 * time.Nanosecond
}
func (t *testImmediateBackOff) Reset() {
close(t.resetCh)
}
func testTLFJournalFlushRetry(t *testing.T, ver kbfsmd.MetadataVer) {
tempdir, config, ctx, cancel, tlfJournal, delegate :=
setupTLFJournalTest(t, ver, TLFJournalBackgroundWorkPaused)
defer teardownTLFJournalTest(
tempdir, config, ctx, cancel, tlfJournal, delegate)
// Stop the current background loop; replace with one that retries
// immediately.
tlfJournal.needShutdownCh <- struct{}{}
<-tlfJournal.backgroundShutdownCh
resetCh := make(chan struct{})
b := &testImmediateBackOff{resetCh: resetCh}
tlfJournal.backgroundShutdownCh = make(chan struct{})
go tlfJournal.doBackgroundWorkLoop(TLFJournalBackgroundWorkPaused, b)
select {
case <-delegate.shutdownCh:
case <-ctx.Done():
assert.Fail(config.t, ctx.Err().Error())
}
firstRevision := kbfsmd.Revision(10)
firstPrevRoot := kbfsmd.FakeID(1)
mdCount := 10
prevRoot := firstPrevRoot
for i := 0; i < mdCount; i++ {
revision := firstRevision + kbfsmd.Revision(i)
md := config.makeMD(revision, prevRoot)
irmd, err := tlfJournal.putMD(ctx, md, tlfJournal.key)
require.NoError(t, err)
prevRoot = irmd.mdID
}
var mdserver shimMDServer
mdserver.nextErr = errors.New("Error to force a retry")
config.mdserver = &mdserver
delegate.requireNextState(ctx, bwPaused)
tlfJournal.resumeBackgroundWork()
delegate.requireNextState(ctx, bwIdle)
delegate.requireNextState(ctx, bwBusy)
delegate.requireNextState(ctx, bwIdle)
delegate.requireNextState(ctx, bwBusy)
delegate.requireNextState(ctx, bwIdle)
<-resetCh
require.Equal(t, b.numBackOffs, 1)
testTLFJournalGCd(t, tlfJournal)
}
func testTLFJournalResolveBranch(t *testing.T, ver kbfsmd.MetadataVer) {
tempdir, config, ctx, cancel, tlfJournal, delegate :=
setupTLFJournalTest(t, ver, TLFJournalBackgroundWorkPaused)
defer teardownTLFJournalTest(
tempdir, config, ctx, cancel, tlfJournal, delegate)
var bids []kbfsblock.ID
for i := 0; i < 3; i++ {
data := []byte{byte(i)}
bid, bCtx, serverHalf := config.makeBlock(data)
bids = append(bids, bid)
err := tlfJournal.putBlockData(ctx, bid, bCtx, data, serverHalf)
require.NoError(t, err)
}
firstRevision := kbfsmd.Revision(10)
firstPrevRoot := kbfsmd.FakeID(1)
mdCount := 3
prevRoot := firstPrevRoot
for i := 0; i < mdCount; i++ {
revision := firstRevision + kbfsmd.Revision(i)
md := config.makeMD(revision, prevRoot)
irmd, err := tlfJournal.putMD(ctx, md, tlfJournal.key)
require.NoError(t, err)
prevRoot = irmd.mdID
}
var mdserver shimMDServer
mdserver.nextErr = kbfsmd.ServerErrorConflictRevision{}
config.mdserver = &mdserver
_, mdEnd, err := tlfJournal.getJournalEnds(ctx)
require.NoError(t, err)
// This will convert to a branch.
flushed, err := tlfJournal.flushOneMDOp(ctx, mdEnd, defaultFlushContext())
require.NoError(t, err)
require.False(t, flushed)
// The background worker was already paused, so we won't get a
// paused signal here. But resume the background work now so that
// later when the conflict resolves, it will be able to send a
// resume signal.
tlfJournal.resumeBackgroundWork()
// Resolve the branch.
resolveMD := config.makeMD(firstRevision, firstPrevRoot)
_, err = tlfJournal.resolveBranch(ctx,
tlfJournal.mdJournal.getBranchID(), []kbfsblock.ID{bids[1]}, resolveMD,
tlfJournal.key)
require.NoError(t, err)
blockEnd, newMDEnd, err := tlfJournal.getJournalEnds(ctx)
require.NoError(t, err)
require.Equal(t, firstRevision+1, newMDEnd)
blocks, maxMD, err := tlfJournal.getNextBlockEntriesToFlush(ctx, blockEnd)
require.NoError(t, err)
require.Equal(t, firstRevision, maxMD)
// 3 blocks, 3 old MD markers, 1 new MD marker
require.Equal(t, 7, blocks.length())
require.Len(t, blocks.puts.blockStates, 2)
require.Len(t, blocks.adds.blockStates, 0)
// 1 ignored block, 3 ignored MD markers, 1 real MD marker
require.Len(t, blocks.other, 5)
require.Equal(t, bids[0], blocks.puts.blockStates[0].blockPtr.ID)
require.Equal(t, bids[2], blocks.puts.blockStates[1].blockPtr.ID)
// resolveBranch resumes background work.
delegate.requireNextState(ctx, bwIdle)
delegate.requireNextState(ctx, bwBusy)
}
func testTLFJournalSquashByBytes(t *testing.T, ver kbfsmd.MetadataVer) {
tempdir, config, ctx, cancel, tlfJournal, delegate :=
setupTLFJournalTest(t, ver, TLFJournalBackgroundWorkPaused)
defer teardownTLFJournalTest(
tempdir, config, ctx, cancel, tlfJournal, delegate)
tlfJournal.forcedSquashByBytes = 10
data := make([]byte, tlfJournal.forcedSquashByBytes+1)
bid, bCtx, serverHalf := config.makeBlock(data)
err := tlfJournal.putBlockData(ctx, bid, bCtx, data, serverHalf)
require.NoError(t, err)
firstRevision := kbfsmd.Revision(10)
firstPrevRoot := kbfsmd.FakeID(1)
mdCount := 3
prevRoot := firstPrevRoot
for i := 0; i < mdCount; i++ {
revision := firstRevision + kbfsmd.Revision(i)
md := config.makeMD(revision, prevRoot)
irmd, err := tlfJournal.putMD(ctx, md, tlfJournal.key)
require.NoError(t, err)
prevRoot = irmd.mdID
}
// This should convert it to a branch, based on the number of
// outstanding bytes.
err = tlfJournal.flush(ctx)
require.NoError(t, err)
require.Equal(
t, kbfsmd.PendingLocalSquashBranchID, tlfJournal.mdJournal.getBranchID())
}
// Test that the first revision of a TLF doesn't get squashed.
func testTLFJournalFirstRevNoSquash(t *testing.T, ver kbfsmd.MetadataVer) {
tempdir, config, ctx, cancel, tlfJournal, delegate :=
setupTLFJournalTest(t, ver, TLFJournalBackgroundWorkPaused)
defer teardownTLFJournalTest(
tempdir, config, ctx, cancel, tlfJournal, delegate)
tlfJournal.forcedSquashByBytes = 10
data := make([]byte, tlfJournal.forcedSquashByBytes+1)
bid, bCtx, serverHalf := config.makeBlock(data)
err := tlfJournal.putBlockData(ctx, bid, bCtx, data, serverHalf)
require.NoError(t, err)
firstRevision := kbfsmd.RevisionInitial
mdCount := 4
var firstMdID, prevRoot kbfsmd.ID
for i := 0; i < mdCount; i++ {
revision := firstRevision + kbfsmd.Revision(i)
md := config.makeMD(revision, prevRoot)
irmd, err := tlfJournal.putMD(ctx, md, tlfJournal.key)
require.NoError(t, err)
prevRoot = irmd.mdID
if i == 0 {
firstMdID = irmd.mdID
}
}
// This should convert it to a branch, based on the number of
// outstanding bytes.
err = tlfJournal.flush(ctx)
require.NoError(t, err)
require.Equal(
t, kbfsmd.PendingLocalSquashBranchID, tlfJournal.mdJournal.getBranchID())
requireJournalEntryCounts(t, tlfJournal, 5, 4)
unsquashedRange, err := tlfJournal.getMDRange(
ctx, kbfsmd.NullBranchID, firstRevision, firstRevision+3)
require.NoError(t, err)
require.Len(t, unsquashedRange, 1)
require.Equal(t, firstRevision, unsquashedRange[0].RevisionNumber())
require.Equal(t, firstMdID, unsquashedRange[0].mdID)
squashRange, err := tlfJournal.getMDRange(
ctx, kbfsmd.PendingLocalSquashBranchID, firstRevision, firstRevision+3)
require.NoError(t, err)
require.Len(t, squashRange, 3)
require.Equal(t, firstRevision+1, squashRange[0].RevisionNumber())
}
// testTLFJournalSingleOp tests that when the journal is in single op
// mode, it doesn't flush any MDs until `finishSingleOp()` is called,
// and then it only flushes one squashed MD.
func testTLFJournalSingleOp(t *testing.T, ver kbfsmd.MetadataVer) {
tempdir, config, ctx, cancel, tlfJournal, delegate :=
setupTLFJournalTest(t, ver, TLFJournalSingleOpBackgroundWorkEnabled)
defer teardownTLFJournalTest(
tempdir, config, ctx, cancel, tlfJournal, delegate)
var mdserver shimMDServer
config.mdserver = &mdserver
tlfJournal.pauseBackgroundWork()
delegate.requireNextState(ctx, bwPaused)
putBlock(ctx, t, config, tlfJournal, []byte{1, 2})
putBlock(ctx, t, config, tlfJournal, []byte{3, 4})
putBlock(ctx, t, config, tlfJournal, []byte{5, 6})
md1 := config.makeMD(kbfsmd.Revision(10), kbfsmd.FakeID(1))
irmd, err := tlfJournal.putMD(ctx, md1, tlfJournal.key)
require.NoError(t, err)
prevRoot := irmd.mdID
putBlock(ctx, t, config, tlfJournal, []byte{7, 8})
putBlock(ctx, t, config, tlfJournal, []byte{9, 10})
md2 := config.makeMD(kbfsmd.Revision(11), prevRoot)
_, err = tlfJournal.putMD(ctx, md2, tlfJournal.key)
require.NoError(t, err)
tlfJournal.resumeBackgroundWork()
delegate.requireNextState(ctx, bwIdle)
delegate.requireNextState(ctx, bwBusy)
delegate.requireNextState(ctx, bwIdle)
requireJournalEntryCounts(t, tlfJournal, 0, 2)
// The `finishSingleOp` call below blocks, so we have to do it in
// a background goroutine to avoid deadlock.
errCh := make(chan error, 1)
go func() {
errCh <- tlfJournal.finishSingleOp(ctx, nil, keybase1.MDPriorityNormal)
}()
// Background loop awakens after the finish is signaled. Should
// now be on a conflict branch. The pause signal sent by the
// branch-converter races with the background work finishing
// (KBFS-2440), and so the second state could be either idle or
// paused, depending on what gets processed first.
delegate.requireNextState(ctx, bwBusy)
nextState := delegate.requireNextState(ctx, bwPaused, bwIdle)
if nextState == bwIdle {
delegate.requireNextState(ctx, bwPaused)
}
require.Equal(
t, kbfsmd.PendingLocalSquashBranchID, tlfJournal.mdJournal.getBranchID())
resolveMD := config.makeMD(kbfsmd.Revision(10), kbfsmd.FakeID(1))
_, err = tlfJournal.resolveBranch(ctx,
tlfJournal.mdJournal.getBranchID(), nil, resolveMD, tlfJournal.key)
require.NoError(t, err)
// Now the flushing should complete.
delegate.requireNextState(ctx, bwIdle)
delegate.requireNextState(ctx, bwBusy)
delegate.requireNextState(ctx, bwIdle)
select {
case err := <-errCh:
require.NoError(t, err)
case <-ctx.Done():
t.Fatal(ctx.Err().Error())
}
requireJournalEntryCounts(t, tlfJournal, 0, 0)
require.Len(t, mdserver.rmdses, 1)
}
func TestTLFJournal(t *testing.T) {
tests := []func(*testing.T, kbfsmd.MetadataVer){
testTLFJournalBasic,
testTLFJournalPauseResume,
testTLFJournalPauseShutdown,
testTLFJournalBlockOpBasic,
testTLFJournalBlockOpBusyPause,
testTLFJournalBlockOpBusyShutdown,
testTLFJournalSecondBlockOpWhileBusy,
testTLFJournalMDServerBusyPause,
testTLFJournalMDServerBusyShutdown,
testTLFJournalBlockOpWhileBusy,
testTLFJournalBlockOpDiskByteLimit,
testTLFJournalBlockOpDiskFileLimit,
testTLFJournalBlockOpDiskQuotaLimit,
testTLFJournalBlockOpDiskQuotaLimitResolve,
testTLFJournalBlockOpDiskLimitDuplicate,
testTLFJournalBlockOpDiskLimitCancel,
testTLFJournalBlockOpDiskLimitTimeout,
testTLFJournalBlockOpDiskLimitPutFailure,
testTLFJournalFlushMDBasic,
testTLFJournalFlushMDConflict,
testTLFJournalFlushOrdering,
testTLFJournalFlushOrderingAfterSquashAndCR,
testTLFJournalFlushInterleaving,
testTLFJournalConvertWhileFlushing,
testTLFJournalSquashWhileFlushing,
testTLFJournalFlushRetry,
testTLFJournalResolveBranch,
testTLFJournalSquashByBytes,
testTLFJournalFirstRevNoSquash,
testTLFJournalSingleOp,
}
runTestsOverMetadataVers(t, "testTLFJournal", tests)
}
| 1 | 18,436 | Shouldn't this be a `libfs.nullIDGetter` (exported, of course)? | keybase-kbfs | go |
@@ -156,6 +156,9 @@ const (
tealsignTooManyArg = "--set-lsig-arg-idx too large, maximum of %d arguments"
tealsignInfoWroteSig = "Wrote signature for %s to LSig.Args[%d]"
+ tealLogicSigSize = "%s: logicsig program size too large: %d > %d"
+ tealAppSize = "%s: app program size too large: %d > %d"
+
// Wallet
infoRecoveryPrompt = "Please type your recovery mnemonic below, and hit return when you are done: "
infoChoosePasswordPrompt = "Please choose a password for wallet '%s': " | 1 | // Copyright (C) 2019-2021 Algorand, Inc.
// This file is part of go-algorand
//
// go-algorand is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as
// published by the Free Software Foundation, either version 3 of the
// License, or (at your option) any later version.
//
// go-algorand is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
package main
const (
// General
errorNoDataDirectory = "Data directory not specified. Please use -d or set $ALGORAND_DATA in your environment. Exiting."
errorOneDataDirSupported = "Only one data directory can be specified for this command."
errorRequestFail = "Error processing command: %s"
errorGenesisIDFail = "Error determining kmd folder (%s). Ensure the node is running in %s."
errorDirectoryNotExist = "Specified directory '%s' does not exist."
errorParseAddr = "Failed to parse addr: %v"
errorNonPrintableCharacters = "One or more non-printable characters were omitted from the following error message:"
infoNonPrintableCharacters = "One or more non-printable characters were omitted from the subsequent line:"
// Account
infoNoAccounts = "Did not find any account. Please import or create a new one."
infoRenamedAccount = "Renamed account '%s' to '%s'"
infoImportedKey = "Imported %s"
infoExportedKey = "Exported key for account %s: \"%s\""
infoImportedNKeys = "Imported %d key%s"
infoCreatedNewAccount = "Created new account with address %s"
errorNameAlreadyTaken = "The account name '%s' is already taken, please choose another."
errorNameDoesntExist = "An account named '%s' does not exist."
infoSetAccountToDefault = "Set account '%s' to be the default account"
errorSigningTX = "Couldn't sign tx with kmd: %s"
errorOnlineTX = "Couldn't sign tx: %s (for multisig accounts, write tx to file and sign manually)"
errorConstructingTX = "Couldn't construct tx: %s"
errorBroadcastingTX = "Couldn't broadcast tx with algod: %s"
warnMultisigDuplicatesDetected = "Warning: one or more duplicate addresses detected in multisig account creation. This will effectively give the duplicated address(es) extra signature weight. Continuing multisig account creation."
errLastRoundInvalid = "roundLastValid needs to be well after the current round (%d)"
errExistingPartKey = "Account already has a participation key valid at least until roundLastValid (%d) - current is %d"
errorSeedConversion = "Got private key for account %s, but was unable to convert to seed: %s"
errorMnemonicConversion = "Got seed for account %s, but was unable to convert to mnemonic: %s"
// KMD
infoKMDStopped = "Stopped kmd"
infoKMDAlreadyStarted = "kmd is already running"
infoKMDAlreadyStopped = "kmd doesn't appear to be running"
infoKMDStarted = "Successfully started kmd"
errorKMDFailedToStart = "Failed to start kmd: %s"
errorKMDFailedToStop = "Failed to stop kmd: %s"
// Node
infoNodeStart = "Algorand node successfully started!"
infoNodeAlreadyStarted = "Algorand node was already started!"
infoTryingToStopNode = "Trying to stop the node..."
infoNodeShuttingDown = "Algorand node is shutting down..."
infoNodeSuccessfullyStopped = "The node was successfully stopped."
infoNodeStatus = "Last committed block: %d\nTime since last block: %s\nSync Time: %s\nLast consensus protocol: %s\nNext consensus protocol: %s\nRound for next consensus protocol: %d\nNext consensus protocol supported: %v"
catchupStoppedOnUnsupported = "Last supported block (%d) is committed. The next block consensus protocol is not supported. Catchup service is stopped."
infoNodeCatchpointCatchupStatus = "Last committed block: %d\nSync Time: %s\nCatchpoint: %s"
infoNodeCatchpointCatchupAccounts = "Catchpoint total accounts: %d\nCatchpoint accounts processed: %d\nCatchpoint accounts verified: %d"
infoNodeCatchpointCatchupBlocks = "Catchpoint total blocks: %d\nCatchpoint downloaded blocks: %d"
nodeLastCatchpoint = "Last Catchpoint: %s"
errorNodeCreationIPFailure = "Parsing passed IP %v failed: need a valid IPv4 or IPv6 address with a specified port number"
errorNodeNotDetected = "Algorand node does not appear to be running: %s"
errorNodeStatus = "Cannot contact Algorand node: %s"
errorNodeFailedToStart = "Algorand node failed to start: %s"
errorNodeRunning = "Node must be stopped before writing APIToken"
errorNodeFailGenToken = "Cannot generate API token: %s"
errorNodeCreation = "Error during node creation: %v"
errorNodeManagedBySystemd = "This node is managed by systemd, you must run the following command to make your desired state change to your node:\n\nsystemctl %s algorand.service"
errorKill = "Cannot kill node: %s"
errorCloningNode = "Error cloning the node: %s"
infoNodeCloned = "Node cloned successfully to: %s"
infoNodeWroteToken = "Successfully wrote new API token: %s"
infoNodePendingTxnsDescription = "Pending Transactions (Truncated max=%d, Total in pool=%d): "
infoNodeNoPendingTxnsDescription = "None"
infoDataDir = "[Data Directory: %s]"
errLoadingConfig = "Error loading Config file from '%s': %v"
errorNodeFailedToShutdown = "Unable to shut down node: %v"
errorCatchpointLabelParsingFailed = "The provided catchpoint is not a valid one"
errorCatchpointLabelMissing = "A catchpoint argument is needed"
errorTooManyCatchpointLabels = "The catchup command expects a single catchpoint"
// Asset
malformedMetadataHash = "Cannot base64-decode metadata hash %s: %s"
// Application
errorLocalGlobal = "Exactly one of --local or --global is required"
errorLocalStateRequiresAccount = "--local requires --from account"
errorAccountNotOptedInToApp = "%s has not opted in to application %d"
errorNoSuchApplication = "application %d does not exist"
errorMarshalingState = "failed to encode state: %s"
errorApprovProgArgsRequired = "Exactly one of --approval-prog or --approval-prog-raw is required"
errorClearProgArgsRequired = "Exactly one of --clear-prog or --clear-prog-raw is required"
// Clerk
infoTxIssued = "Sent %d MicroAlgos from account %s to address %s, transaction ID: %s. Fee set to %d"
infoTxCommitted = "Transaction %s committed in round %d"
infoTxPending = "Transaction %s still pending as of round %d"
malformedNote = "Cannot base64-decode note %s: %s"
malformedLease = "Cannot base64-decode lease %s: %s"
fileReadError = "Cannot read file %s: %s"
fileWriteError = "Cannot write file %s: %s"
txDecodeError = "Cannot decode transactions from %s: %s"
txDupError = "Duplicate transaction %s in %s"
txLengthError = "Transaction list length mismatch"
txMergeMismatch = "Cannot merge transactions: transaction IDs differ"
txMergeError = "Cannot merge signatures: %v"
txNoFilesError = "No input filenames specified"
soFlagError = "-s is not meaningful without -o"
infoRawTxIssued = "Raw transaction ID %s issued"
txPoolError = "Transaction %s kicked out of local node pool: %s"
addrNoSigError = "Exactly one of --address or --no-sig is required"
msigLookupError = "Could not lookup multisig information: %s"
msigParseError = "Multisig information parsing error: %s"
failDecodeAddressError = "Cannot decode address: %v"
rekeySenderTargetSameError = "The sender and the resulted multisig address are the same"
noOutputFileError = "--msig-params must be specified with an output file name (-o)"
infoAutoFeeSet = "Automatically set fee to %d MicroAlgos"
loggingNotConfigured = "Remote logging is not currently configured and won't be enabled"
loggingNotEnabled = "Remote logging is currently disabled"
loggingEnabled = "Remote logging is enabled. Node = %s, Guid = %s"
infoNetworkAlreadyExists = "Network Root Directory '%s' already exists"
errorCreateNetwork = "Error creating private network: %s"
infoNetworkCreated = "Network %s created under %s"
errorLoadingNetwork = "Error loading deployed network: %s"
errorStartingNetwork = "Error starting deployed network: %s"
infoNetworkStarted = "Network Started under %s"
infoNetworkStopped = "Network Stopped under %s"
infoNetworkDeleted = "Network Deleted under %s"
multisigProgramCollision = "should have at most one of --program/-p | --program-bytes/-P | --lsig/-L"
tealsignMutKeyArgs = "Need exactly one of --keyfile or --account"
tealsignMutLsigArgs = "Need exactly one of --contract-addr or --lsig-txn"
tealsignKeyfileFail = "Failed to read keyfile: %v"
tealsignNoWithAcct = "--account is not yet supported"
tealsignEmptyLogic = "LogicSig must have non-empty program"
tealsignParseAddr = "Failed to parse contract addr: %v"
tealsignParseData = "Failed to parse data to sign: %v"
tealsignParseb64 = "failed to base64 decode data to sign: %v"
tealsignParseb32 = "failed to base32 decode data to sign: %v"
tealsignTxIDLsigReq = "--sign-txid requires --lsig-txn"
tealsignSetArgLsigReq = "--set-lsig-arg-idx requires --lsig-txn"
tealsignDataReq = "need exactly one of --sign-txid, --data-file, --data-b64, or --data-b32"
tealsignInfoSig = "Generated signature: %s"
tealsignTooManyArg = "--set-lsig-arg-idx too large, maximum of %d arguments"
tealsignInfoWroteSig = "Wrote signature for %s to LSig.Args[%d]"
// Wallet
infoRecoveryPrompt = "Please type your recovery mnemonic below, and hit return when you are done: "
infoChoosePasswordPrompt = "Please choose a password for wallet '%s': "
infoPasswordConfirmation = "Please confirm the password: "
infoCreatingWallet = "Creating wallet..."
infoCreatedWallet = "Created wallet '%s'"
infoBackupExplanation = "Your new wallet has a backup phrase that can be used for recovery.\nKeeping this backup phrase safe is extremely important.\nWould you like to see it now? (Y/n): "
infoPrintedBackupPhrase = "Your backup phrase is printed below.\nKeep this information safe -- never share it with anyone!"
infoNoWallets = "No wallets found. You can create a wallet with `goal wallet new`"
errorCouldntCreateWallet = "Couldn't create wallet: %s"
errorCouldntInitializeWallet = "Couldn't initialize wallet: %s"
errorCouldntExportMDK = "Couldn't export master derivation key: %s"
errorCouldntMakeMnemonic = "Couldn't make mnemonic: %s"
errorCouldntListWallets = "Couldn't list wallets: %s"
errorPasswordConfirmation = "Password confirmation did not match"
errorBadMnemonic = "Problem with mnemonic: %s"
errorBadRecoveredKey = "Recovered invalid key"
errorFailedToReadResponse = "Couldn't read response: %s"
errorFailedToReadPassword = "Couldn't read password: %s"
// Commands
infoPasswordPrompt = "Please enter the password for wallet '%s': "
infoSetWalletToDefault = "Set wallet '%s' to be the default wallet"
errCouldNotListWallets = "Couldn't list wallets: %s"
errNoWallets = "No wallets found. Create a new wallet with `goal wallet new [wallet name]`"
errNoDefaultWallet = "No default wallet found. Specify a wallet by name with -w, or set a default with `goal wallet -f [wallet name]`"
errFindingWallet = "Couldn't find wallet: %s"
errWalletNameAmbiguous = "More than one wallet named '%s' exists. Please remove any wallets with the same name from the kmd wallet directory"
errWalletIDDuplicate = "More than one wallet with ID '%s' exists. Please remove any wallets with the same ID from the kmd wallet directory"
errGettingWalletName = "Couldn't get wallet name from ID '%s': %s"
errWalletNotFound = "Wallet '%s' not found"
errDefaultWalletNotFound = "Wallet with ID '%s' not found. Was the default wallet deleted?"
errGettingToken = "Couldn't get token for wallet '%s' (ID: %s): %s"
// Ledger
errParsingRoundNumber = "Error parsing round number: %s"
errBadBlockArgs = "Cannot combine --b32=true or --strict=true with --raw"
errEncodingBlockAsJSON = "Error encoding block as json: %s"
)
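// Illustrative call-site sketch for the size-limit messages added in the
// diff above (fname, program and maxLogicSigSize are hypothetical; this
// mirrors the reportErrorf pattern the review notes for clerk.go):
//
//   if len(program) > maxLogicSigSize {
//       reportErrorf(tealLogicSigSize, fname, len(program), maxLogicSigSize)
//   }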
| 1 | 42,078 | I dislike the messages.go pattern; the strings are only used once and make more sense in context. The other two added reportErrorf() calls in clerk.go have inline strings. | algorand-go-algorand | go |
@@ -118,9 +118,6 @@ Status FetchVerticesExecutor::prepareTags() {
if (!tagIdStatus.ok()) {
return tagIdStatus.status();
}
- auto tagId = tagIdStatus.value();
- tagNames_.push_back(tagName);
- tagIds_.push_back(tagId);
auto result = tagNameSet_.emplace(tagName);
if (!result.second) {
return Status::Error(folly::sformat("tag({}) was dup", tagName)); | 1 | /* Copyright (c) 2019 vesoft inc. All rights reserved.
*
* This source code is licensed under Apache 2.0 License,
* attached with Common Clause Condition 1.0, found in the LICENSES directory.
*/
#include "base/Base.h"
#include "graph/FetchVerticesExecutor.h"
#include "meta/SchemaProviderIf.h"
#include "dataman/SchemaWriter.h"
namespace nebula {
namespace graph {
FetchVerticesExecutor::FetchVerticesExecutor(Sentence *sentence, ExecutionContext *ectx)
: TraverseExecutor(ectx, "fetch_vertices") {
sentence_ = dynamic_cast<FetchVerticesSentence*>(sentence);
}
Status FetchVerticesExecutor::prepare() {
return Status::OK();
}
Status FetchVerticesExecutor::prepareVids() {
Status status = Status::OK();
if (sentence_->isRef()) {
fromType_ = kRef;
auto *expr = sentence_->ref();
if (expr->isInputExpression()) {
auto *iexpr = dynamic_cast<InputPropertyExpression*>(expr);
colname_ = iexpr->prop();
inputsPtr_ = inputs_.get();
} else if (expr->isVariableExpression()) {
auto *vexpr = dynamic_cast<VariablePropertyExpression*>(expr);
auto varname = vexpr->alias();
colname_ = vexpr->prop();
bool existing = false;
inputsPtr_ = ectx()->variableHolder()->get(*varname, &existing);
if (!existing) {
return Status::Error("Variable `%s' not defined", varname->c_str());
}
} else {
// should never come to here.
// only support input and variable yet.
LOG(ERROR) << "Unknown kind of expression.";
return Status::Error("Unknown kind of expression.");
}
if (colname_ != nullptr && *colname_ == "*") {
return Status::Error("Cant not use `*' to reference a vertex id column.");
}
if (inputsPtr_ == nullptr || !inputsPtr_->hasData()) {
return Status::OK();
}
status = checkIfDuplicateColumn();
if (!status.ok()) {
return status;
}
auto vidsStatus = inputsPtr_->getDistinctVIDs(*colname_);
if (!vidsStatus.ok()) {
return std::move(vidsStatus).status();
}
vids_ = std::move(vidsStatus).value();
return Status::OK();
} else {
fromType_ = kInstantExpr;
std::unordered_set<VertexID> uniqID;
for (auto *expr : sentence_->vidList()) {
expr->setContext(expCtx_.get());
status = expr->prepare();
if (!status.ok()) {
break;
}
Getters getters;
auto value = expr->eval(getters);
if (!value.ok()) {
return value.status();
}
auto v = value.value();
if (!Expression::isInt(v)) {
status = Status::Error("Vertex ID should be of type integer");
break;
}
auto valInt = Expression::asInt(v);
if (distinct_) {
auto result = uniqID.emplace(valInt);
if (result.second) {
vids_.emplace_back(valInt);
}
} else {
vids_.emplace_back(valInt);
}
}
}
return status;
}
Status FetchVerticesExecutor::prepareTags() {
Status status = Status::OK();
auto* tags = sentence_->tags();
if (tags == nullptr) {
LOG(ERROR) << "tags shall never be null";
return Status::Error("tags shall never be null");
}
auto tagNames = tags->labels();
if (tagNames.empty()) {
LOG(ERROR) << "tags shall never be empty";
return Status::Error("tags shall never be empty");
}
if (tagNames.size() == 1 && *tagNames[0] == "*") {
auto tagsStatus = ectx()->schemaManager()->getAllTag(spaceId_);
if (!tagsStatus.ok()) {
return tagsStatus.status();
}
for (auto& tagName : std::move(tagsStatus).value()) {
auto tagIdStatus = ectx()->schemaManager()->toTagID(spaceId_, tagName);
if (!tagIdStatus.ok()) {
return tagIdStatus.status();
}
auto tagId = tagIdStatus.value();
tagNames_.push_back(tagName);
tagIds_.push_back(tagId);
auto result = tagNameSet_.emplace(tagName);
if (!result.second) {
return Status::Error(folly::sformat("tag({}) was dup", tagName));
}
}
} else {
for (auto tagName : tagNames) {
auto tagStatus = ectx()->schemaManager()->toTagID(spaceId_, *tagName);
if (!tagStatus.ok()) {
return tagStatus.status();
}
auto tagId = tagStatus.value();
tagNames_.push_back(*tagName);
tagIds_.push_back(tagId);
auto result = tagNameSet_.emplace(*tagName);
if (!result.second) {
return Status::Error(folly::sformat("tag({}) was dup", *tagName));
}
}
}
return status;
}
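// Illustrative nGQL forms handled by prepareTags() above (vertex id and tag
// name are hypothetical):
//   FETCH PROP ON * 101        -- wildcard branch: expanded to every tag
//   FETCH PROP ON player 101   -- explicit tag-list branch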
Status FetchVerticesExecutor::prepareYield() {
colNames_.emplace_back("VertexID");
colTypes_.emplace_back(nebula::cpp2::SupportedType::VID);
if (yieldClause_ == nullptr) {
// determine which columns to return after received response from storage.
for (unsigned i = 0; i < tagNames_.size(); i++) {
auto& tagName = tagNames_[i];
auto tagId = tagIds_[i];
std::shared_ptr<const meta::SchemaProviderIf> tagSchema =
ectx()->schemaManager()->getTagSchema(spaceId_, tagId);
if (tagSchema == nullptr) {
return Status::Error("No tag schema for %s", tagName.c_str());
}
for (auto iter = tagSchema->begin(); iter != tagSchema->end(); ++iter) {
auto *prop = iter->getName();
storage::cpp2::PropDef pd;
pd.owner = storage::cpp2::PropOwner::SOURCE;
pd.name = prop;
pd.id.set_tag_id(tagId);
props_.emplace_back(std::move(pd));
}
}
} else {
for (auto *col : yieldClause_->columns()) {
if (!col->getFunName().empty()) {
return Status::SyntaxError("Do not support aggregated query with fetch prop on.");
}
if (col->expr()->isInputExpression()) {
auto *inputExpr = dynamic_cast<InputPropertyExpression*>(col->expr());
auto *colName = inputExpr->prop();
if (*colName == "*") {
auto colNames = inputsPtr_->getColNames();
for (auto &prop : colNames) {
Expression *expr = new InputPropertyExpression(new std::string(prop));
auto *column = new YieldColumn(expr);
yieldColsHolder_.addColumn(column);
yields_.emplace_back(column);
colNames_.emplace_back(column->toString());
colTypes_.emplace_back(nebula::cpp2::SupportedType::UNKNOWN);
expCtx_->addInputProp(prop);
}
continue;
}
} else if (col->expr()->isVariableExpression()) {
auto *variableExpr = dynamic_cast<VariablePropertyExpression*>(col->expr());
auto *colName = variableExpr->prop();
if (*colName == "*") {
auto colNames = inputsPtr_->getColNames();
for (auto &prop : colNames) {
auto *alias = new std::string(*(variableExpr->alias()));
Expression *expr =
new VariablePropertyExpression(alias, new std::string(prop));
auto *column = new YieldColumn(expr);
yieldColsHolder_.addColumn(column);
yields_.emplace_back(column);
colNames_.emplace_back(column->toString());
colTypes_.emplace_back(nebula::cpp2::SupportedType::UNKNOWN);
expCtx_->addInputProp(prop);
}
continue;
}
}
yields_.emplace_back(col);
col->expr()->setContext(expCtx_.get());
Status status = col->expr()->prepare();
if (!status.ok()) {
return status;
}
if (col->alias() == nullptr) {
colNames_.emplace_back(col->expr()->toString());
} else {
colNames_.emplace_back(*col->alias());
}
auto type = calculateExprType(col->expr());
colTypes_.emplace_back(type);
VLOG(1) << "type: " << static_cast<int64_t>(colTypes_.back());
}
if (expCtx_->hasSrcTagProp() || expCtx_->hasDstTagProp()) {
return Status::SyntaxError(
"tag.prop and edgetype.prop are supported in fetch sentence.");
}
auto aliasProps = expCtx_->aliasProps();
for (auto &pair : aliasProps) {
auto& tagName = pair.first;
auto& prop = pair.second;
if (tagNameSet_.find(tagName) == tagNameSet_.end()) {
return Status::SyntaxError(
"Near [%s.%s], tag should be declared in `ON' clause first.",
tagName.c_str(), prop.c_str());
}
auto tagStatus = ectx()->schemaManager()->toTagID(spaceId_, tagName);
if (!tagStatus.ok()) {
return tagStatus.status();
}
auto tagId = tagStatus.value();
std::shared_ptr<const meta::SchemaProviderIf> tagSchema =
ectx()->schemaManager()->getTagSchema(spaceId_, tagId);
if (tagSchema == nullptr) {
return Status::Error("No tag schema for %s", tagName.c_str());
}
if (tagSchema->getFieldIndex(prop) == -1) {
return Status::Error(
"`%s' is not a prop of `%s'", tagName.c_str(), prop.c_str());
}
storage::cpp2::PropDef pd;
pd.owner = storage::cpp2::PropOwner::SOURCE;
pd.name = prop;
pd.id.set_tag_id(tagId);
props_.emplace_back(std::move(pd));
}
}
return Status::OK();
}
Status FetchVerticesExecutor::prepareClauses() {
DCHECK(sentence_ != nullptr);
spaceId_ = ectx()->rctx()->session()->space();
expCtx_ = std::make_unique<ExpressionContext>();
expCtx_->setStorageClient(ectx()->getStorageClient());
expCtx_->setSpace(spaceId_);
Status status;
do {
status = checkIfGraphSpaceChosen();
if (!status.ok()) {
break;
}
yieldClause_ = sentence_->yieldClause();
if (yieldClause_ != nullptr) {
distinct_ = yieldClause_->isDistinct();
}
status = prepareVids();
if (!status.ok()) {
break;
}
status = prepareTags();
if (!status.ok()) {
break;
}
status = prepareYield();
if (!status.ok()) {
break;
}
} while (false);
if (!status.ok()) {
LOG(ERROR) << "Preparing failed: " << status;
return status;
}
return status;
}
void FetchVerticesExecutor::onEmptyInputs() {
if (onResult_) {
auto outputs = std::make_unique<InterimResult>(std::move(colNames_));
onResult_(std::move(outputs));
} else if (resp_ == nullptr) {
resp_ = std::make_unique<cpp2::ExecutionResponse>();
resp_->set_column_names(std::move(colNames_));
}
doFinish(Executor::ProcessControl::kNext);
}
void FetchVerticesExecutor::execute() {
auto status = prepareClauses();
if (!status.ok()) {
doError(std::move(status));
return;
}
if (vids_.empty()) {
LOG(WARNING) << "Empty vids";
onEmptyInputs();
return;
}
fetchVertices();
}
void FetchVerticesExecutor::fetchVertices() {
auto future = ectx()->getStorageClient()->getVertexProps(
spaceId_, vids_, std::move(props_));
auto *runner = ectx()->rctx()->runner();
auto cb = [this] (RpcResponse &&result) mutable {
auto completeness = result.completeness();
if (completeness == 0) {
doError(Status::Error("Get tag props failed"));
return;
} else if (completeness != 100) {
LOG(INFO) << "Get vertices partially failed: " << completeness << "%";
for (auto &error : result.failedParts()) {
LOG(ERROR) << "part: " << error.first
<< "error code: " << static_cast<int>(error.second);
}
ectx()->addWarningMsg("Fetch vertices executor was partially performed");
}
processResult(std::move(result));
};
auto error = [this] (auto &&e) {
auto msg = folly::stringPrintf("Get tag props exception: %s.", e.what().c_str());
LOG(ERROR) << msg;
doError(Status::Error(std::move(msg)));
};
std::move(future).via(runner).thenValue(cb).thenError(error);
}
void FetchVerticesExecutor::processResult(RpcResponse &&result) {
auto all = result.responses();
std::shared_ptr<SchemaWriter> outputSchema;
std::unique_ptr<RowSetWriter> rsWriter;
size_t num = 0;
for (auto &resp : all) {
num += resp.vertices.size();
}
if (num == 0) {
finishExecution(std::move(rsWriter));
return;
}
std::unordered_map<VertexID, std::map<TagID, RowReader>> dataMap;
dataMap.reserve(num);
std::unordered_map<TagID, std::shared_ptr<const meta::SchemaProviderIf>> tagSchemaMap;
std::set<TagID> tagIdSet;
for (auto &resp : all) {
if (!resp.__isset.vertices || resp.vertices.empty()) {
continue;
}
auto *vertexSchema = resp.get_vertex_schema();
if (vertexSchema != nullptr) {
std::transform(vertexSchema->cbegin(), vertexSchema->cend(),
std::inserter(tagSchemaMap, tagSchemaMap.begin()), [](auto &s) {
return std::make_pair(
s.first, std::make_shared<ResultSchemaProvider>(s.second));
});
}
for (auto &vdata : resp.vertices) {
if (!vdata.__isset.tag_data || vdata.tag_data.empty()) {
continue;
}
for (auto& tagData : vdata.tag_data) {
auto& data = tagData.data;
VertexID vid = vdata.vertex_id;
TagID tagId = tagData.tag_id;
if (tagSchemaMap.find(tagId) == tagSchemaMap.end()) {
auto ver = RowReader::getSchemaVer(data);
if (ver < 0) {
LOG(ERROR) << "Found schema version negative " << ver;
doError(Status::Error("Found schema version negative: %d", ver));
return;
}
auto schema = ectx()->schemaManager()->getTagSchema(spaceId_, tagId, ver);
if (schema == nullptr) {
VLOG(3) << "Schema not found for tag id: " << tagId;
// Ignore the bad data.
continue;
}
tagSchemaMap[tagId] = schema;
}
auto vschema = tagSchemaMap[tagId];
auto vreader = RowReader::getRowReader(data, vschema);
dataMap[vid].emplace(std::make_pair(tagId, std::move(vreader)));
tagIdSet.insert(tagId);
}
}
}
if (yieldClause_ == nullptr) {
for (TagID tagId : tagIdSet) {
auto tagSchema = tagSchemaMap[tagId];
auto tagFound = ectx()->schemaManager()->toTagName(spaceId_, tagId);
if (!tagFound.ok()) {
VLOG(3) << "Tag name not found for tag id: " << tagId;
// Ignore the bad data.
continue;
}
auto tagName = std::move(tagFound).value();
for (auto iter = tagSchema->begin(); iter != tagSchema->end(); ++iter) {
auto *ref = new std::string("");
auto *alias = new std::string(tagName);
auto *prop = iter->getName();
Expression *expr =
new AliasPropertyExpression(ref, alias, new std::string(prop));
auto *column = new YieldColumn(expr);
yieldColsHolder_.addColumn(column);
yields_.emplace_back(column);
colNames_.emplace_back(expr->toString());
colTypes_.emplace_back(nebula::cpp2::SupportedType::UNKNOWN);
}
}
}
if (fromType_ == kRef) {
if (inputsPtr_ == nullptr) {
LOG(ERROR) << "inputs is nullptr.";
doError(Status::Error("inputs is nullptr."));
return;
}
auto visitor = [&, this] (const RowReader *reader) -> Status {
VertexID vid = 0;
auto rc = reader->getVid(*colname_, vid);
if (rc != ResultType::SUCCEEDED) {
return Status::Error("Column `%s' not found", colname_->c_str());
}
if (dataMap.find(vid) == dataMap.end() && !expCtx_->hasInputProp()) {
return Status::OK();
}
// If the yield references input props, keep going with an empty item even
// when no vertex data was fetched for this vid.
auto& ds = dataMap[vid];
std::vector<VariantType> record;
record.emplace_back(VariantType(vid));
auto schema = reader->getSchema().get();
Getters getters;
getters.getVariableProp = [&] (const std::string &prop) -> OptVariantType {
return Collector::getProp(schema, prop, reader);
};
getters.getInputProp = [&] (const std::string &prop) -> OptVariantType {
return Collector::getProp(schema, prop, reader);
};
getters.getAliasProp = [&] (const std::string& tagName, const std::string &prop)
-> OptVariantType {
auto tagIdStatus = ectx()->schemaManager()->toTagID(spaceId_, tagName);
if (!tagIdStatus.ok()) {
return tagIdStatus.status();
}
TagID tagId = std::move(tagIdStatus).value();
auto tagIter = ds.find(tagId);
if (tagIter != ds.end()) {
auto vreader = tagIter->second.get();
auto vschema = vreader->getSchema().get();
return Collector::getProp(vschema, prop, vreader);
} else {
auto ts = ectx()->schemaManager()->getTagSchema(spaceId_, tagId);
if (ts == nullptr) {
return Status::Error("No tag schema for %s", tagName.c_str());
}
return RowReader::getDefaultProp(ts.get(), prop);
}
};
for (auto *column : yields_) {
auto *expr = column->expr();
auto value = expr->eval(getters);
if (!value.ok()) {
return value.status();
}
record.emplace_back(std::move(value).value());
}
if (outputSchema == nullptr) {
outputSchema = std::make_shared<SchemaWriter>();
rsWriter = std::make_unique<RowSetWriter>(outputSchema);
auto getSchemaStatus = Collector::getSchema(
record, colNames_, colTypes_, outputSchema.get());
if (!getSchemaStatus.ok()) {
return getSchemaStatus;
}
}
auto writer = std::make_unique<RowWriter>(outputSchema);
for (auto& value : record) {
auto status = Collector::collect(value, writer.get());
if (!status.ok()) {
return status;
}
}
rsWriter->addRow(*writer);
return Status::OK();
};
Status status = inputsPtr_->applyTo(visitor);
if (!status.ok()) {
LOG(ERROR) << "inputs visit failed. " << status.toString();
doError(status);
return;
}
} else {
for (auto vid : vids_) {
auto iter = dataMap.find(vid);
if (iter == dataMap.end()) {
continue;
}
auto& ds = iter->second;
std::vector<VariantType> record;
record.emplace_back(VariantType(vid));
Getters getters;
getters.getAliasProp = [&] (const std::string& tagName, const std::string &prop)
-> OptVariantType {
auto tagIdStatus = ectx()->schemaManager()->toTagID(spaceId_, tagName);
if (!tagIdStatus.ok()) {
return tagIdStatus.status();
}
TagID tagId = std::move(tagIdStatus).value();
auto tagIter = ds.find(tagId);
if (tagIter != ds.end()) {
auto vreader = tagIter->second.get();
auto vschema = vreader->getSchema().get();
return Collector::getProp(vschema, prop, vreader);
} else {
auto ts = ectx()->schemaManager()->getTagSchema(spaceId_, tagId);
if (ts == nullptr) {
return Status::Error("No tag schema for %s", tagName.c_str());
}
return RowReader::getDefaultProp(ts.get(), prop);
}
};
for (auto *column : yields_) {
auto *expr = column->expr();
auto value = expr->eval(getters);
if (!value.ok()) {
doError(value.status());
return;
}
record.emplace_back(std::move(value).value());
}
if (outputSchema == nullptr) {
outputSchema = std::make_shared<SchemaWriter>();
rsWriter = std::make_unique<RowSetWriter>(outputSchema);
auto getSchemaStatus = Collector::getSchema(
record, colNames_, colTypes_, outputSchema.get());
if (!getSchemaStatus.ok()) {
doError(getSchemaStatus);
return;
}
}
auto writer = std::make_unique<RowWriter>(outputSchema);
for (auto& value : record) {
auto status = Collector::collect(value, writer.get());
if (!status.ok()) {
doError(status);
return;
}
}
rsWriter->addRow(*writer);
}
}
finishExecution(std::move(rsWriter));
}
void FetchVerticesExecutor::setupResponse(cpp2::ExecutionResponse &resp) {
if (resp_ == nullptr) {
resp_ = std::make_unique<cpp2::ExecutionResponse>();
resp_->set_column_names(std::move(colNames_));
}
resp = std::move(*resp_);
}
void FetchVerticesExecutor::finishExecution(std::unique_ptr<RowSetWriter> rsWriter) {
auto outputs = std::make_unique<InterimResult>(std::move(colNames_));
if (rsWriter != nullptr) {
outputs->setInterim(std::move(rsWriter));
}
if (onResult_) {
onResult_(std::move(outputs));
} else {
resp_ = std::make_unique<cpp2::ExecutionResponse>();
auto colNames = outputs->getColNames();
resp_->set_column_names(std::move(colNames));
if (outputs->hasData()) {
auto ret = outputs->getRows();
if (!ret.ok()) {
LOG(ERROR) << "Get rows failed: " << ret.status();
doError(std::move(ret).status());
return;
}
resp_->set_rows(std::move(ret).value());
}
}
doFinish(Executor::ProcessControl::kNext);
}
} // namespace graph
} // namespace nebula
| 1 | 30,037 | Seems we don't need it anymore. | vesoft-inc-nebula | cpp |
@@ -54,7 +54,7 @@ class Guidance < ActiveRecord::Base
validates :published, inclusion: { message: INCLUSION_MESSAGE,
in: BOOLEAN_VALUES}
- validates :themes, presence: { message: PRESENCE_MESSAGE }
+ validates :themes, presence: { message: PRESENCE_MESSAGE }, if: :published?
# Retrieves every guidance associated to an org
scope :by_org, -> (org) { | 1 | # Guidance provides information from organisations to Users, helping them when
# answering questions. (e.g. "Here's how to think about your data
# protection responsibilities...")
#
# == Schema Information
#
# Table name: guidances
#
# id :integer not null, primary key
# published :boolean
# text :text
# created_at :datetime not null
# updated_at :datetime not null
# guidance_group_id :integer
#
# Indexes
#
# index_guidances_on_guidance_group_id (guidance_group_id)
#
# Foreign Keys
#
# fk_rails_... (guidance_group_id => guidance_groups.id)
#
# [+Project:+] DMPRoadmap
# [+Description:+]
# This class keeps the information organisations enter to support users when answering questions.
# It always belongs to a GuidanceGroup and can be linked to a question directly or through one or more themes.
# [+Created:+] 07/07/2014
# [+Copyright:+] Digital Curation Centre and California Digital Library
class Guidance < ActiveRecord::Base
include GlobalHelpers
include ValidationMessages
include ValidationValues
# ================
# = Associations =
# ================
belongs_to :guidance_group
has_and_belongs_to_many :themes, join_table: "themes_in_guidance"
# ===============
# = Validations =
# ===============
validates :text, presence: { message: PRESENCE_MESSAGE }
validates :guidance_group, presence: { message: PRESENCE_MESSAGE }
validates :published, inclusion: { message: INCLUSION_MESSAGE,
in: BOOLEAN_VALUES}
validates :themes, presence: { message: PRESENCE_MESSAGE }
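# Illustrative behaviour sketch for the `if: :published?` guard proposed in
# the diff above (attribute values are hypothetical):
#   g = Guidance.new(text: "...", guidance_group: group, published: false)
#   g.valid?            # => true  (themes no longer required while unpublished)
#   g.published = true
#   g.valid?            # => false (themes must be present once published)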
# Retrieves every guidance associated to an org
scope :by_org, -> (org) {
joins(:guidance_group).merge(GuidanceGroup.by_org(org))
}
scope :search, -> (term) {
search_pattern = "%#{term}%"
joins(:guidance_group)
.where("guidances.text LIKE ? OR guidance_groups.name LIKE ?",
search_pattern,
search_pattern)
}
# =================
# = Class methods =
# =================
# Returns whether or not a given user can view a given guidance
# we define guidances viewable to a user by those owned by a guidance group:
# owned by the managing curation center
# owned by a funder organisation
# owned by an organisation, of which the user is a member
#
# id - The Integer id for a guidance
# user - A User object
#
# Returns Boolean
def self.can_view?(user, id)
guidance = Guidance.find_by(id: id)
viewable = false
unless guidance.nil?
unless guidance.guidance_group.nil?
# guidances are viewable if they are owned by the user's org
if guidance.guidance_group.org == user.org
viewable = true
end
# guidance groups are viewable if they are owned by the Managing
# Curation Center
if Org.managing_orgs.include?(guidance.guidance_group.org)
viewable = true
end
# guidance groups are viewable if they are owned by a funder
if Org.funder.include?(guidance.guidance_group.org)
viewable = true
end
end
end
return viewable
end
# Returns a list of all guidances which a specified user can view
# we define guidances viewable to a user by those owned by a guidance group:
# owned by the Managing Curation Center
# owned by a funder organisation
# owned by an organisation, of which the user is a member
#
# user - A User object
#
# Returns Array
def self.all_viewable(user)
managing_groups = Org.includes(guidance_groups: :guidances)
.managing_orgs.collect{|o| o.guidance_groups}
# find all groups owned by a Funder organisation
funder_groups = Org.includes(guidance_groups: :guidances)
.funder.collect{|org| org.guidance_groups}
# find all groups owned by any of the user's organisations
organisation_groups = user.org.guidance_groups
# find all guidances belonging to any of the viewable groups
all_viewable_groups = (managing_groups +
funder_groups +
organisation_groups).flatten
all_viewable_guidances = all_viewable_groups.collect do |group|
group.guidances
end
# pass the list of viewable guidances to the view
return all_viewable_guidances.flatten
end
# Determine if a guidance is in a group which belongs to a specified
# organisation
#
# org_id - The Integer id for an organisation
#
# Returns Boolean
def in_group_belonging_to?(org_id)
unless guidance_group.nil?
if guidance_group.org.id == org_id
return true
end
end
return false
end
end
| 1 | 18,167 | This might cause problems with the weird way we publish Guidance and Groups in the UI. We will have to make sure that UAT is thorough. | DMPRoadmap-roadmap | rb |
@@ -75,7 +75,7 @@ func NewRawExporter(config Config) (*Exporter, error) {
// defer pipeline.Stop()
// ... Done
func InstallNewPipeline(config Config) (*push.Controller, error) {
- controller, err := NewExportPipeline(config)
+ controller, err := NewExportPipeline(config, time.Hour)
if err != nil {
return controller, err
} | 1 | // Copyright 2019, OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package dogstatsd // import "go.opentelemetry.io/otel/exporter/metric/dogstatsd"
import (
"bytes"
"time"
"go.opentelemetry.io/otel/api/global"
"go.opentelemetry.io/otel/exporter/metric/internal/statsd"
export "go.opentelemetry.io/otel/sdk/export/metric"
"go.opentelemetry.io/otel/sdk/metric/batcher/ungrouped"
"go.opentelemetry.io/otel/sdk/metric/controller/push"
"go.opentelemetry.io/otel/sdk/metric/selector/simple"
)
type (
Config = statsd.Config
// Exporter implements a dogstatsd-format statsd exporter,
// which encodes label sets as independent fields in the
// output.
//
// TODO: find a link for this syntax. It's been copied out of
// code, not a specification:
//
// https://github.com/stripe/veneur/blob/master/sinks/datadog/datadog.go
Exporter struct {
*statsd.Exporter
*statsd.LabelEncoder
ReencodedLabelsCount int
}
)
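// For orientation, a dogstatsd-format line carries the label set as tags
// after a '#' separator, e.g. (illustrative only, not verbatim output of
// this exporter):
//
//   request.count:1|c|#service:api,region:us-east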
var (
_ export.Exporter = &Exporter{}
_ export.LabelEncoder = &Exporter{}
)
// NewRawExporter returns a new Dogstatsd-syntax exporter for use in a pipeline.
// This type implements the metric.LabelEncoder interface,
// allowing the SDK's unique label encoding to be pre-computed
// for the exporter and stored in the LabelSet.
func NewRawExporter(config Config) (*Exporter, error) {
exp := &Exporter{
LabelEncoder: statsd.NewLabelEncoder(),
}
var err error
exp.Exporter, err = statsd.NewExporter(config, exp)
return exp, err
}
// InstallNewPipeline instantiates a NewExportPipeline and registers it globally.
// Typically called as:
//
// pipeline, err := dogstatsd.InstallNewPipeline(dogstatsd.Config{...})
// if err != nil {
// ...
// }
// defer pipeline.Stop()
// ... Done
func InstallNewPipeline(config Config) (*push.Controller, error) {
controller, err := NewExportPipeline(config)
if err != nil {
return controller, err
}
global.SetMeterProvider(controller)
return controller, err
}
// NewExportPipeline sets up a complete export pipeline with the recommended setup,
// chaining a NewRawExporter into the recommended selectors and batchers.
func NewExportPipeline(config Config) (*push.Controller, error) {
selector := simple.NewWithExactMeasure()
exporter, err := NewRawExporter(config)
if err != nil {
return nil, err
}
// The ungrouped batcher ensures that the export sees the full
// set of labels as dogstatsd tags.
batcher := ungrouped.New(selector, false)
// The pusher automatically recognizes that the exporter
// implements the LabelEncoder interface, which ensures the
// export encoding for labels is encoded in the LabelSet.
pusher := push.New(batcher, exporter, time.Hour)
pusher.Start()
return pusher, nil
}
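// Illustrative sketch of choosing a shorter push period (assumes a variant of
// NewExportPipeline that accepts the period argument, as in the diff above;
// the one-minute value is a typical statsd cadence, not something this
// package mandates):
//
//   pusher, err := dogstatsd.NewExportPipeline(config, time.Minute)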
// AppendName is part of the stats-internal adapter interface.
func (*Exporter) AppendName(rec export.Record, buf *bytes.Buffer) {
_, _ = buf.WriteString(rec.Descriptor().Name())
}
// AppendTags is part of the stats-internal adapter interface.
func (e *Exporter) AppendTags(rec export.Record, buf *bytes.Buffer) {
encoded, inefficient := e.LabelEncoder.ForceEncode(rec.Labels())
_, _ = buf.WriteString(encoded)
if inefficient {
e.ReencodedLabelsCount++
}
}
| 1 | 11,311 | this default needs to be on order 1 minute, I'm not sure why we defaulted to 1 hour below... | open-telemetry-opentelemetry-go | go |
@@ -92,6 +92,15 @@ func (ACMEIssuer) CaddyModule() caddy.ModuleInfo {
func (iss *ACMEIssuer) Provision(ctx caddy.Context) error {
iss.logger = ctx.Logger(iss)
+ // expand email address, if non-empty
+ if iss.Email != "" {
+ email, err := caddy.NewReplacer().ReplaceOrErr(iss.Email, true, true)
+ if err != nil {
+ return fmt.Errorf("expanding email address '%s': %v", iss.Email, err)
+ }
+ iss.Email = email
+ }
+
// DNS providers
if iss.Challenges != nil && iss.Challenges.DNS != nil && iss.Challenges.DNS.ProviderRaw != nil {
val, err := ctx.LoadModule(iss.Challenges.DNS, "ProviderRaw") | 1 | // Copyright 2015 Matthew Holt and The Caddy Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package caddytls
import (
"context"
"crypto/x509"
"fmt"
"io/ioutil"
"net/url"
"strconv"
"time"
"github.com/caddyserver/caddy/v2"
"github.com/caddyserver/caddy/v2/caddyconfig"
"github.com/caddyserver/caddy/v2/caddyconfig/caddyfile"
"github.com/caddyserver/certmagic"
"github.com/mholt/acmez"
"github.com/mholt/acmez/acme"
"go.uber.org/zap"
)
func init() {
caddy.RegisterModule(ACMEIssuer{})
}
// ACMEIssuer makes an ACME manager
// for managing certificates using ACME.
//
// TODO: support multiple ACME endpoints (probably
// requires an array of these structs) - caddy would
// also have to load certs from the backup CAs if the
// first one is expired...
type ACMEIssuer struct {
// The URL to the CA's ACME directory endpoint.
CA string `json:"ca,omitempty"`
// The URL to the test CA's ACME directory endpoint.
// This endpoint is only used during retries if there
// is a failure using the primary CA.
TestCA string `json:"test_ca,omitempty"`
// Your email address, so the CA can contact you if necessary.
// Not required, but strongly recommended to provide one so
// you can be reached if there is a problem. Your email is
// not sent to any Caddy mothership or used for any purpose
// other than ACME transactions.
Email string `json:"email,omitempty"`
// If using an ACME CA that requires an external account
// binding, specify the CA-provided credentials here.
ExternalAccount *acme.EAB `json:"external_account,omitempty"`
// Time to wait before timing out an ACME operation.
ACMETimeout caddy.Duration `json:"acme_timeout,omitempty"`
// Configures the various ACME challenge types.
Challenges *ChallengesConfig `json:"challenges,omitempty"`
// An array of files of CA certificates to accept when connecting to the
// ACME CA. Generally, you should only use this if the ACME CA endpoint
// is internal or for development/testing purposes.
TrustedRootsPEMFiles []string `json:"trusted_roots_pem_files,omitempty"`
rootPool *x509.CertPool
template certmagic.ACMEManager
magic *certmagic.Config
logger *zap.Logger
}
// CaddyModule returns the Caddy module information.
func (ACMEIssuer) CaddyModule() caddy.ModuleInfo {
return caddy.ModuleInfo{
ID: "tls.issuance.acme",
New: func() caddy.Module { return new(ACMEIssuer) },
}
}
// Provision sets up iss.
func (iss *ACMEIssuer) Provision(ctx caddy.Context) error {
iss.logger = ctx.Logger(iss)
// DNS providers
if iss.Challenges != nil && iss.Challenges.DNS != nil && iss.Challenges.DNS.ProviderRaw != nil {
val, err := ctx.LoadModule(iss.Challenges.DNS, "ProviderRaw")
if err != nil {
return fmt.Errorf("loading DNS provider module: %v", err)
}
if deprecatedProvider, ok := val.(acmez.Solver); ok {
// TODO: For a temporary amount of time, we are allowing the use of DNS
// providers from go-acme/lego since there are so many providers implemented
// using that API -- they are adapted as an all-in-one Caddy module in this
// repository: https://github.com/caddy-dns/lego-deprecated - the module is a
// acmez.Solver type, so we use it directly. The user must set environment
// variables to configure it. Remove this shim once a sufficient number of
// DNS providers are implemented for the libdns APIs instead.
iss.Challenges.DNS.solver = deprecatedProvider
} else {
iss.Challenges.DNS.solver = &certmagic.DNS01Solver{
DNSProvider: val.(certmagic.ACMEDNSProvider),
TTL: time.Duration(iss.Challenges.DNS.TTL),
PropagationTimeout: time.Duration(iss.Challenges.DNS.PropagationTimeout),
Resolvers: iss.Challenges.DNS.Resolvers,
}
}
}
// add any custom CAs to trust store
if len(iss.TrustedRootsPEMFiles) > 0 {
iss.rootPool = x509.NewCertPool()
for _, pemFile := range iss.TrustedRootsPEMFiles {
pemData, err := ioutil.ReadFile(pemFile)
if err != nil {
return fmt.Errorf("loading trusted root CA's PEM file: %s: %v", pemFile, err)
}
if !iss.rootPool.AppendCertsFromPEM(pemData) {
return fmt.Errorf("unable to add %s to trust pool: %v", pemFile, err)
}
}
}
var err error
iss.template, err = iss.makeIssuerTemplate()
if err != nil {
return err
}
return nil
}
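// Illustrative sketch of the email placeholder expansion proposed in the diff
// above (the environment variable name is hypothetical): a config could set
// "email": "{env.ACME_EMAIL}", which Provision would resolve via
//
//   email, err := caddy.NewReplacer().ReplaceOrErr("{env.ACME_EMAIL}", true, true)
//
// where the two booleans turn empty or unknown placeholders into errors.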
func (iss *ACMEIssuer) makeIssuerTemplate() (certmagic.ACMEManager, error) {
template := certmagic.ACMEManager{
CA: iss.CA,
TestCA: iss.TestCA,
Email: iss.Email,
CertObtainTimeout: time.Duration(iss.ACMETimeout),
TrustedRoots: iss.rootPool,
ExternalAccount: iss.ExternalAccount,
Logger: iss.logger,
}
if iss.Challenges != nil {
if iss.Challenges.HTTP != nil {
template.DisableHTTPChallenge = iss.Challenges.HTTP.Disabled
template.AltHTTPPort = iss.Challenges.HTTP.AlternatePort
}
if iss.Challenges.TLSALPN != nil {
template.DisableTLSALPNChallenge = iss.Challenges.TLSALPN.Disabled
template.AltTLSALPNPort = iss.Challenges.TLSALPN.AlternatePort
}
if iss.Challenges.DNS != nil {
template.DNS01Solver = iss.Challenges.DNS.solver
}
template.ListenHost = iss.Challenges.BindHost
}
return template, nil
}
// SetConfig sets the associated certmagic config for this issuer.
// This is required because ACME needs values from the config in
// order to solve the challenges during issuance. This implements
// the ConfigSetter interface.
func (iss *ACMEIssuer) SetConfig(cfg *certmagic.Config) {
iss.magic = cfg
}
// TODO: I kind of hate how each call to these methods needs to
// make a new ACME manager to fill in defaults before using; can
// we find the right place to do that just once and then re-use?
// PreCheck implements the certmagic.PreChecker interface.
func (iss *ACMEIssuer) PreCheck(ctx context.Context, names []string, interactive bool) error {
return certmagic.NewACMEManager(iss.magic, iss.template).PreCheck(ctx, names, interactive)
}
// Issue obtains a certificate for the given csr.
func (iss *ACMEIssuer) Issue(ctx context.Context, csr *x509.CertificateRequest) (*certmagic.IssuedCertificate, error) {
return certmagic.NewACMEManager(iss.magic, iss.template).Issue(ctx, csr)
}
// IssuerKey returns the unique issuer key for the configured CA endpoint.
func (iss *ACMEIssuer) IssuerKey() string {
return certmagic.NewACMEManager(iss.magic, iss.template).IssuerKey()
}
// Revoke revokes the given certificate.
func (iss *ACMEIssuer) Revoke(ctx context.Context, cert certmagic.CertificateResource, reason int) error {
return certmagic.NewACMEManager(iss.magic, iss.template).Revoke(ctx, cert, reason)
}
// GetACMEIssuer returns iss. This is useful when other types embed ACMEIssuer, because
// type-asserting them to *ACMEIssuer will fail, but type-asserting them to an interface
// with only this method will succeed, and will still allow the embedded ACMEIssuer
// to be accessed and manipulated.
func (iss *ACMEIssuer) GetACMEIssuer() *ACMEIssuer { return iss }
// UnmarshalCaddyfile deserializes Caddyfile tokens into iss.
//
// ... acme {
// dir <directory_url>
// test_dir <test_directory_url>
// email <email>
// timeout <duration>
// disable_http_challenge
// disable_tlsalpn_challenge
// alt_http_port <port>
// alt_tlsalpn_port <port>
// eab <key_id> <mac_key>
// trusted_roots <pem_files...>
// dns <provider_name> [<options>]
// resolvers <dns_servers...>
// }
//
func (iss *ACMEIssuer) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
for d.Next() {
for nesting := d.Nesting(); d.NextBlock(nesting); {
switch d.Val() {
case "dir":
if !d.AllArgs(&iss.CA) {
return d.ArgErr()
}
case "test_dir":
if !d.AllArgs(&iss.TestCA) {
return d.ArgErr()
}
case "email":
if !d.AllArgs(&iss.Email) {
return d.ArgErr()
}
case "timeout":
var timeoutStr string
if !d.AllArgs(&timeoutStr) {
return d.ArgErr()
}
timeout, err := caddy.ParseDuration(timeoutStr)
if err != nil {
return d.Errf("invalid timeout duration %s: %v", timeoutStr, err)
}
iss.ACMETimeout = caddy.Duration(timeout)
case "disable_http_challenge":
if d.NextArg() {
return d.ArgErr()
}
if iss.Challenges == nil {
iss.Challenges = new(ChallengesConfig)
}
if iss.Challenges.HTTP == nil {
iss.Challenges.HTTP = new(HTTPChallengeConfig)
}
iss.Challenges.HTTP.Disabled = true
case "disable_tlsalpn_challenge":
if d.NextArg() {
return d.ArgErr()
}
if iss.Challenges == nil {
iss.Challenges = new(ChallengesConfig)
}
if iss.Challenges.TLSALPN == nil {
iss.Challenges.TLSALPN = new(TLSALPNChallengeConfig)
}
iss.Challenges.TLSALPN.Disabled = true
case "alt_http_port":
if !d.NextArg() {
return d.ArgErr()
}
port, err := strconv.Atoi(d.Val())
if err != nil {
return d.Errf("invalid port %s: %v", d.Val(), err)
}
if iss.Challenges == nil {
iss.Challenges = new(ChallengesConfig)
}
if iss.Challenges.HTTP == nil {
iss.Challenges.HTTP = new(HTTPChallengeConfig)
}
iss.Challenges.HTTP.AlternatePort = port
case "alt_tlsalpn_port":
if !d.NextArg() {
return d.ArgErr()
}
port, err := strconv.Atoi(d.Val())
if err != nil {
return d.Errf("invalid port %s: %v", d.Val(), err)
}
if iss.Challenges == nil {
iss.Challenges = new(ChallengesConfig)
}
if iss.Challenges.TLSALPN == nil {
iss.Challenges.TLSALPN = new(TLSALPNChallengeConfig)
}
iss.Challenges.TLSALPN.AlternatePort = port
case "eab":
iss.ExternalAccount = new(acme.EAB)
if !d.AllArgs(&iss.ExternalAccount.KeyID, &iss.ExternalAccount.MACKey) {
return d.ArgErr()
}
case "trusted_roots":
iss.TrustedRootsPEMFiles = d.RemainingArgs()
case "dns":
if !d.NextArg() {
return d.ArgErr()
}
provName := d.Val()
if iss.Challenges == nil {
iss.Challenges = new(ChallengesConfig)
}
if iss.Challenges.DNS == nil {
iss.Challenges.DNS = new(DNSChallengeConfig)
}
dnsProvModule, err := caddy.GetModule("dns.providers." + provName)
if err != nil {
return d.Errf("getting DNS provider module named '%s': %v", provName, err)
}
dnsProvModuleInstance := dnsProvModule.New()
if unm, ok := dnsProvModuleInstance.(caddyfile.Unmarshaler); ok {
err = unm.UnmarshalCaddyfile(d.NewFromNextSegment())
if err != nil {
return err
}
}
iss.Challenges.DNS.ProviderRaw = caddyconfig.JSONModuleObject(dnsProvModuleInstance, "name", provName, nil)
case "resolvers":
if iss.Challenges == nil {
iss.Challenges = new(ChallengesConfig)
}
if iss.Challenges.DNS == nil {
iss.Challenges.DNS = new(DNSChallengeConfig)
}
iss.Challenges.DNS.Resolvers = d.RemainingArgs()
if len(iss.Challenges.DNS.Resolvers) == 0 {
return d.ArgErr()
}
default:
return d.Errf("unrecognized ACME issuer property: %s", d.Val())
}
}
}
return nil
}
// onDemandAskRequest makes a request to the ask URL
// to see if a certificate can be obtained for name.
// The certificate request should be denied if this
// returns an error.
func onDemandAskRequest(ask string, name string) error {
askURL, err := url.Parse(ask)
if err != nil {
return fmt.Errorf("parsing ask URL: %v", err)
}
qs := askURL.Query()
qs.Set("domain", name)
askURL.RawQuery = qs.Encode()
resp, err := onDemandAskClient.Get(askURL.String())
if err != nil {
return fmt.Errorf("error checking %v to determine if certificate for hostname '%s' should be allowed: %v",
ask, name, err)
}
resp.Body.Close()
if resp.StatusCode < 200 || resp.StatusCode > 299 {
return fmt.Errorf("certificate for hostname '%s' not allowed; non-2xx status code %d returned from %v",
name, resp.StatusCode, ask)
}
return nil
}
// Interface guards
var (
_ certmagic.PreChecker = (*ACMEIssuer)(nil)
_ certmagic.Issuer = (*ACMEIssuer)(nil)
_ certmagic.Revoker = (*ACMEIssuer)(nil)
_ caddy.Provisioner = (*ACMEIssuer)(nil)
_ ConfigSetter = (*ACMEIssuer)(nil)
_ caddyfile.Unmarshaler = (*ACMEIssuer)(nil)
)
| 1 | 15,937 | I figure to make `{env.*}` work, right? I can't think of any other placeholder that makes sense here | caddyserver-caddy | go |
@@ -29,7 +29,9 @@ TestEnv::~TestEnv() {
void TestEnv::SetUp() {
FLAGS_load_data_interval_secs = 1;
// Create metaServer
- metaServer_ = nebula::meta::TestUtils::mockMetaServer(0, metaRootPath_.path());
+ metaServer_ = nebula::meta::TestUtils::mockMetaServer(
+ network::NetworkUtils::getAvailablePort(),
+ metaRootPath_.path());
FLAGS_meta_server_addrs = folly::stringPrintf("127.0.0.1:%d", metaServerPort());
// Create storageServer | 1 | /* Copyright (c) 2018 vesoft inc. All rights reserved.
*
* This source code is licensed under Apache 2.0 License,
* attached with Common Clause Condition 1.0, found in the LICENSES directory.
*/
#include "base/Base.h"
#include "graph/test/TestEnv.h"
#include "meta/test/TestUtils.h"
#include "storage/test/TestUtils.h"
DECLARE_int32(load_data_interval_secs);
DECLARE_string(meta_server_addrs);
namespace nebula {
namespace graph {
TestEnv *gEnv = nullptr;
TestEnv::TestEnv() {
}
TestEnv::~TestEnv() {
}
void TestEnv::SetUp() {
FLAGS_load_data_interval_secs = 1;
// Create metaServer
metaServer_ = nebula::meta::TestUtils::mockMetaServer(0, metaRootPath_.path());
FLAGS_meta_server_addrs = folly::stringPrintf("127.0.0.1:%d", metaServerPort());
// Create storageServer
auto threadPool = std::make_shared<folly::IOThreadPoolExecutor>(1);
auto addrsRet
= network::NetworkUtils::toHosts(folly::stringPrintf("127.0.0.1:%d", metaServerPort()));
CHECK(addrsRet.ok()) << addrsRet.status();
mClient_ = std::make_unique<meta::MetaClient>(threadPool, std::move(addrsRet.value()), true);
mClient_->init();
uint32_t localIp;
nebula::network::NetworkUtils::ipv4ToInt("127.0.0.1", localIp);
storageServer_ = nebula::storage::TestUtils::mockStorageServer(mClient_.get(),
storageRootPath_.path(),
localIp,
0,
true);
// Create graphServer
graphServer_ = TestUtils::mockGraphServer(0);
}
void TestEnv::TearDown() {
mClient_.reset();
graphServer_.reset();
storageServer_.reset();
metaServer_.reset();
}
uint16_t TestEnv::graphServerPort() const {
return graphServer_->port_;
}
uint16_t TestEnv::metaServerPort() const {
return metaServer_->port_;
}
uint16_t TestEnv::storageServerPort() const {
return storageServer_->port_;
}
std::unique_ptr<GraphClient> TestEnv::getClient() const {
auto client = std::make_unique<GraphClient>("127.0.0.1", graphServerPort());
if (cpp2::ErrorCode::SUCCEEDED != client->connect("user", "password")) {
return nullptr;
}
return client;
}
} // namespace graph
} // namespace nebula
| 1 | 19,513 | Why change the port from 0 to getAvailablePort()? | vesoft-inc-nebula | cpp |
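For context on that question: binding to port 0 already asks the OS for a free ephemeral port, which is presumably what `getAvailablePort()` does under the hood (an assumption — the nebula helper itself isn't shown here). A minimal standard-library sketch of the idea, in Python for illustration:

```python
import socket

# Bind to port 0: the OS assigns any free ephemeral port, and getsockname()
# reveals which one it picked. Helpers like getAvailablePort() typically do
# this, close the socket, and return the port number for the caller to reuse.
def get_available_port(host="127.0.0.1"):
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    try:
        sock.bind((host, 0))
        return sock.getsockname()[1]
    finally:
        sock.close()

print(get_available_port())  # e.g. 54731
```

The trade-off behind the review question: picking a port up front and passing it in is racy (another process can bind it before the server does), whereas binding to 0 and reading the port back afterwards is not — so asking why the 0 was replaced is fair.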
@@ -691,9 +691,9 @@ class TestSeleniumScriptGeneration(SeleniumTestCase):
content = fds.read()
target_lines = [
- "var_loc_keys=self.loc_mng.get_locator([{'name':'btn1',}])self.driver.find_element"
+ "var_loc_keys=self.loc_mng.get_locator([{'name':'btn1',}],30.0)self.driver.find_element"
"(var_loc_keys[0],var_loc_keys[1]).click()",
- "var_loc_keys=self.loc_mng.get_locator([{'id':'Id_123',}])self.driver.find_element"
+ "var_loc_keys=self.loc_mng.get_locator([{'id':'Id_123',}],30.0)self.driver.find_element"
"(var_loc_keys[0],var_loc_keys[1]).clear()",
"self.driver.find_element(var_loc_keys[0],var_loc_keys[1]).send_keys('London')"
] | 1 | import os
from bzt import TaurusConfigError
from bzt.six import PY2
from tests import RESOURCES_DIR
from tests.modules.selenium import SeleniumTestCase
class TestSeleniumScriptGeneration(SeleniumTestCase):
def test_modern_actions_generator(self):
self.configure({
"execution": [{
"executor": "apiritif",
"hold-for": "4m",
"ramp-up": "3m",
"scenario": "loc_sc"}],
"scenarios": {
"loc_sc": {
"default-address": "http://blazedemo.com",
"variables": {
"red_pill": "take_it",
"name": "Name"
},
"timeout": "3.5s",
"requests": [{
"label": "la-la",
"assert": [{
"contains": ['contained_text'],
"not": True
}],
"actions": [
# windows
"switchWindow(0)",
"openWindow(some.url)",
"resizeWindow(750, 750)",
"maximizeWindow()",
"closeWindow()",
"closeWindow('win_ser_local')",
# frames
"switchFrameByIdx(1)",
"switchFrame(relative=parent)",
"switchFrameByName('my_frame')",
# chains
"mouseDownByXPath(/html/body/div[3]/form/select[1])",
"mouseOutById(id_abc)",
"mouseOverByName(name_abc)",
# drag, select, assert, store
{"dragByID(address)": "elementByName(toPort)"},
{"selectByName(my_frame)": "London"},
"assertTitle(BlazeDemo)",
{"storeTitle()": "hEaDeR"},
{"storeString(Title_Basic_By)": "Final"},
{"assertValueByID(address)": "123 Beautiful st."},
{"storeTextByID(address)": "Basic"},
# click, type, keys, submit
{"typeByName(\"toPort\")": "B"},
# exec, rawcode, go, edit
"scriptEval(\"alert('This is Sparta');\")",
{"rawCode": "for i in range(10):\n if i % 2 == 0:\n print(i)"},
"go(http:\\blazemeter.com)",
{"editContentById(editor)": "lo-la-lu"},
# print, wait, pause, clearcookies, screenshot
"echoString(${red_pill})",
{"waitByName('toPort')": "visible"},
"pauseFor(4.6s)",
"clearCookies()",
"screenshot('screen.png')",
"screenshot()"
]}]}}})
self.obj.prepare()
with open(self.obj.script) as fds:
content = fds.read()
if PY2:
print_i = "print i"
else:
print_i = "print(i)"
target_lines = [
"self.wnd_mng.switch('0')",
"""self.driver.execute_script("window.open('some.url');")""",
"self.wnd_mng.close()",
"self.wnd_mng.close('win_ser_local')",
"self.frm_mng.switch('index=1')",
"self.frm_mng.switch('relative=parent')",
"ActionChains(self.driver).click_and_hold(self.driver.find_element(var_loc_chain[0], "
"var_loc_chain[1])).perform()",
"ActionChains(self.driver).move_to_element_with_offset(self.driver.find_element(var_loc_chain[0],"
"var_loc_chain[1])",
"ActionChains(self.driver).move_to_element(self.driver.find_element(var_loc_chain[0],"
"var_loc_chain[1])).perform()",
"ActionChains(self.driver).drag_and_drop(self.driver.find_element(source[0], source[1]),"
"self.driver.find_element(target[0],target[1])).perform()",
"Select(self.driver.find_element(var_loc_select[0],var_loc_select[1])).select_by_visible_text",
"self.assertEqual(self.driver.title,'BlazeDemo')",
"self.vars['hEaDeR'] = self.driver.title",
"self.vars['Final'] = 'Title_Basic_By'",
"self.vars['Basic'] = self.driver.find_element(var_loc_as[0],var_loc_as[1])."
"get_attribute('innerText')",
"self.assertEqual(self.driver.find_element(var_loc_as[0],var_loc_as[1])."
"get_attribute('value').strip(),\'123 Beautiful st.\'.strip())",
"self.driver.find_element(var_loc_keys[0],var_loc_keys[1]).clear()",
"self.driver.find_element(var_loc_keys[0],var_loc_keys[1]).send_keys('B')",
"self.driver.execute_script(\"alert('This is Sparta');\")",
"for i in range(10):",
"if ((i % 2) == 0):",
print_i,
"self.driver.get('http:\\\\blazemeter.com')",
"ifself.driver.find_element(var_edit_content[0], var_edit_content[1])."
"get_attribute('contenteditable'):"
"self.driver.execute_script((\"arguments[0].innerHTML=\'%s\';\"%\'lo-la-lu\'),"
"self.driver.find_element(var_edit_content[0],var_edit_content[1]))"
"else:",
"raiseNoSuchElementException((\'The element (%s : %r)is not a contenteditable element\'%"
"(var_edit_content[0], var_edit_content[1])))"
"print(self.vars['red_pill'])",
"WebDriverWait(self.driver, 3.5).until(econd.visibility_of_element_located((var_loc_wait[0],"
"var_loc_wait[1])), \"Element 'name':'toPort' failed to appear within 3.5s\")",
"sleep(4.6)",
"self.driver.delete_all_cookies()",
"self.driver.save_screenshot('screen.png')",
"filename = os.path.join(os.getenv('TAURUS_ARTIFACTS_DIR'), "
"('screenshot-%d.png' % (time() * 1000)))",
"self.driver.save_screenshot(filename)"
]
for idx in range(len(target_lines)):
self.assertIn(TestSeleniumScriptGeneration.clear_spaces(target_lines[idx]),
TestSeleniumScriptGeneration.clear_spaces(content),
msg="\n\n%s. %s" % (idx, target_lines[idx]))
@staticmethod
def clear_spaces(content):
return content.replace(" ", "").replace("\t", "").replace("\n", "")
def test_firefox_setup_generator(self):
self.configure({
"execution": [{
"executor": "selenium",
"hold-for": "4m",
"ramp-up": "3m",
"scenario": "loc_sc"}],
"scenarios": {
"loc_sc": {
"headless": True,
"default-address": "http://blazedemo.com",
"variables": {
"red_pill": "take_it",
"name": "Name"
},
"timeout": "3.5s",
"requests": [{
"url": "bla.com",
"assert": [{
"contains": ['contained_text'],
"not": True
}],
}]}}})
self.obj.prepare()
with open(self.obj.script) as fds:
content = fds.read()
target_lines = [
"options = webdriver.FirefoxOptions()",
"options.set_headless()",
"profile = webdriver.FirefoxProfile()",
"profile.set_preference('webdriver.log.file', '",
"driver = webdriver.Firefox(profile, firefox_options=options)"
]
for idx in range(len(target_lines)):
self.assertIn(target_lines[idx], content, msg="\n\n%s. %s" % (idx, target_lines[idx]))
def test_chrome_setup_generator(self):
self.configure({
"execution": [{
"executor": "selenium",
"hold-for": "4m",
"scenario": "loc_sc"}],
"scenarios": {
"loc_sc": {
'generate-flow-markers': True,
"browser": "Chrome",
"default-address": "http://blazedemo.com",
"variables": {
"red_pill": "take_it",
"name": "Name"
},
"timeout": "3.5s",
"requests": [{
"url": "bla.com",
"assert": [{
"contains": ['contained_text'],
"not": True
}],
}]}}})
self.obj.prepare()
with open(self.obj.script) as fds:
content = fds.read()
self.assertNotIn("options.set_headless()", content)
target_lines = [
"options = webdriver.ChromeOptions()",
"driver = webdriver.Chrome(service_log_path='",
"', chrome_options=options)"
]
for idx in range(len(target_lines)):
self.assertIn(target_lines[idx], content, msg="\n\n%s. %s" % (idx, target_lines[idx]))
def test_build_script(self):
self.configure({
"execution": [{
"executor": "selenium",
"hold-for": "4m",
"ramp-up": "3m",
"scenario": "loc_sc"}],
"scenarios": {
"loc_sc": {
"default-address": "http://blazedemo.com",
"variables": {
"red_pill": "take_it",
"name": "Name"
},
"timeout": "3.5s",
"requests": [{
"url": "/",
"assert": [{
"contains": ['contained_text'],
"not": True
}],
"actions": [
"waitByXPath(//input[@type='submit'])",
"assertTitle(BlazeDemo)",
"mouseMoveByXPath(/html/body/div[2]/div/p[2]/a)",
"doubleClickByXPath(/html/body/div[3]/h2)",
"mouseDownByXPath(/html/body/div[3]/form/select[1])",
"mouseUpByXPath(/html/body/div[3]/form/select[1]/option[6])",
{"selectByName(toPort)": "London"},
{"keysByCSS(body input.btn.btn-primary)": "KEY_ENTER"},
{"assertValueByID(address)": "123 Beautiful st."},
{"assertTextByXPath(/html/body/div[2]/form/div[1]/label)": "${name}"},
{"waitByName('toPort')": "visible"},
{"keysByName(\"toPort\")": "B"},
{"typeByName(\"toPort\")": "B"},
{"keysByName(\"toPort\")": u"KEY_ENTER"},
{"typeByName(\"toPort\")": "KEY_ENTER"},
"clickByXPath(//div[3]/form/select[1]//option[3])",
"clickByXPath(//div[3]/form/select[2]//option[6])",
"switchWindow(0)",
"openWindow(some.url)",
"switchWindow('win_ser_local')",
"switchWindow('win_ser_1')",
"switchWindow('that_window')",
"closeWindow(1)",
"closeWindow('win_ser_local')",
"closeWindow('win_ser_1')",
"closeWindow('that_window')",
"submitByName(\"toPort\")",
"scriptEval(\"alert('This is Sparta');\")",
{"rawCode": "for i in range(10):\n if i % 2 == 0:\n print(i)"},
{"dragByID(address)": "elementByName(toPort)"},
"switchFrameByName('my_frame')",
"switchFrameByIdx(1)",
"switchFrame(relative=parent)",
{"editContentById(editor)": "lo-la-lu"},
"pauseFor(3.5s)",
"clearCookies()",
"clickByLinkText(destination of the week! The Beach!)",
{"storeTitle()": "Title"},
{"storeTextByXPath(//*[@id='basics']/h2)": "Basic"},
{"storeValueByXPath(//*[@id='basics']/h1)": "World"},
{"storeString(${Title} ${Basic} by ${By})": "Final"},
"go(http:\\blazemeter.com)",
"echoString(${red_pill})",
"screenshot(screen.png)",
"screenshot()",
],
},
{"label": "empty"}
]
},
"loc_sc_remote": {
"remote": "http://user:key@remote_web_driver_host:port/wd/hub",
"capabilities": {
"browserName": "firefox",
"version": "54.0",
"platformName": "linux",
"javascriptEnabled": "True",
"platformVersion": "",
"seleniumVersion": "",
"deviceName": "",
"app": ""
},
"default-address": "http://blazedemo.com",
"timeout": "3.5s",
"requests": [{
"url": "/",
"assert": [{
"contains": ['contained_text'],
"not": True
}],
"actions": [
"waitByXPath(//input[@type='submit'])",
"assertTitle(BlazeDemo)"
],
},
{"label": "empty"}
]
}
}
})
self.obj.prepare()
exp_file = RESOURCES_DIR + "selenium/generated_from_requests.py"
str_to_replace = (self.obj.engine.artifacts_dir + os.path.sep).replace('\\', '\\\\')
self.assertFilesEqual(exp_file, self.obj.script, str_to_replace, "<somewhere>", python_files=True)
with open(self.obj.script) as script:
self.assertIn("bzt.resources.selenium_extras", script.read())
def test_headless_default(self):
self.configure({
"execution": [{
"executor": "selenium",
"scenario": "loc_sc"}],
"scenarios": {
"loc_sc": {
"browser": "Chrome",
"requests": ["http://blazedemo.com/"]
}}})
self.obj.prepare()
with open(self.obj.script) as generated:
gen_contents = generated.read()
self.assertNotIn("options.set_headless()", gen_contents)
def test_headless_chrome(self):
self.configure({
"execution": [{
"executor": "selenium",
"scenario": "loc_sc"}],
"scenarios": {
"loc_sc": {
"browser": "Chrome",
"headless": True,
"requests": ["http://blazedemo.com/"]
}}})
self.obj.prepare()
with open(self.obj.script) as generated:
gen_contents = generated.read()
self.assertIn("options.set_headless()", gen_contents)
def test_headless_firefox(self):
self.configure({
"execution": [{
"executor": "selenium",
"scenario": "loc_sc"}],
"scenarios": {
"loc_sc": {
"browser": "Firefox",
"headless": True,
"requests": ["http://blazedemo.com/"]
}}})
self.obj.prepare()
with open(self.obj.script) as generated:
gen_contents = generated.read()
self.assertIn("options.set_headless()", gen_contents)
def test_headless_safari(self):
self.configure({
"execution": [{
"executor": "selenium",
"scenario": "loc_sc"}],
"scenarios": {
"loc_sc": {
"browser": "Opera",
"headless": True,
"requests": ["http://blazedemo.com/"]
}}})
self.obj.prepare()
with open(self.obj.script) as generated:
gen_contents = generated.read()
self.assertNotIn("options.set_headless()", gen_contents)
def test_capabilities_order(self):
self.configure({
"execution": [{
"executor": "selenium",
"hold-for": "4m",
"scenario": "loc_sc_remote",
"capabilities": {
"name2": "execution",
"name4": "execution",
"name5": "execution"}}],
"scenarios": {
"loc_sc_remote": {
"remote": "http://user:key@remote_web_driver_host:port/wd/hub",
"capabilities": {
"name3": "scenario",
"name4": "scenario",
"name6": "scenario"},
"default-address": "http://blazedemo.com",
"timeout": "3.5s",
"requests": [{
"url": "/",
"actions": ["assertTitle(BlazeDemo)"]}]}},
"modules": {
"selenium": {
"capabilities": {
"name1": "settings",
"name2": "settings",
"name3": "settings"}}}})
self.obj.prepare()
with open(self.obj.script) as fds:
content = fds.read()
target_lines = [
"'name1': 'settings'",
"'name2': 'execution'",
"'name3': 'scenario'",
"'name4': 'execution'",
"'name5': 'execution'",
"'name6': 'scenario'"]
for line in target_lines:
self.assertIn(line, content)
def test_build_script_remote(self):
self.configure({
"execution": [{
"executor": "selenium",
"hold-for": "4m",
"ramp-up": "3m",
"scenario": "loc_sc_remote"}],
"scenarios": {
"loc_sc_remote": {
"remote": "http://user:key@remote_web_driver_host:port/wd/hub",
"capabilities": {
"browserName": "firefox",
"version": "54.0",
"platformName": "linux",
"javascriptEnabled": "True",
"platformVersion": "",
"seleniumVersion": "",
"deviceName": "",
"app": ""
},
"default-address": "http://blazedemo.com",
"timeout": "3.5s",
"requests": [{
"url": "/",
"assert": [{
"contains": ['contained_text'],
"not": True
}],
"actions": [
"waitByXPath(//input[@type='submit'])",
"assertTitle(BlazeDemo)"
],
},
{"label": "empty"}
]
}
}
})
self.obj.prepare()
exp_file = RESOURCES_DIR + "selenium/generated_from_requests_remote.py"
self.assertFilesEqual(exp_file, self.obj.script, python_files=True)
def test_build_script_appium_browser(self):
self.configure({
"execution": [{
"executor": "selenium",
"hold-for": "4m",
"ramp-up": "3m",
"scenario": "loc_sc_appium"}],
"scenarios": {
"loc_sc_appium": {
"browser": "Chrome-Android",
"capabilities": {
"deviceName": "",
},
"default-address": "http://blazedemo.com",
"timeout": "3.5s",
"requests": [{
"url": "/",
"assert": [{
"contains": ['contained_text'],
"not": True
}],
"actions": [
"waitByXPath(//input[@type='submit'])",
"assertTitle(BlazeDemo)"
],
},
{"label": "empty"}
]
}
}
})
self.obj.prepare()
exp_file = RESOURCES_DIR + "selenium/generated_from_requests_appium_browser.py"
self.assertFilesEqual(exp_file, self.obj.script, python_files=True)
def test_build_script_remote_empty_browser(self):
""" taurus should not wipe browserName (from capabilities) """
        self.configure({
            "execution": [{
                "executor": "selenium",
                "remote": "http://addr-of-remote-server.com",
                "scenario": "remote_sc"}],
            "scenarios": {
                "remote_sc": { # no 'browser' element
                    "capabilities": {
                        "browserName": "chrome"}, # must end up in desired_capabilities
"timeout": "3.5s",
"requests": [{
"url": "http://blazedemo.com",
"actions": [
"waitByXPath(//input[@type='submit'])"]},
{"label": "empty"}]}}})
self.obj.prepare()
with open(self.obj.script) as fds:
content = fds.read()
target = "'browserName': 'chrome'"
self.assertIn(target, content)
def test_build_script_remote_browser(self):
""" taurus should not wipe browserName (from capabilities) """
        self.configure({
            "execution": [{
                "executor": "selenium",
                "remote": "http://addr-of-remote-server.com",
                "scenario": "remote_sc"}],
            "scenarios": {
                "remote_sc": {
                    "capabilities": {
                        "browserName": "chrome"}, # must end up in desired_capabilities
"timeout": "3.5s",
"requests": [{
"url": "http://blazedemo.com",
"actions": [
"waitByXPath(//input[@type='submit'])"]},
{"label": "empty"}]}}})
self.obj.prepare()
with open(self.obj.script) as fds:
content = fds.read()
target = "'browserName': 'chrome'"
self.assertIn(target, content)
def test_build_script_remote_Firefox_browser(self):
""" check usage of 'browser' scenario options as browserName (from capabilities) """
        self.configure({
            "execution": [{
                "executor": "selenium",
                "remote": "http://addr-of-remote-server.com",
                "scenario": "remote_sc"}],
            "scenarios": {
                "remote_sc": {
                    "browser": "Firefox", # must end up in desired_capabilities (in lower case)
"timeout": "3.5s",
"requests": [{
"url": "http://blazedemo.com",
"actions": [
"waitByXPath(//input[@type='submit'])"]},
{"label": "empty"}]}}})
self.obj.prepare()
with open(self.obj.script) as fds:
content = fds.read()
target = "'browserName': 'firefox'"
self.assertIn(target, content)
def test_build_script_flow_markers(self):
self.configure({
"execution": [{
"executor": "selenium",
"hold-for": "4m",
"ramp-up": "3m",
"scenario": "loc_sc"}],
"scenarios": {
"loc_sc": {
"generate-flow-markers": True,
"browser": "Chrome",
"default-address": "http://blazedemo.com",
"timeout": "3.5s",
"requests": [{
"url": "/",
"assert": [{
"contains": ['contained_text'],
"not": True
}],
"actions": [
"waitByXPath(//input[@type='submit'])",
"assertTitle(BlazeDemo)"
],
},
{"label": "empty"}
]
}
}
})
self.obj.prepare()
exp_file = RESOURCES_DIR + "selenium/generated_from_requests_flow_markers.py"
str_to_replace = (self.obj.engine.artifacts_dir + os.path.sep).replace('\\', '\\\\')
self.assertFilesEqual(exp_file, self.obj.script, str_to_replace, "<somewhere>", python_files=True)
def test_resize_window(self):
self.configure({
"execution": [{
"executor": "selenium",
"concurrency": "1",
"iterations": "1",
"scenario": "window"}],
"scenarios": {
"window": {
"default-address": "http://blazedemo.com",
"requests": [{
"url": "/",
"actions": [
"resizeWindow(450, 450)",
"maximizeWindow()",
"closeWindow()"
],
}, ]
},
}
})
self.obj.prepare()
with open(self.obj.script) as fds:
content = fds.read()
target_lines = [
"self.driver.set_window_size('450', '450')",
"self.driver.maximize_window()"
]
for idx in range(len(target_lines)):
self.assertIn(target_lines[idx], content, msg="\n\n%s. %s" % (idx, target_lines[idx]))
def test_mix_syntax(self):
self.configure({
"execution": [{
"executor": "apiritif",
"scenario": "loc_sc"}],
"scenarios": {
"loc_sc": {
"requests": [{
"label": "la-la",
"actions": [
{
"type": "click",
"locators": [
{"name": "btn1"},
]
},
{"typeById(Id_123)": "London"}
]}]}}})
self.obj.prepare()
with open(self.obj.script) as fds:
content = fds.read()
target_lines = [
"var_loc_keys=self.loc_mng.get_locator([{'name':'btn1',}])self.driver.find_element"
"(var_loc_keys[0],var_loc_keys[1]).click()",
"var_loc_keys=self.loc_mng.get_locator([{'id':'Id_123',}])self.driver.find_element"
"(var_loc_keys[0],var_loc_keys[1]).clear()",
"self.driver.find_element(var_loc_keys[0],var_loc_keys[1]).send_keys('London')"
]
for idx in range(len(target_lines)):
self.assertIn(TestSeleniumScriptGeneration.clear_spaces(target_lines[idx]),
TestSeleniumScriptGeneration.clear_spaces(content),
msg="\n\n%s. %s" % (idx, target_lines[idx]))
def test_syntax2_drag_drop(self):
self.configure({
"execution": [{
"executor": "apiritif",
"scenario": "loc_sc"}],
"scenarios": {
"loc_sc": {
"requests": [{
"label": "la-la",
"actions": [
{
"type": "drag",
"source": [
{"xpath": "/xpath/to"}
],
"target": [
{"css": "mycss"},
{"id": "ID"}
]
}
]}]}}})
self.obj.prepare()
with open(self.obj.script) as fds:
content = fds.read()
target_lines = [
"source=self.loc_mng.get_locator([{'xpath':'/xpath/to',}])",
"target=self.loc_mng.get_locator([{'css':'mycss',},{'id':'ID',}])"
"ActionChains(self.driver).drag_and_drop(self.driver.find_element(source[0],source[1]),"
"self.driver.find_element(target[0],target[1])).perform()"
]
for idx in range(len(target_lines)):
self.assertIn(TestSeleniumScriptGeneration.clear_spaces(target_lines[idx]),
TestSeleniumScriptGeneration.clear_spaces(content),
msg="\n\n%s. %s" % (idx, target_lines[idx]))
def test_syntax2_drag_drop_missing_source(self):
self.configure({
"execution": [{
"executor": "apiritif",
"scenario": "loc_sc"}],
"scenarios": {
"loc_sc": {
"requests": [{
"label": "la-la",
"actions": [
{
"type": "drag",
"source": {
},
"target": {
"locators": [
{"css": "mycss"},
{"id": "ID"}
]
}
}
]}]}}})
with self.assertRaises(TaurusConfigError) as context:
self.obj.prepare()
self.assertTrue('Can not generate action for \'drag\'. Source is empty.' in str(context.exception))
def test_syntax2_missing_param_assert_store(self):
self.configure({
"execution": [{
"executor": "apiritif",
"scenario": "loc_sc"}],
"scenarios": {
"loc_sc": {
"requests": [{
"label": "la-la",
"actions": [
{
"type": "assertText",
"locators": [
{"css": "classname"}
]
}
]}]}}})
with self.assertRaises(TaurusConfigError) as context:
self.obj.prepare()
self.assertTrue('Missing param' in str(context.exception))
def test_syntax2_missing_param_edit(self):
self.configure({
"execution": [{
"executor": "apiritif",
"scenario": "loc_sc"}],
"scenarios": {
"loc_sc": {
"requests": [{
"label": "la-la",
"actions": [
{
"type": "editContent",
"locators": [
{"css": "classname"}
]
}
]}]}}})
with self.assertRaises(TaurusConfigError) as context:
self.obj.prepare()
self.assertTrue('Missing param' in str(context.exception))
def test_syntax2_build_script(self):
self.configure(
{
"execution": [
{
"executor": "apiritif",
"scenario": "loc_sc"
}
],
"scenarios": {
"loc_sc": {
"default-address": "http://blazedemo.com,",
"variables": {
"red_pill": "take_it,",
"name": "Name"
},
"timeout": "3.5s",
"requests": [
{
"label": "Test V2",
"actions": [
{
"type": "go",
"param": "http://blazedemo.com"
},
{
"type": "resizeWindow",
"param": "750, 750"
},
{
"type": "switchWindow",
"param": 0
},
{
"type": "mouseDown",
"locators": [
{"id": "invalid_id"},
{"xpath": "/html/body/div[3]/form/select[1]"}
]
},
{
"type": "mouseOut",
"locators": [{"id": "id_123"}]
},
{
"type": "mouseOver",
"locators": [{"name": "name_123"}]
},
{
"type": "drag",
"source": [
{"name": "invalid_name"},
{"xpath": "/html/body/div[2]/div/p[2]/a"}
],
"target": [
{"css": "invalid_css"},
{"xpath": "/html/body/div[3]/form/div"}
]
},
{
"type": "assertText",
"param": "Choose your departure city:",
"locators": [
{"css": "myclass"},
{"xpath": "/html/body/div[3]/h2"}
]
},
{
"type": "assertValue",
"param": "Find Flights",
"locators": [
{"css": "myclass"},
{"xpath": "/html/body/div[3]/form/div/input"}
]
},
{
"type": "assertTitle",
"param": "BlazeDemo"
},
{
"type": "storeTitle",
"param": "hEaDeR"
},
{
"type": "storeString",
"param": "final_var",
"value": "test_text"
},
{
"type": "storeText",
"param": "Basic",
"locators": [{"xpath": "/html/body/div[3]/h2"}]
},
{
"type": "click",
"locators": [
{"xpath": "/wrong/one"},
{"xpath": "/html/body/div[3]/form/div/input"}
]
},
{
"type": "keys",
"param": "KEY_ENTER",
"locators": [
{"xpath": "/doc/abc"},
{"css": "body > div.container > table > tbody > tr:nth-child(1) "
"> td:nth-child(2) > input"}
]
},
{
"type": "type",
"param": "myusername",
"locators": [
{"id": "fjkafjk"},
{"css": "testCss"}
]
},
{
"type": "select",
"param": "American Express",
"locators": [
{"css": "myclass"},
{"xpath": "//*[@id=\"cardType\"]"}
]
},
{
"type": "scriptEval",
"param": "window.scrollTo(0, document.body.scrollHeight);"
},
{
"type": "rawCode",
"param": "for i in range(10):\n if i % 2 == 0:\n print(i)"
},
{
"type": "echoString",
"param": "${red_pill}"
},
{
"type": "pauseFor",
"param": "4.6s"
},
{
"type": "clearCookies"
},
{
"type": "screenshot",
"param": "screen.png"
},
{
"type": "screenshot"
},
{
"type": "wait",
"param": "visible",
"locators": [
{"css": "invalid_css"},
{"name": "inputName"}
]
},
{
"type": "editContent",
"param": "lo-la-lu",
"locators": [{"id": "editor"}]
},
{
"type": "pauseFor",
"param": "4.6s"
},
{
"type": "clearCookies"
},
{
"type": "screenshot",
"param": "screen.png"
},
{
"type": "screenshot"
},
{
"type": "openWindow",
"param": "vacation.html"
},
{
"type": "maximizeWindow"
},
{
"type": "switchFrameByIdx",
"param": 1
},
{
"type": "switchFrame",
"param": "relative=parent"
},
{
"type": "switchFrameByName",
"param": "my_frame"
},
{
"type": "closeWindow"
}
]
}
]
}
}
}
)
self.obj.prepare()
exp_file = RESOURCES_DIR + "selenium/generated_from_requests_v2.py"
str_to_replace = (self.obj.engine.artifacts_dir + os.path.sep).replace('\\', '\\\\')
self.assertFilesEqual(exp_file, self.obj.script, str_to_replace, "<somewhere>", python_files=True)
with open(self.obj.script) as script:
self.assertIn("bzt.resources.selenium_extras", script.read())
 | 1 | 15,535 | It would be great not to pass the timeout into every get_locator call; it looks like it should go into the LocatorsManager init instead. And possibly it should be non-mandatory, something like: `def __init__(self, locators, timeout=60):` | Blazemeter-taurus | py |
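To make the suggestion concrete — a minimal sketch, assuming a selenium-backed manager. The `LocatorsManager`/`get_locator` names mirror the generated scripts above, but the `BYS` mapping, the polling loop, and the 60-second default are illustrative assumptions, not taurus' actual implementation:

```python
import time

from selenium.common.exceptions import NoSuchElementException
from selenium.webdriver.common.by import By

# Assumed mapping from the dict keys used in generated scripts to selenium strategies
BYS = {"id": By.ID, "name": By.NAME, "css": By.CSS_SELECTOR, "xpath": By.XPATH}


class LocatorsManager(object):
    def __init__(self, driver, timeout=60):
        self.driver = driver
        self.timeout = timeout  # stored once; non-mandatory, as the review suggests

    def get_locator(self, locators, timeout=None):
        # A per-call override stays possible; otherwise fall back to the default
        if timeout is None:
            timeout = self.timeout
        deadline = time.time() + timeout
        while True:
            for locator in locators:  # e.g. [{'name': 'btn1'}, {'id': 'Id_123'}]
                (strategy, value), = locator.items()
                if self.driver.find_elements(BYS[strategy], value):
                    return BYS[strategy], value
            if time.time() > deadline:
                raise NoSuchElementException("No locator matched: %s" % locators)
            time.sleep(0.5)
```

With that shape, generated lines shrink back to `self.loc_mng.get_locator([{'id': 'Id_123'}])`, and the `30.0` seen in the diff above is configured once at construction time instead of repeated at every call site.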