patch (string, 17–31.2k chars) | y (int64) | oldf (string, 0–2.21M chars) | idx (int64) | id (int64, 4.29k–68.4k) | msg (string, 8–843 chars) | proj (string, 212 classes) | lang (string, 9 classes)
---|---|---|---|---|---|---|---|
@@ -94,9 +94,18 @@ public class LatLonShape {
return new Field[] {new LatLonTriangle(fieldName, lat, lon, lat, lon, lat, lon)};
}
+ /** create a query to find all indexed shapes that comply with the {@link QueryRelation} for the provided point
+ **/
+ public static Query newPointQuery(String field, QueryRelation queryRelation, double lat, double lon) {
+ return new LatLonShapePointQuery(field, queryRelation, lat, lon);
+ }
+
/** create a query to find all polygons that intersect a defined bounding box
**/
public static Query newBoxQuery(String field, QueryRelation queryRelation, double minLatitude, double maxLatitude, double minLongitude, double maxLongitude) {
+ if (minLatitude == maxLatitude && minLongitude == maxLongitude) {
+ return newPointQuery(field, queryRelation, minLatitude, minLongitude);
+ }
return new LatLonShapeBoundingBoxQuery(field, queryRelation, minLatitude, maxLatitude, minLongitude, maxLongitude);
}
| 1 | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.lucene.document;
import java.util.ArrayList;
import java.util.List;
import org.apache.lucene.geo.GeoUtils;
import org.apache.lucene.geo.Line;
import org.apache.lucene.geo.Polygon;
import org.apache.lucene.geo.Tessellator;
import org.apache.lucene.geo.Tessellator.Triangle;
import org.apache.lucene.index.PointValues;
import org.apache.lucene.search.Query;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.NumericUtils;
import static org.apache.lucene.geo.GeoEncodingUtils.encodeLatitude;
import static org.apache.lucene.geo.GeoEncodingUtils.encodeLongitude;
/**
* An indexed shape utility class.
* <p>
* {@link Polygon}s are decomposed into a triangular mesh using the {@link Tessellator} utility class.
* Each {@link Triangle} is encoded and indexed as a multi-value field.
* <p>
* Finding all shapes that intersect a range (e.g., bounding box) at search time is efficient.
* <p>
* This class defines static factory methods for common operations:
* <ul>
* <li>{@link #createIndexableFields(String, Polygon)} for creating the indexable fields of a polygon.
* <li>{@link #newBoxQuery newBoxQuery()} for matching polygons that intersect a bounding box.
* </ul>
* <b>WARNING</b>: Like {@link LatLonPoint}, vertex values are indexed with some loss of precision from the
* original {@code double} values (4.190951585769653E-8 for the latitude component
* and 8.381903171539307E-8 for longitude).
* @see PointValues
* @see LatLonDocValuesField
*
* @lucene.experimental
*/
public class LatLonShape {
static final int BYTES = Integer.BYTES;
protected static final FieldType TYPE = new FieldType();
static {
TYPE.setDimensions(7, 4, BYTES);
TYPE.freeze();
}
// no instance:
private LatLonShape() {
}
/** create indexable fields for polygon geometry */
public static Field[] createIndexableFields(String fieldName, Polygon polygon) {
// the lion's share of the indexing is done by the tessellator
List<Triangle> tessellation = Tessellator.tessellate(polygon);
List<LatLonTriangle> fields = new ArrayList<>();
for (Triangle t : tessellation) {
fields.add(new LatLonTriangle(fieldName, t));
}
return fields.toArray(new Field[fields.size()]);
}
/** create indexable fields for line geometry */
public static Field[] createIndexableFields(String fieldName, Line line) {
int numPoints = line.numPoints();
Field[] fields = new Field[numPoints - 1];
// create "flat" triangles
for (int i = 0, j = 1; j < numPoints; ++i, ++j) {
fields[i] = new LatLonTriangle(fieldName, line.getLat(i), line.getLon(i), line.getLat(j), line.getLon(j), line.getLat(i), line.getLon(i));
}
return fields;
}
/** create indexable fields for point geometry */
public static Field[] createIndexableFields(String fieldName, double lat, double lon) {
return new Field[] {new LatLonTriangle(fieldName, lat, lon, lat, lon, lat, lon)};
}
/** create a query to find all polygons that intersect a defined bounding box
**/
public static Query newBoxQuery(String field, QueryRelation queryRelation, double minLatitude, double maxLatitude, double minLongitude, double maxLongitude) {
return new LatLonShapeBoundingBoxQuery(field, queryRelation, minLatitude, maxLatitude, minLongitude, maxLongitude);
}
/** create a query to find all polygons that intersect a provided linestring (or array of linestrings)
* note: does not support dateline crossing
**/
public static Query newLineQuery(String field, QueryRelation queryRelation, Line... lines) {
return new LatLonShapeLineQuery(field, queryRelation, lines);
}
/** create a query to find all polygons that intersect a provided polygon (or array of polygons)
* note: does not support dateline crossing
**/
public static Query newPolygonQuery(String field, QueryRelation queryRelation, Polygon... polygons) {
return new LatLonShapePolygonQuery(field, queryRelation, polygons);
}
/** polygons are decomposed into tessellated triangles using {@link org.apache.lucene.geo.Tessellator}
* these triangles are encoded and inserted as separate indexed POINT fields
*/
private static class LatLonTriangle extends Field {
LatLonTriangle(String name, double aLat, double aLon, double bLat, double bLon, double cLat, double cLon) {
super(name, TYPE);
setTriangleValue(encodeLongitude(aLon), encodeLatitude(aLat), encodeLongitude(bLon), encodeLatitude(bLat), encodeLongitude(cLon), encodeLatitude(cLat));
}
LatLonTriangle(String name, Triangle t) {
super(name, TYPE);
setTriangleValue(t.getEncodedX(0), t.getEncodedY(0), t.getEncodedX(1), t.getEncodedY(1), t.getEncodedX(2), t.getEncodedY(2));
}
public void setTriangleValue(int aX, int aY, int bX, int bY, int cX, int cY) {
final byte[] bytes;
if (fieldsData == null) {
bytes = new byte[7 * BYTES];
fieldsData = new BytesRef(bytes);
} else {
bytes = ((BytesRef) fieldsData).bytes;
}
encodeTriangle(bytes, aY, aX, bY, bX, cY, cX);
}
}
/** Query Relation Types **/
public enum QueryRelation {
INTERSECTS, WITHIN, DISJOINT
}
private static final int MINY_MINX_MAXY_MAXX_Y_X = 0;
private static final int MINY_MINX_Y_X_MAXY_MAXX = 1;
private static final int MAXY_MINX_Y_X_MINY_MAXX = 2;
private static final int MAXY_MINX_MINY_MAXX_Y_X = 3;
private static final int Y_MINX_MINY_X_MAXY_MAXX = 4;
private static final int Y_MINX_MINY_MAXX_MAXY_X = 5;
private static final int MAXY_MINX_MINY_X_Y_MAXX = 6;
private static final int MINY_MINX_Y_MAXX_MAXY_X = 7;
/**
* A triangle is encoded using 6 coordinate values plus an extra value whose lowest three bits encode how to reconstruct it.
* Triangles are encoded with CCW orientation and might be rotated to limit the number of possible reconstructions to 2^3.
* Reconstruction always happens from west to east.
*/
public static void encodeTriangle(byte[] bytes, int aLat, int aLon, int bLat, int bLon, int cLat, int cLon) {
assert bytes.length == 7 * BYTES;
int aX;
int bX;
int cX;
int aY;
int bY;
int cY;
//change orientation if CW
if (GeoUtils.orient(aLon, aLat, bLon, bLat, cLon, cLat) == -1) {
aX = cLon;
bX = bLon;
cX = aLon;
aY = cLat;
bY = bLat;
cY = aLat;
} else {
aX = aLon;
bX = bLon;
cX = cLon;
aY = aLat;
bY = bLat;
cY = cLat;
}
//rotate edges and place minX at the beginning
if (bX < aX || cX < aX) {
if (bX < cX) {
int tempX = aX;
int tempY = aY;
aX = bX;
aY = bY;
bX = cX;
bY = cY;
cX = tempX;
cY = tempY;
} else if (cX < aX) {
int tempX = aX;
int tempY = aY;
aX = cX;
aY = cY;
cX = bX;
cY = bY;
bX = tempX;
bY = tempY;
}
} else if (aX == bX && aX == cX) {
//degenerate case: all points share the same longitude
//make sure aX is not the middle point (it must be part of the MBS)
if (bY < aY || cY < aY) {
if (bY < cY) {
int tempX = aX;
int tempY = aY;
aX = bX;
aY = bY;
bX = cX;
bY = cY;
cX = tempX;
cY = tempY;
} else if (cY < aY) {
int tempX = aX;
int tempY = aY;
aX = cX;
aY = cY;
cX = bX;
cY = bY;
bX = tempX;
bY = tempY;
}
}
}
int minX = aX;
int minY = StrictMath.min(aY, StrictMath.min(bY, cY));
int maxX = StrictMath.max(aX, StrictMath.max(bX, cX));
int maxY = StrictMath.max(aY, StrictMath.max(bY, cY));
int bits, x, y;
if (minY == aY) {
if (maxY == bY && maxX == bX) {
y = cY;
x = cX;
bits = MINY_MINX_MAXY_MAXX_Y_X;
} else if (maxY == cY && maxX == cX) {
y = bY;
x = bX;
bits = MINY_MINX_Y_X_MAXY_MAXX;
} else {
y = bY;
x = cX;
bits = MINY_MINX_Y_MAXX_MAXY_X;
}
} else if (maxY == aY) {
if (minY == bY && maxX == bX) {
y = cY;
x = cX;
bits = MAXY_MINX_MINY_MAXX_Y_X;
} else if (minY == cY && maxX == cX) {
y = bY;
x = bX;
bits = MAXY_MINX_Y_X_MINY_MAXX;
} else {
y = cY;
x = bX;
bits = MAXY_MINX_MINY_X_Y_MAXX;
}
} else if (maxX == bX && minY == bY) {
y = aY;
x = cX;
bits = Y_MINX_MINY_MAXX_MAXY_X;
} else if (maxX == cX && maxY == cY) {
y = aY;
x = bX;
bits = Y_MINX_MINY_X_MAXY_MAXX;
} else {
throw new IllegalArgumentException("Could not encode the provided triangle");
}
NumericUtils.intToSortableBytes(minY, bytes, 0);
NumericUtils.intToSortableBytes(minX, bytes, BYTES);
NumericUtils.intToSortableBytes(maxY, bytes, 2 * BYTES);
NumericUtils.intToSortableBytes(maxX, bytes, 3 * BYTES);
NumericUtils.intToSortableBytes(y, bytes, 4 * BYTES);
NumericUtils.intToSortableBytes(x, bytes, 5 * BYTES);
NumericUtils.intToSortableBytes(bits, bytes, 6 * BYTES);
}
/**
* Decode a triangle encoded by {@link LatLonShape#encodeTriangle(byte[], int, int, int, int, int, int)}.
*/
public static void decodeTriangle(byte[] t, int[] triangle) {
assert triangle.length == 6;
int bits = NumericUtils.sortableBytesToInt(t, 6 * LatLonShape.BYTES);
//extract the first three bits
int tCode = (((1 << 3) - 1) & (bits >> 0));
switch (tCode) {
case MINY_MINX_MAXY_MAXX_Y_X:
triangle[0] = NumericUtils.sortableBytesToInt(t, 0 * LatLonShape.BYTES);
triangle[1] = NumericUtils.sortableBytesToInt(t, 1 * LatLonShape.BYTES);
triangle[2] = NumericUtils.sortableBytesToInt(t, 2 * LatLonShape.BYTES);
triangle[3] = NumericUtils.sortableBytesToInt(t, 3 * LatLonShape.BYTES);
triangle[4] = NumericUtils.sortableBytesToInt(t, 4 * LatLonShape.BYTES);
triangle[5] = NumericUtils.sortableBytesToInt(t, 5 * LatLonShape.BYTES);
break;
case MINY_MINX_Y_X_MAXY_MAXX:
triangle[0] = NumericUtils.sortableBytesToInt(t, 0 * LatLonShape.BYTES);
triangle[1] = NumericUtils.sortableBytesToInt(t, 1 * LatLonShape.BYTES);
triangle[2] = NumericUtils.sortableBytesToInt(t, 4 * LatLonShape.BYTES);
triangle[3] = NumericUtils.sortableBytesToInt(t, 5 * LatLonShape.BYTES);
triangle[4] = NumericUtils.sortableBytesToInt(t, 2 * LatLonShape.BYTES);
triangle[5] = NumericUtils.sortableBytesToInt(t, 3 * LatLonShape.BYTES);
break;
case MAXY_MINX_Y_X_MINY_MAXX:
triangle[0] = NumericUtils.sortableBytesToInt(t, 2 * LatLonShape.BYTES);
triangle[1] = NumericUtils.sortableBytesToInt(t, 1 * LatLonShape.BYTES);
triangle[2] = NumericUtils.sortableBytesToInt(t, 4 * LatLonShape.BYTES);
triangle[3] = NumericUtils.sortableBytesToInt(t, 5 * LatLonShape.BYTES);
triangle[4] = NumericUtils.sortableBytesToInt(t, 0 * LatLonShape.BYTES);
triangle[5] = NumericUtils.sortableBytesToInt(t, 3 * LatLonShape.BYTES);
break;
case MAXY_MINX_MINY_MAXX_Y_X:
triangle[0] = NumericUtils.sortableBytesToInt(t, 2 * LatLonShape.BYTES);
triangle[1] = NumericUtils.sortableBytesToInt(t, 1 * LatLonShape.BYTES);
triangle[2] = NumericUtils.sortableBytesToInt(t, 0 * LatLonShape.BYTES);
triangle[3] = NumericUtils.sortableBytesToInt(t, 3 * LatLonShape.BYTES);
triangle[4] = NumericUtils.sortableBytesToInt(t, 4 * LatLonShape.BYTES);
triangle[5] = NumericUtils.sortableBytesToInt(t, 5 * LatLonShape.BYTES);
break;
case Y_MINX_MINY_X_MAXY_MAXX:
triangle[0] = NumericUtils.sortableBytesToInt(t, 4 * LatLonShape.BYTES);
triangle[1] = NumericUtils.sortableBytesToInt(t, 1 * LatLonShape.BYTES);
triangle[2] = NumericUtils.sortableBytesToInt(t, 0 * LatLonShape.BYTES);
triangle[3] = NumericUtils.sortableBytesToInt(t, 5 * LatLonShape.BYTES);
triangle[4] = NumericUtils.sortableBytesToInt(t, 2 * LatLonShape.BYTES);
triangle[5] = NumericUtils.sortableBytesToInt(t, 3 * LatLonShape.BYTES);
break;
case Y_MINX_MINY_MAXX_MAXY_X:
triangle[0] = NumericUtils.sortableBytesToInt(t, 4 * LatLonShape.BYTES);
triangle[1] = NumericUtils.sortableBytesToInt(t, 1 * LatLonShape.BYTES);
triangle[2] = NumericUtils.sortableBytesToInt(t, 0 * LatLonShape.BYTES);
triangle[3] = NumericUtils.sortableBytesToInt(t, 3 * LatLonShape.BYTES);
triangle[4] = NumericUtils.sortableBytesToInt(t, 2 * LatLonShape.BYTES);
triangle[5] = NumericUtils.sortableBytesToInt(t, 5 * LatLonShape.BYTES);
break;
case MAXY_MINX_MINY_X_Y_MAXX:
triangle[0] = NumericUtils.sortableBytesToInt(t, 2 * LatLonShape.BYTES);
triangle[1] = NumericUtils.sortableBytesToInt(t, 1 * LatLonShape.BYTES);
triangle[2] = NumericUtils.sortableBytesToInt(t, 0 * LatLonShape.BYTES);
triangle[3] = NumericUtils.sortableBytesToInt(t, 5 * LatLonShape.BYTES);
triangle[4] = NumericUtils.sortableBytesToInt(t, 4 * LatLonShape.BYTES);
triangle[5] = NumericUtils.sortableBytesToInt(t, 3 * LatLonShape.BYTES);
break;
case MINY_MINX_Y_MAXX_MAXY_X:
triangle[0] = NumericUtils.sortableBytesToInt(t, 0 * LatLonShape.BYTES);
triangle[1] = NumericUtils.sortableBytesToInt(t, 1 * LatLonShape.BYTES);
triangle[2] = NumericUtils.sortableBytesToInt(t, 4 * LatLonShape.BYTES);
triangle[3] = NumericUtils.sortableBytesToInt(t, 3 * LatLonShape.BYTES);
triangle[4] = NumericUtils.sortableBytesToInt(t, 2 * LatLonShape.BYTES);
triangle[5] = NumericUtils.sortableBytesToInt(t, 5 * LatLonShape.BYTES);
break;
default:
throw new IllegalArgumentException("Could not decode the provided triangle");
}
//Points of the decoded triangle must be co-planar or CCW oriented
assert GeoUtils.orient(triangle[1], triangle[0], triangle[3], triangle[2], triangle[5], triangle[4]) >= 0;
}
}
| 1 | 29,661 | do we really need a relation or can we just assume INTERSECTS? | apache-lucene-solr | java |
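
The patch in this row adds `newPointQuery` and makes a degenerate bounding box (min == max) delegate to it. A minimal sketch of how the factory methods shown above combine, with index-writer setup omitted and the class name and geometry values as illustrative assumptions:

```java
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.LatLonShape;
import org.apache.lucene.document.LatLonShape.QueryRelation;
import org.apache.lucene.geo.Polygon;
import org.apache.lucene.search.Query;

public class LatLonShapeSketch {
    // each tessellated triangle becomes one instance of the multi-valued field
    static Document indexPolygon(String fieldName, Polygon polygon) {
        Document doc = new Document();
        for (Field f : LatLonShape.createIndexableFields(fieldName, polygon)) {
            doc.add(f);
        }
        return doc;
    }

    // after the patch, newBoxQuery(field, rel, lat, lat, lon, lon) ends up here
    static Query queryAtPoint(String fieldName, double lat, double lon) {
        return LatLonShape.newPointQuery(fieldName, QueryRelation.INTERSECTS, lat, lon);
    }

    // round trip through the 7-value triangle encoding described in the file
    static int[] roundTrip(int aLat, int aLon, int bLat, int bLon, int cLat, int cLon) {
        byte[] b = new byte[7 * Integer.BYTES];
        LatLonShape.encodeTriangle(b, aLat, aLon, bLat, bLon, cLat, cLon);
        int[] tri = new int[6];
        LatLonShape.decodeTriangle(b, tri);
        return tri; // encoded lat/lon pairs, CCW, possibly rotated
    }
}
```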
@@ -16,7 +16,7 @@ import net.sourceforge.pmd.lang.java.oom.api.MetricVersion;
*
* @author Clément Fournier
*/
-public final class ParameterizedMetricKey {
+/* default */ final class ParameterizedMetricKey {
private static final Map<Integer, ParameterizedMetricKey> POOL = new HashMap<>();
| 1 | /**
* BSD-style license; for more info see http://pmd.sourceforge.net/license.html
*/
package net.sourceforge.pmd.lang.java.oom;
import java.util.HashMap;
import java.util.Map;
import net.sourceforge.pmd.lang.java.oom.api.Metric;
import net.sourceforge.pmd.lang.java.oom.api.MetricKey;
import net.sourceforge.pmd.lang.java.oom.api.MetricVersion;
/**
* Represents a key parameterized with its version. Used to index memoization maps.
*
* @author Clément Fournier
*/
public final class ParameterizedMetricKey {
private static final Map<Integer, ParameterizedMetricKey> POOL = new HashMap<>();
/** The metric key. */
public final MetricKey<? extends Metric> key;
/** The version of the metric. */
public final MetricVersion version;
/** Used internally by the pooler. */
private ParameterizedMetricKey(MetricKey<? extends Metric> key, MetricVersion version) {
this.key = key;
this.version = version;
}
@Override
public String toString() {
return "ParameterizedMetricKey{key=" + key + ", version=" + version + '}';
}
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (o == null || getClass() != o.getClass()) {
return false;
}
ParameterizedMetricKey that = (ParameterizedMetricKey) o;
if (!key.equals(that.key)) {
return false;
}
return version.equals(that.version);
}
@Override
public int hashCode() {
return code(key, version);
}
/** Used by the pooler. */
private static int code(MetricKey key, MetricVersion version) {
return 31 * key.hashCode() + version.hashCode();
}
/** Builds a parameterized metric key. */
public static ParameterizedMetricKey getInstance(MetricKey<? extends Metric> key, MetricVersion version) {
int code = code(key, version);
ParameterizedMetricKey paramKey = POOL.get(code);
if (paramKey == null) {
POOL.put(code, new ParameterizedMetricKey(key, version));
}
return POOL.get(code);
}
}
| 1 | 12,456 | Is this a `MetricKey`? | pmd-pmd | java
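
`getInstance` above implements a classic interning pattern, but note the pool is indexed by the raw hash integer, so two distinct (key, version) pairs whose hashes collide would alias to one instance. A minimal sketch of the same idiom with hypothetical names, keyed by the composite value instead of its hash:

```java
import java.util.HashMap;
import java.util.Map;

// Hypothetical stand-in for a (MetricKey, MetricVersion) pair.
final class PooledKey {
    private static final Map<String, PooledKey> POOL = new HashMap<>();

    final String key;
    final String version;

    private PooledKey(String key, String version) {
        this.key = key;
        this.version = version;
    }

    // one lookup instead of get/put/get, and no hash-collision aliasing
    static PooledKey getInstance(String key, String version) {
        return POOL.computeIfAbsent(key + '|' + version, k -> new PooledKey(key, version));
    }
}
```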
@@ -49,6 +49,9 @@ public class Constants {
public static final int DEFAULT_SSL_PORT_NUMBER = 8443;
public static final int DEFAULT_JETTY_MAX_THREAD_COUNT = 20;
+ // Default end time for a schedule: 01/01/2050, 00:00:00 UTC
+ public static final long DEFAULT_SCHEDULE_END_EPOCH_TIME = 2524608000000L;
+
public static class ConfigurationKeys {
// These properties are configurable through azkaban.properties
| 1 | /*
* Copyright 2017 LinkedIn Corp.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*
*/
package azkaban;
/**
* Constants
*
* Global place for storing constants.
* Conventions:
* - All internal constants to be put in the root level ie. {@link Constants} class
* - All Configuration keys to be put in {@link ConfigurationKeys} class
* - Flow level Properties keys go to {@link FlowProperties}
* - Job level Properties keys go to {@link JobProperties}
*/
public class Constants {
// Names and paths of various file names to configure Azkaban
public static final String AZKABAN_PROPERTIES_FILE = "azkaban.properties";
public static final String AZKABAN_PRIVATE_PROPERTIES_FILE = "azkaban.private.properties";
public static final String DEFAULT_CONF_PATH = "conf";
public static final String AZKABAN_EXECUTOR_PORT_FILENAME = "executor.port";
public static final String AZKABAN_SERVLET_CONTEXT_KEY = "azkaban_app";
// Internal username used to perform SLA action
public static final String AZKABAN_SLA_CHECKER_USERNAME = "azkaban_sla";
// Memory check retry interval when OOM in ms
public static final long MEMORY_CHECK_INTERVAL_MS = 1000 * 60 * 1;
// Max number of memory check retry
public static final int MEMORY_CHECK_RETRY_LIMIT = 720;
public static final int DEFAULT_PORT_NUMBER = 8081;
public static final int DEFAULT_SSL_PORT_NUMBER = 8443;
public static final int DEFAULT_JETTY_MAX_THREAD_COUNT = 20;
public static class ConfigurationKeys {
// These properties are configurable through azkaban.properties
// Defines a list of external links, each referred to as a topic
public static final String AZKABAN_SERVER_EXTERNAL_TOPICS = "azkaban.server.external.topics";
// External URL template of a given topic, specified in the list defined above
public static final String AZKABAN_SERVER_EXTERNAL_TOPIC_URL = "azkaban.server.external.${topic}.url";
// Designates one of the external link topics to correspond to an execution analyzer
public static final String AZKABAN_SERVER_EXTERNAL_ANALYZER_TOPIC = "azkaban.server.external.analyzer.topic";
public static final String AZKABAN_SERVER_EXTERNAL_ANALYZER_LABEL = "azkaban.server.external.analyzer.label";
// Designates one of the external link topics to correspond to a job log viewer
public static final String AZKABAN_SERVER_EXTERNAL_LOGVIEWER_TOPIC = "azkaban.server.external.logviewer.topic";
public static final String AZKABAN_SERVER_EXTERNAL_LOGVIEWER_LABEL = "azkaban.server.external.logviewer.label";
// Configures the Kafka appender for logging user jobs, specified for the exec server
public static final String AZKABAN_SERVER_LOGGING_KAFKA_BROKERLIST = "azkaban.server.logging.kafka.brokerList";
public static final String AZKABAN_SERVER_LOGGING_KAFKA_TOPIC = "azkaban.server.logging.kafka.topic";
// Represent the class name of azkaban metrics reporter.
public static final String CUSTOM_METRICS_REPORTER_CLASS_NAME = "azkaban.metrics.reporter.name";
// Represent the metrics server URL.
public static final String METRICS_SERVER_URL = "azkaban.metrics.server.url";
public static final String IS_METRICS_ENABLED = "azkaban.is.metrics.enabled";
// Hostname for the host, if not specified, canonical hostname will be used
public static final String AZKABAN_SERVER_HOST_NAME = "azkaban.server.hostname";
// Legacy configs section, new configs should follow the naming convention of azkaban.server.<rest of the name> for server configs.
// The property is used for the web server to get the host name of the executor when running in SOLO mode.
public static final String EXECUTOR_HOST = "executor.host";
// Max flow running time in mins, server will kill flows running longer than this setting.
// if not set or <= 0, then there's no restriction on running time.
public static final String AZKABAN_MAX_FLOW_RUNNING_MINS = "azkaban.server.flow.max.running.minutes";
public static final String AZKABAN_STORAGE_TYPE = "azkaban.storage.type";
public static final String AZKABAN_STORAGE_LOCAL_BASEDIR = "azkaban.storage.local.basedir";
public static final String HADOOP_CONF_DIR_PATH = "hadoop.conf.dir.path";
public static final String AZKABAN_STORAGE_HDFS_ROOT_URI = "azkaban.storage.hdfs.root.uri";
public static final String AZKABAN_KERBEROS_PRINCIPAL = "azkaban.kerberos.principal";
public static final String AZKABAN_KEYTAB_PATH = "azkaban.keytab.path";
}
public static class FlowProperties {
// Basic properties of flows as set by the executor server
public static final String AZKABAN_FLOW_PROJECT_NAME = "azkaban.flow.projectname";
public static final String AZKABAN_FLOW_FLOW_ID = "azkaban.flow.flowid";
public static final String AZKABAN_FLOW_SUBMIT_USER = "azkaban.flow.submituser";
public static final String AZKABAN_FLOW_EXEC_ID = "azkaban.flow.execid";
public static final String AZKABAN_FLOW_PROJECT_VERSION = "azkaban.flow.projectversion";
}
public static class JobProperties {
// Job property that enables/disables using Kafka logging of user job logs
public static final String AZKABAN_JOB_LOGGING_KAFKA_ENABLE = "azkaban.job.logging.kafka.enable";
}
public static class JobCallbackProperties {
public static final String JOBCALLBACK_CONNECTION_REQUEST_TIMEOUT = "jobcallback.connection.request.timeout";
public static final String JOBCALLBACK_CONNECTION_TIMEOUT = "jobcallback.connection.timeout";
public static final String JOBCALLBACK_SOCKET_TIMEOUT = "jobcallback.socket.timeout";
public static final String JOBCALLBACK_RESPONSE_WAIT_TIMEOUT = "jobcallback.response.wait.timeout";
public static final String JOBCALLBACK_THREAD_POOL_SIZE = "jobcallback.thread.pool.size";
}
}
| 1 | 13,330 | what's your rationale for picking this date? | azkaban-azkaban | java
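
For the reviewer's question about the date: the constant is at least internally consistent. The span from 1970-01-01 to 2050-01-01 is 80 years containing 20 leap days, i.e. 29,220 days or 2,524,608,000 seconds. A quick check:

```java
import java.time.Instant;

public class ScheduleEndTimeCheck {
    public static void main(String[] args) {
        // 29,220 days * 86,400 s = 2,524,608,000 s; prints 2050-01-01T00:00:00Z
        System.out.println(Instant.ofEpochMilli(2_524_608_000_000L));
    }
}
```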
@@ -11,6 +11,7 @@ import (
"strings"
"github.com/opencontainers/runc/libcontainer/cgroups"
+ "github.com/opencontainers/runc/libcontainer/cgroups/fscommon"
"github.com/opencontainers/runc/libcontainer/configs"
)
| 1 | // +build linux
package fs
import (
"bufio"
"fmt"
"os"
"path/filepath"
"strconv"
"strings"
"github.com/opencontainers/runc/libcontainer/cgroups"
"github.com/opencontainers/runc/libcontainer/configs"
)
type BlkioGroup struct {
}
func (s *BlkioGroup) Name() string {
return "blkio"
}
func (s *BlkioGroup) Apply(d *cgroupData) error {
_, err := d.join("blkio")
if err != nil && !cgroups.IsNotFound(err) {
return err
}
return nil
}
func (s *BlkioGroup) Set(path string, cgroup *configs.Cgroup) error {
if cgroup.Resources.BlkioWeight != 0 {
if err := writeFile(path, "blkio.weight", strconv.FormatUint(uint64(cgroup.Resources.BlkioWeight), 10)); err != nil {
return err
}
}
if cgroup.Resources.BlkioLeafWeight != 0 {
if err := writeFile(path, "blkio.leaf_weight", strconv.FormatUint(uint64(cgroup.Resources.BlkioLeafWeight), 10)); err != nil {
return err
}
}
for _, wd := range cgroup.Resources.BlkioWeightDevice {
if err := writeFile(path, "blkio.weight_device", wd.WeightString()); err != nil {
return err
}
if err := writeFile(path, "blkio.leaf_weight_device", wd.LeafWeightString()); err != nil {
return err
}
}
for _, td := range cgroup.Resources.BlkioThrottleReadBpsDevice {
if err := writeFile(path, "blkio.throttle.read_bps_device", td.String()); err != nil {
return err
}
}
for _, td := range cgroup.Resources.BlkioThrottleWriteBpsDevice {
if err := writeFile(path, "blkio.throttle.write_bps_device", td.String()); err != nil {
return err
}
}
for _, td := range cgroup.Resources.BlkioThrottleReadIOPSDevice {
if err := writeFile(path, "blkio.throttle.read_iops_device", td.String()); err != nil {
return err
}
}
for _, td := range cgroup.Resources.BlkioThrottleWriteIOPSDevice {
if err := writeFile(path, "blkio.throttle.write_iops_device", td.String()); err != nil {
return err
}
}
return nil
}
func (s *BlkioGroup) Remove(d *cgroupData) error {
return removePath(d.path("blkio"))
}
/*
examples:
blkio.sectors
8:0 6792
blkio.io_service_bytes
8:0 Read 1282048
8:0 Write 2195456
8:0 Sync 2195456
8:0 Async 1282048
8:0 Total 3477504
Total 3477504
blkio.io_serviced
8:0 Read 124
8:0 Write 104
8:0 Sync 104
8:0 Async 124
8:0 Total 228
Total 228
blkio.io_queued
8:0 Read 0
8:0 Write 0
8:0 Sync 0
8:0 Async 0
8:0 Total 0
Total 0
*/
func splitBlkioStatLine(r rune) bool {
return r == ' ' || r == ':'
}
func getBlkioStat(path string) ([]cgroups.BlkioStatEntry, error) {
var blkioStats []cgroups.BlkioStatEntry
f, err := os.Open(path)
if err != nil {
if os.IsNotExist(err) {
return blkioStats, nil
}
return nil, err
}
defer f.Close()
sc := bufio.NewScanner(f)
for sc.Scan() {
// format: dev type amount
fields := strings.FieldsFunc(sc.Text(), splitBlkioStatLine)
if len(fields) < 3 {
if len(fields) == 2 && fields[0] == "Total" {
// skip total line
continue
} else {
return nil, fmt.Errorf("Invalid line found while parsing %s: %s", path, sc.Text())
}
}
v, err := strconv.ParseUint(fields[0], 10, 64)
if err != nil {
return nil, err
}
major := v
v, err = strconv.ParseUint(fields[1], 10, 64)
if err != nil {
return nil, err
}
minor := v
op := ""
valueField := 2
if len(fields) == 4 {
op = fields[2]
valueField = 3
}
v, err = strconv.ParseUint(fields[valueField], 10, 64)
if err != nil {
return nil, err
}
blkioStats = append(blkioStats, cgroups.BlkioStatEntry{Major: major, Minor: minor, Op: op, Value: v})
}
return blkioStats, nil
}
func (s *BlkioGroup) GetStats(path string, stats *cgroups.Stats) error {
// Try to read CFQ stats available on all CFQ enabled kernels first
if blkioStats, err := getBlkioStat(filepath.Join(path, "blkio.io_serviced_recursive")); err == nil && blkioStats != nil {
return getCFQStats(path, stats)
}
return getStats(path, stats) // Use generic stats as fallback
}
func getCFQStats(path string, stats *cgroups.Stats) error {
var blkioStats []cgroups.BlkioStatEntry
var err error
if blkioStats, err = getBlkioStat(filepath.Join(path, "blkio.sectors_recursive")); err != nil {
return err
}
stats.BlkioStats.SectorsRecursive = blkioStats
if blkioStats, err = getBlkioStat(filepath.Join(path, "blkio.io_service_bytes_recursive")); err != nil {
return err
}
stats.BlkioStats.IoServiceBytesRecursive = blkioStats
if blkioStats, err = getBlkioStat(filepath.Join(path, "blkio.io_serviced_recursive")); err != nil {
return err
}
stats.BlkioStats.IoServicedRecursive = blkioStats
if blkioStats, err = getBlkioStat(filepath.Join(path, "blkio.io_queued_recursive")); err != nil {
return err
}
stats.BlkioStats.IoQueuedRecursive = blkioStats
if blkioStats, err = getBlkioStat(filepath.Join(path, "blkio.io_service_time_recursive")); err != nil {
return err
}
stats.BlkioStats.IoServiceTimeRecursive = blkioStats
if blkioStats, err = getBlkioStat(filepath.Join(path, "blkio.io_wait_time_recursive")); err != nil {
return err
}
stats.BlkioStats.IoWaitTimeRecursive = blkioStats
if blkioStats, err = getBlkioStat(filepath.Join(path, "blkio.io_merged_recursive")); err != nil {
return err
}
stats.BlkioStats.IoMergedRecursive = blkioStats
if blkioStats, err = getBlkioStat(filepath.Join(path, "blkio.time_recursive")); err != nil {
return err
}
stats.BlkioStats.IoTimeRecursive = blkioStats
return nil
}
func getStats(path string, stats *cgroups.Stats) error {
var blkioStats []cgroups.BlkioStatEntry
var err error
if blkioStats, err = getBlkioStat(filepath.Join(path, "blkio.throttle.io_service_bytes")); err != nil {
return err
}
stats.BlkioStats.IoServiceBytesRecursive = blkioStats
if blkioStats, err = getBlkioStat(filepath.Join(path, "blkio.throttle.io_serviced")); err != nil {
return err
}
stats.BlkioStats.IoServicedRecursive = blkioStats
return nil
}
| 1 | 18,176 | NIT: I'd have preferred that the new package be called "utils", but that's not a big deal. | opencontainers-runc | go |
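
Aside from the naming nit, the comment block in this row's file documents the blkio stat-line format. A standalone sketch of how `getBlkioStat` tokenizes those lines: `strings.FieldsFunc` with the same splitter treats both spaces and colons as separators, so the device major/minor and the optional operation come out as separate fields.

```go
package main

import (
	"fmt"
	"strings"
)

// same splitter as in the file above: fields end at spaces and colons
func splitBlkioStatLine(r rune) bool {
	return r == ' ' || r == ':'
}

func main() {
	// a 4-field line carries an op ("Read"); a 3-field line does not
	fmt.Println(strings.FieldsFunc("8:0 Read 1282048", splitBlkioStatLine)) // [8 0 Read 1282048]
	fmt.Println(strings.FieldsFunc("8:0 6792", splitBlkioStatLine))         // [8 0 6792]
}
```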
@@ -1,6 +1,13 @@
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
+# Purpose
+# This code example demonstrates how to set the default encryption state for an
+# Amazon Simple Storage Solution (Amazon S3) bucket using server-side encryption (SSE)
+# with an AWS KMS customer master key (CMK).
+
+# snippet-start:[s3.ruby.s3_add_default_sse_encryption]
+
require 'aws-sdk-s3'
# Sets the default encryption state for an Amazon S3 bucket using | 1 | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
require 'aws-sdk-s3'
# Sets the default encryption state for an Amazon S3 bucket using
# server-side encryption (SSE) with an
# AWS KMS customer master key (CMK).
#
# Prerequisites:
#
# - An Amazon S3 bucket.
# - An AWS KMS CMK.
#
# @param s3_client [Aws::S3::Client] An initialized Amazon S3 client.
# @param bucket_name [String] The name of the bucket.
# @param kms_master_key_id [String] The ID of the CMK.
# @return [Boolean] true if the default encryption state was
# successfully set; otherwise, false.
# @example
# exit 1 unless default_bucket_encryption_sse_cmk_set?(
# Aws::S3::Client.new(region: 'us-east-1'),
# 'doc-example-bucket',
# '9041e78c-7a20-4db3-929e-828abEXAMPLE'
# )
def default_bucket_encryption_sse_cmk_set?(
s3_client,
bucket_name,
kms_master_key_id
)
s3_client.put_bucket_encryption(
bucket: bucket_name,
server_side_encryption_configuration: {
rules: [
{
apply_server_side_encryption_by_default: {
sse_algorithm: 'aws:kms',
kms_master_key_id: kms_master_key_id
}
}
]
}
)
return true
rescue StandardError => e
puts "Error setting default encryption state: #{e.message}"
return false
end
def run_me
bucket_name = 'doc-example-bucket'
kms_master_key_id = '9041e78c-7a20-4db3-929e-828abEXAMPLE'
region = 'us-east-1'
s3_client = Aws::S3::Client.new(region: region)
if default_bucket_encryption_sse_cmk_set?(
s3_client,
bucket_name,
kms_master_key_id
)
puts 'Default encryption state set.'
else
puts 'Default encryption state not set.'
end
end
run_me if $PROGRAM_NAME == __FILE__
| 1 | 20,537 | Simple Storage **Service** | awsdocs-aws-doc-sdk-examples | rb |
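
A complementary sketch, assuming the same bucket, client, and permissions as the example above: once `put_bucket_encryption` has applied the default rule, `get_bucket_encryption` can read it back for verification.

```ruby
require 'aws-sdk-s3'

# Reads back the default encryption rule set by the example above.
def print_default_encryption(s3_client, bucket_name)
  resp = s3_client.get_bucket_encryption(bucket: bucket_name)
  rule = resp.server_side_encryption_configuration.rules[0]
  default = rule.apply_server_side_encryption_by_default
  puts "Algorithm: #{default.sse_algorithm}"
  puts "CMK ID: #{default.kms_master_key_id}"
rescue StandardError => e
  puts "Error getting default encryption state: #{e.message}"
end
```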
@@ -201,12 +201,15 @@ class GarbageCollector extends AbstractDataHandlerListener implements SingletonI
$indexQueueItems = $this->getIndexQueue()->getItems($table, $uid);
foreach ($indexQueueItems as $indexQueueItem) {
$site = $indexQueueItem->getSite();
+ $solrConfiguration = $site->getSolrConfiguration();
// a site can have multiple connections (cores / languages)
$solrConnections = $connectionManager->getConnectionsBySite($site);
foreach ($solrConnections as $solr) {
$solr->deleteByQuery('type:' . $table . ' AND uid:' . intval($uid));
- $solr->commit(false, false, false);
+ if ($solrConfiguration->getEnableCommits()) {
+ $solr->commit(false, false, false);
+ }
}
}
} | 1 | <?php
namespace ApacheSolrForTypo3\Solr;
/***************************************************************
* Copyright notice
*
* (c) 2010-2015 Ingo Renner <ingo@typo3.org>
* All rights reserved
*
* This script is part of the TYPO3 project. The TYPO3 project is
* free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* The GNU General Public License can be found at
* http://www.gnu.org/copyleft/gpl.html.
*
* This script is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* This copyright notice MUST APPEAR in all copies of the script!
***************************************************************/
use ApacheSolrForTypo3\Solr\ConnectionManager;
use ApacheSolrForTypo3\Solr\IndexQueue\Queue;
use TYPO3\CMS\Backend\Utility\BackendUtility;
use TYPO3\CMS\Core\DataHandling\DataHandler;
use TYPO3\CMS\Core\SingletonInterface;
use TYPO3\CMS\Core\Utility\GeneralUtility;
/**
* Garbage Collector, removes related documents from the index when a record is
* set to hidden, is deleted or is otherwise made invisible to website visitors.
*
* Garbage collection will happen for online/LIVE workspaces only.
*
* @author Ingo Renner <ingo@typo3.org>
* @author Timo Schmidt <timo.schmidt@dkd.de>
*/
class GarbageCollector extends AbstractDataHandlerListener implements SingletonInterface
{
protected $trackedRecords = array();
/**
* Hooks into TCE main and tracks record deletion commands.
*
* @param string $command The command.
* @param string $table The table the record belongs to
* @param int $uid The record's uid
* @param string $value Not used
* @param DataHandler $tceMain TYPO3 Core Engine parent object, not used
* @return void
*/
public function processCmdmap_preProcess(
$command,
$table,
$uid,
$value,
DataHandler $tceMain
) {
// workspaces: collect garbage only for LIVE workspace
if ($command == 'delete' && $GLOBALS['BE_USER']->workspace == 0) {
$this->collectGarbage($table, $uid);
if ($table == 'pages') {
$this->getIndexQueue()->deleteItem($table, $uid);
}
}
}
/**
* Holds the configuration of the cases in which recursive page queueing should be triggered.
*
* @return array
*/
protected function getUpdateSubPagesRecursiveTriggerConfiguration()
{
return array(
// the current page has the field "extendToSubpages" enabled and the field "hidden" was set to 1
'extendToSubpageEnabledAndHiddenFlagWasAdded' => array(
'currentState' => array('extendToSubpages' => '1'),
'changeSet' => array('hidden' => '1')
),
// the current page has the field "hidden" enabled and the field "extendToSubpages" was set to 1
'hiddenIsEnabledAndExtendToSubPagesWasAdded' => array(
'currentState' => array('hidden' => '1'),
'changeSet' => array('extendToSubpages' => '1')
)
);
}
/**
* Tracks down index documents belonging to a particular record or page and
* removes them from the index and the Index Queue.
*
* @param string $table The record's table name.
* @param int $uid The record's uid.
* @throws \UnexpectedValueException if a hook object does not implement interface \ApacheSolrForTypo3\Solr\GarbageCollectorPostProcessor
*/
public function collectGarbage($table, $uid)
{
if ($table == 'tt_content' || $table == 'pages' || $table == 'pages_language_overlay') {
$this->collectPageGarbage($table, $uid);
} else {
$this->collectRecordGarbage($table, $uid);
}
if (is_array($GLOBALS['TYPO3_CONF_VARS']['EXTCONF']['solr']['postProcessGarbageCollector'])) {
foreach ($GLOBALS['TYPO3_CONF_VARS']['EXTCONF']['solr']['postProcessGarbageCollector'] as $classReference) {
$garbageCollectorPostProcessor = GeneralUtility::getUserObj($classReference);
if ($garbageCollectorPostProcessor instanceof GarbageCollectorPostProcessor) {
$garbageCollectorPostProcessor->postProcessGarbageCollector($table,
$uid);
} else {
throw new \UnexpectedValueException(
get_class($garbageCollectorPostProcessor) . ' must implement interface ApacheSolrForTypo3\Solr\GarbageCollectorPostProcessor',
1345807460
);
}
}
}
}
/**
* Tracks down index documents belonging to a particular page and
* removes them from the index and the Index Queue.
*
* @param string $table The record's table name.
* @param int $uid The record's uid.
*/
protected function collectPageGarbage($table, $uid)
{
switch ($table) {
case 'tt_content':
$contentElement = BackendUtility::getRecord('tt_content', $uid, 'uid, pid', '', false);
$table = 'pages';
$uid = $contentElement['pid'];
$this->deleteIndexDocuments($table, $uid);
// only a content element was removed, now update/re-index the page
$this->getIndexQueue()->updateItem($table, $uid);
break;
case 'pages_language_overlay':
$pageOverlayRecord = BackendUtility::getRecord('pages_language_overlay', $uid, 'uid, pid', '', false);
$table = 'pages';
$uid = $pageOverlayRecord['pid'];
$this->deleteIndexDocuments($table, $uid);
// only a page overlay was removed, now update/re-index the page
$this->getIndexQueue()->updateItem($table, $uid);
break;
case 'pages':
$this->deleteIndexDocuments($table, $uid);
$this->getIndexQueue()->deleteItem($table, $uid);
break;
}
}
/**
* @param string $table
* @param int $uid
* @param array $changedFields
*/
protected function deleteSubpagesWhenExtendToSubpagesIsSet($table, $uid, $changedFields)
{
if (!$this->isRecursiveUpdateRequired($uid, $changedFields)) {
return;
}
$indexQueue = $this->getIndexQueue();
// get affected subpages when "extendToSubpages" flag was set
$pagesToDelete = $this->getSubPageIds($uid);
// we need to at least remove this page
foreach ($pagesToDelete as $pageToDelete) {
$this->deleteIndexDocuments($table, $pageToDelete);
$indexQueue->deleteItem($table, $pageToDelete);
}
}
/**
* Deletes index documents for a given record identification.
*
* @param string $table The record's table name.
* @param int $uid The record's uid.
*/
protected function deleteIndexDocuments($table, $uid)
{
/** @var $connectionManager ConnectionManager */
$connectionManager = GeneralUtility::makeInstance(ConnectionManager::class);
// record can be indexed for multiple sites
$indexQueueItems = $this->getIndexQueue()->getItems($table, $uid);
foreach ($indexQueueItems as $indexQueueItem) {
$site = $indexQueueItem->getSite();
// a site can have multiple connections (cores / languages)
$solrConnections = $connectionManager->getConnectionsBySite($site);
foreach ($solrConnections as $solr) {
$solr->deleteByQuery('type:' . $table . ' AND uid:' . intval($uid));
$solr->commit(false, false, false);
}
}
}
/**
* Tracks down index documents belonging to a particular record and
* removes them from the index and the Index Queue.
*
* @param string $table The record's table name.
* @param int $uid The record's uid.
*/
protected function collectRecordGarbage($table, $uid)
{
$this->deleteIndexDocuments($table, $uid);
$this->getIndexQueue()->deleteItem($table, $uid);
}
// methods checking whether to trigger garbage collection
/**
* Hooks into TCE main and tracks page move commands.
*
* @param string $command The command.
* @param string $table The table the record belongs to
* @param int $uid The record's uid
* @param string $value Not used
* @param DataHandler $tceMain TYPO3 Core Engine parent object, not used
*/
public function processCmdmap_postProcess(
$command,
$table,
$uid,
$value,
DataHandler $tceMain
) {
// workspaces: collect garbage only for LIVE workspace
if ($command == 'move' && $table == 'pages' && $GLOBALS['BE_USER']->workspace == 0) {
// TODO the below comment is not valid anymore, pid has been removed from doc ID
// ...still needed?
// must be removed from index since the pid changes and
// is part of the Solr document ID
$this->collectGarbage($table, $uid);
// now re-index with new properties
$this->getIndexQueue()->updateItem($table, $uid);
}
}
/**
* Hooks into TCE main and tracks changed records. In this case the current
* record's values are stored to do a change comparison later on for fields
* like fe_group.
*
* @param array $incomingFields An array of incoming fields, new or changed, not used
* @param string $table The table the record belongs to
* @param mixed $uid The record's uid, [integer] or [string] (like 'NEW...')
* @param DataHandler $tceMain TYPO3 Core Engine parent object, not used
*/
public function processDatamap_preProcessFieldArray(
$incomingFields,
$table,
$uid,
DataHandler $tceMain
) {
if (!is_int($uid)) {
// a newly created record, skip
return;
}
if (Util::isDraftRecord($table, $uid)) {
// skip workspaces: collect garbage only for LIVE workspace
return;
}
$visibilityAffectingFields = $this->getVisibilityAffectingFieldsByTable($table);
if (isset($GLOBALS['TCA'][$table]['ctrl']['enablecolumns'])
&& array_key_exists('fe_group',
$GLOBALS['TCA'][$table]['ctrl']['enablecolumns'])
) {
$record = BackendUtility::getRecord(
$table,
$uid,
$visibilityAffectingFields,
'',
false
);
$record = $this->normalizeFrontendGroupField($table, $record);
// keep previous state of important fields for later comparison
$this->trackedRecords[$table][$uid] = $record;
}
}
/**
* Compiles a list of visibility affecting fields of a table so that it can
* be used in SQL queries.
*
* @param string $table Table name to retrieve visibility affecting fields for
* @return string Comma separated list of field names that affect the visibility of a record on the website
*/
protected function getVisibilityAffectingFieldsByTable($table)
{
static $visibilityAffectingFields;
if (!isset($visibilityAffectingFields[$table])) {
// we always want to get the uid and pid although they do not affect visibility
$fields = array('uid', 'pid');
if (isset($GLOBALS['TCA'][$table]['ctrl']['enablecolumns'])) {
$fields = array_merge($fields,
$GLOBALS['TCA'][$table]['ctrl']['enablecolumns']);
}
if (isset($GLOBALS['TCA'][$table]['ctrl']['delete'])) {
$fields[] = $GLOBALS['TCA'][$table]['ctrl']['delete'];
}
if ($table == 'pages') {
$fields[] = 'no_search';
$fields[] = 'doktype';
}
$visibilityAffectingFields[$table] = implode(', ', $fields);
}
return $visibilityAffectingFields[$table];
}
/**
* Makes sure that "empty" frontend group fields are always the same value.
*
* @param string $table The record's table name.
* @param array $record the record array.
* @return array The cleaned record
*/
protected function normalizeFrontendGroupField($table, $record)
{
if (isset($GLOBALS['TCA'][$table]['ctrl']['enablecolumns']['fe_group'])) {
$frontendGroupsField = $GLOBALS['TCA'][$table]['ctrl']['enablecolumns']['fe_group'];
if ($record[$frontendGroupsField] == '') {
$record[$frontendGroupsField] = '0';
}
}
return $record;
}
/**
* Hooks into TCE Main and watches all record updates. If a change is
* detected that would remove the record from the website, we try to find
* related documents and remove them from the index.
*
* @param string $status Status of the current operation, 'new' or 'update'
* @param string $table The table the record belongs to
* @param mixed $uid The record's uid, [integer] or [string] (like 'NEW...')
* @param array $fields The record's data, not used
* @param DataHandler $tceMain TYPO3 Core Engine parent object, not used
*/
public function processDatamap_afterDatabaseOperations(
$status,
$table,
$uid,
array $fields,
DataHandler $tceMain
) {
if ($status == 'new') {
// a newly created record, skip
return;
}
if (Util::isDraftRecord($table, $uid)) {
// skip workspaces: collect garbage only for LIVE workspace
return;
}
$garbageCollectionRelevantFields = $this->getVisibilityAffectingFieldsByTable($table);
$record = BackendUtility::getRecord($table, $uid,
$garbageCollectionRelevantFields, '', false);
$record = $this->normalizeFrontendGroupField($table, $record);
if ($this->isHidden($table, $record)
|| (($this->isStartTimeInFuture($table, $record)
|| $this->isEndTimeInPast($table, $record))
&& $this->isMarkedAsIndexed($table, $record)
)
|| $this->hasFrontendGroupsRemoved($table, $record)
|| ($table == 'pages' && $this->isPageExcludedFromSearch($record))
|| ($table == 'pages' && !$this->isIndexablePageType($record))
) {
$this->collectGarbage($table, $uid);
if ($table == 'pages') {
$this->deleteSubpagesWhenExtendToSubpagesIsSet($table, $uid, $fields);
}
}
}
/**
* Checks whether a hidden field exists for the current table and if so
* determines whether it is set on the current record.
*
* @param string $table The table name.
* @param array $record An array with record fields that may affect visibility.
* @return bool True if the record is hidden, FALSE otherwise.
*/
protected function isHidden($table, $record)
{
$hidden = false;
if (isset($GLOBALS['TCA'][$table]['ctrl']['enablecolumns']['disabled'])) {
$hiddenField = $GLOBALS['TCA'][$table]['ctrl']['enablecolumns']['disabled'];
$hidden = (boolean)$record[$hiddenField];
}
return $hidden;
}
/**
* Checks whether a start time field exists for the record's table and if so
* determines if a time is set and whether that time is in the future,
* making the record invisible on the website.
*
* @param string $table The table name.
* @param array $record An array with record fields that may affect visibility.
* @return bool True if the record's start time is in the future, FALSE otherwise.
*/
protected function isStartTimeInFuture($table, $record)
{
$startTimeInFuture = false;
if (isset($GLOBALS['TCA'][$table]['ctrl']['enablecolumns']['starttime'])) {
$startTimeField = $GLOBALS['TCA'][$table]['ctrl']['enablecolumns']['starttime'];
$startTimeInFuture = $record[$startTimeField] > time();
}
return $startTimeInFuture;
}
/**
* Checks whether a end time field exists for the record's table and if so
* determines if a time is set and whether that time is in the past,
* making the record invisible on the website.
*
* @param string $table The table name.
* @param array $record An array with record fields that may affect visibility.
* @return bool True if the record's end time is in the past, FALSE otherwise.
*/
protected function isEndTimeInPast($table, $record)
{
$endTimeInPast = false;
if (isset($GLOBALS['TCA'][$table]['ctrl']['enablecolumns']['endtime'])) {
$endTimeField = $GLOBALS['TCA'][$table]['ctrl']['enablecolumns']['endtime'];
$endTimeInPast = $record[$endTimeField] < time();
}
return $endTimeInPast;
}
/**
* Checks whether the record is in the Index Queue and whether it has been
* indexed already.
*
* @param string $table The table name.
* @param array $record An array with record fields that may affect visibility.
* @return bool True if the record is marked as being indexed
*/
protected function isMarkedAsIndexed($table, $record)
{
return $this->getIndexQueue()->containsIndexedItem($table, $record['uid']);
}
/**
* @return Queue
*/
private function getIndexQueue()
{
return GeneralUtility::makeInstance(Queue::class);
}
/**
* Checks whether a frontend group field exists for the record and if so
* whether groups have been removed from accessing the record thus making
* the record invisible to at least some people.
*
* @param string $table The table name.
* @param array $record An array with record fields that may affect visibility.
* @return bool TRUE if frontend groups have been removed from access to the record, FALSE otherwise.
*/
protected function hasFrontendGroupsRemoved($table, $record)
{
$frontendGroupsRemoved = false;
if (isset($GLOBALS['TCA'][$table]['ctrl']['enablecolumns']['fe_group'])) {
$frontendGroupsField = $GLOBALS['TCA'][$table]['ctrl']['enablecolumns']['fe_group'];
$previousGroups = explode(',',
(string)$this->trackedRecords[$table][$record['uid']][$frontendGroupsField]);
$currentGroups = explode(',',
(string)$record[$frontendGroupsField]);
$removedGroups = array_diff($previousGroups, $currentGroups);
$frontendGroupsRemoved = (boolean)count($removedGroups);
}
return $frontendGroupsRemoved;
}
/**
* Checks whether the page has been excluded from searching.
*
* @param array $record An array with record fields that may affect visibility.
* @return bool True if the page has been excluded from searching, FALSE otherwise
*/
protected function isPageExcludedFromSearch($record)
{
return (boolean)$record['no_search'];
}
/**
* Checks whether a page has a page type that can be indexed.
* Currently standard pages and mount pages can be indexed.
*
* @param array $record A page record
* @return bool TRUE if the page can be indexed according to its page type, FALSE otherwise
*/
protected function isIndexablePageType(array $record)
{
return Util::isAllowedPageType($record);
}
/**
* Cleans an index from garbage entries.
*
* Was used to clean the index from expired documents/past endtime. Solr 4.8
* introduced DocExpirationUpdateProcessor to do that job by itself.
*
* The method remains as a dummy for possible later cleanups and to prevent
* things from breaking if others were using it.
*
* @deprecated since 6.0 will be removed in 7.0. deletion is done by DocExpirationUpdateProcessor
* @param Site $site The site to clean indexes on
* @param bool $commitAfterCleanUp Whether to commit right after the clean up, defaults to TRUE
* @return void
*/
public function cleanIndex(Site $site, $commitAfterCleanUp = true)
{
GeneralUtility::logDeprecatedFunction();
}
}
| 1 | 6,022 | I think retrieving the setting could also be done outside the loop. What do you think? | TYPO3-Solr-ext-solr | php
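
A sketch of what the reviewer suggests, using the same names as the patch: both the configuration lookup and the `getEnableCommits()` call can be hoisted above the connection loop, so the flag is read once per site rather than once per Solr connection.

```php
<?php
// Sketch only: assumes $site, $connectionManager, $table and $uid as in the patch.
$solrConfiguration = $site->getSolrConfiguration();
$enableCommits = $solrConfiguration->getEnableCommits();

// a site can have multiple connections (cores / languages)
$solrConnections = $connectionManager->getConnectionsBySite($site);
foreach ($solrConnections as $solr) {
    $solr->deleteByQuery('type:' . $table . ' AND uid:' . intval($uid));
    if ($enableCommits) {
        $solr->commit(false, false, false);
    }
}
```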
@@ -257,6 +257,7 @@ public class TransactionPool implements BlockAddedObserver {
transaction.getGasLimit(), chainHeadBlockHeader.getGasLimit()));
}
+ // TODO: this is where we would validate against the private state
return protocolContext
.getWorldStateArchive()
.get(chainHeadBlockHeader.getStateRoot()) | 1 | /*
* Copyright ConsenSys AG.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*
* SPDX-License-Identifier: Apache-2.0
*/
package org.hyperledger.besu.ethereum.eth.transactions;
import static java.util.Collections.singletonList;
import static org.apache.logging.log4j.LogManager.getLogger;
import static org.hyperledger.besu.ethereum.transaction.TransactionInvalidReason.CHAIN_HEAD_WORLD_STATE_NOT_AVAILABLE;
import org.hyperledger.besu.config.experimental.ExperimentalEIPs;
import org.hyperledger.besu.ethereum.ProtocolContext;
import org.hyperledger.besu.ethereum.chain.BlockAddedEvent;
import org.hyperledger.besu.ethereum.chain.BlockAddedObserver;
import org.hyperledger.besu.ethereum.chain.MutableBlockchain;
import org.hyperledger.besu.ethereum.core.Account;
import org.hyperledger.besu.ethereum.core.BlockHeader;
import org.hyperledger.besu.ethereum.core.Hash;
import org.hyperledger.besu.ethereum.core.Transaction;
import org.hyperledger.besu.ethereum.core.Wei;
import org.hyperledger.besu.ethereum.core.fees.BaseFee;
import org.hyperledger.besu.ethereum.core.fees.EIP1559;
import org.hyperledger.besu.ethereum.core.fees.TransactionPriceCalculator;
import org.hyperledger.besu.ethereum.eth.EthProtocol;
import org.hyperledger.besu.ethereum.eth.manager.EthContext;
import org.hyperledger.besu.ethereum.eth.manager.EthPeer;
import org.hyperledger.besu.ethereum.eth.sync.state.SyncState;
import org.hyperledger.besu.ethereum.eth.transactions.PendingTransactions.TransactionAddedStatus;
import org.hyperledger.besu.ethereum.mainnet.MainnetTransactionValidator;
import org.hyperledger.besu.ethereum.mainnet.ProtocolSchedule;
import org.hyperledger.besu.ethereum.mainnet.TransactionValidationParams;
import org.hyperledger.besu.ethereum.mainnet.ValidationResult;
import org.hyperledger.besu.ethereum.transaction.TransactionInvalidReason;
import org.hyperledger.besu.metrics.BesuMetricCategory;
import org.hyperledger.besu.plugin.services.MetricsSystem;
import org.hyperledger.besu.plugin.services.metrics.Counter;
import org.hyperledger.besu.plugin.services.metrics.LabelledMetric;
import java.util.Collection;
import java.util.HashSet;
import java.util.Optional;
import java.util.Set;
import org.apache.logging.log4j.Logger;
/**
* Maintains the set of pending transactions received from JSON-RPC or other nodes. Transactions are
* removed automatically when they are included in a block on the canonical chain and re-added if a
* re-org removes them from the canonical chain again.
*
* <p>This class is safe for use across multiple threads.
*/
public class TransactionPool implements BlockAddedObserver {
private static final Logger LOG = getLogger();
private static final long SYNC_TOLERANCE = 100L;
private static final String REMOTE = "remote";
private static final String LOCAL = "local";
private final PendingTransactions pendingTransactions;
private final ProtocolSchedule protocolSchedule;
private final ProtocolContext protocolContext;
private final TransactionBatchAddedListener transactionBatchAddedListener;
private final Optional<TransactionBatchAddedListener> pendingTransactionBatchAddedListener;
private final SyncState syncState;
private final Wei minTransactionGasPrice;
private final LabelledMetric<Counter> duplicateTransactionCounter;
private final PeerTransactionTracker peerTransactionTracker;
private final Optional<PeerPendingTransactionTracker> maybePeerPendingTransactionTracker;
private final Optional<EIP1559> eip1559;
private final TransactionPriceCalculator frontierPriceCalculator =
TransactionPriceCalculator.frontier();
private final TransactionPriceCalculator eip1559PriceCalculator =
TransactionPriceCalculator.eip1559();
private final TransactionPoolConfiguration configuration;
public TransactionPool(
final PendingTransactions pendingTransactions,
final ProtocolSchedule protocolSchedule,
final ProtocolContext protocolContext,
final TransactionBatchAddedListener transactionBatchAddedListener,
final Optional<TransactionBatchAddedListener> pendingTransactionBatchAddedListener,
final SyncState syncState,
final EthContext ethContext,
final PeerTransactionTracker peerTransactionTracker,
final Optional<PeerPendingTransactionTracker> maybePeerPendingTransactionTracker,
final Wei minTransactionGasPrice,
final MetricsSystem metricsSystem,
final Optional<EIP1559> eip1559,
final TransactionPoolConfiguration configuration) {
this.pendingTransactions = pendingTransactions;
this.protocolSchedule = protocolSchedule;
this.protocolContext = protocolContext;
this.transactionBatchAddedListener = transactionBatchAddedListener;
this.pendingTransactionBatchAddedListener = pendingTransactionBatchAddedListener;
this.syncState = syncState;
this.peerTransactionTracker = peerTransactionTracker;
this.maybePeerPendingTransactionTracker = maybePeerPendingTransactionTracker;
this.minTransactionGasPrice = minTransactionGasPrice;
this.eip1559 = eip1559;
this.configuration = configuration;
duplicateTransactionCounter =
metricsSystem.createLabelledCounter(
BesuMetricCategory.TRANSACTION_POOL,
"transactions_duplicates_total",
"Total number of duplicate transactions received",
"source");
ethContext.getEthPeers().subscribeConnect(this::handleConnect);
}
void handleConnect(final EthPeer peer) {
pendingTransactions
.getLocalTransactions()
.forEach(transaction -> peerTransactionTracker.addToPeerSendQueue(peer, transaction));
maybePeerPendingTransactionTracker
.filter(
peerPendingTransactionTracker ->
peerPendingTransactionTracker.isPeerSupported(peer, EthProtocol.ETH65))
.ifPresent(
peerPendingTransactionTracker ->
pendingTransactions
.getNewPooledHashes()
.forEach(hash -> peerPendingTransactionTracker.addToPeerSendQueue(peer, hash)));
}
public boolean addTransactionHash(final Hash transactionHash) {
return pendingTransactions.addTransactionHash(transactionHash);
}
public ValidationResult<TransactionInvalidReason> addLocalTransaction(
final Transaction transaction) {
if (transaction.isFrontierTransaction()
&& (!ExperimentalEIPs.eip1559Enabled || this.eip1559.isEmpty())) {
final Wei transactionGasPrice = minTransactionGasPrice(transaction);
if (transactionGasPrice.compareTo(minTransactionGasPrice) < 0) {
return ValidationResult.invalid(TransactionInvalidReason.GAS_PRICE_TOO_LOW);
}
if (!configuration.getTxFeeCap().isZero()
&& transactionGasPrice.compareTo(configuration.getTxFeeCap()) > 0) {
return ValidationResult.invalid(TransactionInvalidReason.TX_FEECAP_EXCEEDED);
}
}
final ValidationResult<TransactionInvalidReason> validationResult =
validateTransaction(transaction);
if (validationResult.isValid()) {
final TransactionAddedStatus transactionAddedStatus =
pendingTransactions.addLocalTransaction(transaction);
if (!transactionAddedStatus.equals(TransactionAddedStatus.ADDED)) {
duplicateTransactionCounter.labels(LOCAL).inc();
return ValidationResult.invalid(transactionAddedStatus.getInvalidReason().orElseThrow());
}
final Collection<Transaction> txs = singletonList(transaction);
transactionBatchAddedListener.onTransactionsAdded(txs);
pendingTransactionBatchAddedListener.ifPresent(it -> it.onTransactionsAdded(txs));
}
return validationResult;
}
public void addRemoteTransactions(final Collection<Transaction> transactions) {
if (!syncState.isInSync(SYNC_TOLERANCE)) {
return;
}
final Set<Transaction> addedTransactions = new HashSet<>();
for (final Transaction transaction : transactions) {
pendingTransactions.tryEvictTransactionHash(transaction.getHash());
if (pendingTransactions.containsTransaction(transaction.getHash())) {
// We already have this transaction, don't even validate it.
duplicateTransactionCounter.labels(REMOTE).inc();
continue;
}
final Wei transactionGasPrice = minTransactionGasPrice(transaction);
if (transactionGasPrice.compareTo(minTransactionGasPrice) < 0) {
continue;
}
final ValidationResult<TransactionInvalidReason> validationResult =
validateTransaction(transaction);
if (validationResult.isValid()) {
final boolean added = pendingTransactions.addRemoteTransaction(transaction);
if (added) {
addedTransactions.add(transaction);
} else {
duplicateTransactionCounter.labels(REMOTE).inc();
}
} else {
LOG.trace(
"Validation failed ({}) for transaction {}. Discarding.",
validationResult.getInvalidReason(),
transaction);
}
}
if (!addedTransactions.isEmpty()) {
transactionBatchAddedListener.onTransactionsAdded(addedTransactions);
}
}
public long subscribePendingTransactions(final PendingTransactionListener listener) {
return pendingTransactions.subscribePendingTransactions(listener);
}
public void unsubscribePendingTransactions(final long id) {
pendingTransactions.unsubscribePendingTransactions(id);
}
public long subscribeDroppedTransactions(final PendingTransactionDroppedListener listener) {
return pendingTransactions.subscribeDroppedTransactions(listener);
}
public void unsubscribeDroppedTransactions(final long id) {
pendingTransactions.unsubscribeDroppedTransactions(id);
}
@Override
public void onBlockAdded(final BlockAddedEvent event) {
event.getAddedTransactions().forEach(pendingTransactions::transactionAddedToBlock);
addRemoteTransactions(event.getRemovedTransactions());
}
private MainnetTransactionValidator getTransactionValidator() {
return protocolSchedule
.getByBlockNumber(protocolContext.getBlockchain().getChainHeadBlockNumber())
.getTransactionValidator();
}
public PendingTransactions getPendingTransactions() {
return pendingTransactions;
}
private ValidationResult<TransactionInvalidReason> validateTransaction(
final Transaction transaction) {
final BlockHeader chainHeadBlockHeader = getChainHeadBlockHeader();
final ValidationResult<TransactionInvalidReason> basicValidationResult =
getTransactionValidator().validate(transaction, chainHeadBlockHeader.getBaseFee());
if (!basicValidationResult.isValid()) {
return basicValidationResult;
}
if (transaction.getGasLimit() > chainHeadBlockHeader.getGasLimit()) {
return ValidationResult.invalid(
TransactionInvalidReason.EXCEEDS_BLOCK_GAS_LIMIT,
String.format(
"Transaction gas limit of %s exceeds block gas limit of %s",
transaction.getGasLimit(), chainHeadBlockHeader.getGasLimit()));
}
return protocolContext
.getWorldStateArchive()
.get(chainHeadBlockHeader.getStateRoot())
.map(
worldState -> {
final Account senderAccount = worldState.get(transaction.getSender());
return getTransactionValidator()
.validateForSender(
transaction, senderAccount, TransactionValidationParams.transactionPool());
})
.orElseGet(() -> ValidationResult.invalid(CHAIN_HEAD_WORLD_STATE_NOT_AVAILABLE));
}
public Optional<Transaction> getTransactionByHash(final Hash hash) {
return pendingTransactions.getTransactionByHash(hash);
}
private BlockHeader getChainHeadBlockHeader() {
final MutableBlockchain blockchain = protocolContext.getBlockchain();
return blockchain.getBlockHeader(blockchain.getChainHeadHash()).get();
}
public interface TransactionBatchAddedListener {
void onTransactionsAdded(Iterable<Transaction> transactions);
}
private Wei minTransactionGasPrice(final Transaction transaction) {
    // If EIP-1559 is not enabled or not configured, always fall back to frontier pricing
if (!ExperimentalEIPs.eip1559Enabled || this.eip1559.isEmpty()) {
return frontierPriceCalculator.price(transaction, Optional.empty());
}
final BlockHeader chainHeadBlockHeader = getChainHeadBlockHeader();
// Compute transaction price using EIP-1559 rules if chain head is after fork
if (this.eip1559.get().isEIP1559(chainHeadBlockHeader.getNumber())) {
return BaseFee.minTransactionPriceInNextBlock(
transaction, eip1559PriceCalculator, chainHeadBlockHeader::getBaseFee);
} else { // Use frontier rules otherwise
return frontierPriceCalculator.price(transaction, Optional.empty());
}
}
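  // Illustrative walk-through of the pricing switch above (a sketch with hypothetical
  // names and numbers, not part of Besu; "tx" and "head" stand for a transaction and the
  // chain head header): with EIP-1559 disabled, or the head before the fork block,
  //   frontierPriceCalculator.price(tx, Optional.empty())   // returns tx's own gas price
  // is used; once the head is past the fork, an EIP-1559 transaction is instead priced
  // against the next block's base fee:
  //   BaseFee.minTransactionPriceInNextBlock(tx, eip1559PriceCalculator, head::getBaseFee)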
}
| 1 | 23,904 | This TODO isn't related to this change. We should remove it. | hyperledger-besu | java |
@@ -102,7 +102,7 @@ class ExternalDriverSupplier implements Supplier<WebDriver> {
Optional<Class<? extends Supplier<WebDriver>>> supplierClass = getDelegateClass();
if (supplierClass.isPresent()) {
Class<? extends Supplier<WebDriver>> clazz = supplierClass.get();
- logger.info("Using delegate supplier: " + clazz.getName());
+ logger.finest("Using delegate supplier: " + clazz.getName());
try {
@SuppressWarnings("unchecked")
Constructor<Supplier<WebDriver>> ctor = | 1 | // Licensed to the Software Freedom Conservancy (SFC) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The SFC licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.openqa.selenium.testing.drivers;
import static java.util.concurrent.TimeUnit.SECONDS;
import com.google.common.base.Suppliers;
import org.openqa.selenium.Capabilities;
import org.openqa.selenium.ImmutableCapabilities;
import org.openqa.selenium.WebDriver;
import org.openqa.selenium.net.UrlChecker;
import org.openqa.selenium.remote.LocalFileDetector;
import org.openqa.selenium.remote.RemoteWebDriver;
import java.lang.reflect.Constructor;
import java.lang.reflect.InvocationTargetException;
import java.net.MalformedURLException;
import java.net.URL;
import java.util.Optional;
import java.util.function.Supplier;
import java.util.logging.Logger;
/**
* Supports providing WebDriver instances from an external source using the following system
* properties:
* <dl>
* <dt>selenium.external.serverUrl</dt>
* <dd>Defines the fully qualified URL of an external WebDriver server to send commands to.
* This server <i>must</i> be compliant with the
* <a href="https://github.com/SeleniumHQ/selenium/wiki/JsonWireProtocol">JSON wire protocol</a>.
* If only this property is provided, then this supplier will provide a new
* {@link RemoteWebDriver} instance pointed at the designated server. Otherwise, if a
* custom supplier is also defined (see below), this supplier will wait for the server to
* be accepting commands before delegating to the designated class for the actual client
* creation.
* </dd>
* <dt>selenium.external.supplierClass</dt>
* <dd>Specifies the fully qualified name of another class on the classpath. This class must
 *     implement {@code Supplier<WebDriver>} and have a public constructor that accepts a
 *     single {@link Capabilities} object describing the desired capabilities (this is the
 *     constructor this supplier looks up and invokes).
* </dd>
* </dl>
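 *
 * <p>A minimal configuration sketch (values are illustrative, not defaults): running with
 * {@code -Dselenium.external.serverUrl=http://localhost:4444/wd/hub} alone yields a plain
 * {@link RemoteWebDriver} pointed at that URL; additionally setting
 * {@code -Dselenium.external.supplierClass=com.example.MyDriverSupplier} (a hypothetical
 * class with a public single-{@link Capabilities} constructor) makes this supplier wait
 * for the server to come up and then delegate driver creation to that class.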
*/
class ExternalDriverSupplier implements Supplier<WebDriver> {
private static final Logger logger = Logger.getLogger(ExternalDriverSupplier.class.getName());
private static final String DELEGATE_SUPPLIER_CLASS_PROPERTY = "selenium.external.supplierClass";
private static final String EXTERNAL_SERVER_URL_PROPERTY = "selenium.external.serverUrl";
private final Capabilities desiredCapabilities;
ExternalDriverSupplier(Capabilities desiredCapabilities) {
this.desiredCapabilities = new ImmutableCapabilities(desiredCapabilities);
}
@Override
public WebDriver get() {
Optional<Supplier<WebDriver>> delegate = createDelegate(desiredCapabilities);
delegate = createForExternalServer(desiredCapabilities, delegate);
return delegate.orElse(Suppliers.ofInstance(null)).get();
}
private static Optional<Supplier<WebDriver>> createForExternalServer(
Capabilities desiredCapabilities,
Optional<Supplier<WebDriver>> delegate) {
String externalUrl = System.getProperty(EXTERNAL_SERVER_URL_PROPERTY);
if (externalUrl != null) {
logger.info("Using external WebDriver server: " + externalUrl);
URL url;
try {
url = new URL(externalUrl);
} catch (MalformedURLException e) {
throw new RuntimeException("Invalid server URL: " + externalUrl, e);
}
Supplier<WebDriver> defaultSupplier = new DefaultRemoteSupplier(url, desiredCapabilities);
Supplier<WebDriver> supplier = new ExternalServerDriverSupplier(
url, delegate.orElse(defaultSupplier));
return Optional.of(supplier);
}
return delegate;
}
private static Optional<Supplier<WebDriver>> createDelegate(Capabilities desiredCapabilities) {
Optional<Class<? extends Supplier<WebDriver>>> supplierClass = getDelegateClass();
if (supplierClass.isPresent()) {
Class<? extends Supplier<WebDriver>> clazz = supplierClass.get();
logger.info("Using delegate supplier: " + clazz.getName());
try {
@SuppressWarnings("unchecked")
Constructor<Supplier<WebDriver>> ctor =
(Constructor<Supplier<WebDriver>>) clazz.getConstructor(Capabilities.class);
return Optional.of(ctor.newInstance(desiredCapabilities));
} catch (InvocationTargetException e) {
throw new RuntimeException(e.getTargetException());
} catch (Exception e) {
throw new RuntimeException(e);
}
}
return Optional.empty();
}
@SuppressWarnings("unchecked")
private static Optional<Class<? extends Supplier<WebDriver>>> getDelegateClass() {
String delegateClassName = System.getProperty(DELEGATE_SUPPLIER_CLASS_PROPERTY);
if (delegateClassName != null) {
try {
logger.info("Loading custom supplier: " + delegateClassName);
Class<? extends Supplier<WebDriver>> clazz =
(Class<? extends Supplier<WebDriver>>) Class.forName(delegateClassName);
return Optional.of(clazz);
} catch (Exception e) {
throw new RuntimeException(e);
}
}
return Optional.empty();
}
/**
* Waits for an external WebDriver server to be ready before delegating to another supplier
* for driver creation.
*/
private static class ExternalServerDriverSupplier implements Supplier<WebDriver> {
private final URL serverUrl;
private final Supplier<WebDriver> delegateSupplier;
private ExternalServerDriverSupplier(
URL serverUrl, Supplier<WebDriver> delegateSupplier) {
this.serverUrl = serverUrl;
this.delegateSupplier = delegateSupplier;
}
@Override
public WebDriver get() {
try {
logger.info("Waiting for server to be ready at " + serverUrl);
new UrlChecker().waitUntilAvailable(60, SECONDS, new URL(serverUrl + "/status"));
logger.info("Server is ready");
} catch (UrlChecker.TimeoutException e) {
throw new RuntimeException("The external server is not accepting commands", e);
} catch (MalformedURLException e) {
throw new RuntimeException(e);
}
return delegateSupplier.get();
}
}
/**
* Creates basic {@link RemoteWebDriver} instances.
*/
private static class DefaultRemoteSupplier implements Supplier<WebDriver> {
private final URL url;
private final Capabilities desiredCapabilities;
private DefaultRemoteSupplier(URL url, Capabilities desiredCapabilities) {
this.url = url;
this.desiredCapabilities = desiredCapabilities;
}
@Override
public WebDriver get() {
RemoteWebDriver driver = new RemoteWebDriver(url, desiredCapabilities);
driver.setFileDetector(new LocalFileDetector());
return driver;
}
}
}
| 1 | 16,449 | We chose `info` in the test code for obvious reasons. Changing to `finest` makes debugging harder and noisier. | SeleniumHQ-selenium | java |
@@ -1689,7 +1689,14 @@ class RefactoringChecker(checkers.BaseTokenChecker):
and subscript == subscript.parent.target
):
# Ignore this subscript if it is the target of an assignment
- continue
+ if subscript.as_string() == subscript.parent.value.as_string():
+ # Fire error as d[k] += d[k] has an unnecessary index lookup
+ self.add_message(
+ "unnecessary-dict-index-lookup",
+ node=subscript,
+ args=(node.target.elts[1].as_string()),
+ )
+ return # Early termination; after reassignment dict index lookup will be necessary
# Case where .items is assigned to k,v (i.e., for k, v in d.items())
if isinstance(value, astroid.Name): | 1 | # Licensed under the GPL: https://www.gnu.org/licenses/old-licenses/gpl-2.0.html
# For details: https://github.com/PyCQA/pylint/blob/master/LICENSE
import collections
import copy
import itertools
import tokenize
from functools import reduce
from typing import List, Optional, Tuple, Union, cast
import astroid
from pylint import checkers, interfaces
from pylint import utils as lint_utils
from pylint.checkers import utils
from pylint.checkers.utils import node_frame_class
KNOWN_INFINITE_ITERATORS = {"itertools.count"}
BUILTIN_EXIT_FUNCS = frozenset(("quit", "exit"))
CALLS_THAT_COULD_BE_REPLACED_BY_WITH = frozenset(
(
"threading.lock.acquire",
"threading._RLock.acquire",
"threading.Semaphore.acquire",
"multiprocessing.managers.BaseManager.start",
"multiprocessing.managers.SyncManager.start",
)
)
CALLS_RETURNING_CONTEXT_MANAGERS = frozenset(
(
"_io.open", # regular 'open()' call
"codecs.open",
"urllib.request.urlopen",
"tempfile.NamedTemporaryFile",
"tempfile.SpooledTemporaryFile",
"tempfile.TemporaryDirectory",
"zipfile.ZipFile",
"zipfile.PyZipFile",
"zipfile.ZipFile.open",
"zipfile.PyZipFile.open",
"tarfile.TarFile",
"tarfile.TarFile.open",
"multiprocessing.context.BaseContext.Pool",
"concurrent.futures.thread.ThreadPoolExecutor",
"concurrent.futures.process.ProcessPoolExecutor",
"subprocess.Popen",
)
)
def _if_statement_is_always_returning(if_node, returning_node_class) -> bool:
for node in if_node.body:
if isinstance(node, returning_node_class):
return True
return False
def _is_trailing_comma(tokens: List[tokenize.TokenInfo], index: int) -> bool:
"""Check if the given token is a trailing comma
:param tokens: Sequence of modules tokens
:type tokens: list[tokenize.TokenInfo]
:param int index: Index of token under check in tokens
:returns: True if the token is a comma which trails an expression
:rtype: bool
"""
token = tokens[index]
if token.exact_type != tokenize.COMMA:
return False
# Must have remaining tokens on the same line such as NEWLINE
left_tokens = itertools.islice(tokens, index + 1, None)
def same_start_token(
other_token: tokenize.TokenInfo, _token: tokenize.TokenInfo = token
) -> bool:
return other_token.start[0] == _token.start[0]
same_line_remaining_tokens = list(
itertools.takewhile(same_start_token, left_tokens)
)
# Note: If the newline is tokenize.NEWLINE and not tokenize.NL
# then the newline denotes the end of expression
is_last_element = all(
other_token.type in (tokenize.NEWLINE, tokenize.COMMENT)
for other_token in same_line_remaining_tokens
)
if not same_line_remaining_tokens or not is_last_element:
return False
def get_curline_index_start():
"""Get the index denoting the start of the current line"""
for subindex, token in enumerate(reversed(tokens[:index])):
# See Lib/tokenize.py and Lib/token.py in cpython for more info
if token.type == tokenize.NEWLINE:
return index - subindex
return 0
curline_start = get_curline_index_start()
expected_tokens = {"return", "yield"}
for prevtoken in tokens[curline_start:index]:
if "=" in prevtoken.string or prevtoken.string in expected_tokens:
return True
return False
def _is_inside_context_manager(node):
frame = node.frame()
if not isinstance(
frame, (astroid.FunctionDef, astroid.BoundMethod, astroid.UnboundMethod)
):
return False
return frame.name == "__enter__" or utils.decorated_with(
frame, "contextlib.contextmanager"
)
class RefactoringChecker(checkers.BaseTokenChecker):
"""Looks for code which can be refactored
This checker also mixes the astroid and the token approaches
in order to create knowledge about whether an "else if" node
is a true "else if" node, or an "elif" node.
"""
__implements__ = (interfaces.ITokenChecker, interfaces.IAstroidChecker)
name = "refactoring"
msgs = {
"R1701": (
"Consider merging these isinstance calls to isinstance(%s, (%s))",
"consider-merging-isinstance",
"Used when multiple consecutive isinstance calls can be merged into one.",
),
"R1706": (
"Consider using ternary (%s)",
"consider-using-ternary",
"Used when one of known pre-python 2.5 ternary syntax is used.",
),
"R1709": (
"Boolean expression may be simplified to %s",
"simplify-boolean-expression",
"Emitted when redundant pre-python 2.5 ternary syntax is used.",
),
"R1726": (
"Boolean condition '%s' may be simplified to '%s'",
"simplifiable-condition",
"Emitted when a boolean condition is able to be simplified.",
),
"R1727": (
"Boolean condition '%s' will always evaluate to '%s'",
"condition-evals-to-constant",
"Emitted when a boolean condition can be simplified to a constant value.",
),
"R1702": (
"Too many nested blocks (%s/%s)",
"too-many-nested-blocks",
"Used when a function or a method has too many nested "
"blocks. This makes the code less understandable and "
"maintainable.",
{"old_names": [("R0101", "old-too-many-nested-blocks")]},
),
"R1703": (
"The if statement can be replaced with %s",
"simplifiable-if-statement",
"Used when an if statement can be replaced with 'bool(test)'. ",
{"old_names": [("R0102", "old-simplifiable-if-statement")]},
),
"R1704": (
"Redefining argument with the local name %r",
"redefined-argument-from-local",
"Used when a local name is redefining an argument, which might "
"suggest a potential error. This is taken in account only for "
"a handful of name binding operations, such as for iteration, "
"with statement assignment and exception handler assignment.",
),
"R1705": (
'Unnecessary "%s" after "return"',
"no-else-return",
"Used in order to highlight an unnecessary block of "
"code following an if containing a return statement. "
"As such, it will warn when it encounters an else "
"following a chain of ifs, all of them containing a "
"return statement.",
),
"R1707": (
"Disallow trailing comma tuple",
"trailing-comma-tuple",
"In Python, a tuple is actually created by the comma symbol, "
"not by the parentheses. Unfortunately, one can actually create a "
"tuple by misplacing a trailing comma, which can lead to potential "
"weird bugs in your code. You should always use parentheses "
"explicitly for creating a tuple.",
),
"R1708": (
"Do not raise StopIteration in generator, use return statement instead",
"stop-iteration-return",
"According to PEP479, the raise of StopIteration to end the loop of "
"a generator may lead to hard to find bugs. This PEP specify that "
"raise StopIteration has to be replaced by a simple return statement",
),
"R1710": (
"Either all return statements in a function should return an expression, "
"or none of them should.",
"inconsistent-return-statements",
"According to PEP8, if any return statement returns an expression, "
"any return statements where no value is returned should explicitly "
"state this as return None, and an explicit return statement "
"should be present at the end of the function (if reachable)",
),
"R1711": (
"Useless return at end of function or method",
"useless-return",
'Emitted when a single "return" or "return None" statement is found '
"at the end of function or method definition. This statement can safely be "
"removed because Python will implicitly return None",
),
"R1712": (
"Consider using tuple unpacking for swapping variables",
"consider-swap-variables",
"You do not have to use a temporary variable in order to "
'swap variables. Using "tuple unpacking" to directly swap '
"variables makes the intention more clear.",
),
"R1713": (
"Consider using str.join(sequence) for concatenating "
"strings from an iterable",
"consider-using-join",
"Using str.join(sequence) is faster, uses less memory "
"and increases readability compared to for-loop iteration.",
),
"R1714": (
'Consider merging these comparisons with "in" to %r',
"consider-using-in",
"To check if a variable is equal to one of many values,"
'combine the values into a tuple and check if the variable is contained "in" it '
"instead of checking for equality against each of the values."
"This is faster and less verbose.",
),
"R1715": (
"Consider using dict.get for getting values from a dict "
"if a key is present or a default if not",
"consider-using-get",
"Using the builtin dict.get for getting a value from a dictionary "
"if a key is present or a default if not, is simpler and considered "
"more idiomatic, although sometimes a bit slower",
),
"R1716": (
"Simplify chained comparison between the operands",
"chained-comparison",
"This message is emitted when pylint encounters boolean operation like"
'"a < b and b < c", suggesting instead to refactor it to "a < b < c"',
),
"R1717": (
"Consider using a dictionary comprehension",
"consider-using-dict-comprehension",
"Emitted when we detect the creation of a dictionary "
"using the dict() callable and a transient list. "
"Although there is nothing syntactically wrong with this code, "
"it is hard to read and can be simplified to a dict comprehension."
"Also it is faster since you don't need to create another "
"transient list",
),
"R1718": (
"Consider using a set comprehension",
"consider-using-set-comprehension",
"Although there is nothing syntactically wrong with this code, "
"it is hard to read and can be simplified to a set comprehension."
"Also it is faster since you don't need to create another "
"transient list",
),
"R1719": (
"The if expression can be replaced with %s",
"simplifiable-if-expression",
"Used when an if expression can be replaced with 'bool(test)'. ",
),
"R1720": (
'Unnecessary "%s" after "raise"',
"no-else-raise",
"Used in order to highlight an unnecessary block of "
"code following an if containing a raise statement. "
"As such, it will warn when it encounters an else "
"following a chain of ifs, all of them containing a "
"raise statement.",
),
"R1721": (
"Unnecessary use of a comprehension, use %s instead.",
"unnecessary-comprehension",
"Instead of using an identity comprehension, "
"consider using the list, dict or set constructor. "
"It is faster and simpler.",
),
"R1722": (
"Consider using sys.exit()",
"consider-using-sys-exit",
"Instead of using exit() or quit(), consider using the sys.exit().",
),
"R1723": (
'Unnecessary "%s" after "break"',
"no-else-break",
"Used in order to highlight an unnecessary block of "
"code following an if containing a break statement. "
"As such, it will warn when it encounters an else "
"following a chain of ifs, all of them containing a "
"break statement.",
),
"R1724": (
'Unnecessary "%s" after "continue"',
"no-else-continue",
"Used in order to highlight an unnecessary block of "
"code following an if containing a continue statement. "
"As such, it will warn when it encounters an else "
"following a chain of ifs, all of them containing a "
"continue statement.",
),
"R1725": (
"Consider using Python 3 style super() without arguments",
"super-with-arguments",
"Emitted when calling the super() builtin with the current class "
"and instance. On Python 3 these arguments are the default and they can be omitted.",
),
"R1728": (
"Consider using a generator instead '%s(%s)'",
"consider-using-generator",
"If your container can be large using "
"a generator will bring better performance.",
),
"R1729": (
"Use a generator instead '%s(%s)'",
"use-a-generator",
"Comprehension inside of 'any' or 'all' is unnecessary. "
"A generator would be sufficient and faster.",
),
"R1730": (
"Consider using '%s' instead of unnecessary if block",
"consider-using-min-builtin",
"Using the min builtin instead of a conditional improves readability and conciseness.",
),
"R1731": (
"Consider using '%s' instead of unnecessary if block",
"consider-using-max-builtin",
"Using the max builtin instead of a conditional improves readability and conciseness.",
),
"R1732": (
"Consider using 'with' for resource-allocating operations",
"consider-using-with",
"Emitted if a resource-allocating assignment or call may be replaced by a 'with' block. "
"By using 'with' the release of the allocated resources is ensured even in the case of an exception.",
),
"R1733": (
"Unnecessary dictionary index lookup, use '%s' instead",
"unnecessary-dict-index-lookup",
"Emitted when iterating over the dictionary items (key-item pairs) and accessing the "
"value by index lookup. "
"The value can be accessed directly instead.",
),
}
options = (
(
"max-nested-blocks",
{
"default": 5,
"type": "int",
"metavar": "<int>",
"help": "Maximum number of nested blocks for function / method body",
},
),
(
"never-returning-functions",
{
"default": ("sys.exit", "argparse.parse_error"),
"type": "csv",
"help": "Complete name of functions that never returns. When checking "
"for inconsistent-return-statements if a never returning function is "
"called then it will be considered as an explicit return statement "
"and no message will be printed.",
},
),
)
priority = 0
def __init__(self, linter=None):
checkers.BaseTokenChecker.__init__(self, linter)
self._return_nodes = {}
self._init()
self._never_returning_functions = None
def _init(self):
self._nested_blocks = []
self._elifs = []
self._nested_blocks_msg = None
self._reported_swap_nodes = set()
self._can_simplify_bool_op = False
def open(self):
# do this in open since config not fully initialized in __init__
self._never_returning_functions = set(self.config.never_returning_functions)
@astroid.decorators.cachedproperty
def _dummy_rgx(self):
return lint_utils.get_global_option(self, "dummy-variables-rgx", default=None)
@staticmethod
def _is_bool_const(node):
return isinstance(node.value, astroid.Const) and isinstance(
node.value.value, bool
)
def _is_actual_elif(self, node):
"""Check if the given node is an actual elif
This is a problem we're having with the builtin ast module,
which splits `elif` branches into a separate if statement.
Unfortunately we need to know the exact type in certain
cases.
"""
if isinstance(node.parent, astroid.If):
orelse = node.parent.orelse
# current if node must directly follow an "else"
if orelse and orelse == [node]:
if (node.lineno, node.col_offset) in self._elifs:
return True
return False
def _check_simplifiable_if(self, node):
"""Check if the given if node can be simplified.
The if statement can be reduced to a boolean expression
in some cases. For instance, if there are two branches
and both of them return a boolean value that depends on
the result of the statement's test, then this can be reduced
to `bool(test)` without losing any functionality.
"""
if self._is_actual_elif(node):
# Not interested in if statements with multiple branches.
return
if len(node.orelse) != 1 or len(node.body) != 1:
return
# Check if both branches can be reduced.
first_branch = node.body[0]
else_branch = node.orelse[0]
if isinstance(first_branch, astroid.Return):
if not isinstance(else_branch, astroid.Return):
return
first_branch_is_bool = self._is_bool_const(first_branch)
else_branch_is_bool = self._is_bool_const(else_branch)
reduced_to = "'return bool(test)'"
elif isinstance(first_branch, astroid.Assign):
if not isinstance(else_branch, astroid.Assign):
return
# Check if we assign to the same value
first_branch_targets = [
target.name
for target in first_branch.targets
if isinstance(target, astroid.AssignName)
]
else_branch_targets = [
target.name
for target in else_branch.targets
if isinstance(target, astroid.AssignName)
]
if not first_branch_targets or not else_branch_targets:
return
if sorted(first_branch_targets) != sorted(else_branch_targets):
return
first_branch_is_bool = self._is_bool_const(first_branch)
else_branch_is_bool = self._is_bool_const(else_branch)
reduced_to = "'var = bool(test)'"
else:
return
if not first_branch_is_bool or not else_branch_is_bool:
return
if not first_branch.value.value:
# This is a case that can't be easily simplified and
# if it can be simplified, it will usually result in a
# code that's harder to understand and comprehend.
# Let's take for instance `arg and arg <= 3`. This could theoretically be
# reduced to `not arg or arg > 3`, but the net result is that now the
# condition is harder to understand, because it requires understanding of
# an extra clause:
# * first, there is the negation of truthness with `not arg`
            # * the second clause is `arg > 3`, which occurs when arg has a
            #   truth value, but it implies that `arg > 3` is equivalent
# with `arg and arg > 3`, which means that the user must
# think about this assumption when evaluating `arg > 3`.
# The original form is easier to grasp.
return
self.add_message("simplifiable-if-statement", node=node, args=(reduced_to,))
def process_tokens(self, tokens):
# Process tokens and look for 'if' or 'elif'
for index, token in enumerate(tokens):
token_string = token[1]
if token_string == "elif":
# AST exists by the time process_tokens is called, so
# it's safe to assume tokens[index+1]
# exists. tokens[index+1][2] is the elif's position as
# reported by CPython and PyPy,
# tokens[index][2] is the actual position and also is
# reported by IronPython.
self._elifs.extend([tokens[index][2], tokens[index + 1][2]])
elif _is_trailing_comma(tokens, index):
if self.linter.is_message_enabled("trailing-comma-tuple"):
self.add_message("trailing-comma-tuple", line=token.start[0])
def leave_module(self, _):
self._init()
@utils.check_messages("too-many-nested-blocks")
def visit_tryexcept(self, node):
self._check_nested_blocks(node)
visit_tryfinally = visit_tryexcept
visit_while = visit_tryexcept
def _check_redefined_argument_from_local(self, name_node):
if self._dummy_rgx and self._dummy_rgx.match(name_node.name):
return
if not name_node.lineno:
# Unknown position, maybe it is a manually built AST?
return
scope = name_node.scope()
if not isinstance(scope, astroid.FunctionDef):
return
for defined_argument in scope.args.nodes_of_class(
astroid.AssignName, skip_klass=(astroid.Lambda,)
):
if defined_argument.name == name_node.name:
self.add_message(
"redefined-argument-from-local",
node=name_node,
args=(name_node.name,),
)
@utils.check_messages(
"redefined-argument-from-local",
"too-many-nested-blocks",
"unnecessary-dict-index-lookup",
)
def visit_for(self, node):
self._check_nested_blocks(node)
self._check_unnecessary_dict_index_lookup(node)
for name in node.target.nodes_of_class(astroid.AssignName):
self._check_redefined_argument_from_local(name)
@utils.check_messages("redefined-argument-from-local")
def visit_excepthandler(self, node):
if node.name and isinstance(node.name, astroid.AssignName):
self._check_redefined_argument_from_local(node.name)
@utils.check_messages("redefined-argument-from-local")
def visit_with(self, node):
for _, names in node.items:
if not names:
continue
for name in names.nodes_of_class(astroid.AssignName):
self._check_redefined_argument_from_local(name)
def _check_superfluous_else(self, node, msg_id, returning_node_class):
if not node.orelse:
# Not interested in if statements without else.
return
if self._is_actual_elif(node):
# Not interested in elif nodes; only if
return
if _if_statement_is_always_returning(node, returning_node_class):
orelse = node.orelse[0]
followed_by_elif = (orelse.lineno, orelse.col_offset) in self._elifs
self.add_message(
msg_id, node=node, args="elif" if followed_by_elif else "else"
)
def _check_superfluous_else_return(self, node):
return self._check_superfluous_else(
node, msg_id="no-else-return", returning_node_class=astroid.Return
)
def _check_superfluous_else_raise(self, node):
return self._check_superfluous_else(
node, msg_id="no-else-raise", returning_node_class=astroid.Raise
)
def _check_superfluous_else_break(self, node):
return self._check_superfluous_else(
node, msg_id="no-else-break", returning_node_class=astroid.Break
)
def _check_superfluous_else_continue(self, node):
return self._check_superfluous_else(
node, msg_id="no-else-continue", returning_node_class=astroid.Continue
)
@staticmethod
def _type_and_name_are_equal(node_a, node_b):
for _type in (astroid.Name, astroid.AssignName):
if all(isinstance(_node, _type) for _node in (node_a, node_b)):
return node_a.name == node_b.name
if all(isinstance(_node, astroid.Const) for _node in (node_a, node_b)):
return node_a.value == node_b.value
return False
def _is_dict_get_block(self, node):
# "if <compare node>"
if not isinstance(node.test, astroid.Compare):
return False
        # Bail out if the guard's body does not consist of a single statement
if len(node.body) != 1:
return False
# Look for a single variable assignment on the LHS and a subscript on RHS
stmt = node.body[0]
if not (
isinstance(stmt, astroid.Assign)
and len(node.body[0].targets) == 1
and isinstance(node.body[0].targets[0], astroid.AssignName)
and isinstance(stmt.value, astroid.Subscript)
):
return False
# The subscript's slice needs to be the same as the test variable.
slice_value = stmt.value.slice
if not (
self._type_and_name_are_equal(stmt.value.value, node.test.ops[0][1])
and self._type_and_name_are_equal(slice_value, node.test.left)
):
return False
# The object needs to be a dictionary instance
return isinstance(utils.safe_infer(node.test.ops[0][1]), astroid.Dict)
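    # Shape matched above (illustrative sketch; all names are placeholders):
    #     if key in dictionary:
    #         variable = dictionary[key]
    # for which consider-using-get suggests ``variable = dictionary.get(key)``.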
def _check_consider_get(self, node):
if_block_ok = self._is_dict_get_block(node)
if if_block_ok and not node.orelse:
self.add_message("consider-using-get", node=node)
elif (
if_block_ok
and len(node.orelse) == 1
and isinstance(node.orelse[0], astroid.Assign)
and self._type_and_name_are_equal(
node.orelse[0].targets[0], node.body[0].targets[0]
)
and len(node.orelse[0].targets) == 1
):
self.add_message("consider-using-get", node=node)
@utils.check_messages(
"too-many-nested-blocks",
"simplifiable-if-statement",
"no-else-return",
"no-else-raise",
"no-else-break",
"no-else-continue",
"consider-using-get",
)
def visit_if(self, node):
self._check_simplifiable_if(node)
self._check_nested_blocks(node)
self._check_superfluous_else_return(node)
self._check_superfluous_else_raise(node)
self._check_superfluous_else_break(node)
self._check_superfluous_else_continue(node)
self._check_consider_get(node)
self._check_consider_using_min_max_builtin(node)
def _check_consider_using_min_max_builtin(self, node: astroid.If):
"""Check if the given if node can be refactored as an min/max python builtin."""
if self._is_actual_elif(node) or node.orelse:
# Not interested in if statements with multiple branches.
return
if len(node.body) != 1:
return
body = node.body[0]
# Check if condition can be reduced.
if not hasattr(body, "targets") or len(body.targets) != 1:
return
target = body.targets[0]
if not (
isinstance(node.test, astroid.Compare)
and not isinstance(target, astroid.Subscript)
and not isinstance(node.test.left, astroid.Subscript)
and isinstance(body, astroid.Assign)
):
return
        # Check that the assignment is to the same variable.
if hasattr(node.test.left, "name"):
left_operand = node.test.left.name
elif hasattr(node.test.left, "attrname"):
left_operand = node.test.left.attrname
else:
return
if hasattr(target, "name"):
target_assignation = target.name
elif hasattr(target, "attrname"):
target_assignation = target.attrname
else:
return
if not (left_operand == target_assignation):
return
if len(node.test.ops) > 1:
return
if not isinstance(body.value, (astroid.Name, astroid.Const)):
return
operator, right_statement = node.test.ops[0]
if isinstance(body.value, astroid.Name):
body_value = body.value.name
else:
body_value = body.value.value
if isinstance(right_statement, astroid.Name):
right_statement_value = right_statement.name
elif isinstance(right_statement, astroid.Const):
right_statement_value = right_statement.value
else:
return
# Verify the right part of the statement is the same.
if right_statement_value != body_value:
return
if operator in ("<", "<="):
reduced_to = "{target} = max({target}, {item})".format(
target=target_assignation, item=body_value
)
self.add_message(
"consider-using-max-builtin", node=node, args=(reduced_to,)
)
elif operator in (">", ">="):
reduced_to = "{target} = min({target}, {item})".format(
target=target_assignation, item=body_value
)
self.add_message(
"consider-using-min-builtin", node=node, args=(reduced_to,)
)
@utils.check_messages("simplifiable-if-expression")
def visit_ifexp(self, node):
self._check_simplifiable_ifexp(node)
def _check_simplifiable_ifexp(self, node):
if not isinstance(node.body, astroid.Const) or not isinstance(
node.orelse, astroid.Const
):
return
if not isinstance(node.body.value, bool) or not isinstance(
node.orelse.value, bool
):
return
if isinstance(node.test, astroid.Compare):
test_reduced_to = "test"
else:
test_reduced_to = "bool(test)"
if (node.body.value, node.orelse.value) == (True, False):
reduced_to = f"'{test_reduced_to}'"
elif (node.body.value, node.orelse.value) == (False, True):
reduced_to = "'not test'"
else:
return
self.add_message("simplifiable-if-expression", node=node, args=(reduced_to,))
@utils.check_messages(
"too-many-nested-blocks", "inconsistent-return-statements", "useless-return"
)
def leave_functiondef(self, node):
# check left-over nested blocks stack
self._emit_nested_blocks_message_if_needed(self._nested_blocks)
# new scope = reinitialize the stack of nested blocks
self._nested_blocks = []
# check consistent return statements
self._check_consistent_returns(node)
# check for single return or return None at the end
self._check_return_at_the_end(node)
self._return_nodes[node.name] = []
@utils.check_messages("stop-iteration-return")
def visit_raise(self, node):
self._check_stop_iteration_inside_generator(node)
def _check_stop_iteration_inside_generator(self, node):
"""Check if an exception of type StopIteration is raised inside a generator"""
frame = node.frame()
if not isinstance(frame, astroid.FunctionDef) or not frame.is_generator():
return
if utils.node_ignores_exception(node, StopIteration):
return
if not node.exc:
return
exc = utils.safe_infer(node.exc)
if not exc or not isinstance(exc, (astroid.Instance, astroid.ClassDef)):
return
if self._check_exception_inherit_from_stopiteration(exc):
self.add_message("stop-iteration-return", node=node)
@staticmethod
def _check_exception_inherit_from_stopiteration(exc):
"""Return True if the exception node in argument inherit from StopIteration"""
stopiteration_qname = f"{utils.EXCEPTIONS_MODULE}.StopIteration"
return any(_class.qname() == stopiteration_qname for _class in exc.mro())
def _check_consider_using_comprehension_constructor(self, node):
if (
isinstance(node.func, astroid.Name)
and node.args
and isinstance(node.args[0], astroid.ListComp)
):
if node.func.name == "dict" and not isinstance(
node.args[0].elt, astroid.Call
):
message_name = "consider-using-dict-comprehension"
self.add_message(message_name, node=node)
elif node.func.name == "set":
message_name = "consider-using-set-comprehension"
self.add_message(message_name, node=node)
def _check_consider_using_generator(self, node):
# 'any' and 'all' definitely should use generator, while 'list' and 'tuple' need to be considered first
# See https://github.com/PyCQA/pylint/pull/3309#discussion_r576683109
checked_call = ["any", "all", "list", "tuple"]
if (
isinstance(node, astroid.Call)
and node.func
and isinstance(node.func, astroid.Name)
and node.func.name in checked_call
):
            # functions in checked_call take exactly one argument
            # check whether that argument is a list comprehension
if len(node.args) == 1 and isinstance(node.args[0], astroid.ListComp):
# remove square brackets '[]'
inside_comp = node.args[0].as_string()[1:-1]
call_name = node.func.name
if call_name in ["any", "all"]:
self.add_message(
"use-a-generator",
node=node,
args=(call_name, inside_comp),
)
else:
self.add_message(
"consider-using-generator",
node=node,
args=(call_name, inside_comp),
)
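    # e.g. (illustrative): ``any([x > 0 for x in data])`` triggers use-a-generator with
    # the suggestion ``any(x > 0 for x in data)``, while ``list([x for x in data])``
    # gets the softer consider-using-generator message instead.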
@utils.check_messages(
"stop-iteration-return",
"consider-using-dict-comprehension",
"consider-using-set-comprehension",
"consider-using-sys-exit",
"super-with-arguments",
"consider-using-generator",
"consider-using-with",
)
def visit_call(self, node):
self._check_raising_stopiteration_in_generator_next_call(node)
self._check_consider_using_comprehension_constructor(node)
self._check_quit_exit_call(node)
self._check_super_with_arguments(node)
self._check_consider_using_generator(node)
self._check_consider_using_with(node)
@staticmethod
def _has_exit_in_scope(scope):
exit_func = scope.locals.get("exit")
return bool(
exit_func and isinstance(exit_func[0], (astroid.ImportFrom, astroid.Import))
)
def _check_quit_exit_call(self, node):
if isinstance(node.func, astroid.Name) and node.func.name in BUILTIN_EXIT_FUNCS:
# If we have `exit` imported from `sys` in the current or global scope, exempt this instance.
local_scope = node.scope()
if self._has_exit_in_scope(local_scope) or self._has_exit_in_scope(
node.root()
):
return
self.add_message("consider-using-sys-exit", node=node)
def _check_super_with_arguments(self, node):
if not isinstance(node.func, astroid.Name) or node.func.name != "super":
return
# pylint: disable=too-many-boolean-expressions
if (
len(node.args) != 2
or not isinstance(node.args[1], astroid.Name)
or node.args[1].name != "self"
or not isinstance(node.args[0], astroid.Name)
or not isinstance(node.args[1], astroid.Name)
or node_frame_class(node) is None
or node.args[0].name != node_frame_class(node).name
):
return
self.add_message("super-with-arguments", node=node)
def _check_raising_stopiteration_in_generator_next_call(self, node):
"""Check if a StopIteration exception is raised by the call to next function
If the next value has a default value, then do not add message.
:param node: Check to see if this Call node is a next function
:type node: :class:`astroid.node_classes.Call`
"""
def _looks_like_infinite_iterator(param):
inferred = utils.safe_infer(param)
if inferred:
return inferred.qname() in KNOWN_INFINITE_ITERATORS
return False
if isinstance(node.func, astroid.Attribute):
            # A next() method, which is not what we want.
return
inferred = utils.safe_infer(node.func)
if getattr(inferred, "name", "") == "next":
frame = node.frame()
# The next builtin can only have up to two
# positional arguments and no keyword arguments
has_sentinel_value = len(node.args) > 1
if (
isinstance(frame, astroid.FunctionDef)
and frame.is_generator()
and not has_sentinel_value
and not utils.node_ignores_exception(node, StopIteration)
and not _looks_like_infinite_iterator(node.args[0])
):
self.add_message("stop-iteration-return", node=node)
def _check_nested_blocks(self, node):
"""Update and check the number of nested blocks"""
# only check block levels inside functions or methods
if not isinstance(node.scope(), astroid.FunctionDef):
return
# messages are triggered on leaving the nested block. Here we save the
# stack in case the current node isn't nested in the previous one
nested_blocks = self._nested_blocks[:]
if node.parent == node.scope():
self._nested_blocks = [node]
else:
            # go through ancestors from the most nested to the least nested
for ancestor_node in reversed(self._nested_blocks):
if ancestor_node == node.parent:
break
self._nested_blocks.pop()
# if the node is an elif, this should not be another nesting level
if isinstance(node, astroid.If) and self._is_actual_elif(node):
if self._nested_blocks:
self._nested_blocks.pop()
self._nested_blocks.append(node)
# send message only once per group of nested blocks
if len(nested_blocks) > len(self._nested_blocks):
self._emit_nested_blocks_message_if_needed(nested_blocks)
def _emit_nested_blocks_message_if_needed(self, nested_blocks):
if len(nested_blocks) > self.config.max_nested_blocks:
self.add_message(
"too-many-nested-blocks",
node=nested_blocks[0],
args=(len(nested_blocks), self.config.max_nested_blocks),
)
@staticmethod
def _duplicated_isinstance_types(node):
"""Get the duplicated types from the underlying isinstance calls.
:param astroid.BoolOp node: Node which should contain a bunch of isinstance calls.
        :returns: Dictionary mapping the comparison objects from the isinstance calls
                  to the duplicated types found in consecutive calls.
:rtype: dict
"""
duplicated_objects = set()
all_types = collections.defaultdict(set)
for call in node.values:
if not isinstance(call, astroid.Call) or len(call.args) != 2:
continue
inferred = utils.safe_infer(call.func)
if not inferred or not utils.is_builtin_object(inferred):
continue
if inferred.name != "isinstance":
continue
isinstance_object = call.args[0].as_string()
isinstance_types = call.args[1]
if isinstance_object in all_types:
duplicated_objects.add(isinstance_object)
if isinstance(isinstance_types, astroid.Tuple):
elems = [
class_type.as_string() for class_type in isinstance_types.itered()
]
else:
elems = [isinstance_types.as_string()]
all_types[isinstance_object].update(elems)
        # Remove all keys which are not duplicated
return {
key: value for key, value in all_types.items() if key in duplicated_objects
}
def _check_consider_merging_isinstance(self, node):
"""Check isinstance calls which can be merged together."""
if node.op != "or":
return
first_args = self._duplicated_isinstance_types(node)
for duplicated_name, class_names in first_args.items():
names = sorted(name for name in class_names)
self.add_message(
"consider-merging-isinstance",
node=node,
args=(duplicated_name, ", ".join(names)),
)
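    # Illustrative trigger (sketch): ``isinstance(x, int) or isinstance(x, float)``
    # is reported as mergeable into ``isinstance(x, (float, int))`` (names sorted).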
def _check_consider_using_in(self, node):
allowed_ops = {"or": "==", "and": "!="}
if node.op not in allowed_ops or len(node.values) < 2:
return
for value in node.values:
if (
not isinstance(value, astroid.Compare)
or len(value.ops) != 1
or value.ops[0][0] not in allowed_ops[node.op]
):
return
for comparable in value.left, value.ops[0][1]:
if isinstance(comparable, astroid.Call):
return
# Gather variables and values from comparisons
variables, values = [], []
for value in node.values:
variable_set = set()
for comparable in value.left, value.ops[0][1]:
if isinstance(comparable, astroid.Name):
variable_set.add(comparable.as_string())
values.append(comparable.as_string())
variables.append(variable_set)
# Look for (common-)variables that occur in all comparisons
common_variables = reduce(lambda a, b: a.intersection(b), variables)
if not common_variables:
return
# Gather information for the suggestion
common_variable = sorted(list(common_variables))[0]
comprehension = "in" if node.op == "or" else "not in"
values = list(collections.OrderedDict.fromkeys(values))
values.remove(common_variable)
values_string = ", ".join(values) if len(values) != 1 else values[0] + ","
suggestion = f"{common_variable} {comprehension} ({values_string})"
self.add_message("consider-using-in", node=node, args=(suggestion,))
def _check_chained_comparison(self, node):
"""Check if there is any chained comparison in the expression.
Add a refactoring message if a boolOp contains comparison like a < b and b < c,
which can be chained as a < b < c.
Care is taken to avoid simplifying a < b < c and b < d.
"""
if node.op != "and" or len(node.values) < 2:
return
def _find_lower_upper_bounds(comparison_node, uses):
left_operand = comparison_node.left
for operator, right_operand in comparison_node.ops:
for operand in (left_operand, right_operand):
value = None
if isinstance(operand, astroid.Name):
value = operand.name
elif isinstance(operand, astroid.Const):
value = operand.value
if value is None:
continue
if operator in ("<", "<="):
if operand is left_operand:
uses[value]["lower_bound"].add(comparison_node)
elif operand is right_operand:
uses[value]["upper_bound"].add(comparison_node)
elif operator in (">", ">="):
if operand is left_operand:
uses[value]["upper_bound"].add(comparison_node)
elif operand is right_operand:
uses[value]["lower_bound"].add(comparison_node)
left_operand = right_operand
uses = collections.defaultdict(
lambda: {"lower_bound": set(), "upper_bound": set()}
)
for comparison_node in node.values:
if isinstance(comparison_node, astroid.Compare):
_find_lower_upper_bounds(comparison_node, uses)
for _, bounds in uses.items():
num_shared = len(bounds["lower_bound"].intersection(bounds["upper_bound"]))
num_lower_bounds = len(bounds["lower_bound"])
num_upper_bounds = len(bounds["upper_bound"])
if num_shared < num_lower_bounds and num_shared < num_upper_bounds:
self.add_message("chained-comparison", node=node)
break
@staticmethod
def _apply_boolean_simplification_rules(operator, values):
"""Removes irrelevant values or returns shortcircuiting values
This function applies the following two rules:
1) an OR expression with True in it will always be true, and the
reverse for AND
2) False values in OR expressions are only relevant if all values are
false, and the reverse for AND"""
simplified_values = []
for subnode in values:
inferred_bool = None
if not next(subnode.nodes_of_class(astroid.Name), False):
inferred = utils.safe_infer(subnode)
if inferred:
inferred_bool = inferred.bool_value()
if not isinstance(inferred_bool, bool):
simplified_values.append(subnode)
elif (operator == "or") == inferred_bool:
return [subnode]
return simplified_values or [astroid.Const(operator == "and")]
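    # e.g. (illustrative): for ``or``, the values [x, True, y] short-circuit to [True],
    # while [x, False] drops the irrelevant False and keeps just [x]; both rules are
    # mirrored for ``and``.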
def _simplify_boolean_operation(self, bool_op):
"""Attempts to simplify a boolean operation
Recursively applies simplification on the operator terms,
and keeps track of whether reductions have been made."""
children = list(bool_op.get_children())
intermediate = [
self._simplify_boolean_operation(child)
if isinstance(child, astroid.BoolOp)
else child
for child in children
]
result = self._apply_boolean_simplification_rules(bool_op.op, intermediate)
if len(result) < len(children):
self._can_simplify_bool_op = True
if len(result) == 1:
return result[0]
simplified_bool_op = copy.copy(bool_op)
simplified_bool_op.postinit(result)
return simplified_bool_op
def _check_simplifiable_condition(self, node):
"""Check if a boolean condition can be simplified.
        Variables will not be simplified, even if the value can be inferred,
and expressions like '3 + 4' will remain expanded."""
if not utils.is_test_condition(node):
return
self._can_simplify_bool_op = False
simplified_expr = self._simplify_boolean_operation(node)
if not self._can_simplify_bool_op:
return
if not next(simplified_expr.nodes_of_class(astroid.Name), False):
self.add_message(
"condition-evals-to-constant",
node=node,
args=(node.as_string(), simplified_expr.as_string()),
)
else:
self.add_message(
"simplifiable-condition",
node=node,
args=(node.as_string(), simplified_expr.as_string()),
)
@utils.check_messages(
"consider-merging-isinstance",
"consider-using-in",
"chained-comparison",
"simplifiable-condition",
"condition-evals-to-constant",
)
def visit_boolop(self, node):
self._check_consider_merging_isinstance(node)
self._check_consider_using_in(node)
self._check_chained_comparison(node)
self._check_simplifiable_condition(node)
@staticmethod
def _is_simple_assignment(node):
return (
isinstance(node, astroid.Assign)
and len(node.targets) == 1
and isinstance(node.targets[0], astroid.node_classes.AssignName)
and isinstance(node.value, astroid.node_classes.Name)
)
def _check_swap_variables(self, node):
if not node.next_sibling() or not node.next_sibling().next_sibling():
return
assignments = [node, node.next_sibling(), node.next_sibling().next_sibling()]
if not all(self._is_simple_assignment(node) for node in assignments):
return
if any(node in self._reported_swap_nodes for node in assignments):
return
left = [node.targets[0].name for node in assignments]
right = [node.value.name for node in assignments]
if left[0] == right[-1] and left[1:] == right[:-1]:
self._reported_swap_nodes.update(assignments)
message = "consider-swap-variables"
self.add_message(message, node=node)
@utils.check_messages(
"simplify-boolean-expression",
"consider-using-ternary",
"consider-swap-variables",
)
def visit_assign(self, node):
self._check_swap_variables(node)
if self._is_and_or_ternary(node.value):
cond, truth_value, false_value = self._and_or_ternary_arguments(node.value)
else:
return
if all(
isinstance(value, astroid.Compare) for value in (truth_value, false_value)
):
return
inferred_truth_value = utils.safe_infer(truth_value)
if inferred_truth_value in (None, astroid.Uninferable):
truth_boolean_value = True
else:
truth_boolean_value = truth_value.bool_value()
if truth_boolean_value is False:
message = "simplify-boolean-expression"
suggestion = false_value.as_string()
else:
message = "consider-using-ternary"
suggestion = "{truth} if {cond} else {false}".format(
truth=truth_value.as_string(),
cond=cond.as_string(),
false=false_value.as_string(),
)
self.add_message(message, node=node, args=(suggestion,))
visit_return = visit_assign
def _check_consider_using_with(self, node: astroid.Call):
inferred = utils.safe_infer(node.func)
if not inferred:
return
could_be_used_in_with = (
# things like ``lock.acquire()``
inferred.qname() in CALLS_THAT_COULD_BE_REPLACED_BY_WITH
or (
# things like ``open("foo")`` which are not already inside a ``with`` statement
inferred.qname() in CALLS_RETURNING_CONTEXT_MANAGERS
and not isinstance(node.parent, astroid.With)
)
)
if could_be_used_in_with and not _is_inside_context_manager(node):
self.add_message("consider-using-with", node=node)
def _check_consider_using_join(self, aug_assign):
"""
We start with the augmented assignment and work our way upwards.
        Variable names for the nodes if the match is successful:
result = '' # assign
for number in ['1', '2', '3'] # for_loop
result += number # aug_assign
"""
for_loop = aug_assign.parent
if not isinstance(for_loop, astroid.For) or len(for_loop.body) > 1:
return
assign = for_loop.previous_sibling()
if not isinstance(assign, astroid.Assign):
return
result_assign_names = {
target.name
for target in assign.targets
if isinstance(target, astroid.AssignName)
}
is_concat_loop = (
aug_assign.op == "+="
and isinstance(aug_assign.target, astroid.AssignName)
and len(for_loop.body) == 1
and aug_assign.target.name in result_assign_names
and isinstance(assign.value, astroid.Const)
and isinstance(assign.value.value, str)
and isinstance(aug_assign.value, astroid.Name)
and aug_assign.value.name == for_loop.target.name
)
if is_concat_loop:
self.add_message("consider-using-join", node=aug_assign)
@utils.check_messages("consider-using-join")
def visit_augassign(self, node):
self._check_consider_using_join(node)
@utils.check_messages("unnecessary-comprehension", "unnecessary-dict-index-lookup")
def visit_comprehension(self, node: astroid.Comprehension) -> None:
self._check_unnecessary_comprehension(node)
self._check_unnecessary_dict_index_lookup(node)
def _check_unnecessary_comprehension(self, node: astroid.Comprehension) -> None:
if (
isinstance(node.parent, astroid.GeneratorExp)
or len(node.ifs) != 0
or len(node.parent.generators) != 1
or node.is_async
):
return
if (
isinstance(node.parent, astroid.DictComp)
and isinstance(node.parent.key, astroid.Name)
and isinstance(node.parent.value, astroid.Name)
and isinstance(node.target, astroid.Tuple)
and all(isinstance(elt, astroid.AssignName) for elt in node.target.elts)
):
expr_list = [node.parent.key.name, node.parent.value.name]
target_list = [elt.name for elt in node.target.elts]
elif isinstance(node.parent, (astroid.ListComp, astroid.SetComp)):
expr = node.parent.elt
if isinstance(expr, astroid.Name):
expr_list = expr.name
elif isinstance(expr, astroid.Tuple):
if any(not isinstance(elt, astroid.Name) for elt in expr.elts):
return
expr_list = [elt.name for elt in expr.elts]
else:
expr_list = []
target = node.parent.generators[0].target
target_list = (
target.name
if isinstance(target, astroid.AssignName)
else (
[
elt.name
for elt in target.elts
if isinstance(elt, astroid.AssignName)
]
if isinstance(target, astroid.Tuple)
else []
)
)
else:
return
if expr_list == target_list != []:
args: Optional[Tuple[str]] = None
inferred = utils.safe_infer(node.iter)
if isinstance(node.parent, astroid.DictComp) and isinstance(
inferred, astroid.objects.DictItems
):
args = (f"{node.iter.func.expr.as_string()}",)
elif (
isinstance(node.parent, astroid.ListComp)
and isinstance(inferred, astroid.List)
) or (
isinstance(node.parent, astroid.SetComp)
and isinstance(inferred, astroid.Set)
):
args = (f"{node.iter.as_string()}",)
if args:
self.add_message("unnecessary-comprehension", node=node, args=args)
return
if isinstance(node.parent, astroid.DictComp):
func = "dict"
elif isinstance(node.parent, astroid.ListComp):
func = "list"
elif isinstance(node.parent, astroid.SetComp):
func = "set"
else:
return
self.add_message(
"unnecessary-comprehension",
node=node,
args=(f"{func}({node.iter.as_string()})",),
)
@staticmethod
def _is_and_or_ternary(node):
"""
        Returns True if node is of the form 'condition and true_value or false_value'.
        Neither condition, true_value nor false_value may itself be a complex boolean expression.
"""
return (
isinstance(node, astroid.BoolOp)
and node.op == "or"
and len(node.values) == 2
and isinstance(node.values[0], astroid.BoolOp)
and not isinstance(node.values[1], astroid.BoolOp)
and node.values[0].op == "and"
and not isinstance(node.values[0].values[1], astroid.BoolOp)
and len(node.values[0].values) == 2
)
@staticmethod
def _and_or_ternary_arguments(node):
false_value = node.values[1]
condition, true_value = node.values[0].values
return condition, true_value, false_value
def visit_functiondef(self, node):
self._return_nodes[node.name] = list(
node.nodes_of_class(astroid.Return, skip_klass=astroid.FunctionDef)
)
def _check_consistent_returns(self, node: astroid.FunctionDef) -> None:
"""Check that all return statements inside a function are consistent.
Return statements are consistent if:
- all returns are explicit and if there is no implicit return;
- all returns are empty and if there is, possibly, an implicit return.
Args:
node (astroid.FunctionDef): the function holding the return statements.
"""
        # explicit return statements are those with a non-None value
explicit_returns = [
_node for _node in self._return_nodes[node.name] if _node.value is not None
]
if not explicit_returns:
return
if len(explicit_returns) == len(
self._return_nodes[node.name]
) and self._is_node_return_ended(node):
return
self.add_message("inconsistent-return-statements", node=node)
def _is_if_node_return_ended(self, node: astroid.If) -> bool:
"""Check if the If node ends with an explicit return statement.
Args:
node (astroid.If): If node to be checked.
Returns:
            bool: True if the node ends with an explicit return statement, False otherwise.
"""
        # Do not check whether inner function definitions are return ended.
is_if_returning = any(
self._is_node_return_ended(_ifn)
for _ifn in node.body
if not isinstance(_ifn, astroid.FunctionDef)
)
if not node.orelse:
            # If there is no orelse part then the if statement is returning if:
# - there is at least one return statement in its siblings;
# - the if body is itself returning.
if not self._has_return_in_siblings(node):
return False
return is_if_returning
# If there is an orelse part then both if body and orelse part should return.
is_orelse_returning = any(
self._is_node_return_ended(_ore)
for _ore in node.orelse
if not isinstance(_ore, astroid.FunctionDef)
)
return is_if_returning and is_orelse_returning
def _is_raise_node_return_ended(self, node: astroid.Raise) -> bool:
"""Check if the Raise node ends with an explicit return statement.
Args:
node (astroid.Raise): Raise node to be checked.
Returns:
            bool: True if the node ends with an explicit return statement, False otherwise.
"""
# a Raise statement doesn't need to end with a return statement
        # but if the exception raised is handled, then the handler has to
        # end with a return statement
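        # e.g. return ended despite the raise:
        #     try:
        #         raise ValueError()
        #     except ValueError:
        #         return 1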
if not node.exc:
# Ignore bare raises
return True
if not utils.is_node_inside_try_except(node):
# If the raise statement is not inside a try/except statement
# then the exception is raised and cannot be caught. No need
# to infer it.
return True
exc = utils.safe_infer(node.exc)
if exc is None or exc is astroid.Uninferable or not hasattr(exc, "pytype"):
return False
exc_name = exc.pytype().split(".")[-1]
handlers = utils.get_exception_handlers(node, exc_name)
handlers = list(handlers) if handlers is not None else []
if handlers:
# among all the handlers handling the exception at least one
# must end with a return statement
return any(self._is_node_return_ended(_handler) for _handler in handlers)
# if no handlers handle the exception then it's ok
return True
def _is_node_return_ended(self, node: astroid.node_classes.NodeNG) -> bool:
"""Check if the node ends with an explicit return statement.
Args:
node (astroid.node_classes.NodeNG): node to be checked.
Returns:
            bool: True if the node ends with an explicit return statement, False otherwise.
"""
# Recursion base case
if isinstance(node, astroid.Return):
return True
if isinstance(node, astroid.Call):
try:
funcdef_node = node.func.inferred()[0]
if self._is_function_def_never_returning(funcdef_node):
return True
except astroid.InferenceError:
pass
        # Avoid checking inside while loops as we don't know
        # whether they will ever complete
if isinstance(node, astroid.While):
return True
if isinstance(node, astroid.Raise):
return self._is_raise_node_return_ended(node)
if isinstance(node, astroid.If):
return self._is_if_node_return_ended(node)
if isinstance(node, astroid.TryExcept):
handlers = {
_child
for _child in node.get_children()
if isinstance(_child, astroid.ExceptHandler)
}
all_but_handler = set(node.get_children()) - handlers
return any(
self._is_node_return_ended(_child) for _child in all_but_handler
) and all(self._is_node_return_ended(_child) for _child in handlers)
if (
isinstance(node, astroid.Assert)
and isinstance(node.test, astroid.Const)
and not node.test.value
):
# consider assert False as a return node
return True
# recurses on the children of the node
return any(self._is_node_return_ended(_child) for _child in node.get_children())
@staticmethod
def _has_return_in_siblings(node: astroid.node_classes.NodeNG) -> bool:
"""
Returns True if there is at least one return in the node's siblings
"""
next_sibling = node.next_sibling()
while next_sibling:
if isinstance(next_sibling, astroid.Return):
return True
next_sibling = next_sibling.next_sibling()
return False
def _is_function_def_never_returning(self, node: astroid.FunctionDef) -> bool:
"""Return True if the function never returns. False otherwise.
Args:
node (astroid.FunctionDef): function definition node to be analyzed.
Returns:
bool: True if the function never returns, False otherwise.
"""
if isinstance(node, astroid.FunctionDef) and node.returns:
return (
isinstance(node.returns, astroid.Attribute)
and node.returns.attrname == "NoReturn"
or isinstance(node.returns, astroid.Name)
and node.returns.name == "NoReturn"
)
try:
return node.qname() in self._never_returning_functions
except TypeError:
return False
def _check_return_at_the_end(self, node):
"""Check for presence of a *single* return statement at the end of a
function. "return" or "return None" are useless because None is the
default return type if they are missing.
NOTE: produces a message only if there is a single return statement
in the function body. Otherwise _check_consistent_returns() is called!
Per its implementation and PEP8 we can have a "return None" at the end
of the function body if there are other return statements before that!
"""
if len(self._return_nodes[node.name]) > 1:
return
if len(node.body) <= 1:
return
last = node.body[-1]
if isinstance(last, astroid.Return):
# e.g. "return"
if last.value is None:
self.add_message("useless-return", node=node)
# return None"
elif isinstance(last.value, astroid.Const) and (last.value.value is None):
self.add_message("useless-return", node=node)
def _check_unnecessary_dict_index_lookup(
self, node: Union[astroid.For, astroid.Comprehension]
) -> None:
"""Add message when accessing dict values by index lookup."""
# Verify that we have a .items() call and
# that the object which is iterated is used as a subscript in the
# body of the for.
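        # e.g. flagged:
        #     for k, v in d.items():
        #         print(d[k])  # index lookup; could simply use v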
# Is it a proper items call?
if (
isinstance(node.iter, astroid.Call)
and isinstance(node.iter.func, astroid.Attribute)
and node.iter.func.attrname == "items"
):
inferred = utils.safe_infer(node.iter.func)
if not isinstance(inferred, astroid.BoundMethod):
return
iterating_object_name = node.iter.func.expr.as_string()
# Verify that the body of the for loop uses a subscript
# with the object that was iterated. This uses some heuristics
# in order to make sure that the same object is used in the
# for body.
children = (
node.body
if isinstance(node, astroid.For)
else node.parent.get_children()
)
for child in children:
for subscript in child.nodes_of_class(astroid.Subscript):
subscript = cast(astroid.Subscript, subscript)
if not isinstance(
subscript.value, (astroid.Name, astroid.Attribute)
):
continue
value = subscript.slice
if isinstance(node, astroid.For) and (
isinstance(subscript.parent, astroid.Assign)
and subscript in subscript.parent.targets
or isinstance(subscript.parent, astroid.AugAssign)
and subscript == subscript.parent.target
):
# Ignore this subscript if it is the target of an assignment
continue
# Case where .items is assigned to k,v (i.e., for k, v in d.items())
if isinstance(value, astroid.Name):
if (
not isinstance(node.target, astroid.Tuple)
or value.name != node.target.elts[0].name
or iterating_object_name != subscript.value.as_string()
):
continue
if (
isinstance(node, astroid.For)
and value.lookup(value.name)[1][-1].lineno > node.lineno
):
# Ignore this subscript if it has been redefined after
# the for loop. This checks for the line number using .lookup()
# to get the line number where the iterating object was last
# defined and compare that to the for loop's line number
continue
self.add_message(
"unnecessary-dict-index-lookup",
node=subscript,
                            args=(node.target.elts[1].as_string(),),
)
# Case where .items is assigned to single var (i.e., for item in d.items())
elif isinstance(value, astroid.Subscript):
if (
not isinstance(node.target, astroid.AssignName)
or node.target.name != value.value.name
or iterating_object_name != subscript.value.as_string()
):
continue
if (
isinstance(node, astroid.For)
and value.value.lookup(value.value.name)[1][-1].lineno
> node.lineno
):
# Ignore this subscript if it has been redefined after
# the for loop. This checks for the line number using .lookup()
# to get the line number where the iterating object was last
# defined and compare that to the for loop's line number
continue
# check if subscripted by 0 (key)
inferred = utils.safe_infer(value.slice)
if (
not isinstance(inferred, astroid.Const)
or inferred.value != 0
):
continue
self.add_message(
"unnecessary-dict-index-lookup",
node=subscript,
args=("1".join(value.as_string().rsplit("0", maxsplit=1)),),
)
| 1 | 14,463 | I'm not really sure this is worth it. Keep in mind that every special case we add has the potential to introduce new errors and complicates the code further. For common cases that is acceptable, but in this instance I don't think it's beneficial. | PyCQA-pylint | py |
@@ -62,6 +62,11 @@ func (c *showCommand) Run(ctx context.Context, env *common_cli.Env, serverClient
return err
}
+ if agent.Selectors != nil {
+ for _, s := range agent.Selectors {
+ env.Printf("Selectors : %s:%s\n", s.Type, s.Value)
+ }
+ }
return nil
}
| 1 | package agent
import (
"errors"
"flag"
"github.com/mitchellh/cli"
"github.com/spiffe/go-spiffe/v2/spiffeid"
"github.com/spiffe/spire/cmd/spire-server/util"
common_cli "github.com/spiffe/spire/pkg/common/cli"
"github.com/spiffe/spire/pkg/server/api"
"github.com/spiffe/spire/proto/spire/api/server/agent/v1"
"golang.org/x/net/context"
)
type showCommand struct {
	// SPIFFE ID of the agent being shown
spiffeID string
}
// NewShowCommand creates a new "show" subcommand for "agent" command.
func NewShowCommand() cli.Command {
return NewShowCommandWithEnv(common_cli.DefaultEnv)
}
// NewShowCommandWithEnv creates a new "show" subcommand for "agent" command
// using the environment specified
func NewShowCommandWithEnv(env *common_cli.Env) cli.Command {
return util.AdaptCommand(env, new(showCommand))
}
func (*showCommand) Name() string {
return "agent show"
}
func (showCommand) Synopsis() string {
return "Shows the details of an attested agent given its SPIFFE ID"
}
// Run shows an agent given its SPIFFE ID
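// Example invocation (hypothetical SPIFFE ID shown):
//   spire-server agent show -spiffeID spiffe://example.org/host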
func (c *showCommand) Run(ctx context.Context, env *common_cli.Env, serverClient util.ServerClient) error {
if c.spiffeID == "" {
return errors.New("a SPIFFE ID is required")
}
id, err := spiffeid.FromString(c.spiffeID)
if err != nil {
return err
}
agentClient := serverClient.NewAgentClient()
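	// note: the result below shadows the imported "agent" package for the rest of the function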
agent, err := agentClient.GetAgent(ctx, &agent.GetAgentRequest{Id: api.ProtoFromID(id)})
if err != nil {
return err
}
env.Printf("Found an attested agent given its SPIFFE ID\n\n")
if err := printAgents(env, agent); err != nil {
return err
}
return nil
}
func (c *showCommand) AppendFlags(fs *flag.FlagSet) {
fs.StringVar(&c.spiffeID, "spiffeID", "", "The SPIFFE ID of the agent to show (agent identity)")
}
| 1 | 16,052 | nit: this `if` isn't necessary since we immediately follow it up with a range over the slice, which works fine with a `nil` slice. | spiffe-spire | go |
@@ -3160,9 +3160,15 @@ void Game::playerChangeOutfit(uint32_t playerId, Outfit_t outfit)
}
Player* player = getPlayerByID(playerId);
+
if (!player) {
return;
}
+
+ const Outfit* playerOutfit = Outfits::getInstance()->getOutfitByLookType(player->getSex(), outfit.lookType);
+ if (!playerOutfit) {
+ outfit.lookMount = 0;
+ }
if (outfit.lookMount != 0) {
Mount* mount = mounts.getMountByClientID(outfit.lookMount); | 1 | /**
* The Forgotten Server - a free and open-source MMORPG server emulator
* Copyright (C) 2015 Mark Samman <mark.samman@gmail.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
#include "otpch.h"
#include "pugicast.h"
#include "items.h"
#include "commands.h"
#include "creature.h"
#include "monster.h"
#include "game.h"
#include "tile.h"
#include "house.h"
#include "actions.h"
#include "combat.h"
#include "iologindata.h"
#include "iomarket.h"
#include "chat.h"
#include "talkaction.h"
#include "spells.h"
#include "configmanager.h"
#include "ban.h"
#include "raids.h"
#include "database.h"
#include "server.h"
#include "ioguild.h"
#include "quests.h"
#include "globalevent.h"
#include "mounts.h"
#include "bed.h"
#include "scheduler.h"
#include "monster.h"
#include "spawn.h"
#include "connection.h"
#include "events.h"
#include "databasetasks.h"
extern ConfigManager g_config;
extern Actions* g_actions;
extern Chat* g_chat;
extern TalkActions* g_talkActions;
extern Spells* g_spells;
extern Vocations g_vocations;
extern GlobalEvents* g_globalEvents;
extern Events* g_events;
Game::Game() :
wildcardTree(false),
offlineTrainingWindow(std::numeric_limits<uint32_t>::max(), "Choose a Skill", "Please choose a skill:")
{
gameState = GAME_STATE_NORMAL;
worldType = WORLD_TYPE_PVP;
serviceManager = nullptr;
lastStageLevel = 0;
playersRecord = 0;
motdNum = 0;
useLastStageLevel = false;
stagesEnabled = false;
lastBucket = 0;
	// ingame minutes advanced per 10-second light event: (1440 minutes/day) * (10 seconds) / (dayCycle seconds/day)
int32_t dayCycle = 3600;
lightHourDelta = 1440 * 10 / dayCycle;
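	// with dayCycle = 3600 this works out to 1440 * 10 / 3600 = 4 ingame minutes per event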
lightHour = SUNRISE + (SUNSET - SUNRISE) / 2;
lightLevel = LIGHT_LEVEL_DAY;
lightState = LIGHT_STATE_DAY;
offlineTrainingWindow.choices.emplace_back("Sword Fighting and Shielding", SKILL_SWORD);
offlineTrainingWindow.choices.emplace_back("Axe Fighting and Shielding", SKILL_AXE);
offlineTrainingWindow.choices.emplace_back("Club Fighting and Shielding", SKILL_CLUB);
offlineTrainingWindow.choices.emplace_back("Distance Fighting and Shielding", SKILL_DISTANCE);
offlineTrainingWindow.choices.emplace_back("Magic Level and Shielding", SKILL_MAGLEVEL);
offlineTrainingWindow.buttons.emplace_back("Okay", 1);
offlineTrainingWindow.buttons.emplace_back("Cancel", 0);
offlineTrainingWindow.defaultEnterButton = 1;
offlineTrainingWindow.defaultEscapeButton = 0;
offlineTrainingWindow.priority = true;
}
Game::~Game()
{
for (const auto& it : guilds) {
delete it.second;
}
}
void Game::start(ServiceManager* manager)
{
serviceManager = manager;
g_scheduler.addEvent(createSchedulerTask(EVENT_LIGHTINTERVAL, std::bind(&Game::checkLight, this)));
g_scheduler.addEvent(createSchedulerTask(EVENT_CREATURE_THINK_INTERVAL, std::bind(&Game::checkCreatures, this, 0)));
g_scheduler.addEvent(createSchedulerTask(EVENT_DECAYINTERVAL, std::bind(&Game::checkDecay, this)));
}
GameState_t Game::getGameState() const
{
return gameState;
}
void Game::setWorldType(WorldType_t type)
{
worldType = type;
}
void Game::setGameState(GameState_t newState)
{
if (gameState == GAME_STATE_SHUTDOWN) {
return; //this cannot be stopped
}
if (gameState == newState) {
return;
}
gameState = newState;
switch (newState) {
case GAME_STATE_INIT: {
commands.loadFromXml();
loadExperienceStages();
groups.load();
g_chat->load();
map.spawns.startup();
raids.loadFromXml();
raids.startup();
quests.loadFromXml();
mounts.loadFromXml();
loadMotdNum();
loadPlayersRecord();
g_globalEvents->startup();
break;
}
case GAME_STATE_SHUTDOWN: {
g_globalEvents->execute(GLOBALEVENT_SHUTDOWN);
//kick all players that are still online
auto it = players.begin();
while (it != players.end()) {
it->second->kickPlayer(true);
it = players.begin();
}
saveMotdNum();
saveGameState();
g_dispatcher.addTask(
createTask(std::bind(&Game::shutdown, this)));
g_scheduler.stop();
g_databaseTasks.stop();
g_dispatcher.stop();
break;
}
case GAME_STATE_CLOSED: {
/* kick all players without the CanAlwaysLogin flag */
auto it = players.begin();
while (it != players.end()) {
if (!it->second->hasFlag(PlayerFlag_CanAlwaysLogin)) {
it->second->kickPlayer(true);
it = players.begin();
} else {
++it;
}
}
saveGameState();
break;
}
default:
break;
}
}
void Game::saveGameState()
{
if (gameState == GAME_STATE_NORMAL) {
setGameState(GAME_STATE_MAINTAIN);
}
std::cout << "Saving server..." << std::endl;
for (const auto& it : players) {
it.second->loginPosition = it.second->getPosition();
IOLoginData::savePlayer(it.second);
}
Map::save();
if (gameState == GAME_STATE_MAINTAIN) {
setGameState(GAME_STATE_NORMAL);
}
}
bool Game::loadMainMap(const std::string& filename)
{
Monster::despawnRange = g_config.getNumber(ConfigManager::DEFAULT_DESPAWNRANGE);
Monster::despawnRadius = g_config.getNumber(ConfigManager::DEFAULT_DESPAWNRADIUS);
return map.loadMap("data/world/" + filename + ".otbm", true);
}
void Game::loadMap(const std::string& path)
{
map.loadMap(path, false);
}
Cylinder* Game::internalGetCylinder(Player* player, const Position& pos) const
{
if (pos.x != 0xFFFF) {
return map.getTile(pos);
}
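	// pos.x == 0xFFFF marks a client "virtual" position: if bit 0x40 of pos.y
	// is set it refers to an open container (low nibble = container id),
	// otherwise pos.y is an inventory slot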
//container
if (pos.y & 0x40) {
uint8_t from_cid = pos.y & 0x0F;
return player->getContainerByID(from_cid);
}
//inventory
return player;
}
Thing* Game::internalGetThing(Player* player, const Position& pos, int32_t index, uint32_t spriteId, stackPosType_t type) const
{
if (pos.x != 0xFFFF) {
Tile* tile = map.getTile(pos);
if (!tile) {
return nullptr;
}
Thing* thing;
switch (type) {
case STACKPOS_LOOK: {
return tile->getTopVisibleThing(player);
}
case STACKPOS_MOVE: {
Item* item = tile->getTopDownItem();
if (item && item->isMoveable()) {
thing = item;
} else {
thing = tile->getTopVisibleCreature(player);
}
break;
}
case STACKPOS_USEITEM: {
thing = tile->getUseItem();
break;
}
case STACKPOS_TOPDOWN_ITEM: {
thing = tile->getTopDownItem();
break;
}
case STACKPOS_USETARGET: {
thing = tile->getTopVisibleCreature(player);
if (!thing) {
thing = tile->getUseItem();
}
break;
}
default: {
thing = nullptr;
break;
}
}
if (player && tile->hasFlag(TILESTATE_SUPPORTS_HANGABLE)) {
			//do extra checks here if the thing is accessible
if (thing && thing->getItem()) {
if (tile->hasProperty(CONST_PROP_ISVERTICAL)) {
if (player->getPosition().x + 1 == tile->getPosition().x) {
thing = nullptr;
}
} else { // horizontal
if (player->getPosition().y + 1 == tile->getPosition().y) {
thing = nullptr;
}
}
}
}
return thing;
}
//container
if (pos.y & 0x40) {
uint8_t fromCid = pos.y & 0x0F;
Container* parentContainer = player->getContainerByID(fromCid);
if (!parentContainer) {
return nullptr;
}
if (parentContainer->getID() == ITEM_BROWSEFIELD) {
Tile* tile = parentContainer->getTile();
if (tile && tile->hasFlag(TILESTATE_SUPPORTS_HANGABLE)) {
if (tile->hasProperty(CONST_PROP_ISVERTICAL)) {
if (player->getPosition().x + 1 == tile->getPosition().x) {
return nullptr;
}
} else { // horizontal
if (player->getPosition().y + 1 == tile->getPosition().y) {
return nullptr;
}
}
}
}
uint8_t slot = pos.z;
return parentContainer->getItemByIndex(player->getContainerIndex(fromCid) + slot);
} else if (pos.y == 0 && pos.z == 0) {
const ItemType& it = Item::items.getItemIdByClientId(spriteId);
if (it.id == 0) {
return nullptr;
}
int32_t subType;
if (it.isFluidContainer() && index < static_cast<int32_t>(sizeof(reverseFluidMap) / sizeof(uint8_t))) {
subType = reverseFluidMap[index];
} else {
subType = -1;
}
return findItemOfType(player, it.id, true, subType);
}
//inventory
slots_t slot = static_cast<slots_t>(pos.y);
return player->getInventoryItem(slot);
}
void Game::internalGetPosition(Item* item, Position& pos, uint8_t& stackpos)
{
pos.x = 0;
pos.y = 0;
pos.z = 0;
stackpos = 0;
Cylinder* topParent = item->getTopParent();
if (topParent) {
if (Player* player = dynamic_cast<Player*>(topParent)) {
pos.x = 0xFFFF;
Container* container = dynamic_cast<Container*>(item->getParent());
if (container) {
pos.y = static_cast<uint16_t>(0x40) | static_cast<uint16_t>(player->getContainerID(container));
pos.z = container->getThingIndex(item);
stackpos = pos.z;
} else {
pos.y = player->getThingIndex(item);
stackpos = pos.y;
}
} else if (Tile* tile = topParent->getTile()) {
pos = tile->getPosition();
stackpos = tile->getThingIndex(item);
}
}
}
Creature* Game::getCreatureByID(uint32_t id)
{
if (id <= Player::playerAutoID) {
return getPlayerByID(id);
} else if (id <= Monster::monsterAutoID) {
return getMonsterByID(id);
} else if (id <= Npc::npcAutoID) {
return getNpcByID(id);
}
return nullptr;
}
Monster* Game::getMonsterByID(uint32_t id)
{
if (id == 0) {
return nullptr;
}
auto it = monsters.find(id);
if (it == monsters.end()) {
return nullptr;
}
return it->second;
}
Npc* Game::getNpcByID(uint32_t id)
{
if (id == 0) {
return nullptr;
}
auto it = npcs.find(id);
if (it == npcs.end()) {
return nullptr;
}
return it->second;
}
Player* Game::getPlayerByID(uint32_t id)
{
if (id == 0) {
return nullptr;
}
auto it = players.find(id);
if (it == players.end()) {
return nullptr;
}
return it->second;
}
Creature* Game::getCreatureByName(const std::string& s)
{
if (s.empty()) {
return nullptr;
}
const std::string& lowerCaseName = asLowerCaseString(s);
auto m_it = mappedPlayerNames.find(lowerCaseName);
if (m_it != mappedPlayerNames.end()) {
return m_it->second;
}
for (const auto& it : npcs) {
if (lowerCaseName == asLowerCaseString(it.second->getName())) {
return it.second;
}
}
for (const auto& it : monsters) {
if (lowerCaseName == asLowerCaseString(it.second->getName())) {
return it.second;
}
}
return nullptr;
}
Npc* Game::getNpcByName(const std::string& s)
{
if (s.empty()) {
return nullptr;
}
const char* npcName = s.c_str();
for (const auto& it : npcs) {
if (strcasecmp(npcName, it.second->getName().c_str()) == 0) {
return it.second;
}
}
return nullptr;
}
Player* Game::getPlayerByName(const std::string& s)
{
if (s.empty()) {
return nullptr;
}
auto it = mappedPlayerNames.find(asLowerCaseString(s));
if (it == mappedPlayerNames.end()) {
return nullptr;
}
return it->second;
}
Player* Game::getPlayerByGUID(const uint32_t& guid)
{
if (guid == 0) {
return nullptr;
}
for (const auto& it : players) {
if (guid == it.second->getGUID()) {
return it.second;
}
}
return nullptr;
}
ReturnValue Game::getPlayerByNameWildcard(const std::string& s, Player*& player)
{
size_t strlen = s.length();
if (strlen == 0 || strlen > 20) {
return RETURNVALUE_PLAYERWITHTHISNAMEISNOTONLINE;
}
if (s.back() == '~') {
const std::string& query = asLowerCaseString(s.substr(0, strlen - 1));
std::string result;
ReturnValue ret = wildcardTree.findOne(query, result);
if (ret != RETURNVALUE_NOERROR) {
return ret;
}
player = getPlayerByName(result);
} else {
player = getPlayerByName(s);
}
if (!player) {
return RETURNVALUE_PLAYERWITHTHISNAMEISNOTONLINE;
}
return RETURNVALUE_NOERROR;
}
Player* Game::getPlayerByAccount(uint32_t acc)
{
for (const auto& it : players) {
if (it.second->getAccount() == acc) {
return it.second;
}
}
return nullptr;
}
bool Game::internalPlaceCreature(Creature* creature, const Position& pos, bool extendedPos /*=false*/, bool forced /*= false*/)
{
if (creature->getParent() != nullptr) {
return false;
}
if (!map.placeCreature(pos, creature, extendedPos, forced)) {
return false;
}
creature->incrementReferenceCounter();
creature->setID();
creature->addList();
return true;
}
bool Game::placeCreature(Creature* creature, const Position& pos, bool extendedPos /*=false*/, bool forced /*= false*/)
{
if (!internalPlaceCreature(creature, pos, extendedPos, forced)) {
return false;
}
SpectatorVec list;
map.getSpectators(list, creature->getPosition(), true);
for (Creature* spectator : list) {
if (Player* tmpPlayer = spectator->getPlayer()) {
tmpPlayer->sendCreatureAppear(creature, creature->getPosition(), true);
}
}
for (Creature* spectator : list) {
spectator->onCreatureAppear(creature, true);
}
creature->getParent()->postAddNotification(creature, nullptr, 0);
addCreatureCheck(creature);
creature->onPlacedCreature();
return true;
}
bool Game::removeCreature(Creature* creature, bool isLogout/* = true*/)
{
if (creature->isRemoved()) {
return false;
}
Tile* tile = creature->getTile();
std::vector<int32_t> oldStackPosVector;
SpectatorVec list;
map.getSpectators(list, tile->getPosition(), true);
for (Creature* spectator : list) {
if (Player* player = spectator->getPlayer()) {
oldStackPosVector.push_back(player->canSeeCreature(creature) ? tile->getStackposOfCreature(player, creature) : -1);
}
}
tile->removeCreature(creature);
const Position& tilePosition = tile->getPosition();
//send to client
size_t i = 0;
for (Creature* spectator : list) {
if (Player* player = spectator->getPlayer()) {
player->sendRemoveTileThing(tilePosition, oldStackPosVector[i++]);
}
}
//event method
for (Creature* spectator : list) {
spectator->onRemoveCreature(creature, isLogout);
}
creature->getParent()->postRemoveNotification(creature, nullptr, 0);
creature->removeList();
creature->setRemoved();
ReleaseCreature(creature);
removeCreatureCheck(creature);
for (Creature* summon : creature->summons) {
summon->setLossSkill(false);
removeCreature(summon);
}
return true;
}
void Game::playerMoveThing(uint32_t playerId, const Position& fromPos,
uint16_t spriteId, uint8_t fromStackPos, const Position& toPos, uint8_t count)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
uint8_t fromIndex = 0;
if (fromPos.x == 0xFFFF) {
if (fromPos.y & 0x40) {
fromIndex = fromPos.z;
} else {
fromIndex = static_cast<uint8_t>(fromPos.y);
}
} else {
fromIndex = fromStackPos;
}
Thing* thing = internalGetThing(player, fromPos, fromIndex, 0, STACKPOS_MOVE);
if (!thing) {
player->sendCancelMessage(RETURNVALUE_NOTPOSSIBLE);
return;
}
if (Creature* movingCreature = thing->getCreature()) {
Tile* tile = map.getTile(toPos);
if (!tile) {
player->sendCancelMessage(RETURNVALUE_NOTPOSSIBLE);
return;
}
if (Position::areInRange<1, 1, 0>(movingCreature->getPosition(), player->getPosition())) {
SchedulerTask* task = createSchedulerTask(1000,
std::bind(&Game::playerMoveCreatureByID, this, player->getID(),
movingCreature->getID(), movingCreature->getPosition(), tile->getPosition()));
player->setNextActionTask(task);
} else {
playerMoveCreature(player, movingCreature, movingCreature->getPosition(), tile);
}
} else if (thing->getItem()) {
Cylinder* toCylinder = internalGetCylinder(player, toPos);
if (!toCylinder) {
player->sendCancelMessage(RETURNVALUE_NOTPOSSIBLE);
return;
}
playerMoveItem(player, fromPos, spriteId, fromStackPos, toPos, count, thing->getItem(), toCylinder);
}
}
void Game::playerMoveCreatureByID(uint32_t playerId, uint32_t movingCreatureId, const Position& movingCreatureOrigPos, const Position& toPos)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
Creature* movingCreature = getCreatureByID(movingCreatureId);
if (!movingCreature) {
return;
}
Tile* toTile = map.getTile(toPos);
if (!toTile) {
player->sendCancelMessage(RETURNVALUE_NOTPOSSIBLE);
return;
}
playerMoveCreature(player, movingCreature, movingCreatureOrigPos, toTile);
}
void Game::playerMoveCreature(Player* player, Creature* movingCreature, const Position& movingCreatureOrigPos, Tile* toTile)
{
if (!player->canDoAction()) {
uint32_t delay = player->getNextActionTime();
SchedulerTask* task = createSchedulerTask(delay, std::bind(&Game::playerMoveCreatureByID,
this, player->getID(), movingCreature->getID(), movingCreatureOrigPos, toTile->getPosition()));
player->setNextActionTask(task);
return;
}
player->setNextActionTask(nullptr);
if (!Position::areInRange<1, 1, 0>(movingCreatureOrigPos, player->getPosition())) {
//need to walk to the creature first before moving it
std::forward_list<Direction> listDir;
if (player->getPathTo(movingCreatureOrigPos, listDir, 0, 1, true, true)) {
g_dispatcher.addTask(createTask(std::bind(&Game::playerAutoWalk,
this, player->getID(), listDir)));
SchedulerTask* task = createSchedulerTask(1500, std::bind(&Game::playerMoveCreatureByID, this,
player->getID(), movingCreature->getID(), movingCreatureOrigPos, toTile->getPosition()));
player->setNextWalkActionTask(task);
} else {
player->sendCancelMessage(RETURNVALUE_THEREISNOWAY);
}
return;
}
if ((!movingCreature->isPushable() && !player->hasFlag(PlayerFlag_CanPushAllCreatures)) ||
(movingCreature->isInGhostMode() && !player->isAccessPlayer())) {
player->sendCancelMessage(RETURNVALUE_NOTMOVEABLE);
return;
}
//check throw distance
const Position& movingCreaturePos = movingCreature->getPosition();
const Position& toPos = toTile->getPosition();
if ((Position::getDistanceX(movingCreaturePos, toPos) > movingCreature->getThrowRange()) || (Position::getDistanceY(movingCreaturePos, toPos) > movingCreature->getThrowRange()) || (Position::getDistanceZ(movingCreaturePos, toPos) * 4 > movingCreature->getThrowRange())) {
player->sendCancelMessage(RETURNVALUE_DESTINATIONOUTOFREACH);
return;
}
if (player != movingCreature) {
if (toTile->hasProperty(CONST_PROP_BLOCKPATH)) {
player->sendCancelMessage(RETURNVALUE_NOTENOUGHROOM);
return;
} else if ((movingCreature->getZone() == ZONE_PROTECTION && !toTile->hasFlag(TILESTATE_PROTECTIONZONE)) || (movingCreature->getZone() == ZONE_NOPVP && !toTile->hasFlag(TILESTATE_NOPVPZONE))) {
player->sendCancelMessage(RETURNVALUE_NOTPOSSIBLE);
return;
} else {
if (CreatureVector* tileCreatures = toTile->getCreatures()) {
for (Creature* tileCreature : *tileCreatures) {
if (!tileCreature->isInGhostMode()) {
player->sendCancelMessage(RETURNVALUE_NOTENOUGHROOM);
return;
}
}
}
Npc* movingNpc = movingCreature->getNpc();
if (movingNpc && !Spawns::isInZone(movingNpc->getMasterPos(), movingNpc->getMasterRadius(), toPos)) {
player->sendCancelMessage(RETURNVALUE_NOTENOUGHROOM);
return;
}
}
}
if (!g_events->eventPlayerOnMoveCreature(player, movingCreature, movingCreaturePos, toPos)) {
return;
}
ReturnValue ret = internalMoveCreature(*movingCreature, *toTile);
if (ret != RETURNVALUE_NOERROR) {
player->sendCancelMessage(ret);
}
}
ReturnValue Game::internalMoveCreature(Creature* creature, Direction direction, uint32_t flags /*= 0*/)
{
const Position& currentPos = creature->getPosition();
Position destPos = getNextPosition(direction, currentPos);
bool diagonalMovement = (direction & DIRECTION_DIAGONAL_MASK) != 0;
if (creature->getPlayer() && !diagonalMovement) {
//try go up
if (currentPos.z != 8 && creature->getTile()->hasHeight(3)) {
Tile* tmpTile = map.getTile(currentPos.x, currentPos.y, currentPos.getZ() - 1);
if (tmpTile == nullptr || (tmpTile->getGround() == nullptr && !tmpTile->hasProperty(CONST_PROP_BLOCKSOLID))) {
tmpTile = map.getTile(destPos.x, destPos.y, destPos.getZ() - 1);
if (tmpTile && tmpTile->getGround() && !tmpTile->hasProperty(CONST_PROP_BLOCKSOLID)) {
flags = flags | FLAG_IGNOREBLOCKITEM | FLAG_IGNOREBLOCKCREATURE;
if (!tmpTile->floorChange()) {
destPos.z--;
}
}
}
} else {
//try go down
Tile* tmpTile = map.getTile(destPos.x, destPos.y, destPos.z);
if (currentPos.z != 7 && (tmpTile == nullptr || (tmpTile->getGround() == nullptr && !tmpTile->hasProperty(CONST_PROP_BLOCKSOLID)))) {
tmpTile = map.getTile(destPos.x, destPos.y, destPos.z + 1);
if (tmpTile && tmpTile->hasHeight(3)) {
flags |= FLAG_IGNOREBLOCKITEM | FLAG_IGNOREBLOCKCREATURE;
destPos.z++;
}
}
}
}
Tile* toTile = map.getTile(destPos);
if (!toTile) {
return RETURNVALUE_NOTPOSSIBLE;
}
return internalMoveCreature(*creature, *toTile, flags);
}
ReturnValue Game::internalMoveCreature(Creature& creature, Tile& toTile, uint32_t flags /*= 0*/)
{
//check if we can move the creature to the destination
ReturnValue ret = toTile.queryAdd(0, creature, 1, flags);
if (ret != RETURNVALUE_NOERROR) {
return ret;
}
map.moveCreature(creature, toTile);
if (creature.getParent() != &toTile) {
return RETURNVALUE_NOERROR;
}
int32_t index = 0;
Item* toItem = nullptr;
Tile* subCylinder = nullptr;
Tile* toCylinder = &toTile;
Tile* fromCylinder = nullptr;
uint32_t n = 0;
while ((subCylinder = toCylinder->queryDestination(index, creature, &toItem, flags)) != toCylinder) {
map.moveCreature(creature, *subCylinder);
if (creature.getParent() != subCylinder) {
			//could happen if a script moves the creature
fromCylinder = nullptr;
break;
}
fromCylinder = toCylinder;
toCylinder = subCylinder;
flags = 0;
//to prevent infinite loop
if (++n >= MAP_MAX_LAYERS) {
break;
}
}
if (fromCylinder) {
const Position& fromPosition = fromCylinder->getPosition();
const Position& toPosition = toCylinder->getPosition();
if (fromPosition.z != toPosition.z && (fromPosition.x != toPosition.x || fromPosition.y != toPosition.y)) {
Direction dir = getDirectionTo(fromPosition, toPosition);
if ((dir & DIRECTION_DIAGONAL_MASK) == 0) {
internalCreatureTurn(&creature, dir);
}
}
}
return RETURNVALUE_NOERROR;
}
void Game::playerMoveItemByPlayerID(uint32_t playerId, const Position& fromPos, uint16_t spriteId, uint8_t fromStackPos, const Position& toPos, uint8_t count)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
playerMoveItem(player, fromPos, spriteId, fromStackPos, toPos, count, nullptr, nullptr);
}
void Game::playerMoveItem(Player* player, const Position& fromPos,
uint16_t spriteId, uint8_t fromStackPos, const Position& toPos, uint8_t count, Item* item, Cylinder* toCylinder)
{
if (!player->canDoAction()) {
uint32_t delay = player->getNextActionTime();
SchedulerTask* task = createSchedulerTask(delay, std::bind(&Game::playerMoveItemByPlayerID, this,
player->getID(), fromPos, spriteId, fromStackPos, toPos, count));
player->setNextActionTask(task);
return;
}
player->setNextActionTask(nullptr);
if (item == nullptr) {
uint8_t fromIndex = 0;
if (fromPos.x == 0xFFFF) {
if (fromPos.y & 0x40) {
fromIndex = fromPos.z;
} else {
fromIndex = static_cast<uint8_t>(fromPos.y);
}
} else {
fromIndex = fromStackPos;
}
Thing* thing = internalGetThing(player, fromPos, fromIndex, 0, STACKPOS_MOVE);
if (!thing || !thing->getItem()) {
player->sendCancelMessage(RETURNVALUE_NOTPOSSIBLE);
return;
}
item = thing->getItem();
}
if (item->getClientID() != spriteId) {
player->sendCancelMessage(RETURNVALUE_NOTPOSSIBLE);
return;
}
Cylinder* fromCylinder = internalGetCylinder(player, fromPos);
if (fromCylinder == nullptr) {
player->sendCancelMessage(RETURNVALUE_NOTPOSSIBLE);
return;
}
if (toCylinder == nullptr) {
toCylinder = internalGetCylinder(player, toPos);
if (toCylinder == nullptr) {
player->sendCancelMessage(RETURNVALUE_NOTPOSSIBLE);
return;
}
}
if (!item->isPushable() || item->hasAttribute(ITEM_ATTRIBUTE_UNIQUEID)) {
player->sendCancelMessage(RETURNVALUE_NOTMOVEABLE);
return;
}
const Position& playerPos = player->getPosition();
const Position& mapFromPos = fromCylinder->getTile()->getPosition();
if (playerPos.z != mapFromPos.z) {
player->sendCancelMessage(playerPos.z > mapFromPos.z ? RETURNVALUE_FIRSTGOUPSTAIRS : RETURNVALUE_FIRSTGODOWNSTAIRS);
return;
}
if (!Position::areInRange<1, 1>(playerPos, mapFromPos)) {
		//need to walk to the item first before moving it
std::forward_list<Direction> listDir;
if (player->getPathTo(item->getPosition(), listDir, 0, 1, true, true)) {
g_dispatcher.addTask(createTask(std::bind(&Game::playerAutoWalk,
this, player->getID(), listDir)));
SchedulerTask* task = createSchedulerTask(400, std::bind(&Game::playerMoveItemByPlayerID, this,
player->getID(), fromPos, spriteId, fromStackPos, toPos, count));
player->setNextWalkActionTask(task);
} else {
player->sendCancelMessage(RETURNVALUE_THEREISNOWAY);
}
return;
}
const Tile* toCylinderTile = toCylinder->getTile();
const Position& mapToPos = toCylinderTile->getPosition();
//hangable item specific code
if (item->isHangable() && toCylinderTile->hasFlag(TILESTATE_SUPPORTS_HANGABLE)) {
//destination supports hangable objects so need to move there first
bool vertical = toCylinderTile->hasProperty(CONST_PROP_ISVERTICAL);
if (vertical) {
if (playerPos.x + 1 == mapToPos.x) {
player->sendCancelMessage(RETURNVALUE_NOTPOSSIBLE);
return;
}
} else { // horizontal
if (playerPos.y + 1 == mapToPos.y) {
player->sendCancelMessage(RETURNVALUE_NOTPOSSIBLE);
return;
}
}
if (!Position::areInRange<1, 1, 0>(playerPos, mapToPos)) {
Position walkPos = mapToPos;
if (vertical) {
walkPos.x++;
} else {
walkPos.y++;
}
Position itemPos = fromPos;
uint8_t itemStackPos = fromStackPos;
if (fromPos.x != 0xFFFF && Position::areInRange<1, 1>(mapFromPos, playerPos)
&& !Position::areInRange<1, 1, 0>(mapFromPos, walkPos)) {
//need to pickup the item first
Item* moveItem = nullptr;
ReturnValue ret = internalMoveItem(fromCylinder, player, INDEX_WHEREEVER, item, count, &moveItem);
if (ret != RETURNVALUE_NOERROR) {
player->sendCancelMessage(ret);
return;
}
				//changing the position since it's now in the inventory of the player
internalGetPosition(moveItem, itemPos, itemStackPos);
}
std::forward_list<Direction> listDir;
if (player->getPathTo(walkPos, listDir, 0, 0, true, true)) {
g_dispatcher.addTask(createTask(std::bind(&Game::playerAutoWalk,
this, player->getID(), listDir)));
SchedulerTask* task = createSchedulerTask(400, std::bind(&Game::playerMoveItemByPlayerID, this,
player->getID(), itemPos, spriteId, itemStackPos, toPos, count));
player->setNextWalkActionTask(task);
} else {
player->sendCancelMessage(RETURNVALUE_THEREISNOWAY);
}
return;
}
}
if ((Position::getDistanceX(playerPos, mapToPos) > item->getThrowRange()) ||
(Position::getDistanceY(playerPos, mapToPos) > item->getThrowRange()) ||
(Position::getDistanceZ(mapFromPos, mapToPos) * 4 > item->getThrowRange())) {
player->sendCancelMessage(RETURNVALUE_DESTINATIONOUTOFREACH);
return;
}
if (!canThrowObjectTo(mapFromPos, mapToPos)) {
player->sendCancelMessage(RETURNVALUE_CANNOTTHROW);
return;
}
if (!g_events->eventPlayerOnMoveItem(player, item, count, fromPos, toPos)) {
return;
}
uint8_t toIndex = 0;
if (toPos.x == 0xFFFF) {
if (toPos.y & 0x40) {
toIndex = toPos.z;
} else {
toIndex = static_cast<uint8_t>(toPos.y);
}
}
ReturnValue ret = internalMoveItem(fromCylinder, toCylinder, toIndex, item, count, nullptr, 0, player);
if (ret != RETURNVALUE_NOERROR) {
player->sendCancelMessage(ret);
}
}
ReturnValue Game::internalMoveItem(Cylinder* fromCylinder, Cylinder* toCylinder, int32_t index,
Item* item, uint32_t count, Item** _moveItem, uint32_t flags /*= 0*/, Creature* actor/* = nullptr*/, Item* tradeItem/* = nullptr*/)
{
Tile* fromTile = fromCylinder->getTile();
if (fromTile) {
auto it = browseFields.find(fromTile);
if (it != browseFields.end() && it->second == fromCylinder) {
fromCylinder = fromTile;
}
}
Item* toItem = nullptr;
Cylinder* subCylinder;
int floorN = 0;
while ((subCylinder = toCylinder->queryDestination(index, *item, &toItem, flags)) != toCylinder) {
toCylinder = subCylinder;
flags = 0;
//to prevent infinite loop
if (++floorN >= MAP_MAX_LAYERS) {
break;
}
}
//destination is the same as the source?
if (item == toItem) {
return RETURNVALUE_NOERROR; //silently ignore move
}
//check if we can add this item
ReturnValue ret = toCylinder->queryAdd(index, *item, count, flags, actor);
if (ret == RETURNVALUE_NEEDEXCHANGE) {
//check if we can add it to source cylinder
ret = fromCylinder->queryAdd(fromCylinder->getThingIndex(item), *toItem, toItem->getItemCount(), 0);
if (ret == RETURNVALUE_NOERROR) {
//check how much we can move
uint32_t maxExchangeQueryCount = 0;
ReturnValue retExchangeMaxCount = fromCylinder->queryMaxCount(INDEX_WHEREEVER, *toItem, toItem->getItemCount(), maxExchangeQueryCount, 0);
if (retExchangeMaxCount != RETURNVALUE_NOERROR && maxExchangeQueryCount == 0) {
return retExchangeMaxCount;
}
if (toCylinder->queryRemove(*toItem, toItem->getItemCount(), flags) == RETURNVALUE_NOERROR) {
int32_t oldToItemIndex = toCylinder->getThingIndex(toItem);
toCylinder->removeThing(toItem, toItem->getItemCount());
fromCylinder->addThing(toItem);
if (oldToItemIndex != -1) {
toCylinder->postRemoveNotification(toItem, fromCylinder, oldToItemIndex);
}
int32_t newToItemIndex = fromCylinder->getThingIndex(toItem);
if (newToItemIndex != -1) {
fromCylinder->postAddNotification(toItem, toCylinder, newToItemIndex);
}
ret = toCylinder->queryAdd(index, *item, count, flags);
toItem = nullptr;
}
}
}
if (ret != RETURNVALUE_NOERROR) {
return ret;
}
//check how much we can move
uint32_t maxQueryCount = 0;
ReturnValue retMaxCount = toCylinder->queryMaxCount(index, *item, count, maxQueryCount, flags);
if (retMaxCount != RETURNVALUE_NOERROR && maxQueryCount == 0) {
return retMaxCount;
}
uint32_t m;
if (item->isStackable()) {
m = std::min<uint32_t>(count, maxQueryCount);
} else {
m = maxQueryCount;
}
Item* moveItem = item;
//check if we can remove this item
ret = fromCylinder->queryRemove(*item, m, flags);
if (ret != RETURNVALUE_NOERROR) {
return ret;
}
if (tradeItem) {
if (toCylinder->getItem() == tradeItem) {
return RETURNVALUE_NOTENOUGHROOM;
}
Cylinder* tmpCylinder = toCylinder->getParent();
while (tmpCylinder) {
if (tmpCylinder->getItem() == tradeItem) {
return RETURNVALUE_NOTENOUGHROOM;
}
tmpCylinder = tmpCylinder->getParent();
}
}
//remove the item
int32_t itemIndex = fromCylinder->getThingIndex(item);
Item* updateItem = nullptr;
fromCylinder->removeThing(item, m);
//update item(s)
if (item->isStackable()) {
uint32_t n;
if (item->equals(toItem)) {
n = std::min<uint32_t>(100 - toItem->getItemCount(), m);
toCylinder->updateThing(toItem, toItem->getID(), toItem->getItemCount() + n);
updateItem = toItem;
} else {
n = 0;
}
int32_t newCount = m - n;
if (newCount > 0) {
moveItem = item->clone();
moveItem->setItemCount(newCount);
} else {
moveItem = nullptr;
}
if (item->isRemoved()) {
ReleaseItem(item);
}
}
//add item
if (moveItem /*m - n > 0*/) {
toCylinder->addThing(index, moveItem);
}
if (itemIndex != -1) {
fromCylinder->postRemoveNotification(item, toCylinder, itemIndex);
}
if (moveItem) {
int32_t moveItemIndex = toCylinder->getThingIndex(moveItem);
if (moveItemIndex != -1) {
toCylinder->postAddNotification(moveItem, fromCylinder, moveItemIndex);
}
}
if (updateItem) {
int32_t updateItemIndex = toCylinder->getThingIndex(updateItem);
if (updateItemIndex != -1) {
toCylinder->postAddNotification(updateItem, fromCylinder, updateItemIndex);
}
}
if (_moveItem) {
if (moveItem) {
*_moveItem = moveItem;
} else {
*_moveItem = item;
}
}
//we could not move all, inform the player
if (item->isStackable() && maxQueryCount < count) {
return retMaxCount;
}
return ret;
}
ReturnValue Game::internalAddItem(Cylinder* toCylinder, Item* item, int32_t index /*= INDEX_WHEREEVER*/,
uint32_t flags/* = 0*/, bool test/* = false*/)
{
uint32_t remainderCount = 0;
return internalAddItem(toCylinder, item, index, flags, test, remainderCount);
}
ReturnValue Game::internalAddItem(Cylinder* toCylinder, Item* item, int32_t index,
uint32_t flags, bool test, uint32_t& remainderCount)
{
if (toCylinder == nullptr || item == nullptr) {
return RETURNVALUE_NOTPOSSIBLE;
}
Cylinder* destCylinder = toCylinder;
Item* toItem = nullptr;
toCylinder = toCylinder->queryDestination(index, *item, &toItem, flags);
//check if we can add this item
ReturnValue ret = toCylinder->queryAdd(index, *item, item->getItemCount(), flags);
if (ret != RETURNVALUE_NOERROR) {
return ret;
}
/*
	Check if we can add the whole amount; we do this by checking against the original cylinder,
since the queryDestination can return a cylinder that might only hold a part of the full amount.
*/
uint32_t maxQueryCount = 0;
ret = destCylinder->queryMaxCount(INDEX_WHEREEVER, *item, item->getItemCount(), maxQueryCount, flags);
if (ret != RETURNVALUE_NOERROR) {
return ret;
}
if (test) {
return RETURNVALUE_NOERROR;
}
if (item->isStackable() && item->equals(toItem)) {
uint32_t m = std::min<uint32_t>(item->getItemCount(), maxQueryCount);
uint32_t n = std::min<uint32_t>(100 - toItem->getItemCount(), m);
toCylinder->updateThing(toItem, toItem->getID(), toItem->getItemCount() + n);
int32_t count = m - n;
if (count > 0) {
if (item->getItemCount() != count) {
Item* remainderItem = item->clone();
remainderItem->setItemCount(count);
if (internalAddItem(destCylinder, remainderItem, INDEX_WHEREEVER, flags, false) != RETURNVALUE_NOERROR) {
ReleaseItem(remainderItem);
remainderCount = count;
}
} else {
toCylinder->addThing(index, item);
int32_t itemIndex = toCylinder->getThingIndex(item);
if (itemIndex != -1) {
toCylinder->postAddNotification(item, nullptr, itemIndex);
}
}
} else {
//fully merged with toItem, item will be destroyed
item->onRemoved();
ReleaseItem(item);
int32_t itemIndex = toCylinder->getThingIndex(toItem);
if (itemIndex != -1) {
toCylinder->postAddNotification(toItem, nullptr, itemIndex);
}
}
} else {
toCylinder->addThing(index, item);
int32_t itemIndex = toCylinder->getThingIndex(item);
if (itemIndex != -1) {
toCylinder->postAddNotification(item, nullptr, itemIndex);
}
}
return RETURNVALUE_NOERROR;
}
ReturnValue Game::internalRemoveItem(Item* item, int32_t count /*= -1*/, bool test /*= false*/, uint32_t flags /*= 0*/)
{
Cylinder* cylinder = item->getParent();
if (cylinder == nullptr) {
return RETURNVALUE_NOTPOSSIBLE;
}
Tile* fromTile = cylinder->getTile();
if (fromTile) {
auto it = browseFields.find(fromTile);
if (it != browseFields.end() && it->second == cylinder) {
cylinder = fromTile;
}
}
if (count == -1) {
count = item->getItemCount();
}
//check if we can remove this item
ReturnValue ret = cylinder->queryRemove(*item, count, flags | FLAG_IGNORENOTMOVEABLE);
if (ret != RETURNVALUE_NOERROR) {
return ret;
}
if (!item->canRemove()) {
return RETURNVALUE_NOTPOSSIBLE;
}
if (!test) {
int32_t index = cylinder->getThingIndex(item);
//remove the item
cylinder->removeThing(item, count);
if (item->isRemoved()) {
ReleaseItem(item);
}
cylinder->postRemoveNotification(item, nullptr, index);
}
item->onRemoved();
return RETURNVALUE_NOERROR;
}
ReturnValue Game::internalPlayerAddItem(Player* player, Item* item, bool dropOnMap /*= true*/, slots_t slot /*= CONST_SLOT_WHEREEVER*/)
{
uint32_t remainderCount = 0;
ReturnValue ret = internalAddItem(player, item, static_cast<int32_t>(slot), 0, false, remainderCount);
if (remainderCount != 0) {
Item* remainderItem = Item::CreateItem(item->getID(), remainderCount);
ReturnValue remaindRet = internalAddItem(player->getTile(), remainderItem, INDEX_WHEREEVER, FLAG_NOLIMIT);
if (remaindRet != RETURNVALUE_NOERROR) {
ReleaseItem(remainderItem);
}
}
if (ret != RETURNVALUE_NOERROR && dropOnMap) {
ret = internalAddItem(player->getTile(), item, INDEX_WHEREEVER, FLAG_NOLIMIT);
}
return ret;
}
Item* Game::findItemOfType(Cylinder* cylinder, uint16_t itemId,
bool depthSearch /*= true*/, int32_t subType /*= -1*/) const
{
if (cylinder == nullptr) {
return nullptr;
}
std::vector<Container*> containers;
for (size_t i = cylinder->getFirstIndex(), j = cylinder->getLastIndex(); i < j; ++i) {
Thing* thing = cylinder->getThing(i);
if (!thing) {
continue;
}
Item* item = thing->getItem();
if (!item) {
continue;
}
if (item->getID() == itemId && (subType == -1 || subType == item->getSubType())) {
return item;
}
if (depthSearch) {
Container* container = item->getContainer();
if (container) {
containers.push_back(container);
}
}
}
size_t i = 0;
while (i < containers.size()) {
Container* container = containers[i++];
for (Item* item : container->getItemList()) {
if (item->getID() == itemId && (subType == -1 || subType == item->getSubType())) {
return item;
}
Container* tmpContainer = item->getContainer();
if (tmpContainer) {
containers.push_back(tmpContainer);
}
}
}
return nullptr;
}
bool Game::removeMoney(Cylinder* cylinder, uint64_t money, uint32_t flags /*= 0*/)
{
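	// collects every coin reachable from the cylinder into a worth-ordered
	// multimap, then removes the cheapest coins first; when an entry is worth
	// more than the amount still owed, the difference is paid back via addMoney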
if (cylinder == nullptr) {
return false;
}
	if (money == 0) {
return true;
}
std::vector<Container*> containers;
std::multimap<uint64_t, Item*> moneyMap;
uint64_t moneyCount = 0;
for (size_t i = cylinder->getFirstIndex(), j = cylinder->getLastIndex(); i < j; ++i) {
Thing* thing = cylinder->getThing(i);
if (!thing) {
continue;
}
Item* item = thing->getItem();
if (!item) {
continue;
}
Container* container = item->getContainer();
if (container) {
containers.push_back(container);
} else {
int32_t worth = item->getWorth();
if (worth != 0) {
moneyCount += worth;
moneyMap.emplace(worth, item);
}
}
}
size_t i = 0;
while (i < containers.size()) {
Container* container = containers[i++];
for (Item* item : container->getItemList()) {
Container* tmpContainer = item->getContainer();
if (tmpContainer) {
containers.push_back(tmpContainer);
} else {
int32_t worth = item->getWorth();
if (worth != 0) {
moneyCount += worth;
moneyMap.emplace(worth, item);
}
}
}
}
if (moneyCount < money) {
return false;
}
for (const auto& moneyEntry : moneyMap) {
Item* item = moneyEntry.second;
if (moneyEntry.first > money) {
uint32_t worth = moneyEntry.first / item->getItemCount();
uint32_t removeCount = (money / worth) + 1;
addMoney(cylinder, (worth * removeCount) - money, flags);
internalRemoveItem(item, removeCount);
money = 0;
} else {
internalRemoveItem(item);
money -= moneyEntry.first;
}
if (money == 0) {
return true;
}
}
return false;
}
void Game::addMoney(Cylinder* cylinder, uint64_t money, uint32_t flags /*= 0*/)
{
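	// e.g. money = 12345 -> 1 crystal coin (10000), 23 platinum coins (2300)
	// and 45 gold coins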
uint32_t crys = money / 10000;
money -= crys * 10000;
while (crys > 0) {
Item* remaindItem = Item::CreateItem(ITEM_CRYSTAL_COIN, std::min<int32_t>(100, crys));
ReturnValue ret = internalAddItem(cylinder, remaindItem, INDEX_WHEREEVER, flags);
if (ret != RETURNVALUE_NOERROR) {
internalAddItem(cylinder->getTile(), remaindItem, INDEX_WHEREEVER, FLAG_NOLIMIT);
}
crys -= std::min<int32_t>(100, crys);
}
uint16_t plat = money / 100;
if (plat != 0) {
Item* remaindItem = Item::CreateItem(ITEM_PLATINUM_COIN, plat);
ReturnValue ret = internalAddItem(cylinder, remaindItem, INDEX_WHEREEVER, flags);
if (ret != RETURNVALUE_NOERROR) {
internalAddItem(cylinder->getTile(), remaindItem, INDEX_WHEREEVER, FLAG_NOLIMIT);
}
money -= plat * 100;
}
if (money != 0) {
Item* remaindItem = Item::CreateItem(ITEM_GOLD_COIN, money);
ReturnValue ret = internalAddItem(cylinder, remaindItem, INDEX_WHEREEVER, flags);
if (ret != RETURNVALUE_NOERROR) {
internalAddItem(cylinder->getTile(), remaindItem, INDEX_WHEREEVER, FLAG_NOLIMIT);
}
}
}
Item* Game::transformItem(Item* item, uint16_t newId, int32_t newCount /*= -1*/)
{
if (item->getID() == newId && (newCount == -1 || (newCount == item->getSubType() && newCount != 0))) { //chargeless item placed on map = infinite
return item;
}
Cylinder* cylinder = item->getParent();
if (cylinder == nullptr) {
return nullptr;
}
Tile* fromTile = cylinder->getTile();
if (fromTile) {
auto it = browseFields.find(fromTile);
if (it != browseFields.end() && it->second == cylinder) {
cylinder = fromTile;
}
}
int32_t itemIndex = cylinder->getThingIndex(item);
if (itemIndex == -1) {
return item;
}
if (!item->canTransform()) {
return item;
}
const ItemType& newType = Item::items[newId];
if (newType.id == 0) {
return item;
}
const ItemType& curType = Item::items[item->getID()];
if (curType.alwaysOnTop != newType.alwaysOnTop) {
//This only occurs when you transform items on tiles from a downItem to a topItem (or vice versa)
//Remove the old, and add the new
cylinder->removeThing(item, item->getItemCount());
cylinder->postRemoveNotification(item, cylinder, itemIndex);
item->setID(newId);
if (newCount != -1) {
item->setSubType(newCount);
}
cylinder->addThing(item);
Cylinder* newParent = item->getParent();
if (newParent == nullptr) {
ReleaseItem(item);
return nullptr;
}
newParent->postAddNotification(item, cylinder, newParent->getThingIndex(item));
return item;
}
if (curType.type == newType.type) {
		//Both items have the same type so we can safely change id/subtype
if (newCount == 0 && (item->isStackable() || item->hasAttribute(ITEM_ATTRIBUTE_CHARGES))) {
if (item->isStackable()) {
internalRemoveItem(item);
return nullptr;
} else {
int32_t newItemId = newId;
if (curType.id == newType.id) {
newItemId = curType.decayTo;
}
if (newItemId < 0) {
internalRemoveItem(item);
return nullptr;
} else if (newItemId != newId) {
					//Replacing the old item with the new while maintaining the old position
Item* newItem = Item::CreateItem(newItemId, 1);
if (newItem == nullptr) {
return nullptr;
}
cylinder->replaceThing(itemIndex, newItem);
cylinder->postAddNotification(newItem, cylinder, itemIndex);
item->setParent(nullptr);
cylinder->postRemoveNotification(item, cylinder, itemIndex);
ReleaseItem(item);
return newItem;
} else {
return transformItem(item, newItemId);
}
}
} else {
cylinder->postRemoveNotification(item, cylinder, itemIndex);
uint16_t itemId = item->getID();
int32_t count = item->getSubType();
if (curType.id != newType.id) {
if (newType.group != curType.group) {
item->setDefaultSubtype();
}
itemId = newId;
}
if (newCount != -1 && newType.hasSubType()) {
count = newCount;
}
cylinder->updateThing(item, itemId, count);
cylinder->postAddNotification(item, cylinder, itemIndex);
return item;
}
}
	//Replacing the old item with the new while maintaining the old position
Item* newItem;
if (newCount == -1) {
newItem = Item::CreateItem(newId);
} else {
newItem = Item::CreateItem(newId, newCount);
}
if (newItem == nullptr) {
return nullptr;
}
cylinder->replaceThing(itemIndex, newItem);
cylinder->postAddNotification(newItem, cylinder, itemIndex);
item->setParent(nullptr);
cylinder->postRemoveNotification(item, cylinder, itemIndex);
ReleaseItem(item);
return newItem;
}
ReturnValue Game::internalTeleport(Thing* thing, const Position& newPos, bool pushMove/* = true*/, uint32_t flags /*= 0*/)
{
if (newPos == thing->getPosition()) {
return RETURNVALUE_NOERROR;
} else if (thing->isRemoved()) {
return RETURNVALUE_NOTPOSSIBLE;
}
Tile* toTile = map.getTile(newPos);
if (!toTile) {
return RETURNVALUE_NOTPOSSIBLE;
}
if (Creature* creature = thing->getCreature()) {
ReturnValue ret = toTile->queryAdd(0, *creature, 1, FLAG_NOLIMIT);
if (ret != RETURNVALUE_NOERROR) {
return ret;
}
map.moveCreature(*creature, *toTile, !pushMove);
return RETURNVALUE_NOERROR;
} else if (Item* item = thing->getItem()) {
return internalMoveItem(item->getParent(), toTile, INDEX_WHEREEVER, item, item->getItemCount(), nullptr, flags);
}
return RETURNVALUE_NOTPOSSIBLE;
}
//Implementation of player invoked events
void Game::playerMove(uint32_t playerId, Direction direction)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
player->resetIdleTime();
player->setNextWalkActionTask(nullptr);
player->startAutoWalk(std::forward_list<Direction> { direction });
}
bool Game::playerBroadcastMessage(Player* player, const std::string& text) const
{
if (!player->hasFlag(PlayerFlag_CanBroadcast)) {
return false;
}
std::cout << "> " << player->getName() << " broadcasted: \"" << text << "\"." << std::endl;
for (const auto& it : players) {
it.second->sendPrivateMessage(player, TALKTYPE_BROADCAST, text);
}
return true;
}
void Game::playerCreatePrivateChannel(uint32_t playerId)
{
Player* player = getPlayerByID(playerId);
if (!player || !player->isPremium()) {
return;
}
ChatChannel* channel = g_chat->createChannel(*player, CHANNEL_PRIVATE);
if (!channel || !channel->addUser(*player)) {
return;
}
player->sendCreatePrivateChannel(channel->getId(), channel->getName());
}
void Game::playerChannelInvite(uint32_t playerId, const std::string& name)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
PrivateChatChannel* channel = g_chat->getPrivateChannel(*player);
if (!channel) {
return;
}
Player* invitePlayer = getPlayerByName(name);
if (!invitePlayer) {
return;
}
if (player == invitePlayer) {
return;
}
channel->invitePlayer(*player, *invitePlayer);
}
void Game::playerChannelExclude(uint32_t playerId, const std::string& name)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
PrivateChatChannel* channel = g_chat->getPrivateChannel(*player);
if (!channel) {
return;
}
Player* excludePlayer = getPlayerByName(name);
if (!excludePlayer) {
return;
}
if (player == excludePlayer) {
return;
}
channel->excludePlayer(*player, *excludePlayer);
}
void Game::playerRequestChannels(uint32_t playerId)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
player->sendChannelsDialog();
}
void Game::playerOpenChannel(uint32_t playerId, uint16_t channelId)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
ChatChannel* channel = g_chat->addUserToChannel(*player, channelId);
if (!channel) {
return;
}
const InvitedMap* invitedUsers = channel->getInvitedUsersPtr();
const UsersMap* users;
if (!channel->isPublicChannel()) {
users = &channel->getUsers();
} else {
users = nullptr;
}
player->sendChannel(channel->getId(), channel->getName(), users, invitedUsers);
}
void Game::playerCloseChannel(uint32_t playerId, uint16_t channelId)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
g_chat->removeUserFromChannel(*player, channelId);
}
void Game::playerOpenPrivateChannel(uint32_t playerId, std::string& receiver)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
if (!IOLoginData::formatPlayerName(receiver)) {
player->sendCancelMessage("A player with this name does not exist.");
return;
}
player->sendOpenPrivateChannel(receiver);
}
void Game::playerCloseNpcChannel(uint32_t playerId)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
SpectatorVec list;
map.getSpectators(list, player->getPosition());
for (Creature* spectator : list) {
if (Npc* npc = spectator->getNpc()) {
npc->onPlayerCloseChannel(player);
}
}
}
void Game::playerReceivePing(uint32_t playerId)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
player->receivePing();
}
void Game::playerReceivePingBack(uint32_t playerId)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
player->sendPingBack();
}
void Game::playerAutoWalk(uint32_t playerId, const std::forward_list<Direction>& listDir)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
player->resetIdleTime();
player->setNextWalkTask(nullptr);
player->startAutoWalk(listDir);
}
void Game::playerStopAutoWalk(uint32_t playerId)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
player->stopWalk();
}
void Game::playerUseItemEx(uint32_t playerId, const Position& fromPos, uint8_t fromStackPos, uint16_t fromSpriteId,
const Position& toPos, uint8_t toStackPos, uint16_t toSpriteId)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
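// The client encodes a hotkey use as the pseudo position (0xFFFF, 0, 0) rather than a real map or inventory position.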
bool isHotkey = (fromPos.x == 0xFFFF && fromPos.y == 0 && fromPos.z == 0);
if (isHotkey && !g_config.getBoolean(ConfigManager::AIMBOT_HOTKEY_ENABLED)) {
return;
}
Thing* thing = internalGetThing(player, fromPos, fromStackPos, fromSpriteId, STACKPOS_USEITEM);
if (!thing) {
player->sendCancelMessage(RETURNVALUE_NOTPOSSIBLE);
return;
}
Item* item = thing->getItem();
if (!item || !item->isUseable() || item->getClientID() != fromSpriteId) {
player->sendCancelMessage(RETURNVALUE_CANNOTUSETHISOBJECT);
return;
}
Position walkToPos = fromPos;
ReturnValue ret = g_actions->canUse(player, fromPos);
if (ret == RETURNVALUE_NOERROR) {
ret = g_actions->canUse(player, toPos, item);
if (ret == RETURNVALUE_TOOFARAWAY) {
walkToPos = toPos;
}
}
if (ret != RETURNVALUE_NOERROR) {
if (ret == RETURNVALUE_TOOFARAWAY) {
Position itemPos = fromPos;
uint8_t itemStackPos = fromStackPos;
if (fromPos.x != 0xFFFF && toPos.x != 0xFFFF && Position::areInRange<1, 1, 0>(fromPos, player->getPosition()) &&
!Position::areInRange<1, 1, 0>(fromPos, toPos)) {
Item* moveItem = nullptr;
ret = internalMoveItem(item->getParent(), player, INDEX_WHEREEVER, item, item->getItemCount(), &moveItem);
if (ret != RETURNVALUE_NOERROR) {
player->sendCancelMessage(ret);
return;
}
//changing the position since it's now in the player's inventory
internalGetPosition(moveItem, itemPos, itemStackPos);
}
std::forward_list<Direction> listDir;
if (player->getPathTo(walkToPos, listDir, 0, 1, true, true)) {
g_dispatcher.addTask(createTask(std::bind(&Game::playerAutoWalk, this, player->getID(), listDir)));
SchedulerTask* task = createSchedulerTask(400, std::bind(&Game::playerUseItemEx, this,
playerId, itemPos, itemStackPos, fromSpriteId, toPos, toStackPos, toSpriteId));
player->setNextWalkActionTask(task);
} else {
player->sendCancelMessage(RETURNVALUE_THEREISNOWAY);
}
return;
}
player->sendCancelMessage(ret);
return;
}
if (!player->canDoAction()) {
uint32_t delay = player->getNextActionTime();
SchedulerTask* task = createSchedulerTask(delay, std::bind(&Game::playerUseItemEx, this,
playerId, fromPos, fromStackPos, fromSpriteId, toPos, toStackPos, toSpriteId));
player->setNextActionTask(task);
return;
}
player->resetIdleTime();
player->setNextActionTask(nullptr);
g_actions->useItemEx(player, fromPos, toPos, toStackPos, item, isHotkey);
}
void Game::playerUseItem(uint32_t playerId, const Position& pos, uint8_t stackPos,
uint8_t index, uint16_t spriteId)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
bool isHotkey = (pos.x == 0xFFFF && pos.y == 0 && pos.z == 0);
if (isHotkey && !g_config.getBoolean(ConfigManager::AIMBOT_HOTKEY_ENABLED)) {
return;
}
Thing* thing = internalGetThing(player, pos, stackPos, spriteId, STACKPOS_USEITEM);
if (!thing) {
player->sendCancelMessage(RETURNVALUE_NOTPOSSIBLE);
return;
}
Item* item = thing->getItem();
if (!item || item->isUseable() || item->getClientID() != spriteId) {
player->sendCancelMessage(RETURNVALUE_CANNOTUSETHISOBJECT);
return;
}
ReturnValue ret = g_actions->canUse(player, pos);
if (ret != RETURNVALUE_NOERROR) {
if (ret == RETURNVALUE_TOOFARAWAY) {
std::forward_list<Direction> listDir;
if (player->getPathTo(pos, listDir, 0, 1, true, true)) {
g_dispatcher.addTask(createTask(std::bind(&Game::playerAutoWalk,
this, player->getID(), listDir)));
SchedulerTask* task = createSchedulerTask(400, std::bind(&Game::playerUseItem, this,
playerId, pos, stackPos, index, spriteId));
player->setNextWalkActionTask(task);
return;
}
ret = RETURNVALUE_THEREISNOWAY;
}
player->sendCancelMessage(ret);
return;
}
if (!player->canDoAction()) {
uint32_t delay = player->getNextActionTime();
SchedulerTask* task = createSchedulerTask(delay, std::bind(&Game::playerUseItem, this,
playerId, pos, stackPos, index, spriteId));
player->setNextActionTask(task);
return;
}
player->resetIdleTime();
player->setNextActionTask(nullptr);
g_actions->useItem(player, pos, index, item, isHotkey);
}
void Game::playerUseWithCreature(uint32_t playerId, const Position& fromPos, uint8_t fromStackPos, uint32_t creatureId, uint16_t spriteId)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
Creature* creature = getCreatureByID(creatureId);
if (!creature) {
return;
}
if (!Position::areInRange<7, 5, 0>(creature->getPosition(), player->getPosition())) {
return;
}
bool isHotkey = (fromPos.x == 0xFFFF && fromPos.y == 0 && fromPos.z == 0);
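// With hotkey aiming disabled, items may not be used on other players directly or through hotkeys at all.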
if (!g_config.getBoolean(ConfigManager::AIMBOT_HOTKEY_ENABLED)) {
if (creature->getPlayer() || isHotkey) {
player->sendCancelMessage(RETURNVALUE_DIRECTPLAYERSHOOT);
return;
}
}
Thing* thing = internalGetThing(player, fromPos, fromStackPos, spriteId, STACKPOS_USEITEM);
if (!thing) {
player->sendCancelMessage(RETURNVALUE_NOTPOSSIBLE);
return;
}
Item* item = thing->getItem();
if (!item || !item->isUseable() || item->getClientID() != spriteId) {
player->sendCancelMessage(RETURNVALUE_CANNOTUSETHISOBJECT);
return;
}
Position toPos = creature->getPosition();
Position walkToPos = fromPos;
ReturnValue ret = g_actions->canUse(player, fromPos);
if (ret == RETURNVALUE_NOERROR) {
ret = g_actions->canUse(player, toPos, item);
if (ret == RETURNVALUE_TOOFARAWAY) {
walkToPos = toPos;
}
}
if (ret != RETURNVALUE_NOERROR) {
if (ret == RETURNVALUE_TOOFARAWAY) {
Position itemPos = fromPos;
uint8_t itemStackPos = fromStackPos;
if (fromPos.x != 0xFFFF && Position::areInRange<1, 1, 0>(fromPos, player->getPosition()) && !Position::areInRange<1, 1, 0>(fromPos, toPos)) {
Item* moveItem = nullptr;
ret = internalMoveItem(item->getParent(), player, INDEX_WHEREEVER, item, item->getItemCount(), &moveItem);
if (ret != RETURNVALUE_NOERROR) {
player->sendCancelMessage(ret);
return;
}
//changing the position since it's now in the player's inventory
internalGetPosition(moveItem, itemPos, itemStackPos);
}
std::forward_list<Direction> listDir;
if (player->getPathTo(walkToPos, listDir, 0, 1, true, true)) {
g_dispatcher.addTask(createTask(std::bind(&Game::playerAutoWalk,
this, player->getID(), listDir)));
SchedulerTask* task = createSchedulerTask(400, std::bind(&Game::playerUseWithCreature, this,
playerId, itemPos, itemStackPos, creatureId, spriteId));
player->setNextWalkActionTask(task);
} else {
player->sendCancelMessage(RETURNVALUE_THEREISNOWAY);
}
return;
}
player->sendCancelMessage(ret);
return;
}
if (!player->canDoAction()) {
uint32_t delay = player->getNextActionTime();
SchedulerTask* task = createSchedulerTask(delay, std::bind(&Game::playerUseWithCreature, this,
playerId, fromPos, fromStackPos, creatureId, spriteId));
player->setNextActionTask(task);
return;
}
player->resetIdleTime();
player->setNextActionTask(nullptr);
g_actions->useItemEx(player, fromPos, creature->getPosition(), creature->getParent()->getThingIndex(creature), item, isHotkey, creature);
}
void Game::playerCloseContainer(uint32_t playerId, uint8_t cid)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
player->closeContainer(cid);
player->sendCloseContainer(cid);
}
void Game::playerMoveUpContainer(uint32_t playerId, uint8_t cid)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
Container* container = player->getContainerByID(cid);
if (!container) {
return;
}
Container* parentContainer = dynamic_cast<Container*>(container->getRealParent());
if (!parentContainer) {
Tile* tile = container->getTile();
if (!tile) {
return;
}
auto it = browseFields.find(tile);
if (it == browseFields.end()) {
parentContainer = new Container(tile);
parentContainer->incrementReferenceCounter();
browseFields[tile] = parentContainer;
g_scheduler.addEvent(createSchedulerTask(30000, std::bind(&Game::decreaseBrowseFieldRef, this, tile->getPosition())));
} else {
parentContainer = it->second;
}
}
player->addContainer(cid, parentContainer);
player->sendContainer(cid, parentContainer, parentContainer->hasParent(), player->getContainerIndex(cid));
}
void Game::playerUpdateContainer(uint32_t playerId, uint8_t cid)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
Container* container = player->getContainerByID(cid);
if (!container) {
return;
}
player->sendContainer(cid, container, container->hasParent(), player->getContainerIndex(cid));
}
void Game::playerRotateItem(uint32_t playerId, const Position& pos, uint8_t stackPos, const uint16_t spriteId)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
Thing* thing = internalGetThing(player, pos, stackPos, 0, STACKPOS_TOPDOWN_ITEM);
if (!thing) {
return;
}
Item* item = thing->getItem();
if (!item || item->getClientID() != spriteId || !item->isRotatable() || item->hasAttribute(ITEM_ATTRIBUTE_UNIQUEID)) {
player->sendCancelMessage(RETURNVALUE_NOTPOSSIBLE);
return;
}
if (pos.x != 0xFFFF && !Position::areInRange<1, 1, 0>(pos, player->getPosition())) {
std::forward_list<Direction> listDir;
if (player->getPathTo(pos, listDir, 0, 1, true, true)) {
g_dispatcher.addTask(createTask(std::bind(&Game::playerAutoWalk,
this, player->getID(), listDir)));
SchedulerTask* task = createSchedulerTask(400, std::bind(&Game::playerRotateItem, this,
playerId, pos, stackPos, spriteId));
player->setNextWalkActionTask(task);
} else {
player->sendCancelMessage(RETURNVALUE_THEREISNOWAY);
}
return;
}
uint16_t newId = Item::items[item->getID()].rotateTo;
if (newId != 0) {
transformItem(item, newId);
}
}
void Game::playerWriteItem(uint32_t playerId, uint32_t windowTextId, const std::string& text)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
uint16_t maxTextLength = 0;
uint32_t internalWindowTextId = 0;
Item* writeItem = player->getWriteItem(internalWindowTextId, maxTextLength);
if (text.length() > maxTextLength || windowTextId != internalWindowTextId) {
return;
}
if (!writeItem || writeItem->isRemoved()) {
player->sendCancelMessage(RETURNVALUE_NOTPOSSIBLE);
return;
}
Cylinder* topParent = writeItem->getTopParent();
Player* owner = dynamic_cast<Player*>(topParent);
if (owner && owner != player) {
player->sendCancelMessage(RETURNVALUE_NOTPOSSIBLE);
return;
}
if (!Position::areInRange<1, 1, 0>(writeItem->getPosition(), player->getPosition())) {
player->sendCancelMessage(RETURNVALUE_NOTPOSSIBLE);
return;
}
for (auto creatureEvent : player->getCreatureEvents(CREATURE_EVENT_TEXTEDIT)) {
if (!creatureEvent->executeTextEdit(player, writeItem, text)) {
player->setWriteItem(nullptr);
return;
}
}
if (!text.empty()) {
if (writeItem->getText() != text) {
writeItem->setText(text);
writeItem->setWriter(player->getName());
writeItem->setDate(time(nullptr));
}
} else {
writeItem->resetText();
writeItem->resetWriter();
writeItem->resetDate();
}
uint16_t newId = Item::items[writeItem->getID()].writeOnceItemId;
if (newId != 0) {
transformItem(writeItem, newId);
}
player->setWriteItem(nullptr);
}
void Game::playerBrowseField(uint32_t playerId, const Position& pos)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
const Position& playerPos = player->getPosition();
if (playerPos.z != pos.z) {
player->sendCancelMessage(playerPos.z > pos.z ? RETURNVALUE_FIRSTGOUPSTAIRS : RETURNVALUE_FIRSTGODOWNSTAIRS);
return;
}
if (!Position::areInRange<1, 1>(playerPos, pos)) {
std::forward_list<Direction> listDir;
if (player->getPathTo(pos, listDir, 0, 1, true, true)) {
g_dispatcher.addTask(createTask(std::bind(&Game::playerAutoWalk,
this, player->getID(), listDir)));
SchedulerTask* task = createSchedulerTask(400, std::bind(
&Game::playerBrowseField, this, playerId, pos
));
player->setNextWalkActionTask(task);
} else {
player->sendCancelMessage(RETURNVALUE_THEREISNOWAY);
}
return;
}
Tile* tile = map.getTile(pos);
if (!tile) {
return;
}
if (!g_events->eventPlayerOnBrowseField(player, pos)) {
return;
}
Container* container;
auto it = browseFields.find(tile);
if (it == browseFields.end()) {
container = new Container(tile);
container->incrementReferenceCounter();
browseFields[tile] = container;
g_scheduler.addEvent(createSchedulerTask(30000, std::bind(&Game::decreaseBrowseFieldRef, this, tile->getPosition())));
} else {
container = it->second;
}
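// Map the tile coordinates to a stable pseudo container id in the range 0x07..0x0F, so browsing the same field always reuses the same client container slot.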
uint8_t dummyContainerId = 0xF - ((pos.x % 3) * 3 + (pos.y % 3));
Container* openContainer = player->getContainerByID(dummyContainerId);
if (openContainer) {
player->onCloseContainer(openContainer);
player->closeContainer(dummyContainerId);
} else {
player->addContainer(dummyContainerId, container);
player->sendContainer(dummyContainerId, container, false, 0);
}
}
void Game::playerSeekInContainer(uint32_t playerId, uint8_t containerId, uint16_t index)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
Container* container = player->getContainerByID(containerId);
if (!container || !container->hasPagination()) {
return;
}
if ((index % container->capacity()) != 0 || index >= container->size()) {
return;
}
player->setContainerIndex(containerId, index);
player->sendContainer(containerId, container, false, index);
}
void Game::playerUpdateHouseWindow(uint32_t playerId, uint8_t listId, uint32_t windowTextId, const std::string& text)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
uint32_t internalWindowTextId;
uint32_t internalListId;
House* house = player->getEditHouse(internalWindowTextId, internalListId);
if (house && internalWindowTextId == windowTextId && listId == 0) {
house->setAccessList(internalListId, text);
player->setEditHouse(nullptr);
}
}
void Game::playerRequestTrade(uint32_t playerId, const Position& pos, uint8_t stackPos,
uint32_t tradePlayerId, uint16_t spriteId)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
Player* tradePartner = getPlayerByID(tradePlayerId);
if (!tradePartner || tradePartner == player) {
player->sendTextMessage(MESSAGE_INFO_DESCR, "Sorry, not possible.");
return;
}
if (!Position::areInRange<2, 2, 0>(tradePartner->getPosition(), player->getPosition())) {
std::ostringstream ss;
ss << tradePartner->getName() << " tells you to move closer.";
player->sendTextMessage(MESSAGE_INFO_DESCR, ss.str());
return;
}
if (!canThrowObjectTo(tradePartner->getPosition(), player->getPosition())) {
player->sendCancelMessage(RETURNVALUE_CREATUREISNOTREACHABLE);
return;
}
Thing* tradeThing = internalGetThing(player, pos, stackPos, 0, STACKPOS_TOPDOWN_ITEM);
if (!tradeThing) {
player->sendCancelMessage(RETURNVALUE_NOTPOSSIBLE);
return;
}
Item* tradeItem = tradeThing->getItem();
if (!tradeItem || tradeItem->getClientID() != spriteId || !tradeItem->isPickupable() || tradeItem->hasAttribute(ITEM_ATTRIBUTE_UNIQUEID)) {
player->sendCancelMessage(RETURNVALUE_NOTPOSSIBLE);
return;
}
const Position& playerPosition = player->getPosition();
const Position& tradeItemPosition = tradeItem->getPosition();
if (playerPosition.z != tradeItemPosition.z) {
player->sendCancelMessage(playerPosition.z > tradeItemPosition.z ? RETURNVALUE_FIRSTGOUPSTAIRS : RETURNVALUE_FIRSTGODOWNSTAIRS);
return;
}
if (!Position::areInRange<1, 1>(tradeItemPosition, playerPosition)) {
std::forward_list<Direction> listDir;
if (player->getPathTo(pos, listDir, 0, 1, true, true)) {
g_dispatcher.addTask(createTask(std::bind(&Game::playerAutoWalk,
this, player->getID(), listDir)));
SchedulerTask* task = createSchedulerTask(400, std::bind(&Game::playerRequestTrade, this,
playerId, pos, stackPos, tradePlayerId, spriteId));
player->setNextWalkActionTask(task);
} else {
player->sendCancelMessage(RETURNVALUE_THEREISNOWAY);
}
return;
}
Container* tradeItemContainer = tradeItem->getContainer();
if (tradeItemContainer) {
for (const auto& it : tradeItems) {
Item* item = it.first;
if (tradeItem == item) {
player->sendTextMessage(MESSAGE_INFO_DESCR, "This item is already being traded.");
return;
}
if (tradeItemContainer->isHoldingItem(item)) {
player->sendTextMessage(MESSAGE_INFO_DESCR, "This item is already being traded.");
return;
}
Container* container = item->getContainer();
if (container && container->isHoldingItem(tradeItem)) {
player->sendTextMessage(MESSAGE_INFO_DESCR, "This item is already being traded.");
return;
}
}
} else {
for (const auto& it : tradeItems) {
Item* item = it.first;
if (tradeItem == item) {
player->sendTextMessage(MESSAGE_INFO_DESCR, "This item is already being traded.");
return;
}
Container* container = item->getContainer();
if (container && container->isHoldingItem(tradeItem)) {
player->sendTextMessage(MESSAGE_INFO_DESCR, "This item is already being traded.");
return;
}
}
}
Container* tradeContainer = tradeItem->getContainer();
if (tradeContainer && tradeContainer->getItemHoldingCount() + 1 > 100) {
player->sendTextMessage(MESSAGE_INFO_DESCR, "You can not trade more than 100 items.");
return;
}
if (!g_events->eventPlayerOnTradeRequest(player, tradePartner, tradeItem)) {
return;
}
internalStartTrade(player, tradePartner, tradeItem);
}
bool Game::internalStartTrade(Player* player, Player* tradePartner, Item* tradeItem)
{
if (player->tradeState != TRADE_NONE && !(player->tradeState == TRADE_ACKNOWLEDGE && player->tradePartner == tradePartner)) {
player->sendCancelMessage(RETURNVALUE_YOUAREALREADYTRADING);
return false;
} else if (tradePartner->tradeState != TRADE_NONE && tradePartner->tradePartner != player) {
player->sendCancelMessage(RETURNVALUE_THISPLAYERISALREADYTRADING);
return false;
}
player->tradePartner = tradePartner;
player->tradeItem = tradeItem;
player->tradeState = TRADE_INITIATED;
tradeItem->incrementReferenceCounter();
tradeItems[tradeItem] = player->getID();
player->sendTradeItemRequest(player->getName(), tradeItem, true);
if (tradePartner->tradeState == TRADE_NONE) {
std::ostringstream ss;
ss << player->getName() << " wants to trade with you.";
tradePartner->sendTextMessage(MESSAGE_EVENT_ADVANCE, ss.str());
tradePartner->tradeState = TRADE_ACKNOWLEDGE;
tradePartner->tradePartner = player;
} else {
Item* counterOfferItem = tradePartner->tradeItem;
player->sendTradeItemRequest(tradePartner->getName(), counterOfferItem, false);
tradePartner->sendTradeItemRequest(player->getName(), tradeItem, false);
}
return true;
}
void Game::playerAcceptTrade(uint32_t playerId)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
if (!(player->getTradeState() == TRADE_ACKNOWLEDGE || player->getTradeState() == TRADE_INITIATED)) {
return;
}
Player* tradePartner = player->tradePartner;
if (!tradePartner) {
return;
}
if (!canThrowObjectTo(tradePartner->getPosition(), player->getPosition())) {
player->sendCancelMessage(RETURNVALUE_CREATUREISNOTREACHABLE);
return;
}
player->setTradeState(TRADE_ACCEPT);
if (tradePartner->getTradeState() == TRADE_ACCEPT) {
Item* tradeItem1 = player->tradeItem;
Item* tradeItem2 = tradePartner->tradeItem;
if (!g_events->eventPlayerOnTradeAccept(player, tradePartner, tradeItem1, tradeItem2)) {
internalCloseTrade(player);
return;
}
player->setTradeState(TRADE_TRANSFER);
tradePartner->setTradeState(TRADE_TRANSFER);
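// Both items are now committed to the transfer; remove them from the pending-trade registry and drop the references taken in internalStartTrade().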
std::map<Item*, uint32_t>::iterator it = tradeItems.find(tradeItem1);
if (it != tradeItems.end()) {
ReleaseItem(it->first);
tradeItems.erase(it);
}
it = tradeItems.find(tradeItem2);
if (it != tradeItems.end()) {
ReleaseItem(it->first);
tradeItems.erase(it);
}
bool isSuccess = false;
ReturnValue ret1 = internalAddItem(tradePartner, tradeItem1, INDEX_WHEREEVER, 0, true);
ReturnValue ret2 = internalAddItem(player, tradeItem2, INDEX_WHEREEVER, 0, true);
if (ret1 == RETURNVALUE_NOERROR && ret2 == RETURNVALUE_NOERROR) {
ret1 = internalRemoveItem(tradeItem1, tradeItem1->getItemCount(), true);
ret2 = internalRemoveItem(tradeItem2, tradeItem2->getItemCount(), true);
if (ret1 == RETURNVALUE_NOERROR && ret2 == RETURNVALUE_NOERROR) {
Cylinder* cylinder1 = tradeItem1->getParent();
Cylinder* cylinder2 = tradeItem2->getParent();
uint32_t count1 = tradeItem1->getItemCount();
uint32_t count2 = tradeItem2->getItemCount();
ret1 = internalMoveItem(cylinder1, tradePartner, INDEX_WHEREEVER, tradeItem1, count1, nullptr, FLAG_IGNOREAUTOSTACK, nullptr, tradeItem2);
if (ret1 == RETURNVALUE_NOERROR) {
internalMoveItem(cylinder2, player, INDEX_WHEREEVER, tradeItem2, count2, nullptr, FLAG_IGNOREAUTOSTACK);
tradeItem1->onTradeEvent(ON_TRADE_TRANSFER, tradePartner);
tradeItem2->onTradeEvent(ON_TRADE_TRANSFER, player);
isSuccess = true;
}
}
}
if (!isSuccess) {
std::string errorDescription;
if (tradePartner->tradeItem) {
errorDescription = getTradeErrorDescription(ret1, tradeItem1);
tradePartner->sendTextMessage(MESSAGE_EVENT_ADVANCE, errorDescription);
tradePartner->tradeItem->onTradeEvent(ON_TRADE_CANCEL, tradePartner);
}
if (player->tradeItem) {
errorDescription = getTradeErrorDescription(ret2, tradeItem2);
player->sendTextMessage(MESSAGE_EVENT_ADVANCE, errorDescription);
player->tradeItem->onTradeEvent(ON_TRADE_CANCEL, player);
}
}
player->setTradeState(TRADE_NONE);
player->tradeItem = nullptr;
player->tradePartner = nullptr;
player->sendTradeClose();
tradePartner->setTradeState(TRADE_NONE);
tradePartner->tradeItem = nullptr;
tradePartner->tradePartner = nullptr;
tradePartner->sendTradeClose();
}
}
std::string Game::getTradeErrorDescription(ReturnValue ret, Item* item)
{
if (item) {
if (ret == RETURNVALUE_NOTENOUGHCAPACITY) {
std::ostringstream ss;
ss << "You do not have enough capacity to carry";
if (item->isStackable() && item->getItemCount() > 1) {
ss << " these objects.";
} else {
ss << " this object.";
}
ss << std::endl << ' ' << item->getWeightDescription();
return ss.str();
} else if (ret == RETURNVALUE_NOTENOUGHROOM || ret == RETURNVALUE_CONTAINERNOTENOUGHROOM) {
std::ostringstream ss;
ss << "You do not have enough room to carry";
if (item->isStackable() && item->getItemCount() > 1) {
ss << " these objects.";
} else {
ss << " this object.";
}
return ss.str();
}
}
return "Trade could not be completed.";
}
void Game::playerLookInTrade(uint32_t playerId, bool lookAtCounterOffer, uint8_t index)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
Player* tradePartner = player->tradePartner;
if (!tradePartner) {
return;
}
Item* tradeItem;
if (lookAtCounterOffer) {
tradeItem = tradePartner->getTradeItem();
} else {
tradeItem = player->getTradeItem();
}
if (!tradeItem) {
return;
}
const Position& playerPosition = player->getPosition();
const Position& tradeItemPosition = tradeItem->getPosition();
int32_t lookDistance = std::max<int32_t>(Position::getDistanceX(playerPosition, tradeItemPosition),
Position::getDistanceY(playerPosition, tradeItemPosition));
if (index == 0) {
g_events->eventPlayerOnLookInTrade(player, tradePartner, tradeItem, lookDistance);
return;
}
Container* tradeContainer = tradeItem->getContainer();
if (!tradeContainer) {
return;
}
std::vector<const Container*> containers {tradeContainer};
size_t i = 0;
while (i < containers.size()) {
const Container* container = containers[i++];
for (Item* item : container->getItemList()) {
Container* tmpContainer = item->getContainer();
if (tmpContainer) {
containers.push_back(tmpContainer);
}
if (--index == 0) {
g_events->eventPlayerOnLookInTrade(player, tradePartner, item, lookDistance);
return;
}
}
}
}
void Game::playerCloseTrade(uint32_t playerId)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
internalCloseTrade(player);
}
void Game::internalCloseTrade(Player* player)
{
Player* tradePartner = player->tradePartner;
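// Never abort a trade that has already reached the transfer stage; the item exchange must run to completion.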
if ((tradePartner && tradePartner->getTradeState() == TRADE_TRANSFER) || player->getTradeState() == TRADE_TRANSFER) {
return;
}
if (player->getTradeItem()) {
std::map<Item*, uint32_t>::iterator it = tradeItems.find(player->getTradeItem());
if (it != tradeItems.end()) {
ReleaseItem(it->first);
tradeItems.erase(it);
}
player->tradeItem->onTradeEvent(ON_TRADE_CANCEL, player);
player->tradeItem = nullptr;
}
player->setTradeState(TRADE_NONE);
player->tradePartner = nullptr;
player->sendTextMessage(MESSAGE_STATUS_SMALL, "Trade cancelled.");
player->sendTradeClose();
if (tradePartner) {
if (tradePartner->getTradeItem()) {
std::map<Item*, uint32_t>::iterator it = tradeItems.find(tradePartner->getTradeItem());
if (it != tradeItems.end()) {
ReleaseItem(it->first);
tradeItems.erase(it);
}
tradePartner->tradeItem->onTradeEvent(ON_TRADE_CANCEL, tradePartner);
tradePartner->tradeItem = nullptr;
}
tradePartner->setTradeState(TRADE_NONE);
tradePartner->tradePartner = nullptr;
tradePartner->sendTextMessage(MESSAGE_STATUS_SMALL, "Trade cancelled.");
tradePartner->sendTradeClose();
}
}
void Game::playerPurchaseItem(uint32_t playerId, uint16_t spriteId, uint8_t count, uint8_t amount,
bool ignoreCap/* = false*/, bool inBackpacks/* = false*/)
{
if (amount == 0 || amount > 100) {
return;
}
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
int32_t onBuy, onSell;
Npc* merchant = player->getShopOwner(onBuy, onSell);
if (!merchant) {
return;
}
const ItemType& it = Item::items.getItemIdByClientId(spriteId);
if (it.id == 0) {
return;
}
uint8_t subType;
if (it.isSplash() || it.isFluidContainer()) {
subType = clientFluidToServer(count);
} else {
subType = count;
}
if (!player->hasShopItemForSale(it.id, subType)) {
return;
}
merchant->onPlayerTrade(player, onBuy, it.id, subType, amount, ignoreCap, inBackpacks);
}
void Game::playerSellItem(uint32_t playerId, uint16_t spriteId, uint8_t count, uint8_t amount, bool ignoreEquipped)
{
if (amount == 0 || amount > 100) {
return;
}
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
int32_t onBuy, onSell;
Npc* merchant = player->getShopOwner(onBuy, onSell);
if (!merchant) {
return;
}
const ItemType& it = Item::items.getItemIdByClientId(spriteId);
if (it.id == 0) {
return;
}
uint8_t subType;
if (it.isSplash() || it.isFluidContainer()) {
subType = clientFluidToServer(count);
} else {
subType = count;
}
merchant->onPlayerTrade(player, onSell, it.id, subType, amount, ignoreEquipped);
}
void Game::playerCloseShop(uint32_t playerId)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
player->closeShopWindow();
}
void Game::playerLookInShop(uint32_t playerId, uint16_t spriteId, uint8_t count)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
int32_t onBuy, onSell;
Npc* merchant = player->getShopOwner(onBuy, onSell);
if (!merchant) {
return;
}
const ItemType& it = Item::items.getItemIdByClientId(spriteId);
if (it.id == 0) {
return;
}
int32_t subType;
if (it.isFluidContainer() || it.isSplash()) {
subType = clientFluidToServer(count);
} else {
subType = count;
}
if (!player->hasShopItemForSale(it.id, subType)) {
return;
}
if (!g_events->eventPlayerOnLookInShop(player, &it, subType)) {
return;
}
std::ostringstream ss;
ss << "You see " << Item::getDescription(it, 1, nullptr, subType);
player->sendTextMessage(MESSAGE_INFO_DESCR, ss.str());
}
void Game::playerLookAt(uint32_t playerId, const Position& pos, uint8_t stackPos)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
Thing* thing = internalGetThing(player, pos, stackPos, 0, STACKPOS_LOOK);
if (!thing) {
player->sendCancelMessage(RETURNVALUE_NOTPOSSIBLE);
return;
}
Position thingPos = thing->getPosition();
if (!player->canSee(thingPos)) {
player->sendCancelMessage(RETURNVALUE_NOTPOSSIBLE);
return;
}
Position playerPos = player->getPosition();
int32_t lookDistance;
if (thing != player) {
lookDistance = std::max<int32_t>(Position::getDistanceX(playerPos, thingPos), Position::getDistanceY(playerPos, thingPos));
if (playerPos.z != thingPos.z) {
lookDistance += 15;
}
} else {
lookDistance = -1;
}
g_events->eventPlayerOnLook(player, pos, thing, stackPos, lookDistance);
}
void Game::playerLookInBattleList(uint32_t playerId, uint32_t creatureId)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
Creature* creature = getCreatureByID(creatureId);
if (!creature) {
return;
}
if (!player->canSeeCreature(creature)) {
return;
}
const Position& creaturePos = creature->getPosition();
if (!player->canSee(creaturePos)) {
return;
}
int32_t lookDistance;
if (creature != player) {
const Position& playerPos = player->getPosition();
lookDistance = std::max<int32_t>(Position::getDistanceX(playerPos, creaturePos), Position::getDistanceY(playerPos, creaturePos));
if (playerPos.z != creaturePos.z) {
lookDistance += 15;
}
} else {
lookDistance = -1;
}
g_events->eventPlayerOnLookInBattleList(player, creature, lookDistance);
}
void Game::playerCancelAttackAndFollow(uint32_t playerId)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
playerSetAttackedCreature(playerId, 0);
playerFollowCreature(playerId, 0);
player->stopWalk();
}
void Game::playerSetAttackedCreature(uint32_t playerId, uint32_t creatureId)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
if (player->getAttackedCreature() && creatureId == 0) {
player->setAttackedCreature(nullptr);
player->sendCancelTarget();
return;
}
Creature* attackCreature = getCreatureByID(creatureId);
if (!attackCreature) {
player->setAttackedCreature(nullptr);
player->sendCancelTarget();
return;
}
ReturnValue ret = Combat::canTargetCreature(player, attackCreature);
if (ret != RETURNVALUE_NOERROR) {
player->sendCancelMessage(ret);
player->sendCancelTarget();
player->setAttackedCreature(nullptr);
return;
}
player->setAttackedCreature(attackCreature);
g_dispatcher.addTask(createTask(std::bind(&Game::updateCreatureWalk, this, player->getID())));
}
void Game::playerFollowCreature(uint32_t playerId, uint32_t creatureId)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
player->setAttackedCreature(nullptr);
g_dispatcher.addTask(createTask(std::bind(&Game::updateCreatureWalk, this, player->getID())));
player->setFollowCreature(getCreatureByID(creatureId));
}
void Game::playerSetFightModes(uint32_t playerId, fightMode_t fightMode, chaseMode_t chaseMode, bool secureMode)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
player->setFightMode(fightMode);
player->setChaseMode(chaseMode);
player->setSecureMode(secureMode);
}
void Game::playerRequestAddVip(uint32_t playerId, const std::string& name)
{
if (name.length() > 20) {
return;
}
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
Player* vipPlayer = getPlayerByName(name);
if (!vipPlayer) {
uint32_t guid;
bool specialVip;
std::string formattedName = name;
if (!IOLoginData::getGuidByNameEx(guid, specialVip, formattedName)) {
player->sendTextMessage(MESSAGE_STATUS_SMALL, "A player with this name does not exist.");
return;
}
if (specialVip && !player->hasFlag(PlayerFlag_SpecialVIP)) {
player->sendTextMessage(MESSAGE_STATUS_SMALL, "You can not add this player.");
return;
}
player->addVIP(guid, formattedName, VIPSTATUS_OFFLINE);
} else {
if (vipPlayer->hasFlag(PlayerFlag_SpecialVIP) && !player->hasFlag(PlayerFlag_SpecialVIP)) {
player->sendTextMessage(MESSAGE_STATUS_SMALL, "You can not add this player.");
return;
}
if (!vipPlayer->isInGhostMode() || player->isAccessPlayer()) {
player->addVIP(vipPlayer->getGUID(), vipPlayer->getName(), VIPSTATUS_ONLINE);
} else {
player->addVIP(vipPlayer->getGUID(), vipPlayer->getName(), VIPSTATUS_OFFLINE);
}
}
}
void Game::playerRequestRemoveVip(uint32_t playerId, uint32_t guid)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
player->removeVIP(guid);
}
void Game::playerRequestEditVip(uint32_t playerId, uint32_t guid, const std::string& description, uint32_t icon, bool notify)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
player->editVIP(guid, description, icon, notify);
}
void Game::playerTurn(uint32_t playerId, Direction dir)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
if (!g_events->eventPlayerOnTurn(player, dir)) {
return;
}
player->resetIdleTime();
internalCreatureTurn(player, dir);
}
void Game::playerRequestOutfit(uint32_t playerId)
{
if (!g_config.getBoolean(ConfigManager::ALLOW_CHANGEOUTFIT)) {
return;
}
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
player->sendOutfitWindow();
}
void Game::playerToggleMount(uint32_t playerId, bool mount)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
player->toggleMount(mount);
}
void Game::playerChangeOutfit(uint32_t playerId, Outfit_t outfit)
{
if (!g_config.getBoolean(ConfigManager::ALLOW_CHANGEOUTFIT)) {
return;
}
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
if (outfit.lookMount != 0) {
Mount* mount = mounts.getMountByClientID(outfit.lookMount);
if (!mount) {
return;
}
if (!player->hasMount(mount)) {
return;
}
if (player->isMounted()) {
Mount* prevMount = mounts.getMountByID(player->getCurrentMount());
if (prevMount) {
changeSpeed(player, mount->speed - prevMount->speed);
}
player->setCurrentMount(mount->id);
} else {
player->setCurrentMount(mount->id);
outfit.lookMount = 0;
}
} else if (player->isMounted()) {
player->dismount();
}
if (player->canWear(outfit.lookType, outfit.lookAddons)) {
player->defaultOutfit = outfit;
if (player->hasCondition(CONDITION_OUTFIT)) {
return;
}
internalCreatureChangeOutfit(player, outfit);
}
}
void Game::playerShowQuestLog(uint32_t playerId)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
player->sendQuestLog();
}
void Game::playerShowQuestLine(uint32_t playerId, uint16_t questId)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
Quest* quest = quests.getQuestByID(questId);
if (!quest) {
return;
}
player->sendQuestLine(quest);
}
void Game::playerSay(uint32_t playerId, uint16_t channelId, SpeakClasses type,
const std::string& receiver, const std::string& text)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
player->resetIdleTime();
uint32_t muteTime = player->isMuted();
if (muteTime > 0) {
std::ostringstream ss;
ss << "You are still muted for " << muteTime << " seconds.";
player->sendTextMessage(MESSAGE_STATUS_SMALL, ss.str());
return;
}
if (playerSayCommand(player, text)) {
return;
}
if (playerSaySpell(player, type, text)) {
return;
}
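// Swallow unhandled '/' commands from staff members so they are not spoken out loud.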
if (!text.empty() && text.front() == '/' && player->isAccessPlayer()) {
return;
}
if (type != TALKTYPE_PRIVATE_PN) {
player->removeMessageBuffer();
}
switch (type) {
case TALKTYPE_SAY:
internalCreatureSay(player, TALKTYPE_SAY, text, false);
break;
case TALKTYPE_WHISPER:
playerWhisper(player, text);
break;
case TALKTYPE_YELL:
playerYell(player, text);
break;
case TALKTYPE_PRIVATE_TO:
case TALKTYPE_PRIVATE_RED_TO:
playerSpeakTo(player, type, receiver, text);
break;
case TALKTYPE_CHANNEL_O:
case TALKTYPE_CHANNEL_Y:
case TALKTYPE_CHANNEL_R1:
g_chat->talkToChannel(*player, type, text, channelId);
break;
case TALKTYPE_PRIVATE_PN:
playerSpeakToNpc(player, text);
break;
case TALKTYPE_BROADCAST:
playerBroadcastMessage(player, text);
break;
default:
break;
}
}
bool Game::playerSayCommand(Player* player, const std::string& text)
{
if (text.empty()) {
return false;
}
char firstCharacter = text.front();
for (char commandTag : commandTags) {
if (commandTag == firstCharacter) {
if (commands.exeCommand(*player, text)) {
return true;
}
}
}
return false;
}
bool Game::playerSaySpell(Player* player, SpeakClasses type, const std::string& text)
{
std::string words = text;
TalkActionResult_t result = g_talkActions->playerSaySpell(player, type, words);
if (result == TALKACTION_BREAK) {
return true;
}
result = g_spells->playerSaySpell(player, words);
if (result == TALKACTION_BREAK) {
if (!g_config.getBoolean(ConfigManager::EMOTE_SPELLS)) {
return internalCreatureSay(player, TALKTYPE_SAY, words, false);
} else {
return internalCreatureSay(player, TALKTYPE_MONSTER_SAY, words, false);
}
} else if (result == TALKACTION_FAILED) {
return true;
}
return false;
}
void Game::playerWhisper(Player* player, const std::string& text)
{
SpectatorVec list;
map.getSpectators(list, player->getPosition(), false, false,
Map::maxClientViewportX, Map::maxClientViewportX,
Map::maxClientViewportY, Map::maxClientViewportY);
//send to client
for (Creature* spectator : list) {
if (Player* spectatorPlayer = spectator->getPlayer()) {
if (!Position::areInRange<1, 1>(player->getPosition(), spectatorPlayer->getPosition())) {
spectatorPlayer->sendCreatureSay(player, TALKTYPE_WHISPER, "pspsps");
} else {
spectatorPlayer->sendCreatureSay(player, TALKTYPE_WHISPER, text);
}
}
}
//event method
for (Creature* spectator : list) {
spectator->onCreatureSay(player, TALKTYPE_WHISPER, text);
}
}
bool Game::playerYell(Player* player, const std::string& text)
{
if (player->getLevel() == 1) {
player->sendTextMessage(MESSAGE_STATUS_SMALL, "You may not yell as long as you are on level 1.");
return false;
}
if (player->hasCondition(CONDITION_YELLTICKS)) {
player->sendCancelMessage(RETURNVALUE_YOUAREEXHAUSTED);
return false;
}
if (player->getAccountType() < ACCOUNT_TYPE_GAMEMASTER) {
Condition* condition = Condition::createCondition(CONDITIONID_DEFAULT, CONDITION_YELLTICKS, 30000, 0);
player->addCondition(condition);
}
internalCreatureSay(player, TALKTYPE_YELL, asUpperCaseString(text), false);
return true;
}
bool Game::playerSpeakTo(Player* player, SpeakClasses type, const std::string& receiver,
const std::string& text)
{
Player* toPlayer = getPlayerByName(receiver);
if (!toPlayer) {
player->sendTextMessage(MESSAGE_STATUS_SMALL, "A player with this name is not online.");
return false;
}
if (type == TALKTYPE_PRIVATE_RED_TO && (player->hasFlag(PlayerFlag_CanTalkRedPrivate) || player->getAccountType() >= ACCOUNT_TYPE_GAMEMASTER)) {
type = TALKTYPE_PRIVATE_RED_FROM;
} else {
type = TALKTYPE_PRIVATE_FROM;
}
toPlayer->sendPrivateMessage(player, type, text);
toPlayer->onCreatureSay(player, type, text);
if (toPlayer->isInGhostMode() && !player->isAccessPlayer()) {
player->sendTextMessage(MESSAGE_STATUS_SMALL, "A player with this name is not online.");
} else {
std::ostringstream ss;
ss << "Message sent to " << toPlayer->getName() << '.';
player->sendTextMessage(MESSAGE_STATUS_SMALL, ss.str());
}
return true;
}
void Game::playerSpeakToNpc(Player* player, const std::string& text)
{
SpectatorVec list;
map.getSpectators(list, player->getPosition());
for (Creature* spectator : list) {
if (spectator->getNpc()) {
spectator->onCreatureSay(player, TALKTYPE_PRIVATE_PN, text);
}
}
}
//--
bool Game::canThrowObjectTo(const Position& fromPos, const Position& toPos, bool checkLineOfSight /*= true*/,
int32_t rangex /*= Map::maxClientViewportX*/, int32_t rangey /*= Map::maxClientViewportY*/) const
{
return map.canThrowObjectTo(fromPos, toPos, checkLineOfSight, rangex, rangey);
}
bool Game::isSightClear(const Position& fromPos, const Position& toPos, bool floorCheck) const
{
return map.isSightClear(fromPos, toPos, floorCheck);
}
bool Game::internalCreatureTurn(Creature* creature, Direction dir)
{
if (creature->getDirection() == dir) {
return false;
}
creature->setDirection(dir);
//send to client
SpectatorVec list;
map.getSpectators(list, creature->getPosition(), true, true);
for (Creature* spectator : list) {
spectator->getPlayer()->sendCreatureTurn(creature);
}
return true;
}
bool Game::internalCreatureSay(Creature* creature, SpeakClasses type, const std::string& text,
bool ghostMode, SpectatorVec* listPtr/* = nullptr*/, const Position* pos/* = nullptr*/)
{
if (text.empty()) {
return false;
}
if (!pos) {
pos = &creature->getPosition();
}
SpectatorVec list;
if (!listPtr || listPtr->empty()) {
// Use the spectator vector passed in by the caller if it is already
// populated; otherwise fill a local one (hopefully the compiler will
// optimize away the construction of the temporary when it's not used).
if (type != TALKTYPE_YELL && type != TALKTYPE_MONSTER_YELL) {
map.getSpectators(list, *pos, false, false,
Map::maxClientViewportX, Map::maxClientViewportX,
Map::maxClientViewportY, Map::maxClientViewportY);
} else {
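// Yells carry further than normal speech: use a fixed multi-floor range larger than the client viewport.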
map.getSpectators(list, *pos, true, false, 18, 18, 14, 14);
}
} else {
list = (*listPtr);
}
//send to client
for (Creature* spectator : list) {
if (Player* tmpPlayer = spectator->getPlayer()) {
if (!ghostMode || tmpPlayer->canSeeCreature(creature)) {
tmpPlayer->sendCreatureSay(creature, type, text, pos);
}
}
}
//event method
for (Creature* spectator : list) {
spectator->onCreatureSay(creature, type, text);
}
return true;
}
void Game::checkCreatureWalk(uint32_t creatureId)
{
Creature* creature = getCreatureByID(creatureId);
if (creature && creature->getHealth() > 0) {
creature->onWalk();
cleanup();
}
}
void Game::updateCreatureWalk(uint32_t creatureId)
{
Creature* creature = getCreatureByID(creatureId);
if (creature && creature->getHealth() > 0) {
creature->goToFollowCreature();
}
}
void Game::checkCreatureAttack(uint32_t creatureId)
{
Creature* creature = getCreatureByID(creatureId);
if (creature && creature->getHealth() > 0) {
creature->onAttacking(0);
}
}
void Game::addCreatureCheck(Creature* creature)
{
creature->creatureCheck = true;
if (creature->inCheckCreaturesVector) {
// already in a vector
return;
}
creature->inCheckCreaturesVector = true;
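// Creatures are spread randomly across EVENT_CREATURECOUNT buckets so each check tick only walks a fraction of them.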
checkCreatureLists[uniform_random(0, EVENT_CREATURECOUNT - 1)].push_back(creature);
creature->incrementReferenceCounter();
}
void Game::removeCreatureCheck(Creature* creature)
{
if (creature->inCheckCreaturesVector) {
creature->creatureCheck = false;
}
}
void Game::checkCreatures(size_t index)
{
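// Queue the next bucket up front so the check cadence stays fixed regardless of how long this pass takes.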
g_scheduler.addEvent(createSchedulerTask(EVENT_CHECK_CREATURE_INTERVAL, std::bind(&Game::checkCreatures, this, (index + 1) % EVENT_CREATURECOUNT)));
auto& checkCreatureList = checkCreatureLists[index];
auto it = checkCreatureList.begin(), end = checkCreatureList.end();
while (it != end) {
Creature* creature = *it;
if (creature->creatureCheck) {
if (creature->getHealth() > 0) {
creature->onThink(EVENT_CREATURE_THINK_INTERVAL);
creature->onAttacking(EVENT_CREATURE_THINK_INTERVAL);
creature->executeConditions(EVENT_CREATURE_THINK_INTERVAL);
} else {
creature->onDeath();
}
++it;
} else {
creature->inCheckCreaturesVector = false;
it = checkCreatureList.erase(it);
ReleaseCreature(creature);
}
}
cleanup();
}
void Game::changeSpeed(Creature* creature, int32_t varSpeedDelta)
{
int32_t varSpeed = creature->getSpeed() - creature->getBaseSpeed();
varSpeed += varSpeedDelta;
creature->setSpeed(varSpeed);
//send to clients
SpectatorVec list;
map.getSpectators(list, creature->getPosition(), false, true);
for (Creature* spectator : list) {
spectator->getPlayer()->sendChangeSpeed(creature, creature->getStepSpeed());
}
}
void Game::internalCreatureChangeOutfit(Creature* creature, const Outfit_t& outfit)
{
if (!g_events->eventCreatureOnChangeOutfit(creature, outfit)) {
return;
}
creature->setCurrentOutfit(outfit);
if (creature->isInvisible()) {
return;
}
//send to clients
SpectatorVec list;
map.getSpectators(list, creature->getPosition(), true, true);
for (Creature* spectator : list) {
spectator->getPlayer()->sendCreatureChangeOutfit(creature, outfit);
}
}
void Game::internalCreatureChangeVisible(Creature* creature, bool visible)
{
//send to clients
SpectatorVec list;
map.getSpectators(list, creature->getPosition(), true, true);
for (Creature* spectator : list) {
spectator->getPlayer()->sendCreatureChangeVisible(creature, visible);
}
}
void Game::changeLight(const Creature* creature)
{
//send to clients
SpectatorVec list;
map.getSpectators(list, creature->getPosition(), true, true);
for (Creature* spectator : list) {
spectator->getPlayer()->sendCreatureLight(creature);
}
}
bool Game::combatBlockHit(CombatDamage& damage, Creature* attacker, Creature* target, bool checkDefense, bool checkArmor, bool field)
{
if (damage.primary.type == COMBAT_NONE && damage.secondary.type == COMBAT_NONE) {
return true;
}
if (target->getPlayer() && target->isInGhostMode()) {
return true;
}
if (damage.primary.value > 0) {
return false;
}
// Deliberately not 'static': a static lambda would capture the 'this' of the first call only.
const auto sendBlockEffect = [this](BlockType_t blockType, CombatType_t combatType, const Position& targetPos) {
if (blockType == BLOCK_DEFENSE) {
addMagicEffect(targetPos, CONST_ME_POFF);
} else if (blockType == BLOCK_ARMOR) {
addMagicEffect(targetPos, CONST_ME_BLOCKHIT);
} else if (blockType == BLOCK_IMMUNITY) {
uint8_t hitEffect = 0;
switch (combatType) {
case COMBAT_UNDEFINEDDAMAGE: {
return;
}
case COMBAT_ENERGYDAMAGE:
case COMBAT_FIREDAMAGE:
case COMBAT_PHYSICALDAMAGE:
case COMBAT_ICEDAMAGE:
case COMBAT_DEATHDAMAGE: {
hitEffect = CONST_ME_BLOCKHIT;
break;
}
case COMBAT_EARTHDAMAGE: {
hitEffect = CONST_ME_GREEN_RINGS;
break;
}
case COMBAT_HOLYDAMAGE: {
hitEffect = CONST_ME_HOLYDAMAGE;
break;
}
default: {
hitEffect = CONST_ME_POFF;
break;
}
}
addMagicEffect(targetPos, hitEffect);
}
};
BlockType_t primaryBlockType, secondaryBlockType;
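// blockHit() works with positive damage values, so flip the sign before the call and flip it back afterwards.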
if (damage.primary.type != COMBAT_NONE) {
damage.primary.value = -damage.primary.value;
primaryBlockType = target->blockHit(attacker, damage.primary.type, damage.primary.value, checkDefense, checkArmor, field);
damage.primary.value = -damage.primary.value;
sendBlockEffect(primaryBlockType, damage.primary.type, target->getPosition());
} else {
primaryBlockType = BLOCK_NONE;
}
if (damage.secondary.type != COMBAT_NONE) {
damage.secondary.value = -damage.secondary.value;
secondaryBlockType = target->blockHit(attacker, damage.secondary.type, damage.secondary.value, false, false, field);
damage.secondary.value = -damage.secondary.value;
sendBlockEffect(secondaryBlockType, damage.secondary.type, target->getPosition());
} else {
secondaryBlockType = BLOCK_NONE;
}
return (primaryBlockType != BLOCK_NONE) && (secondaryBlockType != BLOCK_NONE);
}
void Game::combatGetTypeInfo(CombatType_t combatType, Creature* target, TextColor_t& color, uint8_t& effect)
{
switch (combatType) {
case COMBAT_PHYSICALDAMAGE: {
Item* splash = nullptr;
switch (target->getRace()) {
case RACE_VENOM:
color = TEXTCOLOR_LIGHTGREEN;
effect = CONST_ME_HITBYPOISON;
splash = Item::CreateItem(ITEM_SMALLSPLASH, FLUID_GREEN);
break;
case RACE_BLOOD:
color = TEXTCOLOR_RED;
effect = CONST_ME_DRAWBLOOD;
splash = Item::CreateItem(ITEM_SMALLSPLASH, FLUID_BLOOD);
break;
case RACE_UNDEAD:
color = TEXTCOLOR_LIGHTGREY;
effect = CONST_ME_HITAREA;
break;
case RACE_FIRE:
color = TEXTCOLOR_ORANGE;
effect = CONST_ME_DRAWBLOOD;
break;
case RACE_ENERGY:
color = TEXTCOLOR_PURPLE;
effect = CONST_ME_ENERGYHIT;
break;
default:
color = TEXTCOLOR_NONE;
effect = CONST_ME_NONE;
break;
}
if (splash) {
internalAddItem(target->getTile(), splash, INDEX_WHEREEVER, FLAG_NOLIMIT);
startDecay(splash);
}
break;
}
case COMBAT_ENERGYDAMAGE: {
color = TEXTCOLOR_PURPLE;
effect = CONST_ME_ENERGYHIT;
break;
}
case COMBAT_EARTHDAMAGE: {
color = TEXTCOLOR_LIGHTGREEN;
effect = CONST_ME_GREEN_RINGS;
break;
}
case COMBAT_DROWNDAMAGE: {
color = TEXTCOLOR_LIGHTBLUE;
effect = CONST_ME_LOSEENERGY;
break;
}
case COMBAT_FIREDAMAGE: {
color = TEXTCOLOR_ORANGE;
effect = CONST_ME_HITBYFIRE;
break;
}
case COMBAT_ICEDAMAGE: {
color = TEXTCOLOR_SKYBLUE;
effect = CONST_ME_ICEATTACK;
break;
}
case COMBAT_HOLYDAMAGE: {
color = TEXTCOLOR_YELLOW;
effect = CONST_ME_HOLYDAMAGE;
break;
}
case COMBAT_DEATHDAMAGE: {
color = TEXTCOLOR_DARKRED;
effect = CONST_ME_SMALLCLOUDS;
break;
}
case COMBAT_LIFEDRAIN: {
color = TEXTCOLOR_RED;
effect = CONST_ME_MAGIC_RED;
break;
}
default: {
color = TEXTCOLOR_NONE;
effect = CONST_ME_NONE;
break;
}
}
}
bool Game::combatChangeHealth(Creature* attacker, Creature* target, CombatDamage& damage)
{
const Position& targetPos = target->getPosition();
if (damage.primary.value > 0) {
if (target->getHealth() <= 0) {
return false;
}
Player* attackerPlayer;
if (attacker) {
attackerPlayer = attacker->getPlayer();
} else {
attackerPlayer = nullptr;
}
Player* targetPlayer = target->getPlayer();
if (attackerPlayer && targetPlayer && attackerPlayer->getSkull() == SKULL_BLACK && attackerPlayer->getSkullClient(targetPlayer) == SKULL_NONE) {
return false;
}
if (damage.origin != ORIGIN_NONE) {
const auto& events = target->getCreatureEvents(CREATURE_EVENT_HEALTHCHANGE);
if (!events.empty()) {
for (CreatureEvent* creatureEvent : events) {
creatureEvent->executeHealthChange(target, attacker, damage);
}
damage.origin = ORIGIN_NONE;
return combatChangeHealth(attacker, target, damage);
}
}
int32_t realHealthChange = target->getHealth();
target->gainHealth(attacker, damage.primary.value);
realHealthChange = target->getHealth() - realHealthChange;
if (realHealthChange > 0 && !target->isInGhostMode()) {
std::string damageString = std::to_string(realHealthChange) + (realHealthChange != 1 ? " hitpoints." : " hitpoint.");
std::string spectatorMessage;
if (!attacker) {
spectatorMessage += ucfirst(target->getNameDescription());
spectatorMessage += " was healed for " + damageString;
} else {
spectatorMessage += ucfirst(attacker->getNameDescription());
spectatorMessage += " healed ";
if (attacker == target) {
spectatorMessage += (targetPlayer ? (targetPlayer->getSex() == PLAYERSEX_FEMALE ? "herself" : "himself") : "itself");
} else {
spectatorMessage += target->getNameDescription();
}
spectatorMessage += " for " + damageString;
}
TextMessage message;
message.position = targetPos;
message.primary.value = realHealthChange;
message.primary.color = TEXTCOLOR_MAYABLUE;
SpectatorVec list;
map.getSpectators(list, targetPos, false, true);
for (Creature* spectator : list) {
Player* tmpPlayer = spectator->getPlayer();
if (tmpPlayer == attackerPlayer && attackerPlayer != targetPlayer) {
message.type = MESSAGE_HEALED;
message.text = "You heal " + target->getNameDescription() + " for " + damageString;
} else if (tmpPlayer == targetPlayer) {
message.type = MESSAGE_HEALED;
if (!attacker) {
message.text = "You were healed for " + damageString;
} else if (targetPlayer == attackerPlayer) {
message.text = "You heal yourself for " + damageString;
} else {
message.text = "You were healed by " + attacker->getNameDescription() + " for " + damageString;
}
} else {
message.type = MESSAGE_HEALED_OTHERS;
message.text = spectatorMessage;
}
tmpPlayer->sendTextMessage(message);
}
}
} else {
if (!target->isAttackable()) {
if (!target->isInGhostMode()) {
addMagicEffect(targetPos, CONST_ME_POFF);
}
return true;
}
Player* attackerPlayer;
if (attacker) {
attackerPlayer = attacker->getPlayer();
} else {
attackerPlayer = nullptr;
}
Player* targetPlayer = target->getPlayer();
if (attackerPlayer && targetPlayer && attackerPlayer->getSkull() == SKULL_BLACK && attackerPlayer->getSkullClient(targetPlayer) == SKULL_NONE) {
return false;
}
damage.primary.value = std::abs(damage.primary.value);
damage.secondary.value = std::abs(damage.secondary.value);
int32_t healthChange = damage.primary.value + damage.secondary.value;
if (healthChange == 0) {
return true;
}
TextMessage message;
message.position = targetPos;
SpectatorVec list;
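// With a mana shield active, damage drains mana first; only the overflow reaches health.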
if (target->hasCondition(CONDITION_MANASHIELD) && damage.primary.type != COMBAT_UNDEFINEDDAMAGE) {
int32_t manaDamage = std::min<int32_t>(target->getMana(), healthChange);
if (manaDamage != 0) {
if (damage.origin != ORIGIN_NONE) {
const auto& events = target->getCreatureEvents(CREATURE_EVENT_MANACHANGE);
if (!events.empty()) {
for (CreatureEvent* creatureEvent : events) {
creatureEvent->executeManaChange(target, attacker, healthChange, damage.origin);
}
if (healthChange == 0) {
return true;
}
manaDamage = std::min<int32_t>(target->getMana(), healthChange);
}
}
target->drainMana(attacker, manaDamage);
map.getSpectators(list, targetPos, true, true);
addMagicEffect(list, targetPos, CONST_ME_LOSEENERGY);
std::string damageString = std::to_string(manaDamage);
std::string spectatorMessage = ucfirst(target->getNameDescription()) + " loses " + damageString + " mana";
if (attacker) {
spectatorMessage += " due to ";
if (attacker == target) {
spectatorMessage += (targetPlayer ? (targetPlayer->getSex() == PLAYERSEX_FEMALE ? "her own attack" : "his own attack") : "its own attack");
} else {
spectatorMessage += "an attack by " + attacker->getNameDescription();
}
}
spectatorMessage += '.';
message.primary.value = manaDamage;
message.primary.color = TEXTCOLOR_BLUE;
for (Creature* spectator : list) {
Player* tmpPlayer = spectator->getPlayer();
if (tmpPlayer->getPosition().z != targetPos.z) {
continue;
}
if (tmpPlayer == attackerPlayer && attackerPlayer != targetPlayer) {
message.type = MESSAGE_DAMAGE_DEALT;
message.text = ucfirst(target->getNameDescription()) + " loses " + damageString + " mana due to your attack.";
} else if (tmpPlayer == targetPlayer) {
message.type = MESSAGE_DAMAGE_RECEIVED;
if (!attacker) {
message.text = "You lose " + damageString + " mana.";
} else if (targetPlayer == attackerPlayer) {
message.text = "You lose " + damageString + " mana due to your own attack.";
} else {
message.text = "You lose " + damageString + " mana due to an attack by " + attacker->getNameDescription() + '.';
}
} else {
message.type = MESSAGE_DAMAGE_OTHERS;
message.text = spectatorMessage;
}
tmpPlayer->sendTextMessage(message);
}
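// Deduct what the mana shield absorbed; if it covered more than the primary part, the surplus reduces the secondary part.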
damage.primary.value -= manaDamage;
if (damage.primary.value < 0) {
damage.secondary.value = std::max<int32_t>(0, damage.secondary.value + damage.primary.value);
damage.primary.value = 0;
}
}
}
int32_t realDamage = damage.primary.value + damage.secondary.value;
if (realDamage == 0) {
return true;
}
if (damage.origin != ORIGIN_NONE) {
const auto& events = target->getCreatureEvents(CREATURE_EVENT_HEALTHCHANGE);
if (!events.empty()) {
for (CreatureEvent* creatureEvent : events) {
creatureEvent->executeHealthChange(target, attacker, damage);
}
damage.origin = ORIGIN_NONE;
return combatChangeHealth(attacker, target, damage);
}
}
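// Clamp both damage parts so their sum never exceeds the target's remaining health.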
int32_t targetHealth = target->getHealth();
if (damage.primary.value >= targetHealth) {
damage.primary.value = targetHealth;
damage.secondary.value = 0;
} else if (damage.secondary.value) {
damage.secondary.value = std::min<int32_t>(damage.secondary.value, targetHealth - damage.primary.value);
}
realDamage = damage.primary.value + damage.secondary.value;
if (realDamage == 0) {
return true;
} else if (realDamage >= targetHealth) {
for (CreatureEvent* creatureEvent : target->getCreatureEvents(CREATURE_EVENT_PREPAREDEATH)) {
if (!creatureEvent->executeOnPrepareDeath(target, attacker)) {
return false;
}
}
}
target->drainHealth(attacker, realDamage);
if (list.empty()) {
map.getSpectators(list, targetPos, true, true);
}
addCreatureHealth(list, target);
message.primary.value = damage.primary.value;
message.secondary.value = damage.secondary.value;
uint8_t hitEffect;
if (message.primary.value) {
combatGetTypeInfo(damage.primary.type, target, message.primary.color, hitEffect);
if (hitEffect != CONST_ME_NONE) {
addMagicEffect(list, targetPos, hitEffect);
}
}
if (message.secondary.value) {
combatGetTypeInfo(damage.secondary.type, target, message.secondary.color, hitEffect);
if (hitEffect != CONST_ME_NONE) {
addMagicEffect(list, targetPos, hitEffect);
}
}
if (message.primary.color != TEXTCOLOR_NONE || message.secondary.color != TEXTCOLOR_NONE) {
std::string damageString = std::to_string(realDamage) + (realDamage != 1 ? " hitpoints" : " hitpoint");
std::string spectatorMessage = ucfirst(target->getNameDescription()) + " loses " + damageString;
if (attacker) {
spectatorMessage += " due to ";
if (attacker == target) {
spectatorMessage += (targetPlayer ? (targetPlayer->getSex() == PLAYERSEX_FEMALE ? "her own attack" : "his own attack") : "its own attack");
} else {
spectatorMessage += "an attack by " + attacker->getNameDescription();
}
}
spectatorMessage += '.';
for (Creature* spectator : list) {
Player* tmpPlayer = spectator->getPlayer();
if (tmpPlayer->getPosition().z != targetPos.z) {
continue;
}
if (tmpPlayer == attackerPlayer && attackerPlayer != targetPlayer) {
message.type = MESSAGE_DAMAGE_DEALT;
message.text = ucfirst(target->getNameDescription()) + " loses " + damageString + " due to your attack.";
} else if (tmpPlayer == targetPlayer) {
message.type = MESSAGE_DAMAGE_RECEIVED;
if (!attacker) {
message.text = "You lose " + damageString + '.';
} else if (targetPlayer == attackerPlayer) {
message.text = "You lose " + damageString + " due to your own attack.";
} else {
message.text = "You lose " + damageString + " due to an attack by " + attacker->getNameDescription() + '.';
}
} else {
message.type = MESSAGE_DAMAGE_OTHERS;
// TODO: Avoid copying spectatorMessage every time we send to a spectator
message.text = spectatorMessage;
}
tmpPlayer->sendTextMessage(message);
}
}
}
return true;
}
bool Game::combatChangeMana(Creature* attacker, Creature* target, int32_t manaChange, CombatOrigin origin)
{
if (manaChange > 0) {
if (attacker) {
const Player* attackerPlayer = attacker->getPlayer();
if (attackerPlayer && attackerPlayer->getSkull() == SKULL_BLACK && target->getPlayer() && attackerPlayer->getSkullClient(target) == SKULL_NONE) {
return false;
}
}
if (origin != ORIGIN_NONE) {
const auto& events = target->getCreatureEvents(CREATURE_EVENT_MANACHANGE);
if (!events.empty()) {
for (CreatureEvent* creatureEvent : events) {
creatureEvent->executeManaChange(target, attacker, manaChange, origin);
}
return combatChangeMana(attacker, target, manaChange, ORIGIN_NONE);
}
}
target->changeMana(manaChange);
} else {
const Position& targetPos = target->getPosition();
if (!target->isAttackable()) {
if (!target->isInGhostMode()) {
addMagicEffect(targetPos, CONST_ME_POFF);
}
return false;
}
Player* attackerPlayer;
if (attacker) {
attackerPlayer = attacker->getPlayer();
} else {
attackerPlayer = nullptr;
}
Player* targetPlayer = target->getPlayer();
if (attackerPlayer && targetPlayer && attackerPlayer->getSkull() == SKULL_BLACK && attackerPlayer->getSkullClient(targetPlayer) == SKULL_NONE) {
return false;
}
int32_t manaLoss = std::min<int32_t>(target->getMana(), -manaChange);
BlockType_t blockType = target->blockHit(attacker, COMBAT_MANADRAIN, manaLoss);
if (blockType != BLOCK_NONE) {
addMagicEffect(targetPos, CONST_ME_POFF);
return false;
}
if (manaLoss <= 0) {
return true;
}
if (origin != ORIGIN_NONE) {
const auto& events = target->getCreatureEvents(CREATURE_EVENT_MANACHANGE);
if (!events.empty()) {
for (CreatureEvent* creatureEvent : events) {
creatureEvent->executeManaChange(target, attacker, manaChange, origin);
}
return combatChangeMana(attacker, target, manaChange, ORIGIN_NONE);
}
}
target->drainMana(attacker, manaLoss);
std::string damageString = std::to_string(manaLoss);
std::string spectatorMessage = ucfirst(target->getNameDescription()) + " loses " + damageString + " mana";
if (attacker) {
spectatorMessage += " due to ";
if (attacker == target) {
spectatorMessage += (targetPlayer ? (targetPlayer->getSex() == PLAYERSEX_FEMALE ? "her own attack" : "his own attack") : "its own attack");
} else {
spectatorMessage += "an attack by " + attacker->getNameDescription();
}
}
spectatorMessage += '.';
TextMessage message;
message.position = targetPos;
message.primary.value = manaLoss;
message.primary.color = TEXTCOLOR_BLUE;
SpectatorVec list;
map.getSpectators(list, targetPos, false, true);
for (Creature* spectator : list) {
Player* tmpPlayer = spectator->getPlayer();
if (tmpPlayer == attackerPlayer && attackerPlayer != targetPlayer) {
message.type = MESSAGE_DAMAGE_DEALT;
message.text = ucfirst(target->getNameDescription()) + " loses " + damageString + " mana due to your attack.";
} else if (tmpPlayer == targetPlayer) {
message.type = MESSAGE_DAMAGE_RECEIVED;
if (!attacker) {
message.text = "You lose " + damageString + " mana.";
} else if (targetPlayer == attackerPlayer) {
message.text = "You lose " + damageString + " mana due to your own attack.";
} else {
message.text = "You lose " + damageString + " mana due to an attack by " + attacker->getNameDescription() + '.';
}
} else {
message.type = MESSAGE_DAMAGE_OTHERS;
message.text = spectatorMessage;
}
tmpPlayer->sendTextMessage(message);
}
}
return true;
}
void Game::addCreatureHealth(const Creature* target)
{
SpectatorVec list;
map.getSpectators(list, target->getPosition(), true, true);
addCreatureHealth(list, target);
}
void Game::addCreatureHealth(const SpectatorVec& list, const Creature* target)
{
for (Creature* spectator : list) {
if (Player* tmpPlayer = spectator->getPlayer()) {
tmpPlayer->sendCreatureHealth(target);
}
}
}
void Game::addMagicEffect(const Position& pos, uint8_t effect)
{
SpectatorVec list;
map.getSpectators(list, pos, true, true);
addMagicEffect(list, pos, effect);
}
void Game::addMagicEffect(const SpectatorVec& list, const Position& pos, uint8_t effect)
{
for (Creature* spectator : list) {
if (Player* tmpPlayer = spectator->getPlayer()) {
tmpPlayer->sendMagicEffect(pos, effect);
}
}
}
void Game::addDistanceEffect(const Position& fromPos, const Position& toPos, uint8_t effect)
{
SpectatorVec list;
map.getSpectators(list, fromPos, false, true);
map.getSpectators(list, toPos, false, true);
addDistanceEffect(list, fromPos, toPos, effect);
}
void Game::addDistanceEffect(const SpectatorVec& list, const Position& fromPos, const Position& toPos, uint8_t effect)
{
for (Creature* spectator : list) {
if (Player* tmpPlayer = spectator->getPlayer()) {
tmpPlayer->sendDistanceShoot(fromPos, toPos, effect);
}
}
}
void Game::startDecay(Item* item)
{
if (!item || !item->canDecay()) {
return;
}
ItemDecayState_t decayState = item->getDecaying();
if (decayState == DECAYING_TRUE) {
return;
}
if (item->getDuration() > 0) {
item->incrementReferenceCounter();
item->setDecaying(DECAYING_TRUE);
toDecayItems.push_front(item);
} else {
internalDecayItem(item);
}
}
void Game::internalDecayItem(Item* item)
{
const ItemType& it = Item::items[item->getID()];
if (it.decayTo != 0) {
Item* newItem = transformItem(item, it.decayTo);
startDecay(newItem);
} else {
ReturnValue ret = internalRemoveItem(item);
if (ret != RETURNVALUE_NOERROR) {
std::cout << "[Debug - Game::internalDecayItem] internalDecayItem failed, error code: " << static_cast<uint32_t>(ret) << ", item id: " << item->getID() << std::endl;
}
}
}
void Game::checkDecay()
{
g_scheduler.addEvent(createSchedulerTask(EVENT_DECAYINTERVAL, std::bind(&Game::checkDecay, this)));
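// decaying items are spread across EVENT_DECAY_BUCKETS buckets so that each
// tick only has to walk the single bucket whose turn has come up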
size_t bucket = (lastBucket + 1) % EVENT_DECAY_BUCKETS;
auto it = decayItems[bucket].begin(), end = decayItems[bucket].end();
while (it != end) {
Item* item = *it;
if (!item->canDecay()) {
item->setDecaying(DECAYING_FALSE);
ReleaseItem(item);
it = decayItems[bucket].erase(it);
continue;
}
int32_t duration = item->getDuration();
int32_t decreaseTime = std::min<int32_t>(EVENT_DECAYINTERVAL * EVENT_DECAY_BUCKETS, duration);
duration -= decreaseTime;
item->decreaseDuration(decreaseTime);
if (duration <= 0) {
it = decayItems[bucket].erase(it);
internalDecayItem(item);
ReleaseItem(item);
} else if (duration < EVENT_DECAYINTERVAL * EVENT_DECAY_BUCKETS) {
it = decayItems[bucket].erase(it);
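// reschedule the item into the bucket that will be processed closest to its remaining duration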
size_t newBucket = (bucket + ((duration + EVENT_DECAYINTERVAL / 2) / 1000)) % EVENT_DECAY_BUCKETS;
if (newBucket == bucket) {
internalDecayItem(item);
ReleaseItem(item);
} else {
decayItems[newBucket].push_back(item);
}
} else {
++it;
}
}
lastBucket = bucket;
cleanup();
}
void Game::checkLight()
{
g_scheduler.addEvent(createSchedulerTask(EVENT_LIGHTINTERVAL, std::bind(&Game::checkLight, this)));
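// advance the in-game clock; lightHour counts minutes and wraps at 1440 (one full day)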
lightHour += lightHourDelta;
if (lightHour > 1440) {
lightHour -= 1440;
}
if (std::abs(lightHour - SUNRISE) < 2 * lightHourDelta) {
lightState = LIGHT_STATE_SUNRISE;
} else if (std::abs(lightHour - SUNSET) < 2 * lightHourDelta) {
lightState = LIGHT_STATE_SUNSET;
}
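// during sunrise and sunset the light level moves towards day/night in 30 equal steps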
int32_t newLightLevel = lightLevel;
bool lightChange = false;
switch (lightState) {
case LIGHT_STATE_SUNRISE: {
newLightLevel += (LIGHT_LEVEL_DAY - LIGHT_LEVEL_NIGHT) / 30;
lightChange = true;
break;
}
case LIGHT_STATE_SUNSET: {
newLightLevel -= (LIGHT_LEVEL_DAY - LIGHT_LEVEL_NIGHT) / 30;
lightChange = true;
break;
}
default:
break;
}
if (newLightLevel <= LIGHT_LEVEL_NIGHT) {
lightLevel = LIGHT_LEVEL_NIGHT;
lightState = LIGHT_STATE_NIGHT;
} else if (newLightLevel >= LIGHT_LEVEL_DAY) {
lightLevel = LIGHT_LEVEL_DAY;
lightState = LIGHT_STATE_DAY;
} else {
lightLevel = newLightLevel;
}
if (lightChange) {
LightInfo lightInfo;
getWorldLightInfo(lightInfo);
for (const auto& it : players) {
it.second->sendWorldLight(lightInfo);
}
}
}
void Game::getWorldLightInfo(LightInfo& lightInfo) const
{
lightInfo.level = lightLevel;
lightInfo.color = 0xD7;
}
void Game::addCommandTag(char tag)
{
for (char commandTag : commandTags) {
if (commandTag == tag) {
return;
}
}
commandTags.push_back(tag);
}
void Game::resetCommandTag()
{
commandTags.clear();
}
void Game::shutdown()
{
std::cout << "Shutting down..." << std::flush;
g_scheduler.shutdown();
g_databaseTasks.shutdown();
g_dispatcher.shutdown();
map.spawns.clear();
raids.clear();
cleanup();
if (serviceManager) {
serviceManager->stop();
}
ConnectionManager::getInstance().closeAll();
std::cout << " done!" << std::endl;
}
void Game::cleanup()
{
//free memory
for (auto creature : ToReleaseCreatures) {
creature->decrementReferenceCounter();
}
ToReleaseCreatures.clear();
for (auto item : ToReleaseItems) {
item->decrementReferenceCounter();
}
ToReleaseItems.clear();
for (Item* item : toDecayItems) {
const uint32_t dur = item->getDuration();
if (dur >= EVENT_DECAYINTERVAL * EVENT_DECAY_BUCKETS) {
decayItems[lastBucket].push_back(item);
} else {
decayItems[(lastBucket + 1 + dur / 1000) % EVENT_DECAY_BUCKETS].push_back(item);
}
}
toDecayItems.clear();
}
void Game::ReleaseCreature(Creature* creature)
{
ToReleaseCreatures.push_back(creature);
}
void Game::ReleaseItem(Item* item)
{
ToReleaseItems.push_back(item);
}
void Game::broadcastMessage(const std::string& text, MessageClasses type) const
{
std::cout << "> Broadcasted message: \"" << text << "\"." << std::endl;
for (const auto& it : players) {
it.second->sendTextMessage(type, text);
}
}
void Game::updateCreatureWalkthrough(const Creature* creature)
{
//send to clients
SpectatorVec list;
map.getSpectators(list, creature->getPosition(), true, true);
for (Creature* spectator : list) {
Player* tmpPlayer = spectator->getPlayer();
tmpPlayer->sendCreatureWalkthrough(creature, tmpPlayer->canWalkthroughEx(creature));
}
}
void Game::updateCreatureSkull(const Creature* creature)
{
if (getWorldType() != WORLD_TYPE_PVP) {
return;
}
SpectatorVec list;
map.getSpectators(list, creature->getPosition(), true, true);
for (Creature* spectator : list) {
spectator->getPlayer()->sendCreatureSkull(creature);
}
}
void Game::updatePlayerShield(Player* player)
{
SpectatorVec list;
map.getSpectators(list, player->getPosition(), true, true);
for (Creature* spectator : list) {
spectator->getPlayer()->sendCreatureShield(player);
}
}
void Game::updatePlayerHelpers(const Player& player)
{
uint32_t creatureId = player.getID();
uint16_t helpers = player.getHelpers();
SpectatorVec list;
map.getSpectators(list, player.getPosition(), true, true);
for (Creature* spectator : list) {
spectator->getPlayer()->sendCreatureHelpers(creatureId, helpers);
}
}
void Game::updateCreatureType(Creature* creature)
{
const Player* masterPlayer = nullptr;
uint32_t creatureId = creature->getID();
CreatureType_t creatureType = creature->getType();
if (creatureType == CREATURETYPE_MONSTER) {
const Creature* master = creature->getMaster();
if (master) {
masterPlayer = master->getPlayer();
if (masterPlayer) {
creatureType = CREATURETYPE_SUMMON_OTHERS;
}
}
}
//send to clients
SpectatorVec list;
map.getSpectators(list, creature->getPosition(), true, true);
if (creatureType == CREATURETYPE_SUMMON_OTHERS) {
for (Creature* spectator : list) {
Player* player = spectator->getPlayer();
if (masterPlayer == player) {
player->sendCreatureType(creatureId, CREATURETYPE_SUMMON_OWN);
} else {
player->sendCreatureType(creatureId, creatureType);
}
}
} else {
for (Creature* spectator : list) {
spectator->getPlayer()->sendCreatureType(creatureId, creatureType);
}
}
}
void Game::updatePremium(Account& account)
{
bool save = false;
time_t timeNow = time(nullptr);
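// a premiumDays value of uint16 max marks a never-expiring premium account and is left untouched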
if (account.premiumDays != 0 && account.premiumDays != std::numeric_limits<uint16_t>::max()) {
if (account.lastDay == 0) {
account.lastDay = timeNow;
save = true;
} else {
uint32_t days = (timeNow - account.lastDay) / 86400;
if (days > 0) {
if (days >= account.premiumDays) {
account.premiumDays = 0;
account.lastDay = 0;
} else {
account.premiumDays -= days;
time_t remainder = (timeNow - account.lastDay) % 86400;
account.lastDay = timeNow - remainder;
}
save = true;
}
}
} else if (account.lastDay != 0) {
account.lastDay = 0;
save = true;
}
if (save && !IOLoginData::saveAccount(account)) {
std::cout << "> ERROR: Failed to save account: " << account.name << "!" << std::endl;
}
}
void Game::loadMotdNum()
{
Database* db = Database::getInstance();
DBResult_ptr result = db->storeQuery("SELECT `value` FROM `server_config` WHERE `config` = 'motd_num'");
if (result) {
motdNum = result->getNumber<uint32_t>("value");
} else {
db->executeQuery("INSERT INTO `server_config` (`config`, `value`) VALUES ('motd_num', '0')");
}
result = db->storeQuery("SELECT `value` FROM `server_config` WHERE `config` = 'motd_hash'");
if (result) {
motdHash = result->getString("value");
if (motdHash != transformToSHA1(g_config.getString(ConfigManager::MOTD))) {
++motdNum;
}
} else {
db->executeQuery("INSERT INTO `server_config` (`config`, `value`) VALUES ('motd_hash', '')");
}
}
void Game::saveMotdNum() const
{
Database* db = Database::getInstance();
std::ostringstream query;
query << "UPDATE `server_config` SET `value` = '" << motdNum << "' WHERE `config` = 'motd_num'";
db->executeQuery(query.str());
query.str(std::string());
query << "UPDATE `server_config` SET `value` = '" << transformToSHA1(g_config.getString(ConfigManager::MOTD)) << "' WHERE `config` = 'motd_hash'";
db->executeQuery(query.str());
}
void Game::checkPlayersRecord()
{
const size_t playersOnline = getPlayersOnline();
if (playersOnline > playersRecord) {
uint32_t previousRecord = playersRecord;
playersRecord = playersOnline;
for (const auto& it : g_globalEvents->getEventMap(GLOBALEVENT_RECORD)) {
it.second->executeRecord(playersRecord, previousRecord);
}
updatePlayersRecord();
}
}
void Game::updatePlayersRecord() const
{
Database* db = Database::getInstance();
std::ostringstream query;
query << "UPDATE `server_config` SET `value` = '" << playersRecord << "' WHERE `config` = 'players_record'";
db->executeQuery(query.str());
}
void Game::loadPlayersRecord()
{
Database* db = Database::getInstance();
DBResult_ptr result = db->storeQuery("SELECT `value` FROM `server_config` WHERE `config` = 'players_record'");
if (result) {
playersRecord = result->getNumber<uint32_t>("value");
} else {
db->executeQuery("INSERT INTO `server_config` (`config`, `value`) VALUES ('players_record', '0')");
}
}
uint64_t Game::getExperienceStage(uint32_t level)
{
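// with stages disabled the flat server rate applies; levels at or beyond the
// open-ended last stage reuse that stage's multiplier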
if (!stagesEnabled) {
return g_config.getNumber(ConfigManager::RATE_EXPERIENCE);
}
if (useLastStageLevel && level >= lastStageLevel) {
return stages[lastStageLevel];
}
return stages[level];
}
bool Game::loadExperienceStages()
{
pugi::xml_document doc;
pugi::xml_parse_result result = doc.load_file("data/XML/stages.xml");
if (!result) {
printXMLError("Error - Game::loadExperienceStages", "data/XML/stages.xml", result);
return false;
}
for (auto stageNode : doc.child("stages").children()) {
if (strcasecmp(stageNode.name(), "config") == 0) {
stagesEnabled = stageNode.attribute("enabled").as_bool();
} else {
uint32_t minLevel, maxLevel, multiplier;
pugi::xml_attribute minLevelAttribute = stageNode.attribute("minlevel");
if (minLevelAttribute) {
minLevel = pugi::cast<uint32_t>(minLevelAttribute.value());
} else {
minLevel = 1;
}
pugi::xml_attribute maxLevelAttribute = stageNode.attribute("maxlevel");
if (maxLevelAttribute) {
maxLevel = pugi::cast<uint32_t>(maxLevelAttribute.value());
} else {
maxLevel = 0;
lastStageLevel = minLevel;
useLastStageLevel = true;
}
pugi::xml_attribute multiplierAttribute = stageNode.attribute("multiplier");
if (multiplierAttribute) {
multiplier = pugi::cast<uint32_t>(multiplierAttribute.value());
} else {
multiplier = 1;
}
if (useLastStageLevel) {
stages[lastStageLevel] = multiplier;
} else {
for (uint32_t i = minLevel; i <= maxLevel; ++i) {
stages[i] = multiplier;
}
}
}
}
return true;
}
void Game::playerInviteToParty(uint32_t playerId, uint32_t invitedId)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
Player* invitedPlayer = getPlayerByID(invitedId);
if (!invitedPlayer || invitedPlayer->isInviting(player)) {
return;
}
if (invitedPlayer->getParty()) {
std::ostringstream ss;
ss << invitedPlayer->getName() << " is already in a party.";
player->sendTextMessage(MESSAGE_INFO_DESCR, ss.str());
return;
}
Party* party = player->getParty();
if (!party) {
party = new Party(player);
} else if (party->getLeader() != player) {
return;
}
party->invitePlayer(*invitedPlayer);
}
void Game::playerJoinParty(uint32_t playerId, uint32_t leaderId)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
Player* leader = getPlayerByID(leaderId);
if (!leader || !leader->isInviting(player)) {
return;
}
Party* party = leader->getParty();
if (!party || party->getLeader() != leader) {
return;
}
if (player->getParty()) {
player->sendTextMessage(MESSAGE_INFO_DESCR, "You are already in a party.");
return;
}
party->joinParty(*player);
}
void Game::playerRevokePartyInvitation(uint32_t playerId, uint32_t invitedId)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
Party* party = player->getParty();
if (!party || party->getLeader() != player) {
return;
}
Player* invitedPlayer = getPlayerByID(invitedId);
if (!invitedPlayer || !player->isInviting(invitedPlayer)) {
return;
}
party->revokeInvitation(*invitedPlayer);
}
void Game::playerPassPartyLeadership(uint32_t playerId, uint32_t newLeaderId)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
Party* party = player->getParty();
if (!party || party->getLeader() != player) {
return;
}
Player* newLeader = getPlayerByID(newLeaderId);
if (!newLeader || !player->isPartner(newLeader)) {
return;
}
party->passPartyLeadership(newLeader);
}
void Game::playerLeaveParty(uint32_t playerId)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
Party* party = player->getParty();
if (!party || player->hasCondition(CONDITION_INFIGHT)) {
return;
}
party->leaveParty(player);
}
void Game::playerEnableSharedPartyExperience(uint32_t playerId, bool sharedExpActive)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
Party* party = player->getParty();
if (!party || player->hasCondition(CONDITION_INFIGHT)) {
return;
}
party->setSharedExperience(player, sharedExpActive);
}
void Game::sendGuildMotd(uint32_t playerId)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
Guild* guild = player->getGuild();
if (guild) {
player->sendChannelMessage("Message of the Day", guild->getMotd(), TALKTYPE_CHANNEL_R1, CHANNEL_GUILD);
}
}
void Game::kickPlayer(uint32_t playerId, bool displayEffect)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
player->kickPlayer(displayEffect);
}
void Game::playerReportBug(uint32_t playerId, const std::string& message, const Position& position, uint8_t category)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
if (player->getAccountType() == ACCOUNT_TYPE_NORMAL) {
return;
}
std::string fileName = "data/reports/" + player->getName() + " report.txt";
FILE* file = fopen(fileName.c_str(), "a");
if (!file) {
player->sendTextMessage(MESSAGE_EVENT_DEFAULT, "There was an error when processing your report, please contact a gamemaster.");
return;
}
const Position& playerPosition = player->getPosition();
if (category == BUG_CATEGORY_MAP) {
fprintf(file, "------------------------------\nName: %s [Map Position: %u, %u, %u] [Player Position: %u, %u, %u]\nComment: %s\n", player->getName().c_str(), position.x, position.y, position.z, playerPosition.x, playerPosition.y, playerPosition.z, message.c_str());
} else {
fprintf(file, "------------------------------\nName: %s [Player Position: %u, %u, %u]\nComment: %s\n", player->getName().c_str(), playerPosition.x, playerPosition.y, playerPosition.z, message.c_str());
}
fclose(file);
player->sendTextMessage(MESSAGE_EVENT_DEFAULT, "Your report has been sent to " + g_config.getString(ConfigManager::SERVER_NAME) + ".");
}
void Game::playerDebugAssert(uint32_t playerId, const std::string& assertLine, const std::string& date, const std::string& description, const std::string& comment)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
// TODO: move debug assertions to database
FILE* file = fopen("client_assertions.txt", "a");
if (file) {
fprintf(file, "----- %s - %s (%s) -----\n", formatDate(time(nullptr)).c_str(), player->getName().c_str(), convertIPToString(player->getIP()).c_str());
fprintf(file, "%s\n%s\n%s\n%s\n", assertLine.c_str(), date.c_str(), description.c_str(), comment.c_str());
fclose(file);
}
}
void Game::playerLeaveMarket(uint32_t playerId)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
player->setInMarket(false);
}
void Game::playerBrowseMarket(uint32_t playerId, uint16_t spriteId)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
if (!player->isInMarket()) {
return;
}
const ItemType& it = Item::items.getItemIdByClientId(spriteId);
if (it.id == 0) {
return;
}
if (it.wareId == 0) {
return;
}
const MarketOfferList& buyOffers = IOMarket::getActiveOffers(MARKETACTION_BUY, it.id);
const MarketOfferList& sellOffers = IOMarket::getActiveOffers(MARKETACTION_SELL, it.id);
player->sendMarketBrowseItem(it.id, buyOffers, sellOffers);
player->sendMarketDetail(it.id);
}
void Game::playerBrowseMarketOwnOffers(uint32_t playerId)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
if (!player->isInMarket()) {
return;
}
const MarketOfferList& buyOffers = IOMarket::getOwnOffers(MARKETACTION_BUY, player->getGUID());
const MarketOfferList& sellOffers = IOMarket::getOwnOffers(MARKETACTION_SELL, player->getGUID());
player->sendMarketBrowseOwnOffers(buyOffers, sellOffers);
}
void Game::playerBrowseMarketOwnHistory(uint32_t playerId)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
if (!player->isInMarket()) {
return;
}
const HistoryMarketOfferList& buyOffers = IOMarket::getOwnHistory(MARKETACTION_BUY, player->getGUID());
const HistoryMarketOfferList& sellOffers = IOMarket::getOwnHistory(MARKETACTION_SELL, player->getGUID());
player->sendMarketBrowseOwnHistory(buyOffers, sellOffers);
}
void Game::playerCreateMarketOffer(uint32_t playerId, uint8_t type, uint16_t spriteId, uint16_t amount, uint32_t price, bool anonymous)
{
if (amount == 0 || amount > 64000) {
return;
}
if (price == 0 || price > 999999999) {
return;
}
if (type != MARKETACTION_BUY && type != MARKETACTION_SELL) {
return;
}
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
if (!player->isInMarket()) {
return;
}
if (g_config.getBoolean(ConfigManager::MARKET_PREMIUM) && !player->isPremium()) {
player->sendMarketLeave();
return;
}
const ItemType& itt = Item::items.getItemIdByClientId(spriteId);
if (itt.id == 0 || itt.wareId == 0) {
return;
}
const ItemType& it = Item::items.getItemIdByClientId(itt.wareId);
if (it.id == 0 || it.wareId == 0) {
return;
}
if (!it.stackable && amount > 2000) {
return;
}
const uint32_t maxOfferCount = g_config.getNumber(ConfigManager::MAX_MARKET_OFFERS_AT_A_TIME_PER_PLAYER);
if (maxOfferCount != 0 && IOMarket::getPlayerOfferCount(player->getGUID()) >= maxOfferCount) {
return;
}
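// the market fee is 1% of the piece price times the amount, clamped to the range [20, 1000]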
uint64_t fee = (price / 100.) * amount;
if (fee < 20) {
fee = 20;
} else if (fee > 1000) {
fee = 1000;
}
if (type == MARKETACTION_SELL) {
if (fee > player->bankBalance) {
return;
}
DepotChest* depotChest = player->getDepotChest(player->getLastDepotId(), false);
if (!depotChest) {
return;
}
std::forward_list<Item*> itemList = getMarketItemList(it.wareId, amount, depotChest, player->getInbox());
if (itemList.empty()) {
return;
}
if (it.stackable) {
uint16_t tmpAmount = amount;
for (Item* item : itemList) {
uint16_t removeCount = std::min<uint16_t>(tmpAmount, item->getItemCount());
tmpAmount -= removeCount;
internalRemoveItem(item, removeCount);
if (tmpAmount == 0) {
break;
}
}
} else {
for (Item* item : itemList) {
internalRemoveItem(item);
}
}
player->bankBalance -= fee;
} else {
uint64_t totalPrice = static_cast<uint64_t>(price) * amount;
totalPrice += fee;
if (totalPrice > player->bankBalance) {
return;
}
player->bankBalance -= totalPrice;
}
IOMarket::createOffer(player->getGUID(), static_cast<MarketAction_t>(type), it.id, amount, price, anonymous);
player->sendMarketEnter(player->getLastDepotId());
const MarketOfferList& buyOffers = IOMarket::getActiveOffers(MARKETACTION_BUY, it.id);
const MarketOfferList& sellOffers = IOMarket::getActiveOffers(MARKETACTION_SELL, it.id);
player->sendMarketBrowseItem(it.id, buyOffers, sellOffers);
}
void Game::playerCancelMarketOffer(uint32_t playerId, uint32_t timestamp, uint16_t counter)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
if (!player->isInMarket()) {
return;
}
MarketOfferEx offer = IOMarket::getOfferByCounter(timestamp, counter);
if (offer.id == 0 || offer.playerId != player->getGUID()) {
return;
}
if (offer.type == MARKETACTION_BUY) {
player->bankBalance += static_cast<uint64_t>(offer.price) * offer.amount;
player->sendMarketEnter(player->getLastDepotId());
} else {
const ItemType& it = Item::items[offer.itemId];
if (it.id == 0) {
return;
}
if (it.stackable) {
uint16_t tmpAmount = offer.amount;
while (tmpAmount > 0) {
int32_t stackCount = std::min<int32_t>(100, tmpAmount);
Item* item = Item::CreateItem(it.id, stackCount);
if (internalAddItem(player->getInbox(), item, INDEX_WHEREEVER, FLAG_NOLIMIT) != RETURNVALUE_NOERROR) {
delete item;
break;
}
tmpAmount -= stackCount;
}
} else {
int32_t subType;
if (it.charges != 0) {
subType = it.charges;
} else {
subType = -1;
}
for (uint16_t i = 0; i < offer.amount; ++i) {
Item* item = Item::CreateItem(it.id, subType);
if (internalAddItem(player->getInbox(), item, INDEX_WHEREEVER, FLAG_NOLIMIT) != RETURNVALUE_NOERROR) {
delete item;
break;
}
}
}
}
IOMarket::moveOfferToHistory(offer.id, OFFERSTATE_CANCELLED);
offer.amount = 0;
offer.timestamp += g_config.getNumber(ConfigManager::MARKET_OFFER_DURATION);
player->sendMarketCancelOffer(offer);
}
void Game::playerAcceptMarketOffer(uint32_t playerId, uint32_t timestamp, uint16_t counter, uint16_t amount)
{
if (amount == 0 || amount > 64000) {
return;
}
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
if (!player->isInMarket()) {
return;
}
MarketOfferEx offer = IOMarket::getOfferByCounter(timestamp, counter);
if (offer.id == 0) {
return;
}
if (amount > offer.amount) {
return;
}
const ItemType& it = Item::items[offer.itemId];
if (it.id == 0) {
return;
}
uint64_t totalPrice = static_cast<uint64_t>(offer.price) * amount;
if (offer.type == MARKETACTION_BUY) {
DepotChest* depotChest = player->getDepotChest(player->getLastDepotId(), false);
if (!depotChest) {
return;
}
std::forward_list<Item*> itemList = getMarketItemList(it.wareId, amount, depotChest, player->getInbox());
if (itemList.empty()) {
return;
}
Player* buyerPlayer = getPlayerByGUID(offer.playerId);
if (!buyerPlayer) {
buyerPlayer = new Player(nullptr);
if (!IOLoginData::loadPlayerById(buyerPlayer, offer.playerId)) {
delete buyerPlayer;
return;
}
}
if (it.stackable) {
uint16_t tmpAmount = amount;
for (Item* item : itemList) {
uint16_t removeCount = std::min<uint16_t>(tmpAmount, item->getItemCount());
tmpAmount -= removeCount;
internalRemoveItem(item, removeCount);
if (tmpAmount == 0) {
break;
}
}
} else {
for (Item* item : itemList) {
internalRemoveItem(item);
}
}
player->bankBalance += totalPrice;
if (it.stackable) {
uint16_t tmpAmount = amount;
while (tmpAmount > 0) {
uint16_t stackCount = std::min<uint16_t>(100, tmpAmount);
Item* item = Item::CreateItem(it.id, stackCount);
if (internalAddItem(buyerPlayer->getInbox(), item, INDEX_WHEREEVER, FLAG_NOLIMIT) != RETURNVALUE_NOERROR) {
delete item;
break;
}
tmpAmount -= stackCount;
}
} else {
int32_t subType;
if (it.charges != 0) {
subType = it.charges;
} else {
subType = -1;
}
for (uint16_t i = 0; i < amount; ++i) {
Item* item = Item::CreateItem(it.id, subType);
if (internalAddItem(buyerPlayer->getInbox(), item, INDEX_WHEREEVER, FLAG_NOLIMIT) != RETURNVALUE_NOERROR) {
delete item;
break;
}
}
}
if (buyerPlayer->isOffline()) {
IOLoginData::savePlayer(buyerPlayer);
delete buyerPlayer;
} else {
buyerPlayer->onReceiveMail();
}
} else {
if (totalPrice > player->bankBalance) {
return;
}
player->bankBalance -= totalPrice;
if (it.stackable) {
uint16_t tmpAmount = amount;
while (tmpAmount > 0) {
uint16_t stackCount = std::min<uint16_t>(100, tmpAmount);
Item* item = Item::CreateItem(it.id, stackCount);
if (internalAddItem(player->getInbox(), item, INDEX_WHEREEVER, FLAG_NOLIMIT) != RETURNVALUE_NOERROR) {
delete item;
break;
}
tmpAmount -= stackCount;
}
} else {
int32_t subType;
if (it.charges != 0) {
subType = it.charges;
} else {
subType = -1;
}
for (uint16_t i = 0; i < amount; ++i) {
Item* item = Item::CreateItem(it.id, subType);
if (internalAddItem(player->getInbox(), item, INDEX_WHEREEVER, FLAG_NOLIMIT) != RETURNVALUE_NOERROR) {
delete item;
break;
}
}
}
Player* sellerPlayer = getPlayerByGUID(offer.playerId);
if (sellerPlayer) {
sellerPlayer->bankBalance += totalPrice;
} else {
IOLoginData::increaseBankBalance(offer.playerId, totalPrice);
}
player->onReceiveMail();
}
const int32_t marketOfferDuration = g_config.getNumber(ConfigManager::MARKET_OFFER_DURATION);
IOMarket::appendHistory(player->getGUID(), (offer.type == MARKETACTION_BUY ? MARKETACTION_SELL : MARKETACTION_BUY), offer.itemId, amount, offer.price, offer.timestamp + marketOfferDuration, OFFERSTATE_ACCEPTEDEX);
IOMarket::appendHistory(offer.playerId, offer.type, offer.itemId, amount, offer.price, offer.timestamp + marketOfferDuration, OFFERSTATE_ACCEPTED);
offer.amount -= amount;
if (offer.amount == 0) {
IOMarket::deleteOffer(offer.id);
} else {
IOMarket::acceptOffer(offer.id, amount);
}
player->sendMarketEnter(player->getLastDepotId());
offer.timestamp += marketOfferDuration;
player->sendMarketAcceptOffer(offer);
}
void Game::parsePlayerExtendedOpcode(uint32_t playerId, uint8_t opcode, const std::string& buffer)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
for (CreatureEvent* creatureEvent : player->getCreatureEvents(CREATURE_EVENT_EXTENDED_OPCODE)) {
creatureEvent->executeExtendedOpcode(player, opcode, buffer);
}
}
std::forward_list<Item*> Game::getMarketItemList(uint16_t wareId, uint16_t sufficientCount, DepotChest* depotChest, Inbox* inbox)
{
std::forward_list<Item*> itemList;
uint16_t count = 0;
std::list<Container*> containers { depotChest, inbox };
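// walk the depot chest and inbox breadth-first, queueing nested containers as
// they are found; an empty list is returned if sufficientCount cannot be met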
do {
Container* container = containers.front();
containers.pop_front();
for (Item* item : container->getItemList()) {
Container* c = item->getContainer();
if (c && !c->empty()) {
containers.push_back(c);
continue;
}
const ItemType& itemType = Item::items[item->getID()];
if (itemType.wareId != wareId) {
continue;
}
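// empty containers only qualify if their item type is a container and their
// capacity is unchanged from the type default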
if (c && (!itemType.isContainer() || c->capacity() != itemType.maxItems)) {
continue;
}
if (!item->hasMarketAttributes()) {
continue;
}
itemList.push_front(item);
count += Item::countByType(item, -1);
if (count >= sufficientCount) {
return itemList;
}
}
} while (!containers.empty());
return std::forward_list<Item*>();
}
void Game::forceAddCondition(uint32_t creatureId, Condition* condition)
{
Creature* creature = getCreatureByID(creatureId);
if (!creature) {
delete condition;
return;
}
creature->addCondition(condition, true);
}
void Game::forceRemoveCondition(uint32_t creatureId, ConditionType_t type)
{
Creature* creature = getCreatureByID(creatureId);
if (!creature) {
return;
}
creature->removeCondition(type, true);
}
void Game::sendOfflineTrainingDialog(Player* player)
{
if (!player) {
return;
}
if (!player->hasModalWindowOpen(offlineTrainingWindow.id)) {
player->sendModalWindow(offlineTrainingWindow);
}
}
void Game::playerAnswerModalWindow(uint32_t playerId, uint32_t modalWindowId, uint8_t button, uint8_t choice)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
if (!player->hasModalWindowOpen(modalWindowId)) {
return;
}
player->onModalWindowHandled(modalWindowId);
// offline training, hardcoded
if (modalWindowId == std::numeric_limits<uint32_t>::max()) {
if (button == 1) {
if (choice == SKILL_SWORD || choice == SKILL_AXE || choice == SKILL_CLUB || choice == SKILL_DISTANCE || choice == SKILL_MAGLEVEL) {
BedItem* bedItem = player->getBedItem();
if (bedItem && bedItem->sleep(player)) {
player->setOfflineTrainingSkill(choice);
return;
}
}
} else {
player->sendTextMessage(MESSAGE_EVENT_ADVANCE, "Offline training aborted.");
}
player->setBedItem(nullptr);
} else {
for (auto creatureEvent : player->getCreatureEvents(CREATURE_EVENT_MODALWINDOW)) {
creatureEvent->executeModalWindow(player, modalWindowId, button, choice);
}
}
}
void Game::addPlayer(Player* player)
{
const std::string& lowercase_name = asLowerCaseString(player->getName());
mappedPlayerNames[lowercase_name] = player;
wildcardTree.insert(lowercase_name);
players[player->getID()] = player;
}
void Game::removePlayer(Player* player)
{
const std::string& lowercase_name = asLowerCaseString(player->getName());
mappedPlayerNames.erase(lowercase_name);
wildcardTree.remove(lowercase_name);
players.erase(player->getID());
}
void Game::addNpc(Npc* npc)
{
npcs[npc->getID()] = npc;
}
void Game::removeNpc(Npc* npc)
{
npcs.erase(npc->getID());
}
void Game::addMonster(Monster* monster)
{
monsters[monster->getID()] = monster;
}
void Game::removeMonster(Monster* monster)
{
monsters.erase(monster->getID());
}
Guild* Game::getGuild(uint32_t id) const
{
auto it = guilds.find(id);
if (it == guilds.end()) {
return nullptr;
}
return it->second;
}
void Game::addGuild(Guild* guild)
{
guilds[guild->getId()] = guild;
}
void Game::removeGuild(uint32_t guildId)
{
guilds.erase(guildId);
}
void Game::decreaseBrowseFieldRef(const Position& pos)
{
Tile* tile = map.getTile(pos.x, pos.y, pos.z);
if (!tile) {
return;
}
auto it = browseFields.find(tile);
if (it != browseFields.end()) {
it->second->decrementReferenceCounter();
}
}
void Game::internalRemoveItems(std::vector<Item*> itemList, uint32_t amount, bool stackable)
{
if (stackable) {
for (Item* item : itemList) {
if (item->getItemCount() > amount) {
internalRemoveItem(item, amount);
break;
} else {
amount -= item->getItemCount();
internalRemoveItem(item);
}
}
} else {
for (Item* item : itemList) {
internalRemoveItem(item);
}
}
}
BedItem* Game::getBedBySleeper(uint32_t guid) const
{
auto it = bedSleepersMap.find(guid);
if (it == bedSleepersMap.end()) {
return nullptr;
}
return it->second;
}
void Game::setBedSleeper(BedItem* bed, uint32_t guid)
{
bedSleepersMap[guid] = bed;
}
void Game::removeBedSleeper(uint32_t guid)
{
auto it = bedSleepersMap.find(guid);
if (it != bedSleepersMap.end()) {
bedSleepersMap.erase(it);
}
}
Item* Game::getUniqueItem(uint16_t uniqueId)
{
auto it = uniqueItems.find(uniqueId);
if (it == uniqueItems.end()) {
return nullptr;
}
return it->second;
}
bool Game::addUniqueItem(uint16_t uniqueId, Item* item)
{
auto result = uniqueItems.emplace(uniqueId, item);
if (!result.second) {
std::cout << "Duplicate unique id: " << uniqueId << std::endl;
}
return result.second;
}
void Game::removeUniqueItem(uint16_t uniqueId)
{
auto it = uniqueItems.find(uniqueId);
if (it != uniqueItems.end()) {
uniqueItems.erase(it);
}
}
| 1 | 11,647 | Remove this line please. | otland-forgottenserver | cpp |
@@ -144,6 +144,7 @@ public class ZMSImpl implements Authorizer, KeyStore, ZMSHandler {
protected int domainNameMaxLen;
protected AuthorizedServices serverAuthorizedServices = null;
protected SolutionTemplates serverSolutionTemplates = null;
+ protected Map<String, Integer> eligibleTemplatesForAutoUpdate = null;
protected Map<String, String> serverPublicKeyMap = null;
protected boolean readOnlyMode = false;
protected boolean validateUserRoleMembers = false; | 1 | /*
* Copyright 2016 Yahoo Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.yahoo.athenz.zms;
import com.google.common.primitives.Bytes;
import com.yahoo.athenz.auth.*;
import com.yahoo.athenz.auth.impl.SimplePrincipal;
import com.yahoo.athenz.auth.token.PrincipalToken;
import com.yahoo.athenz.auth.util.Crypto;
import com.yahoo.athenz.common.metrics.Metric;
import com.yahoo.athenz.common.metrics.MetricFactory;
import com.yahoo.athenz.common.server.audit.AuditReferenceValidator;
import com.yahoo.athenz.common.server.audit.AuditReferenceValidatorFactory;
import com.yahoo.athenz.common.server.log.AuditLogger;
import com.yahoo.athenz.common.server.log.AuditLoggerFactory;
import com.yahoo.athenz.common.server.notification.Notification;
import com.yahoo.athenz.common.server.notification.NotificationManager;
import com.yahoo.athenz.common.server.rest.Http;
import com.yahoo.athenz.common.server.rest.Http.AuthorityList;
import com.yahoo.athenz.common.server.util.ConfigProperties;
import com.yahoo.athenz.common.server.util.ServletRequestUtil;
import com.yahoo.athenz.common.server.util.StringUtils;
import com.yahoo.athenz.common.utils.SignUtils;
import com.yahoo.athenz.zms.config.AllowedOperation;
import com.yahoo.athenz.zms.config.AuthorizedService;
import com.yahoo.athenz.zms.config.AuthorizedServices;
import com.yahoo.athenz.zms.config.SolutionTemplates;
import com.yahoo.athenz.zms.notification.*;
import com.yahoo.athenz.zms.store.AthenzDomain;
import com.yahoo.athenz.zms.store.ObjectStore;
import com.yahoo.athenz.zms.store.ObjectStoreFactory;
import com.yahoo.athenz.zms.utils.ZMSUtils;
import com.yahoo.rdl.JSON;
import com.yahoo.rdl.Schema;
import com.yahoo.rdl.Timestamp;
import com.yahoo.rdl.UUID;
import com.yahoo.rdl.Validator;
import com.yahoo.rdl.Validator.Result;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import javax.ws.rs.core.EntityTag;
import javax.ws.rs.core.Response;
import java.io.File;
import java.io.IOException;
import java.net.InetAddress;
import java.net.URISyntaxException;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.security.PrivateKey;
import java.security.PublicKey;
import java.text.ParseException;
import java.text.SimpleDateFormat;
import java.util.*;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.TimeUnit;
import java.util.function.BiConsumer;
import java.util.function.Function;
import java.util.regex.Pattern;
import com.fasterxml.jackson.databind.ObjectMapper;
import static com.yahoo.athenz.common.ServerCommonConsts.USER_DOMAIN_PREFIX;
import static com.yahoo.athenz.common.server.notification.NotificationServiceConstants.*;
public class ZMSImpl implements Authorizer, KeyStore, ZMSHandler {
private static final Logger LOG = LoggerFactory.getLogger(ZMSImpl.class);
private static String ROOT_DIR;
private static final String ROLE_PREFIX = "role.";
private static final String POLICY_PREFIX = "policy.";
private static final String ADMIN_POLICY_NAME = "admin";
private static final String ADMIN_ROLE_NAME = "admin";
private static final String META_ATTR_ACCOUNT = "account";
private static final String META_ATTR_YPM_ID = "ypmid";
private static final String META_ATTR_ALL = "all";
private static final String SYS_AUTH = "sys.auth";
private static final String USER_TOKEN_DEFAULT_NAME = "_self_";
// data validation types
private static final String TYPE_DOMAIN_NAME = "DomainName";
private static final String TYPE_ENTITY_NAME = "EntityName";
private static final String TYPE_SIMPLE_NAME = "SimpleName";
private static final String TYPE_MEMBER_NAME = "MemberName";
private static final String TYPE_COMPOUND_NAME = "CompoundName";
private static final String TYPE_RESOURCE_NAME = "ResourceName";
private static final String TYPE_SERVICE_NAME = "ServiceName";
private static final String TYPE_ROLE = "Role";
private static final String TYPE_POLICY = "Policy";
private static final String TYPE_ASSERTION = "Assertion";
private static final String TYPE_SERVICE_IDENTITY = "ServiceIdentity";
private static final String TYPE_TOP_LEVEL_DOMAIN = "TopLevelDomain";
private static final String TYPE_SUB_DOMAIN = "SubDomain";
private static final String TYPE_USER_DOMAIN = "UserDomain";
private static final String TYPE_DOMAIN_META = "DomainMeta";
private static final String TYPE_DOMAIN_TEMPLATE = "DomainTemplate";
private static final String TYPE_TENANT_RESOURCE_GROUP_ROLES = "TenantResourceGroupRoles";
private static final String TYPE_PROVIDER_RESOURCE_GROUP_ROLES = "ProviderResourceGroupRoles";
private static final String TYPE_PUBLIC_KEY_ENTRY = "PublicKeyEntry";
private static final String TYPE_MEMBERSHIP = "Membership";
private static final String TYPE_QUOTA = "Quota";
private static final String TYPE_ROLE_SYSTEM_META = "RoleSystemMeta";
private static final String TYPE_ROLE_META = "RoleMeta";
private static final String TYPE_SERVICE_IDENTITY_SYSTEM_META = "ServiceIdentitySystemMeta";
private static final String SERVER_READ_ONLY_MESSAGE = "Server in Maintenance Read-Only mode. Please try your request later";
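// the ASCII byte value of the '.' character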
private static final byte[] PERIOD = { 46 };
public static Metric metric;
public static String serverHostName = null;
protected DBService dbService = null;
protected Schema schema = null;
protected ServerPrivateKey privateKey = null;
protected ServerPrivateKey privateECKey = null;
protected ServerPrivateKey privateRSAKey = null;
protected int userTokenTimeout = 3600;
protected boolean virtualDomainSupport = true;
protected boolean productIdSupport = false;
protected int virtualDomainLimit = 2;
protected long signedPolicyTimeout;
protected int domainNameMaxLen;
protected AuthorizedServices serverAuthorizedServices = null;
protected SolutionTemplates serverSolutionTemplates = null;
protected Map<String, String> serverPublicKeyMap = null;
protected boolean readOnlyMode = false;
protected boolean validateUserRoleMembers = false;
protected boolean validateServiceRoleMembers = false;
protected boolean useMasterCopyForSignedDomains = false;
protected Set<String> validateServiceMemberSkipDomains;
protected static Validator validator;
protected String userDomain;
protected String userDomainPrefix;
protected String homeDomain;
protected String homeDomainPrefix;
protected String userDomainAlias;
protected String userDomainAliasPrefix;
protected String serverRegion = null;
protected List<String> addlUserCheckDomainPrefixList = null;
protected Http.AuthorityList authorities = null;
protected List<String> providerEndpoints = null;
protected Set<String> reservedServiceNames = null;
protected PrivateKeyStore keyStore = null;
protected boolean secureRequestsOnly = true;
protected AuditLogger auditLogger = null;
protected Authority userAuthority = null;
protected Authority principalAuthority = null;
protected Set<String> authFreeUriSet = null;
protected List<Pattern> authFreeUriList = null;
protected Set<String> corsOriginList = null;
protected int httpPort;
protected int httpsPort;
protected int statusPort;
protected int serviceNameMinLength;
protected Status successServerStatus = null;
protected Set<String> reservedSystemDomains = null;
protected File healthCheckFile = null;
protected AuditReferenceValidator auditReferenceValidator = null;
protected NotificationManager notificationManager = null;
protected ObjectMapper jsonMapper;
// enum to represent our access response since in some cases we want to
// handle domain not founds differently instead of just returning failure
enum AccessStatus {
ALLOWED,
DENIED,
DENIED_INVALID_ROLE_TOKEN
}
enum AthenzObject {
ASSERTION {
void convertToLowerCase(Object obj) {
Assertion assertion = (Assertion) obj;
assertion.setAction(assertion.getAction().toLowerCase());
assertion.setResource(assertion.getResource().toLowerCase());
assertion.setRole(assertion.getRole().toLowerCase());
}
},
DEFAULT_ADMINS {
void convertToLowerCase(Object obj) {
DefaultAdmins defaultAdmins = (DefaultAdmins) obj;
LIST.convertToLowerCase(defaultAdmins.getAdmins());
}
},
DOMAIN_TEMPLATE {
void convertToLowerCase(Object obj) {
DomainTemplate template = (DomainTemplate) obj;
if (template != null) {
LIST.convertToLowerCase(template.getTemplateNames());
List<TemplateParam> params = template.getParams();
if (params != null) {
for (TemplateParam param : params) {
param.setName(param.getName().toLowerCase());
param.setValue(param.getValue().toLowerCase());
}
}
}
}
},
DOMAIN_TEMPLATE_LIST {
void convertToLowerCase(Object obj) {
DomainTemplateList templates = (DomainTemplateList) obj;
if (templates != null) {
LIST.convertToLowerCase(templates.getTemplateNames());
}
}
},
ENTITY {
void convertToLowerCase(Object obj) {
Entity entity = (Entity) obj;
entity.setName(entity.getName().toLowerCase());
}
},
LIST {
void convertToLowerCase(Object obj) {
@SuppressWarnings("unchecked")
List<String> list = (List<String>) obj;
if (list != null) {
ListIterator<String> iter = list.listIterator();
while (iter.hasNext()) {
iter.set(iter.next().toLowerCase());
}
}
}
},
MEMBERSHIP {
void convertToLowerCase(Object obj) {
Membership membership = (Membership) obj;
membership.setMemberName(membership.getMemberName().toLowerCase());
if (membership.getRoleName() != null) {
membership.setRoleName(membership.getRoleName().toLowerCase());
}
}
},
POLICY {
void convertToLowerCase(Object obj) {
Policy policy = (Policy) obj;
policy.setName(policy.getName().toLowerCase());
if (policy.getAssertions() != null) {
for (Assertion assertion : policy.getAssertions()) {
ASSERTION.convertToLowerCase(assertion);
}
}
}
},
PROVIDER_RESOURCE_GROUP_ROLES {
void convertToLowerCase(Object obj) {
ProviderResourceGroupRoles tenantRoles = (ProviderResourceGroupRoles) obj;
tenantRoles.setDomain(tenantRoles.getDomain().toLowerCase());
tenantRoles.setService(tenantRoles.getService().toLowerCase());
tenantRoles.setTenant(tenantRoles.getTenant().toLowerCase());
tenantRoles.setResourceGroup(tenantRoles.getResourceGroup().toLowerCase());
if (tenantRoles.getRoles() != null) {
for (TenantRoleAction roleAction : tenantRoles.getRoles()) {
TENANT_ROLE_ACTION.convertToLowerCase(roleAction);
}
}
}
},
PUBLIC_KEY_ENTRY {
void convertToLowerCase(Object obj) {
PublicKeyEntry keyEntry = (PublicKeyEntry) obj;
keyEntry.setId(keyEntry.getId().toLowerCase());
}
},
ROLE {
void convertToLowerCase(Object obj) {
Role role = (Role) obj;
role.setName(role.getName().toLowerCase());
if (role.getTrust() != null) {
role.setTrust(role.getTrust().toLowerCase());
}
LIST.convertToLowerCase(role.getMembers());
ROLE_MEMBER.convertToLowerCase(role.getRoleMembers());
}
},
ROLE_META {
void convertToLowerCase(Object obj) {
RoleMeta roleMeta = (RoleMeta) obj;
if (roleMeta.getNotifyRoles() != null) {
roleMeta.setNotifyRoles(roleMeta.getNotifyRoles().toLowerCase());
}
if (roleMeta.getSignAlgorithm() != null) {
roleMeta.setSignAlgorithm(roleMeta.getSignAlgorithm().toLowerCase());
}
}
},
ROLE_MEMBER {
void convertToLowerCase(Object obj) {
@SuppressWarnings("unchecked")
List<RoleMember> list = (List<RoleMember>) obj;
if (list != null) {
ListIterator<RoleMember> iter = list.listIterator();
while (iter.hasNext()) {
RoleMember roleMember = iter.next();
iter.set(roleMember.setMemberName(roleMember.getMemberName().toLowerCase()));
}
}
}
},
SERVICE_IDENTITY {
void convertToLowerCase(Object obj) {
ServiceIdentity service = (ServiceIdentity) obj;
service.setName(service.getName().toLowerCase());
LIST.convertToLowerCase(service.getHosts());
if (service.getPublicKeys() != null) {
for (PublicKeyEntry key : service.getPublicKeys()) {
PUBLIC_KEY_ENTRY.convertToLowerCase(key);
}
}
}
},
SUB_DOMAIN {
void convertToLowerCase(Object obj) {
SubDomain subdomain = (SubDomain) obj;
subdomain.setName(subdomain.getName().toLowerCase());
subdomain.setParent(subdomain.getParent().toLowerCase());
if (subdomain.getSignAlgorithm() != null) {
subdomain.setSignAlgorithm(subdomain.getSignAlgorithm().toLowerCase());
}
LIST.convertToLowerCase(subdomain.getAdminUsers());
DOMAIN_TEMPLATE_LIST.convertToLowerCase(subdomain.getTemplates());
}
},
TENANCY {
void convertToLowerCase(Object obj) {
Tenancy tenancy = (Tenancy) obj;
tenancy.setDomain(tenancy.getDomain().toLowerCase());
tenancy.setService(tenancy.getService().toLowerCase());
LIST.convertToLowerCase(tenancy.getResourceGroups());
}
},
TENANT_RESOURCE_GROUP_ROLES {
void convertToLowerCase(Object obj) {
TenantResourceGroupRoles tenantRoles = (TenantResourceGroupRoles) obj;
tenantRoles.setDomain(tenantRoles.getDomain().toLowerCase());
tenantRoles.setService(tenantRoles.getService().toLowerCase());
tenantRoles.setTenant(tenantRoles.getTenant().toLowerCase());
tenantRoles.setResourceGroup(tenantRoles.getResourceGroup().toLowerCase());
if (tenantRoles.getRoles() != null) {
for (TenantRoleAction roleAction : tenantRoles.getRoles()) {
TENANT_ROLE_ACTION.convertToLowerCase(roleAction);
}
}
}
},
TENANT_ROLE_ACTION {
void convertToLowerCase(Object obj) {
TenantRoleAction roleAction = (TenantRoleAction) obj;
roleAction.setAction(roleAction.getAction().toLowerCase());
roleAction.setRole(roleAction.getRole().toLowerCase());
}
},
TOP_LEVEL_DOMAIN {
void convertToLowerCase(Object obj) {
TopLevelDomain domain = (TopLevelDomain) obj;
domain.setName(domain.getName().toLowerCase());
LIST.convertToLowerCase(domain.getAdminUsers());
DOMAIN_TEMPLATE_LIST.convertToLowerCase(domain.getTemplates());
if (domain.getOrg() != null) {
domain.setOrg(domain.getOrg().toLowerCase());
}
if (domain.getSignAlgorithm() != null) {
domain.setSignAlgorithm(domain.getSignAlgorithm().toLowerCase());
}
}
},
QUOTA {
void convertToLowerCase(Object obj) {
Quota quota = (Quota) obj;
quota.setName(quota.getName().toLowerCase());
}
},
USER_DOMAIN {
void convertToLowerCase(Object obj) {
UserDomain userDomain = (UserDomain) obj;
userDomain.setName(userDomain.getName().toLowerCase());
if (userDomain.getSignAlgorithm() != null) {
userDomain.setSignAlgorithm(userDomain.getSignAlgorithm().toLowerCase());
}
DOMAIN_TEMPLATE_LIST.convertToLowerCase(userDomain.getTemplates());
}
},
DOMAIN_META {
void convertToLowerCase(Object obj) {
DomainMeta domainMeta = (DomainMeta) obj;
if (domainMeta.getCertDnsDomain() != null) {
domainMeta.setCertDnsDomain(domainMeta.getCertDnsDomain().toLowerCase());
}
if (domainMeta.getOrg() != null) {
domainMeta.setOrg(domainMeta.getOrg().toLowerCase());
}
if (domainMeta.getSignAlgorithm() != null) {
domainMeta.setSignAlgorithm(domainMeta.getSignAlgorithm().toLowerCase());
}
}
};
abstract void convertToLowerCase(Object obj);
}
public ZMSImpl() {
// before doing anything else we need to load our
// system properties from our config file
loadSystemProperties();
// let's first get our server hostname
ZMSImpl.serverHostName = getServerHostName();
// create our json mapper
jsonMapper = new ObjectMapper();
// before we do anything we need to load our configuration
// settings
loadConfigurationSettings();
// load our schema validator - we need this before we initialize
// our store, if necessary
loadSchemaValidator();
// let's load our audit logger
loadAuditLogger();
// load any audit reference validator
loadAuditRefValidator();
// load any configured authorities to authenticate principals
loadAuthorities();
// we need a private key to sign any tokens and documents
loadPrivateKeyStore();
// check if we need to load any metric support for stats
loadMetricObject();
// load the Solution templates
loadSolutionTemplates();
// our object store - either mysql or file based
loadObjectStore();
// initialize our store with default domains
// this should only happen when running ZMS in local/debug mode
// otherwise the store should have been initialized by now
initObjectStore();
// load the list of authorized services
loadAuthorizedServices();
// retrieve our public keys
loadServerPublicKeys();
// make sure to set the keystore for any instance that requires it
setAuthorityKeyStore();
// Initialize Notification Manager
setNotificationManager();
}
private void setNotificationManager() {
ZMSNotificationTaskFactory zmsNotificationTaskFactory = new ZMSNotificationTaskFactory(dbService, userDomainPrefix);
notificationManager = new NotificationManager(zmsNotificationTaskFactory.getNotificationTasks());
}
void loadSystemProperties() {
String propFile = System.getProperty(ZMSConsts.ZMS_PROP_FILE_NAME,
getRootDir() + "/conf/zms_server/zms.properties");
ConfigProperties.loadProperties(propFile);
}
void setAuthorityKeyStore() {
for (Authority authority : authorities.getAuthorities()) {
if (AuthorityKeyStore.class.isInstance(authority)) {
((AuthorityKeyStore) authority).setKeyStore(this);
}
}
}
void loadSchemaValidator() {
schema = ZMSSchema.instance();
validator = new Validator(schema);
}
void loadConfigurationSettings() {
// make sure all requests run in secure mode
secureRequestsOnly = Boolean.parseBoolean(System.getProperty(ZMSConsts.ZMS_PROP_SECURE_REQUESTS_ONLY, "true"));
// retrieve the regular and status ports
httpPort = ConfigProperties.getPortNumber(ZMSConsts.ZMS_PROP_HTTP_PORT,
ZMSConsts.ZMS_HTTP_PORT_DEFAULT);
httpsPort = ConfigProperties.getPortNumber(ZMSConsts.ZMS_PROP_HTTPS_PORT,
ZMSConsts.ZMS_HTTPS_PORT_DEFAULT);
statusPort = ConfigProperties.getPortNumber(ZMSConsts.ZMS_PROP_STATUS_PORT, 0);
successServerStatus = new Status().setCode(ResourceException.OK).setMessage("OK");
// retrieve the user domain we're supposed to use
userDomain = System.getProperty(ZMSConsts.ZMS_PROP_USER_DOMAIN, ZMSConsts.USER_DOMAIN);
userDomainPrefix = userDomain + ".";
userDomainAlias = System.getProperty(ZMSConsts.ZMS_PROP_USER_DOMAIN_ALIAS);
if (userDomainAlias != null) {
userDomainAliasPrefix = userDomainAlias + ".";
}
final String addlUserCheckDomains = System.getProperty(ZMSConsts.ZMS_PROP_ADDL_USER_CHECK_DOMAINS);
if (addlUserCheckDomains != null && !addlUserCheckDomains.isEmpty()) {
String[] checkDomains = addlUserCheckDomains.split(",");
addlUserCheckDomainPrefixList = new ArrayList<>();
for (String checkDomain : checkDomains) {
addlUserCheckDomainPrefixList.add(checkDomain + ".");
}
}
homeDomain = System.getProperty(ZMSConsts.ZMS_PROP_HOME_DOMAIN, userDomain);
homeDomainPrefix = homeDomain + ".";
// default token timeout for issued tokens
userTokenTimeout = Integer.parseInt(
System.getProperty(ZMSConsts.ZMS_PROP_TIMEOUT, "3600"));
// check if we need to run in maintenance read only mode
readOnlyMode = Boolean.parseBoolean(
System.getProperty(ZMSConsts.ZMS_PROP_READ_ONLY_MODE, "false"));
// check to see if we need to validate all user and service members
// when adding them to roles
validateUserRoleMembers = Boolean.parseBoolean(
System.getProperty(ZMSConsts.ZMS_PROP_VALIDATE_USER_MEMBERS, "false"));
validateServiceRoleMembers = Boolean.parseBoolean(
System.getProperty(ZMSConsts.ZMS_PROP_VALIDATE_SERVICE_MEMBERS, "false"));
// there are going to be domains, such as our ci/cd dynamic project domains,
// where we can't verify the service role members, so we allow specific
// domains to be skipped from these validation checks
final String skipDomains = System.getProperty(
ZMSConsts.ZMS_PROP_VALIDATE_SERVICE_MEMBERS_SKIP_DOMAINS, "");
validateServiceMemberSkipDomains = new HashSet<>(Arrays.asList(skipDomains.split(",")));
// check to see if we need to support product ids as required
// for top level domains
productIdSupport = Boolean.parseBoolean(
System.getProperty(ZMSConsts.ZMS_PROP_PRODUCT_ID_SUPPORT, "false"));
// get the list of valid provider endpoints
final String endPoints = System.getProperty(ZMSConsts.ZMS_PROP_PROVIDER_ENDPOINTS);
if (endPoints != null) {
providerEndpoints = Arrays.asList(endPoints.split(","));
}
// retrieve virtual domain support and limit. If we're given an invalid negative
// value for the limit, we'll fall back to the default value of 5
virtualDomainSupport = Boolean.parseBoolean(
System.getProperty(ZMSConsts.ZMS_PROP_VIRTUAL_DOMAIN, "true"));
virtualDomainLimit = Integer.parseInt(
System.getProperty(ZMSConsts.ZMS_PROP_VIRTUAL_DOMAIN_LIMIT, "5"));
if (virtualDomainLimit < 0) {
virtualDomainLimit = 5;
}
// signedPolicyTimeout is in milliseconds but the config setting should be in seconds
// to be consistent with other configuration properties (Default 7 days)
signedPolicyTimeout = 1000 * Long.parseLong(
System.getProperty(ZMSConsts.ZMS_PROP_SIGNED_POLICY_TIMEOUT, "604800"));
if (signedPolicyTimeout < 0) {
signedPolicyTimeout = 1000 * 604800;
}
useMasterCopyForSignedDomains = Boolean.parseBoolean(
System.getProperty(ZMSConsts.ZMS_PROP_MASTER_COPY_FOR_SIGNED_DOMAINS, "false"));
// get the maximum length allowed for a top level domain name
domainNameMaxLen = Integer.parseInt(System.getProperty(
ZMSConsts.ZMS_PROP_DOMAIN_NAME_MAX_SIZE, ZMSConsts.ZMS_DOMAIN_NAME_MAX_SIZE_DEFAULT));
if (domainNameMaxLen < 10) { // 10 is arbitrary
int domNameMaxDefault = Integer.parseInt(ZMSConsts.ZMS_DOMAIN_NAME_MAX_SIZE_DEFAULT);
LOG.warn("init: Warning: maximum domain name length specified is too small: " +
domainNameMaxLen + " : reverting to default: " + domNameMaxDefault);
domainNameMaxLen = domNameMaxDefault;
}
LOG.info("init: using maximum domain name length: " + domainNameMaxLen);
// get the list of uris that we want to allow unauthenticated access to
final String uriList = System.getProperty(ZMSConsts.ZMS_PROP_NOAUTH_URI_LIST);
if (uriList != null) {
authFreeUriSet = new HashSet<>();
authFreeUriList = new ArrayList<>();
String[] list = uriList.split(",");
for (String uri : list) {
if (uri.indexOf('+') != -1) {
authFreeUriList.add(Pattern.compile(uri));
} else {
authFreeUriSet.add(uri);
}
}
}
// get the list of white listed origin values for cors requests
final String originList = System.getProperty(ZMSConsts.ZMS_PROP_CORS_ORIGIN_LIST);
if (originList != null) {
corsOriginList = new HashSet<>(Arrays.asList(originList.split(",")));
}
// get the list of reserved service names
final String serviceNames = System.getProperty(ZMSConsts.ZMS_PROP_RESERVED_SERVICE_NAMES,
ZMSConsts.ZMS_RESERVED_SERVICE_NAMES_DEFAULT);
reservedServiceNames = new HashSet<>(Arrays.asList(serviceNames.split(",")));
// min length for service names
serviceNameMinLength = Integer.parseInt(
System.getProperty(ZMSConsts.ZMS_PROP_SERVICE_NAME_MIN_LENGTH, "3"));
// setup our reserved system domain names
reservedSystemDomains = new HashSet<>();
reservedSystemDomains.add("sys");
reservedSystemDomains.add("sys.auth");
reservedSystemDomains.add("sys.auth.audit");
reservedSystemDomains.add("sys.auth.audit.org");
reservedSystemDomains.add("sys.auth.audit.domain");
reservedSystemDomains.add(userDomain);
reservedSystemDomains.add(homeDomain);
// setup our health check file
final String healthCheckPath = System.getProperty(ZMSConsts.ZMS_PROP_HEALTH_CHECK_PATH);
if (healthCheckPath != null && !healthCheckPath.isEmpty()) {
healthCheckFile = new File(healthCheckPath);
}
// get server region
serverRegion = System.getProperty(ZMSConsts.ZMS_PROP_SERVER_REGION);
}
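// a minimal configuration sketch (keys are defined in ZMSConsts; the values
// below are illustrative, not authoritative):
//   secure requests only   -> true    (reject non-secure api requests)
//   user domain            -> user    (principals are user.<name>)
//   home domain            -> user    (defaults to the user domain)
//   virtual domain limit   -> 5       (max subdomains per user, 0 = unlimited)
//   signed policy timeout  -> 604800  (seconds, converted to ms at load time)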
void loadObjectStore() {
String objFactoryClass = System.getProperty(ZMSConsts.ZMS_PROP_OBJECT_STORE_FACTORY_CLASS,
ZMSConsts.ZMS_OBJECT_STORE_FACTORY_CLASS);
ObjectStoreFactory objFactory;
try {
objFactory = (ObjectStoreFactory) Class.forName(objFactoryClass).newInstance();
} catch (InstantiationException | IllegalAccessException | ClassNotFoundException e) {
LOG.error("Invalid ObjectStoreFactory class: " + objFactoryClass
+ " error: " + e.getMessage());
throw new IllegalArgumentException("Invalid object store");
}
ZMSConfig zmsConfig = new ZMSConfig();
zmsConfig.setUserDomain(userDomain);
zmsConfig.setAddlUserCheckDomainPrefixList(addlUserCheckDomainPrefixList);
zmsConfig.setUserDomainPrefix(userDomainPrefix);
zmsConfig.setServerHostName(serverHostName);
zmsConfig.setServerSolutionTemplates(serverSolutionTemplates);
zmsConfig.setUserAuthority(userAuthority);
ObjectStore store = objFactory.create(keyStore);
dbService = new DBService(store, auditLogger, zmsConfig, auditReferenceValidator);
}
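// all pluggable components (object store, metric, private key store, audit
// logger) follow the same pattern: a factory class name is read from a system
// property, instantiated reflectively, and asked to create the component. a
// hypothetical custom store could be wired in with something like:
//   -D<object-store-factory-property>=com.example.MyObjectStoreFactory
// where com.example.MyObjectStoreFactory implements ObjectStoreFactory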
void loadMetricObject() {
String metricFactoryClass = System.getProperty(ZMSConsts.ZMS_PROP_METRIC_FACTORY_CLASS,
ZMSConsts.ZMS_METRIC_FACTORY_CLASS);
MetricFactory metricFactory;
try {
metricFactory = (MetricFactory) Class.forName(metricFactoryClass).newInstance();
} catch (InstantiationException | IllegalAccessException | ClassNotFoundException e) {
LOG.error("Invalid MetricFactory class: " + metricFactoryClass
+ " error: " + e.getMessage());
throw new IllegalArgumentException("Invalid metric class");
}
// create our metric and increment our startup count
ZMSImpl.metric = metricFactory.create();
metric.increment("zms_sa_startup");
}
void loadPrivateKeyStore() {
String pkeyFactoryClass = System.getProperty(ZMSConsts.ZMS_PROP_PRIVATE_KEY_STORE_FACTORY_CLASS,
ZMSConsts.ZMS_PRIVATE_KEY_STORE_FACTORY_CLASS);
PrivateKeyStoreFactory pkeyFactory;
try {
pkeyFactory = (PrivateKeyStoreFactory) Class.forName(pkeyFactoryClass).newInstance();
} catch (InstantiationException | IllegalAccessException | ClassNotFoundException e) {
LOG.error("Invalid PrivateKeyStoreFactory class: " + pkeyFactoryClass
+ " error: " + e.getMessage());
throw new IllegalArgumentException("Invalid private key store");
}
// extract the private key and public keys for our service
keyStore = pkeyFactory.create();
privateECKey = keyStore.getPrivateKey(ZMSConsts.ZMS_SERVICE, serverHostName,
serverRegion, ZMSConsts.EC);
privateRSAKey = keyStore.getPrivateKey(ZMSConsts.ZMS_SERVICE, serverHostName,
serverRegion, ZMSConsts.RSA);
// if we don't have ec and rsa specific keys specified then we're going to fall
// back and use the old private key api and use that for our private key
// if both ec and rsa keys are provided, we use the ec key as preferred
// when signing policy files
if (privateECKey == null && privateRSAKey == null) {
StringBuilder privKeyId = new StringBuilder(256);
PrivateKey pkey = keyStore.getPrivateKey(ZMSConsts.ZMS_SERVICE, serverHostName, privKeyId);
privateKey = new ServerPrivateKey(pkey, privKeyId.toString());
} else if (privateECKey != null) {
privateKey = privateECKey;
} else {
privateKey = privateRSAKey;
}
}
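// key selection summary:
//   ec present, rsa absent  -> ec key used
//   ec absent,  rsa present -> rsa key used
//   both present            -> ec key preferred for signing
//   neither present         -> legacy single private key api used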
void loadAuthorities() {
// get our authorities
final String authListConfig = System.getProperty(ZMSConsts.ZMS_PROP_AUTHORITY_CLASSES,
ZMSConsts.ZMS_PRINCIPAL_AUTHORITY_CLASS);
final String principalAuthorityClass = System.getProperty(ZMSConsts.ZMS_PROP_PRINCIPAL_AUTHORITY_CLASS);
final String userAuthorityClass = System.getProperty(ZMSConsts.ZMS_PROP_USER_AUTHORITY_CLASS);
authorities = new AuthorityList();
String[] authorityList = authListConfig.split(",");
for (String authorityClass : authorityList) {
Authority authority = getAuthority(authorityClass);
if (authority == null) {
throw new IllegalArgumentException("Invalid authority");
}
if (authorityClass.equals(principalAuthorityClass)) {
principalAuthority = authority;
}
if (authorityClass.equals(userAuthorityClass)) {
userAuthority = authority;
}
authority.initialize();
authorities.add(authority);
}
}
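// for example (illustrative class names), configuring the authority list as
//   com.yahoo.athenz.auth.impl.PrincipalAuthority,com.yahoo.athenz.auth.impl.UserAuthority
// with the principal authority property set to the first entry registers both
// authorities and marks the first as the principal authority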
void loadAuditLogger() {
String auditFactoryClass = System.getProperty(ZMSConsts.ZMS_PROP_AUDIT_LOGGER_FACTORY_CLASS,
ZMSConsts.ZMS_AUDIT_LOGGER_FACTORY_CLASS);
AuditLoggerFactory auditLogFactory;
try {
auditLogFactory = (AuditLoggerFactory) Class.forName(auditFactoryClass).newInstance();
} catch (InstantiationException | IllegalAccessException | ClassNotFoundException e) {
LOG.error("Invalid AuditLoggerFactory class: " + auditFactoryClass
+ " error: " + e.getMessage());
throw new IllegalArgumentException("Invalid audit logger class");
}
// create our audit logger
auditLogger = auditLogFactory.create();
}
void loadAuditRefValidator() {
final String auditRefValidatorClass = System.getProperty(ZMSConsts.ZMS_PROP_AUDIT_REF_VALIDATOR_FACTORY_CLASS);
AuditReferenceValidatorFactory auditReferenceValidatorFactory;
if (auditRefValidatorClass != null && !auditRefValidatorClass.isEmpty()) {
try {
auditReferenceValidatorFactory = (AuditReferenceValidatorFactory) Class.forName(auditRefValidatorClass).newInstance();
} catch (InstantiationException | IllegalAccessException | ClassNotFoundException e) {
LOG.error("Invalid AuditReferenceValidatorFactory class: " + auditRefValidatorClass
+ " error: " + e.getMessage());
throw new IllegalArgumentException("Invalid audit reference factory class");
}
// create our audit reference validator
auditReferenceValidator = auditReferenceValidatorFactory.create();
}
}
void loadServerPublicKeys() {
// initialize our public key map
serverPublicKeyMap = new ConcurrentHashMap<>();
// retrieve our zms service identity object
ServiceIdentity identity = dbService.getServiceIdentity(SYS_AUTH, ZMSConsts.ZMS_SERVICE, false);
if (identity != null) {
// process all the public keys and add them to the map
List<PublicKeyEntry> publicKeyList = identity.getPublicKeys();
if (publicKeyList != null) {
for (PublicKeyEntry entry : publicKeyList) {
serverPublicKeyMap.put(entry.getId(), entry.getKey());
}
}
}
// this should never happen but just in case we'll add
// the public key extracted from our own private key to the map
if (serverPublicKeyMap.isEmpty() && privateKey != null) {
final String publicKey = Crypto.convertToPEMFormat(Crypto.extractPublicKey(privateKey.getKey()));
serverPublicKeyMap.put(privateKey.getId(), Crypto.ybase64EncodeString(publicKey));
}
}
void loadSolutionTemplates() {
// get the configured path for the list of service templates
String solutionTemplatesFname = System.getProperty(ZMSConsts.ZMS_PROP_SOLUTION_TEMPLATE_FNAME,
getRootDir() + "/conf/zms_server/solution_templates.json");
Path path = Paths.get(solutionTemplatesFname);
try {
serverSolutionTemplates = JSON.fromBytes(Files.readAllBytes(path), SolutionTemplates.class);
} catch (IOException ex) {
LOG.error("Unable to parse service templates file {}: {}",
solutionTemplatesFname, ex.getMessage());
}
if (serverSolutionTemplates == null) {
LOG.error("Generating empty solution template list...");
serverSolutionTemplates = new SolutionTemplates();
serverSolutionTemplates.setTemplates(new HashMap<>());
}
}
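// the templates file is a json document keyed by template name. a minimal
// sketch of its structure (see the SolutionTemplates type for the
// authoritative schema):
//   { "templates": { "example-template": { "roles": [...], "policies": [...] } } }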
void loadAuthorizedServices() {
// get the configured path for the list of authorized services and what operations
// those services are allowed to process
String authzServiceFname = System.getProperty(ZMSConsts.ZMS_PROP_AUTHZ_SERVICE_FNAME,
getRootDir() + "/conf/zms_server/authorized_services.json");
Path path = Paths.get(authzServiceFname);
try {
serverAuthorizedServices = JSON.fromBytes(Files.readAllBytes(path), AuthorizedServices.class);
} catch (IOException ex) {
LOG.error("Unable to parse authorized service file {}: {}",
authzServiceFname, ex.getMessage());
}
if (serverAuthorizedServices == null) {
LOG.error("Generating empty authorized service list...");
serverAuthorizedServices = new AuthorizedServices();
serverAuthorizedServices.setTemplates(new HashMap<>());
}
}
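// like the solution templates, this is a json file mapping each authorized
// service to the operations it may perform on behalf of users; a missing or
// unparseable file degrades to an empty list rather than failing startup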
void initObjectStore() {
final String caller = "initstore";
List<String> domains = dbService.listDomains(null, 0);
if (domains.contains(SYS_AUTH)) {
return;
}
String adminUserList = System.getProperty(ZMSConsts.ZMS_PROP_DOMAIN_ADMIN);
if (adminUserList == null) {
throw ZMSUtils.internalServerError("init: No ZMS admin user specified", caller);
}
String[] users = adminUserList.split(",");
ArrayList<String> adminUsers = new ArrayList<>();
for (String user : users) {
final String adminUser = user.trim();
if (!adminUser.startsWith(userDomainPrefix)) {
throw ZMSUtils.internalServerError("init: Bad domain user name(" + adminUser +
"), must begin with (" + userDomainPrefix + ")", caller);
}
adminUsers.add(adminUser);
}
// create system required top level domains
Domain domain = new Domain().setName(userDomain).setDescription("The reserved domain for user authentication")
.setId(UUID.fromCurrentTime()).setModified(Timestamp.fromCurrentTime());
createTopLevelDomain(null, domain, adminUsers, null, "System Setup");
if (!ZMSConsts.USER_DOMAIN.equals(userDomain)) {
domain = new Domain().setName(ZMSConsts.USER_DOMAIN).setDescription("The reserved domain for user authentication")
.setId(UUID.fromCurrentTime()).setModified(Timestamp.fromCurrentTime());
createTopLevelDomain(null, domain, adminUsers, null, "System Setup");
}
if (!homeDomain.equals(userDomain)) {
domain = new Domain().setName(homeDomain).setDescription("The reserved domain for personal user domains")
.setId(UUID.fromCurrentTime()).setModified(Timestamp.fromCurrentTime());
createTopLevelDomain(null, domain, adminUsers, null, "System Setup");
}
domain = new Domain().setName("sys").setDescription("The reserved domain for system related information")
.setId(UUID.fromCurrentTime()).setModified(Timestamp.fromCurrentTime());
createTopLevelDomain(null, domain, adminUsers, null, "System Setup");
// now create required subdomains in sys top level domain
domain = new Domain().setName("sys.auth").setDescription("The Athenz domain")
.setId(UUID.fromCurrentTime()).setModified(Timestamp.fromCurrentTime());
createSubDomain(null, domain, adminUsers, null, "System Setup", caller);
domain = new Domain().setName("sys.auth.audit").setDescription("The Athenz audit domain")
.setId(UUID.fromCurrentTime()).setModified(Timestamp.fromCurrentTime());
createSubDomain(null, domain, adminUsers, null, "System Setup", caller);
domain = new Domain().setName("sys.auth.audit.org").setDescription("The Athenz audit domain based on org name")
.setId(UUID.fromCurrentTime()).setModified(Timestamp.fromCurrentTime());
createSubDomain(null, domain, adminUsers, null, "System Setup", caller);
domain = new Domain().setName("sys.auth.audit.domain").setDescription("The Athenz audit domain based on domain name")
.setId(UUID.fromCurrentTime()).setModified(Timestamp.fromCurrentTime());
createSubDomain(null, domain, adminUsers, null, "System Setup", caller);
if (privateKey != null) {
List<PublicKeyEntry> pubKeys = new ArrayList<>();
final String publicKey = Crypto.convertToPEMFormat(Crypto.extractPublicKey(privateKey.getKey()));
pubKeys.add(new PublicKeyEntry().setId(privateKey.getId()).setKey(Crypto.ybase64EncodeString(publicKey)));
ServiceIdentity id = new ServiceIdentity().setName("sys.auth.zms").setPublicKeys(pubKeys);
dbService.executePutServiceIdentity(null, SYS_AUTH, ZMSConsts.ZMS_SERVICE, id, null, caller);
} else {
if (LOG.isWarnEnabled()) {
LOG.warn("init: Warning: no public key, cannot register sys.auth.zms identity");
}
}
}
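// after a successful bootstrap the store contains this domain tree:
//   <user domain>, <home domain>, sys, sys.auth, sys.auth.audit,
//   sys.auth.audit.org, sys.auth.audit.domain
// plus the sys.auth.zms service identity carrying the server's public key
// (when a private key is available)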
/**
* @return the ZMS Schema object, describing its API and types.
*/
public Schema schema() {
return schema;
}
public DomainList getDomainList(ResourceContext ctx, Integer limit, String skip, String prefix,
Integer depth, String account, Integer productId, String roleMember, String roleName,
String modifiedSince) {
final String caller = "getdomainlist";
metric.increment(ZMSConsts.HTTP_GET);
metric.increment(ZMSConsts.HTTP_REQUEST);
metric.increment(caller);
final String principalDomain = getPrincipalDomain(ctx);
Object timerMetric = metric.startTiming("getdomainlist_timing", null, principalDomain);
logPrincipal(ctx);
validateRequest(ctx.request(), caller);
if (LOG.isDebugEnabled()) {
LOG.debug("getDomainList: limit: " + limit + " skip: " + skip
+ " prefix: " + prefix + " depth: " + depth + " modifiedSince: " + modifiedSince);
}
// for consistent handling of all requests, we're going to convert
// all incoming object values into lower case (e.g. domain, role,
// policy, service, etc name)
if (skip != null) {
skip = skip.toLowerCase();
}
if (prefix != null) {
prefix = prefix.toLowerCase();
}
if (roleMember != null) {
roleMember = roleMember.toLowerCase();
validate(roleMember, TYPE_ENTITY_NAME, caller);
}
if (roleName != null) {
roleName = roleName.toLowerCase();
validate(roleName, TYPE_ENTITY_NAME, caller);
}
if (limit != null && limit <= 0) {
throw ZMSUtils.requestError("getDomainList: limit must be positive: " + limit, caller);
}
long modTime = 0;
if (modifiedSince != null && !modifiedSince.isEmpty()) {
// we only support RFC1123 format for if-modified-since format
SimpleDateFormat dateFmt = new SimpleDateFormat(ZMSConsts.HTTP_RFC1123_DATE_FORMAT);
dateFmt.setTimeZone(TimeZone.getTimeZone(ZMSConsts.HTTP_DATE_GMT_ZONE));
try {
Date date = dateFmt.parse(modifiedSince);
modTime = date.getTime();
} catch (ParseException ex) {
throw ZMSUtils.requestError("getDomainList: If-Modified-Since header value must be valid RFC1123 date"
+ ex.getMessage(), caller);
}
}
// if we have account specified then we're going to ignore all
// other fields since there should only be one domain that
// matches the specified account. Otherwise, we're going to do
// the same thing for product id since there should also be one
// domain with that id. If neither one is present, then we'll
// do our regular domain list
DomainList dlist;
if (account != null && !account.isEmpty()) {
dlist = dbService.lookupDomainByAccount(account);
} else if (productId != null && productId != 0) {
dlist = dbService.lookupDomainByProductId(productId);
} else if (roleMember != null || roleName != null) {
dlist = dbService.lookupDomainByRole(normalizeDomainAliasUser(roleMember), roleName);
} else {
dlist = listDomains(limit, skip, prefix, depth, modTime);
}
metric.stopTiming(timerMetric, null, principalDomain);
return dlist;
}
public Domain getDomain(ResourceContext ctx, String domainName) {
final String caller = "getdomain";
metric.increment(ZMSConsts.HTTP_GET);
logPrincipal(ctx);
validateRequest(ctx.request(), caller);
validate(domainName, TYPE_DOMAIN_NAME, caller);
// for consistent handling of all requests, we're going to convert
// all incoming object values into lower case (e.g. domain, role,
// policy, service, etc name)
domainName = domainName.toLowerCase();
final String principalDomain = getPrincipalDomain(ctx);
metric.increment(ZMSConsts.HTTP_REQUEST, domainName, principalDomain);
metric.increment(caller, domainName, principalDomain);
Object timerMetric = metric.startTiming("getdomain_timing", domainName, principalDomain);
Domain domain = dbService.getDomain(domainName, false);
if (domain == null) {
throw ZMSUtils.notFoundError("getDomain: Domain not found: " + domainName, caller);
}
metric.stopTiming(timerMetric, domainName, principalDomain);
return domain;
}
public Domain postTopLevelDomain(ResourceContext ctx, String auditRef, TopLevelDomain detail) {
final String caller = "posttopleveldomain";
metric.increment(ZMSConsts.HTTP_POST);
logPrincipal(ctx);
if (readOnlyMode) {
throw ZMSUtils.requestError(SERVER_READ_ONLY_MESSAGE, caller);
}
validateRequest(ctx.request(), caller);
validate(detail, TYPE_TOP_LEVEL_DOMAIN, caller);
String domainName = detail.getName();
validate(domainName, TYPE_DOMAIN_NAME, caller);
domainName = domainName.toLowerCase();
final String principalDomain = getPrincipalDomain(ctx);
metric.increment(ZMSConsts.HTTP_REQUEST, domainName, principalDomain);
metric.increment(caller, domainName, principalDomain);
Object timerMetric = metric.startTiming("posttopleveldomain_timing", domainName, principalDomain);
if (domainName.indexOf('_') != -1 && !isSysAdminUser(((RsrcCtxWrapper) ctx).principal())) {
throw ZMSUtils.requestError("Domain name cannot contain underscores", caller);
}
// verify length of domain name
if (domainName.length() > domainNameMaxLen) {
throw ZMSUtils.requestError("Invalid Domain name: " + domainName
+ " : name length cannot exceed: " + domainNameMaxLen, caller);
}
// verify that request is properly authenticated for this request
Principal principal = ((RsrcCtxWrapper) ctx).principal();
verifyAuthorizedServiceOperation(principal.getAuthorizedService(), caller);
// for consistent handling of all requests, we're going to convert
// all incoming object values into lower case (e.g. domain, role,
// policy, service, etc name)
AthenzObject.TOP_LEVEL_DOMAIN.convertToLowerCase(detail);
List<String> solutionTemplates = null;
DomainTemplateList templates = detail.getTemplates();
if (templates != null) {
solutionTemplates = templates.getTemplateNames();
validateSolutionTemplates(solutionTemplates, caller);
}
// check to see if we need to validate our product id for the top
// level domains. The server code assumes that product id with
// 0 indicates no enforcement
int productId = 0;
if (productIdSupport) {
if (detail.getYpmId() != null) {
if ((productId = detail.getYpmId()) <= 0) {
throw ZMSUtils.requestError("Product Id must be a positive integer", caller);
}
} else {
throw ZMSUtils.requestError("Product Id is required when creating top level domain", caller);
}
}
Domain topLevelDomain = new Domain()
.setName(domainName)
.setAuditEnabled(detail.getAuditEnabled())
.setDescription(detail.getDescription())
.setOrg(detail.getOrg())
.setId(UUID.fromCurrentTime())
.setAccount(detail.getAccount())
.setYpmId(productId)
.setModified(Timestamp.fromCurrentTime())
.setApplicationId(detail.getApplicationId())
.setMemberExpiryDays(detail.getMemberExpiryDays())
.setServiceExpiryDays(detail.getServiceExpiryDays())
.setTokenExpiryMins(detail.getTokenExpiryMins())
.setServiceCertExpiryMins(detail.getServiceCertExpiryMins())
.setRoleCertExpiryMins(detail.getRoleCertExpiryMins())
.setSignAlgorithm(detail.getSignAlgorithm());
List<String> adminUsers = normalizedAdminUsers(detail.getAdminUsers());
Domain domain = createTopLevelDomain(ctx, topLevelDomain, adminUsers, solutionTemplates, auditRef);
metric.stopTiming(timerMetric, domainName, principalDomain);
return domain;
}
public void deleteTopLevelDomain(ResourceContext ctx, String domainName, String auditRef) {
final String caller = "deletetopleveldomain";
metric.increment(ZMSConsts.HTTP_DELETE);
logPrincipal(ctx);
if (readOnlyMode) {
throw ZMSUtils.requestError(SERVER_READ_ONLY_MESSAGE, caller);
}
validateRequest(ctx.request(), caller);
validate(domainName, TYPE_DOMAIN_NAME, caller);
domainName = domainName.toLowerCase();
final String principalDomain = getPrincipalDomain(ctx);
metric.increment(ZMSConsts.HTTP_REQUEST, domainName, principalDomain);
metric.increment(caller, domainName, principalDomain);
Object timerMetric = metric.startTiming("deletetopleveldomain_timing", domainName, principalDomain);
// verify that request is properly authenticated for this request
verifyAuthorizedServiceOperation(((RsrcCtxWrapper) ctx).principal().getAuthorizedService(), caller);
// for consistent handling of all requests, we're going to convert
// all incoming object values into lower case (e.g. domain, role,
// policy, service, etc name)
deleteDomain(ctx, auditRef, domainName, caller);
metric.stopTiming(timerMetric, domainName, principalDomain);
}
void deleteDomain(ResourceContext ctx, String auditRef, String domainName, String caller) {
// make sure we're not deleting any of the reserved system domain
if (reservedSystemDomains.contains(domainName)) {
throw ZMSUtils.requestError("Cannot delete reserved system domain", caller);
}
DomainList subDomainList = listDomains(null, null, domainName + ".", null, 0);
if (!subDomainList.getNames().isEmpty()) {
throw ZMSUtils.requestError(caller + ": Cannot delete domain " +
domainName + ": " + subDomainList.getNames().size() + " subdomains of it exist", caller);
}
dbService.executeDeleteDomain(ctx, domainName, auditRef, caller);
}
boolean isVirtualDomain(String domain) {
// all virtual domains start with our home domain prefix
return domain.startsWith(homeDomainPrefix);
}
boolean hasExceededVirtualSubDomainLimit(String domain) {
// we need to find our username which is the second
// component in the domain name - e.g. user.joe[.subdomain]
// when counting we need to make sure to include the trailing '.'
// since we're counting subdomains and we must not match other
// users who happen to have the same prefix
String userDomainCheck;
int idx = domain.indexOf('.', homeDomainPrefix.length());
if (idx == -1) {
userDomainCheck = domain + ".";
} else {
userDomainCheck = domain.substring(0, idx + 1);
}
// retrieve the number of domains with this prefix
DomainList dlist = listDomains(null, null, userDomainCheck, null, 0);
if (dlist.getNames().size() < virtualDomainLimit) {
return false;
}
if (LOG.isDebugEnabled()) {
LOG.debug("hasExceededVirtualSubDomainLimit: subdomains with prefix " + userDomainCheck
+ ": " + dlist.getNames().size() + " while limit is: " + virtualDomainLimit);
}
return true;
}
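// worked example: with homeDomainPrefix "home." and domain "home.joe.ci.cd",
// idx points at the '.' after "joe", so userDomainCheck becomes "home.joe."
// and the lookup counts only joe's subdomains without matching another user
// with a similar name such as home.joey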
public Domain postUserDomain(ResourceContext ctx, String name, String auditRef, UserDomain detail) {
final String caller = "postuserdomain";
metric.increment(ZMSConsts.HTTP_POST);
logPrincipal(ctx);
if (readOnlyMode) {
throw ZMSUtils.requestError(SERVER_READ_ONLY_MESSAGE, caller);
}
validateRequest(ctx.request(), caller);
validate(detail, TYPE_USER_DOMAIN, caller);
validate(name, TYPE_SIMPLE_NAME, caller);
// for consistent handling of all requests, we're going to convert
// all incoming object values into lower case (e.g. domain, role,
// policy, service, etc name)
name = name.toLowerCase();
AthenzObject.USER_DOMAIN.convertToLowerCase(detail);
final String principalDomain = getPrincipalDomain(ctx);
metric.increment(ZMSConsts.HTTP_REQUEST, name, principalDomain);
metric.increment(caller, name, principalDomain);
Object timerMetric = metric.startTiming("postuserdomain_timing", name, principalDomain);
if (detail.getName().indexOf('_') != -1 && !isSysAdminUser(((RsrcCtxWrapper) ctx).principal())) {
throw ZMSUtils.requestError("Domain name cannot contain underscores", caller);
}
// verify that request is properly authenticated for this request
Principal principal = ((RsrcCtxWrapper) ctx).principal();
verifyAuthorizedServiceOperation(principal.getAuthorizedService(), caller);
if (!name.equals(detail.getName())) {
throw ZMSUtils.forbiddenError("postUserDomain: Request and detail domain names do not match", caller);
}
// we're dealing with user's top level domain so the parent is going
// to be the home domain and the admin of the domain is the user
List<String> adminUsers = new ArrayList<>();
adminUsers.add(userDomainPrefix + principal.getName());
List<String> solutionTemplates = null;
DomainTemplateList templates = detail.getTemplates();
if (templates != null) {
solutionTemplates = templates.getTemplateNames();
validateSolutionTemplates(solutionTemplates, caller);
}
Domain subDomain = new Domain()
.setName(homeDomain + "." + getUserDomainName(detail.getName()))
.setAuditEnabled(detail.getAuditEnabled())
.setDescription(detail.getDescription())
.setOrg(detail.getOrg())
.setId(UUID.fromCurrentTime())
.setAccount(detail.getAccount())
.setModified(Timestamp.fromCurrentTime())
.setApplicationId(detail.getApplicationId())
.setMemberExpiryDays(detail.getMemberExpiryDays())
.setServiceExpiryDays(detail.getServiceExpiryDays())
.setTokenExpiryMins(detail.getTokenExpiryMins())
.setServiceCertExpiryMins(detail.getServiceCertExpiryMins())
.setRoleCertExpiryMins(detail.getRoleCertExpiryMins())
.setSignAlgorithm(detail.getSignAlgorithm());
Domain domain = createSubDomain(ctx, subDomain, adminUsers, solutionTemplates, auditRef, caller);
metric.stopTiming(timerMetric, name, principalDomain);
return domain;
}
public Domain postSubDomain(ResourceContext ctx, String parent, String auditRef, SubDomain detail) {
final String caller = "postsubdomain";
metric.increment(ZMSConsts.HTTP_POST);
logPrincipal(ctx);
if (readOnlyMode) {
throw ZMSUtils.requestError(SERVER_READ_ONLY_MESSAGE, caller);
}
validateRequest(ctx.request(), caller);
validate(detail, TYPE_SUB_DOMAIN, caller);
validate(parent, TYPE_DOMAIN_NAME, caller);
validate(detail.getName(), TYPE_SIMPLE_NAME, caller);
// for consistent handling of all requests, we're going to convert
// all incoming object values into lower case (e.g. domain, role,
// policy, service, etc name)
parent = parent.toLowerCase();
AthenzObject.SUB_DOMAIN.convertToLowerCase(detail);
final String principalDomain = getPrincipalDomain(ctx);
metric.increment(ZMSConsts.HTTP_REQUEST, parent, principalDomain);
metric.increment(caller, parent, principalDomain);
Object timerMetric = metric.startTiming("postsubdomain_timing", parent, principalDomain);
if (detail.getName().indexOf('_') != -1 && !isSysAdminUser(((RsrcCtxWrapper) ctx).principal())) {
throw ZMSUtils.requestError("Domain name cannot contain underscores", caller);
}
// verify that request is properly authenticated for this request
verifyAuthorizedServiceOperation(((RsrcCtxWrapper) ctx).principal().getAuthorizedService(), caller);
if (!parent.equals(detail.getParent())) {
throw ZMSUtils.forbiddenError("postSubDomain: Request and detail parent domains do not match", caller);
}
// if we're dealing with virtual/home domains (in the user's own namespace)
// and we don't have unlimited support for virtual domains then we need to
// make sure we don't exceed our configured number of virtual subdomains
// allowed per user
if (virtualDomainLimit != 0 && isVirtualDomain(parent) && hasExceededVirtualSubDomainLimit(parent)) {
throw ZMSUtils.forbiddenError("postSubDomain: Exceeding the configured number of virtual subdomains", caller);
}
List<String> solutionTemplates = null;
DomainTemplateList templates = detail.getTemplates();
if (templates != null) {
solutionTemplates = templates.getTemplateNames();
validateSolutionTemplates(solutionTemplates, caller);
}
// while it's not required for sub domains to have product ids
// we're going to store it in case there is a requirement to
// generate reports based on product ids even for subdomains
// unlike top level domains, passing 0 is ok here as it indicates
// that there is no product id
int productId = 0;
if (productIdSupport) {
if (detail.getYpmId() != null) {
if ((productId = detail.getYpmId()) < 0) {
throw ZMSUtils.requestError("Product Id cannot be a negative integer", caller);
}
}
}
List<String> adminUsers = normalizedAdminUsers(detail.getAdminUsers());
// inherit audit_enabled flag and organization from parent domain
AthenzDomain parentDomain = getAthenzDomain(parent, false);
if (parentDomain != null && parentDomain.getDomain() != null) {
detail.setAuditEnabled(parentDomain.getDomain().getAuditEnabled());
detail.setOrg(parentDomain.getDomain().getOrg());
}
Domain subDomain = new Domain()
.setName(detail.getParent() + "." + detail.getName())
.setAuditEnabled(detail.getAuditEnabled())
.setDescription(detail.getDescription())
.setOrg(detail.getOrg())
.setId(UUID.fromCurrentTime())
.setYpmId(productId)
.setAccount(detail.getAccount())
.setModified(Timestamp.fromCurrentTime())
.setApplicationId(detail.getApplicationId())
.setMemberExpiryDays(detail.getMemberExpiryDays())
.setServiceExpiryDays(detail.getServiceExpiryDays())
.setTokenExpiryMins(detail.getTokenExpiryMins())
.setServiceCertExpiryMins(detail.getServiceCertExpiryMins())
.setRoleCertExpiryMins(detail.getRoleCertExpiryMins())
.setSignAlgorithm(detail.getSignAlgorithm());
Domain domain = createSubDomain(ctx, subDomain, adminUsers, solutionTemplates, auditRef, caller);
metric.stopTiming(timerMetric, parent, principalDomain);
return domain;
}
boolean isSysAdminUser(Principal principal) {
// verify we're dealing with system administrator
// authorize ("CREATE", "sys.auth:domain");
// first check - the domain must be the user domain
if (!principal.getDomain().equals(userDomain)) {
return false;
}
AthenzDomain domain = getAthenzDomain(SYS_AUTH, true);
// evaluate our domain's roles and policies to see if access
// is allowed or not for the given operation and resource
// our actions are always converted to lowercase
String resource = SYS_AUTH + ":domain";
AccessStatus accessStatus = evaluateAccess(domain, principal.getFullName(), "create",
resource, null, null);
return accessStatus == AccessStatus.ALLOWED;
}
boolean isAllowedResourceLookForAllUsers(Principal principal) {
// the authorization policy resides in official sys.auth domain
AthenzDomain domain = getAthenzDomain(SYS_AUTH, true);
// evaluate our domain's roles and policies to see if access
// is allowed or not for the given operation and resource
// our actions are always converted to lowercase
String resource = SYS_AUTH + ":resource-lookup-all";
AccessStatus accessStatus = evaluateAccess(domain, principal.getFullName(), "access",
resource, null, null);
return accessStatus == AccessStatus.ALLOWED;
}
public void deleteSubDomain(ResourceContext ctx, String parent, String name, String auditRef) {
final String caller = "deletesubdomain";
metric.increment(ZMSConsts.HTTP_DELETE);
logPrincipal(ctx);
if (readOnlyMode) {
throw ZMSUtils.requestError(SERVER_READ_ONLY_MESSAGE, caller);
}
validateRequest(ctx.request(), caller);
// for consistent handling of all requests, we're going to convert
// all incoming object values into lower case (e.g. domain, role,
// policy, service, etc name)
parent = parent.toLowerCase();
name = name.toLowerCase();
String domainName = parent + "." + name;
final String principalDomain = getPrincipalDomain(ctx);
metric.increment(ZMSConsts.HTTP_REQUEST, parent, principalDomain);
metric.increment(caller, parent, principalDomain);
Object timerMetric = metric.startTiming("deletesubdomain_timing", parent, principalDomain);
validate(parent, TYPE_DOMAIN_NAME, caller);
validate(name, TYPE_SIMPLE_NAME, caller);
// verify that request is properly authenticated for this request
verifyAuthorizedServiceOperation(((RsrcCtxWrapper) ctx).principal().getAuthorizedService(), caller);
deleteDomain(ctx, auditRef, domainName, caller);
metric.stopTiming(timerMetric, parent, principalDomain);
}
public void deleteUserDomain(ResourceContext ctx, String name, String auditRef) {
final String caller = "deleteuserdomain";
metric.increment(ZMSConsts.HTTP_DELETE);
logPrincipal(ctx);
if (readOnlyMode) {
throw ZMSUtils.requestError(SERVER_READ_ONLY_MESSAGE, caller);
}
validateRequest(ctx.request(), caller);
validate(name, TYPE_SIMPLE_NAME, caller);
// for consistent handling of all requests, we're going to convert
// all incoming object values into lower case (e.g. domain, role,
// policy, service, etc name)
name = name.toLowerCase();
final String principalDomain = getPrincipalDomain(ctx);
metric.increment(ZMSConsts.HTTP_REQUEST, name, principalDomain);
metric.increment(caller, name, principalDomain);
Object timerMetric = metric.startTiming("deleteuserdomain_timing", name, principalDomain);
// verify that request is properly authenticated for this request
verifyAuthorizedServiceOperation(((RsrcCtxWrapper) ctx).principal().getAuthorizedService(), caller);
String domainName = homeDomainPrefix + name;
deleteDomain(ctx, auditRef, domainName, caller);
metric.stopTiming(timerMetric, name, principalDomain);
}
public UserList getUserList(ResourceContext ctx) {
final String caller = "getuserlist";
metric.increment(ZMSConsts.HTTP_GET);
logPrincipal(ctx);
validateRequest(ctx.request(), caller);
metric.increment(ZMSConsts.HTTP_REQUEST);
metric.increment(caller);
final String principalDomain = getPrincipalDomain(ctx);
Object timerMetric = metric.startTiming("getuserlist_timing", null, principalDomain);
List<String> names = dbService.listPrincipals(userDomain, true);
UserList result = new UserList().setNames(names);
metric.stopTiming(timerMetric, null, principalDomain);
return result;
}
@Override
public void deleteDomainRoleMember(ResourceContext ctx, String domainName, String memberName, String auditRef) {
final String caller = "deletedomainrolemember";
metric.increment(ZMSConsts.HTTP_DELETE);
logPrincipal(ctx);
if (readOnlyMode) {
throw ZMSUtils.requestError(SERVER_READ_ONLY_MESSAGE, caller);
}
validateRequest(ctx.request(), caller);
validate(domainName, TYPE_DOMAIN_NAME, caller);
validate(memberName, TYPE_MEMBER_NAME, caller);
// for consistent handling of all requests, we're going to convert
// all incoming object values into lower case (e.g. domain, role,
// policy, service, etc name)
domainName = domainName.toLowerCase();
memberName = memberName.toLowerCase();
final String principalDomain = getPrincipalDomain(ctx);
metric.increment(ZMSConsts.HTTP_REQUEST, domainName, principalDomain);
metric.increment(caller, domainName, principalDomain);
Object timerMetric = metric.startTiming("deletedomainrolemember_timing", domainName, principalDomain);
// verify that request is properly authenticated for this request
verifyAuthorizedServiceOperation(((RsrcCtxWrapper) ctx).principal().getAuthorizedService(), caller);
dbService.executeDeleteDomainRoleMember(ctx, domainName, memberName, auditRef, caller);
metric.stopTiming(timerMetric, domainName, principalDomain);
}
@Override
public void deleteUser(ResourceContext ctx, String name, String auditRef) {
final String caller = "deleteuser";
metric.increment(ZMSConsts.HTTP_DELETE);
logPrincipal(ctx);
if (readOnlyMode) {
throw ZMSUtils.requestError(SERVER_READ_ONLY_MESSAGE, caller);
}
validateRequest(ctx.request(), caller);
validate(name, TYPE_SIMPLE_NAME, caller);
// for consistent handling of all requests, we're going to convert
// all incoming object values into lower case (e.g. domain, role,
// policy, service, etc name)
name = name.toLowerCase();
final String principalDomain = getPrincipalDomain(ctx);
metric.increment(ZMSConsts.HTTP_REQUEST, userDomain, principalDomain);
metric.increment(caller, userDomain, principalDomain);
Object timerMetric = metric.startTiming("deleteuser_timing", name, principalDomain);
// verify that request is properly authenticated for this request
verifyAuthorizedServiceOperation(((RsrcCtxWrapper) ctx).principal().getAuthorizedService(), caller);
String userName = userDomainPrefix + name;
String domainName = homeDomainPrefix + getUserDomainName(name);
dbService.executeDeleteUser(ctx, userName, domainName, auditRef, caller);
metric.stopTiming(timerMetric, name, principalDomain);
}
String getUserDomainName(String userName) {
return (userAuthority == null) ? userName : userAuthority.getUserDomainName(userName);
}
void validateString(final String value, final String type, final String caller) {
if (value != null && !value.isEmpty()) {
validate(value, type, caller);
}
}
@Override
public void putDomainMeta(ResourceContext ctx, String domainName, String auditRef,
DomainMeta meta) {
final String caller = "putdomainmeta";
metric.increment(ZMSConsts.HTTP_PUT);
logPrincipal(ctx);
if (readOnlyMode) {
throw ZMSUtils.requestError(SERVER_READ_ONLY_MESSAGE, caller);
}
validateRequest(ctx.request(), caller);
validate(meta, TYPE_DOMAIN_META, caller);
validateString(meta.getApplicationId(), TYPE_COMPOUND_NAME, caller);
// validate meta values - for now we're making sure we're not
// getting any negative values for our integer settings
validateDomainMetaValues(meta);
// for consistent handling of all requests, we're going to convert
// all incoming object values into lower case (e.g. domain, role,
// policy, service, etc name)
domainName = domainName.toLowerCase();
AthenzObject.DOMAIN_META.convertToLowerCase(meta);
final String principalDomain = getPrincipalDomain(ctx);
metric.increment(ZMSConsts.HTTP_REQUEST, domainName, principalDomain);
metric.increment(caller, domainName, principalDomain);
Object timerMetric = metric.startTiming("putdomainmeta_timing", domainName, principalDomain);
// verify that request is properly authenticated for this request
verifyAuthorizedServiceOperation(((RsrcCtxWrapper) ctx).principal().getAuthorizedService(),
caller);
if (LOG.isDebugEnabled()) {
LOG.debug("putDomainMeta: name={}, meta={}", domainName, meta);
}
// process put domain meta request
dbService.executePutDomainMeta(ctx, domainName, meta, null, false, auditRef, caller);
metric.stopTiming(timerMetric, domainName, principalDomain);
}
void validateIntegerValue(final Integer value, final String fieldName) {
if (value != null && value < 0) {
throw ZMSUtils.requestError(fieldName + " cannot be negative", "validateMetaFields");
}
}
void validateDomainMetaValues(DomainMeta meta) {
validateIntegerValue(meta.getServiceCertExpiryMins(), "serviceCertExpiryMins");
validateIntegerValue(meta.getMemberExpiryDays(), "memberExpiryDays");
validateIntegerValue(meta.getRoleCertExpiryMins(), "roleCertExpiryMins");
validateIntegerValue(meta.getServiceExpiryDays(), "serviceExpiryDays");
validateIntegerValue(meta.getTokenExpiryMins(), "tokenExpiryMins");
validateIntegerValue(meta.getYpmId(), "ypmId");
}
void validateRoleMetaValues(RoleMeta meta) {
validateIntegerValue(meta.getMemberExpiryDays(), "memberExpiryDays");
validateIntegerValue(meta.getServiceExpiryDays(), "serviceExpiryDays");
validateIntegerValue(meta.getTokenExpiryMins(), "tokenExpiryMins");
validateIntegerValue(meta.getCertExpiryMins(), "certExpiryMins");
validateIntegerValue(meta.getMemberReviewDays(), "memberReviewDays");
validateIntegerValue(meta.getServiceReviewDays(), "serviceReviewDays");
}
@Override
public void putDomainSystemMeta(ResourceContext ctx, String domainName, String attribute,
String auditRef, DomainMeta meta) {
final String caller = "putdomainsystemmeta";
metric.increment(ZMSConsts.HTTP_PUT);
logPrincipal(ctx);
if (readOnlyMode) {
throw ZMSUtils.requestError(SERVER_READ_ONLY_MESSAGE, caller);
}
validateRequest(ctx.request(), caller);
validate(meta, TYPE_DOMAIN_META, caller);
validate(attribute, TYPE_SIMPLE_NAME, caller);
validateString(meta.getAccount(), TYPE_COMPOUND_NAME, caller);
// validate meta values - for now we're making sure we're not
// getting any negative values for our integer settings
validateDomainMetaValues(meta);
// for consistent handling of all requests, we're going to convert
// all incoming object values into lower case (e.g. domain, role,
// policy, service, etc name)
domainName = domainName.toLowerCase();
attribute = attribute.toLowerCase();
AthenzObject.DOMAIN_META.convertToLowerCase(meta);
final String principalDomain = getPrincipalDomain(ctx);
metric.increment(ZMSConsts.HTTP_REQUEST, domainName, principalDomain);
metric.increment(caller, domainName, principalDomain);
Object timerMetric = metric.startTiming("putdomainsystemmeta_timing", domainName, principalDomain);
// verify that request is properly authenticated for this request
Principal principal = ((RsrcCtxWrapper) ctx).principal();
verifyAuthorizedServiceOperation(principal.getAuthorizedService(), caller);
if (LOG.isDebugEnabled()) {
LOG.debug("putDomainSystemMeta: name={}, attribute={}, meta={}",
domainName, attribute, meta);
}
// if we are resetting the configured value then the caller
// must also have a delete action available for the same resource
boolean deleteAllowed = isAllowedSystemMetaDelete(principal, domainName, attribute, "domain");
// if this productId is already used by any domain, it will be
// detected in dbService and an exception thrown. Here we only
// want to make sure that if product id support is required then
// one must be specified for a top level domain.
if (productIdSupport && meta.getYpmId() == null && domainName.indexOf('.') == -1 &&
ZMSConsts.SYSTEM_META_PRODUCT_ID.equals(attribute)) {
throw ZMSUtils.requestError("Unique Product Id must be specified for top level domain", caller);
}
// if this is just to update the timestamp then we will handle it separately
if (ZMSConsts.SYSTEM_META_LAST_MOD_TIME.equals(attribute)) {
dbService.updateDomainModTimestamp(domainName);
} else {
dbService.executePutDomainMeta(ctx, domainName, meta, attribute, deleteAllowed, auditRef, caller);
}
metric.stopTiming(timerMetric, domainName, principalDomain);
}
void validateSolutionTemplates(List<String> templateNames, String caller) {
for (String templateName : templateNames) {
if (!serverSolutionTemplates.contains(templateName)) {
throw ZMSUtils.notFoundError("validateSolutionTemplates: Template not found: "
+ templateName, caller);
}
}
}
public DomainTemplateList getDomainTemplateList(ResourceContext ctx, String domainName) {
final String caller = "getdomaintemplatelist";
metric.increment(ZMSConsts.HTTP_GET);
logPrincipal(ctx);
validateRequest(ctx.request(), caller);
validate(domainName, TYPE_DOMAIN_NAME, caller);
// for consistent handling of all requests, we're going to convert
// all incoming object values into lower case (e.g. domain, role,
// policy, service, etc name)
domainName = domainName.toLowerCase();
final String principalDomain = getPrincipalDomain(ctx);
metric.increment(ZMSConsts.HTTP_REQUEST, domainName, principalDomain);
metric.increment(caller, domainName, principalDomain);
Object timerMetric = metric.startTiming("getdomaintemplatelist_timing", domainName, principalDomain);
DomainTemplateList domainTemplateList = dbService.listDomainTemplates(domainName);
if (domainTemplateList == null) {
throw ZMSUtils.notFoundError("getDomainTemplateList: Domain not found: '" + domainName + "'", caller);
}
metric.stopTiming(timerMetric, domainName, principalDomain);
return domainTemplateList;
}
@Override
public void putDomainTemplate(ResourceContext ctx, String domainName, String auditRef,
DomainTemplate domainTemplate) {
final String caller = "putdomaintemplate";
metric.increment(ZMSConsts.HTTP_PUT);
logPrincipal(ctx);
if (readOnlyMode) {
throw ZMSUtils.requestError(SERVER_READ_ONLY_MESSAGE, caller);
}
validateRequest(ctx.request(), caller);
validate(domainName, TYPE_DOMAIN_NAME, caller);
validate(domainTemplate, TYPE_DOMAIN_TEMPLATE, caller);
// for consistent handling of all requests, we're going to convert
// all incoming object values into lower case (e.g. domain, role,
// policy, service, etc name)
domainName = domainName.toLowerCase();
AthenzObject.DOMAIN_TEMPLATE.convertToLowerCase(domainTemplate);
final String principalDomain = getPrincipalDomain(ctx);
metric.increment(ZMSConsts.HTTP_REQUEST, domainName, principalDomain);
metric.increment(caller, domainName, principalDomain);
Object timerMetric = metric.startTiming("putdomaintemplate_timing", domainName, principalDomain);
// verify that all template names are valid
List<String> templateNames = domainTemplate.getTemplateNames();
if (templateNames == null || templateNames.isEmpty()) {
throw ZMSUtils.requestError("putDomainTemplate: No templates specified", caller);
}
validateSolutionTemplates(templateNames, caller);
// verify that request is properly authenticated for this request
// Make sure each template name is verified
for (String templateName : domainTemplate.getTemplateNames()) {
verifyAuthorizedServiceOperation(((RsrcCtxWrapper) ctx).principal().getAuthorizedService(),
caller, "name", templateName);
}
dbService.executePutDomainTemplate(ctx, domainName, domainTemplate, auditRef, caller);
metric.stopTiming(timerMetric, domainName, principalDomain);
}
@Override
public void putDomainTemplateExt(ResourceContext ctx, String domainName,
String templateName, String auditRef, DomainTemplate domainTemplate) {
final String caller = "putdomaintemplateext";
metric.increment(ZMSConsts.HTTP_PUT);
logPrincipal(ctx);
if (readOnlyMode) {
throw ZMSUtils.requestError(SERVER_READ_ONLY_MESSAGE, caller);
}
validateRequest(ctx.request(), caller);
validate(domainName, TYPE_DOMAIN_NAME, caller);
validate(templateName, TYPE_SIMPLE_NAME, caller);
validate(domainTemplate, TYPE_DOMAIN_TEMPLATE, caller);
// for consistent handling of all requests, we're going to convert
// all incoming object values into lower case (e.g. domain, role,
// policy, service, etc name)
domainName = domainName.toLowerCase();
templateName = templateName.toLowerCase();
AthenzObject.DOMAIN_TEMPLATE.convertToLowerCase(domainTemplate);
final String principalDomain = getPrincipalDomain(ctx);
metric.increment(ZMSConsts.HTTP_REQUEST, domainName, principalDomain);
metric.increment(caller, domainName, principalDomain);
Object timerMetric = metric.startTiming("putdomaintemplateext_timing", domainName, principalDomain);
// verify that all template names are valid
List<String> templateNames = domainTemplate.getTemplateNames();
if (templateNames == null) {
throw ZMSUtils.requestError("putDomainTemplateExt: No templates specified", caller);
}
// the template name in the object must match the uri
if (!(templateNames.size() == 1 && templateNames.get(0).equals(templateName))) {
throw ZMSUtils.requestError("putDomainTemplateExt: template name mismatch", caller);
}
validateSolutionTemplates(templateNames, caller);
// verify that request is properly authenticated for this request
// Make sure each template name is verified
verifyAuthorizedServiceOperation(((RsrcCtxWrapper) ctx).principal().getAuthorizedService(),
caller, "name", templateName);
dbService.executePutDomainTemplate(ctx, domainName, domainTemplate, auditRef, caller);
metric.stopTiming(timerMetric, domainName, principalDomain);
}
public void deleteDomainTemplate(ResourceContext ctx, String domainName, String templateName, String auditRef) {
final String caller = "deletedomaintemplate";
metric.increment(ZMSConsts.HTTP_DELETE);
logPrincipal(ctx);
if (readOnlyMode) {
throw ZMSUtils.requestError(SERVER_READ_ONLY_MESSAGE, caller);
}
validateRequest(ctx.request(), caller);
validate(domainName, TYPE_DOMAIN_NAME, caller);
validate(templateName, TYPE_SIMPLE_NAME, caller);
// for consistent handling of all requests, we're going to convert
// all incoming object values into lower case (e.g. domain, role,
// policy, service, etc name)
domainName = domainName.toLowerCase();
templateName = templateName.toLowerCase();
final String principalDomain = getPrincipalDomain(ctx);
metric.increment(ZMSConsts.HTTP_REQUEST, domainName, principalDomain);
metric.increment(caller, domainName, principalDomain);
Object timerMetric = metric.startTiming("deletedomaintemplate_timing", domainName, principalDomain);
if (LOG.isDebugEnabled()) {
LOG.debug("deleteDomainTemplate: domain=" + domainName + ", template=" + templateName);
}
// verify that request is properly authenticated for this request
verifyAuthorizedServiceOperation(((RsrcCtxWrapper) ctx).principal().getAuthorizedService(),
caller, "name", templateName);
List<String> templateNames = new ArrayList<>();
templateNames.add(templateName);
validateSolutionTemplates(templateNames, caller);
dbService.executeDeleteDomainTemplate(ctx, domainName, templateName, auditRef, caller);
metric.stopTiming(timerMetric, domainName, principalDomain);
}
Principal createPrincipalForName(String principalName) {
String domain;
String name;
// if we have no . in the principal name we're going to default
// to our configured user domain
int idx = principalName.lastIndexOf('.');
if (idx == -1) {
domain = userDomain;
name = principalName;
} else {
domain = principalName.substring(0, idx);
if (userDomainAlias != null && userDomainAlias.equals(domain)) {
domain = userDomain;
}
name = principalName.substring(idx + 1);
}
return SimplePrincipal.create(domain, name, (String) null);
}
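// examples (with userDomain "user" and userDomainAlias "corp"):
//   "joe"        -> domain "user",   name "joe"
//   "sports.api" -> domain "sports", name "api"
//   "corp.joe"   -> domain "user",   name "joe" (alias mapped to user domain)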
boolean validRoleTokenAccess(String trustDomain, String domainName, String principalName) {
if (trustDomain != null) {
if (LOG.isWarnEnabled()) {
LOG.warn("validRoleTokenAccess: Cannot access cross-domain resources with RoleToken");
}
return false;
}
// for Role tokens we don't have a name component in the principal,
// so the principal name should be the same as the domain value and
// thus must match the domain name from the resource
if (!domainName.equalsIgnoreCase(principalName)) {
if (LOG.isWarnEnabled()) {
LOG.warn("validRoleTokenAccess: resource domain does not match RoleToken domain");
}
return false;
}
return true;
}
AthenzDomain getAthenzDomain(String domainName, boolean ignoreExceptions) {
return getAthenzDomain(domainName, ignoreExceptions, false);
}
AthenzDomain getAthenzDomain(String domainName, boolean ignoreExceptions, boolean masterCopy) {
AthenzDomain domain = null;
try {
domain = dbService.getAthenzDomain(domainName, masterCopy);
} catch (ResourceException ex) {
if (LOG.isDebugEnabled()) {
LOG.debug("getAthenzDomain failure: " + ex.getMessage());
}
if (!ignoreExceptions) {
if (ex.getCode() != ResourceException.NOT_FOUND) {
throw ex;
}
}
}
return domain;
}
AthenzDomain retrieveAccessDomain(String domainName, Principal principal) {
if (LOG.isDebugEnabled()) {
LOG.debug("retrieveAccessDomain: identity: {} domain: {}", principal.getFullName(), domainName);
}
AthenzDomain domain = getAthenzDomain(domainName, false);
if (domain != null) {
return domain;
}
if (LOG.isDebugEnabled()) {
LOG.debug("retrieveAccessDomain: domain not found, looking for virtual domain");
}
// if we don't have virtual/home domains enabled then no need
// to continue further
if (!virtualDomainSupport) {
return null;
}
if (principal.getDomain() == null) {
return null;
}
// the principal's user name must match the corresponding
// home domain name for the user
if (!principal.getDomain().equals(userDomain)) {
return null;
}
final String userHomeDomain = homeDomainPrefix + getUserDomainName(principal.getName());
if (!userHomeDomain.equals(domainName)) {
return null;
}
return virtualHomeDomain(principal, domainName);
}
AccessStatus evaluateAccess(AthenzDomain domain, String identity, String action, String resource,
List<String> authenticatedRoles, String trustDomain) {
AccessStatus accessStatus = AccessStatus.DENIED;
List<Policy> policies = domain.getPolicies();
List<Role> roles = domain.getRoles();
for (Policy policy : policies) {
if (LOG.isDebugEnabled()) {
LOG.debug("evaluateAccess: processing policy: {}", policy.getName());
}
// we are going to process all the assertions defined in this
// policy. As soon as we get a match for an assertion that
// denies access, we're going to return that result. If we
// get a match for an assertion that allows access we're
// going to remember that result and continue looking at
// all the assertions in case there is something else that
// explicitly denies access
List<Assertion> assertions = policy.getAssertions();
if (assertions == null) {
continue;
}
for (Assertion assertion : assertions) {
// get the effect for the assertion which is set
// as allowed by default
AssertionEffect effect = assertion.getEffect();
if (effect == null) {
effect = AssertionEffect.ALLOW;
}
// if we have already matched an allow assertion then
// we'll automatically skip any assertion that has
// allow effect since there is no point of matching it
if (accessStatus == AccessStatus.ALLOWED && effect == AssertionEffect.ALLOW) {
continue;
}
// if no match then process the next assertion
if (!assertionMatch(assertion, identity, action, resource, domain.getName(),
roles, authenticatedRoles, trustDomain)) {
continue;
}
// if the assertion has matched and the effect is deny
// then we're going to return right away otherwise we'll
// set our return allow matched flag to true and continue
// processing other assertions
if (effect == AssertionEffect.DENY) {
return AccessStatus.DENIED;
}
accessStatus = AccessStatus.ALLOWED;
}
}
return accessStatus;
}
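// in short: deny always wins. e.g. if one assertion allows "read" on
// sports:* for a principal's role and another explicitly denies "read" on
// sports:secret, a request for sports:secret is denied even though the
// allow assertion also matches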
String userHomeDomainResource(String resource) {
// if the resource does not start with the user domain prefix then
// we have nothing to do and we'll return the resource as is
if (!resource.startsWith(USER_DOMAIN_PREFIX)) {
return resource;
}
String homeResource = null;
// if we have different userDomain and homeDomain values then
// we need to replace both domain and user names otherwise
// we only need to update the domain value
if (!userDomain.equals(homeDomain)) {
// let's extract the user name. at this point we should
// have the format user.<user-name>:resource
int idx = resource.indexOf(':');
if (idx == -1) {
return resource;
}
final String userName = resource.substring(USER_DOMAIN_PREFIX.length(), idx);
homeResource = homeDomainPrefix + getUserDomainName(userName) + resource.substring(idx);
} else if (!homeDomain.equals(ZMSConsts.USER_DOMAIN)) {
homeResource = homeDomainPrefix + resource.substring(USER_DOMAIN_PREFIX.length());
}
return homeResource == null ? resource : homeResource;
}
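// examples: with userDomain "user" and homeDomain "home", the resource
// "user.joe:role.readers" is rewritten to "home.joe:role.readers"; if the
// user and home domains are the same and match the system default, the
// resource is returned unchanged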
public boolean access(String action, String resource, Principal principal, String trustDomain) {
// for consistent handling of all requests, we're going to convert
// all incoming object values into lower case (e.g. domain, role,
// policy, service, etc name)
resource = resource.toLowerCase();
if (trustDomain != null) {
trustDomain = trustDomain.toLowerCase();
}
action = action.toLowerCase();
// if the resource starts with the user domain and the environment is using
// a different domain name we'll dynamically update the resource value
resource = userHomeDomainResource(resource);
if (LOG.isDebugEnabled()) {
LOG.debug("access:(" + action + ", " + resource + ", " + principal + ", " + trustDomain + ")");
}
// check to see if the authority is allowed to be processed in
// authorization checks. If this value is false then the principal
// must first get a usertoken from ZMS and then submit the request
// with that token
if (!authorityAuthorizationAllowed(principal)) {
LOG.error("Authority is not allowed to support authorization checks");
return false;
}
// retrieve our domain based on resource and action/trustDomain pair.
// we want to provide better error reporting to the users so if we get a
// request where the domain is not found, instead of just returning 403
// forbidden (which is confusing since it implies the user doesn't have
// access as opposed to a possible mistype of the domain name by the user)
// we want to return 404 not found. The athenz server common package has
// special handling for rest.ResourceExceptions so we'll throw that
// exception in this special case of not found domains.
String domainName = retrieveResourceDomain(resource, action, trustDomain);
if (domainName == null) {
throw new com.yahoo.athenz.common.server.rest.ResourceException(
ResourceException.NOT_FOUND, "Domain not found");
}
AthenzDomain domain = retrieveAccessDomain(domainName, principal);
if (domain == null) {
throw new com.yahoo.athenz.common.server.rest.ResourceException(
ResourceException.NOT_FOUND, "Domain not found");
}
// if the domain is disabled then we're going to reject this
// request right away
if (domain.getDomain().getEnabled() == Boolean.FALSE) {
throw new com.yahoo.athenz.common.server.rest.ResourceException(
ResourceException.FORBIDDEN, "Disabled Domain");
}
AccessStatus accessStatus = hasAccess(domain, action, resource, principal, trustDomain);
return accessStatus == AccessStatus.ALLOWED;
}
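// a principal whose authority does not allow authorization checks must
// first obtain a user token from ZMS and resubmit the request with it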
boolean authorityAuthorizationAllowed(Principal principal) {
Authority authority = principal.getAuthority();
if (authority == null) {
return true;
}
return authority.allowAuthorization();
}
String retrieveResourceDomain(String resource, String op, String trustDomain) {
// special handling for ASSUME_ROLE assertions. Since any assertion with
// that action refers to a resource in another domain, there is no point
// in retrieving the domain name from the resource. In these cases the caller
// must specify the trust domain attribute so we'll use that instead and
// if one is not specified then we'll fall back to using the domain name
// from the resource
String domainName;
if (ZMSConsts.ACTION_ASSUME_ROLE.equalsIgnoreCase(op) && trustDomain != null) {
domainName = trustDomain;
} else {
domainName = extractDomainName(resource);
}
return domainName;
}
AccessStatus hasAccess(AthenzDomain domain, String action, String resource,
Principal principal, String trustDomain) {
String identity = principal.getFullName();
// if we're dealing with an access check based on a Role token then
// make sure it's valid before processing it
List<String> authenticatedRoles = principal.getRoles();
if (authenticatedRoles != null && !validRoleTokenAccess(trustDomain, domain.getName(), identity)) {
return AccessStatus.DENIED_INVALID_ROLE_TOKEN;
}
// evaluate our domain's roles and policies to see if access
// is allowed or not for the given operation and resource
return evaluateAccess(domain, identity, action, resource, authenticatedRoles, trustDomain);
}
public Access getAccessExt(ResourceContext ctx, String action, String resource,
String trustDomain, String checkPrincipal) {
final String caller = "getaccessext";
metric.increment(ZMSConsts.HTTP_GET);
logPrincipal(ctx);
validateRequest(ctx.request(), caller);
validate(action, TYPE_COMPOUND_NAME, caller);
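// note: unlike getAccess, the resource value is not validated here
// since the ext variant accepts resource values that do not conform
// to the standard resource name syntax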
return getAccessCheck(((RsrcCtxWrapper) ctx).principal(), action, resource,
trustDomain, checkPrincipal);
}
public Access getAccess(ResourceContext ctx, String action, String resource,
String trustDomain, String checkPrincipal) {
final String caller = "getaccess";
metric.increment(ZMSConsts.HTTP_GET);
logPrincipal(ctx);
validateRequest(ctx.request(), caller);
validate(action, TYPE_COMPOUND_NAME, caller);
validate(resource, TYPE_RESOURCE_NAME, caller);
return getAccessCheck(((RsrcCtxWrapper) ctx).principal(), action, resource,
trustDomain, checkPrincipal);
}
Access getAccessCheck(Principal principal, String action, String resource,
String trustDomain, String checkPrincipal) {
final String caller = "getaccess";
if (LOG.isDebugEnabled()) {
LOG.debug("getAccessCheck:(" + action + ", " + resource + ", " + principal +
", " + trustDomain + ", " + checkPrincipal + ")");
}
// for consistent handling of all requests, we're going to convert
// all incoming object values into lower case (e.g. domain, role,
// policy, service, etc name)
action = action.toLowerCase();
resource = resource.toLowerCase();
if (checkPrincipal != null) {
checkPrincipal = checkPrincipal.toLowerCase();
}
if (trustDomain != null) {
trustDomain = trustDomain.toLowerCase();
}
// retrieve the domain based on our resource and action/trustDomain pair
String domainName = retrieveResourceDomain(resource, action, trustDomain);
if (domainName == null) {
metric.increment(ZMSConsts.HTTP_REQUEST, ZMSConsts.ZMS_INVALID_DOMAIN, principal.getDomain());
metric.increment(caller, ZMSConsts.ZMS_INVALID_DOMAIN, principal.getDomain());
throw ZMSUtils.notFoundError("getAccessCheck: Unable to extract resource domain", caller);
}
AthenzDomain domain = retrieveAccessDomain(domainName, principal);
if (domain == null) {
metric.increment(ZMSConsts.HTTP_REQUEST, ZMSConsts.ZMS_UNKNOWN_DOMAIN, principal.getDomain());
metric.increment(caller, ZMSConsts.ZMS_UNKNOWN_DOMAIN, principal.getDomain());
throw ZMSUtils.notFoundError("getAccessCheck: Resource Domain not found: '"
+ domainName + "'", caller);
}
// if the domain is disabled then we're going to reject this
// request right away
if (domain.getDomain().getEnabled() == Boolean.FALSE) {
throw ZMSUtils.forbiddenError("getAccessCheck: Disabled domain: '"
+ domainName + "'", caller);
}
// start our counter with domain dimension. we're moving the metric here
// after the domain name has been confirmed as valid since with
// dimensions we get stuck with persistent indexes so we only want
// to create them for valid domain names
metric.increment(ZMSConsts.HTTP_REQUEST, domainName, principal.getDomain());
metric.increment(caller, domainName, principal.getDomain());
Object timerMetric = metric.startTiming("getaccess_timing", domainName, principal.getDomain());
// if the check principal is given then we need to carry out the access
// check against that principal
if (checkPrincipal != null) {
principal = createPrincipalForName(checkPrincipal);
if (principal == null) {
throw ZMSUtils.unauthorizedError("getAccessCheck: Invalid check principal value specified", caller);
}
}
boolean accessAllowed = false;
AccessStatus accessStatus = hasAccess(domain, action, resource, principal, trustDomain);
if (accessStatus == AccessStatus.ALLOWED) {
accessAllowed = true;
}
Access access = new Access().setGranted(accessAllowed);
metric.stopTiming(timerMetric, domainName, principal.getDomain());
return access;
}
void validateEntity(String entityName, Entity entity) {
final String caller = "validateentity";
if (!entityName.equals(entity.getName())) {
throw ZMSUtils.requestError("validateEntity: Entity name mismatch: " + entityName + " != " + entity.getName(), caller);
}
if (entity.getValue() == null) {
throw ZMSUtils.requestError("validateEntity: Entity value is empty: " + entityName, caller);
}
}
@Override
public void putEntity(ResourceContext ctx, String domainName, String entityName, String auditRef, Entity resource) {
final String caller = "putentity";
metric.increment(ZMSConsts.HTTP_PUT);
logPrincipal(ctx);
if (readOnlyMode) {
throw ZMSUtils.requestError(SERVER_READ_ONLY_MESSAGE, caller);
}
validateRequest(ctx.request(), caller);
validate(domainName, TYPE_DOMAIN_NAME, caller);
validate(entityName, TYPE_ENTITY_NAME, caller);
validateEntity(entityName, resource);
// for consistent handling of all requests, we're going to convert
// all incoming object values into lower case (e.g. domain, role,
// policy, service, etc name)
domainName = domainName.toLowerCase();
entityName = entityName.toLowerCase();
AthenzObject.ENTITY.convertToLowerCase(resource);
final String principalDomain = getPrincipalDomain(ctx);
metric.increment(ZMSConsts.HTTP_REQUEST, domainName, principalDomain);
metric.increment(caller, domainName, principalDomain);
Object timerMetric = metric.startTiming("putentity_timing", domainName, principalDomain);
// verify that request is properly authenticated for this request
verifyAuthorizedServiceOperation(((RsrcCtxWrapper) ctx).principal().getAuthorizedService(), caller);
dbService.executePutEntity(ctx, domainName, entityName, resource, auditRef, caller);
metric.stopTiming(timerMetric, domainName, principalDomain);
}
@Override
public EntityList getEntityList(ResourceContext ctx, String domainName) {
final String caller = "getentitylist";
metric.increment(ZMSConsts.HTTP_GET);
logPrincipal(ctx);
validateRequest(ctx.request(), caller);
validate(domainName, TYPE_DOMAIN_NAME, caller);
// for consistent handling of all requests, we're going to convert
// all incoming object values into lower case (e.g. domain, role,
// policy, service, etc name)
domainName = domainName.toLowerCase();
final String principalDomain = getPrincipalDomain(ctx);
metric.increment(ZMSConsts.HTTP_REQUEST, domainName, principalDomain);
metric.increment(caller, domainName, principalDomain);
Object timerMetric = metric.startTiming("getentitylist_timing", domainName, principalDomain);
EntityList result = new EntityList();
List<String> names = dbService.listEntities(domainName);
result.setNames(names);
metric.stopTiming(timerMetric, domainName, principalDomain);
return result;
}
public Entity getEntity(ResourceContext ctx, String domainName, String entityName) {
final String caller = "getentity";
metric.increment(ZMSConsts.HTTP_GET);
logPrincipal(ctx);
validateRequest(ctx.request(), caller);
validate(domainName, TYPE_DOMAIN_NAME, caller);
validate(entityName, TYPE_ENTITY_NAME, caller);
// for consistent handling of all requests, we're going to convert
// all incoming object values into lower case (e.g. domain, role,
// policy, service, etc name)
domainName = domainName.toLowerCase();
entityName = entityName.toLowerCase();
final String principalDomain = getPrincipalDomain(ctx);
metric.increment(ZMSConsts.HTTP_REQUEST, domainName, principalDomain);
metric.increment(caller, domainName, principalDomain);
Object timerMetric = metric.startTiming("getentity_timing", domainName, principalDomain);
Entity entity = dbService.getEntity(domainName, entityName);
if (entity == null) {
throw ZMSUtils.notFoundError("getEntity: Entity not found: '" +
ZMSUtils.entityResourceName(domainName, entityName) + "'", caller);
}
metric.stopTiming(timerMetric, domainName, principalDomain);
return entity;
}
public void deleteEntity(ResourceContext ctx, String domainName, String entityName, String auditRef) {
final String caller = "deleteentity";
metric.increment(ZMSConsts.HTTP_DELETE);
logPrincipal(ctx);
if (readOnlyMode) {
throw ZMSUtils.requestError(SERVER_READ_ONLY_MESSAGE, caller);
}
validateRequest(ctx.request(), caller);
validate(domainName, TYPE_DOMAIN_NAME, caller);
validate(entityName, TYPE_ENTITY_NAME, caller);
// for consistent handling of all requests, we're going to convert
// all incoming object values into lower case (e.g. domain, role,
// policy, service, etc name)
domainName = domainName.toLowerCase();
entityName = entityName.toLowerCase();
final String principalDomain = getPrincipalDomain(ctx);
metric.increment(ZMSConsts.HTTP_REQUEST, domainName, principalDomain);
metric.increment(caller, domainName, principalDomain);
Object timerMetric = metric.startTiming("deleteentity_timing", domainName, principalDomain);
// verify that request is properly authenticated for this request
verifyAuthorizedServiceOperation(((RsrcCtxWrapper) ctx).principal().getAuthorizedService(), caller);
dbService.executeDeleteEntity(ctx, domainName, entityName, auditRef, caller);
metric.stopTiming(timerMetric, domainName, principalDomain);
}
public ServerTemplateList getServerTemplateList(ResourceContext ctx) {
final String caller = "getservertemplatelist";
metric.increment(ZMSConsts.HTTP_GET);
metric.increment(ZMSConsts.HTTP_REQUEST);
metric.increment(caller);
final String principalDomain = getPrincipalDomain(ctx);
Object timerMetric = metric.startTiming("getservertemplatelist_timing", null, principalDomain);
logPrincipal(ctx);
validateRequest(ctx.request(), caller);
ServerTemplateList result = new ServerTemplateList();
result.setTemplateNames(new ArrayList<>(serverSolutionTemplates.names()));
metric.stopTiming(timerMetric, null, principalDomain);
return result;
}
public Template getTemplate(ResourceContext ctx, String templateName) {
final String caller = "gettemplate";
metric.increment(ZMSConsts.HTTP_GET);
metric.increment(ZMSConsts.HTTP_REQUEST);
metric.increment(caller);
final String principalDomain = getPrincipalDomain(ctx);
Object timerMetric = metric.startTiming("gettemplate_timing", null, principalDomain);
logPrincipal(ctx);
validateRequest(ctx.request(), caller);
validate(templateName, TYPE_SIMPLE_NAME, caller);
// for consistent handling of all requests, we're going to convert
// all incoming object values into lower case (e.g. domain, role,
// policy, service, etc name)
templateName = templateName.toLowerCase();
Template template = serverSolutionTemplates.get(templateName);
if (template == null) {
throw ZMSUtils.notFoundError("getTemplate: Template not found: '" + templateName + "'", caller);
}
List<Role> roles = template.getRoles();
if (roles != null && !roles.isEmpty()) {
for (Role role : roles) {
List<RoleMember> roleMembers = role.getRoleMembers();
if (roleMembers != null) {
role.setMembers(ZMSUtils.convertRoleMembersToMembers(roleMembers));
}
}
}
metric.stopTiming(timerMetric, null, principalDomain);
return template;
}
@Override
public DomainTemplateDetailsList getDomainTemplateDetailsList(ResourceContext ctx, String domainName) {
final String caller = "getDomainTemplateDetailsList";
metric.increment(ZMSConsts.HTTP_GET);
metric.increment(ZMSConsts.HTTP_REQUEST);
metric.increment(caller);
logPrincipal(ctx);
validateRequest(ctx.request(), caller);
validate(domainName, TYPE_DOMAIN_NAME, caller);
// for consistent handling of all requests, we're going to convert
// all domain into lower case
domainName = domainName.toLowerCase();
final String principalDomain = getPrincipalDomain(ctx);
Object timerMetric = metric.startTiming("getDomainTemplateDetailsList_timing", domainName, principalDomain);
List<TemplateMetaData> templateDomainMapping = dbService.getDomainTemplates(domainName);
DomainTemplateDetailsList domainTemplateDetailsList = null;
if (templateDomainMapping != null) {
domainTemplateDetailsList = new DomainTemplateDetailsList();
for (TemplateMetaData metaData : templateDomainMapping) {
Template template = serverSolutionTemplates.get(metaData.getTemplateName());
// there is a possibility of a stale template coming back from DB over time (caused by template cleanup)
if (template != null) {
// Merging template metadata fields from solution-templates.json with template data from DB
metaData.setLatestVersion(template.getMetadata().getLatestVersion());
metaData.setAutoUpdate(template.getMetadata().getAutoUpdate());
metaData.setDescription(template.getMetadata().getDescription());
metaData.setKeywordsToReplace(template.getMetadata().getKeywordsToReplace());
metaData.setTimestamp(template.getMetadata().getTimestamp());
}
}
domainTemplateDetailsList.setMetaData(templateDomainMapping);
}
metric.stopTiming(timerMetric, domainName, principalDomain);
return domainTemplateDetailsList;
}
public RoleList getRoleList(ResourceContext ctx, String domainName, Integer limit, String skip) {
final String caller = "getrolelist";
metric.increment(ZMSConsts.HTTP_GET);
logPrincipal(ctx);
validateRequest(ctx.request(), caller);
validate(domainName, TYPE_DOMAIN_NAME, caller);
// for consistent handling of all requests, we're going to convert
// all incoming object values into lower case (e.g. domain, role,
// policy, service, etc name)
domainName = domainName.toLowerCase();
if (skip != null) {
skip = skip.toLowerCase();
}
final String principalDomain = getPrincipalDomain(ctx);
metric.increment(ZMSConsts.HTTP_REQUEST, domainName, principalDomain);
metric.increment(caller, domainName, principalDomain);
Object timerMetric = metric.startTiming("getrolelist_timing", domainName, principalDomain);
RoleList result = new RoleList();
List<String> names = new ArrayList<>();
String next = processListRequest(domainName, AthenzObject.ROLE, limit, skip, names);
result.setNames(names);
if (next != null) {
result.setNext(next);
}
metric.stopTiming(timerMetric, domainName, principalDomain);
return result;
}
List<Role> setupRoleList(AthenzDomain domain, Boolean members) {
// if we're asked to return the members as well then we
// just need to return the data as is without any modifications
List<Role> roles;
if (members == Boolean.TRUE) {
roles = domain.getRoles();
} else {
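// otherwise return copies that carry the role metadata but
// exclude the member list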
roles = new ArrayList<>();
for (Role role : domain.getRoles()) {
Role newRole = new Role()
.setName(role.getName())
.setModified(role.getModified())
.setTrust(role.getTrust())
.setAuditEnabled(role.getAuditEnabled())
.setSelfServe(role.getSelfServe())
.setMemberExpiryDays(role.getMemberExpiryDays())
.setServiceExpiryDays(role.getServiceExpiryDays())
.setTokenExpiryMins(role.getTokenExpiryMins())
.setCertExpiryMins(role.getCertExpiryMins())
.setMemberReviewDays(role.getMemberReviewDays())
.setServiceReviewDays(role.getServiceReviewDays())
.setSignAlgorithm(role.getSignAlgorithm())
.setReviewEnabled(role.getReviewEnabled())
.setLastReviewedDate(role.getLastReviewedDate());
roles.add(newRole);
}
}
return roles;
}
public Roles getRoles(ResourceContext ctx, String domainName, Boolean members) {
final String caller = "getroles";
metric.increment(ZMSConsts.HTTP_GET);
logPrincipal(ctx);
validateRequest(ctx.request(), caller);
validate(domainName, TYPE_DOMAIN_NAME, caller);
// for consistent handling of all requests, we're going to convert
// all incoming object values into lower case (e.g. domain, role,
// policy, service, etc name)
domainName = domainName.toLowerCase();
final String principalDomain = getPrincipalDomain(ctx);
metric.increment(ZMSConsts.HTTP_REQUEST, domainName, principalDomain);
metric.increment(caller, domainName, principalDomain);
Object timerMetric = metric.startTiming("getroles_timing", domainName, principalDomain);
Roles result = new Roles();
AthenzDomain domain = getAthenzDomain(domainName, false);
if (domain == null) {
throw ZMSUtils.notFoundError("getRoles: Domain not found: '" + domainName + "'", caller);
}
result.setList(setupRoleList(domain, members));
metric.stopTiming(timerMetric, domainName, principalDomain);
return result;
}
@Override
public DomainRoleMembers getDomainRoleMembers(ResourceContext ctx, String domainName) {
final String caller = "getdomainrolemembers";
metric.increment(ZMSConsts.HTTP_GET);
logPrincipal(ctx);
validateRequest(ctx.request(), caller);
validate(domainName, TYPE_DOMAIN_NAME, caller);
// for consistent handling of all requests, we're going to convert
// all incoming object values into lower case (e.g. domain, role,
// policy, service, etc name)
domainName = domainName.toLowerCase();
final String principalDomain = getPrincipalDomain(ctx);
metric.increment(ZMSConsts.HTTP_REQUEST, domainName, principalDomain);
metric.increment(caller, domainName, principalDomain);
Object timerMetric = metric.startTiming("getdomainrolemembers_timing", domainName, principalDomain);
DomainRoleMembers roleMembers = dbService.listDomainRoleMembers(domainName);
metric.stopTiming(timerMetric, domainName, principalDomain);
return roleMembers;
}
@Override
public Role getRole(ResourceContext ctx, String domainName, String roleName,
Boolean auditLog, Boolean expand, Boolean pending) {
final String caller = "getrole";
metric.increment(ZMSConsts.HTTP_GET);
logPrincipal(ctx);
validateRequest(ctx.request(), caller);
validate(domainName, TYPE_DOMAIN_NAME, caller);
validate(roleName, TYPE_ENTITY_NAME, caller);
// for consistent handling of all requests, we're going to convert
// all incoming object values into lower case (e.g. domain, role,
// policy, service, etc name)
domainName = domainName.toLowerCase();
roleName = roleName.toLowerCase();
final String principalDomain = getPrincipalDomain(ctx);
metric.increment(ZMSConsts.HTTP_REQUEST, domainName, principalDomain);
metric.increment(caller, domainName, principalDomain);
Object timerMetric = metric.startTiming("getrole_timing", domainName, principalDomain);
Role role = dbService.getRole(domainName, roleName, auditLog, expand, pending);
if (role == null) {
throw ZMSUtils.notFoundError("getRole: Role not found: '" +
ZMSUtils.roleResourceName(domainName, roleName) + "'", caller);
}
metric.stopTiming(timerMetric, domainName, principalDomain);
return role;
}
List<String> normalizedAdminUsers(List<String> admins) {
List<String> normalizedAdmins = new ArrayList<>();
for (String admin : admins) {
normalizedAdmins.add(normalizeDomainAliasUser(admin));
}
return normalizedAdmins;
}
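// map a principal from the configured user domain alias to the primary
// user domain (e.g. if the alias prefix were "alias." and the user prefix
// "user.", then "alias.joe" would become "user.joe"); names with further
// subdomain components after the alias prefix are returned unchanged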
String normalizeDomainAliasUser(String user) {
if (user != null && userDomainAliasPrefix != null && user.startsWith(userDomainAliasPrefix)) {
if (user.indexOf('.', userDomainAliasPrefix.length()) == -1) {
return userDomainPrefix + user.substring(userDomainAliasPrefix.length());
}
}
return user;
}
private void addNormalizedRoleMember(Map<String, RoleMember> normalizedMembers,
RoleMember member) {
member.setMemberName(normalizeDomainAliasUser(member.getMemberName()));
// we'll automatically ignore any duplicates
if (!normalizedMembers.containsKey(member.getMemberName())) {
normalizedMembers.put(member.getMemberName(), member);
}
}
void normalizeRoleMembers(Role role) {
Map<String, RoleMember> normalizedMembers = new HashMap<>();
// normalize getMembers() first
List<String> members = role.getMembers();
if (members != null) {
for (String memberOld : members) {
RoleMember member = new RoleMember().setMemberName(memberOld);
addNormalizedRoleMember(normalizedMembers, member);
}
}
// normalize getRoleMembers() now
List<RoleMember> roleMembers = role.getRoleMembers();
if (roleMembers != null) {
for (RoleMember member : roleMembers) {
addNormalizedRoleMember(normalizedMembers, member);
}
}
role.setRoleMembers(new ArrayList<>(normalizedMembers.values()));
role.setMembers(null);
}
boolean isConsistentRoleName(final String domainName, final String roleName, Role role) {
String resourceName = ZMSUtils.roleResourceName(domainName, roleName);
// first let's assume we have the expected name specified in the role
if (resourceName.equals(role.getName())) {
return true;
}
// if not check to see if the role contains the relative local name
// part only instead of the expected resourceName and update accordingly
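// e.g. for domain "sports" and role "readers", both "sports:role.readers"
// and "readers" are accepted as the role name value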
if (roleName.equals(role.getName())) {
role.setName(resourceName);
return true;
}
// we have a mismatch
return false;
}
@Override
public void putRole(ResourceContext ctx, String domainName, String roleName, String auditRef, Role role) {
final String caller = "putrole";
metric.increment(ZMSConsts.HTTP_PUT);
logPrincipal(ctx);
if (readOnlyMode) {
throw ZMSUtils.requestError(SERVER_READ_ONLY_MESSAGE, caller);
}
validateRequest(ctx.request(), caller);
validate(domainName, TYPE_DOMAIN_NAME, caller);
validate(roleName, TYPE_ENTITY_NAME, caller);
validate(role, TYPE_ROLE, caller);
// for consistent handling of all requests, we're going to convert
// all incoming object values into lower case (e.g. domain, role,
// policy, service, etc name)
domainName = domainName.toLowerCase();
roleName = roleName.toLowerCase();
AthenzObject.ROLE.convertToLowerCase(role);
// validate the user authority settings if they're provided
validateRoleUserAuthorityAttributes(role.getUserAuthorityFilter(), role.getUserAuthorityExpiration(), caller);
final String principalDomain = getPrincipalDomain(ctx);
metric.increment(ZMSConsts.HTTP_REQUEST, domainName, principalDomain);
metric.increment(caller, domainName, principalDomain);
Object timerMetric = metric.startTiming("putrole_timing", domainName, principalDomain);
// verify that request is properly authenticated for this request
verifyAuthorizedServiceOperation(((RsrcCtxWrapper) ctx).principal().getAuthorizedService(), caller);
// verify the role name in the URI and request are consistent
if (!isConsistentRoleName(domainName, roleName, role)) {
throw ZMSUtils.requestError("putRole: Inconsistent role names - expected: "
+ ZMSUtils.roleResourceName(domainName, roleName) + ", actual: "
+ role.getName(), caller);
}
Domain domain = dbService.getDomain(domainName, false);
if (domain == null) {
throw ZMSUtils.notFoundError("No such domain: " + domainName, caller);
}
// validate role and trust settings are as expected
ZMSUtils.validateRoleStructure(role, caller, domainName);
// normalize and remove duplicate members
normalizeRoleMembers(role);
// check to see if we need to validate user and service members
// and possibly user authority filter restrictions
validateRoleMemberPrincipals(role, caller);
// if the role is review enabled then it cannot contain any
// role members since we want review and audit enabled roles
// to be created as such first and have individual members
// added afterwards
if (role.getReviewEnabled() == Boolean.TRUE && !role.getRoleMembers().isEmpty()) {
throw ZMSUtils.requestError("Set review enabled flag using role meta api", caller);
}
// update role expiry based on our configurations
updateRoleMemberExpiration(
domain.getMemberExpiryDays(),
role.getMemberExpiryDays(),
domain.getServiceExpiryDays(),
role.getServiceExpiryDays(),
role.getRoleMembers());
// update role expiry based on user authority expiry
// if configured
updateRoleMemberUserAuthorityExpiry(role, caller);
// update role review based on our configurations
updateRoleMemberReviewReminder(role.getMemberReviewDays(), role.getServiceReviewDays(), role.getRoleMembers());
// process our request
dbService.executePutRole(ctx, domainName, roleName, role, auditRef, caller);
metric.stopTiming(timerMetric, domainName, principalDomain);
}
void validateRoleMemberPrincipals(final Role role, final String caller) {
// make sure we have either one of the options enabled for verification
final String userAuthorityFilter = enforcedUserAuthorityFilter(role.getUserAuthorityFilter());
if (!shouldValidateRoleMembers(userAuthorityFilter)) {
return;
}
for (RoleMember roleMember : role.getRoleMembers()) {
validateRoleMemberPrincipal(roleMember.getMemberName(), userAuthorityFilter, caller);
}
}
void updateRoleMemberUserAuthorityExpiry(final Role role, final String caller) {
final String userAuthorityExpiry = getUserAuthorityExpiryAttr(role);
if (userAuthorityExpiry == null) {
return;
}
for (RoleMember roleMember : role.getRoleMembers()) {
boolean bUser = ZMSUtils.isUserDomainPrincipal(roleMember.getMemberName(), userDomainPrefix,
addlUserCheckDomainPrefixList);
if (bUser) {
// if we don't have an expiry specified for the user
// then we're not going to allow this member
Date expiry = userAuthority.getDateAttribute(roleMember.getMemberName(), userAuthorityExpiry);
if (expiry == null) {
throw ZMSUtils.requestError("Invalid member: " + roleMember.getMemberName() +
". No expiry date attribute specified in user authority", caller);
}
roleMember.setExpiration(Timestamp.fromDate(expiry));
} else {
// if we have a user authority expiry attribute configured then the
// role cannot have service members
throw ZMSUtils.requestError("Role cannot have non-user member due to user authority expiry setup", caller);
}
}
}
void validateRoleMemberPrincipal(final String memberName, final String userAuthorityFilter, final String caller) {
boolean bUser = ZMSUtils.isUserDomainPrincipal(memberName, userDomainPrefix, addlUserCheckDomainPrefixList);
if (bUser) {
// if the account contains a wildcard then we're going
// to let the user authority decide if it's valid or not
if (validateUserRoleMembers && userAuthority != null) {
if (!userAuthority.isValidUser(memberName)) {
throw ZMSUtils.requestError("Principal " + memberName + " is not valid", caller);
}
}
// once we know it's a valid principal and we have a user
// authority filter configured, we'll check that as well.
// if we've already determined that the principal is not
// valid there is no point in running this check
if (userAuthorityFilter != null) {
if (!ZMSUtils.isUserAuthorityFilterValid(userAuthority, userAuthorityFilter, memberName)) {
throw ZMSUtils.requestError("Invalid member: " + memberName +
". Required user authority filter not valid for the member", caller);
}
}
} else {
// if it's a service and a user authority filter is specified
// then the member is automatically rejected since the filter
// only applies to users, so we run service validation only
// when the filter is null
if (userAuthorityFilter != null) {
throw ZMSUtils.requestError("Invalid member: " + memberName +
". Required user authority filter not valid service members", caller);
} else if (validateServiceRoleMembers) {
// if the account contains a wildcard character then
// we're going to assume it's valid
int idx = memberName.indexOf('*');
if (idx == -1) {
idx = memberName.lastIndexOf('.');
if (idx != -1) {
final String domainName = memberName.substring(0, idx);
final String serviceName = memberName.substring(idx + 1);
// first we need to check if the domain is on the list of
// our skip domains for service member validation. these
// are typically domains (like for ci/cd) where services
// are dynamic and do not need to be registered in Athenz
if (!validateServiceMemberSkipDomains.contains(domainName)) {
if (dbService.getServiceIdentity(domainName, serviceName, true) == null) {
throw ZMSUtils.requestError("Principal " + memberName + " is not a valid service", caller);
}
}
} else {
throw ZMSUtils.requestError("Principal " + memberName + " is not valid", caller);
}
}
}
}
}
public void deleteRole(ResourceContext ctx, String domainName, String roleName, String auditRef) {
final String caller = "deleterole";
metric.increment(ZMSConsts.HTTP_DELETE);
logPrincipal(ctx);
if (readOnlyMode) {
throw ZMSUtils.requestError(SERVER_READ_ONLY_MESSAGE, caller);
}
validateRequest(ctx.request(), caller);
validate(domainName, TYPE_DOMAIN_NAME, caller);
validate(roleName, TYPE_ENTITY_NAME, caller);
// for consistent handling of all requests, we're going to convert
// all incoming object values into lower case (e.g. domain, role,
// policy, service, etc name)
domainName = domainName.toLowerCase();
roleName = roleName.toLowerCase();
final String principalDomain = getPrincipalDomain(ctx);
metric.increment(ZMSConsts.HTTP_REQUEST, domainName, principalDomain);
metric.increment(caller, domainName, principalDomain);
Object timerMetric = metric.startTiming("deleterole_timing", domainName, principalDomain);
// verify that request is properly authenticated for this request
verifyAuthorizedServiceOperation(((RsrcCtxWrapper) ctx).principal().getAuthorizedService(), caller);
/* we are not going to allow any user to delete
* the admin role and policy since those are required
* for standard domain operations */
if (roleName.equalsIgnoreCase(ADMIN_ROLE_NAME)) {
throw ZMSUtils.requestError("deleteRole: admin role cannot be deleted", caller);
}
dbService.executeDeleteRole(ctx, domainName, roleName, auditRef, caller);
metric.stopTiming(timerMetric, domainName, principalDomain);
}
boolean memberNameMatch(String memberName, String matchName) {
// we are supporting 3 formats for role members
// *, <domain>.* and <domain>.<user>*
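// e.g. member "sports.*" matches "sports.api" and "sports.storage"
// while "sports.api" only matches itself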
if (memberName.equals("*")) {
return true;
} else if (memberName.endsWith("*")) {
return matchName.startsWith(memberName.substring(0, memberName.length() - 1));
} else {
return memberName.equals(matchName);
}
}
boolean checkRoleMemberExpiration(List<RoleMember> roleMembers, String member) {
boolean isMember = false;
for (RoleMember memberInfo: roleMembers) {
final String memberName = memberInfo.getMemberName();
if (memberNameMatch(memberName, member)) {
// check expiration; if it is not defined, the member is not expired.
Timestamp expiration = memberInfo.getExpiration();
if (expiration != null) {
isMember = expiration.millis() >= System.currentTimeMillis();
} else {
isMember = true;
}
break;
}
}
return isMember;
}
boolean isMemberOfRole(Role role, String member) {
List<RoleMember> roleMembers = role.getRoleMembers();
if (roleMembers == null) {
return false;
}
return checkRoleMemberExpiration(roleMembers, member);
}
@Override
public Membership getMembership(ResourceContext ctx, String domainName,
String roleName, String memberName, String expiration) {
final String caller = "getmembership";
metric.increment(ZMSConsts.HTTP_GET);
logPrincipal(ctx);
validateRequest(ctx.request(), caller);
validate(domainName, TYPE_DOMAIN_NAME, caller);
validate(roleName, TYPE_ENTITY_NAME, caller);
validate(memberName, TYPE_MEMBER_NAME, caller);
// for consistent handling of all requests, we're going to convert
// all incoming object values into lower case (e.g. domain, role,
// policy, service, etc name)
domainName = domainName.toLowerCase();
roleName = roleName.toLowerCase();
memberName = normalizeDomainAliasUser(memberName.toLowerCase());
long expiryTimestamp = getModTimestamp(expiration);
final String principalDomain = getPrincipalDomain(ctx);
metric.increment(ZMSConsts.HTTP_REQUEST, domainName, principalDomain);
metric.increment(caller, domainName, principalDomain);
Object timerMetric = metric.startTiming("getmembership_timing", domainName, principalDomain);
Membership result = dbService.getMembership(domainName, roleName, memberName, expiryTimestamp, false);
metric.stopTiming(timerMetric, domainName, principalDomain);
return result;
}
@Override
public DomainRoleMembers getOverdueReview(ResourceContext ctx, String domainName) {
final String caller = "getoverduereview";
metric.increment(ZMSConsts.HTTP_GET);
logPrincipal(ctx);
validateRequest(ctx.request(), caller);
validate(domainName, TYPE_DOMAIN_NAME, caller);
// for consistent handling of all requests, we're going to convert
// all incoming object values into lower case (e.g. domain, role,
// policy, service, etc name)
domainName = domainName.toLowerCase();
final String principalDomain = getPrincipalDomain(ctx);
metric.increment(ZMSConsts.HTTP_REQUEST, domainName, principalDomain);
metric.increment(caller, domainName, principalDomain);
Object timerMetric = metric.startTiming("getoverduereview_timing", domainName, principalDomain);
DomainRoleMembers roleMembers = dbService.listOverdueReviewRoleMembers(domainName);
metric.stopTiming(timerMetric, domainName, principalDomain);
return roleMembers;
}
long configuredDueDateMillis(Integer domainDueDateDays, Integer roleDueDateDays) {
// the role expiry days setting overrides the domain one if configured
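// e.g. domain=90 and role=30 yields a due date 30 days from now,
// domain=90 and role=null yields 90 days, and both null yields 0
// which indicates no due date is enforced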
int expiryDays = 0;
if (roleDueDateDays != null && roleDueDateDays > 0) {
expiryDays = roleDueDateDays;
} else if (domainDueDateDays != null && domainDueDateDays > 0) {
expiryDays = domainDueDateDays;
}
return expiryDays == 0 ? 0 : System.currentTimeMillis() + TimeUnit.MILLISECONDS.convert(expiryDays, TimeUnit.DAYS);
}
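// cap the member supplied due date at the configured maximum: if the
// member has no due date, or one further out than the configured value,
// use the configured value; otherwise keep the member's earlier date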
Timestamp getMemberDueDate(long cfgDueDateMillis, Timestamp memberDueDate) {
if (memberDueDate == null) {
return Timestamp.fromMillis(cfgDueDateMillis);
} else if (memberDueDate.millis() > cfgDueDateMillis) {
return Timestamp.fromMillis(cfgDueDateMillis);
} else {
return memberDueDate;
}
}
void updateRoleMemberExpiration(Integer domainUserMemberDueDateDays,
Integer roleUserMemberDueDateDays,
Integer domainServiceMemberDueDateDays,
Integer roleServiceMemberDueDateDays,
List<RoleMember> roleMembers) {
updateRoleMemberDueDate(
domainUserMemberDueDateDays,
roleUserMemberDueDateDays,
domainServiceMemberDueDateDays,
roleServiceMemberDueDateDays,
roleMembers,
RoleMember::getExpiration,
RoleMember::setExpiration);
}
void updateRoleMemberReviewReminder(Integer roleUserMemberDueDateDays,
Integer roleServiceMemberDueDateDays,
List<RoleMember> roleMembers) {
updateRoleMemberDueDate(
null,
roleUserMemberDueDateDays,
null,
roleServiceMemberDueDateDays,
roleMembers,
RoleMember::getReviewReminder,
RoleMember::setReviewReminder);
}
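// shared implementation for both expiration and review reminder updates;
// the getter/setter pair selects which timestamp field on the role
// member is read and updated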
private void updateRoleMemberDueDate(Integer domainUserMemberDueDateDays,
Integer roleUserMemberDueDateDays,
Integer domainServiceMemberDueDateDays,
Integer roleServiceMemberDueDateDays,
List<RoleMember> roleMembers,
Function<RoleMember, Timestamp> dueDateGetter,
BiConsumer<RoleMember, Timestamp> dueDateSetter) {
long cfgUserMemberDueDateMillis = configuredDueDateMillis(domainUserMemberDueDateDays, roleUserMemberDueDateDays);
long cfgServiceMemberDueDateMillis = configuredDueDateMillis(domainServiceMemberDueDateDays, roleServiceMemberDueDateDays);
// if we have no value configured then we have nothing to
// do so we'll just return right away
if (cfgUserMemberDueDateMillis == 0 && cfgServiceMemberDueDateMillis == 0) {
return;
}
// go through the members and update due date as necessary
for (RoleMember roleMember : roleMembers) {
boolean bUser = ZMSUtils.isUserDomainPrincipal(roleMember.getMemberName(), userDomainPrefix,
addlUserCheckDomainPrefixList);
Timestamp currentDueDate = dueDateGetter.apply(roleMember);
if (bUser && cfgUserMemberDueDateMillis != 0) {
Timestamp newDueDate = getMemberDueDate(cfgUserMemberDueDateMillis, currentDueDate);
dueDateSetter.accept(roleMember, newDueDate);
} else if (!bUser && cfgServiceMemberDueDateMillis != 0) {
Timestamp newDueDate = getMemberDueDate(cfgServiceMemberDueDateMillis, currentDueDate);
dueDateSetter.accept(roleMember, newDueDate);
}
}
}
Timestamp memberDueDateTimestamp(Integer domainDueDateDays, Integer roleDueDateDays, Timestamp memberDueDate) {
long cfgExpiryMillis = configuredDueDateMillis(domainDueDateDays, roleDueDateDays);
// if we have no value configured then return
// the membership expiration as is
if (cfgExpiryMillis == 0) {
return memberDueDate;
}
// otherwise compare the configured expiry days with the specified
// membership value and choose the smallest expiration value
return getMemberDueDate(cfgExpiryMillis, memberDueDate);
}
@Override
public void putMembership(ResourceContext ctx, String domainName, String roleName,
String memberName, String auditRef, Membership membership) {
final String caller = "putmembership";
metric.increment(ZMSConsts.HTTP_PUT);
logPrincipal(ctx);
if (readOnlyMode) {
throw ZMSUtils.requestError(SERVER_READ_ONLY_MESSAGE, caller);
}
validateRequest(ctx.request(), caller);
validate(domainName, TYPE_DOMAIN_NAME, caller);
validate(roleName, TYPE_ENTITY_NAME, caller);
validate(memberName, TYPE_MEMBER_NAME, caller);
validate(membership, TYPE_MEMBERSHIP, caller);
// for consistent handling of all requests, we're going to convert
// all incoming object values into lower case (e.g. domain, role,
// policy, service, etc name)
domainName = domainName.toLowerCase();
roleName = roleName.toLowerCase();
memberName = memberName.toLowerCase();
AthenzObject.MEMBERSHIP.convertToLowerCase(membership);
final Principal principal = ((RsrcCtxWrapper) ctx).principal();
final String principalDomain = principal.getDomain();
metric.increment(ZMSConsts.HTTP_REQUEST, domainName, principalDomain);
metric.increment(caller, domainName, principalDomain);
Object timerMetric = metric.startTiming("putmembership_timing", domainName, principalDomain);
// verify that request is properly authenticated for this request
verifyAuthorizedServiceRoleOperation(principal.getAuthorizedService(), caller, roleName);
// verify that the member name in the URI and object provided match
if (!memberName.equals(membership.getMemberName())) {
throw ZMSUtils.requestError("putMembership: Member name in URI and Membership object do not match", caller);
}
// role name is optional so we'll verify only if the value is present in the object
if (membership.getRoleName() != null && !roleName.equals(membership.getRoleName())) {
throw ZMSUtils.requestError("putMembership: Role name in URI and Membership object do not match", caller);
}
// extract our role object to get its attributes
AthenzDomain domain = getAthenzDomain(domainName, false);
Role role = getRoleFromDomain(roleName, domain);
if (role == null) {
throw ZMSUtils.requestError("Invalid rolename specified", caller);
}
// create and normalize the role member object
RoleMember roleMember = new RoleMember();
roleMember.setMemberName(normalizeDomainAliasUser(memberName));
setRoleMemberExpiration(domain, role, roleMember, membership, caller);
setRoleMemberReview(role, roleMember, membership);
// check to see if we need to validate the principal
final String userAuthorityFilter = enforcedUserAuthorityFilter(role.getUserAuthorityFilter());
if (shouldValidateRoleMembers(userAuthorityFilter)) {
validateRoleMemberPrincipal(roleMember.getMemberName(), userAuthorityFilter, caller);
}
// authorization check which also automatically updates
// the active and approved flags for the request
if (!isAllowedPutMembership(principal, domain, role, roleMember)) {
metric.stopTiming(timerMetric, domainName, principalDomain);
throw ZMSUtils.forbiddenError("putMembership: principal is not authorized to add members", caller);
}
// add the member to the specified role
dbService.executePutMembership(ctx, domainName, roleName, roleMember, auditRef, caller);
// new role member with pending status. Notify approvers
if (roleMember.getApproved() == Boolean.FALSE) {
sendMembershipApprovalNotification(domainName, domain.getDomain().getOrg(), roleName,
roleMember.getMemberName(), auditRef, principal.getFullName(), role);
}
metric.stopTiming(timerMetric, domainName, principalDomain);
}
String enforcedUserAuthorityFilter(final String userAuthorityFilter) {
// for a filter to be enforced we need to make sure we have
// a valid user authority object along with non-empty filter
if (userAuthority == null || userAuthorityFilter == null || userAuthorityFilter.isEmpty()) {
return null;
}
return userAuthorityFilter;
}
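// member validation is required if user or service member validation
// is enabled server-wide or a user authority filter is in effect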
boolean shouldValidateRoleMembers(final String userAuthorityFilter) {
return validateUserRoleMembers || validateServiceRoleMembers || userAuthorityFilter != null;
}
String getUserAuthorityExpiryAttr(final Role role) {
// we must have a valid user authority
if (userAuthority == null) {
return null;
}
final String userAuthorityExpiry = role.getUserAuthorityExpiration();
if (userAuthorityExpiry == null || userAuthorityExpiry.isEmpty()) {
return null;
}
return userAuthorityExpiry;
}
Timestamp getUserAuthorityExpiry(final String userName, final Role role, final String caller) {
final String userAuthorityExpiry = getUserAuthorityExpiryAttr(role);
if (userAuthorityExpiry == null) {
return null;
}
// if we don't get an expiry then we're going to throw an exception
// since based on our config we must have an expiry specified
Date expiry = userAuthority.getDateAttribute(userName, userAuthorityExpiry);
if (expiry == null) {
throw ZMSUtils.requestError("User does not have required user authority expiry configured", caller);
}
return Timestamp.fromDate(expiry);
}
void setRoleMemberExpiration(final AthenzDomain domain, final Role role, final RoleMember roleMember,
final Membership membership, final String caller) {
boolean bUser = ZMSUtils.isUserDomainPrincipal(roleMember.getMemberName(), userDomainPrefix,
addlUserCheckDomainPrefixList);
if (bUser) {
Timestamp userAuthorityExpiry = getUserAuthorityExpiry(roleMember.getMemberName(), role, caller);
if (userAuthorityExpiry != null) {
roleMember.setExpiration(userAuthorityExpiry);
} else {
roleMember.setExpiration(memberDueDateTimestamp(domain.getDomain().getMemberExpiryDays(),
role.getMemberExpiryDays(), membership.getExpiration()));
}
} else {
// if we have a user authority expiry attribute configured then the
// role cannot have service members
final String userAuthorityExpiry = getUserAuthorityExpiryAttr(role);
if (userAuthorityExpiry != null) {
throw ZMSUtils.requestError("Role cannot have non-user member due to user authority expiry setup", caller);
}
roleMember.setExpiration(memberDueDateTimestamp(domain.getDomain().getServiceExpiryDays(),
role.getServiceExpiryDays(), membership.getExpiration()));
}
}
void setRoleMemberReview(final Role role, final RoleMember roleMember,
final Membership membership) {
boolean bUser = ZMSUtils.isUserDomainPrincipal(roleMember.getMemberName(), userDomainPrefix,
addlUserCheckDomainPrefixList);
if (bUser) {
roleMember.setReviewReminder(memberDueDateTimestamp(null,
role.getMemberReviewDays(), membership.getReviewReminder()));
} else {
roleMember.setReviewReminder(memberDueDateTimestamp(null,
role.getServiceReviewDays(), membership.getReviewReminder()));
}
}
void sendMembershipApprovalNotification(final String domain, final String org, final String roleName,
final String member, final String auditRef, final String principal, final Role role) {
Map<String, String> details = new HashMap<>();
details.put(NOTIFICATION_DETAILS_DOMAIN, domain);
details.put(NOTIFICATION_DETAILS_ROLE, roleName);
details.put(NOTIFICATION_DETAILS_MEMBER, member);
details.put(NOTIFICATION_DETAILS_REASON, auditRef);
details.put(NOTIFICATION_DETAILS_REQUESTER, principal);
if (LOG.isDebugEnabled()) {
LOG.debug("Sending Membership Approval notification after putMembership");
}
List<Notification> notifications = new PutMembershipNotificationTask(domain, org, role, details, dbService, userDomainPrefix).getNotifications();
notificationManager.sendNotifications(notifications);
}
public void deletePendingMembership(ResourceContext ctx, String domainName, String roleName,
String memberName, String auditRef) {
final String caller = "deletependingmembership";
metric.increment(ZMSConsts.HTTP_DELETE);
logPrincipal(ctx);
if (readOnlyMode) {
throw ZMSUtils.requestError(SERVER_READ_ONLY_MESSAGE, caller);
}
validateRequest(ctx.request(), caller);
validate(domainName, TYPE_DOMAIN_NAME, caller);
validate(roleName, TYPE_ENTITY_NAME, caller);
validate(memberName, TYPE_MEMBER_NAME, caller);
// for consistent handling of all requests, we're going to convert
// all incoming object values into lower case (e.g. domain, role,
// policy, service, etc name)
domainName = domainName.toLowerCase();
roleName = roleName.toLowerCase();
memberName = normalizeDomainAliasUser(memberName.toLowerCase());
final String principalDomain = getPrincipalDomain(ctx);
metric.increment(ZMSConsts.HTTP_REQUEST, domainName, principalDomain);
metric.increment(caller, domainName, principalDomain);
Object timerMetric = metric.startTiming("deletependingmembership_timing", domainName, principalDomain);
// verify that request is properly authenticated for this request
Principal principal = ((RsrcCtxWrapper) ctx).principal();
verifyAuthorizedServiceRoleOperation(principal.getAuthorizedService(), caller, roleName);
// authorization check - there are two supported use cases
// 1) the caller has authorization in the domain to update members in a role
// 2) the caller is the original requestor for the pending request
if (!isAllowedDeletePendingMembership(principal, domainName, roleName, memberName)) {
metric.stopTiming(timerMetric, domainName, principalDomain);
throw ZMSUtils.forbiddenError("deletePendingMembership: principal is not authorized to delete pending members", caller);
}
// remove the pending member from the specified role
dbService.executeDeletePendingMembership(ctx, domainName, roleName, memberName, auditRef, caller);
metric.stopTiming(timerMetric, domainName, principalDomain);
}
public void deleteMembership(ResourceContext ctx, String domainName, String roleName,
String memberName, String auditRef) {
final String caller = "deletemembership";
metric.increment(ZMSConsts.HTTP_DELETE);
logPrincipal(ctx);
if (readOnlyMode) {
throw ZMSUtils.requestError(SERVER_READ_ONLY_MESSAGE, caller);
}
validateRequest(ctx.request(), caller);
validate(domainName, TYPE_DOMAIN_NAME, caller);
validate(roleName, TYPE_ENTITY_NAME, caller);
validate(memberName, TYPE_MEMBER_NAME, caller);
// for consistent handling of all requests, we're going to convert
// all incoming object values into lower case (e.g. domain, role,
// policy, service, etc name)
domainName = domainName.toLowerCase();
roleName = roleName.toLowerCase();
memberName = memberName.toLowerCase();
final String principalDomain = getPrincipalDomain(ctx);
metric.increment(ZMSConsts.HTTP_REQUEST, domainName, principalDomain);
metric.increment(caller, domainName, principalDomain);
Object timerMetric = metric.startTiming("deletemembership_timing", domainName, principalDomain);
// verify that request is properly authenticated for this request
verifyAuthorizedServiceRoleOperation(((RsrcCtxWrapper) ctx).principal().getAuthorizedService(), caller, roleName);
dbService.executeDeleteMembership(ctx, domainName, roleName,
normalizeDomainAliasUser(memberName), auditRef, caller);
metric.stopTiming(timerMetric, domainName, principalDomain);
}
public Quota getQuota(ResourceContext ctx, String domainName) {
final String caller = "getquota";
metric.increment(ZMSConsts.HTTP_GET);
logPrincipal(ctx);
validateRequest(ctx.request(), caller);
validate(domainName, TYPE_DOMAIN_NAME, caller);
// for consistent handling of all requests, we're going to convert
// all incoming object values into lower case (e.g. domain, role,
// policy, service, etc name)
domainName = domainName.toLowerCase();
final String principalDomain = getPrincipalDomain(ctx);
metric.increment(ZMSConsts.HTTP_REQUEST, domainName, principalDomain);
metric.increment(caller, domainName, principalDomain);
Object timerMetric = metric.startTiming("getquota_timing", domainName, principalDomain);
Quota result = dbService.getQuota(domainName);
metric.stopTiming(timerMetric, domainName, principalDomain);
return result;
}
@Override
public void putQuota(ResourceContext ctx, String domainName, String auditRef, Quota quota) {
final String caller = "putQuota";
metric.increment(ZMSConsts.HTTP_PUT);
logPrincipal(ctx);
if (readOnlyMode) {
throw ZMSUtils.requestError(SERVER_READ_ONLY_MESSAGE, caller);
}
validateRequest(ctx.request(), caller);
validate(domainName, TYPE_DOMAIN_NAME, caller);
validate(quota, TYPE_QUOTA, caller);
// for consistent handling of all requests, we're going to convert
// all incoming object values into lower case (e.g. domain, role,
// policy, service, etc name)
domainName = domainName.toLowerCase();
AthenzObject.QUOTA.convertToLowerCase(quota);
final String principalDomain = getPrincipalDomain(ctx);
metric.increment(ZMSConsts.HTTP_REQUEST, domainName, principalDomain);
metric.increment(caller, domainName, principalDomain);
Object timerMetric = metric.startTiming("putquota_timing", domainName, principalDomain);
// verify that request is properly authenticated for this request
verifyAuthorizedServiceOperation(((RsrcCtxWrapper) ctx).principal().getAuthorizedService(),
caller);
// verify that the domain name in the URI and object provided match
if (!domainName.equals(quota.getName())) {
throw ZMSUtils.requestError("putQuota: Domain name in URI and Quota object do not match", caller);
}
dbService.executePutQuota(ctx, domainName, quota, auditRef, caller);
metric.stopTiming(timerMetric, domainName, principalDomain);
}
public void deleteQuota(ResourceContext ctx, String domainName, String auditRef) {
final String caller = "deleteQuota";
metric.increment(ZMSConsts.HTTP_DELETE);
logPrincipal(ctx);
if (readOnlyMode) {
throw ZMSUtils.requestError(SERVER_READ_ONLY_MESSAGE, caller);
}
validateRequest(ctx.request(), caller);
validate(domainName, TYPE_DOMAIN_NAME, caller);
// for consistent handling of all requests, we're going to convert
// all incoming object values into lower case (e.g. domain, role,
// policy, service, etc name)
domainName = domainName.toLowerCase();
final String principalDomain = getPrincipalDomain(ctx);
metric.increment(ZMSConsts.HTTP_REQUEST, domainName, principalDomain);
metric.increment(caller, domainName, principalDomain);
Object timerMetric = metric.startTiming("deletequota_timing", domainName, principalDomain);
// verify that request is properly authenticated for this request
verifyAuthorizedServiceOperation(((RsrcCtxWrapper) ctx).principal().getAuthorizedService(), caller);
dbService.executeDeleteQuota(ctx, domainName, auditRef, caller);
metric.stopTiming(timerMetric, domainName, principalDomain);
}
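// a null or non-positive limit means no limit is enforced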
boolean hasExceededListLimit(Integer limit, int count) {
if (limit == null) {
return false;
}
return limit > 0 && count > limit;
}
/**
* process the list request for the given object type - e.g. role, policy, etc
* if a limit is specified and we have exceeded that limit then return
* the name of the object that should be passed as the skip value for
* the subsequent list operation.
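* e.g. with limit=2 and names [a, b, c], the returned list is
* trimmed to [a, b] and "b" is returned as the next/skip value.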
*/
String processListRequest(String domainName, AthenzObject objType, Integer limit,
String skip, List<String> names) {
switch (objType) {
case ROLE:
names.addAll(dbService.listRoles(domainName));
break;
case POLICY:
names.addAll(dbService.listPolicies(domainName));
break;
case SERVICE_IDENTITY:
names.addAll(dbService.listServiceIdentities(domainName));
break;
default:
return null;
}
int count = names.size();
if (skip != null) {
for (int i = 0; i < count; i++) {
String name = names.get(i);
if (skip.equals(name)) {
names.subList(0, i + 1).clear();
count = names.size();
break;
}
}
}
String next = null;
if (hasExceededListLimit(limit, count)) {
names.subList(limit, count).clear();
next = names.get(limit - 1);
}
return next;
}
public PolicyList getPolicyList(ResourceContext ctx, String domainName, Integer limit, String skip) {
final String caller = "getpolicylist";
metric.increment(ZMSConsts.HTTP_GET);
logPrincipal(ctx);
validateRequest(ctx.request(), caller);
validate(domainName, TYPE_DOMAIN_NAME, caller);
// for consistent handling of all requests, we're going to convert
// all incoming object values into lower case (e.g. domain, role,
// policy, service, etc name)
domainName = domainName.toLowerCase();
if (skip != null) {
skip = skip.toLowerCase();
}
final String principalDomain = getPrincipalDomain(ctx);
metric.increment(ZMSConsts.HTTP_REQUEST, domainName, principalDomain);
metric.increment(caller, domainName, principalDomain);
Object timerMetric = metric.startTiming("getpolicylist_timing", domainName, principalDomain);
List<String> names = new ArrayList<>();
String next = processListRequest(domainName, AthenzObject.POLICY, limit, skip, names);
PolicyList result = new PolicyList().setNames(names);
if (next != null) {
result.setNext(next);
}
metric.stopTiming(timerMetric, domainName, principalDomain);
return result;
}
List<Policy> setupPolicyList(AthenzDomain domain, Boolean assertions) {
// if we're asked to return the assertions as well then we
// just need to return the data as is without any modifications
List<Policy> policies;
if (assertions == Boolean.TRUE) {
policies = domain.getPolicies();
} else {
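// otherwise return name/modified-only copies without the assertions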
policies = new ArrayList<>();
for (Policy policy : domain.getPolicies()) {
Policy newPolicy = new Policy()
.setName(policy.getName())
.setModified(policy.getModified());
policies.add(newPolicy);
}
}
return policies;
}
public Policies getPolicies(ResourceContext ctx, String domainName, Boolean assertions) {
final String caller = "getpolicies";
metric.increment(ZMSConsts.HTTP_GET);
logPrincipal(ctx);
validateRequest(ctx.request(), caller);
validate(domainName, TYPE_DOMAIN_NAME, caller);
// for consistent handling of all requests, we're going to convert
// all incoming object values into lower case (e.g. domain, role,
// policy, service, etc name)
domainName = domainName.toLowerCase();
final String principalDomain = getPrincipalDomain(ctx);
metric.increment(ZMSConsts.HTTP_REQUEST, domainName, principalDomain);
metric.increment(caller, domainName, principalDomain);
Object timerMetric = metric.startTiming("getpolicies_timing", domainName, principalDomain);
Policies result = new Policies();
AthenzDomain domain = getAthenzDomain(domainName, false);
if (domain == null) {
throw ZMSUtils.notFoundError("getPolicies: Domain not found: '" + domainName + "'", caller);
}
result.setList(setupPolicyList(domain, assertions));
metric.stopTiming(timerMetric, domainName, principalDomain);
return result;
}
public Policy getPolicy(ResourceContext ctx, String domainName, String policyName) {
final String caller = "getpolicy";
metric.increment(ZMSConsts.HTTP_GET);
logPrincipal(ctx);
validateRequest(ctx.request(), caller);
validate(domainName, TYPE_DOMAIN_NAME, caller);
validate(policyName, TYPE_ENTITY_NAME, caller);
// for consistent handling of all requests, we're going to convert
// all incoming object values into lower case (e.g. domain, role,
// policy, service, etc name)
domainName = domainName.toLowerCase();
policyName = policyName.toLowerCase();
final String principalDomain = getPrincipalDomain(ctx);
metric.increment(ZMSConsts.HTTP_REQUEST, domainName, principalDomain);
metric.increment(caller, domainName, principalDomain);
Object timerMetric = metric.startTiming("getpolicy_timing", domainName, principalDomain);
Policy policy = dbService.getPolicy(domainName, policyName);
if (policy == null) {
throw ZMSUtils.notFoundError("getPolicy: Policy not found: '" +
ZMSUtils.policyResourceName(domainName, policyName) + "'", caller);
}
metric.stopTiming(timerMetric, domainName, principalDomain);
return policy;
}
public Assertion getAssertion(ResourceContext ctx, String domainName, String policyName,
Long assertionId) {
final String caller = "getassertion";
metric.increment(ZMSConsts.HTTP_GET);
logPrincipal(ctx);
validateRequest(ctx.request(), caller);
validate(domainName, TYPE_DOMAIN_NAME, caller);
validate(policyName, TYPE_ENTITY_NAME, caller);
// for consistent handling of all requests, we're going to convert
// all incoming object values into lower case (e.g. domain, role,
// policy, service, etc name)
domainName = domainName.toLowerCase();
policyName = policyName.toLowerCase();
final String principalDomain = getPrincipalDomain(ctx);
metric.increment(ZMSConsts.HTTP_REQUEST, domainName, principalDomain);
metric.increment(caller, domainName, principalDomain);
Object timerMetric = metric.startTiming("getassertion_timing", domainName, principalDomain);
Assertion assertion = dbService.getAssertion(domainName, policyName, assertionId);
if (assertion == null) {
throw ZMSUtils.notFoundError("getAssertion: Assertion not found: '" +
ZMSUtils.policyResourceName(domainName, policyName) + "' Assertion: '" +
assertionId + "'", caller);
}
metric.stopTiming(timerMetric, domainName, principalDomain);
return assertion;
}
@Override
public Assertion putAssertion(ResourceContext ctx, String domainName, String policyName,
String auditRef, Assertion assertion) {
final String caller = "putassertion";
metric.increment(ZMSConsts.HTTP_PUT);
logPrincipal(ctx);
if (readOnlyMode) {
throw ZMSUtils.requestError(SERVER_READ_ONLY_MESSAGE, caller);
}
validateRequest(ctx.request(), caller);
validate(domainName, TYPE_DOMAIN_NAME, caller);
validate(policyName, TYPE_COMPOUND_NAME, caller);
validate(assertion, TYPE_ASSERTION, caller);
// verify that request is properly authenticated for this request
verifyAuthorizedServiceOperation(((RsrcCtxWrapper) ctx).principal().getAuthorizedService(), caller);
// for consistent handling of all requests, we're going to convert
// all incoming object values into lower case (e.g. domain, role,
// policy, service, etc name)
domainName = domainName.toLowerCase();
policyName = policyName.toLowerCase();
AthenzObject.ASSERTION.convertToLowerCase(assertion);
final String principalDomain = getPrincipalDomain(ctx);
metric.increment(ZMSConsts.HTTP_REQUEST, domainName, principalDomain);
metric.increment(caller, domainName, principalDomain);
Object timerMetric = metric.startTiming("putassertion_timing", domainName, principalDomain);
// we are not going to allow any user to update
// the admin policy since that is required
// for standard domain operations
if (policyName.equalsIgnoreCase(ADMIN_POLICY_NAME)) {
throw ZMSUtils.requestError("putAssertion: admin policy cannot be modified", caller);
}
// validate to make sure we have expected values for assertion fields
validatePolicyAssertion(assertion, caller);
dbService.executePutAssertion(ctx, domainName, policyName, assertion, auditRef, caller);
metric.stopTiming(timerMetric, domainName, principalDomain);
return assertion;
}
public void deleteAssertion(ResourceContext ctx, String domainName, String policyName,
Long assertionId, String auditRef) {
final String caller = "deleteassertion";
metric.increment(ZMSConsts.HTTP_DELETE);
logPrincipal(ctx);
if (readOnlyMode) {
throw ZMSUtils.requestError(SERVER_READ_ONLY_MESSAGE, caller);
}
validateRequest(ctx.request(), caller);
validate(domainName, TYPE_DOMAIN_NAME, caller);
validate(policyName, TYPE_ENTITY_NAME, caller);
// for consistent handling of all requests, we're going to convert
// all incoming object values into lower case (e.g. domain, role,
// policy, service, etc name)
domainName = domainName.toLowerCase();
policyName = policyName.toLowerCase();
final String principalDomain = getPrincipalDomain(ctx);
metric.increment(ZMSConsts.HTTP_REQUEST, domainName, principalDomain);
metric.increment(caller, domainName, principalDomain);
Object timerMetric = metric.startTiming("deleteassertion_timing", domainName, principalDomain);
// we are not going to allow any user to update
// the admin policy since that is required
// for standard domain operations
if (policyName.equalsIgnoreCase(ADMIN_POLICY_NAME)) {
throw ZMSUtils.requestError("deleteAssertion: admin policy cannot be modified", caller);
}
// verify that request is properly authenticated for this request
verifyAuthorizedServiceOperation(((RsrcCtxWrapper) ctx).principal().getAuthorizedService(), caller);
dbService.executeDeleteAssertion(ctx, domainName, policyName, assertionId, auditRef, caller);
metric.stopTiming(timerMetric, domainName, principalDomain);
}
void validatePolicyAssertions(List<Assertion> assertions, String caller) {
if (assertions == null) {
return;
}
for (Assertion assertion : assertions) {
validatePolicyAssertion(assertion, caller);
}
}
void validatePolicyAssertion(Assertion assertion, String caller) {
// extract the domain name from the resource
final String resource = assertion.getResource();
int idx = resource.indexOf(':');
if (idx == -1) {
throw ZMSUtils.requestError("Missing domain name from assertion resource: "
+ resource, caller);
}
// we need to validate our domain name with special
// case of * that is allowed to match any domain
String domainName = resource.substring(0, idx);
if (!domainName.equals("*")) {
validate(domainName, TYPE_DOMAIN_NAME, caller);
}
// we'll also verify that the resource does not contain
// any control characters since those cause issues when
// data is serialized/deserialized and signature is generated
if (StringUtils.containsControlCharacter(resource)) {
throw ZMSUtils.requestError("Assertion resource contains control characters: "
+ resource, caller);
}
// verify the action is not empty and does not contain
// any control characters
final String action = assertion.getAction();
if (action == null || action.isEmpty()) {
throw ZMSUtils.requestError("Assertion action cannot be empty", caller);
}
if (StringUtils.containsControlCharacter(action)) {
throw ZMSUtils.requestError("Assertion action contains control characters: "
+ resource, caller);
}
}
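// verify that the policy name in the request object matches the name
// from the uri - we accept either the fully qualified resource name
// (e.g. "sports:policy.readers") or just the local name (e.g. "readers"),
// in which case the object is updated with the full resource name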
boolean isConsistentPolicyName(final String domainName, final String policyName, Policy policy) {
String resourceName = ZMSUtils.policyResourceName(domainName, policyName);
// first let's assume we have the expected name specified in the policy
if (resourceName.equals(policy.getName())) {
return true;
}
// if not check to see if the policy contains the relative local name
// part only instead of the expected resourceName and update accordingly
if (policyName.equals(policy.getName())) {
policy.setName(resourceName);
return true;
}
// we have a mismatch
return false;
}
@Override
public void putPolicy(ResourceContext ctx, String domainName, String policyName, String auditRef, Policy policy) {
final String caller = "putpolicy";
metric.increment(ZMSConsts.HTTP_PUT);
logPrincipal(ctx);
if (readOnlyMode) {
throw ZMSUtils.requestError(SERVER_READ_ONLY_MESSAGE, caller);
}
validateRequest(ctx.request(), caller);
validate(domainName, TYPE_DOMAIN_NAME, caller);
validate(policyName, TYPE_COMPOUND_NAME, caller);
validate(policy, TYPE_POLICY, caller);
// verify that request is properly authenticated for this request
verifyAuthorizedServiceOperation(((RsrcCtxWrapper) ctx).principal().getAuthorizedService(), caller);
// for consistent handling of all requests, we're going to convert
// all incoming object values into lower case (e.g. domain, role,
// policy, service, etc name)
domainName = domainName.toLowerCase();
policyName = policyName.toLowerCase();
AthenzObject.POLICY.convertToLowerCase(policy);
final String principalDomain = getPrincipalDomain(ctx);
metric.increment(ZMSConsts.HTTP_REQUEST, domainName, principalDomain);
metric.increment(caller, domainName, principalDomain);
Object timerMetric = metric.startTiming("putpolicy_timing", domainName, principalDomain);
// we are not going to allow any user to update
// the admin policy since that is required
// for standard domain operations
if (policyName.equalsIgnoreCase(ADMIN_POLICY_NAME)) {
throw ZMSUtils.requestError("putPolicy: admin policy cannot be modified", caller);
}
// verify the policy name in the URI and request are consistent
if (!isConsistentPolicyName(domainName, policyName, policy)) {
throw ZMSUtils.requestError("putPolicy: Inconsistent policy names - expected: "
+ ZMSUtils.policyResourceName(domainName, policyName) + ", actual: "
+ policy.getName(), caller);
}
// validate to make sure we have expected values for assertion fields
validatePolicyAssertions(policy.getAssertions(), caller);
dbService.executePutPolicy(ctx, domainName, policyName, policy, auditRef, caller);
metric.stopTiming(timerMetric, domainName, principalDomain);
}
public void deletePolicy(ResourceContext ctx, String domainName, String policyName, String auditRef) {
final String caller = "deletepolicy";
metric.increment(ZMSConsts.HTTP_DELETE);
logPrincipal(ctx);
if (readOnlyMode) {
throw ZMSUtils.requestError(SERVER_READ_ONLY_MESSAGE, caller);
}
validateRequest(ctx.request(), caller);
validate(domainName, TYPE_DOMAIN_NAME, caller);
validate(policyName, TYPE_ENTITY_NAME, caller);
// verify that request is properly authenticated for this request
verifyAuthorizedServiceOperation(((RsrcCtxWrapper) ctx).principal().getAuthorizedService(), caller);
// for consistent handling of all requests, we're going to convert
// all incoming object values into lower case (e.g. domain, role,
// policy, service, etc name)
domainName = domainName.toLowerCase();
policyName = policyName.toLowerCase();
final String principalDomain = getPrincipalDomain(ctx);
metric.increment(ZMSConsts.HTTP_REQUEST, domainName, principalDomain);
metric.increment(caller, domainName, principalDomain);
Object timerMetric = metric.startTiming("deletepolicy_timing", domainName, principalDomain);
// we are not going to allow any user to delete
// the admin role and policy since those are required
// for standard domain operations
if (policyName.equalsIgnoreCase(ADMIN_POLICY_NAME)) {
throw ZMSUtils.requestError("deletePolicy: admin policy cannot be deleted", caller);
}
dbService.executeDeletePolicy(ctx, domainName, policyName, auditRef, caller);
metric.stopTiming(timerMetric, domainName, principalDomain);
}
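// check whether the given assume_role assertion delegates the requested
// role to this domain: the assertion resource must match the role name
// and the member must belong to one of the roles matching the
// assertion's role glob pattern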
boolean matchDelegatedTrustAssertion(Assertion assertion, String roleName,
String roleMember, List<Role> roles) {
if (!ZMSUtils.assumeRoleResourceMatch(roleName, assertion)) {
return false;
}
String rolePattern = StringUtils.patternFromGlob(assertion.getRole());
for (Role role : roles) {
String name = role.getName();
if (!name.matches(rolePattern)) {
continue;
}
if (isMemberOfRole(role, roleMember)) {
return true;
}
}
return false;
}
boolean matchDelegatedTrustPolicy(Policy policy, String roleName, String roleMember, List<Role> roles) {
List<Assertion> assertions = policy.getAssertions();
if (assertions == null) {
return false;
}
for (Assertion assertion : assertions) {
if (matchDelegatedTrustAssertion(assertion, roleName, roleMember, roles)) {
return true;
}
}
return false;
}
boolean delegatedTrust(String domainName, String roleName, String roleMember) {
AthenzDomain domain = getAthenzDomain(domainName, true);
if (domain == null) {
return false;
}
for (Policy policy : domain.getPolicies()) {
if (matchDelegatedTrustPolicy(policy, roleName, roleMember, domain.getRoles())) {
return true;
}
}
return false;
}
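// check whether any role whose name matches the given glob pattern
// appears (by its short name, i.e. with the domain prefix stripped)
// in the principal's list of authenticated roles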
boolean matchRole(String domain, List<Role> roles, String rolePattern,
List<String> authenticatedRoles) {
if (LOG.isDebugEnabled()) {
LOG.debug("matchRole domain: " + domain + " rolePattern: " + rolePattern);
}
String prefix = domain + AuthorityConsts.ROLE_SEP;
int prefixLen = prefix.length();
for (Role role : roles) {
String name = role.getName();
if (!name.matches(rolePattern)) {
continue;
}
String shortName = name.substring(prefixLen);
if (authenticatedRoles.contains(shortName)) {
return true;
}
}
return false;
}
boolean shouldRunDelegatedTrustCheck(String trust, String trustDomain) {
// if there is no trust field then there is no delegated trust check
if (trust == null) {
return false;
}
// if no specific trust domain is specified then we need to
// run the delegated trust check for this domain
if (trustDomain == null) {
return true;
}
// otherwise we'll run the delegated trust check only if
// domain name matches
return trust.equalsIgnoreCase(trustDomain);
}
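// check whether the principal is a member of the given role - either
// directly through the role's member list or, for delegated roles,
// through a trust check against the trusted domain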
boolean matchPrincipalInRole(Role role, String roleName, String fullUser, String trustDomain) {
// if we have members in the role then we're going to check
// against that list only
if (role.getRoleMembers() != null) {
return isMemberOfRole(role, fullUser);
}
// no members so let's check if this is a trust domain
String trust = role.getTrust();
if (!shouldRunDelegatedTrustCheck(trust, trustDomain)) {
return false;
}
// delegate to another domain.
if (LOG.isDebugEnabled()) {
LOG.debug("matchPrincipal: [delegated trust. Checking with: " + trust + "]");
}
return delegatedTrust(trust, roleName, fullUser);
}
boolean matchPrincipal(List<Role> roles, String rolePattern, String fullUser, String trustDomain) {
if (LOG.isDebugEnabled()) {
LOG.debug("matchPrincipal - rolePattern: " + rolePattern + " user: " + fullUser +
" trust: " + trustDomain);
}
for (Role role : roles) {
String name = role.getName();
if (!name.matches(rolePattern)) {
continue;
}
if (matchPrincipalInRole(role, name, fullUser, trustDomain)) {
if (LOG.isDebugEnabled()) {
LOG.debug("assertionMatch: -> OK (by principal)");
}
return true;
}
}
return false;
}
AthenzDomain virtualHomeDomain(Principal principal, String domainName) {
if (LOG.isDebugEnabled()) {
LOG.debug("homeDomain: home domain detected. Create on the fly.");
}
AthenzDomain athenzDomain = new AthenzDomain(domainName);
Domain domain = new Domain().setName(domainName).setEnabled(Boolean.TRUE);
athenzDomain.setDomain(domain);
List<String> adminUsers = new ArrayList<>();
adminUsers.add(principal.getFullName());
Role role = ZMSUtils.makeAdminRole(domainName, adminUsers);
athenzDomain.getRoles().add(role);
Policy policy = ZMSUtils.makeAdminPolicy(domainName, role);
athenzDomain.getPolicies().add(policy);
return athenzDomain;
}
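// evaluate a single assertion against the request: the action and
// resource must match the assertion's glob patterns and the principal
// (or one of its authenticated roles) must match the assertion's role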
boolean assertionMatch(Assertion assertion, String identity, String action, String resource,
String domain, List<Role> roles, List<String> authenticatedRoles, String trustDomain) {
String actionPattern = StringUtils.patternFromGlob(assertion.getAction());
if (LOG.isDebugEnabled()) {
LOG.debug("assertionMatch: action '{}' pattern '{}'", action, actionPattern);
}
if (!action.matches(actionPattern)) {
return false;
}
String rezPattern = StringUtils.patternFromGlob(assertion.getResource());
if (LOG.isDebugEnabled()) {
LOG.debug("assertionMatch: resource '{}' pattern '{}'", resource, rezPattern);
}
if (!resource.matches(rezPattern)) {
return false;
}
boolean matchResult;
String rolePattern = StringUtils.patternFromGlob(assertion.getRole());
if (authenticatedRoles != null) {
matchResult = matchRole(domain, roles, rolePattern, authenticatedRoles);
} else {
matchResult = matchPrincipal(roles, rolePattern, identity, trustDomain);
}
if (LOG.isDebugEnabled()) {
LOG.debug("assertionMatch: -> " + matchResult +
" (effect: " + assertion.getEffect() + ")");
}
return matchResult;
}
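// verify that the service's provider endpoint is either a class
// scheme uri or an http(s) uri whose hostname is localhost or ends
// with one of our configured endpoint suffixes - e.g. with a
// configured list of [".athenz.cloud"] (hypothetical value),
// "https://provider.athenz.cloud" is accepted while
// "https://provider.example.com" is rejected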
boolean verifyProviderEndpoint(String providerEndpoint) {
// verify that we have a valid endpoint that ends in one of our
// configured domains. if it's not present or an empty value then
// there is no field to verify
if (providerEndpoint == null || providerEndpoint.isEmpty()) {
return true;
}
if (LOG.isDebugEnabled()) {
LOG.debug("verifyProviderEndpoint: verifying endpoint: " + providerEndpoint);
}
java.net.URI uri;
try {
uri = new java.net.URI(providerEndpoint);
} catch (URISyntaxException ex) {
return false;
}
if (LOG.isDebugEnabled()) {
LOG.debug("verifyProviderEndpoint: host: " + uri.getHost() + " scheme: " + uri.getScheme());
}
String scheme = uri.getScheme();
if (scheme == null) {
return false;
}
scheme = scheme.toLowerCase();
// if our scheme is class then we have no further checks to carry out
if (scheme.equalsIgnoreCase(ZMSConsts.SCHEME_CLASS)) {
return true;
}
// otherwise it must be one of our http schemes
if (!(scheme.equalsIgnoreCase(ZMSConsts.SCHEME_HTTP) || scheme.equalsIgnoreCase(ZMSConsts.SCHEME_HTTPS))) {
return false;
}
String host = uri.getHost();
if (host == null) {
return false;
}
host = host.toLowerCase();
// if we have no endpoints configured then we should
// allow all hostnames
if (providerEndpoints == null || providerEndpoints.isEmpty()) {
return true;
}
// we're going to allow localhost as a special case since
// that's often used for dev testing
boolean valid = host.equals(ZMSConsts.LOCALHOST);
if (!valid) {
for (String endpoint : providerEndpoints) {
valid = host.endsWith(endpoint);
if (valid) {
break;
}
}
}
return valid;
}
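// verify that the given ybase64-encoded key can be decoded and
// loaded as a valid public key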
boolean verifyServicePublicKey(String key) {
try {
PublicKey pub = Crypto.loadPublicKey(Crypto.ybase64DecodeString(key));
if (LOG.isDebugEnabled()) {
LOG.debug("verifyServicePublicKey: public key looks valid: " + pub);
}
} catch (Exception ex) {
LOG.error("verifyServicePublicKey: Invalid Public Key: " + ex.getMessage());
return false;
}
return true;
}
boolean verifyServicePublicKeys(ServiceIdentity service) {
// verify that the public keys specified are valid
// It's okay to not specify any public keys
List<PublicKeyEntry> publicKeyList = service.getPublicKeys();
if (publicKeyList == null || publicKeyList.isEmpty()) {
return true;
}
for (PublicKeyEntry entry : publicKeyList) {
if (!verifyServicePublicKey(entry.getKey())) {
return false;
}
}
return true;
}
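// a service name is valid if it's not one of the configured reserved
// names and satisfies the configured minimum length requirement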
public boolean isValidServiceName(final String serviceName) {
if (reservedServiceNames != null && reservedServiceNames.contains(serviceName)) {
return false;
}
return serviceNameMinLength <= 0 || serviceNameMinLength <= serviceName.length();
}
@Override
public void putServiceIdentity(ResourceContext ctx, String domainName, String serviceName,
String auditRef, ServiceIdentity service) {
final String caller = "putserviceidentity";
metric.increment(ZMSConsts.HTTP_PUT);
logPrincipal(ctx);
if (readOnlyMode) {
throw ZMSUtils.requestError(SERVER_READ_ONLY_MESSAGE, caller);
}
validateRequest(ctx.request(), caller);
validate(domainName, TYPE_DOMAIN_NAME, caller);
validate(serviceName, TYPE_SIMPLE_NAME, caller);
validate(service, TYPE_SERVICE_IDENTITY, caller);
// for consistent handling of all requests, we're going to convert
// all incoming object values into lower case (e.g. domain, role,
// policy, service, etc name)
domainName = domainName.toLowerCase();
serviceName = serviceName.toLowerCase();
AthenzObject.SERVICE_IDENTITY.convertToLowerCase(service);
final String principalDomain = getPrincipalDomain(ctx);
metric.increment(ZMSConsts.HTTP_REQUEST, domainName, principalDomain);
metric.increment(caller, domainName, principalDomain);
Object timerMetric = metric.startTiming("putserviceidentity_timing", domainName, principalDomain);
// validate that the service name is valid
if (!isValidServiceName(serviceName)) {
throw ZMSUtils.requestError("putServiceIdentity: Invalid/Reserved service name", caller);
}
// verify that request is properly authenticated for this request
verifyAuthorizedServiceOperation(((RsrcCtxWrapper) ctx).principal().getAuthorizedService(), caller);
if (!ZMSUtils.serviceResourceName(domainName, serviceName).equals(service.getName())) {
throw ZMSUtils.requestError("putServiceIdentity: Inconsistent service/domain names", caller);
}
if (!verifyServicePublicKeys(service)) {
throw ZMSUtils.requestError("putServiceIdentity: Provided public key is invalid", caller);
}
if (!verifyProviderEndpoint(service.getProviderEndpoint())) {
throw ZMSUtils.requestError("putServiceIdentity: Invalid endpoint: "
+ service.getProviderEndpoint() + " - must be http(s) and in configured domain", caller);
}
dbService.executePutServiceIdentity(ctx, domainName, serviceName, service, auditRef, caller);
metric.stopTiming(timerMetric, domainName, principalDomain);
}
@Override
public void putServiceIdentitySystemMeta(ResourceContext ctx, String domainName, String serviceName,
String attribute, String auditRef, ServiceIdentitySystemMeta meta) {
final String caller = "putservicesystemmeta";
metric.increment(ZMSConsts.HTTP_PUT);
logPrincipal(ctx);
if (readOnlyMode) {
throw ZMSUtils.requestError(SERVER_READ_ONLY_MESSAGE, caller);
}
validateRequest(ctx.request(), caller);
validate(domainName, TYPE_DOMAIN_NAME, caller);
validate(serviceName, TYPE_SIMPLE_NAME, caller);
validate(meta, TYPE_SERVICE_IDENTITY_SYSTEM_META, caller);
validate(attribute, TYPE_SIMPLE_NAME, caller);
// for consistent handling of all requests, we're going to convert
// all incoming object values into lower case (e.g. domain, role,
// policy, service, etc name)
domainName = domainName.toLowerCase();
serviceName = serviceName.toLowerCase();
attribute = attribute.toLowerCase();
final String principalDomain = getPrincipalDomain(ctx);
metric.increment(ZMSConsts.HTTP_REQUEST, domainName, principalDomain);
metric.increment(caller, domainName, principalDomain);
Object timerMetric = metric.startTiming("putservicesystemmeta_timing", domainName, principalDomain);
// verify that request is properly authenticated for this request
Principal principal = ((RsrcCtxWrapper) ctx).principal();
verifyAuthorizedServiceOperation(principal.getAuthorizedService(), caller);
if (LOG.isDebugEnabled()) {
LOG.debug("putServiceSystemMeta: name={}, service={} attribute={}, meta={}",
domainName, serviceName, attribute, meta);
}
// if we are resetting the configured value then the caller
// must also have a delete action available for the same resource
boolean deleteAllowed = isAllowedSystemMetaDelete(principal, domainName, attribute, "service");
dbService.executePutServiceIdentitySystemMeta(ctx, domainName, serviceName, meta, attribute,
deleteAllowed, auditRef, caller);
metric.stopTiming(timerMetric, domainName, principalDomain);
}
public ServiceIdentity getServiceIdentity(ResourceContext ctx, String domainName, String serviceName) {
final String caller = "getserviceidentity";
metric.increment(ZMSConsts.HTTP_GET);
logPrincipal(ctx);
validateRequest(ctx.request(), caller);
validate(domainName, TYPE_DOMAIN_NAME, caller);
validate(serviceName, TYPE_SIMPLE_NAME, caller);
// for consistent handling of all requests, we're going to convert
// all incoming object values into lower case (e.g. domain, role,
// policy, service, etc name)
domainName = domainName.toLowerCase();
serviceName = serviceName.toLowerCase();
final String principalDomain = getPrincipalDomain(ctx);
metric.increment(ZMSConsts.HTTP_REQUEST, domainName, principalDomain);
metric.increment(caller, domainName, principalDomain);
Object timerMetric = metric.startTiming("getserviceidentity_timing", domainName, principalDomain);
ServiceIdentity service = dbService.getServiceIdentity(domainName, serviceName, false);
if (service == null) {
throw ZMSUtils.notFoundError("getServiceIdentity: Service not found: '" +
ZMSUtils.serviceResourceName(domainName, serviceName) + "'", caller);
}
metric.stopTiming(timerMetric, domainName, principalDomain);
return service;
}
public void deleteServiceIdentity(ResourceContext ctx, String domainName,
String serviceName, String auditRef) {
final String caller = "deleteserviceidentity";
metric.increment(ZMSConsts.HTTP_DELETE);
logPrincipal(ctx);
if (readOnlyMode) {
throw ZMSUtils.requestError(SERVER_READ_ONLY_MESSAGE, caller);
}
validateRequest(ctx.request(), caller);
validate(domainName, TYPE_DOMAIN_NAME, caller);
validate(serviceName, TYPE_SIMPLE_NAME, caller);
// for consistent handling of all requests, we're going to convert
// all incoming object values into lower case (e.g. domain, role,
// policy, service, etc name)
domainName = domainName.toLowerCase();
serviceName = serviceName.toLowerCase();
final String principalDomain = getPrincipalDomain(ctx);
metric.increment(ZMSConsts.HTTP_REQUEST, domainName, principalDomain);
metric.increment(caller, domainName, principalDomain);
Object timerMetric = metric.startTiming("deleteserviceidentity_timing", domainName, principalDomain);
// verify that request is properly authenticated for this request
verifyAuthorizedServiceOperation(((RsrcCtxWrapper) ctx).principal().getAuthorizedService(), caller);
dbService.executeDeleteServiceIdentity(ctx, domainName, serviceName, auditRef, caller);
metric.stopTiming(timerMetric, domainName, principalDomain);
}
List<ServiceIdentity> setupServiceIdentityList(AthenzDomain domain, Boolean publicKeys, Boolean hosts) {
// if we're asked to return the public keys and hosts as well then we
// just need to return the data as is without any modifications
List<ServiceIdentity> services;
if (publicKeys == Boolean.TRUE && hosts == Boolean.TRUE) {
services = domain.getServices();
} else {
services = new ArrayList<>();
for (ServiceIdentity service : domain.getServices()) {
ServiceIdentity newService = new ServiceIdentity()
.setName(service.getName())
.setModified(service.getModified())
.setExecutable(service.getExecutable())
.setGroup(service.getGroup())
.setUser(service.getUser())
.setProviderEndpoint(service.getProviderEndpoint());
if (publicKeys == Boolean.TRUE) {
newService.setPublicKeys(service.getPublicKeys());
} else if (hosts == Boolean.TRUE) {
newService.setHosts(service.getHosts());
}
services.add(newService);
}
}
return services;
}
public ServiceIdentities getServiceIdentities(ResourceContext ctx, String domainName,
Boolean publicKeys, Boolean hosts) {
final String caller = "getserviceidentities";
metric.increment(ZMSConsts.HTTP_GET);
logPrincipal(ctx);
validateRequest(ctx.request(), caller);
validate(domainName, TYPE_DOMAIN_NAME, caller);
// for consistent handling of all requests, we're going to convert
// all incoming object values into lower case (e.g. domain, role,
// policy, service, etc name)
domainName = domainName.toLowerCase();
final String principalDomain = getPrincipalDomain(ctx);
metric.increment(ZMSConsts.HTTP_REQUEST, domainName, principalDomain);
metric.increment(caller, domainName, principalDomain);
Object timerMetric = metric.startTiming("getserviceidentities_timing", domainName, principalDomain);
ServiceIdentities result = new ServiceIdentities();
AthenzDomain domain = getAthenzDomain(domainName, false);
if (domain == null) {
throw ZMSUtils.notFoundError("getServiceIdentities: Domain not found: '"
+ domainName + "'", caller);
}
result.setList(setupServiceIdentityList(domain, publicKeys, hosts));
metric.stopTiming(timerMetric, domainName, principalDomain);
return result;
}
public ServiceIdentityList getServiceIdentityList(ResourceContext ctx, String domainName,
Integer limit, String skip) {
final String caller = "getserviceidentitylist";
metric.increment(ZMSConsts.HTTP_GET);
logPrincipal(ctx);
validateRequest(ctx.request(), caller);
validate(domainName, TYPE_DOMAIN_NAME, caller);
// for consistent handling of all requests, we're going to convert
// all incoming object values into lower case (e.g. domain, role,
// policy, service, etc name)
domainName = domainName.toLowerCase();
if (skip != null) {
skip = skip.toLowerCase();
}
final String principalDomain = getPrincipalDomain(ctx);
metric.increment(ZMSConsts.HTTP_REQUEST, domainName, principalDomain);
metric.increment(caller, domainName, principalDomain);
Object timerMetric = metric.startTiming("getserviceidentitylist_timing", domainName, principalDomain);
List<String> names = new ArrayList<>();
String next = processListRequest(domainName, AthenzObject.SERVICE_IDENTITY, limit, skip, names);
ServiceIdentityList result = new ServiceIdentityList().setNames(names);
if (next != null) {
result.setNext(next);
}
metric.stopTiming(timerMetric, domainName, principalDomain);
return result;
}
public PublicKeyEntry getPublicKeyEntry(ResourceContext ctx, String domainName, String serviceName, String keyId) {
final String caller = "getpublickeyentry";
metric.increment(ZMSConsts.HTTP_GET);
logPrincipal(ctx);
validateRequest(ctx.request(), caller);
validate(domainName, TYPE_DOMAIN_NAME, caller);
validate(serviceName, TYPE_SIMPLE_NAME, caller);
// for consistent handling of all requests, we're going to convert
// all incoming object values into lower case (e.g. domain, role,
// policy, service, etc name)
domainName = domainName.toLowerCase();
serviceName = serviceName.toLowerCase();
keyId = keyId.toLowerCase();
final String principalDomain = getPrincipalDomain(ctx);
metric.increment(ZMSConsts.HTTP_REQUEST, domainName, principalDomain);
metric.increment(caller, domainName, principalDomain);
Object timerMetric = metric.startTiming("getpublickeyentry_timing", domainName, principalDomain);
PublicKeyEntry entry = dbService.getServicePublicKeyEntry(domainName, serviceName, keyId, false);
if (entry == null) {
throw ZMSUtils.notFoundError("getPublicKeyEntry: PublicKey " + keyId + " in service " +
ZMSUtils.serviceResourceName(domainName, serviceName) + " not found", caller);
}
metric.stopTiming(timerMetric, domainName, principalDomain);
return entry;
}
public void deletePublicKeyEntry(ResourceContext ctx, String domainName, String serviceName,
String keyId, String auditRef) {
final String caller = "deletepublickeyentry";
metric.increment(ZMSConsts.HTTP_DELETE);
logPrincipal(ctx);
if (readOnlyMode) {
throw ZMSUtils.requestError(SERVER_READ_ONLY_MESSAGE, caller);
}
validateRequest(ctx.request(), caller);
validate(domainName, TYPE_DOMAIN_NAME, caller);
validate(serviceName, TYPE_SIMPLE_NAME, caller);
// for consistent handling of all requests, we're going to convert
// all incoming object values into lower case (e.g. domain, role,
// policy, service, etc name)
domainName = domainName.toLowerCase();
serviceName = serviceName.toLowerCase();
keyId = keyId.toLowerCase();
final String principalDomain = getPrincipalDomain(ctx);
metric.increment(ZMSConsts.HTTP_REQUEST, domainName, principalDomain);
metric.increment(caller, domainName, principalDomain);
Object timerMetric = metric.startTiming("deletepublickeyentry_timing", domainName, principalDomain);
// verify that request is properly authenticated for this request
verifyAuthorizedServiceOperation(((RsrcCtxWrapper) ctx).principal().getAuthorizedService(), caller);
dbService.executeDeletePublicKeyEntry(ctx, domainName, serviceName, keyId, auditRef, caller);
metric.stopTiming(timerMetric, domainName, principalDomain);
}
@Override
public void putPublicKeyEntry(ResourceContext ctx, String domainName, String serviceName,
String keyId, String auditRef, PublicKeyEntry keyEntry) {
final String caller = "putpublickeyentry";
metric.increment(ZMSConsts.HTTP_PUT);
logPrincipal(ctx);
if (readOnlyMode) {
throw ZMSUtils.requestError(SERVER_READ_ONLY_MESSAGE, caller);
}
validateRequest(ctx.request(), caller);
validate(domainName, TYPE_DOMAIN_NAME, caller);
validate(serviceName, TYPE_SIMPLE_NAME, caller);
validate(keyEntry, TYPE_PUBLIC_KEY_ENTRY, caller);
// for consistent handling of all requests, we're going to convert
// all incoming object values into lower case (e.g. domain, role,
// policy, service, etc name)
domainName = domainName.toLowerCase();
serviceName = serviceName.toLowerCase();
keyId = keyId.toLowerCase();
AthenzObject.PUBLIC_KEY_ENTRY.convertToLowerCase(keyEntry);
final String principalDomain = getPrincipalDomain(ctx);
metric.increment(ZMSConsts.HTTP_REQUEST, domainName, principalDomain);
metric.increment(caller, domainName, principalDomain);
Object timerMetric = metric.startTiming("putpublickeyentry_timing", domainName, principalDomain);
// verify that request is properly authenticated for this request
verifyAuthorizedServiceOperation(((RsrcCtxWrapper) ctx).principal().getAuthorizedService(), caller);
// verify that key id specified in request and object do match
if (!keyId.equals(keyEntry.getId())) {
throw ZMSUtils.requestError("putPublicKeyEntry: keyId in URI and PublicKeyEntry object do not match", caller);
}
// verify we have a valid public key specified
if (!verifyServicePublicKey(keyEntry.getKey())) {
throw ZMSUtils.requestError("putPublicKeyEntry: Invalid public key", caller);
}
dbService.executePutPublicKeyEntry(ctx, domainName, serviceName, keyEntry, auditRef, caller);
metric.stopTiming(timerMetric, domainName, principalDomain);
}
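// strip the surrounding double quotes from the given value since
// etag values are typically passed in quoted form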
String removeQuotes(String value) {
if (value.startsWith("\"")) {
value = value.substring(1);
}
if (value.endsWith("\"")) {
value = value.substring(0, value.length() - 1);
}
return value;
}
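// parse the given matching tag (etag) into epoch milliseconds,
// returning 0 if the tag is missing, empty or not a valid timestamp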
long getModTimestamp(String matchingTag) {
long timestamp = 0;
if (matchingTag == null) {
return timestamp;
}
matchingTag = removeQuotes(matchingTag);
if (LOG.isDebugEnabled()) {
LOG.debug("getModTimestamp: matching tag ({})", matchingTag);
}
if (matchingTag.isEmpty()) {
return timestamp;
}
try {
Timestamp tagStamp = Timestamp.fromString(matchingTag);
if (tagStamp == null) {
throw new IllegalArgumentException("Unable to parse timestamp");
}
timestamp = tagStamp.millis();
} catch (IllegalArgumentException exc) {
if (LOG.isWarnEnabled()) {
LOG.warn("getModTimestamp: matching tag({}) has bad format. Return 0 by default.",
matchingTag);
}
}
return timestamp;
}
SignedDomain createSignedDomain(String domainName, long modifiedTime) {
SignedDomain signedDomain = new SignedDomain();
DomainData domainData = new DomainData().setName(domainName);
signedDomain.setDomain(domainData);
domainData.setModified(Timestamp.fromMillis(modifiedTime));
return signedDomain;
}
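// build a meta-only signed domain object - if a specific meta
// attribute is requested and the domain doesn't have a value for it,
// we return null so the caller can skip the domain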
SignedDomain retrieveSignedDomainMeta(final Domain domain, final String metaAttr) {
SignedDomain signedDomain = createSignedDomain(domain.getName(), domain.getModified().millis());
if (metaAttr != null) {
switch (metaAttr) {
case META_ATTR_ACCOUNT:
final String account = domain.getAccount();
if (account == null) {
return null;
}
signedDomain.getDomain().setAccount(account);
break;
case META_ATTR_YPM_ID:
final Integer ypmId = domain.getYpmId();
if (ypmId == null) {
return null;
}
signedDomain.getDomain().setYpmId(ypmId);
break;
case META_ATTR_ALL:
DomainData domainData = signedDomain.getDomain();
domainData.setDescription(domain.getDescription());
domainData.setAccount(domain.getAccount());
domainData.setYpmId(domain.getYpmId());
domainData.setApplicationId(domain.getApplicationId());
domainData.setMemberExpiryDays(domain.getMemberExpiryDays());
domainData.setServiceExpiryDays(domain.getServiceExpiryDays());
domainData.setRoleCertExpiryMins(domain.getRoleCertExpiryMins());
domainData.setServiceCertExpiryMins(domain.getServiceCertExpiryMins());
domainData.setTokenExpiryMins(domain.getTokenExpiryMins());
domainData.setOrg(domain.getOrg());
domainData.setAuditEnabled(domain.getAuditEnabled());
break;
}
}
return signedDomain;
}
SignedDomain retrieveSignedDomain(Domain domain, final String metaAttr, boolean setMetaDataOnly, boolean masterCopy) {
// check if we're asked to only return the meta data which
// we already have - name and last modified time, so we can
// add the domain to our return list and continue with the
// next domain
SignedDomain signedDomain;
if (setMetaDataOnly) {
signedDomain = retrieveSignedDomainMeta(domain, metaAttr);
} else {
signedDomain = retrieveSignedDomainData(domain.getName(), domain.getModified().millis(), masterCopy);
}
return signedDomain;
}
SignedDomain retrieveSignedDomainData(final String domainName, long modifiedTime, boolean masterCopy) {
// generate our signed domain object
SignedDomain signedDomain = createSignedDomain(domainName, modifiedTime);
// get the policies, roles, and service identities to create the
// DomainData
if (LOG.isDebugEnabled()) {
LOG.debug("retrieveSignedDomain: retrieving domain " + domainName);
}
AthenzDomain athenzDomain = getAthenzDomain(domainName, true, masterCopy);
// it's possible that our domain was deleted by another
// thread while we were processing this request so
// we'll return null so the caller can skip this domain
if (athenzDomain == null) {
return null;
}
// set domain attributes - for enabled flag only set it
// if it set to false
DomainData domainData = signedDomain.getDomain();
if (athenzDomain.getDomain().getEnabled() == Boolean.FALSE) {
domainData.setEnabled(false);
}
if (athenzDomain.getDomain().getAuditEnabled() == Boolean.TRUE) {
domainData.setAuditEnabled(true);
}
domainData.setAccount(athenzDomain.getDomain().getAccount());
domainData.setYpmId(athenzDomain.getDomain().getYpmId());
domainData.setApplicationId(athenzDomain.getDomain().getApplicationId());
domainData.setSignAlgorithm(athenzDomain.getDomain().getSignAlgorithm());
if (athenzDomain.getDomain().getServiceCertExpiryMins() != null) {
domainData.setServiceCertExpiryMins(athenzDomain.getDomain().getServiceCertExpiryMins());
}
if (athenzDomain.getDomain().getRoleCertExpiryMins() != null) {
domainData.setRoleCertExpiryMins(athenzDomain.getDomain().getRoleCertExpiryMins());
}
if (athenzDomain.getDomain().getTokenExpiryMins() != null) {
domainData.setTokenExpiryMins(athenzDomain.getDomain().getTokenExpiryMins());
}
// set the roles and services
domainData.setRoles(athenzDomain.getRoles());
domainData.setServices(athenzDomain.getServices());
// generate the domain policy object that includes the domain
// name and all policies. Then we'll sign this struct using
// server's private key to get signed policy object
DomainPolicies domainPolicies = new DomainPolicies().setDomain(domainName);
domainPolicies.setPolicies(getPolicyListWithoutAssertionId(athenzDomain.getPolicies()));
SignedPolicies signedPolicies = new SignedPolicies();
signedPolicies.setContents(domainPolicies);
domainData.setPolicies(signedPolicies);
String signature = Crypto.sign(
SignUtils.asCanonicalString(signedPolicies.getContents()), privateKey.getKey());
signedPolicies.setSignature(signature).setKeyId(privateKey.getId());
// then sign the data and set the data and signature in a SignedDomain
signature = Crypto.sign(SignUtils.asCanonicalString(domainData), privateKey.getKey());
signedDomain.setSignature(signature).setKeyId(privateKey.getId());
return signedDomain;
}
@Override
public Response getSignedDomains(ResourceContext ctx, String domainName, String metaOnly,
String metaAttr, Boolean master, String matchingTag) {
final String caller = "getsigneddomains";
metric.increment(ZMSConsts.HTTP_GET);
metric.increment(ZMSConsts.HTTP_REQUEST);
metric.increment(caller);
final String principalDomain = getPrincipalDomain(ctx);
Object timerMetric = metric.startTiming("getsigneddomains_timing", null, principalDomain);
logPrincipal(ctx);
validateRequest(ctx.request(), caller);
// for consistent handling of all requests, we're going to convert
// all incoming object values into lower case (e.g. domain, role,
// policy, service, etc name)
if (domainName != null) {
domainName = domainName.toLowerCase();
validate(domainName, TYPE_DOMAIN_NAME, caller);
}
if (metaAttr != null) {
metaAttr = metaAttr.toLowerCase();
validate(metaAttr, TYPE_SIMPLE_NAME, caller);
}
boolean setMetaDataOnly = ZMSUtils.parseBoolean(metaOnly, false);
long timestamp = getModTimestamp(matchingTag);
// if this is one of our system principals then we're going
// to use the master copy instead of read-only replicas
// unless we're configured to always use read-only replicas
// for all signed domain operations
Principal principal = ((RsrcCtxWrapper) ctx).principal();
boolean masterCopy = (useMasterCopyForSignedDomains || master == Boolean.TRUE)
&& principal.getFullName().startsWith("sys.");
// if we're given a specific domain then we don't need to
// retrieve the list of modified domains
List<SignedDomain> sdList = new ArrayList<>();
Long youngestDomMod = -1L;
if (domainName != null && !domainName.isEmpty()) {
Domain domain = null;
try {
domain = dbService.getDomain(domainName, masterCopy);
} catch (ResourceException ex) {
// in case the domain does not exist we're just
// going to return an empty set
if (ex.getCode() != ResourceException.NOT_FOUND) {
throw ex;
}
}
if (domain != null) {
youngestDomMod = domain.getModified().millis();
if (timestamp != 0 && youngestDomMod <= timestamp) {
EntityTag eTag = new EntityTag(domain.getModified().toString());
return Response.status(ResourceException.NOT_MODIFIED)
.header("ETag", eTag.toString()).build();
}
// generate our signed domain object
SignedDomain signedDomain = retrieveSignedDomain(domain, metaAttr, setMetaDataOnly, masterCopy);
if (signedDomain != null) {
sdList.add(signedDomain);
}
} else {
youngestDomMod = System.currentTimeMillis();
}
} else {
// if we don't have a domain name then the meta flag must
// be set to true otherwise it's expensive to fetch all
// domains and sign all domains into a single response
// unless the request is from a system service
if (!setMetaDataOnly && !masterCopy) {
return Response.status(ResourceException.BAD_REQUEST).build();
}
// we should get our matching tag before calling get modified list
// in case we get a domain added/updated right after an empty domain list
// was returned and before the matchingTag was set to a value
if (matchingTag == null) {
EntityTag eTag = new EntityTag(Timestamp.fromMillis(0).toString());
matchingTag = eTag.toString();
}
DomainMetaList dmlist = dbService.listModifiedDomains(timestamp);
List<Domain> modlist = dmlist.getDomains();
if (modlist == null || modlist.isEmpty()) {
return Response.status(ResourceException.NOT_MODIFIED)
.header("ETag", matchingTag).build();
}
// now we can iterate through our list and retrieve each domain
for (Domain dmod : modlist) {
Long domModMillis = dmod.getModified().millis();
if (domModMillis.compareTo(youngestDomMod) > 0) {
youngestDomMod = domModMillis;
}
// generate our signed domain object
SignedDomain signedDomain = retrieveSignedDomain(dmod, metaAttr, setMetaDataOnly, masterCopy);
// it's possible that our domain was deleted by another
// thread while we were processing this request so
// if we get a null object, we'll just skip this
// item and continue with the next one
if (signedDomain == null) {
continue;
}
// we have a valid domain so we'll add it to our return list
sdList.add(signedDomain);
}
}
SignedDomains sdoms = new SignedDomains();
sdoms.setDomains(sdList);
Timestamp youngest = Timestamp.fromMillis(youngestDomMod);
EntityTag eTag = new EntityTag(youngest.toString());
metric.stopTiming(timerMetric, null, principalDomain);
return Response.status(ResourceException.OK).entity(sdoms)
.header("ETag", eTag.toString()).build();
}
@Override
public JWSDomain getJWSDomain(ResourceContext ctx, String domainName) {
final String caller = "getjwsdomain";
metric.increment(ZMSConsts.HTTP_GET);
metric.increment(ZMSConsts.HTTP_REQUEST);
metric.increment(caller);
final String principalDomain = getPrincipalDomain(ctx);
Object timerMetric = metric.startTiming("getjwsdomain_timing", null, principalDomain);
logPrincipal(ctx);
validateRequest(ctx.request(), caller);
validate(domainName, TYPE_DOMAIN_NAME, caller);
// for consistent handling of all requests, we're going to convert
// all incoming object values into lower case (e.g. domain, role,
// policy, service, etc name)
domainName = domainName.toLowerCase();
// generate our signed domain object
JWSDomain jwsDomain = retrieveJWSDomain(domainName);
if (jwsDomain == null) {
throw ZMSUtils.notFoundError("Unable to retrieve domain=" + domainName, caller);
}
metric.stopTiming(timerMetric, null, principalDomain);
return jwsDomain;
}
JWSDomain retrieveJWSDomain(final String domainName) {
// get the policies, roles, and service identities to create the
// DomainData
if (LOG.isDebugEnabled()) {
LOG.debug("retrieveJWSDomain: retrieving domain {}", domainName);
}
AthenzDomain athenzDomain = getAthenzDomain(domainName, true, false);
if (athenzDomain == null) {
return null;
}
// set all domain attributes including roles and services
final Domain domain = athenzDomain.getDomain();
DomainData domainData = new DomainData()
.setName(domainName)
.setModified(domain.getModified())
.setEnabled(domain.getEnabled())
.setAuditEnabled(domain.getAuditEnabled())
.setAccount(domain.getAccount())
.setYpmId(domain.getYpmId())
.setApplicationId(domain.getApplicationId())
.setSignAlgorithm(domain.getSignAlgorithm())
.setServiceCertExpiryMins(domain.getServiceCertExpiryMins())
.setRoleCertExpiryMins(domain.getRoleCertExpiryMins())
.setTokenExpiryMins(domain.getTokenExpiryMins())
.setServiceExpiryDays(domain.getServiceExpiryDays())
.setDescription(domain.getDescription())
.setOrg(domain.getOrg())
.setCertDnsDomain(domain.getCertDnsDomain())
.setMemberExpiryDays(domain.getMemberExpiryDays())
.setRoles(athenzDomain.getRoles())
.setServices(athenzDomain.getServices());
// generate the domain policy object that includes the domain
// name and all policies.
DomainPolicies domainPolicies = new DomainPolicies().setDomain(domainName);
domainPolicies.setPolicies(getPolicyListWithoutAssertionId(athenzDomain.getPolicies()));
SignedPolicies signedPolicies = new SignedPolicies();
signedPolicies.setContents(domainPolicies);
domainData.setPolicies(signedPolicies);
return signJwsDomain(domainData);
}
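// the jws signing input is the concatenation
// BASE64URL(protected header) + "." + BASE64URL(payload) per
// rfc 7515, and the result is returned in the flattened json
// serialization form with the key id carried in the unprotected header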
JWSDomain signJwsDomain(DomainData domainData) {
// https://tools.ietf.org/html/rfc7515#section-7.2.2
// first generate the json output of our object
JWSDomain jwsDomain = null;
try {
// spec requires base64 url encoder without any padding
final Base64.Encoder encoder = Base64.getUrlEncoder().withoutPadding();
// generate our domain data payload and encode it
final byte[] jsonDomain = jsonMapper.writeValueAsBytes(domainData);
final byte[] encodedDomain = encoder.encode(jsonDomain);
// generate our protected header - just includes the algorithm
final String protectedHeader = "{\"alg\":\"" + privateKey.getAlgorithm() + "\"}";
final byte[] encodedHeader = encoder.encode(protectedHeader.getBytes(StandardCharsets.UTF_8));
// combine the encoded protected header and payload ("header.payload") and sign the result
final byte[] signature = encoder.encode(Crypto.sign(
Bytes.concat(encodedHeader, PERIOD, encodedDomain), privateKey.getKey(), Crypto.SHA256));
// our header contains a single entry with the keyid
final Map<String, String> headerMap = new HashMap<>();
headerMap.put("keyid", privateKey.getId());
jwsDomain = new JWSDomain().setHeader(headerMap)
.setPayload(new String(encodedDomain))
.setProtectedHeader(new String(encodedHeader))
.setSignature(new String(signature));
} catch (Exception ex) {
LOG.error("Unable to generate signed athenz domain object", ex);
}
return jwsDomain;
}
List<Policy> getPolicyListWithoutAssertionId(List<Policy> policies) {
if (policies == null) {
return null;
}
// we are going to remove the assertion id from our assertions
// since the data is signed and the clients don't need to be
// updated due to this new attribute being returned
List<Policy> policyList = new ArrayList<>();
for (Policy policy : policies) {
Policy newPolicy = new Policy()
.setModified(policy.getModified())
.setName(policy.getName());
if (policy.getAssertions() != null) {
List<Assertion> assertions = new ArrayList<>();
for (Assertion assertion : policy.getAssertions()) {
Assertion newAssertion = new Assertion()
.setAction(assertion.getAction())
.setResource(assertion.getResource())
.setRole(assertion.getRole());
if (assertion.getEffect() != null) {
newAssertion.setEffect(assertion.getEffect());
} else {
newAssertion.setEffect(AssertionEffect.ALLOW);
}
assertions.add(newAssertion);
}
newPolicy.setAssertions(assertions);
}
policyList.add(newPolicy);
}
return policyList;
}
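// user token requests must be authenticated by the user authority
// itself (one that cannot carry out authorization checks) and the
// requested user name must either be our default placeholder or
// match the principal's own name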
boolean isValidUserTokenRequest(Principal principal, String userName) {
if (principal == null) {
return false;
}
Authority authority = principal.getAuthority();
if (authority == null) {
return false;
}
// if authority allowed to carry out authorization checks there
// is no need to request user tokens
if (authority.allowAuthorization()) {
if (LOG.isDebugEnabled()) {
LOG.debug("User Token request - Authority cannot request user tokens");
}
return false;
}
String authDomain = authority.getDomain();
if (authDomain == null || !authDomain.equalsIgnoreCase(userDomain)) {
if (LOG.isDebugEnabled()) {
LOG.debug("User Token request - not authenticated by User Authority");
}
return false;
}
// if the username is not our pre-defined skip value we are going
// to verify that it matches the principal's name
if (userName.equalsIgnoreCase(USER_TOKEN_DEFAULT_NAME)) {
return true;
}
if (!userName.equalsIgnoreCase(principal.getName())) {
if (LOG.isDebugEnabled()) {
LOG.debug("User Token request - mismatch between request user name and userid");
}
return false;
}
return true;
}
@Override
public UserToken getUserToken(ResourceContext ctx, String userName, String authorizedServices,
Boolean header) {
final String caller = "getusertoken";
metric.increment(ZMSConsts.HTTP_GET);
metric.increment(ZMSConsts.HTTP_REQUEST);
metric.increment(caller);
final String principalDomain = getPrincipalDomain(ctx);
Object timerMetric = metric.startTiming("getusertoken_timing", null, principalDomain);
logPrincipal(ctx);
validateRequest(ctx.request(), caller);
// for consistent handling of all requests, we're going to convert
// all incoming object values into lower case (e.g. domain, role,
// policy, service, etc name)
userName = userName.toLowerCase();
Principal principal = ((RsrcCtxWrapper) ctx).principal();
if (!isValidUserTokenRequest(principal, userName)) {
throw ZMSUtils.unauthorizedError("getUserToken: Invalid request - missing User credentials or userName mismatch", caller);
}
// if the user is requesting authorized services we need to verify that
// all the service names are valid
List<String> services = null;
if (authorizedServices != null && !authorizedServices.isEmpty()) {
services = Arrays.asList(authorizedServices.split(","));
for (String service : services) {
if (!serverAuthorizedServices.contains(service)) {
throw ZMSUtils.unauthorizedError("getUserToken: Service " + service + " is not authorized in ZMS", caller);
}
}
}
PrincipalToken token = new PrincipalToken.Builder("U1", userDomain, principal.getName())
.expirationWindow(userTokenTimeout).keyId(privateKey.getId()).host(serverHostName)
.ip(ServletRequestUtil.getRemoteAddress(ctx.request())).authorizedServices(services).build();
token.sign(privateKey.getKey());
UserToken userToken = new UserToken().setToken(token.getSignedToken());
if (header == Boolean.TRUE && principalAuthority != null) {
userToken.setHeader(principalAuthority.getHeader());
}
// set our standard CORS headers in our response if we're processing
// a get user token for an authorized service
if (services != null) {
setStandardCORSHeaders(ctx);
}
metric.stopTiming(timerMetric, null, principalDomain);
return userToken;
}
public UserToken optionsUserToken(ResourceContext ctx, String userName, String authorizedServices) {
final String caller = "optionsusertoken";
metric.increment(ZMSConsts.HTTP_OPTIONS);
metric.increment(ZMSConsts.HTTP_REQUEST);
metric.increment(caller);
final String principalDomain = getPrincipalDomain(ctx);
Object timerMetric = metric.startTiming("optionsusertoken_timing", null, principalDomain);
validateRequest(ctx.request(), caller);
// the user must be requesting an authorized service token
if (authorizedServices == null || authorizedServices.isEmpty()) {
throw ZMSUtils.requestError("optionsUserToken: No authorized services specified in the request", caller);
}
// verify that all specified services are valid
String[] services = authorizedServices.split(",");
for (String service : services) {
if (!serverAuthorizedServices.contains(service)) {
throw ZMSUtils.requestError("optionsUserToken: Service " + service + " is not authorized in ZMS", caller);
}
}
// set our standard CORS headers in our response
setStandardCORSHeaders(ctx);
// since this is the preflight request we are going to report that
// we only allow GET method and configure the user-agent to cache
// this request results for up-to 30 days
ctx.response().addHeader(ZMSConsts.HTTP_ACCESS_CONTROL_ALLOW_METHODS, ZMSConsts.HTTP_GET);
ctx.response().addHeader(ZMSConsts.HTTP_ACCESS_CONTROL_MAX_AGE, "2592000");
metric.stopTiming(timerMetric, null, principalDomain);
return null;
}
boolean isValidCORSOrigin(final String origin) {
// first check for non-empty origin value
if (origin == null || origin.isEmpty()) {
return false;
}
// check if we have whitelist configured
if (corsOriginList == null || corsOriginList.isEmpty()) {
return true;
}
return corsOriginList.contains(origin);
}
void setStandardCORSHeaders(ResourceContext ctx) {
// if we get an Origin header in our request then we're going to return
// the same value in the Allow-Origin header
String origin = ctx.request().getHeader(ZMSConsts.HTTP_ORIGIN);
if (isValidCORSOrigin(origin)) {
ctx.response().addHeader(ZMSConsts.HTTP_ACCESS_CONTROL_ALLOW_ORIGIN, origin);
}
// we must allow credentials to be passed by the client
ctx.response().addHeader(ZMSConsts.HTTP_ACCESS_CONTROL_ALLOW_CREDENTIALS, "true");
// if the client is asking us to allow any headers then we're going
// to return that set back as allowed
String allowHeaders = ctx.request().getHeader(ZMSConsts.HTTP_ACCESS_CONTROL_REQUEST_HEADERS);
if (allowHeaders != null && !allowHeaders.isEmpty()) {
ctx.response().addHeader(ZMSConsts.HTTP_ACCESS_CONTROL_ALLOW_HEADERS, allowHeaders);
}
}
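// split a fully qualified provider service name into its domain and
// service components - e.g. "weather.storage.api" (hypothetical value)
// yields domain "weather.storage" and service "api"; null is returned
// if there is no valid separator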
String providerServiceDomain(String provider) {
int n = provider.lastIndexOf('.');
if (n <= 0 || n == provider.length() - 1) {
return null;
}
return provider.substring(0, n);
}
String providerServiceName(String provider) {
int n = provider.lastIndexOf('.');
if (n <= 0 || n == provider.length() - 1) {
return null;
}
return provider.substring(n + 1);
}
@Override
public void putTenancy(ResourceContext ctx, String tenantDomain, String provider,
String auditRef, Tenancy detail) {
final String caller = "puttenancy";
metric.increment(ZMSConsts.HTTP_PUT);
logPrincipal(ctx);
if (readOnlyMode) {
throw ZMSUtils.requestError(SERVER_READ_ONLY_MESSAGE, caller);
}
validateRequest(ctx.request(), caller);
validate(tenantDomain, TYPE_DOMAIN_NAME, caller);
validate(provider, TYPE_SERVICE_NAME, caller); // the fully qualified service name to provision on
// for consistent handling of all requests, we're going to convert
// all incoming object values into lower case (e.g. domain, role,
// policy, service, etc name)
tenantDomain = tenantDomain.toLowerCase();
provider = provider.toLowerCase();
AthenzObject.TENANCY.convertToLowerCase(detail);
// validate our detail object against uri components
if (!validateTenancyObject(detail, tenantDomain, provider)) {
throw ZMSUtils.requestError("Invalid tenancy object", caller);
}
final String principalDomain = getPrincipalDomain(ctx);
metric.increment(ZMSConsts.HTTP_REQUEST, tenantDomain, principalDomain);
metric.increment(caller, tenantDomain, principalDomain);
Object timerMetric = metric.startTiming("puttenancy_timing", tenantDomain, principalDomain);
// verify that request is properly authenticated for this request
String authorizedService = ((RsrcCtxWrapper) ctx).principal().getAuthorizedService();
verifyAuthorizedServiceOperation(authorizedService, caller);
String provSvcDomain = providerServiceDomain(provider); // provider service domain
String provSvcName = providerServiceName(provider); // provider service name
// we can't have the provider and tenant be in the same domain
// as we don't allow delegation of roles onto themselves
if (provSvcDomain.equals(tenantDomain)) {
throw ZMSUtils.requestError("Provider and tenant domains cannot be the same", caller);
}
if (dbService.getServiceIdentity(provSvcDomain, provSvcName, true) == null) {
throw ZMSUtils.notFoundError("Unable to retrieve service=" + provider, caller);
}
// we are going to allow the authorized service token owner to call
// put tenancy on its own service
boolean authzServiceTokenOperation = isAuthorizedProviderService(authorizedService,
provSvcDomain, provSvcName);
if (authorizedService != null && !authzServiceTokenOperation) {
throw ZMSUtils.requestError("Authorized service provider mismatch: "
+ provider + "/" + authorizedService, caller);
}
// set up our tenant admin policy so provider can check admin's access
dbService.setupTenantAdminPolicy(tenantDomain, provSvcDomain,
provSvcName, auditRef, caller);
// if this is an authorized service token request then we're going to create
// the corresponding admin role in the provider domain since that's been
// authenticated already
if (authzServiceTokenOperation) {
setupTenantAdminPolicyInProvider(ctx, provSvcDomain, provSvcName, tenantDomain,
auditRef, caller);
}
metric.stopTiming(timerMetric, tenantDomain, principalDomain);
}
@Override
public void deleteTenancy(ResourceContext ctx, String tenantDomain, String provider, String auditRef) {
final String caller = "deletetenancy";
metric.increment(ZMSConsts.HTTP_DELETE);
logPrincipal(ctx);
if (readOnlyMode) {
throw ZMSUtils.requestError(SERVER_READ_ONLY_MESSAGE, caller);
}
validateRequest(ctx.request(), caller);
validate(tenantDomain, TYPE_DOMAIN_NAME, caller);
validate(provider, TYPE_SERVICE_NAME, caller); // fully qualified provider's service name
// for consistent handling of all requests, we're going to convert
// all incoming object values into lower case (e.g. domain, role,
// policy, service, etc name)
tenantDomain = tenantDomain.toLowerCase();
provider = provider.toLowerCase();
final String principalDomain = getPrincipalDomain(ctx);
metric.increment(ZMSConsts.HTTP_REQUEST, tenantDomain, principalDomain);
metric.increment(caller, tenantDomain, principalDomain);
Object timerMetric = metric.startTiming("deletetenancy_timing", tenantDomain, principalDomain);
// verify that request is properly authenticated for this request
String authorizedService = ((RsrcCtxWrapper) ctx).principal().getAuthorizedService();
verifyAuthorizedServiceOperation(authorizedService, caller);
// make sure we have a valid provider service
String provSvcDomain = providerServiceDomain(provider);
String provSvcName = providerServiceName(provider);
if (dbService.getServiceIdentity(provSvcDomain, provSvcName, true) == null) {
throw ZMSUtils.notFoundError("Unable to retrieve service: " + provider, caller);
}
// we are going to allow the authorized service token owner to call
// delete tenancy on its own service without configuring a controller
// end point
boolean authzServiceTokenOperation = isAuthorizedProviderService(authorizedService,
provSvcDomain, provSvcName);
if (authzServiceTokenOperation) {
dbService.executeDeleteTenantRoles(ctx, provSvcDomain, provSvcName, tenantDomain, null,
auditRef, caller);
}
// now clean-up local domain roles and policies for this tenant
dbService.executeDeleteTenancy(ctx, tenantDomain, provSvcDomain, provSvcName,
null, auditRef, caller);
metric.stopTiming(timerMetric, tenantDomain, principalDomain);
}
@Override
public void putTenant(ResourceContext ctx, String providerDomain, String providerService,
String tenantDomain, String auditRef, Tenancy detail) {
final String caller = "puttenant";
metric.increment(ZMSConsts.HTTP_PUT);
logPrincipal(ctx);
if (readOnlyMode) {
throw ZMSUtils.requestError(SERVER_READ_ONLY_MESSAGE, caller);
}
validateRequest(ctx.request(), caller);
validate(providerDomain, TYPE_DOMAIN_NAME, caller);
validate(providerService, TYPE_SIMPLE_NAME, caller);
validate(tenantDomain, TYPE_DOMAIN_NAME, caller);
// for consistent handling of all requests, we're going to convert
// all incoming object values into lower case (e.g. domain, role,
// policy, service, etc name)
providerDomain = providerDomain.toLowerCase();
providerService = providerService.toLowerCase();
tenantDomain = tenantDomain.toLowerCase();
AthenzObject.TENANCY.convertToLowerCase(detail);
// we can't have the provider and tenant be in the same domain
// as we don't allow delegation of roles onto themselves
if (providerDomain.equals(tenantDomain)) {
throw ZMSUtils.requestError("Provider and tenant domains cannot be the same", caller);
}
// validate our detail object against uri components
if (!validateTenancyObject(detail, tenantDomain, providerDomain + "." + providerService)) {
throw ZMSUtils.requestError("Invalid tenancy object", caller);
}
final String principalDomain = getPrincipalDomain(ctx);
metric.increment(ZMSConsts.HTTP_REQUEST, providerDomain, principalDomain);
metric.increment(caller, providerDomain, principalDomain);
Object timerMetric = metric.startTiming("puttenant_timing", providerDomain, principalDomain);
// verify that request is properly authenticated for this request
verifyAuthorizedServiceOperation(((RsrcCtxWrapper) ctx).principal().getAuthorizedService(), caller);
if (dbService.getServiceIdentity(providerDomain, providerService, true) == null) {
throw ZMSUtils.notFoundError("Unable to retrieve service=" + providerService, caller);
}
setupTenantAdminPolicyInProvider(ctx, providerDomain, providerService, tenantDomain,
auditRef, caller);
metric.stopTiming(timerMetric, providerDomain, principalDomain);
}
@Override
public void deleteTenant(ResourceContext ctx, String providerDomain, String providerService,
String tenantDomain, String auditRef) {
final String caller = "deletetenant";
metric.increment(ZMSConsts.HTTP_DELETE);
logPrincipal(ctx);
if (readOnlyMode) {
throw ZMSUtils.requestError(SERVER_READ_ONLY_MESSAGE, caller);
}
validateRequest(ctx.request(), caller);
validate(providerDomain, TYPE_DOMAIN_NAME, caller);
validate(providerService, TYPE_SIMPLE_NAME, caller);
validate(tenantDomain, TYPE_DOMAIN_NAME, caller);
// for consistent handling of all requests, we're going to convert
// all incoming object values into lower case (e.g. domain, role,
// policy, service, etc name)
providerDomain = providerDomain.toLowerCase();
providerService = providerService.toLowerCase();
tenantDomain = tenantDomain.toLowerCase();
final String principalDomain = getPrincipalDomain(ctx);
metric.increment(ZMSConsts.HTTP_REQUEST, providerDomain, principalDomain);
metric.increment(caller, providerDomain, principalDomain);
Object timerMetric = metric.startTiming("deletetenant_timing", providerDomain, principalDomain);
// verify that request is properly authenticated for this request
verifyAuthorizedServiceOperation(((RsrcCtxWrapper) ctx).principal().getAuthorizedService(), caller);
if (dbService.getServiceIdentity(providerDomain, providerService, true) == null) {
throw ZMSUtils.notFoundError("Unable to retrieve service=" + providerService, caller);
}
dbService.executeDeleteTenantRoles(ctx, providerDomain, providerService, tenantDomain,
null, auditRef, caller);
metric.stopTiming(timerMetric, providerDomain, principalDomain);
}
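/**
 * verify that the tenancy object matches the uri components: the tenant
 * domain must match and the service must be the fully qualified provider
 * name (e.g. "weather.storage" for a hypothetical provider service
 * "storage" in domain "weather")
 **/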
boolean validateTenancyObject(Tenancy tenant, final String tenantDomain, final String providerService) {
if (!tenant.getDomain().equals(tenantDomain)) {
return false;
}
return tenant.getService().equals(providerService);
}
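/**
 * verify that the tenant resource group roles object matches the uri
 * components (provider domain/service, tenant domain, resource group)
 * and includes at least one role
 **/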
boolean validateTenantResourceGroupRolesObject(TenantResourceGroupRoles roles, final String providerDomain,
final String providerService, final String tenantDomain, final String resourceGroup) {
if (!providerDomain.equals(roles.getDomain())) {
return false;
}
if (!providerService.equals(roles.getService())) {
return false;
}
if (!tenantDomain.equals(roles.getTenant())) {
return false;
}
if (!resourceGroup.equals(roles.getResourceGroup())) {
return false;
}
// we must have at least one role in the object
List<TenantRoleAction> list = roles.getRoles();
return (list != null && !list.isEmpty());
}
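/**
 * same checks as the tenant variant above: the provider resource group
 * roles object must match the uri components and include at least one role
 **/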
boolean validateProviderResourceGroupRolesObject(ProviderResourceGroupRoles roles, final String providerDomain,
final String providerService, final String tenantDomain, final String resourceGroup) {
if (!providerDomain.equals(roles.getDomain())) {
return false;
}
if (!providerService.equals(roles.getService())) {
return false;
}
if (!tenantDomain.equals(roles.getTenant())) {
return false;
}
if (!resourceGroup.equals(roles.getResourceGroup())) {
return false;
}
// we must have at least one role in the object
List<TenantRoleAction> list = roles.getRoles();
return (list != null && !list.isEmpty());
}
// put the trust roles into provider domain
//
@Override
public TenantResourceGroupRoles putTenantResourceGroupRoles(ResourceContext ctx, String provSvcDomain,
String provSvcName, String tenantDomain, String resourceGroup, String auditRef,
TenantResourceGroupRoles detail) {
final String caller = "puttenantresourcegrouproles";
metric.increment(ZMSConsts.HTTP_PUT);
logPrincipal(ctx);
if (readOnlyMode) {
throw ZMSUtils.requestError(SERVER_READ_ONLY_MESSAGE, caller);
}
validateRequest(ctx.request(), caller);
validate(provSvcDomain, TYPE_DOMAIN_NAME, caller);
validate(provSvcName, TYPE_SIMPLE_NAME, caller); //not including the domain, this is the domain's service
validate(tenantDomain, TYPE_DOMAIN_NAME, caller);
validate(detail, TYPE_TENANT_RESOURCE_GROUP_ROLES, caller);
validate(resourceGroup, TYPE_COMPOUND_NAME, caller);
// for consistent handling of all requests, we're going to convert
// all incoming object values into lower case (e.g. domain, role,
// policy, service, etc name)
provSvcDomain = provSvcDomain.toLowerCase();
provSvcName = provSvcName.toLowerCase();
tenantDomain = tenantDomain.toLowerCase();
resourceGroup = resourceGroup.toLowerCase();
AthenzObject.TENANT_RESOURCE_GROUP_ROLES.convertToLowerCase(detail);
// we can't have the provider and tenant be in the same domain
// as we don't allow delegation of roles onto themselves
if (provSvcDomain.equals(tenantDomain)) {
throw ZMSUtils.requestError("Provider and tenant domains cannot be the same", caller);
}
// validate our detail object against uri components
if (!validateTenantResourceGroupRolesObject(detail, provSvcDomain, provSvcName, tenantDomain,
resourceGroup)) {
throw ZMSUtils.requestError("Invalid tenant resource group role object", caller);
}
final String principalDomain = getPrincipalDomain(ctx);
metric.increment(ZMSConsts.HTTP_REQUEST, provSvcDomain, principalDomain);
metric.increment(caller, provSvcDomain, principalDomain);
Object timerMetric = metric.startTiming("puttenantresourcegrouproles_timing", provSvcDomain, principalDomain);
// verify that request is properly authenticated for this request
verifyAuthorizedServiceOperation(((RsrcCtxWrapper) ctx).principal().getAuthorizedService(), caller);
if (LOG.isInfoEnabled()) {
LOG.info("putTenantResourceGroupRoles: ==== putTenantRoles(domain=" + provSvcDomain + ", service=" +
provSvcName + ", tenant-domain=" + tenantDomain + ", resource-group=" + resourceGroup +
", detail=" + detail + ")");
}
// first setup the domain as a tenant in the provider domain
setupTenantAdminPolicyInProvider(ctx, provSvcDomain, provSvcName, tenantDomain,
auditRef, caller);
// then setup the requested resource group roles
dbService.executePutTenantRoles(ctx, provSvcDomain, provSvcName, tenantDomain,
resourceGroup, detail.getRoles(), auditRef, caller);
metric.stopTiming(timerMetric, provSvcDomain, principalDomain);
return detail;
}
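/**
 * carry out a consistency check of the given domain: report dangling
 * policies (assertions referencing roles that don't exist) and dangling
 * roles (roles not referenced by any assertion). for tenant domains,
 * verify that each provider referenced by an assume_role assertion has
 * the corresponding trust roles set up, and for provider domains, verify
 * that each tenant referenced by a trust role has the matching
 * assume_role assertions
 **/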
public DomainDataCheck getDomainDataCheck(ResourceContext ctx, String domainName) {
final String caller = "getdomaindatacheck";
metric.increment(ZMSConsts.HTTP_GET);
logPrincipal(ctx);
validateRequest(ctx.request(), caller);
validate(domainName, TYPE_DOMAIN_NAME, caller);
domainName = domainName.toLowerCase();
final String principalDomain = getPrincipalDomain(ctx);
metric.increment(ZMSConsts.HTTP_REQUEST, domainName, principalDomain);
metric.increment(caller, domainName, principalDomain);
Object timerMetric = metric.startTiming("getdomaindatacheck_timing", domainName, principalDomain);
if (LOG.isDebugEnabled()) {
LOG.debug("getDomainDataCheck: domain=" + domainName);
}
AthenzDomain domain = getAthenzDomain(domainName, false);
if (domain == null) {
throw ZMSUtils.notFoundError("getDomainDataCheck: Domain not found: '" + domainName + "'", caller);
}
// build set of roles
// iterate them to look for trust roles - in case this is a provider domain
Set<String> roleSet = new HashSet<>();
Set<String> trustRoleSet = new HashSet<>();
// map per trust/tenant domain that contains the trust roles
Map<String, Set<String>> trustRoleMap = new HashMap<>();
for (Role role : domain.getRoles()) {
if (LOG.isDebugEnabled()) {
LOG.debug("getDomainDataCheck: processing role - " + role.getName());
}
roleSet.add(role.getName());
String roleName = ZMSUtils.removeDomainPrefix(role.getName(), domainName, ROLE_PREFIX);
String trustDomain = role.getTrust();
if (trustDomain != null) {
if (LOG.isDebugEnabled()) {
LOG.debug("trust role for domain: " + trustDomain);
}
trustRoleSet.add(trustDomain);
Set<String> tset = trustRoleMap.computeIfAbsent(trustDomain, k -> new HashSet<>());
tset.add(roleName);
}
}
// look for dangling roles and policies
//
int assertionCount = 0;
int roleWildcardCount = 0;
Set<String> usedRoleSet = new HashSet<>(); // keep track of roles used by policies
Set<String> providerSet = new HashSet<>(); // keep track of providers from assume_role policies
// assume_role resources are placed into the set per provider service domain
Map<String, Set<String>> svcRoleMap = new HashMap<>();
List<DanglingPolicy> danglingPolicies = new ArrayList<>();
List<Policy> policies = domain.getPolicies();
for (Policy policy : policies) {
String pname = ZMSUtils.removeDomainPrefix(policy.getName(), domainName, POLICY_PREFIX);
if (LOG.isDebugEnabled()) {
LOG.debug("getDomainDataCheck: processing policy=" + pname + " in domain=" + domainName);
}
List<Assertion> assertions = policy.getAssertions();
if (assertions == null) {
continue;
}
for (Assertion assertion : assertions) {
assertionCount++;
if (ZMSConsts.ACTION_ASSUME_ROLE.equalsIgnoreCase(assertion.getAction())) {
// get provider domain+service name and add to set of providers
// Note there may be a resource appended - to be dealt with later
// ex: testgetdomaindatacheck:policy.tenancy.testgetdomaindatacheckprovider.storage.reader
// ex: testgetdomaindatacheck:policy.tenancy.testgetdomaindatacheckprovider.sub.storage.res_group.ravers.reader
// index after "tenancy." and index of last dot
int index = pname.indexOf("tenancy.");
if (index == -1) {
continue;
}
int lindex = pname.lastIndexOf('.');
if (lindex == -1) {
continue;
}
String provSvcDomain = pname.substring(index + "tenancy.".length(), lindex);
providerSet.add(provSvcDomain);
// let's collect the resource field that is the name of the role in the provider
// ex: testgetdomaindatacheckprovider.sub:role.storage.tenant.testgetdomaindatacheck.reader
// ex: testgetdomaindatacheckprovider.sub:role.storage.tenant.testgetdomaindatacheck.res_group.ravers.reader
String rsrc = assertion.getResource();
Set<String> rset = svcRoleMap.computeIfAbsent(provSvcDomain, k -> new HashSet<>());
rset.add(rsrc);
}
String roleName = assertion.getRole();
// check for wildcard role
if (roleName.lastIndexOf('*') != -1) {
roleWildcardCount++;
// make sure there is at least 1 role that can match
// this wildcard - else it's a dangling policy
String rolePattern = StringUtils.patternFromGlob(roleName);
boolean wildCardMatch = false;
for (String role: roleSet) {
if (role.matches(rolePattern)) {
wildCardMatch = true;
break;
}
}
if (!wildCardMatch) { // dangling policy
DanglingPolicy dp = new DanglingPolicy();
// we need to remove the domain:role. and domain:policy prefixes
// according to RDL definitions for role and policy names
dp.setRoleName(ZMSUtils.removeDomainPrefix(roleName, domainName, ROLE_PREFIX));
dp.setPolicyName(ZMSUtils.removeDomainPrefix(pname, domainName, POLICY_PREFIX));
danglingPolicies.add(dp);
}
} else if (roleSet.contains(roleName)) {
usedRoleSet.add(roleName);
} else { // dangling policy
DanglingPolicy dp = new DanglingPolicy();
// we need to remove the domain:role. and domain:policy prefixes
// according to RDL definitions for role and policy names
dp.setRoleName(ZMSUtils.removeDomainPrefix(roleName, domainName, ROLE_PREFIX));
dp.setPolicyName(ZMSUtils.removeDomainPrefix(pname, domainName, POLICY_PREFIX));
danglingPolicies.add(dp);
}
}
}
DomainDataCheck ddc = new DomainDataCheck();
ddc.setPolicyCount(policies.size());
ddc.setAssertionCount(assertionCount);
ddc.setRoleWildCardCount(roleWildcardCount);
if (!danglingPolicies.isEmpty()) {
ddc.setDanglingPolicies(danglingPolicies);
}
if (roleSet.size() != usedRoleSet.size()) {
// uh oh, some roles are unused - we need to subtract the usedRoleSet
// from roleSet - the leftovers are the unused roles
roleSet.removeAll(usedRoleSet);
// we need to remove the domain:role. prefix according to
// RDL definition for dangling role names
List<String> danglingRoleList = new ArrayList<>();
for (String roleName : roleSet) {
danglingRoleList.add(ZMSUtils.removeDomainPrefix(roleName, domainName, ROLE_PREFIX));
}
ddc.setDanglingRoles(danglingRoleList);
}
if (LOG.isDebugEnabled()) {
LOG.debug("getDomainDataCheck: domain=" + domainName +
" policy-count=" + policies.size() + " assertion-count=" +
assertionCount + " wildcard-count==" + roleWildcardCount +
" dangling-policies=" + danglingPolicies.size() +
" dangling-roles=" + roleSet.size());
}
// Tenant Domain Check: does each provider fully support this tenant?
// collect Service names (domain.service) for domains that don't contain
// a trust role
List<String> provsWithoutTrust = new ArrayList<>();
for (String provSvc : providerSet) {
if (LOG.isDebugEnabled()) {
LOG.debug("getDomainDataCheck: domain=" + domainName +
" provider-service=" + provSvc);
}
// 2 cases to resolve, one with resource group, one without
// ex: iaas.stuff.storage.read
// ex: iaas.stuff.storage.res_group.my_resource_group.read
int idx = provSvc.indexOf(".res_group.");
String provSvcDomain;
if (idx == -1) {
provSvcDomain = providerServiceDomain(provSvc);
} else {
provSvcDomain = providerServiceDomain(provSvc.substring(0, idx));
}
AthenzDomain providerDomain = getAthenzDomain(provSvcDomain, true);
Set<String> rset = svcRoleMap.get(provSvc);
if (rset == null || rset.isEmpty() || providerDomain == null) {
provsWithoutTrust.add(provSvc);
continue;
}
// find trust role in the provider that contains the tenant domain
int foundTrust = 0;
for (Role role : providerDomain.getRoles()) {
String trustDomain = role.getTrust();
if (trustDomain != null) {
if (domainName.equals(trustDomain)) {
// is this role a match for an assume role in the tenant
// look for the role in the role set for this service
if (rset.contains(role.getName())) {
foundTrust++;
}
}
}
}
if (foundTrust != rset.size()) {
provsWithoutTrust.add(provSvc);
}
}
if (!provsWithoutTrust.isEmpty()) {
ddc.setProvidersWithoutTrust(provsWithoutTrust);
}
// Provider Domain Check: does each tenant have all the assume_role
// assertions to match each trust role.
// tenantsWithoutProv: names of Tenant domains that don't contain assume
// role assertions if this is a provider domain
List<String> tenantsWithoutProv = new ArrayList<>();
// tenantDomMap: optimize reading tenant domains once already read
// This is optimizing for Providers with lots of tenants.
Map<String, AthenzDomain> tenantDomMap = new HashMap<>();
for (String trustRole: trustRoleSet) {
if (LOG.isDebugEnabled()) {
LOG.debug("getDomainDataCheck: processing trust role: " + trustRole);
}
AthenzDomain tenantDomain = tenantDomMap.get(trustRole);
if (tenantDomain == null) {
tenantDomain = getAthenzDomain(trustRole, true);
if (tenantDomain == null) {
tenantsWithoutProv.add(trustRole);
continue;
} else {
tenantDomMap.put(trustRole, tenantDomain);
}
}
// Get the set of provider trust roles for this trust/tenant domain.
Set<String> tset = trustRoleMap.get(trustRole);
if (tset == null || tset.isEmpty()) {
tenantsWithoutProv.add(trustRole);
continue;
}
int foundProviderCnt = 0;
// Check for assume_role containing the provider in the tenantDomain
for (Policy policy : tenantDomain.getPolicies()) {
List<Assertion> assertions = policy.getAssertions();
if (assertions == null) {
continue;
}
for (Assertion assertion : assertions) {
if (ZMSConsts.ACTION_ASSUME_ROLE.equalsIgnoreCase(assertion.getAction())) {
String rsrc = assertion.getResource();
// If the provider domain contains a role that matches
// the tenant domain resource - then the tenant is supported
if (roleSet.contains(rsrc)) {
// HAVE: an assume_role with resource pointing at the provider
foundProviderCnt++;
}
}
}
}
if (foundProviderCnt < tset.size()) {
// didn't find all required matching provider trust-role to assume_role-resource pairs
tenantsWithoutProv.add(trustRole);
}
}
if (!tenantsWithoutProv.isEmpty()) {
ddc.setTenantsWithoutAssumeRole(tenantsWithoutProv);
}
metric.stopTiming(timerMetric, domainName, principalDomain);
return ddc;
}
public void deleteProviderResourceGroupRoles(ResourceContext ctx, String tenantDomain,
String provSvcDomain, String provSvcName, String resourceGroup, String auditRef) {
final String caller = "deleteproviderresourcegrouproles";
metric.increment(ZMSConsts.HTTP_DELETE);
logPrincipal(ctx);
if (readOnlyMode) {
throw ZMSUtils.requestError(SERVER_READ_ONLY_MESSAGE, caller);
}
validateRequest(ctx.request(), caller);
validate(provSvcDomain, TYPE_DOMAIN_NAME, caller);
validate(provSvcName, TYPE_SIMPLE_NAME, caller);
validate(tenantDomain, TYPE_DOMAIN_NAME, caller);
validate(resourceGroup, TYPE_COMPOUND_NAME, caller);
// for consistent handling of all requests, we're going to convert
// all incoming object values into lower case (e.g. domain, role,
// policy, service, etc name)
provSvcDomain = provSvcDomain.toLowerCase();
provSvcName = provSvcName.toLowerCase();
tenantDomain = tenantDomain.toLowerCase();
resourceGroup = resourceGroup.toLowerCase();
final String principalDomain = getPrincipalDomain(ctx);
metric.increment(ZMSConsts.HTTP_REQUEST, provSvcDomain, principalDomain);
metric.increment(caller, provSvcDomain, principalDomain);
Object timerMetric = metric.startTiming("deleteproviderresourcegrouproles_timing", provSvcDomain, principalDomain);
// verify that request is properly authenticated for this request
verifyAuthorizedServiceOperation(((RsrcCtxWrapper) ctx).principal().getAuthorizedService(), caller);
// first clean-up local domain roles and policies for this tenant
dbService.executeDeleteTenancy(ctx, tenantDomain, provSvcDomain, provSvcName,
resourceGroup, auditRef, caller);
// at this point the tenant side is complete. If the token was a chained
// token signed by the provider service then we're going to process the
// provider side as well, thus completing the tenancy delete process
String authorizedService = ((RsrcCtxWrapper) ctx).principal().getAuthorizedService();
if (isAuthorizedProviderService(authorizedService, provSvcDomain, provSvcName)) {
dbService.executeDeleteTenantRoles(ctx, provSvcDomain, provSvcName, tenantDomain,
resourceGroup, auditRef, caller);
}
metric.stopTiming(timerMetric, provSvcDomain, principalDomain);
}
public ProviderResourceGroupRoles getProviderResourceGroupRoles(ResourceContext ctx, String tenantDomain,
String provSvcDomain, String provSvcName, String resourceGroup) {
final String caller = "getproviderresourcegrouproles";
metric.increment(ZMSConsts.HTTP_GET);
logPrincipal(ctx);
validateRequest(ctx.request(), caller);
validate(provSvcDomain, TYPE_DOMAIN_NAME, caller);
validate(provSvcName, TYPE_SIMPLE_NAME, caller);
validate(tenantDomain, TYPE_DOMAIN_NAME, caller);
validate(resourceGroup, TYPE_COMPOUND_NAME, caller);
// for consistent handling of all requests, we're going to convert
// all incoming object values into lower case (e.g. domain, role,
// policy, service, etc name)
provSvcDomain = provSvcDomain.toLowerCase();
provSvcName = provSvcName.toLowerCase();
tenantDomain = tenantDomain.toLowerCase();
resourceGroup = resourceGroup.toLowerCase();
final String principalDomain = getPrincipalDomain(ctx);
metric.increment(ZMSConsts.HTTP_REQUEST, provSvcDomain, principalDomain);
metric.increment(caller, provSvcDomain, principalDomain);
Object timerMetric = metric.startTiming("getproviderresourcegrouproles_timing", provSvcDomain, principalDomain);
if (dbService.getDomain(tenantDomain, false) == null) {
throw ZMSUtils.notFoundError("No such domain: " + tenantDomain, caller);
}
// look for this provider's roles, ex: storage.tenant.sports.reader
String rolePrefix = ZMSUtils.getProviderResourceGroupRolePrefix(provSvcDomain, provSvcName, resourceGroup);
ProviderResourceGroupRoles provRoles = new ProviderResourceGroupRoles().setDomain(provSvcDomain)
.setService(provSvcName).setTenant(tenantDomain).setResourceGroup(resourceGroup);
List<TenantRoleAction> tralist = new ArrayList<>();
// find roles matching the prefix
List<String> rcollection = dbService.listRoles(tenantDomain);
for (String rname: rcollection) {
if (dbService.isTenantRolePrefixMatch(rname, rolePrefix, resourceGroup, null)) {
// for provider roles we don't have the action, that's
// for the provider domain only so we're just going
// to return the list of roles without any actions
// for the role name we must return the SimpleName
// part only so we'll remove the prefix section
TenantRoleAction tra = new TenantRoleAction()
.setRole(rname.substring(rolePrefix.length()))
.setAction("n/a");
tralist.add(tra);
}
}
provRoles.setRoles(tralist);
metric.stopTiming(timerMetric, provSvcDomain, principalDomain);
return provRoles;
}
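/**
 * verify that the authorized service is the provider service itself
 * and that, per the RDL authorize statement for putTenantRole, it has
 * update access to its own {domain}:tenant.{service} resource so it
 * can provision its own tenants
 **/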
boolean isAuthorizedProviderService(String authorizedService, String provSvcDomain,
String provSvcName) {
// make sure we have a service provided and that it matches our provider
if (authorizedService == null) {
return false;
}
if (!authorizedService.equals(provSvcDomain + "." + provSvcName)) {
return false;
}
// verify that provider service does indeed have access to provision
// its own tenants. the authorize statement for the putTenantRole
// command is defined in the RDL as:
// authorize ("UPDATE", "{domain}:tenant.{service}");
AthenzDomain domain = getAthenzDomain(provSvcDomain, true);
if (domain == null) {
return false;
}
// evaluate our domain's roles and policies to see if access
// is allowed or not for the given operation and resource
String resource = provSvcDomain + ":tenant." + provSvcName;
AccessStatus accessStatus = evaluateAccess(domain, authorizedService, "update",
resource, null, null);
return accessStatus == AccessStatus.ALLOWED;
}
/**
* This sets up the assume roles in the tenant. If the tenants admin user
* token has been authorized by the provider, the providers domain will be
* updated as well, thus completing the tenancy on-boarding in a single step.
**/
@Override
public ProviderResourceGroupRoles putProviderResourceGroupRoles(ResourceContext ctx, String tenantDomain,
String provSvcDomain, String provSvcName, String resourceGroup, String auditRef,
ProviderResourceGroupRoles detail) {
final String caller = "putproviderresourcegrouproles";
metric.increment(ZMSConsts.HTTP_PUT);
logPrincipal(ctx);
if (readOnlyMode) {
throw ZMSUtils.requestError(SERVER_READ_ONLY_MESSAGE, caller);
}
validateRequest(ctx.request(), caller);
validate(provSvcDomain, TYPE_DOMAIN_NAME, caller);
validate(provSvcName, TYPE_SIMPLE_NAME, caller); //not including the domain, this is the domain's service
validate(tenantDomain, TYPE_DOMAIN_NAME, caller);
validate(detail, TYPE_PROVIDER_RESOURCE_GROUP_ROLES, caller);
validate(resourceGroup, TYPE_COMPOUND_NAME, caller);
// for consistent handling of all requests, we're going to convert
// all incoming object values into lower case (e.g. domain, role,
// policy, service, etc name)
provSvcDomain = provSvcDomain.toLowerCase();
provSvcName = provSvcName.toLowerCase();
tenantDomain = tenantDomain.toLowerCase();
resourceGroup = resourceGroup.toLowerCase();
AthenzObject.PROVIDER_RESOURCE_GROUP_ROLES.convertToLowerCase(detail);
// we can't have the provider and tenant be in the same domain
// as we don't allow delegation of roles onto themselves
if (provSvcDomain.equals(tenantDomain)) {
throw ZMSUtils.requestError("Provider and tenant domains cannot be the same", caller);
}
// validate our detail object against uri components
if (!validateProviderResourceGroupRolesObject(detail, provSvcDomain, provSvcName, tenantDomain,
resourceGroup)) {
throw ZMSUtils.requestError("Invalid provider resource group role object", caller);
}
final String principalDomain = getPrincipalDomain(ctx);
metric.increment(ZMSConsts.HTTP_REQUEST, provSvcDomain, principalDomain);
metric.increment(caller, provSvcDomain, principalDomain);
Object timerMetric = metric.startTiming("putproviderresourcegrouproles_timing", provSvcDomain, principalDomain);
// verify that request is properly authenticated for this request
verifyAuthorizedServiceOperation(((RsrcCtxWrapper) ctx).principal().getAuthorizedService(), caller);
if (LOG.isInfoEnabled()) {
LOG.info("putProviderResourceGroupRoles: domain=" + provSvcDomain + ", service=" +
provSvcName + ", tenant-domain=" + tenantDomain + ", resource-group=" + resourceGroup +
", detail=" + detail);
}
// set up our tenant admin policy so provider can check admin's access
dbService.setupTenantAdminPolicy(tenantDomain, provSvcDomain, provSvcName, auditRef, caller);
// now we're going to setup our roles
List<TenantRoleAction> roleActions = detail.getRoles();
List<String> roles = new ArrayList<>();
for (TenantRoleAction roleAction : roleActions) {
roles.add(roleAction.getRole());
}
// we're going to create a separate role for each one of the tenant roles
// returned based on its action and set the caller as a member in each role
dbService.executePutProviderRoles(ctx, tenantDomain, provSvcDomain, provSvcName, resourceGroup,
roles, auditRef, caller);
// at this point the tenant side is complete. If the token was a chained
// token signed by the provider service then we're going to process the
// provider side as well, thus completing the tenancy on-boarding process
String authorizedService = ((RsrcCtxWrapper) ctx).principal().getAuthorizedService();
if (isAuthorizedProviderService(authorizedService, provSvcDomain, provSvcName)) {
// first we need to setup the admin roles in case this
// happens to be the first resource group
setupTenantAdminPolicyInProvider(ctx, provSvcDomain, provSvcName, tenantDomain,
auditRef, caller);
// now onboard the requested resource group
dbService.executePutTenantRoles(ctx, provSvcDomain, provSvcName, tenantDomain,
resourceGroup, roleActions, auditRef, caller);
}
metric.stopTiming(timerMetric, provSvcDomain, principalDomain);
return detail;
}
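/**
 * set up the tenant admin role (admin role with action "*") for the
 * given tenant in the provider domain
 **/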
void setupTenantAdminPolicyInProvider(ResourceContext ctx, final String provSvcDomain,
final String provSvcName, final String tenantDomain, final String auditRef,
final String caller) {
List<TenantRoleAction> roles = new ArrayList<>();
TenantRoleAction roleAction = new TenantRoleAction().setAction("*").setRole(ADMIN_ROLE_NAME);
roles.add(roleAction);
dbService.executePutTenantRoles(ctx, provSvcDomain, provSvcName, tenantDomain, null,
roles, auditRef, caller);
}
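/**
 * look up the action configured for the given provider role. the policy
 * in the provider domain has the same name as the role; if either the
 * policy or a matching assertion is missing then we return an empty string
 **/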
String getProviderRoleAction(String provSvcDomain, String roleName) {
// if there's no match then we're going to default to an action of empty string
Policy policy = dbService.getPolicy(provSvcDomain, roleName); // policy has same name
if (policy == null) {
return "";
}
List<Assertion> assertions = policy.getAssertions();
if (assertions == null) {
return "";
}
for (Assertion assertion : assertions) {
if (!assertion.getRole().endsWith(roleName)) {
continue;
}
return assertion.getAction();
}
return "";
}
public TenantResourceGroupRoles getTenantResourceGroupRoles(ResourceContext ctx, String provSvcDomain,
String provSvcName, String tenantDomain, String resourceGroup) {
final String caller = "gettenantresourcegrouproles";
metric.increment(ZMSConsts.HTTP_GET);
logPrincipal(ctx);
validateRequest(ctx.request(), caller);
validate(provSvcDomain, TYPE_DOMAIN_NAME, caller);
validate(provSvcName, TYPE_SIMPLE_NAME, caller); // not including the domain, this is the domain's service type
validate(tenantDomain, TYPE_DOMAIN_NAME, caller);
validate(resourceGroup, TYPE_COMPOUND_NAME, caller);
// for consistent handling of all requests, we're going to convert
// all incoming object values into lower case (e.g. domain, role,
// policy, service, etc name)
provSvcDomain = provSvcDomain.toLowerCase();
provSvcName = provSvcName.toLowerCase();
tenantDomain = tenantDomain.toLowerCase();
resourceGroup = resourceGroup.toLowerCase();
final String principalDomain = getPrincipalDomain(ctx);
metric.increment(ZMSConsts.HTTP_REQUEST, provSvcDomain, principalDomain);
metric.increment(caller, provSvcDomain, principalDomain);
Object timerMetric = metric.startTiming("gettenantresourcegrouproles_timing", provSvcDomain, principalDomain);
if (dbService.getDomain(provSvcDomain, false) == null) {
throw ZMSUtils.notFoundError("getTenantResourceGroupRoles: No such domain: " + provSvcDomain, caller);
}
// look for this tenant's roles, ex: storage.tenant.sports.reader
String rolePrefix = ZMSUtils.getTenantResourceGroupRolePrefix(provSvcName, tenantDomain, resourceGroup);
TenantResourceGroupRoles troles = new TenantResourceGroupRoles().setDomain(provSvcDomain)
.setService(provSvcName).setTenant(tenantDomain).setResourceGroup(resourceGroup);
List<TenantRoleAction> tralist = new ArrayList<>();
// find roles matching the prefix
List<String> rcollection = dbService.listRoles(provSvcDomain);
for (String rname: rcollection) {
if (dbService.isTrustRoleForTenant(provSvcDomain, rname, rolePrefix, resourceGroup, tenantDomain)) {
// good, it's exactly what we are looking for, but
// now we want the ACTION that was set in the provider
String action = getProviderRoleAction(provSvcDomain, rname);
// for the role name we must return the SimpleName
// part only so we'll remove the prefix section
TenantRoleAction tra = new TenantRoleAction()
.setRole(rname.substring(rolePrefix.length()))
.setAction(action);
tralist.add(tra);
}
}
troles.setRoles(tralist);
metric.stopTiming(timerMetric, provSvcDomain, principalDomain);
return troles;
}
public void deleteTenantResourceGroupRoles(ResourceContext ctx, String provSvcDomain,
String provSvcName, String tenantDomain, String resourceGroup, String auditRef) {
final String caller = "deletetenantresourcegrouproles";
metric.increment(ZMSConsts.HTTP_DELETE);
logPrincipal(ctx);
if (readOnlyMode) {
throw ZMSUtils.requestError(SERVER_READ_ONLY_MESSAGE, caller);
}
validateRequest(ctx.request(), caller);
validate(provSvcDomain, TYPE_DOMAIN_NAME, caller);
validate(provSvcName, TYPE_SIMPLE_NAME, caller); // not including the domain, this is the domain's service type
validate(tenantDomain, TYPE_DOMAIN_NAME, caller);
validate(resourceGroup, TYPE_COMPOUND_NAME, caller);
// for consistent handling of all requests, we're going to convert
// all incoming object values into lower case (e.g. domain, role,
// policy, service, etc name)
provSvcDomain = provSvcDomain.toLowerCase();
provSvcName = provSvcName.toLowerCase();
tenantDomain = tenantDomain.toLowerCase();
resourceGroup = resourceGroup.toLowerCase();
final String principalDomain = getPrincipalDomain(ctx);
metric.increment(ZMSConsts.HTTP_REQUEST, provSvcDomain, principalDomain);
metric.increment(caller, provSvcDomain, principalDomain);
Object timerMetric = metric.startTiming("deletetenantresourcegrouproles_timing", provSvcDomain, principalDomain);
// verify that request is properly authenticated for this request
verifyAuthorizedServiceOperation(((RsrcCtxWrapper) ctx).principal().getAuthorizedService(), caller);
dbService.executeDeleteTenantRoles(ctx, provSvcDomain, provSvcName, tenantDomain,
resourceGroup, auditRef, caller);
metric.stopTiming(timerMetric, provSvcDomain, principalDomain);
}
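/**
 * extract the domain name portion of the given resource: for example,
 * for the resource "sports:role.admin" (a hypothetical domain "sports")
 * this returns "sports". returns null if the resource has no ':' separator
 **/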
String extractDomainName(String resource) {
int idx = resource.indexOf(':');
if (idx == -1) {
if (LOG.isDebugEnabled()) {
LOG.debug("extractDomainName: missing domain name: " + resource);
}
return null;
}
return resource.substring(0, idx);
}
void validateRequest(HttpServletRequest request, String caller) {
validateRequest(request, caller, false);
}
void validateRequest(HttpServletRequest request, String caller, boolean statusRequest) {
// first validate if we're required to process this over TLS only
if (secureRequestsOnly && !request.isSecure()) {
throw ZMSUtils.requestError(caller + ": request must be over TLS", caller);
}
// second, if a separate status port is configured, make sure status
// and non-status requests are only processed on their respective ports
if (statusPort > 0 && statusPort != httpPort && statusPort != httpsPort) {
// non status requests must not take place on the status port
if (!statusRequest && request.getLocalPort() == statusPort) {
throw ZMSUtils.requestError("incorrect port number for a non-status request", caller);
}
// status requests must not take place on a non-status port
if (statusRequest && request.getLocalPort() != statusPort) {
throw ZMSUtils.requestError("incorrect port number for a status request", caller);
}
}
}
void validate(Object val, String type, String caller) {
if (val == null) {
throw ZMSUtils.requestError("Missing or malformed " + type, caller);
}
Result result = validator.validate(val, type);
if (!result.valid) {
throw ZMSUtils.requestError("Invalid " + type + " error: " + result.error, caller);
}
}
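/**
 * validate the given list of admin users and drop any duplicates.
 * at least one admin user must be provided
 **/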
List<String> validatedAdminUsers(List<String> lst) {
final String caller = "validatedadminusers";
if (lst == null || lst.isEmpty()) {
throw ZMSUtils.requestError("validatedAdminUsers: Missing adminUsers", caller);
}
Set<String> users = new HashSet<>();
for (String user : lst) {
validate(user, TYPE_RESOURCE_NAME, caller);
users.add(user);
}
return new ArrayList<>(users);
}
Domain createTopLevelDomain(ResourceContext ctx, Domain domain, List<String> adminUsers,
List<String> solutionTemplates, String auditRef) {
List<String> users = validatedAdminUsers(adminUsers);
return dbService.makeDomain(ctx, domain, users, solutionTemplates, auditRef);
}
Domain createSubDomain(ResourceContext ctx, Domain domain, List<String> adminUsers,
List<String> solutionTemplates, String auditRef, String caller) {
// verify length of full sub domain name
if (domain.getName().length() > domainNameMaxLen) {
throw ZMSUtils.requestError("Invalid SubDomain name: " + domain.getName()
+ " : name length cannot exceed: " + domainNameMaxLen, caller);
}
List<String> users = validatedAdminUsers(adminUsers);
return dbService.makeDomain(ctx, domain, users, solutionTemplates, auditRef);
}
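/**
 * count the number of '.' characters in the given string,
 * e.g. countDots("a.b.c") returns 2
 **/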
int countDots(String str) {
int count = 0;
int i = str.indexOf('.');
while (i >= 0) {
count++;
i = str.indexOf('.', i + 1);
}
return count;
}
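/**
 * check whether the given domain name is deeper than the requested
 * depth measured in dots: with depth=1, "a.b" (one dot) is within the
 * limit while "a.b.c" (two dots) exceeds it. a null depth means no limit
 **/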
boolean hasExceededDepthLimit(Integer depth, String name) {
if (depth == null) {
return false;
}
// depth=0 means only top level
return countDots(name) > depth;
}
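/**
 * return the list of domains filtered by prefix, depth and modification
 * time. if a skip value is given, the list resumes after that entry, and
 * if the result exceeds the requested limit it is truncated and the next
 * skip value is set in the returned object
 **/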
DomainList listDomains(Integer limit, String skip, String prefix, Integer depth, long modTime) {
//note: we don't use the store's options, because we also need to filter on depth
List<String> allDomains = dbService.listDomains(prefix, modTime);
List<String> names = new ArrayList<>();
for (String name : allDomains) {
if (hasExceededDepthLimit(depth, name)) {
continue;
}
names.add(name);
}
int count = names.size();
if (skip != null) {
for (int i = 0; i < count; i++) {
String name = names.get(i);
if (skip.equals(name)) {
names = names.subList(i + 1, count);
count = names.size();
break;
}
}
}
DomainList result = new DomainList();
// if we have exceeded our requested list then
// set the next skip entry in our result
if (hasExceededListLimit(limit, count)) {
names = names.subList(0, limit);
result.setNext(names.get(limit - 1));
}
result.setNames(names);
return result;
}
boolean isZMSService(String domain, String service) {
return (SYS_AUTH.equalsIgnoreCase(domain) && ZMSConsts.ZMS_SERVICE.equalsIgnoreCase(service));
}
/**
 * implements KeyStore getPublicKey
 * @return String with the PEM-encoded key; stored keys are ybase64-encoded
 * and are ybase64-decoded before being returned
 **/
@Override
public String getPublicKey(String domain, String service, String keyId) {
if (LOG.isDebugEnabled()) {
LOG.debug("getPublicKey: service=" + domain + "." + service + " key-id=" + keyId);
}
if (service == null || keyId == null) {
return null;
}
// for consistent handling of all requests, we're going to convert
// all incoming object values into lower case (e.g. domain, role,
// policy, service, etc name)
domain = domain.toLowerCase();
service = service.toLowerCase();
keyId = keyId.toLowerCase();
// special handling for service sys.auth.zms which is ourselves
// so we'll just lookup our key in our map
String pubKey = null;
if (isZMSService(domain, service)) {
pubKey = serverPublicKeyMap.get(keyId);
}
// if it's not the ZMS Server public key then lookup the
// public key from ZMS data
if (pubKey == null) {
try {
PublicKeyEntry keyEntry = dbService.getServicePublicKeyEntry(domain, service, keyId, true);
if (keyEntry != null) {
pubKey = keyEntry.getKey();
}
} catch (ResourceException ex) {
if (LOG.isDebugEnabled()) {
LOG.debug("getPublicKey: unable to get public key: " + ex.getMessage());
}
return null;
}
}
if (pubKey == null) {
if (LOG.isWarnEnabled()) {
LOG.warn("getPublicKey: service=" + domain + "." + service + " has no public key registered");
}
return null;
}
if (LOG.isDebugEnabled()) {
LOG.debug("getPublicKey: service public key: " + pubKey);
}
return Crypto.ybase64DecodeString(pubKey);
}
@Override
public void putDefaultAdmins(ResourceContext ctx, String domainName, String auditRef,
DefaultAdmins defaultAdmins) {
final String caller = "putdefaultadmins";
metric.increment(ZMSConsts.HTTP_PUT);
metric.increment(ZMSConsts.HTTP_REQUEST);
metric.increment(caller);
logPrincipal(ctx);
if (LOG.isDebugEnabled()) {
LOG.debug("putDefaultAdmins: domain = " + domainName);
}
if (readOnlyMode) {
throw ZMSUtils.requestError(SERVER_READ_ONLY_MESSAGE, caller);
}
validateRequest(ctx.request(), caller);
validate(domainName, TYPE_DOMAIN_NAME, caller);
final String principalDomain = getPrincipalDomain(ctx);
Object timerMetric = metric.startTiming("putdefaultadmins_timing", domainName, principalDomain);
// verify that request is properly authenticated for this request
verifyAuthorizedServiceOperation(((RsrcCtxWrapper) ctx).principal().getAuthorizedService(), caller);
// for consistent handling of all requests, we're going to convert
// all incoming object values into lower case (e.g. domain, role,
// policy, service, etc name)
domainName = domainName.toLowerCase();
AthenzObject.DEFAULT_ADMINS.convertToLowerCase(defaultAdmins);
defaultAdmins.setAdmins(normalizedAdminUsers(defaultAdmins.getAdmins()));
AthenzDomain domain = getAthenzDomain(domainName, false);
if (domain == null) {
throw ZMSUtils.notFoundError("putDefaultAdmins: Domain not found: '" + domainName + "'", caller);
}
Role adminRole = null;
for (Role role : domain.getRoles()) {
if (ADMIN_ROLE_NAME.equals(ZMSUtils.removeDomainPrefix(role.getName(), domainName, ROLE_PREFIX))) {
adminRole = role;
break;
}
}
if (adminRole == null) {
// if the admin role does not exist in the role section then add it
// this typically should never happen since we have added the
// check to disallow deletion of the admin role but we'll keep
// the logic in place
if (LOG.isInfoEnabled()) {
LOG.info("putDefaultAdmins: Adding domain admin role because no domain admin role was found for domain: " + domainName);
}
adminRole = ZMSUtils.makeAdminRole(domainName, new ArrayList<>());
dbService.executePutRole(ctx, domainName, ADMIN_ROLE_NAME, adminRole, auditRef, caller);
}
Policy adminPolicy = null;
for (Policy policy : domain.getPolicies()) {
if (ADMIN_POLICY_NAME.equals(ZMSUtils.removeDomainPrefix(policy.getName(), domainName, POLICY_PREFIX))) {
adminPolicy = policy;
break;
}
}
if (adminPolicy == null) {
// if the admin policy does not exist in the policy section then add it
// this typically should never happen since we have added the
// check to disallow deletion of the admin policy but we'll keep
// the logic in place
if (LOG.isInfoEnabled()) {
LOG.info("putDefaultAdmins: Adding domain admin policy because no domain admin policy was found for domain: " + domainName);
}
//Create and add the admin policy
adminPolicy = ZMSUtils.makeAdminPolicy(domainName, adminRole);
dbService.executePutPolicy(ctx, domainName, ADMIN_POLICY_NAME, adminPolicy, auditRef, caller);
}
addDefaultAdminAssertion(ctx, domainName, adminPolicy, auditRef, caller);
removeAdminDenyAssertions(ctx, domainName, domain.getPolicies(), domain.getRoles(), adminRole,
defaultAdmins, auditRef);
addDefaultAdminMembers(ctx, domainName, adminRole, defaultAdmins, auditRef, caller);
metric.stopTiming(timerMetric, domainName, principalDomain);
}
void addDefaultAdminAssertion(ResourceContext ctx, String domainName, Policy adminPolicy,
String auditRef, String caller) {
if (LOG.isDebugEnabled()) {
LOG.debug("addDefaultAdminAssertion");
}
String domainAllResources = domainName + ":*";
String domainAdminRole = ZMSUtils.roleResourceName(domainName, ADMIN_ROLE_NAME);
boolean invalidAssertions = false;
List<Assertion> assertions = adminPolicy.getAssertions();
if (assertions != null) {
for (Assertion assertion : assertions) {
String resource = assertion.getResource();
if (resource == null) {
invalidAssertions = true;
continue;
}
String action = assertion.getAction();
if (action == null) {
invalidAssertions = true;
continue;
}
String role = assertion.getRole();
if (role == null) {
invalidAssertions = true;
continue;
}
// the default effect, if no value is specified, is ALLOW
AssertionEffect effect = assertion.getEffect();
if (effect == null) {
effect = AssertionEffect.ALLOW;
}
if (resource.equals(domainAllResources) && action.equals("*") &&
role.equals(domainAdminRole) && (effect == AssertionEffect.ALLOW)) {
// found an assertion for resource = <domain>:*, with action = "*",
// for role = <domainName>:role.admin and effect = "ALLOW"
// (if effect is null then defaults to ALLOW) so no need to add it
return;
}
}
}
if (LOG.isInfoEnabled()) {
LOG.info("Adding default admin assertion to admin policy because no default admin assertion was found for admin policy for domain: " + domainName);
}
// if we had invalid assertions then we're going to
// reset the assertion list otherwise we can't update
if (invalidAssertions) {
adminPolicy.setAssertions(new ArrayList<>());
}
ZMSUtils.addAssertion(adminPolicy, domainAllResources, "*", domainAdminRole, AssertionEffect.ALLOW);
dbService.executePutPolicy(ctx, domainName, ADMIN_POLICY_NAME, adminPolicy, auditRef, caller);
}
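/**
 * scan all policies for DENY assertions against the domain admin role and
 * remove them. if a DENY assertion targets some other role, the default
 * admins are removed from that role instead. any policy left with no
 * assertions after the clean-up is deleted
 **/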
void removeAdminDenyAssertions(ResourceContext ctx, final String domainName, List<Policy> policies,
List<Role> roles, Role adminRole, DefaultAdmins defaultAdmins, final String auditRef) {
final String caller = "putdefaultadmins";
if (LOG.isDebugEnabled()) {
LOG.debug("removeAdminDenyAssertions");
}
for (Policy policy : policies) {
if (LOG.isDebugEnabled()) {
LOG.debug("access: processing policy: " + policy.getName());
}
// Process all the assertions defined in this policy
// As soon as match for an assertion that
// denies access to the admin role is detected, remove it
List<Assertion> assertions = policy.getAssertions();
if (assertions == null) {
continue;
}
List<Assertion> assertionsToDelete = new ArrayList<>();
for (Assertion assertion : assertions) {
// If there is no "effect" in the assertion then default is ALLOW
// so continue because logic is looking for DENY
AssertionEffect effect = assertion.getEffect();
if (effect != AssertionEffect.DENY) {
continue;
}
// If there is no role in the assertion then admin is not being denied
String assertionRole = assertion.getRole();
if (assertionRole == null) {
continue;
}
if (LOG.isDebugEnabled()) {
LOG.debug("Found DENY assertion for role " + assertionRole);
}
// if the role matches the admin role then remove it
if (assertionRole.equals(adminRole.getName())) {
assertionsToDelete.add(assertion);
} else {
removeAdminMembers(ctx, domainName, roles, assertionRole, defaultAdmins, auditRef, caller);
}
}
if (assertionsToDelete.isEmpty()) {
continue;
}
if (LOG.isInfoEnabled()) {
LOG.info("Removing assertion from policy: " + policy.getName() + " because it was for the domain admin role.");
}
for (Assertion assertion : assertionsToDelete) {
assertions.remove(assertion);
}
String policyName = ZMSUtils.removeDomainPrefix(policy.getName(), domainName, POLICY_PREFIX);
if (assertions.isEmpty()) {
if (LOG.isInfoEnabled()) {
LOG.info("Removing policy: " + policyName +
" because it did not have any assertions after removing a DENY" +
" assertion for the domain admin role.");
}
dbService.executeDeletePolicy(ctx, domainName, policyName, auditRef, caller);
} else {
dbService.executePutPolicy(ctx, domainName, policyName, policy, auditRef, caller);
}
}
}
void removeAdminMembers(ResourceContext ctx, String domainName, List<Role> roles,
String assertionRole, DefaultAdmins defaultAdmins, String auditRef, String caller) {
for (Role role : roles) {
if (LOG.isDebugEnabled()) {
LOG.debug("removeAdminMembers: Removing admin members from role: " + role.getName());
}
if (!assertionRole.equals(role.getName())) {
continue;
}
String roleName = ZMSUtils.removeDomainPrefix(role.getName(), domainName, ROLE_PREFIX);
for (String adminName : defaultAdmins.getAdmins()) {
if (isMemberOfRole(role, adminName)) {
if (LOG.isInfoEnabled()) {
LOG.info("removeAdminMembers: removing member: " + adminName + " from role: " +
roleName + " because there is a DENY assertion for this role in this domain.");
}
dbService.executeDeleteMembership(ctx, domainName, roleName, adminName, auditRef, caller);
}
}
}
}
void addDefaultAdminMembers(ResourceContext ctx, String domainName, Role adminRole,
DefaultAdmins defaultAdmins, String auditRef, String caller) {
if (LOG.isDebugEnabled()) {
LOG.debug("addDefaultAdminMembers");
}
for (String adminName : defaultAdmins.getAdmins()) {
if (!isMemberOfRole(adminRole, adminName)) {
if (LOG.isInfoEnabled()) {
LOG.info("Adding member: " + adminName + " to admin role for domain: " + domainName);
}
RoleMember roleMember = new RoleMember().setMemberName(adminName);
dbService.executePutMembership(ctx, domainName, ADMIN_ROLE_NAME,
roleMember, auditRef, caller);
}
}
}
public ServicePrincipal getServicePrincipal(ResourceContext ctx) {
final String caller = "getserviceprincipal";
metric.increment(ZMSConsts.HTTP_GET);
logPrincipal(ctx);
validateRequest(ctx.request(), caller);
Principal principal = ((RsrcCtxWrapper) ctx).principal();
final String principalDomain = principal.getDomain();
Authority authority = principal.getAuthority();
metric.increment(ZMSConsts.HTTP_REQUEST, principalDomain, principalDomain);
metric.increment(caller, principalDomain, principalDomain);
Object timerMetric = metric.startTiming("getserviceprincipal_timing", principalDomain, principalDomain);
// If the authority does not support authorization then we're going to
// generate a new ServiceToken signed by ZMS and send that back.
ServicePrincipal servicePrincipal = new ServicePrincipal();
servicePrincipal.setDomain(principal.getDomain());
servicePrincipal.setService(principal.getName());
if (!authority.allowAuthorization()) {
PrincipalToken sdToken = new PrincipalToken(principal.getCredentials());
PrincipalToken zmsToken = new PrincipalToken.Builder("S1", sdToken.getDomain(), sdToken.getName())
.issueTime(sdToken.getTimestamp())
.expirationWindow(sdToken.getExpiryTime() - sdToken.getTimestamp())
.ip(sdToken.getIP()).keyId(privateKey.getId()).host(serverHostName)
.keyService(ZMSConsts.ZMS_SERVICE).build();
zmsToken.sign(privateKey.getKey());
servicePrincipal.setToken(zmsToken.getSignedToken());
} else {
servicePrincipal.setToken(principal.getCredentials());
}
metric.stopTiming(timerMetric, principalDomain, principalDomain);
return servicePrincipal;
}
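/**
 * look up the allowed operations configured for the given authorized
 * service. a forbidden error is thrown if the service is not registered
 * or has no operations configured
 **/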
ArrayList<AllowedOperation> getAuthorizedServiceOperations(final String authorizedService, final String operationName) {
// lookup the authorized services struct and see if we have the
// service specified in the allowed list
AuthorizedService authzService = serverAuthorizedServices.get(authorizedService);
if (authzService == null) {
throw ZMSUtils.forbiddenError("Unauthorized Service " + authorizedService,
operationName);
}
// if the list is empty then we do not allow any operations
ArrayList<AllowedOperation> ops = authzService.getAllowedOperations();
if (ops == null || ops.isEmpty()) {
throw ZMSUtils.forbiddenError("Unauthorized Operation (" + operationName
+ ") for Service " + authorizedService, operationName);
}
return ops;
}
void verifyAuthorizedServiceOperation(final String authorizedService, final String operationName) {
verifyAuthorizedServiceOperation(authorizedService, operationName, null, null);
}
void verifyAuthorizedServiceRoleOperation(final String authorizedService, final String operationName,
final String roleName) {
// only process this request if we have an authorized service specified
if (authorizedService == null) {
return;
}
// lookup the authorized services struct and see if we have the
// service specified in the allowed list
ArrayList<AllowedOperation> ops = getAuthorizedServiceOperations(authorizedService, operationName);
// otherwise make sure the operation is allowed for this service
boolean opAllowed = false;
for (AllowedOperation op : ops) {
if (!op.getName().equalsIgnoreCase(operationName)) {
continue;
}
opAllowed = op.isOperationAllowedOn("role", roleName, AllowedOperation.MatchType.EQUALS) ||
op.isOperationAllowedOn("role-prefix", roleName, AllowedOperation.MatchType.STARTS_WITH);
break;
}
if (!opAllowed) {
throw ZMSUtils.forbiddenError("Unauthorized Operation (" + operationName
+ ") for Service " + authorizedService
+ " on role " + roleName, operationName);
}
}
/**
* If opItemType and value are not defined in the authorized_services JSON file,
* you can simply pass NULL for these two values.
*/
void verifyAuthorizedServiceOperation(final String authorizedService, final String operationName,
final String opItemType, final String opItemVal) {
// only process this request if we have an authorized service specified
if (authorizedService == null) {
return;
}
// lookup the authorized services struct and see if we have the
// service specified in the allowed list
ArrayList<AllowedOperation> ops = getAuthorizedServiceOperations(authorizedService, operationName);
// otherwise make sure the operation is allowed for this service
boolean opAllowed = false;
for (AllowedOperation op : ops) {
if (!op.getName().equalsIgnoreCase(operationName)) {
continue;
}
opAllowed = op.isOperationAllowedOn(opItemType, opItemVal, AllowedOperation.MatchType.EQUALS);
break;
}
if (!opAllowed) {
throw ZMSUtils.forbiddenError("Unauthorized Operation (" + operationName
+ ") for Service " + authorizedService
+ (opItemType != null && !opItemType.isEmpty() ? " on opItemKey " + opItemType + " and opItemVal " + opItemVal : ""),
operationName);
}
}
@Override
public ResourceAccessList getResourceAccessList(ResourceContext ctx, String principal,
String action) {
final String caller = "getresourceaccesslist";
metric.increment(ZMSConsts.HTTP_GET);
metric.increment(ZMSConsts.HTTP_REQUEST);
metric.increment(caller);
final String principalDomain = getPrincipalDomain(ctx);
Object timerMetric = metric.startTiming("getresourceaccesslist_timing", null, principalDomain);
logPrincipal(ctx);
validateRequest(ctx.request(), caller);
Principal ctxPrincipal = ((RsrcCtxWrapper) ctx).principal();
if (LOG.isDebugEnabled()) {
LOG.debug("getResourceAccessList:(" + ctxPrincipal + ", " + principal
+ ", " + action + ")");
}
if (principal != null) {
validate(principal, TYPE_ENTITY_NAME, caller);
principal = normalizeDomainAliasUser(principal.toLowerCase());
}
if (action != null) {
validate(action, TYPE_COMPOUND_NAME, caller);
action = action.toLowerCase();
}
// if the principal is null then it's a special case
// so we need to make sure the caller is authorized
// to make this request
if (principal == null || principal.isEmpty()) {
if (!isAllowedResourceLookForAllUsers(ctxPrincipal)) {
throw ZMSUtils.forbiddenError("Principal: " + ctxPrincipal.getFullName() +
" not authorized to lookup resources for all users in Athenz", caller);
}
}
ResourceAccessList rsrcAccessList = dbService.getResourceAccessList(principal, action);
metric.stopTiming(timerMetric, null, principalDomain);
return rsrcAccessList;
}
@Override
public Status getStatus(ResourceContext ctx) {
final String caller = "getstatus";
metric.increment(ZMSConsts.HTTP_GET);
logPrincipal(ctx);
// validate our request as status request
validateRequest(ctx.request(), caller, true);
// create our timer object
metric.increment(caller);
final String principalDomain = getPrincipalDomain(ctx);
Object timerMetric = metric.startTiming("getstatus_timing", null, principalDomain);
// for now we're going to verify our database connectivity
// in case of failure we're going to return not found
DomainList dlist = listDomains(null, null, null, null, 0);
if (dlist.getNames() == null || dlist.getNames().isEmpty()) {
throw ZMSUtils.notFoundError("Error - no domains available", caller);
}
// check if we're configured to check for the status file
if (healthCheckFile != null && !healthCheckFile.exists()) {
throw ZMSUtils.notFoundError("Error - no status available", caller);
}
metric.stopTiming(timerMetric, null, principalDomain);
return successServerStatus;
}
String getPrincipalDomain(ResourceContext ctx) {
final Principal ctxPrincipal = ((RsrcCtxWrapper) ctx).principal();
return ctxPrincipal == null ? null : ctxPrincipal.getDomain();
}
void logPrincipal(ResourceContext ctx) {
// we are going to log our principal and validate that it
// contains expected data
final Principal ctxPrincipal = ((RsrcCtxWrapper) ctx).principal();
((RsrcCtxWrapper) ctx).logPrincipal(ctxPrincipal);
if (ctxPrincipal != null && ctxPrincipal.getFullName() != null) {
validate(ctxPrincipal.getFullName(), TYPE_SERVICE_NAME, "logPrincipal");
}
}
public ResourceContext newResourceContext(HttpServletRequest request,
HttpServletResponse response) {
// check to see if we want to allow this URI to be available
// with optional authentication support
boolean optionalAuth = StringUtils.requestUriMatch(request.getRequestURI(),
authFreeUriSet, authFreeUriList);
return new RsrcCtxWrapper(request, response, authorities, optionalAuth, this);
}
@Override
public Schema getRdlSchema(ResourceContext context) {
return schema;
}
static String getServerHostName() {
String serverHostName = System.getProperty(ZMSConsts.ZMS_PROP_HOSTNAME);
if (serverHostName == null || serverHostName.isEmpty()) {
try {
InetAddress localhost = java.net.InetAddress.getLocalHost();
serverHostName = localhost.getCanonicalHostName();
} catch (java.net.UnknownHostException e) {
LOG.info("Unable to determine local hostname: " + e.getMessage());
serverHostName = "localhost";
}
}
return serverHostName;
}
Authority getAuthority(String className) {
LOG.debug("Loading authority {}...", className);
Authority authority;
try {
authority = (Authority) Class.forName(className).getDeclaredConstructor().newInstance();
} catch (ReflectiveOperationException e) {
LOG.error("Invalid Authority class: " + className + " error: " + e.getMessage());
return null;
}
return authority;
}
public static String getRootDir() {
if (ROOT_DIR == null) {
ROOT_DIR = System.getProperty(ZMSConsts.ZMS_PROP_ROOT_DIR, ZMSConsts.STR_DEF_ROOT);
}
return ROOT_DIR;
}
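/**
 * check whether the principal is allowed to reset/delete the given system
 * meta attribute. the authorization policy resides in the sys.auth domain
 * with the resource format sys.auth:meta.{objectType}.{attribute}.{domain},
 * e.g. sys.auth:meta.role.{attribute}.sports for a hypothetical domain "sports"
 **/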
boolean isAllowedSystemMetaDelete(Principal principal, final String reqDomain, final String attribute,
final String objectType) {
// the authorization policy resides in official sys.auth domain
AthenzDomain domain = getAthenzDomain(SYS_AUTH, true);
// evaluate our domain's roles and policies to see if access
// is allowed or not for the given operation and resource
// our actions are always converted to lowercase
String resource = SYS_AUTH + ":meta." + objectType + "." + attribute + "." + reqDomain;
AccessStatus accessStatus = evaluateAccess(domain, principal.getFullName(), "delete",
resource, null, null);
return accessStatus == AccessStatus.ALLOWED;
}
@Override
public void putRoleSystemMeta(ResourceContext ctx, String domainName, String roleName, String attribute,
String auditRef, RoleSystemMeta meta) {
final String caller = "putrolesystemmeta";
metric.increment(ZMSConsts.HTTP_PUT);
logPrincipal(ctx);
if (readOnlyMode) {
throw ZMSUtils.requestError(SERVER_READ_ONLY_MESSAGE, caller);
}
validateRequest(ctx.request(), caller);
validate(domainName, TYPE_DOMAIN_NAME, caller);
validate(roleName, TYPE_ENTITY_NAME, caller);
validate(meta, TYPE_ROLE_SYSTEM_META, caller);
validate(attribute, TYPE_SIMPLE_NAME, caller);
// for consistent handling of all requests, we're going to convert
// all incoming object values into lower case (e.g. domain, role,
// policy, service, etc name)
domainName = domainName.toLowerCase();
roleName = roleName.toLowerCase();
attribute = attribute.toLowerCase();
final String principalDomain = getPrincipalDomain(ctx);
metric.increment(ZMSConsts.HTTP_REQUEST, domainName, principalDomain);
metric.increment(caller, domainName, principalDomain);
Object timerMetric = metric.startTiming("putrolesystemmeta_timing", domainName, principalDomain);
// verify that request is properly authenticated for this request
Principal principal = ((RsrcCtxWrapper) ctx).principal();
verifyAuthorizedServiceOperation(principal.getAuthorizedService(), caller);
if (LOG.isDebugEnabled()) {
LOG.debug("putRoleSystemMeta: name={}, role={} attribute={}, meta={}",
domainName, roleName, attribute, meta);
}
// if we are resetting the configured value then the caller
// must also have a delete action available for the same resource
boolean deleteAllowed = isAllowedSystemMetaDelete(principal, domainName, attribute, "role");
dbService.executePutRoleSystemMeta(ctx, domainName, roleName, meta, attribute, deleteAllowed, auditRef, caller);
metric.stopTiming(timerMetric, domainName, principalDomain);
}
@Override
public void putRoleMeta(ResourceContext ctx, String domainName, String roleName, String auditRef, RoleMeta meta) {
final String caller = "putrolemeta";
metric.increment(ZMSConsts.HTTP_PUT);
logPrincipal(ctx);
if (readOnlyMode) {
throw ZMSUtils.requestError(SERVER_READ_ONLY_MESSAGE, caller);
}
validateRequest(ctx.request(), caller);
validate(domainName, TYPE_DOMAIN_NAME, caller);
validate(roleName, TYPE_ENTITY_NAME, caller);
validate(meta, TYPE_ROLE_META, caller);
// validate meta values - for now we're making sure we're not
// getting any negative values for our integer settings
validateRoleMetaValues(meta);
// for consistent handling of all requests, we're going to convert
// all incoming object values into lower case (e.g. domain, role,
// policy, service, etc name)
domainName = domainName.toLowerCase();
roleName = roleName.toLowerCase();
AthenzObject.ROLE_META.convertToLowerCase(meta);
// validate the user authority settings if they're provided
validateRoleUserAuthorityAttributes(meta.getUserAuthorityFilter(), meta.getUserAuthorityExpiration(), caller);
final String principalDomain = getPrincipalDomain(ctx);
metric.increment(ZMSConsts.HTTP_REQUEST, domainName, principalDomain);
metric.increment(caller, domainName, principalDomain);
Object timerMetric = metric.startTiming("putrolemeta_timing", domainName, principalDomain);
// verify that request is properly authenticated for this request
Principal principal = ((RsrcCtxWrapper) ctx).principal();
verifyAuthorizedServiceOperation(principal.getAuthorizedService(), caller);
if (LOG.isDebugEnabled()) {
LOG.debug("putRoleMeta: name={}, role={} meta={}", domainName, roleName, meta);
}
dbService.executePutRoleMeta(ctx, domainName, roleName, meta, auditRef, caller);
metric.stopTiming(timerMetric, domainName, principalDomain);
}
@Override
public void putMembershipDecision(ResourceContext ctx, String domainName, String roleName,
String memberName, String auditRef, Membership membership) {
final String caller = "putmembershipdecision";
metric.increment(ZMSConsts.HTTP_PUT);
logPrincipal(ctx);
if (readOnlyMode) {
throw ZMSUtils.requestError(SERVER_READ_ONLY_MESSAGE, caller);
}
validateRequest(ctx.request(), caller);
validate(domainName, TYPE_DOMAIN_NAME, caller);
validate(roleName, TYPE_ENTITY_NAME, caller);
validate(memberName, TYPE_MEMBER_NAME, caller);
validate(membership, TYPE_MEMBERSHIP, caller);
// for consistent handling of all requests, we're going to convert
// all incoming object values into lower case (e.g. domain, role,
// policy, service, etc name)
domainName = domainName.toLowerCase();
roleName = roleName.toLowerCase();
memberName = memberName.toLowerCase();
AthenzObject.MEMBERSHIP.convertToLowerCase(membership);
final Principal principal = ((RsrcCtxWrapper) ctx).principal();
final String principalDomain = principal.getDomain();
metric.increment(ZMSConsts.HTTP_REQUEST, domainName, principalDomain);
metric.increment(caller, domainName, principalDomain);
Object timerMetric = metric.startTiming("putmembershipdecision_timing", domainName, principalDomain);
// verify that request is properly authenticated for this request
verifyAuthorizedServiceRoleOperation(principal.getAuthorizedService(), caller, roleName);
// verify that the member name in the URI and object provided match
if (!memberName.equals(membership.getMemberName())) {
throw ZMSUtils.requestError("putMembershipDecision: Member name in URI and Membership object do not match", caller);
}
// role name is optional so we'll verify only if the value is present in the object
if (membership.getRoleName() != null && !roleName.equals(membership.getRoleName())) {
throw ZMSUtils.requestError("putMembershipDecision: Role name in URI and Membership object do not match", caller);
}
AthenzDomain domain = getAthenzDomain(domainName, false);
Role role = getRoleFromDomain(roleName, domain);
if (role == null) {
throw ZMSUtils.requestError("Invalid rolename specified", caller);
}
// initially create the role member and only set the
// user name which is all we need in case we need to
// lookup the pending entry for review approval
// we'll set the state and expiration after the
// authorization check is successful
RoleMember roleMember = new RoleMember();
roleMember.setMemberName(normalizeDomainAliasUser(memberName));
// authorization check
if (!isAllowedPutMembershipDecision(principal, domain, role, roleMember)) {
throw ZMSUtils.forbiddenError("putMembershipDecision: principal is not authorized to approve / reject members", caller);
}
roleMember.setApproved(membership.getApproved());
roleMember.setActive(membership.getActive());
// set the user state, expiration and review date values
// no need to update the review/expiration dates if the
// request is going to be rejected
if (roleMember.getApproved() == Boolean.TRUE) {
setRoleMemberExpiration(domain, role, roleMember, membership, caller);
setRoleMemberReview(role, roleMember, membership);
// check to see if we need to validate the principal
// but only if the decision is to approve. We don't
// want to block removal of rejected user requests
final String userAuthorityFilter = enforcedUserAuthorityFilter(role.getUserAuthorityFilter());
if (shouldValidateRoleMembers(userAuthorityFilter)) {
validateRoleMemberPrincipal(roleMember.getMemberName(), userAuthorityFilter, caller);
}
}
dbService.executePutMembershipDecision(ctx, domainName, roleName,
roleMember, auditRef, caller);
metric.stopTiming(timerMetric, domainName, principalDomain);
}
private boolean isAllowedPutMembershipDecision(final Principal principal, final AthenzDomain domain,
final Role role, final RoleMember roleMember) {
final String caller = "putmembershipdecision";
// if this is an audit enabled domain then we're going to carry
// out the authorization in the sys.auth.audit domains
if (role.getAuditEnabled() == Boolean.TRUE) {
return isAllowedAuditRoleMembershipApproval(principal, domain);
}
// otherwise we're going to do a standard check if the principal
// is authorized to update the domain role membership
boolean allowed = isAllowedPutMembershipAccess(principal, domain, role.getName());
// if the user is allowed to make changes in the domain but
// the role is review enabled then we need to make sure
// the approver cannot be the same as the requester
if (allowed && role.getReviewEnabled() == Boolean.TRUE) {
Membership pendingMember = dbService.getMembership(domain.getName(),
ZMSUtils.extractRoleName(domain.getName(), role.getName()),
roleMember.getMemberName(), 0, true);
// if the member is not found then we're going to throw a not found exception
if (!pendingMember.getIsMember()) {
throw ZMSUtils.notFoundError("Pending member " + roleMember.getMemberName() + " not found", caller);
}
if (pendingMember.getRequestPrincipal().equalsIgnoreCase(principal.getFullName())) {
LOG.error("Principal {} cannot approve his/her own request", principal.getFullName());
allowed = false;
}
}
return allowed;
}
boolean isAllowedAuditRoleMembershipApproval(Principal principal, final AthenzDomain reqDomain) {
// the authorization policy resides in official sys.auth.audit domains
// first we're going to check the per-domain one and then we'll
// follow up with the per-org domain
AthenzDomain authDomain = getAthenzDomain(ZMSConsts.SYS_AUTH_AUDIT_BY_DOMAIN, true);
// evaluate our domain's roles and policies to see if access
// is allowed or not for the given operation and resource
// our actions are always converted to lowercase
String resource = ZMSConsts.SYS_AUTH_AUDIT_BY_DOMAIN + ":audit." + reqDomain.getDomain().getName();
AccessStatus accessStatus = evaluateAccess(authDomain, principal.getFullName(),
"update", resource, null, null);
if (accessStatus == AccessStatus.ALLOWED) {
return true;
}
// if we didn't find any authorization for the per-domain setup
// we're going to look at the per-org setup
authDomain = getAthenzDomain(ZMSConsts.SYS_AUTH_AUDIT_BY_ORG, true);
resource = ZMSConsts.SYS_AUTH_AUDIT_BY_ORG + ":audit." + reqDomain.getDomain().getOrg();
accessStatus = evaluateAccess(authDomain, principal.getFullName(),
"update", resource, null, null);
return accessStatus == AccessStatus.ALLOWED;
}
Role getRoleFromDomain(final String roleName, AthenzDomain domain) {
if (domain != null && domain.getRoles() != null) {
for (Role role : domain.getRoles()) {
if (role.getName().equalsIgnoreCase(domain.getName() + AuthorityConsts.ROLE_SEP + roleName)) {
return role;
}
}
}
return null;
}
boolean isAllowedPutMembershipAccess(Principal principal, final AthenzDomain domain, final String roleName) {
// evaluate our domain's roles and policies to see if access
// is allowed or not for the given operation and resource
// our actions are always converted to lowercase
return evaluateAccess(domain, principal.getFullName(), "update", roleName, null, null) == AccessStatus.ALLOWED;
}
boolean isAllowedPutMembershipWithoutApproval(Principal principal, final AthenzDomain reqDomain, final Role role) {
if (role.getAuditEnabled() == Boolean.TRUE) {
return false;
}
return isAllowedPutMembershipAccess(principal, reqDomain, role.getName());
}
boolean isAllowedPutMembership(Principal principal, final AthenzDomain domain, final Role role,
final RoleMember member) {
// first let's check if the principal has update access on the role
if (isAllowedPutMembershipAccess(principal, domain, role.getName())) {
// even with update access, if the role is audit/review enabled, member status
// cannot be set to active/approved; it has to be approved by audit/review admins.
// For all other roles, set member status to active/approved immediately
boolean auditEnabled = (role.getAuditEnabled() == Boolean.TRUE || role.getReviewEnabled() == Boolean.TRUE);
member.setActive(!auditEnabled);
member.setApproved(!auditEnabled);
return true;
} else if (role.getSelfServe() == Boolean.TRUE) {
// if the role is self-serve then users are allowed to add anyone:
// since the request must be approved by someone else, we'll allow it,
// but with member status set to inactive.
member.setActive(false);
member.setApproved(false);
return true;
}
return false;
}
boolean isAllowedDeletePendingMembership(Principal principal, final String domainName,
final String roleName, final String memberName) {
// first let's check if the principal has update access on the role
AthenzDomain domain = getAthenzDomain(domainName, false);
if (domain == null) {
throw ZMSUtils.notFoundError("Domain not found: " + domainName, "deletePendingMembership");
}
if (isAllowedPutMembershipAccess(principal, domain, ZMSUtils.roleResourceName(domainName, roleName))) {
return true;
}
// check if the requestor of the pending request is the principal
Membership pendingMember = dbService.getMembership(domainName, roleName, memberName, 0, true);
return pendingMember != null && principal.getFullName().equals(pendingMember.getRequestPrincipal());
}
@Override
public DomainRoleMembership getPendingDomainRoleMembersList(ResourceContext ctx, String principal) {
final String caller = "getpendingdomainrolememberslist";
metric.increment(ZMSConsts.HTTP_GET);
metric.increment(ZMSConsts.HTTP_REQUEST);
metric.increment(caller);
final Principal ctxPrincipal = ((RsrcCtxWrapper) ctx).principal();
Object timerMetric = metric.startTiming("getpendingdomainrolememberslist_timing", null, ctxPrincipal.getDomain());
logPrincipal(ctx);
validateRequest(ctx.request(), caller);
String checkPrincipal;
if (principal != null && !principal.isEmpty()) {
validate(principal, TYPE_ENTITY_NAME, caller);
checkPrincipal = normalizeDomainAliasUser(principal.toLowerCase());
} else {
checkPrincipal = ctxPrincipal.getFullName();
}
if (LOG.isDebugEnabled()) {
LOG.debug("getpendingdomainrolememberslist principal: ({})", checkPrincipal);
}
DomainRoleMembership domainRoleMembership = dbService.getPendingDomainRoleMembers(checkPrincipal);
metric.stopTiming(timerMetric, null, ctxPrincipal.getDomain());
return domainRoleMembership;
}
@Override
public void putRoleReview(ResourceContext ctx, String domainName, String roleName, String auditRef, Role role) {
final String caller = "putrolereview";
metric.increment(ZMSConsts.HTTP_PUT);
logPrincipal(ctx);
if (readOnlyMode) {
throw ZMSUtils.requestError(SERVER_READ_ONLY_MESSAGE, caller);
}
validateRequest(ctx.request(), caller);
validate(domainName, TYPE_DOMAIN_NAME, caller);
validate(roleName, TYPE_ENTITY_NAME, caller);
validate(role, TYPE_ROLE, caller);
// for consistent handling of all requests, we're going to convert
// all incoming object values into lower case (e.g. domain, role,
// policy, service, etc name)
domainName = domainName.toLowerCase();
roleName = roleName.toLowerCase();
AthenzObject.ROLE.convertToLowerCase(role);
final String principalDomain = getPrincipalDomain(ctx);
metric.increment(ZMSConsts.HTTP_REQUEST, domainName, principalDomain);
metric.increment(caller, domainName, principalDomain);
Object timerMetric = metric.startTiming(caller + "_timing", domainName, principalDomain);
// verify that request is properly authenticated for this request
verifyAuthorizedServiceOperation(((RsrcCtxWrapper) ctx).principal().getAuthorizedService(), caller);
// verify the role name in the URI and request are consistent
if (!isConsistentRoleName(domainName, roleName, role)) {
throw ZMSUtils.requestError(caller + ": Inconsistent role names - expected: "
+ ZMSUtils.roleResourceName(domainName, roleName) + ", actual: "
+ role.getName(), caller);
}
AthenzDomain domain = getAthenzDomain(domainName, false);
if (domain == null) {
throw ZMSUtils.notFoundError("No such domain: " + domainName, caller);
}
Role dbRole = getRoleFromDomain(roleName, domain);
if (configuredDueDateMillis(domain.getDomain().getMemberExpiryDays(), dbRole.getMemberExpiryDays()) == 0 &&
configuredDueDateMillis(domain.getDomain().getServiceExpiryDays(), dbRole.getServiceExpiryDays()) == 0) {
throw ZMSUtils.requestError(caller + ": Domain member expiry / Role member expiry must be set to review the role. ", caller);
}
// normalize and remove duplicate members
normalizeRoleMembers(role);
// update role expiry based on our configurations
updateRoleMemberExpiration(
domain.getDomain().getMemberExpiryDays(),
dbRole.getMemberExpiryDays(),
domain.getDomain().getServiceExpiryDays(),
dbRole.getServiceExpiryDays(),
role.getRoleMembers());
// update role review based on our configurations
updateRoleMemberReviewReminder(dbRole.getMemberReviewDays(), dbRole.getServiceReviewDays(), role.getRoleMembers());
// process our request
dbService.executePutRoleReview(ctx, domainName, roleName, role, auditRef, caller);
metric.stopTiming(timerMetric, domainName, principalDomain);
}
void validateRoleUserAuthorityAttributes(final String authorityFilter, final String authorityExpiration,
final String caller) {
if (authorityFilter != null && !authorityFilter.isEmpty()) {
if (userAuthority == null) {
throw ZMSUtils.requestError("Role User Authority filter specified without a valid user authority", caller);
}
Set<String> attrSet = userAuthority.booleanAttributesSupported();
for (String attr : authorityFilter.split(",")) {
if (!attrSet.contains(attr)) {
throw ZMSUtils.requestError(attr + " is not a valid user authority attribute", caller);
}
}
}
if (authorityExpiration != null && !authorityExpiration.isEmpty()) {
if (userAuthority == null) {
throw ZMSUtils.requestError("Role User Authority expiry specified without a valid user authority", caller);
}
Set<String> attrSet = userAuthority.dateAttributesSupported();
if (!attrSet.contains(authorityExpiration)) {
throw ZMSUtils.requestError(authorityExpiration + " is not a valid user authority date attribute", caller);
}
}
}
}
| 1 | 5,115 | Why is this a class field? It should be something local within the auto-apply template method, since we only need it once to process templates and never use it again. | AthenZ-athenz | java |
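A hedged Java sketch of the shape of change the reviewer is asking for; the names below (`TemplateProcessor`, `solutionTemplates`, `autoApplyTemplates`, `loadSolutionTemplates`) are all hypothetical, since the actual patch hunk is not reproduced in this row:

```java
import java.util.List;

class TemplateProcessor {
    // Reviewer's point: no class field is needed for a value used exactly once.
    void autoApplyTemplates() {
        // hypothetical local variable replacing the one-shot class field;
        // it goes out of scope as soon as template processing completes
        List<String> solutionTemplates = loadSolutionTemplates();
        for (String template : solutionTemplates) {
            applyTemplate(template);
        }
    }

    private List<String> loadSolutionTemplates() {
        return List.of("vip-provisioning"); // hypothetical template name
    }

    private void applyTemplate(String template) {
        // template application logic would go here
    }
}
```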
@@ -39,14 +39,14 @@ module.exports = function(grunt) {
const { diff: rolesTable, notes: rolesFootnotes } = getDiff(
roles,
- axe.commons.aria.lookupTable.role,
+ axe.utils.getStandards().ariaRoles,
listType
);
const ariaQueryAriaAttributes = getAriaQueryAttributes();
const { diff: attributesTable, notes: attributesFootnotes } = getDiff(
ariaQueryAriaAttributes,
- axe.commons.aria.lookupTable.attributes,
+ axe.utils.getStandards().ariaAttrs,
listType
);
const attributesTableMarkdown = mdTable([ | 1 | /*eslint-env node */
'use strict';
const { roles, aria: props } = require('aria-query');
const mdTable = require('markdown-table');
const format = require('../shared/format');
module.exports = function(grunt) {
grunt.registerMultiTask(
'aria-supported',
'Task for generating a diff of supported aria roles and properties.',
function() {
/**
* NOTE:
* `axe` has to be dynamically required at this stage,
* as `axe` does not exist until grunt task `build:uglify` is complete,
* hence cannot be required at the top of the file.
*/
const { langs } = this.options();
const fileNameSuffix = langs && langs.length > 0 ? `${langs[0]}` : '';
const axe = require(`../../axe${fileNameSuffix}`);
const listType = this.data.listType.toLowerCase();
const headings = {
main:
`# ARIA Roles and Attributes ${
listType === 'all' ? 'available' : listType
} in axe-core.\n\n` +
'It can be difficult to know which features of web technologies are accessible across ' +
'different platforms, and with different screen readers and other assistive technologies. ' +
'Axe-core does some of this work for you, by raising issues when accessibility features are ' +
'used that are known to cause problems.\n\n' +
'This page contains a list of ARIA 1.1 features that axe-core raises as unsupported. ' +
'For more information, read [We’ve got your back with “Accessibility Supported” in axe]' +
'(https://www.deque.com/blog/weve-got-your-back-with-accessibility-supported-in-axe/).\n\n' +
'For a detailed description about how accessibility support is decided, see [How we make ' +
'decisions on rules](accessibility-supported.md).',
attributesMdTableHeader: ['aria-attribute', 'axe-core support']
};
const { diff: rolesTable, notes: rolesFootnotes } = getDiff(
roles,
axe.commons.aria.lookupTable.role,
listType
);
const ariaQueryAriaAttributes = getAriaQueryAttributes();
const { diff: attributesTable, notes: attributesFootnotes } = getDiff(
ariaQueryAriaAttributes,
axe.commons.aria.lookupTable.attributes,
listType
);
const attributesTableMarkdown = mdTable([
headings.attributesMdTableHeader,
...attributesTable
]);
const footnotes = [...rolesFootnotes, ...attributesFootnotes].map(
(footnote, index) => `[^${index + 1}]: ${footnote}`
);
const content = `${headings.main}\n\n## Attributes\n\n${attributesTableMarkdown}\n\n${footnotes}`;
const destFile = this.data.destFile;
// Format the content so Prettier doesn't create a diff after running.
// See https://github.com/dequelabs/axe-core/issues/1310.
const formattedContent = format(content, destFile);
// write `aria supported` file contents
grunt.file.write(destFile, formattedContent);
/**
* Get the list of aria attributes from `aria-query`
* @returns {Set|Object} collection of aria attributes from `aria-query` module
*/
function getAriaQueryAttributes() {
const ariaKeys = Array.from(props).map(([key]) => key);
const roleAriaKeys = Array.from(roles).reduce((out, [name, rule]) => {
return [...out, ...Object.keys(rule.props)];
}, []);
return new Set(axe.utils.uniqueArray(roleAriaKeys, ariaKeys));
}
/**
* Given a `base` Map and a `subject` Map object,
* the function converts the `base` Map entries to an array, which is sorted and then enumerated to compare each entry against the `subject` Map.
* The function builds the rows of a `markdown table` and returns notes to append as footnotes.
* @param {Map} base Base Map Object
* @param {Map} subject Subject Map Object
* @param {String} type type to compare
* @returns {Array<Object>[]}
* @example Example Output: [ [ 'alert', 'No' ], [ 'figure', 'Yes' ] ]
*/
function getDiff(base, subject, type) {
const diff = [];
const notes = [];
const sortedBase = Array.from(base.entries()).sort();
sortedBase.forEach(([key]) => {
switch (type) {
case 'supported':
if (
subject.hasOwnProperty(key) &&
subject[key].unsupported === false
) {
diff.push([`${key}`, 'Yes']);
}
break;
case 'unsupported':
if (
(subject[key] && subject[key].unsupported === true) ||
!subject.hasOwnProperty(key)
) {
diff.push([`${key}`, 'No']);
} else if (
subject[key] &&
subject[key].unsupported &&
subject[key].unsupported.exceptions
) {
diff.push([`${key}`, `Mixed[^${notes.length + 1}]`]);
notes.push(
getSupportedElementsAsFootnote(
subject[key].unsupported.exceptions
)
);
}
break;
case 'all':
default:
diff.push([
`${key}`,
subject.hasOwnProperty(key) &&
subject[key].unsupported === false
? 'Yes'
: 'No'
]);
break;
}
});
return {
diff,
notes
};
}
/**
* Parse a list of unsupported exception elements and add a footnote
* detailing which HTML elements are supported.
*
* @param {Array<String|Object>} elements List of supported elements
* @returns {Array<String|Object>} notes
*/
function getSupportedElementsAsFootnote(elements) {
const notes = [];
const supportedElements = elements.map(element => {
if (typeof element === 'string') {
return `\`<${element}>\``;
}
/**
* if element is not a string it will be an object with structure:
{
nodeName: string,
properties: {
type: {string|string[]}
}
}
*/
return Object.keys(element.properties).map(prop => {
const value = element.properties[prop];
// the 'type' property can be a string or an array
if (typeof value === 'string') {
return `\`<${element.nodeName} ${prop}="${value}">\``;
}
// output format for an array of types:
// <input type="button" | "checkbox">
const values = value.map(v => `"${v}"`).join(' | ');
return `\`<${element.nodeName} ${prop}=${values}>\``;
});
});
notes.push('Supported on elements: ' + supportedElements.join(', '));
return notes;
}
}
);
};
| 1 | 16,920 | I agree with Stephen; it's better to invoke getStandards() only once. | dequelabs-axe-core | js |
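A minimal sketch of the consolidation being suggested, using only identifiers visible in the diff above: call `axe.utils.getStandards()` once, hold the result in a local, and reuse it for both `getDiff` calls.

```js
// Call getStandards() a single time and reuse the result.
const standards = axe.utils.getStandards();

const { diff: rolesTable, notes: rolesFootnotes } = getDiff(
  roles,
  standards.ariaRoles,
  listType
);

const ariaQueryAriaAttributes = getAriaQueryAttributes();
const { diff: attributesTable, notes: attributesFootnotes } = getDiff(
  ariaQueryAriaAttributes,
  standards.ariaAttrs,
  listType
);
```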
@@ -198,7 +198,7 @@ class GridInterface(DictInterface):
@classmethod
- def canonicalize(cls, dataset, data, coord_dims=None):
+ def canonicalize(cls, dataset, data, coord_dims=None, irregular_dims=[]):
"""
Canonicalize takes an array of values as input and
reorients and transposes it to match the canonical | 1 | from collections import OrderedDict, defaultdict, Iterable
try:
import itertools.izip as zip
except ImportError:
pass
import numpy as np
from .dictionary import DictInterface
from .interface import Interface, DataError
from ..dimension import Dimension
from ..element import Element
from ..dimension import OrderedDict as cyODict
from ..ndmapping import NdMapping, item_check
from .. import util
class GridInterface(DictInterface):
"""
Interface for simple dictionary-based dataset format using a
compressed representation that uses the cartesian product between
key dimensions. As with DictInterface, the dictionary keys correspond
to the column (i.e. dimension) names and the values are NumPy arrays
representing the values in that column.
To use this compressed format, the key dimensions must be orthogonal
to one another with each key dimension specifying an axis of the
multidimensional space occupied by the value dimension data. For
instance, given temperature recordings sampled regularly across
the earth's surface, a list of N unique latitudes and M unique
longitudes can specify the position of NxM temperature samples.
"""
types = (dict, OrderedDict, cyODict)
datatype = 'grid'
gridded = True
@classmethod
def init(cls, eltype, data, kdims, vdims):
if kdims is None:
kdims = eltype.kdims
if vdims is None:
vdims = eltype.vdims
if not vdims:
raise ValueError('GridInterface interface requires at least '
'one value dimension.')
ndims = len(kdims)
dimensions = [d.name if isinstance(d, Dimension) else
d for d in kdims + vdims]
if isinstance(data, tuple):
data = {d: v for d, v in zip(dimensions, data)}
elif isinstance(data, list) and data == []:
data = OrderedDict([(d, []) for d in dimensions])
elif not any(isinstance(data, tuple(t for t in interface.types if t is not None))
for interface in cls.interfaces.values()):
data = {k: v for k, v in zip(dimensions, zip(*data))}
elif isinstance(data, np.ndarray):
if data.ndim == 1:
if eltype._auto_indexable_1d and len(kdims)+len(vdims)>1:
data = np.column_stack([np.arange(len(data)), data])
else:
data = np.atleast_2d(data).T
data = {k: data[:,i] for i,k in enumerate(dimensions)}
elif isinstance(data, list) and data == []:
data = {d: np.array([]) for d in dimensions[:ndims]}
data.update({d: np.empty((0,) * ndims) for d in dimensions[ndims:]})
elif not isinstance(data, dict):
raise TypeError('GridInterface must be instantiated as a '
'dictionary or tuple')
for dim in kdims+vdims:
name = dim.name if isinstance(dim, Dimension) else dim
if name not in data:
raise ValueError("Values for dimension %s not found" % dim)
if not isinstance(data[name], np.ndarray):
data[name] = np.array(data[name])
kdim_names = [d.name if isinstance(d, Dimension) else d for d in kdims]
vdim_names = [d.name if isinstance(d, Dimension) else d for d in vdims]
expected = tuple([len(data[kd]) for kd in kdim_names])
irregular_shape = data[kdim_names[0]].shape if kdim_names else ()
valid_shape = irregular_shape if len(irregular_shape) > 1 else expected[::-1]
shapes = tuple([data[kd].shape for kd in kdim_names])
for vdim in vdim_names:
shape = data[vdim].shape
error = DataError if len(shape) > 1 else ValueError
if (not expected and shape == (1,)) or (len(set((shape,)+shapes)) == 1 and len(shape) > 1):
# If empty or an irregular mesh
pass
elif len(shape) != len(expected):
raise error('The shape of the %s value array does not '
'match the expected dimensionality indicated '
'by the key dimensions. Expected %d-D array, '
'found %d-D array.' % (vdim, len(expected), len(shape)))
elif any((s!=e and (s+1)!=e) for s, e in zip(shape, valid_shape)):
raise error('Key dimension values and value array %s '
'shapes do not match. Expected shape %s, '
'actual shape: %s' % (vdim, valid_shape, shape), cls)
return data, {'kdims':kdims, 'vdims':vdims}, {}
@classmethod
def irregular(cls, dataset, dim):
return dataset.data[dim.name if isinstance(dim, Dimension) else dim].ndim > 1
@classmethod
def isscalar(cls, dataset, dim):
return len(np.unique(cls.values(dataset, dim, expanded=False))) == 1
@classmethod
def validate(cls, dataset, vdims=True):
Interface.validate(dataset, vdims)
@classmethod
def dimension_type(cls, dataset, dim):
if dim in dataset.dimensions():
arr = cls.values(dataset, dim, False, False)
else:
return None
return arr.dtype.type
@classmethod
def shape(cls, dataset, gridded=False):
shape = dataset.data[dataset.vdims[0].name].shape
if gridded:
return shape
else:
return (np.product(shape), len(dataset.dimensions()))
@classmethod
def length(cls, dataset):
return cls.shape(dataset)[0]
@classmethod
def _infer_interval_breaks(cls, coord, axis=0):
"""
>>> GridInterface._infer_interval_breaks(np.arange(5))
array([-0.5, 0.5, 1.5, 2.5, 3.5, 4.5])
>>> GridInterface._infer_interval_breaks([[0, 1], [3, 4]], axis=1)
array([[-0.5, 0.5, 1.5],
[ 2.5, 3.5, 4.5]])
"""
coord = np.asarray(coord)
deltas = 0.5 * np.diff(coord, axis=axis)
first = np.take(coord, [0], axis=axis) - np.take(deltas, [0], axis=axis)
last = np.take(coord, [-1], axis=axis) + np.take(deltas, [-1], axis=axis)
trim_last = tuple(slice(None, -1) if n == axis else slice(None)
for n in range(coord.ndim))
return np.concatenate([first, coord[trim_last] + deltas, last], axis=axis)
@classmethod
def coords(cls, dataset, dim, ordered=False, expanded=False, edges=False):
"""
Returns the coordinates along a dimension. Ordered ensures
coordinates are in ascending order and expanded creates an
ND-array matching the dimensionality of the dataset.
"""
dim = dataset.get_dimension(dim, strict=True)
irregular = cls.irregular(dataset, dim)
if irregular or expanded:
if irregular:
data = dataset.data[dim.name]
else:
data = util.expand_grid_coords(dataset, dim)
if edges and data.shape == dataset.data[dataset.vdims[0].name].shape:
data = cls._infer_interval_breaks(data, axis=1)
data = cls._infer_interval_breaks(data, axis=0)
return data
data = dataset.data[dim.name]
if ordered and np.all(data[1:] < data[:-1]):
data = data[::-1]
shape = cls.shape(dataset, True)
if dim in dataset.kdims:
idx = dataset.get_dimension_index(dim)
isedges = (dim in dataset.kdims and len(shape) == dataset.ndims
and len(data) == (shape[dataset.ndims-idx-1]+1))
else:
isedges = False
if edges and not isedges:
data = cls._infer_interval_breaks(data)
elif not edges and isedges:
data = np.convolve(data, [0.5, 0.5], 'valid')
return data
@classmethod
def canonicalize(cls, dataset, data, coord_dims=None):
"""
Canonicalize takes an array of values as input and
reorients and transposes it to match the canonical
format expected by plotting functions. In addition
to the dataset and the particular array to apply
transforms to, a list of coord_dims may be supplied
in case the array indexing does not match the key
dimensions of the dataset.
"""
if coord_dims is None:
coord_dims = dataset.dimensions('key', label='name')[::-1]
# Reorient data
invert = False
slices = []
for d in coord_dims:
coords = cls.coords(dataset, d)
if np.all(coords[1:] < coords[:-1]):
slices.append(slice(None, None, -1))
invert = True
else:
slices.append(slice(None))
data = data[slices] if invert else data
# Transpose data
dims = [name for name in coord_dims
if isinstance(cls.coords(dataset, name), np.ndarray)]
dropped = [dims.index(d) for d in dims if d not in dataset.kdims]
inds = [dims.index(kd.name) for kd in dataset.kdims]
inds = [i - sum([1 for d in dropped if i>=d]) for i in inds]
if dropped:
data = data.squeeze(axis=tuple(dropped))
if inds:
data = data.transpose(inds[::-1])
# Allow lower dimensional views into data
if len(dataset.kdims) < 2:
data = data.flatten()
return data
@classmethod
def invert_index(cls, index, length):
if np.isscalar(index):
return length - index
elif isinstance(index, slice):
start, stop = index.start, index.stop
new_start, new_stop = None, None
if start is not None:
new_stop = length - start
if stop is not None:
new_start = length - stop
return slice(new_start-1, new_stop-1)
elif isinstance(index, Iterable):
new_index = []
for ind in index:
new_index.append(length-ind)
return new_index
@classmethod
def ndloc(cls, dataset, indices):
selected = {}
adjusted_inds = []
all_scalar = True
for i, (kd, ind) in enumerate(zip(dataset.kdims[::-1], indices)):
coords = cls.coords(dataset, kd.name, True)
if np.isscalar(ind):
ind = [ind]
else:
all_scalar = False
selected[kd.name] = coords[ind]
adjusted_inds.append(ind)
for kd in dataset.kdims:
if kd.name not in selected:
coords = cls.coords(dataset, kd.name)
selected[kd.name] = coords
all_scalar = False
for d in dataset.dimensions():
if d in dataset.kdims and not cls.irregular(dataset, d):
continue
arr = dataset.dimension_values(d, flat=False)
if all_scalar and len(dataset.vdims) == 1:
return arr[tuple(ind[0] for ind in adjusted_inds)]
selected[d.name] = arr[tuple(adjusted_inds)]
return tuple(selected[d.name] for d in dataset.dimensions())
@classmethod
def values(cls, dataset, dim, expanded=True, flat=True):
dim = dataset.get_dimension(dim, strict=True)
if dim in dataset.vdims or dataset.data[dim.name].ndim > 1:
data = dataset.data[dim.name]
data = cls.canonicalize(dataset, data)
return data.T.flatten() if flat else data
elif expanded:
data = cls.coords(dataset, dim.name, expanded=True)
return data.flatten() if flat else data
else:
return cls.coords(dataset, dim.name, ordered=True)
@classmethod
def groupby(cls, dataset, dim_names, container_type, group_type, **kwargs):
# Get dimensions information
dimensions = [dataset.get_dimension(d, strict=True) for d in dim_names]
kdims = [kdim for kdim in dataset.kdims if kdim not in dimensions]
invalid = [d for d in dimensions if dataset.data[d.name].ndim > 1]
if invalid:
if len(invalid) == 1: invalid = "'%s'" % invalid[0]
raise ValueError("Cannot groupby irregularly sampled dimension(s) %s."
% invalid)
# Update the kwargs appropriately for Element group types
group_kwargs = {}
group_type = dict if group_type == 'raw' else group_type
if issubclass(group_type, Element):
group_kwargs.update(util.get_param_values(dataset))
group_kwargs['kdims'] = kdims
group_kwargs.update(kwargs)
drop_dim = any(d not in group_kwargs['kdims'] for d in kdims)
# Find all the keys along supplied dimensions
keys = [cls.coords(dataset, d.name) for d in dimensions]
# Iterate over the unique entries applying selection masks
grouped_data = []
for unique_key in zip(*util.cartesian_product(keys)):
select = dict(zip(dim_names, unique_key))
if drop_dim:
group_data = dataset.select(**select)
group_data = group_data if np.isscalar(group_data) else group_data.columns()
else:
group_data = cls.select(dataset, **select)
if np.isscalar(group_data):
group_data = {dataset.vdims[0].name: np.atleast_1d(group_data)}
for dim, v in zip(dim_names, unique_key):
group_data[dim] = np.atleast_1d(v)
elif not drop_dim:
for vdim in dataset.vdims:
group_data[vdim.name] = np.squeeze(group_data[vdim.name])
group_data = group_type(group_data, **group_kwargs)
grouped_data.append((tuple(unique_key), group_data))
if issubclass(container_type, NdMapping):
with item_check(False):
return container_type(grouped_data, kdims=dimensions)
else:
return container_type(grouped_data)
@classmethod
def key_select_mask(cls, dataset, values, ind):
if isinstance(ind, tuple):
ind = slice(*ind)
if isinstance(ind, np.ndarray):
mask = ind
elif isinstance(ind, slice):
mask = True
if ind.start is not None:
mask &= ind.start <= values
if ind.stop is not None:
mask &= values < ind.stop
# Expand empty mask
if mask is True:
mask = np.ones(values.shape, dtype=np.bool)
elif isinstance(ind, (set, list)):
iter_slcs = []
for ik in ind:
iter_slcs.append(values == ik)
mask = np.logical_or.reduce(iter_slcs)
elif callable(ind):
mask = ind(values)
elif ind is None:
mask = None
else:
index_mask = values == ind
if (dataset.ndims == 1 or dataset._binned) and np.sum(index_mask) == 0:
data_index = np.argmin(np.abs(values - ind))
mask = np.zeros(len(values), dtype=np.bool)
mask[data_index] = True
else:
mask = index_mask
if mask is None:
mask = np.ones(values.shape, dtype=bool)
return mask
@classmethod
def select(cls, dataset, selection_mask=None, **selection):
dimensions = dataset.kdims
val_dims = [vdim for vdim in dataset.vdims if vdim in selection]
if val_dims:
raise IndexError('Cannot slice value dimensions in compressed format, '
'convert to expanded format before slicing.')
indexed = cls.indexed(dataset, selection)
full_selection = [(d, selection.get(d.name, selection.get(d.label)))
for d in dimensions]
data = {}
value_select = []
for i, (dim, ind) in enumerate(full_selection):
irregular = cls.irregular(dataset, dim)
values = cls.coords(dataset, dim, irregular)
mask = cls.key_select_mask(dataset, values, ind)
if irregular:
if np.isscalar(ind) or isinstance(ind, (set, list)):
raise IndexError("Indexing not supported for irregularly "
"sampled data. %s value along %s dimension."
"must be a slice or 2D boolean mask."
% (ind, dim))
mask = mask.max(axis=i)
elif dataset._binned:
edges = cls.coords(dataset, dim, False, edges=True)
inds = np.argwhere(mask)
if np.isscalar(ind):
emin, emax = edges.min(), edges.max()
if ind < emin:
raise IndexError("Index %s less than lower bound "
"of %s for %s dimension." % (ind, emin, dim))
elif ind >= emax:
raise IndexError("Index %s more than or equal to upper bound "
"of %s for %s dimension." % (ind, emax, dim))
idx = max([np.digitize([ind], edges)[0]-1, 0])
mask = np.zeros(len(values), dtype=np.bool)
mask[idx] = True
values = edges[idx:idx+2]
elif len(inds):
values = edges[inds.min(): inds.max()+2]
else:
values = edges[0:0]
else:
values = values[mask]
values, mask = np.asarray(values), np.asarray(mask)
value_select.append(mask)
data[dim.name] = np.array([values]) if np.isscalar(values) else values
int_inds = [np.argwhere(v) for v in value_select][::-1]
index = np.ix_(*[np.atleast_1d(np.squeeze(ind)) if ind.ndim > 1 else np.atleast_1d(ind)
for ind in int_inds])
for kdim in dataset.kdims:
if cls.irregular(dataset, kdim):
data[kdim.name] = np.asarray(data[kdim.name])[index]
for vdim in dataset.vdims:
data[vdim.name] = np.asarray(dataset.data[vdim.name])[index]
if indexed:
if len(dataset.vdims) == 1:
arr = np.squeeze(data[dataset.vdims[0].name])
return arr if np.isscalar(arr) else arr[()]
else:
return np.array([np.squeeze(data[vd.name])
for vd in dataset.vdims])
return data
@classmethod
def sample(cls, dataset, samples=[]):
"""
Samples the gridded data into dataset of samples.
"""
ndims = dataset.ndims
dimensions = dataset.dimensions(label='name')
arrays = [dataset.data[vdim.name] for vdim in dataset.vdims]
data = defaultdict(list)
for sample in samples:
if np.isscalar(sample): sample = [sample]
if len(sample) != ndims:
sample = [sample[i] if i < len(sample) else None
for i in range(ndims)]
sampled, int_inds = [], []
for d, ind in zip(dimensions, sample):
cdata = dataset.data[d]
mask = cls.key_select_mask(dataset, cdata, ind)
inds = np.arange(len(cdata)) if mask is None else np.argwhere(mask)
int_inds.append(inds)
sampled.append(cdata[mask])
for d, arr in zip(dimensions, np.meshgrid(*sampled)):
data[d].append(arr)
for vdim, array in zip(dataset.vdims, arrays):
flat_index = np.ravel_multi_index(tuple(int_inds)[::-1], array.shape)
data[vdim.name].append(array.flat[flat_index])
concatenated = {d: np.concatenate(arrays).flatten() for d, arrays in data.items()}
return concatenated
@classmethod
def aggregate(cls, dataset, kdims, function, **kwargs):
kdims = [kd.name if isinstance(kd, Dimension) else kd for kd in kdims]
data = {kdim: dataset.data[kdim] for kdim in kdims}
axes = tuple(dataset.ndims-dataset.get_dimension_index(kdim)-1
for kdim in dataset.kdims if kdim not in kdims)
for vdim in dataset.vdims:
data[vdim.name] = np.atleast_1d(function(dataset.data[vdim.name],
axis=axes, **kwargs))
return data
@classmethod
def reindex(cls, dataset, kdims, vdims):
dropped_kdims = [kd for kd in dataset.kdims if kd not in kdims]
dropped_vdims = ([vdim for vdim in dataset.vdims
if vdim not in vdims] if vdims else [])
constant = {}
for kd in dropped_kdims:
vals = cls.values(dataset, kd.name, expanded=False)
if len(vals) == 1:
constant[kd.name] = vals[0]
data = {k: values for k, values in dataset.data.items()
if k not in dropped_kdims+dropped_vdims}
if len(constant) == len(dropped_kdims):
joined_dims = kdims+dropped_kdims
axes = tuple(dataset.ndims-dataset.kdims.index(d)-1
for d in joined_dims)
dropped_axes = tuple(dataset.ndims-joined_dims.index(d)-1
for d in dropped_kdims)
for vdim in vdims:
vdata = data[vdim.name]
if len(axes) > 1:
vdata = vdata.transpose(axes[::-1])
if dropped_axes:
vdata = vdata.squeeze(axis=dropped_axes)
data[vdim.name] = vdata
return data
elif dropped_kdims:
return tuple(dataset.columns(kdims+vdims).values())
return data
@classmethod
def add_dimension(cls, dataset, dimension, dim_pos, values, vdim):
if not vdim:
raise Exception("Cannot add key dimension to a dense representation.")
dim = dimension.name if isinstance(dimension, Dimension) else dimension
return dict(dataset.data, **{dim: values})
@classmethod
def sort(cls, dataset, by=[], reverse=False):
if not by or by in [dataset.kdims, dataset.dimensions()]:
return dataset.data
else:
raise Exception('Compressed format cannot be sorted, either instantiate '
'in the desired order or use the expanded format.')
@classmethod
def iloc(cls, dataset, index):
rows, cols = index
scalar = False
if np.isscalar(cols):
scalar = np.isscalar(rows)
cols = [dataset.get_dimension(cols, strict=True)]
elif isinstance(cols, slice):
cols = dataset.dimensions()[cols]
else:
cols = [dataset.get_dimension(d, strict=True) for d in cols]
if np.isscalar(rows):
rows = [rows]
new_data = []
for d in cols:
new_data.append(dataset.dimension_values(d)[rows])
if scalar:
return new_data[0][0]
return tuple(new_data)
@classmethod
def range(cls, dataset, dimension):
if dataset._binned and dimension in dataset.kdims:
expanded = cls.irregular(dataset, dimension)
column = cls.coords(dataset, dimension, expanded=expanded, edges=True)
else:
column = dataset.dimension_values(dimension)
if dataset.get_dimension_type(dimension) is np.datetime64:
return column.min(), column.max()
elif len(column) == 0:
return np.NaN, np.NaN
else:
try:
return (np.nanmin(column), np.nanmax(column))
except TypeError:
column.sort()
return column[0], column[-1]
Interface.register(GridInterface)
| 1 | 19,725 | After discussing what ``irregular_dims`` really is, we agreed that we need a better name that makes it clearer that this is more of an xarray concept of irregular dimensions than a holoviews one. | holoviz-holoviews | py |
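A minimal sketch of the kind of rename being discussed; `xarray_irregular_dims` is a hypothetical name chosen only to illustrate the point, not the agreed-upon one, and a `None` default also sidesteps the shared-mutable-default pitfall of `irregular_dims=[]` in the patched signature above:

```python
class GridInterfaceSketch:
    # Hypothetical rename sketch -- `xarray_irregular_dims` is illustrative only;
    # the thread has not settled on the final name.
    @classmethod
    def canonicalize(cls, dataset, data, coord_dims=None, xarray_irregular_dims=None):
        # None default avoids sharing one mutable list across every call,
        # unlike a mutable [] default in the signature
        if xarray_irregular_dims is None:
            xarray_irregular_dims = []
        # ... reorient and transpose `data` as GridInterface.canonicalize does ...
        return data
```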
@@ -35,7 +35,7 @@ public class Container {
private final ContainerId id;
public Container(Function<HttpRequest, HttpResponse> client, ContainerId id) {
- LOG.info("Created container " + id);
+ LOG.finest("Created container " + id);
this.client = Objects.requireNonNull(client);
this.id = Objects.requireNonNull(id);
} | 1 | // Licensed to the Software Freedom Conservancy (SFC) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The SFC licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.openqa.selenium.docker;
import static org.openqa.selenium.remote.http.HttpMethod.DELETE;
import static org.openqa.selenium.remote.http.HttpMethod.POST;
import org.openqa.selenium.remote.http.HttpRequest;
import org.openqa.selenium.remote.http.HttpResponse;
import java.time.Duration;
import java.util.Objects;
import java.util.function.Function;
import java.util.logging.Logger;
public class Container {
public static final Logger LOG = Logger.getLogger(Container.class.getName());
private final Function<HttpRequest, HttpResponse> client;
private final ContainerId id;
public Container(Function<HttpRequest, HttpResponse> client, ContainerId id) {
LOG.info("Created container " + id);
this.client = Objects.requireNonNull(client);
this.id = Objects.requireNonNull(id);
}
public ContainerId getId() {
return id;
}
public void start() {
LOG.info("Starting " + getId());
client.apply(new HttpRequest(POST, String.format("/containers/%s/start", id)));
}
public void stop(Duration timeout) {
Objects.requireNonNull(timeout);
LOG.info("Stopping " + getId());
String seconds = String.valueOf(timeout.toMillis() / 1000);
HttpRequest request = new HttpRequest(POST, String.format("/containers/%s/stop", id));
request.addQueryParameter("t", seconds);
client.apply(request);
}
public void delete() {
LOG.info("Removing " + getId());
HttpRequest request = new HttpRequest(DELETE, "/containers/" + id);
client.apply(request);
}
}
| 1 | 16,458 | This code is new and not well tested. While we may drop the log level before we ship 4.0, right now this is extremely helpful to users. | SeleniumHQ-selenium | js |
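A small java.util.logging sketch (not Selenium's actual configuration) showing how a user could still surface the container lifecycle messages if they do move from INFO to FINEST before 4.0 ships: raise both the logger's and the handler's levels, since the default console handler filters at INFO.

```java
import java.util.logging.ConsoleHandler;
import java.util.logging.Handler;
import java.util.logging.Level;
import java.util.logging.Logger;

public class DockerLogLevels {
    public static void main(String[] args) {
        // opt back in to the verbose container lifecycle logs
        Logger log = Logger.getLogger("org.openqa.selenium.docker.Container");
        log.setLevel(Level.FINEST);

        Handler handler = new ConsoleHandler();
        handler.setLevel(Level.FINEST); // the default handler filters at INFO
        log.addHandler(handler);

        log.finest("Created container 1234"); // now reaches the console
    }
}
```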
@@ -222,11 +222,16 @@ th svg {
}
td {
+ white-space: nowrap;
font-size: 14px;
}
td:first-child {
- width: 50%;
+ width: 100%;
+}
+
+td:nth-child(2) {
+ padding: 0 20px 0 20px;
}
th:last-child, | 1 | package browse
import (
"fmt"
"io/ioutil"
"net/http"
"text/template"
"github.com/mholt/caddy"
"github.com/mholt/caddy/caddyhttp/httpserver"
"github.com/mholt/caddy/caddyhttp/staticfiles"
)
func init() {
caddy.RegisterPlugin("browse", caddy.Plugin{
ServerType: "http",
Action: setup,
})
}
// setup configures a new Browse middleware instance.
func setup(c *caddy.Controller) error {
configs, err := browseParse(c)
if err != nil {
return err
}
b := Browse{
Configs: configs,
IgnoreIndexes: false,
}
httpserver.GetConfig(c).AddMiddleware(func(next httpserver.Handler) httpserver.Handler {
b.Next = next
return b
})
return nil
}
func browseParse(c *caddy.Controller) ([]Config, error) {
var configs []Config
cfg := httpserver.GetConfig(c)
appendCfg := func(bc Config) error {
for _, c := range configs {
if c.PathScope == bc.PathScope {
return fmt.Errorf("duplicate browsing config for %s", c.PathScope)
}
}
configs = append(configs, bc)
return nil
}
for c.Next() {
var bc Config
// First argument is directory to allow browsing; default is site root
if c.NextArg() {
bc.PathScope = c.Val()
} else {
bc.PathScope = "/"
}
bc.Fs = staticfiles.FileServer{
Root: http.Dir(cfg.Root),
Hide: cfg.HiddenFiles,
}
// Second argument would be the template file to use
var tplText string
if c.NextArg() {
tplBytes, err := ioutil.ReadFile(c.Val())
if err != nil {
return configs, err
}
tplText = string(tplBytes)
} else {
tplText = defaultTemplate
}
// Build the template
tpl, err := template.New("listing").Parse(tplText)
if err != nil {
return configs, err
}
bc.Template = tpl
// Save configuration
err = appendCfg(bc)
if err != nil {
return configs, err
}
}
return configs, nil
}
// The default template to use when serving up directory listings
const defaultTemplate = `<!DOCTYPE html>
<html>
<head>
<title>{{html .Name}}</title>
<meta charset="utf-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<style>
* { padding: 0; margin: 0; }
body {
font-family: sans-serif;
text-rendering: optimizespeed;
}
a {
color: #006ed3;
text-decoration: none;
}
a:hover,
h1 a:hover {
color: #319cff;
}
header,
#summary {
padding-left: 5%;
padding-right: 5%;
}
th:first-child,
td:first-child {
padding-left: 5%;
}
th:last-child,
td:last-child {
padding-right: 5%;
}
header {
padding-top: 25px;
padding-bottom: 15px;
background-color: #f2f2f2;
}
h1 {
font-size: 20px;
font-weight: normal;
white-space: nowrap;
overflow-x: hidden;
text-overflow: ellipsis;
color: #999;
}
h1 a {
color: #000;
margin: 0 4px;
}
h1 a:hover {
text-decoration: underline;
}
h1 a:first-child {
margin: 0;
}
main {
display: block;
}
.meta {
font-size: 12px;
font-family: Verdana, sans-serif;
border-bottom: 1px solid #9C9C9C;
padding-top: 10px;
padding-bottom: 10px;
}
.meta-item {
margin-right: 1em;
}
#filter {
padding: 4px;
border: 1px solid #CCC;
}
table {
width: 100%;
border-collapse: collapse;
}
tr {
border-bottom: 1px dashed #dadada;
}
tbody tr:hover {
background-color: #ffffec;
}
th,
td {
text-align: left;
padding: 10px 0;
}
th {
padding-top: 15px;
padding-bottom: 15px;
font-size: 16px;
white-space: nowrap;
}
th a {
color: black;
}
th svg {
vertical-align: middle;
}
td {
font-size: 14px;
}
td:first-child {
width: 50%;
}
th:last-child,
td:last-child {
text-align: right;
}
td:first-child svg {
position: absolute;
}
td .name,
td .goup {
margin-left: 1.75em;
word-break: break-all;
overflow-wrap: break-word;
white-space: pre-wrap;
}
.icon {
margin-right: 5px;
}
.icon.sort {
display: inline-block;
width: 1em;
height: 1em;
position: relative;
top: .2em;
}
.icon.sort .top {
position: absolute;
left: 0;
top: -1px;
}
.icon.sort .bottom {
position: absolute;
bottom: -1px;
left: 0;
}
footer {
padding: 40px 20px;
font-size: 12px;
text-align: center;
}
@media (max-width: 600px) {
.hideable {
display: none;
}
td:first-child {
width: auto;
}
th:nth-child(2),
td:nth-child(2) {
padding-right: 5%;
text-align: right;
}
h1 {
color: #000;
}
h1 a {
margin: 0;
}
#filter {
max-width: 100px;
}
}
</style>
</head>
<body>
<svg version="1.1" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" height="0" width="0" style="position: absolute;">
<defs>
<!-- Folder -->
<g id="folder" fill-rule="nonzero" fill="none">
<path d="M285.22 37.55h-142.6L110.9 0H31.7C14.25 0 0 16.9 0 37.55v75.1h316.92V75.1c0-20.65-14.26-37.55-31.7-37.55z" fill="#FFA000"/>
<path d="M285.22 36H31.7C14.25 36 0 50.28 0 67.74v158.7c0 17.47 14.26 31.75 31.7 31.75H285.2c17.44 0 31.7-14.3 31.7-31.75V67.75c0-17.47-14.26-31.75-31.7-31.75z" fill="#FFCA28"/>
</g>
<g id="folder-shortcut" stroke="none" stroke-width="1" fill="none" fill-rule="evenodd">
<g id="folder-shortcut-group" fill-rule="nonzero">
<g id="folder-shortcut-shape">
<path d="M285.224876,37.5486902 L142.612438,37.5486902 L110.920785,0 L31.6916529,0 C14.2612438,0 0,16.8969106 0,37.5486902 L0,112.646071 L316.916529,112.646071 L316.916529,75.0973805 C316.916529,54.4456008 302.655285,37.5486902 285.224876,37.5486902 Z" id="Shape" fill="#FFA000"></path>
<path d="M285.224876,36 L31.6916529,36 C14.2612438,36 0,50.2838568 0,67.7419039 L0,226.451424 C0,243.909471 14.2612438,258.193328 31.6916529,258.193328 L285.224876,258.193328 C302.655285,258.193328 316.916529,243.909471 316.916529,226.451424 L316.916529,67.7419039 C316.916529,50.2838568 302.655285,36 285.224876,36 Z" id="Shape" fill="#FFCA28"></path>
</g>
<path d="M126.154134,250.559184 C126.850974,251.883673 127.300549,253.006122 127.772602,254.106122 C128.469442,255.206122 128.919016,256.104082 129.638335,257.002041 C130.559962,258.326531 131.728855,259 133.100057,259 C134.493737,259 135.415364,258.55102 136.112204,257.67551 C136.809044,257.002041 137.258619,255.902041 137.258619,254.577551 C137.258619,253.904082 137.258619,252.804082 137.033832,251.457143 C136.786566,249.908163 136.561779,249.032653 136.561779,248.583673 C136.089726,242.814286 135.864939,237.920408 135.864939,233.273469 C135.864939,225.057143 136.786566,217.514286 138.180246,210.846939 C139.798713,204.202041 141.889234,198.634694 144.429328,193.763265 C147.216689,188.869388 150.678411,184.873469 154.836973,181.326531 C158.995535,177.779592 163.626149,174.883673 168.481552,172.661224 C173.336954,170.438776 179.113983,168.665306 185.587852,167.340816 C192.061722,166.218367 198.760378,165.342857 205.481514,164.669388 C212.18017,164.220408 219.598146,163.995918 228.162535,163.995918 L246.055591,163.995918 L246.055591,195.514286 C246.055591,197.736735 246.752431,199.510204 248.370899,201.059184 C250.214153,202.608163 252.079886,203.506122 254.372715,203.506122 C256.463236,203.506122 258.531277,202.608163 260.172223,201.059184 L326.102289,137.797959 C327.720757,136.24898 328.642384,134.47551 328.642384,132.253061 C328.642384,130.030612 327.720757,128.257143 326.102289,126.708163 L260.172223,63.4469388 C258.553756,61.8979592 256.463236,61 254.395194,61 C252.079886,61 250.236632,61.8979592 248.393377,63.4469388 C246.77491,64.9959184 246.07807,66.7693878 246.07807,68.9918367 L246.07807,100.510204 L228.162535,100.510204 C166.863084,100.510204 129.166282,117.167347 115.274437,150.459184 C110.666301,161.54898 108.350993,175.310204 108.350993,191.742857 C108.350993,205.279592 113.903236,223.912245 124.760454,247.438776 C125.00772,248.112245 125.457294,249.010204 126.154134,250.559184 Z" id="Shape" fill="#FFFFFF" transform="translate(218.496689, 160.000000) scale(-1, 1) translate(-218.496689, -160.000000) "></path>
</g>
</g>
<!-- File -->
<g id="file" stroke="#000" stroke-width="25" fill="#FFF" fill-rule="evenodd" stroke-linecap="round" stroke-linejoin="round">
<path d="M13 24.12v274.76c0 6.16 5.87 11.12 13.17 11.12H239c7.3 0 13.17-4.96 13.17-11.12V136.15S132.6 13 128.37 13H26.17C18.87 13 13 17.96 13 24.12z"/>
<path d="M129.37 13L129 113.9c0 10.58 7.26 19.1 16.27 19.1H249L129.37 13z"/>
</g>
<g id="file-shortcut" stroke="none" stroke-width="1" fill="none" fill-rule="evenodd">
<g id="file-shortcut-group" transform="translate(13.000000, 13.000000)">
<g id="file-shortcut-shape" stroke="#000000" stroke-width="25" fill="#FFFFFF" stroke-linecap="round" stroke-linejoin="round">
<path d="M0,11.1214886 L0,285.878477 C0,292.039924 5.87498876,296.999983 13.1728373,296.999983 L225.997983,296.999983 C233.295974,296.999983 239.17082,292.039942 239.17082,285.878477 L239.17082,123.145388 C239.17082,123.145388 119.58541,2.84217094e-14 115.369423,2.84217094e-14 L13.1728576,2.84217094e-14 C5.87500907,-1.71479982e-05 0,4.96022995 0,11.1214886 Z" id="rect1171"></path>
<path d="M116.37005,0 L116,100.904964 C116,111.483663 123.258008,120 132.273377,120 L236,120 L116.37005,0 L116.37005,0 Z" id="rect1794"></path>
</g>
<path d="M47.803141,294.093878 C48.4999811,295.177551 48.9495553,296.095918 49.4216083,296.995918 C50.1184484,297.895918 50.5680227,298.630612 51.2873415,299.365306 C52.2089688,300.44898 53.3778619,301 54.7490634,301 C56.1427436,301 57.0643709,300.632653 57.761211,299.916327 C58.4580511,299.365306 58.9076254,298.465306 58.9076254,297.381633 C58.9076254,296.830612 58.9076254,295.930612 58.6828382,294.828571 C58.4355724,293.561224 58.2107852,292.844898 58.2107852,292.477551 C57.7387323,287.757143 57.5139451,283.753061 57.5139451,279.95102 C57.5139451,273.228571 58.4355724,267.057143 59.8292526,261.602041 C61.44772,256.165306 63.5382403,251.610204 66.0783349,247.62449 C68.8656954,243.620408 72.3274172,240.35102 76.4859792,237.44898 C80.6445412,234.546939 85.2751561,232.177551 90.1305582,230.359184 C94.9859603,228.540816 100.76299,227.089796 107.236859,226.006122 C113.710728,225.087755 120.409385,224.371429 127.13052,223.820408 C133.829177,223.453061 141.247152,223.269388 149.811542,223.269388 L167.704598,223.269388 L167.704598,249.057143 C167.704598,250.87551 168.401438,252.326531 170.019905,253.593878 C171.86316,254.861224 173.728893,255.595918 176.021722,255.595918 C178.112242,255.595918 180.180284,254.861224 181.82123,253.593878 L247.751296,201.834694 C249.369763,200.567347 250.291391,199.116327 250.291391,197.297959 C250.291391,195.479592 249.369763,194.028571 247.751296,192.761224 L181.82123,141.002041 C180.202763,139.734694 178.112242,139 176.044201,139 C173.728893,139 171.885639,139.734694 170.042384,141.002041 C168.423917,142.269388 167.727077,143.720408 167.727077,145.538776 L167.727077,171.326531 L149.811542,171.326531 C88.5120908,171.326531 50.8152886,184.955102 36.9234437,212.193878 C32.3153075,221.267347 30,232.526531 30,245.971429 C30,257.046939 35.5522422,272.291837 46.4094607,291.540816 C46.6567266,292.091837 47.1063009,292.826531 47.803141,294.093878 Z" id="Shape-Copy" fill="#000000" fill-rule="nonzero" transform="translate(140.145695, 220.000000) scale(-1, 1) translate(-140.145695, -220.000000) "></path>
</g>
</g>
<!-- Up arrow -->
<g id="up-arrow" transform="translate(-279.22 -208.12)">
<path transform="matrix(.22413 0 0 .12089 335.67 164.35)" stroke-width="0" d="m-194.17 412.01h-28.827-28.827l14.414-24.965 14.414-24.965 14.414 24.965z"/>
</g>
<!-- Down arrow -->
<g id="down-arrow" transform="translate(-279.22 -208.12)">
<path transform="matrix(.22413 0 0 -.12089 335.67 257.93)" stroke-width="0" d="m-194.17 412.01h-28.827-28.827l14.414-24.965 14.414-24.965 14.414 24.965z"/>
</g>
</defs>
</svg>
<header>
<h1>
{{range $i, $crumb := .Breadcrumbs}}<a href="{{html $crumb.Link}}">{{html $crumb.Text}}</a>{{if ne $i 0}}/{{end}}{{end}}
</h1>
</header>
<main>
<div class="meta">
<div id="summary">
<span class="meta-item"><b>{{.NumDirs}}</b> director{{if eq 1 .NumDirs}}y{{else}}ies{{end}}</span>
<span class="meta-item"><b>{{.NumFiles}}</b> file{{if ne 1 .NumFiles}}s{{end}}</span>
{{- if ne 0 .ItemsLimitedTo}}
<span class="meta-item">(of which only <b>{{.ItemsLimitedTo}}</b> are displayed)</span>
{{- end}}
<span class="meta-item"><input type="text" placeholder="filter" id="filter" onkeyup='filter()'></span>
</div>
</div>
<div class="listing">
<table aria-describedby="summary">
<thead>
<tr>
<th>
{{- if and (eq .Sort "namedirfirst") (ne .Order "desc")}}
<a href="?sort=namedirfirst&order=desc{{if ne 0 .ItemsLimitedTo}}&limit={{.ItemsLimitedTo}}{{end}}" class="icon"><svg width="1em" height=".5em" version="1.1" viewBox="0 0 12.922194 6.0358899"><use xlink:href="#up-arrow"></use></svg></a>
{{- else if and (eq .Sort "namedirfirst") (ne .Order "asc")}}
<a href="?sort=namedirfirst&order=asc{{if ne 0 .ItemsLimitedTo}}&limit={{.ItemsLimitedTo}}{{end}}" class="icon"><svg width="1em" height=".5em" version="1.1" viewBox="0 0 12.922194 6.0358899"><use xlink:href="#down-arrow"></use></svg></a>
{{- else}}
<a href="?sort=namedirfirst&order=asc{{if ne 0 .ItemsLimitedTo}}&limit={{.ItemsLimitedTo}}{{end}}" class="icon sort"><svg class="top" width="1em" height=".5em" version="1.1" viewBox="0 0 12.922194 6.0358899"><use xlink:href="#up-arrow"></use></svg><svg class="bottom" width="1em" height=".5em" version="1.1" viewBox="0 0 12.922194 6.0358899"><use xlink:href="#down-arrow"></use></svg></a>
{{- end}}
{{- if and (eq .Sort "name") (ne .Order "desc")}}
<a href="?sort=name&order=desc{{if ne 0 .ItemsLimitedTo}}&limit={{.ItemsLimitedTo}}{{end}}">Name <svg width="1em" height=".5em" version="1.1" viewBox="0 0 12.922194 6.0358899"><use xlink:href="#up-arrow"></use></svg></a>
{{- else if and (eq .Sort "name") (ne .Order "asc")}}
<a href="?sort=name&order=asc{{if ne 0 .ItemsLimitedTo}}&limit={{.ItemsLimitedTo}}{{end}}">Name <svg width="1em" height=".5em" version="1.1" viewBox="0 0 12.922194 6.0358899"><use xlink:href="#down-arrow"></use></svg></a>
{{- else}}
<a href="?sort=name&order=asc{{if ne 0 .ItemsLimitedTo}}&limit={{.ItemsLimitedTo}}{{end}}">Name</a>
{{- end}}
</th>
<th>
{{- if and (eq .Sort "size") (ne .Order "desc")}}
<a href="?sort=size&order=desc{{if ne 0 .ItemsLimitedTo}}&limit={{.ItemsLimitedTo}}{{end}}">Size <svg width="1em" height=".5em" version="1.1" viewBox="0 0 12.922194 6.0358899"><use xlink:href="#up-arrow"></use></svg></a>
{{- else if and (eq .Sort "size") (ne .Order "asc")}}
<a href="?sort=size&order=asc{{if ne 0 .ItemsLimitedTo}}&limit={{.ItemsLimitedTo}}{{end}}">Size <svg width="1em" height=".5em" version="1.1" viewBox="0 0 12.922194 6.0358899"><use xlink:href="#down-arrow"></use></svg></a>
{{- else}}
<a href="?sort=size&order=asc{{if ne 0 .ItemsLimitedTo}}&limit={{.ItemsLimitedTo}}{{end}}">Size</a>
{{- end}}
</th>
<th class="hideable">
{{- if and (eq .Sort "time") (ne .Order "desc")}}
<a href="?sort=time&order=desc{{if ne 0 .ItemsLimitedTo}}&limit={{.ItemsLimitedTo}}{{end}}">Modified <svg width="1em" height=".5em" version="1.1" viewBox="0 0 12.922194 6.0358899"><use xlink:href="#up-arrow"></use></svg></a>
{{- else if and (eq .Sort "time") (ne .Order "asc")}}
<a href="?sort=time&order=asc{{if ne 0 .ItemsLimitedTo}}&limit={{.ItemsLimitedTo}}{{end}}">Modified <svg width="1em" height=".5em" version="1.1" viewBox="0 0 12.922194 6.0358899"><use xlink:href="#down-arrow"></use></svg></a>
{{- else}}
<a href="?sort=time&order=asc{{if ne 0 .ItemsLimitedTo}}&limit={{.ItemsLimitedTo}}{{end}}">Modified</a>
{{- end}}
</th>
</tr>
</thead>
<tbody>
{{- if .CanGoUp}}
<tr>
<td>
<a href="..">
<span class="goup">Go up</span>
</a>
</td>
<td>—</td>
<td class="hideable">—</td>
</tr>
{{- end}}
{{- range .Items}}
<tr class="file">
<td>
<a href="{{html .URL}}">
{{- if .IsDir}}
<svg width="1.5em" height="1em" version="1.1" viewBox="0 0 317 259"><use xlink:href="#folder{{if .IsSymlink}}-shortcut{{end}}"></use></svg>
{{- else}}
<svg width="1.5em" height="1em" version="1.1" viewBox="0 0 265 323"><use xlink:href="#file{{if .IsSymlink}}-shortcut{{end}}"></use></svg>
{{- end}}
<span class="name">{{html .Name}}</span>
</a>
</td>
{{- if .IsDir}}
<td data-order="-1">—</td>
{{- else}}
<td data-order="{{.Size}}">{{.HumanSize}}</td>
{{- end}}
<td class="hideable"><time datetime="{{.HumanModTime "2006-01-02T15:04:05Z"}}">{{.HumanModTime "01/02/2006 03:04:05 PM -07:00"}}</time></td>
</tr>
{{- end}}
</tbody>
</table>
</div>
</main>
<footer>
Served with <a rel="noopener noreferrer" href="https://caddyserver.com">Caddy</a>
</footer>
<script>
var filterEl = document.getElementById('filter');
filterEl.focus();
function filter() {
var q = filterEl.value.trim().toLowerCase();
var elems = document.querySelectorAll('tr.file');
elems.forEach(function(el) {
if (!q) {
el.style.display = '';
return;
}
var nameEl = el.querySelector('.name');
var nameVal = nameEl.textContent.trim().toLowerCase();
if (nameVal.indexOf(q) !== -1) {
el.style.display = '';
} else {
el.style.display = 'none';
}
});
}
function localizeDatetime(e, index, ar) {
if (e.textContent === undefined) {
return;
}
var d = new Date(e.getAttribute('datetime'));
if (isNaN(d)) {
d = new Date(e.textContent);
if (isNaN(d)) {
return;
}
}
e.textContent = d.toLocaleString();
}
var timeList = Array.prototype.slice.call(document.getElementsByTagName("time"));
timeList.forEach(localizeDatetime);
</script>
</body>
</html>`
| 1 | 11,358 | this makes sure there is some padding space around the size column | caddyserver-caddy | go |
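The reviewed hunk sits earlier in the file, but the size column the comment refers to is rendered by the `<td data-order=...>` cells in this template. A minimal CSS sketch of the kind of padding being described; the selector is taken from the template, while the rule and values are illustrative only, not the project's actual change:

    /* Illustrative only: keep the size value from touching neighbouring columns. */
    td[data-order] {
      padding: 0 1em;
    }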
@@ -47,6 +47,7 @@ extern "C" {
#include "Util/InputFileUtil.h"
#include "Util/GraphLoader.h"
#include "Util/StringUtil.h"
+#include "Util/LuaUtil.h"
using namespace std;
| 1 | /*
open source routing machine
Copyright (C) Dennis Luxen, 2010
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU AFFERO General Public License as published by
the Free Software Foundation; either version 3 of the License, or
any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
or see http://www.gnu.org/licenses/agpl.txt.
*/
extern "C" {
#include <lua.h>
#include <lauxlib.h>
#include <lualib.h>
}
#include <luabind/luabind.hpp>
#include <boost/foreach.hpp>
#include <fstream>
#include <istream>
#include <iostream>
#include <cstring>
#include <string>
#include <vector>
#include "Algorithms/IteratorBasedCRC32.h"
#include "Util/OpenMPWrapper.h"
#include "typedefs.h"
#include "Contractor/Contractor.h"
#include "Contractor/EdgeBasedGraphFactory.h"
#include "DataStructures/BinaryHeap.h"
#include "DataStructures/DeallocatingVector.h"
#include "DataStructures/NNGrid.h"
#include "DataStructures/QueryEdge.h"
#include "Util/BaseConfiguration.h"
#include "Util/InputFileUtil.h"
#include "Util/GraphLoader.h"
#include "Util/StringUtil.h"
using namespace std;
typedef QueryEdge::EdgeData EdgeData;
typedef DynamicGraph<EdgeData>::InputEdge InputEdge;
typedef StaticGraph<EdgeData>::InputEdge StaticEdge;
typedef BaseConfiguration ContractorConfiguration;
std::vector<NodeInfo> internalToExternalNodeMapping;
std::vector<_Restriction> inputRestrictions;
std::vector<NodeID> bollardNodes;
std::vector<NodeID> trafficLightNodes;
int main (int argc, char *argv[]) {
if(argc < 3) {
ERR("usage: " << std::endl << argv[0] << " <osrm-data> <osrm-restrictions> [<profile>]");
}
double startupTime = get_timestamp();
unsigned numberOfThreads = omp_get_num_procs();
if(testDataFile("contractor.ini")) {
ContractorConfiguration contractorConfig("contractor.ini");
unsigned rawNumber = stringToInt(contractorConfig.GetParameter("Threads"));
if(rawNumber != 0 && rawNumber <= numberOfThreads)
numberOfThreads = rawNumber;
}
omp_set_num_threads(numberOfThreads);
INFO("Using restrictions from file: " << argv[2]);
std::ifstream restrictionsInstream(argv[2], ios::binary);
if(!restrictionsInstream.good()) {
ERR("Could not access <osrm-restrictions> files");
}
_Restriction restriction;
unsigned usableRestrictionsCounter(0);
restrictionsInstream.read((char*)&usableRestrictionsCounter, sizeof(unsigned));
inputRestrictions.resize(usableRestrictionsCounter);
restrictionsInstream.read((char *)&(inputRestrictions[0]), usableRestrictionsCounter*sizeof(_Restriction));
restrictionsInstream.close();
std::ifstream in;
in.open (argv[1], std::ifstream::in | std::ifstream::binary);
if (!in.is_open()) {
ERR("Cannot open " << argv[1]);
}
char nodeOut[1024]; strcpy(nodeOut, argv[1]); strcat(nodeOut, ".nodes");
char edgeOut[1024]; strcpy(edgeOut, argv[1]); strcat(edgeOut, ".edges");
char graphOut[1024]; strcpy(graphOut, argv[1]); strcat(graphOut, ".hsgr");
char ramIndexOut[1024]; strcpy(ramIndexOut, argv[1]); strcat(ramIndexOut, ".ramIndex");
char fileIndexOut[1024]; strcpy(fileIndexOut, argv[1]); strcat(fileIndexOut, ".fileIndex");
char levelInfoOut[1024]; strcpy(levelInfoOut, argv[1]); strcat(levelInfoOut, ".levels");
/*** Setup Scripting Environment ***/
if(!testDataFile( (argc > 3 ? argv[3] : "profile.lua") )) {
ERR("Need profile.lua to apply traffic signal penalty");
}
// Create a new lua state
lua_State *myLuaState = luaL_newstate();
// Connect LuaBind to this lua state
luabind::open(myLuaState);
// Now call our function in a lua script
INFO("Parsing speedprofile from " << (argc > 3 ? argv[3] : "profile.lua") );
if(0 != luaL_dofile(myLuaState, (argc > 3 ? argv[3] : "profile.lua") )) {
ERR(lua_tostring(myLuaState,-1)<< " occurred in scripting block");
}
EdgeBasedGraphFactory::SpeedProfileProperties speedProfile;
if(0 != luaL_dostring( myLuaState, "return traffic_signal_penalty\n")) {
ERR(lua_tostring(myLuaState,-1)<< " occurred in scripting block");
}
speedProfile.trafficSignalPenalty = 10*lua_tointeger(myLuaState, -1);
if(0 != luaL_dostring( myLuaState, "return u_turn_penalty\n")) {
ERR(lua_tostring(myLuaState,-1)<< " occurred in scripting block");
}
speedProfile.uTurnPenalty = 10*lua_tointeger(myLuaState, -1);
std::vector<ImportEdge> edgeList;
NodeID nodeBasedNodeNumber = readBinaryOSRMGraphFromStream(in, edgeList, bollardNodes, trafficLightNodes, &internalToExternalNodeMapping, inputRestrictions);
in.close();
INFO(inputRestrictions.size() << " restrictions, " << bollardNodes.size() << " bollard nodes, " << trafficLightNodes.size() << " traffic lights");
if(0 == edgeList.size())
ERR("The input data is broken. It is impossible to do any turns in this graph");
/***
* Building an edge-expanded graph from node-based input and turn restrictions
*/
INFO("Generating edge-expanded graph representation");
EdgeBasedGraphFactory * edgeBasedGraphFactory = new EdgeBasedGraphFactory (nodeBasedNodeNumber, edgeList, bollardNodes, trafficLightNodes, inputRestrictions, internalToExternalNodeMapping, speedProfile);
std::vector<ImportEdge>().swap(edgeList);
edgeBasedGraphFactory->Run(edgeOut);
std::vector<_Restriction>().swap(inputRestrictions);
std::vector<NodeID>().swap(bollardNodes);
std::vector<NodeID>().swap(trafficLightNodes);
NodeID edgeBasedNodeNumber = edgeBasedGraphFactory->GetNumberOfNodes();
DeallocatingVector<EdgeBasedEdge> edgeBasedEdgeList;
edgeBasedGraphFactory->GetEdgeBasedEdges(edgeBasedEdgeList);
/***
* Writing info on original (node-based) nodes
*/
INFO("writing node map ...");
std::ofstream mapOutFile(nodeOut, std::ios::binary);
mapOutFile.write((char *)&(internalToExternalNodeMapping[0]), internalToExternalNodeMapping.size()*sizeof(NodeInfo));
mapOutFile.close();
std::vector<NodeInfo>().swap(internalToExternalNodeMapping);
/***
* Writing info on original (node-based) edges
*/
INFO("writing info on original edges");
std::vector<OriginalEdgeData> originalEdgeData;
edgeBasedGraphFactory->GetOriginalEdgeData(originalEdgeData);
DeallocatingVector<EdgeBasedGraphFactory::EdgeBasedNode> nodeBasedEdgeList;
edgeBasedGraphFactory->GetEdgeBasedNodes(nodeBasedEdgeList);
delete edgeBasedGraphFactory;
double expansionHasFinishedTime = get_timestamp() - startupTime;
/***
* Building grid-like nearest-neighbor data structure
*/
INFO("building grid ...");
WritableGrid * writeableGrid = new WritableGrid();
writeableGrid->ConstructGrid(nodeBasedEdgeList, ramIndexOut, fileIndexOut);
delete writeableGrid;
IteratorbasedCRC32<DeallocatingVector<EdgeBasedGraphFactory::EdgeBasedNode> > crc32;
unsigned crc32OfNodeBasedEdgeList = crc32(nodeBasedEdgeList.begin(), nodeBasedEdgeList.end() );
nodeBasedEdgeList.clear();
INFO("CRC32 based checksum is " << crc32OfNodeBasedEdgeList);
/***
* Contracting the edge-expanded graph
*/
INFO("initializing contractor");
Contractor* contractor = new Contractor( edgeBasedNodeNumber, edgeBasedEdgeList );
double contractionStartedTimestamp(get_timestamp());
contractor->Run();
INFO("Contraction took " << get_timestamp() - contractionStartedTimestamp << " sec");
DeallocatingVector< QueryEdge > contractedEdgeList;
contractor->GetEdges( contractedEdgeList );
delete contractor;
/***
* Sorting contracted edges in a way that the static query graph can read some of them in-place.
*/
INFO("Building Node Array");
sort(contractedEdgeList.begin(), contractedEdgeList.end());
unsigned numberOfNodes = 0;
unsigned numberOfEdges = contractedEdgeList.size();
INFO("Serializing compacted graph");
ofstream edgeOutFile(graphOut, ios::binary);
BOOST_FOREACH(QueryEdge & edge, contractedEdgeList) {
if(edge.source > numberOfNodes) {
numberOfNodes = edge.source;
}
if(edge.target > numberOfNodes) {
numberOfNodes = edge.target;
}
}
numberOfNodes+=1;
std::vector< StaticGraph<EdgeData>::_StrNode > _nodes;
_nodes.resize( numberOfNodes + 1 );
StaticGraph<EdgeData>::EdgeIterator edge = 0;
StaticGraph<EdgeData>::EdgeIterator position = 0;
for ( StaticGraph<EdgeData>::NodeIterator node = 0; node <= numberOfNodes; ++node ) {
StaticGraph<EdgeData>::EdgeIterator lastEdge = edge;
while ( edge < numberOfEdges && contractedEdgeList[edge].source == node )
++edge;
_nodes[node].firstEdge = position; //=edge
position += edge - lastEdge; //remove
}
++numberOfNodes;
//Serialize numberOfNodes, nodes
edgeOutFile.write((char*) &crc32OfNodeBasedEdgeList, sizeof(unsigned));
edgeOutFile.write((char*) &numberOfNodes, sizeof(unsigned));
edgeOutFile.write((char*) &_nodes[0], sizeof(StaticGraph<EdgeData>::_StrNode)*(numberOfNodes));
//Serialize number of Edges
edgeOutFile.write((char*) &position, sizeof(unsigned));
--numberOfNodes;
edge = 0;
int usedEdgeCounter = 0;
StaticGraph<EdgeData>::_StrEdge currentEdge;
for ( StaticGraph<EdgeData>::NodeIterator node = 0; node < numberOfNodes; ++node ) {
for ( StaticGraph<EdgeData>::EdgeIterator i = _nodes[node].firstEdge, e = _nodes[node+1].firstEdge; i != e; ++i ) {
assert(node != contractedEdgeList[edge].target);
currentEdge.target = contractedEdgeList[edge].target;
currentEdge.data = contractedEdgeList[edge].data;
if(currentEdge.data.distance <= 0) {
INFO("Edge: " << i << ",source: " << contractedEdgeList[edge].source << ", target: " << contractedEdgeList[edge].target << ", dist: " << currentEdge.data.distance);
ERR("Failed at edges of node " << node << " of " << numberOfNodes);
}
//Serialize edges
edgeOutFile.write((char*) &currentEdge, sizeof(StaticGraph<EdgeData>::_StrEdge));
++edge;
++usedEdgeCounter;
}
}
double endTime = (get_timestamp() - startupTime);
INFO("Expansion : " << (nodeBasedNodeNumber/expansionHasFinishedTime) << " nodes/sec and "<< (edgeBasedNodeNumber/expansionHasFinishedTime) << " edges/sec");
INFO("Contraction: " << (edgeBasedNodeNumber/expansionHasFinishedTime) << " nodes/sec and "<< usedEdgeCounter/endTime << " edges/sec");
edgeOutFile.close();
//cleanedEdgeList.clear();
_nodes.clear();
INFO("finished preprocessing");
return 0;
}
| 1 | 12,294 | Put these includes in alphabetical order | Project-OSRM-osrm-backend | cpp |
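The fix the reviewer asks for is mechanical; the whole `Util` include block from the file above, sorted alphabetically with the newly added header, would read:

    #include "Util/BaseConfiguration.h"
    #include "Util/GraphLoader.h"
    #include "Util/InputFileUtil.h"
    #include "Util/LuaUtil.h"
    #include "Util/StringUtil.h"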
@@ -246,7 +246,12 @@ class JAB(Window):
continue
keyList=[]
		# We assume alt if there are no modifiers at all and it's not a menu item, as this is clearly a mnemonic
- if (binding.modifiers&JABHandler.ACCESSIBLE_ALT_KEYSTROKE) or (not binding.modifiers and self.role!=controlTypes.ROLE_MENUITEM):
+ altModifier = binding.modifiers&JABHandler.ACCESSIBLE_ALT_KEYSTROKE
+ if not altModifier:
+ if not binding.modifiers and self.role!=controlTypes.ROLE_MENUITEM:
+ parent=self.parent
+ altModifier=isinstance(parent,JAB) and parent.role==controlTypes.ROLE_MENUBAR
+ if altModifier:
keyList.append(keyLabels.localizedKeyLabels['alt'])
if binding.modifiers&JABHandler.ACCESSIBLE_CONTROL_KEYSTROKE:
keyList.append(keyLabels.localizedKeyLabels['control']) | 1 | import ctypes
import re
import eventHandler
import keyLabels
import JABHandler
import controlTypes
from ..window import Window
from ..behaviors import EditableTextWithoutAutoSelectDetection, Dialog
import textInfos.offsets
from logHandler import log
from .. import InvalidNVDAObject
JABRolesToNVDARoles={
"alert":controlTypes.ROLE_DIALOG,
"column header":controlTypes.ROLE_TABLECOLUMNHEADER,
"canvas":controlTypes.ROLE_CANVAS,
"combo box":controlTypes.ROLE_COMBOBOX,
"desktop icon":controlTypes.ROLE_DESKTOPICON,
"internal frame":controlTypes.ROLE_INTERNALFRAME,
"desktop pane":controlTypes.ROLE_DESKTOPPANE,
"option pane":controlTypes.ROLE_OPTIONPANE,
"window":controlTypes.ROLE_WINDOW,
"frame":controlTypes.ROLE_FRAME,
"dialog":controlTypes.ROLE_DIALOG,
"color chooser":controlTypes.ROLE_COLORCHOOSER,
"directory pane":controlTypes.ROLE_DIRECTORYPANE,
"file chooser":controlTypes.ROLE_FILECHOOSER,
"filler":controlTypes.ROLE_FILLER,
"hyperlink":controlTypes.ROLE_LINK,
"icon":controlTypes.ROLE_ICON,
"label":controlTypes.ROLE_LABEL,
"root pane":controlTypes.ROLE_PANEL,
"glass pane":controlTypes.ROLE_PANEL,
"layered pane":controlTypes.ROLE_PANEL,
"list":controlTypes.ROLE_LIST,
"list item":controlTypes.ROLE_LISTITEM,
"menu bar":controlTypes.ROLE_MENUBAR,
"popup menu":controlTypes.ROLE_POPUPMENU,
"menu":controlTypes.ROLE_MENU,
"menu item":controlTypes.ROLE_MENUITEM,
"separator":controlTypes.ROLE_SEPARATOR,
"page tab list":controlTypes.ROLE_TABCONTROL,
"page tab":controlTypes.ROLE_TAB,
"panel":controlTypes.ROLE_PANEL,
"progress bar":controlTypes.ROLE_PROGRESSBAR,
"password text":controlTypes.ROLE_PASSWORDEDIT,
"push button":controlTypes.ROLE_BUTTON,
"toggle button":controlTypes.ROLE_TOGGLEBUTTON,
"check box":controlTypes.ROLE_CHECKBOX,
"radio button":controlTypes.ROLE_RADIOBUTTON,
"row header":controlTypes.ROLE_TABLEROWHEADER,
"scroll pane":controlTypes.ROLE_SCROLLPANE,
"scroll bar":controlTypes.ROLE_SCROLLBAR,
"view port":controlTypes.ROLE_VIEWPORT,
"slider":controlTypes.ROLE_SLIDER,
"split pane":controlTypes.ROLE_SPLITPANE,
"table":controlTypes.ROLE_TABLE,
"text":controlTypes.ROLE_EDITABLETEXT,
"tree":controlTypes.ROLE_TREEVIEW,
"tool bar":controlTypes.ROLE_TOOLBAR,
"tool tip":controlTypes.ROLE_TOOLTIP,
"status bar":controlTypes.ROLE_STATUSBAR,
"statusbar":controlTypes.ROLE_STATUSBAR,
"date editor":controlTypes.ROLE_DATEEDITOR,
"spin box":controlTypes.ROLE_SPINBUTTON,
"font chooser":controlTypes.ROLE_FONTCHOOSER,
"group box":controlTypes.ROLE_GROUPING,
"header":controlTypes.ROLE_HEADER,
"footer":controlTypes.ROLE_FOOTER,
"paragraph":controlTypes.ROLE_PARAGRAPH,
"ruler":controlTypes.ROLE_RULER,
"edit bar":controlTypes.ROLE_EDITBAR,
}
JABStatesToNVDAStates={
"busy":controlTypes.STATE_BUSY,
"checked":controlTypes.STATE_CHECKED,
"focused":controlTypes.STATE_FOCUSED,
"selected":controlTypes.STATE_SELECTED,
"pressed":controlTypes.STATE_PRESSED,
"expanded":controlTypes.STATE_EXPANDED,
"collapsed":controlTypes.STATE_COLLAPSED,
"iconified":controlTypes.STATE_ICONIFIED,
"modal":controlTypes.STATE_MODAL,
"multi_line":controlTypes.STATE_MULTILINE,
"focusable":controlTypes.STATE_FOCUSABLE,
"editable":controlTypes.STATE_EDITABLE,
}
re_simpleXmlTag=re.compile(r"\<[^>]+\>")
class JABTextInfo(textInfos.offsets.OffsetsTextInfo):
def _getOffsetFromPoint(self,x,y):
info=self.obj.jabContext.getAccessibleTextInfo(x,y)
offset=max(min(info.indexAtPoint,info.charCount-1),0)
return offset
def _getCaretOffset(self):
textInfo=self.obj.jabContext.getAccessibleTextInfo(self.obj._JABAccContextInfo.x,self.obj._JABAccContextInfo.y)
offset=textInfo.caretIndex
# OpenOffice sometimes returns nonsense, so treat charCount < offset as no caret.
if offset==-1 or textInfo.charCount<offset:
raise RuntimeError("no available caret in this object")
return offset
def _setCaretOffset(self,offset):
self.obj.jabContext.setCaretPosition(offset)
def _getSelectionOffsets(self):
info=self.obj.jabContext.getAccessibleTextSelectionInfo()
start=max(info.selectionStartIndex,0)
end=max(info.selectionEndIndex,0)
return (start,end)
def _setSelectionOffsets(self,start,end):
self.obj.jabContext.selectTextRange(start,end)
def _getStoryLength(self):
if not hasattr(self,'_storyLength'):
textInfo=self.obj.jabContext.getAccessibleTextInfo(self.obj._JABAccContextInfo.x,self.obj._JABAccContextInfo.y)
self._storyLength=textInfo.charCount
return self._storyLength
def _getTextRange(self,start,end):
#Java needs end of range as last character, not one past the last character
text=self.obj.jabContext.getAccessibleTextRange(start,end-1)
return text
def _getLineNumFromOffset(self,offset):
return None
def _getLineOffsets(self,offset):
(start,end)=self.obj.jabContext.getAccessibleTextLineBounds(offset)
if end==-1 and offset>0:
# #1892: JAB returns -1 for the end insertion position
# instead of returning the offsets for the last line.
# Try one character back.
(start,end)=self.obj.jabContext.getAccessibleTextLineBounds(offset-1)
#Java gives end as the last character, not one past the last character
end=end+1
return (start,end)
def _getParagraphOffsets(self,offset):
return self._getLineOffsets(offset)
def _getFormatFieldAndOffsets(self, offset, formatConfig, calculateOffsets=True):
attribs, length = self.obj.jabContext.getTextAttributesInRange(offset, self._endOffset - 1)
field = textInfos.FormatField()
field["font-family"] = attribs.fontFamily
field["font-size"] = "%dpt" % attribs.fontSize
field["bold"] = bool(attribs.bold)
field["italic"] = bool(attribs.italic)
field["strikethrough"] = bool(attribs.strikethrough)
field["underline"] = bool(attribs.underline)
if attribs.superscript:
field["text-position"] = "super"
elif attribs.subscript:
field["text-position"] = "sub"
# TODO: Not sure how to interpret Java's alignment numbers.
return field, (offset, offset + length)
def getEmbeddedObject(self, offset=0):
offset += self._startOffset
# We need to count the embedded objects to determine which child to use.
# This could possibly be optimised by caching.
text = self._getTextRange(0, offset + 1)
childIndex = text.count(u"\uFFFC") - 1
jabContext=self.obj.jabContext.getAccessibleChildFromContext(childIndex)
if jabContext:
return JAB(jabContext=jabContext)
raise LookupError
class JAB(Window):
def findOverlayClasses(self,clsList):
role = self.JABRole
if self._JABAccContextInfo.accessibleText and role in ("text","password text","edit bar","view port","paragraph"):
clsList.append(EditableTextWithoutAutoSelectDetection)
elif role in ("dialog", "alert"):
clsList.append(Dialog)
elif role=="combo box":
clsList.append(ComboBox)
elif role=="table":
clsList.append(Table)
elif self.parent and isinstance(self.parent,Table) and self.parent._jabTableInfo:
clsList.append(TableCell)
clsList.append(JAB)
@classmethod
def kwargsFromSuper(cls,kwargs,relation=None):
jabContext=None
windowHandle=kwargs['windowHandle']
if relation=="focus":
vmID=ctypes.c_int()
accContext=JABHandler.JOBJECT64()
JABHandler.bridgeDll.getAccessibleContextWithFocus(windowHandle,ctypes.byref(vmID),ctypes.byref(accContext))
jabContext=JABHandler.JABContext(hwnd=windowHandle,vmID=vmID.value,accContext=accContext.value)
elif isinstance(relation,tuple):
jabContext=JABHandler.JABContext(hwnd=windowHandle)
if jabContext:
jabContext=jabContext.getAccessibleContextAt(*relation)
else:
jabContext=JABHandler.JABContext(hwnd=windowHandle)
if not jabContext:
return False
kwargs['jabContext']=jabContext
return True
def __init__(self,relation=None,windowHandle=None,jabContext=None):
if not windowHandle:
windowHandle=jabContext.hwnd
self.windowHandle=windowHandle
self.jabContext=jabContext
super(JAB,self).__init__(windowHandle=windowHandle)
try:
self._JABAccContextInfo
except RuntimeError:
raise InvalidNVDAObject("Could not get accessible context info")
def _get__JABAccContextInfo(self):
return self.jabContext.getAccessibleContextInfo()
def _get_TextInfo(self):
if self._JABAccContextInfo.accessibleText and self.role not in [controlTypes.ROLE_BUTTON,controlTypes.ROLE_MENUITEM,controlTypes.ROLE_MENU,controlTypes.ROLE_LISTITEM]:
return JABTextInfo
return super(JAB,self).TextInfo
def _isEqual(self,other):
try:
return self.jabContext==other.jabContext
except:
return False
def _get_keyboardShortcut(self):
bindings=self.jabContext.getAccessibleKeyBindings()
if not bindings or bindings.keyBindingsCount<1:
return None
shortcutsList=[]
for index in xrange(bindings.keyBindingsCount):
binding=bindings.keyBindingInfo[index]
# We don't support these modifiers
if binding.modifiers&(JABHandler.ACCESSIBLE_META_KEYSTROKE|JABHandler.ACCESSIBLE_ALT_GRAPH_KEYSTROKE|JABHandler.ACCESSIBLE_BUTTON1_KEYSTROKE|JABHandler.ACCESSIBLE_BUTTON2_KEYSTROKE|JABHandler.ACCESSIBLE_BUTTON3_KEYSTROKE):
continue
keyList=[]
		# We assume alt if there are no modifiers at all and it's not a menu item, as this is clearly a mnemonic
if (binding.modifiers&JABHandler.ACCESSIBLE_ALT_KEYSTROKE) or (not binding.modifiers and self.role!=controlTypes.ROLE_MENUITEM):
keyList.append(keyLabels.localizedKeyLabels['alt'])
if binding.modifiers&JABHandler.ACCESSIBLE_CONTROL_KEYSTROKE:
keyList.append(keyLabels.localizedKeyLabels['control'])
if binding.modifiers&JABHandler.ACCESSIBLE_SHIFT_KEYSTROKE:
keyList.append(keyLabels.localizedKeyLabels['shift'])
keyList.append(binding.character)
shortcutsList.append("+".join(keyList))
return ", ".join(shortcutsList)
def _get_name(self):
return re_simpleXmlTag.sub(" ", self._JABAccContextInfo.name)
def _get_JABRole(self):
return self._JABAccContextInfo.role_en_US
def _get_role(self):
role = JABRolesToNVDARoles.get(self.JABRole,controlTypes.ROLE_UNKNOWN)
if role in ( controlTypes.ROLE_LABEL, controlTypes.ROLE_PANEL) and self.parent:
parentRole = self.parent.role
if parentRole == controlTypes.ROLE_LIST:
return controlTypes.ROLE_LISTITEM
elif parentRole in (controlTypes.ROLE_TREEVIEW, controlTypes.ROLE_TREEVIEWITEM):
return controlTypes.ROLE_TREEVIEWITEM
if role==controlTypes.ROLE_LABEL:
return controlTypes.ROLE_STATICTEXT
return role
def _get_JABStates(self):
return self._JABAccContextInfo.states_en_US
def _get_states(self):
log.debug("states: %s"%self.JABStates)
stateSet=set()
stateString=self.JABStates
stateStrings=stateString.split(',')
for state in stateStrings:
if JABStatesToNVDAStates.has_key(state):
stateSet.add(JABStatesToNVDAStates[state])
if "visible" not in stateStrings:
stateSet.add(controlTypes.STATE_INVISIBLE)
if "showing" not in stateStrings:
stateSet.add(controlTypes.STATE_OFFSCREEN)
if "expandable" not in stateStrings:
stateSet.discard(controlTypes.STATE_COLLAPSED)
return stateSet
def _get_value(self):
if self.role not in [controlTypes.ROLE_CHECKBOX,controlTypes.ROLE_MENU,controlTypes.ROLE_MENUITEM,controlTypes.ROLE_RADIOBUTTON,controlTypes.ROLE_BUTTON] and self._JABAccContextInfo.accessibleValue and not self._JABAccContextInfo.accessibleText:
return self.jabContext.getCurrentAccessibleValueFromContext()
def _get_description(self):
return re_simpleXmlTag.sub(" ", self._JABAccContextInfo.description)
def _get_location(self):
return (self._JABAccContextInfo.x,self._JABAccContextInfo.y,self._JABAccContextInfo.width,self._JABAccContextInfo.height)
def _get_hasFocus(self):
if controlTypes.STATE_FOCUSED in self.states:
return True
else:
return False
def _get_positionInfo(self):
info=super(JAB,self).positionInfo or {}
# If tree view item, try to retrieve the level via JAB
if self.role==controlTypes.ROLE_TREEVIEWITEM:
try:
tree=self.jabContext.getAccessibleParentWithRole("tree")
if tree:
treeDepth=tree.getObjectDepth()
selfDepth=self.jabContext.getObjectDepth()
if selfDepth > treeDepth:
info['level']=selfDepth-treeDepth
except:
pass
targets=self._getJABRelationTargets('memberOf')
for index,target in enumerate(targets):
if target==self.jabContext:
info['indexInGroup']=index+1
info['similarItemsInGroup']=len(targets)
return info
parent=self.parent
if isinstance(parent,JAB) and self.role in (controlTypes.ROLE_TREEVIEWITEM,controlTypes.ROLE_LISTITEM):
index=self._JABAccContextInfo.indexInParent+1
childCount=parent._JABAccContextInfo.childrenCount
info['indexInGroup']=index
info['similarItemsInGroup']=childCount
return info
def _get_activeChild(self):
jabContext=self.jabContext.getActiveDescendent()
if jabContext:
return JAB(jabContext=jabContext)
else:
return None
def _get_parent(self):
if not hasattr(self,'_parent'):
jabContext=self.jabContext.getAccessibleParentFromContext()
if jabContext:
self._parent=JAB(jabContext=jabContext)
else:
self._parent=super(JAB,self).parent
return self._parent
def _get_next(self):
parent=self.parent
if not isinstance(parent,JAB):
return super(JAB,self).next
if self.indexInParent is None:
return None
newIndex=self.indexInParent+1
if newIndex>=parent._JABAccContextInfo.childrenCount:
return None
jabContext=parent.jabContext.getAccessibleChildFromContext(newIndex)
if not jabContext:
return None
obj=JAB(jabContext=jabContext)
if not isinstance(obj.parent,JAB):
obj.parent=parent
if obj.indexInParent is None:
obj.indexInParent=newIndex
elif obj.indexInParent<=self.indexInParent:
return None
return obj
def _get_previous(self):
parent=self.parent
if not isinstance(parent,JAB):
return super(JAB,self).previous
if self.indexInParent is None:
return None
newIndex=self.indexInParent-1
if newIndex<0:
return None
jabContext=parent.jabContext.getAccessibleChildFromContext(newIndex)
if not jabContext:
return None
obj=JAB(jabContext=jabContext)
if not isinstance(obj.parent,JAB):
obj.parent=parent
if obj.indexInParent is None:
obj.indexInParent=newIndex
elif obj.indexInParent>=self.indexInParent:
return None
return obj
def _get_firstChild(self):
if self._JABAccContextInfo.childrenCount<=0:
return None
jabContext=self.jabContext.getAccessibleChildFromContext(0)
if jabContext:
obj=JAB(jabContext=jabContext)
if not isinstance(obj.parent,JAB):
obj.parent=self
if obj.indexInParent is None:
obj.indexInParent=0
return obj
else:
return None
def _get_lastChild(self):
if self._JABAccContextInfo.childrenCount<=0:
return None
jabContext=self.jabContext.getAccessibleChildFromContext(self.childCount-1)
if jabContext:
obj=JAB(jabContext=jabContext)
if not isinstance(obj.parent,JAB):
obj.parent=self
if obj.indexInParent is None:
obj.indexInParent=self.childCount-1
return obj
else:
return None
def _get_childCount(self):
return self._JABAccContextInfo.childrenCount
def _get_children(self):
children=[]
for index in xrange(self._JABAccContextInfo.childrenCount):
jabContext=self.jabContext.getAccessibleChildFromContext(index)
if jabContext:
obj=JAB(jabContext=jabContext)
if not isinstance(obj.parent,JAB):
obj.parent=self
if obj.indexInParent is None:
obj.indexInParent=index
children.append(obj)
return children
def _get_indexInParent(self):
index = self._JABAccContextInfo.indexInParent
if index == -1:
return None
return index
def _getJABRelationTargets(self, key):
rs = self.jabContext.getAccessibleRelationSet()
targets=[]
for relation in rs.relations[:rs.relationCount]:
for target in relation.targets[:relation.targetCount]:
if relation.key == key:
targets.append(JABHandler.JABContext(self.jabContext.hwnd, self.jabContext.vmID, target))
else:
JABHandler.bridgeDll.releaseJavaObject(self.jabContext.vmID,target)
return targets
def _get_flowsTo(self):
targets=self._getJABRelationTargets("flowsTo")
if targets:
return targets[0]
def _get_flowsFrom(self):
targets=self._getJABRelationTargets("flowsFrom")
if targets:
return targets[0]
def reportFocus(self):
parent=self.parent
if self.role in [controlTypes.ROLE_LIST] and isinstance(parent,JAB) and parent.role==controlTypes.ROLE_COMBOBOX:
return
super(JAB,self).reportFocus()
def _get__actions(self):
actions = JABHandler.AccessibleActions()
JABHandler.bridgeDll.getAccessibleActions(self.jabContext.vmID, self.jabContext.accContext, actions)
return actions.actionInfo[:actions.actionsCount]
def _get_actionCount(self):
return len(self._actions)
def getActionName(self, index=None):
if index is None:
index = self.defaultActionIndex
try:
return self._actions[index].name
except IndexError:
raise NotImplementedError
def doAction(self, index=None):
if index is None:
index = self.defaultActionIndex
try:
JABHandler.bridgeDll.doAccessibleActions(self.jabContext.vmID, self.jabContext.accContext,
JABHandler.AccessibleActionsToDo(actionsCount=1, actions=(self._actions[index],)),
JABHandler.jint())
except (IndexError, RuntimeError):
raise NotImplementedError
def _get_activeDescendant(self):
descendantFound=False
jabContext=self.jabContext
while jabContext:
try:
tempContext=jabContext.getActiveDescendent()
except:
break
if not tempContext:
break
try:
depth=tempContext.getObjectDepth()
except:
depth=-1
if depth<=0 or tempContext==jabContext:
break
jabContext=tempContext
descendantFound=True
if descendantFound:
return JAB(jabContext=jabContext)
def event_gainFocus(self):
if eventHandler.isPendingEvents("gainFocus"):
return
super(JAB,self).event_gainFocus()
if eventHandler.isPendingEvents("gainFocus"):
return
activeDescendant=self.activeDescendant
if activeDescendant:
eventHandler.queueEvent("gainFocus",activeDescendant)
class ComboBox(JAB):
def _get_states(self):
states=super(ComboBox,self).states
if controlTypes.STATE_COLLAPSED not in states and controlTypes.STATE_EXPANDED not in states:
if self.childCount==1 and self.firstChild and self.firstChild.role==controlTypes.ROLE_POPUPMENU:
if controlTypes.STATE_INVISIBLE in self.firstChild.states:
states.add(controlTypes.STATE_COLLAPSED)
else:
states.add(controlTypes.STATE_EXPANDED)
return states
def _get_activeDescendant(self):
if controlTypes.STATE_COLLAPSED in self.states:
return None
return super(ComboBox,self).activeDescendant
def _get_value(self):
value=super(ComboBox,self).value
if not value and not self.activeDescendant:
descendant=super(ComboBox,self).activeDescendant
if descendant:
value=descendant.name
return value
class Table(JAB):
def _get__jabTableInfo(self):
info=self.jabContext.getAccessibleTableInfo()
if info:
self._jabTableInfo=info
return info
def _get_rowCount(self):
if self._jabTableInfo:
return self._jabTableInfo.rowCount
def _get_columnCount(self):
if self._jabTableInfo:
return self._jabTableInfo.columnCount
def _get_tableID(self):
return self._jabTableInfo.jabTable.accContext.value
class TableCell(JAB):
role=controlTypes.ROLE_TABLECELL
def _get_table(self):
if self.parent and isinstance(self.parent,Table):
self.table=self.parent
return self.table
def _get_tableID(self):
return self.table.tableID
def _get_rowNumber(self):
return self.table._jabTableInfo.jabTable.getAccessibleTableRow(self.indexInParent)+1
def _get_columnNumber(self):
return self.table._jabTableInfo.jabTable.getAccessibleTableColumn(self.indexInParent)+1
def _get_rowHeaderText(self):
headerTableInfo=self.table.jabContext.getAccessibleTableRowHeader()
if headerTableInfo and headerTableInfo.jabTable:
textList=[]
row=self.rowNumber-1
for col in xrange(headerTableInfo.columnCount):
cellInfo=headerTableInfo.jabTable.getAccessibleTableCellInfo(row,col)
if cellInfo and cellInfo.jabContext:
obj=JAB(jabContext=cellInfo.jabContext)
if obj.name: textList.append(obj.name)
if obj.description: textList.append(obj.description)
jabContext=self.table._jabTableInfo.jabTable.getAccessibleTableRowDescription(row)
if jabContext:
obj=JAB(jabContext=jabContext)
if obj.name: textList.append(obj.name)
if obj.description: textList.append(obj.description)
return " ".join(textList)
def _get_columnHeaderText(self):
headerTableInfo=self.table.jabContext.getAccessibleTableColumnHeader()
if headerTableInfo and headerTableInfo.jabTable:
textList=[]
col=self.columnNumber-1
for row in xrange(headerTableInfo.rowCount):
cellInfo=headerTableInfo.jabTable.getAccessibleTableCellInfo(row,col)
if cellInfo and cellInfo.jabContext:
obj=JAB(jabContext=cellInfo.jabContext)
if obj.name: textList.append(obj.name)
if obj.description: textList.append(obj.description)
jabContext=self.table._jabTableInfo.jabTable.getAccessibleTableColumnDescription(col)
if jabContext:
obj=JAB(jabContext=jabContext)
if obj.name: textList.append(obj.name)
if obj.description: textList.append(obj.description)
return " ".join(textList)
| 1 | 18,032 | Can this just be an else? | nvaccess-nvda | py |
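The one-liner is terse; one plausible reading is that the nested `if` in the patched hunk can be folded into a single condition, so the fallback assignment needs no extra nesting. A sketch of that flattening, as an interpretation rather than necessarily what the reviewer meant:

    altModifier = binding.modifiers & JABHandler.ACCESSIBLE_ALT_KEYSTROKE
    # Fold the two nested checks into one condition; behaviour is unchanged.
    if not altModifier and not binding.modifiers and self.role != controlTypes.ROLE_MENUITEM:
        parent = self.parent
        altModifier = isinstance(parent, JAB) and parent.role == controlTypes.ROLE_MENUBAR
    if altModifier:
        keyList.append(keyLabels.localizedKeyLabels['alt'])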
@@ -7,6 +7,7 @@ import torch.nn as nn
from mmcv.utils import print_log
from mmdet.core import auto_fp16
+from mmdet.utils import get_root_logger
class BaseDetector(nn.Module, metaclass=ABCMeta): | 1 | import warnings
from abc import ABCMeta, abstractmethod
import mmcv
import numpy as np
import torch.nn as nn
from mmcv.utils import print_log
from mmdet.core import auto_fp16
class BaseDetector(nn.Module, metaclass=ABCMeta):
"""Base class for detectors"""
def __init__(self):
super(BaseDetector, self).__init__()
self.fp16_enabled = False
@property
def with_neck(self):
return hasattr(self, 'neck') and self.neck is not None
# TODO: these properties need to be carefully handled
# for both single stage & two stage detectors
@property
def with_shared_head(self):
return hasattr(self.roi_head,
'shared_head') and self.roi_head.shared_head is not None
@property
def with_bbox(self):
return ((hasattr(self.roi_head, 'bbox_head')
and self.roi_head.bbox_head is not None)
or (hasattr(self, 'bbox_head') and self.bbox_head is not None))
@property
def with_mask(self):
return ((hasattr(self.roi_head, 'mask_head')
and self.roi_head.mask_head is not None)
or (hasattr(self, 'mask_head') and self.mask_head is not None))
@abstractmethod
def extract_feat(self, imgs):
pass
def extract_feats(self, imgs):
assert isinstance(imgs, list)
return [self.extract_feat(img) for img in imgs]
@abstractmethod
def forward_train(self, imgs, img_metas, **kwargs):
"""
Args:
img (list[Tensor]): List of tensors of shape (1, C, H, W).
Typically these should be mean centered and std scaled.
img_metas (list[dict]): List of image info dict where each dict
has: 'img_shape', 'scale_factor', 'flip', and may also contain
'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.
For details on the values of these keys, see
:class:`mmdet.datasets.pipelines.Collect`.
kwargs (keyword arguments): Specific to concrete implementation.
"""
pass
async def async_simple_test(self, img, img_metas, **kwargs):
raise NotImplementedError
@abstractmethod
def simple_test(self, img, img_metas, **kwargs):
pass
@abstractmethod
def aug_test(self, imgs, img_metas, **kwargs):
pass
def init_weights(self, pretrained=None):
if pretrained is not None:
print_log(f'load model from: {pretrained}', logger='root')
async def aforward_test(self, *, img, img_metas, **kwargs):
for var, name in [(img, 'img'), (img_metas, 'img_metas')]:
if not isinstance(var, list):
raise TypeError(f'{name} must be a list, but got {type(var)}')
num_augs = len(img)
if num_augs != len(img_metas):
raise ValueError(f'num of augmentations ({len(img)}) '
f'!= num of image metas ({len(img_metas)})')
# TODO: remove the restriction of samples_per_gpu == 1 when prepared
samples_per_gpu = img[0].size(0)
assert samples_per_gpu == 1
if num_augs == 1:
return await self.async_simple_test(img[0], img_metas[0], **kwargs)
else:
raise NotImplementedError
def forward_test(self, imgs, img_metas, **kwargs):
"""
Args:
imgs (List[Tensor]): the outer list indicates test-time
augmentations and inner Tensor should have a shape NxCxHxW,
which contains all images in the batch.
img_metas (List[List[dict]]): the outer list indicates test-time
augs (multiscale, flip, etc.) and the inner list indicates
images in a batch.
"""
for var, name in [(imgs, 'imgs'), (img_metas, 'img_metas')]:
if not isinstance(var, list):
raise TypeError(f'{name} must be a list, but got {type(var)}')
num_augs = len(imgs)
if num_augs != len(img_metas):
raise ValueError(f'num of augmentations ({len(imgs)}) '
f'!= num of image meta ({len(img_metas)})')
# TODO: remove the restriction of samples_per_gpu == 1 when prepared
samples_per_gpu = imgs[0].size(0)
assert samples_per_gpu == 1
if num_augs == 1:
"""
proposals (List[List[Tensor]]): the outer list indicates test-time
augs (multiscale, flip, etc.) and the inner list indicates
images in a batch. The Tensor should have a shape Px4, where
P is the number of proposals.
"""
if 'proposals' in kwargs:
kwargs['proposals'] = kwargs['proposals'][0]
return self.simple_test(imgs[0], img_metas[0], **kwargs)
else:
# TODO: support test augmentation for predefined proposals
assert 'proposals' not in kwargs
return self.aug_test(imgs, img_metas, **kwargs)
@auto_fp16(apply_to=('img', ))
def forward(self, img, img_metas, return_loss=True, **kwargs):
"""
Calls either forward_train or forward_test depending on whether
return_loss=True. Note this setting will change the expected inputs.
When `return_loss=True`, img and img_meta are single-nested (i.e.
Tensor and List[dict]), and when `return_loss=False`, img and img_meta
should be double nested (i.e. List[Tensor], List[List[dict]]), with
the outer list indicating test time augmentations.
"""
if return_loss:
return self.forward_train(img, img_metas, **kwargs)
else:
return self.forward_test(img, img_metas, **kwargs)
def show_result(self,
img,
result,
score_thr=0.3,
bbox_color='green',
text_color='green',
thickness=1,
font_scale=0.5,
win_name='',
show=False,
wait_time=0,
out_file=None):
"""Draw `result` over `img`.
Args:
img (str or Tensor): The image to be displayed.
result (Tensor or tuple): The results to draw over `img`
bbox_result or (bbox_result, segm_result).
score_thr (float, optional): Minimum score of bboxes to be shown.
Default: 0.3.
bbox_color (str or tuple or :obj:`Color`): Color of bbox lines.
text_color (str or tuple or :obj:`Color`): Color of texts.
thickness (int): Thickness of lines.
font_scale (float): Font scales of texts.
win_name (str): The window name.
wait_time (int): Value of waitKey param.
Default: 0.
show (bool): Whether to show the image.
Default: False.
out_file (str or None): The filename to write the image.
Default: None.
Returns:
img (Tensor): Only if not `show` or `out_file`
"""
img = mmcv.imread(img)
img = img.copy()
if isinstance(result, tuple):
bbox_result, segm_result = result
if isinstance(segm_result, tuple):
segm_result = segm_result[0] # ms rcnn
else:
bbox_result, segm_result = result, None
bboxes = np.vstack(bbox_result)
labels = [
np.full(bbox.shape[0], i, dtype=np.int32)
for i, bbox in enumerate(bbox_result)
]
labels = np.concatenate(labels)
# draw segmentation masks
if segm_result is not None and len(labels) > 0: # non empty
segms = mmcv.concat_list(segm_result)
inds = np.where(bboxes[:, -1] > score_thr)[0]
np.random.seed(42)
color_masks = [
np.random.randint(0, 256, (1, 3), dtype=np.uint8)
for _ in range(max(labels) + 1)
]
for i in inds:
i = int(i)
color_mask = color_masks[labels[i]]
mask = segms[i]
img[mask] = img[mask] * 0.5 + color_mask * 0.5
# if out_file specified, do not show image in window
if out_file is not None:
show = False
# draw bounding boxes
mmcv.imshow_det_bboxes(
img,
bboxes,
labels,
class_names=self.CLASSES,
score_thr=score_thr,
bbox_color=bbox_color,
text_color=text_color,
thickness=thickness,
font_scale=font_scale,
win_name=win_name,
show=show,
wait_time=wait_time,
out_file=out_file)
if not (show or out_file):
warnings.warn('show==False and out_file is not specified, only '
'result image will be returned')
return img
| 1 | 19,885 | Merge this line with line 7 | open-mmlab-mmdetection | py |
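The exact target of "line 7" isn't visible in this excerpt; a plausible reading is simply that the new import should sit in the grouped import block at the top of the file rather than on a detached line. A sketch under that assumption:

    from mmcv.utils import print_log

    from mmdet.core import auto_fp16
    from mmdet.utils import get_root_logger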
@@ -19,7 +19,7 @@ module GeocoderHelper
html << result[:suffix] if result[:suffix]
if result[:type] and result[:id]
- html << content_tag(:small, :class => ["deemphasize", "search_details"]) do
+ html << content_tag(:small, :class => ["deemphasize", "search_details", "clearfix"]) do
link_to(t("browse.#{result[:type]}_history.view_details"), :controller => :browse, :action => result[:type], :id => result[:id])
end
end | 1 | module GeocoderHelper
def result_to_html(result)
html_options = { :class => "set_position", :data => {} }
if result[:min_lon] and result[:min_lat] and result[:max_lon] and result[:max_lat]
url = "?minlon=#{result[:min_lon]}&minlat=#{result[:min_lat]}&maxlon=#{result[:max_lon]}&maxlat=#{result[:max_lat]}"
else
url = "?mlat=#{result[:lat]}&mlon=#{result[:lon]}&zoom=#{result[:zoom]}"
end
result.each do |key,value|
html_options[:data][key.to_s.tr('_', '-')] = value
end
html = ""
html << result[:prefix] if result[:prefix]
html << " " if result[:prefix] and result[:name]
html << link_to(result[:name], url, html_options) if result[:name]
html << result[:suffix] if result[:suffix]
if result[:type] and result[:id]
html << content_tag(:small, :class => ["deemphasize", "search_details"]) do
link_to(t("browse.#{result[:type]}_history.view_details"), :controller => :browse, :action => result[:type], :id => result[:id])
end
end
return raw(html)
end
def describe_location(lat, lon, zoom = nil, language = nil)
Nominatim.describe_location(lat, lon, zoom, language)
end
end
| 1 | 8,751 | `clearfix` is needed to stop the `float:right` content overflowing the list item container. | openstreetmap-openstreetmap-website | rb |
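For readers who don't know the technique named in the comment: a clearfix makes a container enclose its floated children, so the `float: right` details link cannot spill out of the `<small>` element. The project's actual rule isn't shown here; a conventional implementation looks like this:

    /* Conventional clearfix: generated content clears the floats. */
    .clearfix::after {
      content: "";
      display: table;
      clear: both;
    }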
@@ -6,7 +6,7 @@ using Microsoft.AspNetCore.Server.Kestrel.Core.Internal.Infrastructure;
namespace Microsoft.AspNetCore.Server.Kestrel.Performance
{
- [ParameterizedJobConfig(typeof(CoreConfig))]
+ [AspNetCoreBenchmark]
public class StringUtilitiesBenchmark
{
private const int Iterations = 500_000; | 1 | // Copyright (c) .NET Foundation. All rights reserved.
// Licensed under the Apache License, Version 2.0. See License.txt in the project root for license information.
using BenchmarkDotNet.Attributes;
using Microsoft.AspNetCore.Server.Kestrel.Core.Internal.Infrastructure;
namespace Microsoft.AspNetCore.Server.Kestrel.Performance
{
[ParameterizedJobConfig(typeof(CoreConfig))]
public class StringUtilitiesBenchmark
{
private const int Iterations = 500_000;
[Benchmark(Baseline = true, OperationsPerInvoke = Iterations)]
public void UintToString()
{
var connectionId = CorrelationIdGenerator.GetNextId();
for (uint i = 0; i < Iterations; i++)
{
var id = connectionId + ':' + i.ToString("X8");
}
}
[Benchmark(OperationsPerInvoke = Iterations)]
public void ConcatAsHexSuffix()
{
var connectionId = CorrelationIdGenerator.GetNextId();
for (uint i = 0; i < Iterations; i++)
{
var id = StringUtilities.ConcatAsHexSuffix(connectionId, ':', i);
}
}
}
}
| 1 | 14,570 | Can this be put on the assembly? | aspnet-KestrelHttpServer | .cs |
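The question is whether the config attribute can be declared once at assembly scope instead of on every benchmark class. That only works if `AspNetCoreBenchmarkAttribute` is declared with `AttributeTargets.Assembly` in its `AttributeUsage`, which this excerpt doesn't show; under that assumption the declaration would be:

    // Hypothetical assembly-level form, e.g. in AssemblyInfo.cs, replacing
    // the per-class [AspNetCoreBenchmark] annotations.
    [assembly: AspNetCoreBenchmark]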
@@ -124,7 +124,8 @@ public class HealthCheckServlet extends HttpServlet {
if (results.isEmpty()) {
resp.setStatus(HttpServletResponse.SC_NOT_IMPLEMENTED);
} else {
- if (isAllHealthy(results)) {
+ final boolean alwaysOk = Boolean.parseBoolean(req.getParameter("alwaysOk"));
+ if (alwaysOk || isAllHealthy(results)) {
resp.setStatus(HttpServletResponse.SC_OK);
} else {
resp.setStatus(HttpServletResponse.SC_INTERNAL_SERVER_ERROR); | 1 | package com.codahale.metrics.servlets;
import java.io.IOException;
import java.io.OutputStream;
import java.util.Map;
import java.util.SortedMap;
import java.util.concurrent.ExecutorService;
import javax.servlet.ServletConfig;
import javax.servlet.ServletContext;
import javax.servlet.ServletContextEvent;
import javax.servlet.ServletContextListener;
import javax.servlet.ServletException;
import javax.servlet.http.HttpServlet;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import com.codahale.metrics.health.HealthCheck;
import com.codahale.metrics.health.HealthCheckFilter;
import com.codahale.metrics.health.HealthCheckRegistry;
import com.codahale.metrics.json.HealthCheckModule;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.ObjectWriter;
public class HealthCheckServlet extends HttpServlet {
public static abstract class ContextListener implements ServletContextListener {
/**
* @return the {@link HealthCheckRegistry} to inject into the servlet context.
*/
protected abstract HealthCheckRegistry getHealthCheckRegistry();
/**
* @return the {@link ExecutorService} to inject into the servlet context, or {@code null}
* if the health checks should be run in the servlet worker thread.
*/
protected ExecutorService getExecutorService() {
// don't use a thread pool by default
return null;
}
/**
* @return the {@link HealthCheckFilter} that shall be used to filter health checks,
* or {@link HealthCheckFilter#ALL} if the default should be used.
*/
protected HealthCheckFilter getHealthCheckFilter() {
return HealthCheckFilter.ALL;
}
@Override
public void contextInitialized(ServletContextEvent event) {
final ServletContext context = event.getServletContext();
context.setAttribute(HEALTH_CHECK_REGISTRY, getHealthCheckRegistry());
context.setAttribute(HEALTH_CHECK_EXECUTOR, getExecutorService());
}
@Override
public void contextDestroyed(ServletContextEvent event) {
// no-op
}
}
public static final String HEALTH_CHECK_REGISTRY = HealthCheckServlet.class.getCanonicalName() + ".registry";
public static final String HEALTH_CHECK_EXECUTOR = HealthCheckServlet.class.getCanonicalName() + ".executor";
public static final String HEALTH_CHECK_FILTER = HealthCheckServlet.class.getCanonicalName() + ".healthCheckFilter";
private static final long serialVersionUID = -8432996484889177321L;
private static final String CONTENT_TYPE = "application/json";
private transient HealthCheckRegistry registry;
private transient ExecutorService executorService;
private transient HealthCheckFilter filter;
private transient ObjectMapper mapper;
public HealthCheckServlet() {
}
public HealthCheckServlet(HealthCheckRegistry registry) {
this.registry = registry;
}
@Override
public void init(ServletConfig config) throws ServletException {
super.init(config);
final ServletContext context = config.getServletContext();
if (null == registry) {
final Object registryAttr = context.getAttribute(HEALTH_CHECK_REGISTRY);
if (registryAttr instanceof HealthCheckRegistry) {
this.registry = (HealthCheckRegistry) registryAttr;
} else {
throw new ServletException("Couldn't find a HealthCheckRegistry instance.");
}
}
final Object executorAttr = context.getAttribute(HEALTH_CHECK_EXECUTOR);
if (executorAttr instanceof ExecutorService) {
this.executorService = (ExecutorService) executorAttr;
}
final Object filterAttr = context.getAttribute(HEALTH_CHECK_FILTER);
if (filterAttr instanceof HealthCheckFilter) {
filter = (HealthCheckFilter) filterAttr;
}
if (filter == null) {
filter = HealthCheckFilter.ALL;
}
this.mapper = new ObjectMapper().registerModule(new HealthCheckModule());
}
@Override
public void destroy() {
super.destroy();
registry.shutdown();
}
@Override
protected void doGet(HttpServletRequest req,
HttpServletResponse resp) throws ServletException, IOException {
final SortedMap<String, HealthCheck.Result> results = runHealthChecks();
resp.setContentType(CONTENT_TYPE);
resp.setHeader("Cache-Control", "must-revalidate,no-cache,no-store");
if (results.isEmpty()) {
resp.setStatus(HttpServletResponse.SC_NOT_IMPLEMENTED);
} else {
if (isAllHealthy(results)) {
resp.setStatus(HttpServletResponse.SC_OK);
} else {
resp.setStatus(HttpServletResponse.SC_INTERNAL_SERVER_ERROR);
}
}
try (OutputStream output = resp.getOutputStream()) {
getWriter(req).writeValue(output, results);
}
}
private ObjectWriter getWriter(HttpServletRequest request) {
final boolean prettyPrint = Boolean.parseBoolean(request.getParameter("pretty"));
if (prettyPrint) {
return mapper.writerWithDefaultPrettyPrinter();
}
return mapper.writer();
}
private SortedMap<String, HealthCheck.Result> runHealthChecks() {
if (executorService == null) {
return registry.runHealthChecks(filter);
}
return registry.runHealthChecks(executorService, filter);
}
private static boolean isAllHealthy(Map<String, HealthCheck.Result> results) {
for (HealthCheck.Result result : results.values()) {
if (!result.isHealthy()) {
return false;
}
}
return true;
}
}
| 1 | 7,620 | Could we rename the parameter to `overrideStatusCode`? The name `alwaysOk` implies that the health check result would always be healthy. | dropwizard-metrics | java |
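The rename is mechanical; the same hunk with the clearer name would read:

    final boolean overrideStatusCode = Boolean.parseBoolean(req.getParameter("overrideStatusCode"));
    if (overrideStatusCode || isAllHealthy(results)) {
        resp.setStatus(HttpServletResponse.SC_OK);
    } else {
        resp.setStatus(HttpServletResponse.SC_INTERNAL_SERVER_ERROR);
    }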
@@ -37,5 +37,9 @@ func (c *clusterApi) Routes() []*Route {
{verb: "GET", path: clusterSecretPath("/defaultsecretkey", cluster.APIVersion), fn: c.getDefaultSecretKey},
{verb: "PUT", path: clusterSecretPath("/defaultsecretkey", cluster.APIVersion), fn: c.setDefaultSecretKey},
{verb: "POST", path: clusterSecretPath("/login", cluster.APIVersion), fn: c.secretsLogin},
+ {verb: "GET", path: clusterPath("/schedpolicy", cluster.APIVersion), fn: c.listSchedPolicy},
+ {verb: "POST", path: clusterPath("/schedpolicy", cluster.APIVersion), fn: c.createSchedPolicy},
+ {verb: "PUT", path: clusterPath("/schedpolicy", cluster.APIVersion), fn: c.updateSchedPolicy},
+ {verb: "DELETE", path: clusterPath("/schedpolicy/{name}", cluster.APIVersion), fn: c.deleteSchedPolicy},
}
} | 1 | package server
import (
client "github.com/libopenstorage/openstorage/api/client/cluster"
"github.com/libopenstorage/openstorage/cluster"
)
func (c *clusterApi) Routes() []*Route {
return []*Route{
{verb: "GET", path: "/cluster/versions", fn: c.versions},
{verb: "GET", path: clusterPath("/enumerate", cluster.APIVersion), fn: c.enumerate},
{verb: "GET", path: clusterPath("/gossipstate", cluster.APIVersion), fn: c.gossipState},
{verb: "GET", path: clusterPath("/nodestatus", cluster.APIVersion), fn: c.nodeStatus},
{verb: "GET", path: clusterPath("/nodehealth", cluster.APIVersion), fn: c.nodeHealth},
{verb: "GET", path: clusterPath("/status", cluster.APIVersion), fn: c.status},
{verb: "GET", path: clusterPath("/peerstatus", cluster.APIVersion), fn: c.peerStatus},
{verb: "GET", path: clusterPath("/inspect/{id}", cluster.APIVersion), fn: c.inspect},
{verb: "DELETE", path: clusterPath("", cluster.APIVersion), fn: c.delete},
{verb: "DELETE", path: clusterPath("/{id}", cluster.APIVersion), fn: c.delete},
{verb: "PUT", path: clusterPath("/enablegossip", cluster.APIVersion), fn: c.enableGossip},
{verb: "PUT", path: clusterPath("/disablegossip", cluster.APIVersion), fn: c.disableGossip},
{verb: "PUT", path: clusterPath("/shutdown", cluster.APIVersion), fn: c.shutdown},
{verb: "PUT", path: clusterPath("/shutdown/{id}", cluster.APIVersion), fn: c.shutdown},
{verb: "GET", path: clusterPath("/alerts/{resource}", cluster.APIVersion), fn: c.enumerateAlerts},
{verb: "PUT", path: clusterPath("/alerts/{resource}/{id}", cluster.APIVersion), fn: c.clearAlert},
{verb: "DELETE", path: clusterPath("/alerts/{resource}/{id}", cluster.APIVersion), fn: c.eraseAlert},
{verb: "GET", path: clusterPath(client.UriCluster, cluster.APIVersion), fn: c.getClusterConf},
{verb: "GET", path: clusterPath(client.UriNode+"/{id}", cluster.APIVersion), fn: c.getNodeConf},
{verb: "GET", path: clusterPath(client.UriEnumerate, cluster.APIVersion), fn: c.enumerateConf},
{verb: "POST", path: clusterPath(client.UriCluster, cluster.APIVersion), fn: c.setClusterConf},
{verb: "POST", path: clusterPath(client.UriNode, cluster.APIVersion), fn: c.setNodeConf},
{verb: "DELETE", path: clusterPath(client.UriNode+"/{id}", cluster.APIVersion), fn: c.delNodeConf},
{verb: "GET", path: clusterPath("/getnodeidfromip/{idip}", cluster.APIVersion), fn: c.getNodeIdFromIp},
{verb: "GET", path: clusterSecretPath("/verify", cluster.APIVersion), fn: c.secretLoginCheck},
{verb: "GET", path: clusterSecretPath("", cluster.APIVersion), fn: c.getSecret},
{verb: "PUT", path: clusterSecretPath("", cluster.APIVersion), fn: c.setSecret},
{verb: "GET", path: clusterSecretPath("/defaultsecretkey", cluster.APIVersion), fn: c.getDefaultSecretKey},
{verb: "PUT", path: clusterSecretPath("/defaultsecretkey", cluster.APIVersion), fn: c.setDefaultSecretKey},
{verb: "POST", path: clusterSecretPath("/login", cluster.APIVersion), fn: c.secretsLogin},
}
}
| 1 | 6,865 | "/schedpolicy" is repeated; I would make it a constant and use it both here and in cluster client.go | libopenstorage-openstorage | go |
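A sketch of the deduplication being suggested; the constant's name and home package are illustrative choices, since sharing it with client.go would mean exporting it from a package both sides import:

    // Illustrative constant; the real name and location would be chosen so
    // that both these routes and the cluster client reference one definition.
    const SchedPolicyPath = "/schedpolicy"

    {verb: "GET", path: clusterPath(SchedPolicyPath, cluster.APIVersion), fn: c.listSchedPolicy},
    {verb: "POST", path: clusterPath(SchedPolicyPath, cluster.APIVersion), fn: c.createSchedPolicy},
    {verb: "PUT", path: clusterPath(SchedPolicyPath, cluster.APIVersion), fn: c.updateSchedPolicy},
    {verb: "DELETE", path: clusterPath(SchedPolicyPath+"/{name}", cluster.APIVersion), fn: c.deleteSchedPolicy},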
@@ -0,0 +1,14 @@
+module RSpec::Core::Ordering
+ class RandomOrdering
+ def order(items, configuration = RSpec.configuration)
+ Kernel.srand configuration.seed
+ ordering = items.shuffle
+ Kernel.srand # reset random generation
+ ordering
+ end
+
+ def built_in?
+ true
+ end
+ end
+end | 1 | 1 | 9,651 | Having a second `configuration` arg seems kinda odd to me as an interface. It looks like you're just using it as a form of dependency injection for the tests, right? Part of what makes it seem weird is that it's leaked into all the other orderers where they don't use `configuration` at all. Instead, what do you think about accepting the configuration in `initialize` (and having it default to `RSpec.configuration` in the same way)? To me, the config feels like a bit of state the instance should hold onto rather than being given each time `order` is called. It also means the other orderers wouldn't need to take the odd second argument. Thoughts? | rspec-rspec-core | rb |
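A sketch of the `initialize`-based version being suggested (behaviour unchanged, untested):

```ruby
module RSpec::Core::Ordering
  class RandomOrdering
    def initialize(configuration = RSpec.configuration)
      @configuration = configuration
    end

    def order(items)
      Kernel.srand @configuration.seed
      ordering = items.shuffle
      Kernel.srand # reset random generation
      ordering
    end

    def built_in?
      true
    end
  end
end
```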
|
@@ -248,8 +248,9 @@ class AddonsDialog(
self.getAddonsButton = generalActions.addButton(self, label=_("&Get add-ons..."))
self.getAddonsButton.Bind(wx.EVT_BUTTON, self.onGetAddonsClick)
# Translators: The label for a button in Add-ons Manager dialog to install an add-on.
- self.addButton = generalActions.addButton(self, label=_("&Install..."))
- self.addButton.Bind(wx.EVT_BUTTON, self.onAddClick)
+ if not globalVars.appArgs.secure:
+ self.addButton = generalActions.addButton(self, label=_("&Install..."))
+ self.addButton.Bind(wx.EVT_BUTTON, self.onAddClick)
# Translators: The label of a button in the Add-ons Manager to open the list of incompatible add-ons.
self.incompatAddonsButton = generalActions.addButton(self, label=_("&View incompatible add-ons..."))
self.incompatAddonsButton.Bind(wx.EVT_BUTTON, self.onIncompatAddonsShowClick) | 1 | # A part of NonVisual Desktop Access (NVDA)
# This file is covered by the GNU General Public License.
# See the file COPYING for more details.
# Copyright (C) 2012-2019 NV Access Limited, Beqa Gozalishvili, Joseph Lee,
# Babbage B.V., Ethan Holliger, Arnold Loubriat, Thomas Stivers
import os
import weakref
from locale import strxfrm
import addonAPIVersion
import wx
import core
import config
import gui
from addonHandler import addonVersionCheck
from logHandler import log
import addonHandler
import globalVars
import buildVersion
from . import guiHelper
from . import nvdaControls
from .dpiScalingHelper import DpiScalingHelperMixin, DpiScalingHelperMixinWithoutInit
import gui.contextHelp
def promptUserForRestart():
restartMessage = _(
# Translators: A message asking the user if they wish to restart NVDA
# as addons have been added, enabled/disabled or removed.
"Changes were made to add-ons. "
"You must restart NVDA for these changes to take effect. "
"Would you like to restart now?"
)
# Translators: Title for message asking if the user wishes to restart NVDA as addons have been added or removed.
restartTitle = _("Restart NVDA")
result = gui.messageBox(
message=restartMessage,
caption=restartTitle,
style=wx.YES | wx.NO | wx.ICON_WARNING
)
if wx.YES == result:
core.restart()
class ConfirmAddonInstallDialog(nvdaControls.MessageDialog):
def __init__(self, parent, title, message, showAddonInfoFunction):
super(ConfirmAddonInstallDialog, self).__init__(
parent,
title,
message,
dialogType=nvdaControls.MessageDialog.DIALOG_TYPE_WARNING
)
self._showAddonInfoFunction = showAddonInfoFunction
def _addButtons(self, buttonHelper):
addonInfoButton = buttonHelper.addButton(
self,
# Translators: A button in the addon installation warning / blocked dialog which shows
# more information about the addon
label=_("&About add-on...")
)
addonInfoButton.Bind(wx.EVT_BUTTON, lambda evt: self._showAddonInfoFunction())
yesButton = buttonHelper.addButton(
self,
id=wx.ID_YES,
# Translators: A button in the addon installation warning dialog which allows the user to agree to installing
# the add-on
label=_("&Yes")
)
yesButton.SetDefault()
yesButton.Bind(wx.EVT_BUTTON, lambda evt: self.EndModal(wx.YES))
noButton = buttonHelper.addButton(
self,
id=wx.ID_NO,
# Translators: A button in the addon installation warning dialog which allows the user to decide not to
# install the add-on
label=_("&No")
)
noButton.Bind(wx.EVT_BUTTON, lambda evt: self.EndModal(wx.NO))
class ErrorAddonInstallDialog(nvdaControls.MessageDialog):
def __init__(self, parent, title, message, showAddonInfoFunction):
super(ErrorAddonInstallDialog, self).__init__(
parent,
title,
message,
dialogType=nvdaControls.MessageDialog.DIALOG_TYPE_ERROR
)
self._showAddonInfoFunction = showAddonInfoFunction
def _addButtons(self, buttonHelper):
addonInfoButton = buttonHelper.addButton(
self,
# Translators: A button in the addon installation warning / blocked dialog which shows
# more information about the addon
label=_("&About add-on...")
)
addonInfoButton.Bind(wx.EVT_BUTTON, lambda evt: self._showAddonInfoFunction())
okButton = buttonHelper.addButton(
self,
id=wx.ID_OK,
# Translators: A button in the addon installation blocked dialog which will dismiss the dialog.
label=_("OK")
)
okButton.SetDefault()
okButton.Bind(wx.EVT_BUTTON, lambda evt: self.EndModal(wx.OK))
def _showAddonInfo(addon):
manifest = addon.manifest
message=[_(
# Translators: message shown in the Addon Information dialog.
"{summary} ({name})\n"
"Version: {version}\n"
"Author: {author}\n"
"Description: {description}\n"
).format(**manifest)]
url=manifest.get('url')
if url:
# Translators: the url part of the About Add-on information
message.append(_("URL: {url}").format(url=url))
minimumNVDAVersion = addonAPIVersion.formatForGUI(addon.minimumNVDAVersion)
message.append(
# Translators: the minimum NVDA version part of the About Add-on information
_("Minimum required NVDA version: {}").format(minimumNVDAVersion)
)
lastTestedNVDAVersion = addonAPIVersion.formatForGUI(addon.lastTestedNVDAVersion)
message.append(
# Translators: the last NVDA version tested part of the About Add-on information
_("Last NVDA version tested: {}").format(lastTestedNVDAVersion)
)
# Translators: title for the Addon Information dialog
title=_("Add-on Information")
gui.messageBox("\n".join(message), title, wx.OK)
class AddonsDialog(
DpiScalingHelperMixinWithoutInit,
gui.contextHelp.ContextHelpMixin,
wx.Dialog # wxPython does not seem to call base class initializer, put last in MRO
):
@classmethod
def _instance(cls):
""" type: () -> AddonsDialog
return None until this is replaced with a weakref.ref object. Then the instance is retrieved
		by treating that object as a callable.
"""
return None
helpId = "AddonsManager"
def __new__(cls, *args, **kwargs):
instance = AddonsDialog._instance()
if instance is None:
return super(AddonsDialog, cls).__new__(cls, *args, **kwargs)
return instance
def __init__(self, parent):
if AddonsDialog._instance() is not None:
return
# #7077: _instance must not be kept alive once the dialog is closed or there can be issues
# when add-ons manager reopens or another add-on is installed remotely.
AddonsDialog._instance = weakref.ref(self)
# Translators: The title of the Addons Dialog
title = _("Add-ons Manager")
# Translators: The title of the Addons Dialog when add-ons are disabled
titleWhenAddonsAreDisabled = _("Add-ons Manager (add-ons disabled)")
super().__init__(
parent,
title=title if not globalVars.appArgs.disableAddons else titleWhenAddonsAreDisabled,
style=wx.DEFAULT_DIALOG_STYLE | wx.RESIZE_BORDER | wx.MAXIMIZE_BOX,
)
mainSizer = wx.BoxSizer(wx.VERTICAL)
firstTextSizer = wx.BoxSizer(wx.VERTICAL)
listAndButtonsSizerHelper = guiHelper.BoxSizerHelper(self, sizer=wx.BoxSizer(wx.HORIZONTAL))
if globalVars.appArgs.disableAddons:
label = _(
# Translators: A message in the add-ons manager shown when add-ons are globally disabled.
"NVDA was started with all add-ons disabled. "
"You may modify the enabled / disabled state, and install or uninstall add-ons. "
"Changes will not take effect until after NVDA is restarted."
)
addonsDisabledText = wx.StaticText(self, label=label)
addonsDisabledText.Wrap(self.scaleSize(670))
firstTextSizer.Add(addonsDisabledText)
# Translators: the label for the installed addons list in the addons manager.
entriesLabel = _("Installed Add-ons")
firstTextSizer.Add(wx.StaticText(self, label=entriesLabel))
mainSizer.Add(
firstTextSizer,
border=guiHelper.BORDER_FOR_DIALOGS,
flag=wx.TOP|wx.LEFT|wx.RIGHT
)
self.addonsList = listAndButtonsSizerHelper.addItem(
nvdaControls.AutoWidthColumnListCtrl(
parent=self,
style=wx.LC_REPORT | wx.LC_SINGLE_SEL,
),
flag=wx.EXPAND,
proportion=1,
)
# Translators: The label for a column in add-ons list used to identify add-on package name (example: package is OCR).
self.addonsList.InsertColumn(0, _("Package"), width=self.scaleSize(150))
# Translators: The label for a column in add-ons list used to identify add-on's running status (example: status is running).
self.addonsList.InsertColumn(1, _("Status"), width=self.scaleSize(50))
# Translators: The label for a column in add-ons list used to identify add-on's version (example: version is 0.3).
self.addonsList.InsertColumn(2, _("Version"), width=self.scaleSize(50))
# Translators: The label for a column in add-ons list used to identify add-on's author (example: author is NV Access).
self.addonsList.InsertColumn(3, _("Author"), width=self.scaleSize(300))
self.addonsList.Bind(wx.EVT_LIST_ITEM_FOCUSED, self.onListItemSelected)
# this is the group of buttons that affects the currently selected addon
entryButtonsHelper=guiHelper.ButtonHelper(wx.VERTICAL)
# Translators: The label for a button in Add-ons Manager dialog to show information about the selected add-on.
self.aboutButton = entryButtonsHelper.addButton(self, label=_("&About add-on..."))
self.aboutButton.Disable()
self.aboutButton.Bind(wx.EVT_BUTTON, self.onAbout)
# Translators: The label for a button in Add-ons Manager dialog to show the help for the selected add-on.
self.helpButton = entryButtonsHelper.addButton(self, label=_("Add-on &help"))
self.helpButton.Disable()
self.helpButton.Bind(wx.EVT_BUTTON, self.onHelp)
# Translators: The label for a button in Add-ons Manager dialog to enable or disable the selected add-on.
self.enableDisableButton = entryButtonsHelper.addButton(self, label=_("&Disable add-on"))
self.enableDisableButton.Disable()
self.enableDisableButton.Bind(wx.EVT_BUTTON, self.onEnableDisable)
# Translators: The label for a button to remove either:
# Remove the selected add-on in Add-ons Manager dialog.
# Remove a speech dictionary entry.
self.removeButton = entryButtonsHelper.addButton(self, label=_("&Remove"))
self.removeButton.Disable()
self.removeButton.Bind(wx.EVT_BUTTON, self.onRemoveClick)
listAndButtonsSizerHelper.addItem(entryButtonsHelper.sizer)
mainSizer.Add(
listAndButtonsSizerHelper.sizer,
border=guiHelper.BORDER_FOR_DIALOGS,
flag=wx.ALL | wx.EXPAND,
proportion=1,
)
# the following buttons are more general and apply regardless of the current selection.
generalActions=guiHelper.ButtonHelper(wx.HORIZONTAL)
# Translators: The label of a button in Add-ons Manager to open the Add-ons website and get more add-ons.
self.getAddonsButton = generalActions.addButton(self, label=_("&Get add-ons..."))
self.getAddonsButton.Bind(wx.EVT_BUTTON, self.onGetAddonsClick)
# Translators: The label for a button in Add-ons Manager dialog to install an add-on.
self.addButton = generalActions.addButton(self, label=_("&Install..."))
self.addButton.Bind(wx.EVT_BUTTON, self.onAddClick)
# Translators: The label of a button in the Add-ons Manager to open the list of incompatible add-ons.
self.incompatAddonsButton = generalActions.addButton(self, label=_("&View incompatible add-ons..."))
self.incompatAddonsButton.Bind(wx.EVT_BUTTON, self.onIncompatAddonsShowClick)
mainSizer.Add(
generalActions.sizer,
border=guiHelper.BORDER_FOR_DIALOGS,
flag=wx.LEFT | wx.RIGHT
)
mainSizer.Add(
wx.StaticLine(self),
border=guiHelper.BORDER_FOR_DIALOGS,
flag=wx.ALL | wx.EXPAND
)
# Translators: The label of a button to close the Addons dialog.
closeButton = wx.Button(self, label=_("&Close"), id=wx.ID_CLOSE)
closeButton.Bind(wx.EVT_BUTTON, lambda evt: self.Close())
mainSizer.Add(
closeButton,
border=guiHelper.BORDER_FOR_DIALOGS,
flag=wx.LEFT | wx.RIGHT | wx.BOTTOM | wx.CENTER | wx.ALIGN_RIGHT
)
self.Bind(wx.EVT_CLOSE, self.onClose)
self.EscapeId = wx.ID_CLOSE
mainSizer.Fit(self)
self.SetSizer(mainSizer)
self.refreshAddonsList()
self.SetMinSize(mainSizer.GetMinSize())
# Historical initial size, result of L{self.addonsList} being (550, 350) as of commit 1364839447.
# Setting an initial size on L{self.addonsList} by passing a L{size} argument when
# creating the control would also set its minimum size and thus block the dialog from being shrunk.
self.SetSize(self.scaleSize((763, 509)))
self.CentreOnScreen()
self.addonsList.SetFocus()
def onAddClick(self, evt):
# Translators: The message displayed in the dialog that allows you to choose an add-on package for installation.
fd = wx.FileDialog(self, message=_("Choose Add-on Package File"),
# Translators: the label for the NVDA add-on package file type in the Choose add-on dialog.
wildcard=(_("NVDA Add-on Package (*.{ext})")+"|*.{ext}").format(ext=addonHandler.BUNDLE_EXTENSION),
defaultDir="c:", style=wx.FD_OPEN)
if fd.ShowModal() != wx.ID_OK:
return
addonPath = fd.GetPath()
if installAddon(self, addonPath):
self.refreshAddonsList(activeIndex=-1)
else:
self.refreshAddonsList()
def onRemoveClick(self,evt):
index = self.addonsList.GetFirstSelected()
if index < 0:
return
addon = self.curAddons[index]
if gui.messageBox(
(_(
# Translators: Presented when attempting to remove the selected add-on.
# {addon} is replaced with the add-on name.
"Are you sure you wish to remove the {addon} add-on from NVDA? "
"This cannot be undone."
)).format(addon=addon.name),
# Translators: Title for message asking if the user really wishes to remove the selected Addon.
_("Remove Add-on"),
wx.YES_NO | wx.NO_DEFAULT | wx.ICON_WARNING
) != wx.YES:
return
addon.requestRemove()
self.refreshAddonsList(activeIndex=index)
self.addonsList.SetFocus()
def getAddonStatus(self, addon):
if addon.isBlocked:
# Translators: The status shown for an addon when it's not considered compatible with this version of NVDA.
incompatibleStatus =_("Incompatible")
# When the addon is incompatible, it can not be enabled/disabled. Its state no longer matters.
# So, return early.
return incompatibleStatus
statusList = []
if addon.isRunning:
			# Translators: The status shown for an addon when it's currently running in NVDA.
statusList.append(_("Enabled"))
elif addon.isPendingInstall:
# Translators: The status shown for a newly installed addon before NVDA is restarted.
statusList.append(_("Install"))
# in some cases an addon can be expected to be disabled after install, so we want "install" to take precedence here
# If add-ons are globally disabled, don't show this status.
elif addon.isDisabled and not globalVars.appArgs.disableAddons:
			# Translators: The status shown for an addon when it's currently suspended due to add-ons being disabled.
statusList.append(_("Disabled"))
if addon.isPendingRemove:
# Translators: The status shown for an addon that has been marked as removed, before NVDA has been restarted.
statusList.append(_("Removed after restart"))
elif addon.isPendingDisable or (
# yet to be installed, disabled after install
not addon.isPendingEnable and addon.isPendingInstall and addon.isDisabled
) or (
# addons globally disabled, disabled after restart
globalVars.appArgs.disableAddons and addon.isDisabled and not addon.isPendingEnable
):
# Translators: The status shown for an addon when it requires a restart to become disabled
statusList.append(_("Disabled after restart"))
elif addon.isPendingEnable or (
# yet to be installed, enabled after install
addon.isPendingInstall and not addon.isDisabled
) or (
# addons globally disabled, enabled after restart
globalVars.appArgs.disableAddons and not addon.isDisabled
):
# Translators: The status shown for an addon when it requires a restart to become enabled
statusList.append(_("Enabled after restart"))
return ", ".join(statusList)
def refreshAddonsList(self,activeIndex=0):
self.addonsList.DeleteAllItems()
self.curAddons=[]
anyAddonIncompatible = False
for addon in sorted(addonHandler.getAvailableAddons(), key=lambda a: strxfrm(a.manifest['summary'])):
self.addonsList.Append((
addon.manifest['summary'],
self.getAddonStatus(addon),
addon.manifest['version'],
addon.manifest['author']
))
self.curAddons.append(addon)
anyAddonIncompatible = (
anyAddonIncompatible # once we find one incompatible addon we don't need to continue
or not addonVersionCheck.isAddonCompatible(
addon,
currentAPIVersion=addonAPIVersion.CURRENT,
backwardsCompatToVersion=addonAPIVersion.BACK_COMPAT_TO
)
)
self.incompatAddonsButton.Enable(anyAddonIncompatible)
# select the given active addon or the first addon if not given
curAddonsLen=len(self.curAddons)
if curAddonsLen>0:
if activeIndex==-1:
activeIndex=curAddonsLen-1
elif activeIndex<0 or activeIndex>=curAddonsLen:
activeIndex=0
self.addonsList.Select(activeIndex,on=1)
self.addonsList.SetItemState(activeIndex,wx.LIST_STATE_FOCUSED,wx.LIST_STATE_FOCUSED)
else:
self.aboutButton.Disable()
self.helpButton.Disable()
self.removeButton.Disable()
def _shouldDisable(self, addon):
return not (addon.isPendingDisable or (addon.isDisabled and not addon.isPendingEnable))
def onListItemSelected(self, evt):
index=evt.GetIndex()
addon=self.curAddons[index] if index>=0 else None
# #3090: Change toggle button label to indicate action to be taken if clicked.
if addon is not None:
# Translators: The label for a button in Add-ons Manager dialog to enable or disable the selected add-on.
self.enableDisableButton.SetLabel(_("&Enable add-on") if not self._shouldDisable(addon) else _("&Disable add-on"))
self.aboutButton.Enable(addon is not None and not addon.isPendingRemove)
self.helpButton.Enable(bool(addon is not None and not addon.isPendingRemove and addon.getDocFilePath()))
self.enableDisableButton.Enable(
addon is not None and
not addon.isPendingRemove and
addonVersionCheck.isAddonCompatible(addon)
)
self.removeButton.Enable(addon is not None and not addon.isPendingRemove)
def onClose(self,evt):
self.DestroyChildren()
self.Destroy()
needsRestart = False
for addon in self.curAddons:
if (addon.isPendingInstall or addon.isPendingRemove
or addon.isDisabled and addon.isPendingEnable
or addon.isRunning and addon.isPendingDisable
or not addon.isDisabled and addon.isPendingDisable):
needsRestart = True
break
if needsRestart:
promptUserForRestart()
def onAbout(self,evt):
index=self.addonsList.GetFirstSelected()
if index<0: return
addon=self.curAddons[index]
_showAddonInfo(addon)
def onHelp(self, evt):
index = self.addonsList.GetFirstSelected()
if index < 0:
return
path = self.curAddons[index].getDocFilePath()
os.startfile(path)
def onEnableDisable(self, evt):
index=self.addonsList.GetFirstSelected()
if index<0: return
addon=self.curAddons[index]
shouldDisable = self._shouldDisable(addon)
try:
# Counterintuitive, but makes sense when context is taken into account.
addon.enable(not shouldDisable)
except addonHandler.AddonError:
log.error("Couldn't change state for %s add-on"%addon.name, exc_info=True)
if shouldDisable:
# Translators: The message displayed when the add-on cannot be disabled.
message = _("Could not disable the {description} add-on.").format(
description=addon.manifest['summary'])
else:
# Translators: The message displayed when the add-on cannot be enabled.
message = _("Could not enable the {description} add-on.").format(
description=addon.manifest['summary'])
gui.messageBox(
message,
# Translators: The title of a dialog presented when an error occurs.
_("Error"),
wx.OK | wx.ICON_ERROR
)
return
self.enableDisableButton.SetLabel(_("&Enable add-on") if shouldDisable else _("&Disable add-on"))
self.refreshAddonsList(activeIndex=index)
def onGetAddonsClick(self, evt):
ADDONS_URL = "http://addons.nvda-project.org"
os.startfile(ADDONS_URL)
def onIncompatAddonsShowClick(self, evt):
IncompatibleAddonsDialog(
parent=self,
# the defaults from the addon GUI are fine. We are testing against the running version.
).ShowModal()
def installAddon(parentWindow, addonPath):
""" Installs the addon at path. Any error messages / warnings are presented to the user via a GUI message box.
If attempting to install an addon that is pending removal, it will no longer be pending removal.
:return True on success or False on failure.
"""
try:
bundle = addonHandler.AddonBundle(addonPath)
except:
log.error("Error opening addon bundle from %s" % addonPath, exc_info=True)
gui.messageBox(
# Translators: The message displayed when an error occurs when opening an add-on package for adding.
_("Failed to open add-on package file at %s - missing file or invalid file format") % addonPath,
# Translators: The title of a dialog presented when an error occurs.
_("Error"),
wx.OK | wx.ICON_ERROR
)
return False # Exit early, can't install an invalid bundle
if not addonVersionCheck.hasAddonGotRequiredSupport(bundle):
_showAddonRequiresNVDAUpdateDialog(parentWindow, bundle)
return False # Exit early, addon does not have required support
elif not addonVersionCheck.isAddonTested(bundle):
_showAddonTooOldDialog(parentWindow, bundle)
return False # Exit early, addon is not up to date with the latest API version.
elif wx.YES != _showConfirmAddonInstallDialog(parentWindow, bundle):
return False # Exit early, User changed their mind about installation.
prevAddon = None
for addon in addonHandler.getAvailableAddons():
if not addon.isPendingRemove and bundle.name.lower()==addon.manifest['name'].lower():
prevAddon=addon
break
if prevAddon:
summary=bundle.manifest["summary"]
curVersion=prevAddon.manifest["version"]
newVersion=bundle.manifest["version"]
# Translators: A title for the dialog asking if the user wishes to update a previously installed
# add-on with this one.
messageBoxTitle = _("Add-on Installation")
overwriteExistingAddonInstallationMessage = _(
# Translators: A message asking if the user wishes to update an add-on with the same version
# currently installed according to the version number.
"You are about to install version {newVersion} of {summary},"
" which appears to be already installed. "
"Would you still like to update?"
).format(summary=summary, newVersion=newVersion)
updateAddonInstallationMessage = _(
# Translators: A message asking if the user wishes to update a previously installed
# add-on with this one.
"A version of this add-on is already installed. "
"Would you like to update {summary} version {curVersion} to version {newVersion}?"
).format(summary=summary, curVersion=curVersion, newVersion=newVersion)
if gui.messageBox(
overwriteExistingAddonInstallationMessage if curVersion == newVersion else updateAddonInstallationMessage,
messageBoxTitle,
wx.YES|wx.NO|wx.ICON_WARNING
) != wx.YES:
return False
from contextlib import contextmanager
@contextmanager
def doneAndDestroy(window):
try:
yield window
except:
# pass on any exceptions
raise
finally:
# but ensure that done and Destroy are called.
window.done()
window.Destroy()
# use a progress dialog so users know that something is happening.
progressDialog = gui.IndeterminateProgressDialog(
parentWindow,
# Translators: The title of the dialog presented while an Addon is being installed.
_("Installing Add-on"),
# Translators: The message displayed while an addon is being installed.
_("Please wait while the add-on is being installed.")
)
try:
# Use context manager to ensure that `done` and `Destroy` are called on the progress dialog afterwards
with doneAndDestroy(progressDialog):
gui.ExecAndPump(addonHandler.installAddonBundle, bundle)
if prevAddon:
prevAddon.requestRemove()
return True
except:
log.error("Error installing addon bundle from %s" % addonPath, exc_info=True)
gui.messageBox(
# Translators: The message displayed when an error occurs when installing an add-on package.
_("Failed to install add-on from %s") % addonPath,
# Translators: The title of a dialog presented when an error occurs.
_("Error"),
wx.OK | wx.ICON_ERROR
)
return False
def handleRemoteAddonInstall(addonPath):
# Add-ons cannot be installed into a Windows store version of NVDA
if config.isAppX:
gui.messageBox(
# Translators: The message displayed when an add-on cannot be installed due to NVDA running as a Windows Store app
_("Add-ons cannot be installed in the Windows Store version of NVDA"),
# Translators: The title of a dialog presented when an error occurs.
_("Error"),
wx.OK | wx.ICON_ERROR)
return
gui.mainFrame.prePopup()
if installAddon(gui.mainFrame, addonPath):
promptUserForRestart()
gui.mainFrame.postPopup()
def _showAddonRequiresNVDAUpdateDialog(parent, bundle):
incompatibleMessage = _(
# Translators: The message displayed when installing an add-on package is prohibited,
# because it requires a later version of NVDA than is currently installed.
"Installation of {summary} {version} has been blocked. The minimum NVDA version required for "
"this add-on is {minimumNVDAVersion}, your current NVDA version is {NVDAVersion}"
).format(
summary=bundle.manifest['summary'],
version=bundle.manifest['version'],
minimumNVDAVersion=addonAPIVersion.formatForGUI(bundle.minimumNVDAVersion),
NVDAVersion=addonAPIVersion.formatForGUI(addonAPIVersion.CURRENT)
)
ErrorAddonInstallDialog(
parent=parent,
# Translators: The title of a dialog presented when an error occurs.
title=_("Add-on not compatible"),
message=incompatibleMessage,
showAddonInfoFunction=lambda: _showAddonInfo(bundle)
).ShowModal()
def _showAddonTooOldDialog(parent, bundle):
confirmInstallMessage = _(
# Translators: A message informing the user that this addon can not be installed
# because it is not compatible.
"Installation of {summary} {version} has been blocked."
" An updated version of this add-on is required,"
" the minimum add-on API supported by this version of NVDA is {backCompatToAPIVersion}"
).format(
backCompatToAPIVersion=addonAPIVersion.formatForGUI(addonAPIVersion.BACK_COMPAT_TO),
**bundle.manifest
)
return ErrorAddonInstallDialog(
parent=parent,
# Translators: The title of a dialog presented when an error occurs.
title=_("Add-on not compatible"),
message=confirmInstallMessage,
showAddonInfoFunction=lambda: _showAddonInfo(bundle)
).ShowModal()
def _showConfirmAddonInstallDialog(parent, bundle):
confirmInstallMessage = _(
# Translators: A message asking the user if they really wish to install an addon.
"Are you sure you want to install this add-on?\n"
"Only install add-ons from trusted sources.\n"
"Addon: {summary} {version}"
).format(**bundle.manifest)
return ConfirmAddonInstallDialog(
parent=parent,
# Translators: Title for message asking if the user really wishes to install an Addon.
title=_("Add-on Installation"),
message=confirmInstallMessage,
showAddonInfoFunction=lambda: _showAddonInfo(bundle)
).ShowModal()
class IncompatibleAddonsDialog(
DpiScalingHelperMixinWithoutInit,
gui.contextHelp.ContextHelpMixin,
wx.Dialog # wxPython does not seem to call base class initializer, put last in MRO
):
"""A dialog that lists incompatible addons, and why they are not compatible"""
@classmethod
def _instance(cls):
""" type: () -> IncompatibleAddonsDialog
return None until this is replaced with a weakref.ref object. Then the instance is retrieved
		by treating that object as a callable.
"""
return None
helpId = "IncompatibleAddonsManager"
def __new__(cls, *args, **kwargs):
instance = IncompatibleAddonsDialog._instance()
if instance is None:
return super(IncompatibleAddonsDialog, cls).__new__(cls, *args, **kwargs)
return instance
def __init__(
self,
parent,
APIVersion = addonAPIVersion.CURRENT,
APIBackwardsCompatToVersion = addonAPIVersion.BACK_COMPAT_TO
):
if IncompatibleAddonsDialog._instance() is not None:
raise RuntimeError("Attempting to open multiple IncompatibleAddonsDialog instances")
IncompatibleAddonsDialog._instance = weakref.ref(self)
self._APIVersion = APIVersion
self._APIBackwardsCompatToVersion = APIBackwardsCompatToVersion
self.unknownCompatibilityAddonsList = list(addonHandler.getIncompatibleAddons(
currentAPIVersion=APIVersion,
backCompatToAPIVersion=APIBackwardsCompatToVersion
))
if not len(self.unknownCompatibilityAddonsList) > 0:
# this dialog is not designed to show an empty list.
raise RuntimeError("No incompatible addons.")
super().__init__(
parent,
# Translators: The title of the Incompatible Addons Dialog
title=_("Incompatible Add-ons"),
style=wx.DEFAULT_DIALOG_STYLE | wx.RESIZE_BORDER | wx.MAXIMIZE_BOX,
)
mainSizer=wx.BoxSizer(wx.VERTICAL)
settingsSizer=wx.BoxSizer(wx.VERTICAL)
sHelper = guiHelper.BoxSizerHelper(self, sizer=settingsSizer)
maxControlWidth = 550
introText = _(
# Translators: The title of the Incompatible Addons Dialog
"The following add-ons are incompatible with NVDA version {}."
" These add-ons can not be enabled."
" Please contact the add-on author for further assistance."
).format(addonAPIVersion.formatForGUI(self._APIVersion))
AddonSelectionIntroLabel=wx.StaticText(self, label=introText)
AddonSelectionIntroLabel.Wrap(self.scaleSize(maxControlWidth))
sHelper.addItem(AddonSelectionIntroLabel)
# Translators: the label for the addons list in the incompatible addons dialog.
entriesLabel=_("Incompatible add-ons")
self.addonsList = sHelper.addLabeledControl(
entriesLabel,
nvdaControls.AutoWidthColumnListCtrl,
style=wx.LC_REPORT|wx.LC_SINGLE_SEL,
)
# Translators: The label for a column in add-ons list used to identify add-on package name (example: package is OCR).
self.addonsList.InsertColumn(1, _("Package"), width=self.scaleSize(150))
# Translators: The label for a column in add-ons list used to identify add-on's running status (example: status is running).
self.addonsList.InsertColumn(2, _("Version"), width=self.scaleSize(150))
# Translators: The label for a column in add-ons list used to provide some explanation about incompatibility
self.addonsList.InsertColumn(3, _("Incompatible reason"), width=self.scaleSize(180))
buttonSizer = guiHelper.ButtonHelper(wx.HORIZONTAL)
# Translators: The label for a button in Add-ons Manager dialog to show information about the selected add-on.
self.aboutButton = buttonSizer.addButton(self, label=_("&About add-on..."))
self.aboutButton.Disable()
self.aboutButton.Bind(wx.EVT_BUTTON, self.onAbout)
# Translators: The close button on an NVDA dialog. This button will dismiss the dialog.
button = buttonSizer.addButton(self, label=_("&Close"), id=wx.ID_CLOSE)
self.Bind(wx.EVT_CLOSE, self.onClose)
sHelper.addDialogDismissButtons(buttonSizer, separated=True)
mainSizer.Add(
settingsSizer,
border=guiHelper.BORDER_FOR_DIALOGS,
flag=wx.ALL | wx.EXPAND,
proportion=1
)
mainSizer.Fit(self)
self.SetSizer(mainSizer)
self.SetAffirmativeId(wx.ID_CLOSE)
self.SetEscapeId(wx.ID_CLOSE)
button.Bind(wx.EVT_BUTTON, self.onClose)
self.refreshAddonsList()
self.SetMinSize(mainSizer.GetMinSize())
# Historical initial size, result of L{self.addonsList} being (550, 350) as of PR #8006.
# Setting an initial size on L{self.addonsList} by passing a L{size} argument when
# creating the control would also set its minimum size and thus block the dialog from being shrunk.
self.SetSize(self.scaleSize((606, 525)))
self.CentreOnScreen()
self.addonsList.SetFocus()
def _getIncompatReason(self, addon):
if not addonVersionCheck.hasAddonGotRequiredSupport(
addon,
currentAPIVersion=self._APIVersion
):
# Translators: The reason an add-on is not compatible. A more recent version of NVDA is
# required for the add-on to work. The placeholder will be replaced with Year.Major.Minor (EG 2019.1).
return _("An updated version of NVDA is required. NVDA version {} or later."
).format(addonAPIVersion.formatForGUI(addon.minimumNVDAVersion))
elif not addonVersionCheck.isAddonTested(
addon,
backwardsCompatToVersion=self._APIBackwardsCompatToVersion
):
# Translators: The reason an add-on is not compatible. The addon relies on older, removed features of NVDA,
# an updated add-on is required. The placeholder will be replaced with Year.Major.Minor (EG 2019.1).
return _("An updated version of this add-on is required. The minimum supported API version is now {}"
).format(addonAPIVersion.formatForGUI(self._APIBackwardsCompatToVersion))
def refreshAddonsList(self):
self.addonsList.DeleteAllItems()
self.curAddons=[]
for idx, addon in enumerate(self.unknownCompatibilityAddonsList):
self.addonsList.Append((
addon.manifest['summary'],
addon.version,
self._getIncompatReason(addon)
))
self.curAddons.append(addon) # onAbout depends on being able to recall the current addon based on selected index
activeIndex=0
self.addonsList.Select(activeIndex, on=1)
self.addonsList.SetItemState(activeIndex, wx.LIST_STATE_FOCUSED, wx.LIST_STATE_FOCUSED)
self.aboutButton.Enable(True)
def onAbout(self,evt):
index=self.addonsList.GetFirstSelected()
if index<0: return
addon=self.curAddons[index]
_showAddonInfo(addon)
def onClose(self, evt):
evt.Skip()
self.EndModal(wx.OK)
self.DestroyLater() # ensure that the _instance weakref is destroyed.
| 1 | 34,445 | This button should also be disabled when in secure mode since it opens a web browser from which you can easily do a lot of insecure stuff. | nvaccess-nvda | py |
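Concretely, the same guard the patch adds around the install button would also wrap the "Get add-ons" button (sketch):

```python
# Sketch: in AddonsDialog.__init__, mirror the patch's secure-mode guard,
# since this button launches a web browser.
if not globalVars.appArgs.secure:
	# Translators: The label of a button in Add-ons Manager to open the Add-ons website and get more add-ons.
	self.getAddonsButton = generalActions.addButton(self, label=_("&Get add-ons..."))
	self.getAddonsButton.Bind(wx.EVT_BUTTON, self.onGetAddonsClick)
```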
@@ -221,4 +221,11 @@ public interface Table {
* @return a {@link LocationProvider} to provide locations for new data files
*/
LocationProvider locationProvider();
+
+ /**
+ * Create a new {@link ExpireTableMetadata expire API} to manage table metadata files in this table and commit.
+ *
+ * @return a new {@link ExpireTableMetadata}
+ */
+ ExpireTableMetadata expireTableMetadata();
} | 1 | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.iceberg;
import java.util.List;
import java.util.Map;
import org.apache.iceberg.encryption.EncryptionManager;
import org.apache.iceberg.io.FileIO;
import org.apache.iceberg.io.LocationProvider;
/**
* Represents a table.
*/
public interface Table {
/**
* Refresh the current table metadata.
*/
void refresh();
/**
* Create a new {@link TableScan scan} for this table.
* <p>
* Once a table scan is created, it can be refined to project columns and filter data.
*
* @return a table scan for this table
*/
TableScan newScan();
/**
* Return the {@link Schema schema} for this table.
*
* @return this table's schema
*/
Schema schema();
/**
* Return the {@link PartitionSpec partition spec} for this table.
*
* @return this table's partition spec
*/
PartitionSpec spec();
/**
* Return a map of string properties for this table.
*
* @return this table's properties map
*/
Map<String, String> properties();
/**
* Return the table's base location.
*
* @return this table's location
*/
String location();
/**
* Get the current {@link Snapshot snapshot} for this table, or null if there are no snapshots.
*
* @return the current table Snapshot.
*/
Snapshot currentSnapshot();
/**
* Get the {@link Snapshot snapshot} of this table with the given id, or null if there is no
* matching snapshot.
*
* @return the {@link Snapshot} with the given id.
*/
Snapshot snapshot(long snapshotId);
/**
* Get the {@link Snapshot snapshots} of this table.
*
* @return an Iterable of snapshots of this table.
*/
Iterable<Snapshot> snapshots();
/**
* Get the snapshot history of this table.
*
* @return a list of {@link HistoryEntry history entries}
*/
List<HistoryEntry> history();
/**
* Create a new {@link UpdateSchema} to alter the columns of this table and commit the change.
*
* @return a new {@link UpdateSchema}
*/
UpdateSchema updateSchema();
/**
* Create a new {@link UpdateProperties} to update table properties and commit the changes.
*
* @return a new {@link UpdateProperties}
*/
UpdateProperties updateProperties();
/**
* Create a new {@link UpdateLocation} to update table location and commit the changes.
*
* @return a new {@link UpdateLocation}
*/
UpdateLocation updateLocation();
/**
* Create a new {@link AppendFiles append API} to add files to this table and commit.
*
* @return a new {@link AppendFiles}
*/
AppendFiles newAppend();
/**
* Create a new {@link AppendFiles append API} to add files to this table and commit.
* <p>
* Using this method signals to the underlying implementation that the append should not perform
* extra work in order to commit quickly. Fast appends are not recommended for normal writes
* because the fast commit may cause split planning to slow down over time.
* <p>
* Implementations may not support fast appends, in which case this will return the same appender
* as {@link #newAppend()}.
*
* @return a new {@link AppendFiles}
*/
default AppendFiles newFastAppend() {
return newAppend();
}
/**
* Create a new {@link RewriteFiles rewrite API} to replace files in this table and commit.
*
* @return a new {@link RewriteFiles}
*/
RewriteFiles newRewrite();
/**
* Create a new {@link RewriteManifests rewrite manifests API} to replace manifests for this
* table and commit.
*
* @return a new {@link RewriteManifests}
*/
RewriteManifests rewriteManifests();
/**
* Create a new {@link OverwriteFiles overwrite API} to overwrite files by a filter expression.
*
* @return a new {@link OverwriteFiles}
*/
OverwriteFiles newOverwrite();
/**
* Not recommended: Create a new {@link ReplacePartitions replace partitions API} to dynamically
* overwrite partitions in the table with new data.
* <p>
* This is provided to implement SQL compatible with Hive table operations but is not recommended.
* Instead, use the {@link OverwriteFiles overwrite API} to explicitly overwrite data.
*
* @return a new {@link ReplacePartitions}
*/
ReplacePartitions newReplacePartitions();
/**
* Create a new {@link DeleteFiles delete API} to replace files in this table and commit.
*
* @return a new {@link DeleteFiles}
*/
DeleteFiles newDelete();
/**
* Create a new {@link ExpireSnapshots expire API} to manage snapshots in this table and commit.
*
* @return a new {@link ExpireSnapshots}
*/
ExpireSnapshots expireSnapshots();
/**
* Create a new {@link Rollback rollback API} to roll back to a previous snapshot and commit.
*
* @return a new {@link Rollback}
*/
Rollback rollback();
/**
* Create a new {@link Transaction transaction API} to commit multiple table operations at once.
*
* @return a new {@link Transaction}
*/
Transaction newTransaction();
/**
* @return a {@link FileIO} to read and write table data and metadata files
*/
FileIO io();
/**
* @return an {@link org.apache.iceberg.encryption.EncryptionManager} to encrypt and decrypt
* data files.
*/
EncryptionManager encryption();
/**
* @return a {@link LocationProvider} to provide locations for new data files
*/
LocationProvider locationProvider();
}
| 1 | 14,568 | Should this be done as part of `expireSnapshots`? I'd like to avoid adding a lot of operations to `Table` because it is already a large API. | apache-iceberg | java |
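For comparison, folding this into the existing snapshot-expiration builder might look like the sketch below; `cleanExpiredMetadata(boolean)` is hypothetical and does not exist in the API:

```java
// given: Table table; long olderThanMillis;
table.expireSnapshots()
    .expireOlderThan(olderThanMillis) // existing ExpireSnapshots method
    .cleanExpiredMetadata(true)       // hypothetical option for metadata files
    .commit();
```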
@@ -0,0 +1,18 @@
+package libp2p
+
+import (
+ "github.com/libp2p/go-libp2p-core/helpers"
+ "github.com/libp2p/go-libp2p-core/network"
+)
+
+type Stream struct {
+ network.Stream
+}
+
+func (s *Stream) FullClose() error {
+ return helpers.FullClose(s)
+}
+
+func NewStream(stream network.Stream) *Stream {
+ return &Stream{Stream: stream}
+} | 1 | 1 | 8,840 | I do not think that this type (and its constructor) have to be exported. | ethersphere-bee | go |
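Unexported, the new file would read (sketch):

```go
package libp2p

import (
	"github.com/libp2p/go-libp2p-core/helpers"
	"github.com/libp2p/go-libp2p-core/network"
)

// stream stays private to the package; callers receive it via an
// interface or package-level function rather than the concrete type.
type stream struct {
	network.Stream
}

func (s *stream) FullClose() error {
	return helpers.FullClose(s)
}

func newStream(s network.Stream) *stream {
	return &stream{Stream: s}
}
```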
|
@@ -55,7 +55,10 @@ func (r *receiptLog) SetData(data []byte) {
r.data = data
}
-func (r *receiptLog) Build(ctx context.Context, err error) *action.Log {
+func (r *receiptLog) Build(ctx context.Context, err error, skipLogOnErr bool) *action.Log {
+ if err != nil && skipLogOnErr {
+ return nil
+ }
blkCtx := protocol.MustGetBlockCtx(ctx)
actionCtx := protocol.MustGetActionCtx(ctx)
| 1 | // Copyright (c) 2020 IoTeX Foundation
// This is an alpha (internal) release and is not suitable for production. This source code is provided 'as is' and no
// warranties are given as to title or non-infringement, merchantability or fitness for purpose and, to the extent
// permitted by law, all liability for your use of the code is disclaimed. This source code is governed by Apache
// License 2.0 that can be found in the LICENSE file.
package staking
import (
"context"
"github.com/iotexproject/go-pkgs/hash"
"github.com/iotexproject/iotex-address/address"
"github.com/iotexproject/iotex-core/action"
"github.com/iotexproject/iotex-core/action/protocol"
)
type receiptLog struct {
addr string
topics action.Topics
data []byte
postFairbankMigration bool
}
func newReceiptLog(addr, topic string, postFairbankMigration bool) *receiptLog {
r := receiptLog{
addr: addr,
postFairbankMigration: postFairbankMigration,
}
if postFairbankMigration {
r.topics = action.Topics{hash.BytesToHash256([]byte(topic))}
} else {
r.topics = action.Topics{hash.Hash256b([]byte(topic))}
}
return &r
}
func (r *receiptLog) AddTopics(topics ...[]byte) {
if r.postFairbankMigration {
for i := range topics {
r.topics = append(r.topics, hash.BytesToHash256(topics[i]))
}
}
}
func (r *receiptLog) AddAddress(addr address.Address) {
if !r.postFairbankMigration && addr != nil {
r.topics = append(r.topics, hash.Hash256b(addr.Bytes()))
}
}
func (r *receiptLog) SetData(data []byte) {
r.data = data
}
func (r *receiptLog) Build(ctx context.Context, err error) *action.Log {
blkCtx := protocol.MustGetBlockCtx(ctx)
actionCtx := protocol.MustGetActionCtx(ctx)
log := action.Log{
Address: r.addr,
Topics: r.topics,
BlockHeight: blkCtx.BlockHeight,
ActionHash: actionCtx.ActionHash,
}
if r.postFairbankMigration {
return &log
}
if err == nil {
log.Data = r.data
return &log
}
return nil
}
 | 1 | 22,359 | Why add this? I don't think it's correct. For instance, it can return ErrCandidateNotExist (not critical); in that case we should still return a receipt with the corresponding status. | iotexproject-iotex-core | go
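A sketch of the alternative implied by the review: classify errors at the call site instead of threading a blanket flag through `Build`; `isCriticalError` is hypothetical:

```go
// Sketch: a non-critical error such as ErrCandidateNotExist still yields
// a receipt with a failure status, so drop the log only for critical errors.
log := rLog.Build(ctx, err)
if err != nil && isCriticalError(err) { // isCriticalError is hypothetical
	log = nil
}
```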
@@ -0,0 +1,6 @@
+package caddytls
+
+import "github.com/xenolf/lego/acme"
+
+// ChallengeProvider type to be used in Caddy plugins over acme.ChallengeProvider directly, to avoid https://github.com/mattfarina/golang-broken-vendor
+type ChallengeProvider acme.ChallengeProvider | 1 | 1 | 10,974 | Instead of creating a new file, put this in tls.go, like right after or before DNSProviderConstructor is defined. | caddyserver-caddy | go |
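i.e. placed in tls.go next to the related plugin types (sketch; the `DNSProviderConstructor` signature is reproduced from memory):

```go
// tls.go (sketch)
type DNSProviderConstructor func(credentials ...string) (acme.ChallengeProvider, error)

// ChallengeProvider type to be used in Caddy plugins over
// acme.ChallengeProvider directly, to avoid
// https://github.com/mattfarina/golang-broken-vendor
type ChallengeProvider acme.ChallengeProvider
```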
|
@@ -62,7 +62,8 @@ app.controller('AttendeeController', function($scope, AutoCompletionService) {
'role': 'REQ-PARTICIPANT',
'rsvp': 'TRUE',
'partstat': 'NEEDS-ACTION',
- 'cutype': 'INDIVIDUAL'
+ 'cutype': 'INDIVIDUAL',
+				'language': OC.getLocale() // Use the current user's locale as a default value
}
});
} | 1 | /**
* Calendar App
*
* @author Raghu Nayyar
* @author Georg Ehrke
* @copyright 2016 Raghu Nayyar <hey@raghunayyar.com>
* @copyright 2016 Georg Ehrke <oc.list@georgehrke.com>
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU AFFERO GENERAL PUBLIC LICENSE
* License as published by the Free Software Foundation; either
* version 3 of the License, or any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU AFFERO GENERAL PUBLIC LICENSE for more details.
*
* You should have received a copy of the GNU Affero General Public
* License along with this library. If not, see <http://www.gnu.org/licenses/>.
*
*/
app.controller('AttendeeController', function($scope, AutoCompletionService) {
'use strict';
$scope.newAttendeeGroup = -1;
$scope.cutstats = [
{displayname: t('calendar', 'Individual'), val: 'INDIVIDUAL'},
{displayname: t('calendar', 'Group'), val: 'GROUP'},
{displayname: t('calendar', 'Resource'), val: 'RESOURCE'},
{displayname: t('calendar', 'Room'), val: 'ROOM'},
{displayname: t('calendar', 'Unknown'), val: 'UNKNOWN'}
];
$scope.partstats = [
{displayname: t('calendar', 'Required'), val: 'REQ-PARTICIPANT'},
{displayname: t('calendar', 'Optional'), val: 'OPT-PARTICIPANT'},
{displayname: t('calendar', 'Does not attend'), val: 'NON-PARTICIPANT'}
];
$scope.$parent.registerPostHook(function() {
$scope.properties.attendee = $scope.properties.attendee || [];
if ($scope.properties.attendee.length > 0 && $scope.properties.organizer === null) {
$scope.properties.organizer = {
value: 'MAILTO:' + $scope.$parent.emailAddress,
parameters: {
cn: OC.getCurrentUser().displayName
}
};
}
});
$scope.add = function (email) {
if (email !== '') {
$scope.properties.attendee = $scope.properties.attendee || [];
$scope.properties.attendee.push({
value: 'MAILTO:' + email,
group: $scope.newAttendeeGroup--,
parameters: {
'role': 'REQ-PARTICIPANT',
'rsvp': 'TRUE',
'partstat': 'NEEDS-ACTION',
'cutype': 'INDIVIDUAL'
}
});
}
$scope.attendeeoptions = false;
$scope.nameofattendee = '';
};
$scope.remove = function (attendee) {
$scope.properties.attendee = $scope.properties.attendee.filter(function(elem) {
return elem.group !== attendee.group;
});
};
$scope.search = function (value) {
return AutoCompletionService.searchAttendee(value).then((attendees) => {
const arr = [];
attendees.forEach((attendee) => {
const emailCount = attendee.email.length;
attendee.email.forEach((email) => {
let displayname;
if (emailCount === 1) {
displayname = attendee.name;
} else {
displayname = t('calendar', '{name} ({email})', {
name: attendee.name,
email: email
});
}
arr.push({
displayname: displayname,
email: email,
name: attendee.name
});
});
});
return arr;
});
};
$scope.selectFromTypeahead = function (item) {
$scope.properties.attendee = $scope.properties.attendee || [];
$scope.properties.attendee.push({
value: 'MAILTO:' + item.email,
parameters: {
cn: item.name,
role: 'REQ-PARTICIPANT',
rsvp: 'TRUE',
partstat: 'NEEDS-ACTION',
cutype: 'INDIVIDUAL'
}
});
$scope.nameofattendee = '';
};
});
 | 1 | 6,115 | Instead of OC.getLocale(), the default value should be the user's 'core' 'lang' setting. When loading the page, this can be added to the template parameters by retrieving $this->config->getUserValue($user->getUID(), 'core', 'lang'); in viewcontroller.php. | nextcloud-calendar | js
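The server-side half of that suggestion could look like this sketch; the `userLanguage` parameter name is illustrative, and the JS default would then read that parameter with a fallback to OC.getLocale():

```php
// viewcontroller.php (sketch): expose the user's configured language
// to the template so the JS side can use it as the default.
$userLang = $this->config->getUserValue($user->getUID(), 'core', 'lang');
$templateParameters['userLanguage'] = $userLang;
```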
@@ -42,7 +42,7 @@ namespace interface1
* \param[in] id Identifier of the result
* \return Result that corresponds to the given identifier
*/
-classifier::quality_metric::binary_confusion_matrix::ResultPtr ResultCollection::getResult(QualityMetricId id) const
+classifier::quality_metric::binary_confusion_matrix::ResultPtr daal::algorithms::adaboost::quality_metric_set::interface1::ResultCollection::getResult(daal::algorithms::adaboost::quality_metric_set::QualityMetricId id) const
{
return staticPointerCast<classifier::quality_metric::binary_confusion_matrix::Result, SerializationIface>((*this)[(size_t)id]);
} | 1 | /* file: adaboost_quality_metric_set_types.cpp */
/*******************************************************************************
* Copyright 2014-2019 Intel Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*******************************************************************************/
/*
//++
// Interface for the AdaBoost algorithm quality metrics
//--
*/
#include "adaboost_quality_metric_set_types.h"
using namespace daal::data_management;
using namespace daal::services;
namespace daal
{
namespace algorithms
{
namespace adaboost
{
namespace quality_metric_set
{
namespace interface1
{
/**
* Returns the result of the quality metrics algorithm
* \param[in] id Identifier of the result
* \return Result that corresponds to the given identifier
*/
classifier::quality_metric::binary_confusion_matrix::ResultPtr ResultCollection::getResult(QualityMetricId id) const
{
return staticPointerCast<classifier::quality_metric::binary_confusion_matrix::Result, SerializationIface>((*this)[(size_t)id]);
}
/**
* Returns the input object for the quality metrics algorithm
* \param[in] id Identifier of the input object
* \return %Input object that corresponds to the given identifier
*/
classifier::quality_metric::binary_confusion_matrix::InputPtr InputDataCollection::getInput(QualityMetricId id) const
{
return staticPointerCast<classifier::quality_metric::binary_confusion_matrix::Input, algorithms::Input>(
algorithms::quality_metric_set::InputDataCollection::getInput((size_t)id));
}
} //namespace interface1
} //namespace quality_metric_set
} //namespace adaboost
} //namespace algorithms
} //namespace daal
 | 1 | 18,753 | Why interface1? It should be in the inner .so. | oneapi-src-oneDAL | cpp
@@ -32,6 +32,16 @@ class User(UserMixin):
login_id=user['login_id'],
)
+ def to_dict(self):
+ return {
+ "id": self.id,
+ "created": self.created,
+ "musicbrainz_id": self.musicbrainz_id,
+ "auth_token": self.auth_token,
+ "gdpr_agreed": self.gdpr_agreed,
+ "login_id": self.login_id
+ }
+
@login_manager.user_loader
def load_user(user_login_id):
try: | 1 | from flask import redirect, url_for, current_app, request
from flask_login import LoginManager, UserMixin, current_user
from functools import wraps
import listenbrainz.db.user as db_user
from werkzeug.exceptions import Unauthorized
from listenbrainz.webserver.errors import APIUnauthorized
login_manager = LoginManager()
login_manager.login_view = 'login.index'
class User(UserMixin):
def __init__(self, id, created, musicbrainz_id, auth_token, gdpr_agreed, login_id):
self.id = id
self.created = created
self.musicbrainz_id = musicbrainz_id
self.auth_token = auth_token
self.gdpr_agreed = gdpr_agreed
self.login_id = login_id
def get_id(self):
return self.login_id
@classmethod
def from_dbrow(cls, user):
return cls(
id=user['id'],
created=user['created'],
musicbrainz_id=user['musicbrainz_id'],
auth_token=user['auth_token'],
gdpr_agreed=user['gdpr_agreed'],
login_id=user['login_id'],
)
@login_manager.user_loader
def load_user(user_login_id):
try:
user = db_user.get_by_login_id(user_login_id)
except Exception as e:
current_app.logger.error("Error while getting user by login ID: %s", str(e), exc_info=True)
return None
if user:
return User.from_dbrow(user)
else:
return None
def login_forbidden(f):
@wraps(f)
def decorated(*args, **kwargs):
if not current_user.is_anonymous:
return redirect(url_for('index.index'))
return f(*args, **kwargs)
return decorated
def api_login_required(f):
@wraps(f)
def decorated(*args, **kwargs):
if not current_user.is_authenticated:
raise APIUnauthorized("You must be logged in to access this endpoint")
return f(*args, **kwargs)
return decorated
 | 1 | 20,004 | Ideally, I'd have converted the dicts to objects, but this way required fewer changes, so I'm going with it for now. | metabrainz-listenbrainz-server | py
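A sketch of the object-first version hinted at (not what was merged; the real class would still need the flask_login UserMixin behaviour):

```python
# Sketch: a dataclass keeps User an object while dataclasses.asdict()
# provides the dict conversion for free.
from dataclasses import dataclass, asdict
from datetime import datetime

@dataclass
class User:
    id: int
    created: datetime
    musicbrainz_id: str
    auth_token: str
    gdpr_agreed: bool
    login_id: str

# asdict(user) then replaces the hand-written to_dict()
```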
@@ -4061,8 +4061,17 @@ void t_c_glib_generator::generate_deserialize_field(ostream& out,
throw "compiler error: no C reader for base type " + t_base_type::t_base_name(tbase) + name;
}
out << ", error)) < 0)" << endl;
- out << indent() << " return " << error_ret << ";" << endl << indent() << "xfer += ret;"
- << endl;
+ if ((tbase != t_base_type::TYPE_STRING) && allocate) {
+ scope_up(out);
+ out << indent() << "g_free (&" << name << ");" << endl;
+ out << indent() << "return " << error_ret << ";" << endl;
+ scope_down(out);
+ out << indent() << "xfer += ret;" << endl;
+ }
+ else{
+ out << indent() << " return " << error_ret << ";" << endl << indent() << "xfer += ret;"
+ << endl;
+ }
// load the byte array with the data
if (tbase == t_base_type::TYPE_STRING && type->is_binary()) { | 1 | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
* Contains some contributions under the Thrift Software License.
* Please see doc/old-thrift-license.txt in the Thrift distribution for
* details.
*/
#include <fstream>
#include <iostream>
#include <stdexcept>
#include <string>
#include <vector>
#include <ctype.h>
#include "thrift/platform.h"
#include "thrift/generate/t_oop_generator.h"
using std::map;
using std::ostream;
using std::ostringstream;
using std::string;
using std::stringstream;
using std::vector;
static const string endl = "\n"; // avoid ostream << std::endl flushes
/* forward declarations */
string initial_caps_to_underscores(string name);
string underscores_to_initial_caps(string name);
string to_upper_case(string name);
string to_lower_case(string name);
/**
* C code generator, using glib for C typing.
*/
class t_c_glib_generator : public t_oop_generator {
public:
/* constructor */
t_c_glib_generator(t_program* program,
const map<string, string>& parsed_options,
const string& option_string)
: t_oop_generator(program) {
(void)option_string;
std::map<std::string, std::string>::const_iterator iter;
/* set the output directory */
this->out_dir_base_ = "gen-c_glib";
/* no options yet */
for( iter = parsed_options.begin(); iter != parsed_options.end(); ++iter) {
throw "unknown option c_glib:" + iter->first;
}
/* set the namespace */
this->nspace = program_->get_namespace("c_glib");
if (this->nspace.empty()) {
this->nspace = "";
this->nspace_u = "";
this->nspace_uc = "";
this->nspace_lc = "";
} else {
/* replace dots with underscores */
char* tmp = strdup(this->nspace.c_str());
for (unsigned int i = 0; i < strlen(tmp); i++) {
if (tmp[i] == '.') {
tmp[i] = '_';
}
}
this->nspace = string(tmp, strlen(tmp));
free(tmp);
/* clean up the namespace for C.
* An input of 'namespace foo' should result in:
* - nspace = foo - for thrift objects and typedefs
* - nspace_u = Foo - for internal GObject prefixes
* - nspace_uc = FOO_ - for macro prefixes
* - nspace_lc = foo_ - for filename and method prefixes
* The underscores are there since uc and lc strings are used as file and
* variable prefixes.
*/
this->nspace_u = initial_caps_to_underscores(this->nspace);
this->nspace_uc = to_upper_case(this->nspace_u) + "_";
this->nspace_lc = to_lower_case(this->nspace_u) + "_";
}
}
/* initialization and destruction */
void init_generator() override;
void close_generator() override;
/* generation functions */
void generate_typedef(t_typedef* ttypedef) override;
void generate_enum(t_enum* tenum) override;
void generate_consts(vector<t_const*> consts) override;
void generate_struct(t_struct* tstruct) override;
void generate_service(t_service* tservice) override;
void generate_xception(t_struct* tstruct) override;
private:
/* file streams */
ofstream_with_content_based_conditional_update f_types_;
ofstream_with_content_based_conditional_update f_types_impl_;
ofstream_with_content_based_conditional_update f_header_;
ofstream_with_content_based_conditional_update f_service_;
/* namespace variables */
string nspace;
string nspace_u;
string nspace_uc;
string nspace_lc;
/* helper functions */
bool is_complex_type(t_type* ttype);
bool is_numeric(t_type* ttype);
string type_name(t_type* ttype, bool in_typedef = false, bool is_const = false);
string property_type_name(t_type* ttype, bool in_typedef = false, bool is_const = false);
string base_type_name(t_type* type);
string type_to_enum(t_type* type);
string constant_literal(t_type* type, t_const_value* value);
string constant_value(string name, t_type* type, t_const_value* value);
string constant_value_with_storage(string name, t_type* type, t_const_value* value);
string function_signature(t_function* tfunction);
string argument_list(t_struct* tstruct);
string xception_list(t_struct* tstruct);
string declare_field(t_field* tfield,
bool init = false,
bool pointer = false,
bool constant = false,
bool reference = false);
void declare_local_variable(ostream& out, t_type* ttype, string& base_name, bool for_hash_table);
void declare_local_variable_for_write(ostream& out, t_type* ttype, string& base_name);
/* generation functions */
void generate_const_initializer(string name,
t_type* type,
t_const_value* value,
bool top_level = false);
void generate_service_helpers(t_service* tservice);
void generate_service_client(t_service* tservice);
void generate_service_handler(t_service* tservice);
void generate_service_processor(t_service* tservice);
void generate_service_server(t_service* tservice);
void generate_object(t_struct* tstruct);
void generate_struct_writer(ostream& out,
t_struct* tstruct,
string this_name,
string this_get = "",
bool is_function = true);
void generate_struct_reader(ostream& out,
t_struct* tstruct,
string this_name,
string this_get = "",
bool is_function = true);
void generate_serialize_field(ostream& out,
t_field* tfield,
string prefix,
string suffix,
int error_ret);
void generate_serialize_struct(ostream& out, t_struct* tstruct, string prefix, int error_ret);
void generate_serialize_container(ostream& out, t_type* ttype, string prefix, int error_ret);
void generate_serialize_map_element(ostream& out,
t_map* tmap,
string key,
string value,
int error_ret);
void generate_serialize_set_element(ostream& out, t_set* tset, string element, int error_ret);
void generate_serialize_list_element(ostream& out,
t_list* tlist,
string list,
string index,
int error_ret);
void generate_deserialize_field(ostream& out,
t_field* tfield,
string prefix,
string suffix,
int error_ret,
bool allocate = true);
void generate_deserialize_struct(ostream& out,
t_struct* tstruct,
string prefix,
int error_ret,
bool allocate = true);
void generate_deserialize_container(ostream& out, t_type* ttype, string prefix, int error_ret);
void generate_deserialize_map_element(ostream& out, t_map* tmap, string prefix, int error_ret);
void generate_deserialize_set_element(ostream& out, t_set* tset, string prefix, int error_ret);
void generate_deserialize_list_element(ostream& out,
t_list* tlist,
string prefix,
string index,
int error_ret);
string generate_new_hash_from_type(t_type* key, t_type* value);
string generate_new_array_from_type(t_type* ttype);
string generate_free_func_from_type(t_type* ttype);
string generate_hash_func_from_type(t_type* ttype);
string generate_cmp_func_from_type(t_type* ttype);
};
/**
* Prepare for file generation by opening up the necessary file
* output streams.
*/
void t_c_glib_generator::init_generator() {
/* create output directory */
MKDIR(get_out_dir().c_str());
string program_name_u = initial_caps_to_underscores(program_name_);
string program_name_uc = to_upper_case(program_name_u);
string program_name_lc = to_lower_case(program_name_u);
/* create output files */
string f_types_name = get_out_dir() + this->nspace_lc + program_name_lc + "_types.h";
f_types_.open(f_types_name.c_str());
string f_types_impl_name = get_out_dir() + this->nspace_lc + program_name_lc + "_types.c";
f_types_impl_.open(f_types_impl_name.c_str());
/* add thrift boilerplate headers */
f_types_ << autogen_comment();
f_types_impl_ << autogen_comment();
/* begin the header inclusion guard */
f_types_ << "#ifndef " << this->nspace_uc << program_name_uc << "_TYPES_H" << endl << "#define "
<< this->nspace_uc << program_name_uc << "_TYPES_H" << endl << endl;
/* include base types */
f_types_ << "/* base includes */" << endl << "#include <glib-object.h>" << endl
<< "#include <thrift/c_glib/thrift_struct.h>" << endl
<< "#include <thrift/c_glib/protocol/thrift_protocol.h>" << endl;
/* include other thrift includes */
const vector<t_program*>& includes = program_->get_includes();
if (!includes.empty()) {
f_types_ << "/* other thrift includes */" << endl;
for (auto include : includes) {
const std::string& include_nspace = include->get_namespace("c_glib");
std::string include_nspace_prefix =
include_nspace.empty() ? "" : initial_caps_to_underscores(include_nspace) + "_";
f_types_ << "#include \"" << include_nspace_prefix
<< initial_caps_to_underscores(include->get_name()) << "_types.h\"" << endl;
}
f_types_ << endl;
}
/* include custom headers */
const vector<string>& c_includes = program_->get_c_includes();
f_types_ << "/* custom thrift includes */" << endl;
for (const auto & c_include : c_includes) {
if (c_include[0] == '<') {
f_types_ << "#include " << c_include << endl;
} else {
f_types_ << "#include \"" << c_include << "\"" << endl;
}
}
f_types_ << endl;
/* include math.h (for "INFINITY") in the implementation file, in case we
encounter a struct with a member of type double */
f_types_impl_ << endl << "#include <math.h>" << endl;
// include the types file
f_types_impl_ << endl << "#include \"" << this->nspace_lc << program_name_u << "_types.h\""
<< endl << "#include <thrift/c_glib/thrift.h>" << endl << endl;
f_types_ << "/* begin types */" << endl << endl;
}
/**
* Finish up generation and close all file streams.
*/
void t_c_glib_generator::close_generator() {
string program_name_uc = to_upper_case(initial_caps_to_underscores(program_name_));
/* end the header inclusion guard */
f_types_ << "#endif /* " << this->nspace_uc << program_name_uc << "_TYPES_H */" << endl;
/* close output file */
f_types_.close();
f_types_impl_.close();
}
/**
* Generates a Thrift typedef in C code. For example:
*
* Thrift:
* typedef map<i32,i32> SomeMap
*
* C:
* typedef GHashTable * ThriftSomeMap;
*/
void t_c_glib_generator::generate_typedef(t_typedef* ttypedef) {
f_types_ << indent() << "typedef " << type_name(ttypedef->get_type(), true) << " " << this->nspace
<< ttypedef->get_symbolic() << ";" << endl << endl;
}
/**
* Generates a C enumeration. For example:
*
* Thrift:
* enum MyEnum {
* ONE = 1,
* TWO
* }
*
* C:
* enum _ThriftMyEnum {
* THRIFT_MY_ENUM_ONE = 1,
* THRIFT_MY_ENUM_TWO
* };
* typedef enum _ThriftMyEnum ThriftMyEnum;
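 *
 * In addition to the enum itself, a toString_MyEnum(int value) helper is
 * generated that returns the name of the constant for a given value (or
 * the number rendered as a string for values with no matching constant).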
*/
void t_c_glib_generator::generate_enum(t_enum* tenum) {
string name = tenum->get_name();
string name_uc = to_upper_case(initial_caps_to_underscores(name));
f_types_ << indent() << "enum _" << this->nspace << name << " {" << endl;
indent_up();
vector<t_enum_value*> constants = tenum->get_constants();
vector<t_enum_value*>::iterator c_iter;
bool first = true;
/* output each of the enumeration elements */
for (c_iter = constants.begin(); c_iter != constants.end(); ++c_iter) {
if (first) {
first = false;
} else {
f_types_ << "," << endl;
}
f_types_ << indent() << this->nspace_uc << name_uc << "_" << (*c_iter)->get_name();
f_types_ << " = " << (*c_iter)->get_value();
}
indent_down();
f_types_ << endl << "};" << endl << "typedef enum _" << this->nspace << name << " "
<< this->nspace << name << ";" << endl << endl;
f_types_ << "/* return the name of the constant */" << endl;
f_types_ << "const char *" << endl;
f_types_ << "toString_" << name << "(int value); " << endl << endl;
;
f_types_impl_ << "/* return the name of the constant */" << endl;
f_types_impl_ << "const char *" << endl;
f_types_impl_ << "toString_" << name << "(int value) " << endl;
f_types_impl_ << "{" << endl;
f_types_impl_ << " static __thread char buf[16] = {0};" << endl;
f_types_impl_ << " switch(value) {" << endl;
std::set<int> done;
for (c_iter = constants.begin(); c_iter != constants.end(); ++c_iter) {
int value = (*c_iter)->get_value();
// Skipping duplicate value
if (done.find(value) == done.end()) {
done.insert(value);
f_types_impl_ << " case " << this->nspace_uc << name_uc << "_" << (*c_iter)->get_name()
<< ":"
<< "return \"" << this->nspace_uc << name_uc << "_" << (*c_iter)->get_name()
<< "\";" << endl;
}
}
f_types_impl_ << " default: g_snprintf(buf, 16, \"%d\", value); return buf;" << endl;
f_types_impl_ << " }" << endl;
f_types_impl_ << "}" << endl << endl;
}
/**
* Generates Thrift constants in C code.
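 *
 * For example (illustrative, assuming the Thrift prefix as above):
 *
 * Thrift:
 *   const i32 MAX_RETRIES = 5
 *
 * C:
 *   #define THRIFT_MAX_RETRIES 5
 *
 * Complex constants additionally get a <name>_constant() initializer
 * function in the implementation file.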
*/
void t_c_glib_generator::generate_consts(vector<t_const*> consts) {
f_types_ << "/* constants */" << endl;
f_types_impl_ << "/* constants */" << endl;
vector<t_const*>::iterator c_iter;
for (c_iter = consts.begin(); c_iter != consts.end(); ++c_iter) {
string name = (*c_iter)->get_name();
string name_uc = to_upper_case(name);
string name_lc = to_lower_case(name);
t_type* type = (*c_iter)->get_type();
t_const_value* value = (*c_iter)->get_value();
if (is_complex_type(type)) {
f_types_ << type_name(type) << indent() << this->nspace_lc << name_lc
<< "_constant();" << endl;
}
f_types_ << indent() << "#define " << this->nspace_uc << name_uc << " "
<< constant_value(name_lc, type, value) << endl;
generate_const_initializer(name_lc, type, value, true);
}
f_types_ << endl;
f_types_impl_ << endl;
}
/**
* Generate Thrift structs in C code, as GObjects. Example:
*
* Thrift:
* struct Bonk
* {
* 1: string message,
* 2: i32 type
* }
*
* C GObject instance header:
* struct _ThriftBonk
* {
* GObject parent;
*
* gchar * message;
* gint32 type;
* };
 * typedef struct _ThriftBonk ThriftBonk;
* // ... additional GObject boilerplate ...
*/
void t_c_glib_generator::generate_struct(t_struct* tstruct) {
f_types_ << "/* struct " << tstruct->get_name() << " */" << endl;
generate_object(tstruct);
}
/**
* Generate C code to represent Thrift services. Creates a new GObject
* which can be used to access the service.
*/
void t_c_glib_generator::generate_service(t_service* tservice) {
string svcname_u = initial_caps_to_underscores(tservice->get_name());
string svcname_uc = this->nspace_uc + to_upper_case(svcname_u);
string filename = this->nspace_lc + to_lower_case(svcname_u);
// make output files
string f_header_name = get_out_dir() + filename + ".h";
f_header_.open(f_header_name.c_str());
string program_name_u = initial_caps_to_underscores(program_name_);
string program_name_lc = to_lower_case(program_name_u);
// add header file boilerplate
f_header_ << autogen_comment();
// add an inclusion guard
f_header_ << "#ifndef " << svcname_uc << "_H" << endl << "#define " << svcname_uc << "_H" << endl
<< endl;
// add standard includes
f_header_ << "#include <thrift/c_glib/processor/thrift_dispatch_processor.h>" << endl << endl;
f_header_ << "#include \"" << this->nspace_lc << program_name_lc << "_types.h\"" << endl;
// if we are inheriting from another service, include its header
t_service* extends_service = tservice->get_extends();
if (extends_service != nullptr) {
f_header_ << "#include \"" << this->nspace_lc
<< to_lower_case(initial_caps_to_underscores(extends_service->get_name())) << ".h\""
<< endl;
}
f_header_ << endl;
// create the service implementation
string f_service_name = get_out_dir() + filename + ".c";
f_service_.open(f_service_name.c_str());
// add the boilerplate header
f_service_ << autogen_comment();
// include the headers
f_service_ << "#include <string.h>" << endl << "#include <thrift/c_glib/thrift.h>" << endl
<< "#include <thrift/c_glib/thrift_application_exception.h>" << endl << "#include \""
<< filename << ".h\"" << endl << endl;
// generate the service-helper classes
generate_service_helpers(tservice);
// generate the client objects
generate_service_client(tservice);
// generate the server objects
generate_service_server(tservice);
// end the header inclusion guard
f_header_ << "#endif /* " << svcname_uc << "_H */" << endl;
// close the files
f_service_.close();
f_header_.close();
}
/**
 * Generates C code to represent a Thrift exception as a GObject, along
 * with an error-code enum and a GError quark function for its error
 * domain.
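 *
 * For example (illustrative; assuming a namespace that maps to the
 * Thrift prefix, as in the examples above), an exception named
 * InvalidOperation produces in the header:
 *
 *   typedef enum
 *   {
 *     THRIFT_INVALID_OPERATION_ERROR_CODE
 *   } ThriftInvalidOperationError;
 *
 *   GQuark thrift_invalid_operation_error_quark (void);
 *   #define THRIFT_INVALID_OPERATION_ERROR \
 *     (thrift_invalid_operation_error_quark())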
*/
void t_c_glib_generator::generate_xception(t_struct* tstruct) {
string name = tstruct->get_name();
string name_u = initial_caps_to_underscores(name);
string name_lc = to_lower_case(name_u);
string name_uc = to_upper_case(name_u);
generate_object(tstruct);
f_types_ << "/* exception */" << endl
<< "typedef enum" << endl
<< "{" << endl;
indent_up();
f_types_ << indent() << this->nspace_uc << name_uc << "_ERROR_CODE" << endl;
indent_down();
f_types_ << "} " << this->nspace << name << "Error;" << endl
<< endl
<< "GQuark " << this->nspace_lc << name_lc
<< "_error_quark (void);" << endl
<< "#define " << this->nspace_uc << name_uc << "_ERROR ("
<< this->nspace_lc << name_lc << "_error_quark())" << endl
<< endl
<< endl;
f_types_impl_ << "/* define the GError domain for exceptions */" << endl << "#define "
<< this->nspace_uc << name_uc << "_ERROR_DOMAIN \"" << this->nspace_lc << name_lc
<< "_error_quark\"" << endl << "GQuark" << endl << this->nspace_lc << name_lc
<< "_error_quark (void)" << endl << "{" << endl
<< " return g_quark_from_static_string (" << this->nspace_uc << name_uc
<< "_ERROR_DOMAIN);" << endl << "}" << endl << endl;
}
/********************
* HELPER FUNCTIONS *
********************/
/**
* Returns true if ttype is not a primitive.
*/
bool t_c_glib_generator::is_complex_type(t_type* ttype) {
ttype = get_true_type(ttype);
return ttype->is_container() || ttype->is_struct() || ttype->is_xception();
}
bool t_c_glib_generator::is_numeric(t_type* ttype) {
return ttype->is_enum() || (ttype->is_base_type() && !ttype->is_string());
}
/**
* Maps a Thrift t_type to a C type.
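 *
 * For example: string maps to gchar *, binary to GByteArray *, map and
 * set map to GHashTable *, and list maps to GArray * for numeric element
 * types or GPtrArray * otherwise; struct types map to a pointer to their
 * GObject representation.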
*/
string t_c_glib_generator::type_name(t_type* ttype, bool in_typedef, bool is_const) {
if (ttype->is_base_type()) {
string bname = base_type_name(ttype);
if (is_const) {
return "const " + bname;
} else {
return bname;
}
}
if (ttype->is_container()) {
string cname;
t_container* tcontainer = (t_container*)ttype;
if (tcontainer->has_cpp_name()) {
cname = tcontainer->get_cpp_name();
} else if (ttype->is_map()) {
cname = "GHashTable";
} else if (ttype->is_set()) {
// since a set requires unique elements, use a GHashTable, and
// populate the keys and values with the same data, using keys for
// the actual writes and reads.
// TODO: discuss whether or not to implement TSet, THashSet or GHashSet
cname = "GHashTable";
} else if (ttype->is_list()) {
t_type* etype = get_true_type(((t_list*)ttype)->get_elem_type());
if (etype->is_void()) {
throw std::runtime_error("compiler error: list element type cannot be void");
}
// TODO: investigate other implementations besides GPtrArray
cname = is_numeric(etype) ? "GArray" : "GPtrArray";
}
/* Omit the dereference operator if we are aliasing this type within a
typedef, to allow the type to be used more naturally in client code;
otherwise, include it */
if (!in_typedef) {
cname += " *";
}
if (is_const) {
return "const " + cname;
} else {
return cname;
}
}
// check for a namespace
t_program* tprogram = ttype->get_program();
string pname = (tprogram ? tprogram->get_namespace("c_glib") : "") + ttype->get_name();
if (is_complex_type(ttype)) {
pname += " *";
}
if (is_const) {
return "const " + pname;
} else {
return pname;
}
}
/**
* Maps a Thrift primitive to the type needed to hold its value when used as an
* object property.
*
* This method is needed because all integer properties of width less than 64
* bits map to the same type, gint, as opposed to their width-specific type
* (gint8, gint16 or gint32).
*/
string t_c_glib_generator::property_type_name(t_type* ttype, bool in_typedef, bool is_const) {
string result;
if (ttype->is_base_type()) {
switch (((t_base_type*)ttype)->get_base()) {
case t_base_type::TYPE_I8:
case t_base_type::TYPE_I16:
case t_base_type::TYPE_I32:
if (is_const) {
result = "const gint";
} else {
result = "gint";
}
break;
default:
result = type_name(ttype, in_typedef, is_const);
}
} else {
result = type_name(ttype, in_typedef, is_const);
}
return result;
}
/**
* Maps a Thrift primitive to a C primitive.
*/
string t_c_glib_generator::base_type_name(t_type* type) {
if (type->is_enum()) {
return type_name(type);
}
if (!type->is_base_type()) {
    throw std::invalid_argument("Only base types are supported.");
}
t_base_type* base_type = reinterpret_cast<t_base_type*>(type);
t_base_type::t_base tbase = base_type->get_base();
switch (tbase) {
case t_base_type::TYPE_VOID:
return "void";
case t_base_type::TYPE_STRING:
if (base_type->is_binary()) {
return "GByteArray *";
} else {
return "gchar *";
}
case t_base_type::TYPE_BOOL:
return "gboolean";
case t_base_type::TYPE_I8:
return "gint8";
case t_base_type::TYPE_I16:
return "gint16";
case t_base_type::TYPE_I32:
return "gint32";
case t_base_type::TYPE_I64:
return "gint64";
case t_base_type::TYPE_DOUBLE:
return "gdouble";
default:
throw std::logic_error("compiler error: no C base type name for base type "
+ t_base_type::t_base_name(tbase));
}
}
/**
* Returns a member of the ThriftType C enumeration in thrift_protocol.h
* for a Thrift type.
*/
string t_c_glib_generator::type_to_enum(t_type* type) {
type = get_true_type(type);
if (type->is_base_type()) {
t_base_type::t_base tbase = ((t_base_type*)type)->get_base();
switch (tbase) {
case t_base_type::TYPE_VOID:
throw "NO T_VOID CONSTRUCT";
case t_base_type::TYPE_STRING:
return "T_STRING";
case t_base_type::TYPE_BOOL:
return "T_BOOL";
case t_base_type::TYPE_I8:
return "T_BYTE";
case t_base_type::TYPE_I16:
return "T_I16";
case t_base_type::TYPE_I32:
return "T_I32";
case t_base_type::TYPE_I64:
return "T_I64";
case t_base_type::TYPE_DOUBLE:
return "T_DOUBLE";
}
} else if (type->is_enum()) {
return "T_I32";
} else if (type->is_struct()) {
return "T_STRUCT";
} else if (type->is_xception()) {
return "T_STRUCT";
} else if (type->is_map()) {
return "T_MAP";
} else if (type->is_set()) {
return "T_SET";
} else if (type->is_list()) {
return "T_LIST";
}
throw "INVALID TYPE IN type_to_enum: " + type->get_name();
}
/**
* Returns a Thrift constant formatted as a literal for inclusion in C code.
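 *
 * For example, a bool constant renders as TRUE or FALSE, a string as a
 * quoted literal, and a list as a brace-enclosed initializer such as
 * { 1, 2, 3 }; map constants are not supported and render as NULL.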
*/
string t_c_glib_generator::constant_literal(t_type* type, t_const_value* value) {
ostringstream render;
if (type->is_base_type()) {
/* primitives */
t_base_type::t_base tbase = ((t_base_type*)type)->get_base();
switch (tbase) {
case t_base_type::TYPE_STRING:
render << "\"" + value->get_string() + "\"";
break;
case t_base_type::TYPE_BOOL:
render << ((value->get_integer() != 0) ? "TRUE" : "FALSE");
break;
case t_base_type::TYPE_I8:
case t_base_type::TYPE_I16:
case t_base_type::TYPE_I32:
case t_base_type::TYPE_I64:
render << value->get_integer();
break;
case t_base_type::TYPE_DOUBLE:
render << value->get_double();
break;
default:
throw "compiler error: no const of base type " + t_base_type::t_base_name(tbase);
}
} else {
t_const_value::t_const_value_type value_type = value->get_type();
switch (value_type) {
case t_const_value::CV_IDENTIFIER:
render << value->get_integer();
break;
case t_const_value::CV_LIST:
render << "{ ";
{
t_type* elem_type = ((t_list*)type)->get_elem_type();
const vector<t_const_value*>& list = value->get_list();
vector<t_const_value*>::const_iterator list_iter;
if (list.size() > 0) {
list_iter = list.begin();
render << constant_literal(elem_type, *list_iter);
while (++list_iter != list.end()) {
render << ", " << constant_literal(elem_type, *list_iter);
}
}
}
render << " }";
break;
case t_const_value::CV_MAP:
default:
render << "NULL /* not supported */";
}
}
return render.str();
}
/**
* Returns C code that represents a Thrift constant.
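 *
 * The result is always a C expression; for example, a string constant
 * renders as g_strdup ("value"), an i64 constant as
 * G_GINT64_CONSTANT (value), and a complex constant as a call to its
 * generated <name>_constant() accessor.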
*/
string t_c_glib_generator::constant_value(string name, t_type* type, t_const_value* value) {
ostringstream render;
if (type->is_base_type()) {
/* primitives */
t_base_type::t_base tbase = ((t_base_type*)type)->get_base();
switch (tbase) {
case t_base_type::TYPE_STRING:
render << "g_strdup (\"" + value->get_string() + "\")";
break;
case t_base_type::TYPE_BOOL:
render << ((value->get_integer() != 0) ? 1 : 0);
break;
case t_base_type::TYPE_I8:
case t_base_type::TYPE_I16:
case t_base_type::TYPE_I32:
render << value->get_integer();
break;
case t_base_type::TYPE_I64:
render << "G_GINT64_CONSTANT (" << value->get_integer() << ")";
break;
case t_base_type::TYPE_DOUBLE:
if (value->get_type() == t_const_value::CV_INTEGER) {
render << value->get_integer();
} else {
render << value->get_double();
}
break;
default:
throw "compiler error: no const of base type " + t_base_type::t_base_name(tbase);
}
} else if (type->is_enum()) {
render << "(" << type_name(type) << ")" << value->get_integer();
} else if (is_complex_type(type)) {
render << "(" << this->nspace_lc << to_lower_case(name) << "_constant())";
} else {
render << "NULL /* not supported */";
}
return render.str();
}
/**
* Renders a function signature of the form 'type name(args)'
*
* @param tfunction Function definition
* @return String of rendered function definition
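 *
 * For example (illustrative, assuming the Thrift prefix), a Calculator
 * method 'i32 add(1: i32 num1, 2: i32 num2)' wrapped as the client
 * function calculator_client_add renders as:
 *
 *   gboolean thrift_calculator_client_add (ThriftCalculatorIf * iface,
 *       gint32* _return, const gint32 num1, const gint32 num2,
 *       GError ** error)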
*/
string t_c_glib_generator::function_signature(t_function* tfunction) {
t_type* ttype = tfunction->get_returntype();
t_struct* arglist = tfunction->get_arglist();
t_struct* xlist = tfunction->get_xceptions();
string fname = initial_caps_to_underscores(tfunction->get_name());
bool has_return = !ttype->is_void();
  bool has_args = arglist->get_members().size() > 0;
  bool has_xceptions = xlist->get_members().size() > 0;
  return "gboolean " + this->nspace_lc + fname + " (" + this->nspace + service_name_ + "If * iface"
         + (has_return ? ", " + type_name(ttype) + "* _return" : "")
         + (has_args ? (", " + argument_list(arglist)) : "")
         + (has_xceptions ? (", " + xception_list(xlist)) : "") + ", GError ** error)";
}
/**
* Renders a field list
*
* @param tstruct The struct definition
 * @return Comma separated list of all field names in that struct
*/
string t_c_glib_generator::argument_list(t_struct* tstruct) {
string result = "";
const vector<t_field*>& fields = tstruct->get_members();
vector<t_field*>::const_iterator f_iter;
bool first = true;
for (f_iter = fields.begin(); f_iter != fields.end(); ++f_iter) {
if (first) {
first = false;
} else {
result += ", ";
}
result += type_name((*f_iter)->get_type(), false, true) + " " + (*f_iter)->get_name();
}
return result;
}
/**
* Renders mutable exception lists
*
* @param tstruct The struct definition
 * @return Comma separated list of all field names in that struct
*/
string t_c_glib_generator::xception_list(t_struct* tstruct) {
string result = "";
const vector<t_field*>& fields = tstruct->get_members();
vector<t_field*>::const_iterator f_iter;
bool first = true;
for (f_iter = fields.begin(); f_iter != fields.end(); ++f_iter) {
if (first) {
first = false;
} else {
result += ", ";
}
result += type_name((*f_iter)->get_type(), false, false) + "* " + (*f_iter)->get_name();
}
return result;
}
/**
* Declares a field, including any necessary initialization.
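 *
 * For example, an i32 field 'count' declared with init = true renders as
 * 'gint32 count = 0;', while struct and container fields initialize to
 * NULL.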
*/
string t_c_glib_generator::declare_field(t_field* tfield,
bool init,
bool pointer,
bool constant,
bool reference) {
string result = "";
if (constant) {
result += "const ";
}
result += type_name(tfield->get_type());
if (pointer) {
result += "*";
}
if (reference) {
result += "*";
}
result += " " + tfield->get_name();
if (init) {
t_type* type = get_true_type(tfield->get_type());
if (type->is_base_type()) {
t_base_type::t_base tbase = ((t_base_type*)type)->get_base();
switch (tbase) {
case t_base_type::TYPE_VOID:
break;
case t_base_type::TYPE_BOOL:
case t_base_type::TYPE_I8:
case t_base_type::TYPE_I16:
case t_base_type::TYPE_I32:
case t_base_type::TYPE_I64:
result += " = 0";
break;
case t_base_type::TYPE_DOUBLE:
result += " = (gdouble) 0";
break;
case t_base_type::TYPE_STRING:
result += " = NULL";
break;
default:
throw "compiler error: no C intializer for base type " + t_base_type::t_base_name(tbase);
}
} else if (type->is_enum()) {
result += " = (" + type_name(type) + ") 0";
} else if (type->is_struct() || type->is_container()) {
result += " = NULL";
}
}
if (!reference) {
result += ";";
}
return result;
}
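/**
 * Renders a local variable declaration that holds a constant value.
 * Numeric values are copied into freshly allocated storage (g_new) so
 * that a pointer to them can be inserted into a GHashTable.
 */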
string t_c_glib_generator::constant_value_with_storage(string fname,
t_type* etype,
t_const_value* value) {
ostringstream render;
if (is_numeric(etype)) {
render << " " << type_name(etype) << " *" << fname << " = "
<< "g_new (" << base_type_name(etype) << ", 1);" << endl
<< " *" << fname << " = " << constant_value(fname, (t_type*)etype, value) << ";"
<< endl;
} else {
render << " " << type_name(etype) << " " << fname << " = "
<< constant_value(fname, (t_type*)etype, value) << ";" << endl;
}
return render.str();
}
/**
* Generates C code that initializes complex constants.
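 *
 * Each complex constant becomes a <name>_constant() function that lazily
 * builds the value into a function-local static on first use and returns
 * the same instance on subsequent calls, e.g. (illustrative):
 *
 *   static GHashTable *constant = NULL;
 *   if (constant == NULL)
 *   {
 *     ...build and populate constant...
 *   }
 *   return constant;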
*/
void t_c_glib_generator::generate_const_initializer(string name,
t_type* type,
t_const_value* value,
bool top_level) {
string name_u = initial_caps_to_underscores(name);
string name_lc = to_lower_case(name_u);
string type_u = initial_caps_to_underscores(type->get_name());
string type_uc = to_upper_case(type_u);
string maybe_static = top_level ? "" : "static ";
if (type->is_struct() || type->is_xception()) {
const vector<t_field*>& fields = ((t_struct*)type)->get_members();
vector<t_field*>::const_iterator f_iter;
const map<t_const_value*, t_const_value*, t_const_value::value_compare>& val = value->get_map();
map<t_const_value*, t_const_value*, t_const_value::value_compare>::const_iterator v_iter;
ostringstream initializers;
// initialize any constants that may be referenced by this initializer
for (v_iter = val.begin(); v_iter != val.end(); ++v_iter) {
t_type* field_type = nullptr;
string field_name = "";
for (f_iter = fields.begin(); f_iter != fields.end(); ++f_iter) {
if ((*f_iter)->get_name() == v_iter->first->get_string()) {
field_type = (*f_iter)->get_type();
field_name = (*f_iter)->get_name();
break;
}
}
if (field_type == nullptr) {
throw "type error: " + type->get_name() + " has no field "
+ v_iter->first->get_string();
}
field_name = tmp(field_name);
generate_const_initializer(name + "_constant_" + field_name,
field_type,
v_iter->second);
initializers << " constant->" << v_iter->first->get_string() << " = "
<< constant_value(name + "_constant_" + field_name,
field_type,
v_iter->second) << ";" << endl
<< " constant->__isset_" << v_iter->first->get_string()
<< " = TRUE;" << endl;
}
// implement the initializer
f_types_impl_ << maybe_static << this->nspace << type->get_name() << " *"
<< endl
<< this->nspace_lc << name_lc << "_constant (void)" << endl;
scope_up(f_types_impl_);
f_types_impl_ << indent() << "static " << this->nspace << type->get_name()
<< " *constant = NULL;" << endl
<< indent() << "if (constant == NULL)" << endl;
scope_up(f_types_impl_);
f_types_impl_ << indent() << "constant = g_object_new (" << this->nspace_uc
<< "TYPE_" << type_uc << ", NULL);" << endl
<< initializers.str();
scope_down(f_types_impl_);
for (v_iter = val.begin(); v_iter != val.end(); ++v_iter) {
t_type* field_type = nullptr;
string field_name = "";
for (f_iter = fields.begin(); f_iter != fields.end(); ++f_iter) {
if ((*f_iter)->get_name() == v_iter->first->get_string()) {
field_type = (*f_iter)->get_type();
field_name = (*f_iter)->get_name();
break;
}
}
if (field_type == nullptr) {
throw "type error: " + type->get_name() + " has no field "
+ v_iter->first->get_string();
}
field_name = tmp(field_name);
}
f_types_impl_ << indent() << "return constant;" << endl;
scope_down(f_types_impl_);
f_types_impl_ << endl;
} else if (type->is_list()) {
string list_type = "GPtrArray *";
    string list_initializer; // set below once the element type is known
string list_appender = "g_ptr_array_add";
bool list_variable = false;
t_type* etype = ((t_list*)type)->get_elem_type();
const vector<t_const_value*>& val = value->get_list();
vector<t_const_value*>::const_iterator v_iter;
ostringstream initializers;
ostringstream appenders;
list_initializer = generate_new_array_from_type(etype);
if (etype->is_base_type()) {
t_base_type::t_base tbase = ((t_base_type*)etype)->get_base();
switch (tbase) {
case t_base_type::TYPE_VOID:
throw "compiler error: cannot determine array type";
case t_base_type::TYPE_BOOL:
case t_base_type::TYPE_I8:
case t_base_type::TYPE_I16:
case t_base_type::TYPE_I32:
case t_base_type::TYPE_I64:
case t_base_type::TYPE_DOUBLE:
list_type = "GArray *";
list_appender = "g_array_append_val";
list_variable = true;
break;
case t_base_type::TYPE_STRING:
break;
default:
throw "compiler error: no array info for type";
}
} else if (etype->is_enum()) {
list_type = "GArray *";
list_appender = "g_array_append_val";
list_variable = true;
}
for (v_iter = val.begin(); v_iter != val.end(); ++v_iter) {
string fname = tmp(name);
generate_const_initializer(fname, etype, (*v_iter));
if (list_variable) {
initializers << " " << type_name(etype) << " " << fname << " = "
<< constant_value(fname, (t_type*)etype, (*v_iter)) << ";"
<< endl;
appenders << " " << list_appender << "(constant, " << fname << ");"
<< endl;
} else {
appenders << " " << list_appender << "(constant, "
<< constant_value(fname, (t_type*)etype, (*v_iter)) << ");"
<< endl;
}
}
f_types_impl_ << maybe_static << list_type << endl
<< this->nspace_lc << name_lc << "_constant (void)" << endl;
scope_up(f_types_impl_);
f_types_impl_ << indent() << "static " << list_type << " constant = NULL;"
<< endl
<< indent() << "if (constant == NULL)" << endl;
scope_up(f_types_impl_);
if (!initializers.str().empty()) {
f_types_impl_ << initializers.str()
<< endl;
}
f_types_impl_ << indent() << "constant = " << list_initializer << endl
<< appenders.str();
scope_down(f_types_impl_);
f_types_impl_ << indent() << "return constant;" << endl;
scope_down(f_types_impl_);
f_types_impl_ << endl;
} else if (type->is_set()) {
t_type* etype = ((t_set*)type)->get_elem_type();
const vector<t_const_value*>& val = value->get_list();
vector<t_const_value*>::const_iterator v_iter;
ostringstream initializers;
ostringstream appenders;
for (v_iter = val.begin(); v_iter != val.end(); ++v_iter) {
string fname = tmp(name);
generate_const_initializer(fname, etype, (*v_iter));
initializers << constant_value_with_storage(fname, (t_type*)etype, *v_iter);
appenders << " g_hash_table_insert (constant, " << fname << ", 0);" << endl;
}
f_types_impl_ << maybe_static << "GHashTable *" << endl
<< this->nspace_lc << name_lc << "_constant (void)" << endl;
scope_up(f_types_impl_);
f_types_impl_ << indent() << "static GHashTable *constant = NULL;" << endl
<< indent() << "if (constant == NULL)" << endl;
scope_up(f_types_impl_);
f_types_impl_ << initializers.str() << endl
<< indent() << "constant = " << generate_new_hash_from_type(etype, nullptr) << endl
<< appenders.str();
scope_down(f_types_impl_);
f_types_impl_ << indent() << "return constant;" << endl;
scope_down(f_types_impl_);
f_types_impl_ << endl;
} else if (type->is_map()) {
t_type* ktype = ((t_map*)type)->get_key_type();
t_type* vtype = ((t_map*)type)->get_val_type();
const map<t_const_value*, t_const_value*, t_const_value::value_compare>& val = value->get_map();
map<t_const_value*, t_const_value*, t_const_value::value_compare>::const_iterator v_iter;
ostringstream initializers;
ostringstream appenders;
for (v_iter = val.begin(); v_iter != val.end(); ++v_iter) {
string fname = tmp(name);
string kname = fname + "key";
string vname = fname + "val";
generate_const_initializer(kname, ktype, v_iter->first);
generate_const_initializer(vname, vtype, v_iter->second);
initializers << constant_value_with_storage(kname, (t_type*)ktype, v_iter->first);
initializers << constant_value_with_storage(vname, (t_type*)vtype, v_iter->second);
appenders << " g_hash_table_insert (constant, " << kname << ", " << vname << ");" << endl;
}
f_types_impl_ << maybe_static << "GHashTable *" << endl
<< this->nspace_lc << name_lc << "_constant (void)" << endl;
scope_up(f_types_impl_);
f_types_impl_ << indent() << "static GHashTable *constant = NULL;" << endl
<< indent() << "if (constant == NULL)" << endl;
scope_up(f_types_impl_);
f_types_impl_ << initializers.str() << endl
<< indent() << "constant = " << generate_new_hash_from_type(ktype, vtype) << endl
<< appenders.str();
scope_down(f_types_impl_);
f_types_impl_ << indent() << "return constant;" << endl;
scope_down(f_types_impl_);
f_types_impl_ << endl;
}
}
/**
* Generates helper classes for a service, consisting of a ThriftStruct subclass
* for the arguments to and the result from each method.
*
* @param tservice The service for which to generate helper classes
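 *
 * For example (illustrative), a method 'add' on a service Calculator
 * yields structs named CalculatorAddArgs and CalculatorAddResult, where
 * the result struct carries an optional 'success' field (for non-void
 * returns) plus one optional field per declared exception.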
*/
void t_c_glib_generator::generate_service_helpers(t_service* tservice) {
vector<t_function*> functions = tservice->get_functions();
vector<t_function*>::iterator function_iter;
// Iterate through the service's methods
for (function_iter = functions.begin(); function_iter != functions.end(); ++function_iter) {
string function_name = (*function_iter)->get_name();
t_struct* arg_list = (*function_iter)->get_arglist();
string arg_list_name_orig = arg_list->get_name();
// Generate the arguments class
arg_list->set_name(tservice->get_name() + underscores_to_initial_caps(function_name) + "Args");
generate_struct(arg_list);
arg_list->set_name(arg_list_name_orig);
// Generate the result class
if (!(*function_iter)->is_oneway()) {
t_struct result(program_,
tservice->get_name() + underscores_to_initial_caps(function_name) + "Result");
t_field success((*function_iter)->get_returntype(), "success", 0);
success.set_req(t_field::T_OPTIONAL);
if (!(*function_iter)->get_returntype()->is_void()) {
result.append(&success);
}
t_struct* xs = (*function_iter)->get_xceptions();
const vector<t_field*>& fields = xs->get_members();
vector<t_field*>::const_iterator field_iter;
for (field_iter = fields.begin(); field_iter != fields.end(); ++field_iter) {
(*field_iter)->set_req(t_field::T_OPTIONAL);
result.append(*field_iter);
}
generate_struct(&result);
}
}
}
/**
* Generates C code that represents a Thrift service client.
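 *
 * For a service Foo (illustrative, assuming the Thrift prefix) this emits
 * a ThriftFooIf GTypeInterface, a ThriftFooClient GObject implementing
 * it, and thrift_foo_client_send_*() / thrift_foo_client_recv_*() pairs
 * that the top-level thrift_foo_client_*() methods call in turn (recv is
 * emitted only for methods that are not oneway).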
*/
void t_c_glib_generator::generate_service_client(t_service* tservice) {
/* get some C friendly service names */
string service_name_lc = to_lower_case(initial_caps_to_underscores(service_name_));
string service_name_uc = to_upper_case(service_name_lc);
string parent_service_name;
string parent_service_name_lc;
string parent_service_name_uc;
string parent_class_name = "GObject";
string parent_type_name = "G_TYPE_OBJECT";
// The service this service extends, or nullptr if it extends no
// service
t_service* extends_service = tservice->get_extends();
if (extends_service) {
// The name of the parent service
parent_service_name = extends_service->get_name();
parent_service_name_lc = to_lower_case(initial_caps_to_underscores(parent_service_name));
parent_service_name_uc = to_upper_case(parent_service_name_lc);
// The names of the client class' parent class and type
parent_class_name = this->nspace + parent_service_name + "Client";
parent_type_name = this->nspace_uc + "TYPE_" + parent_service_name_uc + "_CLIENT";
}
// The base service (the topmost in the "extends" hierarchy), on
// whose client class the "input_protocol" and "output_protocol"
// properties are defined
t_service* base_service = tservice;
while (base_service->get_extends()) {
base_service = base_service->get_extends();
}
string base_service_name = base_service->get_name();
string base_service_name_lc = to_lower_case(initial_caps_to_underscores(base_service_name));
string base_service_name_uc = to_upper_case(base_service_name_lc);
// Generate the client interface dummy object in the header.
f_header_ << "/* " << service_name_ << " service interface */" << endl << "typedef struct _"
<< this->nspace << service_name_ << "If " << this->nspace << service_name_ << "If; "
<< " /* dummy object */" << endl << endl;
// Generate the client interface object in the header.
f_header_ << "struct _" << this->nspace << service_name_ << "IfInterface" << endl << "{" << endl
<< " GTypeInterface parent;" << endl << endl;
/* write out the functions for this interface */
indent_up();
vector<t_function*> functions = tservice->get_functions();
vector<t_function*>::const_iterator f_iter;
for (f_iter = functions.begin(); f_iter != functions.end(); ++f_iter) {
/* make the function name C friendly */
string funname = initial_caps_to_underscores((*f_iter)->get_name());
t_type* ttype = (*f_iter)->get_returntype();
t_struct* arglist = (*f_iter)->get_arglist();
t_struct* xlist = (*f_iter)->get_xceptions();
bool has_return = !ttype->is_void();
    bool has_args = arglist->get_members().size() > 0;
    bool has_xceptions = xlist->get_members().size() > 0;
    string params = "(" + this->nspace + service_name_ + "If *iface"
                    + (has_return ? ", " + type_name(ttype) + "* _return" : "")
                    + (has_args ? (", " + argument_list(arglist)) : "")
                    + (has_xceptions ? (", " + xception_list(xlist)) : "") + ", GError **error)";
indent(f_header_) << "gboolean (*" << funname << ") " << params << ";" << endl;
}
indent_down();
f_header_ << "};" << endl << "typedef struct _" << this->nspace << service_name_ << "IfInterface "
<< this->nspace << service_name_ << "IfInterface;" << endl << endl;
// generate all the interface boilerplate
f_header_ << "GType " << this->nspace_lc << service_name_lc << "_if_get_type (void);" << endl
<< "#define " << this->nspace_uc << "TYPE_" << service_name_uc << "_IF "
<< "(" << this->nspace_lc << service_name_lc << "_if_get_type())" << endl << "#define "
<< this->nspace_uc << service_name_uc << "_IF(obj) "
<< "(G_TYPE_CHECK_INSTANCE_CAST ((obj), " << this->nspace_uc << "TYPE_"
<< service_name_uc << "_IF, " << this->nspace << service_name_ << "If))" << endl
<< "#define " << this->nspace_uc << "IS_" << service_name_uc << "_IF(obj) "
<< "(G_TYPE_CHECK_INSTANCE_TYPE ((obj), " << this->nspace_uc << "TYPE_"
<< service_name_uc << "_IF))" << endl << "#define " << this->nspace_uc
<< service_name_uc << "_IF_GET_INTERFACE(inst) (G_TYPE_INSTANCE_GET_INTERFACE ((inst), "
<< this->nspace_uc << "TYPE_" << service_name_uc << "_IF, " << this->nspace
<< service_name_ << "IfInterface))" << endl << endl;
// write out all the interface function prototypes
for (f_iter = functions.begin(); f_iter != functions.end(); ++f_iter) {
/* make the function name C friendly */
string funname = initial_caps_to_underscores((*f_iter)->get_name());
t_type* ttype = (*f_iter)->get_returntype();
t_struct* arglist = (*f_iter)->get_arglist();
t_struct* xlist = (*f_iter)->get_xceptions();
bool has_return = !ttype->is_void();
    bool has_args = arglist->get_members().size() > 0;
    bool has_xceptions = xlist->get_members().size() > 0;
    string params = "(" + this->nspace + service_name_ + "If *iface"
                    + (has_return ? ", " + type_name(ttype) + "* _return" : "")
                    + (has_args ? (", " + argument_list(arglist)) : "")
                    + (has_xceptions ? (", " + xception_list(xlist)) : "") + ", GError **error)";
f_header_ << "gboolean " << this->nspace_lc << service_name_lc << "_if_" << funname << " "
<< params << ";" << endl;
}
f_header_ << endl;
// Generate the client object instance definition in the header.
f_header_ << "/* " << service_name_ << " service client */" << endl << "struct _" << this->nspace
<< service_name_ << "Client" << endl << "{" << endl << " " << parent_class_name
<< " parent;" << endl;
if (!extends_service) {
// Define "input_protocol" and "output_protocol" properties only
// for base services; child service-client classes will inherit
// these
f_header_ << endl << " ThriftProtocol *input_protocol;" << endl
<< " ThriftProtocol *output_protocol;" << endl;
}
f_header_ << "};" << endl << "typedef struct _" << this->nspace << service_name_ << "Client "
<< this->nspace << service_name_ << "Client;" << endl << endl;
// Generate the class definition in the header.
f_header_ << "struct _" << this->nspace << service_name_ << "ClientClass" << endl << "{" << endl
<< " " << parent_class_name << "Class parent;" << endl << "};" << endl
<< "typedef struct _" << this->nspace << service_name_ << "ClientClass " << this->nspace
<< service_name_ << "ClientClass;" << endl << endl;
// Create all the GObject boilerplate
f_header_ << "GType " << this->nspace_lc << service_name_lc << "_client_get_type (void);" << endl
<< "#define " << this->nspace_uc << "TYPE_" << service_name_uc << "_CLIENT "
<< "(" << this->nspace_lc << service_name_lc << "_client_get_type())" << endl
<< "#define " << this->nspace_uc << service_name_uc << "_CLIENT(obj) "
<< "(G_TYPE_CHECK_INSTANCE_CAST ((obj), " << this->nspace_uc << "TYPE_"
<< service_name_uc << "_CLIENT, " << this->nspace << service_name_ << "Client))" << endl
<< "#define " << this->nspace_uc << service_name_uc << "_CLIENT_CLASS(c) "
<< "(G_TYPE_CHECK_CLASS_CAST ((c), " << this->nspace_uc << "TYPE_" << service_name_uc
<< "_CLIENT, " << this->nspace << service_name_ << "ClientClass))" << endl << "#define "
<< this->nspace_uc << service_name_uc << "_IS_CLIENT(obj) "
<< "(G_TYPE_CHECK_INSTANCE_TYPE ((obj), " << this->nspace_uc << "TYPE_"
<< service_name_uc << "_CLIENT))" << endl << "#define " << this->nspace_uc
<< service_name_uc << "_IS_CLIENT_CLASS(c) "
<< "(G_TYPE_CHECK_CLASS_TYPE ((c), " << this->nspace_uc << "TYPE_" << service_name_uc
<< "_CLIENT))" << endl << "#define " << this->nspace_uc << service_name_uc
<< "_CLIENT_GET_CLASS(obj) "
<< "(G_TYPE_INSTANCE_GET_CLASS ((obj), " << this->nspace_uc << "TYPE_"
<< service_name_uc << "_CLIENT, " << this->nspace << service_name_ << "ClientClass))"
<< endl << endl;
/* write out the function prototypes */
for (f_iter = functions.begin(); f_iter != functions.end(); ++f_iter) {
/* make the function name C friendly */
string funname = to_lower_case(initial_caps_to_underscores((*f_iter)->get_name()));
t_function service_function((*f_iter)->get_returntype(),
service_name_lc + string("_client_") + funname,
(*f_iter)->get_arglist(),
(*f_iter)->get_xceptions());
indent(f_header_) << function_signature(&service_function) << ";" << endl;
t_function send_function(g_type_void,
service_name_lc + string("_client_send_") + funname,
(*f_iter)->get_arglist());
indent(f_header_) << function_signature(&send_function) << ";" << endl;
    // implement recv if not a oneway function
if (!(*f_iter)->is_oneway()) {
t_struct noargs(program_);
t_function recv_function((*f_iter)->get_returntype(),
service_name_lc + string("_client_recv_") + funname,
&noargs,
(*f_iter)->get_xceptions());
indent(f_header_) << function_signature(&recv_function) << ";" << endl;
}
}
/* write out the get/set function prototypes */
f_header_ << "void " + service_name_lc + "_client_set_property (GObject *object, guint "
"property_id, const GValue *value, GParamSpec *pspec);"
<< endl;
f_header_ << "void " + service_name_lc + "_client_get_property (GObject *object, guint "
"property_id, GValue *value, GParamSpec *pspec);"
<< endl;
f_header_ << endl;
// end of header code
// Generate interface method implementations
for (f_iter = functions.begin(); f_iter != functions.end(); ++f_iter) {
/* make the function name C friendly */
string funname = initial_caps_to_underscores((*f_iter)->get_name());
t_type* ttype = (*f_iter)->get_returntype();
t_struct* arglist = (*f_iter)->get_arglist();
t_struct* xlist = (*f_iter)->get_xceptions();
bool has_return = !ttype->is_void();
    bool has_args = arglist->get_members().size() > 0;
    bool has_xceptions = xlist->get_members().size() > 0;
    string params = "(" + this->nspace + service_name_ + "If *iface"
                    + (has_return ? ", " + type_name(ttype) + "* _return" : "")
                    + (has_args ? (", " + argument_list(arglist)) : "")
                    + (has_xceptions ? (", " + xception_list(xlist)) : "") + ", GError **error)";
string params_without_type = string("iface, ") + (has_return ? "_return, " : "");
const vector<t_field*>& fields = arglist->get_members();
vector<t_field*>::const_iterator f_iter_field;
for (f_iter_field = fields.begin(); f_iter_field != fields.end(); ++f_iter_field) {
params_without_type += (*f_iter_field)->get_name();
params_without_type += ", ";
}
const vector<t_field*>& xceptions = xlist->get_members();
vector<t_field*>::const_iterator x_iter;
for (x_iter = xceptions.begin(); x_iter != xceptions.end(); ++x_iter) {
params_without_type += (*x_iter)->get_name();
params_without_type += ", ";
}
f_service_ << "gboolean" << endl << this->nspace_lc << service_name_lc << "_if_" << funname
<< " " << params << endl << "{" << endl << " return " << this->nspace_uc
<< service_name_uc << "_IF_GET_INTERFACE (iface)->" << funname << " ("
<< params_without_type << "error);" << endl << "}" << endl << endl;
}
// Generate interface boilerplate
f_service_ << "GType" << endl << this->nspace_lc << service_name_lc << "_if_get_type (void)"
<< endl << "{" << endl << " static GType type = 0;" << endl << " if (type == 0)"
<< endl << " {" << endl << " static const GTypeInfo type_info =" << endl << " {"
<< endl << " sizeof (" << this->nspace << service_name_ << "IfInterface)," << endl
<< " NULL, /* base_init */" << endl << " NULL, /* base_finalize */" << endl
<< " NULL, /* class_init */" << endl << " NULL, /* class_finalize */"
<< endl << " NULL, /* class_data */" << endl
<< " 0, /* instance_size */" << endl << " 0, /* n_preallocs */"
<< endl << " NULL, /* instance_init */" << endl
<< " NULL /* value_table */" << endl << " };" << endl
<< " type = g_type_register_static (G_TYPE_INTERFACE," << endl
<< " \"" << this->nspace << service_name_ << "If\","
<< endl << " &type_info, 0);" << endl << " }"
<< endl << " return type;" << endl << "}" << endl << endl;
// Generate client boilerplate
f_service_ << "static void " << endl << this->nspace_lc << service_name_lc
<< "_if_interface_init (" << this->nspace << service_name_ << "IfInterface *iface);"
<< endl << endl << "G_DEFINE_TYPE_WITH_CODE (" << this->nspace << service_name_
<< "Client, " << this->nspace_lc << service_name_lc << "_client," << endl
<< " " << parent_type_name << ", " << endl
<< " G_IMPLEMENT_INTERFACE (" << this->nspace_uc << "TYPE_"
<< service_name_uc << "_IF," << endl
<< " " << this->nspace_lc
<< service_name_lc << "_if_interface_init))" << endl << endl;
  // Generate property-related code only for base services; child
  // service-client classes have only properties inherited from their
  // parent class
if (!extends_service) {
// Generate client properties
f_service_ << "enum _" << this->nspace << service_name_ << "ClientProperties" << endl << "{"
<< endl << " PROP_0," << endl << " PROP_" << this->nspace_uc << service_name_uc
<< "_CLIENT_INPUT_PROTOCOL," << endl << " PROP_" << this->nspace_uc
<< service_name_uc << "_CLIENT_OUTPUT_PROTOCOL" << endl << "};" << endl << endl;
// generate property setter
f_service_ << "void" << endl << this->nspace_lc << service_name_lc << "_client_set_property ("
<< "GObject *object, guint property_id, const GValue *value, "
<< "GParamSpec *pspec)" << endl << "{" << endl << " " << this->nspace
<< service_name_ << "Client *client = " << this->nspace_uc << service_name_uc
<< "_CLIENT (object);" << endl << endl << " THRIFT_UNUSED_VAR (pspec);" << endl
<< endl << " switch (property_id)" << endl << " {" << endl << " case PROP_"
<< this->nspace_uc << service_name_uc << "_CLIENT_INPUT_PROTOCOL:" << endl
<< " client->input_protocol = g_value_get_object (value);" << endl
<< " break;" << endl << " case PROP_" << this->nspace_uc << service_name_uc
<< "_CLIENT_OUTPUT_PROTOCOL:" << endl
<< " client->output_protocol = g_value_get_object (value);" << endl
<< " break;" << endl << " }" << endl << "}" << endl << endl;
// generate property getter
f_service_ << "void" << endl << this->nspace_lc << service_name_lc << "_client_get_property ("
<< "GObject *object, guint property_id, GValue *value, "
<< "GParamSpec *pspec)" << endl << "{" << endl << " " << this->nspace
<< service_name_ << "Client *client = " << this->nspace_uc << service_name_uc
<< "_CLIENT (object);" << endl << endl << " THRIFT_UNUSED_VAR (pspec);" << endl
<< endl << " switch (property_id)" << endl << " {" << endl << " case PROP_"
<< this->nspace_uc << service_name_uc << "_CLIENT_INPUT_PROTOCOL:" << endl
<< " g_value_set_object (value, client->input_protocol);" << endl
<< " break;" << endl << " case PROP_" << this->nspace_uc << service_name_uc
<< "_CLIENT_OUTPUT_PROTOCOL:" << endl
<< " g_value_set_object (value, client->output_protocol);" << endl
<< " break;" << endl << " }" << endl << "}" << endl << endl;
}
// Generate client method implementations
for (f_iter = functions.begin(); f_iter != functions.end(); ++f_iter) {
string name = (*f_iter)->get_name();
string funname = initial_caps_to_underscores(name);
// Get the struct of function call params and exceptions
t_struct* arg_struct = (*f_iter)->get_arglist();
// Function for sending
t_function send_function(g_type_void,
service_name_lc + string("_client_send_") + funname,
(*f_iter)->get_arglist());
// Open the send function
indent(f_service_) << function_signature(&send_function) << endl;
scope_up(f_service_);
string reqType = (*f_iter)->is_oneway() ? "T_ONEWAY" : "T_CALL";
// Serialize the request
f_service_ << indent() << "gint32 cseqid = 0;" << endl << indent()
<< "ThriftProtocol * protocol = " << this->nspace_uc << base_service_name_uc
<< "_CLIENT (iface)->output_protocol;" << endl << endl << indent()
<< "if (thrift_protocol_write_message_begin (protocol, \"" << name << "\", "
<< reqType << ", cseqid, error) < 0)" << endl << indent() << " return FALSE;"
<< endl << endl;
generate_struct_writer(f_service_, arg_struct, "", "", false);
f_service_ << indent() << "if (thrift_protocol_write_message_end (protocol, error) < 0)" << endl
<< indent() << " return FALSE;" << endl << indent()
<< "if (!thrift_transport_flush (protocol->transport, error))" << endl << indent()
<< " return FALSE;" << endl << indent()
<< "if (!thrift_transport_write_end (protocol->transport, error))" << endl
<< indent() << " return FALSE;" << endl << endl << indent() << "return TRUE;"
<< endl;
scope_down(f_service_);
f_service_ << endl;
    // Generate the recv function only for non-oneway methods
if (!(*f_iter)->is_oneway()) {
t_struct noargs(program_);
t_function recv_function((*f_iter)->get_returntype(),
service_name_lc + string("_client_recv_") + funname,
&noargs,
(*f_iter)->get_xceptions());
// Open function
indent(f_service_) << function_signature(&recv_function) << endl;
scope_up(f_service_);
f_service_ << indent() << "gint32 rseqid;" << endl
<< indent() << "gchar * fname = NULL;" << endl
<< indent() << "ThriftMessageType mtype;" << endl
<< indent() << "ThriftProtocol * protocol = "
<< this->nspace_uc << base_service_name_uc
<< "_CLIENT (iface)->input_protocol;" << endl
<< indent() << "ThriftApplicationException *xception;" << endl
<< endl
<< indent() << "if (thrift_protocol_read_message_begin "
"(protocol, &fname, &mtype, &rseqid, error) < 0) {" << endl;
indent_up();
f_service_ << indent() << "if (fname) g_free (fname);" << endl
<< indent() << "return FALSE;" << endl;
indent_down();
f_service_ << indent() << "}" << endl
<< endl
<< indent() << "if (mtype == T_EXCEPTION) {" << endl;
indent_up();
f_service_ << indent() << "if (fname) g_free (fname);" << endl
<< indent() << "xception = g_object_new "
"(THRIFT_TYPE_APPLICATION_EXCEPTION, NULL);" << endl
<< indent() << "thrift_struct_read (THRIFT_STRUCT (xception), "
"protocol, NULL);" << endl
<< indent() << "thrift_protocol_read_message_end "
"(protocol, NULL);" << endl
<< indent() << "thrift_transport_read_end "
"(protocol->transport, NULL);" << endl
<< indent() << "g_set_error (error, "
"THRIFT_APPLICATION_EXCEPTION_ERROR,xception->type, "
"\"application error: %s\", xception->message);" << endl
<< indent() << "g_object_unref (xception);" << endl
<< indent() << "return FALSE;" << endl;
indent_down();
f_service_ << indent() << "} else if (mtype != T_REPLY) {" << endl;
indent_up();
f_service_ << indent() << "if (fname) g_free (fname);" << endl
<< indent() << "thrift_protocol_skip (protocol, T_STRUCT, "
"NULL);" << endl
<< indent() << "thrift_protocol_read_message_end (protocol, "
"NULL);" << endl
<< indent() << "thrift_transport_read_end ("
"protocol->transport, NULL);" << endl
<< indent() << "g_set_error (error, "
"THRIFT_APPLICATION_EXCEPTION_ERROR, "
"THRIFT_APPLICATION_EXCEPTION_ERROR_INVALID_MESSAGE_TYPE, "
"\"invalid message type %d, expected T_REPLY\", mtype);"
<< endl
<< indent() << "return FALSE;" << endl;
indent_down();
f_service_ << indent() << "} else if (strncmp (fname, \"" << name
<< "\", " << name.length() << ") != 0) {" << endl;
indent_up();
f_service_ << indent() << "thrift_protocol_skip (protocol, T_STRUCT, "
"NULL);" << endl
<< indent() << "thrift_protocol_read_message_end (protocol,"
"error);" << endl
<< indent() << "thrift_transport_read_end ("
"protocol->transport, error);" << endl
<< indent() << "g_set_error (error, "
"THRIFT_APPLICATION_EXCEPTION_ERROR, "
"THRIFT_APPLICATION_EXCEPTION_ERROR_WRONG_METHOD_NAME, "
"\"wrong method name %s, expected " << name
<< "\", fname);" << endl
<< indent() << "if (fname) g_free (fname);" << endl
<< indent() << "return FALSE;" << endl;
indent_down();
f_service_ << indent() << "}" << endl
<< indent() << "if (fname) g_free (fname);" << endl
<< endl;
t_struct* xs = (*f_iter)->get_xceptions();
const std::vector<t_field*>& xceptions = xs->get_members();
vector<t_field*>::const_iterator x_iter;
{
t_struct result(program_, tservice->get_name() + "_" + (*f_iter)->get_name() + "_result");
t_field success((*f_iter)->get_returntype(), "*_return", 0);
if (!(*f_iter)->get_returntype()->is_void()) {
result.append(&success);
}
// add readers for exceptions, dereferencing the pointer.
for (x_iter = xceptions.begin(); x_iter != xceptions.end(); x_iter++) {
t_field* xception = new t_field((*x_iter)->get_type(),
"*" + (*x_iter)->get_name(),
(*x_iter)->get_key());
result.append(xception);
}
generate_struct_reader(f_service_, &result, "", "", false);
}
f_service_ << indent() << "if (thrift_protocol_read_message_end (protocol, error) < 0)"
<< endl << indent() << " return FALSE;" << endl << endl << indent()
<< "if (!thrift_transport_read_end (protocol->transport, error))" << endl
<< indent() << " return FALSE;" << endl << endl;
      // copy over any thrown exceptions and return failure
for (x_iter = xceptions.begin(); x_iter != xceptions.end(); x_iter++) {
f_service_ << indent() << "if (*" << (*x_iter)->get_name() << " != NULL)" << endl
<< indent() << "{" << endl << indent() << " g_set_error (error, "
<< this->nspace_uc
<< to_upper_case(initial_caps_to_underscores((*x_iter)->get_type()->get_name()))
<< "_ERROR, " << this->nspace_uc
<< to_upper_case(initial_caps_to_underscores((*x_iter)->get_type()->get_name()))
<< "_ERROR_CODE, \"" << (*x_iter)->get_type()->get_name() << "\");" << endl
<< indent() << " return FALSE;" << endl << indent() << "}" << endl;
}
// Close function
indent(f_service_) << "return TRUE;" << endl;
scope_down(f_service_);
f_service_ << endl;
}
// Open function
t_function service_function((*f_iter)->get_returntype(),
service_name_lc + string("_client_") + funname,
(*f_iter)->get_arglist(),
(*f_iter)->get_xceptions());
indent(f_service_) << function_signature(&service_function) << endl;
scope_up(f_service_);
// wrap each function
f_service_ << indent() << "if (!" << this->nspace_lc << service_name_lc << "_client_send_"
<< funname << " (iface";
    // pass each argument through to the send function
const vector<t_field*>& fields = arg_struct->get_members();
vector<t_field*>::const_iterator fld_iter;
for (fld_iter = fields.begin(); fld_iter != fields.end(); ++fld_iter) {
f_service_ << ", " << (*fld_iter)->get_name();
}
f_service_ << ", error))" << endl << indent() << " return FALSE;" << endl;
// if not oneway, implement recv
if (!(*f_iter)->is_oneway()) {
string ret = (*f_iter)->get_returntype()->is_void() ? "" : "_return, ";
const vector<t_field*>& xceptions = (*f_iter)->get_xceptions()->get_members();
vector<t_field*>::const_iterator x_iter;
for (x_iter = xceptions.begin(); x_iter != xceptions.end(); ++x_iter) {
ret += (*x_iter)->get_name();
ret += ", ";
}
f_service_ << indent() << "if (!" << this->nspace_lc << service_name_lc << "_client_recv_"
<< funname << " (iface, " << ret << "error))" << endl << indent()
<< " return FALSE;" << endl;
}
// return TRUE which means all functions were called OK
indent(f_service_) << "return TRUE;" << endl;
scope_down(f_service_);
f_service_ << endl;
}
// create the interface initializer
f_service_ << "static void" << endl
<< this->nspace_lc << service_name_lc << "_if_interface_init ("
<< this->nspace << service_name_ << "IfInterface *iface)" << endl;
scope_up(f_service_);
if (functions.size() > 0) {
for (f_iter = functions.begin(); f_iter != functions.end(); ++f_iter) {
/* make the function name C friendly */
string funname = initial_caps_to_underscores((*f_iter)->get_name());
f_service_ << indent() << "iface->" << funname << " = " << this->nspace_lc
<< service_name_lc << "_client_" << funname << ";" << endl;
}
}
else {
f_service_ << indent() << "THRIFT_UNUSED_VAR (iface);" << endl;
}
scope_down(f_service_);
f_service_ << endl;
// create the client instance initializer
f_service_ << "static void" << endl
<< this->nspace_lc << service_name_lc << "_client_init ("
<< this->nspace << service_name_ << "Client *client)" << endl;
scope_up(f_service_);
if (!extends_service) {
f_service_ << indent() << "client->input_protocol = NULL;" << endl
<< indent() << "client->output_protocol = NULL;" << endl;
}
else {
f_service_ << indent() << "THRIFT_UNUSED_VAR (client);" << endl;
}
scope_down(f_service_);
f_service_ << endl;
// create the client class initializer
f_service_ << "static void" << endl << this->nspace_lc << service_name_lc
<< "_client_class_init (" << this->nspace << service_name_ << "ClientClass *cls)"
<< endl << "{" << endl;
if (!extends_service) {
f_service_ << " GObjectClass *gobject_class = G_OBJECT_CLASS (cls);" << endl
<< " GParamSpec *param_spec;" << endl << endl
<< " gobject_class->set_property = " << this->nspace_lc << service_name_lc
<< "_client_set_property;" << endl
<< " gobject_class->get_property = " << this->nspace_lc << service_name_lc
<< "_client_get_property;" << endl << endl
<< " param_spec = g_param_spec_object (\"input_protocol\"," << endl
<< " \"input protocol (construct)\"," << endl
<< " \"Set the client input protocol\"," << endl
<< " THRIFT_TYPE_PROTOCOL," << endl
<< " G_PARAM_READWRITE);" << endl
<< " g_object_class_install_property (gobject_class," << endl
<< " PROP_" << this->nspace_uc << service_name_uc
<< "_CLIENT_INPUT_PROTOCOL, param_spec);" << endl << endl
<< " param_spec = g_param_spec_object (\"output_protocol\"," << endl
<< " \"output protocol (construct)\"," << endl
<< " \"Set the client output protocol\"," << endl
<< " THRIFT_TYPE_PROTOCOL," << endl
<< " G_PARAM_READWRITE);" << endl
<< " g_object_class_install_property (gobject_class," << endl
<< " PROP_" << this->nspace_uc << service_name_uc
<< "_CLIENT_OUTPUT_PROTOCOL, param_spec);" << endl;
}
else {
f_service_ << " THRIFT_UNUSED_VAR (cls);" << endl;
}
f_service_ << "}" << endl << endl;
}
/**
* Generates C code that represents a Thrift service handler.
*
* @param tservice The service for which to generate a handler.
*/
void t_c_glib_generator::generate_service_handler(t_service* tservice) {
vector<t_function*> functions = tservice->get_functions();
vector<t_function*>::const_iterator function_iter;
string service_name_lc = to_lower_case(initial_caps_to_underscores(service_name_));
string service_name_uc = to_upper_case(service_name_lc);
string service_handler_name = service_name_ + "Handler";
string class_name = this->nspace + service_handler_name;
string class_name_lc = this->nspace_lc + initial_caps_to_underscores(service_handler_name);
string class_name_uc = to_upper_case(class_name_lc);
string parent_class_name;
string parent_type_name;
string args_indent;
// The service this service extends, or nullptr if it extends no service
t_service* extends_service = tservice->get_extends();
// Determine the name of our parent service (if any) and the handler class'
// parent class name and type
if (extends_service) {
string parent_service_name = extends_service->get_name();
string parent_service_name_lc = to_lower_case(initial_caps_to_underscores(parent_service_name));
string parent_service_name_uc = to_upper_case(parent_service_name_lc);
parent_class_name = this->nspace + parent_service_name + "Handler";
parent_type_name = this->nspace_uc + "TYPE_" + parent_service_name_uc + "_HANDLER";
} else {
parent_class_name = "GObject";
parent_type_name = "G_TYPE_OBJECT";
}
// Generate the handler class' definition in the header file
// Generate the handler instance definition
f_header_ << "/* " << service_name_ << " handler (abstract base class) */" << endl << "struct _"
<< class_name << endl << "{" << endl;
indent_up();
f_header_ << indent() << parent_class_name << " parent;" << endl;
indent_down();
f_header_ << "};" << endl << "typedef struct _" << class_name << " " << class_name << ";" << endl
<< endl;
// Generate the handler class definition, including its class members
// (methods)
f_header_ << "struct _" << class_name << "Class" << endl << "{" << endl;
indent_up();
f_header_ << indent() << parent_class_name << "Class parent;" << endl << endl;
for (function_iter = functions.begin(); function_iter != functions.end(); ++function_iter) {
string method_name = initial_caps_to_underscores((*function_iter)->get_name());
t_type* return_type = (*function_iter)->get_returntype();
t_struct* arg_list = (*function_iter)->get_arglist();
t_struct* x_list = (*function_iter)->get_xceptions();
bool has_return = !return_type->is_void();
    bool args_empty = arg_list->get_members().size() == 0;
    bool xceptions_empty = x_list->get_members().size() == 0;
    string params = "(" + this->nspace + service_name_ + "If *iface"
                    + (has_return ? ", " + type_name(return_type) + "* _return" : "")
                    + (args_empty ? "" : (", " + argument_list(arg_list)))
                    + (xceptions_empty ? "" : (", " + xception_list(x_list))) + ", GError **error)";
indent(f_header_) << "gboolean (*" << method_name << ") " << params << ";" << endl;
}
indent_down();
f_header_ << "};" << endl << "typedef struct _" << class_name << "Class " << class_name
<< "Class;" << endl << endl;
// Generate the remaining header boilerplate
f_header_ << "GType " << class_name_lc << "_get_type (void);" << endl << "#define "
<< this->nspace_uc << "TYPE_" << service_name_uc << "_HANDLER "
<< "(" << class_name_lc << "_get_type())" << endl << "#define " << class_name_uc
<< "(obj) "
<< "(G_TYPE_CHECK_INSTANCE_CAST ((obj), " << this->nspace_uc << "TYPE_"
<< service_name_uc << "_HANDLER, " << class_name << "))" << endl << "#define "
<< this->nspace_uc << "IS_" << service_name_uc << "_HANDLER(obj) "
<< "(G_TYPE_CHECK_INSTANCE_TYPE ((obj), " << this->nspace_uc << "TYPE_"
<< service_name_uc << "_HANDLER))" << endl << "#define " << class_name_uc
<< "_CLASS(c) (G_TYPE_CHECK_CLASS_CAST ((c), " << this->nspace_uc << "TYPE_"
<< service_name_uc << "_HANDLER, " << class_name << "Class))" << endl << "#define "
<< this->nspace_uc << "IS_" << service_name_uc << "_HANDLER_CLASS(c) "
<< "(G_TYPE_CHECK_CLASS_TYPE ((c), " << this->nspace_uc << "TYPE_" << service_name_uc
<< "_HANDLER))" << endl << "#define " << this->nspace_uc << service_name_uc
<< "_HANDLER_GET_CLASS(obj) "
<< "(G_TYPE_INSTANCE_GET_CLASS ((obj), " << this->nspace_uc << "TYPE_"
<< service_name_uc << "_HANDLER, " << class_name << "Class))" << endl << endl;
// Generate the handler class' method definitions
for (function_iter = functions.begin(); function_iter != functions.end(); ++function_iter) {
string method_name = initial_caps_to_underscores((*function_iter)->get_name());
t_type* return_type = (*function_iter)->get_returntype();
t_struct* arg_list = (*function_iter)->get_arglist();
t_struct* x_list = (*function_iter)->get_xceptions();
bool has_return = !return_type->is_void();
    bool args_empty = arg_list->get_members().size() == 0;
    bool xceptions_empty = x_list->get_members().size() == 0;
    string params = "(" + this->nspace + service_name_ + "If *iface"
                    + (has_return ? ", " + type_name(return_type) + "* _return" : "")
                    + (args_empty ? "" : (", " + argument_list(arg_list)))
                    + (xceptions_empty ? "" : (", " + xception_list(x_list))) + ", GError **error)";
f_header_ << "gboolean " << class_name_lc << "_" << method_name << " " << params << ";" << endl;
}
f_header_ << endl;
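  /* Each emitted prototype looks roughly like this, for the hypothetical
   * "add" method:
   *
   *   gboolean calc_calculator_handler_add (CalcCalculatorIf *iface,
   *                                         gint32* _return,
   *                                         const gint32 num1,
   *                                         const gint32 num2,
   *                                         GError **error);
   */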
// Generate the handler's implementation in the implementation file
// Generate the implementation boilerplate
f_service_ << "static void" << endl << class_name_lc << "_" << service_name_lc
<< "_if_interface_init (" << this->nspace << service_name_ << "IfInterface *iface);"
<< endl << endl;
args_indent = string(25, ' ');
f_service_ << "G_DEFINE_TYPE_WITH_CODE (" << class_name << ", " << endl << args_indent
<< class_name_lc << "," << endl << args_indent << parent_type_name << "," << endl
<< args_indent << "G_IMPLEMENT_INTERFACE (" << this->nspace_uc << "TYPE_"
<< service_name_uc << "_IF," << endl;
args_indent += string(23, ' ');
f_service_ << args_indent << class_name_lc << "_" << service_name_lc << "_if_interface_init))"
<< endl << endl;
// Generate the handler method implementations
for (function_iter = functions.begin(); function_iter != functions.end(); ++function_iter) {
string function_name = (*function_iter)->get_name();
string method_name = initial_caps_to_underscores(function_name);
t_type* return_type = (*function_iter)->get_returntype();
t_struct* arg_list = (*function_iter)->get_arglist();
t_struct* x_list = (*function_iter)->get_xceptions();
const vector<t_field*>& args = arg_list->get_members();
const vector<t_field*>& xceptions = x_list->get_members();
vector<t_field*>::const_iterator field_iter;
t_function implementing_function(return_type,
service_name_lc + "_handler_" + method_name,
arg_list,
x_list,
(*function_iter)->is_oneway());
indent(f_service_) << function_signature(&implementing_function) << endl;
scope_up(f_service_);
f_service_ << indent() << "g_return_val_if_fail (" << this->nspace_uc << "IS_"
<< service_name_uc << "_HANDLER (iface), FALSE);" << endl << endl << indent()
<< "return " << class_name_uc << "_GET_CLASS (iface)"
<< "->" << method_name << " (iface, ";
if (!return_type->is_void()) {
f_service_ << "_return, ";
}
for (field_iter = args.begin(); field_iter != args.end(); ++field_iter) {
f_service_ << (*field_iter)->get_name() << ", ";
}
for (field_iter = xceptions.begin(); field_iter != xceptions.end(); ++field_iter) {
f_service_ << (*field_iter)->get_name() << ", ";
}
f_service_ << "error);" << endl;
scope_down(f_service_);
f_service_ << endl;
}
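  /* Sketch of one emitted handler method: it type-checks the instance and
   * dispatches through the class structure's function pointer, so concrete
   * handlers only need to fill in the vtable slot. Hypothetical names:
   *
   *   gboolean
   *   calc_calculator_handler_add (CalcCalculatorIf *iface, gint32* _return,
   *                                const gint32 num1, const gint32 num2,
   *                                GError **error)
   *   {
   *     g_return_val_if_fail (CALC_IS_CALCULATOR_HANDLER (iface), FALSE);
   *
   *     return CALC_CALCULATOR_HANDLER_GET_CLASS (iface)
   *       ->add (iface, _return, num1, num2, error);
   *   }
   */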
// Generate the handler interface initializer
f_service_ << "static void" << endl << class_name_lc << "_" << service_name_lc
<< "_if_interface_init (" << this->nspace << service_name_ << "IfInterface *iface)"
<< endl;
scope_up(f_service_);
if (functions.size() > 0) {
for (function_iter = functions.begin(); function_iter != functions.end(); ++function_iter) {
string method_name = initial_caps_to_underscores((*function_iter)->get_name());
f_service_ << indent() << "iface->" << method_name << " = " << class_name_lc << "_"
<< method_name << ";" << endl;
}
}
else {
f_service_ << "THRIFT_UNUSED_VAR (iface);" << endl;
}
scope_down(f_service_);
f_service_ << endl;
// Generate the handler instance initializer
f_service_ << "static void" << endl << class_name_lc << "_init (" << class_name << " *self)"
<< endl;
scope_up(f_service_);
f_service_ << indent() << "THRIFT_UNUSED_VAR (self);" << endl;
scope_down(f_service_);
f_service_ << endl;
// Generate the handler class initializer
f_service_ << "static void" << endl
<< class_name_lc << "_class_init (" << class_name << "Class *cls)"
<< endl;
scope_up(f_service_);
if (functions.size() > 0) {
for (function_iter = functions.begin();
function_iter != functions.end();
++function_iter) {
string function_name = (*function_iter)->get_name();
string method_name = initial_caps_to_underscores(function_name);
// All methods are pure virtual and must be implemented by subclasses
f_service_ << indent() << "cls->" << method_name << " = NULL;" << endl;
}
}
else {
f_service_ << indent() << "THRIFT_UNUSED_VAR (cls);" << endl;
}
scope_down(f_service_);
f_service_ << endl;
}
/**
* Generates C code that represents a Thrift service processor.
*
 * @param tservice The service for which to generate a processor.
*/
void t_c_glib_generator::generate_service_processor(t_service* tservice) {
vector<t_function*> functions = tservice->get_functions();
vector<t_function*>::const_iterator function_iter;
string service_name_lc = to_lower_case(initial_caps_to_underscores(service_name_));
string service_name_uc = to_upper_case(service_name_lc);
string service_processor_name = service_name_ + "Processor";
string class_name = this->nspace + service_processor_name;
string class_name_lc = this->nspace_lc + initial_caps_to_underscores(service_processor_name);
string class_name_uc = to_upper_case(class_name_lc);
string parent_class_name;
string parent_type_name;
string handler_class_name = this->nspace + service_name_ + "Handler";
string handler_class_name_lc = initial_caps_to_underscores(handler_class_name);
string process_function_type_name = class_name + "ProcessFunction";
string process_function_def_type_name =
class_name_lc + "_process_function_def";
string function_name;
string args_indent;
// The service this service extends, or nullptr if it extends no service
t_service* extends_service = tservice->get_extends();
// Determine the name of our parent service (if any) and the
// processor class' parent class name and type
if (extends_service) {
string parent_service_name = extends_service->get_name();
string parent_service_name_lc = to_lower_case(initial_caps_to_underscores(parent_service_name));
string parent_service_name_uc = to_upper_case(parent_service_name_lc);
parent_class_name = this->nspace + parent_service_name + "Processor";
parent_type_name = this->nspace_uc + "TYPE_" + parent_service_name_uc + "_PROCESSOR";
} else {
parent_class_name = "ThriftDispatchProcessor";
parent_type_name = "THRIFT_TYPE_DISPATCH_PROCESSOR";
}
// Generate the processor class' definition in the header file
// Generate the processor instance definition
f_header_ << "/* " << service_name_ << " processor */" << endl << "struct _" << class_name << endl
<< "{" << endl;
indent_up();
f_header_ << indent() << parent_class_name << " parent;" << endl << endl << indent()
<< "/* protected */" << endl << indent()
<< this->nspace + service_name_ + "Handler *handler;" << endl << indent()
<< "GHashTable *process_map;" << endl;
indent_down();
f_header_ << "};" << endl << "typedef struct _" << class_name << " " << class_name << ";" << endl
<< endl;
// Generate the processor class definition
f_header_ << "struct _" << class_name << "Class" << endl << "{" << endl;
indent_up();
f_header_ << indent() << parent_class_name << "Class parent;" << endl << endl << indent()
<< "/* protected */" << endl << indent()
<< "gboolean (*dispatch_call) (ThriftDispatchProcessor *processor," << endl;
args_indent = indent() + string(27, ' ');
f_header_ << args_indent << "ThriftProtocol *in," << endl << args_indent << "ThriftProtocol *out,"
<< endl << args_indent << "gchar *fname," << endl << args_indent << "gint32 seqid,"
<< endl << args_indent << "GError **error);" << endl;
indent_down();
f_header_ << "};" << endl << "typedef struct _" << class_name << "Class " << class_name
<< "Class;" << endl << endl;
// Generate the remaining header boilerplate
f_header_ << "GType " << class_name_lc << "_get_type (void);" << endl << "#define "
<< this->nspace_uc << "TYPE_" << service_name_uc << "_PROCESSOR "
<< "(" << class_name_lc << "_get_type())" << endl << "#define " << class_name_uc
<< "(obj) "
<< "(G_TYPE_CHECK_INSTANCE_CAST ((obj), " << this->nspace_uc << "TYPE_"
<< service_name_uc << "_PROCESSOR, " << class_name << "))" << endl << "#define "
<< this->nspace_uc << "IS_" << service_name_uc << "_PROCESSOR(obj) "
<< "(G_TYPE_CHECK_INSTANCE_TYPE ((obj), " << this->nspace_uc << "TYPE_"
<< service_name_uc << "_PROCESSOR))" << endl << "#define " << class_name_uc
<< "_CLASS(c) (G_TYPE_CHECK_CLASS_CAST ((c), " << this->nspace_uc << "TYPE_"
<< service_name_uc << "_PROCESSOR, " << class_name << "Class))" << endl << "#define "
<< this->nspace_uc << "IS_" << service_name_uc << "_PROCESSOR_CLASS(c) "
<< "(G_TYPE_CHECK_CLASS_TYPE ((c), " << this->nspace_uc << "TYPE_" << service_name_uc
<< "_PROCESSOR))" << endl << "#define " << this->nspace_uc << service_name_uc
<< "_PROCESSOR_GET_CLASS(obj) "
<< "(G_TYPE_INSTANCE_GET_CLASS ((obj), " << this->nspace_uc << "TYPE_"
<< service_name_uc << "_PROCESSOR, " << class_name << "Class))" << endl << endl;
// Generate the processor's implementation in the implementation file
// Generate the processor's properties enum
f_service_ << "enum _" << class_name << "Properties" << endl << "{" << endl;
indent_up();
f_service_ << indent() << "PROP_" << class_name_uc << "_0," << endl << indent() << "PROP_"
<< class_name_uc << "_HANDLER" << endl;
indent_down();
f_service_ << "};" << endl << endl;
// Generate the implementation boilerplate
args_indent = string(15, ' ');
f_service_ << "G_DEFINE_TYPE (" << class_name << "," << endl << args_indent << class_name_lc
<< "," << endl << args_indent << parent_type_name << ")" << endl << endl;
// Generate the processor's processing-function type
args_indent = string(process_function_type_name.length() + 23, ' ');
f_service_ << "typedef gboolean (* " << process_function_type_name << ") ("
<< class_name << " *, " << endl
<< args_indent << "gint32," << endl
<< args_indent << "ThriftProtocol *," << endl
<< args_indent << "ThriftProtocol *," << endl
<< args_indent << "GError **);" << endl
<< endl;
// Generate the processor's processing-function-definition type
f_service_ << "typedef struct" << endl
<< "{" << endl;
indent_up();
f_service_ << indent() << "gchar *name;" << endl
<< indent() << process_function_type_name << " function;" << endl;
indent_down();
f_service_ << "} " << process_function_def_type_name << ";" << endl
<< endl;
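  /* The two typedefs emitted above come out roughly as follows (hypothetical
   * "Calculator"/"calc_" names):
   *
   *   typedef gboolean (* CalcCalculatorProcessorProcessFunction)
   *     (CalcCalculatorProcessor *,
   *      gint32,
   *      ThriftProtocol *,
   *      ThriftProtocol *,
   *      GError **);
   *
   *   typedef struct
   *   {
   *     gchar *name;
   *     CalcCalculatorProcessorProcessFunction function;
   *   } calc_calculator_processor_process_function_def;
   */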
// Generate forward declarations of the processor's processing functions so we
// can refer to them in the processing-function-definition struct below and
// keep all of the processor's declarations in one place
for (function_iter = functions.begin();
function_iter != functions.end();
++function_iter) {
function_name = class_name_lc + "_process_"
+ initial_caps_to_underscores((*function_iter)->get_name());
args_indent = string(function_name.length() + 2, ' ');
f_service_ << "static gboolean" << endl
<< function_name << " ("
<< class_name << " *," << endl
<< args_indent << "gint32," << endl
<< args_indent << "ThriftProtocol *," << endl
<< args_indent << "ThriftProtocol *," << endl
<< args_indent << "GError **);" << endl;
}
f_service_ << endl;
// Generate the processor's processing-function definitions, if the service
// defines any methods
if (functions.size() > 0) {
f_service_ << indent() << "static " << process_function_def_type_name
<< endl
<< indent() << class_name_lc << "_process_function_defs["
<< functions.size() << "] = {" << endl;
indent_up();
for (function_iter = functions.begin();
function_iter != functions.end();
++function_iter) {
string service_function_name = (*function_iter)->get_name();
string process_function_name = class_name_lc + "_process_"
+ initial_caps_to_underscores(service_function_name);
f_service_ << indent() << "{" << endl;
indent_up();
f_service_ << indent() << "\"" << service_function_name << "\"," << endl
<< indent() << process_function_name << endl;
indent_down();
f_service_ << indent() << "}"
<< (function_iter == --functions.end() ? "" : ",") << endl;
}
indent_down();
f_service_ << indent() << "};" << endl
<< endl;
}
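  /* For a hypothetical one-method "Calculator" service the emitted table is
   * roughly:
   *
   *   static calc_calculator_processor_process_function_def
   *   calc_calculator_processor_process_function_defs[1] = {
   *     {
   *       "add",
   *       calc_calculator_processor_process_add
   *     }
   *   };
   */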
// Generate the processor's processing functions
for (function_iter = functions.begin(); function_iter != functions.end(); ++function_iter) {
string service_function_name = (*function_iter)->get_name();
string service_function_name_ic = underscores_to_initial_caps(service_function_name);
string service_function_name_lc = initial_caps_to_underscores(service_function_name);
string service_function_name_uc = to_upper_case(service_function_name_lc);
t_type* return_type = (*function_iter)->get_returntype();
bool has_return_value = !return_type->is_void();
t_struct* arg_list = (*function_iter)->get_arglist();
const vector<t_field*>& args = arg_list->get_members();
vector<t_field*>::const_iterator arg_iter;
const vector<t_field*>& xceptions = (*function_iter)->get_xceptions()->get_members();
vector<t_field*>::const_iterator xception_iter;
string args_class_name = this->nspace + service_name_ + service_function_name_ic + "Args";
string args_class_type = this->nspace_uc + "TYPE_" + service_name_uc + "_"
+ service_function_name_uc + "_ARGS";
string result_class_name = this->nspace + service_name_ + service_function_name_ic + "Result";
string result_class_type = this->nspace_uc + "TYPE_" + service_name_uc + "_"
+ service_function_name_uc + "_RESULT";
string handler_function_name = handler_class_name_lc + "_" + service_function_name_lc;
function_name = class_name_lc + "_process_"
+ initial_caps_to_underscores(service_function_name);
args_indent = string(function_name.length() + 2, ' ');
f_service_ << "static gboolean" << endl << function_name << " (" << class_name << " *self,"
<< endl << args_indent << "gint32 sequence_id," << endl << args_indent
<< "ThriftProtocol *input_protocol," << endl << args_indent
<< "ThriftProtocol *output_protocol," << endl << args_indent << "GError **error)"
<< endl;
scope_up(f_service_);
f_service_ << indent() << "gboolean result = TRUE;" << endl
<< indent() << "ThriftTransport * transport;" << endl
<< indent() << "ThriftApplicationException *xception;" << endl
<< indent() << args_class_name + " * args =" << endl;
indent_up();
f_service_ << indent() << "g_object_new (" << args_class_type << ", NULL);" << endl << endl;
indent_down();
if ((*function_iter)->is_oneway()) {
f_service_ << indent() << "THRIFT_UNUSED_VAR (sequence_id);" << endl << indent()
<< "THRIFT_UNUSED_VAR (output_protocol);" << endl << endl;
}
f_service_ << indent() << "g_object_get (input_protocol, \"transport\", "
<< "&transport, NULL);" << endl << endl;
// Read the method's arguments from the caller
f_service_ << indent() << "if ((thrift_struct_read (THRIFT_STRUCT (args), "
<< "input_protocol, error) != -1) &&" << endl << indent()
<< " (thrift_protocol_read_message_end (input_protocol, "
<< "error) != -1) &&" << endl << indent()
<< " (thrift_transport_read_end (transport, error) != FALSE))" << endl;
scope_up(f_service_);
for (arg_iter = args.begin(); arg_iter != args.end(); ++arg_iter) {
f_service_ << indent() << property_type_name((*arg_iter)->get_type()) << " "
<< (*arg_iter)->get_name() << ";" << endl;
}
for (xception_iter = xceptions.begin(); xception_iter != xceptions.end(); ++xception_iter) {
f_service_ << indent() << type_name((*xception_iter)->get_type()) << " "
<< initial_caps_to_underscores((*xception_iter)->get_name()) << " = NULL;" << endl;
}
if (has_return_value) {
f_service_ << indent() << property_type_name(return_type) << " return_value;" << endl;
}
if (!(*function_iter)->is_oneway()) {
f_service_ << indent() << result_class_name << " * result_struct;" << endl;
}
f_service_ << endl;
if (args.size() > 0) {
f_service_ << indent() << "g_object_get (args," << endl;
args_indent = indent() + string(14, ' ');
for (arg_iter = args.begin(); arg_iter != args.end(); ++arg_iter) {
string arg_name = (*arg_iter)->get_name();
f_service_ << args_indent << "\"" << arg_name << "\", &" << arg_name << "," << endl;
}
f_service_ << args_indent << "NULL);" << endl << endl;
}
if (!(*function_iter)->is_oneway()) {
f_service_ << indent() << "g_object_unref (transport);" << endl << indent()
<< "g_object_get (output_protocol, \"transport\", "
<< "&transport, NULL);" << endl << endl << indent()
<< "result_struct = g_object_new (" << result_class_type << ", NULL);" << endl;
if (has_return_value) {
f_service_ << indent() << "g_object_get (result_struct, "
"\"success\", &return_value, NULL);" << endl;
}
f_service_ << endl;
}
// Pass the arguments to the corresponding method in the handler
f_service_ << indent() << "if (" << handler_function_name << " (" << this->nspace_uc
<< service_name_uc << "_IF (self->handler)," << endl;
args_indent = indent() + string(handler_function_name.length() + 6, ' ');
if (has_return_value) {
string return_type_name = type_name(return_type);
f_service_ << args_indent;
      // Cast return_value if it was declared as a type other than the return
      // value's actual type; this is true for integer values 32 bits or fewer
      // in width, for which GLib requires that a plain gint type be used when
      // storing or retrieving as an object property
if (return_type_name != property_type_name(return_type)) {
if (return_type_name[return_type_name.length() - 1] != '*') {
return_type_name += ' ';
}
return_type_name += '*';
f_service_ << "(" << return_type_name << ")";
}
f_service_ << "&return_value," << endl;
}
for (arg_iter = args.begin(); arg_iter != args.end(); ++arg_iter) {
f_service_ << args_indent << (*arg_iter)->get_name() << "," << endl;
}
for (xception_iter = xceptions.begin(); xception_iter != xceptions.end(); ++xception_iter) {
f_service_ << args_indent << "&" << initial_caps_to_underscores((*xception_iter)->get_name())
<< "," << endl;
}
f_service_ << args_indent << "error) == TRUE)" << endl;
scope_up(f_service_);
// The handler reported success; return the result, if any, to the caller
if (!(*function_iter)->is_oneway()) {
if (has_return_value) {
f_service_ << indent() << "g_object_set (result_struct, \"success\", ";
if (type_name(return_type) != property_type_name(return_type)) {
// Roundtrip cast to fix the position of sign bit.
f_service_ << "(" << property_type_name(return_type) << ")"
<< "(" << type_name(return_type) << ")";
}
f_service_ << "return_value, "
<< "NULL);" << endl;
f_service_ << endl;
}
f_service_ << indent() << "result =" << endl;
indent_up();
f_service_ << indent() << "((thrift_protocol_write_message_begin (output_protocol," << endl;
args_indent = indent() + string(39, ' ');
f_service_ << args_indent << "\"" << service_function_name << "\"," << endl << args_indent
<< "T_REPLY," << endl << args_indent << "sequence_id," << endl << args_indent
<< "error) != -1) &&" << endl << indent()
<< " (thrift_struct_write (THRIFT_STRUCT (result_struct)," << endl;
args_indent = indent() + string(23, ' ');
f_service_ << args_indent << "output_protocol," << endl << args_indent << "error) != -1));"
<< endl;
indent_down();
}
scope_down(f_service_);
f_service_ << indent() << "else" << endl;
scope_up(f_service_);
// The handler reported failure; check to see if an application-defined
// exception was raised and if so, return it to the caller
f_service_ << indent();
if (xceptions.size() > 0) {
for (xception_iter = xceptions.begin(); xception_iter != xceptions.end(); ++xception_iter) {
f_service_ << "if (" << initial_caps_to_underscores((*xception_iter)->get_name())
<< " != NULL)" << endl;
scope_up(f_service_);
f_service_ << indent() << "g_object_set (result_struct," << endl;
args_indent = indent() + string(14, ' ');
f_service_ << args_indent << "\"" << (*xception_iter)->get_name() << "\", "
<< (*xception_iter)->get_name() << "," << endl << args_indent << "NULL);" << endl
<< endl;
f_service_ << indent() << "g_object_unref ("<< (*xception_iter)->get_name() <<");"<< endl;
f_service_ << indent() << "result =" << endl;
indent_up();
f_service_ << indent() << "((thrift_protocol_write_message_begin (output_protocol," << endl;
args_indent = indent() + string(39, ' ');
f_service_ << args_indent << "\"" << service_function_name << "\"," << endl << args_indent
<< "T_REPLY," << endl << args_indent << "sequence_id," << endl << args_indent
<< "error) != -1) &&" << endl << indent()
<< " (thrift_struct_write (THRIFT_STRUCT (result_struct)," << endl;
args_indent = indent() + string(23, ' ');
f_service_ << args_indent << "output_protocol," << endl << args_indent << "error) != -1));"
<< endl;
indent_down();
scope_down(f_service_);
f_service_ << indent() << "else" << endl;
}
scope_up(f_service_);
f_service_ << indent();
}
// If the handler reported failure but raised no application-defined
// exception, return a Thrift application exception with the information
// returned via GLib's own error-reporting mechanism
f_service_ << "if (*error == NULL)" << endl;
indent_up();
f_service_ << indent() << "g_warning (\"" << service_name_ << "."
<< (*function_iter)->get_name() << " implementation returned FALSE \"" << endl
<< indent() << string(11, ' ') << "\"but did not set an error\");" << endl << endl;
indent_down();
f_service_ << indent() << "xception =" << endl;
indent_up();
f_service_ << indent() << "g_object_new (THRIFT_TYPE_APPLICATION_EXCEPTION," << endl;
args_indent = indent() + string(14, ' ');
f_service_ << args_indent << "\"type\", *error != NULL ? (*error)->code :" << endl
<< args_indent << string(11, ' ') << "THRIFT_APPLICATION_EXCEPTION_ERROR_UNKNOWN,"
<< endl << args_indent << "\"message\", *error != NULL ? (*error)->message : NULL,"
<< endl << args_indent << "NULL);" << endl;
indent_down();
f_service_ << indent() << "g_clear_error (error);" << endl << endl << indent()
<< "result =" << endl;
indent_up();
f_service_ << indent() << "((thrift_protocol_write_message_begin (output_protocol," << endl;
args_indent = indent() + string(39, ' ');
f_service_ << args_indent << "\"" << service_function_name << "\"," << endl << args_indent
<< "T_EXCEPTION," << endl << args_indent << "sequence_id," << endl << args_indent
<< "error) != -1) &&" << endl << indent()
<< " (thrift_struct_write (THRIFT_STRUCT (xception)," << endl;
args_indent = indent() + string(23, ' ');
f_service_ << args_indent << "output_protocol," << endl << args_indent << "error) != -1));"
<< endl;
indent_down();
f_service_ << endl << indent() << "g_object_unref (xception);" << endl;
if (xceptions.size() > 0) {
scope_down(f_service_);
}
scope_down(f_service_);
f_service_ << endl;
    // Deallocate or unref retrieved argument values as necessary
for (arg_iter = args.begin(); arg_iter != args.end(); ++arg_iter) {
string arg_name = (*arg_iter)->get_name();
t_type* arg_type = get_true_type((*arg_iter)->get_type());
if (arg_type->is_base_type()) {
t_base_type* base_type = ((t_base_type*)arg_type);
if (base_type->get_base() == t_base_type::TYPE_STRING) {
f_service_ << indent() << "if (" << arg_name << " != NULL)" << endl;
indent_up();
if (base_type->is_binary()) {
f_service_ << indent() << "g_byte_array_unref (" << arg_name << ");" << endl;
} else {
f_service_ << indent() << "g_free (" << arg_name << ");" << endl;
}
indent_down();
}
} else if (arg_type->is_container()) {
f_service_ << indent() << "if (" << arg_name << " != NULL)" << endl;
indent_up();
if (arg_type->is_list()) {
t_type* elem_type = ((t_list*)arg_type)->get_elem_type();
f_service_ << indent();
if (is_numeric(elem_type)) {
f_service_ << "g_array_unref";
} else {
f_service_ << "g_ptr_array_unref";
}
f_service_ << " (" << arg_name << ");" << endl;
} else if (arg_type->is_map() || arg_type->is_set()) {
f_service_ << indent() << "g_hash_table_unref (" << arg_name << ");" << endl;
}
indent_down();
} else if (arg_type->is_struct()) {
f_service_ << indent() << "if (" << arg_name << " != NULL)" << endl;
indent_up();
f_service_ << indent() << "g_object_unref (" << arg_name << ");" << endl;
indent_down();
}
}
if (!(*function_iter)->is_oneway()) {
if (has_return_value) {
// Deallocate (or unref) return_value
return_type = get_true_type(return_type);
if (return_type->is_base_type()) {
t_base_type* base_type = ((t_base_type*)return_type);
if (base_type->get_base() == t_base_type::TYPE_STRING) {
f_service_ << indent() << "if (return_value != NULL)" << endl;
indent_up();
if (base_type->is_binary()) {
f_service_ << indent() << "g_byte_array_unref (return_value);" << endl;
} else {
f_service_ << indent() << "g_free (return_value);" << endl;
}
indent_down();
}
} else if (return_type->is_container()) {
f_service_ << indent() << "if (return_value != NULL)" << endl;
indent_up();
if (return_type->is_list()) {
t_type* elem_type = ((t_list*)return_type)->get_elem_type();
f_service_ << indent();
if (is_numeric(elem_type)) {
f_service_ << "g_array_unref";
} else {
f_service_ << "g_ptr_array_unref";
}
f_service_ << " (return_value);" << endl;
} else if (return_type->is_map() || return_type->is_set()) {
f_service_ << indent() << "g_hash_table_unref (return_value);" << endl;
}
indent_down();
} else if (return_type->is_struct()) {
f_service_ << indent() << "if (return_value != NULL)" << endl;
indent_up();
f_service_ << indent() << "g_object_unref (return_value);" << endl;
indent_down();
}
}
f_service_ << indent() << "g_object_unref (result_struct);" << endl << endl << indent()
<< "if (result == TRUE)" << endl;
indent_up();
f_service_ << indent() << "result =" << endl;
indent_up();
f_service_ << indent() << "((thrift_protocol_write_message_end "
<< "(output_protocol, error) != -1) &&" << endl << indent()
<< " (thrift_transport_write_end (transport, error) "
<< "!= FALSE) &&" << endl << indent()
<< " (thrift_transport_flush (transport, error) "
<< "!= FALSE));" << endl;
indent_down();
indent_down();
}
scope_down(f_service_);
f_service_ << indent() << "else" << endl;
indent_up();
f_service_ << indent() << "result = FALSE;" << endl;
indent_down();
f_service_ << endl << indent() << "g_object_unref (transport);" << endl << indent()
<< "g_object_unref (args);" << endl << endl << indent() << "return result;" << endl;
scope_down(f_service_);
f_service_ << endl;
}
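  /* Condensed sketch of one emitted processing function (hypothetical names;
   * the real output generated above also handles oneway methods, argument
   * cleanup and the T_EXCEPTION path):
   *
   *   static gboolean
   *   calc_calculator_processor_process_add (CalcCalculatorProcessor *self,
   *                                          gint32 sequence_id,
   *                                          ThriftProtocol *input_protocol,
   *                                          ThriftProtocol *output_protocol,
   *                                          GError **error)
   *   {
   *     ... read the args struct from input_protocol ...
   *     if (calc_calculator_handler_add (CALC_CALCULATOR_IF (self->handler),
   *                                      &return_value, num1, num2,
   *                                      error) == TRUE)
   *       ... write a T_REPLY message carrying the result struct ...
   *     else
   *       ... write the raised exception, or a ThriftApplicationException,
   *           as a T_EXCEPTION message ...
   *   }
   */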
// Generate the processor's dispatch_call implementation
function_name = class_name_lc + "_dispatch_call";
args_indent = indent() + string(function_name.length() + 2, ' ');
f_service_ << "static gboolean" << endl << function_name
<< " (ThriftDispatchProcessor *dispatch_processor," << endl << args_indent
<< "ThriftProtocol *input_protocol," << endl << args_indent
<< "ThriftProtocol *output_protocol," << endl << args_indent << "gchar *method_name,"
<< endl << args_indent << "gint32 sequence_id," << endl << args_indent
<< "GError **error)" << endl;
scope_up(f_service_);
f_service_ << indent() << class_name_lc << "_process_function_def *"
<< "process_function_def;" << endl;
f_service_ << indent() << "gboolean dispatch_result = FALSE;" << endl << endl << indent()
<< class_name << " *self = " << class_name_uc << " (dispatch_processor);" << endl;
f_service_ << indent() << parent_class_name << "Class "
"*parent_class =" << endl;
indent_up();
f_service_ << indent() << "g_type_class_peek_parent (" << class_name_uc << "_GET_CLASS (self));"
<< endl;
indent_down();
f_service_ << endl
<< indent() << "process_function_def = "
<< "g_hash_table_lookup (self->process_map, method_name);" << endl
<< indent() << "if (process_function_def != NULL)" << endl;
scope_up(f_service_);
args_indent = indent() + string(53, ' ');
f_service_ << indent() << "g_free (method_name);" << endl
<< indent() << "dispatch_result = "
<< "(*process_function_def->function) (self," << endl
<< args_indent << "sequence_id," << endl
<< args_indent << "input_protocol," << endl
<< args_indent << "output_protocol," << endl
<< args_indent << "error);" << endl;
scope_down(f_service_);
f_service_ << indent() << "else" << endl;
scope_up(f_service_);
  // Method name not recognized; chain up to our parent processor. Note that
  // the top-most implementation of this method, in ThriftDispatchProcessor
  // itself, will return an application exception to the caller if no class
  // in the hierarchy recognizes the method name
f_service_ << indent() << "dispatch_result = parent_class->dispatch_call "
"(dispatch_processor," << endl;
args_indent = indent() + string(47, ' ');
f_service_ << args_indent << "input_protocol," << endl << args_indent << "output_protocol,"
<< endl << args_indent << "method_name," << endl << args_indent << "sequence_id,"
<< endl << args_indent << "error);" << endl;
scope_down(f_service_);
f_service_ << endl << indent() << "return dispatch_result;" << endl;
scope_down(f_service_);
f_service_ << endl;
// Generate the processor's property setter
function_name = class_name_lc + "_set_property";
args_indent = string(function_name.length() + 2, ' ');
f_service_ << "static void" << endl << function_name << " (GObject *object," << endl
<< args_indent << "guint property_id," << endl << args_indent << "const GValue *value,"
<< endl << args_indent << "GParamSpec *pspec)" << endl;
scope_up(f_service_);
f_service_ << indent() << class_name << " *self = " << class_name_uc << " (object);" << endl
<< endl << indent() << "switch (property_id)" << endl;
scope_up(f_service_);
f_service_ << indent() << "case PROP_" << class_name_uc << "_HANDLER:" << endl;
indent_up();
f_service_ << indent() << "if (self->handler != NULL)" << endl;
indent_up();
f_service_ << indent() << "g_object_unref (self->handler);" << endl;
indent_down();
f_service_ << indent() << "self->handler = g_value_get_object (value);" << endl << indent()
<< "g_object_ref (self->handler);" << endl;
if (extends_service) {
// Chain up to set the handler in every superclass as well
f_service_ << endl << indent() << "G_OBJECT_CLASS (" << class_name_lc << "_parent_class)->"
<< endl;
indent_up();
f_service_ << indent() << "set_property (object, property_id, value, pspec);" << endl;
indent_down();
}
f_service_ << indent() << "break;" << endl;
indent_down();
f_service_ << indent() << "default:" << endl;
indent_up();
f_service_ << indent() << "G_OBJECT_WARN_INVALID_PROPERTY_ID (object, property_id, pspec);"
<< endl << indent() << "break;" << endl;
indent_down();
scope_down(f_service_);
scope_down(f_service_);
f_service_ << endl;
// Generate processor's property getter
function_name = class_name_lc + "_get_property";
args_indent = string(function_name.length() + 2, ' ');
f_service_ << "static void" << endl << function_name << " (GObject *object," << endl
<< args_indent << "guint property_id," << endl << args_indent << "GValue *value,"
<< endl << args_indent << "GParamSpec *pspec)" << endl;
scope_up(f_service_);
f_service_ << indent() << class_name << " *self = " << class_name_uc << " (object);" << endl
<< endl << indent() << "switch (property_id)" << endl;
scope_up(f_service_);
f_service_ << indent() << "case PROP_" << class_name_uc << "_HANDLER:" << endl;
indent_up();
f_service_ << indent() << "g_value_set_object (value, self->handler);" << endl << indent()
<< "break;" << endl;
indent_down();
f_service_ << indent() << "default:" << endl;
indent_up();
f_service_ << indent() << "G_OBJECT_WARN_INVALID_PROPERTY_ID (object, property_id, pspec);"
<< endl << indent() << "break;" << endl;
indent_down();
scope_down(f_service_);
scope_down(f_service_);
f_service_ << endl;
  // Generate the processor's dispose function
f_service_ << "static void" << endl << class_name_lc << "_dispose (GObject *gobject)" << endl;
scope_up(f_service_);
f_service_ << indent() << class_name << " *self = " << class_name_uc << " (gobject);" << endl
<< endl << indent() << "if (self->handler != NULL)" << endl;
scope_up(f_service_);
f_service_ << indent() << "g_object_unref (self->handler);" << endl << indent()
<< "self->handler = NULL;" << endl;
scope_down(f_service_);
f_service_ << endl << indent() << "G_OBJECT_CLASS (" << class_name_lc << "_parent_class)"
"->dispose (gobject);"
<< endl;
scope_down(f_service_);
f_service_ << endl;
// Generate processor finalize function
f_service_ << "static void" << endl << class_name_lc << "_finalize (GObject *gobject)" << endl;
scope_up(f_service_);
f_service_ << indent() << this->nspace << service_name_ << "Processor *self = " << this->nspace_uc
<< service_name_uc << "_PROCESSOR (gobject);" << endl << endl << indent()
<< "thrift_safe_hash_table_destroy (self->process_map);" << endl << endl << indent()
<< "G_OBJECT_CLASS (" << class_name_lc << "_parent_class)"
"->finalize (gobject);" << endl;
scope_down(f_service_);
f_service_ << endl;
// Generate processor instance initializer
f_service_ << "static void" << endl << class_name_lc << "_init (" << class_name << " *self)"
<< endl;
scope_up(f_service_);
if (functions.size() > 0) {
f_service_ << indent() << "guint index;" << endl
<< endl;
}
f_service_ << indent() << "self->handler = NULL;" << endl << indent()
<< "self->process_map = "
"g_hash_table_new (g_str_hash, g_str_equal);" << endl;
if (functions.size() > 0) {
args_indent = string(21, ' ');
f_service_ << endl
<< indent() << "for (index = 0; index < "
<< functions.size() << "; index += 1)" << endl;
indent_up();
f_service_ << indent() << "g_hash_table_insert (self->process_map," << endl
<< indent() << args_indent
<< class_name_lc << "_process_function_defs[index].name," << endl
<< indent() << args_indent
<< "&" << class_name_lc << "_process_function_defs[index]" << ");"
<< endl;
indent_down();
}
scope_down(f_service_);
f_service_ << endl;
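  /* Sketch of the emitted instance initializer for the hypothetical
   * one-method service:
   *
   *   static void
   *   calc_calculator_processor_init (CalcCalculatorProcessor *self)
   *   {
   *     guint index;
   *
   *     self->handler = NULL;
   *     self->process_map = g_hash_table_new (g_str_hash, g_str_equal);
   *
   *     for (index = 0; index < 1; index += 1)
   *       g_hash_table_insert (self->process_map,
   *                            calc_calculator_processor_process_function_defs[index].name,
   *                            &calc_calculator_processor_process_function_defs[index]);
   *   }
   */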
// Generate processor class initializer
f_service_ << "static void" << endl << class_name_lc << "_class_init (" << class_name
<< "Class *cls)" << endl;
scope_up(f_service_);
f_service_ << indent() << "GObjectClass *gobject_class = G_OBJECT_CLASS (cls);" << endl
<< indent() << "ThriftDispatchProcessorClass *dispatch_processor_class =" << endl;
indent_up();
f_service_ << indent() << "THRIFT_DISPATCH_PROCESSOR_CLASS (cls);" << endl;
indent_down();
f_service_ << indent() << "GParamSpec *param_spec;" << endl << endl << indent()
<< "gobject_class->dispose = " << class_name_lc << "_dispose;" << endl << indent()
<< "gobject_class->finalize = " << class_name_lc << "_finalize;" << endl << indent()
<< "gobject_class->set_property = " << class_name_lc << "_set_property;" << endl
<< indent() << "gobject_class->get_property = " << class_name_lc << "_get_property;"
<< endl << endl << indent()
<< "dispatch_processor_class->dispatch_call = " << class_name_lc << "_dispatch_call;"
<< endl << indent() << "cls->dispatch_call = " << class_name_lc << "_dispatch_call;"
<< endl << endl << indent() << "param_spec = g_param_spec_object (\"handler\","
<< endl;
args_indent = indent() + string(34, ' ');
f_service_ << args_indent << "\"Service handler implementation\"," << endl << args_indent
<< "\"The service handler implementation \"" << endl << args_indent
<< "\"to which method calls are dispatched.\"," << endl << args_indent
<< this->nspace_uc + "TYPE_" + service_name_uc + "_HANDLER," << endl << args_indent
<< "G_PARAM_READWRITE);" << endl;
f_service_ << indent() << "g_object_class_install_property (gobject_class," << endl;
args_indent = indent() + string(33, ' ');
f_service_ << args_indent << "PROP_" << class_name_uc << "_HANDLER," << endl << args_indent
<< "param_spec);" << endl;
scope_down(f_service_);
}
/**
* Generates C code that represents a Thrift service server.
*/
void t_c_glib_generator::generate_service_server(t_service* tservice) {
// Generate the service's handler class
generate_service_handler(tservice);
// Generate the service's processor class
generate_service_processor(tservice);
}
/**
 * Generates C code to represent a Thrift structure as a GObject.
*/
void t_c_glib_generator::generate_object(t_struct* tstruct) {
string name = tstruct->get_name();
string name_u = initial_caps_to_underscores(name);
string name_uc = to_upper_case(name_u);
string class_name = this->nspace + name;
string class_name_lc = this->nspace_lc + initial_caps_to_underscores(name);
string class_name_uc = to_upper_case(class_name_lc);
string function_name;
string args_indent;
// write the instance definition
f_types_ << "struct _" << this->nspace << name << endl << "{ " << endl
<< " ThriftStruct parent; " << endl << endl << " /* public */" << endl;
// for each field, add a member variable
vector<t_field*>::const_iterator m_iter;
const vector<t_field*>& members = tstruct->get_members();
for (m_iter = members.begin(); m_iter != members.end(); ++m_iter) {
t_type* t = get_true_type((*m_iter)->get_type());
f_types_ << " " << type_name(t) << " " << (*m_iter)->get_name() << ";" << endl;
if ((*m_iter)->get_req() != t_field::T_REQUIRED) {
f_types_ << " gboolean __isset_" << (*m_iter)->get_name() << ";" << endl;
}
}
// close the structure definition and create a typedef
f_types_ << "};" << endl << "typedef struct _" << this->nspace << name << " " << this->nspace
<< name << ";" << endl << endl;
// write the class definition
f_types_ << "struct _" << this->nspace << name << "Class" << endl << "{" << endl
<< " ThriftStructClass parent;" << endl << "};" << endl << "typedef struct _"
<< this->nspace << name << "Class " << this->nspace << name << "Class;" << endl << endl;
// write the standard GObject boilerplate
f_types_ << "GType " << this->nspace_lc << name_u << "_get_type (void);" << endl << "#define "
<< this->nspace_uc << "TYPE_" << name_uc << " (" << this->nspace_lc << name_u
<< "_get_type())" << endl << "#define " << this->nspace_uc << name_uc
<< "(obj) (G_TYPE_CHECK_INSTANCE_CAST ((obj), " << this->nspace_uc << "TYPE_" << name_uc
<< ", " << this->nspace << name << "))" << endl << "#define " << this->nspace_uc
<< name_uc << "_CLASS(c) (G_TYPE_CHECK_CLASS_CAST ((c), " << this->nspace_uc << "_TYPE_"
<< name_uc << ", " << this->nspace << name << "Class))" << endl << "#define "
<< this->nspace_uc << "IS_" << name_uc << "(obj) (G_TYPE_CHECK_INSTANCE_TYPE ((obj), "
<< this->nspace_uc << "TYPE_" << name_uc << "))" << endl << "#define " << this->nspace_uc
<< "IS_" << name_uc << "_CLASS(c) (G_TYPE_CHECK_CLASS_TYPE ((c), " << this->nspace_uc
<< "TYPE_" << name_uc << "))" << endl << "#define " << this->nspace_uc << name_uc
<< "_GET_CLASS(obj) (G_TYPE_INSTANCE_GET_CLASS ((obj), " << this->nspace_uc << "TYPE_"
<< name_uc << ", " << this->nspace << name << "Class))" << endl << endl;
// start writing the object implementation .c file
// generate properties enum
if (members.size() > 0) {
f_types_impl_ << "enum _" << class_name << "Properties" << endl << "{" << endl;
indent_up();
f_types_impl_ << indent() << "PROP_" << class_name_uc << "_0";
for (m_iter = members.begin(); m_iter != members.end(); ++m_iter) {
string member_name_uc
= to_upper_case(to_lower_case(initial_caps_to_underscores((*m_iter)->get_name())));
f_types_impl_ << "," << endl << indent() << "PROP_" << class_name_uc << "_" << member_name_uc;
}
f_types_impl_ << endl;
indent_down();
f_types_impl_ << "};" << endl << endl;
}
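  /* For the hypothetical struct "Work" with members "num1" and "num2" the
   * properties enum emitted above is roughly:
   *
   *   enum _CalcWorkProperties
   *   {
   *     PROP_CALC_WORK_0,
   *     PROP_CALC_WORK_NUM1,
   *     PROP_CALC_WORK_NUM2
   *   };
   */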
// generate struct I/O methods
string this_get = this->nspace + name + " * this_object = " + this->nspace_uc + name_uc
+ "(object);";
generate_struct_reader(f_types_impl_, tstruct, "this_object->", this_get);
generate_struct_writer(f_types_impl_, tstruct, "this_object->", this_get);
// generate property setter and getter
if (members.size() > 0) {
// generate property setter
function_name = class_name_lc + "_set_property";
args_indent = string(function_name.length() + 2, ' ');
f_types_impl_ << "static void" << endl << function_name << " (GObject *object," << endl
<< args_indent << "guint property_id," << endl << args_indent
<< "const GValue *value," << endl << args_indent << "GParamSpec *pspec)" << endl;
scope_up(f_types_impl_);
f_types_impl_ << indent() << class_name << " *self = " << class_name_uc << " (object);" << endl
<< endl << indent() << "switch (property_id)" << endl;
scope_up(f_types_impl_);
for (m_iter = members.begin(); m_iter != members.end(); ++m_iter) {
t_field* member = (*m_iter);
string member_name = member->get_name();
string member_name_uc
= to_upper_case(to_lower_case(initial_caps_to_underscores(member_name)));
t_type* member_type = get_true_type(member->get_type());
string property_identifier = "PROP_" + class_name_uc + "_" + member_name_uc;
f_types_impl_ << indent() << "case " << property_identifier + ":" << endl;
indent_up();
if (member_type->is_base_type()) {
t_base_type* base_type = ((t_base_type*)member_type);
string assign_function_name;
if (base_type->get_base() == t_base_type::TYPE_STRING) {
string release_function_name;
f_types_impl_ << indent() << "if (self->" << member_name << " != NULL)" << endl;
indent_up();
if (base_type->is_binary()) {
release_function_name = "g_byte_array_unref";
assign_function_name = "g_value_dup_boxed";
} else {
release_function_name = "g_free";
assign_function_name = "g_value_dup_string";
}
f_types_impl_ << indent() << release_function_name << " (self->" << member_name << ");"
<< endl;
indent_down();
} else {
switch (base_type->get_base()) {
case t_base_type::TYPE_BOOL:
assign_function_name = "g_value_get_boolean";
break;
case t_base_type::TYPE_I8:
case t_base_type::TYPE_I16:
case t_base_type::TYPE_I32:
assign_function_name = "g_value_get_int";
break;
case t_base_type::TYPE_I64:
assign_function_name = "g_value_get_int64";
break;
case t_base_type::TYPE_DOUBLE:
assign_function_name = "g_value_get_double";
break;
default:
throw "compiler error: "
"unrecognized base type \"" + base_type->get_name() + "\" "
"for struct member \""
+ member_name + "\"";
break;
}
}
f_types_impl_ << indent() << "self->" << member_name << " = " << assign_function_name
<< " (value);" << endl;
} else if (member_type->is_enum()) {
f_types_impl_ << indent() << "self->" << member_name << " = g_value_get_int (value);"
<< endl;
} else if (member_type->is_container()) {
string release_function_name;
string assign_function_name;
if (member_type->is_list()) {
t_type* elem_type = ((t_list*)member_type)->get_elem_type();
// Lists of base types other than strings are represented as GArrays;
// all others as GPtrArrays
if (is_numeric(elem_type)) {
release_function_name = "g_array_unref";
} else {
release_function_name = "g_ptr_array_unref";
}
assign_function_name = "g_value_dup_boxed";
} else if (member_type->is_set() || member_type->is_map()) {
release_function_name = "g_hash_table_unref";
assign_function_name = "g_value_dup_boxed";
}
f_types_impl_ << indent() << "if (self->" << member_name << " != NULL)" << endl;
indent_up();
f_types_impl_ << indent() << release_function_name << " (self->" << member_name << ");"
<< endl;
indent_down();
f_types_impl_ << indent() << "self->" << member_name << " = " << assign_function_name
<< " (value);" << endl;
} else if (member_type->is_struct() || member_type->is_xception()) {
f_types_impl_ << indent() << "if (self->" << member_name << " != NULL)" << endl;
indent_up();
f_types_impl_ << indent() << "g_object_unref (self->" << member_name << ");" << endl;
indent_down();
f_types_impl_ << indent() << "self->" << member_name << " = g_value_dup_object (value);"
<< endl;
}
if (member->get_req() != t_field::T_REQUIRED) {
f_types_impl_ << indent() << "self->__isset_" << member_name << " = TRUE;" << endl;
}
f_types_impl_ << indent() << "break;" << endl << endl;
indent_down();
}
f_types_impl_ << indent() << "default:" << endl;
indent_up();
f_types_impl_ << indent() << "G_OBJECT_WARN_INVALID_PROPERTY_ID (object, property_id, pspec);"
<< endl << indent() << "break;" << endl;
indent_down();
scope_down(f_types_impl_);
scope_down(f_types_impl_);
f_types_impl_ << endl;
// generate property getter
function_name = class_name_lc + "_get_property";
args_indent = string(function_name.length() + 2, ' ');
f_types_impl_ << "static void" << endl << function_name << " (GObject *object," << endl
<< args_indent << "guint property_id," << endl << args_indent << "GValue *value,"
<< endl << args_indent << "GParamSpec *pspec)" << endl;
scope_up(f_types_impl_);
f_types_impl_ << indent() << class_name << " *self = " << class_name_uc << " (object);" << endl
<< endl << indent() << "switch (property_id)" << endl;
scope_up(f_types_impl_);
for (m_iter = members.begin(); m_iter != members.end(); ++m_iter) {
t_field* member = (*m_iter);
string member_name = (*m_iter)->get_name();
string member_name_uc
= to_upper_case(to_lower_case(initial_caps_to_underscores(member_name)));
t_type* member_type = get_true_type(member->get_type());
string property_identifier = "PROP_" + class_name_uc + "_" + member_name_uc;
string setter_function_name;
if (member_type->is_base_type()) {
t_base_type* base_type = ((t_base_type*)member_type);
switch (base_type->get_base()) {
case t_base_type::TYPE_BOOL:
setter_function_name = "g_value_set_boolean";
break;
case t_base_type::TYPE_I8:
case t_base_type::TYPE_I16:
case t_base_type::TYPE_I32:
setter_function_name = "g_value_set_int";
break;
case t_base_type::TYPE_I64:
setter_function_name = "g_value_set_int64";
break;
case t_base_type::TYPE_DOUBLE:
setter_function_name = "g_value_set_double";
break;
case t_base_type::TYPE_STRING:
if (base_type->is_binary()) {
setter_function_name = "g_value_set_boxed";
} else {
setter_function_name = "g_value_set_string";
}
break;
default:
throw "compiler error: "
"unrecognized base type \"" + base_type->get_name() + "\" "
"for struct member \""
+ member_name + "\"";
break;
}
} else if (member_type->is_enum()) {
setter_function_name = "g_value_set_int";
} else if (member_type->is_struct() || member_type->is_xception()) {
setter_function_name = "g_value_set_object";
} else if (member_type->is_container()) {
setter_function_name = "g_value_set_boxed";
} else {
throw "compiler error: "
"unrecognized type for struct member \"" + member_name + "\"";
}
f_types_impl_ << indent() << "case " << property_identifier + ":" << endl;
indent_up();
f_types_impl_ << indent() << setter_function_name << " (value, self->" << member_name << ");"
<< endl << indent() << "break;" << endl << endl;
indent_down();
}
f_types_impl_ << indent() << "default:" << endl;
indent_up();
f_types_impl_ << indent() << "G_OBJECT_WARN_INVALID_PROPERTY_ID (object, property_id, pspec);"
<< endl << indent() << "break;" << endl;
indent_down();
scope_down(f_types_impl_);
scope_down(f_types_impl_);
f_types_impl_ << endl;
}
// generate the instance init function
f_types_impl_ << "static void " << endl << this->nspace_lc << name_u << "_instance_init ("
<< this->nspace << name << " * object)" << endl << "{" << endl;
indent_up();
// generate default-value structures for container-type members
bool constant_declaration_output = false;
bool string_list_constant_output = false;
for (m_iter = members.begin(); m_iter != members.end(); ++m_iter) {
t_field* member = *m_iter;
t_const_value* member_value = member->get_value();
if (member_value != nullptr) {
string member_name = member->get_name();
t_type* member_type = get_true_type(member->get_type());
if (member_type->is_list()) {
const vector<t_const_value*>& list = member_value->get_list();
t_type* elem_type = ((t_list*)member_type)->get_elem_type();
// Generate an array with the list literal
indent(f_types_impl_) << "static " << type_name(elem_type, false, true) << " __default_"
<< member_name << "[" << list.size() << "] = " << endl;
indent_up();
f_types_impl_ << indent() << constant_literal(member_type, member_value) << ";" << endl;
indent_down();
constant_declaration_output = true;
// If we are generating values for a pointer array (i.e. a list of
// strings), set a flag so we know to also declare an index variable to
// use in pre-populating the array
if (elem_type->is_string()) {
string_list_constant_output = true;
}
}
// TODO: Handle container types other than list
}
}
if (constant_declaration_output) {
if (string_list_constant_output) {
indent(f_types_impl_) << "unsigned int list_index;" << endl;
}
f_types_impl_ << endl;
}
// satisfy compilers with -Wall turned on
indent(f_types_impl_) << "/* satisfy -Wall */" << endl << indent()
<< "THRIFT_UNUSED_VAR (object);" << endl;
for (m_iter = members.begin(); m_iter != members.end(); ++m_iter) {
t_type* member_type = (*m_iter)->get_type();
t_type* t = get_true_type(member_type);
if (t->is_base_type()) {
string dval = " = ";
if (t->is_enum()) {
dval += "(" + type_name(t) + ")";
}
t_const_value* cv = (*m_iter)->get_value();
if (cv != nullptr) {
dval += constant_value("", t, cv);
} else {
dval += t->is_string() ? "NULL" : "0";
}
indent(f_types_impl_) << "object->" << (*m_iter)->get_name() << dval << ";" << endl;
} else if (t->is_struct()) {
string name = (*m_iter)->get_name();
t_program* type_program = member_type->get_program();
string type_nspace = type_program ? type_program->get_namespace("c_glib") : "";
string type_nspace_prefix =
type_nspace.empty() ? "" : initial_caps_to_underscores(type_nspace) + "_";
string type_name_uc = to_upper_case(initial_caps_to_underscores(member_type->get_name()));
indent(f_types_impl_) << "object->" << name << " = g_object_new ("
<< to_upper_case(type_nspace_prefix) << "TYPE_" << type_name_uc
<< ", NULL);" << endl;
} else if (t->is_xception()) {
string name = (*m_iter)->get_name();
indent(f_types_impl_) << "object->" << name << " = NULL;" << endl;
} else if (t->is_container()) {
string name = (*m_iter)->get_name();
string init_function;
t_type* etype = nullptr;
if (t->is_map()) {
t_type* key = ((t_map*)t)->get_key_type();
t_type* value = ((t_map*)t)->get_val_type();
init_function = generate_new_hash_from_type(key, value);
} else if (t->is_set()) {
etype = ((t_set*)t)->get_elem_type();
init_function = generate_new_hash_from_type(etype, nullptr);
} else if (t->is_list()) {
etype = ((t_list*)t)->get_elem_type();
init_function = generate_new_array_from_type(etype);
}
indent(f_types_impl_) << "object->" << name << " = " << init_function << endl;
// Pre-populate the container with the specified default values, if any
if ((*m_iter)->get_value()) {
t_const_value* member_value = (*m_iter)->get_value();
if (t->is_list()) {
const vector<t_const_value*>& list = member_value->get_list();
if (is_numeric(etype)) {
indent(f_types_impl_) <<
"g_array_append_vals (object->" << name << ", &__default_" <<
name << ", " << list.size() << ");" << endl;
}
else {
indent(f_types_impl_) <<
"for (list_index = 0; list_index < " << list.size() << "; " <<
"list_index += 1)" << endl;
indent_up();
indent(f_types_impl_) <<
"g_ptr_array_add (object->" << name << "," << endl <<
indent() << string(17, ' ') << "g_strdup (__default_" <<
name << "[list_index]));" << endl;
indent_down();
}
}
// TODO: Handle container types other than list
}
}
/* if not required, initialize the __isset variable */
if ((*m_iter)->get_req() != t_field::T_REQUIRED) {
indent(f_types_impl_) << "object->__isset_" << (*m_iter)->get_name() << " = FALSE;" << endl;
}
}
indent_down();
f_types_impl_ << "}" << endl << endl;
/* create the destructor */
f_types_impl_ << "static void " << endl << this->nspace_lc << name_u
<< "_finalize (GObject *object)" << endl << "{" << endl;
indent_up();
f_types_impl_ << indent() << this->nspace << name << " *tobject = " << this->nspace_uc << name_uc
<< " (object);" << endl << endl;
f_types_impl_ << indent() << "/* satisfy -Wall in case we don't use tobject */" << endl
<< indent() << "THRIFT_UNUSED_VAR (tobject);" << endl;
for (m_iter = members.begin(); m_iter != members.end(); ++m_iter) {
t_type* t = get_true_type((*m_iter)->get_type());
if (t->is_container()) {
string name = (*m_iter)->get_name();
if (t->is_map() || t->is_set()) {
f_types_impl_ << indent() << "if (tobject->" << name << " != NULL)" << endl;
f_types_impl_ << indent() << "{" << endl;
indent_up();
f_types_impl_ << indent() << "g_hash_table_destroy (tobject->" << name << ");" << endl;
f_types_impl_ << indent() << "tobject->" << name << " = NULL;" << endl;
indent_down();
f_types_impl_ << indent() << "}" << endl;
} else if (t->is_list()) {
t_type* etype = ((t_list*)t)->get_elem_type();
string destructor_function = "g_ptr_array_unref";
if (etype->is_base_type()) {
t_base_type::t_base tbase = ((t_base_type*)etype)->get_base();
switch (tbase) {
case t_base_type::TYPE_VOID:
throw "compiler error: cannot determine array type";
case t_base_type::TYPE_BOOL:
case t_base_type::TYPE_I8:
case t_base_type::TYPE_I16:
case t_base_type::TYPE_I32:
case t_base_type::TYPE_I64:
case t_base_type::TYPE_DOUBLE:
destructor_function = "g_array_unref";
break;
case t_base_type::TYPE_STRING:
break;
default:
throw "compiler error: no array info for type";
}
} else if (etype->is_enum()) {
destructor_function = "g_array_unref";
}
f_types_impl_ << indent() << "if (tobject->" << name << " != NULL)" << endl;
f_types_impl_ << indent() << "{" << endl;
indent_up();
f_types_impl_ << indent() << destructor_function << " (tobject->" << name << ");" << endl;
f_types_impl_ << indent() << "tobject->" << name << " = NULL;" << endl;
indent_down();
f_types_impl_ << indent() << "}" << endl;
}
} else if (t->is_struct() || t->is_xception()) {
string name = (*m_iter)->get_name();
// TODO: g_clear_object needs glib >= 2.28
// f_types_impl_ << indent() << "g_clear_object (&(tobject->" << name << "));" << endl;
// does g_object_unref the trick?
f_types_impl_ << indent() << "if (tobject->" << name << " != NULL)" << endl;
f_types_impl_ << indent() << "{" << endl;
indent_up();
f_types_impl_ << indent() << "g_object_unref(tobject->" << name << ");" << endl;
f_types_impl_ << indent() << "tobject->" << name << " = NULL;" << endl;
indent_down();
f_types_impl_ << indent() << "}" << endl;
} else if (t->is_string()) {
string name = (*m_iter)->get_name();
f_types_impl_ << indent() << "if (tobject->" << name << " != NULL)" << endl;
f_types_impl_ << indent() << "{" << endl;
indent_up();
f_types_impl_ << indent() << generate_free_func_from_type(t) << "(tobject->" << name << ");"
<< endl;
f_types_impl_ << indent() << "tobject->" << name << " = NULL;" << endl;
indent_down();
f_types_impl_ << indent() << "}" << endl;
}
}
indent_down();
f_types_impl_ << "}" << endl << endl;
// generate the class init function
f_types_impl_ << "static void" << endl << class_name_lc << "_class_init (" << class_name
<< "Class * cls)" << endl;
scope_up(f_types_impl_);
f_types_impl_ << indent() << "GObjectClass *gobject_class = G_OBJECT_CLASS (cls);" << endl
<< indent() << "ThriftStructClass *struct_class = "
<< "THRIFT_STRUCT_CLASS (cls);" << endl << endl << indent()
<< "struct_class->read = " << class_name_lc << "_read;" << endl << indent()
<< "struct_class->write = " << class_name_lc << "_write;" << endl << endl
<< indent() << "gobject_class->finalize = " << class_name_lc << "_finalize;"
<< endl;
if (members.size() > 0) {
f_types_impl_ << indent() << "gobject_class->get_property = " << class_name_lc
<< "_get_property;" << endl << indent()
<< "gobject_class->set_property = " << class_name_lc << "_set_property;" << endl;
// install a property for each member
for (m_iter = members.begin(); m_iter != members.end(); ++m_iter) {
t_field* member = (*m_iter);
string member_name = member->get_name();
string member_name_uc
= to_upper_case(to_lower_case(initial_caps_to_underscores(member_name)));
t_type* member_type = get_true_type(member->get_type());
t_const_value* member_value = member->get_value();
string property_identifier = "PROP_" + class_name_uc + "_" + member_name_uc;
f_types_impl_ << endl << indent() << "g_object_class_install_property" << endl;
indent_up();
args_indent = indent() + ' ';
f_types_impl_ << indent() << "(gobject_class," << endl << args_indent << property_identifier
<< "," << endl << args_indent;
if (member_type->is_base_type()) {
t_base_type::t_base base_type = ((t_base_type*)member_type)->get_base();
if (base_type == t_base_type::TYPE_STRING) {
if (((t_base_type*)member_type)->is_binary()) {
args_indent += string(20, ' ');
f_types_impl_ << "g_param_spec_boxed (\"" << member_name << "\"," << endl << args_indent
<< "NULL," << endl << args_indent << "NULL," << endl << args_indent
<< "G_TYPE_BYTE_ARRAY," << endl << args_indent << "G_PARAM_READWRITE));"
<< endl;
} else {
args_indent += string(21, ' ');
f_types_impl_ << "g_param_spec_string (\"" << member_name << "\"," << endl
<< args_indent << "NULL," << endl << args_indent << "NULL," << endl
<< args_indent
<< ((member_value != NULL) ? "\"" + member_value->get_string() + "\""
: "NULL") << "," << endl << args_indent
<< "G_PARAM_READWRITE));" << endl;
}
} else if (base_type == t_base_type::TYPE_BOOL) {
args_indent += string(22, ' ');
f_types_impl_ << "g_param_spec_boolean (\"" << member_name << "\"," << endl << args_indent
<< "NULL," << endl << args_indent << "NULL," << endl << args_indent
<< (((member_value != NULL) && (member_value->get_integer() != 0))
? "TRUE"
: "FALSE") << "," << endl << args_indent << "G_PARAM_READWRITE));"
<< endl;
} else if ((base_type == t_base_type::TYPE_I8) || (base_type == t_base_type::TYPE_I16)
|| (base_type == t_base_type::TYPE_I32) || (base_type == t_base_type::TYPE_I64)
|| (base_type == t_base_type::TYPE_DOUBLE)) {
string param_spec_function_name = "g_param_spec_int";
string min_value;
string max_value;
ostringstream default_value;
switch (base_type) {
case t_base_type::TYPE_I8:
min_value = "G_MININT8";
max_value = "G_MAXINT8";
break;
case t_base_type::TYPE_I16:
min_value = "G_MININT16";
max_value = "G_MAXINT16";
break;
case t_base_type::TYPE_I32:
min_value = "G_MININT32";
max_value = "G_MAXINT32";
break;
case t_base_type::TYPE_I64:
param_spec_function_name = "g_param_spec_int64";
min_value = "G_MININT64";
max_value = "G_MAXINT64";
break;
case t_base_type::TYPE_DOUBLE:
param_spec_function_name = "g_param_spec_double";
min_value = "-INFINITY";
max_value = "INFINITY";
break;
default:
throw "compiler error: "
"unrecognized base type \"" + member_type->get_name() + "\" "
"for struct member \""
+ member_name + "\"";
break;
}
if (member_value != nullptr) {
default_value << (base_type == t_base_type::TYPE_DOUBLE ? member_value->get_double()
: member_value->get_integer());
} else {
default_value << "0";
}
args_indent += string(param_spec_function_name.length() + 2, ' ');
f_types_impl_ << param_spec_function_name << " (\"" << member_name << "\"," << endl
<< args_indent << "NULL," << endl << args_indent << "NULL," << endl
<< args_indent << min_value << "," << endl << args_indent << max_value
<< "," << endl << args_indent << default_value.str() << "," << endl
<< args_indent << "G_PARAM_READWRITE));" << endl;
}
indent_down();
} else if (member_type->is_enum()) {
t_enum_value* enum_min_value = ((t_enum*)member_type)->get_min_value();
t_enum_value* enum_max_value = ((t_enum*)member_type)->get_max_value();
int min_value = (enum_min_value != nullptr) ? enum_min_value->get_value() : 0;
int max_value = (enum_max_value != nullptr) ? enum_max_value->get_value() : 0;
args_indent += string(18, ' ');
f_types_impl_ << "g_param_spec_int (\"" << member_name << "\"," << endl << args_indent
<< "NULL," << endl << args_indent << "NULL," << endl << args_indent
<< min_value << "," << endl << args_indent << max_value << "," << endl
<< args_indent << min_value << "," << endl << args_indent
<< "G_PARAM_READWRITE));" << endl;
indent_down();
} else if (member_type->is_struct() || member_type->is_xception()) {
t_program* type_program = member_type->get_program();
string type_nspace = type_program ? type_program->get_namespace("c_glib") : "";
string type_nspace_prefix =
type_nspace.empty() ? "" : initial_caps_to_underscores(type_nspace) + "_";
string param_type = to_upper_case(type_nspace_prefix) + "TYPE_"
+ to_upper_case(initial_caps_to_underscores(member_type->get_name()));
args_indent += string(20, ' ');
f_types_impl_ << "g_param_spec_object (\"" << member_name << "\"," << endl << args_indent
<< "NULL," << endl << args_indent << "NULL," << endl << args_indent
<< param_type << "," << endl << args_indent << "G_PARAM_READWRITE));" << endl;
indent_down();
} else if (member_type->is_list()) {
t_type* elem_type = ((t_list*)member_type)->get_elem_type();
string param_type;
if (elem_type->is_base_type() && !elem_type->is_string()) {
param_type = "G_TYPE_ARRAY";
} else {
param_type = "G_TYPE_PTR_ARRAY";
}
args_indent += string(20, ' ');
f_types_impl_ << "g_param_spec_boxed (\"" << member_name << "\"," << endl << args_indent
<< "NULL," << endl << args_indent << "NULL," << endl << args_indent
<< param_type << "," << endl << args_indent << "G_PARAM_READWRITE));" << endl;
indent_down();
} else if (member_type->is_set() || member_type->is_map()) {
args_indent += string(20, ' ');
f_types_impl_ << "g_param_spec_boxed (\"" << member_name << "\"," << endl << args_indent
<< "NULL," << endl << args_indent << "NULL," << endl << args_indent
<< "G_TYPE_HASH_TABLE," << endl << args_indent << "G_PARAM_READWRITE));"
<< endl;
indent_down();
}
}
}
scope_down(f_types_impl_);
f_types_impl_ << endl;
f_types_impl_ << "GType" << endl << this->nspace_lc << name_u << "_get_type (void)" << endl << "{"
<< endl << " static GType type = 0;" << endl << endl << " if (type == 0) " << endl
<< " {" << endl << " static const GTypeInfo type_info = " << endl << " {"
<< endl << " sizeof (" << this->nspace << name << "Class)," << endl
<< " NULL, /* base_init */" << endl << " NULL, /* base_finalize */"
<< endl << " (GClassInitFunc) " << this->nspace_lc << name_u << "_class_init,"
<< endl << " NULL, /* class_finalize */" << endl
<< " NULL, /* class_data */" << endl << " sizeof (" << this->nspace
<< name << ")," << endl << " 0, /* n_preallocs */" << endl
<< " (GInstanceInitFunc) " << this->nspace_lc << name_u << "_instance_init,"
<< endl << " NULL, /* value_table */" << endl << " };" << endl << endl
<< " type = g_type_register_static (THRIFT_TYPE_STRUCT, " << endl
<< " \"" << this->nspace << name << "Type\","
<< endl << " &type_info, 0);" << endl << " }"
<< endl << endl << " return type;" << endl << "}" << endl << endl;
}
/**
* Generates functions to write Thrift structures to a stream.
*/
void t_c_glib_generator::generate_struct_writer(ostream& out,
t_struct* tstruct,
string this_name,
string this_get,
bool is_function) {
string name = tstruct->get_name();
string name_u = initial_caps_to_underscores(name);
string name_uc = to_upper_case(name_u);
const vector<t_field*>& fields = tstruct->get_members();
vector<t_field*>::const_iterator f_iter;
int error_ret = 0;
if (is_function) {
error_ret = -1;
indent(out) << "static gint32" << endl << this->nspace_lc << name_u
<< "_write (ThriftStruct *object, ThriftProtocol *protocol, GError **error)"
<< endl;
}
indent(out) << "{" << endl;
indent_up();
out << indent() << "gint32 ret;" << endl << indent() << "gint32 xfer = 0;" << endl << endl;
indent(out) << this_get << endl;
// satisfy -Wall in the case of an empty struct
if (!this_get.empty()) {
indent(out) << "THRIFT_UNUSED_VAR (this_object);" << endl;
}
out << indent() << "if ((ret = thrift_protocol_write_struct_begin (protocol, \"" << name
<< "\", error)) < 0)" << endl << indent() << " return " << error_ret << ";" << endl
<< indent() << "xfer += ret;" << endl;
for (f_iter = fields.begin(); f_iter != fields.end(); ++f_iter) {
if ((*f_iter)->get_req() == t_field::T_OPTIONAL) {
indent(out) << "if (this_object->__isset_" << (*f_iter)->get_name() << " == TRUE) {" << endl;
indent_up();
}
out << indent() << "if ((ret = thrift_protocol_write_field_begin (protocol, "
<< "\"" << (*f_iter)->get_name() << "\", " << type_to_enum((*f_iter)->get_type()) << ", "
<< (*f_iter)->get_key() << ", error)) < 0)" << endl << indent() << " return " << error_ret
<< ";" << endl << indent() << "xfer += ret;" << endl;
generate_serialize_field(out, *f_iter, this_name, "", error_ret);
out << indent() << "if ((ret = thrift_protocol_write_field_end (protocol, error)) < 0)" << endl
<< indent() << " return " << error_ret << ";" << endl << indent() << "xfer += ret;"
<< endl;
if ((*f_iter)->get_req() == t_field::T_OPTIONAL) {
indent_down();
indent(out) << "}" << endl;
}
}
// write the struct map
out << indent() << "if ((ret = thrift_protocol_write_field_stop (protocol, error)) < 0)" << endl
<< indent() << " return " << error_ret << ";" << endl << indent() << "xfer += ret;" << endl
<< indent() << "if ((ret = thrift_protocol_write_struct_end (protocol, error)) < 0)" << endl
<< indent() << " return " << error_ret << ";" << endl << indent() << "xfer += ret;" << endl
<< endl;
if (is_function) {
indent(out) << "return xfer;" << endl;
}
indent_down();
indent(out) << "}" << endl << endl;
}
/**
* Generates code to read Thrift structures from a stream.
*/
void t_c_glib_generator::generate_struct_reader(ostream& out,
t_struct* tstruct,
string this_name,
string this_get,
bool is_function) {
string name = tstruct->get_name();
string name_u = initial_caps_to_underscores(name);
string name_uc = to_upper_case(name_u);
int error_ret = 0;
const vector<t_field*>& fields = tstruct->get_members();
vector<t_field*>::const_iterator f_iter;
if (is_function) {
error_ret = -1;
indent(out) << "/* reads a " << name_u << " object */" << endl << "static gint32" << endl
<< this->nspace_lc << name_u
<< "_read (ThriftStruct *object, ThriftProtocol *protocol, GError **error)" << endl;
}
indent(out) << "{" << endl;
indent_up();
// declare stack temp variables
out << indent() << "gint32 ret;" << endl << indent() << "gint32 xfer = 0;" << endl << indent()
<< "gchar *name = NULL;" << endl << indent() << "ThriftType ftype;" << endl << indent()
<< "gint16 fid;" << endl << indent() << "guint32 len = 0;" << endl << indent()
<< "gpointer data = NULL;" << endl << indent() << this_get << endl;
for (f_iter = fields.begin(); f_iter != fields.end(); ++f_iter) {
if ((*f_iter)->get_req() == t_field::T_REQUIRED) {
indent(out) << "gboolean isset_" << (*f_iter)->get_name() << " = FALSE;" << endl;
}
}
out << endl;
// satisfy -Wall in case we don't use some variables
out << indent() << "/* satisfy -Wall in case these aren't used */" << endl << indent()
<< "THRIFT_UNUSED_VAR (len);" << endl << indent() << "THRIFT_UNUSED_VAR (data);" << endl;
if (!this_get.empty()) {
out << indent() << "THRIFT_UNUSED_VAR (this_object);" << endl;
}
out << endl;
// read the beginning of the structure marker
out << indent() << "/* read the struct begin marker */" << endl << indent()
<< "if ((ret = thrift_protocol_read_struct_begin (protocol, &name, error)) < 0)" << endl
<< indent() << "{" << endl << indent() << " if (name) g_free (name);" << endl << indent()
<< " return " << error_ret << ";" << endl << indent() << "}" << endl << indent()
<< "xfer += ret;" << endl << indent() << "if (name) g_free (name);" << endl << indent()
<< "name = NULL;" << endl << endl;
// read the struct fields
out << indent() << "/* read the struct fields */" << endl << indent() << "while (1)" << endl;
scope_up(out);
// read beginning field marker
out << indent() << "/* read the beginning of a field */" << endl << indent()
<< "if ((ret = thrift_protocol_read_field_begin (protocol, &name, &ftype, &fid, error)) < 0)"
<< endl << indent() << "{" << endl << indent() << " if (name) g_free (name);" << endl
<< indent() << " return " << error_ret << ";" << endl << indent() << "}" << endl << indent()
<< "xfer += ret;" << endl << indent() << "if (name) g_free (name);" << endl << indent()
<< "name = NULL;" << endl << endl;
// check for field STOP marker
out << indent() << "/* break if we get a STOP field */" << endl << indent()
<< "if (ftype == T_STOP)" << endl << indent() << "{" << endl << indent() << " break;" << endl
<< indent() << "}" << endl << endl;
// switch depending on the field type
indent(out) << "switch (fid)" << endl;
// start switch
scope_up(out);
// generate deserialization code for known types
for (f_iter = fields.begin(); f_iter != fields.end(); ++f_iter) {
indent(out) << "case " << (*f_iter)->get_key() << ":" << endl;
indent_up();
indent(out) << "if (ftype == " << type_to_enum((*f_iter)->get_type()) << ")" << endl;
indent(out) << "{" << endl;
indent_up();
// generate deserialize field
generate_deserialize_field(out, *f_iter, this_name, "", error_ret, false);
indent_down();
out << indent() << "} else {" << endl << indent()
<< " if ((ret = thrift_protocol_skip (protocol, ftype, error)) < 0)" << endl << indent()
<< " return " << error_ret << ";" << endl << indent() << " xfer += ret;" << endl
<< indent() << "}" << endl << indent() << "break;" << endl;
indent_down();
}
// create the default case
out << indent() << "default:" << endl << indent()
<< " if ((ret = thrift_protocol_skip (protocol, ftype, error)) < 0)" << endl << indent()
<< " return " << error_ret << ";" << endl << indent() << " xfer += ret;" << endl
<< indent() << " break;" << endl;
// end switch
scope_down(out);
// read field end marker
out << indent() << "if ((ret = thrift_protocol_read_field_end (protocol, error)) < 0)" << endl
<< indent() << " return " << error_ret << ";" << endl << indent() << "xfer += ret;" << endl;
// end while loop
scope_down(out);
out << endl;
// read the end of the structure
out << indent() << "if ((ret = thrift_protocol_read_struct_end (protocol, error)) < 0)" << endl
<< indent() << " return " << error_ret << ";" << endl << indent() << "xfer += ret;" << endl
<< endl;
// if a required field is missing, throw an error
for (f_iter = fields.begin(); f_iter != fields.end(); ++f_iter) {
if ((*f_iter)->get_req() == t_field::T_REQUIRED) {
out << indent() << "if (!isset_" << (*f_iter)->get_name() << ")" << endl << indent() << "{"
<< endl << indent() << " g_set_error (error, THRIFT_PROTOCOL_ERROR," << endl << indent()
<< " THRIFT_PROTOCOL_ERROR_INVALID_DATA," << endl << indent()
<< " \"missing field\");" << endl << indent() << " return -1;" << endl
<< indent() << "}" << endl << endl;
}
}
if (is_function) {
indent(out) << "return xfer;" << endl;
}
// end the function/structure
indent_down();
indent(out) << "}" << endl << endl;
}
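/**
 * Generates code to serialize a single field of any supported type.
 */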
void t_c_glib_generator::generate_serialize_field(ostream& out,
t_field* tfield,
string prefix,
string suffix,
int error_ret) {
t_type* type = get_true_type(tfield->get_type());
string name = prefix + tfield->get_name() + suffix;
if (type->is_void()) {
throw "CANNOT GENERATE SERIALIZE CODE FOR void TYPE: " + name;
}
if (type->is_struct() || type->is_xception()) {
generate_serialize_struct(out, (t_struct*)type, name, error_ret);
} else if (type->is_container()) {
generate_serialize_container(out, type, name, error_ret);
} else if (type->is_base_type() || type->is_enum()) {
indent(out) << "if ((ret = thrift_protocol_write_";
if (type->is_base_type()) {
t_base_type::t_base tbase = ((t_base_type*)type)->get_base();
switch (tbase) {
case t_base_type::TYPE_VOID:
throw "compiler error: cannot serialize void field in a struct: " + name;
break;
case t_base_type::TYPE_BOOL:
out << "bool (protocol, " << name;
break;
case t_base_type::TYPE_I8:
out << "byte (protocol, " << name;
break;
case t_base_type::TYPE_I16:
out << "i16 (protocol, " << name;
break;
case t_base_type::TYPE_I32:
out << "i32 (protocol, " << name;
break;
case t_base_type::TYPE_I64:
out << "i64 (protocol, " << name;
break;
case t_base_type::TYPE_DOUBLE:
out << "double (protocol, " << name;
break;
case t_base_type::TYPE_STRING:
if (type->is_binary()) {
out << "binary (protocol, " << name << " ? ((GByteArray *) " << name << ")->data : NULL, "
<< name << " ? ((GByteArray *) " << name << ")->len : 0";
} else {
out << "string (protocol, " << name;
}
break;
default:
throw "compiler error: no C writer for base type " + t_base_type::t_base_name(tbase) + name;
}
} else {
out << "i32 (protocol, (gint32) " << name;
}
out << ", error)) < 0)" << endl
<< indent() << " return " << error_ret << ";" << endl
<< indent() << "xfer += ret;" << endl << endl;
} else {
throw std::logic_error("DO NOT KNOW HOW TO SERIALIZE FIELD '" + name + "' TYPE '"
+ type_name(type));
}
}
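/**
 * Generates code to serialize a nested struct or exception by delegating
 * to thrift_struct_write.
 */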
void t_c_glib_generator::generate_serialize_struct(ostream& out,
t_struct* tstruct,
string prefix,
int error_ret) {
(void)tstruct;
out << indent() << "if ((ret = thrift_struct_write (THRIFT_STRUCT (" << prefix
<< "), protocol, error)) < 0)" << endl << indent() << " return " << error_ret << ";" << endl
<< indent() << "xfer += ret;" << endl << endl;
}
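/**
 * Generates code to serialize a map, set or list, including the
 * container begin/end protocol markers.
 */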
void t_c_glib_generator::generate_serialize_container(ostream& out,
t_type* ttype,
string prefix,
int error_ret) {
scope_up(out);
if (ttype->is_map()) {
t_type* tkey = ((t_map*)ttype)->get_key_type();
t_type* tval = ((t_map*)ttype)->get_val_type();
string tkey_name = type_name(tkey);
string tval_name = type_name(tval);
string tkey_ptr;
string tval_ptr;
string keyname = tmp("key");
string valname = tmp("val");
    declare_local_variable_for_write(out, tkey, keyname);
    declare_local_variable_for_write(out, tval, valname);
/* If either the key or value type is a typedef, find its underlying type so
we can correctly determine how to generate a pointer to it */
tkey = get_true_type(tkey);
tval = get_true_type(tval);
tkey_ptr = tkey->is_string() || !tkey->is_base_type() ? "" : "*";
tval_ptr = tval->is_string() || !tval->is_base_type() ? "" : "*";
/*
* Some ugliness here. To maximize backwards compatibility, we
* avoid using GHashTableIter and instead get a GList of all keys,
* then copy it into a array on the stack, and free it.
* This is because we may exit early before we get a chance to free the
* GList.
*/
out << indent() << "GList *key_list = NULL, *iter = NULL;" << endl
<< indent() << tkey_name << tkey_ptr << "* keys;" << endl
<< indent() << "int i = 0, key_count;" << endl
<< endl
<< indent() << "if ((ret = thrift_protocol_write_map_begin (protocol, "
<< type_to_enum(tkey) << ", " << type_to_enum(tval) << ", " << prefix << " ? "
<< "(gint32) g_hash_table_size ((GHashTable *) " << prefix << ") : 0"
<< ", error)) < 0)" << endl;
indent_up();
out << indent() << "return " << error_ret << ";" << endl;
indent_down();
out << indent() << "xfer += ret;" << endl
<< indent() << "if (" << prefix << ")" << endl
<< indent() << " g_hash_table_foreach ((GHashTable *) " << prefix
<< ", thrift_hash_table_get_keys, &key_list);" << endl
<< indent() << "key_count = g_list_length (key_list);" << endl
<< indent() << "keys = g_newa (" << tkey_name << tkey_ptr
<< ", key_count);" << endl
<< indent() << "for (iter = g_list_first (key_list); iter; "
"iter = iter->next)" << endl;
indent_up();
out << indent() << "keys[i++] = (" << tkey_name << tkey_ptr
<< ") iter->data;" << endl;
indent_down();
out << indent() << "g_list_free (key_list);" << endl
<< endl
<< indent() << "for (i = 0; i < key_count; ++i)" << endl;
scope_up(out);
out << indent() << keyname << " = keys[i];" << endl
<< indent() << valname << " = (" << tval_name << tval_ptr
<< ") g_hash_table_lookup (((GHashTable *) " << prefix
<< "), (gpointer) " << keyname << ");" << endl
<< endl;
generate_serialize_map_element(out,
(t_map*)ttype,
tkey_ptr + " " + keyname,
tval_ptr + " " + valname,
error_ret);
scope_down(out);
out << indent() << "if ((ret = thrift_protocol_write_map_end (protocol, "
"error)) < 0)" << endl;
indent_up();
out << indent() << "return " << error_ret << ";" << endl;
indent_down();
out << indent() << "xfer += ret;" << endl;
} else if (ttype->is_set()) {
t_type* telem = ((t_set*)ttype)->get_elem_type();
string telem_name = type_name(telem);
string telem_ptr = telem->is_string() || !telem->is_base_type() ? "" : "*";
out << indent() << "GList *key_list = NULL, *iter = NULL;" << endl
<< indent() << telem_name << telem_ptr << "* keys;" << endl
<< indent() << "int i = 0, key_count;" << endl
<< indent() << telem_name << telem_ptr << " elem;" << endl
<< indent() << "gpointer value;" << endl
<< indent() << "THRIFT_UNUSED_VAR (value);" << endl
<< endl
<< indent() << "if ((ret = thrift_protocol_write_set_begin (protocol, "
<< type_to_enum(telem) << ", " << prefix << " ? "
<< "(gint32) g_hash_table_size ((GHashTable *) " << prefix << ") : 0"
<< ", error)) < 0)" << endl;
indent_up();
out << indent() << "return " << error_ret << ";" << endl;
indent_down();
out << indent() << "xfer += ret;" << endl
<< indent() << "if (" << prefix << ")" << endl
<< indent() << " g_hash_table_foreach ((GHashTable *) " << prefix
<< ", thrift_hash_table_get_keys, &key_list);" << endl
<< indent() << "key_count = g_list_length (key_list);" << endl
<< indent() << "keys = g_newa (" << telem_name << telem_ptr
<< ", key_count);" << endl
<< indent() << "for (iter = g_list_first (key_list); iter; "
"iter = iter->next)" << endl;
indent_up();
out << indent() << "keys[i++] = (" << telem_name << telem_ptr
<< ") iter->data;" << endl;
indent_down();
out << indent() << "g_list_free (key_list);" << endl
<< endl
<< indent() << "for (i = 0; i < key_count; ++i)" << endl;
scope_up(out);
out << indent() << "elem = keys[i];" << endl
<< indent() << "value = (gpointer) g_hash_table_lookup "
"(((GHashTable *) " << prefix << "), (gpointer) elem);" << endl
<< endl;
generate_serialize_set_element(out,
(t_set*)ttype,
telem_ptr + "elem",
error_ret);
scope_down(out);
out << indent() << "if ((ret = thrift_protocol_write_set_end (protocol, "
"error)) < 0)" << endl;
indent_up();
out << indent() << "return " << error_ret << ";" << endl;
indent_down();
out << indent() << "xfer += ret;" << endl;
} else if (ttype->is_list()) {
string length = "(" + prefix + " ? " + prefix + "->len : 0)";
string i = tmp("i");
out << indent() << "guint " << i << ";" << endl
<< endl
<< indent() << "if ((ret = thrift_protocol_write_list_begin (protocol, "
<< type_to_enum(((t_list*)ttype)->get_elem_type()) << ", (gint32) "
<< length << ", error)) < 0)" << endl;
indent_up();
out << indent() << "return " << error_ret << ";" << endl;
indent_down();
out << indent() << "xfer += ret;" << endl
<< indent() << "for (" << i << " = 0; " << i << " < " << length << "; "
<< i << "++)" << endl;
scope_up(out);
generate_serialize_list_element(out, (t_list*)ttype, prefix, i, error_ret);
scope_down(out);
out << indent() << "if ((ret = thrift_protocol_write_list_end (protocol, "
"error)) < 0)" << endl;
indent_up();
out << indent() << "return " << error_ret << ";" << endl;
indent_down();
out << indent() << "xfer += ret;" << endl;
}
scope_down(out);
}
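/**
 * Generates code to serialize one key/value pair of a map.
 */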
void t_c_glib_generator::generate_serialize_map_element(ostream& out,
t_map* tmap,
string key,
string value,
int error_ret) {
t_field kfield(tmap->get_key_type(), key);
generate_serialize_field(out, &kfield, "", "", error_ret);
t_field vfield(tmap->get_val_type(), value);
generate_serialize_field(out, &vfield, "", "", error_ret);
}
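/**
 * Generates code to serialize one element of a set.
 */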
void t_c_glib_generator::generate_serialize_set_element(ostream& out,
t_set* tset,
string element,
int error_ret) {
t_field efield(tset->get_elem_type(), element);
generate_serialize_field(out, &efield, "", "", error_ret);
}
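/**
 * Generates code to serialize one list element, casting the value
 * retrieved from the underlying GArray or GPtrArray as needed.
 */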
void t_c_glib_generator::generate_serialize_list_element(ostream& out,
t_list* tlist,
string list,
string index,
int error_ret) {
t_type* ttype = get_true_type(tlist->get_elem_type());
// cast to non-const
string cast = "";
string name = "g_ptr_array_index ((GPtrArray *) " + list + ", " + index + ")";
if (ttype->is_void()) {
throw std::runtime_error("compiler error: list element type cannot be void");
} else if (is_numeric(ttype)) {
name = "g_array_index (" + list + ", " + base_type_name(ttype) + ", " + index + ")";
} else if (ttype->is_string()) {
cast = "(gchar*)";
} else if (ttype->is_map() || ttype->is_set()) {
cast = "(GHashTable*)";
} else if (ttype->is_list()) {
t_type* etype = ((t_list*)ttype)->get_elem_type();
if (etype->is_void()) {
throw std::runtime_error("compiler error: list element type cannot be void");
}
cast = is_numeric(etype) ? "(GArray*)" : "(GPtrArray*)";
}
t_field efield(ttype, "(" + cast + name + ")");
generate_serialize_field(out, &efield, "", "", error_ret);
}
/* deserializes a field of any type. */
void t_c_glib_generator::generate_deserialize_field(ostream& out,
t_field* tfield,
string prefix,
string suffix,
int error_ret,
bool allocate) {
t_type* type = get_true_type(tfield->get_type());
if (type->is_void()) {
throw std::runtime_error("CANNOT GENERATE DESERIALIZE CODE FOR void TYPE: " + prefix
+ tfield->get_name());
}
string name = prefix + tfield->get_name() + suffix;
if (type->is_struct() || type->is_xception()) {
generate_deserialize_struct(out, (t_struct*)type, name, error_ret, allocate);
} else if (type->is_container()) {
generate_deserialize_container(out, type, name, error_ret);
} else if (type->is_base_type()) {
t_base_type::t_base tbase = ((t_base_type*)type)->get_base();
if (tbase == t_base_type::TYPE_STRING) {
indent(out) << "if (" << name << " != NULL)" << endl << indent() << "{" << endl;
indent_up();
indent(out) << "g_free(" << name << ");" << endl << indent() << name << " = NULL;" << endl;
indent_down();
indent(out) << "}" << endl << endl;
}
indent(out) << "if ((ret = thrift_protocol_read_";
switch (tbase) {
case t_base_type::TYPE_VOID:
throw "compiler error: cannot serialize void field in a struct: " + name;
break;
case t_base_type::TYPE_STRING:
if (type->is_binary()) {
out << "binary (protocol, &data, &len";
} else {
out << "string (protocol, &" << name;
}
break;
case t_base_type::TYPE_BOOL:
out << "bool (protocol, &" << name;
break;
case t_base_type::TYPE_I8:
out << "byte (protocol, &" << name;
break;
case t_base_type::TYPE_I16:
out << "i16 (protocol, &" << name;
break;
case t_base_type::TYPE_I32:
out << "i32 (protocol, &" << name;
break;
case t_base_type::TYPE_I64:
out << "i64 (protocol, &" << name;
break;
case t_base_type::TYPE_DOUBLE:
out << "double (protocol, &" << name;
break;
default:
throw "compiler error: no C reader for base type " + t_base_type::t_base_name(tbase) + name;
}
out << ", error)) < 0)" << endl;
out << indent() << " return " << error_ret << ";" << endl << indent() << "xfer += ret;"
<< endl;
// load the byte array with the data
if (tbase == t_base_type::TYPE_STRING && type->is_binary()) {
indent(out) << name << " = g_byte_array_new();" << endl;
indent(out) << "g_byte_array_append (" << name << ", (guint8 *) data, (guint) len);" << endl;
indent(out) << "g_free (data);" << endl;
}
} else if (type->is_enum()) {
string t = tmp("ecast");
out << indent() << "gint32 " << t << ";" << endl << indent()
<< "if ((ret = thrift_protocol_read_i32 (protocol, &" << t << ", error)) < 0)" << endl
<< indent() << " return " << error_ret << ";" << endl << indent() << "xfer += ret;" << endl
<< indent() << name << " = (" << type_name(type) << ")" << t << ";" << endl;
} else {
throw std::logic_error("DO NOT KNOW HOW TO SERIALIZE FIELD '" + tfield->get_name() + "' TYPE '"
+ type_name(type));
}
// if the type is not required and this is a thrift struct (no prefix),
// set the isset variable. if the type is required, then set the
  // local variable indicating the value was set, so that we can do
  // validation later.
if (prefix != "" && tfield->get_req() != t_field::T_REQUIRED) {
indent(out) << prefix << "__isset_" << tfield->get_name() << suffix << " = TRUE;" << endl;
} else if (prefix != "" && tfield->get_req() == t_field::T_REQUIRED) {
indent(out) << "isset_" << tfield->get_name() << " = TRUE;" << endl;
}
}
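/**
 * Generates code to deserialize a struct or exception, first allocating
 * a fresh object when requested (always the case for exceptions).
 */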
void t_c_glib_generator::generate_deserialize_struct(ostream& out,
t_struct* tstruct,
string prefix,
int error_ret,
bool allocate) {
string name_uc = to_upper_case(initial_caps_to_underscores(tstruct->get_name()));
if (tstruct->is_xception()) {
out << indent() << "/* This struct is an exception */" << endl;
allocate = true;
}
if (allocate) {
out << indent() << "if ( " << prefix << " != NULL)" << endl << indent() << "{" << endl;
indent_up();
out << indent() << "g_object_unref (" << prefix << ");" << endl;
indent_down();
out << indent() << "}" << endl << indent() << prefix << " = g_object_new (" << this->nspace_uc
<< "TYPE_" << name_uc << ", NULL);" << endl;
}
out << indent() << "if ((ret = thrift_struct_read (THRIFT_STRUCT (" << prefix
<< "), protocol, error)) < 0)" << endl << indent() << "{" << endl;
indent_up();
if (allocate) {
indent(out) << "g_object_unref (" << prefix << ");" << endl;
if (tstruct->is_xception()) {
indent(out) << prefix << " = NULL;" << endl;
}
}
out << indent() << "return " << error_ret << ";" << endl;
indent_down();
out << indent() << "}" << endl << indent() << "xfer += ret;" << endl;
}
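/**
 * Generates code to deserialize a map, set or list, reading the
 * container begin/end markers and iterating over the elements.
 */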
void t_c_glib_generator::generate_deserialize_container(ostream& out,
t_type* ttype,
string prefix,
int error_ret) {
scope_up(out);
if (ttype->is_map()) {
out << indent() << "guint32 size;" << endl
<< indent() << "guint32 i;" << endl
<< indent() << "ThriftType key_type;" << endl
<< indent() << "ThriftType value_type;" << endl
<< endl
<< indent() << "/* read the map begin marker */" << endl
<< indent() << "if ((ret = thrift_protocol_read_map_begin (protocol, "
"&key_type, &value_type, &size, error)) < 0)" << endl;
indent_up();
out << indent() << "return " << error_ret << ";" << endl;
indent_down();
out << indent() << "xfer += ret;" << endl
<< endl;
// iterate over map elements
out << indent() << "/* iterate through each of the map's fields */" << endl
<< indent() << "for (i = 0; i < size; i++)" << endl;
scope_up(out);
generate_deserialize_map_element(out, (t_map*)ttype, prefix, error_ret);
scope_down(out);
out << endl;
// read map end
out << indent() << "/* read the map end marker */" << endl
<< indent() << "if ((ret = thrift_protocol_read_map_end (protocol, "
"error)) < 0)" << endl;
indent_up();
out << indent() << "return " << error_ret << ";" << endl;
indent_down();
out << indent() << "xfer += ret;" << endl;
} else if (ttype->is_set()) {
out << indent() << "guint32 size;" << endl
<< indent() << "guint32 i;" << endl
<< indent() << "ThriftType element_type;" << endl
<< endl
<< indent() << "if ((ret = thrift_protocol_read_set_begin (protocol, "
"&element_type, &size, error)) < 0)" << endl;
indent_up();
out << indent() << "return " << error_ret << ";" << endl;
indent_down();
out << indent() << "xfer += ret;" << endl
<< endl;
// iterate over the elements
out << indent() << "/* iterate through the set elements */" << endl
<< indent() << "for (i = 0; i < size; ++i)" << endl;
scope_up(out);
generate_deserialize_set_element(out, (t_set*)ttype, prefix, error_ret);
scope_down(out);
// read set end
out << indent() << "if ((ret = thrift_protocol_read_set_end (protocol, "
"error)) < 0)" << endl;
indent_up();
out << indent() << "return " << error_ret << ";" << endl;
indent_down();
out << indent() << "xfer += ret;" << endl
<< endl;
} else if (ttype->is_list()) {
out << indent() << "guint32 size;" << endl
<< indent() << "guint32 i;" << endl
<< indent() << "ThriftType element_type;" << endl
<< endl
<< indent() << "if ((ret = thrift_protocol_read_list_begin (protocol, "
"&element_type,&size, error)) < 0)" << endl;
indent_up();
out << indent() << "return " << error_ret << ";" << endl;
indent_down();
out << indent() << "xfer += ret;" << endl
<< endl;
// iterate over the elements
out << indent() << "/* iterate through list elements */" << endl
<< indent() << "for (i = 0; i < size; i++)" << endl;
scope_up(out);
generate_deserialize_list_element(out,
(t_list*)ttype,
prefix,
"i",
error_ret);
scope_down(out);
// read list end
out << indent() << "if ((ret = thrift_protocol_read_list_end (protocol, "
"error)) < 0)" << endl;
indent_up();
out << indent() << "return " << error_ret << ";" << endl;
indent_down();
out << indent() << "xfer += ret;" << endl;
}
scope_down(out);
}
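/**
 * Declares (and, for container types, allocates) a local variable of the
 * given type, used while deserializing container elements.
 */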
void t_c_glib_generator::declare_local_variable(ostream& out, t_type* ttype, string& name, bool for_hash_table) {
string tname = type_name(ttype);
/* If the given type is a typedef, find its underlying type so we
can correctly determine how to generate a pointer to it */
ttype = get_true_type(ttype);
string ptr = !is_numeric(ttype) ? "" : "*";
if (ttype->is_map()) {
t_map* tmap = (t_map*)ttype;
out << indent() << tname << ptr << " " << name << " = "
<< generate_new_hash_from_type(tmap->get_key_type(), tmap->get_val_type()) << endl;
} else if (ttype->is_list()) {
t_list* tlist = (t_list*)ttype;
out << indent() << tname << ptr << " " << name << " = "
<< generate_new_array_from_type(tlist->get_elem_type()) << endl;
} else if (for_hash_table && ttype->is_enum()) {
out << indent() << tname << " " << name << ";" << endl;
} else {
out << indent() << tname << ptr << " " << name
<< (ptr != "" ? " = g_new (" + tname + ", 1)" : " = NULL") << ";" << endl;
}
}
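/**
 * Declares a local variable used to hold a map key or value while the
 * container is being serialized.
 */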
void t_c_glib_generator::declare_local_variable_for_write(ostream& out,
                                                          t_type* ttype,
                                                          string& name) {
string tname = type_name(ttype);
ttype = get_true_type(ttype);
string ptr = ttype->is_string() || !ttype->is_base_type() ? " " : "* ";
string init_val = ttype->is_enum() ? "" : " = NULL";
out << indent() << tname << ptr << name << init_val << ";" << endl;
}
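/**
 * Generates code to deserialize one key/value pair and insert it into
 * the destination GHashTable.
 */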
void t_c_glib_generator::generate_deserialize_map_element(ostream& out,
t_map* tmap,
string prefix,
int error_ret) {
t_type* tkey = tmap->get_key_type();
t_type* tval = tmap->get_val_type();
string keyname = tmp("key");
string valname = tmp("val");
declare_local_variable(out, tkey, keyname, true);
declare_local_variable(out, tval, valname, true);
/* If either the key or value type is a typedef, find its underlying
type so we can correctly determine how to generate a pointer to
it */
tkey = get_true_type(tkey);
tval = get_true_type(tval);
string tkey_ptr = tkey->is_string() || !tkey->is_base_type() ? "" : "*";
string tval_ptr = tval->is_string() || !tval->is_base_type() ? "" : "*";
// deserialize the fields of the map element
t_field fkey(tkey, tkey_ptr + keyname);
generate_deserialize_field(out, &fkey, "", "", error_ret);
t_field fval(tval, tval_ptr + valname);
generate_deserialize_field(out, &fval, "", "", error_ret);
indent(out) << "if (" << prefix << " && " << keyname << ")" << endl;
indent_up();
indent(out) << "g_hash_table_insert ((GHashTable *)" << prefix << ", (gpointer) " << keyname
<< ", (gpointer) " << valname << ");" << endl;
indent_down();
}
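/**
 * Generates code to deserialize one set element and insert it into the
 * destination GHashTable, where it serves as both key and value.
 */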
void t_c_glib_generator::generate_deserialize_set_element(ostream& out,
t_set* tset,
string prefix,
int error_ret) {
t_type* telem = tset->get_elem_type();
string elem = tmp("_elem");
string telem_ptr = telem->is_string() || !telem->is_base_type() ? "" : "*";
declare_local_variable(out, telem, elem, true);
t_field felem(telem, telem_ptr + elem);
generate_deserialize_field(out, &felem, "", "", error_ret);
indent(out) << "if (" << prefix << " && " << elem << ")" << endl;
indent_up();
indent(out) << "g_hash_table_insert ((GHashTable *) " << prefix << ", (gpointer) " << elem
<< ", (gpointer) " << elem << ");" << endl;
indent_down();
}
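/**
 * Generates code to deserialize one list element and append it to the
 * destination GArray (numeric elements) or GPtrArray (everything else).
 */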
void t_c_glib_generator::generate_deserialize_list_element(ostream& out,
t_list* tlist,
string prefix,
string index,
int error_ret) {
(void)index;
t_type* ttype = get_true_type(tlist->get_elem_type());
string elem = tmp("_elem");
string telem_ptr = !is_numeric(ttype) ? "" : "*";
declare_local_variable(out, ttype, elem, false);
t_field felem(ttype, telem_ptr + elem);
generate_deserialize_field(out, &felem, "", "", error_ret);
if (ttype->is_void()) {
throw std::runtime_error("compiler error: list element type cannot be void");
} else if (is_numeric(ttype)) {
indent(out) << "g_array_append_vals (" << prefix << ", " << elem << ", 1);" << endl;
indent(out) << "g_free (" << elem << ");" << endl;
} else {
indent(out) << "g_ptr_array_add (" << prefix << ", " << elem << ");" << endl;
}
}
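/**
 * Returns the name of the function (cast to GDestroyNotify where needed)
 * used to free a value of the given type, or "NULL" when no destructor
 * is required.
 */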
string t_c_glib_generator::generate_free_func_from_type(t_type* ttype) {
if (ttype == nullptr)
return "NULL";
if (ttype->is_base_type()) {
t_base_type::t_base tbase = ((t_base_type*)ttype)->get_base();
switch (tbase) {
case t_base_type::TYPE_VOID:
throw "compiler error: cannot determine hash type";
break;
case t_base_type::TYPE_BOOL:
case t_base_type::TYPE_I8:
case t_base_type::TYPE_I16:
case t_base_type::TYPE_I32:
case t_base_type::TYPE_I64:
case t_base_type::TYPE_DOUBLE:
return "g_free";
case t_base_type::TYPE_STRING:
if (((t_base_type*)ttype)->is_binary()) {
return "thrift_string_free";
}
return "g_free";
default:
throw "compiler error: no hash table info for type";
}
} else if (ttype->is_enum()) {
return "NULL";
} else if (ttype->is_map() || ttype->is_set()) {
return "(GDestroyNotify) thrift_safe_hash_table_destroy";
} else if (ttype->is_struct()) {
return "g_object_unref";
} else if (ttype->is_list()) {
t_type* etype = ((t_list*)ttype)->get_elem_type();
if (etype->is_base_type()) {
t_base_type::t_base tbase = ((t_base_type*)etype)->get_base();
switch (tbase) {
case t_base_type::TYPE_VOID:
throw "compiler error: cannot determine array type";
break;
case t_base_type::TYPE_BOOL:
case t_base_type::TYPE_I8:
case t_base_type::TYPE_I16:
case t_base_type::TYPE_I32:
case t_base_type::TYPE_I64:
case t_base_type::TYPE_DOUBLE:
return "(GDestroyNotify) g_array_unref";
case t_base_type::TYPE_STRING:
return "(GDestroyNotify) g_ptr_array_unref";
default:
throw "compiler error: no array info for type";
}
} else if (etype->is_container() || etype->is_struct()) {
return "(GDestroyNotify) g_ptr_array_unref";
;
} else if (etype->is_enum()) {
return "(GDestroyNotify) g_array_unref";
}
printf("Type not expected inside the array: %s\n", etype->get_name().c_str());
throw "Type not expected inside array";
} else if (ttype->is_typedef()) {
return generate_free_func_from_type(((t_typedef*)ttype)->get_type());
}
printf("Type not expected: %s\n", ttype->get_name().c_str());
throw "Type not expected";
}
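/**
 * Returns the name of the GHashFunc used to hash keys of the given type
 * in a GHashTable.
 */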
string t_c_glib_generator::generate_hash_func_from_type(t_type* ttype) {
if (ttype == nullptr)
return "NULL";
if (ttype->is_base_type()) {
t_base_type::t_base tbase = ((t_base_type*)ttype)->get_base();
switch (tbase) {
case t_base_type::TYPE_VOID:
throw "compiler error: cannot determine hash type";
break;
case t_base_type::TYPE_BOOL:
return "thrift_boolean_hash";
case t_base_type::TYPE_I8:
return "thrift_int8_hash";
case t_base_type::TYPE_I16:
return "thrift_int16_hash";
case t_base_type::TYPE_I32:
return "g_int_hash";
case t_base_type::TYPE_I64:
return "g_int64_hash";
case t_base_type::TYPE_DOUBLE:
return "g_double_hash";
case t_base_type::TYPE_STRING:
return "g_str_hash";
default:
throw "compiler error: no hash table info for type";
}
} else if (ttype->is_enum()) {
return "g_direct_hash";
} else if (ttype->is_container() || ttype->is_struct()) {
return "g_direct_hash";
} else if (ttype->is_typedef()) {
return generate_hash_func_from_type(((t_typedef*)ttype)->get_type());
}
printf("Type not expected: %s\n", ttype->get_name().c_str());
throw "Type not expected";
}
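/**
 * Returns the name of the GEqualFunc used to compare keys of the given
 * type in a GHashTable.
 */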
string t_c_glib_generator::generate_cmp_func_from_type(t_type* ttype) {
if (ttype == nullptr)
return "NULL";
if (ttype->is_base_type()) {
t_base_type::t_base tbase = ((t_base_type*)ttype)->get_base();
switch (tbase) {
case t_base_type::TYPE_VOID:
throw "compiler error: cannot determine hash type";
break;
case t_base_type::TYPE_BOOL:
return "thrift_boolean_equal";
case t_base_type::TYPE_I8:
return "thrift_int8_equal";
case t_base_type::TYPE_I16:
return "thrift_int16_equal";
case t_base_type::TYPE_I32:
return "g_int_equal";
case t_base_type::TYPE_I64:
return "g_int64_equal";
case t_base_type::TYPE_DOUBLE:
return "g_double_equal";
case t_base_type::TYPE_STRING:
return "g_str_equal";
default:
throw "compiler error: no hash table info for type";
}
} else if (ttype->is_enum()) {
return "g_direct_equal";
} else if (ttype->is_container() || ttype->is_struct()) {
return "g_direct_equal";
} else if (ttype->is_typedef()) {
return generate_cmp_func_from_type(((t_typedef*)ttype)->get_type());
}
printf("Type not expected: %s\n", ttype->get_name().c_str());
throw "Type not expected";
}
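/**
 * Returns, as a string, the g_hash_table_new_full call that creates a
 * hash table for the given key and value types; for map<string, string>
 * this yields "g_hash_table_new_full (g_str_hash, g_str_equal, g_free,
 * g_free);".
 */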
string t_c_glib_generator::generate_new_hash_from_type(t_type* key, t_type* value) {
string hash_func = generate_hash_func_from_type(key);
string cmp_func = generate_cmp_func_from_type(key);
string key_free_func = generate_free_func_from_type(key);
string value_free_func = generate_free_func_from_type(value);
return "g_hash_table_new_full (" + hash_func + ", " + cmp_func + ", " + key_free_func + ", "
+ value_free_func + ");";
}
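/**
 * Returns, as a string, the call that allocates the backing store for a
 * list: a GArray for numeric element types (e.g. "g_array_new (0, 1,
 * sizeof (gint32));") and a GPtrArray with the matching free function
 * for everything else.
 */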
string t_c_glib_generator::generate_new_array_from_type(t_type* ttype) {
if (ttype->is_void()) {
throw std::runtime_error("compiler error: cannot determine array type");
} else if (is_numeric(ttype)) {
return "g_array_new (0, 1, sizeof (" + base_type_name(ttype) + "));";
} else {
string free_func = generate_free_func_from_type(ttype);
return "g_ptr_array_new_with_free_func (" + free_func + ");";
}
}
/***************************************
* UTILITY FUNCTIONS *
***************************************/
/**
* Upper case a string.
*/
string to_upper_case(string name) {
string s(name);
std::transform(s.begin(), s.end(), s.begin(), ::toupper);
return s;
}
/**
* Lower case a string.
*/
string to_lower_case(string name) {
string s(name);
std::transform(s.begin(), s.end(), s.begin(), ::tolower);
return s;
}
/**
* Makes a string friendly to C code standards by lowercasing and adding
* underscores, with the exception of the first character. For example:
*
* Input: "ZomgCamelCase"
* Output: "zomg_camel_case"
*/
string initial_caps_to_underscores(string name) {
string ret;
const char* tmp = name.c_str();
int pos = 0;
/* the first character isn't underscored if uppercase, just lowercased */
ret += tolower(tmp[pos]);
pos++;
for (unsigned int i = pos; i < name.length(); i++) {
char lc = tolower(tmp[i]);
if (lc != tmp[i]) {
ret += '_';
}
ret += lc;
}
return ret;
}
/**
* Performs the reverse operation of initial_caps_to_underscores: The first
* character of the string is made uppercase, along with each character that
* follows an underscore (which is removed). Useful for converting Thrift
* service-method names into GObject-style class names.
*
* Input: "zomg_camel_case"
* Output: "ZomgCamelCase"
*/
string underscores_to_initial_caps(string name) {
string ret;
const char* tmp = name.c_str();
bool uppercase_next = true;
for (unsigned int i = 0; i < name.length(); i++) {
char c = tmp[i];
if (c == '_') {
uppercase_next = true;
} else {
if (uppercase_next) {
ret += toupper(c);
uppercase_next = false;
} else {
ret += c;
}
}
}
return ret;
}
/* register this generator with the main program */
THRIFT_REGISTER_GENERATOR(c_glib, "C, using GLib", "")
| 1 | 17144 | Why is that test different to line 4022 (allocate is not tested there)? Shouldn't that be consistent? | apache-thrift | c
@@ -472,7 +472,15 @@ ExWorkProcRetcode ExHbaseAccessInsertSQTcb::work()
{
rc = applyPred(scanExpr());
if (rc == 1) // expr is true or no expr
- step_ = CREATE_MUTATIONS;
+ {
+ rc = evalInsDelPreCondExpr();
+ if (rc == -1)
+ step_ = HANDLE_ERROR;
+ else if (rc == 0)
+ step_ = INSERT_CLOSE;
+ else
+ step_ = CREATE_MUTATIONS;
+ }
else if (rc == 0) // expr is false
step_ = INSERT_CLOSE;
else // error | 1 | // **********************************************************************
// @@@ START COPYRIGHT @@@
//
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
//
// @@@ END COPYRIGHT @@@
// **********************************************************************
#include "Platform.h"
#include "ex_stdh.h"
#include "ComTdb.h"
#include "ex_tcb.h"
#include "ExHbaseAccess.h"
#include "ex_exe_stmt_globals.h"
#include "ExpHbaseInterface.h"
#include "hs_util.h"
#include "NLSConversion.h"
#include "ExHdfsScan.h"
ExHbaseAccessInsertTcb::ExHbaseAccessInsertTcb(
const ExHbaseAccessTdb &hbaseAccessTdb,
ex_globals * glob ) :
ExHbaseAccessTcb( hbaseAccessTdb, glob),
step_(NOT_STARTED)
{
insertRowlen_ = 0;
}
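// Work method for native HBase single-row insert: evaluates the convert
// expression, extracts the row id, column family/name/value and timestamp
// from the work tuple, and inserts the row through the HBase interface.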
ExWorkProcRetcode ExHbaseAccessInsertTcb::work()
{
Lng32 retcode = 0;
short rc = 0;
while (!qparent_.down->isEmpty())
{
ex_queue_entry *pentry_down = qparent_.down->getHeadEntry();
if (pentry_down->downState.request == ex_queue::GET_NOMORE)
step_ = DONE;
switch (step_)
{
case NOT_STARTED:
{
matches_ = 0;
step_ = INSERT_INIT;
}
break;
case INSERT_INIT:
{
retcode = ehi_->init(getHbaseAccessStats());
if (setupError(retcode, "ExpHbaseInterface::init"))
{
step_ = HANDLE_ERROR;
break;
}
table_.val = hbaseAccessTdb().getTableName();
table_.len = strlen(hbaseAccessTdb().getTableName());
step_ = SETUP_INSERT;
}
break;
case SETUP_INSERT:
{
step_ = EVAL_INSERT_EXPR;
}
break;
case EVAL_INSERT_EXPR:
{
workAtp_->getTupp(hbaseAccessTdb().convertTuppIndex_)
.setDataPointer(convertRow_);
if (convertExpr())
{
ex_expr::exp_return_type evalRetCode =
convertExpr()->eval(pentry_down->getAtp(), workAtp_);
if (evalRetCode == ex_expr::EXPR_ERROR)
{
step_ = HANDLE_ERROR;
break;
}
}
ExpTupleDesc * convertRowTD =
hbaseAccessTdb().workCriDesc_->getTupleDescriptor
(hbaseAccessTdb().convertTuppIndex_);
for (Lng32 i = 0; i < convertRowTD->numAttrs(); i++)
{
Attributes * attr = convertRowTD->getAttr(i);
Lng32 len = 0;
if (attr)
{
if (attr->getVCIndicatorLength() == sizeof(short))
len = *(short*)&convertRow_[attr->getVCLenIndOffset()];
else
len = *(Lng32*)&convertRow_[attr->getVCLenIndOffset()];
switch (i)
{
case HBASE_ROW_ID_INDEX:
{
insRowId_.assign(&convertRow_[attr->getOffset()], len);
}
break;
case HBASE_COL_FAMILY_INDEX:
{
insColFam_.assign(&convertRow_[attr->getOffset()], len);
}
break;
case HBASE_COL_NAME_INDEX:
{
insColNam_.assign(&convertRow_[attr->getOffset()], len);
}
break;
case HBASE_COL_VALUE_INDEX:
{
insColVal_.assign(&convertRow_[attr->getOffset()], len);
}
break;
case HBASE_COL_TS_INDEX:
{
insColTS_ = (Int64*)&convertRow_[attr->getOffset()];
}
break;
} // switch
} // if attr
} // convertExpr
step_ = PROCESS_INSERT;
}
break;
case PROCESS_INSERT:
{
createDirectRowBuffer(insColFam_, insColNam_, insColVal_);
HbaseStr rowID;
rowID.val = (char *)insRowId_.data();
rowID.len = insRowId_.size();
retcode = ehi_->insertRow(table_,
rowID,
row_,
hbaseAccessTdb().useHbaseXn(),
*insColTS_,
FALSE); // AsyncOperations is always FALSE for native HBase
if (setupError(retcode, "ExpHbaseInterface::insertRow"))
{
step_ = HANDLE_ERROR;
break;
}
if (getHbaseAccessStats())
getHbaseAccessStats()->incUsedRows();
matches_++;
step_ = INSERT_CLOSE;
}
break;
case INSERT_CLOSE:
{
retcode = ehi_->close();
if (setupError(retcode, "ExpHbaseInterface::close"))
{
step_ = HANDLE_ERROR;
break;
}
step_ = DONE;
}
break;
case HANDLE_ERROR:
{
if (handleError(rc))
return rc;
step_ = DONE;
}
break;
case DONE:
{
if (handleDone(rc, matches_))
return rc;
step_ = NOT_STARTED;
}
break;
} // switch
} // while
return WORK_OK;
}
ExHbaseAccessInsertRowwiseTcb::ExHbaseAccessInsertRowwiseTcb(
const ExHbaseAccessTdb &hbaseAccessTdb,
ex_globals * glob ) :
ExHbaseAccessInsertTcb( hbaseAccessTdb, glob)
{
}
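// Work method for native HBase rowwise insert: evaluates the convert
// expression, packs all columns into a rowwise direct buffer, and inserts
// the row (if it contains any columns) through the HBase interface.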
ExWorkProcRetcode ExHbaseAccessInsertRowwiseTcb::work()
{
Lng32 retcode = 0;
short rc = 0;
while (!qparent_.down->isEmpty())
{
ex_queue_entry *pentry_down = qparent_.down->getHeadEntry();
if (pentry_down->downState.request == ex_queue::GET_NOMORE)
step_ = DONE;
switch (step_)
{
case NOT_STARTED:
{
matches_ = 0;
step_ = INSERT_INIT;
}
break;
case INSERT_INIT:
{
retcode = ehi_->init(getHbaseAccessStats());
if (setupError(retcode, "ExpHbaseInterface::init"))
{
step_ = HANDLE_ERROR;
break;
}
table_.val = hbaseAccessTdb().getTableName();
table_.len = strlen(hbaseAccessTdb().getTableName());
step_ = SETUP_INSERT;
}
break;
case SETUP_INSERT:
{
step_ = EVAL_INSERT_EXPR;
}
break;
case EVAL_INSERT_EXPR:
{
workAtp_->getTupp(hbaseAccessTdb().convertTuppIndex_)
.setDataPointer(convertRow_);
if (convertExpr())
{
ex_expr::exp_return_type evalRetCode =
convertExpr()->eval(pentry_down->getAtp(), workAtp_);
if (evalRetCode == ex_expr::EXPR_ERROR)
{
step_ = HANDLE_ERROR;
break;
}
}
ExpTupleDesc * convertRowTD =
hbaseAccessTdb().workCriDesc_->getTupleDescriptor
(hbaseAccessTdb().convertTuppIndex_);
for (Lng32 i = 0; i < convertRowTD->numAttrs(); i++)
{
Attributes * attr = convertRowTD->getAttr(i);
short len = 0;
if (attr)
{
len = *(short*)&convertRow_[attr->getVCLenIndOffset()];
switch (i)
{
case HBASE_ROW_ID_INDEX:
{
insRowId_.assign(&convertRow_[attr->getOffset()], len);
}
break;
case HBASE_COL_DETAILS_INDEX:
{
char * convRow = &convertRow_[attr->getOffset()];
retcode = createDirectRowwiseBuffer(convRow);
}
break;
} // switch
} // if attr
} // for
step_ = PROCESS_INSERT;
}
break;
case PROCESS_INSERT:
{
if (numColsInDirectBuffer() > 0)
{
HbaseStr rowID;
rowID.val = (char *)insRowId_.data();
rowID.len = insRowId_.size();
retcode = ehi_->insertRow(table_,
rowID,
row_,
hbaseAccessTdb().useHbaseXn(),
-1, //*insColTS_
FALSE); // AsyncOperations is always FALSE for native HBase
if (setupError(retcode, "ExpHbaseInterface::insertRow"))
{
step_ = HANDLE_ERROR;
break;
}
if (getHbaseAccessStats())
getHbaseAccessStats()->incUsedRows();
matches_++;
}
step_ = INSERT_CLOSE;
}
break;
case INSERT_CLOSE:
{
retcode = ehi_->close();
if (setupError(retcode, "ExpHbaseInterface::close"))
{
step_ = HANDLE_ERROR;
break;
}
step_ = DONE;
}
break;
case HANDLE_ERROR:
{
if (handleError(rc))
return rc;
step_ = DONE;
}
break;
case DONE:
{
if (handleDone(rc, matches_))
return rc;
step_ = NOT_STARTED;
}
break;
} // switch
} // while
return WORK_OK;
}
ExHbaseAccessInsertSQTcb::ExHbaseAccessInsertSQTcb(
const ExHbaseAccessTdb &hbaseAccessTdb,
ex_globals * glob ) :
ExHbaseAccessInsertTcb( hbaseAccessTdb, glob)
{
}
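// Work method for single-row insert into a Trafodion table: evaluates the
// convert expression, generates the syskey, applies the insert constraint,
// builds the mutations and row id, then performs either a checkAndInsert
// (raising error 8102 on a duplicate row) or a plain insert, optionally
// completing the operation asynchronously and returning the inserted row.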
ExWorkProcRetcode ExHbaseAccessInsertSQTcb::work()
{
Lng32 retcode = 0;
short rc = 0;
while (!qparent_.down->isEmpty())
{
ex_queue_entry *pentry_down = qparent_.down->getHeadEntry();
if (pentry_down->downState.request == ex_queue::GET_NOMORE)
step_ = DONE;
switch (step_)
{
case NOT_STARTED:
{
matches_ = 0;
asyncCompleteRetryCount_ = 0;
asyncOperationTimeout_ = 1;
asyncOperation_ = hbaseAccessTdb().asyncOperations() && getTransactionIDFromContext();
step_ = INSERT_INIT;
}
break;
case INSERT_INIT:
{
retcode = ehi_->init(getHbaseAccessStats());
if (setupError(retcode, "ExpHbaseInterface::init"))
{
step_ = HANDLE_ERROR;
break;
}
table_.val = hbaseAccessTdb().getTableName();
table_.len = strlen(hbaseAccessTdb().getTableName());
step_ = SETUP_INSERT;
}
break;
case SETUP_INSERT:
{
step_ = EVAL_INSERT_EXPR;
}
break;
case EVAL_INSERT_EXPR:
{
workAtp_->getTupp(hbaseAccessTdb().convertTuppIndex_)
.setDataPointer(convertRow_);
if (convertExpr())
{
insertRowlen_ = hbaseAccessTdb().convertRowLen_;
ex_expr::exp_return_type evalRetCode =
convertExpr()->eval(pentry_down->getAtp(), workAtp_,
NULL, -1, &insertRowlen_);
if (evalRetCode == ex_expr::EXPR_ERROR)
{
step_ = HANDLE_ERROR;
break;
}
}
genAndAssignSyskey(hbaseAccessTdb().convertTuppIndex_, convertRow_);
step_ = EVAL_CONSTRAINT;
}
break;
case EVAL_CONSTRAINT:
{
rc = applyPred(scanExpr());
if (rc == 1) // expr is true or no expr
step_ = CREATE_MUTATIONS;
else if (rc == 0) // expr is false
step_ = INSERT_CLOSE;
else // error
step_ = HANDLE_ERROR;
}
break;
case CREATE_MUTATIONS:
{
retcode = createDirectRowBuffer( hbaseAccessTdb().convertTuppIndex_,
convertRow_,
hbaseAccessTdb().listOfUpdatedColNames(),
(hbaseAccessTdb().hbaseSqlIUD() ? FALSE : TRUE));
if (retcode == -1)
{
step_ = HANDLE_ERROR;
break;
}
insColTSval_ = -1;
step_ = EVAL_ROWID_EXPR;
}
break;
case EVAL_ROWID_EXPR:
{
if (evalRowIdExpr(TRUE) == -1)
{
step_ = HANDLE_ERROR;
break;
}
insRowId_.assign(rowId_.val, rowId_.len);
if (hbaseAccessTdb().hbaseSqlIUD())
step_ = CHECK_AND_INSERT;
else
step_ = PROCESS_INSERT;
}
break;
case CHECK_AND_INSERT:
{
HbaseStr rowID;
rowID.val = (char *)insRowId_.data();
rowID.len = insRowId_.size();
retcode = ehi_->checkAndInsertRow(table_,
rowID,
row_,
hbaseAccessTdb().useHbaseXn(),
insColTSval_,
asyncOperation_);
if (retcode == HBASE_DUP_ROW_ERROR) // row exists, return error
{
ComDiagsArea * diagsArea = NULL;
ExRaiseSqlError(getHeap(), &diagsArea,
(ExeErrorCode)(8102));
pentry_down->setDiagsArea(diagsArea);
step_ = HANDLE_ERROR;
break;
}
if (setupError(retcode, "ExpHbaseInterface::checkAndInsertRow"))
{
step_ = HANDLE_ERROR;
break;
}
if (getHbaseAccessStats())
getHbaseAccessStats()->incUsedRows();
if (hbaseAccessTdb().returnRow()) {
step_ = RETURN_ROW;
break;
}
matches_++;
if (asyncOperation_) {
step_ = COMPLETE_ASYNC_INSERT;
return WORK_CALL_AGAIN;
}
else {
step_ = INSERT_CLOSE;
}
}
break;
case COMPLETE_ASYNC_INSERT:
{
if (resultArray_ == NULL)
resultArray_ = new (getHeap()) NABoolean[1];
Int32 timeout;
if (asyncCompleteRetryCount_ < 10)
timeout = -1;
else {
asyncOperationTimeout_ = asyncOperationTimeout_ * 2;
timeout = asyncOperationTimeout_;
}
retcode = ehi_->completeAsyncOperation(timeout, resultArray_, 1);
if (retcode == HBASE_RETRY_AGAIN) {
asyncCompleteRetryCount_++;
return WORK_CALL_AGAIN;
}
asyncCompleteRetryCount_ = 0;
if (setupError(retcode, "ExpHbaseInterface::completeAsyncOperation")) {
step_ = HANDLE_ERROR;
break;
}
if (resultArray_[0] == FALSE) {
ComDiagsArea * diagsArea = NULL;
ExRaiseSqlError(getHeap(), &diagsArea,
(ExeErrorCode)(8102));
pentry_down->setDiagsArea(diagsArea);
step_ = HANDLE_ERROR;
break;
}
step_ = INSERT_CLOSE;
}
break;
case PROCESS_INSERT:
{
HbaseStr rowID;
rowID.val = (char *)insRowId_.data();
rowID.len = insRowId_.size();
retcode = ehi_->insertRow(table_,
rowID,
row_,
hbaseAccessTdb().useHbaseXn(),
insColTSval_,
asyncOperation_);
if (setupError(retcode, "ExpHbaseInterface::insertRow")) {
step_ = HANDLE_ERROR;
break;
}
if (getHbaseAccessStats())
getHbaseAccessStats()->incUsedRows();
if (hbaseAccessTdb().returnRow()) {
step_ = RETURN_ROW;
break;
}
matches_++;
if (asyncOperation_) {
step_ = COMPLETE_ASYNC_INSERT;
return WORK_CALL_AGAIN;
}
else {
step_ = INSERT_CLOSE;
}
}
break;
case RETURN_ROW:
{
if (qparent_.up->isFull())
return WORK_OK;
if (returnUpdateExpr())
{
ex_queue_entry * up_entry = qparent_.up->getTailEntry();
// allocate tupps where returned rows will be created
if (allocateUpEntryTupps(
-1,
0,
hbaseAccessTdb().returnedTuppIndex_,
hbaseAccessTdb().returnUpdatedRowLen_,
FALSE,
&rc))
return rc;
ex_expr::exp_return_type exprRetCode =
returnUpdateExpr()->eval(up_entry->getAtp(), workAtp_);
if (exprRetCode == ex_expr::EXPR_ERROR)
{
step_ = HANDLE_ERROR;
break;
}
rc = 0;
// moveRowToUpQueue also increments matches_
if (moveRowToUpQueue(&rc))
return rc;
}
else
{
rc = 0;
// moveRowToUpQueue also increments matches_
if (moveRowToUpQueue(convertRow_, hbaseAccessTdb().convertRowLen(),
&rc, FALSE))
return rc;
}
if (asyncOperation_) {
step_ = COMPLETE_ASYNC_INSERT;
return WORK_CALL_AGAIN;
}
else
step_ = INSERT_CLOSE;
}
break;
case INSERT_CLOSE:
{
retcode = ehi_->close();
if (setupError(retcode, "ExpHbaseInterface::close"))
{
step_ = HANDLE_ERROR;
break;
}
step_ = DONE;
}
break;
case HANDLE_ERROR:
{
if (handleError(rc))
return rc;
retcode = ehi_->close();
step_ = DONE;
}
break;
case DONE:
{
if (NOT hbaseAccessTdb().computeRowsAffected())
matches_ = 0;
if (handleDone(rc, matches_))
return rc;
step_ = NOT_STARTED;
}
break;
} // switch
} // while
return WORK_OK;
}
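// Upsert TCB using VSBB (virtual sequential block buffering): rows and
// rowIds are accumulated in direct buffers and sent to HBase in batches
// of up to getHbaseRowsetVsbbSize() rows through a single insertRows()
// call.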
ExHbaseAccessUpsertVsbbSQTcb::ExHbaseAccessUpsertVsbbSQTcb(
const ExHbaseAccessTdb &hbaseAccessTdb,
ex_globals * glob ) :
ExHbaseAccessInsertTcb( hbaseAccessTdb, glob)
{
prevTailIndex_ = 0;
nextRequest_ = qparent_.down->getHeadIndex();
numRetries_ = 0;
rowsInserted_ = 0;
lastHandledStep_ = NOT_STARTED;
numRowsInVsbbBuffer_ = 0;
}
ExWorkProcRetcode ExHbaseAccessUpsertVsbbSQTcb::work()
{
Lng32 retcode = 0;
short rc = 0;
while (!qparent_.down->isEmpty())
{
nextRequest_ = qparent_.down->getHeadIndex();
ex_queue_entry *pentry_down = qparent_.down->getHeadEntry();
if (pentry_down->downState.request == ex_queue::GET_NOMORE)
step_ = ALL_DONE;
else if (pentry_down->downState.request == ex_queue::GET_EOD)
{
if (currRowNum_ > rowsInserted_)
{
step_ = PROCESS_INSERT_FLUSH_AND_CLOSE;
}
else
{
if (lastHandledStep_ == ALL_DONE)
matches_ = 0;
step_ = ALL_DONE;
}
}
switch (step_)
{
case NOT_STARTED:
{
matches_ = 0;
currRowNum_ = 0;
numRetries_ = 0;
prevTailIndex_ = 0;
lastHandledStep_ = NOT_STARTED;
nextRequest_ = qparent_.down->getHeadIndex();
rowsInserted_ = 0;
asyncCompleteRetryCount_ = 0;
asyncOperationTimeout_ = 1;
asyncOperation_ = hbaseAccessTdb().asyncOperations() && getTransactionIDFromContext();
numRowsInVsbbBuffer_ = 0;
step_ = INSERT_INIT;
}
break;
case INSERT_INIT:
{
retcode = ehi_->init(getHbaseAccessStats());
if (setupError(retcode, "ExpHbaseInterface::init"))
{
step_ = HANDLE_ERROR;
break;
}
table_.val = hbaseAccessTdb().getTableName();
table_.len = strlen(hbaseAccessTdb().getTableName());
ExpTupleDesc * rowTD =
hbaseAccessTdb().workCriDesc_->getTupleDescriptor
(hbaseAccessTdb().convertTuppIndex_);
allocateDirectRowBufferForJNI(rowTD->numAttrs(), hbaseAccessTdb().getHbaseRowsetVsbbSize());
allocateDirectRowIDBufferForJNI(hbaseAccessTdb().getHbaseRowsetVsbbSize());
if (hbaseAccessTdb().getCanAdjustTrafParams())
{
if (hbaseAccessTdb().getWBSize() > 0)
{
retcode = ehi_->setWriteBufferSize(table_,
hbaseAccessTdb().getWBSize());
if (setupError(retcode, "ExpHbaseInterface::setWriteBufferSize"))
{
step_ = HANDLE_ERROR;
break;
}
}
retcode = ehi_->setWriteToWAL(table_,
hbaseAccessTdb().getTrafWriteToWAL());
if (setupError(retcode, "ExpHbaseInterface::setWriteToWAL"))
{
step_ = HANDLE_ERROR;
break;
}
}
step_ = SETUP_INSERT;
}
break;
case SETUP_INSERT:
{
step_ = EVAL_INSERT_EXPR;
}
break;
case EVAL_INSERT_EXPR:
{
workAtp_->getTupp(hbaseAccessTdb().convertTuppIndex_)
.setDataPointer(convertRow_);
if (convertExpr())
{
insertRowlen_ = hbaseAccessTdb().convertRowLen_;
ex_expr::exp_return_type evalRetCode =
convertExpr()->eval(pentry_down->getAtp(), workAtp_,
NULL, -1, &insertRowlen_);
if (evalRetCode == ex_expr::EXPR_ERROR)
{
step_ = HANDLE_ERROR;
break;
}
}
genAndAssignSyskey(hbaseAccessTdb().convertTuppIndex_, convertRow_);
step_ = EVAL_CONSTRAINT;
}
break;
case EVAL_CONSTRAINT:
{
rc = applyPred(scanExpr());
if (rc == 1) // expr is true or no expr
step_ = CREATE_MUTATIONS;
else if (rc == 0) // expr is false
step_ = INSERT_CLOSE;
else // error
step_ = HANDLE_ERROR;
}
break;
case CREATE_MUTATIONS:
{
retcode = createDirectRowBuffer(
hbaseAccessTdb().convertTuppIndex_,
convertRow_,
hbaseAccessTdb().listOfUpdatedColNames(),
TRUE);
if (retcode == -1)
{
step_ = HANDLE_ERROR;
break;
}
insColTSval_ = -1;
step_ = EVAL_ROWID_EXPR;
}
break;
case EVAL_ROWID_EXPR:
{
if (evalRowIdExpr(TRUE) == -1)
{
step_ = HANDLE_ERROR;
break;
}
copyRowIDToDirectBuffer(rowId_);
currRowNum_++;
matches_++;
if (currRowNum_ < hbaseAccessTdb().getHbaseRowsetVsbbSize())
{
step_ = DONE;
break;
}
step_ = PROCESS_INSERT;
}
break;
case PROCESS_INSERT:
case PROCESS_INSERT_AND_CLOSE:
case PROCESS_INSERT_FLUSH_AND_CLOSE:
{
numRowsInVsbbBuffer_ = patchDirectRowBuffers();
retcode = ehi_->insertRows(table_,
hbaseAccessTdb().getRowIDLen(),
rowIDs_,
rows_,
hbaseAccessTdb().useHbaseXn(),
insColTSval_,
hbaseAccessTdb().getIsTrafLoadAutoFlush(),
asyncOperation_);
if (setupError(retcode, "ExpHbaseInterface::insertRows")) {
step_ = HANDLE_ERROR;
break;
}
if (getHbaseAccessStats()) {
getHbaseAccessStats()->lobStats()->numReadReqs++;
getHbaseAccessStats()->incUsedRows(numRowsInVsbbBuffer_);
}
rowsInserted_ += numRowsInVsbbBuffer_;
if (asyncOperation_) {
lastHandledStep_ = step_;
step_ = COMPLETE_ASYNC_INSERT;
}
else if (step_ == PROCESS_INSERT_FLUSH_AND_CLOSE)
step_ = FLUSH_BUFFERS;
else if (step_ == PROCESS_INSERT_AND_CLOSE)
step_ = INSERT_CLOSE;
else
step_ = ALL_DONE;
}
break;
case COMPLETE_ASYNC_INSERT:
{
if (resultArray_ == NULL)
resultArray_ = new (getHeap()) NABoolean[hbaseAccessTdb().getHbaseRowsetVsbbSize()];
Int32 timeout;
if (asyncCompleteRetryCount_ < 10)
timeout = -1;
else {
asyncOperationTimeout_ = asyncOperationTimeout_ * 2;
timeout = asyncOperationTimeout_;
}
retcode = ehi_->completeAsyncOperation(timeout, resultArray_, numRowsInVsbbBuffer_);
if (retcode == HBASE_RETRY_AGAIN) {
asyncCompleteRetryCount_++;
return WORK_CALL_AGAIN;
}
asyncCompleteRetryCount_ = 0;
if (setupError(retcode, "ExpHbaseInterface::completeAsyncOperation")) {
step_ = HANDLE_ERROR;
break;
}
for (int i = 0 ; i < numRowsInVsbbBuffer_; i++) {
if (resultArray_[i] == FALSE) {
ComDiagsArea * diagsArea = NULL;
ExRaiseSqlError(getHeap(), &diagsArea,
(ExeErrorCode)(8102));
pentry_down->setDiagsArea(diagsArea);
step_ = HANDLE_ERROR;
break;
}
}
if (step_ == HANDLE_ERROR)
break;
if (lastHandledStep_ == PROCESS_INSERT_FLUSH_AND_CLOSE)
step_ = FLUSH_BUFFERS;
else if (lastHandledStep_ == PROCESS_INSERT_AND_CLOSE)
step_ = INSERT_CLOSE;
else
step_ = ALL_DONE;
}
break;
case INSERT_CLOSE:
{
retcode = ehi_->close();
if (setupError(retcode, "ExpHbaseInterface::close"))
{
step_ = HANDLE_ERROR;
break;
}
step_ = ALL_DONE;
}
break;
case HANDLE_ERROR:
{
if (handleError(rc))
return rc;
retcode = ehi_->close();
step_ = ALL_DONE;
}
break;
case FLUSH_BUFFERS:
{
// Flush the client-side write buffer for this table.
retcode = ehi_->flushTable();
if (setupError(retcode, "ExpHbaseInterface::flushTable"))
{
step_ = HANDLE_ERROR;
break;
}
step_ = INSERT_CLOSE;
}
break;
case DONE:
case ALL_DONE:
{
if (NOT hbaseAccessTdb().computeRowsAffected())
matches_ = 0;
if ((step_ == DONE) &&
(qparent_.down->getLength() == 1))
{
// Only one row is in the down queue.
// Before sending the input buffer to HBase, give the parent
// a few more chances to queue additional input data.
// If the parent hasn't supplied more data by the third
// chance, process the request.
if (numRetries_ == 3)
{
numRetries_ = 0;
// Insert the current batch and then done.
step_ = PROCESS_INSERT_AND_CLOSE;
break;
}
numRetries_++;
return WORK_CALL_AGAIN;
}
if (handleDone(rc, (step_ == ALL_DONE ? matches_ : 0)))
return rc;
lastHandledStep_ = step_;
if (step_ == DONE)
step_ = SETUP_INSERT;
else
step_ = NOT_STARTED;
}
break;
} // switch
} // while
return WORK_OK;
}
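// Bulk load preparation TCB: instead of going through the regular HBase
// insert path, rows are written directly into HFiles via the HBase bulk
// load client (HBLC). If a sampling rate is specified, a random sample
// of the rows is also written to an HDFS file backing a Hive external
// table, which is used later to update statistics.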
ExHbaseAccessBulkLoadPrepSQTcb::ExHbaseAccessBulkLoadPrepSQTcb(
const ExHbaseAccessTdb &hbaseAccessTdb,
ex_globals * glob ) :
ExHbaseAccessUpsertVsbbSQTcb( hbaseAccessTdb, glob),
prevRowId_ (NULL),
hdfs_(NULL),
hdfsSampleFile_(NULL),
lastErrorCnd_(NULL)
{
hFileParamsInitialized_ = false; // temporary -- need a better mechanism later
posVec_.clear();
Lng32 fileNum = getGlobals()->castToExExeStmtGlobals()->getMyInstanceNumber();
ExHbaseAccessTcb::buildLoggingPath(((ExHbaseAccessTdb &)hbaseAccessTdb).getLoggingLocation(),
(char *)((ExHbaseAccessTdb &)hbaseAccessTdb).getErrCountRowId(),
((ExHbaseAccessTdb &)hbaseAccessTdb).getTableName(),
"traf_upsert_err",
fileNum,
loggingFileName_);
LoggingFileCreated_ = FALSE;
loggingRow_ = new(glob->getDefaultHeap()) char[hbaseAccessTdb.updateRowLen_];
}
ExHbaseAccessBulkLoadPrepSQTcb::~ExHbaseAccessBulkLoadPrepSQTcb()
{
// Flush and close sample file if used, and disconnect from HDFS.
if (hdfs_)
{
if (hdfsSampleFile_)
{
hdfsFlush(hdfs_, hdfsSampleFile_);
hdfsCloseFile(hdfs_, hdfsSampleFile_);
}
hdfsDisconnect(hdfs_);
}
}
// Given the type information available via the argument, return the name of
// the Hive type we use to represent it in the Hive sample table created by
// the bulk load utility.
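// For decimal and interval types, the switch below falls through to
// code that computes the maximum magnitude the type can hold and picks
// the smallest Hive integer type that can represent it.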
static const char* TrafToHiveType(Attributes* attrs)
{
Int64 maxValue = 0;
Int16 precision = 0;
Int16 scale = 0;
Int16 datatype = attrs->getDatatype();
if (DFS2REC::isInterval(datatype))
{
precision = dynamic_cast<SimpleType*>(attrs)->getPrecision();
scale = dynamic_cast<SimpleType*>(attrs)->getScale();
}
switch (datatype)
{
case REC_BIN16_SIGNED:
case REC_BIN16_UNSIGNED:
case REC_BPINT_UNSIGNED:
return "smallint";
case REC_BIN32_SIGNED:
case REC_BIN32_UNSIGNED:
return "int";
case REC_BIN64_SIGNED:
return "bigint";
case REC_TDM_FLOAT32:
case REC_IEEE_FLOAT32:
return "float";
case REC_TDM_FLOAT64:
case REC_IEEE_FLOAT64:
return "double";
case REC_DECIMAL_UNSIGNED:
case REC_DECIMAL_LS:
case REC_DECIMAL_LSE:
maxValue = (Int64)pow(10, dynamic_cast<SimpleType*>(attrs)->getPrecision());
break;
//case REC_NUM_BIG_UNSIGNED: return extFormat? (char *)"NUMERIC":(char *)"REC_NUM_BIG_UNSIGNED";
//case REC_NUM_BIG_SIGNED: return extFormat? (char *)"NUMERIC":(char *)"REC_NUM_BIG_SIGNED";
case REC_BYTE_F_ASCII:
case REC_NCHAR_F_UNICODE:
case REC_BYTE_V_ASCII:
case REC_NCHAR_V_UNICODE:
case REC_BYTE_V_ASCII_LONG:
case REC_BYTE_V_ANSI:
case REC_BYTE_V_ANSI_DOUBLE:
case REC_SBYTE_LOCALE_F:
case REC_MBYTE_LOCALE_F:
case REC_MBYTE_F_SJIS:
case REC_MBYTE_V_SJIS:
return "string";
case REC_DATETIME:
return "timestamp";
case REC_INT_YEAR:
case REC_INT_MONTH:
case REC_INT_DAY:
case REC_INT_HOUR:
case REC_INT_MINUTE:
maxValue = (Int64)pow(10, precision);
break;
case REC_INT_SECOND:
maxValue = (Int64)pow(10, precision + scale);
break;
case REC_INT_YEAR_MONTH:
maxValue = 12 * (Int64)pow(10, precision);
break;
case REC_INT_DAY_HOUR:
maxValue = 24 * (Int64)pow(10, precision);
break;
case REC_INT_HOUR_MINUTE:
maxValue = 60 * (Int64)pow(10, precision);
break;
case REC_INT_DAY_MINUTE:
maxValue = 24 * 60 * (Int64)pow(10, precision);
break;
case REC_INT_MINUTE_SECOND:
maxValue = (Int64)pow(10, precision + 2 + scale);
break;
case REC_INT_HOUR_SECOND:
maxValue = (Int64)pow(10, precision + 4 + scale);
break;
case REC_INT_DAY_SECOND:
maxValue = (Int64)pow(10, precision + 5 + scale);
break;
default:
break;
} // switch
//assert(maxValue > 0);
if (maxValue < SHRT_MAX)
return "smallint";
else if (maxValue <= INT_MAX)
return "int";
else
return "bigint";
}
// Return in ddlText the Hive statement to create the Hive external table
// that will hold a sample for the Trafodion table being loaded. The files
// containing the sample data are written independently to HDFS and linked to
// the Hive table by the location clause in the generated Hive DDL.
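// The generated DDL has the form:
//   create external table <name>(col<n> <hive-type>, ...)
//   row format delimited fields terminated by '|'
//   location '<sampleLocation><tableName>/'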
void ExHbaseAccessBulkLoadPrepSQTcb::getHiveCreateTableDDL(NAString& hiveSampleTblNm, NAString& ddlText)
{
ExHbaseAccessTdb& hbaTdb = ((ExHbaseAccessTdb&)hbaseAccessTdb());
ddlText = "create external table ";
ddlText.append(hiveSampleTblNm).append("(");
ExpTupleDesc* td = hbaTdb.workCriDesc_->getTupleDescriptor(hbaTdb.convertTuppIndex_);
hbaTdb.listOfUpdatedColNames()->position();
Attributes* attrs;
char colNumBuf[12];
for (UInt32 i = 0; i < td->numAttrs(); i++)
{
attrs = td->getAttr(i);
sprintf(colNumBuf, "%d", *(UInt32*) hbaTdb.listOfUpdatedColNames()->getCurr());
ddlText.append("col").append(colNumBuf).append(" ");
ddlText.append(TrafToHiveType(attrs));
if (i < td->numAttrs() - 1)
ddlText.append(", ");
else
ddlText.append(") row format delimited fields terminated by '|' location '")
.append(hbaTdb.getSampleLocation())
.append((const char*)hbaTdb.getTableName())
.append("/'");
hbaTdb.listOfUpdatedColNames()->advance();
}
}
ExWorkProcRetcode ExHbaseAccessBulkLoadPrepSQTcb::work()
{
Lng32 retcode = 0;
short rc = 0;
NABoolean eodSeen = false;
// Get the percentage of rows to include in the ustat sample table. A value of
// 0 indicates that no sample table is to be created.
double samplingRate = ((ExHbaseAccessTdb&)hbaseAccessTdb()).getSamplingRate();
while (!qparent_.down->isEmpty())
{
nextRequest_ = qparent_.down->getHeadIndex();
ex_queue_entry *pentry_down = qparent_.down->getHeadEntry();
if (pentry_down->downState.request == ex_queue::GET_NOMORE)
step_ = ALL_DONE;
else if (pentry_down->downState.request == ex_queue::GET_EOD &&
step_ != HANDLE_ERROR && lastHandledStep_ != HANDLE_ERROR) {
eodSeen = true;
if (currRowNum_ > rowsInserted_)
step_ = PROCESS_INSERT;
else
{
if (lastHandledStep_ == ALL_DONE)
matches_ = 0;
step_ = ALL_DONE;
}
}
switch (step_)
{
case NOT_STARTED:
{
matches_ = 0;
currRowNum_ = 0;
numRetries_ = 0;
hFileParamsInitialized_ = FALSE;
prevTailIndex_ = 0;
lastHandledStep_ = NOT_STARTED;
nextRequest_ = qparent_.down->getHeadIndex();
rowsInserted_ = 0;
step_ = INSERT_INIT;
}
break;
case INSERT_INIT:
{
retcode = ehi_->initHBLC(getHbaseAccessStats());
if (setupError(retcode, "ExpHbaseInterface::initHBLC"))
{
step_ = HANDLE_ERROR;
break;
}
table_.val = hbaseAccessTdb().getTableName();
table_.len = strlen(hbaseAccessTdb().getTableName());
short numCols = 0;
if (!hFileParamsInitialized_)
{
importLocation_= std::string(((ExHbaseAccessTdb&)hbaseAccessTdb()).getLoadPrepLocation()) +
((ExHbaseAccessTdb&)hbaseAccessTdb()).getTableName() ;
familyLocation_ = std::string(importLocation_ + "/#1");
Lng32 fileNum = getGlobals()->castToExExeStmtGlobals()->getMyInstanceNumber();
hFileName_ = std::string("hfile");
char hFileName[50];
snprintf(hFileName, 50, "hfile%d", fileNum);
hFileName_ = hFileName;
NAString hiveDDL;
NAString hiveSampleTblNm;
if (samplingRate > 0 && fileNum == 0) // master exec creates hive sample table
{
hiveSampleTblNm = ((ExHbaseAccessTdb&)hbaseAccessTdb()).getTableName();
TrafToHiveSampleTableName(hiveSampleTblNm);
getHiveCreateTableDDL(hiveSampleTblNm, hiveDDL);
}
retcode = ehi_->initHFileParams(table_, familyLocation_, hFileName_,
hbaseAccessTdb().getMaxHFileSize(),
hiveSampleTblNm.data(), hiveDDL.data());
hFileParamsInitialized_ = true;
if (samplingRate > 0)
{
// Seed random number generator (used to select rows to write to sample table).
srand(time(0));
// Set up HDFS file for sample table.
hdfs_ = hdfsConnect("default", 0);
Text samplePath = std::string(((ExHbaseAccessTdb&)hbaseAccessTdb()).getSampleLocation()) +
((ExHbaseAccessTdb&)hbaseAccessTdb()).getTableName() ;
char filePart[10];
sprintf(filePart, "/%d", fileNum);
samplePath.append(filePart);
hdfsSampleFile_ = hdfsOpenFile(hdfs_, samplePath.data(), O_WRONLY|O_CREAT, 0, 0, 0);
}
posVec_.clear();
hbaseAccessTdb().listOfUpdatedColNames()->position();
while (NOT hbaseAccessTdb().listOfUpdatedColNames()->atEnd())
{
UInt32 pos = *(UInt32*) hbaseAccessTdb().listOfUpdatedColNames()->getCurr();
posVec_.push_back(pos);
hbaseAccessTdb().listOfUpdatedColNames()->advance();
numCols++;
}
}
if (setupError(retcode, "ExpHbaseInterface::createHFile"))
{
step_ = HANDLE_ERROR;
break;
}
allocateDirectRowBufferForJNI(
numCols,
hbaseAccessTdb().getHbaseRowsetVsbbSize());
allocateDirectRowIDBufferForJNI(hbaseAccessTdb().getHbaseRowsetVsbbSize());
step_ = SETUP_INSERT;
}
break;
case SETUP_INSERT:
{
step_ = EVAL_INSERT_EXPR;
}
break;
case EVAL_INSERT_EXPR:
{
workAtp_->getTupp(hbaseAccessTdb().convertTuppIndex_)
.setDataPointer(convertRow_);
lastErrorCnd_ = NULL;
if (convertExpr())
{
insertRowlen_ = hbaseAccessTdb().convertRowLen_;
ex_expr::exp_return_type evalRetCode =
convertExpr()->eval(pentry_down->getAtp(), workAtp_,
NULL, -1, &insertRowlen_);
if (evalRetCode == ex_expr::EXPR_ERROR) {
if (hbaseAccessTdb().getContinueOnError()) {
if (pentry_down->getDiagsArea()) {
Lng32 errorCount = pentry_down->getDiagsArea()->getNumber(DgSqlCode::ERROR_);
lastErrorCnd_ = pentry_down->getDiagsArea()->getErrorEntry(errorCount);
}
step_ = HANDLE_EXCEPTION;
break;
}
else
{
step_ = HANDLE_ERROR;
break;
}
}
}
genAndAssignSyskey(hbaseAccessTdb().convertTuppIndex_, convertRow_);
step_ = EVAL_ROWID_EXPR;
}
break;
case EVAL_ROWID_EXPR:
{
if (evalRowIdExpr(TRUE) == -1)
{
step_ = HANDLE_ERROR;
break;
}
lastErrorCnd_ = NULL;
// Duplicates (same rowId) are not allowed in HFiles; adding duplicates
// causes HFile generation to fail with errors.
if (prevRowId_ == NULL)
{
prevRowId_ = new char[rowId_.len + 1];
memmove(prevRowId_, rowId_.val, rowId_.len);
}
else
{
// rows are supposed to be sorted by rowId; to detect duplicates,
// compare the current rowId to the previous one
if (memcmp(prevRowId_, rowId_.val, rowId_.len) == 0)
{
if (((ExHbaseAccessTdb&) hbaseAccessTdb()).getNoDuplicates() ||
((NOT ((ExHbaseAccessTdb&) hbaseAccessTdb()).getNoDuplicates()) &&
hbaseAccessTdb().getContinueOnError())) {
//8110 Duplicate rows detected.
ComDiagsArea * diagsArea = NULL;
ExRaiseSqlError(getHeap(), &diagsArea,
(ExeErrorCode)(8110));
pentry_down->setDiagsArea(diagsArea);
if (hbaseAccessTdb().getContinueOnError()) {
if (pentry_down->getDiagsArea()) {
Lng32 errorCount = pentry_down->getDiagsArea()->getNumber(DgSqlCode::ERROR_);
lastErrorCnd_ = pentry_down->getDiagsArea()->getErrorEntry(errorCount);
}
step_ = HANDLE_EXCEPTION;
break;
}
else {
step_ = HANDLE_ERROR;
break;
}
}
else
{
//skip duplicate
step_ = DONE;
break;
}
}
memmove(prevRowId_, rowId_.val, rowId_.len);
}
step_ = CREATE_MUTATIONS;
}
break;
case CREATE_MUTATIONS:
{
retcode = createDirectRowBuffer(
hbaseAccessTdb().convertTuppIndex_,
convertRow_,
hbaseAccessTdb().listOfUpdatedColNames(),
FALSE, //TRUE,
&posVec_,
samplingRate);
if (retcode == -1)
{
//need to re-verify error handling
step_ = HANDLE_ERROR;
break;
}
copyRowIDToDirectBuffer( rowId_);
currRowNum_++;
if (!hbaseAccessTdb().returnRow()) {
matches_++; // if we are returning a row, moveRowToUpQueue
// will increment matches_
}
else {
step_ = RETURN_ROW;
break ;
}
if (currRowNum_ < hbaseAccessTdb().getHbaseRowsetVsbbSize())
{
step_ = DONE;
break;
}
step_ = PROCESS_INSERT; // currRowNum_ == rowset size && we are not returning a row
}
break;
case PROCESS_INSERT:
{
numRowsInVsbbBuffer_ = patchDirectRowBuffers();
retcode = ehi_->addToHFile(hbaseAccessTdb().getRowIDLen(),
rowIDs_,
rows_);
if (setupError(retcode, "ExpHbaseInterface::addToHFile"))
{
step_ = HANDLE_ERROR;
break;
}
rowsInserted_ += numRowsInVsbbBuffer_;
if (getHbaseAccessStats())
{
getHbaseAccessStats()->lobStats()->numReadReqs++;
getHbaseAccessStats()->incUsedRows(numRowsInVsbbBuffer_);
}
step_ = ALL_DONE;
}
break;
case HANDLE_ERROR:
{
if (handleError(rc))
return rc;
lastHandledStep_ = HANDLE_ERROR;
eodSeen = true;
matches_ = 0;
step_ = ALL_DONE;
}
break;
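// A row-level error occurred and continue-on-error is in effect:
// count the bad row against the max-error-rows limit (tracked in the
// error count table), optionally log the row to the HDFS logging file,
// then clear the diagnostics and continue with the next row.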
case HANDLE_EXCEPTION:
{
if (hbaseAccessTdb().getMaxErrorRows() > 0)
{
Int64 exceptionCount = 0;
ExHbaseAccessTcb::incrErrorCount( ehi_,exceptionCount, hbaseAccessTdb().getErrCountTab(),
hbaseAccessTdb().getErrCountRowId());
if (exceptionCount > hbaseAccessTdb().getMaxErrorRows())
{
if (pentry_down->getDiagsArea())
pentry_down->getDiagsArea()->clear();
if (workAtp_->getDiagsArea())
workAtp_->getDiagsArea()->clear();
//8112 max number of error rows exceeded.
ComDiagsArea * diagsArea = NULL;
ExRaiseSqlError(getHeap(), &diagsArea,
(ExeErrorCode)(EXE_MAX_ERROR_ROWS_EXCEEDED));
pentry_down->setDiagsArea(diagsArea);
step_ = HANDLE_ERROR;
break;
}
}
if (hbaseAccessTdb().getLogErrorRows())
{
workAtp_->getTupp(hbaseAccessTdb().updateTuppIndex_).setDataPointer(updateRow_);
if (updateExpr())
{
ex_expr::exp_return_type evalRetCode =
updateExpr()->eval(pentry_down->getAtp(), workAtp_);
if (evalRetCode == ex_expr::EXPR_ERROR)
{
step_ = HANDLE_ERROR;
break;
}
}
int loggingRowLen = 0;
createLoggingRow( hbaseAccessTdb().updateTuppIndex_, updateRow_,
loggingRow_ , loggingRowLen);
ExHbaseAccessTcb::handleException((NAHeap *)getHeap(), loggingRow_, loggingRowLen,
lastErrorCnd_,
ehi_,
LoggingFileCreated_,
loggingFileName_);
}
if (pentry_down->getDiagsArea())
pentry_down->getDiagsArea()->clear();
if (workAtp_->getDiagsArea())
workAtp_->getDiagsArea()->clear();
step_ = DONE;
}
break;
case RETURN_ROW:
{
if (qparent_.up->isFull())
return WORK_OK;
if (returnUpdateExpr())
{
ex_queue_entry * up_entry = qparent_.up->getTailEntry();
// allocate tupps where returned rows will be created
if (allocateUpEntryTupps(
-1,
0,
hbaseAccessTdb().returnedTuppIndex_,
hbaseAccessTdb().returnUpdatedRowLen_,
FALSE,
&rc))
return rc;
ex_expr::exp_return_type exprRetCode =
returnUpdateExpr()->eval(up_entry->getAtp(), workAtp_);
if (exprRetCode == ex_expr::EXPR_ERROR)
{
step_ = HANDLE_ERROR;
break;
}
rc = 0;
// moveRowToUpQueue also increments matches_
if (moveRowToUpQueue(&rc))
return rc;
}
else
{
rc = 0;
// moveRowToUpQueue also increments matches_
if (moveRowToUpQueue(convertRow_, hbaseAccessTdb().convertRowLen(),
&rc, FALSE))
return rc;
}
if (currRowNum_ < hbaseAccessTdb().getHbaseRowsetVsbbSize())
step_ = DONE;
else
step_ = PROCESS_INSERT;
break;
}
case DONE:
case ALL_DONE:
{
if (handleDone(rc, (step_ == ALL_DONE ? matches_ : 0)))
return rc;
lastHandledStep_ = step_;
if (step_ == DONE)
step_ = SETUP_INSERT;
else
{
step_ = NOT_STARTED;
if (eodSeen)
{
ehi_->closeHFile(table_);
ehi_->hdfsClose();
hFileParamsInitialized_ = false;
retcode = ehi_->close();
}
}
}
break;
} // switch
} // while
return WORK_OK;
}
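// Format the row in tuppRow as a '|'-delimited, newline-terminated text
// record in targetRow, for logging rejected rows. NULL columns are
// written as empty fields (a bare delimiter).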
short ExHbaseAccessBulkLoadPrepSQTcb::createLoggingRow( UInt16 tuppIndex, char * tuppRow, char * targetRow, int &targetRowLen)
{
ExpTupleDesc * rowTD =
hbaseAccessTdb().workCriDesc_->getTupleDescriptor
(tuppIndex);
short colValLen;
char *colVal;
char * tmpTargetRow = targetRow;
for (Lng32 i = 0; i < rowTD->numAttrs(); i++)
{
Attributes * attr = rowTD->getAttr(i);
if (attr)
{
colVal = &tuppRow[attr->getOffset()];
if (attr->getNullFlag() &&
(*(short*)&tuppRow[attr->getNullIndOffset()]))
{
targetRow[0] = '|';
targetRow++;
}
else
{
colValLen = attr->getLength(&tuppRow[attr->getVCLenIndOffset()]);
memcpy(targetRow,colVal, colValLen);
targetRow +=colValLen;
if (i != rowTD->numAttrs() -1)
targetRow[0] = '|';
else
targetRow[0] = '\n';
targetRow++;
}
}
else
{
ex_assert(false, "Unable to obtain column descriptor");
}
} // for
targetRowLen = targetRow - tmpTargetRow;
return 0;
}
// UMD (unique UpdMergeDel on Trafodion tables)
ExHbaseUMDtrafUniqueTaskTcb::ExHbaseUMDtrafUniqueTaskTcb
(ExHbaseAccessUMDTcb * tcb)
: ExHbaseTaskTcb(tcb)
, step_(NOT_STARTED)
{
latestRowTimestamp_ = -1;
columnToCheck_.val = (char *)(new (tcb->getHeap()) BYTE[MAX_COLNAME_LEN]);
columnToCheck_.len = MAX_COLNAME_LEN;
colValToCheck_.val = (char *)(new (tcb->getHeap()) BYTE[tcb->hbaseAccessTdb().getRowIDLen()]);
colValToCheck_.len = tcb->hbaseAccessTdb().getRowIDLen();
}
void ExHbaseUMDtrafUniqueTaskTcb::init()
{
step_ = NOT_STARTED;
}
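// Unique update/merge/delete on a Trafodion table: for each rowId in
// rowIds_, fetch the row, apply the scan predicate, then delete, update
// (possibly via checkAndUpdateRow/checkAndDeleteRow), or merge-insert
// it, optionally returning the fetched/updated row images to the parent.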
ExWorkProcRetcode ExHbaseUMDtrafUniqueTaskTcb::work(short &rc)
{
Lng32 retcode = 0;
rc = 0;
while (1)
{
ex_queue_entry *pentry_down = tcb_->qparent_.down->getHeadEntry();
switch (step_)
{
case NOT_STARTED:
{
rowUpdated_ = FALSE;
latestRowTimestamp_ = -1;
step_ = SETUP_UMD;
}
break;
case SETUP_UMD:
{
tcb_->currRowidIdx_ = 0;
step_ = GET_NEXT_ROWID;
}
break;
case GET_NEXT_ROWID:
{
if (tcb_->currRowidIdx_ == tcb_->rowIds_.entries())
{
step_ = GET_CLOSE;
break;
}
if ((tcb_->hbaseAccessTdb().getAccessType() == ComTdbHbaseAccess::DELETE_) &&
(tcb_->hbaseAccessTdb().canDoCheckAndUpdel()))
{
if (tcb_->hbaseAccessTdb().hbaseSqlIUD())
step_ = CHECK_AND_DELETE_ROW;
else
step_ = DELETE_ROW;
break;
}
else if ((tcb_->hbaseAccessTdb().getAccessType() == ComTdbHbaseAccess::UPDATE_) &&
(tcb_->hbaseAccessTdb().canDoCheckAndUpdel()))
{
step_ = CREATE_UPDATED_ROW;
break;
}
retcode = tcb_->ehi_->getRowOpen( tcb_->table_,
tcb_->rowIds_[tcb_->currRowidIdx_],
tcb_->columns_, -1);
if ( tcb_->setupError(retcode, "ExpHbaseInterface::getRowOpen"))
step_ = HANDLE_ERROR;
else
step_ = NEXT_ROW;
}
break;
case NEXT_ROW:
{
retcode = tcb_->ehi_->nextRow();
if ( (retcode == HBASE_ACCESS_EOD) || (retcode == HBASE_ACCESS_EOR) )
{
if (tcb_->hbaseAccessTdb().getAccessType() == ComTdbHbaseAccess::MERGE_)
{
// didn't find the row, cannot update.
// evaluate the mergeInsert expr and insert the row.
step_ = CREATE_MERGE_INSERTED_ROW;
break;
}
tcb_->currRowidIdx_++;
step_ = GET_NEXT_ROWID;
break;
}
if ( tcb_->setupError(retcode, "ExpHbaseInterface::nextRow"))
step_ = HANDLE_ERROR;
else if ((tcb_->hbaseAccessTdb().getAccessType() == ComTdbHbaseAccess::DELETE_) &&
(! tcb_->scanExpr()) &&
(NOT tcb_->hbaseAccessTdb().returnRow()))
step_ = DELETE_ROW;
else
step_ = CREATE_FETCHED_ROW;
}
break;
case CREATE_FETCHED_ROW:
{
retcode = tcb_->createSQRowDirect(&latestRowTimestamp_);
if (retcode == HBASE_ACCESS_NO_ROW)
{
step_ = NEXT_ROW;
break;
}
if (retcode < 0)
{
rc = (short)retcode;
tcb_->setupError(rc, "createSQRowDirect");
step_ = HANDLE_ERROR;
break;
}
if (retcode != HBASE_ACCESS_SUCCESS)
{
step_ = HANDLE_ERROR;
break;
}
step_ = APPLY_PRED;
}
break;
case APPLY_PRED:
{
rc = tcb_->applyPred(tcb_->scanExpr());
if (rc == 1)
{
if (tcb_->hbaseAccessTdb().getAccessType() == ComTdbHbaseAccess::DELETE_)
step_ = DELETE_ROW;
else if ((tcb_->hbaseAccessTdb().getAccessType() == ComTdbHbaseAccess::MERGE_) &&
(tcb_->mergeUpdScanExpr()))
step_ = APPLY_MERGE_UPD_SCAN_PRED;
else
step_ = CREATE_UPDATED_ROW;
}
else if (rc == -1)
step_ = HANDLE_ERROR;
else
{
if (tcb_->hbaseAccessTdb().getAccessType() == ComTdbHbaseAccess::MERGE_)
{
// didn't find the row, cannot update.
// evaluate the mergeInsert expr and insert the row.
step_ = CREATE_MERGE_INSERTED_ROW;
break;
}
tcb_->currRowidIdx_++;
step_ = GET_NEXT_ROWID;
}
}
break;
case APPLY_MERGE_UPD_SCAN_PRED:
{
rc = tcb_->applyPred(tcb_->mergeUpdScanExpr());
if (rc == 1)
{
step_ = CREATE_UPDATED_ROW;
}
else if (rc == -1)
step_ = HANDLE_ERROR;
else
{
tcb_->currRowidIdx_++;
step_ = GET_NEXT_ROWID;
}
}
break;
case CREATE_UPDATED_ROW:
{
if (! tcb_->updateExpr())
{
tcb_->currRowidIdx_++;
step_ = GET_NEXT_ROWID;
break;
}
tcb_->workAtp_->getTupp(tcb_->hbaseAccessTdb().updateTuppIndex_)
.setDataPointer(tcb_->updateRow_);
if (tcb_->updateExpr())
{
tcb_->insertRowlen_ = tcb_->hbaseAccessTdb().updateRowLen_;
ex_expr::exp_return_type evalRetCode =
tcb_->updateExpr()->eval(pentry_down->getAtp(), tcb_->workAtp_,
NULL, -1, &tcb_->insertRowlen_);
if (evalRetCode == ex_expr::EXPR_ERROR)
{
step_ = HANDLE_ERROR;
break;
}
}
step_ = EVAL_CONSTRAINT;
}
break;
case EVAL_CONSTRAINT:
{
rc = tcb_->applyPred(tcb_->mergeUpdScanExpr());
if (rc == 1) // expr is true or no expr
step_ = CREATE_MUTATIONS;
else if (rc == 0) // expr is false
step_ = NEXT_ROW_AFTER_UPDATE;
else // error
step_ = HANDLE_ERROR;
}
break;
case CREATE_MUTATIONS:
{
rowUpdated_ = TRUE;
// Merge can result in inserting rows.
// Use the number of columns in the insert rather than the number
// of columns in the update if an insert is involved in this tcb.
if (tcb_->hbaseAccessTdb().getAccessType()
== ComTdbHbaseAccess::MERGE_)
{
ExpTupleDesc * rowTD = NULL;
if (tcb_->mergeInsertExpr())
{
rowTD = tcb_->hbaseAccessTdb().workCriDesc_->getTupleDescriptor
(tcb_->hbaseAccessTdb().mergeInsertTuppIndex_);
}
else
{
rowTD = tcb_->hbaseAccessTdb().workCriDesc_->getTupleDescriptor
(tcb_->hbaseAccessTdb().updateTuppIndex_);
}
if (rowTD->numAttrs() > 0)
tcb_->allocateDirectRowBufferForJNI(rowTD->numAttrs());
}
retcode = tcb_->createDirectRowBuffer( tcb_->hbaseAccessTdb().updateTuppIndex_,
tcb_->updateRow_,
tcb_->hbaseAccessTdb().listOfUpdatedColNames(),
TRUE);
if (retcode == -1)
{
step_ = HANDLE_ERROR;
break;
}
if (tcb_->hbaseAccessTdb().canDoCheckAndUpdel())
step_ = CHECK_AND_UPDATE_ROW;
else
step_ = UPDATE_ROW;
}
break;
case CREATE_MERGE_INSERTED_ROW:
{
if (! tcb_->mergeInsertExpr())
{
tcb_->currRowidIdx_++;
step_ = GET_NEXT_ROWID;
break;
}
tcb_->workAtp_->getTupp(tcb_->hbaseAccessTdb().mergeInsertTuppIndex_)
.setDataPointer(tcb_->mergeInsertRow_);
if (tcb_->mergeInsertExpr())
{
tcb_->insertRowlen_ = tcb_->hbaseAccessTdb().mergeInsertRowLen_;
ex_expr::exp_return_type evalRetCode =
tcb_->mergeInsertExpr()->eval(pentry_down->getAtp(), tcb_->workAtp_,
NULL, -1, &tcb_->insertRowlen_);
if (evalRetCode == ex_expr::EXPR_ERROR)
{
step_ = HANDLE_ERROR;
break;
}
}
if (tcb_->hbaseAccessTdb().getAccessType() == ComTdbHbaseAccess::MERGE_)
rowUpdated_ = FALSE;
retcode = tcb_->createDirectRowBuffer( tcb_->hbaseAccessTdb().mergeInsertTuppIndex_,
tcb_->mergeInsertRow_,
tcb_->hbaseAccessTdb().listOfMergedColNames());
if (retcode == -1)
{
step_ = HANDLE_ERROR;
break;
}
if (tcb_->hbaseAccessTdb().getAccessType() == ComTdbHbaseAccess::MERGE_)
step_ = CHECK_AND_INSERT_ROW;
else
step_ = UPDATE_ROW;
}
break;
case UPDATE_ROW:
{
retcode = tcb_->ehi_->insertRow(tcb_->table_,
tcb_->rowIds_[tcb_->currRowidIdx_],
tcb_->row_,
(tcb_->hbaseAccessTdb().useHbaseXn() ? TRUE : FALSE),
-1, //colTS_
tcb_->asyncOperation_);
if ( tcb_->setupError(retcode, "ExpHbaseInterface::insertRow"))
{
step_ = HANDLE_ERROR;
break;
}
if (tcb_->getHbaseAccessStats())
tcb_->getHbaseAccessStats()->incUsedRows();
// matches will get incremented during return row.
if (NOT tcb_->hbaseAccessTdb().returnRow())
tcb_->matches_++;
step_ = NEXT_ROW_AFTER_UPDATE;
}
break;
case CHECK_AND_UPDATE_ROW:
{
rc = tcb_->evalKeyColValExpr(columnToCheck_, colValToCheck_);
if (rc == -1)
{
step_ = HANDLE_ERROR;
break;
}
retcode = tcb_->ehi_->checkAndUpdateRow(tcb_->table_,
tcb_->rowIds_[tcb_->currRowidIdx_],
tcb_->row_,
columnToCheck_,
colValToCheck_,
tcb_->hbaseAccessTdb().useHbaseXn(),
-1, //colTS_
tcb_->asyncOperation_);
if (retcode == HBASE_ROW_NOTFOUND_ERROR)
{
step_ = NEXT_ROW_AFTER_UPDATE;
break;
}
if ( tcb_->setupError(retcode, "ExpHbaseInterface::checkAndUpdateRow"))
{
step_ = HANDLE_ERROR;
break;
}
if (tcb_->getHbaseAccessStats())
tcb_->getHbaseAccessStats()->incUsedRows();
// matches will get incremented during return row.
if (NOT tcb_->hbaseAccessTdb().returnRow())
tcb_->matches_++;
step_ = NEXT_ROW_AFTER_UPDATE;
}
break;
case CHECK_AND_INSERT_ROW:
{
Text rowIdRow;
if (tcb_->mergeInsertRowIdExpr())
{
tcb_->workAtp_->getTupp(tcb_->hbaseAccessTdb().mergeInsertRowIdTuppIndex_)
.setDataPointer(tcb_->rowIdRow_);
ex_expr::exp_return_type evalRetCode =
tcb_->mergeInsertRowIdExpr()->eval(pentry_down->getAtp(), tcb_->workAtp_);
if (evalRetCode == ex_expr::EXPR_ERROR)
{
step_ = HANDLE_ERROR;
break;
}
rowIdRow.assign(tcb_->rowIdRow_, tcb_->hbaseAccessTdb().getRowIDLen());
}
HbaseStr rowID;
if (tcb_->mergeInsertRowIdExpr())
{
rowID.val = (char *)rowIdRow.data();
rowID.len = rowIdRow.size();
}
else
{
rowID.val = (char *)tcb_->rowIds_[tcb_->currRowidIdx_].val;
rowID.len = tcb_->rowIds_[tcb_->currRowidIdx_].len;
}
retcode = tcb_->ehi_->checkAndInsertRow(tcb_->table_,
rowID,
tcb_->row_,
tcb_->hbaseAccessTdb().useHbaseXn(),
-1, // colTS
tcb_->asyncOperation_);
if (retcode == HBASE_DUP_ROW_ERROR)
{
ComDiagsArea * diagsArea = NULL;
ExRaiseSqlError(tcb_->getHeap(), &diagsArea,
(ExeErrorCode)(8102));
pentry_down->setDiagsArea(diagsArea);
step_ = HANDLE_ERROR;
break;
}
else if (tcb_->setupError(retcode, "ExpHbaseInterface::insertRow"))
{
step_ = HANDLE_ERROR;
break;
}
if (tcb_->getHbaseAccessStats())
tcb_->getHbaseAccessStats()->incUsedRows();
// matches will get incremented during return row.
if (NOT tcb_->hbaseAccessTdb().returnRow())
tcb_->matches_++;
step_ = NEXT_ROW_AFTER_UPDATE;
}
break;
case NEXT_ROW_AFTER_UPDATE:
{
tcb_->currRowidIdx_++;
if (tcb_->hbaseAccessTdb().returnRow())
{
step_ = EVAL_RETURN_ROW_EXPRS;
break;
}
step_ = GET_NEXT_ROWID;
}
break;
case DELETE_ROW:
{
rc = tcb_->evalDeletePreCondExpr();
if (rc == -1) {
step_ = HANDLE_ERROR;
break;
}
if (rc == 0) { // No need to delete
tcb_->currRowidIdx_++;
step_ = GET_NEXT_ROWID;
break;
}
retcode = tcb_->ehi_->deleteRow(tcb_->table_,
tcb_->rowIds_[tcb_->currRowidIdx_],
NULL,
tcb_->hbaseAccessTdb().useHbaseXn(),
latestRowTimestamp_,
tcb_->asyncOperation_);
if ( tcb_->setupError(retcode, "ExpHbaseInterface::deleteRow"))
{
step_ = HANDLE_ERROR;
break;
}
if (tcb_->getHbaseAccessStats())
tcb_->getHbaseAccessStats()->incUsedRows();
tcb_->currRowidIdx_++;
if (tcb_->hbaseAccessTdb().returnRow())
{
step_ = RETURN_ROW;
break;
}
tcb_->matches_++;
step_ = GET_NEXT_ROWID;
}
break;
case CHECK_AND_DELETE_ROW:
{
rc = tcb_->evalDeletePreCondExpr();
if (rc == -1) {
step_ = HANDLE_ERROR;
break;
}
if (rc == 0) { // do not delete
tcb_->currRowidIdx_++;
step_ = GET_NEXT_ROWID;
break;
}
rc = tcb_->evalKeyColValExpr(columnToCheck_, colValToCheck_);
if (rc == -1)
{
step_ = HANDLE_ERROR;
break;
}
retcode = tcb_->ehi_->checkAndDeleteRow(tcb_->table_,
tcb_->rowIds_[tcb_->currRowidIdx_],
columnToCheck_,
colValToCheck_,
tcb_->hbaseAccessTdb().useHbaseXn(),
-1 //colTS_
);
if (retcode == HBASE_ROW_NOTFOUND_ERROR)
{
tcb_->currRowidIdx_++;
step_ = GET_NEXT_ROWID;
break;
}
if ( tcb_->setupError(retcode, "ExpHbaseInterface::checkAndDeleteRow"))
{
step_ = HANDLE_ERROR;
break;
}
tcb_->currRowidIdx_++;
if (tcb_->getHbaseAccessStats())
tcb_->getHbaseAccessStats()->incUsedRows();
if (tcb_->hbaseAccessTdb().returnRow())
{
step_ = RETURN_ROW;
break;
}
tcb_->matches_++;
step_ = GET_NEXT_ROWID;
}
break;
case RETURN_ROW:
{
if (tcb_->qparent_.up->isFull())
{
rc = WORK_OK;
return 1;
}
rc = 0;
// moveRowToUpQueue also increments matches_
if (tcb_->moveRowToUpQueue(tcb_->convertRow_, tcb_->hbaseAccessTdb().convertRowLen(),
&rc, FALSE))
return 1;
if ((pentry_down->downState.request == ex_queue::GET_N) &&
(pentry_down->downState.requestValue == tcb_->matches_))
{
step_ = GET_CLOSE;
break;
}
step_ = GET_NEXT_ROWID;
}
break;
case EVAL_RETURN_ROW_EXPRS:
{
ex_queue_entry * up_entry = tcb_->qparent_.up->getTailEntry();
rc = 0;
// allocate tupps where returned rows will be created
if (tcb_->allocateUpEntryTupps(
tcb_->hbaseAccessTdb().returnedFetchedTuppIndex_,
tcb_->hbaseAccessTdb().returnFetchedRowLen_,
tcb_->hbaseAccessTdb().returnedUpdatedTuppIndex_,
tcb_->hbaseAccessTdb().returnUpdatedRowLen_,
FALSE,
&rc))
return 1;
ex_expr::exp_return_type exprRetCode;
char * fetchedDataPtr = NULL;
char * updatedDataPtr = NULL;
if (tcb_->returnFetchExpr())
{
exprRetCode =
tcb_->returnFetchExpr()->eval(up_entry->getAtp(), tcb_->workAtp_);
if (exprRetCode == ex_expr::EXPR_ERROR)
{
step_ = HANDLE_ERROR;
break;
}
fetchedDataPtr = up_entry->getAtp()->getTupp(tcb_->hbaseAccessTdb().returnedFetchedTuppIndex_).getDataPointer();
}
if (rowUpdated_)
{
if (tcb_->returnUpdateExpr())
{
exprRetCode =
tcb_->returnUpdateExpr()->eval(up_entry->getAtp(), tcb_->workAtp_);
if (exprRetCode == ex_expr::EXPR_ERROR)
{
step_ = HANDLE_ERROR;
break;
}
updatedDataPtr =
up_entry->getAtp()->getTupp(tcb_->hbaseAccessTdb().returnedUpdatedTuppIndex_).getDataPointer();
}
}
else
{
if (tcb_->returnMergeInsertExpr())
{
exprRetCode =
tcb_->returnMergeInsertExpr()->eval(up_entry->getAtp(), tcb_->workAtp_);
if (exprRetCode == ex_expr::EXPR_ERROR)
{
step_ = HANDLE_ERROR;
break;
}
updatedDataPtr =
up_entry->getAtp()->getTupp(tcb_->hbaseAccessTdb().returnedUpdatedTuppIndex_).getDataPointer();
}
}
step_ = RETURN_UPDATED_ROWS;
}
break;
case RETURN_UPDATED_ROWS:
{
rc = 0;
// moveRowToUpQueue also increments matches_
if (tcb_->moveRowToUpQueue(&rc))
return 1;
if ((pentry_down->downState.request == ex_queue::GET_N) &&
(pentry_down->downState.requestValue == tcb_->matches_))
{
step_ = GET_CLOSE;
break;
}
step_ = GET_NEXT_ROWID;
}
break;
case GET_CLOSE:
{
retcode = tcb_->ehi_->getClose();
if ( tcb_->setupError(retcode, "ExpHbaseInterface::getClose"))
step_ = HANDLE_ERROR;
else
step_ = DONE;
}
break;
case HANDLE_ERROR:
{
step_ = NOT_STARTED;
return -1;
}
break;
case DONE:
{
step_ = NOT_STARTED;
return 0;
}
break;
}// switch
} // while
}
// UMD (unique UpdMergeDel on native hbase tables; Merge is not supported yet)
ExHbaseUMDnativeUniqueTaskTcb::ExHbaseUMDnativeUniqueTaskTcb
(ExHbaseAccessUMDTcb * tcb)
: ExHbaseUMDtrafUniqueTaskTcb(tcb)
, step_(NOT_STARTED)
{
}
void ExHbaseUMDnativeUniqueTaskTcb::init()
{
step_ = NOT_STARTED;
}
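// Native HBase variant of the unique task: the fetched row is assembled
// cell by cell (nextCell/copyCell) into rowwise format before predicates
// are applied, and updates go through a rowwise direct buffer.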
ExWorkProcRetcode ExHbaseUMDnativeUniqueTaskTcb::work(short &rc)
{
Lng32 retcode = 0;
rc = 0;
while (1)
{
ex_queue_entry *pentry_down = tcb_->qparent_.down->getHeadEntry();
switch (step_)
{
case NOT_STARTED:
{
rowUpdated_ = FALSE;
step_ = SETUP_UMD;
}
break;
case SETUP_UMD:
{
tcb_->currRowidIdx_ = 0;
tcb_->setupListOfColNames(tcb_->hbaseAccessTdb().listOfDeletedColNames(),
tcb_->deletedColumns_);
tcb_->setupListOfColNames(tcb_->hbaseAccessTdb().listOfFetchedColNames(),
tcb_->columns_);
step_ = GET_NEXT_ROWID;
}
break;
case GET_NEXT_ROWID:
{
if (tcb_->currRowidIdx_ == tcb_->rowIds_.entries())
{
step_ = GET_CLOSE;
break;
}
// Retrieve the columns to be deleted. If none of the columns exist,
// this row cannot be deleted.
// But if there is a scan expr, we also need to retrieve the columns
// used in the pred, so add those.
LIST(HbaseStr) columns(tcb_->getHeap());
if (tcb_->hbaseAccessTdb().getAccessType() == ComTdbHbaseAccess::DELETE_)
{
columns = tcb_->deletedColumns_;
if (tcb_->scanExpr())
{
// retrieve all columns if none is specified.
if (tcb_->columns_.entries() == 0)
columns.clear();
else
// append retrieved columns to deleted columns.
columns.insert(tcb_->columns_);
}
}
retcode = tcb_->ehi_->getRowOpen( tcb_->table_,
tcb_->rowIds_[tcb_->currRowidIdx_],
columns, -1);
if ( tcb_->setupError(retcode, "ExpHbaseInterface::getRowOpen"))
step_ = HANDLE_ERROR;
else
step_ = NEXT_ROW;
}
break;
case NEXT_ROW:
{
retcode = tcb_->ehi_->nextRow();
if (retcode == HBASE_ACCESS_EOD || retcode == HBASE_ACCESS_EOR)
{
step_ = GET_CLOSE;
break;
}
if (tcb_->setupError(retcode, "ExpHbaseInterface::nextRow"))
step_ = HANDLE_ERROR;
else
step_ = NEXT_CELL;
}
break;
case NEXT_CELL:
{
if (tcb_->colVal_.val == NULL)
tcb_->colVal_.val = new (tcb_->getHeap())
char[tcb_->hbaseAccessTdb().convertRowLen()];
tcb_->colVal_.len = tcb_->hbaseAccessTdb().convertRowLen();
retcode = tcb_->ehi_->nextCell(tcb_->rowId_, tcb_->colFamName_,
tcb_->colName_, tcb_->colVal_, tcb_->colTS_);
if (retcode == HBASE_ACCESS_EOD)
{
if ((tcb_->hbaseAccessTdb().getAccessType()
== ComTdbHbaseAccess::DELETE_) && (! tcb_->scanExpr()))
step_ = DELETE_ROW;
else
step_ = CREATE_FETCHED_ROW;
break;
}
if (tcb_->setupError(retcode, "ExpHbaseInterface::nextCell"))
step_ = HANDLE_ERROR;
else
step_ = APPEND_CELL_TO_ROW;
}
break;
case APPEND_CELL_TO_ROW:
{
tcb_->copyCell();
step_ = NEXT_CELL;
}
break;
case CREATE_FETCHED_ROW:
{
rc = tcb_->createRowwiseRow();
if (rc < 0)
{
if (rc != -1)
tcb_->setupError(rc, "createRowwiseRow");
step_ = HANDLE_ERROR;
break;
}
step_ = APPLY_PRED;
}
break;
case APPLY_PRED:
{
rc = tcb_->applyPred(tcb_->scanExpr());
if (rc == 1)
{
if (tcb_->hbaseAccessTdb().getAccessType() == ComTdbHbaseAccess::DELETE_)
step_ = DELETE_ROW;
else
step_ = CREATE_UPDATED_ROW;
}
else if (rc == -1)
step_ = HANDLE_ERROR;
else
{
tcb_->currRowidIdx_++;
step_ = GET_NEXT_ROWID;
}
}
break;
case CREATE_UPDATED_ROW:
{
if (! tcb_->updateExpr())
{
tcb_->currRowidIdx_++;
step_ = GET_NEXT_ROWID;
break;
}
tcb_->workAtp_->getTupp(tcb_->hbaseAccessTdb().updateTuppIndex_)
.setDataPointer(tcb_->updateRow_);
if (tcb_->updateExpr())
{
ex_expr::exp_return_type evalRetCode =
tcb_->updateExpr()->eval(pentry_down->getAtp(), tcb_->workAtp_,
NULL, -1, &tcb_->insertRowlen_);
if (evalRetCode == ex_expr::EXPR_ERROR)
{
step_ = HANDLE_ERROR;
break;
}
}
ExpTupleDesc * rowTD =
tcb_->hbaseAccessTdb().workCriDesc_->getTupleDescriptor
(tcb_->hbaseAccessTdb().updateTuppIndex_);
Attributes * attr = rowTD->getAttr(0);
rowUpdated_ = TRUE;
retcode = tcb_->createDirectRowwiseBuffer(
&tcb_->updateRow_[attr->getOffset()]);
if (retcode == -1)
{
step_ = HANDLE_ERROR;
break;
}
step_ = UPDATE_ROW;
}
break;
case DELETE_ROW:
{
retcode = tcb_->ehi_->deleteRow(tcb_->table_,
tcb_->rowIds_[tcb_->currRowidIdx_],
&tcb_->deletedColumns_,
tcb_->hbaseAccessTdb().useHbaseXn(),
-1 ,
tcb_->asyncOperation_);
if ( tcb_->setupError(retcode, "ExpHbaseInterface::deleteRow"))
{
step_ = HANDLE_ERROR;
break;
}
tcb_->currRowidIdx_++;
if (tcb_->getHbaseAccessStats())
tcb_->getHbaseAccessStats()->incUsedRows();
tcb_->matches_++;
step_ = GET_NEXT_ROWID;
}
break;
case UPDATE_ROW:
{
if (tcb_->numColsInDirectBuffer() > 0)
{
retcode = tcb_->ehi_->insertRow(tcb_->table_,
tcb_->rowIds_[tcb_->currRowidIdx_],
tcb_->row_,
tcb_->hbaseAccessTdb().useHbaseXn(),
-1, // colTS_
tcb_->asyncOperation_);
if ( tcb_->setupError(retcode, "ExpHbaseInterface::insertRow"))
{
step_ = HANDLE_ERROR;
break;
}
if (tcb_->getHbaseAccessStats())
tcb_->getHbaseAccessStats()->incUsedRows();
tcb_->matches_++;
}
tcb_->currRowidIdx_++;
step_ = GET_NEXT_ROWID;
}
break;
case GET_CLOSE:
{
retcode = tcb_->ehi_->getClose();
if ( tcb_->setupError(retcode, "ExpHbaseInterface::getClose"))
step_ = HANDLE_ERROR;
else
step_ = DONE;
}
break;
case HANDLE_ERROR:
{
step_ = NOT_STARTED;
return -1;
}
break;
case DONE:
{
step_ = NOT_STARTED;
return 0;
}
break;
}// switch
} // while
}
ExHbaseUMDtrafSubsetTaskTcb::ExHbaseUMDtrafSubsetTaskTcb
(ExHbaseAccessUMDTcb * tcb)
: ExHbaseTaskTcb(tcb)
, step_(NOT_STARTED)
{
}
void ExHbaseUMDtrafSubsetTaskTcb::init()
{
step_ = NOT_STARTED;
}
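// Subset (range) update/merge/delete on a Trafodion table: scan the
// [beginRowId_, endRowId_] range and update or delete each qualifying
// row. At most batchSize_ rows are processed per invocation; after that
// the task yields with WORK_CALL_AGAIN.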
ExWorkProcRetcode ExHbaseUMDtrafSubsetTaskTcb::work(short &rc)
{
Lng32 retcode = 0;
HbaseStr rowID;
rc = 0;
Lng32 remainingInBatch = batchSize_;
while (1)
{
ex_queue_entry *pentry_down = tcb_->qparent_.down->getHeadEntry();
switch (step_)
{
case NOT_STARTED:
{
step_ = SCAN_OPEN;
}
break;
case SCAN_OPEN:
{
// Pre-fetch is disabled because it interferes with
// Delete operations
retcode = tcb_->ehi_->scanOpen(tcb_->table_,
tcb_->beginRowId_, tcb_->endRowId_,
tcb_->columns_, -1,
tcb_->hbaseAccessTdb().readUncommittedScan(),
tcb_->hbaseAccessTdb().getHbasePerfAttributes()->cacheBlocks(),
tcb_->hbaseAccessTdb().getHbasePerfAttributes()->numCacheRows(),
FALSE, NULL, NULL, NULL);
if (tcb_->setupError(retcode, "ExpHbaseInterface::scanOpen"))
step_ = HANDLE_ERROR;
else
step_ = NEXT_ROW;
}
break;
case NEXT_ROW:
{
if (--remainingInBatch <= 0)
{
rc = WORK_CALL_AGAIN;
return 1;
}
retcode = tcb_->ehi_->nextRow();
if (retcode == HBASE_ACCESS_EOD || retcode == HBASE_ACCESS_EOR)
{
step_ = SCAN_CLOSE;
break;
}
if (tcb_->setupError(retcode, "ExpHbaseInterface::nextRow"))
{
step_ = HANDLE_ERROR;
break;
}
if (tcb_->hbaseAccessTdb().getAccessType() == ComTdbHbaseAccess::DELETE_)
{
if ((! tcb_->scanExpr()) &&
(NOT tcb_->hbaseAccessTdb().returnRow()))
{
step_ = DELETE_ROW;
break;
}
}
step_ = CREATE_FETCHED_ROW;
}
break;
case CREATE_FETCHED_ROW:
{
retcode = tcb_->createSQRowDirect();
if (retcode == HBASE_ACCESS_NO_ROW)
{
step_ = NEXT_ROW;
break;
}
if (retcode < 0)
{
rc = (short)retcode;
tcb_->setupError(rc, "createSQRowDirect");
step_ = HANDLE_ERROR;
break;
}
if (retcode != HBASE_ACCESS_SUCCESS)
{
step_ = HANDLE_ERROR;
break;
}
step_ = APPLY_PRED;
}
break;
case APPLY_PRED:
{
rc = tcb_->applyPred(tcb_->scanExpr());
if (rc == 1)
{
if (tcb_->hbaseAccessTdb().getAccessType() == ComTdbHbaseAccess::DELETE_)
step_ = DELETE_ROW;
else
step_ = CREATE_UPDATED_ROW;
}
else if (rc == -1)
step_ = HANDLE_ERROR;
else
step_ = NEXT_ROW;
}
break;
case CREATE_UPDATED_ROW:
{
tcb_->workAtp_->getTupp(tcb_->hbaseAccessTdb().updateTuppIndex_)
.setDataPointer(tcb_->updateRow_);
if (tcb_->updateExpr())
{
tcb_->insertRowlen_ = tcb_->hbaseAccessTdb().updateRowLen_;
ex_expr::exp_return_type evalRetCode =
tcb_->updateExpr()->eval(pentry_down->getAtp(), tcb_->workAtp_,
NULL, -1, &tcb_->insertRowlen_);
if (evalRetCode == ex_expr::EXPR_ERROR)
{
step_ = HANDLE_ERROR;
break;
}
}
step_ = EVAL_CONSTRAINT;
}
break;
case EVAL_CONSTRAINT:
{
rc = tcb_->applyPred(tcb_->mergeUpdScanExpr(),
tcb_->hbaseAccessTdb().updateTuppIndex_, tcb_->updateRow_);
if (rc == 1) // expr is true or no expr
step_ = CREATE_MUTATIONS;
else if (rc == 0) // expr is false
step_ = NEXT_ROW;
else // error
step_ = HANDLE_ERROR;
}
break;
case CREATE_MUTATIONS:
{
// Merge can result in inserting rows.
// Use the number of columns in the insert rather than the number
// of columns in the update if an insert is involved in this tcb.
if (tcb_->hbaseAccessTdb().getAccessType()
== ComTdbHbaseAccess::MERGE_)
{
ExpTupleDesc * rowTD = NULL;
if (tcb_->mergeInsertExpr())
{
rowTD = tcb_->hbaseAccessTdb().workCriDesc_->getTupleDescriptor
(tcb_->hbaseAccessTdb().mergeInsertTuppIndex_);
}
else
{
rowTD = tcb_->hbaseAccessTdb().workCriDesc_->getTupleDescriptor
(tcb_->hbaseAccessTdb().updateTuppIndex_);
}
if (rowTD->numAttrs() > 0)
tcb_->allocateDirectRowBufferForJNI(rowTD->numAttrs());
}
retcode = tcb_->createDirectRowBuffer(
tcb_->hbaseAccessTdb().updateTuppIndex_,
tcb_->updateRow_,
tcb_->hbaseAccessTdb().listOfUpdatedColNames(),
TRUE);
if (retcode == -1)
{
step_ = HANDLE_ERROR;
break;
}
step_ = UPDATE_ROW;
}
break;
case UPDATE_ROW:
{
retcode = tcb_->ehi_->getRowID(rowID);
if (tcb_->setupError(retcode, "ExpHbaseInterface::insertRow"))
{
step_ = HANDLE_ERROR;
break;
}
retcode = tcb_->ehi_->insertRow(tcb_->table_,
rowID,
tcb_->row_,
tcb_->hbaseAccessTdb().useHbaseXn(),
-1, // colTS_
tcb_->asyncOperation_);
if (tcb_->setupError(retcode, "ExpHbaseInterface::insertRow"))
{
step_ = HANDLE_ERROR;
break;
}
if (tcb_->hbaseAccessTdb().returnRow())
{
step_ = EVAL_RETURN_ROW_EXPRS;
break;
}
if (tcb_->getHbaseAccessStats())
tcb_->getHbaseAccessStats()->incUsedRows();
tcb_->matches_++;
step_ = NEXT_ROW;
}
break;
case DELETE_ROW:
{
retcode = tcb_->ehi_->getRowID(rowID);
if (tcb_->setupError(retcode, "ExpHbaseInterface::insertRow"))
{
step_ = HANDLE_ERROR;
break;
}
retcode = tcb_->ehi_->deleteRow(tcb_->table_,
rowID,
NULL,
tcb_->hbaseAccessTdb().useHbaseXn(),
-1,
tcb_->asyncOperation_);
if ( tcb_->setupError(retcode, "ExpHbaseInterface::deleteRow"))
{
step_ = HANDLE_ERROR;
break;
}
if (tcb_->getHbaseAccessStats())
tcb_->getHbaseAccessStats()->incUsedRows();
tcb_->currRowidIdx_++;
if (tcb_->hbaseAccessTdb().returnRow())
{
step_ = RETURN_ROW;
break;
}
tcb_->matches_++;
step_ = NEXT_ROW;
}
break;
case RETURN_ROW:
{
if (tcb_->qparent_.up->isFull())
{
rc = WORK_OK;
return 1;
}
rc = 0;
// moveRowToUpQueue also increments matches_
if (tcb_->moveRowToUpQueue(tcb_->convertRow_, tcb_->hbaseAccessTdb().convertRowLen(),
&rc, FALSE))
return 1;
if ((pentry_down->downState.request == ex_queue::GET_N) &&
(pentry_down->downState.requestValue == tcb_->matches_))
{
step_ = SCAN_CLOSE;
break;
}
step_ = NEXT_ROW;
}
break;
case EVAL_RETURN_ROW_EXPRS:
{
ex_queue_entry * up_entry = tcb_->qparent_.up->getTailEntry();
rc = 0;
// allocate tupps where returned rows will be created
if (tcb_->allocateUpEntryTupps(
tcb_->hbaseAccessTdb().returnedFetchedTuppIndex_,
tcb_->hbaseAccessTdb().returnFetchedRowLen_,
tcb_->hbaseAccessTdb().returnedUpdatedTuppIndex_,
tcb_->hbaseAccessTdb().returnUpdatedRowLen_,
FALSE,
&rc))
return 1;
ex_expr::exp_return_type exprRetCode;
char * fetchedDataPtr = NULL;
char * updatedDataPtr = NULL;
if (tcb_->returnFetchExpr())
{
exprRetCode =
tcb_->returnFetchExpr()->eval(up_entry->getAtp(), tcb_->workAtp_);
if (exprRetCode == ex_expr::EXPR_ERROR)
{
step_ = HANDLE_ERROR;
break;
}
fetchedDataPtr = up_entry->getAtp()->getTupp(tcb_->hbaseAccessTdb().returnedFetchedTuppIndex_).getDataPointer();
}
if (tcb_->returnUpdateExpr())
{
exprRetCode =
tcb_->returnUpdateExpr()->eval(up_entry->getAtp(), tcb_->workAtp_);
if (exprRetCode == ex_expr::EXPR_ERROR)
{
step_ = HANDLE_ERROR;
break;
}
updatedDataPtr = up_entry->getAtp()->getTupp(tcb_->hbaseAccessTdb().returnedUpdatedTuppIndex_).getDataPointer();
}
step_ = RETURN_UPDATED_ROWS;
}
break;
case RETURN_UPDATED_ROWS:
{
rc = 0;
// moveRowToUpQueue also increments matches_
if (tcb_->moveRowToUpQueue(&rc))
return 1;
if ((pentry_down->downState.request == ex_queue::GET_N) &&
(pentry_down->downState.requestValue == tcb_->matches_))
{
step_ = SCAN_CLOSE;
break;
}
step_ = NEXT_ROW;
}
break;
case SCAN_CLOSE:
{
retcode = tcb_->ehi_->scanClose();
if (tcb_->setupError(retcode, "ExpHbaseInterface::scanClose"))
step_ = HANDLE_ERROR;
else
step_ = DONE;
}
break;
case HANDLE_ERROR:
{
step_ = NOT_STARTED;
return -1;
}
break;
case DONE:
{
step_ = NOT_STARTED;
return 0;
}
break;
}// switch
} // while
}
ExHbaseUMDnativeSubsetTaskTcb::ExHbaseUMDnativeSubsetTaskTcb
(ExHbaseAccessUMDTcb * tcb)
: ExHbaseUMDtrafSubsetTaskTcb(tcb)
, step_(NOT_STARTED)
{
}
void ExHbaseUMDnativeSubsetTaskTcb::init()
{
step_ = NOT_STARTED;
}
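// Native HBase variant of the subset task: rows are assembled cell by
// cell into rowwise format. For deletes, only the deleted columns (plus
// any columns referenced by the scan predicate) are retrieved.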
ExWorkProcRetcode ExHbaseUMDnativeSubsetTaskTcb::work(short &rc)
{
Lng32 retcode = 0;
rc = 0;
Lng32 remainingInBatch = batchSize_;
while (1)
{
ex_queue_entry *pentry_down = tcb_->qparent_.down->getHeadEntry();
switch (step_)
{
case NOT_STARTED:
{
tcb_->setupListOfColNames(tcb_->hbaseAccessTdb().listOfDeletedColNames(),
tcb_->deletedColumns_);
tcb_->setupListOfColNames(tcb_->hbaseAccessTdb().listOfFetchedColNames(),
tcb_->columns_);
step_ = SCAN_OPEN;
}
break;
case SCAN_OPEN:
{
// Retrieve the columns to be deleted. If none of the columns exist,
// this row cannot be deleted.
// But if there is a scan expr, we also need to retrieve the columns
// used in the pred, so add those.
LIST(HbaseStr) columns(tcb_->getHeap());
if (tcb_->hbaseAccessTdb().getAccessType() == ComTdbHbaseAccess::DELETE_)
{
columns = tcb_->deletedColumns_;
if (tcb_->scanExpr())
{
// retrieve all columns if none is specified.
if (tcb_->columns_.entries() == 0)
columns.clear();
else
// append retrieved columns to deleted columns.
columns.insert(tcb_->columns_);
}
}
retcode = tcb_->ehi_->scanOpen(tcb_->table_,
tcb_->beginRowId_, tcb_->endRowId_,
columns, -1,
tcb_->hbaseAccessTdb().readUncommittedScan(),
tcb_->hbaseAccessTdb().getHbasePerfAttributes()->cacheBlocks(),
tcb_->hbaseAccessTdb().getHbasePerfAttributes()->numCacheRows(),
FALSE, NULL, NULL, NULL);
if (tcb_->setupError(retcode, "ExpHbaseInterface::scanOpen"))
step_ = HANDLE_ERROR;
else
{
step_ = NEXT_ROW;
tcb_->isEOD_ = FALSE;
}
}
break;
case NEXT_ROW:
{
if (--remainingInBatch <= 0)
{
rc = WORK_CALL_AGAIN;
return 1;
}
retcode = tcb_->ehi_->nextRow();
if (retcode == HBASE_ACCESS_EOD || retcode == HBASE_ACCESS_EOR)
{
tcb_->isEOD_ = TRUE;
step_ = SCAN_CLOSE;
break;
}
if (tcb_->setupError(retcode, "ExpHbaseInterface::nextRow"))
step_ = HANDLE_ERROR;
else
step_ = NEXT_CELL;
}
break;
case NEXT_CELL:
{
if (tcb_->colVal_.val == NULL)
tcb_->colVal_.val = new (tcb_->getHeap())
char[tcb_->hbaseAccessTdb().convertRowLen()];
tcb_->colVal_.len = tcb_->hbaseAccessTdb().convertRowLen();
retcode = tcb_->ehi_->nextCell( tcb_->rowId_, tcb_->colFamName_,
tcb_->colName_, tcb_->colVal_,
tcb_->colTS_);
if (retcode == HBASE_ACCESS_EOD)
{
if (tcb_->hbaseAccessTdb().getAccessType()
== ComTdbHbaseAccess::DELETE_)
{
if (! tcb_->scanExpr())
{
step_ = DELETE_ROW;
break;
}
}
step_ = CREATE_FETCHED_ROWWISE_ROW;
break;
}
if (tcb_->setupError(retcode, "ExpHbaseInterface::nextCell"))
{
step_ = HANDLE_ERROR;
break;
}
step_ = APPEND_CELL_TO_ROW;
}
break;
case APPEND_CELL_TO_ROW:
{
tcb_->copyCell();
step_ = NEXT_CELL;
}
break;
case CREATE_FETCHED_ROWWISE_ROW:
{
rc = tcb_->createRowwiseRow();
if (rc < 0)
{
if (rc != -1)
tcb_->setupError(rc, "createRowwiseRow");
step_ = HANDLE_ERROR;
break;
}
step_ = APPLY_PRED;
}
break;
case APPLY_PRED:
{
rc = tcb_->applyPred(tcb_->scanExpr());
if (rc == 1)
{
if (tcb_->hbaseAccessTdb().getAccessType() == ComTdbHbaseAccess::DELETE_)
step_ = DELETE_ROW;
else
step_ = CREATE_UPDATED_ROWWISE_ROW;
}
else if (rc == -1)
step_ = HANDLE_ERROR;
else
step_ = NEXT_ROW;
}
break;
case CREATE_UPDATED_ROWWISE_ROW:
{
tcb_->workAtp_->getTupp(tcb_->hbaseAccessTdb().updateTuppIndex_)
.setDataPointer(tcb_->updateRow_);
if (tcb_->updateExpr())
{
ex_expr::exp_return_type evalRetCode =
tcb_->updateExpr()->eval(pentry_down->getAtp(), tcb_->workAtp_);
if (evalRetCode == ex_expr::EXPR_ERROR)
{
step_ = HANDLE_ERROR;
break;
}
}
step_ = CREATE_MUTATIONS;
}
break;
case CREATE_MUTATIONS:
{
ExpTupleDesc * rowTD =
tcb_->hbaseAccessTdb().workCriDesc_->getTupleDescriptor
(tcb_->hbaseAccessTdb().updateTuppIndex_);
Attributes * attr = rowTD->getAttr(0);
retcode = tcb_->createDirectRowwiseBuffer(
&tcb_->updateRow_[attr->getOffset()]);
if (retcode == -1)
{
step_ = HANDLE_ERROR;
break;
}
step_ = UPDATE_ROW;
}
break;
case UPDATE_ROW:
{
if (tcb_->numColsInDirectBuffer() > 0)
{
retcode = tcb_->ehi_->insertRow(tcb_->table_,
tcb_->rowId_,
tcb_->row_,
tcb_->hbaseAccessTdb().useHbaseXn(),
-1,// colTS_
tcb_->asyncOperation_);
if (tcb_->setupError(retcode, "ExpHbaseInterface::insertRow"))
{
step_ = HANDLE_ERROR;
break;
}
if (tcb_->getHbaseAccessStats())
tcb_->getHbaseAccessStats()->incUsedRows();
tcb_->matches_++;
}
step_ = NEXT_ROW;
}
break;
case DELETE_ROW:
{
retcode = tcb_->ehi_->deleteRow(tcb_->table_,
tcb_->rowId_,
&tcb_->deletedColumns_,
tcb_->hbaseAccessTdb().useHbaseXn(),
-1,
tcb_->asyncOperation_);
if ( tcb_->setupError(retcode, "ExpHbaseInterface::deleteRow"))
{
step_ = HANDLE_ERROR;
break;
}
tcb_->currRowidIdx_++;
if (tcb_->getHbaseAccessStats())
tcb_->getHbaseAccessStats()->incUsedRows();
tcb_->matches_++;
step_ = NEXT_ROW;
}
break;
case SCAN_CLOSE:
{
retcode = tcb_->ehi_->scanClose();
if (tcb_->setupError(retcode, "ExpHbaseInterface::scanClose"))
step_ = HANDLE_ERROR;
else
step_ = DONE;
}
break;
case HANDLE_ERROR:
{
step_ = NOT_STARTED;
return -1;
}
break;
case DONE:
{
step_ = NOT_STARTED;
return 0;
}
break;
}// switch
} // while
}
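// UMD (update/merge/delete) dispatcher TCB. Depending on the TDB, it
// instantiates a subset task (list of scan ranges, or a non-unique key
// subset) and/or a unique task (list of get rows, or a unique key),
// choosing the Trafodion or native-HBase flavor of each.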
ExHbaseAccessUMDTcb::ExHbaseAccessUMDTcb(
const ExHbaseAccessTdb &hbaseAccessTdb,
ex_globals * glob ) :
ExHbaseAccessTcb(hbaseAccessTdb, glob),
step_(NOT_STARTED)
{
umdSQSubsetTaskTcb_ = NULL;
umdSQUniqueTaskTcb_ = NULL;
for (Lng32 i = 0; i < UMD_MAX_TASKS; i++)
{
tasks_[i] = FALSE;
}
ExHbaseAccessTdb &hbaseTdb = (ExHbaseAccessTdb&)hbaseAccessTdb;
if (hbaseTdb.listOfScanRows())
{
tasks_[UMD_SUBSET_TASK] = TRUE;
if (hbaseTdb.sqHbaseTable())
umdSQSubsetTaskTcb_ =
new(getGlobals()->getDefaultHeap()) ExHbaseUMDtrafSubsetTaskTcb(this);
else
umdSQSubsetTaskTcb_ =
new(getGlobals()->getDefaultHeap()) ExHbaseUMDnativeSubsetTaskTcb(this);
}
if ((hbaseTdb.keySubsetGen()) &&
(NOT hbaseTdb.uniqueKeyInfo()))
{
tasks_[UMD_SUBSET_KEY_TASK] = TRUE;
if (hbaseTdb.sqHbaseTable())
umdSQSubsetTaskTcb_ =
new(getGlobals()->getDefaultHeap()) ExHbaseUMDtrafSubsetTaskTcb(this);
else
umdSQSubsetTaskTcb_ =
new(getGlobals()->getDefaultHeap()) ExHbaseUMDnativeSubsetTaskTcb(this);
}
if (hbaseTdb.listOfGetRows())
{
tasks_[UMD_UNIQUE_TASK] = TRUE;
if (hbaseTdb.sqHbaseTable())
umdSQUniqueTaskTcb_ =
new(getGlobals()->getDefaultHeap()) ExHbaseUMDtrafUniqueTaskTcb(this);
else
umdSQUniqueTaskTcb_ =
new(getGlobals()->getDefaultHeap()) ExHbaseUMDnativeUniqueTaskTcb(this);
}
if ((hbaseTdb.keySubsetGen()) &&
(hbaseTdb.uniqueKeyInfo()))
{
tasks_[UMD_UNIQUE_KEY_TASK] = TRUE;
if (hbaseTdb.sqHbaseTable())
umdSQUniqueTaskTcb_ =
new(getGlobals()->getDefaultHeap()) ExHbaseUMDtrafUniqueTaskTcb(this);
else
umdSQUniqueTaskTcb_ =
new(getGlobals()->getDefaultHeap()) ExHbaseUMDnativeUniqueTaskTcb(this);
}
}
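// Drive the enabled UMD tasks in order: subset, unique, subset-key,
// unique-key. A task's work(rc) returns 1 to pass rc up to the
// scheduler, a negative value on error, and 0 when the task is done.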
ExWorkProcRetcode ExHbaseAccessUMDTcb::work()
{
Lng32 retcode = 0;
short rc = 0;
while (!qparent_.down->isEmpty())
{
ex_queue_entry *pentry_down = qparent_.down->getHeadEntry();
if ((pentry_down->downState.request == ex_queue::GET_NOMORE) &&
(step_ != DONE))
{
step_ = UMD_CLOSE_NO_ERROR;
}
switch (step_)
{
case NOT_STARTED:
{
matches_ = 0;
step_ = UMD_INIT;
}
break;
case UMD_INIT:
{
retcode = ehi_->init(getHbaseAccessStats());
if (setupError(retcode, "ExpHbaseInterface::init"))
{
step_ = HANDLE_ERROR;
break;
}
if (hbaseAccessTdb().listOfScanRows())
hbaseAccessTdb().listOfScanRows()->position();
if (hbaseAccessTdb().listOfGetRows())
{
if (! rowIdExpr())
{
setupError(-HBASE_OPEN_ERROR, "", "RowId Expr is empty");
step_ = HANDLE_ERROR;
break;
}
hbaseAccessTdb().listOfGetRows()->position();
}
table_.val = hbaseAccessTdb().getTableName();
table_.len = strlen(hbaseAccessTdb().getTableName());
if (umdSQSubsetTaskTcb_)
umdSQSubsetTaskTcb_->init();
if (umdSQUniqueTaskTcb_)
umdSQUniqueTaskTcb_->init();
step_ = SETUP_SUBSET;
}
break;
case SETUP_SUBSET:
{
if (NOT tasks_[UMD_SUBSET_TASK])
{
step_ = SETUP_UNIQUE;
break;
}
hsr_ =
(ComTdbHbaseAccess::HbaseScanRows*)hbaseAccessTdb().listOfScanRows()
->getCurr();
retcode = setupSubsetRowIdsAndCols(hsr_);
if (retcode == -1)
{
step_ = HANDLE_ERROR;
break;
}
step_ = PROCESS_SUBSET;
}
break;
case PROCESS_SUBSET:
{
rc = 0;
retcode = umdSQSubsetTaskTcb_->work(rc);
if (retcode == 1)
return rc;
else if (retcode < 0)
step_ = HANDLE_ERROR;
else
step_ = NEXT_SUBSET;
}
break;
case NEXT_SUBSET:
{
hbaseAccessTdb().listOfScanRows()->advance();
if (! hbaseAccessTdb().listOfScanRows()->atEnd())
{
step_ = SETUP_SUBSET;
break;
}
step_ = SETUP_UNIQUE;
}
break;
case SETUP_UNIQUE:
{
if (NOT tasks_[UMD_UNIQUE_TASK])
{
step_ = SETUP_SUBSET_KEY;
break;
}
hgr_ =
(ComTdbHbaseAccess::HbaseGetRows*)hbaseAccessTdb().listOfGetRows()
->getCurr();
retcode = setupUniqueRowIdsAndCols(hgr_);
if (retcode == -1)
{
step_ = HANDLE_ERROR;
break;
}
step_ = PROCESS_UNIQUE;
}
break;
case PROCESS_UNIQUE:
{
rc = 0;
retcode = umdSQUniqueTaskTcb_->work(rc);
if (retcode == 1)
return rc;
else if (retcode < 0)
step_ = HANDLE_ERROR;
else
step_ = NEXT_UNIQUE;
}
break;
case NEXT_UNIQUE:
{
hbaseAccessTdb().listOfGetRows()->advance();
if (! hbaseAccessTdb().listOfGetRows()->atEnd())
{
step_ = SETUP_UNIQUE;
break;
}
step_ = SETUP_SUBSET_KEY;
}
break;
case SETUP_SUBSET_KEY:
{
if (NOT tasks_[UMD_SUBSET_KEY_TASK])
{
step_ = SETUP_UNIQUE_KEY;
break;
}
retcode = setupSubsetKeysAndCols();
if (retcode == -1)
{
step_ = HANDLE_ERROR;
break;
}
step_ = PROCESS_SUBSET_KEY;
}
break;
case PROCESS_SUBSET_KEY:
{
rc = 0;
retcode = umdSQSubsetTaskTcb_->work(rc);
if (retcode == 1)
return rc;
else if (retcode < 0)
step_ = HANDLE_ERROR;
else
step_ = SETUP_UNIQUE_KEY;
}
break;
case SETUP_UNIQUE_KEY:
{
if (NOT tasks_[UMD_UNIQUE_KEY_TASK])
{
step_ = UMD_CLOSE;
break;
}
retcode = setupUniqueKeyAndCols(TRUE);
if (retcode == -1)
{
step_ = HANDLE_ERROR;
break;
}
step_ = PROCESS_UNIQUE_KEY;
}
break;
case PROCESS_UNIQUE_KEY:
{
rc = 0;
retcode = umdSQUniqueTaskTcb_->work(rc);
if (retcode == 1)
return rc;
else if (retcode < 0)
step_ = HANDLE_ERROR;
else
step_ = UMD_CLOSE;
}
break;
case UMD_CLOSE:
case UMD_CLOSE_NO_ERROR:
{
retcode = ehi_->close();
if (step_ == UMD_CLOSE)
{
if (setupError(retcode, "ExpHbaseInterface::close"))
{
step_ = HANDLE_ERROR;
break;
}
}
step_ = DONE;
}
break;
case HANDLE_ERROR:
{
if (handleError(rc))
return rc;
retcode = ehi_->close();
step_ = DONE;
}
break;
case DONE:
{
if (NOT hbaseAccessTdb().computeRowsAffected())
matches_ = 0;
if (handleDone(rc, matches_))
return rc;
if (umdSQSubsetTaskTcb_)
umdSQSubsetTaskTcb_->init();
if (umdSQUniqueTaskTcb_)
umdSQUniqueTaskTcb_->init();
step_ = NOT_STARTED;
}
break;
} // switch
} // while
return WORK_OK;
}
ExHbaseAccessSQRowsetTcb::ExHbaseAccessSQRowsetTcb(
const ExHbaseAccessTdb &hbaseAccessTdb,
ex_globals * glob ) :
ExHbaseAccessTcb( hbaseAccessTdb, glob)
, step_(NOT_STARTED)
{
if (getHbaseAccessStats())
getHbaseAccessStats()->init();
prevTailIndex_ = 0;
nextRequest_ = qparent_.down->getHeadIndex();
numRetries_ = 0;
lastHandledStep_ = NOT_STARTED;
numRowsInVsbbBuffer_ = 0;
}
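// Builds the rowID for a single down-queue entry and copies it into the
// direct buffer. Returns 1 for GET_NOMORE/GET_EOD entries, -1 on an
// expression error, and 0 otherwise.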
Lng32 ExHbaseAccessSQRowsetTcb::setupUniqueKey()
{
ex_queue_entry *pentry_down = qparent_.down->getQueueEntry(nextRequest_);
if (pentry_down->downState.request == ex_queue::GET_NOMORE
|| pentry_down->downState.request == ex_queue::GET_EOD)
return 1;
ex_expr::exp_return_type exprRetCode = ex_expr::EXPR_OK;
keyRangeEx::getNextKeyRangeReturnType keyRangeStatus;
initNextKeyRange(pool_, pentry_down->getAtp());
keyRangeStatus =
keySubsetExeExpr_->getNextKeyRange(pentry_down->getAtp(), FALSE, TRUE);
if (keyRangeStatus == keyRangeEx::EXPRESSION_ERROR)
return -1;
tupp &keyData = keySubsetExeExpr_->getBkData();
char * beginKeyRow = keyData.getDataPointer();
HbaseStr rowIdRowText;
if (hbaseAccessTdb().sqHbaseTable()) {
rowIdRowText.val = beginKeyRow;
rowIdRowText.len = hbaseAccessTdb().keyLen_;
} else {
// hbase table. Key is in varchar format.
short keyLen = *(short*)beginKeyRow;
rowIdRowText.val = beginKeyRow + sizeof(short);
rowIdRowText.len = keyLen;
}
if (keyRangeStatus == keyRangeEx::NO_MORE_RANGES)
{
// To ensure no row is found, append an extra byte with value '\0' to the key
rowIdRowText.val[rowIdRowText.len] = '\0';
rowIdRowText.len += 1;
}
copyRowIDToDirectBuffer(rowIdRowText);
return 0;
}
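// Packs rowIDs for the pending down-queue entries into the direct buffer,
// stopping at the queue tail or once the rowset VSBB limit is reached.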
Lng32 ExHbaseAccessSQRowsetTcb::setupRowIds()
{
Lng32 retcode;
UInt16 rowsetMaxRows = hbaseAccessTdb().getHbaseRowsetVsbbSize();
queue_index tlindex = qparent_.down->getTailIndex();
while (nextRequest_ != tlindex) {
retcode = setupUniqueKey();
if (retcode != 0)
return retcode;
nextRequest_++;
// Don't buffer more than HBASE_ROWSET_VSBB_SIZE
if (numRowsInDirectBuffer() >= rowsetMaxRows)
return 1;
}
return 0;
}
ExWorkProcRetcode ExHbaseAccessSQRowsetTcb::work()
{
Lng32 retcode = 0;
short rc = 0;
ExMasterStmtGlobals *g = getGlobals()->
castToExExeStmtGlobals()->castToExMasterStmtGlobals();
while (!qparent_.down->isEmpty())
{
ex_queue_entry *pentry_down = qparent_.down->getHeadEntry();
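// GET_NOMORE cancels the request outright; GET_EOD marks the end of the
// rowset input, so any rows still sitting in the direct buffer are
// flushed (PROCESS_*_AND_CLOSE) before closing.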
if (pentry_down->downState.request == ex_queue::GET_NOMORE)
step_ = ALL_DONE;
else if (pentry_down->downState.request == ex_queue::GET_EOD) {
if (numRowsInDirectBuffer() > 0) {
if (hbaseAccessTdb().getAccessType() == ComTdbHbaseAccess::UPDATE_)
step_ = PROCESS_UPDATE_AND_CLOSE;
else if (hbaseAccessTdb().getAccessType() == ComTdbHbaseAccess::DELETE_)
step_ = PROCESS_DELETE_AND_CLOSE;
else
ex_assert(0, "EOD and Select is not handled here");
}
else
step_ = ALL_DONE;
}
switch (step_)
{
case NOT_STARTED:
{
matches_ = 0;
currRowNum_ = 0;
numRetries_ = 0;
prevTailIndex_ = 0;
asyncCompleteRetryCount_ = 0;
asyncOperationTimeout_ = 1;
asyncOperation_ = hbaseAccessTdb().asyncOperations() && getTransactionIDFromContext();
numRowsInVsbbBuffer_ = 0;
lastHandledStep_ = NOT_STARTED;
nextRequest_ = qparent_.down->getHeadIndex();
step_ = RS_INIT;
}
break;
case RS_INIT:
{
retcode = ehi_->init(getHbaseAccessStats());
if (setupError(retcode, "ExpHbaseInterface::init"))
{
step_ = HANDLE_ERROR;
break;
}
table_.val = hbaseAccessTdb().getTableName();
table_.len = strlen(hbaseAccessTdb().getTableName());
if (hbaseAccessTdb().getAccessType() == ComTdbHbaseAccess::UPDATE_)
{
ExpTupleDesc * rowTD =
hbaseAccessTdb().workCriDesc_->getTupleDescriptor
(hbaseAccessTdb().updateTuppIndex_);
allocateDirectRowBufferForJNI(rowTD->numAttrs(),
hbaseAccessTdb().getHbaseRowsetVsbbSize());
}
if (hbaseAccessTdb().getAccessType() == ComTdbHbaseAccess::UPDATE_
|| hbaseAccessTdb().getAccessType() == ComTdbHbaseAccess::SELECT_
|| hbaseAccessTdb().getAccessType() == ComTdbHbaseAccess::DELETE_)
allocateDirectRowIDBufferForJNI(hbaseAccessTdb().getHbaseRowsetVsbbSize());
setupListOfColNames(hbaseAccessTdb().listOfFetchedColNames(),
columns_);
if (hbaseAccessTdb().getAccessType() == ComTdbHbaseAccess::SELECT_)
step_ = SETUP_SELECT;
else
step_ = SETUP_UMD;
}
break;
case SETUP_SELECT:
{
retcode = setupRowIds();
switch (retcode) {
case 0:
if (qparent_.down->getLength() == 1) {
// only one row in the down queue.
// Before we send input buffer to hbase, give parent
// another chance in case there is more input data.
// If parent doesn't input any more data on second (or
// later) chances, then process the request.
if (numRetries_ == 3) {
numRetries_ = 0;
step_ = PROCESS_SELECT;
} else {
numRetries_++;
return WORK_CALL_AGAIN;
}
}
else
step_ = PROCESS_SELECT;
break;
case 1:
// Reached the max. number of rowIds
// Process the rowIds in the buffer
step_ = PROCESS_SELECT;
break;
default:
step_ = HANDLE_ERROR;
break;
}
}
break;
case SETUP_UMD:
{
rowIds_.clear();
retcode = setupUniqueKeyAndCols(FALSE);
if (retcode == -1) {
step_ = HANDLE_ERROR;
break;
}
rc = evalDeletePreCondExpr();
if (rc == -1) {
step_ = HANDLE_ERROR;
break;
}
if (rc == 0) { // No need to delete
step_ = NEXT_ROW;
break;
}
copyRowIDToDirectBuffer(rowIds_[0]);
if ((hbaseAccessTdb().getAccessType() == ComTdbHbaseAccess::DELETE_) ||
(hbaseAccessTdb().getAccessType() == ComTdbHbaseAccess::SELECT_))
step_ = NEXT_ROW;
else if (hbaseAccessTdb().getAccessType() == ComTdbHbaseAccess::UPDATE_)
step_ = CREATE_UPDATED_ROW;
else
step_ = HANDLE_ERROR;
}
break;
case NEXT_ROW:
{
currRowNum_++;
if (hbaseAccessTdb().getAccessType() == ComTdbHbaseAccess::SELECT_) {
// matches_ is set to 1 when the row is projected by moveRowToUpQueue
// to denote that there is a matching entry
matches_ = 0;
retcode = ehi_->nextRow();
// EOR is the end of the result set for the current rowset.
// EOD means no data for the current row. But EOD is never returned;
// instead, HBASE_ACCESS_NO_ROW is returned when no row is found in the
// CREATE_ROW step.
if (retcode == HBASE_ACCESS_EOR) {
step_ = RS_CLOSE;
break;
}
if (retcode == HBASE_ACCESS_EOD) {
step_ = ROW_DONE;
break;
}
if (setupError(retcode, "ExpHbaseInterface::nextRow"))
step_ = HANDLE_ERROR;
else
step_ = CREATE_ROW;
break;
}
matches_++;
if (numRowsInDirectBuffer() < hbaseAccessTdb().getHbaseRowsetVsbbSize()) {
step_ = DONE;
break;
}
if (hbaseAccessTdb().getAccessType() == ComTdbHbaseAccess::DELETE_)
step_ = PROCESS_DELETE;
else if (hbaseAccessTdb().getAccessType() == ComTdbHbaseAccess::UPDATE_)
step_ = PROCESS_UPDATE;
else
step_ = HANDLE_ERROR;
}
break;
case CREATE_ROW:
{
retcode = createSQRowDirect();
if (retcode == HBASE_ACCESS_NO_ROW) {
step_ = ROW_DONE;
break;
}
if (retcode < 0)
{
rc = (short)retcode;
setupError(rc, "createSQRowDirect");
step_ = HANDLE_ERROR;
break;
}
if (retcode != HBASE_ACCESS_SUCCESS)
{
step_ = HANDLE_ERROR;
break;
}
step_ = APPLY_PRED;
}
break;
case APPLY_PRED:
{
rc = applyPred(scanExpr());
if (rc == 1)
step_ = RETURN_ROW;
else if (rc == -1)
step_ = HANDLE_ERROR;
else
step_ = ROW_DONE;
}
break;
case RETURN_ROW:
{
rc = 0;
if (moveRowToUpQueue(convertRow_, hbaseAccessTdb().convertRowLen(),
&rc, FALSE))
return rc;
if (getHbaseAccessStats())
getHbaseAccessStats()->incUsedRows();
step_ = ROW_DONE;
}
break;
case PROCESS_DELETE:
case PROCESS_DELETE_AND_CLOSE:
{
numRowsInVsbbBuffer_ = patchDirectRowIDBuffers();
retcode = ehi_->deleteRows(table_,
hbaseAccessTdb().getRowIDLen(),
rowIDs_,
hbaseAccessTdb().useHbaseXn(),
-1,
asyncOperation_);
currRowNum_ = 0;
if (setupError(retcode, "ExpHbaseInterface::deleteRows"))
{
step_ = HANDLE_ERROR;
break;
}
if (asyncOperation_) {
lastHandledStep_ = step_;
step_ = COMPLETE_ASYNC_OPERATION;
break;
}
if (getHbaseAccessStats()) {
getHbaseAccessStats()->lobStats()->numReadReqs++;
getHbaseAccessStats()->incUsedRows(numRowsInVsbbBuffer_);
}
if (step_ == PROCESS_DELETE_AND_CLOSE)
step_ = RS_CLOSE;
else
step_ = DONE;
}
break;
case PROCESS_SELECT:
{
if (numRowsInDirectBuffer() > 0) {
numRowsInVsbbBuffer_ = patchDirectRowIDBuffers();
retcode = ehi_->getRowsOpen(
table_,
hbaseAccessTdb().getRowIDLen(),
rowIDs_,
columns_);
currRowNum_ = 0;
if (setupError(retcode, "ExpHbaseInterface::getRowsOpen"))
{
step_ = HANDLE_ERROR;
break;
}
step_ = NEXT_ROW;
if (getHbaseAccessStats())
{
getHbaseAccessStats()->lobStats()->numReadReqs++;
}
}
else
step_ = SETUP_SELECT;
}
break;
case CREATE_UPDATED_ROW:
{
workAtp_->getTupp(hbaseAccessTdb().updateTuppIndex_)
.setDataPointer(updateRow_);
if (updateExpr())
{
ex_expr::exp_return_type evalRetCode =
updateExpr()->eval(pentry_down->getAtp(), workAtp_);
if (evalRetCode == ex_expr::EXPR_ERROR)
{
step_ = HANDLE_ERROR;
break;
}
}
retcode = createDirectRowBuffer(
hbaseAccessTdb().updateTuppIndex_,
updateRow_,
hbaseAccessTdb().listOfUpdatedColNames(),
TRUE);
if (retcode == -1)
{
step_ = HANDLE_ERROR;
break;
}
step_ = NEXT_ROW;
}
break;
case PROCESS_UPDATE:
case PROCESS_UPDATE_AND_CLOSE:
{
numRowsInVsbbBuffer_ = patchDirectRowBuffers();
retcode = ehi_->insertRows(table_,
hbaseAccessTdb().getRowIDLen(),
rowIDs_,
rows_,
hbaseAccessTdb().useHbaseXn(),
-1,
asyncOperation_);
currRowNum_ = 0;
if (setupError(retcode, "ExpHbaseInterface::insertRows"))
{
step_ = HANDLE_ERROR;
break;
}
if (asyncOperation_) {
lastHandledStep_ = step_;
step_ = COMPLETE_ASYNC_OPERATION;
break;
}
if (getHbaseAccessStats()) {
getHbaseAccessStats()->lobStats()->numReadReqs++;
getHbaseAccessStats()->incUsedRows(numRowsInVsbbBuffer_);
}
if (step_ == PROCESS_UPDATE_AND_CLOSE)
step_ = RS_CLOSE;
else
step_ = DONE;
}
break;
case COMPLETE_ASYNC_OPERATION:
{
if (resultArray_ == NULL)
resultArray_ = new (getHeap()) NABoolean[hbaseAccessTdb().getHbaseRowsetVsbbSize()];
Int32 timeout;
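// The first ten completion attempts use the default timeout (-1); after
// that the timeout doubles on every retry, giving slow async operations
// a progressively longer wait.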
if (asyncCompleteRetryCount_ < 10)
timeout = -1;
else {
asyncOperationTimeout_ = asyncOperationTimeout_ * 2;
timeout = asyncOperationTimeout_;
}
retcode = ehi_->completeAsyncOperation(timeout, resultArray_, numRowsInVsbbBuffer_);
if (retcode == HBASE_RETRY_AGAIN) {
asyncCompleteRetryCount_++;
return WORK_CALL_AGAIN;
}
asyncCompleteRetryCount_ = 0;
if (setupError(retcode, "ExpHbaseInterface::completeAsyncOperation")) {
step_ = HANDLE_ERROR;
break;
}
for (int i = 0 ; i < numRowsInVsbbBuffer_; i++) {
if (resultArray_[i] == FALSE) {
ComDiagsArea * diagsArea = NULL;
ExRaiseSqlError(getHeap(), &diagsArea,
(ExeErrorCode)(8102));
pentry_down->setDiagsArea(diagsArea);
step_ = HANDLE_ERROR;
break;
}
}
if (step_ == HANDLE_ERROR)
break;
if (getHbaseAccessStats()) {
getHbaseAccessStats()->lobStats()->numReadReqs++;
getHbaseAccessStats()->incUsedRows(numRowsInVsbbBuffer_);
}
if ((lastHandledStep_ == PROCESS_UPDATE_AND_CLOSE)
|| (lastHandledStep_ == PROCESS_DELETE_AND_CLOSE))
step_ = RS_CLOSE;
else
step_ = DONE;
}
break;
case RS_CLOSE:
{
retcode = ehi_->close();
if (setupError(retcode, "ExpHbaseInterface::close"))
{
step_ = HANDLE_ERROR;
break;
}
if (hbaseAccessTdb().getAccessType() == ComTdbHbaseAccess::SELECT_)
step_ = NOT_STARTED;
else
step_ = ALL_DONE;
}
break;
case HANDLE_ERROR:
{
if (handleError(rc))
return rc;
retcode = ehi_->close();
step_ = ALL_DONE;
}
break;
case ROW_DONE:
{
if (handleDone(rc, 0))
return rc;
step_ = NEXT_ROW;
}
break;
case DONE:
case ALL_DONE:
{
if (NOT hbaseAccessTdb().computeRowsAffected())
matches_ = 0;
if ((step_ == DONE) &&
(qparent_.down->getLength() == 1))
{
// only one row in the down queue.
// Before we send input buffer to hbase, give parent
// another chance in case there is more input data.
// If parent doesn't input any more data on second (or
// later) chances, then process the request.
if (numRetries_ == 3 || numRowsInDirectBuffer() > 1)
{
numRetries_ = 0;
// Delete/update the current batch and then done.
if (hbaseAccessTdb().getAccessType() == ComTdbHbaseAccess::DELETE_)
step_ = PROCESS_DELETE_AND_CLOSE;
else if (hbaseAccessTdb().getAccessType() == ComTdbHbaseAccess::UPDATE_)
step_ = PROCESS_UPDATE_AND_CLOSE;
else
{
ex_assert(false, "DONE state is invalid in Rowset SELECT");
}
break;
}
numRetries_++;
return WORK_CALL_AGAIN;
}
if (handleDone(rc, (step_ == ALL_DONE ? matches_ : 0)))
return rc;
if (step_ == DONE)
step_ = SETUP_UMD;
else
step_ = NOT_STARTED;
}
break;
} // switch
} // while
if (qparent_.down->isEmpty()
&& (hbaseAccessTdb().getAccessType() == ComTdbHbaseAccess::SELECT_)) {
ehi_->close();
step_ = NOT_STARTED;
}
return WORK_OK;
}
| 1 | 7,529 | Should we consider moving this expression evaluation to the SETUP_INSERT step, since it evaluates from the queue entry? Also, this expression is not evaluated in ExHbaseAccessVsbbUpsertTcb. | apache-trafodion | cpp |
@@ -143,7 +143,8 @@ namespace OpenTelemetry.Exporter.OpenTelemetryProtocol.Implementation
{
Name = activity.DisplayName,
- Kind = (OtlpTrace.Span.Types.SpanKind)(activity.Kind + 1), // TODO: there is an offset of 1 on the enum.
+ // There is an offset of 1 on the Otlp enum.
+ Kind = (OtlpTrace.Span.Types.SpanKind)(activity.Kind + 1),
TraceId = ByteStringCtorFunc(traceIdBytes),
SpanId = ByteStringCtorFunc(spanIdBytes), | 1 | // <copyright file="ActivityExtensions.cs" company="OpenTelemetry Authors">
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// </copyright>
using System;
using System.Collections.Concurrent;
using System.Collections.Generic;
using System.Diagnostics;
using System.Linq;
using System.Reflection;
using System.Reflection.Emit;
using System.Runtime.CompilerServices;
using Google.Protobuf;
using Google.Protobuf.Collections;
using OpenTelemetry.Internal;
using OpenTelemetry.Trace;
using OtlpCollector = Opentelemetry.Proto.Collector.Trace.V1;
using OtlpCommon = Opentelemetry.Proto.Common.V1;
using OtlpResource = Opentelemetry.Proto.Resource.V1;
using OtlpTrace = Opentelemetry.Proto.Trace.V1;
namespace OpenTelemetry.Exporter.OpenTelemetryProtocol.Implementation
{
internal static class ActivityExtensions
{
private static readonly ConcurrentBag<OtlpTrace.InstrumentationLibrarySpans> SpanListPool = new ConcurrentBag<OtlpTrace.InstrumentationLibrarySpans>();
private static readonly Action<RepeatedField<OtlpTrace.Span>, int> RepeatedFieldOfSpanSetCountAction = CreateRepeatedFieldOfSpanSetCountAction();
private static readonly Func<byte[], ByteString> ByteStringCtorFunc = CreateByteStringCtorFunc();
internal static void AddBatch(
this OtlpCollector.ExportTraceServiceRequest request,
OtlpResource.Resource processResource,
in Batch<Activity> activityBatch)
{
Dictionary<string, OtlpTrace.InstrumentationLibrarySpans> spansByLibrary = new Dictionary<string, OtlpTrace.InstrumentationLibrarySpans>();
OtlpTrace.ResourceSpans resourceSpans = new OtlpTrace.ResourceSpans
{
Resource = processResource,
};
request.ResourceSpans.Add(resourceSpans);
foreach (var activity in activityBatch)
{
OtlpTrace.Span span = activity.ToOtlpSpan();
if (span == null)
{
OpenTelemetryProtocolExporterEventSource.Log.CouldNotTranslateActivity(
nameof(ActivityExtensions),
nameof(AddBatch));
continue;
}
var activitySourceName = activity.Source.Name;
if (!spansByLibrary.TryGetValue(activitySourceName, out var spans))
{
spans = GetSpanListFromPool(activitySourceName, activity.Source.Version);
spansByLibrary.Add(activitySourceName, spans);
resourceSpans.InstrumentationLibrarySpans.Add(spans);
}
spans.Spans.Add(span);
}
}
[MethodImpl(MethodImplOptions.AggressiveInlining)]
internal static void Return(this OtlpCollector.ExportTraceServiceRequest request)
{
var resourceSpans = request.ResourceSpans.FirstOrDefault();
if (resourceSpans == null)
{
return;
}
foreach (var librarySpans in resourceSpans.InstrumentationLibrarySpans)
{
RepeatedFieldOfSpanSetCountAction(librarySpans.Spans, 0);
SpanListPool.Add(librarySpans);
}
}
[MethodImpl(MethodImplOptions.AggressiveInlining)]
internal static OtlpTrace.InstrumentationLibrarySpans GetSpanListFromPool(string name, string version)
{
if (!SpanListPool.TryTake(out var spans))
{
spans = new OtlpTrace.InstrumentationLibrarySpans
{
InstrumentationLibrary = new OtlpCommon.InstrumentationLibrary
{
Name = name, // Name is enforced to not be null, but it can be empty.
Version = version ?? string.Empty, // proto throws an NRE if this is null
},
};
}
else
{
spans.InstrumentationLibrary.Name = name;
spans.InstrumentationLibrary.Version = version ?? string.Empty;
}
return spans;
}
[MethodImpl(MethodImplOptions.AggressiveInlining)]
internal static OtlpTrace.Span ToOtlpSpan(this Activity activity)
{
if (activity.IdFormat != ActivityIdFormat.W3C)
{
// Only ActivityIdFormat.W3C is supported; in principle this should never
// be hit under the OpenTelemetry SDK.
return null;
}
byte[] traceIdBytes = new byte[16];
byte[] spanIdBytes = new byte[8];
activity.TraceId.CopyTo(traceIdBytes);
activity.SpanId.CopyTo(spanIdBytes);
var parentSpanIdString = ByteString.Empty;
if (activity.ParentSpanId != default)
{
byte[] parentSpanIdBytes = new byte[8];
activity.ParentSpanId.CopyTo(parentSpanIdBytes);
parentSpanIdString = ByteStringCtorFunc(parentSpanIdBytes);
}
var startTimeUnixNano = activity.StartTimeUtc.ToUnixTimeNanoseconds();
var otlpSpan = new OtlpTrace.Span
{
Name = activity.DisplayName,
Kind = (OtlpTrace.Span.Types.SpanKind)(activity.Kind + 1), // TODO: there is an offset of 1 on the enum.
TraceId = ByteStringCtorFunc(traceIdBytes),
SpanId = ByteStringCtorFunc(spanIdBytes),
ParentSpanId = parentSpanIdString,
StartTimeUnixNano = (ulong)startTimeUnixNano,
EndTimeUnixNano = (ulong)(startTimeUnixNano + activity.Duration.ToNanoseconds()),
};
TagEnumerationState otlpTags = default;
activity.EnumerateTags(ref otlpTags);
if (activity.Kind == ActivityKind.Client || activity.Kind == ActivityKind.Producer)
{
PeerServiceResolver.Resolve(ref otlpTags, out string peerServiceName, out bool addAsTag);
if (peerServiceName != null && addAsTag)
{
PooledList<OtlpCommon.KeyValue>.Add(
ref otlpTags.Tags,
new OtlpCommon.KeyValue
{
Key = SemanticConventions.AttributePeerService,
Value = new OtlpCommon.AnyValue { StringValue = peerServiceName },
});
}
}
if (otlpTags.Created)
{
otlpSpan.Attributes.AddRange(otlpTags.Tags);
otlpTags.Tags.Return();
}
otlpSpan.Status = ToOtlpStatus(ref otlpTags);
EventEnumerationState otlpEvents = default;
activity.EnumerateEvents(ref otlpEvents);
if (otlpEvents.Created)
{
otlpSpan.Events.AddRange(otlpEvents.Events);
otlpEvents.Events.Return();
}
LinkEnumerationState otlpLinks = default;
activity.EnumerateLinks(ref otlpLinks);
if (otlpLinks.Created)
{
otlpSpan.Links.AddRange(otlpLinks.Links);
otlpLinks.Links.Return();
}
// Activity does not limit the number of attributes, events, links, etc., so drop counts are always zero.
return otlpSpan;
}
[MethodImpl(MethodImplOptions.AggressiveInlining)]
internal static OtlpCommon.KeyValue ToOtlpAttribute(this KeyValuePair<string, object> kvp)
{
if (kvp.Value == null)
{
return null;
}
var attrib = new OtlpCommon.KeyValue { Key = kvp.Key, Value = new OtlpCommon.AnyValue { } };
switch (kvp.Value)
{
case string s:
attrib.Value.StringValue = s;
break;
case bool b:
attrib.Value.BoolValue = b;
break;
case int i:
attrib.Value.IntValue = i;
break;
case long l:
attrib.Value.IntValue = l;
break;
case double d:
attrib.Value.DoubleValue = d;
break;
default:
attrib.Value.StringValue = kvp.Value.ToString();
break;
}
return attrib;
}
[MethodImpl(MethodImplOptions.AggressiveInlining)]
private static OtlpTrace.Status ToOtlpStatus(ref TagEnumerationState otlpTags)
{
var status = StatusHelper.GetStatusCodeForTagValue(otlpTags.StatusCode);
if (!status.HasValue)
{
return null;
}
var otlpStatus = new OtlpTrace.Status
{
// The numerical values of the two enumerations match; a simple cast is enough.
Code = (OtlpTrace.Status.Types.StatusCode)(int)status,
};
if (otlpStatus.Code != OtlpTrace.Status.Types.StatusCode.Error)
{
#pragma warning disable CS0612 // Type or member is obsolete
otlpStatus.DeprecatedCode = OtlpTrace.Status.Types.DeprecatedStatusCode.Ok;
#pragma warning restore CS0612 // Type or member is obsolete
}
else
{
#pragma warning disable CS0612 // Type or member is obsolete
otlpStatus.DeprecatedCode = OtlpTrace.Status.Types.DeprecatedStatusCode.UnknownError;
#pragma warning restore CS0612 // Type or member is obsolete
}
if (!string.IsNullOrEmpty(otlpTags.StatusDescription))
{
otlpStatus.Message = otlpTags.StatusDescription;
}
return otlpStatus;
}
[MethodImpl(MethodImplOptions.AggressiveInlining)]
private static OtlpTrace.Span.Types.Link ToOtlpLink(ActivityLink activityLink)
{
byte[] traceIdBytes = new byte[16];
byte[] spanIdBytes = new byte[8];
activityLink.Context.TraceId.CopyTo(traceIdBytes);
activityLink.Context.SpanId.CopyTo(spanIdBytes);
var otlpLink = new OtlpTrace.Span.Types.Link
{
TraceId = ByteStringCtorFunc(traceIdBytes),
SpanId = ByteStringCtorFunc(spanIdBytes),
};
TagEnumerationState otlpTags = default;
activityLink.EnumerateTags(ref otlpTags);
if (otlpTags.Created)
{
otlpLink.Attributes.AddRange(otlpTags.Tags);
otlpTags.Tags.Return();
}
return otlpLink;
}
[MethodImpl(MethodImplOptions.AggressiveInlining)]
private static OtlpTrace.Span.Types.Event ToOtlpEvent(ActivityEvent activityEvent)
{
var otlpEvent = new OtlpTrace.Span.Types.Event
{
Name = activityEvent.Name,
TimeUnixNano = (ulong)activityEvent.Timestamp.ToUnixTimeNanoseconds(),
};
TagEnumerationState otlpTags = default;
activityEvent.EnumerateTags(ref otlpTags);
if (otlpTags.Created)
{
otlpEvent.Attributes.AddRange(otlpTags.Tags);
otlpTags.Tags.Return();
}
return otlpEvent;
}
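// Builds a delegate that writes RepeatedField<Span>.count directly via
// emitted IL, so pooled span lists can be "cleared" without dropping
// their backing arrays.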
private static Action<RepeatedField<OtlpTrace.Span>, int> CreateRepeatedFieldOfSpanSetCountAction()
{
FieldInfo repeatedFieldOfSpanCountField = typeof(RepeatedField<OtlpTrace.Span>).GetField("count", BindingFlags.NonPublic | BindingFlags.Instance);
DynamicMethod dynamicMethod = new DynamicMethod(
"CreateSetCountAction",
null,
new[] { typeof(RepeatedField<OtlpTrace.Span>), typeof(int) },
typeof(ActivityExtensions).Module,
skipVisibility: true);
var generator = dynamicMethod.GetILGenerator();
generator.Emit(OpCodes.Ldarg_0);
generator.Emit(OpCodes.Ldarg_1);
generator.Emit(OpCodes.Stfld, repeatedFieldOfSpanCountField);
generator.Emit(OpCodes.Ret);
return (Action<RepeatedField<OtlpTrace.Span>, int>)dynamicMethod.CreateDelegate(typeof(Action<RepeatedField<OtlpTrace.Span>, int>));
}
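// Builds a delegate around ByteString's non-public byte[] constructor so
// the id bytes can be wrapped without the extra copy ByteString.CopyFrom
// would make.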
private static Func<byte[], ByteString> CreateByteStringCtorFunc()
{
ConstructorInfo byteStringCtor = typeof(ByteString).GetConstructor(BindingFlags.NonPublic | BindingFlags.Instance, null, new[] { typeof(byte[]) }, null);
DynamicMethod dynamicMethod = new DynamicMethod(
"ByteStringCtor",
typeof(ByteString),
new[] { typeof(byte[]) },
typeof(ActivityExtensions).Module,
skipVisibility: true);
var generator = dynamicMethod.GetILGenerator();
generator.Emit(OpCodes.Ldarg_0);
generator.Emit(OpCodes.Newobj, byteStringCtor);
generator.Emit(OpCodes.Ret);
return (Func<byte[], ByteString>)dynamicMethod.CreateDelegate(typeof(Func<byte[], ByteString>));
}
[MethodImpl(MethodImplOptions.AggressiveInlining)]
private static OtlpCommon.KeyValue CreateOtlpKeyValue(string key, OtlpCommon.AnyValue value)
{
return new OtlpCommon.KeyValue { Key = key, Value = value };
}
private struct TagEnumerationState : IActivityEnumerator<KeyValuePair<string, object>>, PeerServiceResolver.IPeerServiceState
{
public bool Created;
public PooledList<OtlpCommon.KeyValue> Tags;
public string StatusCode;
public string StatusDescription;
public string PeerService { get; set; }
public int? PeerServicePriority { get; set; }
public string HostName { get; set; }
public string IpAddress { get; set; }
public long Port { get; set; }
public bool ForEach(KeyValuePair<string, object> activityTag)
{
if (activityTag.Value == null)
{
return true;
}
var key = activityTag.Key;
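// Status tags are captured on the side and later converted into the
// OTLP Status message instead of being exported as span attributes.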
switch (key)
{
case SpanAttributeConstants.StatusCodeKey:
this.StatusCode = activityTag.Value as string;
return true;
case SpanAttributeConstants.StatusDescriptionKey:
this.StatusDescription = activityTag.Value as string;
return true;
}
if (!this.Created)
{
this.Tags = PooledList<OtlpCommon.KeyValue>.Create();
this.Created = true;
}
switch (activityTag.Value)
{
case string s:
PeerServiceResolver.InspectTag(ref this, key, s);
PooledList<OtlpCommon.KeyValue>.Add(ref this.Tags, CreateOtlpKeyValue(key, new OtlpCommon.AnyValue { StringValue = s }));
break;
case bool b:
PooledList<OtlpCommon.KeyValue>.Add(ref this.Tags, CreateOtlpKeyValue(key, new OtlpCommon.AnyValue { BoolValue = b }));
break;
case int i:
PeerServiceResolver.InspectTag(ref this, key, i);
PooledList<OtlpCommon.KeyValue>.Add(ref this.Tags, CreateOtlpKeyValue(key, new OtlpCommon.AnyValue { IntValue = i }));
break;
case long l:
PooledList<OtlpCommon.KeyValue>.Add(ref this.Tags, CreateOtlpKeyValue(key, new OtlpCommon.AnyValue { IntValue = l }));
break;
case double d:
PooledList<OtlpCommon.KeyValue>.Add(ref this.Tags, CreateOtlpKeyValue(key, new OtlpCommon.AnyValue { DoubleValue = d }));
break;
case int[] intArray:
foreach (var item in intArray)
{
PooledList<OtlpCommon.KeyValue>.Add(ref this.Tags, CreateOtlpKeyValue(key, new OtlpCommon.AnyValue { IntValue = item }));
}
break;
case double[] doubleArray:
foreach (var item in doubleArray)
{
PooledList<OtlpCommon.KeyValue>.Add(ref this.Tags, CreateOtlpKeyValue(key, new OtlpCommon.AnyValue { DoubleValue = item }));
}
break;
case bool[] boolArray:
foreach (var item in boolArray)
{
PooledList<OtlpCommon.KeyValue>.Add(ref this.Tags, CreateOtlpKeyValue(key, new OtlpCommon.AnyValue { BoolValue = item }));
}
break;
case string[] stringArray:
foreach (var item in stringArray)
{
PooledList<OtlpCommon.KeyValue>.Add(ref this.Tags, CreateOtlpKeyValue(key, new OtlpCommon.AnyValue { StringValue = item }));
}
break;
default:
PooledList<OtlpCommon.KeyValue>.Add(ref this.Tags, CreateOtlpKeyValue(key, new OtlpCommon.AnyValue { StringValue = activityTag.Value.ToString() }));
break;
}
return true;
}
}
private struct EventEnumerationState : IActivityEnumerator<ActivityEvent>
{
public bool Created;
public PooledList<OtlpTrace.Span.Types.Event> Events;
public bool ForEach(ActivityEvent activityEvent)
{
if (!this.Created)
{
this.Events = PooledList<OtlpTrace.Span.Types.Event>.Create();
this.Created = true;
}
PooledList<OtlpTrace.Span.Types.Event>.Add(ref this.Events, ToOtlpEvent(activityEvent));
return true;
}
}
private struct LinkEnumerationState : IActivityEnumerator<ActivityLink>
{
public bool Created;
public PooledList<OtlpTrace.Span.Types.Link> Links;
public bool ForEach(ActivityLink activityLink)
{
if (!this.Created)
{
this.Links = PooledList<OtlpTrace.Span.Types.Link>.Create();
this.Created = true;
}
PooledList<OtlpTrace.Span.Types.Link>.Add(ref this.Links, ToOtlpLink(activityLink));
return true;
}
}
}
}
| 1 | 19,339 | Don't see any more TODOs here. | open-telemetry-opentelemetry-dotnet | .cs |
@@ -190,6 +190,13 @@ func (task *Task) DockerConfig(container *Container) (*docker.Config, *DockerCli
}
func (task *Task) dockerConfig(container *Container) (*docker.Config, *DockerClientConfigError) {
+ // Detect the name for S3 images
+ dockerImage := container.Image
+ if strings.HasPrefix(dockerImage, "s3://") {
+ slice := strings.Split(dockerImage, "/")
+ dockerImage = slice[len(slice)-1]
+ }
+
dockerVolumes, err := task.dockerConfigVolumes(container)
if err != nil {
return nil, &DockerClientConfigError{err.Error()} | 1 | // Copyright 2014-2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License"). You may
// not use this file except in compliance with the License. A copy of the
// License is located at
//
// http://aws.amazon.com/apache2.0/
//
// or in the "license" file accompanying this file. This file is distributed
// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
// express or implied. See the License for the specific language governing
// permissions and limitations under the License.
package api
import (
"encoding/json"
"errors"
"strconv"
"strings"
"github.com/aws/amazon-ecs-agent/agent/acs/model/ecsacs"
"github.com/aws/amazon-ecs-agent/agent/engine/emptyvolume"
"github.com/aws/amazon-ecs-agent/agent/utils/ttime"
"github.com/aws/aws-sdk-go/private/protocol/json/jsonutil"
"github.com/fsouza/go-dockerclient"
)
const emptyHostVolumeName = "~internal~ecs-emptyvolume-source"
// PostUnmarshalTask is run after a task has been unmarshalled, but before it has been
// run. It is possible it will be subsequently called after that and should be
// able to handle such an occurrence appropriately (e.g. behave idempotently).
func (task *Task) PostUnmarshalTask() {
// TODO, add rudimentary plugin support and call any plugins that want to
// hook into this
task.initializeEmptyVolumes()
}
func (task *Task) initializeEmptyVolumes() {
requiredEmptyVolumes := []string{}
for _, container := range task.Containers {
for _, mountPoint := range container.MountPoints {
vol, ok := task.HostVolumeByName(mountPoint.SourceVolume)
if !ok {
continue
}
if _, ok := vol.(*EmptyHostVolume); ok {
if container.RunDependencies == nil {
container.RunDependencies = make([]string, 0)
}
container.RunDependencies = append(container.RunDependencies, emptyHostVolumeName)
requiredEmptyVolumes = append(requiredEmptyVolumes, mountPoint.SourceVolume)
}
}
}
if len(requiredEmptyVolumes) == 0 {
// No need to create the auxiliary 'empty-volumes' container
return
}
// If we have required empty volumes, add an 'internal' container that handles all
// of them
_, ok := task.ContainerByName(emptyHostVolumeName)
if !ok {
mountPoints := make([]MountPoint, len(requiredEmptyVolumes))
for i, volume := range requiredEmptyVolumes {
containerPath := "/ecs-empty-volume/" + volume
mountPoints[i] = MountPoint{SourceVolume: volume, ContainerPath: containerPath}
}
sourceContainer := &Container{
Name: emptyHostVolumeName,
Image: emptyvolume.Image + ":" + emptyvolume.Tag,
Command: []string{"not-applicable"}, // Command required, but this only gets created so N/A
MountPoints: mountPoints,
Essential: false,
IsInternal: true,
DesiredStatus: ContainerRunning,
}
task.Containers = append(task.Containers, sourceContainer)
}
}
func (task *Task) _containersByName() map[string]*Container {
task.containersByNameLock.Lock()
defer task.containersByNameLock.Unlock()
if task.containersByName != nil {
return task.containersByName
}
task.containersByName = make(map[string]*Container)
for _, container := range task.Containers {
task.containersByName[container.Name] = container
}
return task.containersByName
}
func (task *Task) ContainerByName(name string) (*Container, bool) {
container, ok := task._containersByName()[name]
return container, ok
}
// HostVolumeByName returns the task Volume for the given volume name in that
// task. The second return value indicates the presence of that volume.
func (task *Task) HostVolumeByName(name string) (HostVolume, bool) {
for _, v := range task.Volumes {
if v.Name == name {
return v.Volume, true
}
}
return nil, false
}
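// UpdateMountPoints fills in the host paths of the task's empty host
// volumes using the container-path -> host-path map reported by docker.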
func (task *Task) UpdateMountPoints(cont *Container, vols map[string]string) {
for _, mountPoint := range cont.MountPoints {
hostPath, ok := vols[mountPoint.ContainerPath]
if !ok {
// /path/ -> /path
hostPath, ok = vols[strings.TrimRight(mountPoint.ContainerPath, "/")]
}
if ok {
if hostVolume, exists := task.HostVolumeByName(mountPoint.SourceVolume); exists {
if empty, ok := hostVolume.(*EmptyHostVolume); ok {
empty.HostPath = hostPath
}
}
}
}
}
// updateContainerDesiredStatus sets all containers' desired statuses to the
// task's desired status.
func (task *Task) updateContainerDesiredStatus() {
for _, c := range task.Containers {
if c.DesiredStatus < task.DesiredStatus.ContainerStatus() {
c.DesiredStatus = task.DesiredStatus.ContainerStatus()
}
}
}
// updateTaskKnownStatus updates the given task's status based on its
// containers' statuses.
// It updates to the minimum status across all containers.
// It returns a TaskStatus indicating what change occurred, or TaskStatusNone
// if there was no change.
func (task *Task) updateTaskKnownStatus() (newStatus TaskStatus) {
llog := log.New("task", task)
llog.Debug("Updating task")
// Set to a large 'impossible' status that can't be the min
earliestStatus := ContainerZombie
for _, cont := range task.Containers {
if cont.KnownStatus < earliestStatus {
earliestStatus = cont.KnownStatus
}
}
llog.Debug("Earliest status is " + earliestStatus.String())
if task.KnownStatus < earliestStatus.TaskStatus() {
task.SetKnownStatus(earliestStatus.TaskStatus())
return task.KnownStatus
}
return TaskStatusNone
}
// Overridden returns a copy of the task with all container's overridden and
// itself overridden as well
func (task *Task) Overridden() *Task {
result := *task
// Task has no overrides currently, just do the containers
// Shallow copy, take care of the deeper bits too
result.containersByNameLock.Lock()
result.containersByName = make(map[string]*Container)
result.containersByNameLock.Unlock()
result.Containers = make([]*Container, len(result.Containers))
for i, cont := range task.Containers {
result.Containers[i] = cont.Overridden()
}
return &result
}
// DockerConfig converts the given container in this task to the format of
// GoDockerClient's 'Config' struct
func (task *Task) DockerConfig(container *Container) (*docker.Config, *DockerClientConfigError) {
return task.Overridden().dockerConfig(container.Overridden())
}
func (task *Task) dockerConfig(container *Container) (*docker.Config, *DockerClientConfigError) {
dockerVolumes, err := task.dockerConfigVolumes(container)
if err != nil {
return nil, &DockerClientConfigError{err.Error()}
}
dockerEnv := make([]string, 0, len(container.Environment))
for envKey, envVal := range container.Environment {
dockerEnv = append(dockerEnv, envKey+"="+envVal)
}
// Convert MB to B
dockerMem := int64(container.Memory * 1024 * 1024)
if dockerMem != 0 && dockerMem < DOCKER_MINIMUM_MEMORY {
dockerMem = DOCKER_MINIMUM_MEMORY
}
var entryPoint []string
if container.EntryPoint != nil {
entryPoint = *container.EntryPoint
}
config := &docker.Config{
Image: container.Image,
Cmd: container.Command,
Entrypoint: entryPoint,
ExposedPorts: task.dockerExposedPorts(container),
Volumes: dockerVolumes,
Env: dockerEnv,
Memory: dockerMem,
CPUShares: task.dockerCpuShares(container.Cpu),
}
if container.DockerConfig.Config != nil {
err := json.Unmarshal([]byte(*container.DockerConfig.Config), &config)
if err != nil {
return nil, &DockerClientConfigError{"Unable to decode given docker config: " + err.Error()}
}
}
if config.Labels == nil {
config.Labels = make(map[string]string)
}
// Augment labels with some metadata from the agent. Explicitly do this last
// such that it will always override duplicates in the provided raw config
// data.
config.Labels["com.amazonaws.ecs.task-arn"] = task.Arn
config.Labels["com.amazonaws.ecs.container-name"] = container.Name
config.Labels["com.amazonaws.ecs.task-definition-family"] = task.Family
config.Labels["com.amazonaws.ecs.task-definition-version"] = task.Version
return config, nil
}
// Docker silently converts 0 to 1024 CPU shares, which is probably not what we
// want. Instead, we convert 0 to 2 to be closer to expected behavior. The
// reason for 2 over 1 is that 1 is an invalid value (Linux's choice, not
// Docker's).
func (task *Task) dockerCpuShares(containerCpu uint) int64 {
if containerCpu <= 1 {
log.Debug("Converting CPU shares to allowed minimum of 2", "task", task.Arn, "cpuShares", containerCpu)
return 2
}
return int64(containerCpu)
}
func (task *Task) dockerExposedPorts(container *Container) map[docker.Port]struct{} {
dockerExposedPorts := make(map[docker.Port]struct{})
for _, portBinding := range container.Ports {
dockerPort := docker.Port(strconv.Itoa(int(portBinding.ContainerPort)) + "/" + portBinding.Protocol.String())
dockerExposedPorts[dockerPort] = struct{}{}
}
return dockerExposedPorts
}
func (task *Task) dockerConfigVolumes(container *Container) (map[string]struct{}, error) {
volumeMap := make(map[string]struct{})
for _, m := range container.MountPoints {
vol, exists := task.HostVolumeByName(m.SourceVolume)
if !exists {
return nil, &badVolumeError{"Container " + container.Name + " in task " + task.Arn + " references invalid volume " + m.SourceVolume}
}
// you can handle most volume mount types in the HostConfig at run-time;
// empty mounts are created by docker at create-time (Config) so set
// them here.
if container.Name == emptyHostVolumeName && container.IsInternal {
_, ok := vol.(*EmptyHostVolume)
if !ok {
return nil, &badVolumeError{"Empty volume container in task " + task.Arn + " was the wrong type"}
}
volumeMap[m.ContainerPath] = struct{}{}
}
}
return volumeMap, nil
}
func (task *Task) DockerHostConfig(container *Container, dockerContainerMap map[string]*DockerContainer) (*docker.HostConfig, *HostConfigError) {
return task.Overridden().dockerHostConfig(container.Overridden(), dockerContainerMap)
}
func (task *Task) dockerHostConfig(container *Container, dockerContainerMap map[string]*DockerContainer) (*docker.HostConfig, *HostConfigError) {
dockerLinkArr, err := task.dockerLinks(container, dockerContainerMap)
if err != nil {
return nil, &HostConfigError{err.Error()}
}
dockerPortMap := task.dockerPortMap(container)
volumesFrom, err := task.dockerVolumesFrom(container, dockerContainerMap)
if err != nil {
return nil, &HostConfigError{err.Error()}
}
binds, err := task.dockerHostBinds(container)
if err != nil {
return nil, &HostConfigError{err.Error()}
}
hostConfig := &docker.HostConfig{
Links: dockerLinkArr,
Binds: binds,
PortBindings: dockerPortMap,
VolumesFrom: volumesFrom,
}
if container.DockerConfig.HostConfig != nil {
err := json.Unmarshal([]byte(*container.DockerConfig.HostConfig), hostConfig)
if err != nil {
return nil, &HostConfigError{"Unable to decode given host config: " + err.Error()}
}
}
return hostConfig, nil
}
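// dockerLinks resolves task-level links of the form "name" or "name:alias"
// into docker links against the docker names of the target containers.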
func (task *Task) dockerLinks(container *Container, dockerContainerMap map[string]*DockerContainer) ([]string, error) {
dockerLinkArr := make([]string, len(container.Links))
for i, link := range container.Links {
linkParts := strings.Split(link, ":")
if len(linkParts) > 2 {
return []string{}, errors.New("Invalid link format")
}
linkName := linkParts[0]
var linkAlias string
if len(linkParts) == 2 {
linkAlias = linkParts[1]
} else {
log.Warn("Warning, link with no linkalias", "linkName", linkName, "task", task, "container", container)
linkAlias = linkName
}
targetContainer, ok := dockerContainerMap[linkName]
if !ok {
return []string{}, errors.New("Link target not available: " + linkName)
}
dockerLinkArr[i] = targetContainer.DockerName + ":" + linkAlias
}
return dockerLinkArr, nil
}
func (task *Task) dockerPortMap(container *Container) map[docker.Port][]docker.PortBinding {
dockerPortMap := make(map[docker.Port][]docker.PortBinding)
for _, portBinding := range container.Ports {
dockerPort := docker.Port(strconv.Itoa(int(portBinding.ContainerPort)) + "/" + portBinding.Protocol.String())
currentMappings, existing := dockerPortMap[dockerPort]
if existing {
dockerPortMap[dockerPort] = append(currentMappings, docker.PortBinding{HostIP: "0.0.0.0", HostPort: strconv.Itoa(int(portBinding.HostPort))})
} else {
dockerPortMap[dockerPort] = []docker.PortBinding{{HostIP: "0.0.0.0", HostPort: strconv.Itoa(int(portBinding.HostPort))}}
}
}
return dockerPortMap
}
func (task *Task) dockerVolumesFrom(container *Container, dockerContainerMap map[string]*DockerContainer) ([]string, error) {
volumesFrom := make([]string, len(container.VolumesFrom))
for i, volume := range container.VolumesFrom {
targetContainer, ok := dockerContainerMap[volume.SourceContainer]
if !ok {
return []string{}, errors.New("Volume target not available: " + volume.SourceContainer)
}
if volume.ReadOnly {
volumesFrom[i] = targetContainer.DockerName + ":ro"
} else {
volumesFrom[i] = targetContainer.DockerName
}
}
return volumesFrom, nil
}
func (task *Task) dockerHostBinds(container *Container) ([]string, error) {
if container.Name == emptyHostVolumeName {
// emptyHostVolumes are handled as a special case in config, not
// hostConfig
return []string{}, nil
}
binds := make([]string, len(container.MountPoints))
for i, mountPoint := range container.MountPoints {
hv, ok := task.HostVolumeByName(mountPoint.SourceVolume)
if !ok {
return []string{}, errors.New("Invalid volume referenced: " + mountPoint.SourceVolume)
}
if hv.SourcePath() == "" || mountPoint.ContainerPath == "" {
log.Error("Unable to resolve volume mounts; invalid path: " + container.Name + " " + mountPoint.SourceVolume + "; " + hv.SourcePath() + " -> " + mountPoint.ContainerPath)
return []string{}, errors.New("Unable to resolve volume mounts; invalid path: " + container.Name + " " + mountPoint.SourceVolume + "; " + hv.SourcePath() + " -> " + mountPoint.ContainerPath)
}
bind := hv.SourcePath() + ":" + mountPoint.ContainerPath
if mountPoint.ReadOnly {
bind += ":ro"
}
binds[i] = bind
}
return binds, nil
}
func TaskFromACS(acsTask *ecsacs.Task, envelope *ecsacs.PayloadMessage) (*Task, error) {
data, err := jsonutil.BuildJSON(acsTask)
if err != nil {
return nil, err
}
task := &Task{}
err = json.Unmarshal(data, task)
if err != nil {
return nil, err
}
if task.DesiredStatus == TaskRunning && envelope.SeqNum != nil {
task.StartSequenceNumber = *envelope.SeqNum
} else if task.DesiredStatus == TaskStopped && envelope.SeqNum != nil {
task.StopSequenceNumber = *envelope.SeqNum
}
return task, nil
}
// updateTaskDesiredStatus determines what status the task should properly be at based on its containers' statuses
func (task *Task) updateTaskDesiredStatus() {
llog := log.New("task", task)
llog.Debug("Updating task")
// A task's desired status is stopped if any essential container is stopped
// Otherwise, the task's desired status is unchanged (typically running, but no need to change)
for _, cont := range task.Containers {
if cont.Essential && (cont.KnownStatus.Terminal() || cont.DesiredStatus.Terminal()) {
llog.Debug("Updating task desired status to stopped", "container", cont.Name)
task.DesiredStatus = TaskStopped
}
}
}
// UpdateStatus updates a task's known and desired statuses to be compatible
// with all of its containers.
// It returns a bool indicating whether there was a change.
func (t *Task) UpdateStatus() bool {
change := t.updateTaskKnownStatus()
// DesiredStatus can change based on a new known status
t.UpdateDesiredStatus()
return change != TaskStatusNone
}
func (t *Task) UpdateDesiredStatus() {
t.updateTaskDesiredStatus()
t.updateContainerDesiredStatus()
}
func (t *Task) SetKnownStatus(status TaskStatus) {
t.KnownStatus = status
t.KnownStatusTime = ttime.Now()
}
| 1 | 13,890 | Can you extract this out to a constant? | aws-amazon-ecs-agent | go |
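A minimal sketch of the refactor the reviewer asks for: hoist the magic "s3://" literal from the patch above into a named constant. The constant and helper names here are assumptions chosen for illustration, not identifiers from the actual change.
package api
import "strings"
// s3ImagePrefix marks images referenced through an S3 locator.
// (Hypothetical name; the patch above inlines the literal.)
const s3ImagePrefix = "s3://"
// dockerImageName strips an s3:// locator down to the bare image name,
// mirroring the prefix check in the patch.
func dockerImageName(image string) string {
	if strings.HasPrefix(image, s3ImagePrefix) {
		parts := strings.Split(image, "/")
		return parts[len(parts)-1]
	}
	return image
}
With this in place, the change in dockerConfig would reduce to a single call: dockerImage := dockerImageName(container.Image).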
@@ -452,12 +452,12 @@ def main(args):
quiet_build=args.quiet_build,
logfile=logfile
)
-
- log_module = __load_module("log")
__update_if_key_exists(args, log_args, "verbose")
+ log_module = __load_module("log")
LOG.debug("Calling LOG with args:")
LOG.debug(log_args)
+
log_module.main(log_args)
elif 'logfile' in args:
logfile = args.logfile | 1 | # -------------------------------------------------------------------------
# The CodeChecker Infrastructure
# This file is distributed under the University of Illinois Open Source
# License. See LICENSE.TXT for details.
# -------------------------------------------------------------------------
"""
Check implements a wrapper over 'log' + 'analyze' + 'store', essentially
giving an easy way to perform analysis from a log command and print results to
stdout.
"""
import argparse
import os
import shutil
import sys
from libcodechecker import libhandlers
from libcodechecker import util
from libcodechecker.analyze.analyzers import analyzer_types
from libcodechecker.logger import add_verbose_arguments
from libcodechecker.logger import LoggerFactory
LOG = LoggerFactory.get_new_logger('CHECK')
class OrderedCheckersAction(argparse.Action):
"""
Action to store enabled and disabled checkers
and keep ordering from command line.
Create separate lists based on the checker names for
each analyzer.
"""
# Users can supply invocation to 'codechecker-analyze' as follows:
# -e core -d core.uninitialized -e core.uninitialized.Assign
# We must support having multiple '-e' and '-d' options and the order
# specified must be kept when the list of checkers are assembled for Clang.
def __init__(self, option_strings, dest, nargs=None, **kwargs):
if nargs is not None:
raise ValueError("nargs not allowed")
super(OrderedCheckersAction, self).__init__(option_strings, dest,
**kwargs)
def __call__(self, parser, namespace, value, option_string=None):
if 'ordered_checkers' not in namespace:
namespace.ordered_checkers = []
ordered_checkers = namespace.ordered_checkers
ordered_checkers.append((value, self.dest == 'enable'))
namespace.ordered_checkers = ordered_checkers
class DeprecatedOptionAction(argparse.Action):
"""
Deprecated argument action.
"""
def __init__(self,
option_strings,
dest,
nargs=0,
const=None,
default=None,
type=None,
choices=None,
required=False,
help=None,
metavar=None,
kill_if_used=False,
error_string=None):
super(DeprecatedOptionAction, self). \
__init__(option_strings,
dest,
const='deprecated_option',
default=argparse.SUPPRESS,
type=None,
nargs=nargs,
choices=None,
required=False,
help="(Usage of this argument is DEPRECATED and has no "
"effect!)",
metavar='')
self.__error_string = error_string
self.__kill_if_used = kill_if_used
def __call__(self, parser, namespace, value=None, option_string=None):
if not self.__error_string:
LOG.warning("Deprecated command line option used: '" +
option_string + "'")
else:
LOG.warning(self.__error_string)
if self.__kill_if_used:
setattr(namespace, '_deprecated', True)
def get_argparser_ctor_args():
"""
This method returns a dict containing the kwargs for constructing an
argparse.ArgumentParser (either directly or as a subparser).
"""
return {
'prog': 'CodeChecker check',
'formatter_class': argparse.ArgumentDefaultsHelpFormatter,
# Description is shown when the command's help is queried directly
'description': "Run analysis for a project with storing results "
"in the database. Check only needs a build command or "
"an already existing logfile and performs every step "
"of doing the analysis in batch.",
# Epilogue is shown after the arguments when the help is queried
# directly.
'epilog': "If you wish to reuse the logfile resulting from executing "
"the build, see 'codechecker-log'. To keep analysis "
"results for later, see and use 'codechecker-analyze'. "
"To store previously saved analysis results in a database, "
"see 'codechecker-store'. 'CodeChecker check' exposes a "
"wrapper calling these three commands in succession. Please "
"make sure your build command actually builds the files -- "
"it is advised to execute builds on empty trees, aka. after "
"a 'make clean', as CodeChecker only analyzes files that "
"had been used by the build system. Analysis results can be "
"viewed by connecting to the server which was used in "
"storing the results from a Web browser, or via "
"'CodeChecker cmd'.",
# Help is shown when the "parent" CodeChecker command lists the
# individual subcommands.
'help': "Perform analysis on a project and store results to database."
}
def add_arguments_to_parser(parser):
"""
Add the subcommand's arguments to the given argparse.ArgumentParser.
"""
# Some arguments were deprecated already in 'CodeChecker check'.
parser.add_argument('--keep-tmp',
action=DeprecatedOptionAction)
parser.add_argument('-c', '--clean',
action=DeprecatedOptionAction)
parser.add_argument('--update',
action=DeprecatedOptionAction)
# In 'store', --name is not a required argument by argparse, as 'analyze'
# can prepare a name, which is read after 'store' is started.
# If the name is missing, the user is explicitly warned.
# TODO: This should be an optional argument here too.
parser.add_argument('-n', '--name',
type=str,
dest="name",
required=True,
default=argparse.SUPPRESS,
help="The name of the analysis run to use in storing "
"the reports to the database. If not specified, "
"the '--name' parameter given to 'codechecker-"
"analyze' will be used, if exists.")
# TODO: Workspace is no longer a concept in the new subcommands.
parser.add_argument('-w', '--workspace',
type=str,
default=util.get_default_workspace(),
dest="workspace",
help="Directory where CodeChecker can store analysis "
"related data, such as intermediate result files "
"and the database.")
parser.add_argument('-f', '--force',
dest="force",
default=False,
action='store_true',
required=False,
help="Delete analysis results stored in the database "
"for the current analysis run's name and store "
"only the results reported in the 'input' files. "
"(By default, CodeChecker would keep reports "
"that were coming from files not affected by the "
"analysis, and only incrementally update defect "
"reports for source files that were analysed.)")
log_args = parser.add_argument_group(
"log arguments",
"Specify how the build information database should be obtained. You "
"need to specify either an already existing log file, or a build "
"command which will be used to generate a log file on the fly.")
log_args.add_argument('-q', '--quiet-build',
dest="quiet_build",
action='store_true',
default=False,
required=False,
help="Do not print the output of the build tool "
"into the output of this command.")
log_args = log_args.add_mutually_exclusive_group(required=True)
log_args.add_argument('-b', '--build',
type=str,
dest="command",
default=argparse.SUPPRESS,
required=False,
help="Execute and record a build command. Build "
"commands can be simple calls to 'g++' or "
"'clang++' or 'make', but a more complex "
"command, or the call of a custom script file "
"is also supported.")
log_args.add_argument('-l', '--logfile',
type=str,
dest="logfile",
help="Use an already existing JSON compilation "
"command database file specified at this path.")
analyzer_opts = parser.add_argument_group("analyzer arguments")
analyzer_opts.add_argument('-j', '--jobs',
type=int,
dest="jobs",
required=False,
default=1,
help="Number of threads to use in analysis. "
"More threads mean faster analysis at "
"the cost of using more memory.")
# TODO: Analyze knows '--ignore' also for this.
analyzer_opts.add_argument('-i', '--skip',
dest="skipfile",
required=False,
default=argparse.SUPPRESS,
help="Path to the Skipfile dictating which "
"project files should be omitted from "
"analysis. Please consult the User guide "
"on how a Skipfile should be laid out.")
analyzer_opts.add_argument('--analyzers',
nargs='+',
dest='analyzers',
metavar='ANALYZER',
required=False,
choices=analyzer_types.supported_analyzers,
default=argparse.SUPPRESS,
help="Run analysis only with the analyzers "
"specified. Currently supported analyzers "
"are: " +
', '.join(analyzer_types.
supported_analyzers) + ".")
analyzer_opts.add_argument('--add-compiler-defaults',
action='store_true',
default=False,
required=False,
help="Retrieve compiler-specific configuration "
"from the compilers themselves, and use "
"them with Clang. This is used when the "
"compiler on the system is special, e.g. "
"when doing cross-compilation.")
analyzer_opts.add_argument('--saargs',
dest="clangsa_args_cfg_file",
required=False,
default=argparse.SUPPRESS,
help="File containing argument which will be "
"forwarded verbatim for the Clang Static "
"analyzer.")
analyzer_opts.add_argument('--tidyargs',
dest="tidy_args_cfg_file",
required=False,
default=argparse.SUPPRESS,
help="File containing argument which will be "
"forwarded verbatim for the Clang-Tidy "
"analyzer.")
checkers_opts = parser.add_argument_group(
"checker configuration",
"See 'codechecker-checkers' for the list of available checkers. "
"You can fine-tune which checkers to use in the analysis by setting "
"the enabled and disabled flags starting from the bigger groups "
"and going inwards, e.g. '-e core -d core.uninitialized -e "
"core.uninitialized.Assign' will enable every 'core' checker, but "
"only 'core.uninitialized.Assign' from the 'core.uninitialized' "
"group. Please consult the manual for details. Disabling certain "
"checkers - such as the 'core' group - is unsupported by the LLVM/"
"Clang community, and thus discouraged.")
checkers_opts.add_argument('-e', '--enable',
dest="enable",
metavar='checker/checker-group',
default=argparse.SUPPRESS,
action=OrderedCheckersAction,
help="Set a checker (or checker group) "
"to BE USED in the analysis.")
checkers_opts.add_argument('-d', '--disable',
dest="disable",
metavar='checker/checker-group',
default=argparse.SUPPRESS,
action=OrderedCheckersAction,
help="Set a checker (or checker group) "
"to BE PROHIBITED from use in the "
"analysis.")
# TODO: Analyze does not know '-u', only '--suppress'
parser.add_argument('-u', '--suppress',
type=str,
dest="suppress",
default=argparse.SUPPRESS,
required=False,
help="Path of the suppress file to use. Records in "
"the suppress file are used to suppress the "
"storage of certain results when parsing the "
"analyses' report. (Reports to an analysis "
"result can also be suppressed in the source "
"code -- please consult the manual on how to do "
"so.) NOTE: The suppress file relies on the "
"\"bug identifier\" generated by the analyzers "
"which is experimental, take care when relying "
"on it.")
server_args = parser.add_argument_group(
"server arguments",
"Specifies a 'CodeChecker server' instance which will be used to "
"store the results. This server must be running and listening prior "
"to the 'store' command being ran.")
server_args.add_argument('--host',
type=str,
dest="host",
required=False,
default="localhost",
help="The IP address or hostname of the "
"CodeChecker server.")
server_args.add_argument('-p', '--port',
type=int,
dest="port",
required=False,
default=8001,
help="The port of the server to use for storing.")
# TODO: These arguments have been retroactively removed from 'store'
# and are deprecated here. They should be completely removed.
dbmodes = parser.add_argument_group("database arguments")
dbmodes = dbmodes.add_mutually_exclusive_group(required=False)
db_deprec = "Database connectivity has been removed from 'check'. " \
"Please specify a CodeChecker server address via --host " \
"and --port instead!"
# SQLite is the default, and for 'check', it was deprecated.
# TODO: In 'store', --sqlite has been replaced as an option to specify the
# .sqlite file, essentially replacing the concept of 'workspace'.
dbmodes.add_argument('--sqlite',
kill_if_used=True,
error_string=db_deprec,
action=DeprecatedOptionAction)
dbmodes.add_argument('--postgresql',
kill_if_used=True,
error_string=db_deprec,
action=DeprecatedOptionAction)
pgsql = parser.add_argument_group("PostgreSQL arguments",
"Values of these arguments are ignored, "
"unless '--postgresql' is specified!")
# WARNING: '--dbaddress' default value influences workspace creation
# in SQLite.
# TODO: These are '--db-something' in 'store', not '--dbsomething'.
pgsql.add_argument('--dbaddress',
nargs=1,
kill_if_used=True,
error_string=db_deprec,
action=DeprecatedOptionAction)
pgsql.add_argument('--dbport',
nargs=1,
kill_if_used=True,
error_string=db_deprec,
action=DeprecatedOptionAction)
pgsql.add_argument('--dbusername',
nargs=1,
kill_if_used=True,
error_string=db_deprec,
action=DeprecatedOptionAction)
pgsql.add_argument('--dbname',
nargs=1,
kill_if_used=True,
error_string=db_deprec,
action=DeprecatedOptionAction)
add_verbose_arguments(parser)
def __kill_deprec(args):
if '_deprecated' in args:
LOG.warning("A deprecated argument was passed to the "
"commandline. This argument has no effect anymore, "
"and the behaviour has changed.")
LOG.error("Execution halted: CodeChecker would work in an "
"unexpected way with this argument passed.")
sys.exit(2) # argparse kills with error code 2.
# Call the main process if everything matches.
main(args)
parser.set_defaults(func=__kill_deprec)
def main(args):
"""
Execute a wrapper over log-analyze-store, aka 'check'.
"""
def __load_module(name):
"""Loads the given subcommand's definition from the libs."""
try:
module = libhandlers.load_module(name)
except ImportError:
LOG.error("Couldn't import subcommand '" + name + "'.")
raise
return module
def __update_if_key_exists(source, target, key):
"""Append the source Namespace's element with 'key' to target with
the same key, but only if it exists."""
if key in source:
setattr(target, key, getattr(source, key))
workspace = os.path.abspath(args.workspace)
report_dir = os.path.join(workspace, "reports")
if not os.path.isdir(report_dir):
os.makedirs(report_dir)
logfile = None
try:
# --- Step 1.: Perform logging if build command was specified.
if 'command' in args:
logfile = os.path.join(workspace, "compile_cmd.json")
# Translate the argument list between quickcheck and log.
log_args = argparse.Namespace(
command=args.command,
quiet_build=args.quiet_build,
logfile=logfile
)
log_module = __load_module("log")
__update_if_key_exists(args, log_args, "verbose")
LOG.debug("Calling LOG with args:")
LOG.debug(log_args)
log_module.main(log_args)
elif 'logfile' in args:
logfile = args.logfile
# --- Step 2.: Perform the analysis.
if not os.path.exists(logfile):
raise OSError("The specified logfile '" + logfile + "' does not "
"exist.")
analyze_args = argparse.Namespace(
logfile=[logfile],
output_path=report_dir,
output_format='plist',
jobs=args.jobs,
add_compiler_defaults=args.add_compiler_defaults
)
# Some arguments don't have default values.
# We can't set these keys to None because it would result in an error
# after the call.
args_to_update = ['skipfile',
'analyzers',
'saargs',
'tidyargs',
'ordered_checkers' # enable and disable.
]
for key in args_to_update:
__update_if_key_exists(args, analyze_args, key)
analyze_module = __load_module("analyze")
__update_if_key_exists(args, analyze_args, "verbose")
LOG.debug("Calling ANALYZE with args:")
LOG.debug(analyze_args)
analyze_module.main(analyze_args)
# --- Step 3.: Store to database.
# TODO: The store command supposes that in case of PostgreSQL a
# database instance is already running. The "CodeChecker check" command
# is able to start its own instance in the given workdir, so we pass
# this argument to the argument list. Although this is not used by
# store command at all, the SQL utility is still able to start the
# database. When changing this behavior, the workspace argument should
# be removed from here.
store_args = argparse.Namespace(
workspace=args.workspace,
input=[report_dir],
input_format='plist',
jobs=args.jobs,
force=args.force,
host=args.host,
port=args.port
)
# Some arguments don't have default values.
# We can't set these keys to None because it would result in an error
# after the call.
if 'postgresql' in args:
__update_if_key_exists(args, store_args, 'postgresql')
else:
# If we are saving to a SQLite database, the wrapped 'check'
# command used to do it in the workspace folder.
setattr(store_args, 'sqlite', os.path.join(workspace,
"codechecker.sqlite"))
setattr(store_args, 'postgresql', False)
args_to_update = ['suppress',
'name'
]
for key in args_to_update:
__update_if_key_exists(args, store_args, key)
store_module = __load_module("store")
__update_if_key_exists(args, store_args, "verbose")
LOG.debug("Calling STORE with args:")
LOG.debug(store_args)
store_module.main(store_args)
# Show a hint for server start.
LOG.info("To view results, open the CodeChecker server "
"'http://{0}:{1}' in your browser".format(args.host,
args.port))
except ImportError:
LOG.error("Check failed: couldn't import a library.")
except Exception as ex:
LOG.error("Running check failed. " + ex.message)
finally:
LOG.debug("Cleaning up reports folder ...")
shutil.rmtree(report_dir)
if 'command' in args and logfile:
# Only remove the build.json if it was on-the-fly created by us!
LOG.debug("Cleaning up build.json ...")
os.remove(logfile)
LOG.debug("Check finished.")
| 1 | 7,390 | any testcase for saargs, and tidyargs argument processing? | Ericsson-codechecker | c |
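A minimal sketch of the kind of test the review above asks for, replicating just the two options so it runs standalone (build_check_parser is a hypothetical stand-in for however CodeChecker actually constructs the 'check' parser):

import argparse
import unittest

def build_check_parser():
    # Hypothetical helper: only the two options under review are replicated.
    parser = argparse.ArgumentParser()
    parser.add_argument('--saargs', dest="clangsa_args_cfg_file",
                        required=False, default=argparse.SUPPRESS)
    parser.add_argument('--tidyargs', dest="tidy_args_cfg_file",
                        required=False, default=argparse.SUPPRESS)
    return parser

class SaTidyArgsTest(unittest.TestCase):
    def test_saargs_and_tidyargs_are_parsed(self):
        args = build_check_parser().parse_args(
            ['--saargs', 'sa.cfg', '--tidyargs', 'tidy.cfg'])
        self.assertEqual(args.clangsa_args_cfg_file, 'sa.cfg')
        self.assertEqual(args.tidy_args_cfg_file, 'tidy.cfg')

if __name__ == '__main__':
    unittest.main()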
@@ -18,6 +18,7 @@ namespace Microsoft.Cci.Writers.CSharp
private bool _forCompilation;
private bool _forCompilationIncludeGlobalprefix;
private bool _forCompilationThrowPlatformNotSupported;
+ private string _platformNotSupportedExceptionMessage;
private bool _includeFakeAttributes;
public CSDeclarationWriter(ISyntaxWriter writer) | 1 | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
// See the LICENSE file in the project root for more information.
using System;
using System.Collections.Generic;
using System.Diagnostics.Contracts;
using Microsoft.Cci.Extensions.CSharp;
using Microsoft.Cci.Filters;
using Microsoft.Cci.Writers.Syntax;
namespace Microsoft.Cci.Writers.CSharp
{
public partial class CSDeclarationWriter : ICciDeclarationWriter
{
private readonly ISyntaxWriter _writer;
private readonly ICciFilter _filter;
private bool _forCompilation;
private bool _forCompilationIncludeGlobalprefix;
private bool _forCompilationThrowPlatformNotSupported;
private bool _includeFakeAttributes;
public CSDeclarationWriter(ISyntaxWriter writer)
: this(writer, new PublicOnlyCciFilter())
{
}
public CSDeclarationWriter(ISyntaxWriter writer, ICciFilter filter)
: this(writer, filter, true)
{
}
public CSDeclarationWriter(ISyntaxWriter writer, ICciFilter filter, bool forCompilation)
{
Contract.Requires(writer != null);
_writer = writer;
_filter = filter;
_forCompilation = forCompilation;
_forCompilationIncludeGlobalprefix = false;
_forCompilationThrowPlatformNotSupported = false;
_includeFakeAttributes = false;
}
public CSDeclarationWriter(ISyntaxWriter writer, ICciFilter filter, bool forCompilation, bool includePseudoCustomAttributes = false)
: this(writer, filter, forCompilation)
{
_includeFakeAttributes = includePseudoCustomAttributes;
}
public bool ForCompilation
{
get { return _forCompilation; }
set { _forCompilation = value; }
}
public bool ForCompilationIncludeGlobalPrefix
{
get { return _forCompilationIncludeGlobalprefix; }
set { _forCompilationIncludeGlobalprefix = value; }
}
public bool ForCompilationThrowPlatformNotSupported
{
get { return _forCompilationThrowPlatformNotSupported; }
set { _forCompilationThrowPlatformNotSupported = value; }
}
public ISyntaxWriter SyntaxtWriter { get { return _writer; } }
public ICciFilter Filter { get { return _filter; } }
public void WriteDeclaration(IDefinition definition)
{
if (definition == null)
return;
IAssembly assembly = definition as IAssembly;
if (assembly != null)
{
WriteAssemblyDeclaration(assembly);
return;
}
INamespaceDefinition ns = definition as INamespaceDefinition;
if (ns != null)
{
WriteNamespaceDeclaration(ns);
return;
}
ITypeDefinition type = definition as ITypeDefinition;
if (type != null)
{
WriteTypeDeclaration(type);
return;
}
ITypeDefinitionMember member = definition as ITypeDefinitionMember;
if (member != null)
{
WriteMemberDeclaration(member);
return;
}
DummyInternalConstructor ctor = definition as DummyInternalConstructor;
if (ctor != null)
{
WritePrivateConstructor(ctor.ContainingType);
return;
}
INamedEntity named = definition as INamedEntity;
if (named != null)
{
WriteIdentifier(named.Name);
return;
}
_writer.Write("Unknown definition type {0}", definition.ToString());
}
public void WriteAttribute(ICustomAttribute attribute)
{
WriteSymbol("[");
WriteAttribute(attribute, null);
WriteSymbol("]");
}
public void WriteAssemblyDeclaration(IAssembly assembly)
{
WriteAttributes(assembly.Attributes, prefix: "assembly");
WriteAttributes(assembly.SecurityAttributes, prefix: "assembly");
}
public void WriteMemberDeclaration(ITypeDefinitionMember member)
{
IMethodDefinition method = member as IMethodDefinition;
if (method != null)
{
WriteMethodDefinition(method);
return;
}
IPropertyDefinition property = member as IPropertyDefinition;
if (property != null)
{
WritePropertyDefinition(property);
return;
}
IEventDefinition evnt = member as IEventDefinition;
if (evnt != null)
{
WriteEventDefinition(evnt);
return;
}
IFieldDefinition field = member as IFieldDefinition;
if (field != null)
{
WriteFieldDefinition(field);
return;
}
_writer.Write("Unknown member definitions type {0}", member.ToString());
}
private void WriteVisibility(TypeMemberVisibility visibility)
{
switch (visibility)
{
case TypeMemberVisibility.Public:
WriteKeyword("public"); break;
case TypeMemberVisibility.Private:
WriteKeyword("private"); break;
case TypeMemberVisibility.Assembly:
WriteKeyword("internal"); break;
case TypeMemberVisibility.Family:
WriteKeyword("protected"); break;
case TypeMemberVisibility.FamilyOrAssembly:
WriteKeyword("protected"); WriteKeyword("internal"); break;
case TypeMemberVisibility.FamilyAndAssembly:
WriteKeyword("internal"); WriteKeyword("protected"); break; // Is this right?
default:
WriteKeyword("<Unknown-Visibility>"); break;
}
}
        // Writer helpers: these are the only methods that should directly access _writer
private void WriteKeyword(string keyword, bool noSpace = false)
{
_writer.WriteKeyword(keyword);
if (!noSpace) WriteSpace();
}
private void WriteSymbol(string symbol, bool addSpace = false)
{
_writer.WriteSymbol(symbol);
if (addSpace)
WriteSpace();
}
private void Write(string literal)
{
_writer.Write(literal);
}
private void WriteTypeName(ITypeReference type, bool noSpace = false, bool isDynamic = false, bool useTypeKeywords = true)
{
if (isDynamic)
{
WriteKeyword("dynamic", noSpace: noSpace);
return;
}
NameFormattingOptions namingOptions = NameFormattingOptions.TypeParameters;
if (useTypeKeywords)
namingOptions |= NameFormattingOptions.UseTypeKeywords;
if (_forCompilationIncludeGlobalprefix)
namingOptions |= NameFormattingOptions.UseGlobalPrefix;
if (!_forCompilation)
namingOptions |= NameFormattingOptions.OmitContainingNamespace;
string name = TypeHelper.GetTypeName(type, namingOptions);
if (CSharpCciExtensions.IsKeyword(name))
_writer.WriteKeyword(name);
else
_writer.WriteTypeName(name);
if (!noSpace) WriteSpace();
}
public void WriteIdentifier(string id)
{
WriteIdentifier(id, true);
}
public void WriteIdentifier(string id, bool escape)
{
// Escape keywords
if (escape && CSharpCciExtensions.IsKeyword(id))
id = "@" + id;
_writer.WriteIdentifier(id);
}
private void WriteIdentifier(IName name)
{
WriteIdentifier(name.Value);
}
private void WriteSpace()
{
_writer.Write(" ");
}
private void WriteList<T>(IEnumerable<T> list, Action<T> writeItem)
{
_writer.WriteList(list, writeItem);
}
}
}
| 1 | 12,547 | @ericstj do you think it is worth combining these two? With the presence of the message meaning it is enabled? | dotnet-buildtools | .cs |
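A rough sketch of what the reviewer's suggestion above could look like, with a non-null message standing in for the boolean flag (this reuses the diff's field name but is not the actual change):

// Sketch only: fold the flag into the message, as the comment suggests.
// A non-null message means "throw PlatformNotSupportedException".
public partial class CSDeclarationWriter
{
    private string _platformNotSupportedExceptionMessage; // null => disabled

    private bool ThrowPlatformNotSupported
    {
        get { return _platformNotSupportedExceptionMessage != null; }
    }
}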
@@ -271,8 +271,8 @@ func (s *ec2Ops) DeviceMappings() (map[string]string, error) {
if d.DeviceName != nil && d.Ebs != nil && d.Ebs.VolumeId != nil {
devName := *d.DeviceName
 			// Per AWS docs EC2 instances have the root mounted at
- // /dev/sda1, this label should be skipped
- if devName == "/dev/sda1" {
+			// /dev/sda1 or /dev/xvda; these labels should be skipped
+ if devName == "/dev/sda1" || devName == "/dev/xvda" {
continue
}
// AWS EBS volumes get mapped from /dev/sdN -->/dev/xvdN | 1 | package aws
import (
"fmt"
"strings"
"sync"
"time"
"github.com/Sirupsen/logrus"
"github.com/aws/aws-sdk-go/service/ec2"
"github.com/aws/aws-sdk-go/service/opsworks"
"github.com/libopenstorage/openstorage/api"
)
type ec2Ops struct {
instance string
ec2 *ec2.EC2
mutex sync.Mutex
}
const (
SetIdentifierNone = "None"
)
// Custom AWS volume error codes.
const (
_ = iota + 5000
ErrVolDetached
ErrVolInval
ErrVolAttachedOnRemoteNode
)
// StorageError error returned for AWS volumes
type StorageError struct {
// Code is one of AWS volume error codes.
Code int
// Msg is human understandable error message.
Msg string
// Instance provides more information on the error.
Instance string
}
// StorageOps interface to perform basic operations on aws.
type StorageOps interface {
// Create volume based on input template volume.
// Apply labels as tags on EBS volumes
Create(template *ec2.Volume, labels map[string]string) (*ec2.Volume, error)
// Attach volumeID.
// Return attach path.
Attach(volumeID string) (string, error)
// Detach volumeID.
Detach(volumeID string) error
// Delete volumeID.
Delete(volumeID string) error
// Inspect volumes specified by volumeID
Inspect(volumeIds []*string) ([]*ec2.Volume, error)
// DeviceMappings returns map[local_volume_path]->aws volume ID
DeviceMappings() (map[string]string, error)
// Enumerate EBS volumes that match given filters. Organize them into
// sets identified by setIdentifier.
// labels can be nil, setIdentifier can be empty string.
Enumerate(volumeIds []*string,
labels map[string]string,
setIdentifier string,
) (map[string][]*ec2.Volume, error)
// DevicePath for attached EBS volume.
DevicePath(volume *ec2.Volume) (string, error)
// Snapshot EBS volume
Snapshot(volumeID string, readonly bool) (*ec2.Snapshot, error)
// ApplyTags
ApplyTags(v *ec2.Volume, labels map[string]string) error
// RemoveTags removes labels from the volume
RemoveTags(v *ec2.Volume, labels map[string]string) error
// Tags
Tags(v *ec2.Volume) map[string]string
}
func NewStorageError(code int, msg string, instance string) error {
return &StorageError{Code: code, Msg: msg, Instance: instance}
}
func (e *StorageError) Error() string {
return e.Msg
}
func NewEc2Storage(instance string, ec2 *ec2.EC2) StorageOps {
return &ec2Ops{
instance: instance,
ec2: ec2,
}
}
func (s *ec2Ops) mapVolumeType(awsVol *ec2.Volume) api.StorageMedium {
switch *awsVol.VolumeType {
case opsworks.VolumeTypeGp2:
return api.StorageMedium_STORAGE_MEDIUM_SSD
case opsworks.VolumeTypeIo1:
return api.StorageMedium_STORAGE_MEDIUM_NVME
case opsworks.VolumeTypeStandard:
return api.StorageMedium_STORAGE_MEDIUM_MAGNETIC
}
return api.StorageMedium_STORAGE_MEDIUM_MAGNETIC
}
func (s *ec2Ops) filters(
labels map[string]string,
keys []string,
) []*ec2.Filter {
if len(labels) == 0 {
return nil
}
f := make([]*ec2.Filter, len(labels)+len(keys))
i := 0
for k, v := range labels {
s := string("tag:") + k
value := v
f[i] = &ec2.Filter{Name: &s, Values: []*string{&value}}
i++
}
for _, k := range keys {
s := string("tag-key:") + k
f[i] = &ec2.Filter{Name: &s}
i++
}
return f
}
func (s *ec2Ops) tags(labels map[string]string) []*ec2.Tag {
if len(labels) == 0 {
return nil
}
t := make([]*ec2.Tag, len(labels))
i := 0
for k, v := range labels {
key := k
value := v
t[i] = &ec2.Tag{Key: &key, Value: &value}
i++
}
return t
}
func (s *ec2Ops) waitStatus(id string, desired string) error {
request := &ec2.DescribeVolumesInput{VolumeIds: []*string{&id}}
actual := ""
for retries, maxRetries := 0, 10; actual != desired && retries < maxRetries; retries++ {
awsVols, err := s.ec2.DescribeVolumes(request)
if err != nil {
return err
}
if len(awsVols.Volumes) != 1 {
return fmt.Errorf("expected one volume %v got %v",
id, len(awsVols.Volumes))
}
if awsVols.Volumes[0].State == nil {
return fmt.Errorf("Nil volume state for %v", id)
}
actual = *awsVols.Volumes[0].State
if actual == desired {
break
}
time.Sleep(3 * time.Second)
}
if actual != desired {
return fmt.Errorf(
"Volume %v did not transition to %v current state %v",
id, desired, actual)
}
return nil
}
func (s *ec2Ops) waitAttachmentStatus(
volumeID string,
desired string,
timeout time.Duration,
) (*ec2.Volume, error) {
id := volumeID
request := &ec2.DescribeVolumesInput{VolumeIds: []*string{&id}}
actual := ""
interval := 2 * time.Second
fmt.Printf("Waiting for state transition to %q", desired)
var outVol *ec2.Volume
for elapsed, runs := 0*time.Second, 0; actual != desired && elapsed < timeout; elapsed += interval {
awsVols, err := s.ec2.DescribeVolumes(request)
if err != nil {
return nil, err
}
if len(awsVols.Volumes) != 1 {
return nil, fmt.Errorf("expected one volume %v got %v",
volumeID, len(awsVols.Volumes))
}
outVol = awsVols.Volumes[0]
awsAttachment := awsVols.Volumes[0].Attachments
if awsAttachment == nil || len(awsAttachment) == 0 {
// We have encountered scenarios where AWS returns a nil attachment state
// for a volume transitioning from detaching -> attaching.
actual = ec2.VolumeAttachmentStateDetached
} else {
actual = *awsAttachment[0].State
}
if actual == desired {
break
}
		time.Sleep(interval)
		runs++
		if (runs % 10) == 0 {
fmt.Print(".")
}
}
fmt.Printf("\n")
if actual != desired {
return nil, fmt.Errorf("Volume %v failed to transition to %v current state %v",
volumeID, desired, actual)
}
return outVol, nil
}
func (s *ec2Ops) ApplyTags(
v *ec2.Volume,
labels map[string]string,
) error {
req := &ec2.CreateTagsInput{
Resources: []*string{v.VolumeId},
Tags: s.tags(labels),
}
_, err := s.ec2.CreateTags(req)
return err
}
func (s *ec2Ops) RemoveTags(
v *ec2.Volume,
labels map[string]string,
) error {
req := &ec2.DeleteTagsInput{
Resources: []*string{v.VolumeId},
Tags: s.tags(labels),
}
_, err := s.ec2.DeleteTags(req)
return err
}
func (s *ec2Ops) matchTag(tag *ec2.Tag, match string) bool {
return tag.Key != nil &&
tag.Value != nil &&
len(*tag.Key) != 0 &&
len(*tag.Value) != 0 &&
*tag.Key == match
}
func (s *ec2Ops) addResource(
sets map[string][]*ec2.Volume,
vol *ec2.Volume,
key string,
) {
if s, ok := sets[key]; ok {
sets[key] = append(s, vol)
} else {
sets[key] = []*ec2.Volume{vol}
}
}
func (s *ec2Ops) DeviceMappings() (map[string]string, error) {
instance, err := s.describe()
if err != nil {
return nil, err
}
devPrefix := "/dev/sd"
m := make(map[string]string)
for _, d := range instance.BlockDeviceMappings {
if d.DeviceName != nil && d.Ebs != nil && d.Ebs.VolumeId != nil {
devName := *d.DeviceName
			// Per AWS docs EC2 instances have the root mounted at
// /dev/sda1, this label should be skipped
if devName == "/dev/sda1" {
continue
}
// AWS EBS volumes get mapped from /dev/sdN -->/dev/xvdN
if strings.HasPrefix(devName, devPrefix) {
devName = "/dev/xvd" + devName[len(devPrefix):]
}
m[devName] = *d.Ebs.VolumeId
}
}
return m, nil
}
// describe current instance.
func (s *ec2Ops) describe() (*ec2.Instance, error) {
request := &ec2.DescribeInstancesInput{
InstanceIds: []*string{&s.instance},
}
out, err := s.ec2.DescribeInstances(request)
if err != nil {
return nil, err
}
if len(out.Reservations) != 1 {
return nil, fmt.Errorf("DescribeInstances(%v) returned %v reservations, expect 1",
s.instance, len(out.Reservations))
}
if len(out.Reservations[0].Instances) != 1 {
return nil, fmt.Errorf("DescribeInstances(%v) returned %v Reservations, expect 1",
s.instance, len(out.Reservations[0].Instances))
}
return out.Reservations[0].Instances[0], nil
}
// freeDevices returns list of available device IDs.
func (s *ec2Ops) freeDevices() ([]string, error) {
initial := []byte("fghijklmnop")
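	// Letters 'f'..'p' are the candidate EBS device suffixes; slots are
	// zeroed out below as their letters are found to be in use.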
self, err := s.describe()
if err != nil {
return nil, err
}
devPrefix := "/dev/sd"
for _, dev := range self.BlockDeviceMappings {
if dev.DeviceName == nil {
return nil, fmt.Errorf("Nil device name")
}
devName := *dev.DeviceName
		// per AWS docs EC2 instances have the root mounted at /dev/sda1,
// this label should be skipped
if devName == "/dev/sda1" {
continue
}
if !strings.HasPrefix(devName, devPrefix) {
devPrefix = "/dev/xvd"
if !strings.HasPrefix(devName, devPrefix) {
return nil, fmt.Errorf("bad device name %q", devName)
}
}
letter := devName[len(devPrefix):]
if len(letter) != 1 {
return nil, fmt.Errorf("too many letters %q", devName)
}
// Reset devPrefix for next devices
devPrefix = "/dev/sd"
index := letter[0] - 'f'
if index > ('p' - 'f') {
continue
}
initial[index] = '0'
}
free := make([]string, len(initial))
count := 0
for _, b := range initial {
if b != '0' {
free[count] = devPrefix + string(b)
count++
}
}
if count == 0 {
return nil, fmt.Errorf("No more free devices")
}
return free[:count], nil
}
func (s *ec2Ops) rollbackCreate(id string, createErr error) error {
logrus.Warnf("Rollback create volume %v, Error %v", id, createErr)
err := s.Delete(id)
if err != nil {
logrus.Warnf("Rollback failed volume %v, Error %v", id, err)
}
return createErr
}
func (s *ec2Ops) deleted(v *ec2.Volume) bool {
return *v.State == ec2.VolumeStateDeleting ||
*v.State == ec2.VolumeStateDeleted
}
func (s *ec2Ops) available(v *ec2.Volume) bool {
return *v.State == ec2.VolumeStateAvailable
}
func (s *ec2Ops) Inspect(volumeIds []*string) ([]*ec2.Volume, error) {
req := &ec2.DescribeVolumesInput{VolumeIds: volumeIds}
awsVols, err := s.ec2.DescribeVolumes(req)
if err != nil {
return nil, err
}
return awsVols.Volumes, nil
}
func (s *ec2Ops) Tags(v *ec2.Volume) map[string]string {
labels := make(map[string]string)
for _, tag := range v.Tags {
labels[*tag.Key] = *tag.Value
}
return labels
}
func (s *ec2Ops) Enumerate(
volumeIds []*string,
labels map[string]string,
setIdentifier string,
) (map[string][]*ec2.Volume, error) {
sets := make(map[string][]*ec2.Volume)
// Enumerate all volumes that have same labels.
f := s.filters(labels, nil)
req := &ec2.DescribeVolumesInput{Filters: f, VolumeIds: volumeIds}
awsVols, err := s.ec2.DescribeVolumes(req)
if err != nil {
return nil, err
}
// Volume sets are identified by volumes with the same setIdentifer.
found := false
for _, vol := range awsVols.Volumes {
if s.deleted(vol) {
continue
}
if len(setIdentifier) == 0 {
s.addResource(sets, vol, SetIdentifierNone)
} else {
found = false
for _, tag := range vol.Tags {
if s.matchTag(tag, setIdentifier) {
s.addResource(sets, vol, *tag.Value)
found = true
break
}
}
if !found {
s.addResource(sets, vol, SetIdentifierNone)
}
}
}
return sets, nil
}
func (s *ec2Ops) Create(
v *ec2.Volume,
labels map[string]string,
) (*ec2.Volume, error) {
req := &ec2.CreateVolumeInput{
AvailabilityZone: v.AvailabilityZone,
Encrypted: v.Encrypted,
KmsKeyId: v.KmsKeyId,
Size: v.Size,
VolumeType: v.VolumeType,
SnapshotId: v.SnapshotId,
}
if *v.VolumeType == opsworks.VolumeTypeIo1 {
req.Iops = v.Iops
}
newVol, err := s.ec2.CreateVolume(req)
if err != nil {
return nil, err
}
if err = s.waitStatus(
*newVol.VolumeId,
ec2.VolumeStateAvailable,
); err != nil {
return nil, s.rollbackCreate(*newVol.VolumeId, err)
}
if len(labels) > 0 {
if err = s.ApplyTags(newVol, labels); err != nil {
return nil, s.rollbackCreate(*newVol.VolumeId, err)
}
}
return newVol, nil
}
func (s *ec2Ops) Delete(id string) error {
req := &ec2.DeleteVolumeInput{VolumeId: &id}
_, err := s.ec2.DeleteVolume(req)
return err
}
func (s *ec2Ops) Attach(volumeID string) (string, error) {
s.mutex.Lock()
defer s.mutex.Unlock()
devices, err := s.freeDevices()
if err != nil {
return "", err
}
req := &ec2.AttachVolumeInput{
Device: &devices[0],
InstanceId: &s.instance,
VolumeId: &volumeID,
}
if _, err = s.ec2.AttachVolume(req); err != nil {
return "", err
}
vol, err := s.waitAttachmentStatus(
volumeID,
ec2.VolumeAttachmentStateAttached,
time.Minute,
)
if err != nil {
return "", err
}
return s.DevicePath(vol)
}
func (s *ec2Ops) Detach(volumeID string) error {
force := false
req := &ec2.DetachVolumeInput{
InstanceId: &s.instance,
VolumeId: &volumeID,
Force: &force,
}
if _, err := s.ec2.DetachVolume(req); err != nil {
return err
}
_, err := s.waitAttachmentStatus(volumeID,
ec2.VolumeAttachmentStateDetached,
time.Minute,
)
return err
}
func (s *ec2Ops) Snapshot(
volumeID string,
readonly bool,
) (*ec2.Snapshot, error) {
request := &ec2.CreateSnapshotInput{
VolumeId: &volumeID,
}
return s.ec2.CreateSnapshot(request)
}
func (s *ec2Ops) DevicePath(vol *ec2.Volume) (string, error) {
if vol.Attachments == nil || len(vol.Attachments) == 0 {
return "", NewStorageError(ErrVolDetached,
"Volume is detached", *vol.VolumeId)
}
if vol.Attachments[0].InstanceId == nil {
return "", NewStorageError(ErrVolInval,
"Unable to determine volume instance attachment", "")
}
if s.instance != *vol.Attachments[0].InstanceId {
return "", NewStorageError(ErrVolAttachedOnRemoteNode,
fmt.Sprintf("Volume attached on %q current instance %q",
*vol.Attachments[0].InstanceId, s.instance),
*vol.Attachments[0].InstanceId)
}
if vol.Attachments[0].State == nil {
return "", NewStorageError(ErrVolInval,
"Unable to determine volume attachment state", "")
}
if *vol.Attachments[0].State != ec2.VolumeAttachmentStateAttached {
return "", NewStorageError(ErrVolInval,
fmt.Sprintf("Invalid state %q, volume is not attached",
*vol.Attachments[0].State), "")
}
if vol.Attachments[0].Device == nil {
return "", NewStorageError(ErrVolInval,
"Unable to determine volume attachment path", "")
}
dev := strings.TrimPrefix(*vol.Attachments[0].Device, "/dev/sd")
if dev != *vol.Attachments[0].Device {
dev = "/dev/xvd" + dev
}
return dev, nil
}
| 1 | 6,157 | why not use instance.RootDeviceName ? | libopenstorage-openstorage | go |
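A sketch of the reviewer's suggestion above: instead of hard-coding "/dev/sda1"/"/dev/xvda", compare against the root device that EC2 itself reports (rootDeviceOf is a hypothetical helper; the *ec2.Instance comes from the existing describe() call):

// Sketch only: derive the root device name from the instance metadata.
func rootDeviceOf(instance *ec2.Instance) string {
	if instance.RootDeviceName != nil {
		return *instance.RootDeviceName
	}
	return ""
}

// DeviceMappings could then skip the root volume with:
//   if d.DeviceName != nil && *d.DeviceName == rootDeviceOf(instance) { continue }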
@@ -14,6 +14,7 @@
#include <ios> //std::ios_base::failure
#include <iostream> //std::cout
+#include <fstream> //std::ifstream
#include <mpi.h>
#include <stdexcept> //std::invalid_argument std::exception
#include <vector> | 1 | /*
* Distributed under the OSI-approved Apache License, Version 2.0. See
* accompanying file Copyright.txt for details.
*
* helloBPReader.cpp: Simple self-descriptive example of how to read a variable
* to a BP File.
*
* Try running like this from the build directory:
* mpirun -np 3 ./bin/hello_bpReader
*
* Created on: Feb 16, 2017
* Author: William F Godoy godoywf@ornl.gov
*/
#include <ios> //std::ios_base::failure
#include <iostream> //std::cout
#include <mpi.h>
#include <stdexcept> //std::invalid_argument std::exception
#include <vector>
#include <adios2.h>
int main(int argc, char *argv[])
{
MPI_Init(&argc, &argv);
int rank, size;
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
MPI_Comm_size(MPI_COMM_WORLD, &size);
/** Application variable */
const std::size_t Nx = 10;
std::vector<float> myFloats(Nx);
std::vector<int> myInts(Nx);
try
{
/** ADIOS class factory of IO class objects, DebugON is recommended */
adios2::ADIOS adios(MPI_COMM_WORLD, adios2::DebugON);
/*** IO class object: settings and factory of Settings: Variables,
* Parameters, Transports, and Execution: Engines */
adios2::IO bpIO = adios.DeclareIO("ReadBP");
/** Engine derived class, spawned to start IO operations */
adios2::Engine bpReader =
bpIO.Open("myVector_cpp.bp", adios2::Mode::Read);
const std::map<std::string, adios2::Params> variables =
bpIO.AvailableVariables();
        for (const auto &variablePair : variables)
{
std::cout << "Name: " << variablePair.first;
for (const auto ¶meter : variablePair.second)
{
std::cout << "\t" << parameter.first << ": " << parameter.second
<< "\n";
}
}
        /** Inquire variables to be read */
adios2::Variable<float> bpFloats =
bpIO.InquireVariable<float>("bpFloats");
adios2::Variable<int> bpInts = bpIO.InquireVariable<int>("bpInts");
if (bpFloats) // means found
{
// read only the chunk corresponding to our rank
bpFloats.SetSelection({{Nx * rank}, {Nx}});
// myFloats.data is pre-allocated
bpReader.Get<float>(bpFloats, myFloats.data(), adios2::Mode::Sync);
std::cout << "MyFloats: \n";
for (const auto number : myFloats)
{
std::cout << number << " ";
}
std::cout << "\n";
}
        if (bpInts) // means found
{
// read only the chunk corresponding to our rank
bpInts.SetSelection({{Nx * rank}, {Nx}});
// myInts.data is pre-allocated
bpReader.Get<int>(bpInts, myInts.data(), adios2::Mode::Sync);
std::cout << "MyInts: \n";
for (const auto number : myInts)
{
std::cout << number << " ";
}
std::cout << "\n";
}
/** Close bp file, engine becomes unreachable after this*/
bpReader.Close();
}
catch (std::invalid_argument &e)
{
std::cout << "Invalid argument exception, STOPPING PROGRAM from rank "
<< rank << "\n";
std::cout << e.what() << "\n";
}
catch (std::ios_base::failure &e)
{
std::cout << "IO System base failure exception, STOPPING PROGRAM "
"from rank "
<< rank << "\n";
std::cout << e.what() << "\n";
}
catch (std::exception &e)
{
std::cout << "Exception, STOPPING PROGRAM from rank " << rank << "\n";
std::cout << e.what() << "\n";
}
MPI_Finalize();
return 0;
}
| 1 | 12,932 | not needed, ADIOS2 also needs to check for subfiles. ADIOS2 tries to remove dependency on serial `fstream`. | ornladios-ADIOS2 | cpp |
@@ -50,6 +50,12 @@ class DatabaseDriverReactNative {
});
}
+ loadExtension(path) {
+ return new Promise(() => {
+ throw new Error(`No extension support for ${path} in react-native-sqlite-storage`);
+ });
+ }
+
exec(sql, params = null) {
return new Promise((resolve, reject) => {
this.db_.executeSql( | 1 | const SQLite = require('react-native-sqlite-storage');
class DatabaseDriverReactNative {
constructor() {
this.lastInsertId_ = null;
}
open(options) {
// SQLite.DEBUG(true);
return new Promise((resolve, reject) => {
SQLite.openDatabase(
{ name: options.name },
db => {
this.db_ = db;
resolve();
},
error => {
reject(error);
}
);
});
}
sqliteErrorToJsError(error) {
return error;
}
selectOne(sql, params = null) {
return new Promise((resolve, reject) => {
this.db_.executeSql(
sql,
params,
r => {
resolve(r.rows.length ? r.rows.item(0) : null);
},
error => {
reject(error);
}
);
});
}
selectAll(sql, params = null) {
return this.exec(sql, params).then(r => {
const output = [];
for (let i = 0; i < r.rows.length; i++) {
output.push(r.rows.item(i));
}
return output;
});
}
exec(sql, params = null) {
return new Promise((resolve, reject) => {
this.db_.executeSql(
sql,
params,
r => {
if ('insertId' in r) this.lastInsertId_ = r.insertId;
resolve(r);
},
error => {
reject(error);
}
);
});
}
lastInsertId() {
return this.lastInsertId_;
}
}
module.exports = { DatabaseDriverReactNative };
| 1 | 15,184 | no need to wrap in new Promise - you can simply throw the exception | laurent22-joplin | js |
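What the reviewer suggests above, sketched: throw directly instead of wrapping the throw in a Promise executor (a caller that does `await driver.loadExtension(path)` inside an async function still observes the failure as a rejection):

	// Sketch only: a synchronous throw is enough here; no Promise wrapper needed.
	loadExtension(path) {
		throw new Error(`No extension support for ${path} in react-native-sqlite-storage`);
	}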
@@ -2438,3 +2438,17 @@ func getOCSPStatus(s tls.ConnectionState) (*ocsp.Response, error) {
}
return resp, nil
}
+
+func TestOCSPManualConfig(t *testing.T) {
+ o := DefaultTestOptions
+ o.HTTPHost = "127.0.0.1"
+ o.HTTPSPort = -1
+ o.TLSConfig = &tls.Config{ServerName: "localhost"}
+ cert, err := tls.LoadX509KeyPair("configs/certs/server-cert.pem", "configs/certs/server-key.pem")
+ if err != nil {
+ t.Fatalf("Got error reading certificates: %s", err)
+ }
+ o.TLSConfig.Certificates = []tls.Certificate{cert}
+ s := RunServer(&o)
+ s.Shutdown()
+} | 1 | // Copyright 2021 The NATS Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package test
import (
"bytes"
"context"
"crypto/rsa"
"crypto/tls"
"crypto/x509"
"encoding/base64"
"encoding/pem"
"fmt"
"io/ioutil"
"net/http"
"os"
"path/filepath"
"strconv"
"strings"
"sync"
"testing"
"time"
"github.com/nats-io/nats-server/v2/server"
"github.com/nats-io/nats.go"
"golang.org/x/crypto/ocsp"
)
func TestOCSPAlwaysMustStapleAndShutdown(t *testing.T) {
// Certs that have must staple will auto shutdown the server.
const (
caCert = "configs/certs/ocsp/ca-cert.pem"
caKey = "configs/certs/ocsp/ca-key.pem"
serverCert = "configs/certs/ocsp/server-cert.pem"
serverKey = "configs/certs/ocsp/server-key.pem"
)
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
ocspr := newOCSPResponder(t, caCert, caKey)
defer ocspr.Shutdown(ctx)
addr := fmt.Sprintf("http://%s", ocspr.Addr)
setOCSPStatus(t, addr, serverCert, ocsp.Good)
opts := server.Options{}
opts.Host = "127.0.0.1"
opts.NoLog = true
opts.NoSigs = true
opts.MaxControlLine = 4096
opts.Port = -1
opts.TLSCert = serverCert
opts.TLSKey = serverKey
opts.TLSCaCert = caCert
opts.TLSTimeout = 5
tcOpts := &server.TLSConfigOpts{
CertFile: opts.TLSCert,
KeyFile: opts.TLSKey,
CaFile: opts.TLSCaCert,
Timeout: opts.TLSTimeout,
}
tlsConf, err := server.GenTLSConfig(tcOpts)
if err != nil {
t.Fatal(err)
}
opts.TLSConfig = tlsConf
opts.OCSPConfig = &server.OCSPConfig{
Mode: server.OCSPModeAlways,
OverrideURLs: []string{addr},
}
srv := RunServer(&opts)
defer srv.Shutdown()
nc, err := nats.Connect(fmt.Sprintf("tls://localhost:%d", opts.Port),
nats.Secure(&tls.Config{
VerifyConnection: func(s tls.ConnectionState) error {
resp, err := getOCSPStatus(s)
if err != nil {
return err
}
if resp.Status != ocsp.Good {
return fmt.Errorf("invalid staple")
}
return nil
},
}),
nats.RootCAs(caCert),
nats.ErrorHandler(noOpErrHandler),
)
if err != nil {
t.Fatal(err)
}
sub, err := nc.SubscribeSync("foo")
if err != nil {
t.Fatal(err)
}
nc.Publish("foo", []byte("hello world"))
nc.Flush()
_, err = sub.NextMsg(1 * time.Second)
if err != nil {
t.Fatal(err)
}
nc.Close()
	// The server will shut down because its certificate becomes revoked
	// and the policy is to always must-staple. The OCSP Responder
	// instructs the NATS Server to fetch OCSP Staples every 2 seconds.
time.Sleep(2 * time.Second)
setOCSPStatus(t, addr, serverCert, ocsp.Revoked)
time.Sleep(2 * time.Second)
// Should be connection refused since server will abort now.
_, err = nats.Connect(fmt.Sprintf("tls://localhost:%d", opts.Port),
nats.RootCAs(caCert),
nats.ErrorHandler(noOpErrHandler),
)
if err != nats.ErrNoServers {
t.Errorf("Expected connection refused")
}
}
func TestOCSPMustStapleShutdown(t *testing.T) {
const (
caCert = "configs/certs/ocsp/ca-cert.pem"
caKey = "configs/certs/ocsp/ca-key.pem"
serverCert = "configs/certs/ocsp/server-status-request-cert.pem"
serverKey = "configs/certs/ocsp/server-status-request-key.pem"
)
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
ocspr := newOCSPResponder(t, caCert, caKey)
defer ocspr.Shutdown(ctx)
addr := fmt.Sprintf("http://%s", ocspr.Addr)
setOCSPStatus(t, addr, serverCert, ocsp.Good)
opts := server.Options{}
opts.Host = "127.0.0.1"
opts.NoLog = true
opts.NoSigs = true
opts.MaxControlLine = 4096
opts.Port = -1
opts.TLSCert = serverCert
opts.TLSKey = serverKey
opts.TLSCaCert = caCert
opts.TLSTimeout = 5
tlsConfigOpts := &server.TLSConfigOpts{
CertFile: opts.TLSCert,
KeyFile: opts.TLSKey,
CaFile: opts.TLSCaCert,
Timeout: opts.TLSTimeout,
}
tlsConf, err := server.GenTLSConfig(tlsConfigOpts)
if err != nil {
t.Fatal(err)
}
opts.TLSConfig = tlsConf
opts.OCSPConfig = &server.OCSPConfig{
Mode: server.OCSPModeMust,
OverrideURLs: []string{addr},
}
srv := RunServer(&opts)
defer srv.Shutdown()
nc, err := nats.Connect(fmt.Sprintf("tls://localhost:%d", opts.Port),
nats.Secure(&tls.Config{
VerifyConnection: func(s tls.ConnectionState) error {
resp, err := getOCSPStatus(s)
if err != nil {
return err
}
if resp.Status != ocsp.Good {
return fmt.Errorf("invalid staple")
}
return nil
},
}),
nats.RootCAs(caCert),
nats.ErrorHandler(noOpErrHandler),
)
if err != nil {
t.Fatal(err)
}
sub, err := nc.SubscribeSync("foo")
if err != nil {
t.Fatal(err)
}
nc.Publish("foo", []byte("hello world"))
nc.Flush()
_, err = sub.NextMsg(1 * time.Second)
if err != nil {
t.Fatal(err)
}
nc.Close()
	// The server will shut down because its certificate becomes revoked
	// and the policy is to always must-staple. The OCSP Responder
	// instructs the NATS Server to fetch OCSP Staples every 2 seconds.
time.Sleep(2 * time.Second)
setOCSPStatus(t, addr, serverCert, ocsp.Revoked)
time.Sleep(2 * time.Second)
// Should be connection refused since server will abort now.
_, err = nats.Connect(fmt.Sprintf("tls://localhost:%d", opts.Port),
nats.RootCAs(caCert),
nats.ErrorHandler(noOpErrHandler),
)
if err != nats.ErrNoServers {
t.Errorf("Expected connection refused")
}
}
func TestOCSPMustStapleAutoDoesNotShutdown(t *testing.T) {
const (
caCert = "configs/certs/ocsp/ca-cert.pem"
caKey = "configs/certs/ocsp/ca-key.pem"
serverCert = "configs/certs/ocsp/server-status-request-url-01-cert.pem"
)
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
ocspr := newOCSPResponder(t, caCert, caKey)
defer ocspr.Shutdown(ctx)
addr := fmt.Sprintf("http://%s", ocspr.Addr)
setOCSPStatus(t, addr, serverCert, ocsp.Good)
content := `
port: -1
tls {
cert_file: "configs/certs/ocsp/server-status-request-url-01-cert.pem"
key_file: "configs/certs/ocsp/server-status-request-url-01-key.pem"
ca_file: "configs/certs/ocsp/ca-cert.pem"
timeout: 5
}
`
conf := createConfFile(t, []byte(content))
defer removeFile(t, conf)
s, opts := RunServerWithConfig(conf)
defer s.Shutdown()
nc, err := nats.Connect(fmt.Sprintf("tls://localhost:%d", opts.Port),
nats.Secure(&tls.Config{
VerifyConnection: func(s tls.ConnectionState) error {
resp, err := getOCSPStatus(s)
if err != nil {
return err
}
if resp.Status != ocsp.Good {
t.Errorf("Expected valid OCSP staple status")
}
return nil
},
}),
nats.RootCAs(caCert),
nats.ErrorHandler(noOpErrHandler),
)
if err != nil {
t.Fatal(err)
}
sub, err := nc.SubscribeSync("foo")
if err != nil {
t.Fatal(err)
}
nc.Publish("foo", []byte("hello world"))
nc.Flush()
_, err = sub.NextMsg(1 * time.Second)
if err != nil {
t.Fatal(err)
}
nc.Close()
	// The certificate becomes revoked, but the server keeps running and
	// serves a stale OCSP staple instead. The OCSP Responder
	// instructs the NATS Server to fetch OCSP Staples every 2 seconds.
time.Sleep(2 * time.Second)
setOCSPStatus(t, addr, serverCert, ocsp.Revoked)
time.Sleep(2 * time.Second)
	// Should not be connection refused; the server will continue running and
	// the client will be served the stale OCSP staple instead.
_, err = nats.Connect(fmt.Sprintf("tls://localhost:%d", opts.Port),
nats.Secure(&tls.Config{
VerifyConnection: func(s tls.ConnectionState) error {
resp, err := getOCSPStatus(s)
if err != nil {
return err
}
if resp.Status != ocsp.Revoked {
t.Errorf("Expected revoked status")
}
return nil
},
}),
nats.RootCAs(caCert),
nats.ErrorHandler(noOpErrHandler),
)
if err != nil {
t.Fatal(err)
}
}
func TestOCSPAutoWithoutMustStapleDoesNotShutdownOnRevoke(t *testing.T) {
const (
caCert = "configs/certs/ocsp/ca-cert.pem"
caKey = "configs/certs/ocsp/ca-key.pem"
serverCert = "configs/certs/ocsp/server-cert.pem"
serverKey = "configs/certs/ocsp/server-key.pem"
)
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
ocspr := newOCSPResponder(t, caCert, caKey)
defer ocspr.Shutdown(ctx)
addr := fmt.Sprintf("http://%s", ocspr.Addr)
setOCSPStatus(t, addr, serverCert, ocsp.Good)
opts := server.Options{}
opts.Host = "127.0.0.1"
opts.NoLog = true
opts.NoSigs = true
opts.MaxControlLine = 4096
opts.Port = -1
opts.TLSCert = serverCert
opts.TLSKey = serverKey
opts.TLSCaCert = caCert
opts.TLSTimeout = 5
tlsConfigOpts := &server.TLSConfigOpts{
CertFile: opts.TLSCert,
KeyFile: opts.TLSKey,
CaFile: opts.TLSCaCert,
Timeout: opts.TLSTimeout,
}
tlsConf, err := server.GenTLSConfig(tlsConfigOpts)
if err != nil {
t.Fatal(err)
}
opts.TLSConfig = tlsConf
opts.OCSPConfig = &server.OCSPConfig{
Mode: server.OCSPModeAuto,
OverrideURLs: []string{addr},
}
srv := RunServer(&opts)
defer srv.Shutdown()
nc, err := nats.Connect(fmt.Sprintf("tls://localhost:%d", opts.Port),
nats.Secure(&tls.Config{
VerifyConnection: func(s tls.ConnectionState) error {
if s.OCSPResponse != nil {
return fmt.Errorf("Unexpected OCSP staple for certificate")
}
return nil
},
}),
nats.RootCAs(caCert),
nats.ErrorHandler(noOpErrHandler),
)
if err != nil {
t.Fatal(err)
}
sub, err := nc.SubscribeSync("foo")
if err != nil {
t.Fatal(err)
}
nc.Publish("foo", []byte("hello world"))
nc.Flush()
_, err = sub.NextMsg(1 * time.Second)
if err != nil {
t.Fatal(err)
}
nc.Close()
	// Revoke the server certificate; nothing will happen since it does
	// not have MustStaple.
time.Sleep(2 * time.Second)
setOCSPStatus(t, addr, serverCert, ocsp.Revoked)
time.Sleep(2 * time.Second)
// Should not be connection refused since server will continue running.
_, err = nats.Connect(fmt.Sprintf("tls://localhost:%d", opts.Port),
nats.RootCAs(caCert),
nats.ErrorHandler(noOpErrHandler),
)
if err != nil {
t.Errorf("Unexpected error: %s", err)
}
}
func TestOCSPClient(t *testing.T) {
const (
caCert = "configs/certs/ocsp/ca-cert.pem"
caKey = "configs/certs/ocsp/ca-key.pem"
serverCert = "configs/certs/ocsp/server-cert.pem"
serverKey = "configs/certs/ocsp/server-key.pem"
)
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
ocspr := newOCSPResponder(t, caCert, caKey)
ocspURL := fmt.Sprintf("http://%s", ocspr.Addr)
defer ocspr.Shutdown(ctx)
for _, test := range []struct {
name string
config string
opts []nats.Option
err error
rerr error
configure func()
}{
{
"OCSP Stapling makes server fail to boot if status is unknown",
`
port: -1
# Enable OCSP stapling with policy to honor must staple if present.
ocsp: true
tls {
cert_file: "configs/certs/ocsp/server-cert.pem"
key_file: "configs/certs/ocsp/server-key.pem"
ca_file: "configs/certs/ocsp/ca-cert.pem"
timeout: 5
}
`,
[]nats.Option{
nats.ClientCert("./configs/certs/ocsp/client-cert.pem", "./configs/certs/ocsp/client-key.pem"),
nats.RootCAs(caCert),
nats.ErrorHandler(noOpErrHandler),
},
nil,
nil,
func() {},
},
{
"OCSP Stapling ignored by default if server without must staple status",
`
port: -1
ocsp: true
tls {
cert_file: "configs/certs/ocsp/server-cert.pem"
key_file: "configs/certs/ocsp/server-key.pem"
ca_file: "configs/certs/ocsp/ca-cert.pem"
timeout: 5
}
`,
[]nats.Option{
nats.ClientCert("./configs/certs/ocsp/client-cert.pem", "./configs/certs/ocsp/client-key.pem"),
nats.RootCAs(caCert),
nats.ErrorHandler(noOpErrHandler),
},
nil,
nil,
func() { setOCSPStatus(t, ocspURL, serverCert, ocsp.Good) },
},
{
"OCSP Stapling honored by default if server has must staple status",
`
port: -1
tls {
cert_file: "configs/certs/ocsp/server-status-request-url-01-cert.pem"
key_file: "configs/certs/ocsp/server-status-request-url-01-key.pem"
ca_file: "configs/certs/ocsp/ca-cert.pem"
timeout: 5
}
`,
[]nats.Option{
nats.ClientCert("./configs/certs/ocsp/client-cert.pem", "./configs/certs/ocsp/client-key.pem"),
nats.RootCAs(caCert),
nats.ErrorHandler(noOpErrHandler),
},
nil,
nil,
func() {
setOCSPStatus(t, ocspURL, "configs/certs/ocsp/server-status-request-url-01-cert.pem", ocsp.Good)
},
},
{
"OCSP Stapling can be disabled even if server has must staple status",
`
port: -1
ocsp: false
tls {
cert_file: "configs/certs/ocsp/server-status-request-url-01-cert.pem"
key_file: "configs/certs/ocsp/server-status-request-url-01-key.pem"
ca_file: "configs/certs/ocsp/ca-cert.pem"
timeout: 5
}
`,
[]nats.Option{
nats.ClientCert("./configs/certs/ocsp/client-cert.pem", "./configs/certs/ocsp/client-key.pem"),
nats.RootCAs(caCert),
nats.ErrorHandler(noOpErrHandler),
},
nil,
nil,
func() {
setOCSPStatus(t, ocspURL, "configs/certs/ocsp/server-status-request-url-01-cert.pem", ocsp.Revoked)
},
},
} {
t.Run(test.name, func(t *testing.T) {
test.configure()
content := test.config
conf := createConfFile(t, []byte(content))
defer removeFile(t, conf)
s, opts := RunServerWithConfig(conf)
defer s.Shutdown()
nc, err := nats.Connect(fmt.Sprintf("tls://localhost:%d", opts.Port), test.opts...)
if test.err == nil && err != nil {
t.Errorf("Expected to connect, got %v", err)
} else if test.err != nil && err == nil {
t.Errorf("Expected error on connect")
} else if test.err != nil && err != nil {
// Error on connect was expected
if test.err.Error() != err.Error() {
t.Errorf("Expected error %s, got: %s", test.err, err)
}
return
}
defer nc.Close()
nc.Subscribe("ping", func(m *nats.Msg) {
m.Respond([]byte("pong"))
})
nc.Flush()
_, err = nc.Request("ping", []byte("ping"), 250*time.Millisecond)
if test.rerr != nil && err == nil {
t.Errorf("Expected error getting response")
} else if test.rerr == nil && err != nil {
t.Errorf("Expected response")
}
})
}
}
func TestOCSPReloadRotateTLSCertWithNoURL(t *testing.T) {
const (
caCert = "configs/certs/ocsp/ca-cert.pem"
caKey = "configs/certs/ocsp/ca-key.pem"
serverCert = "configs/certs/ocsp/server-status-request-url-01-cert.pem"
updatedServerCert = "configs/certs/ocsp/server-status-request-cert.pem"
)
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
ocspr := newOCSPResponder(t, caCert, caKey)
defer ocspr.Shutdown(ctx)
addr := fmt.Sprintf("http://%s", ocspr.Addr)
setOCSPStatus(t, addr, serverCert, ocsp.Good)
content := `
port: -1
tls {
cert_file: "configs/certs/ocsp/server-status-request-url-01-cert.pem"
key_file: "configs/certs/ocsp/server-status-request-url-01-key.pem"
ca_file: "configs/certs/ocsp/ca-cert.pem"
timeout: 5
}
`
conf := createConfFile(t, []byte(content))
defer removeFile(t, conf)
s, opts := RunServerWithConfig(conf)
defer s.Shutdown()
nc, err := nats.Connect(fmt.Sprintf("tls://localhost:%d", opts.Port),
nats.Secure(&tls.Config{
VerifyConnection: func(s tls.ConnectionState) error {
resp, err := getOCSPStatus(s)
if err != nil {
return err
}
if resp.Status != ocsp.Good {
t.Errorf("Expected valid OCSP staple status")
}
return nil
},
}),
nats.RootCAs(caCert),
nats.ErrorHandler(noOpErrHandler),
)
if err != nil {
t.Fatal(err)
}
sub, err := nc.SubscribeSync("foo")
if err != nil {
t.Fatal(err)
}
nc.Publish("foo", []byte("hello world"))
nc.Flush()
_, err = sub.NextMsg(1 * time.Second)
if err != nil {
t.Fatal(err)
}
nc.Close()
	// Replace the contents with a certificate that will fail to get a staple
	// since it does not have an OCSP URL.
content = `
port: -1
tls {
cert_file: "configs/certs/ocsp/server-status-request-cert.pem"
key_file: "configs/certs/ocsp/server-status-request-key.pem"
ca_file: "configs/certs/ocsp/ca-cert.pem"
timeout: 5
}
`
if err := ioutil.WriteFile(conf, []byte(content), 0666); err != nil {
t.Fatalf("Error writing config: %v", err)
}
	// Reload shows a warning because the cert is missing an OCSP URL, so it
	// cannot be used with OCSP stapling.
if err := s.Reload(); err != nil {
t.Fatal(err)
}
expectedErr := fmt.Errorf("missing OCSP response")
	// The server will not shut down because the reload will fail.
_, err = nats.Connect(fmt.Sprintf("tls://localhost:%d", opts.Port),
nats.Secure(&tls.Config{
VerifyConnection: func(s tls.ConnectionState) error {
			// The new certificate does not have an OCSP Staple since
			// it could not fetch one from an OCSP server.
if s.OCSPResponse == nil {
return expectedErr
}
return nil
},
}),
nats.RootCAs(caCert),
nats.ErrorHandler(noOpErrHandler),
)
if err != expectedErr {
t.Fatalf("Unexpected error: %s", expectedErr)
}
}
func TestOCSPReloadRotateTLSCertDisableMustStaple(t *testing.T) {
const (
caCert = "configs/certs/ocsp/ca-cert.pem"
caKey = "configs/certs/ocsp/ca-key.pem"
serverCert = "configs/certs/ocsp/server-status-request-url-01-cert.pem"
updatedServerCert = "configs/certs/ocsp/server-status-request-cert.pem"
)
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
ocspr := newOCSPResponder(t, caCert, caKey)
defer ocspr.Shutdown(ctx)
addr := fmt.Sprintf("http://%s", ocspr.Addr)
setOCSPStatus(t, addr, serverCert, ocsp.Good)
storeDir := createDir(t, "_ocsp")
defer removeDir(t, storeDir)
originalContent := `
port: -1
store_dir: "%s"
tls {
cert_file: "configs/certs/ocsp/server-status-request-url-01-cert.pem"
key_file: "configs/certs/ocsp/server-status-request-url-01-key.pem"
ca_file: "configs/certs/ocsp/ca-cert.pem"
timeout: 5
}
`
content := fmt.Sprintf(originalContent, storeDir)
conf := createConfFile(t, []byte(content))
defer removeFile(t, conf)
s, opts := RunServerWithConfig(conf)
defer s.Shutdown()
var staple []byte
nc, err := nats.Connect(fmt.Sprintf("tls://localhost:%d", opts.Port),
nats.Secure(&tls.Config{
VerifyConnection: func(s tls.ConnectionState) error {
staple = s.OCSPResponse
resp, err := getOCSPStatus(s)
if err != nil {
return err
}
if resp.Status != ocsp.Good {
t.Errorf("Expected valid OCSP staple status")
}
return nil
},
}),
nats.RootCAs(caCert),
nats.ErrorHandler(noOpErrHandler),
)
if err != nil {
t.Fatal(err)
}
sub, err := nc.SubscribeSync("foo")
if err != nil {
t.Fatal(err)
}
nc.Publish("foo", []byte("hello world"))
nc.Flush()
_, err = sub.NextMsg(1 * time.Second)
if err != nil {
t.Fatal(err)
}
nc.Close()
files := []string{}
err = filepath.Walk(storeDir+"/ocsp/", func(path string, info os.FileInfo, err error) error {
if info.IsDir() {
return nil
}
files = append(files, path)
return nil
})
if err != nil {
t.Fatal(err)
}
found := false
for _, file := range files {
data, err := ioutil.ReadFile(file)
if err != nil {
t.Error(err)
}
if bytes.Equal(staple, data) {
found = true
}
}
if !found {
t.Error("Could not find OCSP Staple")
}
	// Change the contents to a certificate that has OCSP Stapling disabled.
updatedContent := `
port: -1
store_dir: "%s"
tls {
cert_file: "configs/certs/ocsp/server-cert.pem"
key_file: "configs/certs/ocsp/server-key.pem"
ca_file: "configs/certs/ocsp/ca-cert.pem"
timeout: 5
}
`
content = fmt.Sprintf(updatedContent, storeDir)
if err := ioutil.WriteFile(conf, []byte(content), 0666); err != nil {
t.Fatalf("Error writing config: %v", err)
}
if err := s.Reload(); err != nil {
t.Fatal(err)
}
	// The new certificate does not have must-staple, so staples will be missing.
time.Sleep(4 * time.Second)
nc, err = nats.Connect(fmt.Sprintf("tls://localhost:%d", opts.Port),
nats.Secure(&tls.Config{
VerifyConnection: func(s tls.ConnectionState) error {
if s.OCSPResponse != nil {
return fmt.Errorf("unexpected OCSP Staple!")
}
return nil
},
}),
nats.RootCAs(caCert),
nats.ErrorHandler(noOpErrHandler),
)
if err != nil {
t.Fatal(err)
}
nc.Close()
// Re-enable OCSP Stapling
content = fmt.Sprintf(originalContent, storeDir)
if err := ioutil.WriteFile(conf, []byte(content), 0666); err != nil {
t.Fatalf("Error writing config: %v", err)
}
if err := s.Reload(); err != nil {
t.Fatal(err)
}
var newStaple []byte
nc, err = nats.Connect(fmt.Sprintf("tls://localhost:%d", opts.Port),
nats.Secure(&tls.Config{
VerifyConnection: func(s tls.ConnectionState) error {
newStaple = s.OCSPResponse
resp, err := getOCSPStatus(s)
if err != nil {
return err
}
if resp.Status != ocsp.Good {
t.Errorf("Expected valid OCSP staple status")
}
return nil
},
}),
nats.RootCAs(caCert),
nats.ErrorHandler(noOpErrHandler),
)
if err != nil {
t.Fatal(err)
}
nc.Close()
// Confirm that it got a new staple.
files = []string{}
err = filepath.Walk(storeDir+"/ocsp/", func(path string, info os.FileInfo, err error) error {
if info.IsDir() {
return nil
}
files = append(files, path)
return nil
})
if err != nil {
t.Fatal(err)
}
found = false
for _, file := range files {
data, err := ioutil.ReadFile(file)
if err != nil {
t.Error(err)
}
if bytes.Equal(newStaple, data) {
found = true
}
}
if !found {
t.Error("Could not find OCSP Staple")
}
if bytes.Equal(staple, newStaple) {
t.Error("Expected new OCSP Staple")
}
}
func TestOCSPReloadRotateTLSCertEnableMustStaple(t *testing.T) {
const (
caCert = "configs/certs/ocsp/ca-cert.pem"
caKey = "configs/certs/ocsp/ca-key.pem"
serverCert = "configs/certs/ocsp/server-cert.pem"
updatedServerCert = "configs/certs/ocsp/server-status-request-url-01-cert.pem"
)
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
ocspr := newOCSPResponder(t, caCert, caKey)
defer ocspr.Shutdown(ctx)
addr := fmt.Sprintf("http://%s", ocspr.Addr)
setOCSPStatus(t, addr, serverCert, ocsp.Good)
setOCSPStatus(t, addr, updatedServerCert, ocsp.Good)
// Start without OCSP Stapling MustStaple
content := `
port: -1
tls {
cert_file: "configs/certs/ocsp/server-cert.pem"
key_file: "configs/certs/ocsp/server-key.pem"
ca_file: "configs/certs/ocsp/ca-cert.pem"
timeout: 5
}
`
conf := createConfFile(t, []byte(content))
defer removeFile(t, conf)
s, opts := RunServerWithConfig(conf)
defer s.Shutdown()
nc, err := nats.Connect(fmt.Sprintf("tls://localhost:%d", opts.Port),
nats.Secure(&tls.Config{
VerifyConnection: func(s tls.ConnectionState) error {
if s.OCSPResponse != nil {
return fmt.Errorf("unexpected OCSP Staple!")
}
return nil
},
}),
nats.RootCAs(caCert),
nats.ErrorHandler(noOpErrHandler),
)
if err != nil {
t.Fatal(err)
}
sub, err := nc.SubscribeSync("foo")
if err != nil {
t.Fatal(err)
}
nc.Publish("foo", []byte("hello world"))
nc.Flush()
_, err = sub.NextMsg(1 * time.Second)
if err != nil {
t.Fatal(err)
}
nc.Close()
	// Change the contents to a certificate that has OCSP Stapling enabled.
content = `
port: -1
tls {
cert_file: "configs/certs/ocsp/server-status-request-url-01-cert.pem"
key_file: "configs/certs/ocsp/server-status-request-url-01-key.pem"
ca_file: "configs/certs/ocsp/ca-cert.pem"
timeout: 5
}
`
if err := ioutil.WriteFile(conf, []byte(content), 0666); err != nil {
t.Fatalf("Error writing config: %v", err)
}
if err := s.Reload(); err != nil {
t.Fatal(err)
}
	// The new certificate has must-staple, so a staple should now be served.
time.Sleep(2 * time.Second)
nc, err = nats.Connect(fmt.Sprintf("tls://localhost:%d", opts.Port),
nats.Secure(&tls.Config{
VerifyConnection: func(s tls.ConnectionState) error {
resp, err := getOCSPStatus(s)
if err != nil {
return err
}
if resp.Status != ocsp.Good {
t.Errorf("Expected valid OCSP staple status")
}
return nil
},
}),
nats.RootCAs(caCert),
nats.ErrorHandler(noOpErrHandler),
)
if err != nil {
t.Fatal(err)
}
nc.Close()
}
func TestOCSPCluster(t *testing.T) {
const (
caCert = "configs/certs/ocsp/ca-cert.pem"
caKey = "configs/certs/ocsp/ca-key.pem"
)
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
ocspr := newOCSPResponder(t, caCert, caKey)
defer ocspr.Shutdown(ctx)
addr := fmt.Sprintf("http://%s", ocspr.Addr)
setOCSPStatus(t, addr, "configs/certs/ocsp/server-status-request-url-01-cert.pem", ocsp.Good)
setOCSPStatus(t, addr, "configs/certs/ocsp/server-status-request-url-02-cert.pem", ocsp.Good)
setOCSPStatus(t, addr, "configs/certs/ocsp/server-status-request-url-03-cert.pem", ocsp.Good)
setOCSPStatus(t, addr, "configs/certs/ocsp/server-status-request-url-04-cert.pem", ocsp.Good)
setOCSPStatus(t, addr, "configs/certs/ocsp/server-status-request-url-05-cert.pem", ocsp.Good)
setOCSPStatus(t, addr, "configs/certs/ocsp/server-status-request-url-06-cert.pem", ocsp.Good)
setOCSPStatus(t, addr, "configs/certs/ocsp/server-status-request-url-07-cert.pem", ocsp.Good)
setOCSPStatus(t, addr, "configs/certs/ocsp/server-status-request-url-08-cert.pem", ocsp.Good)
// Store Dirs
storeDirA := createDir(t, "_ocspA")
defer removeDir(t, storeDirA)
storeDirB := createDir(t, "_ocspB")
defer removeDir(t, storeDirB)
storeDirC := createDir(t, "_ocspC")
defer removeDir(t, storeDirC)
// Seed server configuration
srvConfA := `
host: "127.0.0.1"
port: -1
server_name: "AAA"
tls {
cert_file: "configs/certs/ocsp/server-status-request-url-01-cert.pem"
key_file: "configs/certs/ocsp/server-status-request-url-01-key.pem"
ca_file: "configs/certs/ocsp/ca-cert.pem"
timeout: 5
}
store_dir: "%s"
cluster {
name: AB
host: "127.0.0.1"
advertise: 127.0.0.1
port: -1
tls {
cert_file: "configs/certs/ocsp/server-status-request-url-02-cert.pem"
key_file: "configs/certs/ocsp/server-status-request-url-02-key.pem"
ca_file: "configs/certs/ocsp/ca-cert.pem"
timeout: 5
}
}
`
srvConfA = fmt.Sprintf(srvConfA, storeDirA)
sconfA := createConfFile(t, []byte(srvConfA))
defer removeFile(t, sconfA)
srvA, optsA := RunServerWithConfig(sconfA)
defer srvA.Shutdown()
// The rest
srvConfB := `
host: "127.0.0.1"
port: -1
server_name: "BBB"
tls {
cert_file: "configs/certs/ocsp/server-status-request-url-03-cert.pem"
key_file: "configs/certs/ocsp/server-status-request-url-03-key.pem"
ca_file: "configs/certs/ocsp/ca-cert.pem"
timeout: 5
}
store_dir: "%s"
cluster {
name: AB
host: "127.0.0.1"
advertise: 127.0.0.1
port: -1
routes: [ nats://127.0.0.1:%d ]
connect_retries: 30
tls {
cert_file: "configs/certs/ocsp/server-status-request-url-04-cert.pem"
key_file: "configs/certs/ocsp/server-status-request-url-04-key.pem"
ca_file: "configs/certs/ocsp/ca-cert.pem"
timeout: 5
}
}
`
srvConfB = fmt.Sprintf(srvConfB, storeDirB, optsA.Cluster.Port)
conf := createConfFile(t, []byte(srvConfB))
defer removeFile(t, conf)
srvB, optsB := RunServerWithConfig(conf)
defer srvB.Shutdown()
// Client connects to server A.
cA, err := nats.Connect(fmt.Sprintf("tls://localhost:%d", optsA.Port),
nats.Secure(&tls.Config{
VerifyConnection: func(s tls.ConnectionState) error {
if s.OCSPResponse == nil {
return fmt.Errorf("missing OCSP Staple from server")
}
return nil
},
}),
nats.RootCAs(caCert),
nats.ErrorHandler(noOpErrHandler),
)
if err != nil {
t.Fatal(err)
}
checkClusterFormed(t, srvA, srvB)
	// Revoke the seed server's cluster certificate; servers that join afterwards will not be able to verify the connection.
setOCSPStatus(t, addr, "configs/certs/ocsp/server-status-request-url-02-cert.pem", ocsp.Revoked)
	// The original set of servers can still communicate with each other, even though the cert has been revoked.
	// NOTE: Should we unplug from the cluster in case our server cert is revoked and the OCSP policy is always or must?
checkClusterFormed(t, srvA, srvB)
// Wait for seed server to notice that its certificate has been revoked,
// so that new routes can't connect to it.
time.Sleep(6 * time.Second)
// Start another server against the seed server that has an invalid OCSP Staple
srvConfC := `
host: "127.0.0.1"
port: -1
server_name: "CCC"
tls {
cert_file: "configs/certs/ocsp/server-status-request-url-05-cert.pem"
key_file: "configs/certs/ocsp/server-status-request-url-05-key.pem"
ca_file: "configs/certs/ocsp/ca-cert.pem"
timeout: 5
}
store_dir: "%s"
cluster {
name: AB
host: "127.0.0.1"
advertise: 127.0.0.1
port: -1
routes: [ nats://127.0.0.1:%d ]
connect_retries: 30
tls {
cert_file: "configs/certs/ocsp/server-status-request-url-06-cert.pem"
key_file: "configs/certs/ocsp/server-status-request-url-06-key.pem"
ca_file: "configs/certs/ocsp/ca-cert.pem"
timeout: 5
}
}
`
srvConfC = fmt.Sprintf(srvConfC, storeDirC, optsA.Cluster.Port)
conf = createConfFile(t, []byte(srvConfC))
defer removeFile(t, conf)
srvC, optsC := RunServerWithConfig(conf)
defer srvC.Shutdown()
cB, err := nats.Connect(fmt.Sprintf("tls://localhost:%d", optsB.Port),
nats.Secure(&tls.Config{
VerifyConnection: func(s tls.ConnectionState) error {
if s.OCSPResponse == nil {
return fmt.Errorf("missing OCSP Staple from server")
}
return nil
},
}),
nats.RootCAs(caCert),
nats.ErrorHandler(noOpErrHandler),
)
if err != nil {
t.Fatal(err)
}
cC, err := nats.Connect(fmt.Sprintf("tls://localhost:%d", optsC.Port),
nats.Secure(&tls.Config{
VerifyConnection: func(s tls.ConnectionState) error {
if s.OCSPResponse == nil {
return fmt.Errorf("missing OCSP Staple from server")
}
return nil
},
}),
nats.RootCAs(caCert),
nats.ErrorHandler(noOpErrHandler),
)
if err != nil {
t.Fatal(err)
}
// There should be no connectivity between the clients due to the revoked staple.
_, err = cA.Subscribe("foo", func(m *nats.Msg) {
m.Respond(nil)
})
if err != nil {
t.Errorf("%v", err)
}
cA.Flush()
_, err = cB.Subscribe("bar", func(m *nats.Msg) {
m.Respond(nil)
})
if err != nil {
t.Fatal(err)
}
cB.Flush()
resp, err := cC.Request("foo", nil, 2*time.Second)
if err == nil {
t.Errorf("Unexpected success, response: %+v", resp)
}
resp, err = cC.Request("bar", nil, 2*time.Second)
if err == nil {
t.Errorf("Unexpected success, response: %+v", resp)
}
	// Switch the certs from the seed server to new ones that are not revoked;
	// this should restart OCSP Stapling for the cluster routes.
srvConfA = `
host: "127.0.0.1"
port: -1
server_name: "AAA"
tls {
cert_file: "configs/certs/ocsp/server-status-request-url-07-cert.pem"
key_file: "configs/certs/ocsp/server-status-request-url-07-key.pem"
ca_file: "configs/certs/ocsp/ca-cert.pem"
timeout: 5
}
store_dir: "%s"
cluster {
port: -1
name: AB
host: "127.0.0.1"
advertise: 127.0.0.1
connect_retries: 30
tls {
cert_file: "configs/certs/ocsp/server-status-request-url-08-cert.pem"
key_file: "configs/certs/ocsp/server-status-request-url-08-key.pem"
ca_file: "configs/certs/ocsp/ca-cert.pem"
timeout: 5
}
}
`
srvConfA = fmt.Sprintf(srvConfA, storeDirA)
if err := ioutil.WriteFile(sconfA, []byte(srvConfA), 0666); err != nil {
t.Fatalf("Error writing config: %v", err)
}
if err := srvA.Reload(); err != nil {
t.Fatal(err)
}
// Wait to get a new OCSP Staple.
time.Sleep(10 * time.Second)
checkClusterFormed(t, srvA, srvB, srvC)
	// Now clients connected to C can communicate with B and A.
_, err = cC.Request("foo", nil, 2*time.Second)
if err != nil {
t.Errorf("%v", err)
}
_, err = cC.Request("bar", nil, 2*time.Second)
if err != nil {
t.Errorf("%v", err)
}
}
func TestOCSPLeaf(t *testing.T) {
const (
caCert = "configs/certs/ocsp/ca-cert.pem"
caKey = "configs/certs/ocsp/ca-key.pem"
)
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
ocspr := newOCSPResponder(t, caCert, caKey)
defer ocspr.Shutdown(ctx)
addr := fmt.Sprintf("http://%s", ocspr.Addr)
setOCSPStatus(t, addr, "configs/certs/ocsp/server-status-request-url-01-cert.pem", ocsp.Good)
setOCSPStatus(t, addr, "configs/certs/ocsp/server-status-request-url-02-cert.pem", ocsp.Good)
setOCSPStatus(t, addr, "configs/certs/ocsp/server-status-request-url-03-cert.pem", ocsp.Good)
setOCSPStatus(t, addr, "configs/certs/ocsp/server-status-request-url-04-cert.pem", ocsp.Good)
setOCSPStatus(t, addr, "configs/certs/ocsp/server-status-request-url-05-cert.pem", ocsp.Good)
setOCSPStatus(t, addr, "configs/certs/ocsp/server-status-request-url-06-cert.pem", ocsp.Good)
setOCSPStatus(t, addr, "configs/certs/ocsp/server-status-request-url-07-cert.pem", ocsp.Good)
setOCSPStatus(t, addr, "configs/certs/ocsp/server-status-request-url-08-cert.pem", ocsp.Good)
// Store Dirs
storeDirA := createDir(t, "_ocspA")
defer removeDir(t, storeDirA)
storeDirB := createDir(t, "_ocspB")
defer removeDir(t, storeDirB)
storeDirC := createDir(t, "_ocspC")
defer removeDir(t, storeDirC)
// LeafNode server configuration
srvConfA := `
host: "127.0.0.1"
port: -1
server_name: "AAA"
tls {
cert_file: "configs/certs/ocsp/server-status-request-url-01-cert.pem"
key_file: "configs/certs/ocsp/server-status-request-url-01-key.pem"
ca_file: "configs/certs/ocsp/ca-cert.pem"
timeout: 5
}
store_dir: "%s"
leafnodes {
host: "127.0.0.1"
port: -1
advertise: "127.0.0.1"
tls {
cert_file: "configs/certs/ocsp/server-status-request-url-02-cert.pem"
key_file: "configs/certs/ocsp/server-status-request-url-02-key.pem"
ca_file: "configs/certs/ocsp/ca-cert.pem"
timeout: 5
}
}
`
srvConfA = fmt.Sprintf(srvConfA, storeDirA)
sconfA := createConfFile(t, []byte(srvConfA))
defer removeFile(t, sconfA)
srvA, optsA := RunServerWithConfig(sconfA)
defer srvA.Shutdown()
// LeafNode that has the original as a remote.
srvConfB := `
host: "127.0.0.1"
port: -1
server_name: "BBB"
tls {
cert_file: "configs/certs/ocsp/server-status-request-url-03-cert.pem"
key_file: "configs/certs/ocsp/server-status-request-url-03-key.pem"
ca_file: "configs/certs/ocsp/ca-cert.pem"
timeout: 5
}
store_dir: "%s"
leafnodes {
remotes: [ {
url: "tls://127.0.0.1:%d"
tls {
cert_file: "configs/certs/ocsp/server-status-request-url-04-cert.pem"
key_file: "configs/certs/ocsp/server-status-request-url-04-key.pem"
ca_file: "configs/certs/ocsp/ca-cert.pem"
timeout: 5
}
} ]
}
`
srvConfB = fmt.Sprintf(srvConfB, storeDirB, optsA.LeafNode.Port)
conf := createConfFile(t, []byte(srvConfB))
defer removeFile(t, conf)
srvB, optsB := RunServerWithConfig(conf)
defer srvB.Shutdown()
// Client connects to server A.
cA, err := nats.Connect(fmt.Sprintf("tls://127.0.0.1:%d", optsA.Port),
nats.Secure(&tls.Config{
VerifyConnection: func(s tls.ConnectionState) error {
if s.OCSPResponse == nil {
return fmt.Errorf("missing OCSP Staple from server")
}
return nil
},
}),
nats.RootCAs(caCert),
nats.ErrorHandler(noOpErrHandler),
)
if err != nil {
t.Fatal(err)
}
// checkLeafNodeConnected(t, srvA)
	// Revoke the hub server's leafnode certificate; remotes that connect afterwards will not be able to verify the connection.
setOCSPStatus(t, addr, "configs/certs/ocsp/server-status-request-url-02-cert.pem", ocsp.Revoked)
	// The original set of servers can still communicate with each other, even though the cert has been revoked.
// checkLeafNodeConnected(t, srvA)
// Wait for seed server to notice that its certificate has been revoked,
// so that new leafnodes can't connect to it.
time.Sleep(6 * time.Second)
// Start another server against the seed server that has an invalid OCSP Staple
srvConfC := `
host: "127.0.0.1"
port: -1
server_name: "CCC"
tls {
cert_file: "configs/certs/ocsp/server-status-request-url-05-cert.pem"
key_file: "configs/certs/ocsp/server-status-request-url-05-key.pem"
ca_file: "configs/certs/ocsp/ca-cert.pem"
timeout: 5
}
store_dir: "%s"
leafnodes {
remotes: [ {
url: "tls://127.0.0.1:%d"
tls {
cert_file: "configs/certs/ocsp/server-status-request-url-06-cert.pem"
key_file: "configs/certs/ocsp/server-status-request-url-06-key.pem"
ca_file: "configs/certs/ocsp/ca-cert.pem"
timeout: 5
}
} ]
}
`
srvConfC = fmt.Sprintf(srvConfC, storeDirC, optsA.LeafNode.Port)
conf = createConfFile(t, []byte(srvConfC))
defer removeFile(t, conf)
srvC, optsC := RunServerWithConfig(conf)
defer srvC.Shutdown()
cB, err := nats.Connect(fmt.Sprintf("tls://127.0.0.1:%d", optsB.Port),
nats.Secure(&tls.Config{
VerifyConnection: func(s tls.ConnectionState) error {
if s.OCSPResponse == nil {
return fmt.Errorf("missing OCSP Staple from server")
}
return nil
},
}),
nats.RootCAs(caCert),
nats.ErrorHandler(noOpErrHandler),
)
if err != nil {
t.Fatal(err)
}
cC, err := nats.Connect(fmt.Sprintf("tls://127.0.0.1:%d", optsC.Port),
nats.Secure(&tls.Config{
VerifyConnection: func(s tls.ConnectionState) error {
if s.OCSPResponse == nil {
return fmt.Errorf("missing OCSP Staple from server")
}
return nil
},
}),
nats.RootCAs(caCert),
nats.ErrorHandler(noOpErrHandler),
)
if err != nil {
t.Fatal(err)
}
// There should be no connectivity between the clients due to the revoked staple.
_, err = cA.Subscribe("foo", func(m *nats.Msg) {
m.Respond(nil)
})
if err != nil {
t.Errorf("%v", err)
}
cA.Flush()
_, err = cB.Subscribe("bar", func(m *nats.Msg) {
m.Respond(nil)
})
if err != nil {
t.Fatal(err)
}
cB.Flush()
resp, err := cC.Request("foo", nil, 2*time.Second)
if err == nil {
t.Errorf("Unexpected success, response: %+v", resp)
}
resp, err = cC.Request("bar", nil, 2*time.Second)
if err == nil {
t.Errorf("Unexpected success, response: %+v", resp)
}
	// Switch the certs from the leafnode server to new ones that are not revoked;
	// this should restart OCSP Stapling for the leafnode server.
srvConfA = `
host: "127.0.0.1"
port: -1
server_name: "AAA"
tls {
cert_file: "configs/certs/ocsp/server-status-request-url-07-cert.pem"
key_file: "configs/certs/ocsp/server-status-request-url-07-key.pem"
ca_file: "configs/certs/ocsp/ca-cert.pem"
timeout: 5
}
store_dir: "%s"
leafnodes {
host: "127.0.0.1"
port: -1
advertise: "127.0.0.1"
tls {
cert_file: "configs/certs/ocsp/server-status-request-url-08-cert.pem"
key_file: "configs/certs/ocsp/server-status-request-url-08-key.pem"
ca_file: "configs/certs/ocsp/ca-cert.pem"
timeout: 5
}
}
`
srvConfA = fmt.Sprintf(srvConfA, storeDirA)
if err := ioutil.WriteFile(sconfA, []byte(srvConfA), 0666); err != nil {
t.Fatalf("Error writing config: %v", err)
}
if err := srvA.Reload(); err != nil {
t.Fatal(err)
}
time.Sleep(4 * time.Second)
// A <-> A
_, err = cA.Request("foo", nil, 2*time.Second)
if err != nil {
t.Errorf("%v", err)
}
// B <-> A
_, err = cB.Request("foo", nil, 2*time.Second)
if err != nil {
t.Errorf("%v", err)
}
// C <-> A
_, err = cC.Request("foo", nil, 2*time.Second)
if err != nil {
t.Errorf("%v", err)
}
// C <-> B via leafnode A
_, err = cC.Request("bar", nil, 2*time.Second)
if err != nil {
t.Errorf("%v", err)
}
}
func TestOCSPGateway(t *testing.T) {
const (
caCert = "configs/certs/ocsp/ca-cert.pem"
caKey = "configs/certs/ocsp/ca-key.pem"
)
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
ocspr := newOCSPResponder(t, caCert, caKey)
defer ocspr.Shutdown(ctx)
addr := fmt.Sprintf("http://%s", ocspr.Addr)
setOCSPStatus(t, addr, "configs/certs/ocsp/server-status-request-url-01-cert.pem", ocsp.Good)
setOCSPStatus(t, addr, "configs/certs/ocsp/server-status-request-url-02-cert.pem", ocsp.Good)
setOCSPStatus(t, addr, "configs/certs/ocsp/server-status-request-url-03-cert.pem", ocsp.Good)
setOCSPStatus(t, addr, "configs/certs/ocsp/server-status-request-url-04-cert.pem", ocsp.Good)
setOCSPStatus(t, addr, "configs/certs/ocsp/server-status-request-url-05-cert.pem", ocsp.Good)
setOCSPStatus(t, addr, "configs/certs/ocsp/server-status-request-url-06-cert.pem", ocsp.Good)
setOCSPStatus(t, addr, "configs/certs/ocsp/server-status-request-url-07-cert.pem", ocsp.Good)
setOCSPStatus(t, addr, "configs/certs/ocsp/server-status-request-url-08-cert.pem", ocsp.Good)
// Store Dirs
storeDirA := createDir(t, "_ocspA")
defer removeDir(t, storeDirA)
storeDirB := createDir(t, "_ocspB")
defer removeDir(t, storeDirB)
storeDirC := createDir(t, "_ocspC")
defer removeDir(t, storeDirC)
// Gateway server configuration
srvConfA := `
host: "127.0.0.1"
port: -1
server_name: "AAA"
tls {
cert_file: "configs/certs/ocsp/server-status-request-url-01-cert.pem"
key_file: "configs/certs/ocsp/server-status-request-url-01-key.pem"
ca_file: "configs/certs/ocsp/ca-cert.pem"
timeout: 5
}
store_dir: "%s"
gateway {
name: A
host: "127.0.0.1"
port: -1
advertise: "127.0.0.1"
tls {
cert_file: "configs/certs/ocsp/server-status-request-url-02-cert.pem"
key_file: "configs/certs/ocsp/server-status-request-url-02-key.pem"
ca_file: "configs/certs/ocsp/ca-cert.pem"
timeout: 5
}
}
`
srvConfA = fmt.Sprintf(srvConfA, storeDirA)
sconfA := createConfFile(t, []byte(srvConfA))
defer removeFile(t, sconfA)
srvA, optsA := RunServerWithConfig(sconfA)
defer srvA.Shutdown()
	// Gateway B has the first gateway (A) as a remote.
srvConfB := `
host: "127.0.0.1"
port: -1
server_name: "BBB"
tls {
cert_file: "configs/certs/ocsp/server-status-request-url-03-cert.pem"
key_file: "configs/certs/ocsp/server-status-request-url-03-key.pem"
ca_file: "configs/certs/ocsp/ca-cert.pem"
timeout: 5
}
store_dir: "%s"
gateway {
name: B
host: "127.0.0.1"
advertise: "127.0.0.1"
port: -1
gateways: [{
name: "A"
url: "nats://127.0.0.1:%d"
tls {
cert_file: "configs/certs/ocsp/server-status-request-url-04-cert.pem"
key_file: "configs/certs/ocsp/server-status-request-url-04-key.pem"
ca_file: "configs/certs/ocsp/ca-cert.pem"
timeout: 5
}
}]
tls {
cert_file: "configs/certs/ocsp/server-status-request-url-04-cert.pem"
key_file: "configs/certs/ocsp/server-status-request-url-04-key.pem"
ca_file: "configs/certs/ocsp/ca-cert.pem"
timeout: 5
}
}
`
srvConfB = fmt.Sprintf(srvConfB, storeDirB, optsA.Gateway.Port)
conf := createConfFile(t, []byte(srvConfB))
defer removeFile(t, conf)
srvB, optsB := RunServerWithConfig(conf)
defer srvB.Shutdown()
// Client connects to server A.
cA, err := nats.Connect(fmt.Sprintf("tls://127.0.0.1:%d", optsA.Port),
nats.Secure(&tls.Config{
VerifyConnection: func(s tls.ConnectionState) error {
if s.OCSPResponse == nil {
return fmt.Errorf("missing OCSP Staple from server")
}
return nil
},
}),
nats.RootCAs(caCert),
nats.ErrorHandler(noOpErrHandler),
)
if err != nil {
t.Fatal(err)
}
waitForOutboundGateways(t, srvB, 1, 5*time.Second)
	// Revoke the first gateway's certificate; gateways that connect afterwards will not be able to verify the connection.
setOCSPStatus(t, addr, "configs/certs/ocsp/server-status-request-url-02-cert.pem", ocsp.Revoked)
	// The original set of servers can still communicate with each other, even though the cert has been revoked.
waitForOutboundGateways(t, srvA, 1, 5*time.Second)
waitForOutboundGateways(t, srvB, 1, 5*time.Second)
// Wait for gateway A to notice that its certificate has been revoked,
// so that new gateways can't connect to it.
time.Sleep(6 * time.Second)
// Start another server against the seed server that has an invalid OCSP Staple
srvConfC := `
host: "127.0.0.1"
port: -1
server_name: "CCC"
tls {
cert_file: "configs/certs/ocsp/server-status-request-url-05-cert.pem"
key_file: "configs/certs/ocsp/server-status-request-url-05-key.pem"
ca_file: "configs/certs/ocsp/ca-cert.pem"
timeout: 5
}
store_dir: "%s"
gateway {
name: C
host: "127.0.0.1"
advertise: "127.0.0.1"
port: -1
gateways: [{name: "A", url: "nats://127.0.0.1:%d" }]
tls {
cert_file: "configs/certs/ocsp/server-status-request-url-06-cert.pem"
key_file: "configs/certs/ocsp/server-status-request-url-06-key.pem"
ca_file: "configs/certs/ocsp/ca-cert.pem"
timeout: 5
}
}
`
srvConfC = fmt.Sprintf(srvConfC, storeDirC, optsA.Gateway.Port)
conf = createConfFile(t, []byte(srvConfC))
defer removeFile(t, conf)
srvC, optsC := RunServerWithConfig(conf)
defer srvC.Shutdown()
cB, err := nats.Connect(fmt.Sprintf("tls://127.0.0.1:%d", optsB.Port),
nats.Secure(&tls.Config{
VerifyConnection: func(s tls.ConnectionState) error {
if s.OCSPResponse == nil {
return fmt.Errorf("missing OCSP Staple from server")
}
return nil
},
}),
nats.RootCAs(caCert),
nats.ErrorHandler(noOpErrHandler),
)
if err != nil {
t.Fatal(err)
}
cC, err := nats.Connect(fmt.Sprintf("tls://127.0.0.1:%d", optsC.Port),
nats.Secure(&tls.Config{
VerifyConnection: func(s tls.ConnectionState) error {
if s.OCSPResponse == nil {
return fmt.Errorf("missing OCSP Staple from server")
}
return nil
},
}),
nats.RootCAs(caCert),
nats.ErrorHandler(noOpErrHandler),
)
if err != nil {
t.Fatal(err)
}
// There should be no connectivity between the clients due to the revoked staple.
_, err = cA.Subscribe("foo", func(m *nats.Msg) {
m.Respond(nil)
})
if err != nil {
t.Errorf("%v", err)
}
cA.Flush()
_, err = cB.Subscribe("bar", func(m *nats.Msg) {
m.Respond(nil)
})
if err != nil {
t.Fatal(err)
}
cB.Flush()
// Gateway C was not able to mesh with Gateway A because of the revoked OCSP staple
// so these requests to A and B should fail.
resp, err := cC.Request("foo", nil, 2*time.Second)
if err == nil {
t.Errorf("Unexpected success, response: %+v", resp)
}
// Make request to B
resp, err = cC.Request("bar", nil, 2*time.Second)
if err == nil {
t.Errorf("Unexpected success, response: %+v", resp)
}
	// Switch the certs from the seed server to new ones that are not revoked;
	// this should restart OCSP Stapling for the gateway connections.
srvConfA = `
host: "127.0.0.1"
port: -1
server_name: "AAA"
tls {
cert_file: "configs/certs/ocsp/server-status-request-url-07-cert.pem"
key_file: "configs/certs/ocsp/server-status-request-url-07-key.pem"
ca_file: "configs/certs/ocsp/ca-cert.pem"
timeout: 5
}
store_dir: "%s"
gateway {
name: A
host: "127.0.0.1"
port: -1
advertise: "127.0.0.1"
tls {
cert_file: "configs/certs/ocsp/server-status-request-url-08-cert.pem"
key_file: "configs/certs/ocsp/server-status-request-url-08-key.pem"
ca_file: "configs/certs/ocsp/ca-cert.pem"
timeout: 5
}
}
`
srvConfA = fmt.Sprintf(srvConfA, storeDirA)
if err := ioutil.WriteFile(sconfA, []byte(srvConfA), 0666); err != nil {
t.Fatalf("Error writing config: %v", err)
}
if err := srvA.Reload(); err != nil {
t.Fatal(err)
}
time.Sleep(4 * time.Second)
waitForOutboundGateways(t, srvA, 2, 5*time.Second)
waitForOutboundGateways(t, srvB, 2, 5*time.Second)
waitForOutboundGateways(t, srvC, 2, 5*time.Second)
	// Now clients connected to C can communicate with B and A.
_, err = cC.Request("foo", nil, 2*time.Second)
if err != nil {
t.Errorf("%v", err)
}
_, err = cC.Request("bar", nil, 2*time.Second)
if err != nil {
t.Errorf("%v", err)
}
}
func TestOCSPCustomConfig(t *testing.T) {
const (
caCert = "configs/certs/ocsp/ca-cert.pem"
caKey = "configs/certs/ocsp/ca-key.pem"
serverCert = "configs/certs/ocsp/server-cert.pem"
serverKey = "configs/certs/ocsp/server-key.pem"
)
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
ocspr := newOCSPResponder(t, caCert, caKey)
ocspURL := fmt.Sprintf("http://%s", ocspr.Addr)
defer ocspr.Shutdown(ctx)
var (
errExpectedNoStaple = fmt.Errorf("expected no staple")
errMissingStaple = fmt.Errorf("missing OCSP Staple from server")
)
for _, test := range []struct {
name string
config string
opts []nats.Option
err error
rerr error
configure func()
}{
{
"OCSP Stapling in auto mode makes server fail to boot if status is revoked",
`
port: -1
ocsp {
mode: auto
}
tls {
cert_file: "configs/certs/ocsp/server-cert.pem"
key_file: "configs/certs/ocsp/server-key.pem"
ca_file: "configs/certs/ocsp/ca-cert.pem"
timeout: 5
}
`,
[]nats.Option{
nats.Secure(&tls.Config{
VerifyConnection: func(s tls.ConnectionState) error {
if s.OCSPResponse != nil {
return errExpectedNoStaple
}
return nil
},
}),
nats.ClientCert("./configs/certs/ocsp/client-cert.pem", "./configs/certs/ocsp/client-key.pem"),
nats.RootCAs(caCert),
nats.ErrorHandler(noOpErrHandler),
},
nil,
nil,
func() { setOCSPStatus(t, ocspURL, serverCert, ocsp.Revoked) },
},
{
"OCSP Stapling must staple ignored if disabled with ocsp: false",
`
port: -1
ocsp: false
tls {
cert_file: "configs/certs/ocsp/server-status-request-url-01-cert.pem"
key_file: "configs/certs/ocsp/server-status-request-url-01-key.pem"
ca_file: "configs/certs/ocsp/ca-cert.pem"
timeout: 5
}
`,
[]nats.Option{
nats.Secure(&tls.Config{
VerifyConnection: func(s tls.ConnectionState) error {
if s.OCSPResponse != nil {
return errExpectedNoStaple
}
return nil
},
}),
nats.ClientCert("./configs/certs/ocsp/client-cert.pem", "./configs/certs/ocsp/client-key.pem"),
nats.RootCAs(caCert),
nats.ErrorHandler(noOpErrHandler),
},
nil,
nil,
func() {
setOCSPStatus(t, ocspURL, "configs/certs/ocsp/server-status-request-url-01-cert.pem", ocsp.Good)
},
},
{
"OCSP Stapling must staple ignored if disabled with ocsp mode never",
`
port: -1
ocsp: { mode: never }
tls {
cert_file: "configs/certs/ocsp/server-status-request-url-01-cert.pem"
key_file: "configs/certs/ocsp/server-status-request-url-01-key.pem"
ca_file: "configs/certs/ocsp/ca-cert.pem"
timeout: 5
}
`,
[]nats.Option{
nats.Secure(&tls.Config{
VerifyConnection: func(s tls.ConnectionState) error {
if s.OCSPResponse != nil {
return errExpectedNoStaple
}
return nil
},
}),
nats.ClientCert("./configs/certs/ocsp/client-cert.pem", "./configs/certs/ocsp/client-key.pem"),
nats.RootCAs(caCert),
nats.ErrorHandler(noOpErrHandler),
},
nil,
nil,
func() {
setOCSPStatus(t, ocspURL, "configs/certs/ocsp/server-status-request-url-01-cert.pem", ocsp.Good)
},
},
{
"OCSP Stapling in always mode fetches a staple even if cert does not have one",
`
port: -1
ocsp {
mode: always
url: "http://127.0.0.1:8888"
}
tls {
cert_file: "configs/certs/ocsp/server-cert.pem"
key_file: "configs/certs/ocsp/server-key.pem"
ca_file: "configs/certs/ocsp/ca-cert.pem"
timeout: 5
}
`,
[]nats.Option{
nats.Secure(&tls.Config{
VerifyConnection: func(s tls.ConnectionState) error {
if s.OCSPResponse == nil {
return errMissingStaple
}
return nil
},
}),
nats.ClientCert("./configs/certs/ocsp/client-cert.pem", "./configs/certs/ocsp/client-key.pem"),
nats.RootCAs(caCert),
nats.ErrorHandler(noOpErrHandler),
},
nil,
nil,
func() { setOCSPStatus(t, ocspURL, serverCert, ocsp.Good) },
},
{
"OCSP Stapling in must staple mode does not fetch staple if there is no must staple flag",
`
port: -1
ocsp {
mode: must
url: "http://127.0.0.1:8888"
}
tls {
cert_file: "configs/certs/ocsp/server-cert.pem"
key_file: "configs/certs/ocsp/server-key.pem"
ca_file: "configs/certs/ocsp/ca-cert.pem"
timeout: 5
}
`,
[]nats.Option{
nats.Secure(&tls.Config{
VerifyConnection: func(s tls.ConnectionState) error {
if s.OCSPResponse != nil {
return errExpectedNoStaple
}
return nil
},
}),
nats.ClientCert("./configs/certs/ocsp/client-cert.pem", "./configs/certs/ocsp/client-key.pem"),
nats.RootCAs(caCert),
nats.ErrorHandler(noOpErrHandler),
},
nil,
nil,
func() { setOCSPStatus(t, ocspURL, serverCert, ocsp.Good) },
},
{
"OCSP Stapling in must staple mode fetches staple if there is a must staple flag",
`
port: -1
ocsp {
mode: must
url: "http://127.0.0.1:8888"
}
tls {
cert_file: "configs/certs/ocsp/server-status-request-url-01-cert.pem"
key_file: "configs/certs/ocsp/server-status-request-url-01-key.pem"
ca_file: "configs/certs/ocsp/ca-cert.pem"
timeout: 5
}
`,
[]nats.Option{
nats.Secure(&tls.Config{
VerifyConnection: func(s tls.ConnectionState) error {
if s.OCSPResponse == nil {
return errMissingStaple
}
return nil
},
}),
nats.ClientCert("./configs/certs/ocsp/client-cert.pem", "./configs/certs/ocsp/client-key.pem"),
nats.RootCAs(caCert),
nats.ErrorHandler(noOpErrHandler),
},
nil,
nil,
func() {
setOCSPStatus(t, ocspURL, "configs/certs/ocsp/server-status-request-url-01-cert.pem", ocsp.Good)
},
},
} {
t.Run(test.name, func(t *testing.T) {
test.configure()
content := test.config
conf := createConfFile(t, []byte(content))
defer removeFile(t, conf)
s, opts := RunServerWithConfig(conf)
defer s.Shutdown()
nc, err := nats.Connect(fmt.Sprintf("tls://localhost:%d", opts.Port), test.opts...)
			if test.err == nil && err != nil {
				// Fatal rather than Errorf: carrying on with a nil connection would panic below.
				t.Fatalf("Expected to connect, got %v", err)
} else if test.err != nil && err == nil {
t.Errorf("Expected error on connect")
} else if test.err != nil && err != nil {
// Error on connect was expected
if test.err.Error() != err.Error() {
t.Errorf("Expected error %s, got: %s", test.err, err)
}
return
}
defer nc.Close()
nc.Subscribe("ping", func(m *nats.Msg) {
m.Respond([]byte("pong"))
})
nc.Flush()
_, err = nc.Request("ping", []byte("ping"), 250*time.Millisecond)
if test.rerr != nil && err == nil {
t.Errorf("Expected error getting response")
} else if test.rerr == nil && err != nil {
t.Errorf("Expected response")
}
})
}
}
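// Note (added commentary, not in the original file): the table cases above
// exercise the four config modes: "auto" (staple according to the
// certificate, and refuse to boot if the status is revoked), "never" and
// "ocsp: false" (no stapling, even for must-staple certificates), "always"
// (fetch a staple regardless of certificate flags), and "must" (staple only
// for certificates carrying the must-staple flag).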
func TestOCSPCustomConfigReloadDisable(t *testing.T) {
const (
caCert = "configs/certs/ocsp/ca-cert.pem"
caKey = "configs/certs/ocsp/ca-key.pem"
serverCert = "configs/certs/ocsp/server-cert.pem"
updatedServerCert = "configs/certs/ocsp/server-status-request-url-01-cert.pem"
)
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
ocspr := newOCSPResponder(t, caCert, caKey)
defer ocspr.Shutdown(ctx)
addr := fmt.Sprintf("http://%s", ocspr.Addr)
setOCSPStatus(t, addr, serverCert, ocsp.Good)
setOCSPStatus(t, addr, updatedServerCert, ocsp.Good)
	// Start with a server whose certificate lacks the must staple flag, with OCSP Stapling forced on via mode: always.
content := `
port: -1
ocsp: { mode: always, url: "http://127.0.0.1:8888" }
tls {
cert_file: "configs/certs/ocsp/server-cert.pem"
key_file: "configs/certs/ocsp/server-key.pem"
ca_file: "configs/certs/ocsp/ca-cert.pem"
timeout: 5
}
`
conf := createConfFile(t, []byte(content))
defer removeFile(t, conf)
s, opts := RunServerWithConfig(conf)
defer s.Shutdown()
nc, err := nats.Connect(fmt.Sprintf("tls://localhost:%d", opts.Port),
nats.Secure(&tls.Config{
VerifyConnection: func(s tls.ConnectionState) error {
if s.OCSPResponse == nil {
return fmt.Errorf("missing OCSP Staple!")
}
return nil
},
}),
nats.RootCAs(caCert),
nats.ErrorHandler(noOpErrHandler),
)
if err != nil {
t.Fatal(err)
}
sub, err := nc.SubscribeSync("foo")
if err != nil {
t.Fatal(err)
}
nc.Publish("foo", []byte("hello world"))
nc.Flush()
_, err = sub.NextMsg(1 * time.Second)
if err != nil {
t.Fatal(err)
}
nc.Close()
// Change and disable OCSP Stapling.
content = `
port: -1
ocsp: { mode: never }
tls {
cert_file: "configs/certs/ocsp/server-status-request-url-01-cert.pem"
key_file: "configs/certs/ocsp/server-status-request-url-01-key.pem"
ca_file: "configs/certs/ocsp/ca-cert.pem"
timeout: 5
}
`
if err := ioutil.WriteFile(conf, []byte(content), 0666); err != nil {
t.Fatalf("Error writing config: %v", err)
}
if err := s.Reload(); err != nil {
t.Fatal(err)
}
// The new certificate has must staple but OCSP Stapling is disabled.
time.Sleep(2 * time.Second)
nc, err = nats.Connect(fmt.Sprintf("tls://localhost:%d", opts.Port),
nats.Secure(&tls.Config{
VerifyConnection: func(s tls.ConnectionState) error {
if s.OCSPResponse != nil {
return fmt.Errorf("unexpected OCSP Staple!")
}
return nil
},
}),
nats.RootCAs(caCert),
nats.ErrorHandler(noOpErrHandler),
)
if err != nil {
t.Fatal(err)
}
nc.Close()
}
func TestOCSPCustomConfigReloadEnable(t *testing.T) {
const (
caCert = "configs/certs/ocsp/ca-cert.pem"
caKey = "configs/certs/ocsp/ca-key.pem"
serverCert = "configs/certs/ocsp/server-cert.pem"
updatedServerCert = "configs/certs/ocsp/server-status-request-url-01-cert.pem"
)
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
ocspr := newOCSPResponder(t, caCert, caKey)
defer ocspr.Shutdown(ctx)
addr := fmt.Sprintf("http://%s", ocspr.Addr)
setOCSPStatus(t, addr, serverCert, ocsp.Good)
setOCSPStatus(t, addr, updatedServerCert, ocsp.Good)
	// Start with OCSP Stapling disabled (mode: never), even though the certificate has the must staple flag.
content := `
port: -1
ocsp: { mode: never, url: "http://127.0.0.1:8888" }
tls {
cert_file: "configs/certs/ocsp/server-status-request-url-01-cert.pem"
key_file: "configs/certs/ocsp/server-status-request-url-01-key.pem"
ca_file: "configs/certs/ocsp/ca-cert.pem"
timeout: 5
}
`
conf := createConfFile(t, []byte(content))
defer removeFile(t, conf)
s, opts := RunServerWithConfig(conf)
defer s.Shutdown()
nc, err := nats.Connect(fmt.Sprintf("tls://localhost:%d", opts.Port),
nats.Secure(&tls.Config{
VerifyConnection: func(s tls.ConnectionState) error {
if s.OCSPResponse != nil {
return fmt.Errorf("unexpected OCSP Staple!")
}
return nil
},
}),
nats.RootCAs(caCert),
nats.ErrorHandler(noOpErrHandler),
)
if err != nil {
t.Fatal(err)
}
sub, err := nc.SubscribeSync("foo")
if err != nil {
t.Fatal(err)
}
nc.Publish("foo", []byte("hello world"))
nc.Flush()
_, err = sub.NextMsg(1 * time.Second)
if err != nil {
t.Fatal(err)
}
nc.Close()
	// Change the config to enable OCSP Stapling.
content = `
port: -1
ocsp: { mode: always, url: "http://127.0.0.1:8888" }
tls {
cert_file: "configs/certs/ocsp/server-status-request-url-01-cert.pem"
key_file: "configs/certs/ocsp/server-status-request-url-01-key.pem"
ca_file: "configs/certs/ocsp/ca-cert.pem"
timeout: 5
}
`
if err := ioutil.WriteFile(conf, []byte(content), 0666); err != nil {
t.Fatalf("Error writing config: %v", err)
}
if err := s.Reload(); err != nil {
t.Fatal(err)
}
time.Sleep(2 * time.Second)
nc, err = nats.Connect(fmt.Sprintf("tls://localhost:%d", opts.Port),
nats.Secure(&tls.Config{
VerifyConnection: func(s tls.ConnectionState) error {
if s.OCSPResponse == nil {
return fmt.Errorf("missing OCSP Staple!")
}
return nil
},
}),
nats.RootCAs(caCert),
nats.ErrorHandler(noOpErrHandler),
)
if err != nil {
t.Fatal(err)
}
nc.Close()
}
func newOCSPResponder(t *testing.T, issuerCertPEM, issuerKeyPEM string) *http.Server {
t.Helper()
var mu sync.Mutex
status := make(map[string]int)
issuerCert := parseCertPEM(t, issuerCertPEM)
issuerKey := parseKeyPEM(t, issuerKeyPEM)
mux := http.NewServeMux()
// The "/statuses/" endpoint is for directly setting a key-value pair in
// the CA's status database.
mux.HandleFunc("/statuses/", func(rw http.ResponseWriter, r *http.Request) {
defer r.Body.Close()
key := r.URL.Path[len("/statuses/"):]
switch r.Method {
case "GET":
mu.Lock()
n, ok := status[key]
if !ok {
n = ocsp.Unknown
}
mu.Unlock()
fmt.Fprintf(rw, "%s %d", key, n)
case "POST":
data, err := ioutil.ReadAll(r.Body)
if err != nil {
http.Error(rw, err.Error(), http.StatusBadRequest)
return
}
n, err := strconv.Atoi(string(data))
if err != nil {
http.Error(rw, err.Error(), http.StatusBadRequest)
return
}
mu.Lock()
status[key] = n
mu.Unlock()
fmt.Fprintf(rw, "%s %d", key, n)
default:
http.Error(rw, "Method Not Allowed", http.StatusMethodNotAllowed)
return
}
})
// The "/" endpoint is for normal OCSP requests. This actually parses an
// OCSP status request and signs a response with a CA. Lightly based off:
// https://www.ietf.org/rfc/rfc2560.txt
mux.HandleFunc("/", func(rw http.ResponseWriter, r *http.Request) {
if r.Method != "GET" {
http.Error(rw, "Method Not Allowed", http.StatusMethodNotAllowed)
return
}
reqData, err := base64.StdEncoding.DecodeString(r.URL.Path[1:])
if err != nil {
http.Error(rw, err.Error(), http.StatusBadRequest)
return
}
ocspReq, err := ocsp.ParseRequest(reqData)
if err != nil {
http.Error(rw, err.Error(), http.StatusBadRequest)
return
}
mu.Lock()
n, ok := status[ocspReq.SerialNumber.String()]
if !ok {
n = ocsp.Unknown
}
mu.Unlock()
tmpl := ocsp.Response{
Status: n,
SerialNumber: ocspReq.SerialNumber,
ThisUpdate: time.Now(),
NextUpdate: time.Now().Add(4 * time.Second),
}
respData, err := ocsp.CreateResponse(issuerCert, issuerCert, tmpl, issuerKey)
if err != nil {
http.Error(rw, err.Error(), http.StatusInternalServerError)
return
}
rw.Header().Set("Content-Type", "application/ocsp-response")
rw.Header().Set("Content-Length", fmt.Sprint(len(respData)))
fmt.Fprint(rw, string(respData))
})
srv := &http.Server{
Addr: "127.0.0.1:8888",
Handler: mux,
}
go srv.ListenAndServe()
time.Sleep(1 * time.Second)
return srv
}
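// Illustrative sketch, not part of the original file: reading a status back
// from the responder's "/statuses/" endpoint defined above; setOCSPStatus
// below shows the POST side of the same protocol. The helper name is
// hypothetical.
func readOCSPStatus(t *testing.T, ocspURL, serial string) string {
	t.Helper()
	resp, err := http.Get(fmt.Sprintf("%s/statuses/%s", ocspURL, serial))
	if err != nil {
		t.Fatal(err)
	}
	defer resp.Body.Close()
	data, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		t.Fatal(err)
	}
	// The responder replies "<serial> <status>", e.g. "<serial> 0" for ocsp.Good.
	return string(data)
}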
func setOCSPStatus(t *testing.T, ocspURL, certPEM string, status int) {
t.Helper()
cert := parseCertPEM(t, certPEM)
hc := &http.Client{Timeout: 10 * time.Second}
resp, err := hc.Post(
fmt.Sprintf("%s/statuses/%s", ocspURL, cert.SerialNumber),
"",
strings.NewReader(fmt.Sprint(status)),
)
if err != nil {
t.Fatal(err)
}
defer resp.Body.Close()
data, err := ioutil.ReadAll(resp.Body)
if err != nil {
t.Fatalf("failed to read OCSP HTTP response body: %s", err)
}
if got, want := resp.Status, "200 OK"; got != want {
t.Error(strings.TrimSpace(string(data)))
t.Fatalf("unexpected OCSP HTTP set status, got %q, want %q", got, want)
}
}
func parseCertPEM(t *testing.T, certPEM string) *x509.Certificate {
t.Helper()
block := parsePEM(t, certPEM)
cert, err := x509.ParseCertificate(block.Bytes)
if err != nil {
t.Fatalf("failed to parse cert '%s': %s", certPEM, err)
}
return cert
}
func parseKeyPEM(t *testing.T, keyPEM string) *rsa.PrivateKey {
t.Helper()
block := parsePEM(t, keyPEM)
key, err := x509.ParsePKCS1PrivateKey(block.Bytes)
if err != nil {
t.Fatalf("failed to parse ikey %s: %s", keyPEM, err)
}
return key
}
func parsePEM(t *testing.T, pemPath string) *pem.Block {
t.Helper()
data, err := ioutil.ReadFile(pemPath)
if err != nil {
t.Fatal(err)
}
block, _ := pem.Decode(data)
if block == nil {
t.Fatalf("failed to decode PEM %s", pemPath)
}
return block
}
func getOCSPStatus(s tls.ConnectionState) (*ocsp.Response, error) {
if len(s.VerifiedChains) == 0 {
return nil, fmt.Errorf("missing TLS verified chains")
}
chain := s.VerifiedChains[0]
if got, want := len(chain), 2; got < want {
return nil, fmt.Errorf("incomplete cert chain, got %d, want at least %d", got, want)
}
leaf, issuer := chain[0], chain[1]
resp, err := ocsp.ParseResponseForCert(s.OCSPResponse, leaf, issuer)
if err != nil {
return nil, fmt.Errorf("failed to parse OCSP response: %w", err)
}
if err := resp.CheckSignatureFrom(issuer); err != nil {
return resp, err
}
return resp, nil
}
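// Illustrative sketch, not part of the original file: getOCSPStatus is meant
// to be used from a client's VerifyConnection callback to insist on a Good
// staple, roughly like this (the option name is hypothetical).
func requireGoodStaple() nats.Option {
	return nats.Secure(&tls.Config{
		VerifyConnection: func(s tls.ConnectionState) error {
			resp, err := getOCSPStatus(s)
			if err != nil {
				return err
			}
			if resp.Status != ocsp.Good {
				return fmt.Errorf("expected Good OCSP staple, got status %d", resp.Status)
			}
			return nil
		},
	})
}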
| 1 | 13,747 | Isn't this testing an implicit config, not a manual config? | nats-io-nats-server | go |
@@ -205,6 +205,12 @@ type Table struct {
// to what we calculate from chainToContents.
chainToDataplaneHashes map[string][]string
+ // chainsToFullRules contains the full rules, mapped from chain name to slices of rules in that chain.
+ chainsToFullRules map[string][]string
+
+ // hashToFullRules contains a mapping of rule hashes to the full rules.
+ hashToFullRules map[string]string
+
// hashCommentPrefix holds the prefix that we prepend to our rule-tracking hashes.
hashCommentPrefix string
// hashCommentRegexp matches the rule-tracking comment, capturing the rule hash. | 1 | // Copyright (c) 2016-2019 Tigera, Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package iptables
import (
"bufio"
"bytes"
"fmt"
"io"
"os/exec"
"reflect"
"regexp"
"strings"
"sync"
"time"
"github.com/prometheus/client_golang/prometheus"
log "github.com/sirupsen/logrus"
"github.com/projectcalico/libcalico-go/lib/set"
)
const (
MaxChainNameLength = 28
minPostWriteInterval = 50 * time.Millisecond
)
var (
// List of all the top-level kernel-created chains by iptables table.
tableToKernelChains = map[string][]string{
"filter": []string{"INPUT", "FORWARD", "OUTPUT"},
"nat": []string{"PREROUTING", "INPUT", "OUTPUT", "POSTROUTING"},
"mangle": []string{"PREROUTING", "INPUT", "FORWARD", "OUTPUT", "POSTROUTING"},
"raw": []string{"PREROUTING", "OUTPUT"},
}
// chainCreateRegexp matches iptables-save output lines for chain forward reference lines.
// It captures the name of the chain.
chainCreateRegexp = regexp.MustCompile(`^:(\S+)`)
// appendRegexp matches an iptables-save output line for an append operation.
appendRegexp = regexp.MustCompile(`^-A (\S+)`)
// Prometheus metrics.
countNumRestoreCalls = prometheus.NewCounter(prometheus.CounterOpts{
Name: "felix_iptables_restore_calls",
Help: "Number of iptables-restore calls.",
})
countNumRestoreErrors = prometheus.NewCounter(prometheus.CounterOpts{
Name: "felix_iptables_restore_errors",
Help: "Number of iptables-restore errors.",
})
countNumSaveCalls = prometheus.NewCounter(prometheus.CounterOpts{
Name: "felix_iptables_save_calls",
Help: "Number of iptables-save calls.",
})
countNumSaveErrors = prometheus.NewCounter(prometheus.CounterOpts{
Name: "felix_iptables_save_errors",
Help: "Number of iptables-save errors.",
})
gaugeNumChains = prometheus.NewGaugeVec(prometheus.GaugeOpts{
Name: "felix_iptables_chains",
Help: "Number of active iptables chains.",
}, []string{"ip_version", "table"})
gaugeNumRules = prometheus.NewGaugeVec(prometheus.GaugeOpts{
Name: "felix_iptables_rules",
Help: "Number of active iptables rules.",
}, []string{"ip_version", "table"})
countNumLinesExecuted = prometheus.NewCounterVec(prometheus.CounterOpts{
Name: "felix_iptables_lines_executed",
Help: "Number of iptables rule updates executed.",
}, []string{"ip_version", "table"})
)
func init() {
prometheus.MustRegister(countNumRestoreCalls)
prometheus.MustRegister(countNumRestoreErrors)
prometheus.MustRegister(countNumSaveCalls)
prometheus.MustRegister(countNumSaveErrors)
prometheus.MustRegister(gaugeNumChains)
prometheus.MustRegister(gaugeNumRules)
prometheus.MustRegister(countNumLinesExecuted)
}
// Table represents a single one of the iptables tables, e.g. "raw", "nat", "filter", etc. It
// caches the desired state of that table, then attempts to bring it into sync when Apply() is
// called.
//
// API Model
//
// Table supports two classes of operation: "rule insertions" and "full chain updates".
//
// As the name suggests, rule insertions allow for inserting one or more rules into a pre-existing
// chain. Rule insertions are intended to be used to hook kernel chains (such as "FORWARD") in
// order to direct them to a Felix-owned chain. It is important to minimise the use of rule
// insertions because the top-level chains are shared resources, which can be modified by other
// applications. In addition, rule insertions are harder to clean up after an upgrade to a new
// version of Felix (because we need a way to recognise our rules in a crowded chain).
//
// Full chain updates replace the entire contents of a Felix-owned chain with a new set of rules.
// Limiting the operation to "replace whole chain" in this way significantly simplifies the API.
// Although the API operates on full chains, the dataplane write logic tries to avoid rewriting
// a whole chain if only part of it has changed (this was not the case in Felix 1.4). This
// prevents iptables counters from being reset unnecessarily.
//
// In either case, the actual dataplane updates are deferred until the next call to Apply() so
// chain updates and insertions may occur in any order as long as they are consistent (i.e. there
// are no references to non-existent chains) by the time Apply() is called.
//
// Design
//
// We had several goals in designing the iptables machinery in 2.0.0:
//
// (1) High performance. Felix needs to handle high churn of endpoints and rules.
//
// (2) Ability to restore rules, even if other applications accidentally break them: we found that
// other applications sometimes misuse iptables-save and iptables-restore to do a read, modify,
// write cycle. That behaviour is not safe under concurrent modification.
//
// (3) Avoid rewriting rules that haven't changed so that we don't reset iptables counters.
//
// (4) Avoid parsing iptables commands (for example, the output from iptables/iptables-save).
// This is very hard to do robustly because iptables rules do not necessarily round-trip through
// the kernel in the same form. In addition, the format could easily change due to changes or
// fixes in the iptables/iptables-save command.
//
// (5) Support for graceful restart. I.e. deferring potentially incorrect updates until we're
// in-sync with the datastore. For example, if we have 100 endpoints on a host, after a restart
// we don't want to write a "dispatch" chain when we learn about the first endpoint (possibly
// replacing an existing one that had all 100 endpoints in place and causing traffic to glitch);
// instead, we want to defer until we've seen all 100 and then do the write.
//
// (6) Improved handling of rule inserts vs Felix 1.4.x. Previous versions of Felix sometimes
// inserted special-case rules that were not marked as Calico rules in any sensible way making
// cleanup of those rules after an upgrade difficult.
//
// Implementation
//
// For high performance (goal 1), we use iptables-restore to do bulk updates to iptables. This is
// much faster than individual iptables calls.
//
// To allow us to restore rules after they are clobbered by another process (goal 2), we cache
// them at this layer. This means that we don't need a mechanism to ask the other layers of Felix
// to do a resync. Note: Table doesn't start a thread of its own so it relies on the main event
// loop to trigger any dataplane resync polls.
//
// There is tension between goals 3 and 4. In order to avoid full rewrites (goal 3), we need to
// know what rules are in place, but we also don't want to parse them to find out (goal 4)! As
// a compromise, we deterministically calculate an ID for each rule and store it in an iptables
// comment. Then, when we want to know what rules are in place, we _do_ parse the output from
// iptables-save, but only to read back the rule IDs. That limits the amount of parsing we need
// to do and keeps it manageable/robust.
//
// To support graceful restart (goal 5), we defer updates to the dataplane until Apply() is called,
// then we do an atomic update using iptables-restore. As long as the first Apply() call is
// after we're in sync, the dataplane won't be touched until the right time. Felix 1.4.x had a
// more complex mechanism to support partial updates during the graceful restart period but
// Felix 2.0.0 resyncs so quickly that the added complexity is not justified.
//
// To make it easier to manage rule insertions (goal 6), we add rule IDs to those too. With
// rule IDs in place, we can easily distinguish Calico rules from non-Calico rules without needing
// to know exactly which rules to expect. To deal with cleanup after upgrade from older versions
// that did not write rule IDs, we support special-case regexes to detect our old rules.
//
// Thread safety
//
// Table doesn't do any internal synchronization; its methods should only be called from one
// thread. To avoid conflicts in the dataplane itself, there should only be one instance of
// Table for each iptables table in an application.
type Table struct {
Name string
IPVersion uint8
// featureDetector detects the features of the dataplane.
featureDetector *FeatureDetector
// chainToInsertedRules maps from chain name to a list of rules to be inserted at the start
// of that chain. Rules are written with rule hash comments. The Table cleans up inserted
// rules with unknown hashes.
chainToInsertedRules map[string][]Rule
dirtyInserts set.Set
	// chainNameToChain contains the desired state of our iptables chains, indexed by
// chain name. The values are slices of iptables fragments, such as
// "--match foo --jump DROP" (i.e. omitting the action and chain name, which are calculated
// as needed).
chainNameToChain map[string]*Chain
dirtyChains set.Set
inSyncWithDataPlane bool
// chainToDataplaneHashes contains the rule hashes that we think are in the dataplane.
// it is updated when we write to the dataplane but it can also be read back and compared
// to what we calculate from chainToContents.
chainToDataplaneHashes map[string][]string
// hashCommentPrefix holds the prefix that we prepend to our rule-tracking hashes.
hashCommentPrefix string
// hashCommentRegexp matches the rule-tracking comment, capturing the rule hash.
hashCommentRegexp *regexp.Regexp
// ourChainsRegexp matches the names of chains that are "ours", i.e. start with one of our
// prefixes.
ourChainsRegexp *regexp.Regexp
// oldInsertRegexp matches inserted rules from old pre rule-hash versions of felix.
oldInsertRegexp *regexp.Regexp
// nftablesMode should be set to true if iptables is using the nftables backend.
nftablesMode bool
iptablesRestoreCmd string
iptablesSaveCmd string
// insertMode is either "insert" or "append"; whether we insert our rules or append them
// to top-level chains.
insertMode string
// Record when we did our most recent reads and writes of the table. We use these to
// calculate the next time we should force a refresh.
lastReadTime time.Time
lastWriteTime time.Time
initialPostWriteInterval time.Duration
postWriteInterval time.Duration
refreshInterval time.Duration
// calicoXtablesLock, if enabled, our implementation of the xtables lock.
calicoXtablesLock sync.Locker
// lockTimeout is the timeout used for iptables-restore's native xtables lock implementation.
lockTimeout time.Duration
	// lockProbeInterval is the lock probe interval used for iptables-restore's native xtables lock
// implementation.
lockProbeInterval time.Duration
logCxt *log.Entry
gaugeNumChains prometheus.Gauge
gaugeNumRules prometheus.Gauge
countNumLinesExecuted prometheus.Counter
// Reusable buffer for writing to iptables.
restoreInputBuffer RestoreInputBuilder
// Factory for making commands, used by UTs to shim exec.Command().
newCmd cmdFactory
// Shims for time.XXX functions:
timeSleep func(d time.Duration)
timeNow func() time.Time
// lookPath is a shim for exec.LookPath.
lookPath func(file string) (string, error)
}
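// Illustrative sketch, not part of the original file: the hash-comment
// mechanism described in the doc comment above. Each rule is written with a
// deterministic ID embedded in an iptables comment, and the comment is parsed
// back out of iptables-save output. The function names and rule fragment are
// hypothetical.
func exampleHashComment(hashPrefix, ruleHash, ruleFragment string) string {
	// Produces e.g. `-m comment --comment "cali:abcd1234_-" --match foo --jump DROP`.
	return fmt.Sprintf(`-m comment --comment "%s%s" %s`, hashPrefix, ruleHash, ruleFragment)
}
func exampleExtractHash(hashCommentRegexp *regexp.Regexp, savedLine string) (string, bool) {
	// The regexp's first capture group is the rule hash (see NewTable).
	if m := hashCommentRegexp.FindStringSubmatch(savedLine); m != nil {
		return m[1], true
	}
	return "", false
}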
type TableOptions struct {
HistoricChainPrefixes []string
ExtraCleanupRegexPattern string
BackendMode string
InsertMode string
RefreshInterval time.Duration
PostWriteInterval time.Duration
// LockTimeout is the timeout to use for iptables-restore's native xtables lock.
LockTimeout time.Duration
// LockProbeInterval is the probe interval to use for iptables-restore's native xtables lock.
LockProbeInterval time.Duration
// NewCmdOverride for tests, if non-nil, factory to use instead of the real exec.Command()
NewCmdOverride cmdFactory
// SleepOverride for tests, if non-nil, replacement for time.Sleep()
SleepOverride func(d time.Duration)
// NowOverride for tests, if non-nil, replacement for time.Now()
NowOverride func() time.Time
// LookPathOverride for tests, if non-nil, replacement for exec.LookPath()
LookPathOverride func(file string) (string, error)
}
func NewTable(
name string,
ipVersion uint8,
hashPrefix string,
iptablesWriteLock sync.Locker,
detector *FeatureDetector,
options TableOptions,
) *Table {
// Calculate the regex used to match the hash comment. The comment looks like this:
// --comment "cali:abcd1234_-".
hashCommentRegexp := regexp.MustCompile(`--comment "?` + hashPrefix + `([a-zA-Z0-9_-]+)"?`)
ourChainsPattern := "^(" + strings.Join(options.HistoricChainPrefixes, "|") + ")"
ourChainsRegexp := regexp.MustCompile(ourChainsPattern)
oldInsertRegexpParts := []string{}
for _, prefix := range options.HistoricChainPrefixes {
part := fmt.Sprintf("(?:-j|--jump) %s", prefix)
oldInsertRegexpParts = append(oldInsertRegexpParts, part)
}
if options.ExtraCleanupRegexPattern != "" {
oldInsertRegexpParts = append(oldInsertRegexpParts,
options.ExtraCleanupRegexPattern)
}
oldInsertPattern := strings.Join(oldInsertRegexpParts, "|")
oldInsertRegexp := regexp.MustCompile(oldInsertPattern)
// Pre-populate the insert table with empty lists for each kernel chain. Ensures that we
// clean up any chains that we hooked on a previous run.
inserts := map[string][]Rule{}
dirtyInserts := set.New()
for _, kernelChain := range tableToKernelChains[name] {
inserts[kernelChain] = []Rule{}
dirtyInserts.Add(kernelChain)
}
var insertMode string
switch options.InsertMode {
case "", "insert":
insertMode = "insert"
case "append":
insertMode = "append"
default:
log.WithField("insertMode", options.InsertMode).Panic("Unknown insert mode")
}
if options.PostWriteInterval <= minPostWriteInterval {
log.WithFields(log.Fields{
"setValue": options.PostWriteInterval,
"default": minPostWriteInterval,
}).Info("PostWriteInterval too small, defaulting.")
options.PostWriteInterval = minPostWriteInterval
}
// Allow override of exec.Command() and time.Sleep() for test purposes.
newCmd := newRealCmd
if options.NewCmdOverride != nil {
newCmd = options.NewCmdOverride
}
sleep := time.Sleep
if options.SleepOverride != nil {
sleep = options.SleepOverride
}
now := time.Now
if options.NowOverride != nil {
now = options.NowOverride
}
lookPath := exec.LookPath
if options.LookPathOverride != nil {
lookPath = options.LookPathOverride
}
table := &Table{
Name: name,
IPVersion: ipVersion,
featureDetector: detector,
chainToInsertedRules: inserts,
dirtyInserts: dirtyInserts,
chainNameToChain: map[string]*Chain{},
dirtyChains: set.New(),
chainToDataplaneHashes: map[string][]string{},
logCxt: log.WithFields(log.Fields{
"ipVersion": ipVersion,
"table": name,
}),
hashCommentPrefix: hashPrefix,
hashCommentRegexp: hashCommentRegexp,
ourChainsRegexp: ourChainsRegexp,
oldInsertRegexp: oldInsertRegexp,
insertMode: insertMode,
// Initialise the write tracking as if we'd just done a write, this will trigger
// us to recheck the dataplane at exponentially increasing intervals at startup.
// Note: if we didn't do this, the calculation logic would need to be modified
// to cope with zero values for these fields.
lastWriteTime: now(),
initialPostWriteInterval: options.PostWriteInterval,
postWriteInterval: options.PostWriteInterval,
refreshInterval: options.RefreshInterval,
calicoXtablesLock: iptablesWriteLock,
lockTimeout: options.LockTimeout,
lockProbeInterval: options.LockProbeInterval,
newCmd: newCmd,
timeSleep: sleep,
timeNow: now,
lookPath: lookPath,
gaugeNumChains: gaugeNumChains.WithLabelValues(fmt.Sprintf("%d", ipVersion), name),
gaugeNumRules: gaugeNumRules.WithLabelValues(fmt.Sprintf("%d", ipVersion), name),
countNumLinesExecuted: countNumLinesExecuted.WithLabelValues(fmt.Sprintf("%d", ipVersion), name),
}
table.restoreInputBuffer.NumLinesWritten = table.countNumLinesExecuted
iptablesVariant := strings.ToLower(options.BackendMode)
if iptablesVariant == "" {
iptablesVariant = "legacy"
}
if iptablesVariant == "nft" {
log.Info("Enabling iptables-in-nftables-mode workarounds.")
table.nftablesMode = true
}
table.iptablesRestoreCmd = table.findBestBinary(ipVersion, iptablesVariant, "restore")
table.iptablesSaveCmd = table.findBestBinary(ipVersion, iptablesVariant, "save")
return table
}
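// Illustrative sketch, not part of the original file: constructing a Table
// for the IPv4 filter table. The hash and chain prefixes are assumptions for
// illustration only.
func exampleNewFilterTable(lock sync.Locker, detector *FeatureDetector) *Table {
	return NewTable(
		"filter",
		4,
		"cali:",
		lock,
		detector,
		TableOptions{
			HistoricChainPrefixes: []string{"cali-"},
			InsertMode:            "insert",
			RefreshInterval:       90 * time.Second,
			PostWriteInterval:     time.Second,
		},
	)
}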
// findBestBinary tries to find an iptables binary for the specific variant (legacy/nftables mode) and returns the name
// of the binary. Falls back on iptables-restore/iptables-save if the specific variant isn't available.
// Panics if no binary can be found.
func (t *Table) findBestBinary(ipVersion uint8, backendMode, saveOrRestore string) string {
verInfix := ""
if ipVersion == 6 {
verInfix = "6"
}
candidates := []string{
"ip" + verInfix + "tables-" + backendMode + "-" + saveOrRestore,
"ip" + verInfix + "tables-" + saveOrRestore,
}
logCxt := log.WithFields(log.Fields{
"ipVersion": ipVersion,
"backendMode": backendMode,
"saveOrRestore": saveOrRestore,
"candidates": candidates,
})
for _, candidate := range candidates {
_, err := t.lookPath(candidate)
if err == nil {
logCxt.WithField("command", candidate).Info("Looked up iptables command")
return candidate
}
}
logCxt.Panic("Failed to find iptables command")
return ""
}
func (t *Table) SetRuleInsertions(chainName string, rules []Rule) {
t.logCxt.WithField("chainName", chainName).Debug("Updating rule insertions")
oldRules := t.chainToInsertedRules[chainName]
t.chainToInsertedRules[chainName] = rules
numRulesDelta := len(rules) - len(oldRules)
t.gaugeNumRules.Add(float64(numRulesDelta))
t.dirtyInserts.Add(chainName)
// Defensive: make sure we re-read the dataplane state before we make updates. While the
// code was originally designed not to need this, we found that other users of
// iptables-restore can still clobber out updates so it's safest to re-read the state before
// each write.
t.InvalidateDataplaneCache("insertion")
}
func (t *Table) UpdateChains(chains []*Chain) {
for _, chain := range chains {
t.UpdateChain(chain)
}
}
func (t *Table) UpdateChain(chain *Chain) {
t.logCxt.WithField("chainName", chain.Name).Info("Queueing update of chain.")
oldNumRules := 0
if oldChain := t.chainNameToChain[chain.Name]; oldChain != nil {
oldNumRules = len(oldChain.Rules)
}
t.chainNameToChain[chain.Name] = chain
numRulesDelta := len(chain.Rules) - oldNumRules
t.gaugeNumRules.Add(float64(numRulesDelta))
t.dirtyChains.Add(chain.Name)
// Defensive: make sure we re-read the dataplane state before we make updates. While the
// code was originally designed not to need this, we found that other users of
// iptables-restore can still clobber out updates so it's safest to re-read the state before
// each write.
t.InvalidateDataplaneCache("chain update")
}
func (t *Table) RemoveChains(chains []*Chain) {
for _, chain := range chains {
t.RemoveChainByName(chain.Name)
}
}
func (t *Table) RemoveChainByName(name string) {
t.logCxt.WithField("chainName", name).Info("Queing deletion of chain.")
if oldChain, known := t.chainNameToChain[name]; known {
t.gaugeNumRules.Sub(float64(len(oldChain.Rules)))
delete(t.chainNameToChain, name)
t.dirtyChains.Add(name)
}
// Defensive: make sure we re-read the dataplane state before we make updates. While the
// code was originally designed not to need this, we found that other users of
// iptables-restore can still clobber out updates so it's safest to re-read the state before
// each write.
t.InvalidateDataplaneCache("chain removal")
}
func (t *Table) loadDataplaneState() {
// Refresh the cache of feature data.
t.featureDetector.RefreshFeatures()
// Load the hashes from the dataplane.
t.logCxt.Info("Loading current iptables state and checking it is correct.")
t.lastReadTime = t.timeNow()
dataplaneHashes := t.getHashesFromDataplane()
// Check that the rules we think we've programmed are still there and mark any inconsistent
// chains for refresh.
for chainName, expectedHashes := range t.chainToDataplaneHashes {
logCxt := t.logCxt.WithField("chainName", chainName)
if t.dirtyChains.Contains(chainName) || t.dirtyInserts.Contains(chainName) {
// Already an update pending for this chain; no point in flagging it as
// out-of-sync.
logCxt.Debug("Skipping known-dirty chain")
continue
}
dpHashes := dataplaneHashes[chainName]
if !t.ourChainsRegexp.MatchString(chainName) {
// Not one of our chains so it may be one that we're inserting rules into.
insertedRules := t.chainToInsertedRules[chainName]
if len(insertedRules) == 0 {
// This chain shouldn't have any inserts, make sure that's the
// case. This case also covers the case where a chain was removed,
// making dpHashes nil.
dataplaneHasInserts := false
for _, hash := range dpHashes {
if hash != "" {
dataplaneHasInserts = true
break
}
}
if dataplaneHasInserts {
logCxt.WithField("actualRuleIDs", dpHashes).Warn(
"Chain had unexpected inserts, marking for resync")
t.dirtyInserts.Add(chainName)
}
continue
}
// Re-calculate the expected rule insertions based on the current length
// of the chain (since other processes may have inserted/removed rules
// from the chain, throwing off the numbers).
expectedHashes, _ = t.expectedHashesForInsertChain(
chainName,
numEmptyStrings(dpHashes),
)
if !reflect.DeepEqual(dpHashes, expectedHashes) {
logCxt.WithFields(log.Fields{
"expectedRuleIDs": expectedHashes,
"actualRuleIDs": dpHashes,
}).Warn("Detected out-of-sync inserts, marking for resync")
t.dirtyInserts.Add(chainName)
}
} else {
// One of our chains, should match exactly.
if !reflect.DeepEqual(dpHashes, expectedHashes) {
logCxt.Warn("Detected out-of-sync Calico chain, marking for resync")
t.dirtyChains.Add(chainName)
}
}
}
// Now scan for chains that shouldn't be there and mark for deletion.
t.logCxt.Debug("Scanning for unexpected iptables chains")
for chainName, dataplaneHashes := range dataplaneHashes {
logCxt := t.logCxt.WithField("chainName", chainName)
if t.dirtyChains.Contains(chainName) || t.dirtyInserts.Contains(chainName) {
// Already an update pending for this chain.
logCxt.Debug("Skipping known-dirty chain")
continue
}
if _, ok := t.chainToDataplaneHashes[chainName]; ok {
// Chain expected, we'll have checked its contents above.
logCxt.Debug("Skipping expected chain")
continue
}
if !t.ourChainsRegexp.MatchString(chainName) {
// Non-calico chain that is not tracked in chainToDataplaneHashes. We
// haven't seen the chain before and we haven't been asked to insert
// anything into it. Check that it doesn't have any rule insertions in it
// from a previous run of Felix.
for _, hash := range dataplaneHashes {
if hash != "" {
logCxt.Info("Found unexpected insert, marking for cleanup")
t.dirtyInserts.Add(chainName)
break
}
}
continue
}
// Chain exists in dataplane but not in memory, mark as dirty so we'll clean it up.
logCxt.Info("Found unexpected chain, marking for cleanup")
t.dirtyChains.Add(chainName)
}
t.logCxt.Debug("Finished loading iptables state")
t.chainToDataplaneHashes = dataplaneHashes
t.inSyncWithDataPlane = true
}
// expectedHashesForInsertChain calculates the expected hashes for a whole top-level chain
// given our inserts. If we're in append mode, that consists of numNonCalicoRules empty strings
// followed by our hashes; in insert mode, the opposite way round. To avoid recalculation, it
// returns the rule hashes as a second output.
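// Illustrative example (hypothetical hashes): with inserted rule hashes [h1, h2] and
// numNonCalicoRules=2, insert mode yields allHashes = [h1, h2, "", ""], while append
// mode yields allHashes = ["", "", h1, h2].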
func (t *Table) expectedHashesForInsertChain(
chainName string,
numNonCalicoRules int,
) (allHashes, ourHashes []string) {
insertedRules := t.chainToInsertedRules[chainName]
allHashes = make([]string, len(insertedRules)+numNonCalicoRules)
features := t.featureDetector.GetFeatures()
ourHashes = calculateRuleInsertHashes(chainName, insertedRules, features)
offset := 0
if t.insertMode == "append" {
log.Debug("In append mode, returning our hashes at end.")
offset = numNonCalicoRules
}
for i, hash := range ourHashes {
allHashes[i+offset] = hash
}
return
}
// getHashesFromDataplane loads the current state of our table and parses out the hashes that we
// add to rules. It returns a map with an entry for each chain in the table. Each entry is a slice
// containing the hashes for the rules in that chain. Rules with no hashes are represented by
// an empty string.
func (t *Table) getHashesFromDataplane() map[string][]string {
retries := 3
retryDelay := 100 * time.Millisecond
// Retry a few times before we panic. This deals with any transient errors and it prevents
// us from spamming a panic into the log when we're being gracefully shut down by a SIGTERM.
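// With the values above this makes up to four attempts in total, separated by
// sleeps of 100ms, 200ms and 400ms, before the final panic.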
for {
hashes, err := t.attemptToGetHashesFromDataplane()
if err != nil {
countNumSaveErrors.Inc()
var stderr string
if ee, ok := err.(*exec.ExitError); ok {
stderr = string(ee.Stderr)
}
t.logCxt.WithError(err).WithField("stderr", stderr).Warnf("%s command failed", t.iptablesSaveCmd)
if retries > 0 {
retries--
t.timeSleep(retryDelay)
retryDelay *= 2
} else {
t.logCxt.Panicf("%s command failed after retries", t.iptablesSaveCmd)
}
continue
}
return hashes
}
}
// attemptToGetHashesFromDataplane starts an iptables-save subprocess and feeds its output to
// readHashesFrom() via a pipe. It handles the various error cases.
func (t *Table) attemptToGetHashesFromDataplane() (hashes map[string][]string, err error) {
cmd := t.newCmd(t.iptablesSaveCmd, "-t", t.Name)
countNumSaveCalls.Inc()
stdout, err := cmd.StdoutPipe()
if err != nil {
log.WithError(err).Warnf("Failed to get stdout pipe for %s", t.iptablesSaveCmd)
return
}
err = cmd.Start()
if err != nil {
// Failed even before we started, close the pipe. (This would normally be done
// by Wait().)
log.WithError(err).Warnf("Failed to start %s", t.iptablesSaveCmd)
closeErr := stdout.Close()
if closeErr != nil {
log.WithError(closeErr).Warn("Error closing stdout after Start() failed.")
}
return
}
hashes, err = t.readHashesFrom(stdout)
if err != nil {
// In case readHashesFrom() returned due to an error that didn't cause the
// process to exit, kill it now.
log.WithError(err).Warnf("Killing %s process after a failure", t.iptablesSaveCmd)
killErr := cmd.Kill()
if killErr != nil {
// If we don't know what state the process is in, we can't Wait() on it.
log.WithError(killErr).Panicf(
"Failed to kill %s process after failure.", t.iptablesSaveCmd)
}
}
waitErr := cmd.Wait()
if waitErr != nil {
log.WithError(waitErr).Warn("iptables save failed")
if err == nil {
err = waitErr
}
}
return
}
// readHashesFrom scans the given reader containing iptables-save output for this table, extracting
// our rule hashes. Entries in the returned map are indexed by chain name. For rules that we
// wrote, the hash is extracted from a comment that we added to the rule. For rules written by
// previous versions of Felix, returns a dummy non-empty value. For rules not written by Felix,
// returns an empty string. Hence, the lengths of the returned values are the lengths of the chains
// whether written by Felix or not.
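// Illustrative example (assuming a hash comment prefix of "cali:"): given the
// iptables-save output
//   :cali-INPUT - [0:0]
//   -A cali-INPUT -m comment --comment "cali:abcd1234" -j ACCEPT
//   -A cali-INPUT -j DROP
// this would return {"cali-INPUT": ["abcd1234", ""]}.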
func (t *Table) readHashesFrom(r io.ReadCloser) (hashes map[string][]string, err error) {
hashes = map[string][]string{}
scanner := bufio.NewScanner(r)
// Figure out if debug logging is enabled so we can skip some WithFields() calls in the
// tight loop below if the log wouldn't be emitted anyway.
debug := log.GetLevel() >= log.DebugLevel
for scanner.Scan() {
// Read the next line of the output.
line := scanner.Bytes()
// Look for lines of the form ":chain-name - [0:0]", which are forward declarations
// for (possibly empty) chains.
logCxt := t.logCxt
if debug {
// Avoid stringifying the line (and hence copying it) unless we're at debug
// level.
logCxt = logCxt.WithField("line", string(line))
logCxt.Debug("Parsing line")
}
captures := chainCreateRegexp.FindSubmatch(line)
if captures != nil {
// Chain forward-reference, make sure the chain exists.
chainName := string(captures[1])
if debug {
logCxt.WithField("chainName", chainName).Debug("Found forward-reference")
}
hashes[chainName] = []string{}
continue
}
// Look for append lines, such as "-A chain-name -m foo --foo bar"; these are the
// actual rules.
captures = appendRegexp.FindSubmatch(line)
if captures == nil {
// Skip any non-append lines.
logCxt.Debug("Not an append, skipping")
continue
}
chainName := string(captures[1])
// Look for one of our hashes on the rule. We record an empty hash for unknown rules
// so that they get cleaned up. Note: we're implicitly capturing the first match
// of the regex. When writing the rules, we ensure that the hash is written as the
// first comment.
hash := ""
captures = t.hashCommentRegexp.FindSubmatch(line)
if captures != nil {
hash = string(captures[1])
if debug {
logCxt.WithField("hash", hash).Debug("Found hash in rule")
}
} else if t.oldInsertRegexp.Find(line) != nil {
logCxt.WithFields(log.Fields{
"rule": line,
"chainName": chainName,
}).Info("Found inserted rule from previous Felix version, marking for cleanup.")
hash = "OLD INSERT RULE"
}
hashes[chainName] = append(hashes[chainName], hash)
}
if scanner.Err() != nil {
log.WithError(scanner.Err()).Error("Failed to read hashes from dataplane")
return nil, scanner.Err()
}
t.logCxt.Debugf("Read hashes from dataplane: %#v", hashes)
return hashes, nil
}
func (t *Table) InvalidateDataplaneCache(reason string) {
logCxt := t.logCxt.WithField("reason", reason)
if !t.inSyncWithDataPlane {
logCxt.Debug("Would invalidate dataplane cache but it was already invalid.")
return
}
logCxt.Info("Invalidating dataplane cache")
t.inSyncWithDataPlane = false
}
func (t *Table) Apply() (rescheduleAfter time.Duration) {
now := t.timeNow()
// We _think_ we're in sync, check if there are any reasons to think we might
// not be in sync.
lastReadToNow := now.Sub(t.lastReadTime)
invalidated := false
if t.refreshInterval > 0 && lastReadToNow > t.refreshInterval {
// Too long since we've forced a refresh.
t.InvalidateDataplaneCache("refresh timer")
invalidated = true
}
// To work around the possibility of another process clobbering our updates, we refresh the
// dataplane after we do a write at exponentially increasing intervals. We do a refresh
// if the delta from the last write to now is twice the delta from the last read.
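// Illustrative timeline (assuming an initial post-write interval of 50ms): refreshes
// are triggered roughly 50ms, 100ms, 200ms, ... after the last write, with the
// doubling capped once the interval reaches an hour.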
for t.postWriteInterval != 0 &&
t.postWriteInterval < time.Hour &&
!now.Before(t.lastWriteTime.Add(t.postWriteInterval)) {
t.postWriteInterval *= 2
t.logCxt.WithField("newPostWriteInterval", t.postWriteInterval).Debug("Updating post-write interval")
if !invalidated {
t.InvalidateDataplaneCache("post update")
invalidated = true
}
}
// Retry until we succeed. There are several reasons that updating iptables may fail:
//
// - A concurrent write may invalidate iptables-restore's compare-and-swap; this manifests
// as a failure on the COMMIT line.
// - Another process may have clobbered some of our state, resulting in inconsistencies
// in what we try to program. This could manifest in a number of ways depending on what
// the other process did.
// - Random transient failure.
//
// It's also possible that we're bugged and trying to write bad data so we give up
// eventually.
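// Illustrative worst case with the values below: up to ten retries, with sleeps of
// 1ms, 2ms, ..., 512ms (about a second in total), before we give up and panic.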
retries := 10
backoffTime := 1 * time.Millisecond
failedAtLeastOnce := false
for {
if !t.inSyncWithDataPlane {
// We have reason to believe that our picture of the dataplane is out of
// sync. Refresh it. This may mark more chains as dirty.
t.loadDataplaneState()
}
if err := t.applyUpdates(); err != nil {
if retries > 0 {
retries--
t.logCxt.WithError(err).Warn("Failed to program iptables, will retry")
t.timeSleep(backoffTime)
backoffTime *= 2
t.logCxt.WithError(err).Warn("Retrying...")
failedAtLeastOnce = true
continue
} else {
t.logCxt.WithError(err).Error("Failed to program iptables, loading diags before panic.")
cmd := t.newCmd(t.iptablesSaveCmd, "-t", t.Name)
output, err2 := cmd.Output()
if err2 != nil {
t.logCxt.WithError(err2).Error("Failed to load iptables state")
} else {
t.logCxt.WithField("iptablesState", string(output)).Error("Current state of iptables")
}
t.logCxt.WithError(err).Panic("Failed to program iptables, giving up after retries")
}
}
if failedAtLeastOnce {
t.logCxt.Warn("Succeeded after retry.")
}
break
}
t.gaugeNumChains.Set(float64(len(t.chainNameToChain)))
// Check whether we need to be rescheduled and how soon.
if t.refreshInterval > 0 {
// Refresh interval is set, start with that.
lastReadToNow = now.Sub(t.lastReadTime)
rescheduleAfter = t.refreshInterval - lastReadToNow
}
if t.postWriteInterval < time.Hour {
postWriteReched := t.lastWriteTime.Add(t.postWriteInterval).Sub(now)
if postWriteReched <= 0 {
rescheduleAfter = 1 * time.Millisecond
} else if t.refreshInterval <= 0 || postWriteReched < rescheduleAfter {
rescheduleAfter = postWriteReched
}
}
return
}
func (t *Table) applyUpdates() error {
// If needed, detect the dataplane features.
features := t.featureDetector.GetFeatures()
// Build up the iptables-restore input in an in-memory buffer. This allows us to log out the exact input after
// a failure, which has proven to be a very useful diagnostic tool.
buf := &t.restoreInputBuffer
buf.Reset() // Defensive.
// iptables-restore commands live in per-table transactions.
buf.StartTransaction(t.Name)
// Make a pass over the dirty chains and generate a forward reference for any that we're about to update.
// Writing a forward reference ensures that the chain exists and that it is empty.
t.dirtyChains.Iter(func(item interface{}) error {
chainName := item.(string)
chainNeedsToBeFlushed := false
if t.nftablesMode {
// iptables-nft-restore <v1.8.3 has a bug (https://bugzilla.netfilter.org/show_bug.cgi?id=1348)
// where only the first replace command sets the rule index. Work around that by refreshing the
// whole chain using a flush.
chain := t.chainNameToChain[chainName]
currentHashes := chain.RuleHashes(features)
previousHashes := t.chainToDataplaneHashes[chainName]
t.logCxt.WithFields(log.Fields{
"previous": previousHashes,
"current": currentHashes,
}).Debug("Comparing old to new hashes.")
if len(previousHashes) > 0 && reflect.DeepEqual(currentHashes, previousHashes) {
// Chain is already correct, skip it.
log.Debug("Chain already correct")
return set.RemoveItem
}
chainNeedsToBeFlushed = true
} else if _, ok := t.chainNameToChain[chainName]; !ok {
// About to delete this chain, flush it first to sever dependencies.
chainNeedsToBeFlushed = true
} else if _, ok := t.chainToDataplaneHashes[chainName]; !ok {
// Chain doesn't exist in dataplane, mark it for creation.
chainNeedsToBeFlushed = true
}
if chainNeedsToBeFlushed {
buf.WriteForwardReference(chainName)
}
return nil
})
// Make a second pass over the dirty chains. This time, we write out the rule changes.
newHashes := map[string][]string{}
t.dirtyChains.Iter(func(item interface{}) error {
chainName := item.(string)
if chain, ok := t.chainNameToChain[chainName]; ok {
// Chain update or creation. Scan the chain against its previous hashes
// and replace/append/delete as appropriate.
var previousHashes []string
if t.nftablesMode {
// Due to a bug in iptables nft mode, force a whole-chain rewrite. (See above.)
previousHashes = nil
} else {
// In iptables legacy mode, we compare the rules one by one and apply deltas rule by rule.
previousHashes = t.chainToDataplaneHashes[chainName]
}
currentHashes := chain.RuleHashes(features)
newHashes[chainName] = currentHashes
for i := 0; i < len(previousHashes) || i < len(currentHashes); i++ {
var line string
if i < len(previousHashes) && i < len(currentHashes) {
if previousHashes[i] == currentHashes[i] {
continue
}
// Hash doesn't match, replace the rule.
ruleNum := i + 1 // 1-indexed.
prefixFrag := t.commentFrag(currentHashes[i])
line = chain.Rules[i].RenderReplace(chainName, ruleNum, prefixFrag, features)
} else if i < len(previousHashes) {
// previousHashes was longer, remove the old rules from the end.
ruleNum := len(currentHashes) + 1 // 1-indexed
line = deleteRule(chainName, ruleNum)
} else {
// currentHashes was longer. Append.
prefixFrag := t.commentFrag(currentHashes[i])
line = chain.Rules[i].RenderAppend(chainName, prefixFrag, features)
}
buf.WriteLine(line)
}
}
return nil // Delay clearing the set until we've programmed iptables.
})
// Now calculate iptables updates for our inserted rules, which are used to hook top-level chains.
t.dirtyInserts.Iter(func(item interface{}) error {
chainName := item.(string)
previousHashes := t.chainToDataplaneHashes[chainName]
// Calculate the hashes for our inserted rules.
newChainHashes, newRuleHashes := t.expectedHashesForInsertChain(
chainName, numEmptyStrings(previousHashes))
if reflect.DeepEqual(newChainHashes, previousHashes) {
// Chain is in sync, skip to next one.
return nil
}
// For simplicity, if we've discovered that we're out-of-sync, remove all our
// rules from this chain, then re-insert/re-append them below.
//
// Remove in reverse order so that we don't disturb the rule numbers of rules we're
// about to remove.
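// For example, if our rules sit at positions 2 and 5, deleting rule 5 first leaves
// rule 2's index unchanged.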
for i := len(previousHashes) - 1; i >= 0; i-- {
if previousHashes[i] != "" {
ruleNum := i + 1
line := deleteRule(chainName, ruleNum)
buf.WriteLine(line)
}
}
rules := t.chainToInsertedRules[chainName]
if t.insertMode == "insert" {
t.logCxt.Debug("Rendering insert rules.")
// Since each insert is pushed onto the top of the chain, do the inserts in
// reverse order so that they end up in the correct order in the final
// state of the chain.
for i := len(rules) - 1; i >= 0; i-- {
prefixFrag := t.commentFrag(newRuleHashes[i])
line := rules[i].RenderInsert(chainName, prefixFrag, features)
buf.WriteLine(line)
}
} else {
t.logCxt.Debug("Rendering append rules.")
for i := 0; i < len(rules); i++ {
prefixFrag := t.commentFrag(newRuleHashes[i])
line := rules[i].RenderAppend(chainName, prefixFrag, features)
buf.WriteLine(line)
}
}
newHashes[chainName] = newChainHashes
return nil // Delay clearing the set until we've programmed iptables.
})
if t.nftablesMode {
// The nftables version of iptables-restore requires that chains are unreferenced at the start of the
// transaction before they can be deleted (i.e. it doesn't seem to update the reference calculation as
// rules are deleted). Close the current transaction and open a new one for the deletions in order to
// refresh its state. The buffer will discard a no-op transaction so we don't need to check.
t.logCxt.Debug("In nftables mode, restarting transaction between updates and deletions.")
buf.EndTransaction()
buf.StartTransaction(t.Name)
t.dirtyChains.Iter(func(item interface{}) error {
chainName := item.(string)
if _, ok := t.chainNameToChain[chainName]; !ok {
// Chain deletion
buf.WriteForwardReference(chainName)
}
return nil // Delay clearing the set until we've programmed iptables.
})
}
// Do deletions at the end. This ensures that we don't try to delete any chains that
// are still referenced (because we'll have removed the references in the modify pass
// above). Note: if a chain is being deleted at the same time as a chain that it refers to
// then we'll issue a create+flush instruction in the very first pass, which will sever the
// references.
t.dirtyChains.Iter(func(item interface{}) error {
chainName := item.(string)
if _, ok := t.chainNameToChain[chainName]; !ok {
// Chain deletion
buf.WriteLine(fmt.Sprintf("--delete-chain %s", chainName))
newHashes[chainName] = nil
}
return nil // Delay clearing the set until we've programmed iptables.
})
buf.EndTransaction()
if buf.Empty() {
t.logCxt.Debug("Update ended up being no-op, skipping call to ip(6)tables-restore.")
} else {
// Get the contents of the buffer ready to send to iptables-restore. Warning: for perf, this is directly
// accessing the buffer's internal array; don't touch the buffer after this point.
inputBytes := buf.GetBytesAndReset()
if log.GetLevel() >= log.DebugLevel {
// Only convert (potentially very large slice) to string at debug level.
inputStr := string(inputBytes)
t.logCxt.WithField("iptablesInput", inputStr).Debug("Writing to iptables")
}
var outputBuf, errBuf bytes.Buffer
args := []string{"--noflush", "--verbose"}
if features.RestoreSupportsLock {
// Versions of iptables-restore that support the xtables lock also make it impossible to disable.
// Make sure that we configure it to retry, with a short retry interval (the default is to try to
// acquire the lock only once).
lockTimeout := t.lockTimeout.Seconds()
if lockTimeout <= 0 {
// Before iptables-restore added lock support, we were able to disable the lock completely, which
// was indicated by a value <=0 (and was our default). Newer versions of iptables-restore require the
// lock so we override the default and set it to 10s.
lockTimeout = 10
}
lockProbeMicros := t.lockProbeInterval.Nanoseconds() / 1000
timeoutStr := fmt.Sprintf("%.0f", lockTimeout)
intervalStr := fmt.Sprintf("%d", lockProbeMicros)
args = append(args,
"--wait", timeoutStr, // seconds
"--wait-interval", intervalStr, // microseconds
)
log.WithFields(log.Fields{
"timeoutSecs": timeoutStr,
"probeIntervalMicros": intervalStr,
}).Debug("Using native iptables-restore xtables lock.")
}
cmd := t.newCmd(t.iptablesRestoreCmd, args...)
cmd.SetStdin(bytes.NewReader(inputBytes))
cmd.SetStdout(&outputBuf)
cmd.SetStderr(&errBuf)
countNumRestoreCalls.Inc()
// Note: calicoXtablesLock will be a dummy lock if our xtables lock is disabled (i.e. if iptables-restore
// supports the xtables lock itself, or if our implementation is disabled by config.)
t.calicoXtablesLock.Lock()
err := cmd.Run()
t.calicoXtablesLock.Unlock()
if err != nil {
// To log out the input, we must convert to string here since, after we return, the buffer can be re-used
// (and the logger may convert to string on a background thread).
inputStr := string(inputBytes)
t.logCxt.WithFields(log.Fields{
"output": outputBuf.String(),
"errorOutput": errBuf.String(),
"error": err,
"input": inputStr,
}).Warn("Failed to execute ip(6)tables-restore command")
t.inSyncWithDataPlane = false
countNumRestoreErrors.Inc()
return err
}
t.lastWriteTime = t.timeNow()
t.postWriteInterval = t.initialPostWriteInterval
}
// Now we've successfully updated iptables, clear the dirty sets. We do this even if we
// found there was nothing to do above, since we may have found out that a dirty chain
// was actually a no-op update.
t.dirtyChains = set.New()
t.dirtyInserts = set.New()
// Store off the updates.
for chainName, hashes := range newHashes {
if hashes == nil {
delete(t.chainToDataplaneHashes, chainName)
} else {
t.chainToDataplaneHashes[chainName] = hashes
}
}
return nil
}
func (t *Table) commentFrag(hash string) string {
return fmt.Sprintf(`-m comment --comment "%s%s"`, t.hashCommentPrefix, hash)
}
func deleteRule(chainName string, ruleNum int) string {
return fmt.Sprintf("-D %s %d", chainName, ruleNum)
}
func calculateRuleInsertHashes(chainName string, rules []Rule, features *Features) []string {
chain := Chain{
Name: chainName,
Rules: rules,
}
return (&chain).RuleHashes(features)
}
func numEmptyStrings(strs []string) int {
count := 0
for _, s := range strs {
if s == "" {
count++
}
}
return count
}
| 1 | 17,083 | I don't think this is used any more, please remove. | projectcalico-felix | c |
@@ -42,9 +42,11 @@ func NewManager(mysteriumClient server.Client, dialogEstablisherFactory DialogEs
}
}
-func (manager *connectionManager) Connect(id identity.Identity, nodeKey string) error {
+func (manager *connectionManager) Connect(myId identity.Identity, nodeKey string) error {
manager.status = statusConnecting()
+ providerId := identity.FromAddress(nodeKey)
+
proposals, err := manager.mysteriumClient.FindProposals(nodeKey)
if err != nil {
manager.status = statusError(err) | 1 | package client_connection
import (
"errors"
"github.com/mysterium/node/communication"
"github.com/mysterium/node/identity"
"github.com/mysterium/node/openvpn"
"github.com/mysterium/node/openvpn/middlewares/client/auth"
"github.com/mysterium/node/openvpn/middlewares/client/bytescount"
"github.com/mysterium/node/server"
"github.com/mysterium/node/session"
"path/filepath"
"time"
)
type DialogEstablisherFactory func(identity identity.Identity) communication.DialogEstablisher
type VpnClientFactory func(vpnSession session.SessionDto, identity identity.Identity) (openvpn.Client, error)
type connectionManager struct {
//these are passed on creation
mysteriumClient server.Client
dialogEstablisherFactory DialogEstablisherFactory
vpnClientFactory VpnClientFactory
statsKeeper bytescount.SessionStatsKeeper
//these are populated by Connect at runtime
dialog communication.Dialog
vpnClient openvpn.Client
status ConnectionStatus
}
func NewManager(mysteriumClient server.Client, dialogEstablisherFactory DialogEstablisherFactory,
vpnClientFactory VpnClientFactory, statsKeeper bytescount.SessionStatsKeeper) *connectionManager {
return &connectionManager{
mysteriumClient: mysteriumClient,
dialogEstablisherFactory: dialogEstablisherFactory,
vpnClientFactory: vpnClientFactory,
statsKeeper: statsKeeper,
dialog: nil,
vpnClient: nil,
status: statusNotConnected(),
}
}
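// Connect establishes a VPN connection to the given node: it looks up the node's
// service proposals, opens a communication dialog with the provider, requests a VPN
// session and starts the OpenVPN client, updating the manager's status at each step.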
func (manager *connectionManager) Connect(id identity.Identity, nodeKey string) error {
manager.status = statusConnecting()
proposals, err := manager.mysteriumClient.FindProposals(nodeKey)
if err != nil {
manager.status = statusError(err)
return err
}
if len(proposals) == 0 {
err = errors.New("node has no service proposals")
manager.status = statusError(err)
return err
}
proposal := proposals[0]
dialogEstablisher := manager.dialogEstablisherFactory(id)
manager.dialog, err = dialogEstablisher.CreateDialog(proposal.ProviderContacts[0])
if err != nil {
manager.status = statusError(err)
return err
}
vpnSession, err := session.RequestSessionCreate(manager.dialog, proposal.ID)
if err != nil {
manager.status = statusError(err)
return err
}
manager.vpnClient, err = manager.vpnClientFactory(*vpnSession, id)
if err != nil {
manager.status = statusError(err)
return err
}
if err := manager.vpnClient.Start(); err != nil {
manager.status = statusError(err)
return err
}
manager.statsKeeper.MarkSessionStart()
manager.status = statusConnected(vpnSession.ID)
return nil
}
func (manager *connectionManager) Status() ConnectionStatus {
return manager.status
}
func (manager *connectionManager) Disconnect() error {
manager.status = statusDisconnecting()
if manager.vpnClient != nil {
if err := manager.vpnClient.Stop(); err != nil {
return err
}
}
if manager.dialog != nil {
if err := manager.dialog.Close(); err != nil {
return err
}
}
manager.status = statusNotConnected()
return nil
}
func (manager *connectionManager) Wait() error {
return manager.vpnClient.Wait()
}
func statusError(err error) ConnectionStatus {
return ConnectionStatus{NotConnected, "", err}
}
func statusConnecting() ConnectionStatus {
return ConnectionStatus{Connecting, "", nil}
}
func statusConnected(sessionID session.SessionID) ConnectionStatus {
return ConnectionStatus{Connected, sessionID, nil}
}
func statusNotConnected() ConnectionStatus {
return ConnectionStatus{NotConnected, "", nil}
}
func statusDisconnecting() ConnectionStatus {
return ConnectionStatus{Disconnecting, "", nil}
}
func ConfigureVpnClientFactory(mysteriumAPIClient server.Client, vpnClientRuntimeDirectory string,
signerFactory identity.SignerFactory, statsKeeper bytescount.SessionStatsKeeper) VpnClientFactory {
return func(vpnSession session.SessionDto, id identity.Identity) (openvpn.Client, error) {
vpnConfig, err := openvpn.NewClientConfigFromString(
vpnSession.Config,
filepath.Join(vpnClientRuntimeDirectory, "client.ovpn"),
)
if err != nil {
return nil, err
}
statsSaver := bytescount.NewSessionStatsSaver(statsKeeper)
statsSender := bytescount.NewSessionStatsSender(mysteriumAPIClient, vpnSession.ID, signerFactory(id))
statsHandler := bytescount.NewCompositeStatsHandler(statsSaver, statsSender)
authenticator := auth.NewAuthenticatorFake()
vpnMiddlewares := []openvpn.ManagementMiddleware{
bytescount.NewMiddleware(statsHandler, 1*time.Minute),
auth.NewMiddleware(authenticator),
}
return openvpn.NewClient(
vpnConfig,
vpnClientRuntimeDirectory,
vpnMiddlewares...,
), nil
}
}
| 1 | 10,268 | shouldn't we name abbreviations up-cased? `myId` -> `myID`? | mysteriumnetwork-node | go |
@@ -287,7 +287,10 @@ export function recollectNodeTree(node, unmountOnly) {
else {
// If the node's VNode had a ref function, invoke it with null here.
// (this is part of the React spec, and smart for unsetting references)
- if (node[ATTR_KEY]!=null) applyRef(node[ATTR_KEY].ref, null);
+ if (node[ATTR_KEY]!=null) {
+ applyRef(node[ATTR_KEY].ref, null);
+ delete node[ATTR_KEY].ref;
+ }
if (unmountOnly===false || node[ATTR_KEY]==null) {
removeNode(node); | 1 | import { ATTR_KEY } from '../constants';
import { isSameNodeType, isNamedNode } from './index';
import { buildComponentFromVNode } from './component';
import { createNode, setAccessor } from '../dom/index';
import { unmountComponent } from './component';
import options from '../options';
import { applyRef } from '../util';
import { removeNode } from '../dom/index';
/**
* Queue of components that have been mounted and are awaiting componentDidMount
* @type {Array<import('../component').Component>}
*/
export const mounts = [];
/** Diff recursion count, used to track the end of the diff cycle. */
export let diffLevel = 0;
/** Global flag indicating if the diff is currently within an SVG */
let isSvgMode = false;
/** Global flag indicating if the diff is performing hydration */
let hydrating = false;
/** Invoke queued componentDidMount lifecycle methods */
export function flushMounts() {
let c, i;
for (i=0; i<mounts.length; ++i) {
c = mounts[i];
if (options.afterMount) options.afterMount(c);
if (c.componentDidMount) c.componentDidMount();
}
mounts.length = 0;
}
/**
* Apply differences in a given vnode (and its deep children) to a real DOM Node.
* @param {import('../dom').PreactElement} dom A DOM node to mutate into the shape of a `vnode`
* @param {import('../vnode').VNode} vnode A VNode (with descendants forming a tree) representing
* the desired DOM structure
* @param {object} context The current context
* @param {boolean} mountAll Whether or not to immediately mount all components
* @param {Element} parent ?
* @param {boolean} componentRoot ?
* @returns {import('../dom').PreactElement} The created/mutated element
* @private
*/
export function diff(dom, vnode, context, mountAll, parent, componentRoot) {
// diffLevel having been 0 here indicates initial entry into the diff (not a subdiff)
if (!diffLevel++) {
// when first starting the diff, check if we're diffing an SVG or within an SVG
isSvgMode = parent!=null && parent.ownerSVGElement!==undefined;
// hydration is indicated by the existing element to be diffed not having a prop cache
hydrating = dom!=null && !(ATTR_KEY in dom);
}
let ret = idiff(dom, vnode, context, mountAll, componentRoot);
// append the element if it's a new parent
if (parent && ret.parentNode!==parent) parent.appendChild(ret);
// diffLevel being reduced to 0 means we're exiting the diff
if (!--diffLevel) {
hydrating = false;
// invoke queued componentDidMount lifecycle methods
if (!componentRoot) flushMounts();
}
return ret;
}
/**
* Internals of `diff()`, separated to allow bypassing diffLevel / mount flushing.
* @param {import('../dom').PreactElement} dom A DOM node to mutate into the shape of a `vnode`
* @param {import('../vnode').VNode} vnode A VNode (with descendants forming a tree) representing the desired DOM structure
* @param {object} context The current context
* @param {boolean} mountAll Whether or not to immediately mount all components
* @param {boolean} [componentRoot] ?
* @private
*/
function idiff(dom, vnode, context, mountAll, componentRoot) {
let out = dom,
prevSvgMode = isSvgMode;
// empty values (null, undefined, booleans) render as empty Text nodes
if (vnode==null || typeof vnode==='boolean') vnode = '';
// Fast case: Strings & Numbers create/update Text nodes.
if (typeof vnode==='string' || typeof vnode==='number') {
// update if it's already a Text node:
if (dom && dom.splitText!==undefined && dom.parentNode && (!dom._component || componentRoot)) {
/* istanbul ignore if */ /* Browser quirk that can't be covered: https://github.com/developit/preact/commit/fd4f21f5c45dfd75151bd27b4c217d8003aa5eb9 */
if (dom.nodeValue!=vnode) {
dom.nodeValue = vnode;
}
}
else {
// it wasn't a Text node: replace it with one and recycle the old Element
out = document.createTextNode(vnode);
if (dom) {
if (dom.parentNode) dom.parentNode.replaceChild(out, dom);
recollectNodeTree(dom, true);
}
}
out[ATTR_KEY] = true;
return out;
}
// If the VNode represents a Component, perform a component diff:
let vnodeName = vnode.nodeName;
if (typeof vnodeName==='function') {
return buildComponentFromVNode(dom, vnode, context, mountAll);
}
// Tracks entering and exiting SVG namespace when descending through the tree.
isSvgMode = vnodeName==='svg' ? true : vnodeName==='foreignObject' ? false : isSvgMode;
// If there's no existing element or it's the wrong type, create a new one:
vnodeName = String(vnodeName);
if (!dom || !isNamedNode(dom, vnodeName)) {
out = createNode(vnodeName, isSvgMode);
if (dom) {
// move children into the replacement node
while (dom.firstChild) out.appendChild(dom.firstChild);
// if the previous Element was mounted into the DOM, replace it inline
if (dom.parentNode) dom.parentNode.replaceChild(out, dom);
// recycle the old element (skips non-Element node types)
recollectNodeTree(dom, true);
}
}
let fc = out.firstChild,
props = out[ATTR_KEY],
vchildren = vnode.children;
if (props==null) {
props = out[ATTR_KEY] = {};
for (let a=out.attributes, i=a.length; i--; ) props[a[i].name] = a[i].value;
}
// Optimization: fast-path for elements containing a single TextNode:
if (!hydrating && vchildren && vchildren.length===1 && typeof vchildren[0]==='string' && fc!=null && fc.splitText!==undefined && fc.nextSibling==null) {
if (fc.nodeValue!=vchildren[0]) {
fc.nodeValue = vchildren[0];
}
}
// otherwise, if there are existing or new children, diff them:
else if (vchildren && vchildren.length || fc!=null) {
innerDiffNode(out, vchildren, context, mountAll, hydrating || props.dangerouslySetInnerHTML!=null);
}
// Apply attributes/props from VNode to the DOM Element:
diffAttributes(out, vnode.attributes, props);
// restore previous SVG mode: (in case we're exiting an SVG namespace)
isSvgMode = prevSvgMode;
return out;
}
/**
* Apply child and attribute changes between a VNode and a DOM Node to the DOM.
* @param {import('../dom').PreactElement} dom Element whose children should be compared & mutated
* @param {Array<import('../vnode').VNode>} vchildren Array of VNodes to compare to `dom.childNodes`
* @param {object} context Implicitly descendant context object (from most
* recent `getChildContext()`)
* @param {boolean} mountAll Whether or not to immediately mount all components
* @param {boolean} isHydrating if `true`, consumes externally created elements
* similar to hydration
*/
function innerDiffNode(dom, vchildren, context, mountAll, isHydrating) {
let originalChildren = dom.childNodes,
children = [],
keyed = {},
keyedLen = 0,
min = 0,
len = originalChildren.length,
childrenLen = 0,
vlen = vchildren ? vchildren.length : 0,
j, c, f, vchild, child;
// Build up a map of keyed children and an Array of unkeyed children:
if (len!==0) {
for (let i=0; i<len; i++) {
let child = originalChildren[i],
props = child[ATTR_KEY],
key = vlen && props ? child._component ? child._component.__key : props.key : null;
if (key!=null) {
keyedLen++;
keyed[key] = child;
}
else if (props || (child.splitText!==undefined ? (isHydrating ? child.nodeValue.trim() : true) : isHydrating)) {
children[childrenLen++] = child;
}
}
}
if (vlen!==0) {
for (let i=0; i<vlen; i++) {
vchild = vchildren[i];
child = null;
// attempt to find a node based on key matching
let key = vchild.key;
if (key!=null) {
if (keyedLen && keyed[key]!==undefined) {
child = keyed[key];
keyed[key] = undefined;
keyedLen--;
}
}
// attempt to pluck a node of the same type from the existing children
else if (min<childrenLen) {
for (j=min; j<childrenLen; j++) {
if (children[j]!==undefined && isSameNodeType(c = children[j], vchild, isHydrating)) {
child = c;
children[j] = undefined;
if (j===childrenLen-1) childrenLen--;
if (j===min) min++;
break;
}
}
}
// morph the matched/found/created DOM child to match vchild (deep)
child = idiff(child, vchild, context, mountAll);
f = originalChildren[i];
if (child && child!==dom && child!==f) {
if (f==null) {
dom.appendChild(child);
}
else if (child===f.nextSibling) {
removeNode(f);
}
else {
dom.insertBefore(child, f);
}
}
}
}
// remove unused keyed children:
if (keyedLen) {
for (let i in keyed) if (keyed[i]!==undefined) recollectNodeTree(keyed[i], false);
}
// remove orphaned unkeyed children:
while (min<=childrenLen) {
if ((child = children[childrenLen--])!==undefined) recollectNodeTree(child, false);
}
}
/**
* Recursively recycle (or just unmount) a node and its descendants.
* @param {import('../dom').PreactElement} node DOM node to start
* unmount/removal from
* @param {boolean} [unmountOnly=false] If `true`, only triggers unmount
* lifecycle, skips removal
*/
export function recollectNodeTree(node, unmountOnly) {
let component = node._component;
if (component) {
// if node is owned by a Component, unmount that component (ends up recursing back here)
unmountComponent(component);
}
else {
// If the node's VNode had a ref function, invoke it with null here.
// (this is part of the React spec, and smart for unsetting references)
if (node[ATTR_KEY]!=null) applyRef(node[ATTR_KEY].ref, null);
if (unmountOnly===false || node[ATTR_KEY]==null) {
removeNode(node);
}
removeChildren(node);
}
}
/**
* Recollect/unmount all children.
* - we use .lastChild here because it causes less reflow than .firstChild
* - it's also cheaper than accessing the .childNodes Live NodeList
*/
export function removeChildren(node) {
node = node.lastChild;
while (node) {
let next = node.previousSibling;
recollectNodeTree(node, true);
node = next;
}
}
/**
* Apply differences in attributes from a VNode to the given DOM Element.
* @param {import('../dom').PreactElement} dom Element with attributes to diff `attrs` against
* @param {object} attrs The desired end-state key-value attribute pairs
* @param {object} old Current/previous attributes (from previous VNode or
* element's prop cache)
*/
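// Illustrative example (hypothetical props): with old={class: 'a', id: 'x'} and
// attrs={class: 'b'}, 'id' is unset (written back as undefined) and 'class' is
// updated to 'b'.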
function diffAttributes(dom, attrs, old) {
let name;
// remove attributes no longer present on the vnode by setting them to undefined
for (name in old) {
if (!(attrs && attrs[name]!=null) && old[name]!=null) {
setAccessor(dom, name, old[name], old[name] = undefined, isSvgMode);
}
}
// add new & update changed attributes
for (name in attrs) {
if (name!=='children' && name!=='innerHTML' && (!(name in old) || attrs[name]!==(name==='value' || name==='checked' ? dom[name] : old[name]))) {
setAccessor(dom, name, old[name], old[name] = attrs[name], isSvgMode);
}
}
}
| 1 | 12,285 | This just always calls refs. I think we need to either hoist ref invocation back out of `setProperty()` (it used to happen during rendering), or wait for component recycling to go away. | preactjs-preact | js |
@@ -618,7 +618,10 @@ func runTest(t *testing.T, test testT) {
defer test.builder.Stop()
c := &controller{}
- c.Register(test.builder.Context)
+ _, _, err := c.Register(test.builder.Context)
+ if err != nil {
+ t.Errorf("Error registering the controller: %v", err)
+ }
c.accountRegistry = &accountstest.FakeRegistry{
GetClientFunc: func(_ string) (acmecl.Interface, error) {
return test.acmeClient, nil | 1 | /*
Copyright 2020 The cert-manager Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package acmeorders
import (
"context"
"encoding/pem"
"errors"
"fmt"
"testing"
"time"
acmeapi "golang.org/x/crypto/acme"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
coretesting "k8s.io/client-go/testing"
fakeclock "k8s.io/utils/clock/testing"
accountstest "github.com/jetstack/cert-manager/pkg/acme/accounts/test"
acmecl "github.com/jetstack/cert-manager/pkg/acme/client"
cmacme "github.com/jetstack/cert-manager/pkg/apis/acme/v1"
cmmeta "github.com/jetstack/cert-manager/pkg/apis/meta/v1"
testpkg "github.com/jetstack/cert-manager/pkg/controller/test"
"github.com/jetstack/cert-manager/test/unit/gen"
)
func TestSyncHappyPath(t *testing.T) {
nowTime := time.Now()
nowMetaTime := metav1.NewTime(nowTime)
fixedClock := fakeclock.NewFakeClock(nowTime)
testIssuerHTTP01 := gen.Issuer("testissuer", gen.SetIssuerACME(cmacme.ACMEIssuer{
Solvers: []cmacme.ACMEChallengeSolver{
{
HTTP01: &cmacme.ACMEChallengeSolverHTTP01{
Ingress: &cmacme.ACMEChallengeSolverHTTP01Ingress{},
},
},
},
}))
testIssuerHTTP01TestCom := gen.Issuer("testissuer", gen.SetIssuerACME(cmacme.ACMEIssuer{
Solvers: []cmacme.ACMEChallengeSolver{
{
Selector: &cmacme.CertificateDNSNameSelector{
DNSNames: []string{"test.com"},
},
HTTP01: &cmacme.ACMEChallengeSolverHTTP01{
Ingress: &cmacme.ACMEChallengeSolverHTTP01Ingress{},
},
},
},
}))
testIssuerHTTP01TestComPreferredChain := gen.Issuer("testissuer", gen.SetIssuerACME(cmacme.ACMEIssuer{
PreferredChain: "ISRG Root X1",
Solvers: []cmacme.ACMEChallengeSolver{
{
Selector: &cmacme.CertificateDNSNameSelector{
DNSNames: []string{"test.com"},
},
HTTP01: &cmacme.ACMEChallengeSolverHTTP01{
Ingress: &cmacme.ACMEChallengeSolverHTTP01Ingress{},
},
},
},
}))
testOrder := gen.Order("testorder",
gen.SetOrderCommonName("test.com"),
gen.SetOrderIssuer(cmmeta.ObjectReference{
Name: testIssuerHTTP01TestCom.Name,
}),
)
testOrderIP := gen.Order("testorder", gen.SetOrderIssuer(cmmeta.ObjectReference{Name: testIssuerHTTP01.Name}), gen.SetOrderIPAddresses("10.0.0.1"))
pendingStatus := cmacme.OrderStatus{
State: cmacme.Pending,
URL: "http://testurl.com/abcde",
FinalizeURL: "http://testurl.com/abcde/finalize",
Authorizations: []cmacme.ACMEAuthorization{
{
URL: "http://authzurl",
Identifier: "test.com",
Challenges: []cmacme.ACMEChallenge{
{
URL: "http://chalurl",
Token: "token",
Type: "http-01",
},
},
},
},
}
testOrderPending := gen.OrderFrom(testOrder, gen.SetOrderStatus(pendingStatus))
testOrderInvalid := testOrderPending.DeepCopy()
testOrderInvalid.Status.State = cmacme.Invalid
testOrderInvalid.Status.FailureTime = &nowMetaTime
testOrderValid := testOrderPending.DeepCopy()
testOrderValid.Status.State = cmacme.Valid
// pem encoded word 'test'
testOrderValid.Status.Certificate = []byte(`-----BEGIN CERTIFICATE-----
dGVzdA==
-----END CERTIFICATE-----
`)
testOrderReady := testOrderPending.DeepCopy()
testOrderReady.Status.State = cmacme.Ready
testCert := []byte(`-----BEGIN CERTIFICATE-----
MIIFjTCCA3WgAwIBAgIRANOxciY0IzLc9AUoUSrsnGowDQYJKoZIhvcNAQELBQAw
TzELMAkGA1UEBhMCVVMxKTAnBgNVBAoTIEludGVybmV0IFNlY3VyaXR5IFJlc2Vh
cmNoIEdyb3VwMRUwEwYDVQQDEwxJU1JHIFJvb3QgWDEwHhcNMTYxMDA2MTU0MzU1
WhcNMjExMDA2MTU0MzU1WjBKMQswCQYDVQQGEwJVUzEWMBQGA1UEChMNTGV0J3Mg
RW5jcnlwdDEjMCEGA1UEAxMaTGV0J3MgRW5jcnlwdCBBdXRob3JpdHkgWDMwggEi
MA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCc0wzwWuUuR7dyXTeDs2hjMOrX
NSYZJeG9vjXxcJIvt7hLQQWrqZ41CFjssSrEaIcLo+N15Obzp2JxunmBYB/XkZqf
89B4Z3HIaQ6Vkc/+5pnpYDxIzH7KTXcSJJ1HG1rrueweNwAcnKx7pwXqzkrrvUHl
Npi5y/1tPJZo3yMqQpAMhnRnyH+lmrhSYRQTP2XpgofL2/oOVvaGifOFP5eGr7Dc
Gu9rDZUWfcQroGWymQQ2dYBrrErzG5BJeC+ilk8qICUpBMZ0wNAxzY8xOJUWuqgz
uEPxsR/DMH+ieTETPS02+OP88jNquTkxxa/EjQ0dZBYzqvqEKbbUC8DYfcOTAgMB
AAGjggFnMIIBYzAOBgNVHQ8BAf8EBAMCAYYwEgYDVR0TAQH/BAgwBgEB/wIBADBU
BgNVHSAETTBLMAgGBmeBDAECATA/BgsrBgEEAYLfEwEBATAwMC4GCCsGAQUFBwIB
FiJodHRwOi8vY3BzLnJvb3QteDEubGV0c2VuY3J5cHQub3JnMB0GA1UdDgQWBBSo
SmpjBH3duubRObemRWXv86jsoTAzBgNVHR8ELDAqMCigJqAkhiJodHRwOi8vY3Js
LnJvb3QteDEubGV0c2VuY3J5cHQub3JnMHIGCCsGAQUFBwEBBGYwZDAwBggrBgEF
BQcwAYYkaHR0cDovL29jc3Aucm9vdC14MS5sZXRzZW5jcnlwdC5vcmcvMDAGCCsG
AQUFBzAChiRodHRwOi8vY2VydC5yb290LXgxLmxldHNlbmNyeXB0Lm9yZy8wHwYD
VR0jBBgwFoAUebRZ5nu25eQBc4AIiMgaWPbpm24wDQYJKoZIhvcNAQELBQADggIB
ABnPdSA0LTqmRf/Q1eaM2jLonG4bQdEnqOJQ8nCqxOeTRrToEKtwT++36gTSlBGx
A/5dut82jJQ2jxN8RI8L9QFXrWi4xXnA2EqA10yjHiR6H9cj6MFiOnb5In1eWsRM
UM2v3e9tNsCAgBukPHAg1lQh07rvFKm/Bz9BCjaxorALINUfZ9DD64j2igLIxle2
DPxW8dI/F2loHMjXZjqG8RkqZUdoxtID5+90FgsGIfkMpqgRS05f4zPbCEHqCXl1
eO5HyELTgcVlLXXQDgAWnRzut1hFJeczY1tjQQno6f6s+nMydLN26WuU4s3UYvOu
OsUxRlJu7TSRHqDC3lSE5XggVkzdaPkuKGQbGpny+01/47hfXXNB7HntWNZ6N2Vw
p7G6OfY+YQrZwIaQmhrIqJZuigsrbe3W+gdn5ykE9+Ky0VgVUsfxo52mwFYs1JKY
2PGDuWx8M6DlS6qQkvHaRUo0FMd8TsSlbF0/v965qGFKhSDeQoMpYnwcmQilRh/0
ayLThlHLN81gSkJjVrPI0Y8xCVPB4twb1PFUd2fPM3sA1tJ83sZ5v8vgFv2yofKR
PB0t6JzUA81mSqM3kxl5e+IZwhYAyO0OTg3/fs8HqGTNKd9BqoUwSRBzp06JMg5b
rUCGwbCUDI0mxadJ3Bz4WxR6fyNpBK2yAinWEsikxqEt
-----END CERTIFICATE-----
`)
rawTestCert, _ := pem.Decode(testCert)
testOrderValidAltCert := gen.OrderFrom(testOrder, gen.SetOrderStatus(pendingStatus))
testOrderValidAltCert.Status.State = cmacme.Valid
testOrderValidAltCert.Status.Certificate = testCert
fakeHTTP01ACMECl := &acmecl.FakeACME{
FakeHTTP01ChallengeResponse: func(s string) (string, error) {
// TODO: assert s = "token"
return "key", nil
},
}
testAuthorizationChallenge, err := buildChallenge(context.TODO(), fakeHTTP01ACMECl, testIssuerHTTP01TestCom, testOrderPending, testOrderPending.Status.Authorizations[0])
if err != nil {
t.Fatalf("error building Challenge resource test fixture: %v", err)
}
testAuthorizationChallengeValid := testAuthorizationChallenge.DeepCopy()
testAuthorizationChallengeValid.Status.State = cmacme.Valid
testAuthorizationChallengeInvalid := testAuthorizationChallenge.DeepCopy()
testAuthorizationChallengeInvalid.Status.State = cmacme.Invalid
testACMEAuthorizationPending := &acmeapi.Authorization{
URI: "http://authzurl",
Status: acmeapi.StatusPending,
Identifier: acmeapi.AuthzID{
Value: "test.com",
},
Challenges: []*acmeapi.Challenge{
{
Type: "http-01",
Token: "token",
},
},
}
testACMEOrderPending := &acmeapi.Order{
URI: testOrderPending.Status.URL,
Identifiers: []acmeapi.AuthzID{
{
Type: "dns",
Value: "test.com",
},
},
FinalizeURL: testOrderPending.Status.FinalizeURL,
AuthzURLs: []string{"http://authzurl"},
Status: acmeapi.StatusPending,
}
// shallow copy
testACMEOrderValid := &acmeapi.Order{}
*testACMEOrderValid = *testACMEOrderPending
testACMEOrderValid.Status = acmeapi.StatusValid
// shallow copy
testACMEOrderReady := &acmeapi.Order{}
*testACMEOrderReady = *testACMEOrderPending
testACMEOrderReady.Status = acmeapi.StatusReady
// shallow copy
testACMEOrderInvalid := &acmeapi.Order{}
*testACMEOrderInvalid = *testACMEOrderPending
testACMEOrderInvalid.Status = acmeapi.StatusInvalid
tests := map[string]testT{
"create a new order with the acme server, set the order url on the status resource and return nil to avoid cache timing issues": {
order: testOrder,
builder: &testpkg.Builder{
CertManagerObjects: []runtime.Object{testIssuerHTTP01TestCom, testOrder},
ExpectedActions: []testpkg.Action{
testpkg.NewAction(coretesting.NewUpdateSubresourceAction(cmacme.SchemeGroupVersion.WithResource("orders"),
"status",
testOrderPending.Namespace,
gen.OrderFrom(testOrder, gen.SetOrderStatus(cmacme.OrderStatus{
State: cmacme.Pending,
URL: "http://testurl.com/abcde",
FinalizeURL: "http://testurl.com/abcde/finalize",
Authorizations: []cmacme.ACMEAuthorization{
{
URL: "http://authzurl",
},
},
})))),
},
},
acmeClient: &acmecl.FakeACME{
FakeAuthorizeOrder: func(ctx context.Context, id []acmeapi.AuthzID, opt ...acmeapi.OrderOption) (*acmeapi.Order, error) {
return testACMEOrderPending, nil
},
FakeGetAuthorization: func(ctx context.Context, url string) (*acmeapi.Authorization, error) {
if url != "http://authzurl" {
return nil, fmt.Errorf("Invalid URL: expected http://authzurl got %q", url)
}
return testACMEAuthorizationPending, nil
},
FakeHTTP01ChallengeResponse: func(s string) (string, error) {
// TODO: assert s = "token"
return "key", nil
},
},
},
"create a new order with the acme server with an IP address": {
order: testOrderIP,
builder: &testpkg.Builder{
CertManagerObjects: []runtime.Object{testIssuerHTTP01, testOrderIP},
ExpectedActions: []testpkg.Action{
testpkg.NewAction(coretesting.NewUpdateSubresourceAction(cmacme.SchemeGroupVersion.WithResource("orders"),
"status",
testOrderPending.Namespace,
gen.OrderFrom(testOrderIP, gen.SetOrderStatus(cmacme.OrderStatus{
State: cmacme.Pending,
URL: "http://testurl.com/abcde",
FinalizeURL: "http://testurl.com/abcde/finalize",
Authorizations: []cmacme.ACMEAuthorization{
{
URL: "http://authzurl",
},
},
})))),
},
},
acmeClient: &acmecl.FakeACME{
FakeAuthorizeOrder: func(ctx context.Context, id []acmeapi.AuthzID, opt ...acmeapi.OrderOption) (*acmeapi.Order, error) {
if id[0].Value != "10.0.0.1" || id[0].Type != "ip" {
return nil, errors.New("AuthzID needs to be the IP")
}
return testACMEOrderPending, nil
},
FakeGetAuthorization: func(ctx context.Context, url string) (*acmeapi.Authorization, error) {
if url != "http://authzurl" {
return nil, fmt.Errorf("Invalid URL: expected http://authzurl got %q", url)
}
return testACMEAuthorizationPending, nil
},
FakeHTTP01ChallengeResponse: func(s string) (string, error) {
// TODO: assert s = "token"
return "key", nil
},
},
},
"create a challenge resource for the test.com dnsName on the order": {
order: testOrderPending,
builder: &testpkg.Builder{
CertManagerObjects: []runtime.Object{testIssuerHTTP01TestCom, testOrderPending},
ExpectedActions: []testpkg.Action{
testpkg.NewAction(coretesting.NewCreateAction(cmacme.SchemeGroupVersion.WithResource("challenges"), testAuthorizationChallenge.Namespace, testAuthorizationChallenge)),
},
ExpectedEvents: []string{
`Normal Created Created Challenge resource "testorder-2179654896" for domain "test.com"`,
},
},
acmeClient: &acmecl.FakeACME{
FakeHTTP01ChallengeResponse: func(s string) (string, error) {
// TODO: assert s = "token"
return "key", nil
},
},
},
"should refuse to create a challenge if only an unknown challenge type is offered": {
order: gen.OrderFrom(testOrderPending, gen.SetOrderStatus(cmacme.OrderStatus{
State: cmacme.Pending,
URL: "http://testurl.com/abcde",
FinalizeURL: "http://testurl.com/abcde/finalize",
Authorizations: []cmacme.ACMEAuthorization{
{
URL: "http://authzurl",
Identifier: "test.com",
Challenges: []cmacme.ACMEChallenge{
{
URL: "http://chalurl",
Token: "token",
Type: "unknown-type",
},
},
},
},
})),
builder: &testpkg.Builder{
CertManagerObjects: []runtime.Object{
testIssuerHTTP01TestCom,
},
ExpectedEvents: []string{
// the 'unsupported challenge type' text is not printed here as the code that 'selects'
// a solver to use for a challenge filters out unsupported challenge types earlier
// in its selection routine.
`Warning Solver Failed to determine a valid solver configuration for the set of domains on the Order: no configured challenge solvers can be used for this challenge`,
},
},
},
// TODO: we should improve this behaviour as this is the 'stuck order' problem described in:
// https://github.com/jetstack/cert-manager/issues/2868
"skip creating a Challenge for an already valid authorization, and do nothing if the order is pending": {
order: gen.OrderFrom(testOrder, gen.SetOrderStatus(
cmacme.OrderStatus{
State: cmacme.Pending,
URL: "http://testurl.com/abcde",
FinalizeURL: "http://testurl.com/abcde/finalize",
Authorizations: []cmacme.ACMEAuthorization{
{
URL: "http://authzurl",
Identifier: "test.com",
InitialState: cmacme.Valid,
Challenges: []cmacme.ACMEChallenge{
{
URL: "http://chalurl",
Token: "token",
Type: "http-01",
},
},
},
},
},
)),
builder: &testpkg.Builder{
CertManagerObjects: []runtime.Object{testIssuerHTTP01TestCom, testOrderPending},
ExpectedActions: []testpkg.Action{},
ExpectedEvents: []string{},
},
acmeClient: &acmecl.FakeACME{
FakeGetOrder: func(ctx context.Context, url string) (*acmeapi.Order, error) {
return &acmeapi.Order{
URI: "http://testurl.com/abcde",
Status: acmeapi.StatusPending,
FinalizeURL: "http://testurl.com/abcde/finalize",
CertURL: "",
}, nil
},
},
},
"skip creating a Challenge for an already valid authorization": {
order: gen.OrderFrom(testOrder, gen.SetOrderStatus(
cmacme.OrderStatus{
State: cmacme.Pending,
URL: "http://testurl.com/abcde",
FinalizeURL: "http://testurl.com/abcde/finalize",
Authorizations: []cmacme.ACMEAuthorization{
{
URL: "http://authzurl",
Identifier: "test.com",
InitialState: cmacme.Valid,
Challenges: []cmacme.ACMEChallenge{
{
URL: "http://chalurl",
Token: "token",
Type: "http-01",
},
},
},
},
},
)),
builder: &testpkg.Builder{
CertManagerObjects: []runtime.Object{testIssuerHTTP01TestCom, testOrderPending},
ExpectedActions: []testpkg.Action{
testpkg.NewAction(coretesting.NewUpdateSubresourceAction(cmacme.SchemeGroupVersion.WithResource("orders"),
"status",
testOrder.Namespace, gen.OrderFrom(testOrder, gen.SetOrderStatus(
cmacme.OrderStatus{
// The 'state' field should be updated to reflect the
// Order returned by FakeGetOrder
State: cmacme.Valid,
URL: "http://testurl.com/abcde",
FinalizeURL: "http://testurl.com/abcde/finalize",
Authorizations: []cmacme.ACMEAuthorization{
{
URL: "http://authzurl",
Identifier: "test.com",
InitialState: cmacme.Valid,
Challenges: []cmacme.ACMEChallenge{
{
URL: "http://chalurl",
Token: "token",
Type: "http-01",
},
},
},
},
},
)),
)),
},
ExpectedEvents: []string{},
},
acmeClient: &acmecl.FakeACME{
FakeGetOrder: func(ctx context.Context, url string) (*acmeapi.Order, error) {
return &acmeapi.Order{
URI: "http://testurl.com/abcde",
Status: acmeapi.StatusValid,
FinalizeURL: "http://testurl.com/abcde/finalize",
CertURL: "",
}, nil
},
},
},
"do nothing if the challenge for test.com is still pending": {
order: testOrderPending,
builder: &testpkg.Builder{
CertManagerObjects: []runtime.Object{testIssuerHTTP01TestCom, testOrderPending, testAuthorizationChallenge},
ExpectedActions: []testpkg.Action{},
},
acmeClient: &acmecl.FakeACME{
FakeHTTP01ChallengeResponse: func(s string) (string, error) {
// TODO: assert s = "token"
return "key", nil
},
},
},
"call GetOrder and update the order state to 'ready' if all challenges are 'valid'": {
order: testOrderPending,
builder: &testpkg.Builder{
CertManagerObjects: []runtime.Object{testIssuerHTTP01TestCom, testOrderPending, testAuthorizationChallengeValid},
ExpectedActions: []testpkg.Action{
testpkg.NewAction(coretesting.NewUpdateSubresourceAction(cmacme.SchemeGroupVersion.WithResource("orders"),
"status",
testOrderReady.Namespace, testOrderReady)),
},
},
acmeClient: &acmecl.FakeACME{
FakeGetOrder: func(_ context.Context, url string) (*acmeapi.Order, error) {
return testACMEOrderReady, nil
},
FakeHTTP01ChallengeResponse: func(s string) (string, error) {
// TODO: assert s = "token"
return "key", nil
},
},
},
"call FinalizeOrder and update the order state to 'valid' if finalize succeeds": {
order: testOrderReady,
builder: &testpkg.Builder{
CertManagerObjects: []runtime.Object{testIssuerHTTP01TestCom, testOrderReady, testAuthorizationChallengeValid},
ExpectedActions: []testpkg.Action{
testpkg.NewAction(coretesting.NewUpdateSubresourceAction(cmacme.SchemeGroupVersion.WithResource("orders"),
"status",
testOrderValid.Namespace, testOrderValid)),
},
ExpectedEvents: []string{
"Normal Complete Order completed successfully",
},
},
acmeClient: &acmecl.FakeACME{
FakeGetOrder: func(_ context.Context, url string) (*acmeapi.Order, error) {
return testACMEOrderValid, nil
},
FakeCreateOrderCert: func(_ context.Context, url string, csr []byte, bundle bool) ([][]byte, string, error) {
testData := []byte("test")
return [][]byte{testData}, "http://testurl", nil
},
FakeHTTP01ChallengeResponse: func(s string) (string, error) {
// TODO: assert s = "token"
return "key", nil
},
},
},
"call FinalizeOrder fetch alternate cert chain": {
order: testOrderReady.DeepCopy(),
builder: &testpkg.Builder{
CertManagerObjects: []runtime.Object{testIssuerHTTP01TestComPreferredChain, testOrderReady, testAuthorizationChallengeValid},
ExpectedActions: []testpkg.Action{
testpkg.NewAction(coretesting.NewUpdateSubresourceAction(cmacme.SchemeGroupVersion.WithResource("orders"),
"status",
testOrderValid.Namespace, testOrderValidAltCert)),
},
ExpectedEvents: []string{
"Normal Complete Order completed successfully",
},
},
acmeClient: &acmecl.FakeACME{
FakeGetOrder: func(_ context.Context, url string) (*acmeapi.Order, error) {
return testACMEOrderValid, nil
},
FakeCreateOrderCert: func(_ context.Context, url string, csr []byte, bundle bool) ([][]byte, string, error) {
testData := []byte("test")
return [][]byte{testData}, "http://testurl", nil
},
FakeFetchCertAlternatives: func(_ context.Context, url string, bundle bool) ([][][]byte, error) {
if url != "http://testurl" {
return nil, errors.New("Cert URL is incorrect")
}
return [][][]byte{{rawTestCert.Bytes}}, nil
},
FakeHTTP01ChallengeResponse: func(s string) (string, error) {
// TODO: assert s = "token"
return "key", nil
},
},
},
"call GetOrder and update the order state if the challenge is 'failed'": {
order: testOrderPending,
builder: &testpkg.Builder{
CertManagerObjects: []runtime.Object{testIssuerHTTP01TestCom, testOrderPending, testAuthorizationChallengeInvalid},
ExpectedActions: []testpkg.Action{
testpkg.NewAction(coretesting.NewUpdateSubresourceAction(cmacme.SchemeGroupVersion.WithResource("orders"),
"status",
testOrderInvalid.Namespace, testOrderInvalid)),
},
},
acmeClient: &acmecl.FakeACME{
FakeGetOrder: func(_ context.Context, url string) (*acmeapi.Order, error) {
return testACMEOrderInvalid, nil
},
FakeHTTP01ChallengeResponse: func(s string) (string, error) {
return "key", nil
},
},
},
"should leave the order state as-is if the challenge is marked invalid but the acme order is pending": {
order: testOrderPending,
builder: &testpkg.Builder{
CertManagerObjects: []runtime.Object{testIssuerHTTP01TestCom, testOrderPending, testAuthorizationChallengeInvalid},
ExpectedActions: []testpkg.Action{},
},
acmeClient: &acmecl.FakeACME{
FakeGetOrder: func(_ context.Context, url string) (*acmeapi.Order, error) {
return testACMEOrderPending, nil
},
FakeHTTP01ChallengeResponse: func(s string) (string, error) {
// TODO: assert s = "token"
return "key", nil
},
},
},
"do nothing if the order is valid": {
order: testOrderValid,
builder: &testpkg.Builder{
CertManagerObjects: []runtime.Object{testIssuerHTTP01TestCom, testOrderValid},
ExpectedActions: []testpkg.Action{},
},
acmeClient: &acmecl.FakeACME{},
},
"do nothing if the order is failed": {
order: testOrderInvalid,
builder: &testpkg.Builder{
CertManagerObjects: []runtime.Object{testIssuerHTTP01TestCom, testOrderInvalid},
ExpectedActions: []testpkg.Action{},
},
acmeClient: &acmecl.FakeACME{},
},
}
for name, test := range tests {
t.Run(name, func(t *testing.T) {
// reset the fixedClock at the start of each test
fixedClock.SetTime(nowTime)
// always use the fixedClock unless otherwise specified
if test.builder.Clock == nil {
test.builder.Clock = fixedClock
}
runTest(t, test)
})
}
}
type testT struct {
order *cmacme.Order
builder *testpkg.Builder
acmeClient acmecl.Interface
expectErr bool
}
func runTest(t *testing.T, test testT) {
test.builder.T = t
test.builder.Init()
defer test.builder.Stop()
c := &controller{}
c.Register(test.builder.Context)
c.accountRegistry = &accountstest.FakeRegistry{
GetClientFunc: func(_ string) (acmecl.Interface, error) {
return test.acmeClient, nil
},
}
test.builder.Start()
err := c.Sync(context.Background(), test.order)
if err != nil && !test.expectErr {
t.Errorf("Expected function to not error, but got: %v", err)
}
if err == nil && test.expectErr {
t.Errorf("Expected function to get an error, but got: %v", err)
}
test.builder.CheckAndFinish(err)
}
| 1 | 26,702 | Oof. I may have missed this while reviewing #3805 | jetstack-cert-manager | go |
@@ -3,8 +3,12 @@
// caffe::Caffe functions so that one could easily call it from matlab.
// Note that for matlab, we will simply use float as the data type.
-#include <string>
-#include <vector>
+// these need to be included after boost on OS X
+#include <string> // NOLINT(build/include_order)
+#include <vector> // NOLINT(build/include_order)
+#include <fstream> // NOLINT
+#include <stdexcept>
+
#include "mex.h"
| 1 | //
// matcaffe.cpp provides a wrapper of the caffe::Net class as well as some
// caffe::Caffe functions so that one could easily call it from matlab.
// Note that for matlab, we will simply use float as the data type.
#include <string>
#include <vector>
#include "mex.h"
#include "caffe/caffe.hpp"
#define MEX_ARGS int nlhs, mxArray **plhs, int nrhs, const mxArray **prhs
using namespace caffe; // NOLINT(build/namespaces)
// The pointer to the internal caffe::Net instance
static shared_ptr<Net<float> > net_;
static int init_key = -2;
// Five things to be aware of:
// caffe uses row-major order
// matlab uses column-major order
// caffe uses BGR color channel order
// matlab uses RGB color channel order
// images need to have the data mean subtracted
//
// Data coming in from matlab needs to be in the order
// [width, height, channels, images]
// where width is the fastest dimension.
// Here is the rough matlab for putting image data into the correct
// format:
// % convert from uint8 to single
// im = single(im);
// % reshape to a fixed size (e.g., 227x227)
// im = imresize(im, [IMAGE_DIM IMAGE_DIM], 'bilinear');
// % permute from RGB to BGR and subtract the data mean (already in BGR)
// im = im(:,:,[3 2 1]) - data_mean;
// % flip width and height to make width the fastest dimension
// im = permute(im, [2 1 3]);
//
// If you have multiple images, cat them with cat(4, ...)
//
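//
// A short sketch (helper names are illustrative, not part of the original
// file) of why a flat copy works once matlab data is permuted to
// [width, height, channels, num]: the matlab column-major index and the
// caffe row-major index coincide element for element.
static inline int matlab_index(int w, int h, int c, int n,
                               int W, int H, int C) {
  // column-major: the first dimension (width) varies fastest
  return w + W * (h + H * (c + C * n));
}
static inline int caffe_index(int w, int h, int c, int n,
                              int W, int H, int C) {
  // row-major blob of shape (num, channels, height, width)
  return ((n * C + c) * H + h) * W + w;
}
// Both expand to w + W*h + W*H*c + W*H*C*n, which is why caffe_copy can move
// the whole buffer in one shot.
//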
// The actual forward function. It takes in a cell array of 4-D arrays as
// input and outputs a cell array.
static mxArray* do_forward(const mxArray* const bottom) {
vector<Blob<float>*>& input_blobs = net_->input_blobs();
CHECK_EQ(static_cast<unsigned int>(mxGetDimensions(bottom)[0]),
input_blobs.size());
for (unsigned int i = 0; i < input_blobs.size(); ++i) {
const mxArray* const elem = mxGetCell(bottom, i);
CHECK(mxIsSingle(elem))
<< "MatCaffe require single-precision float point data";
CHECK_EQ(mxGetNumberOfElements(elem), input_blobs[i]->count())
<< "MatCaffe input size does not match the input size of the network";
const float* const data_ptr =
reinterpret_cast<const float* const>(mxGetPr(elem));
switch (Caffe::mode()) {
case Caffe::CPU:
caffe_copy(input_blobs[i]->count(), data_ptr,
input_blobs[i]->mutable_cpu_data());
break;
case Caffe::GPU:
caffe_copy(input_blobs[i]->count(), data_ptr,
input_blobs[i]->mutable_gpu_data());
break;
default:
LOG(FATAL) << "Unknown Caffe mode.";
} // switch (Caffe::mode())
}
const vector<Blob<float>*>& output_blobs = net_->ForwardPrefilled();
mxArray* mx_out = mxCreateCellMatrix(output_blobs.size(), 1);
for (unsigned int i = 0; i < output_blobs.size(); ++i) {
// internally data is stored as (width, height, channels, num)
// where width is the fastest dimension
mwSize dims[4] = {output_blobs[i]->width(), output_blobs[i]->height(),
output_blobs[i]->channels(), output_blobs[i]->num()};
mxArray* mx_blob = mxCreateNumericArray(4, dims, mxSINGLE_CLASS, mxREAL);
mxSetCell(mx_out, i, mx_blob);
float* data_ptr = reinterpret_cast<float*>(mxGetPr(mx_blob));
switch (Caffe::mode()) {
case Caffe::CPU:
caffe_copy(output_blobs[i]->count(), output_blobs[i]->cpu_data(),
data_ptr);
break;
case Caffe::GPU:
caffe_copy(output_blobs[i]->count(), output_blobs[i]->gpu_data(),
data_ptr);
break;
default:
LOG(FATAL) << "Unknown Caffe mode.";
} // switch (Caffe::mode())
}
return mx_out;
}
static mxArray* do_backward(const mxArray* const top_diff) {
vector<Blob<float>*>& output_blobs = net_->output_blobs();
vector<Blob<float>*>& input_blobs = net_->input_blobs();
CHECK_EQ(static_cast<unsigned int>(mxGetDimensions(top_diff)[0]),
output_blobs.size());
// First, copy the output diff
for (unsigned int i = 0; i < output_blobs.size(); ++i) {
const mxArray* const elem = mxGetCell(top_diff, i);
const float* const data_ptr =
reinterpret_cast<const float* const>(mxGetPr(elem));
switch (Caffe::mode()) {
case Caffe::CPU:
caffe_copy(output_blobs[i]->count(), data_ptr,
output_blobs[i]->mutable_cpu_diff());
break;
case Caffe::GPU:
caffe_copy(output_blobs[i]->count(), data_ptr,
output_blobs[i]->mutable_gpu_diff());
break;
default:
LOG(FATAL) << "Unknown Caffe mode.";
} // switch (Caffe::mode())
}
// LOG(INFO) << "Start";
net_->Backward();
// LOG(INFO) << "End";
mxArray* mx_out = mxCreateCellMatrix(input_blobs.size(), 1);
for (unsigned int i = 0; i < input_blobs.size(); ++i) {
// internally data is stored as (width, height, channels, num)
// where width is the fastest dimension
mwSize dims[4] = {input_blobs[i]->width(), input_blobs[i]->height(),
input_blobs[i]->channels(), input_blobs[i]->num()};
mxArray* mx_blob = mxCreateNumericArray(4, dims, mxSINGLE_CLASS, mxREAL);
mxSetCell(mx_out, i, mx_blob);
float* data_ptr = reinterpret_cast<float*>(mxGetPr(mx_blob));
switch (Caffe::mode()) {
case Caffe::CPU:
caffe_copy(input_blobs[i]->count(), input_blobs[i]->cpu_diff(), data_ptr);
break;
case Caffe::GPU:
caffe_copy(input_blobs[i]->count(), input_blobs[i]->gpu_diff(), data_ptr);
break;
default:
LOG(FATAL) << "Unknown Caffe mode.";
} // switch (Caffe::mode())
}
return mx_out;
}
static mxArray* do_get_weights() {
const vector<shared_ptr<Layer<float> > >& layers = net_->layers();
const vector<string>& layer_names = net_->layer_names();
// Step 1: count the number of layers with weights
int num_layers = 0;
{
string prev_layer_name = "";
for (unsigned int i = 0; i < layers.size(); ++i) {
vector<shared_ptr<Blob<float> > >& layer_blobs = layers[i]->blobs();
if (layer_blobs.size() == 0) {
continue;
}
if (layer_names[i] != prev_layer_name) {
prev_layer_name = layer_names[i];
num_layers++;
}
}
}
// Step 2: prepare output array of structures
mxArray* mx_layers;
{
const mwSize dims[2] = {num_layers, 1};
const char* fnames[2] = {"weights", "layer_names"};
mx_layers = mxCreateStructArray(2, dims, 2, fnames);
}
// Step 3: copy weights into output
{
string prev_layer_name = "";
int mx_layer_index = 0;
for (unsigned int i = 0; i < layers.size(); ++i) {
vector<shared_ptr<Blob<float> > >& layer_blobs = layers[i]->blobs();
if (layer_blobs.size() == 0) {
continue;
}
mxArray* mx_layer_cells = NULL;
if (layer_names[i] != prev_layer_name) {
prev_layer_name = layer_names[i];
const mwSize dims[2] = {static_cast<mwSize>(layer_blobs.size()), 1};
mx_layer_cells = mxCreateCellArray(2, dims);
mxSetField(mx_layers, mx_layer_index, "weights", mx_layer_cells);
mxSetField(mx_layers, mx_layer_index, "layer_names",
mxCreateString(layer_names[i].c_str()));
mx_layer_index++;
}
for (unsigned int j = 0; j < layer_blobs.size(); ++j) {
// internally data is stored as (width, height, channels, num)
// where width is the fastest dimension
mwSize dims[4] = {layer_blobs[j]->width(), layer_blobs[j]->height(),
layer_blobs[j]->channels(), layer_blobs[j]->num()};
mxArray* mx_weights =
mxCreateNumericArray(4, dims, mxSINGLE_CLASS, mxREAL);
mxSetCell(mx_layer_cells, j, mx_weights);
float* weights_ptr = reinterpret_cast<float*>(mxGetPr(mx_weights));
switch (Caffe::mode()) {
case Caffe::CPU:
caffe_copy(layer_blobs[j]->count(), layer_blobs[j]->cpu_data(),
weights_ptr);
break;
case Caffe::GPU:
caffe_copy(layer_blobs[j]->count(), layer_blobs[j]->gpu_data(),
weights_ptr);
break;
default:
LOG(FATAL) << "Unknown caffe mode: " << Caffe::mode();
}
}
}
}
return mx_layers;
}
static void get_weights(MEX_ARGS) {
plhs[0] = do_get_weights();
}
static void set_mode_cpu(MEX_ARGS) {
Caffe::set_mode(Caffe::CPU);
}
static void set_mode_gpu(MEX_ARGS) {
Caffe::set_mode(Caffe::GPU);
}
static void set_phase_train(MEX_ARGS) {
Caffe::set_phase(Caffe::TRAIN);
}
static void set_phase_test(MEX_ARGS) {
Caffe::set_phase(Caffe::TEST);
}
static void set_device(MEX_ARGS) {
if (nrhs != 1) {
LOG(ERROR) << "Only given " << nrhs << " arguments";
mexErrMsgTxt("Wrong number of arguments");
}
int device_id = static_cast<int>(mxGetScalar(prhs[0]));
Caffe::SetDevice(device_id);
}
static void get_init_key(MEX_ARGS) {
plhs[0] = mxCreateDoubleScalar(init_key);
}
static void init(MEX_ARGS) {
if (nrhs != 2) {
LOG(ERROR) << "Only given " << nrhs << " arguments";
mexErrMsgTxt("Wrong number of arguments");
}
char* param_file = mxArrayToString(prhs[0]);
char* model_file = mxArrayToString(prhs[1]);
net_.reset(new Net<float>(string(param_file)));
net_->CopyTrainedLayersFrom(string(model_file));
mxFree(param_file);
mxFree(model_file);
init_key = random(); // NOLINT(caffe/random_fn)
if (nlhs == 1) {
plhs[0] = mxCreateDoubleScalar(init_key);
}
}
static void reset(MEX_ARGS) {
if (net_) {
net_.reset();
init_key = -2;
LOG(INFO) << "Network reset, call init before use it again";
}
}
static void forward(MEX_ARGS) {
if (nrhs != 1) {
LOG(ERROR) << "Only given " << nrhs << " arguments";
mexErrMsgTxt("Wrong number of arguments");
}
plhs[0] = do_forward(prhs[0]);
}
static void backward(MEX_ARGS) {
if (nrhs != 1) {
LOG(ERROR) << "Only given " << nrhs << " arguments";
mexErrMsgTxt("Wrong number of arguments");
}
plhs[0] = do_backward(prhs[0]);
}
static void is_initialized(MEX_ARGS) {
if (!net_) {
plhs[0] = mxCreateDoubleScalar(0);
} else {
plhs[0] = mxCreateDoubleScalar(1);
}
}
static void read_mean(MEX_ARGS) {
if (nrhs != 1) {
mexErrMsgTxt("Usage: caffe('read_mean', 'path_to_binary_mean_file'");
return;
}
const string& mean_file = mxArrayToString(prhs[0]);
Blob<float> data_mean;
LOG(INFO) << "Loading mean file from" << mean_file;
BlobProto blob_proto;
bool result = ReadProtoFromBinaryFile(mean_file.c_str(), &blob_proto);
if (!result) {
mexErrMsgTxt("Couldn't read the file");
return;
}
data_mean.FromProto(blob_proto);
mwSize dims[4] = {data_mean.width(), data_mean.height(),
data_mean.channels(), data_mean.num() };
mxArray* mx_blob = mxCreateNumericArray(4, dims, mxSINGLE_CLASS, mxREAL);
float* data_ptr = reinterpret_cast<float*>(mxGetPr(mx_blob));
caffe_copy(data_mean.count(), data_mean.cpu_data(), data_ptr);
mexWarnMsgTxt("Remember that Caffe saves in [width, height, channels]"
" format and channels are also BGR!");
plhs[0] = mx_blob;
}
/** -----------------------------------------------------------------
** Available commands.
**/
struct handler_registry {
string cmd;
void (*func)(MEX_ARGS);
};
static handler_registry handlers[] = {
// Public API functions
{ "forward", forward },
{ "backward", backward },
{ "init", init },
{ "is_initialized", is_initialized },
{ "set_mode_cpu", set_mode_cpu },
{ "set_mode_gpu", set_mode_gpu },
{ "set_phase_train", set_phase_train },
{ "set_phase_test", set_phase_test },
{ "set_device", set_device },
{ "get_weights", get_weights },
{ "get_init_key", get_init_key },
{ "reset", reset },
{ "read_mean", read_mean },
// The end.
{ "END", NULL },
};
/** -----------------------------------------------------------------
** matlab entry point: caffe(api_command, arg1, arg2, ...)
**/
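// An illustrative call sequence from matlab (file names are placeholders; the
// command strings come from the handlers table above):
//   caffe('init', 'deploy.prototxt', 'weights.caffemodel');
//   caffe('set_mode_cpu');
//   out = caffe('forward', {im}); % im prepared as described at the top of this file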
void mexFunction(MEX_ARGS) {
if (nrhs == 0) {
LOG(ERROR) << "No API command given";
mexErrMsgTxt("An API command is requires");
return;
}
{ // Handle input command
char *cmd = mxArrayToString(prhs[0]);
bool dispatched = false;
// Dispatch to cmd handler
for (int i = 0; handlers[i].func != NULL; i++) {
if (handlers[i].cmd.compare(cmd) == 0) {
handlers[i].func(nlhs, plhs, nrhs-1, prhs+1);
dispatched = true;
break;
}
}
if (!dispatched) {
LOG(ERROR) << "Unknown command `" << cmd << "'";
mexErrMsgTxt("API command not recognized");
}
mxFree(cmd);
}
}
| 1 | 28,701 | why the NOLINTs here? please just alphabetize the headers (should be easier than adding NOLINTs) unless there's a good reason not to... | BVLC-caffe | cpp |
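// A sketch of the alphabetized ordering the reviewer asks for (same headers as
// in the patch above; whether that ordering still builds on OS X is the open
// question):
//   #include <fstream>
//   #include <stdexcept>
//   #include <string>
//   #include <vector>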
@@ -0,0 +1,5 @@
+export default function() {
+ return <div />;
+}
+
+export const ReactComponent = () => <div />; | 1 | 1 | 32,556 | Should we maybe return `<svg />` instead? | google-site-kit-wp | js |
|
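// A minimal sketch of the reviewer's suggestion, assuming this file is a
// Jest-style mock for SVG imports:
//   export default function() {
//       return <svg />;
//   }
//   export const ReactComponent = () => <svg />;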
@@ -11,6 +11,7 @@ const Cats = require('../lib/status-cats');
const Storage = require('../lib/storage');
const _ = require('lodash');
const cors = require('cors');
+const load_plugins = require('../lib/plugin-loader').load_plugins;
module.exports = function(config_hash) {
// Config | 1 | 'use strict';
const express = require('express');
const Error = require('http-errors');
const compression = require('compression');
const Auth = require('../lib/auth');
const Logger = require('../lib/logger');
const Config = require('../lib/config');
const Middleware = require('./web/middleware');
const Cats = require('../lib/status-cats');
const Storage = require('../lib/storage');
const _ = require('lodash');
const cors = require('cors');
module.exports = function(config_hash) {
// Config
Logger.setup(config_hash.logs);
const config = new Config(config_hash);
const storage = new Storage(config);
const auth = new Auth(config);
const app = express();
// run in production mode by default, just in case
// it shouldn't make any difference anyway
app.set('env', process.env.NODE_ENV || 'production');
app.use(cors());
// Middleware
const error_reporting_middleware = function(req, res, next) {
res.report_error = res.report_error || function(err) {
if (err.status && err.status >= 400 && err.status < 600) {
if (_.isNil(res.headersSent) === false) {
res.status(err.status);
next({error: err.message || 'unknown error'});
}
} else {
Logger.logger.error( {err: err}
, 'unexpected error: @{!err.message}\n@{err.stack}');
if (!res.status || !res.send) {
Logger.logger.error('this is an error in express.js, please report this');
res.destroy();
} else if (!res.headersSent) {
res.status(500);
next({error: 'internal server error'});
} else {
// socket should be already closed
}
}
};
next();
};
// Router setup
app.use(Middleware.log);
app.use(error_reporting_middleware);
app.use(function(req, res, next) {
res.setHeader('X-Powered-By', config.user_agent);
next();
});
app.use(Cats.middleware);
app.use(compression());
app.get('/favicon.ico', function(req, res, next) {
req.url = '/-/static/favicon.png';
next();
});
// Hook for tests only
if (config._debug) {
app.get('/-/_debug', function(req, res, next) {
const do_gc = _.isNil(global.gc) === false;
if (do_gc) {
global.gc();
}
next({
pid: process.pid,
main: process.mainModule.filename,
conf: config.self_path,
mem: process.memoryUsage(),
gc: do_gc,
});
});
}
// For npm request
app.use(require('./endpoint')(config, auth, storage));
// For WebUI & WebUI API
if (_.get(config, 'web.enable', true)) {
app.use('/', require('./web')(config, auth, storage));
app.use('/-/verdaccio/', require('./web/api')(config, auth, storage));
} else {
app.get('/', function(req, res, next) {
next(Error[404]('Web interface is disabled in the config file'));
});
}
// Catch 404
app.get('/*', function(req, res, next) {
next(Error[404]('File not found'));
});
app.use(function(err, req, res, next) {
if (_.isError(err)) {
if (err.code === 'ECONNABORT' && res.statusCode === 304) {
return next();
}
if (_.isFunction(res.report_error) === false) {
        // in case of a very early error, this middleware may not have been
        // loaded before the error was generated; load it now
error_reporting_middleware(req, res, _.noop);
}
res.report_error(err);
} else {
// Fall to Middleware.final
return next(err);
}
});
app.use(Middleware.final);
return app;
};
| 1 | 17,393 | Please use camelCase in new code | verdaccio-verdaccio | js |
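// A sketch of the camelCase rename the reviewer asks for (illustrative; the
// plugin-loader module keeps its existing export name):
//   const loadPlugins = require('../lib/plugin-loader').load_plugins;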
@@ -14,5 +14,6 @@ return [
'selectLocale' => 'Select one of the supported languages',
'contact' => 'Contact',
'contact-us' => 'Contact Us',
+ 'places' => 'Places',
]; | 1 | <?php
return [
'about' => 'About',
'help' => 'Help',
'language' => 'Language',
'fediverse' => 'Fediverse',
'opensource' => 'Open Source',
'terms' => 'Terms',
'privacy' => 'Privacy',
'l10nWip' => 'We’re still working on localization support',
'currentLocale' => 'Current locale',
'selectLocale' => 'Select one of the supported languages',
'contact' => 'Contact',
'contact-us' => 'Contact Us',
];
| 1 | 11,325 | Looks like the whitespace is off here. Not sure if there's a space or two too many or if there is an issue with tabs vs. spaces, but you probably want to fix this :) | pixelfed-pixelfed | php |
@@ -17,7 +17,12 @@
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
-"""Base bundles."""
+"""Base bundles.
+
+.. note:: `bootstrap.js` bundle must be loaded after jQuery UI to avoid conflicts.
+ You can use `noConflict()` if you need to access functions
+ of jQuery UI covered by `bootstrap.js`.
+"""
from invenio.ext.assets import Bundle
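# A minimal sketch of the noConflict() workaround the new docstring mentions
# (Bootstrap 3 plugin API; jQuery UI's button is the usual collision):
#   var bootstrapButton = $.fn.button.noConflict(); // restore jQuery UI's .button()
#   $.fn.bootstrapBtn = bootstrapButton;            // Bootstrap's version, renamed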
| 1 | # -*- coding: utf-8 -*-
##
## This file is part of Invenio.
## Copyright (C) 2014 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""Base bundles."""
from invenio.ext.assets import Bundle
invenio = Bundle(
"js/invenio.js",
output="invenio.js",
filters="requirejs",
weight=90
)
styles = Bundle(
"css/token-input.css",
"css/token-input-facebook.css",
"css/typeahead.js-bootstrap.css",
"less/base.less",
"css/tags/popover.css",
output="invenio.css",
depends=[
"less/base.less",
"less/base/**/*.less"
],
filters="less,cleancss",
weight=50
)
# FIXME
#if config.CFG_WEBSTYLE_TEMPLATE_SKIN != "default":
# styles.contents.append("css/" + config.CFG_WEBSTYLE_TEMPLATE_SKIN + ".css")
jquery = Bundle(
"js/jquery.js",
"js/jquery.jeditable.mini.js",
"js/jquery.tokeninput.js",
"js/jquery-caret.js",
"js/typeahead.js",
"js/bootstrap.js",
"js/bootstrap-select.js",
"js/hogan.js",
"js/translate.js",
output="jquery.js",
filters="uglifyjs",
weight=10,
bower={
"jquery": "2.1.0",
"bootstrap": "3.2.0",
"hogan": "3.0.0",
"jquery.jeditable": "http://invenio-software.org/download/jquery/v1.5/js/jquery.jeditable.mini.js",
"jquery-tokeninput": "*"
}
)
# jQuery UI
jqueryui = Bundle(
"js/jqueryui/jquery-ui.custom.js",
"js/jquery-ui-timepicker-addon.js",
filters="uglifyjs",
output="jquery-ui.js",
weight=11,
bower={
"jqueryui": "1.11.0",
"jquery.ui.timepicker": "http://invenio-software.org/download/jquery/jquery-ui-timepicker-addon-1.0.3.js"
}
)
# if ASSETS_DEBUG and not LESS_RUN_IN_DEBUG
lessjs = Bundle(
"js/less.js",
output="less.js",
filters="uglifyjs",
weight=0,
bower={
"less": "1.7.0"
}
)
# if ASSETS_DEBUG and not REQUIRESJS_RUN_IN_DEBUG
requirejs = Bundle(
"js/require.js",
"js/settings.js",
output="require.js",
filters="uglifyjs",
weight=0,
bower={
"requirejs": "latest"
}
)
# else
almondjs = Bundle(
"js/almond.js",
"js/settings.js",
output="almond.js",
filters="uglifyjs",
weight=0,
bower={
"almond": "latest"
}
)
| 1 | 11,639 | This was just my laziness and I didn't copy the first line. Just remove it together with one empty line. Thanks | inveniosoftware-invenio | py |
@@ -9,8 +9,8 @@ use Shopsys\FrameworkBundle\Model\Pricing\Vat\Vat;
use Shopsys\FrameworkBundle\Model\Pricing\Vat\VatData;
use Shopsys\FrameworkBundle\Model\Product\Availability\Availability;
use Shopsys\FrameworkBundle\Model\Product\Availability\AvailabilityData;
-use Shopsys\FrameworkBundle\Model\Product\Product;
-use Shopsys\FrameworkBundle\Model\Product\ProductDataFactory;
+use Shopsys\FrameworkBundle\Model\Product\ProductDataFactoryInterface;
+use Shopsys\ShopBundle\Model\Product\Product;
use Tests\ShopBundle\Test\DatabaseTestCase;
class CartItemTest extends DatabaseTestCase | 1 | <?php
namespace Tests\ShopBundle\Database\Model\Cart;
use Shopsys\FrameworkBundle\DataFixtures\Demo\UnitDataFixture;
use Shopsys\FrameworkBundle\Model\Cart\Item\CartItem;
use Shopsys\FrameworkBundle\Model\Customer\CustomerIdentifier;
use Shopsys\FrameworkBundle\Model\Pricing\Vat\Vat;
use Shopsys\FrameworkBundle\Model\Pricing\Vat\VatData;
use Shopsys\FrameworkBundle\Model\Product\Availability\Availability;
use Shopsys\FrameworkBundle\Model\Product\Availability\AvailabilityData;
use Shopsys\FrameworkBundle\Model\Product\Product;
use Shopsys\FrameworkBundle\Model\Product\ProductDataFactory;
use Tests\ShopBundle\Test\DatabaseTestCase;
class CartItemTest extends DatabaseTestCase
{
public function testIsSimilarItemAs()
{
$em = $this->getEntityManager();
$productDataFactory = $this->getContainer()->get(ProductDataFactory::class);
$customerIdentifier = new CustomerIdentifier('randomString');
$vatData = new VatData();
$vatData->name = 'vat';
$vatData->percent = 21;
$vat = new Vat($vatData);
$availabilityData = new AvailabilityData();
$availabilityData->dispatchTime = 0;
$availability = new Availability($availabilityData);
$productData = $productDataFactory->create();
$productData->name = [];
$productData->price = 100;
$productData->vat = $vat;
$productData->availability = $availability;
$productData->unit = $this->getReference(UnitDataFixture::UNIT_PIECES);
$product1 = Product::create($productData);
$product2 = Product::create($productData);
$em->persist($vat);
$em->persist($availability);
$em->persist($product1);
$em->persist($product2);
$em->flush();
$cartItem1 = new CartItem($customerIdentifier, $product1, 1, '0.0');
$cartItem2 = new CartItem($customerIdentifier, $product1, 3, '0.0');
$cartItem3 = new CartItem($customerIdentifier, $product2, 1, '0.0');
$this->assertTrue($cartItem1->isSimilarItemAs($cartItem2));
$this->assertFalse($cartItem1->isSimilarItemAs($cartItem3));
}
}
 | 1 | 11,517 | Why did you decide to change CartItemTest but you didn't change QueryBuilderWithRowManipulatorDataSourceTest? | shopsys-shopsys | php |
@@ -425,7 +425,15 @@ func (k *KeybaseServiceBase) LoadUserPlusKeys(ctx context.Context,
uid keybase1.UID, pollForKID keybase1.KID) (UserInfo, error) {
cachedUserInfo := k.getCachedUserInfo(uid)
if cachedUserInfo.Name != libkb.NormalizedUsername("") {
- return cachedUserInfo, nil
+ if pollForKID == keybase1.KID("") {
+ return cachedUserInfo, nil
+ }
+		// Skip the cache if pollForKID isn't among the cached keys.
+ for _, key := range cachedUserInfo.VerifyingKeys {
+ if key.KID().Equal(pollForKID) {
+ return cachedUserInfo, nil
+ }
+ }
}
arg := keybase1.LoadUserPlusKeysArg{Uid: uid, PollForKID: pollForKID} | 1 | // Copyright 2016 Keybase Inc. All rights reserved.
// Use of this source code is governed by a BSD
// license that can be found in the LICENSE file.
package libkbfs
import (
"fmt"
"sync"
"time"
"github.com/keybase/client/go/libkb"
"github.com/keybase/client/go/logger"
"github.com/keybase/client/go/protocol/keybase1"
"github.com/keybase/kbfs/kbfscrypto"
"github.com/keybase/kbfs/tlf"
"golang.org/x/net/context"
)
// KeybaseServiceBase implements most of KeybaseService from protocol
// defined clients.
type KeybaseServiceBase struct {
context Context
identifyClient keybase1.IdentifyInterface
userClient keybase1.UserInterface
teamsClient keybase1.TeamsInterface
merkleClient keybase1.MerkleInterface
sessionClient keybase1.SessionInterface
favoriteClient keybase1.FavoriteInterface
kbfsClient keybase1.KbfsInterface
kbfsMountClient keybase1.KbfsMountInterface
log logger.Logger
config Config
sessionCacheLock sync.RWMutex
// Set to the zero value when invalidated.
cachedCurrentSession SessionInfo
userCacheLock sync.RWMutex
// Map entries are removed when invalidated.
userCache map[keybase1.UID]UserInfo
userCacheUnverifiedKeys map[keybase1.UID][]keybase1.PublicKey
teamCacheLock sync.RWMutex
// Map entries are removed when invalidated.
teamCache map[keybase1.TeamID]TeamInfo
lastNotificationFilenameLock sync.Mutex
lastNotificationFilename string
lastSyncNotificationPath string
}
// NewKeybaseServiceBase makes a new KeybaseService.
func NewKeybaseServiceBase(config Config, kbCtx Context, log logger.Logger) *KeybaseServiceBase {
k := KeybaseServiceBase{
config: config,
context: kbCtx,
log: log,
userCache: make(map[keybase1.UID]UserInfo),
userCacheUnverifiedKeys: make(map[keybase1.UID][]keybase1.PublicKey),
teamCache: make(map[keybase1.TeamID]TeamInfo),
}
return &k
}
// FillClients sets the client protocol implementations needed for a KeybaseService.
func (k *KeybaseServiceBase) FillClients(
identifyClient keybase1.IdentifyInterface,
userClient keybase1.UserInterface, teamsClient keybase1.TeamsInterface,
merkleClient keybase1.MerkleInterface,
sessionClient keybase1.SessionInterface,
favoriteClient keybase1.FavoriteInterface,
kbfsClient keybase1.KbfsInterface,
kbfsMountClient keybase1.KbfsMountInterface) {
k.identifyClient = identifyClient
k.userClient = userClient
k.teamsClient = teamsClient
k.merkleClient = merkleClient
k.sessionClient = sessionClient
k.favoriteClient = favoriteClient
k.kbfsClient = kbfsClient
k.kbfsMountClient = kbfsMountClient
}
type addVerifyingKeyFunc func(kbfscrypto.VerifyingKey)
type addCryptPublicKeyFunc func(kbfscrypto.CryptPublicKey)
// processKey adds the given public key to the appropriate verifying
// or crypt list (as return values), and also updates the given name
// map and parent map in place.
func processKey(publicKey keybase1.PublicKey,
addVerifyingKey addVerifyingKeyFunc,
addCryptPublicKey addCryptPublicKeyFunc,
kidNames map[keybase1.KID]string,
parents map[keybase1.KID]keybase1.KID) error {
if len(publicKey.PGPFingerprint) > 0 {
return nil
}
// Import the KID to validate it.
key, err := libkb.ImportKeypairFromKID(publicKey.KID)
if err != nil {
return err
}
if publicKey.IsSibkey {
addVerifyingKey(kbfscrypto.MakeVerifyingKey(key.GetKID()))
} else {
addCryptPublicKey(kbfscrypto.MakeCryptPublicKey(key.GetKID()))
}
if publicKey.DeviceDescription != "" {
kidNames[publicKey.KID] = publicKey.DeviceDescription
}
if publicKey.ParentID != "" {
parentKID, err := keybase1.KIDFromStringChecked(
publicKey.ParentID)
if err != nil {
return err
}
parents[publicKey.KID] = parentKID
}
return nil
}
// updateKIDNamesFromParents sets the name of each KID without a name
// that has a parent with a name, to that parent's name.
func updateKIDNamesFromParents(kidNames map[keybase1.KID]string,
parents map[keybase1.KID]keybase1.KID) {
for kid, parent := range parents {
if _, ok := kidNames[kid]; ok {
continue
}
if parentName, ok := kidNames[parent]; ok {
kidNames[kid] = parentName
}
}
}
func filterKeys(keys []keybase1.PublicKey) (
[]kbfscrypto.VerifyingKey, []kbfscrypto.CryptPublicKey,
map[keybase1.KID]string, error) {
var verifyingKeys []kbfscrypto.VerifyingKey
var cryptPublicKeys []kbfscrypto.CryptPublicKey
var kidNames = map[keybase1.KID]string{}
var parents = map[keybase1.KID]keybase1.KID{}
addVerifyingKey := func(key kbfscrypto.VerifyingKey) {
verifyingKeys = append(verifyingKeys, key)
}
addCryptPublicKey := func(key kbfscrypto.CryptPublicKey) {
cryptPublicKeys = append(cryptPublicKeys, key)
}
for _, publicKey := range keys {
err := processKey(publicKey, addVerifyingKey, addCryptPublicKey,
kidNames, parents)
if err != nil {
return nil, nil, nil, err
}
}
updateKIDNamesFromParents(kidNames, parents)
return verifyingKeys, cryptPublicKeys, kidNames, nil
}
func filterRevokedKeys(keys []keybase1.RevokedKey) (
map[kbfscrypto.VerifyingKey]keybase1.KeybaseTime,
map[kbfscrypto.CryptPublicKey]keybase1.KeybaseTime,
map[keybase1.KID]string, error) {
verifyingKeys := make(map[kbfscrypto.VerifyingKey]keybase1.KeybaseTime)
cryptPublicKeys := make(map[kbfscrypto.CryptPublicKey]keybase1.KeybaseTime)
var kidNames = map[keybase1.KID]string{}
var parents = map[keybase1.KID]keybase1.KID{}
for _, revokedKey := range keys {
addVerifyingKey := func(key kbfscrypto.VerifyingKey) {
verifyingKeys[key] = revokedKey.Time
}
addCryptPublicKey := func(key kbfscrypto.CryptPublicKey) {
cryptPublicKeys[key] = revokedKey.Time
}
err := processKey(revokedKey.Key, addVerifyingKey, addCryptPublicKey,
kidNames, parents)
if err != nil {
return nil, nil, nil, err
}
}
updateKIDNamesFromParents(kidNames, parents)
return verifyingKeys, cryptPublicKeys, kidNames, nil
}
func (k *KeybaseServiceBase) getCachedCurrentSession() SessionInfo {
k.sessionCacheLock.RLock()
defer k.sessionCacheLock.RUnlock()
return k.cachedCurrentSession
}
func (k *KeybaseServiceBase) setCachedCurrentSession(s SessionInfo) {
k.sessionCacheLock.Lock()
defer k.sessionCacheLock.Unlock()
k.cachedCurrentSession = s
}
func (k *KeybaseServiceBase) getCachedUserInfo(uid keybase1.UID) UserInfo {
k.userCacheLock.RLock()
defer k.userCacheLock.RUnlock()
return k.userCache[uid]
}
func (k *KeybaseServiceBase) setCachedUserInfo(uid keybase1.UID, info UserInfo) {
k.userCacheLock.Lock()
defer k.userCacheLock.Unlock()
if info.Name == libkb.NormalizedUsername("") {
delete(k.userCache, uid)
} else {
k.userCache[uid] = info
}
}
func (k *KeybaseServiceBase) getCachedUnverifiedKeys(uid keybase1.UID) (
[]keybase1.PublicKey, bool) {
k.userCacheLock.RLock()
defer k.userCacheLock.RUnlock()
if unverifiedKeys, ok := k.userCacheUnverifiedKeys[uid]; ok {
return unverifiedKeys, true
}
return nil, false
}
func (k *KeybaseServiceBase) setCachedUnverifiedKeys(uid keybase1.UID, pk []keybase1.PublicKey) {
k.userCacheLock.Lock()
defer k.userCacheLock.Unlock()
k.userCacheUnverifiedKeys[uid] = pk
}
func (k *KeybaseServiceBase) clearCachedUnverifiedKeys(uid keybase1.UID) {
k.userCacheLock.Lock()
defer k.userCacheLock.Unlock()
delete(k.userCacheUnverifiedKeys, uid)
}
func (k *KeybaseServiceBase) getCachedTeamInfo(tid keybase1.TeamID) TeamInfo {
k.teamCacheLock.RLock()
defer k.teamCacheLock.RUnlock()
return k.teamCache[tid]
}
func (k *KeybaseServiceBase) setCachedTeamInfo(
tid keybase1.TeamID, info TeamInfo) {
k.teamCacheLock.Lock()
defer k.teamCacheLock.Unlock()
if info.Name == libkb.NormalizedUsername("") {
delete(k.teamCache, tid)
} else {
k.teamCache[tid] = info
}
}
func (k *KeybaseServiceBase) clearCaches() {
k.setCachedCurrentSession(SessionInfo{})
func() {
k.userCacheLock.Lock()
defer k.userCacheLock.Unlock()
k.userCache = make(map[keybase1.UID]UserInfo)
k.userCacheUnverifiedKeys = make(map[keybase1.UID][]keybase1.PublicKey)
}()
k.teamCacheLock.Lock()
defer k.teamCacheLock.Unlock()
k.teamCache = make(map[keybase1.TeamID]TeamInfo)
}
// LoggedIn implements keybase1.NotifySessionInterface.
func (k *KeybaseServiceBase) LoggedIn(ctx context.Context, name string) error {
k.log.CDebugf(ctx, "Current session logged in: %s", name)
// Since we don't have the whole session, just clear the cache.
k.setCachedCurrentSession(SessionInfo{})
if k.config != nil {
serviceLoggedIn(
ctx, k.config, name, TLFJournalBackgroundWorkEnabled)
}
return nil
}
// LoggedOut implements keybase1.NotifySessionInterface.
func (k *KeybaseServiceBase) LoggedOut(ctx context.Context) error {
k.log.CDebugf(ctx, "Current session logged out")
k.setCachedCurrentSession(SessionInfo{})
if k.config != nil {
serviceLoggedOut(ctx, k.config)
}
return nil
}
// KeyfamilyChanged implements keybase1.NotifyKeyfamilyInterface.
func (k *KeybaseServiceBase) KeyfamilyChanged(ctx context.Context,
uid keybase1.UID) error {
k.log.CDebugf(ctx, "Key family for user %s changed", uid)
k.setCachedUserInfo(uid, UserInfo{})
k.clearCachedUnverifiedKeys(uid)
if k.getCachedCurrentSession().UID == uid {
// Ignore any errors for now, we don't want to block this
// notification and it's not worth spawning a goroutine for.
k.config.MDServer().CheckForRekeys(context.Background())
}
return nil
}
// ReachabilityChanged implements keybase1.ReachabiltyInterface.
func (k *KeybaseServiceBase) ReachabilityChanged(ctx context.Context,
reachability keybase1.Reachability) error {
k.log.CDebugf(ctx, "CheckReachability invoked: %v", reachability)
k.config.MDServer().CheckReachability(ctx)
return nil
}
// StartReachability implements keybase1.ReachabilityInterface.
func (k *KeybaseServiceBase) StartReachability(ctx context.Context) (res keybase1.Reachability, err error) {
return k.CheckReachability(ctx)
}
// CheckReachability implements keybase1.ReachabilityInterface.
func (k *KeybaseServiceBase) CheckReachability(ctx context.Context) (res keybase1.Reachability, err error) {
res.Reachable = keybase1.Reachable_NO
if k.config.MDServer().IsConnected() {
res.Reachable = keybase1.Reachable_YES
}
return res, nil
}
// PaperKeyCached implements keybase1.NotifyPaperKeyInterface.
func (k *KeybaseServiceBase) PaperKeyCached(ctx context.Context,
arg keybase1.PaperKeyCachedArg) error {
k.log.CDebugf(ctx, "Paper key for %s cached", arg.Uid)
if k.getCachedCurrentSession().UID == arg.Uid {
// Ignore any errors for now, we don't want to block this
// notification and it's not worth spawning a goroutine for.
k.config.MDServer().CheckForRekeys(context.Background())
}
return nil
}
// ClientOutOfDate implements keybase1.NotifySessionInterface.
func (k *KeybaseServiceBase) ClientOutOfDate(ctx context.Context,
arg keybase1.ClientOutOfDateArg) error {
k.log.CDebugf(ctx, "Client out of date: %v", arg)
return nil
}
// ConvertIdentifyError converts a errors during identify into KBFS errors
func ConvertIdentifyError(assertion string, err error) error {
switch err.(type) {
case libkb.NotFoundError:
return NoSuchUserError{assertion}
case libkb.ResolutionError:
return NoSuchUserError{assertion}
}
return err
}
// Resolve implements the KeybaseService interface for KeybaseServiceBase.
func (k *KeybaseServiceBase) Resolve(ctx context.Context, assertion string) (
libkb.NormalizedUsername, keybase1.UserOrTeamID, error) {
res, err := k.identifyClient.Resolve3(ctx, assertion)
if err != nil {
return libkb.NormalizedUsername(""), keybase1.UserOrTeamID(""),
ConvertIdentifyError(assertion, err)
}
return libkb.NewNormalizedUsername(res.Name), res.Id, nil
}
// Identify implements the KeybaseService interface for KeybaseServiceBase.
func (k *KeybaseServiceBase) Identify(ctx context.Context, assertion, reason string) (
libkb.NormalizedUsername, keybase1.UserOrTeamID, error) {
// setting UseDelegateUI to true here will cause daemon to use
// registered identify ui providers instead of terminal if any
// are available. If not, then it will use the terminal UI.
arg := keybase1.IdentifyLiteArg{
Assertion: assertion,
UseDelegateUI: true,
Reason: keybase1.IdentifyReason{Reason: reason},
// No need to go back and forth with the UI until the service
// knows for sure there's a need for a dialogue.
CanSuppressUI: true,
}
ei := getExtendedIdentify(ctx)
arg.IdentifyBehavior = ei.behavior
res, err := k.identifyClient.IdentifyLite(ctx, arg)
// Identify2 still returns keybase1.UserPlusKeys data (sans keys),
// even if it gives a NoSigChainError, and in KBFS it's fine if
// the user doesn't have a full sigchain yet (e.g., it's just like
// the sharing before signup case, except the user already has a
// UID).
if _, ok := err.(libkb.NoSigChainError); ok {
k.log.CDebugf(ctx, "Ignoring error (%s) for user %s with no sigchain",
err, res.Ul.Name)
} else if err != nil {
return libkb.NormalizedUsername(""), keybase1.UserOrTeamID(""),
ConvertIdentifyError(assertion, err)
}
// This is required for every identify call. The userBreak
// function will take care of checking if res.TrackBreaks is nil
// or not.
name := libkb.NormalizedUsername(res.Ul.Name)
if res.Ul.Id.IsUser() {
asUser, err := res.Ul.Id.AsUser()
if err != nil {
return libkb.NormalizedUsername(""), keybase1.UserOrTeamID(""), err
}
ei.userBreak(name, asUser, res.TrackBreaks)
}
return name, res.Ul.Id, nil
}
// LoadUserPlusKeys implements the KeybaseService interface for
// KeybaseServiceBase.
func (k *KeybaseServiceBase) LoadUserPlusKeys(ctx context.Context,
uid keybase1.UID, pollForKID keybase1.KID) (UserInfo, error) {
cachedUserInfo := k.getCachedUserInfo(uid)
if cachedUserInfo.Name != libkb.NormalizedUsername("") {
return cachedUserInfo, nil
}
arg := keybase1.LoadUserPlusKeysArg{Uid: uid, PollForKID: pollForKID}
res, err := k.userClient.LoadUserPlusKeys(ctx, arg)
if err != nil {
return UserInfo{}, err
}
return k.processUserPlusKeys(res)
}
// LoadTeamPlusKeys implements the KeybaseService interface for
// KeybaseServiceBase.
func (k *KeybaseServiceBase) LoadTeamPlusKeys(
ctx context.Context, tid keybase1.TeamID) (TeamInfo, error) {
cachedTeamInfo := k.getCachedTeamInfo(tid)
if cachedTeamInfo.Name != libkb.NormalizedUsername("") {
return cachedTeamInfo, nil
}
arg := keybase1.LoadTeamPlusApplicationKeysArg{
Id: tid,
Application: keybase1.TeamApplication_KBFS,
}
res, err := k.teamsClient.LoadTeamPlusApplicationKeys(ctx, arg)
if err != nil {
return TeamInfo{}, err
}
if tid != res.Id {
return TeamInfo{}, fmt.Errorf(
"TID doesn't match: %s vs %s", tid, res.Id)
}
info := TeamInfo{
Name: libkb.NormalizedUsername(res.Name),
TID: res.Id,
CryptKeys: make(map[KeyGen]kbfscrypto.TLFCryptKey),
Writers: make(map[keybase1.UID]bool),
Readers: make(map[keybase1.UID]bool),
}
for _, key := range res.ApplicationKeys {
keyGen := KeyGen(key.KeyGeneration)
info.CryptKeys[keyGen] =
kbfscrypto.MakeTLFCryptKey(key.Key)
if keyGen > info.LatestKeyGen {
info.LatestKeyGen = keyGen
}
}
for _, user := range res.Writers {
info.Writers[user.Uid] = true
}
for _, user := range res.OnlyReaders {
info.Readers[user.Uid] = true
}
k.setCachedTeamInfo(tid, info)
return info, nil
}
// GetCurrentMerkleSeqNo implements the KeybaseService interface for
// KeybaseServiceBase.
func (k *KeybaseServiceBase) GetCurrentMerkleSeqNo(ctx context.Context) (
MerkleSeqNo, error) {
const merkleFreshnessMs = int(time.Second * 60 / time.Millisecond)
res, err := k.merkleClient.GetCurrentMerkleRoot(ctx, merkleFreshnessMs)
if err != nil {
return 0, err
}
if res.Root.Seqno < 0 {
return 0, fmt.Errorf(
"Illegal negative merkle seqno: %d", res.Root.Seqno)
}
// NOTE: `res.Seqno` is an int64, while `MerkleSeqNo` is a uint64,
// so casting in this direction should be safe.
return MerkleSeqNo(res.Root.Seqno), nil
}
func (k *KeybaseServiceBase) processUserPlusKeys(upk keybase1.UserPlusKeys) (
UserInfo, error) {
verifyingKeys, cryptPublicKeys, kidNames, err := filterKeys(upk.DeviceKeys)
if err != nil {
return UserInfo{}, err
}
revokedVerifyingKeys, revokedCryptPublicKeys, revokedKidNames, err :=
filterRevokedKeys(upk.RevokedDeviceKeys)
if err != nil {
return UserInfo{}, err
}
if len(revokedKidNames) > 0 {
for k, v := range revokedKidNames {
kidNames[k] = v
}
}
u := UserInfo{
Name: libkb.NewNormalizedUsername(upk.Username),
UID: upk.Uid,
VerifyingKeys: verifyingKeys,
CryptPublicKeys: cryptPublicKeys,
KIDNames: kidNames,
RevokedVerifyingKeys: revokedVerifyingKeys,
RevokedCryptPublicKeys: revokedCryptPublicKeys,
}
k.setCachedUserInfo(upk.Uid, u)
return u, nil
}
// LoadUnverifiedKeys implements the KeybaseService interface for KeybaseServiceBase.
func (k *KeybaseServiceBase) LoadUnverifiedKeys(ctx context.Context, uid keybase1.UID) (
[]keybase1.PublicKey, error) {
if keys, ok := k.getCachedUnverifiedKeys(uid); ok {
return keys, nil
}
arg := keybase1.LoadAllPublicKeysUnverifiedArg{Uid: uid}
keys, err := k.userClient.LoadAllPublicKeysUnverified(ctx, arg)
if err != nil {
return nil, err
}
k.setCachedUnverifiedKeys(uid, keys)
return keys, nil
}
// CurrentSession implements the KeybaseService interface for KeybaseServiceBase.
func (k *KeybaseServiceBase) CurrentSession(ctx context.Context, sessionID int) (
SessionInfo, error) {
cachedCurrentSession := k.getCachedCurrentSession()
if cachedCurrentSession != (SessionInfo{}) {
return cachedCurrentSession, nil
}
res, err := k.sessionClient.CurrentSession(ctx, sessionID)
if err != nil {
if _, ok := err.(libkb.NoSessionError); ok {
// Use an error with a proper OS error code attached to it.
err = NoCurrentSessionError{}
}
return SessionInfo{}, err
}
s, err := SessionInfoFromProtocol(res)
if err != nil {
return s, err
}
k.log.CDebugf(
ctx, "new session with username %s, uid %s, crypt public key %s, and verifying key %s",
s.Name, s.UID, s.CryptPublicKey, s.VerifyingKey)
k.setCachedCurrentSession(s)
return s, nil
}
// FavoriteAdd implements the KeybaseService interface for KeybaseServiceBase.
func (k *KeybaseServiceBase) FavoriteAdd(ctx context.Context, folder keybase1.Folder) error {
return k.favoriteClient.FavoriteAdd(ctx, keybase1.FavoriteAddArg{Folder: folder})
}
// FavoriteDelete implements the KeybaseService interface for KeybaseServiceBase.
func (k *KeybaseServiceBase) FavoriteDelete(ctx context.Context, folder keybase1.Folder) error {
return k.favoriteClient.FavoriteIgnore(ctx,
keybase1.FavoriteIgnoreArg{Folder: folder})
}
// FavoriteList implements the KeybaseService interface for KeybaseServiceBase.
func (k *KeybaseServiceBase) FavoriteList(ctx context.Context, sessionID int) ([]keybase1.Folder, error) {
results, err := k.favoriteClient.GetFavorites(ctx, sessionID)
if err != nil {
return nil, err
}
return results.FavoriteFolders, nil
}
// Notify implements the KeybaseService interface for KeybaseServiceBase.
func (k *KeybaseServiceBase) Notify(ctx context.Context, notification *keybase1.FSNotification) error {
// Reduce log spam by not repeating log lines for
// notifications with the same filename.
//
// TODO: Only do this in debug mode.
func() {
k.lastNotificationFilenameLock.Lock()
defer k.lastNotificationFilenameLock.Unlock()
if notification.Filename != k.lastNotificationFilename {
k.lastNotificationFilename = notification.Filename
k.log.CDebugf(ctx, "Sending notification for %s", notification.Filename)
}
}()
return k.kbfsClient.FSEvent(ctx, *notification)
}
// NotifySyncStatus implements the KeybaseService interface for
// KeybaseServiceBase.
func (k *KeybaseServiceBase) NotifySyncStatus(ctx context.Context,
status *keybase1.FSPathSyncStatus) error {
// Reduce log spam by not repeating log lines for
// notifications with the same pathname.
//
// TODO: Only do this in debug mode.
func() {
k.lastNotificationFilenameLock.Lock()
defer k.lastNotificationFilenameLock.Unlock()
if status.Path != k.lastSyncNotificationPath {
k.lastSyncNotificationPath = status.Path
k.log.CDebugf(ctx, "Sending notification for %s", status.Path)
}
}()
return k.kbfsClient.FSSyncEvent(ctx, *status)
}
// FlushUserFromLocalCache implements the KeybaseService interface for
// KeybaseServiceBase.
func (k *KeybaseServiceBase) FlushUserFromLocalCache(ctx context.Context,
uid keybase1.UID) {
k.log.CDebugf(ctx, "Flushing cache for user %s", uid)
k.setCachedUserInfo(uid, UserInfo{})
}
// FlushUserUnverifiedKeysFromLocalCache implements the KeybaseService interface for
// KeybaseServiceBase.
func (k *KeybaseServiceBase) FlushUserUnverifiedKeysFromLocalCache(ctx context.Context,
uid keybase1.UID) {
k.log.CDebugf(ctx, "Flushing cache of unverified keys for user %s", uid)
k.clearCachedUnverifiedKeys(uid)
}
// CtxKeybaseServiceTagKey is the type used for unique context tags
// used while servicing incoming keybase requests.
type CtxKeybaseServiceTagKey int
const (
// CtxKeybaseServiceIDKey is the type of the tag for unique
// operation IDs used while servicing incoming keybase requests.
CtxKeybaseServiceIDKey CtxKeybaseServiceTagKey = iota
)
// CtxKeybaseServiceOpID is the display name for the unique operation
// enqueued rekey ID tag.
const CtxKeybaseServiceOpID = "KSID"
func (k *KeybaseServiceBase) getHandleFromFolderName(ctx context.Context,
tlfName string, public bool) (*TlfHandle, error) {
for {
// TODO(KBFS-2185): update the protocol to support requests
// for single-team TLFs.
t := tlf.Private
if public {
t = tlf.Public
}
tlfHandle, err := ParseTlfHandle(ctx, k.config.KBPKI(), tlfName, t)
switch e := err.(type) {
case TlfNameNotCanonical:
tlfName = e.NameToTry
case nil:
return tlfHandle, nil
default:
return nil, err
}
}
}
// FSEditListRequest implements keybase1.NotifyFSRequestInterface for
// KeybaseServiceBase.
func (k *KeybaseServiceBase) FSEditListRequest(ctx context.Context,
req keybase1.FSEditListRequest) (err error) {
ctx = ctxWithRandomIDReplayable(ctx, CtxKeybaseServiceIDKey, CtxKeybaseServiceOpID,
k.log)
k.log.CDebugf(ctx, "Edit list request for %s (public: %t)",
req.Folder.Name, !req.Folder.Private)
tlfHandle, err := k.getHandleFromFolderName(ctx, req.Folder.Name,
!req.Folder.Private)
if err != nil {
return err
}
rootNode, _, err := k.config.KBFSOps().
GetOrCreateRootNode(ctx, tlfHandle, MasterBranch)
if err != nil {
return err
}
editHistory, err := k.config.KBFSOps().GetEditHistory(ctx,
rootNode.GetFolderBranch())
if err != nil {
return err
}
// Convert the edits to an RPC response.
var resp keybase1.FSEditListArg
for writer, edits := range editHistory {
for _, edit := range edits {
var nType keybase1.FSNotificationType
switch edit.Type {
case FileCreated:
nType = keybase1.FSNotificationType_FILE_CREATED
case FileModified:
nType = keybase1.FSNotificationType_FILE_MODIFIED
default:
k.log.CDebugf(ctx, "Bad notification type in edit history: %v",
edit.Type)
continue
}
n := keybase1.FSNotification{
Filename: edit.Filepath,
StatusCode: keybase1.FSStatusCode_FINISH,
NotificationType: nType,
WriterUid: writer,
LocalTime: keybase1.ToTime(edit.LocalTime),
}
resp.Edits = append(resp.Edits, n)
}
}
resp.RequestID = req.RequestID
k.log.CDebugf(ctx, "Sending edit history response with %d edits",
len(resp.Edits))
return k.kbfsClient.FSEditList(ctx, resp)
}
// FSSyncStatusRequest implements keybase1.NotifyFSRequestInterface for
// KeybaseServiceBase.
func (k *KeybaseServiceBase) FSSyncStatusRequest(ctx context.Context,
req keybase1.FSSyncStatusRequest) (err error) {
k.log.CDebugf(ctx, "Got sync status request: %v", req)
resp := keybase1.FSSyncStatusArg{RequestID: req.RequestID}
// For now, just return the number of syncing bytes.
jServer, err := GetJournalServer(k.config)
if err == nil {
status, _ := jServer.Status(ctx)
resp.Status.TotalSyncingBytes = status.UnflushedBytes
k.log.CDebugf(ctx, "Sending sync status response with %d syncing bytes",
status.UnflushedBytes)
} else {
k.log.CDebugf(ctx, "No journal server, sending empty response")
}
return k.kbfsClient.FSSyncStatus(ctx, resp)
}
// TeamChanged implements keybase1.NotifyTeamInterface for
// KeybaseServiceBase.
func (k *KeybaseServiceBase) TeamChanged(
ctx context.Context, arg keybase1.TeamChangedArg) error {
k.log.CDebugf(ctx, "Flushing cache for team %s/%s "+
"(membershipChange=%t, keyRotated=%t, renamed=%t)",
arg.TeamName, arg.TeamID, arg.Changes.MembershipChanged,
arg.Changes.KeyRotated, arg.Changes.Renamed)
k.setCachedTeamInfo(arg.TeamID, TeamInfo{})
if arg.Changes.Renamed {
k.config.KBFSOps().TeamNameChanged(ctx, arg.TeamID)
}
return nil
}
// GetTLFCryptKeys implements the TlfKeysInterface interface for
// KeybaseServiceBase.
func (k *KeybaseServiceBase) GetTLFCryptKeys(ctx context.Context,
query keybase1.TLFQuery) (res keybase1.GetTLFCryptKeysRes, err error) {
if ctx, err = makeExtendedIdentify(
ctxWithRandomIDReplayable(ctx,
CtxKeybaseServiceIDKey, CtxKeybaseServiceOpID, k.log),
query.IdentifyBehavior,
); err != nil {
return keybase1.GetTLFCryptKeysRes{}, err
}
tlfHandle, err := k.getHandleFromFolderName(ctx, query.TlfName, false)
if err != nil {
return res, err
}
res.NameIDBreaks.CanonicalName = keybase1.CanonicalTlfName(
tlfHandle.GetCanonicalName())
keys, id, err := k.config.KBFSOps().GetTLFCryptKeys(ctx, tlfHandle)
if err != nil {
return res, err
}
res.NameIDBreaks.TlfID = keybase1.TLFID(id.String())
for i, key := range keys {
res.CryptKeys = append(res.CryptKeys, keybase1.CryptKey{
KeyGeneration: int(FirstValidKeyGen) + i,
Key: keybase1.Bytes32(key.Data()),
})
}
if query.IdentifyBehavior.WarningInsteadOfErrorOnBrokenTracks() {
res.NameIDBreaks.Breaks = getExtendedIdentify(ctx).getTlfBreakAndClose()
}
return res, nil
}
// GetPublicCanonicalTLFNameAndID implements the TlfKeysInterface interface for
// KeybaseServiceBase.
func (k *KeybaseServiceBase) GetPublicCanonicalTLFNameAndID(
ctx context.Context, query keybase1.TLFQuery) (
res keybase1.CanonicalTLFNameAndIDWithBreaks, err error) {
if ctx, err = makeExtendedIdentify(
ctxWithRandomIDReplayable(ctx,
CtxKeybaseServiceIDKey, CtxKeybaseServiceOpID, k.log),
query.IdentifyBehavior,
); err != nil {
return keybase1.CanonicalTLFNameAndIDWithBreaks{}, err
}
tlfHandle, err := k.getHandleFromFolderName(
ctx, query.TlfName, true /* public */)
if err != nil {
return res, err
}
res.CanonicalName = keybase1.CanonicalTlfName(
tlfHandle.GetCanonicalName())
id, err := k.config.KBFSOps().GetTLFID(ctx, tlfHandle)
if err != nil {
return res, err
}
res.TlfID = keybase1.TLFID(id.String())
if query.IdentifyBehavior.WarningInsteadOfErrorOnBrokenTracks() {
res.Breaks = getExtendedIdentify(ctx).getTlfBreakAndClose()
}
return res, nil
}
// EstablishMountDir asks the service for the current mount path
func (k *KeybaseServiceBase) EstablishMountDir(ctx context.Context) (
string, error) {
dir, err := k.kbfsMountClient.GetCurrentMountDir(ctx)
if err != nil {
return "", err
}
if dir == "" {
dirs, err2 := k.kbfsMountClient.GetAllAvailableMountDirs(ctx)
		if err2 != nil {
return "", err2
}
dir, err = chooseDefaultMount(ctx, dirs, k.log)
if err != nil {
return "", err
}
err2 = k.kbfsMountClient.SetCurrentMountDir(ctx, dir)
if err2 != nil {
			k.log.CInfof(ctx, "SetCurrentMountDir failed: %v", err2)
}
// Continue mounting even if we can't save the mount
k.log.CDebugf(ctx, "Choosing mountdir %s from %v", dir, dirs)
}
return dir, err
}
| 1 | 17,204 | This was sort of a pre-existing bug -- we should be busting our local cache if the key isn't present. However, `KBPKIClient.HasVerifyingKey` already took care of it on that path. This way is better though. | keybase-kbfs | go |
@@ -1,5 +1,6 @@
// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
+//go:build go1.15 && integration
// +build go1.15,integration
package cloudfront_test | 1 | // Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
// +build go1.15,integration
package cloudfront_test
import (
"context"
"testing"
"time"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/aws/request"
"github.com/aws/aws-sdk-go/awstesting/integration"
"github.com/aws/aws-sdk-go/service/cloudfront"
)
var _ aws.Config
var _ awserr.Error
var _ request.Request
func TestInteg_00_ListCloudFrontOriginAccessIdentities(t *testing.T) {
ctx, cancelFn := context.WithTimeout(context.Background(), 5*time.Second)
defer cancelFn()
sess := integration.SessionWithDefaultRegion("us-east-1")
svc := cloudfront.New(sess)
params := &cloudfront.ListCloudFrontOriginAccessIdentitiesInput{
MaxItems: aws.Int64(1),
}
_, err := svc.ListCloudFrontOriginAccessIdentitiesWithContext(ctx, params, func(r *request.Request) {
r.Handlers.Validate.RemoveByName("core.ValidateParametersHandler")
})
if err != nil {
t.Errorf("expect no error, got %v", err)
}
}
func TestInteg_01_GetDistribution(t *testing.T) {
ctx, cancelFn := context.WithTimeout(context.Background(), 5*time.Second)
defer cancelFn()
sess := integration.SessionWithDefaultRegion("us-east-1")
svc := cloudfront.New(sess)
params := &cloudfront.GetDistributionInput{
Id: aws.String("fake-id"),
}
_, err := svc.GetDistributionWithContext(ctx, params, func(r *request.Request) {
r.Handlers.Validate.RemoveByName("core.ValidateParametersHandler")
})
if err == nil {
t.Fatalf("expect request to fail")
}
aerr, ok := err.(awserr.RequestFailure)
if !ok {
t.Fatalf("expect awserr, was %T", err)
}
if len(aerr.Code()) == 0 {
t.Errorf("expect non-empty error code")
}
if len(aerr.Message()) == 0 {
t.Errorf("expect non-empty error message")
}
if v := aerr.Code(); v == request.ErrCodeSerialization {
t.Errorf("expect API error code got serialization failure")
}
}
 | 1 | 10,416 | Should this tag addition for generated files be handled explicitly in `private/model/cli/gen-api/main.go`? | aws-aws-sdk-go | go |
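// An illustrative note on the paired constraints in the patch above: //go:build
// is the Go 1.17+ syntax, and gofmt keeps the two lines in sync, so a code
// generator run under a recent toolchain would emit both forms together:
//
//go:build go1.15 && integration
// +build go1.15,integration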
@@ -4753,6 +4753,11 @@ public class MessagingController implements Runnable {
*/
private void notifyAccount(Context context, Account account,
LocalMessage message, int previousUnreadMessageCount) {
+ // if it's quiet time and notifications are disabled, then we shouldn't show a notification
+ if (K9.isQuietTime() && K9.getQuietTimeNotificationEnabled()) {
+ return;
+ }
+
final NotificationData data = getNotificationData(account, previousUnreadMessageCount);
synchronized (data) {
notifyAccountWithDataLocked(context, account, message, data); | 1 | package com.fsck.k9.controller;
import java.io.CharArrayWriter;
import java.io.PrintWriter;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.Comparator;
import java.util.Date;
import java.util.EnumSet;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.LinkedHashSet;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Set;
import java.util.concurrent.*;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;
import android.app.KeyguardManager;
import android.app.NotificationManager;
import android.app.PendingIntent;
import android.content.ContentResolver;
import android.content.Context;
import android.content.Intent;
import android.content.pm.PackageInfo;
import android.database.Cursor;
import android.net.Uri;
import android.os.Build;
import android.os.PowerManager;
import android.os.Process;
import android.support.v4.app.NotificationCompat;
import android.support.v4.app.TaskStackBuilder;
import android.text.SpannableStringBuilder;
import android.text.TextUtils;
import android.text.style.TextAppearanceSpan;
import android.util.Log;
import com.fsck.k9.Account;
import com.fsck.k9.Account.DeletePolicy;
import com.fsck.k9.Account.Expunge;
import com.fsck.k9.AccountStats;
import com.fsck.k9.K9;
import com.fsck.k9.K9.NotificationHideSubject;
import com.fsck.k9.K9.Intents;
import com.fsck.k9.K9.NotificationQuickDelete;
import com.fsck.k9.NotificationSetting;
import com.fsck.k9.Preferences;
import com.fsck.k9.R;
import com.fsck.k9.activity.Accounts;
import com.fsck.k9.activity.FolderList;
import com.fsck.k9.activity.MessageList;
import com.fsck.k9.activity.MessageReference;
import com.fsck.k9.activity.NotificationDeleteConfirmation;
import com.fsck.k9.activity.setup.AccountSetupCheckSettings.CheckDirection;
import com.fsck.k9.activity.setup.AccountSetupIncoming;
import com.fsck.k9.activity.setup.AccountSetupOutgoing;
import com.fsck.k9.cache.EmailProviderCache;
import com.fsck.k9.helper.Contacts;
import com.fsck.k9.helper.MessageHelper;
import com.fsck.k9.mail.power.TracingPowerManager;
import com.fsck.k9.mail.power.TracingPowerManager.TracingWakeLock;
import com.fsck.k9.mail.Address;
import com.fsck.k9.mail.FetchProfile;
import com.fsck.k9.mail.Flag;
import com.fsck.k9.mail.Folder;
import com.fsck.k9.mail.Folder.FolderType;
import com.fsck.k9.mail.Message;
import com.fsck.k9.mail.Message.RecipientType;
import com.fsck.k9.mail.CertificateValidationException;
import com.fsck.k9.mail.MessagingException;
import com.fsck.k9.mail.Part;
import com.fsck.k9.mail.PushReceiver;
import com.fsck.k9.mail.Pusher;
import com.fsck.k9.mail.Store;
import com.fsck.k9.mail.Transport;
import com.fsck.k9.mail.internet.MessageExtractor;
import com.fsck.k9.mail.internet.MimeMessage;
import com.fsck.k9.mail.internet.MimeMessageHelper;
import com.fsck.k9.mail.internet.MimeUtility;
import com.fsck.k9.mail.internet.TextBody;
import com.fsck.k9.mailstore.MessageRemovalListener;
import com.fsck.k9.mail.MessageRetrievalListener;
import com.fsck.k9.mailstore.LocalFolder;
import com.fsck.k9.mailstore.LocalMessage;
import com.fsck.k9.mailstore.LocalStore;
import com.fsck.k9.mailstore.LocalStore.PendingCommand;
import com.fsck.k9.mail.store.pop3.Pop3Store;
import com.fsck.k9.mailstore.UnavailableStorageException;
import com.fsck.k9.provider.EmailProvider;
import com.fsck.k9.provider.EmailProvider.StatsColumns;
import com.fsck.k9.search.ConditionsTreeNode;
import com.fsck.k9.search.LocalSearch;
import com.fsck.k9.search.SearchAccount;
import com.fsck.k9.search.SearchSpecification;
import com.fsck.k9.search.SqlQueryBuilder;
import com.fsck.k9.service.NotificationActionService;
/**
 * Starts a long running (application) Thread that will run through commands
 * that require remote mailbox access. This class is used to serialize and
 * prioritize these commands. Each method that submits a command requires a
 * MessagingListener instance to be provided. It is expected that the listener
 * has also been registered using addListener(). When a command is about to be
 * executed, it is skipped if the listener that was provided with it is no
 * longer registered. The design idea behind this is that an Activity registers
 * as a listener when it starts and removes itself when it is paused. Thus, any
 * commands that activity submitted are removed from the queue once the
 * activity is no longer active.
 */
public class MessagingController implements Runnable {
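/*
 * Usage sketch (hypothetical caller, not part of this class): an Activity
 * typically obtains the singleton, registers itself as a listener while it
 * is visible, and submits commands that name the same listener:
 *
 *   MessagingController controller = MessagingController.getInstance(getApplicationContext());
 *   controller.addListener(myListener);            // e.g. in onResume()
 *   controller.listFolders(account, false, myListener);
 *   controller.removeListener(myListener);         // e.g. in onPause()
 *
 * "myListener" is an assumed MessagingListener implementation supplied by
 * the caller.
 */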
public static final long INVALID_MESSAGE_ID = -1;
/**
* Immutable empty {@link String} array
*/
private static final String[] EMPTY_STRING_ARRAY = new String[0];
/**
* The maximum message size that we'll consider to be "small". A small message is downloaded
* in full immediately instead of in pieces. Anything over this size will be downloaded in
* pieces with attachments being left off completely and downloaded on demand.
 *
* 25k for a "small" message was picked by educated trial and error.
* http://answers.google.com/answers/threadview?id=312463 claims that the
* average size of an email is 59k, which I feel is too large for our
* blind download. The following tests were performed on a download of
* 25 random messages.
* <pre>
* 5k - 61 seconds,
* 25k - 51 seconds,
* 55k - 53 seconds,
* </pre>
* So 25k gives good performance and a reasonable data footprint. Sounds good to me.
*/
private static final String PENDING_COMMAND_MOVE_OR_COPY = "com.fsck.k9.MessagingController.moveOrCopy";
private static final String PENDING_COMMAND_MOVE_OR_COPY_BULK = "com.fsck.k9.MessagingController.moveOrCopyBulk";
private static final String PENDING_COMMAND_MOVE_OR_COPY_BULK_NEW = "com.fsck.k9.MessagingController.moveOrCopyBulkNew";
private static final String PENDING_COMMAND_EMPTY_TRASH = "com.fsck.k9.MessagingController.emptyTrash";
private static final String PENDING_COMMAND_SET_FLAG_BULK = "com.fsck.k9.MessagingController.setFlagBulk";
private static final String PENDING_COMMAND_SET_FLAG = "com.fsck.k9.MessagingController.setFlag";
private static final String PENDING_COMMAND_APPEND = "com.fsck.k9.MessagingController.append";
private static final String PENDING_COMMAND_MARK_ALL_AS_READ = "com.fsck.k9.MessagingController.markAllAsRead";
private static final String PENDING_COMMAND_EXPUNGE = "com.fsck.k9.MessagingController.expunge";
public static class UidReverseComparator implements Comparator<Message> {
@Override
public int compare(Message o1, Message o2) {
if (o1 == null || o2 == null || o1.getUid() == null || o2.getUid() == null) {
return 0;
}
int id1, id2;
try {
id1 = Integer.parseInt(o1.getUid());
id2 = Integer.parseInt(o2.getUid());
} catch (NumberFormatException e) {
return 0;
}
//reversed intentionally.
if (id1 < id2)
return 1;
if (id1 > id2)
return -1;
return 0;
}
}
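/*
 * Usage sketch: sorts messages newest-first by numeric UID; messages whose
 * UIDs are missing or non-numeric compare as equal and keep their relative
 * order (Collections.sort is stable). For example:
 *
 *   Collections.sort(remoteMessages, new UidReverseComparator());
 *
 * This mirrors how the comparator is used during remote search and sync below.
 */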
/**
* Maximum number of unsynced messages to store at once
*/
private static final int UNSYNC_CHUNK_SIZE = 5;
private static MessagingController inst = null;
private BlockingQueue<Command> mCommands = new PriorityBlockingQueue<Command>();
private Thread mThread;
private Set<MessagingListener> mListeners = new CopyOnWriteArraySet<MessagingListener>();
private final ConcurrentHashMap<String, AtomicInteger> sendCount = new ConcurrentHashMap<String, AtomicInteger>();
ConcurrentHashMap<Account, Pusher> pushers = new ConcurrentHashMap<Account, Pusher>();
private final ExecutorService threadPool = Executors.newCachedThreadPool();
private MessagingListener checkMailListener = null;
private MemorizingListener memorizingListener = new MemorizingListener();
private boolean mBusy;
private Context context;
/**
* A holder class for pending notification data
*
* This class holds all pieces of information for constructing
* a notification with message preview.
*/
private static class NotificationData {
/** Number of unread messages before constructing the notification */
int unreadBeforeNotification;
/**
* List of messages that should be used for the inbox-style overview.
* It's sorted from newest to oldest message.
* Don't modify this list directly, but use {@link #addMessage(com.fsck.k9.mailstore.LocalMessage)} and
* {@link #removeMatchingMessage(android.content.Context, com.fsck.k9.activity.MessageReference)} instead.
*/
LinkedList<LocalMessage> messages;
/**
* List of references for messages that the user is still to be notified of,
* but which don't fit into the inbox style anymore. It's sorted from newest
* to oldest message.
*/
LinkedList<MessageReference> droppedMessages;
/**
* Maximum number of messages to keep for the inbox-style overview.
* As of Jellybean, phone notifications show a maximum of 5 lines, while tablet
* notifications show 7 lines. To make sure no lines are silently dropped,
* we default to 5 lines.
*/
private final static int MAX_MESSAGES = 5;
/**
* Constructs a new data instance.
*
* @param unread Number of unread messages prior to instance construction
*/
public NotificationData(int unread) {
unreadBeforeNotification = unread;
droppedMessages = new LinkedList<MessageReference>();
messages = new LinkedList<LocalMessage>();
}
/**
* Adds a new message to the list of pending messages for this notification.
*
* The implementation will take care of keeping a meaningful amount of
* messages in {@link #messages}.
*
* @param m The new message to add.
*/
public void addMessage(LocalMessage m) {
while (messages.size() >= MAX_MESSAGES) {
LocalMessage dropped = messages.removeLast();
droppedMessages.addFirst(dropped.makeMessageReference());
}
messages.addFirst(m);
}
/**
* Remove a certain message from the message list.
*
* @param context A context.
* @param ref Reference of the message to remove
* @return true if message was found and removed, false otherwise
*/
public boolean removeMatchingMessage(Context context, MessageReference ref) {
for (MessageReference dropped : droppedMessages) {
if (dropped.equals(ref)) {
droppedMessages.remove(dropped);
return true;
}
}
for (LocalMessage message : messages) {
if (message.makeMessageReference().equals(ref)) {
if (messages.remove(message) && !droppedMessages.isEmpty()) {
LocalMessage restoredMessage = droppedMessages.getFirst().restoreToLocalMessage(context);
if (restoredMessage != null) {
messages.addLast(restoredMessage);
droppedMessages.removeFirst();
}
}
return true;
}
}
return false;
}
/**
* Adds a list of references for all pending messages for the notification to the supplied
* List.
*/
public void supplyAllMessageRefs(List<MessageReference> refs) {
for (LocalMessage m : messages) {
refs.add(m.makeMessageReference());
}
refs.addAll(droppedMessages);
}
/**
* Gets the total number of messages the user is to be notified of.
*
* @return Amount of new messages the notification notifies for
*/
public int getNewMessageCount() {
return messages.size() + droppedMessages.size();
}
}
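/*
 * Internal flow sketch (hypothetical values): with MAX_MESSAGES = 5, adding a
 * sixth message pushes the oldest into droppedMessages, and removing one of
 * the five visible messages promotes the newest dropped reference back:
 *
 *   NotificationData data = new NotificationData(unreadCount);
 *   data.addMessage(newLocalMessage);                   // newest first
 *   data.removeMatchingMessage(context, someReference); // may restore a dropped message
 *   int count = data.getNewMessageCount();              // messages + droppedMessages
 */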
// Key is accountNumber
private final ConcurrentMap<Integer, NotificationData> notificationData = new ConcurrentHashMap<Integer, NotificationData>();
private static final Set<Flag> SYNC_FLAGS = EnumSet.of(Flag.SEEN, Flag.FLAGGED, Flag.ANSWERED, Flag.FORWARDED);
private void suppressMessages(Account account, List<LocalMessage> messages) {
EmailProviderCache cache = EmailProviderCache.getCache(account.getUuid(), context);
cache.hideMessages(messages);
}
private void unsuppressMessages(Account account, List<? extends Message> messages) {
EmailProviderCache cache = EmailProviderCache.getCache(account.getUuid(), context);
cache.unhideMessages(messages);
}
private boolean isMessageSuppressed(LocalMessage message) {
long messageId = message.getId();
long folderId = message.getFolder().getId();
EmailProviderCache cache = EmailProviderCache.getCache(message.getFolder().getAccountUuid(), context);
return cache.isMessageHidden(messageId, folderId);
}
private void setFlagInCache(final Account account, final List<Long> messageIds,
final Flag flag, final boolean newState) {
EmailProviderCache cache = EmailProviderCache.getCache(account.getUuid(), context);
String columnName = LocalStore.getColumnNameForFlag(flag);
String value = Integer.toString((newState) ? 1 : 0);
cache.setValueForMessages(messageIds, columnName, value);
}
private void removeFlagFromCache(final Account account, final List<Long> messageIds,
final Flag flag) {
EmailProviderCache cache = EmailProviderCache.getCache(account.getUuid(), context);
String columnName = LocalStore.getColumnNameForFlag(flag);
cache.removeValueForMessages(messageIds, columnName);
}
private void setFlagForThreadsInCache(final Account account, final List<Long> threadRootIds,
final Flag flag, final boolean newState) {
EmailProviderCache cache = EmailProviderCache.getCache(account.getUuid(), context);
String columnName = LocalStore.getColumnNameForFlag(flag);
String value = Integer.toString((newState) ? 1 : 0);
cache.setValueForThreads(threadRootIds, columnName, value);
}
private void removeFlagForThreadsFromCache(final Account account, final List<Long> messageIds,
final Flag flag) {
EmailProviderCache cache = EmailProviderCache.getCache(account.getUuid(), context);
String columnName = LocalStore.getColumnNameForFlag(flag);
cache.removeValueForThreads(messageIds, columnName);
}
private MessagingController(Context context) {
this.context = context;
mThread = new Thread(this);
mThread.setName("MessagingController");
mThread.start();
if (memorizingListener != null) {
addListener(memorizingListener);
}
}
public synchronized static MessagingController getInstance(Context context) {
if (inst == null) {
inst = new MessagingController(context.getApplicationContext());
}
return inst;
}
public boolean isBusy() {
return mBusy;
}
@Override
public void run() {
Process.setThreadPriority(Process.THREAD_PRIORITY_BACKGROUND);
while (true) {
String commandDescription = null;
try {
final Command command = mCommands.take();
if (command != null) {
commandDescription = command.description;
if (K9.DEBUG)
Log.i(K9.LOG_TAG, "Running " + (command.isForeground ? "Foreground" : "Background") + " command '" + command.description + "', seq = " + command.sequence);
mBusy = true;
try {
command.runnable.run();
} catch (UnavailableAccountException e) {
// retry later
new Thread() {
@Override
public void run() {
try {
sleep(30 * 1000);
mCommands.put(command);
} catch (InterruptedException e) {
Log.e(K9.LOG_TAG, "interrupted while putting a pending command for"
+ " an unavailable account back into the queue."
+ " THIS SHOULD NEVER HAPPEN.");
}
}
}.start();
}
if (K9.DEBUG)
Log.i(K9.LOG_TAG, (command.isForeground ? "Foreground" : "Background") +
" Command '" + command.description + "' completed");
for (MessagingListener l : getListeners(command.listener)) {
l.controllerCommandCompleted(!mCommands.isEmpty());
}
}
} catch (Exception e) {
Log.e(K9.LOG_TAG, "Error running command '" + commandDescription + "'", e);
}
mBusy = false;
}
}
private void put(String description, MessagingListener listener, Runnable runnable) {
putCommand(mCommands, description, listener, runnable, true);
}
private void putBackground(String description, MessagingListener listener, Runnable runnable) {
putCommand(mCommands, description, listener, runnable, false);
}
private void putCommand(BlockingQueue<Command> queue, String description, MessagingListener listener, Runnable runnable, boolean isForeground) {
int retries = 10;
Exception e = null;
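// If interrupted while enqueuing, retry up to 10 times with a 200ms pause
// between attempts (~2s worst case) before giving up with an Error below.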
while (retries-- > 0) {
try {
Command command = new Command();
command.listener = listener;
command.runnable = runnable;
command.description = description;
command.isForeground = isForeground;
queue.put(command);
return;
} catch (InterruptedException ie) {
try {
Thread.sleep(200);
} catch (InterruptedException ne) {
}
e = ie;
}
}
throw new Error(e);
}
public void addListener(MessagingListener listener) {
mListeners.add(listener);
refreshListener(listener);
}
public void refreshListener(MessagingListener listener) {
if (memorizingListener != null && listener != null) {
memorizingListener.refreshOther(listener);
}
}
public void removeListener(MessagingListener listener) {
mListeners.remove(listener);
}
public Set<MessagingListener> getListeners() {
return mListeners;
}
public Set<MessagingListener> getListeners(MessagingListener listener) {
if (listener == null) {
return mListeners;
}
Set<MessagingListener> listeners = new HashSet<MessagingListener>(mListeners);
listeners.add(listener);
return listeners;
}
/**
 * Lists folders that are available locally and remotely. This method calls
 * listFoldersCallback for local folders before it returns, and then for
 * remote folders at some later point. If there are no local folders a
 * remote refresh is forced by this method. This method should be called from
 * a Thread as it may take several seconds to list the local folders.
 * TODO this needs to cache the remote folder list
 *
 * @param account the account whose folders to list
 * @param refreshRemote if true, refresh the folder list from the remote store
 * @param listener the listener to notify of progress and results
 */
public void listFolders(final Account account, final boolean refreshRemote, final MessagingListener listener) {
threadPool.execute(new Runnable() {
@Override
public void run() {
listFoldersSynchronous(account, refreshRemote, listener);
}
});
}
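/*
 * Usage sketch (hypothetical caller): safe to call from the UI thread, since
 * the work is handed to the thread pool; results arrive via the listener:
 *
 *   controller.listFolders(account, true, new SimpleMessagingListener() {
 *       // override listFolders()/listFoldersFinished() as needed
 *   });
 *
 * "SimpleMessagingListener" is an assumed convenience base class; any
 * MessagingListener implementation works.
 */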
/**
 * Lists folders that are available locally and remotely. This method calls
 * listFoldersCallback for local folders before it returns, and then for
 * remote folders at some later point. If there are no local folders a
 * remote refresh is forced by this method. This method is called in the
 * foreground.
 * TODO this needs to cache the remote folder list
 *
 * @param account the account whose folders to list
 * @param refreshRemote if true, refresh the folder list from the remote store
 * @param listener the listener to notify of progress and results
 */
public void listFoldersSynchronous(final Account account, final boolean refreshRemote, final MessagingListener listener) {
for (MessagingListener l : getListeners(listener)) {
l.listFoldersStarted(account);
}
List<? extends Folder> localFolders = null;
if (!account.isAvailable(context)) {
Log.i(K9.LOG_TAG, "not listing folders of unavailable account");
} else {
try {
Store localStore = account.getLocalStore();
localFolders = localStore.getPersonalNamespaces(false);
if (refreshRemote || localFolders.isEmpty()) {
doRefreshRemote(account, listener);
return;
}
for (MessagingListener l : getListeners(listener)) {
l.listFolders(account, localFolders);
}
} catch (Exception e) {
for (MessagingListener l : getListeners(listener)) {
l.listFoldersFailed(account, e.getMessage());
}
addErrorMessage(account, null, e);
return;
} finally {
if (localFolders != null) {
for (Folder localFolder : localFolders) {
closeFolder(localFolder);
}
}
}
}
for (MessagingListener l : getListeners(listener)) {
l.listFoldersFinished(account);
}
}
private void doRefreshRemote(final Account account, final MessagingListener listener) {
put("doRefreshRemote", listener, new Runnable() {
@Override
public void run() {
List<? extends Folder> localFolders = null;
try {
Store store = account.getRemoteStore();
List<? extends Folder> remoteFolders = store.getPersonalNamespaces(false);
LocalStore localStore = account.getLocalStore();
Set<String> remoteFolderNames = new HashSet<String>();
List<LocalFolder> foldersToCreate = new LinkedList<LocalFolder>();
localFolders = localStore.getPersonalNamespaces(false);
Set<String> localFolderNames = new HashSet<String>();
for (Folder localFolder : localFolders) {
localFolderNames.add(localFolder.getName());
}
for (Folder remoteFolder : remoteFolders) {
if (!localFolderNames.contains(remoteFolder.getName())) {
LocalFolder localFolder = localStore.getFolder(remoteFolder.getName());
foldersToCreate.add(localFolder);
}
remoteFolderNames.add(remoteFolder.getName());
}
localStore.createFolders(foldersToCreate, account.getDisplayCount());
localFolders = localStore.getPersonalNamespaces(false);
/*
* Clear out any folders that are no longer on the remote store.
*/
for (Folder localFolder : localFolders) {
String localFolderName = localFolder.getName();
// FIXME: This is a hack used to clean up when we accidentally created the
// special placeholder folder "-NONE-".
if (K9.FOLDER_NONE.equals(localFolderName)) {
localFolder.delete(false);
}
if (!account.isSpecialFolder(localFolderName) &&
!remoteFolderNames.contains(localFolderName)) {
localFolder.delete(false);
}
}
localFolders = localStore.getPersonalNamespaces(false);
for (MessagingListener l : getListeners(listener)) {
l.listFolders(account, localFolders);
}
for (MessagingListener l : getListeners(listener)) {
l.listFoldersFinished(account);
}
} catch (Exception e) {
for (MessagingListener l : getListeners(listener)) {
l.listFoldersFailed(account, "");
}
addErrorMessage(account, null, e);
} finally {
if (localFolders != null) {
for (Folder localFolder : localFolders) {
closeFolder(localFolder);
}
}
}
}
});
}
/**
 * Find all messages in any local account which match the given search.
 */
public void searchLocalMessages(final LocalSearch search, final MessagingListener listener) {
threadPool.execute(new Runnable() {
@Override
public void run() {
searchLocalMessagesSynchronous(search, listener);
}
});
}
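/*
 * Usage sketch (hypothetical, assuming the LocalSearch builder methods shown):
 *
 *   LocalSearch search = new LocalSearch();
 *   search.addAccountUuid(SearchSpecification.ALL_ACCOUNTS);
 *   controller.searchLocalMessages(search, myListener);
 *
 * Results are streamed to the listener per account, followed by aggregate
 * statistics via searchStats().
 */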
public void searchLocalMessagesSynchronous(final LocalSearch search, final MessagingListener listener) {
final AccountStats stats = new AccountStats();
final Set<String> uuidSet = new HashSet<String>(Arrays.asList(search.getAccountUuids()));
List<Account> accounts = Preferences.getPreferences(context).getAccounts();
boolean allAccounts = uuidSet.contains(SearchSpecification.ALL_ACCOUNTS);
// for every account we want to search do the query in the localstore
for (final Account account : accounts) {
if (!allAccounts && !uuidSet.contains(account.getUuid())) {
continue;
}
// Collecting statistics of the search result
MessageRetrievalListener retrievalListener = new MessageRetrievalListener<LocalMessage>() {
@Override
public void messageStarted(String message, int number, int ofTotal) {}
@Override
public void messagesFinished(int number) {}
@Override
public void messageFinished(LocalMessage message, int number, int ofTotal) {
if (!isMessageSuppressed(message)) {
List<LocalMessage> messages = new ArrayList<LocalMessage>();
messages.add(message);
stats.unreadMessageCount += (!message.isSet(Flag.SEEN)) ? 1 : 0;
stats.flaggedMessageCount += (message.isSet(Flag.FLAGGED)) ? 1 : 0;
if (listener != null) {
listener.listLocalMessagesAddMessages(account, null, messages);
}
}
}
};
// alert everyone the search has started
if (listener != null) {
listener.listLocalMessagesStarted(account, null);
}
// build and do the query in the localstore
try {
LocalStore localStore = account.getLocalStore();
localStore.searchForMessages(retrievalListener, search);
} catch (Exception e) {
if (listener != null) {
listener.listLocalMessagesFailed(account, null, e.getMessage());
}
addErrorMessage(account, null, e);
} finally {
if (listener != null) {
listener.listLocalMessagesFinished(account, null);
}
}
}
// publish the total search statistics
if (listener != null) {
listener.searchStats(stats);
}
}
public Future<?> searchRemoteMessages(final String acctUuid, final String folderName, final String query,
final Set<Flag> requiredFlags, final Set<Flag> forbiddenFlags, final MessagingListener listener) {
if (K9.DEBUG) {
String msg = "searchRemoteMessages ("
+ "acct=" + acctUuid
+ ", folderName = " + folderName
+ ", query = " + query
+ ")";
Log.i(K9.LOG_TAG, msg);
}
return threadPool.submit(new Runnable() {
@Override
public void run() {
searchRemoteMessagesSynchronous(acctUuid, folderName, query, requiredFlags, forbiddenFlags, listener);
}
});
}
public void searchRemoteMessagesSynchronous(final String acctUuid, final String folderName, final String query,
final Set<Flag> requiredFlags, final Set<Flag> forbiddenFlags, final MessagingListener listener) {
final Account acct = Preferences.getPreferences(context).getAccount(acctUuid);
if (listener != null) {
listener.remoteSearchStarted(folderName);
}
List<Message> extraResults = new ArrayList<Message>();
try {
Store remoteStore = acct.getRemoteStore();
LocalStore localStore = acct.getLocalStore();
if (remoteStore == null || localStore == null) {
throw new MessagingException("Could not get store");
}
Folder remoteFolder = remoteStore.getFolder(folderName);
LocalFolder localFolder = localStore.getFolder(folderName);
if (remoteFolder == null || localFolder == null) {
throw new MessagingException("Folder not found");
}
List<Message> messages = remoteFolder.search(query, requiredFlags, forbiddenFlags);
if (K9.DEBUG) {
Log.i("Remote Search", "Remote search got " + messages.size() + " results");
}
// There's no need to fetch messages already completely downloaded
List<Message> remoteMessages = localFolder.extractNewMessages(messages);
messages.clear();
if (listener != null) {
listener.remoteSearchServerQueryComplete(folderName, remoteMessages.size(), acct.getRemoteSearchNumResults());
}
Collections.sort(remoteMessages, new UidReverseComparator());
int resultLimit = acct.getRemoteSearchNumResults();
if (resultLimit > 0 && remoteMessages.size() > resultLimit) {
extraResults = remoteMessages.subList(resultLimit, remoteMessages.size());
remoteMessages = remoteMessages.subList(0, resultLimit);
}
loadSearchResultsSynchronous(remoteMessages, localFolder, remoteFolder, listener);
} catch (Exception e) {
if (Thread.currentThread().isInterrupted()) {
Log.i(K9.LOG_TAG, "Caught exception on aborted remote search; safe to ignore.", e);
} else {
Log.e(K9.LOG_TAG, "Could not complete remote search", e);
if (listener != null) {
listener.remoteSearchFailed(null, e.getMessage());
}
addErrorMessage(acct, null, e);
}
} finally {
if (listener != null) {
listener.remoteSearchFinished(folderName, 0, acct.getRemoteSearchNumResults(), extraResults);
}
}
}
public void loadSearchResults(final Account account, final String folderName, final List<Message> messages, final MessagingListener listener) {
threadPool.execute(new Runnable() {
@Override
public void run() {
if (listener != null) {
listener.enableProgressIndicator(true);
}
try {
Store remoteStore = account.getRemoteStore();
LocalStore localStore = account.getLocalStore();
if (remoteStore == null || localStore == null) {
throw new MessagingException("Could not get store");
}
Folder remoteFolder = remoteStore.getFolder(folderName);
LocalFolder localFolder = localStore.getFolder(folderName);
if (remoteFolder == null || localFolder == null) {
throw new MessagingException("Folder not found");
}
loadSearchResultsSynchronous(messages, localFolder, remoteFolder, listener);
} catch (MessagingException e) {
Log.e(K9.LOG_TAG, "Exception in loadSearchResults: " + e);
addErrorMessage(account, null, e);
} finally {
if (listener != null) {
listener.enableProgressIndicator(false);
}
}
}
});
}
public void loadSearchResultsSynchronous(List<Message> messages, LocalFolder localFolder, Folder remoteFolder, MessagingListener listener) throws MessagingException {
final FetchProfile header = new FetchProfile();
header.add(FetchProfile.Item.FLAGS);
header.add(FetchProfile.Item.ENVELOPE);
final FetchProfile structure = new FetchProfile();
structure.add(FetchProfile.Item.STRUCTURE);
int i = 0;
for (Message message : messages) {
i++;
LocalMessage localMsg = localFolder.getMessage(message.getUid());
if (localMsg == null) {
remoteFolder.fetch(Collections.singletonList(message), header, null);
//fun fact: ImapFolder.fetch can't handle getting STRUCTURE at same time as headers
remoteFolder.fetch(Collections.singletonList(message), structure, null);
localFolder.appendMessages(Collections.singletonList(message));
localMsg = localFolder.getMessage(message.getUid());
}
if (listener != null) {
listener.remoteSearchAddMessage(remoteFolder.getName(), localMsg, i, messages.size());
}
}
}
public void loadMoreMessages(Account account, String folder, MessagingListener listener) {
try {
LocalStore localStore = account.getLocalStore();
LocalFolder localFolder = localStore.getFolder(folder);
if (localFolder.getVisibleLimit() > 0) {
localFolder.setVisibleLimit(localFolder.getVisibleLimit() + account.getDisplayCount());
}
synchronizeMailbox(account, folder, listener, null);
} catch (MessagingException me) {
addErrorMessage(account, null, me);
throw new RuntimeException("Unable to set visible limit on folder", me);
}
}
public void resetVisibleLimits(Collection<Account> accounts) {
for (Account account : accounts) {
account.resetVisibleLimits();
}
}
/**
 * Start background synchronization of the specified folder.
 * @param account the account that owns the folder
 * @param folder the name of the folder to synchronize
 * @param listener the listener to notify of progress
 * @param providedRemoteFolder an already-open remote folder to reuse, or null to open one
 */
public void synchronizeMailbox(final Account account, final String folder, final MessagingListener listener, final Folder providedRemoteFolder) {
putBackground("synchronizeMailbox", listener, new Runnable() {
@Override
public void run() {
synchronizeMailboxSynchronous(account, folder, listener, providedRemoteFolder);
}
});
}
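/*
 * Usage sketch: queue a background sync of an account's inbox and let the
 * controller open the remote folder itself:
 *
 *   controller.synchronizeMailbox(account, account.getInboxFolderName(), myListener, null);
 *
 * Passing null for providedRemoteFolder is the common case; a non-null,
 * already-open folder is typically only supplied by push implementations.
 */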
/**
 * Perform synchronous (foreground) synchronization of the specified folder. This is
 * generally only called by synchronizeMailbox.
 * @param account the account that owns the folder
 * @param folder the name of the folder to synchronize
 * @param listener the listener to notify of progress
 * @param providedRemoteFolder an already-open remote folder to reuse, or null to open one
 *
 * TODO Break this method up into smaller chunks.
 */
private void synchronizeMailboxSynchronous(final Account account, final String folder, final MessagingListener listener, Folder providedRemoteFolder) {
Folder remoteFolder = null;
LocalFolder tLocalFolder = null;
if (K9.DEBUG)
Log.i(K9.LOG_TAG, "Synchronizing folder " + account.getDescription() + ":" + folder);
for (MessagingListener l : getListeners(listener)) {
l.synchronizeMailboxStarted(account, folder);
}
/*
* We don't ever sync the Outbox or errors folder
*/
if (folder.equals(account.getOutboxFolderName()) || folder.equals(account.getErrorFolderName())) {
for (MessagingListener l : getListeners(listener)) {
l.synchronizeMailboxFinished(account, folder, 0, 0);
}
return;
}
Exception commandException = null;
try {
if (K9.DEBUG)
Log.d(K9.LOG_TAG, "SYNC: About to process pending commands for account " + account.getDescription());
try {
processPendingCommandsSynchronous(account);
} catch (Exception e) {
addErrorMessage(account, null, e);
Log.e(K9.LOG_TAG, "Failure processing command, but allow message sync attempt", e);
commandException = e;
}
/*
* Get the message list from the local store and create an index of
* the uids within the list.
*/
if (K9.DEBUG)
Log.v(K9.LOG_TAG, "SYNC: About to get local folder " + folder);
final LocalStore localStore = account.getLocalStore();
tLocalFolder = localStore.getFolder(folder);
final LocalFolder localFolder = tLocalFolder;
localFolder.open(Folder.OPEN_MODE_RW);
localFolder.updateLastUid();
List<? extends Message> localMessages = localFolder.getMessages(null);
Map<String, Message> localUidMap = new HashMap<String, Message>();
for (Message message : localMessages) {
localUidMap.put(message.getUid(), message);
}
if (providedRemoteFolder != null) {
if (K9.DEBUG)
Log.v(K9.LOG_TAG, "SYNC: using providedRemoteFolder " + folder);
remoteFolder = providedRemoteFolder;
} else {
Store remoteStore = account.getRemoteStore();
if (K9.DEBUG)
Log.v(K9.LOG_TAG, "SYNC: About to get remote folder " + folder);
remoteFolder = remoteStore.getFolder(folder);
if (!verifyOrCreateRemoteSpecialFolder(account, folder, remoteFolder, listener)) {
return;
}
/*
 * Synchronization process:
 *
 * 1. Open the folder.
 * 2. Upload any local messages that are marked as PENDING_UPLOAD (Drafts, Sent, Trash).
 * 3. Get the message count.
 * 4. Get the list of the newest K9.DEFAULT_VISIBLE_LIMIT messages:
 *    getMessages(messageCount - K9.DEFAULT_VISIBLE_LIMIT, messageCount)
 * 5. See if we have each message locally; if not, fetch its flags and envelope.
 * 6. Get and update the unread count for the folder.
 * 7. Update the remote flags of any messages we have locally with an internal date newer than the remote message.
 * 8. Get the current flags for any messages we have locally but did not just download.
 * 9. Update local flags.
 * 10. For any message we have locally but not remotely, delete the local message to keep the cache clean.
 * 11. Download larger parts of any new messages.
 * 12. (Optional) Download small attachments in the background.
 */
/*
* Open the remote folder. This pre-loads certain metadata like message count.
*/
if (K9.DEBUG)
Log.v(K9.LOG_TAG, "SYNC: About to open remote folder " + folder);
remoteFolder.open(Folder.OPEN_MODE_RW);
if (Expunge.EXPUNGE_ON_POLL == account.getExpungePolicy()) {
if (K9.DEBUG)
Log.d(K9.LOG_TAG, "SYNC: Expunging folder " + account.getDescription() + ":" + folder);
remoteFolder.expunge();
}
}
/*
* Get the remote message count.
*/
int remoteMessageCount = remoteFolder.getMessageCount();
int visibleLimit = localFolder.getVisibleLimit();
if (visibleLimit < 0) {
visibleLimit = K9.DEFAULT_VISIBLE_LIMIT;
}
final List<Message> remoteMessages = new ArrayList<Message>();
Map<String, Message> remoteUidMap = new HashMap<String, Message>();
if (K9.DEBUG)
Log.v(K9.LOG_TAG, "SYNC: Remote message count for folder " + folder + " is " + remoteMessageCount);
final Date earliestDate = account.getEarliestPollDate();
if (remoteMessageCount > 0) {
/* Message numbers start at 1. */
int remoteStart;
if (visibleLimit > 0) {
remoteStart = Math.max(0, remoteMessageCount - visibleLimit) + 1;
} else {
remoteStart = 1;
}
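/*
 * Worked example: with remoteMessageCount = 1000 and visibleLimit = 25,
 * remoteStart = max(0, 1000 - 25) + 1 = 976, so the newest 25 messages
 * (numbers 976 through 1000, inclusive) are fetched below.
 */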
int remoteEnd = remoteMessageCount;
if (K9.DEBUG)
Log.v(K9.LOG_TAG, "SYNC: About to get messages " + remoteStart + " through " + remoteEnd + " for folder " + folder);
final AtomicInteger headerProgress = new AtomicInteger(0);
for (MessagingListener l : getListeners(listener)) {
l.synchronizeMailboxHeadersStarted(account, folder);
}
List<? extends Message> remoteMessageArray = remoteFolder.getMessages(remoteStart, remoteEnd, earliestDate, null);
int messageCount = remoteMessageArray.size();
for (Message thisMess : remoteMessageArray) {
headerProgress.incrementAndGet();
for (MessagingListener l : getListeners(listener)) {
l.synchronizeMailboxHeadersProgress(account, folder, headerProgress.get(), messageCount);
}
Message localMessage = localUidMap.get(thisMess.getUid());
if (localMessage == null || !localMessage.olderThan(earliestDate)) {
remoteMessages.add(thisMess);
remoteUidMap.put(thisMess.getUid(), thisMess);
}
}
if (K9.DEBUG)
Log.v(K9.LOG_TAG, "SYNC: Got " + remoteUidMap.size() + " messages for folder " + folder);
for (MessagingListener l : getListeners(listener)) {
l.synchronizeMailboxHeadersFinished(account, folder, headerProgress.get(), remoteUidMap.size());
}
} else if (remoteMessageCount < 0) {
throw new Exception("Invalid message count " + remoteMessageCount + " for folder " + folder);
}
/*
* Remove any messages that are in the local store but no longer on the remote store or are too old
*/
if (account.syncRemoteDeletions()) {
List<Message> destroyMessages = new ArrayList<Message>();
for (Message localMessage : localMessages) {
if (remoteUidMap.get(localMessage.getUid()) == null) {
destroyMessages.add(localMessage);
}
}
localFolder.destroyMessages(destroyMessages);
for (Message destroyMessage : destroyMessages) {
for (MessagingListener l : getListeners(listener)) {
l.synchronizeMailboxRemovedMessage(account, folder, destroyMessage);
}
}
}
localMessages = null;
/*
* Now we download the actual content of messages.
*/
int newMessages = downloadMessages(account, remoteFolder, localFolder, remoteMessages, false);
int unreadMessageCount = localFolder.getUnreadMessageCount();
for (MessagingListener l : getListeners()) {
l.folderStatusChanged(account, folder, unreadMessageCount);
}
/* Notify listeners that we're finally done. */
localFolder.setLastChecked(System.currentTimeMillis());
localFolder.setStatus(null);
if (K9.DEBUG)
Log.d(K9.LOG_TAG, "Done synchronizing folder " + account.getDescription() + ":" + folder +
" @ " + new Date() + " with " + newMessages + " new messages");
for (MessagingListener l : getListeners(listener)) {
l.synchronizeMailboxFinished(account, folder, remoteMessageCount, newMessages);
}
if (commandException != null) {
String rootMessage = getRootCauseMessage(commandException);
Log.e(K9.LOG_TAG, "Root cause failure in " + account.getDescription() + ":" +
tLocalFolder.getName() + " was '" + rootMessage + "'");
localFolder.setStatus(rootMessage);
for (MessagingListener l : getListeners(listener)) {
l.synchronizeMailboxFailed(account, folder, rootMessage);
}
}
if (K9.DEBUG)
Log.i(K9.LOG_TAG, "Done synchronizing folder " + account.getDescription() + ":" + folder);
} catch (Exception e) {
Log.e(K9.LOG_TAG, "synchronizeMailbox", e);
// If we don't set the last checked, it can try too often during
// failure conditions
String rootMessage = getRootCauseMessage(e);
if (tLocalFolder != null) {
try {
tLocalFolder.setStatus(rootMessage);
tLocalFolder.setLastChecked(System.currentTimeMillis());
} catch (MessagingException me) {
Log.e(K9.LOG_TAG, "Could not set last checked on folder " + account.getDescription() + ":" +
tLocalFolder.getName(), e);
}
}
for (MessagingListener l : getListeners(listener)) {
l.synchronizeMailboxFailed(account, folder, rootMessage);
}
notifyUserIfCertificateProblem(context, e, account, true);
addErrorMessage(account, null, e);
Log.e(K9.LOG_TAG, "Failed synchronizing folder " + account.getDescription() + ":" + folder + " @ " + new Date());
} finally {
if (providedRemoteFolder == null) {
closeFolder(remoteFolder);
}
closeFolder(tLocalFolder);
}
}
private void closeFolder(Folder f) {
if (f != null) {
f.close();
}
}
/*
 * If the folder is a "special" folder we need to see if it exists
 * on the remote server. If it does not exist we'll try to create it. If we
 * can't create it we'll abort. This will happen on every single Pop3 folder as
 * designed and on Imap folders during error conditions. This allows us
 * to treat Pop3 and Imap the same in this code.
 */
private boolean verifyOrCreateRemoteSpecialFolder(final Account account, final String folder, final Folder remoteFolder, final MessagingListener listener) throws MessagingException {
if (folder.equals(account.getTrashFolderName()) ||
folder.equals(account.getSentFolderName()) ||
folder.equals(account.getDraftsFolderName())) {
if (!remoteFolder.exists()) {
if (!remoteFolder.create(FolderType.HOLDS_MESSAGES)) {
for (MessagingListener l : getListeners(listener)) {
l.synchronizeMailboxFinished(account, folder, 0, 0);
}
if (K9.DEBUG)
Log.i(K9.LOG_TAG, "Done synchronizing folder " + folder);
return false;
}
}
}
return true;
}
/**
* Fetches the messages described by inputMessages from the remote store and writes them to
* local storage.
*
* @param account
* The account the remote store belongs to.
* @param remoteFolder
* The remote folder to download messages from.
* @param localFolder
* The {@link LocalFolder} instance corresponding to the remote folder.
* @param inputMessages
* A list of messages objects that store the UIDs of which messages to download.
* @param flagSyncOnly
* Only flags will be fetched from the remote store if this is {@code true}.
*
* @return The number of downloaded messages that are not flagged as {@link Flag#SEEN}.
*
* @throws MessagingException
*/
private int downloadMessages(final Account account, final Folder remoteFolder,
final LocalFolder localFolder, List<Message> inputMessages,
boolean flagSyncOnly) throws MessagingException {
final Date earliestDate = account.getEarliestPollDate();
Date downloadStarted = new Date(); // now
if (earliestDate != null) {
if (K9.DEBUG) {
Log.d(K9.LOG_TAG, "Only syncing messages after " + earliestDate);
}
}
final String folder = remoteFolder.getName();
int unreadBeforeStart = 0;
try {
AccountStats stats = account.getStats(context);
unreadBeforeStart = stats.unreadMessageCount;
} catch (MessagingException e) {
Log.e(K9.LOG_TAG, "Unable to getUnreadMessageCount for account: " + account, e);
}
List<Message> syncFlagMessages = new ArrayList<Message>();
List<Message> unsyncedMessages = new ArrayList<Message>();
final AtomicInteger newMessages = new AtomicInteger(0);
List<Message> messages = new ArrayList<Message>(inputMessages);
for (Message message : messages) {
evaluateMessageForDownload(message, folder, localFolder, remoteFolder, account, unsyncedMessages, syncFlagMessages, flagSyncOnly);
}
final AtomicInteger progress = new AtomicInteger(0);
final int todo = unsyncedMessages.size() + syncFlagMessages.size();
for (MessagingListener l : getListeners()) {
l.synchronizeMailboxProgress(account, folder, progress.get(), todo);
}
if (K9.DEBUG)
Log.d(K9.LOG_TAG, "SYNC: Have " + unsyncedMessages.size() + " unsynced messages");
messages.clear();
final List<Message> largeMessages = new ArrayList<Message>();
final List<Message> smallMessages = new ArrayList<Message>();
if (!unsyncedMessages.isEmpty()) {
/*
* Reverse the order of the messages. Depending on the server this may get us
* fetch results for newest to oldest. If not, no harm done.
*/
Collections.sort(unsyncedMessages, new UidReverseComparator());
int visibleLimit = localFolder.getVisibleLimit();
int listSize = unsyncedMessages.size();
if ((visibleLimit > 0) && (listSize > visibleLimit)) {
unsyncedMessages = unsyncedMessages.subList(0, visibleLimit);
}
FetchProfile fp = new FetchProfile();
if (remoteFolder.supportsFetchingFlags()) {
fp.add(FetchProfile.Item.FLAGS);
}
fp.add(FetchProfile.Item.ENVELOPE);
if (K9.DEBUG)
Log.d(K9.LOG_TAG, "SYNC: About to fetch " + unsyncedMessages.size() + " unsynced messages for folder " + folder);
fetchUnsyncedMessages(account, remoteFolder, localFolder, unsyncedMessages, smallMessages, largeMessages, progress, todo, fp);
// If a message didn't exist, messageFinished won't be called, but we shouldn't try again
// If we got here, nothing failed
for (Message message : unsyncedMessages) {
String newPushState = remoteFolder.getNewPushState(localFolder.getPushState(), message);
if (newPushState != null) {
localFolder.setPushState(newPushState);
}
}
if (K9.DEBUG) {
Log.d(K9.LOG_TAG, "SYNC: Synced unsynced messages for folder " + folder);
}
}
if (K9.DEBUG)
Log.d(K9.LOG_TAG, "SYNC: Have "
+ largeMessages.size() + " large messages and "
+ smallMessages.size() + " small messages out of "
+ unsyncedMessages.size() + " unsynced messages");
unsyncedMessages.clear();
/*
 * Grab the content of the small messages first. This is going to
 * be very fast and at very worst will be a single upload of a few bytes and a single
 * download of 625k.
 */
FetchProfile fp = new FetchProfile();
fp.add(FetchProfile.Item.BODY);
// fp.add(FetchProfile.Item.FLAGS);
// fp.add(FetchProfile.Item.ENVELOPE);
downloadSmallMessages(account, remoteFolder, localFolder, smallMessages, progress, unreadBeforeStart, newMessages, todo, fp);
smallMessages.clear();
/*
* Now do the large messages that require more round trips.
*/
fp.clear();
fp.add(FetchProfile.Item.STRUCTURE);
downloadLargeMessages(account, remoteFolder, localFolder, largeMessages, progress, unreadBeforeStart, newMessages, todo, fp);
largeMessages.clear();
/*
* Refresh the flags for any messages in the local store that we didn't just
* download.
*/
refreshLocalMessageFlags(account, remoteFolder, localFolder, syncFlagMessages, progress, todo);
if (K9.DEBUG)
Log.d(K9.LOG_TAG, "SYNC: Synced remote messages for folder " + folder + ", " + newMessages.get() + " new messages");
localFolder.purgeToVisibleLimit(new MessageRemovalListener() {
@Override
public void messageRemoved(Message message) {
for (MessagingListener l : getListeners()) {
l.synchronizeMailboxRemovedMessage(account, folder, message);
}
}
});
// If the oldest message seen on this sync is newer than
// the oldest message seen on the previous sync, then
// we want to move our high-water mark forward
// this is all here just for pop which only syncs inbox
// this would be a little wrong for IMAP (we'd want a folder-level pref, not an account level pref.)
// fortunately, we just don't care.
Long oldestMessageTime = localFolder.getOldestMessageDate();
if (oldestMessageTime != null) {
Date oldestExtantMessage = new Date(oldestMessageTime);
if (oldestExtantMessage.before(downloadStarted) &&
oldestExtantMessage.after(new Date(account.getLatestOldMessageSeenTime()))) {
account.setLatestOldMessageSeenTime(oldestExtantMessage.getTime());
account.save(Preferences.getPreferences(context));
}
}
return newMessages.get();
}
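/*
 * Decision summary for evaluateMessageForDownload below, derived from its
 * logic (the download branches apply only when flagSyncOnly is false):
 * - remote message flagged DELETED                  -> syncFlagMessages (flags only)
 * - no local copy, never downloaded                 -> unsyncedMessages (fetch)
 * - no local copy, already partially/fully fetched  -> store locally and notify listeners
 * - local copy present and not DELETED:
 *     not downloaded, even partially                -> unsyncedMessages (retry fetch)
 *     otherwise                                     -> syncFlagMessages (refresh flags)
 */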
private void evaluateMessageForDownload(final Message message, final String folder,
final LocalFolder localFolder,
final Folder remoteFolder,
final Account account,
final List<Message> unsyncedMessages,
final List<Message> syncFlagMessages,
boolean flagSyncOnly) throws MessagingException {
if (message.isSet(Flag.DELETED)) {
syncFlagMessages.add(message);
return;
}
Message localMessage = localFolder.getMessage(message.getUid());
if (localMessage == null) {
if (!flagSyncOnly) {
if (!message.isSet(Flag.X_DOWNLOADED_FULL) && !message.isSet(Flag.X_DOWNLOADED_PARTIAL)) {
if (K9.DEBUG)
Log.v(K9.LOG_TAG, "Message with uid " + message.getUid() + " has not yet been downloaded");
unsyncedMessages.add(message);
} else {
if (K9.DEBUG)
Log.v(K9.LOG_TAG, "Message with uid " + message.getUid() + " is partially or fully downloaded");
// Store the updated message locally
localFolder.appendMessages(Collections.singletonList(message));
localMessage = localFolder.getMessage(message.getUid());
localMessage.setFlag(Flag.X_DOWNLOADED_FULL, message.isSet(Flag.X_DOWNLOADED_FULL));
localMessage.setFlag(Flag.X_DOWNLOADED_PARTIAL, message.isSet(Flag.X_DOWNLOADED_PARTIAL));
for (MessagingListener l : getListeners()) {
l.synchronizeMailboxAddOrUpdateMessage(account, folder, localMessage);
if (!localMessage.isSet(Flag.SEEN)) {
l.synchronizeMailboxNewMessage(account, folder, localMessage);
}
}
}
}
} else if (!localMessage.isSet(Flag.DELETED)) {
if (K9.DEBUG)
Log.v(K9.LOG_TAG, "Message with uid " + message.getUid() + " is present in the local store");
if (!localMessage.isSet(Flag.X_DOWNLOADED_FULL) && !localMessage.isSet(Flag.X_DOWNLOADED_PARTIAL)) {
if (K9.DEBUG)
Log.v(K9.LOG_TAG, "Message with uid " + message.getUid()
+ " is not downloaded, even partially; trying again");
unsyncedMessages.add(message);
} else {
String newPushState = remoteFolder.getNewPushState(localFolder.getPushState(), message);
if (newPushState != null) {
localFolder.setPushState(newPushState);
}
syncFlagMessages.add(message);
}
}
}
private <T extends Message> void fetchUnsyncedMessages(final Account account, final Folder<T> remoteFolder,
final LocalFolder localFolder,
List<T> unsyncedMessages,
final List<Message> smallMessages,
final List<Message> largeMessages,
final AtomicInteger progress,
final int todo,
FetchProfile fp) throws MessagingException {
final String folder = remoteFolder.getName();
final Date earliestDate = account.getEarliestPollDate();
/*
* Messages to be batch written
*/
final List<Message> chunk = new ArrayList<Message>(UNSYNC_CHUNK_SIZE);
remoteFolder.fetch(unsyncedMessages, fp,
new MessageRetrievalListener<T>() {
@Override
public void messageFinished(T message, int number, int ofTotal) {
try {
String newPushState = remoteFolder.getNewPushState(localFolder.getPushState(), message);
if (newPushState != null) {
localFolder.setPushState(newPushState);
}
if (message.isSet(Flag.DELETED) || message.olderThan(earliestDate)) {
if (K9.DEBUG) {
if (message.isSet(Flag.DELETED)) {
Log.v(K9.LOG_TAG, "Newly downloaded message " + account + ":" + folder + ":" + message.getUid()
+ " was marked deleted on server, skipping");
} else {
Log.d(K9.LOG_TAG, "Newly downloaded message " + message.getUid() + " is older than "
+ earliestDate + ", skipping");
}
}
progress.incrementAndGet();
for (MessagingListener l : getListeners()) {
l.synchronizeMailboxProgress(account, folder, progress.get(), todo);
}
return;
}
if (account.getMaximumAutoDownloadMessageSize() > 0 &&
message.getSize() > account.getMaximumAutoDownloadMessageSize()) {
largeMessages.add(message);
} else {
smallMessages.add(message);
}
// And include it in the view
if (message.getSubject() != null && message.getFrom() != null) {
/*
* We check to make sure that we got something worth
* showing (subject and from) because some protocols
* (POP) may not be able to give us headers for
* ENVELOPE, only size.
*/
// keep message for delayed storing
chunk.add(message);
if (chunk.size() >= UNSYNC_CHUNK_SIZE) {
writeUnsyncedMessages(chunk, localFolder, account, folder);
chunk.clear();
}
}
} catch (Exception e) {
Log.e(K9.LOG_TAG, "Error while storing downloaded message.", e);
addErrorMessage(account, null, e);
}
}
@Override
public void messageStarted(String uid, int number, int ofTotal) {}
@Override
public void messagesFinished(int total) {
// FIXME this method is almost never invoked by various Stores! Don't rely on it unless fixed!!
}
});
if (!chunk.isEmpty()) {
writeUnsyncedMessages(chunk, localFolder, account, folder);
chunk.clear();
}
}
/**
* Actual storing of messages
*
* <br>
* FIXME: <strong>This method should really be moved in the above MessageRetrievalListener once {@link MessageRetrievalListener#messagesFinished(int)} is properly invoked by various stores</strong>
*
* @param messages Never <code>null</code>.
* @param localFolder
* @param account
* @param folder
*/
private void writeUnsyncedMessages(final List<Message> messages, final LocalFolder localFolder, final Account account, final String folder) {
if (K9.DEBUG) {
Log.v(K9.LOG_TAG, "Batch writing " + Integer.toString(messages.size()) + " messages");
}
try {
// Store the new message locally
localFolder.appendMessages(messages);
for (final Message message : messages) {
final LocalMessage localMessage = localFolder.getMessage(message.getUid());
syncFlags(localMessage, message);
if (K9.DEBUG)
Log.v(K9.LOG_TAG, "About to notify listeners that we got a new unsynced message "
+ account + ":" + folder + ":" + message.getUid());
for (final MessagingListener l : getListeners()) {
l.synchronizeMailboxAddOrUpdateMessage(account, folder, localMessage);
}
}
} catch (final Exception e) {
Log.e(K9.LOG_TAG, "Error while storing downloaded message.", e);
addErrorMessage(account, null, e);
}
}
private boolean shouldImportMessage(final Account account, final String folder, final Message message, final AtomicInteger progress, final Date earliestDate) {
if (account.isSearchByDateCapable() && message.olderThan(earliestDate)) {
if (K9.DEBUG) {
Log.d(K9.LOG_TAG, "Message " + message.getUid() + " is older than "
+ earliestDate + ", hence not saving");
}
return false;
}
return true;
}
private <T extends Message> void downloadSmallMessages(final Account account, final Folder<T> remoteFolder,
final LocalFolder localFolder,
List<T> smallMessages,
final AtomicInteger progress,
final int unreadBeforeStart,
final AtomicInteger newMessages,
final int todo,
FetchProfile fp) throws MessagingException {
final String folder = remoteFolder.getName();
final Date earliestDate = account.getEarliestPollDate();
if (K9.DEBUG)
Log.d(K9.LOG_TAG, "SYNC: Fetching small messages for folder " + folder);
remoteFolder.fetch(smallMessages,
fp, new MessageRetrievalListener<T>() {
@Override
public void messageFinished(final T message, int number, int ofTotal) {
try {
if (!shouldImportMessage(account, folder, message, progress, earliestDate)) {
progress.incrementAndGet();
return;
}
// Store the updated message locally
final LocalMessage localMessage = localFolder.storeSmallMessage(message, new Runnable() {
@Override
public void run() {
progress.incrementAndGet();
}
});
// Increment the number of "new messages" if the newly downloaded message is
// not marked as read.
if (!localMessage.isSet(Flag.SEEN)) {
newMessages.incrementAndGet();
}
if (K9.DEBUG)
Log.v(K9.LOG_TAG, "About to notify listeners that we got a new small message "
+ account + ":" + folder + ":" + message.getUid());
// Update the listener with what we've found
for (MessagingListener l : getListeners()) {
l.synchronizeMailboxAddOrUpdateMessage(account, folder, localMessage);
l.synchronizeMailboxProgress(account, folder, progress.get(), todo);
if (!localMessage.isSet(Flag.SEEN)) {
l.synchronizeMailboxNewMessage(account, folder, localMessage);
}
}
// Send a notification of this message
if (shouldNotifyForMessage(account, localFolder, message)) {
// Notify with the localMessage so that we don't have to recalculate the content preview.
notifyAccount(context, account, localMessage, unreadBeforeStart);
}
} catch (MessagingException me) {
addErrorMessage(account, null, me);
Log.e(K9.LOG_TAG, "SYNC: fetch small messages", me);
}
}
@Override
public void messageStarted(String uid, int number, int ofTotal) {}
@Override
public void messagesFinished(int total) {}
});
if (K9.DEBUG)
Log.d(K9.LOG_TAG, "SYNC: Done fetching small messages for folder " + folder);
}
private <T extends Message> void downloadLargeMessages(final Account account, final Folder<T> remoteFolder,
final LocalFolder localFolder,
List<T> largeMessages,
final AtomicInteger progress,
final int unreadBeforeStart,
final AtomicInteger newMessages,
final int todo,
FetchProfile fp) throws MessagingException {
final String folder = remoteFolder.getName();
final Date earliestDate = account.getEarliestPollDate();
if (K9.DEBUG)
Log.d(K9.LOG_TAG, "SYNC: Fetching large messages for folder " + folder);
remoteFolder.fetch(largeMessages, fp, null);
for (T message : largeMessages) {
if (!shouldImportMessage(account, folder, message, progress, earliestDate)) {
progress.incrementAndGet();
continue;
}
if (message.getBody() == null) {
/*
* The provider was unable to get the structure of the message, so
* we'll download a reasonable portion of the message and mark it as
* incomplete so the entire thing can be downloaded later if the user
* wishes to download it.
*/
fp.clear();
fp.add(FetchProfile.Item.BODY_SANE);
/*
* TODO a good optimization here would be to make sure that all Stores set
* the proper size after this fetch and compare the before and after size. If
* they equal we can mark this SYNCHRONIZED instead of PARTIALLY_SYNCHRONIZED
*/
remoteFolder.fetch(Collections.singletonList(message), fp, null);
// Store the updated message locally
localFolder.appendMessages(Collections.singletonList(message));
Message localMessage = localFolder.getMessage(message.getUid());
// Certain (POP3) servers give you the whole message even when you ask for only the first x Kb
if (!message.isSet(Flag.X_DOWNLOADED_FULL)) {
/*
* Mark the message as fully downloaded if the message size is smaller than
* the account's autodownload size limit, otherwise mark as only a partial
* download. This will prevent the system from downloading the same message
* twice.
*
* If there is no limit on autodownload size, that's the same as the message
* being smaller than the max size
*/
if (account.getMaximumAutoDownloadMessageSize() == 0 || message.getSize() < account.getMaximumAutoDownloadMessageSize()) {
localMessage.setFlag(Flag.X_DOWNLOADED_FULL, true);
} else {
// Set a flag indicating that the message has been partially downloaded and
// is ready for view.
localMessage.setFlag(Flag.X_DOWNLOADED_PARTIAL, true);
}
}
} else {
/*
* We have a structure to deal with, from which
* we can pull down the parts we want to actually store.
* Build a list of parts we are interested in. Text parts will be downloaded
* right now, attachments will be left for later.
*/
Set<Part> viewables = MessageExtractor.collectTextParts(message);
/*
* Now download the parts we're interested in storing.
*/
for (Part part : viewables) {
remoteFolder.fetchPart(message, part, null);
}
// Store the updated message locally
localFolder.appendMessages(Collections.singletonList(message));
Message localMessage = localFolder.getMessage(message.getUid());
// Set a flag indicating this message has been fully downloaded and can be
// viewed.
localMessage.setFlag(Flag.X_DOWNLOADED_PARTIAL, true);
}
if (K9.DEBUG)
Log.v(K9.LOG_TAG, "About to notify listeners that we got a new large message "
+ account + ":" + folder + ":" + message.getUid());
// Update the listener with what we've found
progress.incrementAndGet();
// TODO do we need to re-fetch this here?
LocalMessage localMessage = localFolder.getMessage(message.getUid());
// Increment the number of "new messages" if the newly downloaded message is
// not marked as read.
if (!localMessage.isSet(Flag.SEEN)) {
newMessages.incrementAndGet();
}
for (MessagingListener l : getListeners()) {
l.synchronizeMailboxAddOrUpdateMessage(account, folder, localMessage);
l.synchronizeMailboxProgress(account, folder, progress.get(), todo);
if (!localMessage.isSet(Flag.SEEN)) {
l.synchronizeMailboxNewMessage(account, folder, localMessage);
}
}
// Send a notification of this message
if (shouldNotifyForMessage(account, localFolder, message)) {
// Notify with the localMessage so that we don't have to recalculate the content preview.
notifyAccount(context, account, localMessage, unreadBeforeStart);
}
} // for large messages
if (K9.DEBUG)
Log.d(K9.LOG_TAG, "SYNC: Done fetching large messages for folder " + folder);
}
private void refreshLocalMessageFlags(final Account account, final Folder remoteFolder,
final LocalFolder localFolder,
List<Message> syncFlagMessages,
final AtomicInteger progress,
final int todo
) throws MessagingException {
final String folder = remoteFolder.getName();
if (remoteFolder.supportsFetchingFlags()) {
if (K9.DEBUG)
Log.d(K9.LOG_TAG, "SYNC: About to sync flags for "
+ syncFlagMessages.size() + " remote messages for folder " + folder);
FetchProfile fp = new FetchProfile();
fp.add(FetchProfile.Item.FLAGS);
List<Message> undeletedMessages = new LinkedList<Message>();
for (Message message : syncFlagMessages) {
if (!message.isSet(Flag.DELETED)) {
undeletedMessages.add(message);
}
}
remoteFolder.fetch(undeletedMessages, fp, null);
for (Message remoteMessage : syncFlagMessages) {
LocalMessage localMessage = localFolder.getMessage(remoteMessage.getUid());
boolean messageChanged = syncFlags(localMessage, remoteMessage);
if (messageChanged) {
boolean shouldBeNotifiedOf = false;
if (localMessage.isSet(Flag.DELETED) || isMessageSuppressed(localMessage)) {
for (MessagingListener l : getListeners()) {
l.synchronizeMailboxRemovedMessage(account, folder, localMessage);
}
} else {
for (MessagingListener l : getListeners()) {
l.synchronizeMailboxAddOrUpdateMessage(account, folder, localMessage);
}
if (shouldNotifyForMessage(account, localFolder, localMessage)) {
shouldBeNotifiedOf = true;
}
}
// we're only interested in messages that need removing
if (!shouldBeNotifiedOf) {
NotificationData data = getNotificationData(account, null);
if (data != null) {
synchronized (data) {
MessageReference ref = localMessage.makeMessageReference();
if (data.removeMatchingMessage(context, ref)) {
notifyAccountWithDataLocked(context, account, null, data);
}
}
}
}
}
progress.incrementAndGet();
for (MessagingListener l : getListeners()) {
l.synchronizeMailboxProgress(account, folder, progress.get(), todo);
}
}
}
}
private boolean syncFlags(LocalMessage localMessage, Message remoteMessage) throws MessagingException {
boolean messageChanged = false;
if (localMessage == null || localMessage.isSet(Flag.DELETED)) {
return false;
}
if (remoteMessage.isSet(Flag.DELETED)) {
if (localMessage.getFolder().syncRemoteDeletions()) {
localMessage.setFlag(Flag.DELETED, true);
messageChanged = true;
}
} else {
for (Flag flag : MessagingController.SYNC_FLAGS) {
if (remoteMessage.isSet(flag) != localMessage.isSet(flag)) {
localMessage.setFlag(flag, remoteMessage.isSet(flag));
messageChanged = true;
}
}
}
return messageChanged;
}
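/*
 * Example for getRootCauseMessage below (illustrative values): for
 * new MessagingException("sync failed", new UnknownHostException("mail.example.com"))
 * the root cause is the UnknownHostException, so the result is
 * "UnknownHostException: mail.example.com"; a bare MessagingException
 * returns its own message unchanged.
 */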
private String getRootCauseMessage(Throwable t) {
Throwable rootCause = t;
Throwable nextCause = rootCause;
do {
nextCause = rootCause.getCause();
if (nextCause != null) {
rootCause = nextCause;
}
} while (nextCause != null);
if (rootCause instanceof MessagingException) {
return rootCause.getMessage();
} else {
// Remove the namespace on the exception so we have a fighting chance of seeing more of the error in the
// notification.
return (rootCause.getLocalizedMessage() != null)
? (rootCause.getClass().getSimpleName() + ": " + rootCause.getLocalizedMessage())
: rootCause.getClass().getSimpleName();
}
}
private void queuePendingCommand(Account account, PendingCommand command) {
try {
LocalStore localStore = account.getLocalStore();
localStore.addPendingCommand(command);
} catch (Exception e) {
addErrorMessage(account, null, e);
throw new RuntimeException("Unable to enqueue pending command", e);
}
}
private void processPendingCommands(final Account account) {
putBackground("processPendingCommands", null, new Runnable() {
@Override
public void run() {
try {
processPendingCommandsSynchronous(account);
} catch (UnavailableStorageException e) {
Log.i(K9.LOG_TAG, "Failed to process pending command because storage is not available - trying again later.");
throw new UnavailableAccountException(e);
} catch (MessagingException me) {
Log.e(K9.LOG_TAG, "processPendingCommands", me);
addErrorMessage(account, null, me);
/*
* Ignore any exceptions from the commands. Commands will be processed
* on the next round.
*/
}
}
});
}
private void processPendingCommandsSynchronous(Account account) throws MessagingException {
LocalStore localStore = account.getLocalStore();
List<PendingCommand> commands = localStore.getPendingCommands();
int progress = 0;
int todo = commands.size();
if (todo == 0) {
return;
}
for (MessagingListener l : getListeners()) {
l.pendingCommandsProcessing(account);
l.synchronizeMailboxProgress(account, null, progress, todo);
}
PendingCommand processingCommand = null;
try {
for (PendingCommand command : commands) {
processingCommand = command;
if (K9.DEBUG)
Log.d(K9.LOG_TAG, "Processing pending command '" + command + "'");
String[] components = command.command.split("\\.");
String commandTitle = components[components.length - 1];
for (MessagingListener l : getListeners()) {
l.pendingCommandStarted(account, commandTitle);
}
/*
* We specifically do not catch any exceptions here. If a command fails it is
* most likely due to a server or IO error and it must be retried before any
* other command processes. This maintains the order of the commands.
*/
try {
if (PENDING_COMMAND_APPEND.equals(command.command)) {
processPendingAppend(command, account);
} else if (PENDING_COMMAND_SET_FLAG_BULK.equals(command.command)) {
processPendingSetFlag(command, account);
} else if (PENDING_COMMAND_SET_FLAG.equals(command.command)) {
processPendingSetFlagOld(command, account);
} else if (PENDING_COMMAND_MARK_ALL_AS_READ.equals(command.command)) {
processPendingMarkAllAsRead(command, account);
} else if (PENDING_COMMAND_MOVE_OR_COPY_BULK.equals(command.command)) {
processPendingMoveOrCopyOld2(command, account);
} else if (PENDING_COMMAND_MOVE_OR_COPY_BULK_NEW.equals(command.command)) {
processPendingMoveOrCopy(command, account);
} else if (PENDING_COMMAND_MOVE_OR_COPY.equals(command.command)) {
processPendingMoveOrCopyOld(command, account);
} else if (PENDING_COMMAND_EMPTY_TRASH.equals(command.command)) {
processPendingEmptyTrash(command, account);
} else if (PENDING_COMMAND_EXPUNGE.equals(command.command)) {
processPendingExpunge(command, account);
}
localStore.removePendingCommand(command);
if (K9.DEBUG)
Log.d(K9.LOG_TAG, "Done processing pending command '" + command + "'");
} catch (MessagingException me) {
if (me.isPermanentFailure()) {
addErrorMessage(account, null, me);
Log.e(K9.LOG_TAG, "Failure of command '" + command + "' was permanent, removing command from queue");
localStore.removePendingCommand(processingCommand);
} else {
throw me;
}
} finally {
progress++;
for (MessagingListener l : getListeners()) {
l.synchronizeMailboxProgress(account, null, progress, todo);
l.pendingCommandCompleted(account, commandTitle);
}
}
}
} catch (MessagingException me) {
notifyUserIfCertificateProblem(context, me, account, true);
addErrorMessage(account, null, me);
Log.e(K9.LOG_TAG, "Could not process command '" + processingCommand + "'", me);
throw me;
} finally {
for (MessagingListener l : getListeners()) {
l.pendingCommandsFinished(account);
}
}
}
/**
* Process a pending append message command. This command uploads a local message to the
* server, first checking to be sure that the server message is not newer than
* the local message. Once the local message is successfully processed it is deleted so
* that the server message will be synchronized down without an additional copy being
* created.
* TODO update the local message UID instead of deleting it
*
* @param command arguments = (String folder, String uid)
* @param account
* @throws MessagingException
*/
private void processPendingAppend(PendingCommand command, Account account)
throws MessagingException {
Folder remoteFolder = null;
LocalFolder localFolder = null;
try {
String folder = command.arguments[0];
String uid = command.arguments[1];
if (account.getErrorFolderName().equals(folder)) {
return;
}
LocalStore localStore = account.getLocalStore();
localFolder = localStore.getFolder(folder);
LocalMessage localMessage = localFolder.getMessage(uid);
if (localMessage == null) {
return;
}
Store remoteStore = account.getRemoteStore();
remoteFolder = remoteStore.getFolder(folder);
if (!remoteFolder.exists()) {
if (!remoteFolder.create(FolderType.HOLDS_MESSAGES)) {
return;
}
}
remoteFolder.open(Folder.OPEN_MODE_RW);
if (remoteFolder.getMode() != Folder.OPEN_MODE_RW) {
return;
}
Message remoteMessage = null;
if (!localMessage.getUid().startsWith(K9.LOCAL_UID_PREFIX)) {
remoteMessage = remoteFolder.getMessage(localMessage.getUid());
}
if (remoteMessage == null) {
if (localMessage.isSet(Flag.X_REMOTE_COPY_STARTED)) {
Log.w(K9.LOG_TAG, "Local message with uid " + localMessage.getUid() +
" has flag " + Flag.X_REMOTE_COPY_STARTED + " already set, checking for remote message with " +
" same message id");
String rUid = remoteFolder.getUidFromMessageId(localMessage);
if (rUid != null) {
Log.w(K9.LOG_TAG, "Local message has flag " + Flag.X_REMOTE_COPY_STARTED + " already set, and there is a remote message with " +
" uid " + rUid + ", assuming message was already copied and aborting this copy");
String oldUid = localMessage.getUid();
localMessage.setUid(rUid);
localFolder.changeUid(localMessage);
for (MessagingListener l : getListeners()) {
l.messageUidChanged(account, folder, oldUid, localMessage.getUid());
}
return;
} else {
Log.w(K9.LOG_TAG, "No remote message with message-id found, proceeding with append");
}
}
/*
* If the message does not exist remotely we just upload it and then
* update our local copy with the new uid.
*/
FetchProfile fp = new FetchProfile();
fp.add(FetchProfile.Item.BODY);
localFolder.fetch(Collections.singletonList(localMessage), fp, null);
String oldUid = localMessage.getUid();
localMessage.setFlag(Flag.X_REMOTE_COPY_STARTED, true);
remoteFolder.appendMessages(Collections.singletonList(localMessage));
localFolder.changeUid(localMessage);
for (MessagingListener l : getListeners()) {
l.messageUidChanged(account, folder, oldUid, localMessage.getUid());
}
} else {
/*
* If the remote message exists we need to determine which copy to keep.
*/
/*
* See if the remote message is newer than ours.
*/
FetchProfile fp = new FetchProfile();
fp.add(FetchProfile.Item.ENVELOPE);
remoteFolder.fetch(Collections.singletonList(remoteMessage), fp, null);
Date localDate = localMessage.getInternalDate();
Date remoteDate = remoteMessage.getInternalDate();
if (remoteDate != null && remoteDate.compareTo(localDate) > 0) {
/*
* If the remote message is newer than ours we'll just
* delete ours and move on. A sync will get the server message
* if we need to be able to see it.
*/
localMessage.destroy();
} else {
/*
* Otherwise we'll upload our message and then delete the remote message.
*/
fp = new FetchProfile();
fp.add(FetchProfile.Item.BODY);
localFolder.fetch(Collections.singletonList(localMessage), fp, null);
String oldUid = localMessage.getUid();
localMessage.setFlag(Flag.X_REMOTE_COPY_STARTED, true);
remoteFolder.appendMessages(Collections.singletonList(localMessage));
localFolder.changeUid(localMessage);
for (MessagingListener l : getListeners()) {
l.messageUidChanged(account, folder, oldUid, localMessage.getUid());
}
if (remoteDate != null) {
remoteMessage.setFlag(Flag.DELETED, true);
if (Expunge.EXPUNGE_IMMEDIATELY == account.getExpungePolicy()) {
remoteFolder.expunge();
}
}
}
}
} finally {
closeFolder(remoteFolder);
closeFolder(localFolder);
}
}
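/**
* Queue a pending command to move or copy the given messages on the server.
*/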
private void queueMoveOrCopy(Account account, String srcFolder, String destFolder, boolean isCopy, String[] uids) {
if (account.getErrorFolderName().equals(srcFolder)) {
return;
}
PendingCommand command = new PendingCommand();
command.command = PENDING_COMMAND_MOVE_OR_COPY_BULK_NEW;
int length = 4 + uids.length;
command.arguments = new String[length];
command.arguments[0] = srcFolder;
command.arguments[1] = destFolder;
command.arguments[2] = Boolean.toString(isCopy);
command.arguments[3] = Boolean.toString(false);
System.arraycopy(uids, 0, command.arguments, 4, uids.length);
queuePendingCommand(account, command);
}
private void queueMoveOrCopy(Account account, String srcFolder, String destFolder, boolean isCopy, String[] uids, Map<String, String> uidMap) {
if (uidMap == null || uidMap.isEmpty()) {
queueMoveOrCopy(account, srcFolder, destFolder, isCopy, uids);
} else {
if (account.getErrorFolderName().equals(srcFolder)) {
return;
}
PendingCommand command = new PendingCommand();
command.command = PENDING_COMMAND_MOVE_OR_COPY_BULK_NEW;
int length = 4 + uidMap.keySet().size() + uidMap.values().size();
command.arguments = new String[length];
command.arguments[0] = srcFolder;
command.arguments[1] = destFolder;
command.arguments[2] = Boolean.toString(isCopy);
command.arguments[3] = Boolean.toString(true);
System.arraycopy(uidMap.keySet().toArray(), 0, command.arguments, 4, uidMap.keySet().size());
System.arraycopy(uidMap.values().toArray(), 0, command.arguments, 4 + uidMap.keySet().size(), uidMap.values().size());
queuePendingCommand(account, command);
}
}
/**
* Convert pending command to new format and call
* {@link #processPendingMoveOrCopy(PendingCommand, Account)}.
*
* <p>
* TODO: This method is obsolete and is only for transition from K-9 4.0 to K-9 4.2
* Eventually, it should be removed.
* </p>
*
* @param command
* Pending move/copy command in old format.
* @param account
* The account the pending command belongs to.
*
* @throws MessagingException
* In case of an error.
*/
private void processPendingMoveOrCopyOld2(PendingCommand command, Account account)
throws MessagingException {
PendingCommand newCommand = new PendingCommand();
int len = command.arguments.length;
newCommand.command = PENDING_COMMAND_MOVE_OR_COPY_BULK_NEW;
newCommand.arguments = new String[len + 1];
newCommand.arguments[0] = command.arguments[0];
newCommand.arguments[1] = command.arguments[1];
newCommand.arguments[2] = command.arguments[2];
newCommand.arguments[3] = Boolean.toString(false);
System.arraycopy(command.arguments, 3, newCommand.arguments, 4, len - 3);
processPendingMoveOrCopy(newCommand, account);
}
/**
* Process a pending move or copy command.
*
* @param command arguments = (String srcFolder, String destFolder, String isCopy, String hasNewUids, String... uids)
* @param account
* @throws MessagingException
*/
private void processPendingMoveOrCopy(PendingCommand command, Account account)
throws MessagingException {
Folder remoteSrcFolder = null;
Folder remoteDestFolder = null;
LocalFolder localDestFolder = null;
try {
String srcFolder = command.arguments[0];
if (account.getErrorFolderName().equals(srcFolder)) {
return;
}
String destFolder = command.arguments[1];
String isCopyS = command.arguments[2];
String hasNewUidsS = command.arguments[3];
boolean hasNewUids = false;
if (hasNewUidsS != null) {
hasNewUids = Boolean.parseBoolean(hasNewUidsS);
}
Store remoteStore = account.getRemoteStore();
remoteSrcFolder = remoteStore.getFolder(srcFolder);
Store localStore = account.getLocalStore();
localDestFolder = (LocalFolder) localStore.getFolder(destFolder);
List<Message> messages = new ArrayList<Message>();
/*
* We split the localUidMap into two parts when sending the command; here we reassemble it.
*/
Map<String, String> localUidMap = new HashMap<String, String>();
if (hasNewUids) {
int offset = (command.arguments.length - 4) / 2;
for (int i = 4; i < 4 + offset; i++) {
localUidMap.put(command.arguments[i], command.arguments[i + offset]);
String uid = command.arguments[i];
if (!uid.startsWith(K9.LOCAL_UID_PREFIX)) {
messages.add(remoteSrcFolder.getMessage(uid));
}
}
} else {
for (int i = 4; i < command.arguments.length; i++) {
String uid = command.arguments[i];
if (!uid.startsWith(K9.LOCAL_UID_PREFIX)) {
messages.add(remoteSrcFolder.getMessage(uid));
}
}
}
boolean isCopy = false;
if (isCopyS != null) {
isCopy = Boolean.parseBoolean(isCopyS);
}
if (!remoteSrcFolder.exists()) {
throw new MessagingException("processingPendingMoveOrCopy: remoteFolder " + srcFolder + " does not exist", true);
}
remoteSrcFolder.open(Folder.OPEN_MODE_RW);
if (remoteSrcFolder.getMode() != Folder.OPEN_MODE_RW) {
throw new MessagingException("processingPendingMoveOrCopy: could not open remoteSrcFolder " + srcFolder + " read/write", true);
}
if (K9.DEBUG)
Log.d(K9.LOG_TAG, "processingPendingMoveOrCopy: source folder = " + srcFolder
+ ", " + messages.size() + " messages, destination folder = " + destFolder + ", isCopy = " + isCopy);
Map<String, String> remoteUidMap = null;
if (!isCopy && destFolder.equals(account.getTrashFolderName())) {
if (K9.DEBUG)
Log.d(K9.LOG_TAG, "processingPendingMoveOrCopy doing special case for deleting message");
String destFolderName = destFolder;
if (K9.FOLDER_NONE.equals(destFolderName)) {
destFolderName = null;
}
remoteSrcFolder.delete(messages, destFolderName);
} else {
remoteDestFolder = remoteStore.getFolder(destFolder);
if (isCopy) {
remoteUidMap = remoteSrcFolder.copyMessages(messages, remoteDestFolder);
} else {
remoteUidMap = remoteSrcFolder.moveMessages(messages, remoteDestFolder);
}
}
if (!isCopy && Expunge.EXPUNGE_IMMEDIATELY == account.getExpungePolicy()) {
if (K9.DEBUG)
Log.i(K9.LOG_TAG, "processingPendingMoveOrCopy expunging folder " + account.getDescription() + ":" + srcFolder);
remoteSrcFolder.expunge();
}
/*
* This next part brings the local UIDs of the local destination folder
* up to date with the remote UIDs of the remote destination folder.
*/
if (!localUidMap.isEmpty() && remoteUidMap != null && !remoteUidMap.isEmpty()) {
for (Map.Entry<String, String> entry : remoteUidMap.entrySet()) {
String remoteSrcUid = entry.getKey();
String localDestUid = localUidMap.get(remoteSrcUid);
String newUid = entry.getValue();
Message localDestMessage = localDestFolder.getMessage(localDestUid);
if (localDestMessage != null) {
localDestMessage.setUid(newUid);
localDestFolder.changeUid((LocalMessage)localDestMessage);
for (MessagingListener l : getListeners()) {
l.messageUidChanged(account, destFolder, localDestUid, newUid);
}
}
}
}
} finally {
closeFolder(remoteSrcFolder);
closeFolder(remoteDestFolder);
}
}
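/**
* Queue a pending command to change a flag on the given messages on the server, then
* trigger pending command processing.
*/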
private void queueSetFlag(final Account account, final String folderName, final String newState, final String flag, final String[] uids) {
putBackground("queueSetFlag " + account.getDescription() + ":" + folderName, null, new Runnable() {
@Override
public void run() {
PendingCommand command = new PendingCommand();
command.command = PENDING_COMMAND_SET_FLAG_BULK;
int length = 3 + uids.length;
command.arguments = new String[length];
command.arguments[0] = folderName;
command.arguments[1] = newState;
command.arguments[2] = flag;
System.arraycopy(uids, 0, command.arguments, 3, uids.length);
queuePendingCommand(account, command);
processPendingCommands(account);
}
});
}
/**
* Processes a pending flag change command.
*
* @param command arguments = (String folder, String newState, String flag, String... uids)
* @param account
*/
private void processPendingSetFlag(PendingCommand command, Account account)
throws MessagingException {
String folder = command.arguments[0];
if (account.getErrorFolderName().equals(folder)) {
return;
}
boolean newState = Boolean.parseBoolean(command.arguments[1]);
Flag flag = Flag.valueOf(command.arguments[2]);
Store remoteStore = account.getRemoteStore();
Folder remoteFolder = remoteStore.getFolder(folder);
if (!remoteFolder.exists() || !remoteFolder.isFlagSupported(flag)) {
return;
}
try {
remoteFolder.open(Folder.OPEN_MODE_RW);
if (remoteFolder.getMode() != Folder.OPEN_MODE_RW) {
return;
}
List<Message> messages = new ArrayList<Message>();
for (int i = 3; i < command.arguments.length; i++) {
String uid = command.arguments[i];
if (!uid.startsWith(K9.LOCAL_UID_PREFIX)) {
messages.add(remoteFolder.getMessage(uid));
}
}
if (messages.isEmpty()) {
return;
}
remoteFolder.setFlags(messages, Collections.singleton(flag), newState);
} finally {
closeFolder(remoteFolder);
}
}
// TODO: This method is obsolete and is only for transition from K-9 2.0 to K-9 2.1
// Eventually, it should be removed
private void processPendingSetFlagOld(PendingCommand command, Account account)
throws MessagingException {
String folder = command.arguments[0];
String uid = command.arguments[1];
if (account.getErrorFolderName().equals(folder)) {
return;
}
if (K9.DEBUG)
Log.d(K9.LOG_TAG, "processPendingSetFlagOld: folder = " + folder + ", uid = " + uid);
boolean newState = Boolean.parseBoolean(command.arguments[2]);
Flag flag = Flag.valueOf(command.arguments[3]);
Folder remoteFolder = null;
try {
Store remoteStore = account.getRemoteStore();
remoteFolder = remoteStore.getFolder(folder);
if (!remoteFolder.exists()) {
return;
}
remoteFolder.open(Folder.OPEN_MODE_RW);
if (remoteFolder.getMode() != Folder.OPEN_MODE_RW) {
return;
}
Message remoteMessage = null;
if (!uid.startsWith(K9.LOCAL_UID_PREFIX)) {
remoteMessage = remoteFolder.getMessage(uid);
}
if (remoteMessage == null) {
return;
}
remoteMessage.setFlag(flag, newState);
} finally {
closeFolder(remoteFolder);
}
}
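/**
* Queue a pending command to expunge the given folder on the server, then trigger
* pending command processing.
*/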
private void queueExpunge(final Account account, final String folderName) {
putBackground("queueExpunge " + account.getDescription() + ":" + folderName, null, new Runnable() {
@Override
public void run() {
PendingCommand command = new PendingCommand();
command.command = PENDING_COMMAND_EXPUNGE;
command.arguments = new String[1];
command.arguments[0] = folderName;
queuePendingCommand(account, command);
processPendingCommands(account);
}
});
}
private void processPendingExpunge(PendingCommand command, Account account)
throws MessagingException {
String folder = command.arguments[0];
if (account.getErrorFolderName().equals(folder)) {
return;
}
if (K9.DEBUG)
Log.d(K9.LOG_TAG, "processPendingExpunge: folder = " + folder);
Store remoteStore = account.getRemoteStore();
Folder remoteFolder = remoteStore.getFolder(folder);
try {
if (!remoteFolder.exists()) {
return;
}
remoteFolder.open(Folder.OPEN_MODE_RW);
if (remoteFolder.getMode() != Folder.OPEN_MODE_RW) {
return;
}
remoteFolder.expunge();
if (K9.DEBUG)
Log.d(K9.LOG_TAG, "processPendingExpunge: complete for folder = " + folder);
} finally {
closeFolder(remoteFolder);
}
}
// TODO: This method is obsolete and is only for transition from K-9 2.0 to K-9 2.1
// Eventually, it should be removed
private void processPendingMoveOrCopyOld(PendingCommand command, Account account)
throws MessagingException {
String srcFolder = command.arguments[0];
String uid = command.arguments[1];
String destFolder = command.arguments[2];
String isCopyS = command.arguments[3];
boolean isCopy = false;
if (isCopyS != null) {
isCopy = Boolean.parseBoolean(isCopyS);
}
if (account.getErrorFolderName().equals(srcFolder)) {
return;
}
Store remoteStore = account.getRemoteStore();
Folder remoteSrcFolder = remoteStore.getFolder(srcFolder);
Folder remoteDestFolder = remoteStore.getFolder(destFolder);
if (!remoteSrcFolder.exists()) {
throw new MessagingException("processPendingMoveOrCopyOld: remoteFolder " + srcFolder + " does not exist", true);
}
remoteSrcFolder.open(Folder.OPEN_MODE_RW);
if (remoteSrcFolder.getMode() != Folder.OPEN_MODE_RW) {
throw new MessagingException("processPendingMoveOrCopyOld: could not open remoteSrcFolder " + srcFolder + " read/write", true);
}
Message remoteMessage = null;
if (!uid.startsWith(K9.LOCAL_UID_PREFIX)) {
remoteMessage = remoteSrcFolder.getMessage(uid);
}
if (remoteMessage == null) {
throw new MessagingException("processPendingMoveOrCopyOld: remoteMessage " + uid + " does not exist", true);
}
if (K9.DEBUG)
Log.d(K9.LOG_TAG, "processPendingMoveOrCopyOld: source folder = " + srcFolder
+ ", uid = " + uid + ", destination folder = " + destFolder + ", isCopy = " + isCopy);
if (!isCopy && destFolder.equals(account.getTrashFolderName())) {
if (K9.DEBUG)
Log.d(K9.LOG_TAG, "processPendingMoveOrCopyOld doing special case for deleting message");
remoteMessage.delete(account.getTrashFolderName());
remoteSrcFolder.close();
return;
}
remoteDestFolder.open(Folder.OPEN_MODE_RW);
if (remoteDestFolder.getMode() != Folder.OPEN_MODE_RW) {
throw new MessagingException("processPendingMoveOrCopyOld: could not open remoteDestFolder " + srcFolder + " read/write", true);
}
if (isCopy) {
remoteSrcFolder.copyMessages(Collections.singletonList(remoteMessage), remoteDestFolder);
} else {
remoteSrcFolder.moveMessages(Collections.singletonList(remoteMessage), remoteDestFolder);
}
remoteSrcFolder.close();
remoteDestFolder.close();
}
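/**
* Process a pending mark-all-as-read command. Marks all messages in the folder as
* seen locally, then mirrors the change to the server if the remote folder supports
* the SEEN flag.
*
* @param command arguments = (String folder)
* @param account
* @throws MessagingException
*/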
private void processPendingMarkAllAsRead(PendingCommand command, Account account) throws MessagingException {
String folder = command.arguments[0];
Folder remoteFolder = null;
LocalFolder localFolder = null;
try {
Store localStore = account.getLocalStore();
localFolder = (LocalFolder) localStore.getFolder(folder);
localFolder.open(Folder.OPEN_MODE_RW);
List<? extends Message> messages = localFolder.getMessages(null, false);
for (Message message : messages) {
if (!message.isSet(Flag.SEEN)) {
message.setFlag(Flag.SEEN, true);
for (MessagingListener l : getListeners()) {
l.listLocalMessagesUpdateMessage(account, folder, message);
}
}
}
for (MessagingListener l : getListeners()) {
l.folderStatusChanged(account, folder, 0);
}
if (account.getErrorFolderName().equals(folder)) {
return;
}
Store remoteStore = account.getRemoteStore();
remoteFolder = remoteStore.getFolder(folder);
if (!remoteFolder.exists() || !remoteFolder.isFlagSupported(Flag.SEEN)) {
return;
}
remoteFolder.open(Folder.OPEN_MODE_RW);
if (remoteFolder.getMode() != Folder.OPEN_MODE_RW) {
return;
}
remoteFolder.setFlags(Collections.singleton(Flag.SEEN), true);
remoteFolder.close();
} catch (UnsupportedOperationException uoe) {
Log.w(K9.LOG_TAG, "Could not mark all server-side as read because store doesn't support operation", uoe);
} finally {
closeFolder(localFolder);
closeFolder(remoteFolder);
}
}
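/**
* Display a notification for a certificate error that needs the user's attention,
* linking to the matching incoming or outgoing server settings screen.
*/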
void notifyUserIfCertificateProblem(Context context, Exception e,
Account account, boolean incoming) {
if (!(e instanceof CertificateValidationException)) {
return;
}
CertificateValidationException cve = (CertificateValidationException) e;
if (!cve.needsUserAttention()) {
return;
}
final int id = incoming
? K9.CERTIFICATE_EXCEPTION_NOTIFICATION_INCOMING + account.getAccountNumber()
: K9.CERTIFICATE_EXCEPTION_NOTIFICATION_OUTGOING + account.getAccountNumber();
final Intent i = incoming
? AccountSetupIncoming.intentActionEditIncomingSettings(context, account)
: AccountSetupOutgoing.intentActionEditOutgoingSettings(context, account);
final PendingIntent pi = PendingIntent.getActivity(context,
account.getAccountNumber(), i, PendingIntent.FLAG_UPDATE_CURRENT);
final String title = context.getString(
R.string.notification_certificate_error_title, account.getDescription());
final NotificationCompat.Builder builder = new NotificationCompat.Builder(context);
builder.setSmallIcon(platformSupportsLockScreenNotifications()
? R.drawable.ic_notify_new_mail_vector
: R.drawable.ic_notify_new_mail);
builder.setWhen(System.currentTimeMillis());
builder.setAutoCancel(true);
builder.setTicker(title);
builder.setContentTitle(title);
builder.setContentText(context.getString(R.string.notification_certificate_error_text));
builder.setContentIntent(pi);
builder.setVisibility(NotificationCompat.VISIBILITY_PUBLIC);
configureNotification(builder, null, null,
K9.NOTIFICATION_LED_FAILURE_COLOR,
K9.NOTIFICATION_LED_BLINK_FAST, true);
final NotificationManager nm = (NotificationManager)
context.getSystemService(Context.NOTIFICATION_SERVICE);
nm.notify(null, id, builder.build());
}
public void clearCertificateErrorNotifications(Context context,
final Account account, CheckDirection direction) {
final NotificationManager nm = (NotificationManager)
context.getSystemService(Context.NOTIFICATION_SERVICE);
if (direction == CheckDirection.INCOMING) {
nm.cancel(null, K9.CERTIFICATE_EXCEPTION_NOTIFICATION_INCOMING + account.getAccountNumber());
} else {
nm.cancel(null, K9.CERTIFICATE_EXCEPTION_NOTIFICATION_OUTGOING + account.getAccountNumber());
}
}
static long uidfill = 0;
static AtomicBoolean loopCatch = new AtomicBoolean();
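/**
* Write the throwable's stack trace, together with app version and device
* information, as a message into the account's error folder.
*/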
public void addErrorMessage(Account account, String subject, Throwable t) {
try {
if (t == null) {
return;
}
CharArrayWriter baos = new CharArrayWriter(t.getStackTrace().length * 10);
PrintWriter ps = new PrintWriter(baos);
try {
PackageInfo packageInfo = context.getPackageManager().getPackageInfo(
context.getPackageName(), 0);
ps.format("K9-Mail version: %s\r\n", packageInfo.versionName);
} catch (Exception e) {
// ignore
}
ps.format("Device make: %s\r\n", Build.MANUFACTURER);
ps.format("Device model: %s\r\n", Build.MODEL);
ps.format("Android version: %s\r\n\r\n", Build.VERSION.RELEASE);
t.printStackTrace(ps);
ps.close();
if (subject == null) {
subject = getRootCauseMessage(t);
}
addErrorMessage(account, subject, baos.toString());
} catch (Throwable it) {
Log.e(K9.LOG_TAG, "Could not save error message to " + account.getErrorFolderName(), it);
}
}
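/**
* Append an error message to the account's error folder. Only active in debug builds
* and guarded against recursive invocation while an error message is being saved.
*/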
public void addErrorMessage(Account account, String subject, String body) {
if (!K9.DEBUG) {
return;
}
if (!loopCatch.compareAndSet(false, true)) {
return;
}
try {
if (body == null || body.length() < 1) {
return;
}
Store localStore = account.getLocalStore();
LocalFolder localFolder = (LocalFolder)localStore.getFolder(account.getErrorFolderName());
MimeMessage message = new MimeMessage();
MimeMessageHelper.setBody(message, new TextBody(body));
message.setFlag(Flag.X_DOWNLOADED_FULL, true);
message.setSubject(subject);
long nowTime = System.currentTimeMillis();
Date nowDate = new Date(nowTime);
message.setInternalDate(nowDate);
message.addSentDate(nowDate, K9.hideTimeZone());
message.setFrom(new Address(account.getEmail(), "K9mail internal"));
localFolder.appendMessages(Collections.singletonList(message));
localFolder.clearMessagesOlderThan(nowTime - (15 * 60 * 1000));
} catch (Throwable it) {
Log.e(K9.LOG_TAG, "Could not save error message to " + account.getErrorFolderName(), it);
} finally {
loopCatch.set(false);
}
}
public void markAllMessagesRead(final Account account, final String folder) {
if (K9.DEBUG)
Log.i(K9.LOG_TAG, "Marking all messages in " + account.getDescription() + ":" + folder + " as read");
List<String> args = new ArrayList<String>();
args.add(folder);
PendingCommand command = new PendingCommand();
command.command = PENDING_COMMAND_MARK_ALL_AS_READ;
command.arguments = args.toArray(EMPTY_STRING_ARRAY);
queuePendingCommand(account, command);
processPendingCommands(account);
}
public void setFlag(final Account account, final List<Long> messageIds, final Flag flag,
final boolean newState) {
setFlagInCache(account, messageIds, flag, newState);
threadPool.execute(new Runnable() {
@Override
public void run() {
setFlagSynchronous(account, messageIds, flag, newState, false);
}
});
}
public void setFlagForThreads(final Account account, final List<Long> threadRootIds,
final Flag flag, final boolean newState) {
setFlagForThreadsInCache(account, threadRootIds, flag, newState);
threadPool.execute(new Runnable() {
@Override
public void run() {
setFlagSynchronous(account, threadRootIds, flag, newState, true);
}
});
}
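/**
* Change a flag for the given message or thread IDs in the local database first, so
* the UI can update quickly, then queue the flag change for the server.
*/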
private void setFlagSynchronous(final Account account, final List<Long> ids,
final Flag flag, final boolean newState, final boolean threadedList) {
LocalStore localStore;
try {
localStore = account.getLocalStore();
} catch (MessagingException e) {
Log.e(K9.LOG_TAG, "Couldn't get LocalStore instance", e);
return;
}
// Update affected messages in the database. This should be as fast as possible so the UI
// can be updated with the new state.
try {
if (threadedList) {
localStore.setFlagForThreads(ids, flag, newState);
removeFlagForThreadsFromCache(account, ids, flag);
} else {
localStore.setFlag(ids, flag, newState);
removeFlagFromCache(account, ids, flag);
}
} catch (MessagingException e) {
Log.e(K9.LOG_TAG, "Couldn't set flags in local database", e);
}
// Read folder name and UID of messages from the database
Map<String, List<String>> folderMap;
try {
folderMap = localStore.getFoldersAndUids(ids, threadedList);
} catch (MessagingException e) {
Log.e(K9.LOG_TAG, "Couldn't get folder name and UID of messages", e);
return;
}
// Loop over all folders
for (Entry<String, List<String>> entry : folderMap.entrySet()) {
String folderName = entry.getKey();
// Notify listeners of changed folder status
LocalFolder localFolder = localStore.getFolder(folderName);
try {
int unreadMessageCount = localFolder.getUnreadMessageCount();
for (MessagingListener l : getListeners()) {
l.folderStatusChanged(account, folderName, unreadMessageCount);
}
} catch (MessagingException e) {
Log.w(K9.LOG_TAG, "Couldn't get unread count for folder: " + folderName, e);
}
// The error folder is always a local folder
// TODO: Skip the remote part for all local-only folders
if (account.getErrorFolderName().equals(folderName)) {
continue;
}
// Send flag change to server
String[] uids = entry.getValue().toArray(EMPTY_STRING_ARRAY);
queueSetFlag(account, folderName, Boolean.toString(newState), flag.toString(), uids);
processPendingCommands(account);
}
}
/**
* Set or remove a flag for a set of messages in a specific folder.
*
* <p>
* The {@link Message} objects passed in are updated to reflect the new flag state.
* </p>
*
* @param account
* The account the folder containing the messages belongs to.
* @param folderName
* The name of the folder.
* @param messages
* The messages to change the flag for.
* @param flag
* The flag to change.
* @param newState
* {@code true}, if the flag should be set. {@code false} if it should be removed.
*/
public void setFlag(Account account, String folderName, List<? extends Message> messages, Flag flag,
boolean newState) {
// TODO: Put this into the background, but right now some callers depend on the message
// objects being modified right after this method returns.
Folder localFolder = null;
try {
Store localStore = account.getLocalStore();
localFolder = localStore.getFolder(folderName);
localFolder.open(Folder.OPEN_MODE_RW);
// Allow re-sending of messages that previously failed to send
if (flag == Flag.FLAGGED && !newState &&
account.getOutboxFolderName().equals(folderName)) {
for (Message message : messages) {
String uid = message.getUid();
if (uid != null) {
sendCount.remove(uid);
}
}
}
// Update the messages in the local store
localFolder.setFlags(messages, Collections.singleton(flag), newState);
int unreadMessageCount = localFolder.getUnreadMessageCount();
for (MessagingListener l : getListeners()) {
l.folderStatusChanged(account, folderName, unreadMessageCount);
}
/*
* Handle the remote side
*/
// The error folder is always a local folder
// TODO: Skip the remote part for all local-only folders
if (account.getErrorFolderName().equals(folderName)) {
return;
}
String[] uids = new String[messages.size()];
for (int i = 0, end = uids.length; i < end; i++) {
uids[i] = messages.get(i).getUid();
}
queueSetFlag(account, folderName, Boolean.toString(newState), flag.toString(), uids);
processPendingCommands(account);
} catch (MessagingException me) {
addErrorMessage(account, null, me);
throw new RuntimeException(me);
} finally {
closeFolder(localFolder);
}
}
/**
* Set or remove a flag for a message referenced by message UID.
*
* @param account
* The account the folder containing the message belongs to.
* @param folderName
* The name of the folder.
* @param uid
* The UID of the message to change the flag for.
* @param flag
* The flag to change.
* @param newState
* {@code true}, if the flag should be set. {@code false} if it should be removed.
*/
public void setFlag(Account account, String folderName, String uid, Flag flag,
boolean newState) {
Folder localFolder = null;
try {
LocalStore localStore = account.getLocalStore();
localFolder = localStore.getFolder(folderName);
localFolder.open(Folder.OPEN_MODE_RW);
Message message = localFolder.getMessage(uid);
if (message != null) {
setFlag(account, folderName, Collections.singletonList(message), flag, newState);
}
} catch (MessagingException me) {
addErrorMessage(account, null, me);
throw new RuntimeException(me);
} finally {
closeFolder(localFolder);
}
}
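/**
* Remove all queued pending commands for the account from the local store.
*/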
public void clearAllPending(final Account account) {
try {
Log.w(K9.LOG_TAG, "Clearing pending commands!");
LocalStore localStore = account.getLocalStore();
localStore.removePendingCommands();
} catch (MessagingException me) {
Log.e(K9.LOG_TAG, "Unable to clear pending command", me);
addErrorMessage(account, null, me);
}
}
public void loadMessageForViewRemote(final Account account, final String folder,
final String uid, final MessagingListener listener) {
put("loadMessageForViewRemote", listener, new Runnable() {
@Override
public void run() {
loadMessageForViewRemoteSynchronous(account, folder, uid, listener, false, false);
}
});
}
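/**
* Make sure the given message is fully downloaded, fetching it from the server if
* necessary, and notify the listeners once headers and body are available.
*
* @return {@code true} if the message could be loaded, {@code false} otherwise.
*/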
public boolean loadMessageForViewRemoteSynchronous(final Account account, final String folder,
final String uid, final MessagingListener listener, final boolean force,
final boolean loadPartialFromSearch) {
Folder remoteFolder = null;
LocalFolder localFolder = null;
try {
LocalStore localStore = account.getLocalStore();
localFolder = localStore.getFolder(folder);
localFolder.open(Folder.OPEN_MODE_RW);
LocalMessage message = localFolder.getMessage(uid);
if (uid.startsWith(K9.LOCAL_UID_PREFIX)) {
Log.w(K9.LOG_TAG, "Message has local UID so cannot download fully.");
// ASH move toast
android.widget.Toast.makeText(context,
"Message has local UID so cannot download fully",
android.widget.Toast.LENGTH_LONG).show();
// TODO: Using X_DOWNLOADED_FULL is wrong because it's only a partial message. But
// one we can't download completely. Maybe add a new flag; X_PARTIAL_MESSAGE ?
message.setFlag(Flag.X_DOWNLOADED_FULL, true);
message.setFlag(Flag.X_DOWNLOADED_PARTIAL, false);
}
/* commented out because this was pulled from another unmerged branch:
} else if (localFolder.isLocalOnly() && !force) {
Log.w(K9.LOG_TAG, "Message in local-only folder so cannot download fully.");
// ASH move toast
android.widget.Toast.makeText(mApplication,
"Message in local-only folder so cannot download fully",
android.widget.Toast.LENGTH_LONG).show();
message.setFlag(Flag.X_DOWNLOADED_FULL, true);
message.setFlag(Flag.X_DOWNLOADED_PARTIAL, false);
}*/
if (message.isSet(Flag.X_DOWNLOADED_FULL)) {
/*
* If the message has been synchronized since we were called we'll
* just hand it back because it's ready to go.
*/
FetchProfile fp = new FetchProfile();
fp.add(FetchProfile.Item.ENVELOPE);
fp.add(FetchProfile.Item.BODY);
localFolder.fetch(Collections.singletonList(message), fp, null);
} else {
/*
* At this point the message is not available, so we need to download it
* fully if possible.
*/
Store remoteStore = account.getRemoteStore();
remoteFolder = remoteStore.getFolder(folder);
remoteFolder.open(Folder.OPEN_MODE_RW);
// Get the remote message and fully download it
Message remoteMessage = remoteFolder.getMessage(uid);
FetchProfile fp = new FetchProfile();
fp.add(FetchProfile.Item.BODY);
remoteFolder.fetch(Collections.singletonList(remoteMessage), fp, null);
// Store the message locally and load the stored message into memory
localFolder.appendMessages(Collections.singletonList(remoteMessage));
if (loadPartialFromSearch) {
fp.add(FetchProfile.Item.BODY);
}
fp.add(FetchProfile.Item.ENVELOPE);
message = localFolder.getMessage(uid);
localFolder.fetch(Collections.singletonList(message), fp, null);
// Mark that this message is now fully synched
if (account.isMarkMessageAsReadOnView()) {
message.setFlag(Flag.SEEN, true);
}
message.setFlag(Flag.X_DOWNLOADED_FULL, true);
}
// now that we have the full message, refresh the headers
for (MessagingListener l : getListeners(listener)) {
l.loadMessageForViewHeadersAvailable(account, folder, uid, message);
}
for (MessagingListener l : getListeners(listener)) {
l.loadMessageForViewBodyAvailable(account, folder, uid, message);
}
for (MessagingListener l : getListeners(listener)) {
l.loadMessageForViewFinished(account, folder, uid, message);
}
return true;
} catch (Exception e) {
for (MessagingListener l : getListeners(listener)) {
l.loadMessageForViewFailed(account, folder, uid, e);
}
notifyUserIfCertificateProblem(context, e, account, true);
addErrorMessage(account, null, e);
return false;
} finally {
closeFolder(remoteFolder);
closeFolder(localFolder);
}
}
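/**
* Load a message for display, downloading it from the server first if it has not
* been fully or partially downloaded yet.
*/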
public void loadMessageForView(final Account account, final String folder, final String uid,
final MessagingListener listener) {
for (MessagingListener l : getListeners(listener)) {
l.loadMessageForViewStarted(account, folder, uid);
}
threadPool.execute(new Runnable() {
@Override
public void run() {
try {
LocalStore localStore = account.getLocalStore();
LocalFolder localFolder = localStore.getFolder(folder);
localFolder.open(Folder.OPEN_MODE_RW);
LocalMessage message = localFolder.getMessage(uid);
if (message == null
|| message.getId() == 0) {
throw new IllegalArgumentException("Message not found: folder=" + folder + ", uid=" + uid);
}
// IMAP search results will usually need to be downloaded before viewing.
// TODO: limit by account.getMaximumAutoDownloadMessageSize().
if (!message.isSet(Flag.X_DOWNLOADED_FULL) &&
!message.isSet(Flag.X_DOWNLOADED_PARTIAL)) {
if (loadMessageForViewRemoteSynchronous(account, folder, uid, listener,
false, true)) {
markMessageAsReadOnView(account, message);
}
return;
}
for (MessagingListener l : getListeners(listener)) {
l.loadMessageForViewHeadersAvailable(account, folder, uid, message);
}
FetchProfile fp = new FetchProfile();
fp.add(FetchProfile.Item.ENVELOPE);
fp.add(FetchProfile.Item.BODY);
localFolder.fetch(Collections.singletonList(message), fp, null);
localFolder.close();
for (MessagingListener l : getListeners(listener)) {
l.loadMessageForViewBodyAvailable(account, folder, uid, message);
}
for (MessagingListener l : getListeners(listener)) {
l.loadMessageForViewFinished(account, folder, uid, message);
}
markMessageAsReadOnView(account, message);
} catch (Exception e) {
for (MessagingListener l : getListeners(listener)) {
l.loadMessageForViewFailed(account, folder, uid, e);
}
addErrorMessage(account, null, e);
}
}
});
}
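/**
* Load a fully downloaded message from the local store and mark it as read if the
* account is configured to do so.
*/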
public LocalMessage loadMessage(Account account, String folderName, String uid) throws MessagingException {
LocalStore localStore = account.getLocalStore();
LocalFolder localFolder = localStore.getFolder(folderName);
localFolder.open(Folder.OPEN_MODE_RW);
LocalMessage message = localFolder.getMessage(uid);
if (message == null || message.getId() == 0) {
throw new IllegalArgumentException("Message not found: folder=" + folderName + ", uid=" + uid);
}
FetchProfile fp = new FetchProfile();
fp.add(FetchProfile.Item.BODY);
localFolder.fetch(Collections.singletonList(message), fp, null);
localFolder.close();
markMessageAsReadOnView(account, message);
return message;
}
private void markMessageAsReadOnView(Account account, LocalMessage message)
throws MessagingException {
if (account.isMarkMessageAsReadOnView() && !message.isSet(Flag.SEEN)) {
List<Long> messageIds = Collections.singletonList(message.getId());
setFlag(account, messageIds, Flag.SEEN, true);
message.setFlagInternal(Flag.SEEN, true);
}
}
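/**
* Download a message part (e.g. an attachment) from the server, add it to the local
* copy of the message and notify the listeners.
*/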
public void loadAttachment(final Account account, final LocalMessage message, final Part part,
final MessagingListener listener) {
put("loadAttachment", listener, new Runnable() {
@Override
public void run() {
Folder remoteFolder = null;
LocalFolder localFolder = null;
try {
String folderName = message.getFolder().getName();
LocalStore localStore = account.getLocalStore();
localFolder = localStore.getFolder(folderName);
Store remoteStore = account.getRemoteStore();
remoteFolder = remoteStore.getFolder(folderName);
remoteFolder.open(Folder.OPEN_MODE_RW);
Message remoteMessage = remoteFolder.getMessage(message.getUid());
remoteFolder.fetchPart(remoteMessage, part, null);
localFolder.addPartToMessage(message, part);
for (MessagingListener l : getListeners(listener)) {
l.loadAttachmentFinished(account, message, part);
}
} catch (MessagingException me) {
if (K9.DEBUG)
Log.v(K9.LOG_TAG, "Exception loading attachment", me);
for (MessagingListener l : getListeners(listener)) {
l.loadAttachmentFailed(account, message, part, me.getMessage());
}
notifyUserIfCertificateProblem(context, me, account, true);
addErrorMessage(account, null, me);
} finally {
closeFolder(localFolder);
closeFolder(remoteFolder);
}
}
});
}
/**
* Stores the given message in the Outbox and starts a sendPendingMessages command to
* attempt to send the message.
* @param account
* @param message
* @param listener
*/
public void sendMessage(final Account account,
final Message message,
MessagingListener listener) {
try {
LocalStore localStore = account.getLocalStore();
LocalFolder localFolder = localStore.getFolder(account.getOutboxFolderName());
localFolder.open(Folder.OPEN_MODE_RW);
localFolder.appendMessages(Collections.singletonList(message));
Message localMessage = localFolder.getMessage(message.getUid());
localMessage.setFlag(Flag.X_DOWNLOADED_FULL, true);
localFolder.close();
sendPendingMessages(account, listener);
} catch (Exception e) {
/*
for (MessagingListener l : getListeners())
{
// TODO general failed
}
*/
addErrorMessage(account, null, e);
}
}
public void sendPendingMessages(MessagingListener listener) {
final Preferences prefs = Preferences.getPreferences(context);
for (Account account : prefs.getAvailableAccounts()) {
sendPendingMessages(account, listener);
}
}
/**
* Attempt to send any messages that are sitting in the Outbox.
* @param account
* @param listener
*/
public void sendPendingMessages(final Account account,
MessagingListener listener) {
putBackground("sendPendingMessages", listener, new Runnable() {
@Override
public void run() {
if (!account.isAvailable(context)) {
throw new UnavailableAccountException();
}
if (messagesPendingSend(account)) {
notifyWhileSending(account);
try {
sendPendingMessagesSynchronous(account);
} finally {
notifyWhileSendingDone(account);
}
}
}
});
}
private void cancelNotification(int id) {
NotificationManager notifMgr =
(NotificationManager) context.getSystemService(Context.NOTIFICATION_SERVICE);
notifMgr.cancel(id);
}
private void notifyWhileSendingDone(Account account) {
if (account.isShowOngoing()) {
cancelNotification(K9.FETCHING_EMAIL_NOTIFICATION - account.getAccountNumber());
}
}
/**
* Display an ongoing notification while a message is being sent.
*
* @param account
* The account the message is sent from. Never {@code null}.
*/
private void notifyWhileSending(Account account) {
if (!account.isShowOngoing()) {
return;
}
NotificationManager notifMgr =
(NotificationManager) context.getSystemService(Context.NOTIFICATION_SERVICE);
NotificationCompat.Builder builder = new NotificationCompat.Builder(context);
builder.setSmallIcon(R.drawable.ic_notify_check_mail);
builder.setWhen(System.currentTimeMillis());
builder.setOngoing(true);
String accountDescription = account.getDescription();
String accountName = (TextUtils.isEmpty(accountDescription)) ?
account.getEmail() : accountDescription;
builder.setTicker(context.getString(R.string.notification_bg_send_ticker,
accountName));
builder.setContentTitle(context.getString(R.string.notification_bg_send_title));
builder.setContentText(account.getDescription());
TaskStackBuilder stack = buildMessageListBackStack(context, account,
account.getInboxFolderName());
builder.setContentIntent(stack.getPendingIntent(0, 0));
builder.setVisibility(NotificationCompat.VISIBILITY_PUBLIC);
if (K9.NOTIFICATION_LED_WHILE_SYNCING) {
configureNotification(builder, null, null,
account.getNotificationSetting().getLedColor(),
K9.NOTIFICATION_LED_BLINK_FAST, true);
}
notifMgr.notify(K9.FETCHING_EMAIL_NOTIFICATION - account.getAccountNumber(),
builder.build());
}
private void notifySendTempFailed(Account account, Exception lastFailure) {
notifySendFailed(account, lastFailure, account.getOutboxFolderName());
}
private void notifySendPermFailed(Account account, Exception lastFailure) {
notifySendFailed(account, lastFailure, account.getDraftsFolderName());
}
/**
* Display a notification when sending a message has failed.
*
* @param account
* The account that was used to send the message.
* @param lastFailure
* The {@link Exception} instance that indicated sending the message has failed.
* @param openFolder
* The name of the folder to open when the notification is clicked.
*/
private void notifySendFailed(Account account, Exception lastFailure, String openFolder) {
NotificationManager notifMgr =
(NotificationManager) context.getSystemService(Context.NOTIFICATION_SERVICE);
NotificationCompat.Builder builder = new NotificationCompat.Builder(context);
builder.setSmallIcon(platformSupportsLockScreenNotifications()
? R.drawable.ic_notify_new_mail_vector
: R.drawable.ic_notify_new_mail);
builder.setWhen(System.currentTimeMillis());
builder.setAutoCancel(true);
builder.setTicker(context.getString(R.string.send_failure_subject));
builder.setContentTitle(context.getString(R.string.send_failure_subject));
builder.setContentText(getRootCauseMessage(lastFailure));
TaskStackBuilder stack = buildFolderListBackStack(context, account);
builder.setContentIntent(stack.getPendingIntent(0, 0));
builder.setVisibility(NotificationCompat.VISIBILITY_PUBLIC);
configureNotification(builder, null, null, K9.NOTIFICATION_LED_FAILURE_COLOR,
K9.NOTIFICATION_LED_BLINK_FAST, true);
notifMgr.notify(K9.SEND_FAILED_NOTIFICATION - account.getAccountNumber(),
builder.build());
}
/**
* Display an ongoing notification while checking for new messages on the server.
*
* @param account
* The account that is checked for new messages. Never {@code null}.
* @param folder
* The folder that is being checked for new messages. Never {@code null}.
*/
private void notifyFetchingMail(final Account account, final Folder folder) {
if (!account.isShowOngoing()) {
return;
}
final NotificationManager notifMgr =
(NotificationManager) context.getSystemService(Context.NOTIFICATION_SERVICE);
NotificationCompat.Builder builder = new NotificationCompat.Builder(context);
builder.setSmallIcon(R.drawable.ic_notify_check_mail);
builder.setWhen(System.currentTimeMillis());
builder.setOngoing(true);
builder.setTicker(context.getString(
R.string.notification_bg_sync_ticker, account.getDescription(), folder.getName()));
builder.setContentTitle(context.getString(R.string.notification_bg_sync_title));
builder.setContentText(account.getDescription() +
context.getString(R.string.notification_bg_title_separator) +
folder.getName());
TaskStackBuilder stack = buildMessageListBackStack(context, account,
account.getInboxFolderName());
builder.setContentIntent(stack.getPendingIntent(0, 0));
builder.setVisibility(NotificationCompat.VISIBILITY_PUBLIC);
if (K9.NOTIFICATION_LED_WHILE_SYNCING) {
configureNotification(builder, null, null,
account.getNotificationSetting().getLedColor(),
K9.NOTIFICATION_LED_BLINK_FAST, true);
}
notifMgr.notify(K9.FETCHING_EMAIL_NOTIFICATION - account.getAccountNumber(),
builder.build());
}
private void notifyFetchingMailCancel(final Account account) {
if (account.isShowOngoing()) {
cancelNotification(K9.FETCHING_EMAIL_NOTIFICATION - account.getAccountNumber());
}
}
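/**
* Check whether the account's Outbox contains any messages waiting to be sent.
*/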
public boolean messagesPendingSend(final Account account) {
Folder localFolder = null;
try {
localFolder = account.getLocalStore().getFolder(
account.getOutboxFolderName());
if (!localFolder.exists()) {
return false;
}
localFolder.open(Folder.OPEN_MODE_RW);
if (localFolder.getMessageCount() > 0) {
return true;
}
} catch (Exception e) {
Log.e(K9.LOG_TAG, "Exception while checking for unsent messages", e);
} finally {
closeFolder(localFolder);
}
return false;
}
/**
* Attempt to send any messages that are sitting in the Outbox.
* @param account
*/
public void sendPendingMessagesSynchronous(final Account account) {
Folder localFolder = null;
Exception lastFailure = null;
try {
Store localStore = account.getLocalStore();
localFolder = localStore.getFolder(
account.getOutboxFolderName());
if (!localFolder.exists()) {
return;
}
for (MessagingListener l : getListeners()) {
l.sendPendingMessagesStarted(account);
}
localFolder.open(Folder.OPEN_MODE_RW);
List<? extends Message> localMessages = localFolder.getMessages(null);
int progress = 0;
int todo = localMessages.size();
for (MessagingListener l : getListeners()) {
l.synchronizeMailboxProgress(account, account.getSentFolderName(), progress, todo);
}
/*
* The profile we will use to pull all of the content
* for a given local message into memory for sending.
*/
FetchProfile fp = new FetchProfile();
fp.add(FetchProfile.Item.ENVELOPE);
fp.add(FetchProfile.Item.BODY);
if (K9.DEBUG)
Log.i(K9.LOG_TAG, "Scanning folder '" + account.getOutboxFolderName() + "' (" + ((LocalFolder)localFolder).getId() + ") for messages to send");
Transport transport = Transport.getInstance(K9.app, account);
for (Message message : localMessages) {
if (message.isSet(Flag.DELETED)) {
message.destroy();
continue;
}
try {
AtomicInteger count = new AtomicInteger(0);
AtomicInteger oldCount = sendCount.putIfAbsent(message.getUid(), count);
if (oldCount != null) {
count = oldCount;
}
if (K9.DEBUG)
Log.i(K9.LOG_TAG, "Send count for message " + message.getUid() + " is " + count.get());
if (count.incrementAndGet() > K9.MAX_SEND_ATTEMPTS) {
Log.e(K9.LOG_TAG, "Send count for message " + message.getUid() + " can't be delivered after " + K9.MAX_SEND_ATTEMPTS + " attempts. Giving up until the user restarts the device");
notifySendTempFailed(account, new MessagingException(message.getSubject()));
continue;
}
localFolder.fetch(Collections.singletonList(message), fp, null);
try {
if (message.getHeader(K9.IDENTITY_HEADER) != null) {
Log.v(K9.LOG_TAG, "The user has set the Outbox and Drafts folder to the same thing. " +
"This message appears to be a draft, so K-9 will not send it");
continue;
}
message.setFlag(Flag.X_SEND_IN_PROGRESS, true);
if (K9.DEBUG)
Log.i(K9.LOG_TAG, "Sending message with UID " + message.getUid());
transport.sendMessage(message);
message.setFlag(Flag.X_SEND_IN_PROGRESS, false);
message.setFlag(Flag.SEEN, true);
progress++;
for (MessagingListener l : getListeners()) {
l.synchronizeMailboxProgress(account, account.getSentFolderName(), progress, todo);
}
if (!account.hasSentFolder()) {
if (K9.DEBUG)
Log.i(K9.LOG_TAG, "Account does not have a sent mail folder; deleting sent message");
message.setFlag(Flag.DELETED, true);
} else {
LocalFolder localSentFolder = (LocalFolder) localStore.getFolder(account.getSentFolderName());
if (K9.DEBUG)
Log.i(K9.LOG_TAG, "Moving sent message to folder '" + account.getSentFolderName() + "' (" + localSentFolder.getId() + ") ");
localFolder.moveMessages(Collections.singletonList(message), localSentFolder);
if (K9.DEBUG)
Log.i(K9.LOG_TAG, "Moved sent message to folder '" + account.getSentFolderName() + "' (" + localSentFolder.getId() + ") ");
PendingCommand command = new PendingCommand();
command.command = PENDING_COMMAND_APPEND;
command.arguments = new String[] { localSentFolder.getName(), message.getUid() };
queuePendingCommand(account, command);
processPendingCommands(account);
}
} catch (Exception e) {
// 5.x.x errors from the SMTP server are permanent failures ("PERMFAIL"),
// so move the message over to Drafts rather than leaving it in the Outbox.
// This is a complete hack, but is worlds better than the previous
// "don't even bother" functionality.
if (getRootCauseMessage(e).startsWith("5")) {
localFolder.moveMessages(Collections.singletonList(message), (LocalFolder) localStore.getFolder(account.getDraftsFolderName()));
}
notifyUserIfCertificateProblem(context, e, account, false);
addErrorMessage(account, "Failed to send message", e);
message.setFlag(Flag.X_SEND_FAILED, true);
Log.e(K9.LOG_TAG, "Failed to send message", e);
for (MessagingListener l : getListeners()) {
l.synchronizeMailboxFailed(account, localFolder.getName(), getRootCauseMessage(e));
}
lastFailure = e;
}
} catch (Exception e) {
Log.e(K9.LOG_TAG, "Failed to fetch message for sending", e);
for (MessagingListener l : getListeners()) {
l.synchronizeMailboxFailed(account, localFolder.getName(), getRootCauseMessage(e));
}
addErrorMessage(account, "Failed to fetch message for sending", e);
lastFailure = e;
}
}
for (MessagingListener l : getListeners()) {
l.sendPendingMessagesCompleted(account);
}
if (lastFailure != null) {
if (getRootCauseMessage(lastFailure).startsWith("5")) {
notifySendPermFailed(account, lastFailure);
} else {
notifySendTempFailed(account, lastFailure);
}
}
} catch (UnavailableStorageException e) {
Log.i(K9.LOG_TAG, "Failed to send pending messages because storage is not available - trying again later.");
throw new UnavailableAccountException(e);
} catch (Exception e) {
for (MessagingListener l : getListeners()) {
l.sendPendingMessagesFailed(account);
}
addErrorMessage(account, null, e);
} finally {
if (lastFailure == null) {
cancelNotification(K9.SEND_FAILED_NOTIFICATION - account.getAccountNumber());
}
closeFolder(localFolder);
}
}
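/**
* Compute the account's statistics (e.g. unread message count) on a background
* thread and notify the listener.
*/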
public void getAccountStats(final Context context, final Account account,
final MessagingListener listener) {
threadPool.execute(new Runnable() {
@Override
public void run() {
try {
AccountStats stats = account.getStats(context);
listener.accountStatusChanged(account, stats);
} catch (MessagingException me) {
Log.e(K9.LOG_TAG, "Count not get unread count for account " +
account.getDescription(), me);
}
}
});
}
public void getSearchAccountStats(final SearchAccount searchAccount,
final MessagingListener listener) {
threadPool.execute(new Runnable() {
@Override
public void run() {
getSearchAccountStatsSynchronous(searchAccount, listener);
}
});
}
public AccountStats getSearchAccountStatsSynchronous(final SearchAccount searchAccount,
final MessagingListener listener) {
Preferences preferences = Preferences.getPreferences(context);
LocalSearch search = searchAccount.getRelatedSearch();
// Collect accounts that belong to the search
String[] accountUuids = search.getAccountUuids();
List<Account> accounts;
if (search.searchAllAccounts()) {
accounts = preferences.getAccounts();
} else {
accounts = new ArrayList<Account>(accountUuids.length);
for (int i = 0, len = accountUuids.length; i < len; i++) {
String accountUuid = accountUuids[i];
accounts.add(preferences.getAccount(accountUuid));
}
}
ContentResolver cr = context.getContentResolver();
int unreadMessageCount = 0;
int flaggedMessageCount = 0;
String[] projection = {
StatsColumns.UNREAD_COUNT,
StatsColumns.FLAGGED_COUNT
};
for (Account account : accounts) {
StringBuilder query = new StringBuilder();
List<String> queryArgs = new ArrayList<String>();
ConditionsTreeNode conditions = search.getConditions();
SqlQueryBuilder.buildWhereClause(account, conditions, query, queryArgs);
String selection = query.toString();
String[] selectionArgs = queryArgs.toArray(EMPTY_STRING_ARRAY);
Uri uri = Uri.withAppendedPath(EmailProvider.CONTENT_URI,
"account/" + account.getUuid() + "/stats");
// Query content provider to get the account stats
Cursor cursor = cr.query(uri, projection, selection, selectionArgs, null);
try {
if (cursor.moveToFirst()) {
unreadMessageCount += cursor.getInt(0);
flaggedMessageCount += cursor.getInt(1);
}
} finally {
cursor.close();
}
}
// Create AccountStats instance...
AccountStats stats = new AccountStats();
stats.unreadMessageCount = unreadMessageCount;
stats.flaggedMessageCount = flaggedMessageCount;
// ...and notify the listener
if (listener != null) {
listener.accountStatusChanged(searchAccount, stats);
}
return stats;
}
public void getFolderUnreadMessageCount(final Account account, final String folderName,
final MessagingListener l) {
Runnable unreadRunnable = new Runnable() {
@Override
public void run() {
int unreadMessageCount = 0;
try {
Folder localFolder = account.getLocalStore().getFolder(folderName);
unreadMessageCount = localFolder.getUnreadMessageCount();
} catch (MessagingException me) {
Log.e(K9.LOG_TAG, "Count not get unread count for account " + account.getDescription(), me);
}
l.folderStatusChanged(account, folderName, unreadMessageCount);
}
};
put("getFolderUnread:" + account.getDescription() + ":" + folderName, l, unreadRunnable);
}
public boolean isMoveCapable(Message message) {
return !message.getUid().startsWith(K9.LOCAL_UID_PREFIX);
}
public boolean isCopyCapable(Message message) {
return isMoveCapable(message);
}
public boolean isMoveCapable(final Account account) {
try {
Store localStore = account.getLocalStore();
Store remoteStore = account.getRemoteStore();
return localStore.isMoveCapable() && remoteStore.isMoveCapable();
} catch (MessagingException me) {
Log.e(K9.LOG_TAG, "Exception while ascertaining move capability", me);
return false;
}
}
public boolean isCopyCapable(final Account account) {
try {
Store localStore = account.getLocalStore();
Store remoteStore = account.getRemoteStore();
return localStore.isCopyCapable() && remoteStore.isCopyCapable();
} catch (MessagingException me) {
Log.e(K9.LOG_TAG, "Exception while ascertaining copy capability", me);
return false;
}
}
public void moveMessages(final Account account, final String srcFolder,
final List<LocalMessage> messages, final String destFolder,
final MessagingListener listener) {
suppressMessages(account, messages);
putBackground("moveMessages", null, new Runnable() {
@Override
public void run() {
moveOrCopyMessageSynchronous(account, srcFolder, messages, destFolder, false,
listener);
}
});
}
public void moveMessagesInThread(final Account account, final String srcFolder,
final List<LocalMessage> messages, final String destFolder) {
suppressMessages(account, messages);
putBackground("moveMessagesInThread", null, new Runnable() {
@Override
public void run() {
try {
List<Message> messagesInThreads = collectMessagesInThreads(account, messages);
moveOrCopyMessageSynchronous(account, srcFolder, messagesInThreads, destFolder,
false, null);
} catch (MessagingException e) {
addErrorMessage(account, "Exception while moving messages", e);
}
}
});
}
public void moveMessage(final Account account, final String srcFolder, final LocalMessage message,
final String destFolder, final MessagingListener listener) {
moveMessages(account, srcFolder, Collections.singletonList(message), destFolder, listener);
}
public void copyMessages(final Account account, final String srcFolder,
final List<? extends Message> messages, final String destFolder,
final MessagingListener listener) {
putBackground("copyMessages", null, new Runnable() {
@Override
public void run() {
moveOrCopyMessageSynchronous(account, srcFolder, messages, destFolder, true,
listener);
}
});
}
public void copyMessagesInThread(final Account account, final String srcFolder,
final List<? extends Message> messages, final String destFolder) {
putBackground("copyMessagesInThread", null, new Runnable() {
@Override
public void run() {
try {
List<Message> messagesInThreads = collectMessagesInThreads(account, messages);
moveOrCopyMessageSynchronous(account, srcFolder, messagesInThreads, destFolder,
true, null);
} catch (MessagingException e) {
addErrorMessage(account, "Exception while copying messages", e);
}
}
});
}
public void copyMessage(final Account account, final String srcFolder, final Message message,
final String destFolder, final MessagingListener listener) {
copyMessages(account, srcFolder, Collections.singletonList(message), destFolder, listener);
}
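/**
 * Moves or copies messages between two folders of the same account: the operation is first
 * applied to the local store, then queued as a pending command so it is replayed against
 * the remote store. Does nothing if either store lacks the required move/copy capability.
 */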
private void moveOrCopyMessageSynchronous(final Account account, final String srcFolder,
final List<? extends Message> inMessages, final String destFolder, final boolean isCopy,
MessagingListener listener) {
try {
Map<String, String> uidMap = new HashMap<String, String>();
Store localStore = account.getLocalStore();
Store remoteStore = account.getRemoteStore();
if (!isCopy && (!remoteStore.isMoveCapable() || !localStore.isMoveCapable())) {
return;
}
if (isCopy && (!remoteStore.isCopyCapable() || !localStore.isCopyCapable())) {
return;
}
Folder localSrcFolder = localStore.getFolder(srcFolder);
Folder localDestFolder = localStore.getFolder(destFolder);
boolean unreadCountAffected = false;
List<String> uids = new LinkedList<String>();
for (Message message : inMessages) {
String uid = message.getUid();
if (!uid.startsWith(K9.LOCAL_UID_PREFIX)) {
uids.add(uid);
}
if (!unreadCountAffected && !message.isSet(Flag.SEEN)) {
unreadCountAffected = true;
}
}
List<? extends Message> messages = localSrcFolder.getMessages(uids.toArray(EMPTY_STRING_ARRAY), null);
if (messages.size() > 0) {
Map<String, Message> origUidMap = new HashMap<String, Message>();
for (Message message : messages) {
origUidMap.put(message.getUid(), message);
}
if (K9.DEBUG)
Log.i(K9.LOG_TAG, "moveOrCopyMessageSynchronous: source folder = " + srcFolder
+ ", " + messages.size() + " messages, " + ", destination folder = " + destFolder + ", isCopy = " + isCopy);
if (isCopy) {
FetchProfile fp = new FetchProfile();
fp.add(FetchProfile.Item.ENVELOPE);
fp.add(FetchProfile.Item.BODY);
localSrcFolder.fetch(messages, fp, null);
uidMap = localSrcFolder.copyMessages(messages, localDestFolder);
if (unreadCountAffected) {
// If this copy operation changes the unread count in the destination
// folder, notify the listeners.
int unreadMessageCount = localDestFolder.getUnreadMessageCount();
for (MessagingListener l : getListeners()) {
l.folderStatusChanged(account, destFolder, unreadMessageCount);
}
}
} else {
uidMap = localSrcFolder.moveMessages(messages, localDestFolder);
for (Map.Entry<String, Message> entry : origUidMap.entrySet()) {
String origUid = entry.getKey();
Message message = entry.getValue();
for (MessagingListener l : getListeners()) {
l.messageUidChanged(account, srcFolder, origUid, message.getUid());
}
}
unsuppressMessages(account, messages);
if (unreadCountAffected) {
// If this move operation changes the unread count, notify the listeners
// that the unread count changed in both the source and destination folder.
int unreadMessageCountSrc = localSrcFolder.getUnreadMessageCount();
int unreadMessageCountDest = localDestFolder.getUnreadMessageCount();
for (MessagingListener l : getListeners()) {
l.folderStatusChanged(account, srcFolder, unreadMessageCountSrc);
l.folderStatusChanged(account, destFolder, unreadMessageCountDest);
}
}
}
queueMoveOrCopy(account, srcFolder, destFolder, isCopy, origUidMap.keySet().toArray(EMPTY_STRING_ARRAY), uidMap);
}
processPendingCommands(account);
} catch (UnavailableStorageException e) {
Log.i(K9.LOG_TAG, "Failed to move/copy message because storage is not available - trying again later.");
throw new UnavailableAccountException(e);
} catch (MessagingException me) {
addErrorMessage(account, null, me);
throw new RuntimeException("Error moving message", me);
}
}
public void expunge(final Account account, final String folder, final MessagingListener listener) {
putBackground("expunge", null, new Runnable() {
@Override
public void run() {
queueExpunge(account, folder);
}
});
}
public void deleteDraft(final Account account, long id) {
LocalFolder localFolder = null;
try {
LocalStore localStore = account.getLocalStore();
localFolder = localStore.getFolder(account.getDraftsFolderName());
localFolder.open(Folder.OPEN_MODE_RW);
String uid = localFolder.getMessageUidById(id);
if (uid != null) {
LocalMessage message = localFolder.getMessage(uid);
if (message != null) {
deleteMessages(Collections.singletonList(message), null);
}
}
} catch (MessagingException me) {
addErrorMessage(account, null, me);
} finally {
closeFolder(localFolder);
}
}
public void deleteThreads(final List<LocalMessage> messages) {
actOnMessages(messages, new MessageActor() {
@Override
public void act(final Account account, final Folder folder,
final List<Message> accountMessages) {
suppressMessages(account, messages);
putBackground("deleteThreads", null, new Runnable() {
@Override
public void run() {
deleteThreadsSynchronous(account, folder.getName(), accountMessages);
}
});
}
});
}
public void deleteThreadsSynchronous(Account account, String folderName,
List<Message> messages) {
try {
List<Message> messagesToDelete = collectMessagesInThreads(account, messages);
deleteMessagesSynchronous(account, folderName,
messagesToDelete, null);
} catch (MessagingException e) {
Log.e(K9.LOG_TAG, "Something went wrong while deleting threads", e);
}
}
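/**
 * Expands the given messages to the full set of messages in their threads, looking each
 * thread up by its root id (or by the message's own thread id if no root is set).
 */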
public List<Message> collectMessagesInThreads(Account account, List<? extends Message> messages)
throws MessagingException {
LocalStore localStore = account.getLocalStore();
List<Message> messagesInThreads = new ArrayList<Message>();
for (Message message : messages) {
LocalMessage localMessage = (LocalMessage) message;
long rootId = localMessage.getRootId();
long threadId = (rootId == -1) ? localMessage.getThreadId() : rootId;
List<? extends Message> messagesInThread = localStore.getMessagesInThread(threadId);
messagesInThreads.addAll(messagesInThread);
}
return messagesInThreads;
}
public void deleteMessages(final List<LocalMessage> messages, final MessagingListener listener) {
actOnMessages(messages, new MessageActor() {
@Override
public void act(final Account account, final Folder folder,
final List<Message> accountMessages) {
suppressMessages(account, messages);
putBackground("deleteMessages", null, new Runnable() {
@Override
public void run() {
deleteMessagesSynchronous(account, folder.getName(),
accountMessages, listener);
}
});
}
});
}
private void deleteMessagesSynchronous(final Account account, final String folder, final List<? extends Message> messages,
MessagingListener listener) {
Folder localFolder = null;
Folder localTrashFolder = null;
String[] uids = getUidsFromMessages(messages);
try {
// We need to make these callbacks before moving the messages to the trash
// as messages get a new UID after being moved
for (Message message : messages) {
for (MessagingListener l : getListeners(listener)) {
l.messageDeleted(account, folder, message);
}
}
Store localStore = account.getLocalStore();
localFolder = localStore.getFolder(folder);
Map<String, String> uidMap = null;
if (folder.equals(account.getTrashFolderName()) || !account.hasTrashFolder()) {
if (K9.DEBUG)
Log.d(K9.LOG_TAG, "Deleting messages in trash folder or trash set to -None-, not copying");
localFolder.setFlags(messages, Collections.singleton(Flag.DELETED), true);
} else {
localTrashFolder = localStore.getFolder(account.getTrashFolderName());
if (!localTrashFolder.exists()) {
localTrashFolder.create(Folder.FolderType.HOLDS_MESSAGES);
}
if (localTrashFolder.exists()) {
if (K9.DEBUG)
Log.d(K9.LOG_TAG, "Deleting messages in normal folder, moving");
uidMap = localFolder.moveMessages(messages, localTrashFolder);
}
}
for (MessagingListener l : getListeners()) {
l.folderStatusChanged(account, folder, localFolder.getUnreadMessageCount());
if (localTrashFolder != null) {
l.folderStatusChanged(account, account.getTrashFolderName(), localTrashFolder.getUnreadMessageCount());
}
}
if (K9.DEBUG)
Log.d(K9.LOG_TAG, "Delete policy for account " + account.getDescription() + " is " + account.getDeletePolicy());
if (folder.equals(account.getOutboxFolderName())) {
for (Message message : messages) {
// If the message was in the Outbox, then it has been copied to local Trash, and has
// to be copied to remote trash
PendingCommand command = new PendingCommand();
command.command = PENDING_COMMAND_APPEND;
command.arguments =
new String[] {
account.getTrashFolderName(),
message.getUid()
};
queuePendingCommand(account, command);
}
processPendingCommands(account);
} else if (account.getDeletePolicy() == DeletePolicy.ON_DELETE) {
if (folder.equals(account.getTrashFolderName())) {
queueSetFlag(account, folder, Boolean.toString(true), Flag.DELETED.toString(), uids);
} else {
queueMoveOrCopy(account, folder, account.getTrashFolderName(), false, uids, uidMap);
}
processPendingCommands(account);
} else if (account.getDeletePolicy() == DeletePolicy.MARK_AS_READ) {
queueSetFlag(account, folder, Boolean.toString(true), Flag.SEEN.toString(), uids);
processPendingCommands(account);
} else {
if (K9.DEBUG)
Log.d(K9.LOG_TAG, "Delete policy " + account.getDeletePolicy() + " prevents delete from server");
}
unsuppressMessages(account, messages);
} catch (UnavailableStorageException e) {
Log.i(K9.LOG_TAG, "Failed to delete message because storage is not available - trying again later.");
throw new UnavailableAccountException(e);
} catch (MessagingException me) {
addErrorMessage(account, null, me);
throw new RuntimeException("Error deleting message from local store.", me);
} finally {
closeFolder(localFolder);
closeFolder(localTrashFolder);
}
}
private String[] getUidsFromMessages(List <? extends Message> messages) {
String[] uids = new String[messages.size()];
for (int i = 0; i < messages.size(); i++) {
uids[i] = messages.get(i).getUid();
}
return uids;
}
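/**
 * Executes a queued "empty trash" command against the remote store: flags all messages in
 * the remote Trash folder as deleted, expunges immediately if the account policy requires
 * it, and then re-syncs the folder so local deletions get cleaned up.
 */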
private void processPendingEmptyTrash(PendingCommand command, Account account) throws MessagingException {
Store remoteStore = account.getRemoteStore();
Folder remoteFolder = remoteStore.getFolder(account.getTrashFolderName());
try {
if (remoteFolder.exists()) {
remoteFolder.open(Folder.OPEN_MODE_RW);
remoteFolder.setFlags(Collections.singleton(Flag.DELETED), true);
if (Expunge.EXPUNGE_IMMEDIATELY == account.getExpungePolicy()) {
remoteFolder.expunge();
}
// When we empty trash, we need to actually synchronize the folder
// or local deletes will never get cleaned up
synchronizeFolder(account, remoteFolder, true, 0, null);
compact(account, null);
}
} finally {
closeFolder(remoteFolder);
}
}
public void emptyTrash(final Account account, MessagingListener listener) {
putBackground("emptyTrash", listener, new Runnable() {
@Override
public void run() {
LocalFolder localFolder = null;
try {
Store localStore = account.getLocalStore();
localFolder = (LocalFolder) localStore.getFolder(account.getTrashFolderName());
localFolder.open(Folder.OPEN_MODE_RW);
boolean isTrashLocalOnly = isTrashLocalOnly(account);
if (isTrashLocalOnly) {
localFolder.clearAllMessages();
} else {
localFolder.setFlags(Collections.singleton(Flag.DELETED), true);
}
for (MessagingListener l : getListeners()) {
l.emptyTrashCompleted(account);
}
if (!isTrashLocalOnly) {
List<String> args = new ArrayList<String>();
PendingCommand command = new PendingCommand();
command.command = PENDING_COMMAND_EMPTY_TRASH;
command.arguments = args.toArray(EMPTY_STRING_ARRAY);
queuePendingCommand(account, command);
processPendingCommands(account);
}
} catch (UnavailableStorageException e) {
Log.i(K9.LOG_TAG, "Failed to empty trash because storage is not available - trying again later.");
throw new UnavailableAccountException(e);
} catch (Exception e) {
Log.e(K9.LOG_TAG, "emptyTrash failed", e);
addErrorMessage(account, null, e);
} finally {
closeFolder(localFolder);
}
}
});
}
/**
* Find out whether the account type only supports a local Trash folder.
*
* <p>Note: Currently this is only the case for POP3 accounts.</p>
*
* @param account
* The account to check.
*
* @return {@code true} if the account only has a local Trash folder that is not synchronized
* with a folder on the server. {@code false} otherwise.
*
* @throws MessagingException
* In case of an error.
*/
private boolean isTrashLocalOnly(Account account) throws MessagingException {
// TODO: Get rid of the tight coupling once we properly support local folders
return (account.getRemoteStore() instanceof Pop3Store);
}
public void sendAlternate(final Context context, Account account, Message message) {
if (K9.DEBUG)
Log.d(K9.LOG_TAG, "About to load message " + account.getDescription() + ":" + message.getFolder().getName()
+ ":" + message.getUid() + " for sendAlternate");
loadMessageForView(account, message.getFolder().getName(),
message.getUid(), new MessagingListener() {
@Override
public void loadMessageForViewBodyAvailable(Account account, String folder, String uid,
Message message) {
if (K9.DEBUG)
Log.d(K9.LOG_TAG, "Got message " + account.getDescription() + ":" + folder
+ ":" + message.getUid() + " for sendAlternate");
try {
Intent msg = new Intent(Intent.ACTION_SEND);
String quotedText = null;
Part part = MimeUtility.findFirstPartByMimeType(message, "text/plain");
if (part == null) {
part = MimeUtility.findFirstPartByMimeType(message, "text/html");
}
if (part != null) {
quotedText = MessageExtractor.getTextFromPart(part);
}
if (quotedText != null) {
msg.putExtra(Intent.EXTRA_TEXT, quotedText);
}
msg.putExtra(Intent.EXTRA_SUBJECT, message.getSubject());
Address[] from = message.getFrom();
String[] senders = new String[from.length];
for (int i = 0; i < from.length; i++) {
senders[i] = from[i].toString();
}
msg.putExtra(Intents.Share.EXTRA_FROM, senders);
Address[] to = message.getRecipients(RecipientType.TO);
String[] recipientsTo = new String[to.length];
for (int i = 0; i < to.length; i++) {
recipientsTo[i] = to[i].toString();
}
msg.putExtra(Intent.EXTRA_EMAIL, recipientsTo);
Address[] cc = message.getRecipients(RecipientType.CC);
String[] recipientsCc = new String[cc.length];
for (int i = 0; i < cc.length; i++) {
recipientsCc[i] = cc[i].toString();
}
msg.putExtra(Intent.EXTRA_CC, recipientsCc);
msg.setType("text/plain");
context.startActivity(Intent.createChooser(msg, context.getString(R.string.send_alternate_chooser_title)));
} catch (MessagingException me) {
Log.e(K9.LOG_TAG, "Unable to send email through alternate program", me);
}
}
});
}
/**
 * Checks mail for one or multiple accounts. If account is null, all available accounts
 * are checked.
 *
 * @param context Context used to access preferences and system services.
 * @param account The account to check, or {@code null} to check all available accounts.
 * @param ignoreLastCheckedTime If {@code true}, folders are synchronized even if they were
 *        checked within the account's polling interval.
 * @param useManualWakeLock If {@code true}, a partial wake lock is held while the check runs.
 * @param listener Listener to notify of progress, may be {@code null}.
 */
public void checkMail(final Context context, final Account account,
final boolean ignoreLastCheckedTime,
final boolean useManualWakeLock,
final MessagingListener listener) {
TracingWakeLock twakeLock = null;
if (useManualWakeLock) {
TracingPowerManager pm = TracingPowerManager.getPowerManager(context);
twakeLock = pm.newWakeLock(PowerManager.PARTIAL_WAKE_LOCK, "K9 MessagingController.checkMail");
twakeLock.setReferenceCounted(false);
twakeLock.acquire(K9.MANUAL_WAKE_LOCK_TIMEOUT);
}
final TracingWakeLock wakeLock = twakeLock;
for (MessagingListener l : getListeners()) {
l.checkMailStarted(context, account);
}
putBackground("checkMail", listener, new Runnable() {
@Override
public void run() {
try {
if (K9.DEBUG)
Log.i(K9.LOG_TAG, "Starting mail check");
Preferences prefs = Preferences.getPreferences(context);
Collection<Account> accounts;
if (account != null) {
accounts = new ArrayList<Account>(1);
accounts.add(account);
} else {
accounts = prefs.getAvailableAccounts();
}
for (final Account account : accounts) {
checkMailForAccount(context, account, ignoreLastCheckedTime, prefs, listener);
}
} catch (Exception e) {
Log.e(K9.LOG_TAG, "Unable to synchronize mail", e);
addErrorMessage(account, null, e);
}
putBackground("finalize sync", null, new Runnable() {
@Override
public void run() {
if (K9.DEBUG)
Log.i(K9.LOG_TAG, "Finished mail sync");
if (wakeLock != null) {
wakeLock.release();
}
for (MessagingListener l : getListeners()) {
l.checkMailFinished(context, account);
}
}
}
);
}
});
}
private void checkMailForAccount(final Context context, final Account account,
final boolean ignoreLastCheckedTime,
final Preferences prefs,
final MessagingListener listener) {
if (!account.isAvailable(context)) {
if (K9.DEBUG) {
Log.i(K9.LOG_TAG, "Skipping synchronizing unavailable account " + account.getDescription());
}
return;
}
final long accountInterval = account.getAutomaticCheckIntervalMinutes() * 60 * 1000;
if (!ignoreLastCheckedTime && accountInterval <= 0) {
if (K9.DEBUG)
Log.i(K9.LOG_TAG, "Skipping synchronizing account " + account.getDescription());
return;
}
if (K9.DEBUG)
Log.i(K9.LOG_TAG, "Synchronizing account " + account.getDescription());
account.setRingNotified(false);
sendPendingMessages(account, listener);
try {
Account.FolderMode aDisplayMode = account.getFolderDisplayMode();
Account.FolderMode aSyncMode = account.getFolderSyncMode();
Store localStore = account.getLocalStore();
for (final Folder folder : localStore.getPersonalNamespaces(false)) {
folder.open(Folder.OPEN_MODE_RW);
Folder.FolderClass fDisplayClass = folder.getDisplayClass();
Folder.FolderClass fSyncClass = folder.getSyncClass();
if (modeMismatch(aDisplayMode, fDisplayClass)) {
// Never sync a folder that isn't displayed
/*
if (K9.DEBUG)
Log.v(K9.LOG_TAG, "Not syncing folder " + folder.getName() +
" which is in display mode " + fDisplayClass + " while account is in display mode " + aDisplayMode);
*/
continue;
}
if (modeMismatch(aSyncMode, fSyncClass)) {
// Do not sync folders in the wrong class
/*
if (K9.DEBUG)
Log.v(K9.LOG_TAG, "Not syncing folder " + folder.getName() +
" which is in sync mode " + fSyncClass + " while account is in sync mode " + aSyncMode);
*/
continue;
}
synchronizeFolder(account, folder, ignoreLastCheckedTime, accountInterval, listener);
}
} catch (MessagingException e) {
Log.e(K9.LOG_TAG, "Unable to synchronize account " + account.getName(), e);
addErrorMessage(account, null, e);
} finally {
putBackground("clear notification flag for " + account.getDescription(), null, new Runnable() {
@Override
public void run() {
if (K9.DEBUG)
Log.v(K9.LOG_TAG, "Clearing notification flag for " + account.getDescription());
account.setRingNotified(false);
try {
AccountStats stats = account.getStats(context);
if (stats == null || stats.unreadMessageCount == 0) {
notifyAccountCancel(context, account);
}
} catch (MessagingException e) {
Log.e(K9.LOG_TAG, "Unable to getUnreadMessageCount for account: " + account, e);
}
}
}
);
}
}
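/**
 * Schedules a background synchronization of a single folder, skipping folders that were
 * already checked within the account's polling interval unless {@code ignoreLastCheckedTime}
 * is set. The recency check is repeated on the freshly opened local folder because multiple
 * sync commands for the same folder may have been queued.
 */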
private void synchronizeFolder(
final Account account,
final Folder folder,
final boolean ignoreLastCheckedTime,
final long accountInterval,
final MessagingListener listener) {
if (K9.DEBUG)
Log.v(K9.LOG_TAG, "Folder " + folder.getName() + " was last synced @ " +
new Date(folder.getLastChecked()));
if (!ignoreLastCheckedTime && folder.getLastChecked() >
(System.currentTimeMillis() - accountInterval)) {
if (K9.DEBUG)
Log.v(K9.LOG_TAG, "Not syncing folder " + folder.getName()
+ ", previously synced @ " + new Date(folder.getLastChecked())
+ " which would be too recent for the account period");
return;
}
putBackground("sync" + folder.getName(), null, new Runnable() {
@Override
public void run() {
LocalFolder tLocalFolder = null;
try {
// In case multiple Commands get enqueued, don't run more than
// once
final LocalStore localStore = account.getLocalStore();
tLocalFolder = localStore.getFolder(folder.getName());
tLocalFolder.open(Folder.OPEN_MODE_RW);
if (!ignoreLastCheckedTime && tLocalFolder.getLastChecked() >
(System.currentTimeMillis() - accountInterval)) {
if (K9.DEBUG)
Log.v(K9.LOG_TAG, "Not running Command for folder " + folder.getName()
+ ", previously synced @ " + new Date(folder.getLastChecked())
+ " which would be too recent for the account period");
return;
}
notifyFetchingMail(account, folder);
try {
synchronizeMailboxSynchronous(account, folder.getName(), listener, null);
} finally {
notifyFetchingMailCancel(account);
}
} catch (Exception e) {
Log.e(K9.LOG_TAG, "Exception while processing folder " +
account.getDescription() + ":" + folder.getName(), e);
addErrorMessage(account, null, e);
} finally {
closeFolder(tLocalFolder);
}
}
}
);
}
public void compact(final Account account, final MessagingListener ml) {
putBackground("compact:" + account.getDescription(), ml, new Runnable() {
@Override
public void run() {
try {
LocalStore localStore = account.getLocalStore();
long oldSize = localStore.getSize();
localStore.compact();
long newSize = localStore.getSize();
for (MessagingListener l : getListeners(ml)) {
l.accountSizeChanged(account, oldSize, newSize);
}
} catch (UnavailableStorageException e) {
Log.i(K9.LOG_TAG, "Failed to compact account because storage is not available - trying again later.");
throw new UnavailableAccountException(e);
} catch (Exception e) {
Log.e(K9.LOG_TAG, "Failed to compact account " + account.getDescription(), e);
}
}
});
}
public void clear(final Account account, final MessagingListener ml) {
putBackground("clear:" + account.getDescription(), ml, new Runnable() {
@Override
public void run() {
try {
LocalStore localStore = account.getLocalStore();
long oldSize = localStore.getSize();
localStore.clear();
localStore.resetVisibleLimits(account.getDisplayCount());
long newSize = localStore.getSize();
AccountStats stats = new AccountStats();
stats.size = newSize;
stats.unreadMessageCount = 0;
stats.flaggedMessageCount = 0;
for (MessagingListener l : getListeners(ml)) {
l.accountSizeChanged(account, oldSize, newSize);
l.accountStatusChanged(account, stats);
}
} catch (UnavailableStorageException e) {
Log.i(K9.LOG_TAG, "Failed to clear account because storage is not available - trying again later.");
throw new UnavailableAccountException(e);
} catch (Exception e) {
Log.e(K9.LOG_TAG, "Failed to clear account " + account.getDescription(), e);
}
}
});
}
public void recreate(final Account account, final MessagingListener ml) {
putBackground("recreate:" + account.getDescription(), ml, new Runnable() {
@Override
public void run() {
try {
LocalStore localStore = account.getLocalStore();
long oldSize = localStore.getSize();
localStore.recreate();
localStore.resetVisibleLimits(account.getDisplayCount());
long newSize = localStore.getSize();
AccountStats stats = new AccountStats();
stats.size = newSize;
stats.unreadMessageCount = 0;
stats.flaggedMessageCount = 0;
for (MessagingListener l : getListeners(ml)) {
l.accountSizeChanged(account, oldSize, newSize);
l.accountStatusChanged(account, stats);
}
} catch (UnavailableStorageException e) {
Log.i(K9.LOG_TAG, "Failed to recreate an account because storage is not available - trying again later.");
throw new UnavailableAccountException(e);
} catch (Exception e) {
Log.e(K9.LOG_TAG, "Failed to recreate account " + account.getDescription(), e);
}
}
});
}
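/**
 * Decides whether a newly arrived message should trigger a notification. Messages are
 * filtered out when the account has no name yet (initial setup), notifications are disabled,
 * the message is already read, its folder is hidden or excluded by the notify mode, it is an
 * old POP3 message, it sits in Trash/Drafts/Spam/Sent, its UID is not newer than the last
 * seen UID, or it was sent by one of the user's own identities and self-notify is off.
 */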
private boolean shouldNotifyForMessage(Account account, LocalFolder localFolder, Message message) {
// If we don't even have an account name, don't show the notification.
// (This happens during initial account setup)
if (account.getName() == null) {
return false;
}
// Do not notify if the user does not have notifications enabled or if the message has
// been read.
if (!account.isNotifyNewMail() || message.isSet(Flag.SEEN)) {
return false;
}
Account.FolderMode aDisplayMode = account.getFolderDisplayMode();
Account.FolderMode aNotifyMode = account.getFolderNotifyNewMailMode();
Folder.FolderClass fDisplayClass = localFolder.getDisplayClass();
Folder.FolderClass fNotifyClass = localFolder.getNotifyClass();
if (modeMismatch(aDisplayMode, fDisplayClass)) {
// Never notify a folder that isn't displayed
return false;
}
if (modeMismatch(aNotifyMode, fNotifyClass)) {
// Do not notify folders in the wrong class
return false;
}
// If the account is a POP3 account and the message is older than the oldest message we've
// previously seen, then don't notify about it.
if (account.getStoreUri().startsWith("pop3") &&
message.olderThan(new Date(account.getLatestOldMessageSeenTime()))) {
return false;
}
// No notification for new messages in Trash, Drafts, Spam or Sent folder.
// But do notify if it's the INBOX (see issue 1817).
Folder folder = message.getFolder();
if (folder != null) {
String folderName = folder.getName();
if (!account.getInboxFolderName().equals(folderName) &&
(account.getTrashFolderName().equals(folderName)
|| account.getDraftsFolderName().equals(folderName)
|| account.getSpamFolderName().equals(folderName)
|| account.getSentFolderName().equals(folderName))) {
return false;
}
}
if (message.getUid() != null && localFolder.getLastUid() != null) {
try {
Integer messageUid = Integer.parseInt(message.getUid());
if (messageUid <= localFolder.getLastUid()) {
if (K9.DEBUG)
Log.d(K9.LOG_TAG, "Message uid is " + messageUid + ", max message uid is " +
localFolder.getLastUid() + ". Skipping notification.");
return false;
}
} catch (NumberFormatException e) {
// Nothing to be done here.
}
}
// Don't notify if the sender address matches one of our identities and the user chose not
// to be notified for such messages.
if (account.isAnIdentity(message.getFrom()) && !account.isNotifySelfNewMail()) {
return false;
}
return true;
}
/**
 * Get the pending notification data for an account.
 * See {@link NotificationData}.
 *
 * @param account The account to retrieve the pending data for.
 * @param previousUnreadMessageCount The unread message count before the new messages arrived,
 * used to initialize the data if none exists yet. If passed as {@code null}, no new
 * instance is created when none exists.
 * @return A pending data instance, or {@code null} if one doesn't exist and
 * previousUnreadMessageCount was passed as {@code null}.
 */
private NotificationData getNotificationData(Account account, Integer previousUnreadMessageCount) {
NotificationData data;
synchronized (notificationData) {
data = notificationData.get(account.getAccountNumber());
if (data == null && previousUnreadMessageCount != null) {
data = new NotificationData(previousUnreadMessageCount);
notificationData.put(account.getAccountNumber(), data);
}
}
return data;
}
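/**
 * Builds the sender line for a notification: the first "from" address, or the first "to"
 * recipient prefixed with "To:" when the message was sent from one of the user's own
 * identities. Returns {@code null} if no sender information is available.
 */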
private CharSequence getMessageSender(Context context, Account account, Message message) {
try {
boolean isSelf = false;
final Contacts contacts = K9.showContactName() ? Contacts.getInstance(context) : null;
final Address[] fromAddrs = message.getFrom();
if (fromAddrs != null) {
isSelf = account.isAnIdentity(fromAddrs);
if (!isSelf && fromAddrs.length > 0) {
return MessageHelper.toFriendly(fromAddrs[0], contacts).toString();
}
}
if (isSelf) {
// show To: if the message was sent from me
Address[] rcpts = message.getRecipients(Message.RecipientType.TO);
if (rcpts != null && rcpts.length > 0) {
return context.getString(R.string.message_to_fmt,
MessageHelper.toFriendly(rcpts[0], contacts).toString());
}
return context.getString(R.string.general_no_sender);
}
} catch (MessagingException e) {
Log.e(K9.LOG_TAG, "Unable to get sender information for notification.", e);
}
return null;
}
private CharSequence getMessageSubject(Context context, Message message) {
String subject = message.getSubject();
if (!TextUtils.isEmpty(subject)) {
return subject;
}
return context.getString(R.string.general_no_subject);
}
private static TextAppearanceSpan sEmphasizedSpan;
private TextAppearanceSpan getEmphasizedSpan(Context context) {
if (sEmphasizedSpan == null) {
sEmphasizedSpan = new TextAppearanceSpan(context,
R.style.TextAppearance_StatusBar_EventContent_Emphasized);
}
return sEmphasizedSpan;
}
private CharSequence getMessagePreview(Context context, Message message) {
CharSequence subject = getMessageSubject(context, message);
String snippet = message.getPreview();
if (TextUtils.isEmpty(subject)) {
return snippet;
} else if (TextUtils.isEmpty(snippet)) {
return subject;
}
SpannableStringBuilder preview = new SpannableStringBuilder();
preview.append(subject);
preview.append('\n');
preview.append(snippet);
preview.setSpan(getEmphasizedSpan(context), 0, subject.length(), 0);
return preview;
}
private CharSequence buildMessageSummary(Context context, CharSequence sender, CharSequence subject) {
if (sender == null) {
return subject;
}
SpannableStringBuilder summary = new SpannableStringBuilder();
summary.append(sender);
summary.append(" ");
summary.append(subject);
summary.setSpan(getEmphasizedSpan(context), 0, sender.length(), 0);
return summary;
}
public static boolean platformSupportsExtendedNotifications() {
// Extended notification styles (inbox/big text) are supported starting with Jelly Bean (API 16)
// TODO: use Build.VERSION_CODES.JELLY_BEAN once target SDK is set to >= 16
return Build.VERSION.SDK_INT >= 16;
}
public static boolean platformSupportsLockScreenNotifications() {
return Build.VERSION.SDK_INT >= Build.VERSION_CODES.LOLLIPOP;
}
private LocalMessage findNewestMessageForNotificationLocked(Context context, NotificationData data) {
if (!data.messages.isEmpty()) {
return data.messages.getFirst();
}
if (!data.droppedMessages.isEmpty()) {
return data.droppedMessages.getFirst().restoreToLocalMessage(context);
}
return null;
}
/**
* Creates a notification of a newly received message.
*/
private void notifyAccount(Context context, Account account,
LocalMessage message, int previousUnreadMessageCount) {
final NotificationData data = getNotificationData(account, previousUnreadMessageCount);
synchronized (data) {
notifyAccountWithDataLocked(context, account, message, data);
}
}
// Maximum number of senders to display in a lock screen notification.
private static final int NUM_SENDERS_IN_LOCK_SCREEN_NOTIFICATION = 5;
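/**
 * Builds and posts the new-mail notification for an account. Must be called while holding
 * the lock on {@code data}. If {@code message} is null the notification is refreshed
 * silently from the already recorded messages, or cancelled when none remain.
 */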
private void notifyAccountWithDataLocked(Context context, Account account,
LocalMessage message, NotificationData data) {
boolean updateSilently = false;
if (message == null) {
/* this can happen if a message we previously notified for is read or deleted remotely */
message = findNewestMessageForNotificationLocked(context, data);
updateSilently = true;
if (message == null) {
// seemingly both the message list and the overflow list are empty;
// it is probably a good idea to cancel the notification in that case
notifyAccountCancel(context, account);
return;
}
} else {
data.addMessage(message);
}
final KeyguardManager keyguardService = (KeyguardManager) context.getSystemService(Context.KEYGUARD_SERVICE);
final CharSequence sender = getMessageSender(context, account, message);
final CharSequence subject = getMessageSubject(context, message);
CharSequence summary = buildMessageSummary(context, sender, subject);
boolean privacyModeEnabled =
(K9.getNotificationHideSubject() == NotificationHideSubject.ALWAYS) ||
(K9.getNotificationHideSubject() == NotificationHideSubject.WHEN_LOCKED &&
keyguardService.inKeyguardRestrictedInputMode());
if (privacyModeEnabled || summary.length() == 0) {
summary = context.getString(R.string.notification_new_title);
}
NotificationManager notifMgr =
(NotificationManager) context.getSystemService(Context.NOTIFICATION_SERVICE);
NotificationCompat.Builder builder = new NotificationCompat.Builder(context);
builder.setSmallIcon(R.drawable.ic_notify_new_mail);
builder.setWhen(System.currentTimeMillis());
if (!updateSilently) {
builder.setTicker(summary);
}
final int newMessages = data.getNewMessageCount();
final int unreadCount = data.unreadBeforeNotification + newMessages;
builder.setNumber(unreadCount);
String accountDescr = (account.getDescription() != null) ?
account.getDescription() : account.getEmail();
final ArrayList<MessageReference> allRefs = new ArrayList<MessageReference>();
data.supplyAllMessageRefs(allRefs);
if (platformSupportsExtendedNotifications() && !privacyModeEnabled) {
if (newMessages > 1) {
// multiple messages pending, show inbox style
NotificationCompat.InboxStyle style = new NotificationCompat.InboxStyle(builder);
for (Message m : data.messages) {
style.addLine(buildMessageSummary(context,
getMessageSender(context, account, m),
getMessageSubject(context, m)));
}
if (!data.droppedMessages.isEmpty()) {
style.setSummaryText(context.getString(R.string.notification_additional_messages,
data.droppedMessages.size(), accountDescr));
}
final String title = context.getResources().getQuantityString(
R.plurals.notification_new_messages_title, newMessages, newMessages);
style.setBigContentTitle(title);
builder.setContentTitle(title);
builder.setSubText(accountDescr);
builder.setStyle(style);
} else {
// single message pending, show big text
NotificationCompat.BigTextStyle style = new NotificationCompat.BigTextStyle(builder);
CharSequence preview = getMessagePreview(context, message);
if (preview != null) {
style.bigText(preview);
}
builder.setContentText(subject);
builder.setSubText(accountDescr);
builder.setContentTitle(sender);
builder.setStyle(style);
builder.addAction(
platformSupportsLockScreenNotifications()
? R.drawable.ic_action_single_message_options_dark_vector
: R.drawable.ic_action_single_message_options_dark,
context.getString(R.string.notification_action_reply),
NotificationActionService.getReplyIntent(context, account, message.makeMessageReference()));
}
// Mark Read on phone
builder.addAction(
platformSupportsLockScreenNotifications()
? R.drawable.ic_action_mark_as_read_dark_vector
: R.drawable.ic_action_mark_as_read_dark,
context.getString(R.string.notification_action_mark_as_read),
NotificationActionService.getReadAllMessagesIntent(context, account, allRefs));
NotificationQuickDelete deleteOption = K9.getNotificationQuickDeleteBehaviour();
boolean showDeleteAction = deleteOption == NotificationQuickDelete.ALWAYS ||
(deleteOption == NotificationQuickDelete.FOR_SINGLE_MSG && newMessages == 1);
NotificationCompat.WearableExtender wearableExtender = new NotificationCompat.WearableExtender();
if (showDeleteAction) {
// we need to pass the action directly to the activity, otherwise the
// status bar won't be pulled up and we won't see the confirmation (if used)
// Delete on phone
builder.addAction(
platformSupportsLockScreenNotifications()
? R.drawable.ic_action_delete_dark_vector
: R.drawable.ic_action_delete_dark,
context.getString(R.string.notification_action_delete),
NotificationDeleteConfirmation.getIntent(context, account, allRefs));
// Delete on wear only if no confirmation is required
if (!K9.confirmDeleteFromNotification()) {
NotificationCompat.Action wearActionDelete =
new NotificationCompat.Action.Builder(
R.drawable.ic_action_delete_dark,
context.getString(R.string.notification_action_delete),
NotificationDeleteConfirmation.getIntent(context, account, allRefs))
.build();
builder.extend(wearableExtender.addAction(wearActionDelete));
}
}
if (NotificationActionService.isArchiveAllMessagesWearAvaliable(context, account, data.messages)) {
// Archive on wear
NotificationCompat.Action wearActionArchive =
new NotificationCompat.Action.Builder(
R.drawable.ic_action_delete_dark,
context.getString(R.string.notification_action_archive),
NotificationActionService.getArchiveAllMessagesIntent(context, account, allRefs))
.build();
builder.extend(wearableExtender.addAction(wearActionArchive));
}
if (NotificationActionService.isSpamAllMessagesWearAvaliable(context, account, data.messages)) {
// Spam on wear
NotificationCompat.Action wearActionSpam =
new NotificationCompat.Action.Builder(
R.drawable.ic_action_delete_dark,
context.getString(R.string.notification_action_spam),
NotificationActionService.getSpamAllMessagesIntent(context, account, allRefs))
.build();
builder.extend(wearableExtender.addAction(wearActionSpam));
}
} else {
String accountNotice = context.getString(R.string.notification_new_one_account_fmt,
unreadCount, accountDescr);
builder.setContentTitle(accountNotice);
builder.setContentText(summary);
}
for (Message m : data.messages) {
if (m.isSet(Flag.FLAGGED)) {
builder.setPriority(NotificationCompat.PRIORITY_HIGH);
break;
}
}
TaskStackBuilder stack;
boolean treatAsSingleMessageNotification;
if (platformSupportsExtendedNotifications()) {
// in the new-style notifications, we focus on the new messages, not the unread ones
treatAsSingleMessageNotification = newMessages == 1;
} else {
// in the old-style notifications, we focus on unread messages, as we don't have a
// good way to express the new message count
treatAsSingleMessageNotification = unreadCount == 1;
}
if (treatAsSingleMessageNotification) {
stack = buildMessageViewBackStack(context, message.makeMessageReference());
} else if (account.goToUnreadMessageSearch()) {
stack = buildUnreadBackStack(context, account);
} else {
String initialFolder = message.getFolder().getName();
/* only go to folder if all messages are in the same folder, else go to folder list */
for (MessageReference ref : allRefs) {
if (!TextUtils.equals(initialFolder, ref.getFolderName())) {
initialFolder = null;
break;
}
}
stack = buildMessageListBackStack(context, account, initialFolder);
}
builder.setContentIntent(stack.getPendingIntent(
account.getAccountNumber(),
PendingIntent.FLAG_CANCEL_CURRENT | PendingIntent.FLAG_ONE_SHOT));
builder.setDeleteIntent(NotificationActionService.getAcknowledgeIntent(context, account));
// Only ring or vibrate if we have not done so already on this account and fetch
boolean ringAndVibrate = false;
if (!updateSilently && !account.isRingNotified()) {
account.setRingNotified(true);
ringAndVibrate = true;
}
NotificationSetting n = account.getNotificationSetting();
configureLockScreenNotification(builder, context, account, newMessages, unreadCount, accountDescr, sender, data.messages);
configureNotification(
builder,
(n.shouldRing()) ? n.getRingtone() : null,
(n.shouldVibrate()) ? n.getVibration() : null,
(n.isLed()) ? Integer.valueOf(n.getLedColor()) : null,
K9.NOTIFICATION_LED_BLINK_SLOW,
ringAndVibrate);
notifMgr.notify(account.getAccountNumber(), builder.build());
}
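/*
 * The builders below assemble the activity back stack behind a notification tap:
 * Accounts -> FolderList -> MessageList -> MessageView, skipping the accounts screen when
 * only one account exists and the folder list when targeting the auto-expand folder.
 */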
private TaskStackBuilder buildAccountsBackStack(Context context) {
TaskStackBuilder stack = TaskStackBuilder.create(context);
if (!skipAccountsInBackStack(context)) {
stack.addNextIntent(new Intent(context, Accounts.class).putExtra(Accounts.EXTRA_STARTUP, false));
}
return stack;
}
private TaskStackBuilder buildFolderListBackStack(Context context, Account account) {
TaskStackBuilder stack = buildAccountsBackStack(context);
stack.addNextIntent(FolderList.actionHandleAccountIntent(context, account, false));
return stack;
}
private TaskStackBuilder buildUnreadBackStack(Context context, final Account account) {
TaskStackBuilder stack = buildAccountsBackStack(context);
LocalSearch search = Accounts.createUnreadSearch(context, account);
stack.addNextIntent(MessageList.intentDisplaySearch(context, search, true, false, false));
return stack;
}
private TaskStackBuilder buildMessageListBackStack(Context context, Account account, String folder) {
TaskStackBuilder stack = skipFolderListInBackStack(context, account, folder)
? buildAccountsBackStack(context)
: buildFolderListBackStack(context, account);
if (folder != null) {
LocalSearch search = new LocalSearch(folder);
search.addAllowedFolder(folder);
search.addAccountUuid(account.getUuid());
stack.addNextIntent(MessageList.intentDisplaySearch(context, search, false, true, true));
}
return stack;
}
private TaskStackBuilder buildMessageViewBackStack(Context context, MessageReference message) {
Account account = Preferences.getPreferences(context).getAccount(message.getAccountUuid());
TaskStackBuilder stack = buildMessageListBackStack(context, account, message.getFolderName());
stack.addNextIntent(MessageList.actionDisplayMessageIntent(context, message));
return stack;
}
private boolean skipFolderListInBackStack(Context context, Account account, String folder) {
return folder != null && folder.equals(account.getAutoExpandFolderName());
}
private boolean skipAccountsInBackStack(Context context) {
return Preferences.getPreferences(context).getAccounts().size() == 1;
}
/**
* Configure the notification sound and LED
*
* @param builder
* {@link NotificationCompat.Builder} instance used to configure the notification.
* Never {@code null}.
* @param ringtone
* Ringtone URI as a string. {@code null}, if no ringtone should be played.
* @param vibrationPattern
* {@code long[]} vibration pattern. {@code null}, if no vibration should be played.
* @param ledColor
* Color to flash LED. {@code null}, if no LED flash should happen.
* @param ledSpeed
* Either {@link K9#NOTIFICATION_LED_BLINK_SLOW} or
* {@link K9#NOTIFICATION_LED_BLINK_FAST}.
* @param ringAndVibrate
* {@code true}, if ringtone/vibration are allowed. {@code false}, otherwise.
*/
private void configureNotification(NotificationCompat.Builder builder, String ringtone,
long[] vibrationPattern, Integer ledColor, int ledSpeed, boolean ringAndVibrate) {
// if it's quiet time, then we shouldn't be ringing, buzzing or flashing
if (K9.isQuietTime()) {
return;
}
if (ringAndVibrate) {
if (!TextUtils.isEmpty(ringtone)) {
builder.setSound(Uri.parse(ringtone));
}
if (vibrationPattern != null) {
builder.setVibrate(vibrationPattern);
}
}
if (ledColor != null) {
int ledOnMS;
int ledOffMS;
if (ledSpeed == K9.NOTIFICATION_LED_BLINK_SLOW) {
ledOnMS = K9.NOTIFICATION_LED_ON_TIME;
ledOffMS = K9.NOTIFICATION_LED_OFF_TIME;
} else {
ledOnMS = K9.NOTIFICATION_LED_FAST_ON_TIME;
ledOffMS = K9.NOTIFICATION_LED_FAST_OFF_TIME;
}
builder.setLights(ledColor, ledOnMS, ledOffMS);
}
}
/**
* Configure lock screen notifications on platforms that support it
*
* @param builder Unlocked notification
* @param context Context
* @param account Account being notified
* @param newMessages Number of new messages being notified for
* @param unreadCount Total number of unread messages in this account
* @param accountDescription Formatted account name for display
* @param formattedSender Formatted sender name for display
* @param messages List of messages if notifying for multiple messages. Null otherwise.
*/
private void configureLockScreenNotification(NotificationCompat.Builder builder,
Context context,
Account account,
int newMessages,
int unreadCount,
CharSequence accountDescription,
CharSequence formattedSender,
List<? extends Message> messages) {
if (!platformSupportsLockScreenNotifications()) {
return;
}
builder.setSmallIcon(R.drawable.ic_notify_new_mail_vector);
builder.setColor(account.getChipColor());
NotificationCompat.Builder publicNotification = new NotificationCompat.Builder(context);
publicNotification.setSmallIcon(R.drawable.ic_notify_new_mail_vector);
publicNotification.setColor(account.getChipColor());
publicNotification.setNumber(unreadCount);
final String title = context.getResources().getQuantityString(
R.plurals.notification_new_messages_title, newMessages, newMessages);
publicNotification.setContentTitle(title);
switch (K9.getLockScreenNotificationVisibility()) {
case NOTHING:
builder.setVisibility(NotificationCompat.VISIBILITY_SECRET);
break;
case APP_NAME:
// This is the Android default, but we should be explicit in case that changes in the future.
builder.setVisibility(NotificationCompat.VISIBILITY_PRIVATE);
break;
case SENDERS:
if (newMessages == 1) {
publicNotification.setContentText(formattedSender);
} else {
// Use a LinkedHashSet so that we preserve ordering (newest to oldest), but still remove duplicates
Set<CharSequence> senders = new LinkedHashSet<CharSequence>(NUM_SENDERS_IN_LOCK_SCREEN_NOTIFICATION);
for (Message message : messages) {
senders.add(getMessageSender(context, account, message));
if (senders.size() == NUM_SENDERS_IN_LOCK_SCREEN_NOTIFICATION) {
break;
}
}
publicNotification.setContentText(TextUtils.join(", ", senders));
}
builder.setPublicVersion(publicNotification.build());
break;
case EVERYTHING:
builder.setVisibility(NotificationCompat.VISIBILITY_PUBLIC);
break;
case MESSAGE_COUNT:
default:
publicNotification.setContentText(accountDescription);
builder.setPublicVersion(publicNotification.build());
break;
}
}
/** Cancel a notification of new email messages */
public void notifyAccountCancel(Context context, Account account) {
NotificationManager notifMgr =
(NotificationManager)context.getSystemService(Context.NOTIFICATION_SERVICE);
notifMgr.cancel(account.getAccountNumber());
notifMgr.cancel(-1000 - account.getAccountNumber());
notificationData.remove(account.getAccountNumber());
}
public void deleteAccount(Context context, Account account) {
notifyAccountCancel(context, account);
memorizingListener.removeAccount(account);
}
/**
* Save a draft message.
* @param account Account we are saving for.
* @param message Message to save.
* @return Message representing the entry in the local store.
*/
public Message saveDraft(final Account account, final Message message, long existingDraftId) {
Message localMessage = null;
try {
LocalStore localStore = account.getLocalStore();
LocalFolder localFolder = localStore.getFolder(account.getDraftsFolderName());
localFolder.open(Folder.OPEN_MODE_RW);
if (existingDraftId != INVALID_MESSAGE_ID) {
String uid = localFolder.getMessageUidById(existingDraftId);
message.setUid(uid);
}
// Save the message to the store.
localFolder.appendMessages(Collections.singletonList(message));
// Fetch the message back from the store. This is the Message that's returned to the caller.
localMessage = localFolder.getMessage(message.getUid());
localMessage.setFlag(Flag.X_DOWNLOADED_FULL, true);
PendingCommand command = new PendingCommand();
command.command = PENDING_COMMAND_APPEND;
command.arguments = new String[] {
localFolder.getName(),
localMessage.getUid()
};
queuePendingCommand(account, command);
processPendingCommands(account);
} catch (MessagingException e) {
Log.e(K9.LOG_TAG, "Unable to save message as draft.", e);
addErrorMessage(account, null, e);
}
return localMessage;
}
public long getId(Message message) {
long id;
if (message instanceof LocalMessage) {
id = ((LocalMessage) message).getId();
} else {
Log.w(K9.LOG_TAG, "MessagingController.getId() called without a LocalMessage");
id = INVALID_MESSAGE_ID;
}
return id;
}
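/**
 * Returns {@code true} if a folder of class {@code fMode} should be excluded under the
 * account-level folder mode {@code aMode}; for example, a second-class folder is a mismatch
 * when the account mode is FIRST_CLASS or NOT_SECOND_CLASS.
 */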
public boolean modeMismatch(Account.FolderMode aMode, Folder.FolderClass fMode) {
if (aMode == Account.FolderMode.NONE
|| (aMode == Account.FolderMode.FIRST_CLASS &&
fMode != Folder.FolderClass.FIRST_CLASS)
|| (aMode == Account.FolderMode.FIRST_AND_SECOND_CLASS &&
fMode != Folder.FolderClass.FIRST_CLASS &&
fMode != Folder.FolderClass.SECOND_CLASS)
|| (aMode == Account.FolderMode.NOT_SECOND_CLASS &&
fMode == Folder.FolderClass.SECOND_CLASS)) {
return true;
} else {
return false;
}
}
static AtomicInteger sequencing = new AtomicInteger(0);
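/**
 * A queued unit of work. Foreground commands sort before background commands; within the
 * same priority, commands run in FIFO order based on their sequence number.
 */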
static class Command implements Comparable<Command> {
public Runnable runnable;
public MessagingListener listener;
public String description;
boolean isForeground;
int sequence = sequencing.getAndIncrement();
@Override
public int compareTo(Command other) {
if (other.isForeground && !isForeground) {
return 1;
} else if (!other.isForeground && isForeground) {
return -1;
} else {
// Compare explicitly instead of subtracting, which could overflow for large sequence gaps
return (sequence < other.sequence) ? -1 : ((sequence == other.sequence) ? 0 : 1);
}
}
}
public MessagingListener getCheckMailListener() {
return checkMailListener;
}
public void setCheckMailListener(MessagingListener checkMailListener) {
if (this.checkMailListener != null) {
removeListener(this.checkMailListener);
}
this.checkMailListener = checkMailListener;
if (this.checkMailListener != null) {
addListener(this.checkMailListener);
}
}
public Collection<Pusher> getPushers() {
return pushers.values();
}
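/**
 * (Re)configures push for an account: stops any existing pusher, collects the folders that
 * qualify under the account's display and push folder modes (capped at the account's max
 * push folder count), and starts a new pusher. Returns {@code false} if no folders qualify,
 * the remote store is not push capable, or setup fails.
 */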
public boolean setupPushing(final Account account) {
try {
Pusher previousPusher = pushers.remove(account);
if (previousPusher != null) {
previousPusher.stop();
}
Account.FolderMode aDisplayMode = account.getFolderDisplayMode();
Account.FolderMode aPushMode = account.getFolderPushMode();
List<String> names = new ArrayList<String>();
Store localStore = account.getLocalStore();
for (final Folder folder : localStore.getPersonalNamespaces(false)) {
if (folder.getName().equals(account.getErrorFolderName())
|| folder.getName().equals(account.getOutboxFolderName())) {
/*
if (K9.DEBUG)
Log.v(K9.LOG_TAG, "Not pushing folder " + folder.getName() +
" which should never be pushed");
*/
continue;
}
folder.open(Folder.OPEN_MODE_RW);
Folder.FolderClass fDisplayClass = folder.getDisplayClass();
Folder.FolderClass fPushClass = folder.getPushClass();
if (modeMismatch(aDisplayMode, fDisplayClass)) {
// Never push a folder that isn't displayed
/*
if (K9.DEBUG)
Log.v(K9.LOG_TAG, "Not pushing folder " + folder.getName() +
" which is in display class " + fDisplayClass + " while account is in display mode " + aDisplayMode);
*/
continue;
}
if (modeMismatch(aPushMode, fPushClass)) {
// Do not push folders in the wrong class
/*
if (K9.DEBUG)
Log.v(K9.LOG_TAG, "Not pushing folder " + folder.getName() +
" which is in push mode " + fPushClass + " while account is in push mode " + aPushMode);
*/
continue;
}
if (K9.DEBUG)
Log.i(K9.LOG_TAG, "Starting pusher for " + account.getDescription() + ":" + folder.getName());
names.add(folder.getName());
}
if (!names.isEmpty()) {
PushReceiver receiver = new MessagingControllerPushReceiver(context, account, this);
int maxPushFolders = account.getMaxPushFolders();
if (names.size() > maxPushFolders) {
if (K9.DEBUG)
Log.i(K9.LOG_TAG, "Count of folders to push for account " + account.getDescription() + " is " + names.size()
+ ", greater than limit of " + maxPushFolders + ", truncating");
names = names.subList(0, maxPushFolders);
}
try {
Store store = account.getRemoteStore();
if (!store.isPushCapable()) {
if (K9.DEBUG)
Log.i(K9.LOG_TAG, "Account " + account.getDescription() + " is not push capable, skipping");
return false;
}
Pusher pusher = store.getPusher(receiver);
if (pusher != null) {
Pusher oldPusher = pushers.putIfAbsent(account, pusher);
if (oldPusher == null) {
pusher.start(names);
}
}
} catch (Exception e) {
Log.e(K9.LOG_TAG, "Could not get remote store", e);
return false;
}
return true;
} else {
if (K9.DEBUG)
Log.i(K9.LOG_TAG, "No folders are configured for pushing in account " + account.getDescription());
return false;
}
} catch (Exception e) {
Log.e(K9.LOG_TAG, "Got exception while setting up pushing", e);
}
return false;
}
public void stopAllPushing() {
if (K9.DEBUG)
Log.i(K9.LOG_TAG, "Stopping all pushers");
Iterator<Pusher> iter = pushers.values().iterator();
while (iter.hasNext()) {
Pusher pusher = iter.next();
iter.remove();
pusher.stop();
}
}
public void messagesArrived(final Account account, final Folder remoteFolder, final List<Message> messages, final boolean flagSyncOnly) {
if (K9.DEBUG)
Log.i(K9.LOG_TAG, "Got new pushed email messages for account " + account.getDescription()
+ ", folder " + remoteFolder.getName());
final CountDownLatch latch = new CountDownLatch(1);
putBackground("Push messageArrived of account " + account.getDescription()
+ ", folder " + remoteFolder.getName(), null, new Runnable() {
@Override
public void run() {
LocalFolder localFolder = null;
try {
LocalStore localStore = account.getLocalStore();
localFolder = localStore.getFolder(remoteFolder.getName());
localFolder.open(Folder.OPEN_MODE_RW);
account.setRingNotified(false);
int newCount = downloadMessages(account, remoteFolder, localFolder, messages, flagSyncOnly);
int unreadMessageCount = localFolder.getUnreadMessageCount();
localFolder.setLastPush(System.currentTimeMillis());
localFolder.setStatus(null);
if (K9.DEBUG)
Log.i(K9.LOG_TAG, "messagesArrived newCount = " + newCount + ", unread count = " + unreadMessageCount);
if (unreadMessageCount == 0) {
notifyAccountCancel(context, account);
}
for (MessagingListener l : getListeners()) {
l.folderStatusChanged(account, remoteFolder.getName(), unreadMessageCount);
}
} catch (Exception e) {
String rootMessage = getRootCauseMessage(e);
String errorMessage = "Push failed: " + rootMessage;
try {
// Oddly enough, using a local variable gets rid of a
// potential null pointer access warning with Eclipse.
LocalFolder folder = localFolder;
folder.setStatus(errorMessage);
} catch (Exception se) {
Log.e(K9.LOG_TAG, "Unable to set failed status on localFolder", se);
}
for (MessagingListener l : getListeners()) {
l.synchronizeMailboxFailed(account, remoteFolder.getName(), errorMessage);
}
addErrorMessage(account, null, e);
} finally {
closeFolder(localFolder);
latch.countDown();
}
}
});
try {
latch.await();
} catch (Exception e) {
Log.e(K9.LOG_TAG, "Interrupted while awaiting latch release", e);
}
if (K9.DEBUG)
Log.i(K9.LOG_TAG, "MessagingController.messagesArrivedLatch released");
}
public void systemStatusChanged() {
for (MessagingListener l : getListeners()) {
l.systemStatusChanged();
}
}
enum MemorizingState { STARTED, FINISHED, FAILED }
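/**
 * Per-account/folder snapshot of the last known sync, send, push, and command-processing
 * state, keyed by account description and folder name.
 */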
static class Memory {
Account account;
String folderName;
MemorizingState syncingState = null;
MemorizingState sendingState = null;
MemorizingState pushingState = null;
MemorizingState processingState = null;
String failureMessage = null;
int syncingTotalMessagesInMailbox;
int syncingNumNewMessages;
int folderCompleted = 0;
int folderTotal = 0;
String processingCommandTitle = null;
Memory(Account nAccount, String nFolderName) {
account = nAccount;
folderName = nFolderName;
}
String getKey() {
return getMemoryKey(account, folderName);
}
}
static String getMemoryKey(Account taccount, String tfolderName) {
return taccount.getDescription() + ":" + tfolderName;
}
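/**
 * A listener that remembers the most recent state transitions per account and folder so a
 * freshly attached listener can be brought up to date via {@code refreshOther}.
 */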
static class MemorizingListener extends MessagingListener {
Map<String, Memory> memories = new HashMap<String, Memory>(31);
Memory getMemory(Account account, String folderName) {
Memory memory = memories.get(getMemoryKey(account, folderName));
if (memory == null) {
memory = new Memory(account, folderName);
memories.put(memory.getKey(), memory);
}
return memory;
}
synchronized void removeAccount(Account account) {
Iterator<Entry<String, Memory>> memIt = memories.entrySet().iterator();
while (memIt.hasNext()) {
Entry<String, Memory> memoryEntry = memIt.next();
String uuidForMemory = memoryEntry.getValue().account.getUuid();
if (uuidForMemory.equals(account.getUuid())) {
memIt.remove();
}
}
}
@Override
public synchronized void synchronizeMailboxStarted(Account account, String folder) {
Memory memory = getMemory(account, folder);
memory.syncingState = MemorizingState.STARTED;
memory.folderCompleted = 0;
memory.folderTotal = 0;
}
@Override
public synchronized void synchronizeMailboxFinished(Account account, String folder,
int totalMessagesInMailbox, int numNewMessages) {
Memory memory = getMemory(account, folder);
memory.syncingState = MemorizingState.FINISHED;
memory.syncingTotalMessagesInMailbox = totalMessagesInMailbox;
memory.syncingNumNewMessages = numNewMessages;
}
@Override
public synchronized void synchronizeMailboxFailed(Account account, String folder,
String message) {
Memory memory = getMemory(account, folder);
memory.syncingState = MemorizingState.FAILED;
memory.failureMessage = message;
}
synchronized void refreshOther(MessagingListener other) {
if (other != null) {
Memory syncStarted = null;
Memory sendStarted = null;
Memory processingStarted = null;
for (Memory memory : memories.values()) {
if (memory.syncingState != null) {
switch (memory.syncingState) {
case STARTED:
syncStarted = memory;
break;
case FINISHED:
other.synchronizeMailboxFinished(memory.account, memory.folderName,
memory.syncingTotalMessagesInMailbox, memory.syncingNumNewMessages);
break;
case FAILED:
other.synchronizeMailboxFailed(memory.account, memory.folderName,
memory.failureMessage);
break;
}
}
if (memory.sendingState != null) {
switch (memory.sendingState) {
case STARTED:
sendStarted = memory;
break;
case FINISHED:
other.sendPendingMessagesCompleted(memory.account);
break;
case FAILED:
other.sendPendingMessagesFailed(memory.account);
break;
}
}
if (memory.pushingState != null) {
switch (memory.pushingState) {
case STARTED:
other.setPushActive(memory.account, memory.folderName, true);
break;
case FINISHED:
other.setPushActive(memory.account, memory.folderName, false);
break;
case FAILED:
break;
}
}
if (memory.processingState != null) {
switch (memory.processingState) {
case STARTED:
processingStarted = memory;
break;
case FINISHED:
case FAILED:
other.pendingCommandsFinished(memory.account);
break;
}
}
}
Memory somethingStarted = null;
if (syncStarted != null) {
other.synchronizeMailboxStarted(syncStarted.account, syncStarted.folderName);
somethingStarted = syncStarted;
}
if (sendStarted != null) {
other.sendPendingMessagesStarted(sendStarted.account);
somethingStarted = sendStarted;
}
if (processingStarted != null) {
other.pendingCommandsProcessing(processingStarted.account);
if (processingStarted.processingCommandTitle != null) {
other.pendingCommandStarted(processingStarted.account, processingStarted.processingCommandTitle);
} else {
other.pendingCommandCompleted(processingStarted.account, processingStarted.processingCommandTitle);
}
somethingStarted = processingStarted;
}
if (somethingStarted != null && somethingStarted.folderTotal > 0) {
other.synchronizeMailboxProgress(somethingStarted.account, somethingStarted.folderName, somethingStarted.folderCompleted, somethingStarted.folderTotal);
}
}
}
@Override
public synchronized void setPushActive(Account account, String folderName, boolean active) {
Memory memory = getMemory(account, folderName);
memory.pushingState = (active ? MemorizingState.STARTED : MemorizingState.FINISHED);
}
@Override
public synchronized void sendPendingMessagesStarted(Account account) {
Memory memory = getMemory(account, null);
memory.sendingState = MemorizingState.STARTED;
memory.folderCompleted = 0;
memory.folderTotal = 0;
}
@Override
public synchronized void sendPendingMessagesCompleted(Account account) {
Memory memory = getMemory(account, null);
memory.sendingState = MemorizingState.FINISHED;
}
@Override
public synchronized void sendPendingMessagesFailed(Account account) {
Memory memory = getMemory(account, null);
memory.sendingState = MemorizingState.FAILED;
}
@Override
public synchronized void synchronizeMailboxProgress(Account account, String folderName, int completed, int total) {
Memory memory = getMemory(account, folderName);
memory.folderCompleted = completed;
memory.folderTotal = total;
}
@Override
public synchronized void pendingCommandsProcessing(Account account) {
Memory memory = getMemory(account, null);
memory.processingState = MemorizingState.STARTED;
memory.folderCompleted = 0;
memory.folderTotal = 0;
}
@Override
public synchronized void pendingCommandsFinished(Account account) {
Memory memory = getMemory(account, null);
memory.processingState = MemorizingState.FINISHED;
}
@Override
public synchronized void pendingCommandStarted(Account account, String commandTitle) {
Memory memory = getMemory(account, null);
memory.processingCommandTitle = commandTitle;
}
@Override
public synchronized void pendingCommandCompleted(Account account, String commandTitle) {
Memory memory = getMemory(account, null);
memory.processingCommandTitle = null;
}
}
private void actOnMessages(List<LocalMessage> messages, MessageActor actor) {
Map<Account, Map<Folder, List<Message>>> accountMap = new HashMap<Account, Map<Folder, List<Message>>>();
for (LocalMessage message : messages) {
if (message == null) {
continue;
}
Folder folder = message.getFolder();
Account account = message.getAccount();
Map<Folder, List<Message>> folderMap = accountMap.get(account);
if (folderMap == null) {
folderMap = new HashMap<Folder, List<Message>>();
accountMap.put(account, folderMap);
}
List<Message> messageList = folderMap.get(folder);
if (messageList == null) {
messageList = new LinkedList<Message>();
folderMap.put(folder, messageList);
}
messageList.add(message);
}
for (Map.Entry<Account, Map<Folder, List<Message>>> entry : accountMap.entrySet()) {
Account account = entry.getKey();
//account.refresh(Preferences.getPreferences(K9.app));
Map<Folder, List<Message>> folderMap = entry.getValue();
for (Map.Entry<Folder, List<Message>> folderEntry : folderMap.entrySet()) {
Folder folder = folderEntry.getKey();
List<Message> messageList = folderEntry.getValue();
actor.act(account, folder, messageList);
}
}
}
interface MessageActor {
public void act(final Account account, final Folder folder, final List<Message> messages);
}
}
| 1 | 13,070 | You want to return if it's quiet time and notifications during quiet time are **not** enabled. So this is either a logic error or it's bad naming of the setting/field/method. | k9mail-k-9 | java |
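For illustration, a minimal sketch of the guard this comment describes; both method names below are assumptions, since the flagged code sits outside this excerpt:

// Hypothetical guard: suppress the notification only when it is quiet time
// AND the user has NOT enabled notifications during quiet time.
// K9.isQuietTime() and K9.isNotificationDuringQuietTimeEnabled() are
// assumed names, used here purely for illustration.
if (K9.isQuietTime() && !K9.isNotificationDuringQuietTimeEnabled()) {
    return;
}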
@@ -28,14 +28,11 @@ Workshops::Application.configure do
config.log_level = :info
config.log_formatter = ::Logger::Formatter.new
- HOST = 'learn.thoughtbot.com'
- config.action_mailer.default_url_options = {host: HOST}
-
config.middleware.use Rack::SslEnforcer,
hsts: false,
except: %r{^/podcast},
strict: true,
- redirect_to: "https://#{HOST}"
+ redirect_to: "https://#{ENV['APP_DOMAIN']}"
config.action_mailer.delivery_method = :smtp
config.action_mailer.smtp_settings = MAIL_SETTINGS | 1 | require Rails.root.join('config/initializers/mail')
Workshops::Application.configure do
config.cache_classes = true
config.consider_all_requests_local = false
config.action_controller.perform_caching = true
config.action_controller.asset_host = "//d3v2mfwlau8x6c.cloudfront.net"
config.assets.compile = false
config.assets.digest = true
config.assets.js_compressor = :uglifier
config.assets.precompile += %w( print.css prefilled_input.js )
# Serve static assets, which allows us to populate the CDN with compressed
# assets if a client supports them
config.serve_static_assets = true
# Fiddling with expires values is kind of pointless as we use hashing to bust
# caches during redeploys, but it should bump up our Google PageSpeed
# ranking.
config.static_cache_control = 'public, max-age=31536000'
config.eager_load = true
config.cache_store = :dalli_store
config.i18n.fallbacks = true
config.active_support.deprecation = :notify
config.log_level = :info
config.log_formatter = ::Logger::Formatter.new
HOST = 'learn.thoughtbot.com'
config.action_mailer.default_url_options = {host: HOST}
config.middleware.use Rack::SslEnforcer,
hsts: false,
except: %r{^/podcast},
strict: true,
redirect_to: "https://#{HOST}"
config.action_mailer.delivery_method = :smtp
config.action_mailer.smtp_settings = MAIL_SETTINGS
config.action_mailer.perform_deliveries = true
config.action_mailer.default(charset: "utf-8")
config.action_mailer.raise_delivery_errors = true
PAYPAL_USERNAME = ENV['PAYPAL_USERNAME']
PAYPAL_PASSWORD = ENV['PAYPAL_PASSWORD']
PAYPAL_SIGNATURE = ENV['PAYPAL_SIGNATURE']
PAPERCLIP_STORAGE_OPTIONS = {
storage: :s3,
s3_credentials: "#{Rails.root}/config/s3.yml",
s3_protocol: 'https'
}
GITHUB_KEY = ENV['GITHUB_KEY']
GITHUB_SECRET = ENV['GITHUB_SECRET']
config.middleware.use Rack::Cache,
verbose: true,
metastore: "memcached://#{ENV['MEMCACHE_SERVERS']}",
entitystore: "memcached://#{ENV['MEMCACHE_SERVERS']}"
config.middleware.insert_before Rack::Runtime, Sprockets::Redirect, manifest: Dir["#{Rails.root}/public/assets/manifest-*.json"].first
end
| 1 | 10,330 | Prefer double-quoted strings unless you need single quotes to avoid extra backslashes for escaping. | thoughtbot-upcase | rb |
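A tiny Ruby illustration of that preference (not part of the config above):

# Default to double quotes.
s3_protocol = "https"
# Single quotes earn their keep when the string contains double quotes
# that would otherwise need backslash escaping.
greeting = 'she said "hi"'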
@@ -144,7 +144,12 @@ module Travis
def source_ssh?
return false if prefer_https?
repo_private? && !installation? or
- repo_private? && custom_ssh_key?
+ repo_private? && custom_ssh_key? or
+ force_private? && !installation?
+ end
+
+ def force_private?
+ source_host.exclude? 'github.com'
end
def source_host | 1 | require 'faraday'
require 'core_ext/hash/deep_merge'
require 'core_ext/hash/deep_symbolize_keys'
require 'travis/github_apps'
require 'travis/build/data/ssh_key'
# actually, the worker payload can be cleaned up a lot ...
module Travis
module Build
class Data
DEFAULTS = { }
DEFAULT_CACHES = {
bundler: false,
cocoapods: false,
composer: false,
ccache: false,
pip: false,
npm: true
}
attr_reader :data, :language_default_p
def initialize(data, defaults = {})
data = data.deep_symbolize_keys
defaults = defaults.deep_symbolize_keys
@language_default_p = data[:language_default_p]
@data = DEFAULTS.deep_merge(defaults.deep_merge(data))
end
def [](key)
data[key]
end
def key?(key)
data.key?(key)
end
def language
config[:language]
end
def group
config[:group]
end
def dist
config[:dist]
end
def urls
data[:urls] || {}
end
def config
data[:config]
end
def hosts
data[:hosts] || {}
end
def cache_options
data[:cache_settings] || data[:cache_options] || {}
end
def workspace
data[:workspace] || cache_options
end
def cache(input = config[:cache])
case input
when Hash then input
when Array then input.map { |e| cache(e) }.inject(:merge)
when String, Symbol then { input.to_sym => true }
when nil then {} # for ruby 1.9
when false then Hash[DEFAULT_CACHES.each_key.with_object(false).to_a]
else input.to_h
end
end
def cache?(type, default = DEFAULT_CACHES[type])
type &&= type.to_sym
!!cache.fetch(type) { default }
end
def env_vars
data[:env_vars] || []
end
def custom_ssh_key?
!!ssh_key&.custom?
end
def ssh_key?
!!ssh_key
end
def ssh_key
@ssh_key ||= if ssh_key = data[:ssh_key]
SshKey.new(ssh_key[:value], ssh_key[:source], ssh_key[:encoded])
elsif source_key = data[:config][:source_key]
SshKey.new(source_key, nil, true)
end
end
def pull_request?
!!pull_request
end
def pull_request
job[:pull_request]
end
def secure_env?
!!job[:secure_env_enabled]
end
def secure_env_removed?
!!job[:secure_env_removed]
end
def secrets
Array(data[:secrets])
end
def disable_sudo?
!!data[:paranoid]
end
def api_url
repository[:api_url]
end
def source_url
source_ssh? ? source_ssh_url : source_https_url
end
def source_https?
!source_ssh?
end
def source_ssh?
return false if prefer_https?
repo_private? && !installation? or
repo_private? && custom_ssh_key?
end
def source_host
repository[:source_host]
end
def source_ssh_url
"git@#{source_host}:#{slug}.git"
end
def source_https_url
"https://#{source_host}/#{slug}.git"
end
def slug
repository[:slug] || raise('data.slug must not be empty')
end
def github_id
repository[:vcs_id] || repository.fetch(:github_id)
end
def repo_private?
repository[:private]
end
def default_branch
repository[:default_branch]
end
def commit
job[:commit] || ''
end
def branch
job[:branch] || ''
end
def tag
job[:tag]
end
def ref
job[:ref]
end
def job
data[:job] || {}
end
def build
data[:source] || data[:build] || {} # TODO standardize the payload on :build
end
def repository
data[:repository] || {}
end
def token
installation? ? installation_token : data[:oauth_token]
end
def debug_options
job[:debug_options] || {}
end
def prefer_https?
data[:prefer_https]
end
def installation?
!!installation_id
end
def installation_id
repository[:installation_id]
end
def installation_token
GithubApps.new(installation_id).access_token
rescue RuntimeError => e
if e.message =~ /Failed to obtain token from GitHub/
raise Travis::Build::GithubAppsTokenFetchError.new
end
end
def workspaces
config[:workspaces]
end
end
end
end
| 1 | 17,489 | Cool. Here (L146 and L148), maybe `(repo_private? || force_private?) && !installation?` to prevent the double call of `installation?` (sketched below). | travis-ci-travis-build | rb
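A sketch of source_ssh? with that grouping applied; this spells out the reviewer's suggestion and is not necessarily the code that was merged:

def source_ssh?
  return false if prefer_https?
  ((repo_private? || force_private?) && !installation?) ||
    (repo_private? && custom_ssh_key?)
end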
@@ -33,7 +33,7 @@ provide a working environment for development.`,
func appStart() {
app, err := platform.GetActiveApp("")
if err != nil {
- util.Failed("Failed to start: %s", err)
+ util.Failed("Failed to GetActiveApp err: %v", err)
}
fmt.Printf("Starting environment for %s...\n", app.GetName()) | 1 | package cmd
import (
"fmt"
"os"
"github.com/drud/ddev/pkg/plugins/platform"
"github.com/drud/ddev/pkg/util"
"github.com/spf13/cobra"
)
// StartCmd represents the start command
var StartCmd = &cobra.Command{
Use: "start",
Aliases: []string{"add"},
Short: "Start the local development environment for a site.",
Long: `Start initializes and configures the web server and database containers to
provide a working environment for development.`,
PreRun: func(cmd *cobra.Command, args []string) {
if len(args) > 0 {
err := cmd.Usage()
util.CheckErr(err)
os.Exit(0)
}
dockerNetworkPreRun()
},
Run: func(cmd *cobra.Command, args []string) {
appStart()
},
}
func appStart() {
app, err := platform.GetActiveApp("")
if err != nil {
util.Failed("Failed to start: %s", err)
}
fmt.Printf("Starting environment for %s...\n", app.GetName())
err = app.Start()
if err != nil {
util.Failed("Failed to start %s: %v", app.GetName(), err)
}
util.Success("Successfully started %s", app.GetName())
util.Success("Your application can be reached at: %s", app.URL())
}
func init() {
RootCmd.AddCommand(StartCmd)
}
| 1 | 11,845 | This error carries a bit more meaning for us, but less for users. We also lose the context of what command produced the failure. I'd prefer to keep the original error message. If we need better identification of GetActiveApp errors, maybe we could address that in the error messages it returns? One possible approach is sketched below. | drud-ddev | go
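A small Go sketch of that idea, assuming Go 1.13+ error wrapping; getActiveApp and its message are stand-ins, not ddev's real API:

package main

import (
	"errors"
	"fmt"
)

// getActiveApp stands in for platform.GetActiveApp; the error text here
// is an illustrative assumption.
func getActiveApp() error {
	// Wrap the low-level cause so callers can still identify it.
	return fmt.Errorf("could not determine active app: %w", errors.New("no config found"))
}

func main() {
	if err := getActiveApp(); err != nil {
		// The caller keeps the command-level "Failed to start" context,
		// while the wrapped error pinpoints where things went wrong.
		fmt.Printf("Failed to start: %v\n", err)
	}
}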
@@ -97,7 +97,7 @@ const (
const sendRouteSubsInGoRoutineThreshold = 1024 * 1024 // 1MB
// Warning when user configures cluster TLS insecure
-const clusterTLSInsecureWarning = "TLS Hostname verification disabled, system will not verify identity of the solicited route"
+const clusterTLSInsecureWarning = "TLS certificate chain and hostname of solicited routes will not be verified, do not use in production!"
// Can be changed for tests
var routeConnectDelay = DEFAULT_ROUTE_CONNECT | 1 | // Copyright 2013-2019 The NATS Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package server
import (
"bytes"
"crypto/tls"
"encoding/json"
"fmt"
"math/rand"
"net"
"net/url"
"runtime"
"strconv"
"strings"
"sync/atomic"
"time"
)
// RouteType designates the router type
type RouteType int
// Type of Route
const (
// This route we learned from speaking to other routes.
Implicit RouteType = iota
// This route was explicitly configured.
Explicit
)
const (
// RouteProtoZero is the original Route protocol from 2009.
// http://nats.io/documentation/internals/nats-protocol/
RouteProtoZero = iota
// RouteProtoInfo signals a route can receive more than the original INFO block.
// This can be used to update remote cluster permissions, etc...
RouteProtoInfo
// RouteProtoV2 is the new route/cluster protocol that provides account support.
RouteProtoV2
)
// Include the space for the proto
var (
aSubBytes = []byte{'A', '+', ' '}
aUnsubBytes = []byte{'A', '-', ' '}
rSubBytes = []byte{'R', 'S', '+', ' '}
rUnsubBytes = []byte{'R', 'S', '-', ' '}
)
// Used by tests
var testRouteProto = RouteProtoV2
type route struct {
remoteID string
didSolicit bool
retry bool
routeType RouteType
url *url.URL
authRequired bool
tlsRequired bool
connectURLs []string
replySubs map[*subscription]*time.Timer
gatewayURL string
leafnodeURL string
}
type connectInfo struct {
Echo bool `json:"echo"`
Verbose bool `json:"verbose"`
Pedantic bool `json:"pedantic"`
User string `json:"user,omitempty"`
Pass string `json:"pass,omitempty"`
TLS bool `json:"tls_required"`
Name string `json:"name"`
Gateway string `json:"gateway,omitempty"`
}
// Route protocol constants
const (
ConProto = "CONNECT %s" + _CRLF_
InfoProto = "INFO %s" + _CRLF_
)
// Used to decide if the sending of the route SUBs list should be
// done in place or in separate go routine.
const sendRouteSubsInGoRoutineThreshold = 1024 * 1024 // 1MB
// Warning when user configures cluster TLS insecure
const clusterTLSInsecureWarning = "TLS Hostname verification disabled, system will not verify identity of the solicited route"
// Can be changed for tests
var routeConnectDelay = DEFAULT_ROUTE_CONNECT
// This will add a timer to watch over remote reply subjects in case
// they fail to receive a response. The duration will be taken from the
// accounts map timeout to match.
// Lock should be held upon entering.
func (c *client) addReplySubTimeout(acc *Account, sub *subscription, d time.Duration) {
if c.route.replySubs == nil {
c.route.replySubs = make(map[*subscription]*time.Timer)
}
rs := c.route.replySubs
rs[sub] = time.AfterFunc(d, func() {
c.mu.Lock()
delete(rs, sub)
sub.max = 0
c.mu.Unlock()
c.unsubscribe(acc, sub, true)
})
}
// removeReplySub is called when we trip the max on remoteReply subs.
func (c *client) removeReplySub(sub *subscription) {
if sub == nil {
return
}
// Lookup the account based on sub.sid.
if i := bytes.Index(sub.sid, []byte(" ")); i > 0 {
// First part of SID for route is account name.
if acc, _ := c.srv.LookupAccount(string(sub.sid[:i])); acc != nil {
acc.sl.Remove(sub)
}
c.mu.Lock()
c.removeReplySubTimeout(sub)
delete(c.subs, string(sub.sid))
c.mu.Unlock()
}
}
// removeReplySubTimeout will remove a timer if it exists.
// Lock should be held upon entering.
func (c *client) removeReplySubTimeout(sub *subscription) {
// Remove any reply sub timer if it exists.
if c.route == nil || c.route.replySubs == nil {
return
}
if t, ok := c.route.replySubs[sub]; ok {
t.Stop()
delete(c.route.replySubs, sub)
}
}
func (c *client) processAccountSub(arg []byte) error {
c.traceInOp("A+", arg)
accName := string(arg)
if c.kind == GATEWAY {
return c.processGatewayAccountSub(accName)
}
return nil
}
func (c *client) processAccountUnsub(arg []byte) {
c.traceInOp("A-", arg)
accName := string(arg)
if c.kind == GATEWAY {
c.processGatewayAccountUnsub(accName)
}
}
// Process an inbound RMSG specification from the remote route.
func (c *client) processRoutedMsgArgs(trace bool, arg []byte) error {
if trace {
c.traceInOp("RMSG", arg)
}
// Unroll splitArgs to avoid runtime/heap issues
a := [MAX_MSG_ARGS][]byte{}
args := a[:0]
start := -1
for i, b := range arg {
switch b {
case ' ', '\t', '\r', '\n':
if start >= 0 {
args = append(args, arg[start:i])
start = -1
}
default:
if start < 0 {
start = i
}
}
}
if start >= 0 {
args = append(args, arg[start:])
}
c.pa.arg = arg
switch len(args) {
case 0, 1, 2:
return fmt.Errorf("processRoutedMsgArgs Parse Error: '%s'", args)
case 3:
c.pa.reply = nil
c.pa.queues = nil
c.pa.szb = args[2]
c.pa.size = parseSize(args[2])
case 4:
c.pa.reply = args[2]
c.pa.queues = nil
c.pa.szb = args[3]
c.pa.size = parseSize(args[3])
default:
// args[2] is our reply indicator. Should be + or | normally.
if len(args[2]) != 1 {
return fmt.Errorf("processRoutedMsgArgs Bad or Missing Reply Indicator: '%s'", args[2])
}
switch args[2][0] {
case '+':
c.pa.reply = args[3]
case '|':
c.pa.reply = nil
default:
return fmt.Errorf("processRoutedMsgArgs Bad or Missing Reply Indicator: '%s'", args[2])
}
// Grab size.
c.pa.szb = args[len(args)-1]
c.pa.size = parseSize(c.pa.szb)
// Grab queue names.
if c.pa.reply != nil {
c.pa.queues = args[4 : len(args)-1]
} else {
c.pa.queues = args[3 : len(args)-1]
}
}
if c.pa.size < 0 {
return fmt.Errorf("processRoutedMsgArgs Bad or Missing Size: '%s'", args)
}
// Common ones processed after check for arg length
c.pa.account = args[0]
c.pa.subject = args[1]
c.pa.pacache = arg[:len(args[0])+len(args[1])+1]
return nil
}
// processInboundRouteMsg is called to process an inbound msg from a route.
func (c *client) processInboundRoutedMsg(msg []byte) {
// Update statistics
c.in.msgs++
// The msg includes the CR_LF, so subtract it for accounting.
c.in.bytes += int32(len(msg) - LEN_CR_LF)
if c.trace {
c.traceMsg(msg)
}
if c.opts.Verbose {
c.sendOK()
}
// Mostly under testing scenarios.
if c.srv == nil {
return
}
acc, r := c.getAccAndResultFromCache()
if acc == nil {
c.Debugf("Unknown account %q for routed message on subject: %q", c.pa.account, c.pa.subject)
return
}
// Check to see if we need to map/route to another account.
if acc.imports.services != nil {
c.checkForImportServices(acc, msg)
}
// Check for no interest, short circuit if so.
// This is the fanout scale.
if len(r.psubs)+len(r.qsubs) == 0 {
return
}
// Check to see if we have a routed message with a service reply.
if isServiceReply(c.pa.reply) && acc != nil {
// Need to add a sub here for local interest to send a response back
// to the originating server/requestor where it will be re-mapped.
sid := make([]byte, 0, len(acc.Name)+len(c.pa.reply)+1)
sid = append(sid, acc.Name...)
sid = append(sid, ' ')
sid = append(sid, c.pa.reply...)
// Copy off the reply since otherwise we are referencing a buffer that will be reused.
reply := make([]byte, len(c.pa.reply))
copy(reply, c.pa.reply)
sub := &subscription{client: c, subject: reply, sid: sid, max: 1}
if err := acc.sl.Insert(sub); err != nil {
c.Errorf("Could not insert subscription: %v", err)
} else {
ttl := acc.AutoExpireTTL()
c.mu.Lock()
c.subs[string(sid)] = sub
c.addReplySubTimeout(acc, sub, ttl)
c.mu.Unlock()
}
}
c.processMsgResults(acc, r, msg, c.pa.subject, c.pa.reply, pmrNoFlag)
}
// Helper function for routes and gateways and leafnodes to create qfilters
// needed for converted subs from imports, etc.
func (c *client) makeQFilter(qsubs [][]*subscription) {
qs := make([][]byte, 0, len(qsubs))
for _, qsub := range qsubs {
if len(qsub) > 0 {
qs = append(qs, qsub[0].queue)
}
}
c.pa.queues = qs
}
// Lock should be held entering here.
func (c *client) sendRouteConnect(tlsRequired bool) {
var user, pass string
if userInfo := c.route.url.User; userInfo != nil {
user = userInfo.Username()
pass, _ = userInfo.Password()
}
cinfo := connectInfo{
Echo: true,
Verbose: false,
Pedantic: false,
User: user,
Pass: pass,
TLS: tlsRequired,
Name: c.srv.info.ID,
}
b, err := json.Marshal(cinfo)
if err != nil {
c.Errorf("Error marshaling CONNECT to route: %v\n", err)
c.closeConnection(ProtocolViolation)
return
}
c.sendProto([]byte(fmt.Sprintf(ConProto, b)), true)
}
// Process the info message if we are a route.
func (c *client) processRouteInfo(info *Info) {
// We may need to update route permissions and will need the account
// sublist. Since getting the account requires server lock, do the
// lookup now.
// FIXME(dlc) - Add account scoping.
gacc := c.srv.globalAccount()
gacc.mu.RLock()
sl := gacc.sl
gacc.mu.RUnlock()
c.mu.Lock()
// Connection can be closed at any time (by auth timeout, etc).
// Does not make sense to continue here if connection is gone.
if c.route == nil || c.nc == nil {
c.mu.Unlock()
return
}
s := c.srv
remoteID := c.route.remoteID
// Check if this is an INFO for gateways...
if info.Gateway != "" {
c.mu.Unlock()
// If this server has no gateway configured, report error and return.
if !s.gateway.enabled {
// FIXME: Should this be a Fatalf()?
s.Errorf("Received information about gateway %q from %s, but gateway is not configured",
info.Gateway, remoteID)
return
}
s.processGatewayInfoFromRoute(info, remoteID, c)
return
}
// We receive an INFO from a server that informs us about another server,
// so the info.ID in the INFO protocol does not match the ID of this route.
if remoteID != "" && remoteID != info.ID {
c.mu.Unlock()
// Process this implicit route. We will check that it is not an explicit
// route and/or that it has not been connected already.
s.processImplicitRoute(info)
return
}
// Need to set this for the detection of the route to self to work
// in closeConnection().
c.route.remoteID = info.ID
// Get the route's proto version
c.opts.Protocol = info.Proto
// Detect route to self.
if c.route.remoteID == s.info.ID {
c.mu.Unlock()
c.closeConnection(DuplicateRoute)
return
}
// Copy over important information.
c.route.authRequired = info.AuthRequired
c.route.tlsRequired = info.TLSRequired
c.route.gatewayURL = info.GatewayURL
// When sent through route INFO, if the field is set, it should be of size 1.
if len(info.LeafNodeURLs) == 1 {
c.route.leafnodeURL = info.LeafNodeURLs[0]
}
// If this is an update due to config reload on the remote server,
// need to possibly send local subs to the remote server.
if c.flags.isSet(infoReceived) {
c.updateRemoteRoutePerms(sl, info)
c.mu.Unlock()
return
}
// Copy over permissions as well.
c.opts.Import = info.Import
c.opts.Export = info.Export
// If we do not know this route's URL, construct one on the fly
// from the information provided.
if c.route.url == nil {
// Add in the URL from host and port
hp := net.JoinHostPort(info.Host, strconv.Itoa(info.Port))
url, err := url.Parse(fmt.Sprintf("nats-route://%s/", hp))
if err != nil {
c.Errorf("Error parsing URL from INFO: %v\n", err)
c.mu.Unlock()
c.closeConnection(ParseError)
return
}
c.route.url = url
}
// Mark that the INFO protocol has been received. Will allow
// to detect INFO updates.
c.flags.set(infoReceived)
// Check to see if we have this remote already registered.
// This can happen when both servers have routes to each other.
c.mu.Unlock()
if added, sendInfo := s.addRoute(c, info); added {
c.Debugf("Registering remote route %q", info.ID)
// Send our subs to the other side.
s.sendSubsToRoute(c)
// Send info about the known gateways to this route.
s.sendGatewayConfigsToRoute(c)
// sendInfo will be false if the route that we just accepted
// is the only route there is.
if sendInfo {
// The incoming INFO from the route will have IP set
// if it has Cluster.Advertise. In that case, use that;
// otherwise, construct it from the remote TCP address.
if info.IP == "" {
// Need to get the remote IP address.
c.mu.Lock()
switch conn := c.nc.(type) {
case *net.TCPConn, *tls.Conn:
addr := conn.RemoteAddr().(*net.TCPAddr)
info.IP = fmt.Sprintf("nats-route://%s/", net.JoinHostPort(addr.IP.String(),
strconv.Itoa(info.Port)))
default:
info.IP = c.route.url.String()
}
c.mu.Unlock()
}
// Now let the known servers know about this new route
s.forwardNewRouteInfoToKnownServers(info)
}
// Unless disabled, possibly update the server's INFO protocol
// and send to clients that know how to handle async INFOs.
if !s.getOpts().Cluster.NoAdvertise {
s.addClientConnectURLsAndSendINFOToClients(info.ClientConnectURLs)
}
} else {
c.Debugf("Detected duplicate remote route %q", info.ID)
c.closeConnection(DuplicateRoute)
}
}
// Possibly sends local subscriptions interest to this route
// based on changes in the remote's Export permissions.
// Lock assumed held on entry
func (c *client) updateRemoteRoutePerms(sl *Sublist, info *Info) {
// Interested only on Export permissions for the remote server.
// Create "fake" clients that we will use to check permissions
// using the old permissions...
oldPerms := &RoutePermissions{Export: c.opts.Export}
oldPermsTester := &client{}
oldPermsTester.setRoutePermissions(oldPerms)
// and the new ones.
newPerms := &RoutePermissions{Export: info.Export}
newPermsTester := &client{}
newPermsTester.setRoutePermissions(newPerms)
c.opts.Import = info.Import
c.opts.Export = info.Export
var (
_localSubs [4096]*subscription
localSubs = _localSubs[:0]
)
sl.localSubs(&localSubs)
c.sendRouteSubProtos(localSubs, false, func(sub *subscription) bool {
subj := string(sub.subject)
// If the remote can now export but could not before, and this server can import this
// subject, then send SUB protocol.
if newPermsTester.canExport(subj) && !oldPermsTester.canExport(subj) && c.canImport(subj) {
return true
}
return false
})
}
// sendAsyncInfoToClients sends an INFO protocol to all
// connected clients that accept async INFO updates.
// The server lock is held on entry.
func (s *Server) sendAsyncInfoToClients() {
// If there are no clients supporting async INFO protocols, we are done.
// Also don't send if we are shutting down...
if s.cproto == 0 || s.shutdown {
return
}
for _, c := range s.clients {
c.mu.Lock()
// Here, we are going to send only to the clients that are fully
// registered (server has received CONNECT and first PING). For
// clients that are not at this stage, this will happen in the
// processing of the first PING (see client.processPing)
if c.opts.Protocol >= ClientProtoInfo && c.flags.isSet(firstPongSent) {
// sendInfo takes care of checking if the connection is still
// valid or not, so don't duplicate tests here.
c.sendInfo(c.generateClientInfoJSON(s.copyInfo()))
}
c.mu.Unlock()
}
}
// This will process implicit route information received from another server.
// We will check to see if we have configured or are already connected,
// and if so we will ignore. Otherwise we will attempt to connect.
func (s *Server) processImplicitRoute(info *Info) {
remoteID := info.ID
s.mu.Lock()
defer s.mu.Unlock()
// Don't connect to ourself
if remoteID == s.info.ID {
return
}
// Check if this route already exists
if _, exists := s.remotes[remoteID]; exists {
return
}
// Check if we have this route as a configured route
if s.hasThisRouteConfigured(info) {
return
}
// Initiate the connection, using info.IP instead of info.URL here...
r, err := url.Parse(info.IP)
if err != nil {
s.Errorf("Error parsing URL from INFO: %v\n", err)
return
}
// Snapshot server options.
opts := s.getOpts()
if info.AuthRequired {
r.User = url.UserPassword(opts.Cluster.Username, opts.Cluster.Password)
}
s.startGoRoutine(func() { s.connectToRoute(r, false, true) })
}
// hasThisRouteConfigured returns true if info.Host:info.Port is present
// in the server's opts.Routes, false otherwise.
// Server lock is assumed to be held by caller.
func (s *Server) hasThisRouteConfigured(info *Info) bool {
urlToCheckExplicit := strings.ToLower(net.JoinHostPort(info.Host, strconv.Itoa(info.Port)))
for _, ri := range s.getOpts().Routes {
if strings.ToLower(ri.Host) == urlToCheckExplicit {
return true
}
}
return false
}
// forwardNewRouteInfoToKnownServers sends the INFO protocol of the new route
// to all routes known by this server. In turn, each server will contact this
// new route.
func (s *Server) forwardNewRouteInfoToKnownServers(info *Info) {
s.mu.Lock()
defer s.mu.Unlock()
b, _ := json.Marshal(info)
infoJSON := []byte(fmt.Sprintf(InfoProto, b))
for _, r := range s.routes {
r.mu.Lock()
if r.route.remoteID != info.ID {
r.sendInfo(infoJSON)
}
r.mu.Unlock()
}
}
// canImport is whether or not we will send a SUB for interest to the other side.
// This is for ROUTER connections only.
// Lock is held on entry.
func (c *client) canImport(subject string) bool {
// Use pubAllowed() since this checks Publish permissions which
// is what Import maps to.
return c.pubAllowed(subject)
}
// canExport is whether or not we will accept a SUB from the remote for a given subject.
// This is for ROUTER connections only.
// Lock is held on entry
func (c *client) canExport(subject string) bool {
// Use canSubscribe() since this checks Subscribe permissions which
// is what Export maps to.
return c.canSubscribe(subject)
}
// Initialize or reset cluster's permissions.
// This is for ROUTER connections only.
// Client lock is held on entry
func (c *client) setRoutePermissions(perms *RoutePermissions) {
// Reset if some were set
if perms == nil {
c.perms = nil
c.mperms = nil
return
}
// Convert route permissions to user permissions.
// The Import permission is mapped to Publish
// and Export permission is mapped to Subscribe.
// For meaning of Import/Export, see canImport and canExport.
p := &Permissions{
Publish: perms.Import,
Subscribe: perms.Export,
}
c.setPermissions(p)
}
// Type used to hold a list of subs on a per account basis.
type asubs struct {
acc *Account
subs []*subscription
}
// removeRemoteSubs will walk the subs and remove them from the appropriate account.
func (c *client) removeRemoteSubs() {
// We need to gather these on a per account basis.
// FIXME(dlc) - We should be smarter about this..
as := map[string]*asubs{}
c.mu.Lock()
srv := c.srv
subs := c.subs
c.subs = make(map[string]*subscription)
c.mu.Unlock()
for key, sub := range subs {
c.mu.Lock()
sub.max = 0
c.mu.Unlock()
// Grab the account
accountName := strings.Fields(key)[0]
ase := as[accountName]
if ase == nil {
acc, _ := srv.LookupAccount(accountName)
if acc == nil {
continue
}
as[accountName] = &asubs{acc: acc, subs: []*subscription{sub}}
} else {
ase.subs = append(ase.subs, sub)
}
if srv.gateway.enabled {
srv.gatewayUpdateSubInterest(accountName, sub, -1)
}
}
// Now remove the subs by batch for each account sublist.
for _, ase := range as {
c.Debugf("Removing %d subscriptions for account %q", len(ase.subs), ase.acc.Name)
ase.acc.sl.RemoveBatch(ase.subs)
}
}
func (c *client) parseUnsubProto(arg []byte) (string, []byte, []byte, error) {
c.traceInOp("RS-", arg)
// Indicate any activity, so pub and sub or unsubs.
c.in.subs++
args := splitArg(arg)
var (
accountName string
subject []byte
queue []byte
)
switch len(args) {
case 2:
case 3:
queue = args[2]
default:
return "", nil, nil, fmt.Errorf("parse error: '%s'", arg)
}
subject = args[1]
accountName = string(args[0])
return accountName, subject, queue, nil
}
// Indicates no more interest in the given account/subject for the remote side.
func (c *client) processRemoteUnsub(arg []byte) (err error) {
srv := c.srv
if srv == nil {
return nil
}
accountName, subject, _, err := c.parseUnsubProto(arg)
if err != nil {
return fmt.Errorf("processRemoteUnsub %s", err.Error())
}
// Lookup the account
acc, _ := c.srv.LookupAccount(accountName)
if acc == nil {
c.Debugf("Unknown account %q for subject %q", accountName, subject)
// Mark this account as not interested since we received a RS- and we
// do not have any record of it.
return nil
}
c.mu.Lock()
if c.nc == nil {
c.mu.Unlock()
return nil
}
updateGWs := false
// We store local subs by account and subject and optionally queue name.
// RS- will have the arg exactly as the key.
key := string(arg)
sub, ok := c.subs[key]
if ok {
delete(c.subs, key)
acc.sl.Remove(sub)
c.removeReplySubTimeout(sub)
updateGWs = srv.gateway.enabled
}
c.mu.Unlock()
if updateGWs {
srv.gatewayUpdateSubInterest(accountName, sub, -1)
}
// Now check on leafnode updates.
srv.updateLeafNodes(acc, sub, -1)
if c.opts.Verbose {
c.sendOK()
}
return nil
}
func (c *client) processRemoteSub(argo []byte) (err error) {
c.traceInOp("RS+", argo)
// Indicate activity.
c.in.subs++
srv := c.srv
if srv == nil {
return nil
}
// Copy so we do not reference a potentially large buffer
arg := make([]byte, len(argo))
copy(arg, argo)
args := splitArg(arg)
sub := &subscription{client: c}
switch len(args) {
case 2:
sub.queue = nil
case 4:
sub.queue = args[2]
sub.qw = int32(parseSize(args[3]))
default:
return fmt.Errorf("processRemoteSub Parse Error: '%s'", arg)
}
sub.subject = args[1]
// Lookup the account
// FIXME(dlc) - This may start having lots of contention?
accountName := string(args[0])
acc, _ := c.srv.LookupAccount(accountName)
if acc == nil {
if !srv.NewAccountsAllowed() {
c.Debugf("Unknown account %q for subject %q", accountName, sub.subject)
return nil
}
acc, _ = srv.LookupOrRegisterAccount(accountName)
}
c.mu.Lock()
if c.nc == nil {
c.mu.Unlock()
return nil
}
// Check permissions if applicable.
if !c.canExport(string(sub.subject)) {
c.mu.Unlock()
c.Debugf("Can not export %q, ignoring remote subscription request", sub.subject)
return nil
}
// Check if we have a maximum on the number of subscriptions.
if c.subsAtLimit() {
c.mu.Unlock()
c.maxSubsExceeded()
return nil
}
// We store local subs by account and subject and optionally queue name.
// If we have a queue it will have a trailing weight which we do not want.
if sub.queue != nil {
sub.sid = arg[:len(arg)-len(args[3])-1]
} else {
sub.sid = arg
}
key := string(sub.sid)
osub := c.subs[key]
updateGWs := false
if osub == nil {
c.subs[key] = sub
// Now place into the account sl.
if err = acc.sl.Insert(sub); err != nil {
delete(c.subs, key)
c.mu.Unlock()
c.Errorf("Could not insert subscription: %v", err)
c.sendErr("Invalid Subscription")
return nil
}
updateGWs = srv.gateway.enabled
} else if sub.queue != nil {
// For a queue we need to update the weight.
atomic.StoreInt32(&osub.qw, sub.qw)
acc.sl.UpdateRemoteQSub(osub)
}
c.mu.Unlock()
if updateGWs {
srv.gatewayUpdateSubInterest(acc.Name, sub, 1)
}
// Now check on leafnode updates.
srv.updateLeafNodes(acc, sub, 1)
if c.opts.Verbose {
c.sendOK()
}
return nil
}
// sendSubsToRoute will send over our subject interest to
// the remote side. For each account we will send the
// complete interest for all subjects, both normal as a binary
// and queue group weights.
func (s *Server) sendSubsToRoute(route *client) {
s.mu.Lock()
// Estimated size of all protocols. It does not have to be accurate at all.
eSize := 0
// Send over our account subscriptions.
// copy accounts into array first
accs := make([]*Account, 0, 32)
s.accounts.Range(func(k, v interface{}) bool {
a := v.(*Account)
accs = append(accs, a)
a.mu.RLock()
// Proto looks like: "RS+ <account name> <subject>[ <queue weight>]\r\n"
// If we wanted to have better estimates (or even accurate), we would
// collect the subs here instead of capturing the accounts and then
// later going over each account.
eSize += len(a.rm) * (4 + len(a.Name) + 256)
a.mu.RUnlock()
return true
})
s.mu.Unlock()
sendSubs := func(accs []*Account) {
var raw [32]*subscription
var closed bool
route.mu.Lock()
for _, a := range accs {
subs := raw[:0]
a.mu.RLock()
c := a.randomClient()
if c == nil {
nsubs := len(a.rm)
accName := a.Name
a.mu.RUnlock()
if nsubs > 0 {
route.Warnf("Ignoring account %q with %d subs, no clients", accName, nsubs)
}
continue
}
for key, n := range a.rm {
// FIXME(dlc) - Just pass rme around.
// Construct a sub on the fly. We need to place
// a client (or im) to properly set the account.
var subj, qn []byte
s := strings.Split(key, " ")
subj = []byte(s[0])
if len(s) > 1 {
qn = []byte(s[1])
}
// TODO(dlc) - This code needs to change, but even if left alone could be more
// efficient with these tmp subs.
sub := &subscription{client: c, subject: subj, queue: qn, qw: n}
subs = append(subs, sub)
}
a.mu.RUnlock()
closed = route.sendRouteSubProtos(subs, false, func(sub *subscription) bool {
return route.canImport(string(sub.subject))
})
if closed {
route.mu.Unlock()
return
}
}
route.mu.Unlock()
if !closed {
route.Debugf("Sent local subscriptions to route")
}
}
// Decide if we call above function in go routine or in place.
if eSize > sendRouteSubsInGoRoutineThreshold {
s.startGoRoutine(func() {
sendSubs(accs)
s.grWG.Done()
})
} else {
sendSubs(accs)
}
}
// Sends SUBs protocols for the given subscriptions. If a filter is specified, it is
// invoked for each subscription. If the filter returns false, the subscription is skipped.
// This function may release the route's lock due to flushing of outbound data. A boolean
// is returned to indicate if the connection has been closed during this call.
// Lock is held on entry.
func (c *client) sendRouteSubProtos(subs []*subscription, trace bool, filter func(sub *subscription) bool) bool {
return c.sendRouteSubOrUnSubProtos(subs, true, trace, filter)
}
// Sends UNSUBs protocols for the given subscriptions. If a filter is specified, it is
// invoked for each subscription. If the filter returns false, the subscription is skipped.
// This function may release the route's lock due to flushing of outbound data. A boolean
// is returned to indicate if the connection has been closed during this call.
// Lock is held on entry.
func (c *client) sendRouteUnSubProtos(subs []*subscription, trace bool, filter func(sub *subscription) bool) bool {
return c.sendRouteSubOrUnSubProtos(subs, false, trace, filter)
}
// Low-level function that sends RS+ or RS- protocols for the given subscriptions.
// Use sendRouteSubProtos or sendRouteUnSubProtos instead for clarity.
// Lock is held on entry.
func (c *client) sendRouteSubOrUnSubProtos(subs []*subscription, isSubProto, trace bool, filter func(sub *subscription) bool) bool {
var (
_buf [1024]byte // array on stack
buf = _buf[:0] // our buffer will initially point to the stack buffer
mbs = maxBufSize * 2 // max size of the buffer
mpMax = int(c.out.mp / 2) // 50% of max_pending
closed bool
)
// We need to make sure that we stay below the user defined max pending bytes.
if mbs > mpMax {
mbs = mpMax
}
for _, sub := range subs {
if filter != nil && !filter(sub) {
continue
}
// Determine the account. If sub has an ImportMap entry, use that, otherwise scoped to
// client. Default to global if all else fails.
var accName string
if sub.client != nil && sub.client != c {
sub.client.mu.Lock()
}
if sub.im != nil {
accName = sub.im.acc.Name
} else if sub.client != nil && sub.client.acc != nil {
accName = sub.client.acc.Name
} else {
c.Debugf("Falling back to default account for sending subs")
accName = globalAccountName
}
if sub.client != nil && sub.client != c {
sub.client.mu.Unlock()
}
// Check if proto is going to fit.
curSize := len(buf)
// "RS+/- " + account + " " + subject + " " [+ queue + " " + weight] + CRLF
curSize += 4 + len(accName) + 1 + len(sub.subject) + 1 + 2
if len(sub.queue) > 0 {
curSize += len(sub.queue)
if isSubProto {
// Estimate weightlen in 1000s
curSize += 1 + 4
}
}
if curSize >= mbs {
if c.queueOutbound(buf) {
// Need to allocate new array
buf = make([]byte, 0, mbs)
} else {
// We can reuse previous buffer
buf = buf[:0]
}
// Update last activity because flushOutbound() will release
// the lock, which could cause pingTimer to think that this
// connection is stale otherwise.
c.last = time.Now()
c.flushOutbound()
if closed = c.flags.isSet(clearConnection); closed {
break
}
}
as := len(buf)
if isSubProto {
buf = append(buf, rSubBytes...)
} else {
buf = append(buf, rUnsubBytes...)
}
buf = append(buf, accName...)
buf = append(buf, ' ')
buf = append(buf, sub.subject...)
if len(sub.queue) > 0 {
buf = append(buf, ' ')
buf = append(buf, sub.queue...)
// Send our weight if we are a sub proto
if isSubProto {
buf = append(buf, ' ')
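// Render the decimal digits of the queue weight into a small stack
// buffer, least-significant digit first, then append the used tail.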
var b [12]byte
var i = len(b)
for l := sub.qw; l > 0; l /= 10 {
i--
b[i] = digits[l%10]
}
buf = append(buf, b[i:]...)
}
}
if trace {
c.traceOutOp("", buf[as:])
}
buf = append(buf, CR_LF...)
}
if !closed && len(buf) > 0 {
c.queueOutbound(buf)
c.flushOutbound()
closed = c.flags.isSet(clearConnection)
}
return closed
}
func (s *Server) createRoute(conn net.Conn, rURL *url.URL) *client {
// Snapshot server options.
opts := s.getOpts()
didSolicit := rURL != nil
r := &route{didSolicit: didSolicit}
for _, route := range opts.Routes {
if rURL != nil && (strings.EqualFold(rURL.Host, route.Host)) {
r.routeType = Explicit
}
}
c := &client{srv: s, nc: conn, opts: clientOpts{}, kind: ROUTER, msubs: -1, mpay: -1, route: r}
// Grab server variables
s.mu.Lock()
s.generateRouteInfoJSON()
infoJSON := s.routeInfoJSON
authRequired := s.routeInfo.AuthRequired
tlsRequired := s.routeInfo.TLSRequired
s.mu.Unlock()
// Grab lock
c.mu.Lock()
// Initialize
c.initClient()
if didSolicit {
// Do this before the TLS code, otherwise, in case of failure
// and if route is explicit, it would try to reconnect to 'nil'...
r.url = rURL
}
// Check for TLS
if tlsRequired {
// Copy off the config to add in ServerName if we need to.
tlsConfig := opts.Cluster.TLSConfig.Clone()
// If we solicited, we will act like the client, otherwise the server.
if didSolicit {
c.Debugf("Starting TLS route client handshake")
// Specify the ServerName we are expecting.
host, _, _ := net.SplitHostPort(rURL.Host)
tlsConfig.ServerName = host
c.nc = tls.Client(c.nc, tlsConfig)
} else {
c.Debugf("Starting TLS route server handshake")
c.nc = tls.Server(c.nc, tlsConfig)
}
conn := c.nc.(*tls.Conn)
// Setup the timeout
ttl := secondsToDuration(opts.Cluster.TLSTimeout)
time.AfterFunc(ttl, func() { tlsTimeout(c, conn) })
conn.SetReadDeadline(time.Now().Add(ttl))
c.mu.Unlock()
if err := conn.Handshake(); err != nil {
c.Errorf("TLS route handshake error: %v", err)
c.sendErr("Secure Connection - TLS Required")
c.closeConnection(TLSHandshakeError)
return nil
}
// Reset the read deadline
conn.SetReadDeadline(time.Time{})
// Re-Grab lock
c.mu.Lock()
// Verify that the connection did not go away while we released the lock.
if c.nc == nil {
c.mu.Unlock()
return nil
}
}
// Do final client initialization
// Initialize the per-account cache.
c.in.pacache = make(map[string]*perAccountCache)
if didSolicit {
// Set permissions associated with the route user (if applicable).
// No lock needed since we are already under client lock.
c.setRoutePermissions(opts.Cluster.Permissions)
}
// Set the Ping timer
c.setPingTimer()
// For routes, the "client" is added to s.routes only when processing
// the INFO protocol, which happens much later.
// In the meantime, if the server shuts down, there would be no reference
// to the client (connection) to be closed, leaving this readLoop
// uninterrupted and causing Shutdown() to wait indefinitely.
// We need to store the client in a special map, under a special lock.
if !s.addToTempClients(c.cid, c) {
c.mu.Unlock()
c.setNoReconnect()
c.closeConnection(ServerShutdown)
return nil
}
// Check for Auth required state for incoming connections.
// Make sure to do this before spinning up readLoop.
if authRequired && !didSolicit {
ttl := secondsToDuration(opts.Cluster.AuthTimeout)
c.setAuthTimer(ttl)
}
// Spin up the read loop.
s.startGoRoutine(func() { c.readLoop() })
// Spin up the write loop.
s.startGoRoutine(func() { c.writeLoop() })
if tlsRequired {
c.Debugf("TLS handshake complete")
cs := c.nc.(*tls.Conn).ConnectionState()
c.Debugf("TLS version %s, cipher suite %s", tlsVersion(cs.Version), tlsCipher(cs.CipherSuite))
}
// Queue Connect proto if we solicited the connection.
if didSolicit {
c.Debugf("Route connect msg sent")
c.sendRouteConnect(tlsRequired)
}
// Send our info to the other side.
// Our new version requires dynamic information for accounts and a nonce.
c.sendInfo(infoJSON)
c.mu.Unlock()
c.Noticef("Route connection created")
return c
}
const (
_CRLF_ = "\r\n"
_EMPTY_ = ""
)
func (s *Server) addRoute(c *client, info *Info) (bool, bool) {
id := c.route.remoteID
sendInfo := false
s.mu.Lock()
if !s.running {
s.mu.Unlock()
return false, false
}
remote, exists := s.remotes[id]
if !exists {
s.routes[c.cid] = c
s.remotes[id] = c
c.mu.Lock()
c.route.connectURLs = info.ClientConnectURLs
cid := c.cid
c.mu.Unlock()
// Now that we have registered the route, we can remove from the temp map.
s.removeFromTempClients(cid)
// we don't need to send if the only route is the one we just accepted.
sendInfo = len(s.routes) > 1
// If the INFO contains a Gateway URL, add it to the list for our cluster.
if info.GatewayURL != "" {
s.addGatewayURL(info.GatewayURL)
}
// Add the remote's leafnodeURL to our list of URLs and send the update
// to all LN connections. (Note that when coming from a route, LeafNodeURLs
// is an array of size 1 max).
if len(info.LeafNodeURLs) == 1 && s.addLeafNodeURL(info.LeafNodeURLs[0]) {
s.sendAsyncLeafNodeInfo()
}
}
s.mu.Unlock()
if exists {
var r *route
c.mu.Lock()
// upgrade to solicited?
if c.route.didSolicit {
// Make a copy
rs := *c.route
r = &rs
}
c.mu.Unlock()
remote.mu.Lock()
// r will be not nil if c.route.didSolicit was true
if r != nil {
// If we upgrade to solicited, we still want to keep the remote's
// connectURLs. So transfer those.
r.connectURLs = remote.route.connectURLs
remote.route = r
}
// This is to mitigate the issue where both sides add the route
// on the opposite connection, and therefore end up with both
// connections being dropped.
remote.route.retry = true
remote.mu.Unlock()
}
return !exists, sendInfo
}
// updateRouteSubscriptionMap will make sure to update the route map for the subscription. Will
// also forward to all routes if needed.
func (s *Server) updateRouteSubscriptionMap(acc *Account, sub *subscription, delta int32) {
if acc == nil || sub == nil {
return
}
acc.mu.RLock()
rm := acc.rm
acc.mu.RUnlock()
// This is non-nil when we know we are in cluster mode.
if rm == nil {
return
}
// We only store state on local subs for transmission across all other routes.
if sub.client == nil || (sub.client.kind != CLIENT && sub.client.kind != SYSTEM && sub.client.kind != LEAF) {
return
}
// Create the fast key which will use the subject or 'subject<spc>queue' for queue subscribers.
var (
_rkey [1024]byte
key []byte
update bool
)
if sub.queue != nil {
// Just make the key subject spc group, e.g. 'foo bar'
key = _rkey[:0]
key = append(key, sub.subject...)
key = append(key, byte(' '))
key = append(key, sub.queue...)
// We always update for a queue subscriber since we need to send our relative weight.
update = true
} else {
key = sub.subject
}
// Copy to hold outside acc lock.
var n int32
var ok bool
acc.mu.Lock()
if n, ok = rm[string(key)]; ok {
n += delta
if n <= 0 {
delete(rm, string(key))
update = true // Update for deleting (N->0)
} else {
rm[string(key)] = n
}
} else if delta > 0 {
n = delta
rm[string(key)] = delta
update = true // Adding a new entry for normal sub means update (0->1)
}
acc.mu.Unlock()
if !update {
return
}
// We need to send out this update.
// If we are sending a queue sub, copy and place in the queue weight.
if sub.queue != nil {
sub.client.mu.Lock()
nsub := *sub
sub.client.mu.Unlock()
nsub.qw = n
sub = &nsub
}
// Note that queue unsubs where entry.n > 0 are still
// subscribes with a smaller weight.
if n > 0 {
s.broadcastSubscribe(sub)
} else {
s.broadcastUnSubscribe(sub)
}
}
// broadcastSubscribe will forward a client subscription
// to all active routes as needed.
func (s *Server) broadcastSubscribe(sub *subscription) {
trace := atomic.LoadInt32(&s.logging.trace) == 1
s.mu.Lock()
subs := []*subscription{sub}
for _, route := range s.routes {
route.mu.Lock()
route.sendRouteSubProtos(subs, trace, func(sub *subscription) bool {
return route.canImport(string(sub.subject))
})
route.mu.Unlock()
}
s.mu.Unlock()
}
// broadcastUnSubscribe will forward a client unsubscribe
// action to all active routes.
func (s *Server) broadcastUnSubscribe(sub *subscription) {
trace := atomic.LoadInt32(&s.logging.trace) == 1
s.mu.Lock()
subs := []*subscription{sub}
for _, route := range s.routes {
route.mu.Lock()
route.sendRouteUnSubProtos(subs, trace, func(sub *subscription) bool {
return route.canImport(string(sub.subject))
})
route.mu.Unlock()
}
s.mu.Unlock()
}
func (s *Server) routeAcceptLoop(ch chan struct{}) {
defer func() {
if ch != nil {
close(ch)
}
}()
// Snapshot server options.
opts := s.getOpts()
port := opts.Cluster.Port
if port == -1 {
port = 0
}
hp := net.JoinHostPort(opts.Cluster.Host, strconv.Itoa(port))
l, e := net.Listen("tcp", hp)
if e != nil {
s.Fatalf("Error listening on router port: %d - %v", opts.Cluster.Port, e)
return
}
s.Noticef("Listening for route connections on %s",
net.JoinHostPort(opts.Cluster.Host, strconv.Itoa(l.Addr().(*net.TCPAddr).Port)))
s.mu.Lock()
// For tests, we want to be able to make this server behave
// as an older server so we use the variable which we can override.
proto := testRouteProto
// Check for TLSConfig
tlsReq := opts.Cluster.TLSConfig != nil
info := Info{
ID: s.info.ID,
Version: s.info.Version,
GoVersion: runtime.Version(),
AuthRequired: false,
TLSRequired: tlsReq,
TLSVerify: tlsReq,
MaxPayload: s.info.MaxPayload,
Proto: proto,
GatewayURL: s.getGatewayURL(),
}
// Set this only if advertise is not disabled
if !opts.Cluster.NoAdvertise {
info.ClientConnectURLs = s.clientConnectURLs
}
// If we have selected a random port...
if port == 0 {
// Write resolved port back to options.
opts.Cluster.Port = l.Addr().(*net.TCPAddr).Port
}
// Check for Auth items
if opts.Cluster.Username != "" {
info.AuthRequired = true
}
// Check for permissions.
if opts.Cluster.Permissions != nil {
info.Import = opts.Cluster.Permissions.Import
info.Export = opts.Cluster.Permissions.Export
}
// If this server has a LeafNode accept loop, s.leafNodeInfo.IP is,
// at this point, set to the host:port for the leafnode accept URL,
// taking into account possible advertise setting. Use the LeafNodeURLs
// and set this server's leafnode accept URL. This will be sent to
// routed servers.
if !opts.LeafNode.NoAdvertise && s.leafNodeInfo.IP != _EMPTY_ {
info.LeafNodeURLs = []string{s.leafNodeInfo.IP}
}
s.routeInfo = info
// Possibly override Host/Port and set IP based on Cluster.Advertise
if err := s.setRouteInfoHostPortAndIP(); err != nil {
s.Fatalf("Error setting route INFO with Cluster.Advertise value of %s, err=%v", s.opts.Cluster.Advertise, err)
l.Close()
s.mu.Unlock()
return
}
// Setup state that can enable shutdown
s.routeListener = l
// Warn if using Cluster.Insecure
if tlsReq && opts.Cluster.TLSConfig.InsecureSkipVerify {
s.Warnf(clusterTLSInsecureWarning)
}
s.mu.Unlock()
// Let them know we are up
close(ch)
ch = nil
tmpDelay := ACCEPT_MIN_SLEEP
for s.isRunning() {
conn, err := l.Accept()
if err != nil {
tmpDelay = s.acceptError("Route", err, tmpDelay)
continue
}
tmpDelay = ACCEPT_MIN_SLEEP
s.startGoRoutine(func() {
s.createRoute(conn, nil)
s.grWG.Done()
})
}
s.Debugf("Router accept loop exiting..")
s.done <- true
}
// Similar to setInfoHostPortAndGenerateJSON, but for routeInfo.
func (s *Server) setRouteInfoHostPortAndIP() error {
if s.opts.Cluster.Advertise != "" {
advHost, advPort, err := parseHostPort(s.opts.Cluster.Advertise, s.opts.Cluster.Port)
if err != nil {
return err
}
s.routeInfo.Host = advHost
s.routeInfo.Port = advPort
s.routeInfo.IP = fmt.Sprintf("nats-route://%s/", net.JoinHostPort(advHost, strconv.Itoa(advPort)))
} else {
s.routeInfo.Host = s.opts.Cluster.Host
s.routeInfo.Port = s.opts.Cluster.Port
s.routeInfo.IP = ""
}
// (re)generate the routeInfoJSON byte array
s.generateRouteInfoJSON()
return nil
}
// StartRouting will start the accept loop on the cluster host:port
// and will actively try to connect to listed routes.
func (s *Server) StartRouting(clientListenReady chan struct{}) {
defer s.grWG.Done()
// Wait for the client listen port to be opened, and
// the possible ephemeral port to be selected.
<-clientListenReady
// Spin up the accept loop
ch := make(chan struct{})
go s.routeAcceptLoop(ch)
<-ch
// Solicit Routes if needed.
s.solicitRoutes(s.getOpts().Routes)
}
func (s *Server) reConnectToRoute(rURL *url.URL, rtype RouteType) {
tryForEver := rtype == Explicit
// If A connects to B, and B to A (regardless if explicit or
// implicit - due to auto-discovery), and if each server first
// registers the route on the opposite TCP connection, the
// two connections will end up being closed.
// Add some random delay to reduce risk of repeated failures.
delay := time.Duration(rand.Intn(100)) * time.Millisecond
if tryForEver {
delay += DEFAULT_ROUTE_RECONNECT
}
select {
case <-time.After(delay):
case <-s.quitCh:
s.grWG.Done()
return
}
s.connectToRoute(rURL, tryForEver, false)
}
// Checks to make sure the route is still valid.
func (s *Server) routeStillValid(rURL *url.URL) bool {
for _, ri := range s.getOpts().Routes {
if urlsAreEqual(ri, rURL) {
return true
}
}
return false
}
func (s *Server) connectToRoute(rURL *url.URL, tryForEver, firstConnect bool) {
// Snapshot server options.
opts := s.getOpts()
defer s.grWG.Done()
const connErrFmt = "Error trying to connect to route (attempt %v): %v"
attempts := 0
for s.isRunning() && rURL != nil {
if tryForEver && !s.routeStillValid(rURL) {
return
}
s.Debugf("Trying to connect to route on %s", rURL.Host)
conn, err := net.DialTimeout("tcp", rURL.Host, DEFAULT_ROUTE_DIAL)
if err != nil {
attempts++
if s.shouldReportConnectErr(firstConnect, attempts) {
s.Errorf(connErrFmt, attempts, err)
} else {
s.Debugf(connErrFmt, attempts, err)
}
if !tryForEver {
if opts.Cluster.ConnectRetries <= 0 {
return
}
if attempts > opts.Cluster.ConnectRetries {
return
}
}
select {
case <-s.quitCh:
return
case <-time.After(routeConnectDelay):
continue
}
}
if tryForEver && !s.routeStillValid(rURL) {
conn.Close()
return
}
// We have a route connection here.
// Go ahead and create it and exit this func.
s.createRoute(conn, rURL)
return
}
}
func (c *client) isSolicitedRoute() bool {
c.mu.Lock()
defer c.mu.Unlock()
return c.kind == ROUTER && c.route != nil && c.route.didSolicit
}
func (s *Server) solicitRoutes(routes []*url.URL) {
for _, r := range routes {
route := r
s.startGoRoutine(func() { s.connectToRoute(route, true, true) })
}
}
func (c *client) processRouteConnect(srv *Server, arg []byte, lang string) error {
// Way to detect clients that incorrectly connect to the route listen
// port. Clients provide Lang in the CONNECT protocol while ROUTEs don't.
if lang != "" {
c.sendErrAndErr(ErrClientConnectedToRoutePort.Error())
c.closeConnection(WrongPort)
return ErrClientConnectedToRoutePort
}
// Unmarshal as a route connect protocol
proto := &connectInfo{}
if err := json.Unmarshal(arg, proto); err != nil {
return err
}
// Reject if this has Gateway which means that it would be from a gateway
// connection that incorrectly connects to the Route port.
if proto.Gateway != "" {
errTxt := fmt.Sprintf("Rejecting connection from gateway %q on the Route port", proto.Gateway)
c.Errorf(errTxt)
c.sendErr(errTxt)
c.closeConnection(WrongGateway)
return ErrWrongGateway
}
var perms *RoutePermissions
if srv != nil {
perms = srv.getOpts().Cluster.Permissions
}
// Grab connection name of remote route.
c.mu.Lock()
c.route.remoteID = c.opts.Name
c.setRoutePermissions(perms)
c.mu.Unlock()
return nil
}
func (s *Server) removeRoute(c *client) {
var rID string
var lnURL string
c.mu.Lock()
cid := c.cid
r := c.route
if r != nil {
rID = r.remoteID
lnURL = r.leafnodeURL
}
c.mu.Unlock()
s.mu.Lock()
delete(s.routes, cid)
if r != nil {
rc, ok := s.remotes[rID]
// Only delete it if it is us..
if ok && c == rc {
delete(s.remotes, rID)
}
s.removeGatewayURL(r.gatewayURL)
// Remove the remote's leafNode URL from
// our list and send update to LN connections.
if lnURL != _EMPTY_ && s.removeLeafNodeURL(lnURL) {
s.sendAsyncLeafNodeInfo()
}
}
s.removeFromTempClients(cid)
s.mu.Unlock()
}
| 1 | 9,193 | Same: DO NOT USE IN PRODUCTION. Yes, we should shout ;) | nats-io-nats-server | go |
@@ -134,7 +134,7 @@ namespace Microsoft.DotNet.Build.Tasks.Feed
await clientThrottle.WaitAsync();
string leaseId = string.Empty;
//this defines the lease for 15 seconds (max is 60) and 3000 milliseconds between requests
- AzureBlobLease blobLease = new AzureBlobLease(feed.AccountName, feed.AccountKey, string.Empty, feed.ContainerName, uploadPath, Log, "15", "3000");
+ AzureBlobLease blobLease = new AzureBlobLease(feed.AccountName, feed.AccountKey, string.Empty, feed.ContainerName, uploadPath, Log, "15", "5000");
bool isLeaseRequired = allowOverwrite && await feed.CheckIfBlobExists(uploadPath);
| 1 | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
// See the LICENSE file in the project root for more information.
using System;
using System.Collections.Generic;
using MSBuild = Microsoft.Build.Utilities;
using System.Linq;
using System.Threading.Tasks;
using System.Threading;
using System.IO;
using Microsoft.DotNet.Build.CloudTestTasks;
using NuGet.Versioning;
using System.Text.RegularExpressions;
namespace Microsoft.DotNet.Build.Tasks.Feed
{
sealed class BlobFeedAction
{
private MSBuild.TaskLoggingHelper Log;
private static readonly CancellationTokenSource TokenSource = new CancellationTokenSource();
private static readonly CancellationToken CancellationToken = TokenSource.Token;
public BlobFeed feed;
public int MaxClients { get; set; } = 8;
const string feedRegex = @"(?<feedurl>https:\/\/(?<accountname>[^\.-]+)(?<domain>[^\/]*)\/((?<token>[a-zA-Z0-9+\/]*?\/\d{4}-\d{2}-\d{2})\/)?(?<containername>[^\/]+)\/(?<relativepath>.*)\/)index\.json";
public BlobFeedAction(string expectedFeedUrl, string accountKey, string indexDirectory, MSBuild.TaskLoggingHelper Log)
{
this.Log = Log;
Match m = Regex.Match(expectedFeedUrl, feedRegex);
if (m.Success)
{
string accountName = m.Groups["accountname"].Value;
string containerName = m.Groups["containername"].Value;
string relativePath = m.Groups["relativepath"].Value;
string feedUrl = m.Groups["feedurl"].Value;
bool isPublic = string.IsNullOrWhiteSpace(m.Groups["token"].Value);
this.feed = new BlobFeed(accountName, accountKey, containerName, relativePath, feedUrl, string.IsNullOrWhiteSpace(indexDirectory) ? Path.GetTempPath() : indexDirectory, Log, isPublic);
}
else
{
throw new Exception("Unable to parse expected feed. Please check ExpectedFeedUrl.");
}
}
public async Task<bool> PushToFeed(IEnumerable<string> items, bool allowOverwrite = false)
{
if (feed.IsSanityChecked(items))
{
if (CancellationToken.IsCancellationRequested)
{
Log.LogError("Task PushToFeed cancelled");
CancellationToken.ThrowIfCancellationRequested();
}
using (var clientThrottle = new SemaphoreSlim(this.MaxClients, this.MaxClients))
{
await Task.WhenAll(items.Select(item => PushItemToFeed(item, feed.RelativePath, clientThrottle, allowOverwrite, false)));
}
}
return !Log.HasLoggedErrors;
}
public async Task<bool> PushToFeedFlat(IEnumerable<string> items, bool allowOverwrite = false)
{
if (CancellationToken.IsCancellationRequested)
{
Log.LogError("Task PushToFeedFlat cancelled");
CancellationToken.ThrowIfCancellationRequested();
}
using (var clientThrottle = new SemaphoreSlim(this.MaxClients, this.MaxClients))
{
await Task.WhenAll(items.Select(item => PushItemToFeed(item, feed.RelativePath, clientThrottle, allowOverwrite, true)));
}
return !Log.HasLoggedErrors;
}
public async Task<bool> PushItemToFeed(string item, string relativePath, SemaphoreSlim clientThrottle, bool allowOverwrite, bool isFlat)
{
try
{
string uploadPath = feed.CalculateBlobPath(item, relativePath);
string packageDirectory = feed.CalculateRelativeUploadPath(item, relativePath);
await UploadAsync(CancellationToken, item, uploadPath, clientThrottle, allowOverwrite);
if (!isFlat)
{
List<string> listAzureBlobs = await ListAzureBlobs.ListBlobs(Log, feed.AccountName, feed.AccountKey, feed.ContainerName, packageDirectory);
if (!listAzureBlobs.Any(x => x.Contains(uploadPath)))
{
throw new Exception($"Uploaded package {uploadPath} is not present on feed. Cannot update index.json.");
}
await UploadIndexJson(clientThrottle, true, packageDirectory, listAzureBlobs);
}
}
catch (Exception e)
{
Log.LogErrorFromException(e);
}
return !Log.HasLoggedErrors;
}
private async Task UploadIndexJson(SemaphoreSlim clientThrottle, bool allowOverwrite, string packageDirectory, List<string> listAzureBlobs)
{
listAzureBlobs.Remove(listAzureBlobs.Find(x => x.Contains("index.json")));
List<string> updatedVersions = new List<string>();
foreach (var version in listAzureBlobs)
{
string versionToCheck = version.Substring(packageDirectory.Length + 1).Split('/')[0];
NuGetVersion nugetVersion = null;
if (NuGetVersion.TryParse(versionToCheck, out nugetVersion))
{
updatedVersions.Add(versionToCheck);
}
}
string packageIndexJsonLocation = feed.GeneratePackageServiceIndex(packageDirectory, updatedVersions);
await UploadAsync(CancellationToken, packageIndexJsonLocation, packageDirectory + "/index.json", clientThrottle, true);
}
private async Task UploadAsync(CancellationToken ct, string item, string uploadPath, SemaphoreSlim clientThrottle, bool allowOverwrite)
{
if (!File.Exists(item))
throw new Exception(string.Format("The file '{0}' does not exist.", item));
await clientThrottle.WaitAsync();
string leaseId = string.Empty;
//this defines the lease for 15 seconds (max is 60) and 3000 milliseconds between requests
AzureBlobLease blobLease = new AzureBlobLease(feed.AccountName, feed.AccountKey, string.Empty, feed.ContainerName, uploadPath, Log, "15", "3000");
bool isLeaseRequired = allowOverwrite && await feed.CheckIfBlobExists(uploadPath);
if (isLeaseRequired)
{
try
{
leaseId = blobLease.Acquire();
}
catch (Exception)
{
Log.LogError($"Unable to obtain lease on {uploadPath}");
}
}
try
{
bool isExists = await feed.CheckIfBlobExists(uploadPath);
if (!isExists || allowOverwrite)
{
Log.LogMessage($"Uploading {item} to {uploadPath}.");
UploadClient uploadClient = new UploadClient(Log);
await
uploadClient.UploadBlockBlobAsync(
ct,
feed.AccountName,
feed.AccountKey,
feed.ContainerName,
item,
uploadPath,
leaseId);
}
else
{
Log.LogMessage($"Skipping uploading of {item} to {uploadPath}. Already exists.");
}
}
catch (Exception)
{
Log.LogError($"Unable to upload to {uploadPath}");
}
finally
{
if (isLeaseRequired)
{
blobLease.Release();
}
clientThrottle.Release();
}
}
}
}
| 1 | 13,961 | This change may just be hiding more errors if we continue to see them. | dotnet-buildtools | .cs |
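One way to address the concern above, sketched under the assumption that `AzureBlobLease.Acquire()` throws on failure (the surrounding catch block suggests it does): log every failed acquisition attempt instead of only lengthening the retry interval, so a longer delay cannot silently mask a recurring error. The helper name and retry count are hypothetical, not part of the PR.

using System;
using Microsoft.Build.Utilities;
using Microsoft.DotNet.Build.CloudTestTasks;

static class LeaseRetry
{
    // Hypothetical helper: surfaces every failure rather than hiding it behind a longer delay.
    public static string AcquireWithVisibleRetries(AzureBlobLease blobLease, TaskLoggingHelper log, string uploadPath, int maxAttempts = 3)
    {
        for (int attempt = 1; attempt <= maxAttempts; attempt++)
        {
            try
            {
                return blobLease.Acquire();
            }
            catch (Exception e)
            {
                // Keep the underlying error visible on every attempt.
                log.LogWarning($"Lease attempt {attempt} of {maxAttempts} on {uploadPath} failed: {e.Message}");
            }
        }
        return string.Empty; // callers already treat an empty lease id as "no lease"
    }
}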
@@ -246,13 +246,13 @@ namespace Microsoft.AspNetCore.Server.Kestrel.Core.Internal.Http2
object IFeatureCollection.this[Type key]
{
- get => FastFeatureGet(key);
+ get => FastFeatureGet(key) ?? ConnectionFeatures?[key];
set => FastFeatureSet(key, value);
}
TFeature IFeatureCollection.Get<TFeature>()
{
- return (TFeature)FastFeatureGet(typeof(TFeature));
+ return (TFeature)(FastFeatureGet(typeof(TFeature)) ?? ConnectionFeatures?[typeof(TFeature)]);
}
void IFeatureCollection.Set<TFeature>(TFeature instance) | 1 | // Copyright (c) .NET Foundation. All rights reserved.
// Licensed under the Apache License, Version 2.0. See License.txt in the project root for license information.
using System;
using System.Collections;
using System.Collections.Generic;
using System.IO;
using System.Net;
using System.Threading;
using System.Threading.Tasks;
using Microsoft.AspNetCore.Http;
using Microsoft.AspNetCore.Http.Features;
using Microsoft.AspNetCore.Server.Kestrel.Core.Features;
using Microsoft.Extensions.Primitives;
namespace Microsoft.AspNetCore.Server.Kestrel.Core.Internal.Http2
{
public partial class Http2Stream : IFeatureCollection,
IHttpRequestFeature,
IHttpResponseFeature,
IHttpUpgradeFeature,
IHttpConnectionFeature,
IHttpRequestLifetimeFeature,
IHttpRequestIdentifierFeature,
IHttpBodyControlFeature,
IHttpMaxRequestBodySizeFeature,
IHttpMinRequestBodyDataRateFeature,
IHttpMinResponseDataRateFeature,
IHttp2StreamIdFeature
{
// NOTE: When feature interfaces are added to or removed from this Frame class implementation,
// then the list of `implementedFeatures` in the generated code project MUST also be updated.
// See also: tools/Microsoft.AspNetCore.Server.Kestrel.GeneratedCode/FrameFeatureCollection.cs
private int _featureRevision;
private List<KeyValuePair<Type, object>> MaybeExtra;
public void ResetFeatureCollection()
{
FastReset();
MaybeExtra?.Clear();
_featureRevision++;
}
private object ExtraFeatureGet(Type key)
{
if (MaybeExtra == null)
{
return null;
}
for (var i = 0; i < MaybeExtra.Count; i++)
{
var kv = MaybeExtra[i];
if (kv.Key == key)
{
return kv.Value;
}
}
return null;
}
private void ExtraFeatureSet(Type key, object value)
{
if (MaybeExtra == null)
{
MaybeExtra = new List<KeyValuePair<Type, object>>(2);
}
for (var i = 0; i < MaybeExtra.Count; i++)
{
if (MaybeExtra[i].Key == key)
{
MaybeExtra[i] = new KeyValuePair<Type, object>(key, value);
return;
}
}
MaybeExtra.Add(new KeyValuePair<Type, object>(key, value));
}
string IHttpRequestFeature.Protocol
{
get => HttpVersion;
set => throw new InvalidOperationException();
}
string IHttpRequestFeature.Scheme
{
get => Scheme ?? "http";
set => Scheme = value;
}
string IHttpRequestFeature.Method
{
get => Method;
set => Method = value;
}
string IHttpRequestFeature.PathBase
{
get => PathBase ?? "";
set => PathBase = value;
}
string IHttpRequestFeature.Path
{
get => Path;
set => Path = value;
}
string IHttpRequestFeature.QueryString
{
get => QueryString;
set => QueryString = value;
}
string IHttpRequestFeature.RawTarget
{
get => RawTarget;
set => RawTarget = value;
}
IHeaderDictionary IHttpRequestFeature.Headers
{
get => RequestHeaders;
set => RequestHeaders = value;
}
Stream IHttpRequestFeature.Body
{
get => RequestBody;
set => RequestBody = value;
}
int IHttpResponseFeature.StatusCode
{
get => StatusCode;
set => StatusCode = value;
}
string IHttpResponseFeature.ReasonPhrase
{
get => ReasonPhrase;
set => ReasonPhrase = value;
}
IHeaderDictionary IHttpResponseFeature.Headers
{
get => ResponseHeaders;
set => ResponseHeaders = value;
}
Stream IHttpResponseFeature.Body
{
get => ResponseBody;
set => ResponseBody = value;
}
CancellationToken IHttpRequestLifetimeFeature.RequestAborted
{
get => RequestAborted;
set => RequestAborted = value;
}
bool IHttpResponseFeature.HasStarted => HasResponseStarted;
bool IHttpUpgradeFeature.IsUpgradableRequest => false;
bool IFeatureCollection.IsReadOnly => false;
int IFeatureCollection.Revision => _featureRevision;
IPAddress IHttpConnectionFeature.RemoteIpAddress
{
get => RemoteIpAddress;
set => RemoteIpAddress = value;
}
IPAddress IHttpConnectionFeature.LocalIpAddress
{
get => LocalIpAddress;
set => LocalIpAddress = value;
}
int IHttpConnectionFeature.RemotePort
{
get => RemotePort;
set => RemotePort = value;
}
int IHttpConnectionFeature.LocalPort
{
get => LocalPort;
set => LocalPort = value;
}
string IHttpConnectionFeature.ConnectionId
{
get => ConnectionIdFeature;
set => ConnectionIdFeature = value;
}
string IHttpRequestIdentifierFeature.TraceIdentifier
{
get => TraceIdentifier;
set => TraceIdentifier = value;
}
bool IHttpBodyControlFeature.AllowSynchronousIO
{
get => AllowSynchronousIO;
set => AllowSynchronousIO = value;
}
bool IHttpMaxRequestBodySizeFeature.IsReadOnly => HasStartedConsumingRequestBody;
long? IHttpMaxRequestBodySizeFeature.MaxRequestBodySize
{
get => MaxRequestBodySize;
set
{
if (HasStartedConsumingRequestBody)
{
throw new InvalidOperationException(CoreStrings.MaxRequestBodySizeCannotBeModifiedAfterRead);
}
if (value < 0)
{
throw new ArgumentOutOfRangeException(nameof(value), CoreStrings.NonNegativeNumberOrNullRequired);
}
MaxRequestBodySize = value;
}
}
MinDataRate IHttpMinRequestBodyDataRateFeature.MinDataRate
{
get => throw new NotImplementedException();
set => throw new NotImplementedException();
}
MinDataRate IHttpMinResponseDataRateFeature.MinDataRate
{
get => throw new NotImplementedException();
set => throw new NotImplementedException();
}
object IFeatureCollection.this[Type key]
{
get => FastFeatureGet(key);
set => FastFeatureSet(key, value);
}
TFeature IFeatureCollection.Get<TFeature>()
{
return (TFeature)FastFeatureGet(typeof(TFeature));
}
void IFeatureCollection.Set<TFeature>(TFeature instance)
{
FastFeatureSet(typeof(TFeature), instance);
}
void IHttpResponseFeature.OnStarting(Func<object, Task> callback, object state)
{
OnStarting(callback, state);
}
void IHttpResponseFeature.OnCompleted(Func<object, Task> callback, object state)
{
OnCompleted(callback, state);
}
Task<Stream> IHttpUpgradeFeature.UpgradeAsync()
{
throw new NotImplementedException();
}
IEnumerator<KeyValuePair<Type, object>> IEnumerable<KeyValuePair<Type, object>>.GetEnumerator() => FastEnumerable().GetEnumerator();
IEnumerator IEnumerable.GetEnumerator() => FastEnumerable().GetEnumerator();
void IHttpRequestLifetimeFeature.Abort()
{
Abort(error: null);
}
int IHttp2StreamIdFeature.StreamId => StreamId;
}
}
| 1 | 13,895 | Beware of exposing the underlying features directly. Any mutable fields should be reset per request. | aspnet-KestrelHttpServer | .cs |
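A minimal standalone sketch of the reset-per-request concern (a hypothetical type, not Kestrel's actual implementation): if the stream caches a feature resolved through the connection fallback, that cache is mutable per-request state and must be cleared on reset, or it leaks into the next request on the same connection.

using System;
using System.Collections.Generic;

class CachedFeatureLookup
{
    private readonly Func<Type, object> _connectionLookup; // stands in for ConnectionFeatures[key]
    private readonly Dictionary<Type, object> _cache = new Dictionary<Type, object>();

    public CachedFeatureLookup(Func<Type, object> connectionLookup)
    {
        _connectionLookup = connectionLookup;
    }

    public object Get(Type key)
    {
        if (!_cache.TryGetValue(key, out var feature))
        {
            feature = _connectionLookup(key);
            _cache[key] = feature;
        }
        return feature;
    }

    // Belongs in the per-request reset (cf. ResetFeatureCollection above); otherwise
    // a feature cached during one request is observed by the next one.
    public void Reset() => _cache.Clear();
}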
@@ -144,12 +144,18 @@ export function renderComponent(component, opts, mountAll, isChild) {
if (initialBase && base!==initialBase && inst!==initialChildComponent) {
let baseParent = initialBase.parentNode;
if (baseParent && base!==baseParent) {
- baseParent.replaceChild(base, initialBase);
if (!toUnmount) {
+ baseParent.replaceChild(base, initialBase);
initialBase._component = null;
recollectNodeTree(initialBase, false);
}
+			// @TODO Formerly the initialBase was always replaced by the base,
+			// but somehow all tests pass even when this is only done when not unmounting.
+			// This else branch was the original fix.
+ // else {
+ // baseParent.insertBefore(base, initialBase);
+ // }
}
}
| 1 | import { SYNC_RENDER, NO_RENDER, FORCE_RENDER, ASYNC_RENDER, ATTR_KEY } from '../constants';
import options from '../options';
import { extend } from '../util';
import { enqueueRender } from '../render-queue';
import { getNodeProps } from './index';
import { diff, mounts, diffLevel, flushMounts, recollectNodeTree, removeChildren } from './diff';
import { createComponent, collectComponent } from './component-recycler';
import { removeNode } from '../dom';
/** Set a component's `props` (generally derived from JSX attributes).
* @param {Object} props
* @param {Object} [opts]
* @param {boolean} [opts.renderSync=false] If `true` and {@link options.syncComponentUpdates} is `true`, triggers synchronous rendering.
* @param {boolean} [opts.render=true] If `false`, no render will be triggered.
*/
export function setComponentProps(component, props, opts, context, mountAll) {
if (component._disable) return;
component._disable = true;
if ((component.__ref = props.ref)) delete props.ref;
if ((component.__key = props.key)) delete props.key;
if (!component.base || mountAll) {
if (component.componentWillMount) component.componentWillMount();
}
else if (component.componentWillReceiveProps) {
component.componentWillReceiveProps(props, context);
}
if (context && context!==component.context) {
if (!component.prevContext) component.prevContext = component.context;
component.context = context;
}
if (!component.prevProps) component.prevProps = component.props;
component.props = props;
component._disable = false;
if (opts!==NO_RENDER) {
if (opts===SYNC_RENDER || options.syncComponentUpdates!==false || !component.base) {
renderComponent(component, SYNC_RENDER, mountAll);
}
else {
enqueueRender(component);
}
}
if (component.__ref) component.__ref(component);
}
/** Render a Component, triggering necessary lifecycle events and taking High-Order Components into account.
* @param {Component} component
* @param {Object} [opts]
* @param {boolean} [opts.build=false] If `true`, component will build and store a DOM node if not already associated with one.
* @private
*/
export function renderComponent(component, opts, mountAll, isChild) {
if (component._disable) return;
let props = component.props,
state = component.state,
context = component.context,
previousProps = component.prevProps || props,
previousState = component.prevState || state,
previousContext = component.prevContext || context,
isUpdate = component.base,
nextBase = component.nextBase,
initialBase = isUpdate || nextBase,
initialChildComponent = component._component,
skip = false,
rendered, inst, cbase;
// if updating
if (isUpdate) {
component.props = previousProps;
component.state = previousState;
component.context = previousContext;
if (opts!==FORCE_RENDER
&& component.shouldComponentUpdate
&& component.shouldComponentUpdate(props, state, context) === false) {
skip = true;
}
else if (component.componentWillUpdate) {
component.componentWillUpdate(props, state, context);
}
component.props = props;
component.state = state;
component.context = context;
}
component.prevProps = component.prevState = component.prevContext = component.nextBase = null;
component._dirty = false;
if (!skip) {
rendered = component.render(props, state, context);
// context to pass to the child, can be updated via (grand-)parent component
if (component.getChildContext) {
context = extend(extend({}, context), component.getChildContext());
}
let childComponent = rendered && rendered.nodeName,
toUnmount, base;
if (typeof childComponent==='function') {
// set up high order component link
let childProps = getNodeProps(rendered);
inst = initialChildComponent;
if (inst && inst.constructor===childComponent && childProps.key==inst.__key) {
setComponentProps(inst, childProps, SYNC_RENDER, context, false);
}
else {
toUnmount = inst;
component._component = inst = createComponent(childComponent, childProps, context);
inst.nextBase = inst.nextBase || nextBase;
inst._parentComponent = component;
setComponentProps(inst, childProps, NO_RENDER, context, false);
renderComponent(inst, SYNC_RENDER, mountAll, true);
}
base = inst.base;
}
else {
cbase = initialBase;
// destroy high order component link
toUnmount = initialChildComponent;
if (toUnmount) {
cbase = component._component = null;
}
if (initialBase || opts===SYNC_RENDER) {
if (cbase) cbase._component = null;
base = diff(cbase, rendered, context, mountAll || !isUpdate, initialBase && initialBase.parentNode, true);
}
}
if (initialBase && base!==initialBase && inst!==initialChildComponent) {
let baseParent = initialBase.parentNode;
if (baseParent && base!==baseParent) {
baseParent.replaceChild(base, initialBase);
if (!toUnmount) {
initialBase._component = null;
recollectNodeTree(initialBase, false);
}
}
}
if (toUnmount) {
unmountComponent(toUnmount);
}
component.base = base;
if (base && !isChild) {
let componentRef = component,
t = component;
while ((t=t._parentComponent)) {
(componentRef = t).base = base;
}
base._component = componentRef;
base._componentConstructor = componentRef.constructor;
}
}
if (!isUpdate || mountAll) {
mounts.unshift(component);
}
else if (!skip) {
// Ensure that pending componentDidMount() hooks of child components
// are called before the componentDidUpdate() hook in the parent.
flushMounts();
if (component.componentDidUpdate) {
component.componentDidUpdate(previousProps, previousState, previousContext);
}
if (options.afterUpdate) options.afterUpdate(component);
}
if (component._renderCallbacks!=null) {
while (component._renderCallbacks.length) component._renderCallbacks.pop().call(component);
}
if (!diffLevel && !isChild) flushMounts();
}
/** Apply the Component referenced by a VNode to the DOM.
* @param {Element} dom The DOM node to mutate
* @param {VNode} vnode A Component-referencing VNode
* @returns {Element} dom The created/mutated element
* @private
*/
export function buildComponentFromVNode(dom, vnode, context, mountAll) {
let c = dom && dom._component,
originalComponent = c,
oldDom = dom,
isDirectOwner = c && dom._componentConstructor===vnode.nodeName,
isOwner = isDirectOwner,
props = getNodeProps(vnode);
while (c && !isOwner && (c=c._parentComponent)) {
isOwner = c.constructor===vnode.nodeName;
}
if (c && isOwner && (!mountAll || c._component)) {
setComponentProps(c, props, ASYNC_RENDER, context, mountAll);
dom = c.base;
}
else {
if (originalComponent && !isDirectOwner) {
unmountComponent(originalComponent);
dom = oldDom = null;
}
c = createComponent(vnode.nodeName, props, context);
if (dom && !c.nextBase) {
c.nextBase = dom;
// passing dom/oldDom as nextBase will recycle it if unused, so bypass recycling on L229:
oldDom = null;
}
setComponentProps(c, props, SYNC_RENDER, context, mountAll);
dom = c.base;
if (oldDom && dom!==oldDom) {
oldDom._component = null;
recollectNodeTree(oldDom, false);
}
}
return dom;
}
/** Remove a component from the DOM and recycle it.
* @param {Component} component The Component instance to unmount
* @private
*/
export function unmountComponent(component) {
if (options.beforeUnmount) options.beforeUnmount(component);
let base = component.base;
component._disable = true;
if (component.componentWillUnmount) component.componentWillUnmount();
component.base = null;
// recursively tear down & recollect high-order component children:
let inner = component._component;
if (inner) {
unmountComponent(inner);
}
else if (base) {
if (base[ATTR_KEY] && base[ATTR_KEY].ref) base[ATTR_KEY].ref(null);
component.nextBase = base;
removeNode(base);
collectComponent(component);
removeChildren(base);
}
if (component.__ref) component.__ref(null);
}
| 1 | 11,090 | This is probably more nuanced than I can feasibly check in a PR review, haha. Was the issue here that `replaceChild()` removes `initialBase` from the DOM before `recollectNodeTree()` invokes `componentWillUnmount()` on the owning component? | preactjs-preact | js |
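A standalone DOM illustration of the ordering question above (plain browser APIs, not Preact internals): `replaceChild()` detaches the old node immediately, so any teardown that runs after the swap, such as unmount hooks fired from `recollectNodeTree()`, already sees a node outside the document.

const parent = document.createElement("div");
const oldNode = document.createElement("span");
const newNode = document.createElement("span");
parent.appendChild(oldNode);

parent.replaceChild(newNode, oldNode);

// oldNode is out of the tree before any later teardown runs:
console.log(oldNode.parentNode); // null
console.log(newNode.parentNode === parent); // true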
@@ -35,3 +35,6 @@ def testCanClickOnALinkThatOverflowsAndFollowIt(driver):
def testClickingALinkMadeUpOfNumbersIsHandledCorrectly(driver):
driver.find_element(By.LINK_TEXT, "333333").click()
WebDriverWait(driver, 3).until(EC.title_is("XHTML Test Page"))
+
+def testCannotClickDisabledButton(driver):
+ WebDriverWait(driver, 3).until(EC.element_to_be_unclickable(By.ID, "disabled-button")) | 1 | # Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import pytest
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.wait import WebDriverWait
@pytest.fixture(autouse=True)
def loadPage(pages):
pages.load("clicks.html")
def testCanClickOnALinkThatOverflowsAndFollowIt(driver):
driver.find_element(By.ID, "overflowLink").click()
WebDriverWait(driver, 3).until(EC.title_is("XHTML Test Page"))
def testClickingALinkMadeUpOfNumbersIsHandledCorrectly(driver):
driver.find_element(By.LINK_TEXT, "333333").click()
WebDriverWait(driver, 3).until(EC.title_is("XHTML Test Page"))
| 1 | 16,343 | I believe it's a misleading name for the condition; I'd prefer "element_to_be_disabled". We can have a situation where the element is enabled but we still can't click it because another element overlays it. So if we use "unclickable" we might mislead people who use that condition to verify whether the element can be clicked. | SeleniumHQ-selenium | py |
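A sketch of the condition the comment argues for, written against Selenium's public Python API (the predicate form matches how `WebDriverWait.until` consumes expected conditions; the name follows the reviewer's suggestion):

def element_to_be_disabled(locator):
    """Expectation that the element found by `locator` exists and is disabled."""
    def _predicate(driver):
        element = driver.find_element(*locator)
        # is_enabled() reflects the disabled attribute; "unclickable" is broader,
        # since an enabled element can still be covered by an overlaying element.
        return element if not element.is_enabled() else False
    return _predicate

# Usage, mirroring the test in the patch:
#   WebDriverWait(driver, 3).until(element_to_be_disabled((By.ID, "disabled-button")))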
@@ -86,4 +86,8 @@ public class DataWriter<T> implements Closeable {
Preconditions.checkState(dataFile != null, "Cannot create data file from unclosed writer");
return dataFile;
}
+
+ public FileAppender<T> appender() {
+ return appender;
+ }
} | 1 | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.iceberg.io;
import java.io.Closeable;
import java.io.IOException;
import java.nio.ByteBuffer;
import org.apache.iceberg.DataFile;
import org.apache.iceberg.DataFiles;
import org.apache.iceberg.FileFormat;
import org.apache.iceberg.PartitionSpec;
import org.apache.iceberg.SortOrder;
import org.apache.iceberg.StructLike;
import org.apache.iceberg.encryption.EncryptionKeyMetadata;
import org.apache.iceberg.relocated.com.google.common.base.Preconditions;
public class DataWriter<T> implements Closeable {
private final FileAppender<T> appender;
private final FileFormat format;
private final String location;
private final PartitionSpec spec;
private final StructLike partition;
private final ByteBuffer keyMetadata;
private final SortOrder sortOrder;
private DataFile dataFile = null;
public DataWriter(FileAppender<T> appender, FileFormat format, String location,
PartitionSpec spec, StructLike partition, EncryptionKeyMetadata keyMetadata) {
this(appender, format, location, spec, partition, keyMetadata, null);
}
public DataWriter(FileAppender<T> appender, FileFormat format, String location,
PartitionSpec spec, StructLike partition, EncryptionKeyMetadata keyMetadata, SortOrder sortOrder) {
this.appender = appender;
this.format = format;
this.location = location;
this.spec = spec;
this.partition = partition;
this.keyMetadata = keyMetadata != null ? keyMetadata.buffer() : null;
this.sortOrder = sortOrder;
}
public void add(T row) {
appender.add(row);
}
public long length() {
return appender.length();
}
@Override
public void close() throws IOException {
if (dataFile == null) {
appender.close();
this.dataFile = DataFiles.builder(spec)
.withFormat(format)
.withPath(location)
.withPartition(partition)
.withEncryptionKeyMetadata(keyMetadata)
.withFileSizeInBytes(appender.length())
.withMetrics(appender.metrics())
.withSplitOffsets(appender.splitOffsets())
.withSortOrder(sortOrder)
.build();
}
}
public DataFile toDataFile() {
Preconditions.checkState(dataFile != null, "Cannot create data file from unclosed writer");
return dataFile;
}
}
| 1 | 33,702 | Do we need to expose it? It looks like it is only used in tests and only to obtain the final metrics. I think you can get the same by using `DataFile#lowerBounds` and `DataFile#upperBounds`. It seems `DataWriter` already exposes `toDataFile`, which you can use. | apache-iceberg | java |
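A minimal sketch of the alternative the comment suggests: close the writer, then read per-column bounds from the resulting `DataFile` instead of reaching into the appender (`lowerBounds`/`upperBounds` are part of Iceberg's `ContentFile` API; the assertion wiring is left out):

import java.io.IOException;
import java.nio.ByteBuffer;
import java.util.Map;
import org.apache.iceberg.DataFile;
import org.apache.iceberg.io.DataWriter;

class DataFileBounds {
  static void readBounds(DataWriter<?> writer) throws IOException {
    writer.close(); // toDataFile() requires a closed writer
    DataFile dataFile = writer.toDataFile();
    Map<Integer, ByteBuffer> lower = dataFile.lowerBounds(); // field id -> encoded min value
    Map<Integer, ByteBuffer> upper = dataFile.upperBounds(); // field id -> encoded max value
    // compare lower/upper to expected values here instead of reading appender.metrics()
  }
}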
@@ -868,7 +868,7 @@ describe("date_utils", function() {
});
it("should return the 4 2021 year week", () => {
- const date = new Date("2021-01-18");
+ const date = new Date("2021-01-19");
assert(getWeek(date) === 3);
});
| 1 | import {
newDate,
addHours,
addDays,
subDays,
isEqual,
isSameDay,
isSameMonth,
isSameQuarter,
isSameYear,
isDayDisabled,
isDayExcluded,
isMonthDisabled,
isQuarterDisabled,
monthDisabledBefore,
monthDisabledAfter,
yearDisabledBefore,
yearDisabledAfter,
getEffectiveMinDate,
getEffectiveMaxDate,
addZero,
isTimeDisabled,
isTimeInDisabledRange,
isDayInRange,
parseDate,
isMonthinRange,
isQuarterInRange,
getStartOfYear,
getYearsPeriod,
setDefaultLocale,
yearsDisabledAfter,
yearsDisabledBefore,
getWeek
} from "../src/date_utils";
import setMinutes from "date-fns/setMinutes";
import setHours from "date-fns/setHours";
import ptBR from "date-fns/locale/pt-BR";
import { registerLocale } from "../src/date_utils";
registerLocale("pt-BR", ptBR);
describe("date_utils", function() {
describe("newDate", function() {
it("should return null for invalid value passed", function() {
expect(newDate("21123asd")).to.be.null;
});
});
describe("isEqual", function() {
it("should return true for null dates", function() {
expect(isEqual(null, null)).to.be.true;
});
it("should return false for a null and non-null date", function() {
expect(isEqual(newDate(), null)).to.be.false;
expect(isEqual(null, newDate())).to.be.false;
});
it("should return false for non-equal dates", function() {
expect(isEqual(newDate("2016-02-10"), newDate("2016-02-11"))).to.be.false;
});
it("should return false for non-equal date and date with time", function() {
expect(isEqual(newDate("2016-02-10"), newDate("2016-02-11 13:13"))).to.be
.false;
});
it("should return false for non-equal time", function() {
expect(isEqual(newDate("2016-02-10 13:13"), newDate("2016-02-11 13:14")))
.to.be.false;
});
it("should return true for equal dates", function() {
expect(isEqual(newDate("2016-02-10"), newDate("2016-02-10"))).to.be.true;
});
it("should return true for equal time", function() {
expect(isEqual(newDate("2016-02-10 13:13"), newDate("2016-02-10 13:13")))
.to.be.true;
});
});
describe("isSameDay", function() {
it("should return true for null dates", function() {
expect(isSameDay(null, null)).to.be.true;
});
it("should return false for a null and non-null date", function() {
expect(isSameDay(newDate(), null)).to.be.false;
expect(isSameDay(null, newDate())).to.be.false;
});
it("should return false for non-equal dates", function() {
expect(isSameDay(newDate("2016-02-10"), newDate("2016-02-11"))).to.be
.false;
});
it("should return true for equal dates", function() {
expect(isSameDay(newDate("2016-02-10"), newDate("2016-02-10"))).to.be
.true;
});
});
describe("isSameMonth", function() {
it("should return true for null dates", function() {
expect(isSameMonth(null, null)).to.be.true;
});
it("should return false for a null and non-null date", function() {
expect(isSameMonth(newDate(), null)).to.be.false;
expect(isSameMonth(null, newDate())).to.be.false;
});
it("should return false for non-equal months ", function() {
expect(isSameMonth(newDate("2016-02-10"), newDate("2016-03-10"))).to.be
.false;
});
it("should return true for equal months", function() {
expect(isSameMonth(newDate("2016-02-10"), newDate("2016-02-29"))).to.be
.true;
});
});
describe("isSameQuarter", function() {
it("should return true for null dates", function() {
expect(isSameQuarter(null, null)).to.be.true;
});
it("should return false for a null and non-null date", function() {
expect(isSameQuarter(newDate(), null)).to.be.false;
expect(isSameQuarter(null, newDate())).to.be.false;
});
it("should return false for non-equal quarters ", function() {
expect(isSameQuarter(newDate("2016-02-10"), newDate("2016-04-10"))).to.be
.false;
});
it("should return true for equal quarters", function() {
expect(isSameQuarter(newDate("2016-02-10"), newDate("2016-03-29"))).to.be
.true;
});
});
describe("isSameYear", function() {
it("should return true for null dates", function() {
expect(isSameYear(null, null)).to.be.true;
});
it("should return false for a null and non-null date", function() {
expect(isSameYear(newDate(), null)).to.be.false;
expect(isSameYear(null, newDate())).to.be.false;
});
it("should return false for non-equal years ", function() {
expect(isSameYear(newDate("2016-02-10"), newDate("2015-02-10"))).to.be
.false;
});
it("should return true for equal years", function() {
expect(isSameYear(newDate("2016-02-10"), newDate("2016-12-24"))).to.be
.true;
});
});
describe("isDayDisabled", function() {
it("should be enabled by default", () => {
const day = newDate();
expect(isDayDisabled(day)).to.be.false;
});
it("should be enabled if on the min date", () => {
const day = newDate();
expect(isDayDisabled(day, { minDate: day })).to.be.false;
});
it("should be disabled if before the min date", () => {
const day = newDate();
const minDate = addDays(day, 1);
expect(isDayDisabled(day, { minDate })).to.be.true;
});
it("should be enabled if on the max date", () => {
const day = newDate();
expect(isDayDisabled(day, { maxDate: day })).to.be.false;
});
it("should be disabled if after the max date", () => {
const day = newDate();
const maxDate = subDays(day, 1);
expect(isDayDisabled(day, { maxDate })).to.be.true;
});
it("should be disabled if in excluded dates", () => {
const day = newDate();
expect(isDayDisabled(day, { excludeDates: [day] })).to.be.true;
});
it("should be enabled if in included dates", () => {
const day = newDate();
expect(isDayDisabled(day, { includeDates: [day] })).to.be.false;
});
it("should be disabled if not in included dates", () => {
const day = newDate();
const includeDates = [addDays(day, 1)];
expect(isDayDisabled(day, { includeDates })).to.be.true;
});
it("should be enabled if date filter returns true", () => {
const day = newDate();
const filterDate = d => isEqual(d, day);
expect(isDayDisabled(day, { filterDate })).to.be.false;
});
it("should be disabled if date filter returns false", () => {
const day = newDate();
const filterDate = d => !isEqual(d, day);
expect(isDayDisabled(day, { filterDate })).to.be.true;
});
it("should not allow date filter to modify input date", () => {
const day = newDate();
const dayClone = newDate(day);
const filterDate = d => {
addDays(d, 1);
return true;
};
isDayDisabled(day, { filterDate });
expect(isEqual(day, dayClone)).to.be.true;
});
});
describe("isDayExcluded", function() {
it("should not be excluded by default", () => {
const day = newDate();
expect(isDayExcluded(day)).to.be.false;
});
it("should be excluded if in excluded dates", () => {
const day = newDate();
expect(isDayExcluded(day, { excludeDates: [day] })).to.be.true;
});
it("should not be excluded if not in excluded dates", () => {
const day = newDate();
const excludedDay = newDate();
const currentMonth = excludedDay.getMonth();
excludedDay.setMonth(currentMonth === 11 ? 0 : currentMonth + 1);
expect(isDayExcluded(day, { excludeDates: [] }));
});
});
describe("isMonthDisabled", function() {
it("should be enabled by default", () => {
const day = newDate();
expect(isMonthDisabled(day)).to.be.false;
});
it("should be enabled if on the min date", () => {
const day = newDate();
expect(isMonthDisabled(day, { minDate: day })).to.be.false;
});
it("should be disabled if before the min date", () => {
const day = newDate();
const minDate = addDays(day, 40);
expect(isMonthDisabled(day, { minDate })).to.be.true;
});
it("should be enabled if on the max date", () => {
const day = newDate();
expect(isMonthDisabled(day, { maxDate: day })).to.be.false;
});
it("should be disabled if after the max date", () => {
const day = newDate();
const maxDate = subDays(day, 40);
expect(isMonthDisabled(day, { maxDate })).to.be.true;
});
it("should be disabled if in excluded dates", () => {
const day = newDate();
expect(isMonthDisabled(day, { excludeDates: [day] })).to.be.true;
});
it("should be enabled if in included dates", () => {
const day = newDate();
expect(isMonthDisabled(day, { includeDates: [day] })).to.be.false;
});
it("should be disabled if not in included dates", () => {
const day = newDate();
const includeDates = [addDays(day, 40)];
expect(isMonthDisabled(day, { includeDates })).to.be.true;
});
it("should be enabled if date filter returns true", () => {
const day = newDate();
const filterDate = d => isEqual(d, day);
expect(isMonthDisabled(day, { filterDate })).to.be.false;
});
it("should be disabled if date filter returns false", () => {
const day = newDate();
const filterDate = d => !isEqual(d, day);
expect(isMonthDisabled(day, { filterDate })).to.be.true;
});
it("should not allow date filter to modify input date", () => {
const day = newDate();
const dayClone = newDate(day);
const filterDate = d => {
addDays(d, 40);
return true;
};
isMonthDisabled(day, { filterDate });
expect(isEqual(day, dayClone)).to.be.true;
});
});
describe("isQuarterDisabled", function() {
it("should be enabled by default", () => {
const day = newDate();
expect(isQuarterDisabled(day)).to.be.false;
});
it("should be enabled if on the min date", () => {
const day = newDate();
expect(isQuarterDisabled(day, { minDate: day })).to.be.false;
});
it("should be disabled if before the min date", () => {
const day = newDate();
const minDate = addDays(day, 40);
expect(isQuarterDisabled(day, { minDate })).to.be.true;
});
it("should be enabled if on the max date", () => {
const day = newDate();
expect(isQuarterDisabled(day, { maxDate: day })).to.be.false;
});
it("should be disabled if after the max date", () => {
const day = newDate();
const maxDate = subDays(day, 40);
expect(isQuarterDisabled(day, { maxDate })).to.be.true;
});
it("should be disabled if in excluded dates", () => {
const day = newDate();
expect(isQuarterDisabled(day, { excludeDates: [day] })).to.be.true;
});
it("should be enabled if in included dates", () => {
const day = newDate();
expect(isQuarterDisabled(day, { includeDates: [day] })).to.be.false;
});
xit("should be disabled if not in included dates", () => {
const day = newDate();
const includeDates = [addDays(day, 40)];
expect(isQuarterDisabled(day, { includeDates })).to.be.true;
});
it("should be enabled if date filter returns true", () => {
const day = newDate();
const filterDate = d => isEqual(d, day);
expect(isQuarterDisabled(day, { filterDate })).to.be.false;
});
it("should be disabled if date filter returns false", () => {
const day = newDate();
const filterDate = d => !isEqual(d, day);
expect(isQuarterDisabled(day, { filterDate })).to.be.true;
});
it("should not allow date filter to modify input date", () => {
const day = newDate();
const dayClone = newDate(day);
const filterDate = d => {
addDays(d, 40);
return true;
};
isQuarterDisabled(day, { filterDate });
expect(isEqual(day, dayClone)).to.be.true;
});
});
describe("monthDisabledBefore", () => {
it("should return false by default", () => {
expect(monthDisabledBefore(newDate())).to.be.false;
});
it("should return true if min date is in the same month", () => {
const day = newDate("2016-03-19");
const minDate = newDate("2016-03-01");
expect(monthDisabledBefore(day, { minDate })).to.be.true;
});
it("should return false if min date is in the previous month", () => {
const day = newDate("2016-03-19");
const minDate = newDate("2016-02-29");
expect(monthDisabledBefore(day, { minDate })).to.be.false;
});
it("should return true if previous month is before include dates", () => {
const day = newDate("2016-03-19");
const includeDates = [newDate("2016-03-01")];
expect(monthDisabledBefore(day, { includeDates })).to.be.true;
});
});
describe("monthDisabledAfter", () => {
it("should return false by default", () => {
expect(monthDisabledAfter(newDate())).to.be.false;
});
it("should return true if max date is in the same month", () => {
const day = newDate("2016-03-19");
const maxDate = newDate("2016-03-31");
expect(monthDisabledAfter(day, { maxDate })).to.be.true;
});
it("should return false if max date is in the next month", () => {
const day = newDate("2016-03-19");
const maxDate = newDate("2016-04-01");
expect(monthDisabledAfter(day, { maxDate })).to.be.false;
});
it("should return true if next month is after include dates", () => {
const day = newDate("2016-03-19");
const includeDates = [newDate("2016-03-01")];
expect(monthDisabledAfter(day, { includeDates })).to.be.true;
});
});
describe("yearDisabledBefore", () => {
it("should return false by default", () => {
expect(yearDisabledBefore(newDate())).to.be.false;
});
it("should return true if min date is in the same year", () => {
const day = newDate("2016-02-19");
const minDate = newDate("2016-03-01");
expect(yearDisabledBefore(day, { minDate })).to.be.true;
});
it("should return false if min date is in the previous year", () => {
const day = newDate("2016-03-19");
const minDate = newDate("2015-03-29");
expect(yearDisabledBefore(day, { minDate })).to.be.false;
});
it("should return true if previous year is before include dates", () => {
const day = newDate("2016-03-19");
const includeDates = [newDate("2016-03-01")];
expect(yearDisabledBefore(day, { includeDates })).to.be.true;
});
});
describe("yearDisabledAfter", () => {
it("should return false by default", () => {
expect(yearDisabledAfter(newDate())).to.be.false;
});
it("should return true if max date is in the same year", () => {
const day = newDate("2016-03-19");
const maxDate = newDate("2016-08-31");
expect(yearDisabledAfter(day, { maxDate })).to.be.true;
});
it("should return false if max date is in the next year", () => {
const day = newDate("2016-03-19");
const maxDate = newDate("2017-04-01");
expect(yearDisabledAfter(day, { maxDate })).to.be.false;
});
it("should return true if next year is after include dates", () => {
const day = newDate("2016-03-19");
const includeDates = [newDate("2016-03-01")];
expect(yearDisabledAfter(day, { includeDates })).to.be.true;
});
});
describe("getEffectiveMinDate", () => {
it("should return null by default", () => {
expect(getEffectiveMinDate({})).to.not.exist;
});
it("should return the min date", () => {
const minDate = newDate("2016-03-30");
const result = getEffectiveMinDate({ minDate });
assert(isEqual(minDate, result));
});
it("should return the minimum include date", () => {
const date1 = newDate("2016-03-30");
const date2 = newDate("2016-04-01");
const includeDates = [date1, date2];
assert(isEqual(getEffectiveMinDate({ includeDates }), date1));
});
it("should return the minimum include date satisfying the min date", () => {
const minDate = newDate("2016-03-31");
const date1 = newDate("2016-03-30");
const date2 = newDate("2016-04-01");
const includeDates = [date1, date2];
assert(isEqual(getEffectiveMinDate({ minDate, includeDates }), date2));
});
});
describe("getEffectiveMaxDate", () => {
it("should return null by default", () => {
expect(getEffectiveMaxDate({})).to.not.exist;
});
it("should return the max date", () => {
const maxDate = newDate("2016-03-30");
assert(isEqual(getEffectiveMaxDate({ maxDate }), maxDate));
});
it("should return the maximum include date", () => {
const date1 = newDate("2016-03-30");
const date2 = newDate("2016-04-01");
const includeDates = [date1, date2];
assert(isEqual(getEffectiveMaxDate({ includeDates }), date2));
});
it("should return the maximum include date satisfying the max date", () => {
const maxDate = newDate("2016-03-31");
const date1 = newDate("2016-03-30");
const date2 = newDate("2016-04-01");
const includeDates = [date1, date2];
assert(isEqual(getEffectiveMaxDate({ maxDate, includeDates }), date1));
});
});
describe("addZero", () => {
it("should return the same number if greater than 10", () => {
const input = 11;
const expected = "11";
const result = addZero(input);
assert(result === expected);
});
it("should return the number prefixed with zero if less than 10", () => {
const input = 1;
const expected = "01";
const result = addZero(input);
assert(result === expected);
});
});
describe("isTimeDisabled", function() {
it("should be enabled by default", () => {
const date = newDate();
const time = setHours(setMinutes(date, 30), 1);
expect(isTimeDisabled(time)).to.be.false;
});
it("should be disabled if in excluded times", () => {
const date = newDate();
const time = setHours(setMinutes(date, 30), 1);
expect(isTimeDisabled(time, { excludeTimes: [time] })).to.be.true;
});
it("should be enabled if in included times", () => {
const date = newDate();
const time = setHours(setMinutes(date, 30), 1);
expect(isTimeDisabled(time, { includeTimes: [time] })).to.be.false;
});
it("should be disabled if not in included times", () => {
const date = newDate();
const time = setHours(setMinutes(date, 30), 1);
const includeTimes = [addHours(time, 1)];
expect(isTimeDisabled(time, { includeTimes })).to.be.true;
});
it("should be enabled if time filter returns true", () => {
const date = newDate();
const time = setHours(setMinutes(date, 30), 1);
const filterTime = t => isEqual(t, time);
expect(isTimeDisabled(time, { filterTime })).to.be.false;
});
it("should be disabled if time filter returns false", () => {
const date = newDate();
const time = setHours(setMinutes(date, 30), 1);
const filterTime = t => !isEqual(t, time);
expect(isTimeDisabled(time, { filterTime })).to.be.true;
});
it("should not allow time filter to modify input time", () => {
const date = newDate();
const time = setHours(setMinutes(date, 30), 1);
const timeClone = newDate(time);
const filterTime = t => {
addHours(t, 1);
return true;
};
isTimeDisabled(time, { filterTime });
expect(isEqual(time, timeClone)).to.be.true;
});
});
describe("isTimeInDisabledRange", () => {
it("should tell if time is in disabled range", () => {
const date = newDate("2016-03-15");
const time = setHours(setMinutes(date, 30), 1);
const minTime = setHours(setMinutes(date, 30), 0);
const maxTime = setHours(setMinutes(date, 30), 5);
expect(isTimeInDisabledRange(time, { minTime, maxTime })).to.be.false;
});
it("should tell if time is not in disabled range", () => {
const date = newDate("2016-03-15");
const time = setHours(setMinutes(date, 30), 0);
const minTime = setHours(setMinutes(date, 30), 1);
const maxTime = setHours(setMinutes(date, 30), 5);
expect(isTimeInDisabledRange(time, { minTime, maxTime })).to.be.true;
});
it("should not throw an exception if max time is before min time", () => {
const date = newDate("2016-03-15");
const time = setHours(setMinutes(date, 30), 10);
const minTime = setHours(setMinutes(date, 30), 5);
const maxTime = setHours(setMinutes(date, 30), 0);
expect(isTimeInDisabledRange(time, { minTime, maxTime })).to.be.false;
});
});
describe("isDayInRange", () => {
it("should tell if day is in range", () => {
const day = newDate("2016-02-15 09:40");
const startDate = newDate("2016-02-01 09:40");
const endDate = newDate("2016-03-15 08:40");
expect(isDayInRange(day, startDate, endDate)).to.be.true;
});
it("should tell if day is in range, max bound test", () => {
const day = newDate("2016-03-15 09:40");
const startDate = newDate("2016-02-01 09:40");
const endDate = newDate("2016-03-15 08:40");
expect(isDayInRange(day, startDate, endDate)).to.be.true;
});
it("should tell if day is in range, min bound test", () => {
const day = newDate("2016-02-01 08:40");
const startDate = newDate("2016-02-01 09:40");
const endDate = newDate("2016-03-15 08:40");
expect(isDayInRange(day, startDate, endDate)).to.be.true;
});
it("should tell if day is not in range", () => {
const day = newDate("2016-07-15 09:40");
const startDate = newDate("2016-02-15 09:40");
const endDate = newDate("2016-03-15 08:40");
expect(isDayInRange(day, startDate, endDate)).to.be.false;
});
it("should not throw exception if end date is before start date", () => {
const day = newDate("2016-02-01 09:40");
const startDate = newDate("2016-02-15 09:40");
const endDate = newDate("2016-01-15 08:40");
expect(isDayInRange(day, startDate, endDate)).to.be.false;
});
});
describe("parseDate", () => {
it("should parse date that matches the format", () => {
const value = "01/15/2019";
const dateFormat = "MM/dd/yyyy";
expect(parseDate(value, dateFormat, null, true)).to.not.be.null;
});
it("should parse date that matches one of the formats", () => {
const value = "01/15/2019";
const dateFormat = ["MM/dd/yyyy", "yyyy-MM-dd"];
expect(parseDate(value, dateFormat, null, true)).to.not.be.null;
});
it("should not parse date that does not match the format", () => {
const value = "01/15/20";
const dateFormat = "MM/dd/yyyy";
expect(parseDate(value, dateFormat, null, true)).to.be.null;
});
it("should not parse date that does not match any of the formats", () => {
const value = "01/15/20";
const dateFormat = ["MM/dd/yyyy", "yyyy-MM-dd"];
expect(parseDate(value, dateFormat, null, true)).to.be.null;
});
it("should parse date without strict parsing", () => {
const value = "01/15/20";
const dateFormat = "MM/dd/yyyy";
expect(parseDate(value, dateFormat, null, false)).to.not.be.null;
});
it("should parse date based on locale", () => {
const value = "26/05/1995";
const dateFormat = "P";
const expected = new Date("05/26/1995");
const actual = parseDate(value, dateFormat, "pt-BR", false);
assert(isEqual(actual, expected));
});
it("should not parse date based on locale without a given locale", () => {
const value = "26/05/1995";
const dateFormat = "P";
const actual = parseDate(value, dateFormat, null, false);
expect(actual).to.be.null;
});
it("should parse date based on default locale", () => {
const value = "26/05/1995";
const dateFormat = "P";
const expected = new Date("05/26/1995");
setDefaultLocale('pt-BR');
const actual = parseDate(value, dateFormat, null, false);
setDefaultLocale(null);
assert(isEqual(actual, expected));
});
});
describe("isMonthinRange", () => {
it("should return true if the month passed is in range", () => {
const day = newDate("2015-02-01");
const startDate = newDate("2015-01-01");
const endDate = newDate("2015-08-01");
expect(isMonthinRange(startDate, endDate, 4, day)).to.be.true;
});
it("should return false if the month passed is not in range", () => {
const day = newDate("2015-02-01");
const startDate = newDate("2015-01-01");
const endDate = newDate("2015-08-01");
expect(isMonthinRange(startDate, endDate, 9, day)).to.be.false;
});
it("should return true if the month passed is in range and maxDate +1 year", () => {
const day = newDate("2019-06-04");
const startDate = newDate("2019-06-04");
const endDate = newDate("2020-02-01");
expect(isMonthinRange(startDate, endDate, 5, day)).to.be.true;
});
});
describe("getStartOfYear", () => {
it("should return the start of the year", () => {
const day = new Date("2020-04-13T00:00:00.000+08:00");
expect(getStartOfYear(day).getDate()).to.be.eq(1);
expect(getStartOfYear(day).getMonth()).to.be.eq(0);
});
});
describe("isQuarterInRange", () => {
it("should return true if the quarter passed is in range", () => {
const day = newDate("2015-02-01");
const startDate = newDate("2015-01-01");
const endDate = newDate("2015-08-01");
expect(isQuarterInRange(startDate, endDate, 2, day)).to.be.true;
});
it("should return false if the quarter passed is not in range", () => {
const day = newDate("2015-02-01");
const startDate = newDate("2015-01-01");
const endDate = newDate("2015-09-01");
expect(isQuarterInRange(startDate, endDate, 4, day)).to.be.false;
});
it("should return true if the quarter passed is in range and maxDate +1 year", () => {
const day = newDate("2019-06-04");
const startDate = newDate("2019-06-04");
const endDate = newDate("2020-02-01");
expect(isQuarterInRange(startDate, endDate, 5, day)).to.be.true;
});
});
describe("getYearsPeriod", () => {
it("should get start and end of default 11 years period", () => {
const date = newDate("2000-01-01");
const { startPeriod, endPeriod } = getYearsPeriod(date);
expect(startPeriod).to.be.eq(1993);
expect(endPeriod).to.be.eq(2004);
});
it("should get start and end of custom 8 years period", () => {
const date = newDate("2000-01-01");
const { startPeriod, endPeriod } = getYearsPeriod(date, 9);
expect(startPeriod).to.be.eq(1999);
expect(endPeriod).to.be.eq(2007);
});
});
describe("yearsDisabledAfter", () => {
it("should return false by default", () => {
expect(yearsDisabledAfter(newDate())).to.be.false;
});
it("should return true if max date is in the same year", () => {
const day = newDate("2016-03-19");
const maxDate = newDate("2016-08-31");
expect(yearsDisabledAfter(day, { maxDate })).to.be.true;
});
it("should return false if max date is in the next period years", () => {
const day = newDate("2016-03-19");
const maxDate = newDate("2018-04-01");
expect(yearsDisabledAfter(day, { maxDate })).to.be.false;
});
it("should return false if max date is in a next period year", () => {
const day = newDate("1996-08-08 00:00:00");
const maxDate = newDate("2020-08-08 00:00:00");
expect(yearsDisabledAfter(day, { maxDate })).to.be.false;
});
});
describe("yearsDisabledBefore", () => {
it("should return false by default", () => {
expect(yearsDisabledBefore(newDate())).to.be.false;
});
it("should return true if min date is in the same year", () => {
const day = newDate("2016-02-19");
const minDate = newDate("2016-03-01");
expect(yearsDisabledBefore(day, { minDate })).to.be.true;
});
it("should return false if min date is in the previous period year", () => {
const day = newDate("2016-03-19");
const minDate = newDate("2004-03-29");
expect(yearsDisabledBefore(day, { minDate })).to.be.false;
});
it("should return false if min date is in a previous period year", () => {
const day = newDate("2044-08-08 00:00:00");
const minDate = newDate("2020-08-08 00:00:00");
expect(yearsDisabledBefore(day, { minDate })).to.be.false;
});
});
describe("week", () => {
it("should return the first 2021 year week", () => {
const first2021Day = new Date("2021-01-01");
assert(getWeek(first2021Day) === 53);
});
it("should return the 4 2021 year week", () => {
const date = new Date("2021-01-18");
assert(getWeek(date) === 3);
});
it("should return the first 2022 year week", () => {
const first2022Day = new Date("2022-01-01");
assert(getWeek(first2022Day) === 52);
});
});
});
| 1 | 7,987 | The test was likely failing due to a locale issue; I had to increment the date by 1 to make it pass. | Hacker0x01-react-datepicker | js |
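A standalone sketch of the suspected mechanism (the reviewer's guess made concrete, not a confirmed diagnosis): a date-only ISO string is parsed as UTC midnight, so machines west of UTC see the previous local day, and 2021-01-18 (a Monday) slips back across a week boundary to Sunday the 17th.

const utcParsed = new Date("2021-01-18");  // date-only ISO strings parse as UTC midnight
const localParsed = new Date(2021, 0, 18); // local midnight: the 18th in every zone

// On a UTC-5 machine utcParsed.getDate() is 17, a Sunday, which falls one week
// earlier under Monday-start week numbering, while localParsed stays on the 18th.
console.log(utcParsed.getDate(), localParsed.getDate());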
@@ -935,15 +935,8 @@ bool Levitate(const InstantSpell*, Creature* creature, const std::string& param)
bool InstantSpell::loadFunction(const pugi::xml_attribute& attr)
{
const char* functionName = attr.as_string();
- if (strcasecmp(functionName, "levitate") == 0) {
- function = Levitate;
- } else {
- std::cout << "[Warning - InstantSpell::loadFunction] Function \"" << functionName << "\" does not exist." << std::endl;
- return false;
- }
-
- scripted = false;
- return true;
+ std::cout << "[Warning - InstantSpell::loadFunction] Function \"" << functionName << "\" does not exist." << std::endl;
+ return false;
}
bool InstantSpell::playerCastInstant(Player* player, std::string& param) | 1 | /**
* The Forgotten Server - a free and open-source MMORPG server emulator
* Copyright (C) 2017 Mark Samman <mark.samman@gmail.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
#include "otpch.h"
#include "combat.h"
#include "configmanager.h"
#include "game.h"
#include "monster.h"
#include "pugicast.h"
#include "spells.h"
extern Game g_game;
extern Spells* g_spells;
extern Monsters g_monsters;
extern Vocations g_vocations;
extern ConfigManager g_config;
extern LuaEnvironment g_luaEnvironment;
Spells::Spells()
{
scriptInterface.initState();
}
Spells::~Spells()
{
clear();
}
TalkActionResult_t Spells::playerSaySpell(Player* player, std::string& words)
{
std::string str_words = words;
//strip trailing spaces
trimString(str_words);
InstantSpell* instantSpell = getInstantSpell(str_words);
if (!instantSpell) {
return TALKACTION_CONTINUE;
}
std::string param;
if (instantSpell->getHasParam()) {
size_t spellLen = instantSpell->getWords().length();
size_t paramLen = str_words.length() - spellLen;
std::string paramText = str_words.substr(spellLen, paramLen);
if (!paramText.empty() && paramText.front() == ' ') {
size_t loc1 = paramText.find('"', 1);
if (loc1 != std::string::npos) {
size_t loc2 = paramText.find('"', loc1 + 1);
if (loc2 == std::string::npos) {
loc2 = paramText.length();
} else if (paramText.find_last_not_of(' ') != loc2) {
return TALKACTION_CONTINUE;
}
param = paramText.substr(loc1 + 1, loc2 - loc1 - 1);
} else {
trimString(paramText);
loc1 = paramText.find(' ', 0);
if (loc1 == std::string::npos) {
param = paramText;
} else {
return TALKACTION_CONTINUE;
}
}
}
}
if (instantSpell->playerCastInstant(player, param)) {
words = instantSpell->getWords();
if (instantSpell->getHasParam() && !param.empty()) {
words += " \"" + param + "\"";
}
return TALKACTION_BREAK;
}
return TALKACTION_FAILED;
}
void Spells::clear()
{
for (const auto& it : runes) {
delete it.second;
}
runes.clear();
for (const auto& it : instants) {
delete it.second;
}
instants.clear();
scriptInterface.reInitState();
}
LuaScriptInterface& Spells::getScriptInterface()
{
return scriptInterface;
}
std::string Spells::getScriptBaseName() const
{
return "spells";
}
Event* Spells::getEvent(const std::string& nodeName)
{
if (strcasecmp(nodeName.c_str(), "rune") == 0) {
return new RuneSpell(&scriptInterface);
} else if (strcasecmp(nodeName.c_str(), "instant") == 0) {
return new InstantSpell(&scriptInterface);
} else if (strcasecmp(nodeName.c_str(), "conjure") == 0) {
return new ConjureSpell(&scriptInterface);
}
return nullptr;
}
bool Spells::registerEvent(Event* event, const pugi::xml_node&)
{
InstantSpell* instant = dynamic_cast<InstantSpell*>(event);
if (instant) {
auto result = instants.emplace(instant->getWords(), instant);
if (!result.second) {
std::cout << "[Warning - Spells::registerEvent] Duplicate registered instant spell with words: " << instant->getWords() << std::endl;
}
return result.second;
}
RuneSpell* rune = dynamic_cast<RuneSpell*>(event);
if (rune) {
auto result = runes.emplace(rune->getRuneItemId(), rune);
if (!result.second) {
std::cout << "[Warning - Spells::registerEvent] Duplicate registered rune with id: " << rune->getRuneItemId() << std::endl;
}
return result.second;
}
return false;
}
Spell* Spells::getSpellByName(const std::string& name)
{
Spell* spell = getRuneSpellByName(name);
if (!spell) {
spell = getInstantSpellByName(name);
}
return spell;
}
RuneSpell* Spells::getRuneSpell(uint32_t id)
{
auto it = runes.find(id);
if (it == runes.end()) {
return nullptr;
}
return it->second;
}
RuneSpell* Spells::getRuneSpellByName(const std::string& name)
{
for (const auto& it : runes) {
if (strcasecmp(it.second->getName().c_str(), name.c_str()) == 0) {
return it.second;
}
}
return nullptr;
}
InstantSpell* Spells::getInstantSpell(const std::string& words)
{
InstantSpell* result = nullptr;
for (const auto& it : instants) {
InstantSpell* instantSpell = it.second;
const std::string& instantSpellWords = instantSpell->getWords();
size_t spellLen = instantSpellWords.length();
if (strncasecmp(instantSpellWords.c_str(), words.c_str(), spellLen) == 0) {
if (!result || spellLen > result->getWords().length()) {
result = instantSpell;
if (words.length() == spellLen) {
break;
}
}
}
}
if (result) {
const std::string& resultWords = result->getWords();
if (words.length() > resultWords.length()) {
if (!result->getHasParam()) {
return nullptr;
}
size_t spellLen = resultWords.length();
size_t paramLen = words.length() - spellLen;
if (paramLen < 2 || words[spellLen] != ' ') {
return nullptr;
}
}
return result;
}
return nullptr;
}
uint32_t Spells::getInstantSpellCount(const Player* player) const
{
uint32_t count = 0;
for (const auto& it : instants) {
InstantSpell* instantSpell = it.second;
if (instantSpell->canCast(player)) {
++count;
}
}
return count;
}
InstantSpell* Spells::getInstantSpellById(uint32_t spellId)
{
auto it = std::next(instants.begin(), std::min<uint32_t>(spellId, instants.size()));
if (it != instants.end()) {
return it->second;
}
return nullptr;
}
InstantSpell* Spells::getInstantSpellByName(const std::string& name)
{
for (const auto& it : instants) {
if (strcasecmp(it.second->getName().c_str(), name.c_str()) == 0) {
return it.second;
}
}
return nullptr;
}
Position Spells::getCasterPosition(Creature* creature, Direction dir)
{
return getNextPosition(dir, creature->getPosition());
}
CombatSpell::CombatSpell(Combat* combat, bool needTarget, bool needDirection) :
Event(&g_spells->getScriptInterface()),
combat(combat),
needDirection(needDirection),
needTarget(needTarget)
{}
CombatSpell::~CombatSpell()
{
if (!scripted) {
delete combat;
}
}
bool CombatSpell::loadScriptCombat()
{
combat = g_luaEnvironment.getCombatObject(g_luaEnvironment.lastCombatId);
return combat != nullptr;
}
bool CombatSpell::castSpell(Creature* creature)
{
if (scripted) {
LuaVariant var;
var.type = VARIANT_POSITION;
if (needDirection) {
var.pos = Spells::getCasterPosition(creature, creature->getDirection());
} else {
var.pos = creature->getPosition();
}
return executeCastSpell(creature, var);
}
Position pos;
if (needDirection) {
pos = Spells::getCasterPosition(creature, creature->getDirection());
} else {
pos = creature->getPosition();
}
combat->doCombat(creature, pos);
return true;
}
bool CombatSpell::castSpell(Creature* creature, Creature* target)
{
if (scripted) {
LuaVariant var;
if (combat->hasArea()) {
var.type = VARIANT_POSITION;
if (needTarget) {
var.pos = target->getPosition();
} else if (needDirection) {
var.pos = Spells::getCasterPosition(creature, creature->getDirection());
} else {
var.pos = creature->getPosition();
}
} else {
var.type = VARIANT_NUMBER;
var.number = target->getID();
}
return executeCastSpell(creature, var);
}
if (combat->hasArea()) {
if (needTarget) {
combat->doCombat(creature, target->getPosition());
} else {
return castSpell(creature);
}
} else {
combat->doCombat(creature, target);
}
return true;
}
bool CombatSpell::executeCastSpell(Creature* creature, const LuaVariant& var)
{
//onCastSpell(creature, var)
if (!scriptInterface->reserveScriptEnv()) {
std::cout << "[Error - CombatSpell::executeCastSpell] Call stack overflow" << std::endl;
return false;
}
ScriptEnvironment* env = scriptInterface->getScriptEnv();
env->setScriptId(scriptId, scriptInterface);
lua_State* L = scriptInterface->getLuaState();
scriptInterface->pushFunction(scriptId);
LuaScriptInterface::pushUserdata<Creature>(L, creature);
LuaScriptInterface::setCreatureMetatable(L, -1, creature);
LuaScriptInterface::pushVariant(L, var);
return scriptInterface->callFunction(2);
}
bool Spell::configureSpell(const pugi::xml_node& node)
{
pugi::xml_attribute nameAttribute = node.attribute("name");
if (!nameAttribute) {
std::cout << "[Error - Spell::configureSpell] Spell without name" << std::endl;
return false;
}
name = nameAttribute.as_string();
static const char* reservedList[] = {
"melee",
"physical",
"poison",
"fire",
"energy",
"drown",
"lifedrain",
"manadrain",
"healing",
"speed",
"outfit",
"invisible",
"drunk",
"firefield",
"poisonfield",
"energyfield",
"firecondition",
"poisoncondition",
"energycondition",
"drowncondition",
"freezecondition",
"cursecondition",
"dazzlecondition"
};
for (const char* reserved : reservedList) {
if (strcasecmp(reserved, name.c_str()) == 0) {
std::cout << "[Error - Spell::configureSpell] Spell is using a reserved name: " << reserved << std::endl;
return false;
}
}
pugi::xml_attribute attr;
if ((attr = node.attribute("spellid"))) {
spellId = pugi::cast<uint16_t>(attr.value());
}
if ((attr = node.attribute("group"))) {
std::string tmpStr = asLowerCaseString(attr.as_string());
if (tmpStr == "none" || tmpStr == "0") {
group = SPELLGROUP_NONE;
} else if (tmpStr == "attack" || tmpStr == "1") {
group = SPELLGROUP_ATTACK;
} else if (tmpStr == "healing" || tmpStr == "2") {
group = SPELLGROUP_HEALING;
} else if (tmpStr == "support" || tmpStr == "3") {
group = SPELLGROUP_SUPPORT;
} else if (tmpStr == "special" || tmpStr == "4") {
group = SPELLGROUP_SPECIAL;
} else {
std::cout << "[Warning - Spell::configureSpell] Unknown group: " << attr.as_string() << std::endl;
}
}
if ((attr = node.attribute("groupcooldown"))) {
groupCooldown = pugi::cast<uint32_t>(attr.value());
}
if ((attr = node.attribute("secondarygroup"))) {
std::string tmpStr = asLowerCaseString(attr.as_string());
if (tmpStr == "none" || tmpStr == "0") {
secondaryGroup = SPELLGROUP_NONE;
} else if (tmpStr == "attack" || tmpStr == "1") {
secondaryGroup = SPELLGROUP_ATTACK;
} else if (tmpStr == "healing" || tmpStr == "2") {
secondaryGroup = SPELLGROUP_HEALING;
} else if (tmpStr == "support" || tmpStr == "3") {
secondaryGroup = SPELLGROUP_SUPPORT;
} else if (tmpStr == "special" || tmpStr == "4") {
secondaryGroup = SPELLGROUP_SPECIAL;
} else {
std::cout << "[Warning - Spell::configureSpell] Unknown secondarygroup: " << attr.as_string() << std::endl;
}
}
if ((attr = node.attribute("secondarygroupcooldown"))) {
secondaryGroupCooldown = pugi::cast<uint32_t>(attr.value());
}
if ((attr = node.attribute("lvl"))) {
level = pugi::cast<uint32_t>(attr.value());
}
if ((attr = node.attribute("maglv"))) {
magLevel = pugi::cast<uint32_t>(attr.value());
}
if ((attr = node.attribute("mana"))) {
mana = pugi::cast<uint32_t>(attr.value());
}
if ((attr = node.attribute("manapercent"))) {
manaPercent = pugi::cast<uint32_t>(attr.value());
}
if ((attr = node.attribute("soul"))) {
soul = pugi::cast<uint32_t>(attr.value());
}
if ((attr = node.attribute("range"))) {
range = pugi::cast<int32_t>(attr.value());
}
if ((attr = node.attribute("exhaustion")) || (attr = node.attribute("cooldown"))) {
cooldown = pugi::cast<uint32_t>(attr.value());
}
if ((attr = node.attribute("prem"))) {
premium = attr.as_bool();
}
if ((attr = node.attribute("enabled"))) {
enabled = attr.as_bool();
}
if ((attr = node.attribute("needtarget"))) {
needTarget = attr.as_bool();
}
if ((attr = node.attribute("needweapon"))) {
needWeapon = attr.as_bool();
}
if ((attr = node.attribute("selftarget"))) {
selfTarget = attr.as_bool();
}
if ((attr = node.attribute("needlearn"))) {
learnable = attr.as_bool();
}
if ((attr = node.attribute("blocking"))) {
blockingSolid = attr.as_bool();
blockingCreature = blockingSolid;
}
if ((attr = node.attribute("blocktype"))) {
std::string tmpStrValue = asLowerCaseString(attr.as_string());
if (tmpStrValue == "all") {
blockingSolid = true;
blockingCreature = true;
} else if (tmpStrValue == "solid") {
blockingSolid = true;
} else if (tmpStrValue == "creature") {
blockingCreature = true;
} else {
std::cout << "[Warning - Spell::configureSpell] Blocktype \"" << attr.as_string() << "\" does not exist." << std::endl;
}
}
if ((attr = node.attribute("aggressive"))) {
aggressive = booleanString(attr.as_string());
}
if (group == SPELLGROUP_NONE) {
group = (aggressive ? SPELLGROUP_ATTACK : SPELLGROUP_HEALING);
}
for (auto vocationNode : node.children()) {
if (!(attr = vocationNode.attribute("name"))) {
continue;
}
int32_t vocationId = g_vocations.getVocationId(attr.as_string());
if (vocationId != -1) {
attr = vocationNode.attribute("showInDescription");
vocSpellMap[vocationId] = !attr || attr.as_bool();
} else {
std::cout << "[Warning - Spell::configureSpell] Wrong vocation name: " << attr.as_string() << std::endl;
}
}
return true;
}
bool Spell::playerSpellCheck(Player* player) const
{
if (player->hasFlag(PlayerFlag_CannotUseSpells)) {
return false;
}
if (player->hasFlag(PlayerFlag_IgnoreSpellCheck)) {
return true;
}
if (!enabled) {
return false;
}
if (aggressive && !player->hasFlag(PlayerFlag_IgnoreProtectionZone) && player->getZone() == ZONE_PROTECTION) {
player->sendCancelMessage(RETURNVALUE_ACTIONNOTPERMITTEDINPROTECTIONZONE);
return false;
}
if (player->hasCondition(CONDITION_SPELLGROUPCOOLDOWN, group) || player->hasCondition(CONDITION_SPELLCOOLDOWN, spellId) || (secondaryGroup != SPELLGROUP_NONE && player->hasCondition(CONDITION_SPELLGROUPCOOLDOWN, secondaryGroup))) {
player->sendCancelMessage(RETURNVALUE_YOUAREEXHAUSTED);
if (isInstant()) {
g_game.addMagicEffect(player->getPosition(), CONST_ME_POFF);
}
return false;
}
if (player->getLevel() < level) {
player->sendCancelMessage(RETURNVALUE_NOTENOUGHLEVEL);
g_game.addMagicEffect(player->getPosition(), CONST_ME_POFF);
return false;
}
if (player->getMagicLevel() < magLevel) {
player->sendCancelMessage(RETURNVALUE_NOTENOUGHMAGICLEVEL);
g_game.addMagicEffect(player->getPosition(), CONST_ME_POFF);
return false;
}
if (player->getMana() < getManaCost(player) && !player->hasFlag(PlayerFlag_HasInfiniteMana)) {
player->sendCancelMessage(RETURNVALUE_NOTENOUGHMANA);
g_game.addMagicEffect(player->getPosition(), CONST_ME_POFF);
return false;
}
if (player->getSoul() < soul && !player->hasFlag(PlayerFlag_HasInfiniteSoul)) {
player->sendCancelMessage(RETURNVALUE_NOTENOUGHSOUL);
g_game.addMagicEffect(player->getPosition(), CONST_ME_POFF);
return false;
}
if (isInstant() && isLearnable()) {
if (!player->hasLearnedInstantSpell(getName())) {
player->sendCancelMessage(RETURNVALUE_YOUNEEDTOLEARNTHISSPELL);
g_game.addMagicEffect(player->getPosition(), CONST_ME_POFF);
return false;
}
} else if (!vocSpellMap.empty() && vocSpellMap.find(player->getVocationId()) == vocSpellMap.end()) {
player->sendCancelMessage(RETURNVALUE_YOURVOCATIONCANNOTUSETHISSPELL);
g_game.addMagicEffect(player->getPosition(), CONST_ME_POFF);
return false;
}
if (needWeapon) {
switch (player->getWeaponType()) {
case WEAPON_SWORD:
case WEAPON_CLUB:
case WEAPON_AXE:
break;
default: {
player->sendCancelMessage(RETURNVALUE_YOUNEEDAWEAPONTOUSETHISSPELL);
g_game.addMagicEffect(player->getPosition(), CONST_ME_POFF);
return false;
}
}
}
if (isPremium() && !player->isPremium()) {
player->sendCancelMessage(RETURNVALUE_YOUNEEDPREMIUMACCOUNT);
g_game.addMagicEffect(player->getPosition(), CONST_ME_POFF);
return false;
}
return true;
}
bool Spell::playerInstantSpellCheck(Player* player, const Position& toPos)
{
if (toPos.x == 0xFFFF) {
return true;
}
const Position& playerPos = player->getPosition();
if (playerPos.z > toPos.z) {
player->sendCancelMessage(RETURNVALUE_FIRSTGOUPSTAIRS);
g_game.addMagicEffect(player->getPosition(), CONST_ME_POFF);
return false;
} else if (playerPos.z < toPos.z) {
player->sendCancelMessage(RETURNVALUE_FIRSTGODOWNSTAIRS);
g_game.addMagicEffect(player->getPosition(), CONST_ME_POFF);
return false;
}
Tile* tile = g_game.map.getTile(toPos);
if (!tile) {
tile = new StaticTile(toPos.x, toPos.y, toPos.z);
g_game.map.setTile(toPos, tile);
}
ReturnValue ret = Combat::canDoCombat(player, tile, aggressive);
if (ret != RETURNVALUE_NOERROR) {
player->sendCancelMessage(ret);
g_game.addMagicEffect(player->getPosition(), CONST_ME_POFF);
return false;
}
if (blockingCreature && tile->getBottomVisibleCreature(player) != nullptr) {
player->sendCancelMessage(RETURNVALUE_NOTENOUGHROOM);
g_game.addMagicEffect(player->getPosition(), CONST_ME_POFF);
return false;
}
if (blockingSolid && tile->hasFlag(TILESTATE_BLOCKSOLID)) {
player->sendCancelMessage(RETURNVALUE_NOTENOUGHROOM);
g_game.addMagicEffect(player->getPosition(), CONST_ME_POFF);
return false;
}
return true;
}
bool Spell::playerRuneSpellCheck(Player* player, const Position& toPos)
{
if (!playerSpellCheck(player)) {
return false;
}
if (toPos.x == 0xFFFF) {
return true;
}
const Position& playerPos = player->getPosition();
if (playerPos.z > toPos.z) {
player->sendCancelMessage(RETURNVALUE_FIRSTGOUPSTAIRS);
g_game.addMagicEffect(player->getPosition(), CONST_ME_POFF);
return false;
} else if (playerPos.z < toPos.z) {
player->sendCancelMessage(RETURNVALUE_FIRSTGODOWNSTAIRS);
g_game.addMagicEffect(player->getPosition(), CONST_ME_POFF);
return false;
}
Tile* tile = g_game.map.getTile(toPos);
if (!tile) {
player->sendCancelMessage(RETURNVALUE_NOTPOSSIBLE);
g_game.addMagicEffect(player->getPosition(), CONST_ME_POFF);
return false;
}
if (range != -1 && !g_game.canThrowObjectTo(playerPos, toPos, true, range, range)) {
player->sendCancelMessage(RETURNVALUE_DESTINATIONOUTOFREACH);
g_game.addMagicEffect(player->getPosition(), CONST_ME_POFF);
return false;
}
ReturnValue ret = Combat::canDoCombat(player, tile, aggressive);
if (ret != RETURNVALUE_NOERROR) {
player->sendCancelMessage(ret);
g_game.addMagicEffect(player->getPosition(), CONST_ME_POFF);
return false;
}
const Creature* topVisibleCreature = tile->getBottomVisibleCreature(player);
if (blockingCreature && topVisibleCreature) {
player->sendCancelMessage(RETURNVALUE_NOTENOUGHROOM);
g_game.addMagicEffect(player->getPosition(), CONST_ME_POFF);
return false;
} else if (blockingSolid && tile->hasFlag(TILESTATE_BLOCKSOLID)) {
player->sendCancelMessage(RETURNVALUE_NOTENOUGHROOM);
g_game.addMagicEffect(player->getPosition(), CONST_ME_POFF);
return false;
}
if (needTarget && !topVisibleCreature) {
player->sendCancelMessage(RETURNVALUE_CANONLYUSETHISRUNEONCREATURES);
g_game.addMagicEffect(player->getPosition(), CONST_ME_POFF);
return false;
}
if (aggressive && needTarget && topVisibleCreature && player->hasSecureMode()) {
const Player* targetPlayer = topVisibleCreature->getPlayer();
if (targetPlayer && targetPlayer != player && player->getSkullClient(targetPlayer) == SKULL_NONE && !Combat::isInPvpZone(player, targetPlayer)) {
player->sendCancelMessage(RETURNVALUE_TURNSECUREMODETOATTACKUNMARKEDPLAYERS);
g_game.addMagicEffect(player->getPosition(), CONST_ME_POFF);
return false;
}
}
return true;
}
void Spell::postCastSpell(Player* player, bool finishedCast /*= true*/, bool payCost /*= true*/) const
{
if (finishedCast) {
if (!player->hasFlag(PlayerFlag_HasNoExhaustion)) {
if (cooldown > 0) {
Condition* condition = Condition::createCondition(CONDITIONID_DEFAULT, CONDITION_SPELLCOOLDOWN, cooldown, 0, false, spellId);
player->addCondition(condition);
}
if (groupCooldown > 0) {
Condition* condition = Condition::createCondition(CONDITIONID_DEFAULT, CONDITION_SPELLGROUPCOOLDOWN, groupCooldown, 0, false, group);
player->addCondition(condition);
}
if (secondaryGroupCooldown > 0) {
Condition* condition = Condition::createCondition(CONDITIONID_DEFAULT, CONDITION_SPELLGROUPCOOLDOWN, secondaryGroupCooldown, 0, false, secondaryGroup);
player->addCondition(condition);
}
}
if (aggressive) {
player->addInFightTicks();
}
}
if (payCost) {
Spell::postCastSpell(player, getManaCost(player), getSoulCost());
}
}
void Spell::postCastSpell(Player* player, uint32_t manaCost, uint32_t soulCost)
{
if (manaCost > 0) {
player->addManaSpent(manaCost);
player->changeMana(-static_cast<int32_t>(manaCost));
}
if (!player->hasFlag(PlayerFlag_HasInfiniteSoul)) {
if (soulCost > 0) {
player->changeSoul(-static_cast<int32_t>(soulCost));
}
}
}
uint32_t Spell::getManaCost(const Player* player) const
{
if (mana != 0) {
return mana;
}
if (manaPercent != 0) {
uint32_t maxMana = player->getMaxMana();
uint32_t manaCost = (maxMana * manaPercent) / 100;
return manaCost;
}
return 0;
}
ReturnValue Spell::CreateIllusion(Creature* creature, const Outfit_t& outfit, int32_t time)
{
ConditionOutfit* outfitCondition = new ConditionOutfit(CONDITIONID_COMBAT, CONDITION_OUTFIT, time);
outfitCondition->setOutfit(outfit);
creature->addCondition(outfitCondition);
return RETURNVALUE_NOERROR;
}
ReturnValue Spell::CreateIllusion(Creature* creature, const std::string& name, int32_t time)
{
const auto mType = g_monsters.getMonsterType(name);
if (mType == nullptr) {
return RETURNVALUE_CREATUREDOESNOTEXIST;
}
Player* player = creature->getPlayer();
if (player && !player->hasFlag(PlayerFlag_CanIllusionAll)) {
if (!mType->info.isIllusionable) {
return RETURNVALUE_NOTPOSSIBLE;
}
}
return CreateIllusion(creature, mType->info.outfit, time);
}
ReturnValue Spell::CreateIllusion(Creature* creature, uint32_t itemId, int32_t time)
{
const ItemType& it = Item::items[itemId];
if (it.id == 0) {
return RETURNVALUE_NOTPOSSIBLE;
}
Outfit_t outfit;
outfit.lookTypeEx = itemId;
return CreateIllusion(creature, outfit, time);
}
std::string InstantSpell::getScriptEventName() const
{
return "onCastSpell";
}
bool InstantSpell::configureEvent(const pugi::xml_node& node)
{
if (!Spell::configureSpell(node)) {
return false;
}
if (!TalkAction::configureEvent(node)) {
return false;
}
pugi::xml_attribute attr;
if ((attr = node.attribute("params"))) {
hasParam = attr.as_bool();
}
if ((attr = node.attribute("playernameparam"))) {
hasPlayerNameParam = attr.as_bool();
}
if ((attr = node.attribute("direction"))) {
needDirection = attr.as_bool();
} else if ((attr = node.attribute("casterTargetOrDirection"))) {
casterTargetOrDirection = attr.as_bool();
}
if ((attr = node.attribute("blockwalls"))) {
checkLineOfSight = attr.as_bool();
}
return true;
}
namespace {
bool Levitate(const InstantSpell*, Creature* creature, const std::string& param)
{
Player* player = creature->getPlayer();
if (!player) {
return false;
}
const Position& currentPos = creature->getPosition();
const Position& destPos = Spells::getCasterPosition(creature, creature->getDirection());
ReturnValue ret = RETURNVALUE_NOTPOSSIBLE;
if (strcasecmp(param.c_str(), "up") == 0) {
if (currentPos.z != 8) {
Tile* tmpTile = g_game.map.getTile(currentPos.x, currentPos.y, currentPos.getZ() - 1);
if (tmpTile == nullptr || (tmpTile->getGround() == nullptr && !tmpTile->hasFlag(TILESTATE_IMMOVABLEBLOCKSOLID))) {
tmpTile = g_game.map.getTile(destPos.x, destPos.y, destPos.getZ() - 1);
if (tmpTile && tmpTile->getGround() && !tmpTile->hasFlag(TILESTATE_IMMOVABLEBLOCKSOLID | TILESTATE_FLOORCHANGE)) {
ret = g_game.internalMoveCreature(*player, *tmpTile, FLAG_IGNOREBLOCKITEM | FLAG_IGNOREBLOCKCREATURE);
}
}
}
} else if (strcasecmp(param.c_str(), "down") == 0) {
if (currentPos.z != 7) {
Tile* tmpTile = g_game.map.getTile(destPos);
if (tmpTile == nullptr || (tmpTile->getGround() == nullptr && !tmpTile->hasFlag(TILESTATE_BLOCKSOLID))) {
tmpTile = g_game.map.getTile(destPos.x, destPos.y, destPos.z + 1);
if (tmpTile && tmpTile->getGround() && !tmpTile->hasFlag(TILESTATE_IMMOVABLEBLOCKSOLID | TILESTATE_FLOORCHANGE)) {
ret = g_game.internalMoveCreature(*player, *tmpTile, FLAG_IGNOREBLOCKITEM | FLAG_IGNOREBLOCKCREATURE);
}
}
}
}
if (ret != RETURNVALUE_NOERROR) {
player->sendCancelMessage(ret);
g_game.addMagicEffect(player->getPosition(), CONST_ME_POFF);
return false;
}
g_game.addMagicEffect(player->getPosition(), CONST_ME_TELEPORT);
return true;
}
}
bool InstantSpell::loadFunction(const pugi::xml_attribute& attr)
{
const char* functionName = attr.as_string();
if (strcasecmp(functionName, "levitate") == 0) {
function = Levitate;
} else {
std::cout << "[Warning - InstantSpell::loadFunction] Function \"" << functionName << "\" does not exist." << std::endl;
return false;
}
scripted = false;
return true;
}
bool InstantSpell::playerCastInstant(Player* player, std::string& param)
{
if (!playerSpellCheck(player)) {
return false;
}
LuaVariant var;
if (selfTarget) {
var.type = VARIANT_NUMBER;
var.number = player->getID();
} else if (needTarget || casterTargetOrDirection) {
Creature* target = nullptr;
bool useDirection = false;
if (hasParam) {
Player* playerTarget = nullptr;
ReturnValue ret = g_game.getPlayerByNameWildcard(param, playerTarget);
if (playerTarget && playerTarget->isAccessPlayer() && !player->isAccessPlayer()) {
playerTarget = nullptr;
}
target = playerTarget;
if (!target || target->getHealth() <= 0) {
if (!casterTargetOrDirection) {
if (cooldown > 0) {
Condition* condition = Condition::createCondition(CONDITIONID_DEFAULT, CONDITION_SPELLCOOLDOWN, cooldown, 0, false, spellId);
player->addCondition(condition);
}
if (groupCooldown > 0) {
Condition* condition = Condition::createCondition(CONDITIONID_DEFAULT, CONDITION_SPELLGROUPCOOLDOWN, groupCooldown, 0, false, group);
player->addCondition(condition);
}
if (secondaryGroupCooldown > 0) {
Condition* condition = Condition::createCondition(CONDITIONID_DEFAULT, CONDITION_SPELLGROUPCOOLDOWN, secondaryGroupCooldown, 0, false, secondaryGroup);
player->addCondition(condition);
}
player->sendCancelMessage(ret);
g_game.addMagicEffect(player->getPosition(), CONST_ME_POFF);
return false;
}
useDirection = true;
}
if (playerTarget) {
param = playerTarget->getName();
}
} else {
target = player->getAttackedCreature();
if (!target || target->getHealth() <= 0) {
if (!casterTargetOrDirection) {
player->sendCancelMessage(RETURNVALUE_YOUCANONLYUSEITONCREATURES);
g_game.addMagicEffect(player->getPosition(), CONST_ME_POFF);
return false;
}
useDirection = true;
}
}
if (!useDirection) {
if (!canThrowSpell(player, target)) {
player->sendCancelMessage(RETURNVALUE_CREATUREISNOTREACHABLE);
g_game.addMagicEffect(player->getPosition(), CONST_ME_POFF);
return false;
}
var.type = VARIANT_NUMBER;
var.number = target->getID();
} else {
var.type = VARIANT_POSITION;
var.pos = Spells::getCasterPosition(player, player->getDirection());
if (!playerInstantSpellCheck(player, var.pos)) {
return false;
}
}
} else if (hasParam) {
var.type = VARIANT_STRING;
if (getHasPlayerNameParam()) {
Player* playerTarget = nullptr;
ReturnValue ret = g_game.getPlayerByNameWildcard(param, playerTarget);
if (ret != RETURNVALUE_NOERROR) {
if (cooldown > 0) {
Condition* condition = Condition::createCondition(CONDITIONID_DEFAULT, CONDITION_SPELLCOOLDOWN, cooldown, 0, false, spellId);
player->addCondition(condition);
}
if (groupCooldown > 0) {
Condition* condition = Condition::createCondition(CONDITIONID_DEFAULT, CONDITION_SPELLGROUPCOOLDOWN, groupCooldown, 0, false, group);
player->addCondition(condition);
}
if (secondaryGroupCooldown > 0) {
Condition* condition = Condition::createCondition(CONDITIONID_DEFAULT, CONDITION_SPELLGROUPCOOLDOWN, secondaryGroupCooldown, 0, false, secondaryGroup);
player->addCondition(condition);
}
player->sendCancelMessage(ret);
g_game.addMagicEffect(player->getPosition(), CONST_ME_POFF);
return false;
}
if (playerTarget && (!playerTarget->isAccessPlayer() || player->isAccessPlayer())) {
param = playerTarget->getName();
}
}
var.text = param;
} else {
var.type = VARIANT_POSITION;
if (needDirection) {
var.pos = Spells::getCasterPosition(player, player->getDirection());
} else {
var.pos = player->getPosition();
}
if (!playerInstantSpellCheck(player, var.pos)) {
return false;
}
}
bool result = internalCastSpell(player, var);
if (result) {
postCastSpell(player);
}
return result;
}
bool InstantSpell::canThrowSpell(const Creature* creature, const Creature* target) const
{
const Position& fromPos = creature->getPosition();
const Position& toPos = target->getPosition();
if (fromPos.z != toPos.z ||
(range == -1 && !g_game.canThrowObjectTo(fromPos, toPos, checkLineOfSight)) ||
(range != -1 && !g_game.canThrowObjectTo(fromPos, toPos, checkLineOfSight, range, range))) {
return false;
}
return true;
}
bool InstantSpell::castSpell(Creature* creature)
{
LuaVariant var;
if (casterTargetOrDirection) {
Creature* target = creature->getAttackedCreature();
if (target && target->getHealth() > 0) {
if (!canThrowSpell(creature, target)) {
return false;
}
var.type = VARIANT_NUMBER;
var.number = target->getID();
return internalCastSpell(creature, var);
}
return false;
} else if (needDirection) {
var.type = VARIANT_POSITION;
var.pos = Spells::getCasterPosition(creature, creature->getDirection());
} else {
var.type = VARIANT_POSITION;
var.pos = creature->getPosition();
}
return internalCastSpell(creature, var);
}
bool InstantSpell::castSpell(Creature* creature, Creature* target)
{
if (needTarget) {
LuaVariant var;
var.type = VARIANT_NUMBER;
var.number = target->getID();
return internalCastSpell(creature, var);
} else {
return castSpell(creature);
}
}
bool InstantSpell::internalCastSpell(Creature* creature, const LuaVariant& var)
{
if (scripted) {
return executeCastSpell(creature, var);
} else if (function) {
return function(this, creature, var.text);
}
return false;
}
bool InstantSpell::executeCastSpell(Creature* creature, const LuaVariant& var)
{
//onCastSpell(creature, var)
if (!scriptInterface->reserveScriptEnv()) {
std::cout << "[Error - InstantSpell::executeCastSpell] Call stack overflow" << std::endl;
return false;
}
ScriptEnvironment* env = scriptInterface->getScriptEnv();
env->setScriptId(scriptId, scriptInterface);
lua_State* L = scriptInterface->getLuaState();
scriptInterface->pushFunction(scriptId);
LuaScriptInterface::pushUserdata<Creature>(L, creature);
LuaScriptInterface::setCreatureMetatable(L, -1, creature);
LuaScriptInterface::pushVariant(L, var);
return scriptInterface->callFunction(2);
}
bool InstantSpell::canCast(const Player* player) const
{
if (player->hasFlag(PlayerFlag_CannotUseSpells)) {
return false;
}
if (player->hasFlag(PlayerFlag_IgnoreSpellCheck)) {
return true;
}
if (isLearnable()) {
if (player->hasLearnedInstantSpell(getName())) {
return true;
}
} else {
if (vocSpellMap.empty() || vocSpellMap.find(player->getVocationId()) != vocSpellMap.end()) {
return true;
}
}
return false;
}
std::string ConjureSpell::getScriptEventName() const
{
return "onCastSpell";
}
bool ConjureSpell::configureEvent(const pugi::xml_node& node)
{
if (!InstantSpell::configureEvent(node)) {
return false;
}
pugi::xml_attribute attr;
if ((attr = node.attribute("conjureId"))) {
conjureId = pugi::cast<uint32_t>(attr.value());
}
if ((attr = node.attribute("conjureCount"))) {
conjureCount = pugi::cast<uint32_t>(attr.value());
} else if (conjureId != 0) {
// load default charges from items.xml
const ItemType& it = Item::items[conjureId];
if (it.charges != 0) {
conjureCount = it.charges;
}
}
if ((attr = node.attribute("reagentId"))) {
reagentId = pugi::cast<uint32_t>(attr.value());
}
return true;
}
bool ConjureSpell::loadFunction(const pugi::xml_attribute&)
{
scripted = false;
return true;
}
bool ConjureSpell::conjureItem(Creature* creature) const
{
Player* player = creature->getPlayer();
if (!player) {
return false;
}
if (reagentId != 0 && !player->removeItemOfType(reagentId, 1, -1)) {
player->sendCancelMessage(RETURNVALUE_YOUNEEDAMAGICITEMTOCASTSPELL);
g_game.addMagicEffect(player->getPosition(), CONST_ME_POFF);
return false;
}
Item* newItem = Item::CreateItem(conjureId, conjureCount);
if (!newItem) {
return false;
}
ReturnValue ret = g_game.internalPlayerAddItem(player, newItem);
if (ret != RETURNVALUE_NOERROR) {
player->sendCancelMessage(ret);
g_game.addMagicEffect(player->getPosition(), CONST_ME_POFF);
delete newItem;
return false;
}
g_game.startDecay(newItem);
postCastSpell(player);
g_game.addMagicEffect(player->getPosition(), CONST_ME_MAGIC_RED);
return true;
}
bool ConjureSpell::playerCastInstant(Player* player, std::string& param)
{
if (!playerSpellCheck(player)) {
return false;
}
if (scripted) {
LuaVariant var;
var.type = VARIANT_STRING;
var.text = param;
return executeCastSpell(player, var);
}
return conjureItem(player);
}
std::string RuneSpell::getScriptEventName() const
{
return "onCastSpell";
}
bool RuneSpell::configureEvent(const pugi::xml_node& node)
{
if (!Spell::configureSpell(node)) {
return false;
}
if (!Action::configureEvent(node)) {
return false;
}
pugi::xml_attribute attr;
if (!(attr = node.attribute("id"))) {
std::cout << "[Error - RuneSpell::configureSpell] Rune spell without id." << std::endl;
return false;
}
runeId = pugi::cast<uint16_t>(attr.value());
uint32_t charges;
if ((attr = node.attribute("charges"))) {
charges = pugi::cast<uint32_t>(attr.value());
} else {
charges = 0;
}
hasCharges = (charges > 0);
if (magLevel != 0 || level != 0) {
//Change information in the ItemType to get accurate description
ItemType& iType = Item::items.getItemType(runeId);
iType.runeMagLevel = magLevel;
iType.runeLevel = level;
iType.charges = charges;
}
return true;
}
namespace {
bool RuneIllusion(const RuneSpell*, Player* player, const Position& posTo)
{
Thing* thing = g_game.internalGetThing(player, posTo, 0, 0, STACKPOS_MOVE);
if (!thing) {
player->sendCancelMessage(RETURNVALUE_NOTPOSSIBLE);
g_game.addMagicEffect(player->getPosition(), CONST_ME_POFF);
return false;
}
Item* illusionItem = thing->getItem();
if (!illusionItem || !illusionItem->isMoveable()) {
player->sendCancelMessage(RETURNVALUE_NOTPOSSIBLE);
g_game.addMagicEffect(player->getPosition(), CONST_ME_POFF);
return false;
}
ReturnValue ret = Spell::CreateIllusion(player, illusionItem->getID(), 200000);
if (ret != RETURNVALUE_NOERROR) {
player->sendCancelMessage(ret);
g_game.addMagicEffect(player->getPosition(), CONST_ME_POFF);
return false;
}
g_game.addMagicEffect(player->getPosition(), CONST_ME_MAGIC_RED);
return true;
}
bool Convince(const RuneSpell* spell, Player* player, const Position& posTo)
{
if (!player->hasFlag(PlayerFlag_CanConvinceAll)) {
if (player->getSummonCount() >= 2) {
player->sendCancelMessage(RETURNVALUE_NOTPOSSIBLE);
g_game.addMagicEffect(player->getPosition(), CONST_ME_POFF);
return false;
}
}
Thing* thing = g_game.internalGetThing(player, posTo, 0, 0, STACKPOS_LOOK);
if (!thing) {
player->sendCancelMessage(RETURNVALUE_NOTPOSSIBLE);
g_game.addMagicEffect(player->getPosition(), CONST_ME_POFF);
return false;
}
Creature* convinceCreature = thing->getCreature();
if (!convinceCreature) {
player->sendCancelMessage(RETURNVALUE_NOTPOSSIBLE);
g_game.addMagicEffect(player->getPosition(), CONST_ME_POFF);
return false;
}
uint32_t manaCost = 0;
if (convinceCreature->getMonster()) {
manaCost = convinceCreature->getMonster()->getManaCost();
}
if (!player->hasFlag(PlayerFlag_HasInfiniteMana) && player->getMana() < manaCost) {
player->sendCancelMessage(RETURNVALUE_NOTENOUGHMANA);
g_game.addMagicEffect(player->getPosition(), CONST_ME_POFF);
return false;
}
if (!convinceCreature->convinceCreature(player)) {
player->sendCancelMessage(RETURNVALUE_NOTPOSSIBLE);
g_game.addMagicEffect(player->getPosition(), CONST_ME_POFF);
return false;
}
Spell::postCastSpell(player, manaCost, spell->getSoulCost());
g_game.updateCreatureType(convinceCreature);
g_game.addMagicEffect(player->getPosition(), CONST_ME_MAGIC_RED);
return true;
}
}
bool RuneSpell::loadFunction(const pugi::xml_attribute& attr)
{
const char* functionName = attr.as_string();
if (strcasecmp(functionName, "chameleon") == 0) {
runeFunction = RuneIllusion;
} else if (strcasecmp(functionName, "convince") == 0) {
runeFunction = Convince;
} else {
std::cout << "[Warning - RuneSpell::loadFunction] Function \"" << functionName << "\" does not exist." << std::endl;
return false;
}
scripted = false;
return true;
}
ReturnValue RuneSpell::canExecuteAction(const Player* player, const Position& toPos)
{
if (player->hasFlag(PlayerFlag_CannotUseSpells)) {
return RETURNVALUE_CANNOTUSETHISOBJECT;
}
ReturnValue ret = Action::canExecuteAction(player, toPos);
if (ret != RETURNVALUE_NOERROR) {
return ret;
}
if (toPos.x == 0xFFFF) {
if (needTarget) {
return RETURNVALUE_CANONLYUSETHISRUNEONCREATURES;
} else if (!selfTarget) {
return RETURNVALUE_NOTENOUGHROOM;
}
}
return RETURNVALUE_NOERROR;
}
bool RuneSpell::executeUse(Player* player, Item* item, const Position&, Thing* target, const Position& toPosition, bool isHotkey)
{
if (!playerRuneSpellCheck(player, toPosition)) {
return false;
}
bool result = false;
if (scripted) {
LuaVariant var;
if (needTarget) {
var.type = VARIANT_NUMBER;
if (target == nullptr) {
Tile* toTile = g_game.map.getTile(toPosition);
if (toTile) {
const Creature* visibleCreature = toTile->getBottomVisibleCreature(player);
if (visibleCreature) {
var.number = visibleCreature->getID();
}
}
} else {
var.number = target->getCreature()->getID();
}
} else {
var.type = VARIANT_POSITION;
var.pos = toPosition;
}
result = internalCastSpell(player, var, isHotkey);
} else if (runeFunction) {
result = runeFunction(this, player, toPosition);
}
if (!result) {
return false;
}
postCastSpell(player);
if (hasCharges && item && g_config.getBoolean(ConfigManager::REMOVE_RUNE_CHARGES)) {
int32_t newCount = std::max<int32_t>(0, item->getItemCount() - 1);
g_game.transformItem(item, item->getID(), newCount);
}
return true;
}
bool RuneSpell::castSpell(Creature* creature)
{
LuaVariant var;
var.type = VARIANT_NUMBER;
var.number = creature->getID();
return internalCastSpell(creature, var, false);
}
bool RuneSpell::castSpell(Creature* creature, Creature* target)
{
LuaVariant var;
var.type = VARIANT_NUMBER;
var.number = target->getID();
return internalCastSpell(creature, var, false);
}
bool RuneSpell::internalCastSpell(Creature* creature, const LuaVariant& var, bool isHotkey)
{
bool result;
if (scripted) {
result = executeCastSpell(creature, var, isHotkey);
} else {
result = false;
}
return result;
}
bool RuneSpell::executeCastSpell(Creature* creature, const LuaVariant& var, bool isHotkey)
{
//onCastSpell(creature, var, isHotkey)
if (!scriptInterface->reserveScriptEnv()) {
std::cout << "[Error - RuneSpell::executeCastSpell] Call stack overflow" << std::endl;
return false;
}
ScriptEnvironment* env = scriptInterface->getScriptEnv();
env->setScriptId(scriptId, scriptInterface);
lua_State* L = scriptInterface->getLuaState();
scriptInterface->pushFunction(scriptId);
LuaScriptInterface::pushUserdata<Creature>(L, creature);
LuaScriptInterface::setCreatureMetatable(L, -1, creature);
LuaScriptInterface::pushVariant(L, var);
LuaScriptInterface::pushBoolean(L, isHotkey);
return scriptInterface->callFunction(3);
}
| 1 | 14,671 | The changes below here can also be undone. | otland-forgottenserver | cpp |
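To make the comment above concrete: once the levitate branch is removed, loadFunction never assigns `function`, so the function-pointer path further down this file becomes unreachable. A sketch of what fully retiring it could look like, assuming the PR's goal is to move all instant spells to Lua (an illustration, not the actual follow-up commit):

// With no hardcoded functions left, the Levitate helper and the `function`
// member can be deleted, leaving the scripted path as the only executor.
bool InstantSpell::internalCastSpell(Creature* creature, const LuaVariant& var)
{
	if (scripted) {
		return executeCastSpell(creature, var);
	}
	return false;
}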
@@ -1,13 +1,14 @@
package channelq
import (
+ "context"
"fmt"
"strings"
"sync"
"k8s.io/klog"
- "github.com/kubeedge/beehive/pkg/core/context"
+ beehiveContext "github.com/kubeedge/beehive/pkg/core/context"
"github.com/kubeedge/kubeedge/cloud/pkg/cloudhub/common/model"
)
| 1 | package channelq
import (
"fmt"
"strings"
"sync"
"k8s.io/klog"
"github.com/kubeedge/beehive/pkg/core/context"
"github.com/kubeedge/kubeedge/cloud/pkg/cloudhub/common/model"
)
// Read channel buffer size
const (
rChanBufSize = 10
)
// EventSet holds a set of events
type EventSet interface {
Ack() error
Get() (*model.Event, error)
}
// ChannelEventSet is the channel implementation of EventSet
type ChannelEventSet struct {
current model.Event
messages <-chan model.Event
}
// NewChannelEventSet initializes a new ChannelEventSet instance
func NewChannelEventSet(messages <-chan model.Event) *ChannelEventSet {
return &ChannelEventSet{messages: messages}
}
// Ack acknowledges once the event is processed
func (s *ChannelEventSet) Ack() error {
return nil
}
// Get obtains one event from the queue
func (s *ChannelEventSet) Get() (*model.Event, error) {
var ok bool
s.current, ok = <-s.messages
if !ok {
return nil, fmt.Errorf("failed to get message from cluster, reason: channel is closed")
}
return &s.current, nil
}
// ChannelEventQueue is the channel implementation of EventQueue
type ChannelEventQueue struct {
ctx *context.Context
channelPool sync.Map
}
// NewChannelEventQueue initializes a new ChannelEventQueue
func NewChannelEventQueue(ctx *context.Context) *ChannelEventQueue {
q := ChannelEventQueue{ctx: ctx}
return &q
}
// DispatchMessage gets the message from the cloud, extracts the
// node id from it, gets the channel associated with the node
// and pushes the event on the channel
func (q *ChannelEventQueue) DispatchMessage() {
for {
msg, err := q.ctx.Receive(model.SrcCloudHub)
if err != nil {
klog.Info("received message is not in Message format")
continue
}
resource := msg.Router.Resource
tokens := strings.Split(resource, "/")
numOfTokens := len(tokens)
var nodeID string
for i, token := range tokens {
if token == model.ResNode && i+1 < numOfTokens {
nodeID = tokens[i+1]
break
}
}
if nodeID == "" {
klog.Warning("node id is not found in the message")
continue
}
rChannel, err := q.getRChannel(nodeID)
if err != nil {
klog.Infof("fail to get dispatch channel for %s", nodeID)
continue
}
rChannel <- model.MessageToEvent(&msg)
}
}
func (q *ChannelEventQueue) getRChannel(nodeID string) (chan model.Event, error) {
channels, ok := q.channelPool.Load(nodeID)
if !ok {
klog.Errorf("rChannel for edge node %s is removed", nodeID)
return nil, fmt.Errorf("rChannel not found")
}
rChannel := channels.(chan model.Event)
return rChannel, nil
}
// Connect allocates rChannel for given project and group
func (q *ChannelEventQueue) Connect(info *model.HubInfo) error {
_, ok := q.channelPool.Load(info.NodeID)
if ok {
return fmt.Errorf("edge node %s is already connected", info.NodeID)
}
// allocate a new rchannel with default buffer size
rChannel := make(chan model.Event, rChanBufSize)
_, ok = q.channelPool.LoadOrStore(info.NodeID, rChannel)
if ok {
// rchannel is already allocated
return fmt.Errorf("edge node %s is already connected", info.NodeID)
}
return nil
}
// Close closes rChannel for given project and group
func (q *ChannelEventQueue) Close(info *model.HubInfo) error {
channels, ok := q.channelPool.Load(info.NodeID)
if !ok {
klog.Warningf("rChannel for edge node %s is already removed", info.NodeID)
return nil
}
rChannel := channels.(chan model.Event)
close(rChannel)
q.channelPool.Delete(info.NodeID)
return nil
}
// Publish sends message via the rchannel to Edge Controller
func (q *ChannelEventQueue) Publish(info *model.HubInfo, event *model.Event) error {
msg := model.EventToMessage(event)
switch msg.Router.Source {
case model.ResTwin:
q.ctx.Send2Group(model.SrcDeviceController, msg)
default:
q.ctx.Send2Group(model.SrcEdgeController, msg)
}
return nil
}
// Consume retrieves message from the rChannel for given project and group
func (q *ChannelEventQueue) Consume(info *model.HubInfo) (EventSet, error) {
rChannel, err := q.getRChannel(info.NodeID)
if err != nil {
return nil, err
}
return NewChannelEventSet((<-chan model.Event)(rChannel)), nil
}
// Workload returns the number of queue channels connected to queue
func (q *ChannelEventQueue) Workload() (float64, error) {
return 1, nil
}
| 1 | 14,548 | Instead of importing context alongside beehivecontext, I'd suggest folding the Golang context functionality into beehivecontext, so that channelq relies only on beehive. | kubeedge-kubeedge | go
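A rough sketch of the reviewer's suggestion: let beehive's context package own a standard-library context and re-export the operations modules need, so packages such as channelq import beehive alone. Every name below (Done, Cancel, the init wiring) is an assumption for illustration, not the real beehive API.

// Hypothetical beehiveContext: wraps one cancellable stdlib context so
// downstream code never imports "context" directly.
package context

import gocontext "context"

var (
	ctx    gocontext.Context
	cancel gocontext.CancelFunc
)

func init() {
	ctx, cancel = gocontext.WithCancel(gocontext.Background())
}

// Done exposes module-wide cancellation, mirroring context.Context.Done.
func Done() <-chan struct{} {
	return ctx.Done()
}

// Cancel signals every consumer selecting on Done to shut down.
func Cancel() {
	cancel()
}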
@@ -88,6 +88,9 @@ public class AddProductNutritionFactsData {
PREFIX_NUTRIMENT_LONG_NAME + "vitamin-k",
PREFIX_NUTRIMENT_LONG_NAME + "zinc"));
+ private AddProductNutritionFactsData() {
+ }
+
static String getCompleteEntryName(CustomValidatingEditTextView editText) {
return PREFIX_NUTRIMENT_LONG_NAME + editText.getEntryName();
} | 1 | package openfoodfacts.github.scrachx.openfood.fragments;
import org.apache.commons.lang.StringUtils;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import openfoodfacts.github.scrachx.openfood.utils.CustomValidatingEditTextView;
public class AddProductNutritionFactsData {
static final String PREFIX_NUTRIMENT_LONG_NAME = "nutriment_";
static final List<String> PARAMS_OTHER_NUTRIENTS = Collections.unmodifiableList(Arrays.asList(PREFIX_NUTRIMENT_LONG_NAME + "alpha-linolenic-acid",
PREFIX_NUTRIMENT_LONG_NAME + "arachidic-acid",
PREFIX_NUTRIMENT_LONG_NAME + "arachidonic-acid",
PREFIX_NUTRIMENT_LONG_NAME + "behenic-acid",
PREFIX_NUTRIMENT_LONG_NAME + "bicarbonate",
PREFIX_NUTRIMENT_LONG_NAME + "biotin",
PREFIX_NUTRIMENT_LONG_NAME + "butyric-acid",
PREFIX_NUTRIMENT_LONG_NAME + "caffeine",
PREFIX_NUTRIMENT_LONG_NAME + "calcium",
PREFIX_NUTRIMENT_LONG_NAME + "capric-acid",
PREFIX_NUTRIMENT_LONG_NAME + "caproic-acid",
PREFIX_NUTRIMENT_LONG_NAME + "caprylic-acid",
PREFIX_NUTRIMENT_LONG_NAME + "casein",
PREFIX_NUTRIMENT_LONG_NAME + "cerotic-acid",
PREFIX_NUTRIMENT_LONG_NAME + "chloride",
PREFIX_NUTRIMENT_LONG_NAME + "cholesterol",
PREFIX_NUTRIMENT_LONG_NAME + "chromium",
PREFIX_NUTRIMENT_LONG_NAME + "copper",
PREFIX_NUTRIMENT_LONG_NAME + "dihomo-gamma-linolenic-acid",
PREFIX_NUTRIMENT_LONG_NAME + "docosahexaenoic-acid",
PREFIX_NUTRIMENT_LONG_NAME + "eicosapentaenoic-acid",
PREFIX_NUTRIMENT_LONG_NAME + "elaidic-acid",
PREFIX_NUTRIMENT_LONG_NAME + "erucic-acid",
PREFIX_NUTRIMENT_LONG_NAME + "fluoride",
PREFIX_NUTRIMENT_LONG_NAME + "fructose",
PREFIX_NUTRIMENT_LONG_NAME + "gamma-linolenic-acid",
PREFIX_NUTRIMENT_LONG_NAME + "glucose",
PREFIX_NUTRIMENT_LONG_NAME + "gondoic-acid",
PREFIX_NUTRIMENT_LONG_NAME + "iodine",
PREFIX_NUTRIMENT_LONG_NAME + "iron",
PREFIX_NUTRIMENT_LONG_NAME + "lactose",
PREFIX_NUTRIMENT_LONG_NAME + "lauric-acid",
PREFIX_NUTRIMENT_LONG_NAME + "lignoceric-acid",
PREFIX_NUTRIMENT_LONG_NAME + "linoleic-acid",
PREFIX_NUTRIMENT_LONG_NAME + "magnesium",
PREFIX_NUTRIMENT_LONG_NAME + "maltodextrins",
PREFIX_NUTRIMENT_LONG_NAME + "maltose",
PREFIX_NUTRIMENT_LONG_NAME + "manganese",
PREFIX_NUTRIMENT_LONG_NAME + "mead-acid",
PREFIX_NUTRIMENT_LONG_NAME + "melissic-acid",
PREFIX_NUTRIMENT_LONG_NAME + "molybdenum",
PREFIX_NUTRIMENT_LONG_NAME + "monounsaturated-fat",
PREFIX_NUTRIMENT_LONG_NAME + "montanic-acid",
PREFIX_NUTRIMENT_LONG_NAME + "myristic-acid",
PREFIX_NUTRIMENT_LONG_NAME + "nervonic-acid",
PREFIX_NUTRIMENT_LONG_NAME + "nucleotides",
PREFIX_NUTRIMENT_LONG_NAME + "oleic-acid",
PREFIX_NUTRIMENT_LONG_NAME + "omega-3-fat",
PREFIX_NUTRIMENT_LONG_NAME + "omega-6-fat",
PREFIX_NUTRIMENT_LONG_NAME + "omega-9-fat",
PREFIX_NUTRIMENT_LONG_NAME + "palmitic-acid",
PREFIX_NUTRIMENT_LONG_NAME + "pantothenic-acid",
PREFIX_NUTRIMENT_LONG_NAME + "ph",
PREFIX_NUTRIMENT_LONG_NAME + "phosphorus",
PREFIX_NUTRIMENT_LONG_NAME + "polyols",
PREFIX_NUTRIMENT_LONG_NAME + "polyunsaturated-fat",
PREFIX_NUTRIMENT_LONG_NAME + "potassium",
PREFIX_NUTRIMENT_LONG_NAME + "selenium",
PREFIX_NUTRIMENT_LONG_NAME + "serum-proteins",
PREFIX_NUTRIMENT_LONG_NAME + "silica",
PREFIX_NUTRIMENT_LONG_NAME + "starch",
PREFIX_NUTRIMENT_LONG_NAME + "stearic-acid",
PREFIX_NUTRIMENT_LONG_NAME + "sucrose",
PREFIX_NUTRIMENT_LONG_NAME + "taurine",
PREFIX_NUTRIMENT_LONG_NAME + "trans-fat",
PREFIX_NUTRIMENT_LONG_NAME + "vitamin-a",
PREFIX_NUTRIMENT_LONG_NAME + "vitamin-b1",
PREFIX_NUTRIMENT_LONG_NAME + "vitamin-b12",
PREFIX_NUTRIMENT_LONG_NAME + "vitamin-b2",
PREFIX_NUTRIMENT_LONG_NAME + "vitamin-pp",
PREFIX_NUTRIMENT_LONG_NAME + "vitamin-b6",
PREFIX_NUTRIMENT_LONG_NAME + "vitamin-b9",
PREFIX_NUTRIMENT_LONG_NAME + "vitamin-c",
PREFIX_NUTRIMENT_LONG_NAME + "vitamin-d",
PREFIX_NUTRIMENT_LONG_NAME + "vitamin-e",
PREFIX_NUTRIMENT_LONG_NAME + "vitamin-k",
PREFIX_NUTRIMENT_LONG_NAME + "zinc"));
static String getCompleteEntryName(CustomValidatingEditTextView editText) {
return PREFIX_NUTRIMENT_LONG_NAME + editText.getEntryName();
}
static String getShortName(String init) {
return StringUtils.removeStart(init, PREFIX_NUTRIMENT_LONG_NAME);
}
}
| 1 | 68,007 | I'm not sure about this one. Are you sure we don't need to initialize this fragment anywhere else? | openfoodfacts-openfoodfacts-androidapp | java |
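For context on the hunk under review: a private constructor is the standard way to make a static-only holder non-instantiable, and it removes no initialization path callers could rely on, since every member of the class is static. A minimal illustration with hypothetical names:

// Utility-class pattern: the private constructor turns `new` into a
// compile error while static access keeps working unchanged.
public final class NutrimentNames {

    private NutrimentNames() {
        // never called; the class is a namespace for constants and helpers
    }

    static final String PREFIX = "nutriment_";

    static String complete(String shortName) {
        return PREFIX + shortName;
    }
}

Call sites such as NutrimentNames.complete("zinc") still return "nutriment_zinc"; only instantiation stops compiling.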
@@ -644,6 +644,7 @@ public class LocalInsightPlugin extends PluginBase implements PumpInterface, Con
result.comment = ExceptionTranslator.getString(e);
}
} else if (detailedBolusInfo.carbs > 0) {
+ TreatmentsPlugin.getPlugin().addToHistoryTreatment(detailedBolusInfo, true);
result.success = true;
result.enacted = true;
} | 1 | package info.nightscout.androidaps.plugins.pump.insight;
import android.app.NotificationChannel;
import android.app.NotificationManager;
import android.content.ComponentName;
import android.content.Context;
import android.content.Intent;
import android.content.ServiceConnection;
import android.os.Build;
import android.os.Handler;
import android.os.IBinder;
import android.os.Looper;
import androidx.fragment.app.FragmentActivity;
import org.json.JSONException;
import org.json.JSONObject;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.ArrayList;
import java.util.Calendar;
import java.util.Collections;
import java.util.Date;
import java.util.List;
import java.util.TimeZone;
import info.nightscout.androidaps.BuildConfig;
import info.nightscout.androidaps.MainApp;
import info.nightscout.androidaps.R;
import info.nightscout.androidaps.data.DetailedBolusInfo;
import info.nightscout.androidaps.data.Profile;
import info.nightscout.androidaps.data.PumpEnactResult;
import info.nightscout.androidaps.db.CareportalEvent;
import info.nightscout.androidaps.db.ExtendedBolus;
import info.nightscout.androidaps.db.Source;
import info.nightscout.androidaps.db.TDD;
import info.nightscout.androidaps.db.TemporaryBasal;
import info.nightscout.androidaps.events.EventInitializationChanged;
import info.nightscout.androidaps.events.EventRefreshOverview;
import info.nightscout.androidaps.interfaces.Constraint;
import info.nightscout.androidaps.interfaces.ConstraintsInterface;
import info.nightscout.androidaps.interfaces.PluginBase;
import info.nightscout.androidaps.interfaces.PluginDescription;
import info.nightscout.androidaps.interfaces.PluginType;
import info.nightscout.androidaps.interfaces.PumpDescription;
import info.nightscout.androidaps.interfaces.PumpInterface;
import info.nightscout.androidaps.logging.L;
import info.nightscout.androidaps.plugins.bus.RxBus;
import info.nightscout.androidaps.plugins.common.ManufacturerType;
import info.nightscout.androidaps.plugins.configBuilder.ConfigBuilderPlugin;
import info.nightscout.androidaps.plugins.configBuilder.ProfileFunctions;
import info.nightscout.androidaps.plugins.general.actions.defs.CustomAction;
import info.nightscout.androidaps.plugins.general.actions.defs.CustomActionType;
import info.nightscout.androidaps.plugins.general.nsclient.NSUpload;
import info.nightscout.androidaps.plugins.general.nsclient.UploadQueue;
import info.nightscout.androidaps.plugins.general.overview.events.EventDismissNotification;
import info.nightscout.androidaps.plugins.general.overview.events.EventNewNotification;
import info.nightscout.androidaps.plugins.general.overview.events.EventOverviewBolusProgress;
import info.nightscout.androidaps.plugins.general.overview.notifications.Notification;
import info.nightscout.androidaps.plugins.pump.common.defs.PumpType;
import info.nightscout.androidaps.plugins.pump.insight.app_layer.Service;
import info.nightscout.androidaps.plugins.pump.insight.app_layer.history.HistoryReadingDirection;
import info.nightscout.androidaps.plugins.pump.insight.app_layer.history.ReadHistoryEventsMessage;
import info.nightscout.androidaps.plugins.pump.insight.app_layer.history.StartReadingHistoryMessage;
import info.nightscout.androidaps.plugins.pump.insight.app_layer.history.StopReadingHistoryMessage;
import info.nightscout.androidaps.plugins.pump.insight.app_layer.history.history_events.BolusDeliveredEvent;
import info.nightscout.androidaps.plugins.pump.insight.app_layer.history.history_events.BolusProgrammedEvent;
import info.nightscout.androidaps.plugins.pump.insight.app_layer.history.history_events.CannulaFilledEvent;
import info.nightscout.androidaps.plugins.pump.insight.app_layer.history.history_events.DateTimeChangedEvent;
import info.nightscout.androidaps.plugins.pump.insight.app_layer.history.history_events.DefaultDateTimeSetEvent;
import info.nightscout.androidaps.plugins.pump.insight.app_layer.history.history_events.EndOfTBREvent;
import info.nightscout.androidaps.plugins.pump.insight.app_layer.history.history_events.HistoryEvent;
import info.nightscout.androidaps.plugins.pump.insight.app_layer.history.history_events.OccurrenceOfAlertEvent;
import info.nightscout.androidaps.plugins.pump.insight.app_layer.history.history_events.OperatingModeChangedEvent;
import info.nightscout.androidaps.plugins.pump.insight.app_layer.history.history_events.PowerUpEvent;
import info.nightscout.androidaps.plugins.pump.insight.app_layer.history.history_events.SniffingDoneEvent;
import info.nightscout.androidaps.plugins.pump.insight.app_layer.history.history_events.StartOfTBREvent;
import info.nightscout.androidaps.plugins.pump.insight.app_layer.history.history_events.TotalDailyDoseEvent;
import info.nightscout.androidaps.plugins.pump.insight.app_layer.history.history_events.TubeFilledEvent;
import info.nightscout.androidaps.plugins.pump.insight.app_layer.parameter_blocks.ActiveBRProfileBlock;
import info.nightscout.androidaps.plugins.pump.insight.app_layer.parameter_blocks.BRProfile1Block;
import info.nightscout.androidaps.plugins.pump.insight.app_layer.parameter_blocks.BRProfileBlock;
import info.nightscout.androidaps.plugins.pump.insight.app_layer.parameter_blocks.FactoryMinBasalAmountBlock;
import info.nightscout.androidaps.plugins.pump.insight.app_layer.parameter_blocks.FactoryMinBolusAmountBlock;
import info.nightscout.androidaps.plugins.pump.insight.app_layer.parameter_blocks.MaxBasalAmountBlock;
import info.nightscout.androidaps.plugins.pump.insight.app_layer.parameter_blocks.MaxBolusAmountBlock;
import info.nightscout.androidaps.plugins.pump.insight.app_layer.parameter_blocks.TBROverNotificationBlock;
import info.nightscout.androidaps.plugins.pump.insight.app_layer.remote_control.CancelBolusMessage;
import info.nightscout.androidaps.plugins.pump.insight.app_layer.remote_control.CancelTBRMessage;
import info.nightscout.androidaps.plugins.pump.insight.app_layer.remote_control.ChangeTBRMessage;
import info.nightscout.androidaps.plugins.pump.insight.app_layer.remote_control.ConfirmAlertMessage;
import info.nightscout.androidaps.plugins.pump.insight.app_layer.remote_control.DeliverBolusMessage;
import info.nightscout.androidaps.plugins.pump.insight.app_layer.remote_control.SetDateTimeMessage;
import info.nightscout.androidaps.plugins.pump.insight.app_layer.remote_control.SetOperatingModeMessage;
import info.nightscout.androidaps.plugins.pump.insight.app_layer.remote_control.SetTBRMessage;
import info.nightscout.androidaps.plugins.pump.insight.app_layer.status.GetActiveAlertMessage;
import info.nightscout.androidaps.plugins.pump.insight.app_layer.status.GetActiveBasalRateMessage;
import info.nightscout.androidaps.plugins.pump.insight.app_layer.status.GetActiveBolusesMessage;
import info.nightscout.androidaps.plugins.pump.insight.app_layer.status.GetActiveTBRMessage;
import info.nightscout.androidaps.plugins.pump.insight.app_layer.status.GetBatteryStatusMessage;
import info.nightscout.androidaps.plugins.pump.insight.app_layer.status.GetCartridgeStatusMessage;
import info.nightscout.androidaps.plugins.pump.insight.app_layer.status.GetDateTimeMessage;
import info.nightscout.androidaps.plugins.pump.insight.app_layer.status.GetOperatingModeMessage;
import info.nightscout.androidaps.plugins.pump.insight.app_layer.status.GetPumpStatusRegisterMessage;
import info.nightscout.androidaps.plugins.pump.insight.app_layer.status.GetTotalDailyDoseMessage;
import info.nightscout.androidaps.plugins.pump.insight.app_layer.status.ResetPumpStatusRegisterMessage;
import info.nightscout.androidaps.plugins.pump.insight.connection_service.InsightConnectionService;
import info.nightscout.androidaps.plugins.pump.insight.database.InsightBolusID;
import info.nightscout.androidaps.plugins.pump.insight.database.InsightHistoryOffset;
import info.nightscout.androidaps.plugins.pump.insight.database.InsightPumpID;
import info.nightscout.androidaps.plugins.pump.insight.descriptors.ActiveBasalRate;
import info.nightscout.androidaps.plugins.pump.insight.descriptors.ActiveBolus;
import info.nightscout.androidaps.plugins.pump.insight.descriptors.ActiveTBR;
import info.nightscout.androidaps.plugins.pump.insight.descriptors.AlertType;
import info.nightscout.androidaps.plugins.pump.insight.descriptors.BasalProfile;
import info.nightscout.androidaps.plugins.pump.insight.descriptors.BasalProfileBlock;
import info.nightscout.androidaps.plugins.pump.insight.descriptors.BatteryStatus;
import info.nightscout.androidaps.plugins.pump.insight.descriptors.BolusType;
import info.nightscout.androidaps.plugins.pump.insight.descriptors.CartridgeStatus;
import info.nightscout.androidaps.plugins.pump.insight.descriptors.InsightState;
import info.nightscout.androidaps.plugins.pump.insight.descriptors.OperatingMode;
import info.nightscout.androidaps.plugins.pump.insight.descriptors.PumpTime;
import info.nightscout.androidaps.plugins.pump.insight.descriptors.TotalDailyDose;
import info.nightscout.androidaps.plugins.pump.insight.events.EventLocalInsightUpdateGUI;
import info.nightscout.androidaps.plugins.pump.insight.exceptions.InsightException;
import info.nightscout.androidaps.plugins.pump.insight.exceptions.app_layer_errors.AppLayerErrorException;
import info.nightscout.androidaps.plugins.pump.insight.exceptions.app_layer_errors.NoActiveTBRToCanceLException;
import info.nightscout.androidaps.plugins.pump.insight.utils.ExceptionTranslator;
import info.nightscout.androidaps.plugins.pump.insight.utils.ParameterBlockUtil;
import info.nightscout.androidaps.plugins.treatments.Treatment;
import info.nightscout.androidaps.plugins.treatments.TreatmentsPlugin;
import info.nightscout.androidaps.utils.DateUtil;
import info.nightscout.androidaps.utils.SP;
public class LocalInsightPlugin extends PluginBase implements PumpInterface, ConstraintsInterface, InsightConnectionService.StateCallback {
public static final String ALERT_CHANNEL_ID = "AndroidAPS-InsightAlert";
private static LocalInsightPlugin instance = null;
private Logger log = LoggerFactory.getLogger(L.PUMP);
private PumpDescription pumpDescription;
private InsightAlertService alertService;
private InsightConnectionService connectionService;
private long timeOffset;
private ServiceConnection serviceConnection = new ServiceConnection() {
@Override
public void onServiceConnected(ComponentName name, IBinder binder) {
if (binder instanceof InsightConnectionService.LocalBinder) {
connectionService = ((InsightConnectionService.LocalBinder) binder).getService();
connectionService.registerStateCallback(LocalInsightPlugin.this);
} else if (binder instanceof InsightAlertService.LocalBinder) {
alertService = ((InsightAlertService.LocalBinder) binder).getService();
}
if (connectionService != null && alertService != null) {
RxBus.INSTANCE.send(new EventInitializationChanged());
}
}
@Override
public void onServiceDisconnected(ComponentName name) {
connectionService = null;
}
};
private final Object $bolusLock = new Object[0];
private int bolusID;
private boolean bolusCancelled;
private BasalProfile activeBasalProfile;
private List<BasalProfileBlock> profileBlocks;
private boolean limitsFetched;
private double maximumBolusAmount;
private double maximumBasalAmount;
private double minimumBolusAmount;
private double minimumBasalAmount;
private long lastUpdated = -1;
private OperatingMode operatingMode;
private BatteryStatus batteryStatus;
private CartridgeStatus cartridgeStatus;
private TotalDailyDose totalDailyDose;
private ActiveBasalRate activeBasalRate;
private ActiveTBR activeTBR;
private List<ActiveBolus> activeBoluses;
private boolean statusLoaded;
private TBROverNotificationBlock tbrOverNotificationBlock;
public static LocalInsightPlugin getPlugin() {
if (instance == null) instance = new LocalInsightPlugin();
return instance;
}
public LocalInsightPlugin() {
super(new PluginDescription()
.pluginName(R.string.insight_local)
.shortName(R.string.insightpump_shortname)
.mainType(PluginType.PUMP)
.description(R.string.description_pump_insight_local)
.fragmentClass(LocalInsightFragment.class.getName())
.preferencesId(MainApp.instance().getPackageName().equals("info.nightscout.androidaps")
? R.xml.pref_insight_local_full : R.xml.pref_insight_local_pumpcontrol));
pumpDescription = new PumpDescription();
pumpDescription.setPumpDescription(PumpType.AccuChekInsightBluetooth);
}
public TBROverNotificationBlock getTBROverNotificationBlock() {
return tbrOverNotificationBlock;
}
public long getLastUpdated() {
return lastUpdated;
}
public InsightConnectionService getConnectionService() {
return connectionService;
}
public OperatingMode getOperatingMode() {
return operatingMode;
}
public BatteryStatus getBatteryStatus() {
return batteryStatus;
}
public CartridgeStatus getCartridgeStatus() {
return cartridgeStatus;
}
public TotalDailyDose getTotalDailyDose() {
return totalDailyDose;
}
public ActiveBasalRate getActiveBasalRate() {
return activeBasalRate;
}
public ActiveTBR getActiveTBR() {
return activeTBR;
}
public List<ActiveBolus> getActiveBoluses() {
return activeBoluses;
}
@Override
protected void onStart() {
super.onStart();
MainApp.instance().bindService(new Intent(MainApp.instance(), InsightConnectionService.class), serviceConnection, Context.BIND_AUTO_CREATE);
MainApp.instance().bindService(new Intent(MainApp.instance(), InsightAlertService.class), serviceConnection, Context.BIND_AUTO_CREATE);
createNotificationChannel();
}
private void createNotificationChannel() {
if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.O) {
NotificationManager notificationManager = (NotificationManager) MainApp.instance().getSystemService(Context.NOTIFICATION_SERVICE);
NotificationChannel channel = new NotificationChannel(ALERT_CHANNEL_ID, MainApp.gs(R.string.insight_alert_notification_channel), NotificationManager.IMPORTANCE_HIGH);
channel.setSound(null, null);
notificationManager.createNotificationChannel(channel);
}
}
@Override
protected void onStop() {
super.onStop();
MainApp.instance().unbindService(serviceConnection);
}
@Override
public void switchAllowed(boolean newState, FragmentActivity activity, PluginType type) {
confirmPumpPluginActivation(newState, activity, type);
}
@Override
public boolean isInitialized() {
return connectionService != null && alertService != null && connectionService.isPaired();
}
@Override
public boolean isSuspended() {
return operatingMode != null && operatingMode != OperatingMode.STARTED;
}
@Override
public boolean isBusy() {
return false;
}
@Override
public boolean isConnected() {
return connectionService != null
&& alertService != null
&& connectionService.hasRequestedConnection(this)
&& connectionService.getState() == InsightState.CONNECTED;
}
@Override
public boolean isConnecting() {
if (connectionService == null || alertService == null || !connectionService.hasRequestedConnection(this))
return false;
InsightState state = connectionService.getState();
return state == InsightState.CONNECTING
|| state == InsightState.APP_CONNECT_MESSAGE
|| state == InsightState.RECOVERING;
}
@Override
public boolean isHandshakeInProgress() {
return false;
}
@Override
public void finishHandshaking() {
}
@Override
public void connect(String reason) {
if (connectionService != null && alertService != null)
connectionService.requestConnection(this);
}
@Override
public void disconnect(String reason) {
if (connectionService != null && alertService != null)
connectionService.withdrawConnectionRequest(this);
}
@Override
public void stopConnecting() {
if (connectionService != null && alertService != null)
connectionService.withdrawConnectionRequest(this);
}
@Override
public void getPumpStatus() {
try {
tbrOverNotificationBlock = ParameterBlockUtil.readParameterBlock(connectionService, Service.CONFIGURATION, TBROverNotificationBlock.class);
readHistory();
fetchBasalProfile();
fetchLimitations();
updatePumpTimeIfNeeded();
fetchStatus();
} catch (AppLayerErrorException e) {
log.info("Exception while fetching status: " + e.getClass().getCanonicalName() + " (" + e.getErrorCode() + ")");
} catch (InsightException e) {
log.info("Exception while fetching status: " + e.getClass().getCanonicalName());
} catch (Exception e) {
log.error("Exception while fetching status", e);
}
}
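    // Compares the pump clock against the phone clock and pushes the phone time to the
    // pump if they drift apart by more than 10 seconds (or across a DST transition),
    // notifying the user afterwards.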
private void updatePumpTimeIfNeeded() throws Exception {
PumpTime pumpTime = connectionService.requestMessage(new GetDateTimeMessage()).await().getPumpTime();
Calendar calendar = Calendar.getInstance();
calendar.set(Calendar.YEAR, pumpTime.getYear());
calendar.set(Calendar.MONTH, pumpTime.getMonth() - 1);
calendar.set(Calendar.DAY_OF_MONTH, pumpTime.getDay());
calendar.set(Calendar.HOUR_OF_DAY, pumpTime.getHour());
calendar.set(Calendar.MINUTE, pumpTime.getMinute());
calendar.set(Calendar.SECOND, pumpTime.getSecond());
if (calendar.get(Calendar.HOUR_OF_DAY) != pumpTime.getHour() || Math.abs(calendar.getTimeInMillis() - System.currentTimeMillis()) > 10000) {
calendar.setTime(new Date());
pumpTime.setYear(calendar.get(Calendar.YEAR));
pumpTime.setMonth(calendar.get(Calendar.MONTH) + 1);
pumpTime.setDay(calendar.get(Calendar.DAY_OF_MONTH));
pumpTime.setHour(calendar.get(Calendar.HOUR_OF_DAY));
pumpTime.setMinute(calendar.get(Calendar.MINUTE));
pumpTime.setSecond(calendar.get(Calendar.SECOND));
SetDateTimeMessage setDateTimeMessage = new SetDateTimeMessage();
setDateTimeMessage.setPumpTime(pumpTime);
connectionService.requestMessage(setDateTimeMessage).await();
Notification notification = new Notification(Notification.INSIGHT_DATE_TIME_UPDATED, MainApp.gs(R.string.pump_time_updated), Notification.INFO, 60);
RxBus.INSTANCE.send(new EventNewNotification(notification));
}
}
private void fetchBasalProfile() throws Exception {
activeBasalProfile = ParameterBlockUtil.readParameterBlock(connectionService, Service.CONFIGURATION, ActiveBRProfileBlock.class).getActiveBasalProfile();
profileBlocks = ParameterBlockUtil.readParameterBlock(connectionService, Service.CONFIGURATION, BRProfile1Block.class).getProfileBlocks();
}
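    // Refreshes the cached pump state. After the first full load, the pump's status
    // register is used to fetch only the items that actually changed; basal, TBR and
    // bolus data are cleared while the pump is not in STARTED mode.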
private void fetchStatus() throws Exception {
if (statusLoaded) {
GetPumpStatusRegisterMessage registerMessage = connectionService.requestMessage(new GetPumpStatusRegisterMessage()).await();
ResetPumpStatusRegisterMessage resetMessage = new ResetPumpStatusRegisterMessage();
resetMessage.setOperatingModeChanged(registerMessage.isOperatingModeChanged());
resetMessage.setBatteryStatusChanged(registerMessage.isBatteryStatusChanged());
resetMessage.setCartridgeStatusChanged(registerMessage.isCartridgeStatusChanged());
resetMessage.setTotalDailyDoseChanged(registerMessage.isTotalDailyDoseChanged());
resetMessage.setActiveTBRChanged(registerMessage.isActiveTBRChanged());
resetMessage.setActiveBolusesChanged(registerMessage.isActiveBolusesChanged());
connectionService.requestMessage(resetMessage).await();
if (registerMessage.isOperatingModeChanged())
operatingMode = connectionService.requestMessage(new GetOperatingModeMessage()).await().getOperatingMode();
if (registerMessage.isBatteryStatusChanged())
batteryStatus = connectionService.requestMessage(new GetBatteryStatusMessage()).await().getBatteryStatus();
if (registerMessage.isCartridgeStatusChanged())
cartridgeStatus = connectionService.requestMessage(new GetCartridgeStatusMessage()).await().getCartridgeStatus();
if (registerMessage.isTotalDailyDoseChanged())
totalDailyDose = connectionService.requestMessage(new GetTotalDailyDoseMessage()).await().getTDD();
if (operatingMode == OperatingMode.STARTED) {
if (registerMessage.isActiveBasalRateChanged())
activeBasalRate = connectionService.requestMessage(new GetActiveBasalRateMessage()).await().getActiveBasalRate();
if (registerMessage.isActiveTBRChanged())
activeTBR = connectionService.requestMessage(new GetActiveTBRMessage()).await().getActiveTBR();
if (registerMessage.isActiveBolusesChanged())
activeBoluses = connectionService.requestMessage(new GetActiveBolusesMessage()).await().getActiveBoluses();
} else {
activeBasalRate = null;
activeTBR = null;
activeBoluses = null;
}
} else {
ResetPumpStatusRegisterMessage resetMessage = new ResetPumpStatusRegisterMessage();
resetMessage.setOperatingModeChanged(true);
resetMessage.setBatteryStatusChanged(true);
resetMessage.setCartridgeStatusChanged(true);
resetMessage.setTotalDailyDoseChanged(true);
resetMessage.setActiveBasalRateChanged(true);
resetMessage.setActiveTBRChanged(true);
resetMessage.setActiveBolusesChanged(true);
connectionService.requestMessage(resetMessage).await();
operatingMode = connectionService.requestMessage(new GetOperatingModeMessage()).await().getOperatingMode();
batteryStatus = connectionService.requestMessage(new GetBatteryStatusMessage()).await().getBatteryStatus();
cartridgeStatus = connectionService.requestMessage(new GetCartridgeStatusMessage()).await().getCartridgeStatus();
totalDailyDose = connectionService.requestMessage(new GetTotalDailyDoseMessage()).await().getTDD();
if (operatingMode == OperatingMode.STARTED) {
activeBasalRate = connectionService.requestMessage(new GetActiveBasalRateMessage()).await().getActiveBasalRate();
activeTBR = connectionService.requestMessage(new GetActiveTBRMessage()).await().getActiveTBR();
activeBoluses = connectionService.requestMessage(new GetActiveBolusesMessage()).await().getActiveBoluses();
} else {
activeBasalRate = null;
activeTBR = null;
activeBoluses = null;
}
statusLoaded = true;
}
lastUpdated = System.currentTimeMillis();
new Handler(Looper.getMainLooper()).post(() -> {
RxBus.INSTANCE.send(new EventLocalInsightUpdateGUI());
RxBus.INSTANCE.send(new EventRefreshOverview("LocalInsightPlugin::fetchStatus"));
});
}
private void fetchLimitations() throws Exception {
maximumBolusAmount = ParameterBlockUtil.readParameterBlock(connectionService, Service.CONFIGURATION, MaxBolusAmountBlock.class).getAmountLimitation();
maximumBasalAmount = ParameterBlockUtil.readParameterBlock(connectionService, Service.CONFIGURATION, MaxBasalAmountBlock.class).getAmountLimitation();
minimumBolusAmount = ParameterBlockUtil.readParameterBlock(connectionService, Service.CONFIGURATION, FactoryMinBolusAmountBlock.class).getAmountLimitation();
minimumBasalAmount = ParameterBlockUtil.readParameterBlock(connectionService, Service.CONFIGURATION, FactoryMinBasalAmountBlock.class).getAmountLimitation();
this.pumpDescription.basalMaximumRate = maximumBasalAmount;
this.pumpDescription.basalMinimumRate = minimumBasalAmount;
limitsFetched = true;
}
@Override
public PumpEnactResult setNewBasalProfile(Profile profile) {
PumpEnactResult result = new PumpEnactResult();
RxBus.INSTANCE.send(new EventDismissNotification(Notification.PROFILE_NOT_SET_NOT_INITIALIZED));
List<BasalProfileBlock> profileBlocks = new ArrayList<>();
for (int i = 0; i < profile.getBasalValues().length; i++) {
Profile.ProfileValue basalValue = profile.getBasalValues()[i];
Profile.ProfileValue nextValue = null;
if (profile.getBasalValues().length > i + 1)
nextValue = profile.getBasalValues()[i + 1];
BasalProfileBlock profileBlock = new BasalProfileBlock();
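            // Round to the pump's basal rate resolution: 0.1 U/h above 5 U/h, 0.01 U/h below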
profileBlock.setBasalAmount(basalValue.value > 5 ? Math.round(basalValue.value / 0.1) * 0.1 : Math.round(basalValue.value / 0.01) * 0.01);
profileBlock.setDuration((((nextValue != null ? nextValue.timeAsSeconds : 24 * 60 * 60) - basalValue.timeAsSeconds) / 60));
profileBlocks.add(profileBlock);
}
try {
ActiveBRProfileBlock activeBRProfileBlock = new ActiveBRProfileBlock();
activeBRProfileBlock.setActiveBasalProfile(BasalProfile.PROFILE_1);
ParameterBlockUtil.writeConfigurationBlock(connectionService, activeBRProfileBlock);
activeBasalProfile = BasalProfile.PROFILE_1;
BRProfileBlock profileBlock = new BRProfile1Block();
profileBlock.setProfileBlocks(profileBlocks);
ParameterBlockUtil.writeConfigurationBlock(connectionService, profileBlock);
RxBus.INSTANCE.send(new EventDismissNotification(Notification.FAILED_UDPATE_PROFILE));
Notification notification = new Notification(Notification.PROFILE_SET_OK, MainApp.gs(R.string.profile_set_ok), Notification.INFO, 60);
RxBus.INSTANCE.send(new EventNewNotification(notification));
result.success = true;
result.enacted = true;
result.comment = MainApp.gs(R.string.virtualpump_resultok);
this.profileBlocks = profileBlocks;
try {
fetchStatus();
} catch (Exception ignored) {
}
} catch (AppLayerErrorException e) {
log.info("Exception while setting profile: " + e.getClass().getCanonicalName() + " (" + e.getErrorCode() + ")");
Notification notification = new Notification(Notification.FAILED_UDPATE_PROFILE, MainApp.gs(R.string.failedupdatebasalprofile), Notification.URGENT);
RxBus.INSTANCE.send(new EventNewNotification(notification));
result.comment = ExceptionTranslator.getString(e);
} catch (InsightException e) {
log.info("Exception while setting profile: " + e.getClass().getCanonicalName());
Notification notification = new Notification(Notification.FAILED_UDPATE_PROFILE, MainApp.gs(R.string.failedupdatebasalprofile), Notification.URGENT);
RxBus.INSTANCE.send(new EventNewNotification(notification));
result.comment = ExceptionTranslator.getString(e);
} catch (Exception e) {
log.error("Exception while setting profile", e);
Notification notification = new Notification(Notification.FAILED_UDPATE_PROFILE, MainApp.gs(R.string.failedupdatebasalprofile), Notification.URGENT);
RxBus.INSTANCE.send(new EventNewNotification(notification));
result.comment = ExceptionTranslator.getString(e);
}
return result;
}
@Override
public boolean isThisProfileSet(Profile profile) {
if (!isInitialized() || profileBlocks == null) return true;
if (profile.getBasalValues().length != profileBlocks.size()) return false;
if (activeBasalProfile != BasalProfile.PROFILE_1) return false;
for (int i = 0; i < profileBlocks.size(); i++) {
BasalProfileBlock profileBlock = profileBlocks.get(i);
Profile.ProfileValue basalValue = profile.getBasalValues()[i];
Profile.ProfileValue nextValue = null;
if (profile.getBasalValues().length > i + 1)
nextValue = profile.getBasalValues()[i + 1];
if (profileBlock.getDuration() * 60 != (nextValue != null ? nextValue.timeAsSeconds : 24 * 60 * 60) - basalValue.timeAsSeconds)
return false;
if (Math.abs(profileBlock.getBasalAmount() - basalValue.value) > (basalValue.value > 5 ? 0.051 : 0.0051))
return false;
}
return true;
}
@Override
public long lastDataTime() {
if (connectionService == null || alertService == null) return System.currentTimeMillis();
return connectionService.getLastDataTime();
}
@Override
public double getBaseBasalRate() {
if (connectionService == null || alertService == null) return 0;
if (activeBasalRate != null) return activeBasalRate.getActiveBasalRate();
else return 0;
}
@Override
public double getReservoirLevel() {
if (cartridgeStatus == null) return 0;
return cartridgeStatus.getRemainingAmount();
}
@Override
public int getBatteryLevel() {
if (batteryStatus == null) return 0;
return batteryStatus.getBatteryAmount();
}
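    // Delivers a standard bolus and blocks until delivery has finished, polling the pump
    // for progress and publishing EventOverviewBolusProgress updates. Carbs entered with
    // a time offset are recorded as a separate treatment.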
@Override
public PumpEnactResult deliverTreatment(DetailedBolusInfo detailedBolusInfo) {
PumpEnactResult result = new PumpEnactResult();
double insulin = Math.round(detailedBolusInfo.insulin / 0.01) * 0.01;
if (insulin > 0) {
try {
synchronized ($bolusLock) {
DeliverBolusMessage bolusMessage = new DeliverBolusMessage();
bolusMessage.setBolusType(BolusType.STANDARD);
bolusMessage.setDuration(0);
bolusMessage.setExtendedAmount(0);
bolusMessage.setImmediateAmount(insulin);
bolusID = connectionService.requestMessage(bolusMessage).await().getBolusId();
bolusCancelled = false;
}
result.success = true;
result.enacted = true;
Treatment t = new Treatment();
t.isSMB = detailedBolusInfo.isSMB;
final EventOverviewBolusProgress bolusingEvent = EventOverviewBolusProgress.INSTANCE;
bolusingEvent.setT(t);
bolusingEvent.setStatus(MainApp.gs(R.string.insight_delivered, 0d, insulin));
bolusingEvent.setPercent(0);
RxBus.INSTANCE.send(bolusingEvent);
int trials = 0;
InsightBolusID insightBolusID = new InsightBolusID();
insightBolusID.bolusID = bolusID;
insightBolusID.timestamp = System.currentTimeMillis();
insightBolusID.pumpSerial = connectionService.getPumpSystemIdentification().getSerialNumber();
MainApp.getDbHelper().createOrUpdate(insightBolusID);
detailedBolusInfo.date = insightBolusID.timestamp;
detailedBolusInfo.source = Source.PUMP;
detailedBolusInfo.pumpId = insightBolusID.id;
if (detailedBolusInfo.carbs > 0 && detailedBolusInfo.carbTime != 0) {
DetailedBolusInfo carbInfo = new DetailedBolusInfo();
carbInfo.carbs = detailedBolusInfo.carbs;
carbInfo.date = detailedBolusInfo.date + detailedBolusInfo.carbTime * 60L * 1000L;
carbInfo.source = Source.USER;
TreatmentsPlugin.getPlugin().addToHistoryTreatment(carbInfo, false);
detailedBolusInfo.carbTime = 0;
detailedBolusInfo.carbs = 0;
}
TreatmentsPlugin.getPlugin().addToHistoryTreatment(detailedBolusInfo, true);
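                // Poll the active boluses every 200 ms to track delivery progress; stop once
                // the bolus was cancelled, the pump left STARTED mode, or the bolus is no
                // longer reported (seen before, or five consecutive misses).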
while (true) {
synchronized ($bolusLock) {
if (bolusCancelled) break;
}
OperatingMode operatingMode = connectionService.requestMessage(new GetOperatingModeMessage()).await().getOperatingMode();
if (operatingMode != OperatingMode.STARTED) break;
List<ActiveBolus> activeBoluses = connectionService.requestMessage(new GetActiveBolusesMessage()).await().getActiveBoluses();
ActiveBolus activeBolus = null;
for (ActiveBolus bolus : activeBoluses) {
if (bolus.getBolusID() == bolusID) {
activeBolus = bolus;
break;
}
}
if (activeBolus != null) {
trials = -1;
int percentBefore = bolusingEvent.getPercent();
bolusingEvent.setPercent((int) (100D / activeBolus.getInitialAmount() * (activeBolus.getInitialAmount() - activeBolus.getRemainingAmount())));
bolusingEvent.setStatus(MainApp.gs(R.string.insight_delivered, activeBolus.getInitialAmount() - activeBolus.getRemainingAmount(), activeBolus.getInitialAmount()));
if (percentBefore != bolusingEvent.getPercent())
RxBus.INSTANCE.send(bolusingEvent);
} else {
synchronized ($bolusLock) {
if (bolusCancelled || trials == -1 || trials++ >= 5) {
if (!bolusCancelled) {
bolusingEvent.setStatus(MainApp.gs(R.string.insight_delivered, insulin, insulin));
bolusingEvent.setPercent(100);
RxBus.INSTANCE.send(bolusingEvent);
}
break;
}
}
}
Thread.sleep(200);
}
readHistory();
fetchStatus();
} catch (AppLayerErrorException e) {
log.info("Exception while delivering bolus: " + e.getClass().getCanonicalName() + " (" + e.getErrorCode() + ")");
result.comment = ExceptionTranslator.getString(e);
} catch (InsightException e) {
log.info("Exception while delivering bolus: " + e.getClass().getCanonicalName());
result.comment = ExceptionTranslator.getString(e);
} catch (Exception e) {
log.error("Exception while delivering bolus", e);
result.comment = ExceptionTranslator.getString(e);
}
} else if (detailedBolusInfo.carbs > 0) {
result.success = true;
result.enacted = true;
}
result.carbsDelivered = detailedBolusInfo.carbs;
result.bolusDelivered = insulin;
return result;
}
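    // Cancels the bolus currently being delivered on a background thread. The warning
    // the pump raises in response (WARNING_38) is confirmed automatically so the user
    // is not alerted about an action they triggered themselves.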
@Override
public void stopBolusDelivering() {
new Thread(() -> {
try {
synchronized ($bolusLock) {
alertService.ignore(AlertType.WARNING_38);
CancelBolusMessage cancelBolusMessage = new CancelBolusMessage();
cancelBolusMessage.setBolusID(bolusID);
connectionService.requestMessage(cancelBolusMessage).await();
bolusCancelled = true;
confirmAlert(AlertType.WARNING_38);
alertService.ignore(null);
}
} catch (AppLayerErrorException e) {
log.info("Exception while canceling bolus: " + e.getClass().getCanonicalName() + " (" + e.getErrorCode() + ")");
} catch (InsightException e) {
log.info("Exception while canceling bolus: " + e.getClass().getCanonicalName());
} catch (Exception e) {
log.error("Exception while canceling bolus", e);
}
}).start();
}
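    // Translates an absolute rate into a percentage of the current basal rate. Rates
    // above the pump's 250% TBR limit are only reachable with TBR emulation enabled:
    // the TBR is cancelled and the excess over the base rate is delivered as an
    // extended bolus instead.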
@Override
public PumpEnactResult setTempBasalAbsolute(Double absoluteRate, Integer durationInMinutes, Profile profile, boolean enforceNew) {
PumpEnactResult result = new PumpEnactResult();
if (activeBasalRate == null) return result;
if (activeBasalRate.getActiveBasalRate() == 0) return result;
double percent = 100D / activeBasalRate.getActiveBasalRate() * absoluteRate;
if (isFakingTempsByExtendedBoluses()) {
PumpEnactResult cancelEBResult = cancelExtendedBolusOnly();
if (cancelEBResult.success) {
if (percent > 250) {
PumpEnactResult cancelTBRResult = cancelTempBasalOnly();
if (cancelTBRResult.success) {
PumpEnactResult ebResult = setExtendedBolusOnly((absoluteRate - getBaseBasalRate()) / 60D
* ((double) durationInMinutes), durationInMinutes);
if (ebResult.success) {
result.success = true;
result.enacted = true;
result.isPercent = false;
result.absolute = absoluteRate;
result.duration = durationInMinutes;
result.comment = MainApp.gs(R.string.virtualpump_resultok);
} else {
result.comment = ebResult.comment;
}
} else {
result.comment = cancelTBRResult.comment;
}
} else {
return setTempBasalPercent((int) Math.round(percent), durationInMinutes, profile, enforceNew);
}
} else {
result.comment = cancelEBResult.comment;
}
} else {
return setTempBasalPercent((int) Math.round(percent), durationInMinutes, profile, enforceNew);
}
try {
fetchStatus();
readHistory();
} catch (AppLayerErrorException e) {
log.info("Exception after setting TBR: " + e.getClass().getCanonicalName() + " (" + e.getErrorCode() + ")");
} catch (InsightException e) {
log.info("Exception after setting TBR: " + e.getClass().getCanonicalName());
} catch (Exception e) {
log.error("Exception after setting TBR", e);
}
return result;
}
@Override
public PumpEnactResult setTempBasalPercent(Integer percent, Integer durationInMinutes, Profile profile, boolean enforceNew) {
PumpEnactResult result = new PumpEnactResult();
percent = (int) Math.round(((double) percent) / 10d) * 10;
if (percent == 100) return cancelTempBasal(true);
else if (percent > 250) percent = 250;
try {
if (activeTBR != null) {
ChangeTBRMessage message = new ChangeTBRMessage();
message.setDuration(durationInMinutes);
message.setPercentage(percent);
connectionService.requestMessage(message);
} else {
SetTBRMessage message = new SetTBRMessage();
message.setDuration(durationInMinutes);
message.setPercentage(percent);
connectionService.requestMessage(message);
}
result.isPercent = true;
result.percent = percent;
result.duration = durationInMinutes;
result.success = true;
result.enacted = true;
result.comment = MainApp.gs(R.string.virtualpump_resultok);
readHistory();
fetchStatus();
} catch (AppLayerErrorException e) {
log.info("Exception while setting TBR: " + e.getClass().getCanonicalName() + " (" + e.getErrorCode() + ")");
result.comment = ExceptionTranslator.getString(e);
} catch (InsightException e) {
log.info("Exception while setting TBR: " + e.getClass().getCanonicalName());
result.comment = ExceptionTranslator.getString(e);
} catch (Exception e) {
log.error("Exception while setting TBR", e);
result.comment = ExceptionTranslator.getString(e);
}
return result;
}
@Override
public PumpEnactResult setExtendedBolus(Double insulin, Integer durationInMinutes) {
PumpEnactResult result = cancelExtendedBolusOnly();
if (result.success) result = setExtendedBolusOnly(insulin, durationInMinutes);
try {
fetchStatus();
readHistory();
} catch (AppLayerErrorException e) {
log.info("Exception after delivering extended bolus: " + e.getClass().getCanonicalName() + " (" + e.getErrorCode() + ")");
} catch (InsightException e) {
log.info("Exception after delivering extended bolus: " + e.getClass().getCanonicalName());
} catch (Exception e) {
log.error("Exception after delivering extended bolus", e);
}
return result;
}
public PumpEnactResult setExtendedBolusOnly(Double insulin, Integer durationInMinutes) {
PumpEnactResult result = new PumpEnactResult();
try {
DeliverBolusMessage bolusMessage = new DeliverBolusMessage();
bolusMessage.setBolusType(BolusType.EXTENDED);
bolusMessage.setDuration(durationInMinutes);
bolusMessage.setExtendedAmount(insulin);
bolusMessage.setImmediateAmount(0);
int bolusID = connectionService.requestMessage(bolusMessage).await().getBolusId();
InsightBolusID insightBolusID = new InsightBolusID();
insightBolusID.bolusID = bolusID;
insightBolusID.timestamp = System.currentTimeMillis();
insightBolusID.pumpSerial = connectionService.getPumpSystemIdentification().getSerialNumber();
MainApp.getDbHelper().createOrUpdate(insightBolusID);
ExtendedBolus extendedBolus = new ExtendedBolus();
extendedBolus.date = insightBolusID.timestamp;
extendedBolus.source = Source.PUMP;
extendedBolus.durationInMinutes = durationInMinutes;
extendedBolus.insulin = insulin;
extendedBolus.pumpId = insightBolusID.id;
TreatmentsPlugin.getPlugin().addToHistoryExtendedBolus(extendedBolus);
result.success = true;
result.enacted = true;
result.comment = MainApp.gs(R.string.virtualpump_resultok);
} catch (AppLayerErrorException e) {
log.info("Exception while delivering extended bolus: " + e.getClass().getCanonicalName() + " (" + e.getErrorCode() + ")");
result.comment = ExceptionTranslator.getString(e);
} catch (InsightException e) {
log.info("Exception while delivering extended bolus: " + e.getClass().getCanonicalName());
result.comment = ExceptionTranslator.getString(e);
} catch (Exception e) {
log.error("Exception while delivering extended bolus", e);
result.comment = ExceptionTranslator.getString(e);
}
return result;
}
@Override
public PumpEnactResult cancelTempBasal(boolean enforceNew) {
PumpEnactResult result = new PumpEnactResult();
PumpEnactResult cancelEBResult = null;
if (isFakingTempsByExtendedBoluses()) cancelEBResult = cancelExtendedBolusOnly();
PumpEnactResult cancelTBRResult = cancelTempBasalOnly();
        result.success = (cancelEBResult == null || cancelEBResult.success) && cancelTBRResult.success;
result.enacted = (cancelEBResult != null && cancelEBResult.enacted) || cancelTBRResult.enacted;
result.comment = cancelEBResult != null ? cancelEBResult.comment : cancelTBRResult.comment;
try {
fetchStatus();
readHistory();
} catch (AppLayerErrorException e) {
log.info("Exception after canceling TBR: " + e.getClass().getCanonicalName() + " (" + e.getErrorCode() + ")");
} catch (InsightException e) {
log.info("Exception after canceling TBR: " + e.getClass().getCanonicalName());
} catch (Exception e) {
log.error("Exception after canceling TBR", e);
}
return result;
}
private PumpEnactResult cancelTempBasalOnly() {
PumpEnactResult result = new PumpEnactResult();
try {
alertService.ignore(AlertType.WARNING_36);
connectionService.requestMessage(new CancelTBRMessage()).await();
result.success = true;
result.enacted = true;
result.isTempCancel = true;
confirmAlert(AlertType.WARNING_36);
alertService.ignore(null);
result.comment = MainApp.gs(R.string.virtualpump_resultok);
} catch (NoActiveTBRToCanceLException e) {
result.success = true;
result.comment = MainApp.gs(R.string.virtualpump_resultok);
} catch (AppLayerErrorException e) {
log.info("Exception while canceling TBR: " + e.getClass().getCanonicalName() + " (" + e.getErrorCode() + ")");
result.comment = ExceptionTranslator.getString(e);
} catch (InsightException e) {
log.info("Exception while canceling TBR: " + e.getClass().getCanonicalName());
result.comment = ExceptionTranslator.getString(e);
} catch (Exception e) {
log.error("Exception while canceling TBR", e);
result.comment = ExceptionTranslator.getString(e);
}
return result;
}
@Override
public PumpEnactResult cancelExtendedBolus() {
PumpEnactResult result = cancelExtendedBolusOnly();
try {
fetchStatus();
readHistory();
} catch (AppLayerErrorException e) {
log.info("Exception after canceling extended bolus: " + e.getClass().getCanonicalName() + " (" + e.getErrorCode() + ")");
} catch (InsightException e) {
log.info("Exception after canceling extended bolus: " + e.getClass().getCanonicalName());
} catch (Exception e) {
log.error("Exception after canceling extended bolus", e);
}
return result;
}
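    // Cancels all running extended and multiwave boluses and truncates the corresponding
    // ExtendedBolus records to the time actually elapsed; records that never ran are
    // deleted locally and removed from Nightscout.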
private PumpEnactResult cancelExtendedBolusOnly() {
PumpEnactResult result = new PumpEnactResult();
try {
for (ActiveBolus activeBolus : activeBoluses) {
if (activeBolus.getBolusType() == BolusType.EXTENDED || activeBolus.getBolusType() == BolusType.MULTIWAVE) {
alertService.ignore(AlertType.WARNING_38);
CancelBolusMessage cancelBolusMessage = new CancelBolusMessage();
cancelBolusMessage.setBolusID(activeBolus.getBolusID());
connectionService.requestMessage(cancelBolusMessage).await();
confirmAlert(AlertType.WARNING_38);
alertService.ignore(null);
InsightBolusID insightBolusID = MainApp.getDbHelper().getInsightBolusID(connectionService.getPumpSystemIdentification().getSerialNumber(),
activeBolus.getBolusID(), System.currentTimeMillis());
if (insightBolusID != null) {
ExtendedBolus extendedBolus = MainApp.getDbHelper().getExtendedBolusByPumpId(insightBolusID.id);
if (extendedBolus != null) {
extendedBolus.durationInMinutes = (int) ((System.currentTimeMillis() - extendedBolus.date) / 60000);
if (extendedBolus.durationInMinutes <= 0) {
final String _id = extendedBolus._id;
if (NSUpload.isIdValid(_id))
NSUpload.removeCareportalEntryFromNS(_id);
else UploadQueue.removeID("dbAdd", _id);
MainApp.getDbHelper().delete(extendedBolus);
} else
TreatmentsPlugin.getPlugin().addToHistoryExtendedBolus(extendedBolus);
}
result.enacted = true;
result.success = true;
}
}
}
result.success = true;
result.comment = MainApp.gs(R.string.virtualpump_resultok);
} catch (AppLayerErrorException e) {
log.info("Exception while canceling extended bolus: " + e.getClass().getCanonicalName() + " (" + e.getErrorCode() + ")");
result.comment = ExceptionTranslator.getString(e);
} catch (InsightException e) {
log.info("Exception while canceling extended bolus: " + e.getClass().getCanonicalName());
result.comment = ExceptionTranslator.getString(e);
} catch (Exception e) {
log.error("Exception while canceling extended bolus", e);
result.comment = ExceptionTranslator.getString(e);
}
return result;
}
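    // Polls the pump for up to 10 seconds and confirms every occurrence of the given
    // alert type; stops early once a different alert becomes active.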
private void confirmAlert(AlertType alertType) {
try {
long started = System.currentTimeMillis();
while (System.currentTimeMillis() - started < 10000) {
GetActiveAlertMessage activeAlertMessage = connectionService.requestMessage(new GetActiveAlertMessage()).await();
if (activeAlertMessage.getAlert() != null) {
if (activeAlertMessage.getAlert().getAlertType() == alertType) {
ConfirmAlertMessage confirmMessage = new ConfirmAlertMessage();
confirmMessage.setAlertID(activeAlertMessage.getAlert().getAlertId());
connectionService.requestMessage(confirmMessage).await();
} else break;
}
}
} catch (AppLayerErrorException e) {
log.info("Exception while confirming alert: " + e.getClass().getCanonicalName() + " (" + e.getErrorCode() + ")");
} catch (InsightException e) {
log.info("Exception while confirming alert: " + e.getClass().getCanonicalName());
} catch (Exception e) {
log.error("Exception while confirming alert", e);
}
}
@Override
public JSONObject getJSONStatus(Profile profile, String profileName) {
long now = System.currentTimeMillis();
if (connectionService == null) return null;
if (System.currentTimeMillis() - connectionService.getLastConnected() > (60 * 60 * 1000)) {
return null;
}
final JSONObject pump = new JSONObject();
final JSONObject battery = new JSONObject();
final JSONObject status = new JSONObject();
final JSONObject extended = new JSONObject();
try {
status.put("timestamp", DateUtil.toISOString(connectionService.getLastConnected()));
extended.put("Version", BuildConfig.VERSION_NAME + "-" + BuildConfig.BUILDVERSION);
try {
extended.put("ActiveProfile", ProfileFunctions.getInstance().getProfileName());
} catch (Exception e) {
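                // ignored: the active profile name is optional in the status upload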
}
TemporaryBasal tb = TreatmentsPlugin.getPlugin().getTempBasalFromHistory(now);
if (tb != null) {
extended.put("TempBasalAbsoluteRate", tb.tempBasalConvertedToAbsolute(now, profile));
extended.put("TempBasalStart", DateUtil.dateAndTimeString(tb.date));
extended.put("TempBasalRemaining", tb.getPlannedRemainingMinutes());
}
ExtendedBolus eb = TreatmentsPlugin.getPlugin().getExtendedBolusFromHistory(now);
if (eb != null) {
extended.put("ExtendedBolusAbsoluteRate", eb.absoluteRate());
extended.put("ExtendedBolusStart", DateUtil.dateAndTimeString(eb.date));
extended.put("ExtendedBolusRemaining", eb.getPlannedRemainingMinutes());
}
extended.put("BaseBasalRate", getBaseBasalRate());
status.put("timestamp", DateUtil.toISOString(now));
pump.put("extended", extended);
if (statusLoaded) {
status.put("status", operatingMode != OperatingMode.STARTED ? "suspended" : "normal");
pump.put("status", status);
battery.put("percent", batteryStatus.getBatteryAmount());
pump.put("battery", battery);
pump.put("reservoir", cartridgeStatus.getRemainingAmount());
}
pump.put("clock", DateUtil.toISOString(now));
} catch (JSONException e) {
log.error("Unhandled exception", e);
}
return pump;
}
@Override
public ManufacturerType manufacturer() {
return ManufacturerType.Roche;
}
@Override
public PumpType model() {
return PumpType.AccuChekInsightBluetooth;
}
@Override
public String serialNumber() {
if (connectionService == null || alertService == null) return "Unknown";
return connectionService.getPumpSystemIdentification().getSerialNumber();
}
public PumpEnactResult stopPump() {
PumpEnactResult result = new PumpEnactResult();
try {
SetOperatingModeMessage operatingModeMessage = new SetOperatingModeMessage();
operatingModeMessage.setOperatingMode(OperatingMode.STOPPED);
connectionService.requestMessage(operatingModeMessage).await();
result.success = true;
result.enacted = true;
fetchStatus();
readHistory();
} catch (AppLayerErrorException e) {
log.info("Exception while stopping pump: " + e.getClass().getCanonicalName() + " (" + e.getErrorCode() + ")");
result.comment = ExceptionTranslator.getString(e);
} catch (InsightException e) {
log.info("Exception while stopping pump: " + e.getClass().getCanonicalName());
result.comment = ExceptionTranslator.getString(e);
} catch (Exception e) {
log.error("Exception while stopping pump", e);
result.comment = ExceptionTranslator.getString(e);
}
return result;
}
public PumpEnactResult startPump() {
PumpEnactResult result = new PumpEnactResult();
try {
SetOperatingModeMessage operatingModeMessage = new SetOperatingModeMessage();
operatingModeMessage.setOperatingMode(OperatingMode.STARTED);
connectionService.requestMessage(operatingModeMessage).await();
result.success = true;
result.enacted = true;
fetchStatus();
readHistory();
} catch (AppLayerErrorException e) {
log.info("Exception while starting pump: " + e.getClass().getCanonicalName() + " (" + e.getErrorCode() + ")");
result.comment = ExceptionTranslator.getString(e);
} catch (InsightException e) {
log.info("Exception while starting pump: " + e.getClass().getCanonicalName());
result.comment = ExceptionTranslator.getString(e);
} catch (Exception e) {
log.error("Exception while starting pump", e);
result.comment = ExceptionTranslator.getString(e);
}
return result;
}
public PumpEnactResult setTBROverNotification(boolean enabled) {
PumpEnactResult result = new PumpEnactResult();
boolean valueBefore = tbrOverNotificationBlock.isEnabled();
tbrOverNotificationBlock.setEnabled(enabled);
try {
ParameterBlockUtil.writeConfigurationBlock(connectionService, tbrOverNotificationBlock);
result.success = true;
result.enacted = true;
} catch (AppLayerErrorException e) {
tbrOverNotificationBlock.setEnabled(valueBefore);
log.info("Exception while updating TBR notification block: " + e.getClass().getCanonicalName() + " (" + e.getErrorCode() + ")");
result.comment = ExceptionTranslator.getString(e);
} catch (InsightException e) {
tbrOverNotificationBlock.setEnabled(valueBefore);
log.info("Exception while updating TBR notification block: " + e.getClass().getSimpleName());
result.comment = ExceptionTranslator.getString(e);
} catch (Exception e) {
tbrOverNotificationBlock.setEnabled(valueBefore);
log.error("Exception while updating TBR notification block", e);
result.comment = ExceptionTranslator.getString(e);
}
return result;
}
@Override
public PumpDescription getPumpDescription() {
return pumpDescription;
}
@Override
public String shortStatus(boolean veryShort) {
StringBuilder ret = new StringBuilder();
if (connectionService.getLastConnected() != 0) {
Long agoMsec = System.currentTimeMillis() - connectionService.getLastConnected();
int agoMin = (int) (agoMsec / 60d / 1000d);
ret.append(MainApp.gs(R.string.short_status_last_connected, agoMin) + "\n");
}
if (activeTBR != null) {
ret.append(MainApp.gs(R.string.short_status_tbr, activeTBR.getPercentage(),
activeTBR.getInitialDuration() - activeTBR.getRemainingDuration(), activeTBR.getInitialDuration()) + "\n");
}
if (activeBoluses != null) for (ActiveBolus activeBolus : activeBoluses) {
if (activeBolus.getBolusType() == BolusType.STANDARD) continue;
ret.append(MainApp.gs(activeBolus.getBolusType() == BolusType.MULTIWAVE ? R.string.short_status_multiwave : R.string.short_status_extended,
activeBolus.getRemainingAmount(), activeBolus.getInitialAmount(), activeBolus.getRemainingDuration()) + "\n");
}
if (!veryShort && totalDailyDose != null) {
ret.append(MainApp.gs(R.string.short_status_tdd, totalDailyDose.getBolusAndBasal()) + "\n");
}
if (cartridgeStatus != null) {
ret.append(MainApp.gs(R.string.short_status_reservoir, cartridgeStatus.getRemainingAmount()) + "\n");
}
if (batteryStatus != null) {
ret.append(MainApp.gs(R.string.short_status_battery, batteryStatus.getBatteryAmount()) + "\n");
}
return ret.toString();
}
@Override
public boolean isFakingTempsByExtendedBoluses() {
return SP.getBoolean("insight_enable_tbr_emulation", false);
}
@Override
public PumpEnactResult loadTDDs() {
return new PumpEnactResult().success(true);
}
@Override
public List<CustomAction> getCustomActions() {
return null;
}
@Override
public void executeCustomAction(CustomActionType customActionType) {
}
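    // Synchronizes the pump history: computes the phone-to-pump time offset, then reads
    // the history backwards from the newest event on the first sync, or forwards from
    // the last stored offset on subsequent syncs, and persists the newest event
    // position afterwards.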
private void readHistory() {
try {
PumpTime pumpTime = connectionService.requestMessage(new GetDateTimeMessage()).await().getPumpTime();
String pumpSerial = connectionService.getPumpSystemIdentification().getSerialNumber();
timeOffset = Calendar.getInstance(TimeZone.getTimeZone("UTC")).getTimeInMillis() - parseDate(pumpTime.getYear(),
pumpTime.getMonth(), pumpTime.getDay(), pumpTime.getHour(), pumpTime.getMinute(), pumpTime.getSecond());
InsightHistoryOffset historyOffset = MainApp.getDbHelper().getInsightHistoryOffset(pumpSerial);
try {
List<HistoryEvent> historyEvents = new ArrayList<>();
if (historyOffset == null) {
StartReadingHistoryMessage startMessage = new StartReadingHistoryMessage();
startMessage.setDirection(HistoryReadingDirection.BACKWARD);
startMessage.setOffset(0xFFFFFFFF);
connectionService.requestMessage(startMessage).await();
historyEvents = connectionService.requestMessage(new ReadHistoryEventsMessage()).await().getHistoryEvents();
} else {
StartReadingHistoryMessage startMessage = new StartReadingHistoryMessage();
startMessage.setDirection(HistoryReadingDirection.FORWARD);
startMessage.setOffset(historyOffset.offset + 1);
connectionService.requestMessage(startMessage).await();
while (true) {
List<HistoryEvent> newEvents = connectionService.requestMessage(new ReadHistoryEventsMessage()).await().getHistoryEvents();
if (newEvents.size() == 0) break;
historyEvents.addAll(newEvents);
}
}
Collections.sort(historyEvents);
Collections.reverse(historyEvents);
if (historyOffset != null) processHistoryEvents(pumpSerial, historyEvents);
if (historyEvents.size() > 0) {
historyOffset = new InsightHistoryOffset();
historyOffset.pumpSerial = pumpSerial;
historyOffset.offset = historyEvents.get(0).getEventPosition();
MainApp.getDbHelper().createOrUpdate(historyOffset);
}
} catch (AppLayerErrorException e) {
log.info("Exception while reading history: " + e.getClass().getCanonicalName() + " (" + e.getErrorCode() + ")");
} catch (InsightException e) {
log.info("Exception while reading history: " + e.getClass().getSimpleName());
} catch (Exception e) {
log.error("Exception while reading history", e);
} finally {
try {
connectionService.requestMessage(new StopReadingHistoryMessage()).await();
} catch (Exception ignored) {
}
}
} catch (AppLayerErrorException e) {
log.info("Exception while reading history: " + e.getClass().getCanonicalName() + " (" + e.getErrorCode() + ")");
} catch (InsightException e) {
log.info("Exception while reading history: " + e.getClass().getSimpleName());
} catch (Exception e) {
log.error("Exception while reading history", e);
}
new Handler(Looper.getMainLooper()).post(() -> RxBus.INSTANCE.send(new EventRefreshOverview("LocalInsightPlugin::readHistory")));
}
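    // Replays the downloaded history. Periods in which the pump was stopped are
    // reconstructed as 0% temporary basals between the matching stop and start events,
    // and all temp basals are fed to the TreatmentsPlugin in chronological order.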
private void processHistoryEvents(String serial, List<HistoryEvent> historyEvents) {
List<TemporaryBasal> temporaryBasals = new ArrayList<>();
List<InsightPumpID> pumpStartedEvents = new ArrayList<>();
for (HistoryEvent historyEvent : historyEvents)
if (!processHistoryEvent(serial, temporaryBasals, pumpStartedEvents, historyEvent))
break;
Collections.reverse(temporaryBasals);
for (InsightPumpID pumpID : pumpStartedEvents) {
InsightPumpID stoppedEvent = MainApp.getDbHelper().getPumpStoppedEvent(pumpID.pumpSerial, pumpID.timestamp);
if (stoppedEvent == null || stoppedEvent.eventType.equals("PumpPaused")) continue;
long tbrStart = stoppedEvent.timestamp + 10000;
TemporaryBasal temporaryBasal = new TemporaryBasal();
temporaryBasal.durationInMinutes = (int) ((pumpID.timestamp - tbrStart) / 60000);
temporaryBasal.date = tbrStart;
temporaryBasal.source = Source.PUMP;
temporaryBasal.pumpId = pumpID.id;
temporaryBasal.percentRate = 0;
temporaryBasal.isAbsolute = false;
temporaryBasals.add(temporaryBasal);
}
Collections.sort(temporaryBasals, (o1, o2) -> (int) (o1.date - o2.date));
for (TemporaryBasal temporaryBasal : temporaryBasals)
TreatmentsPlugin.getPlugin().addToHistoryTempBasal(temporaryBasal);
}
private boolean processHistoryEvent(String serial, List<TemporaryBasal> temporaryBasals, List<InsightPumpID> pumpStartedEvents, HistoryEvent event) {
if (event instanceof DefaultDateTimeSetEvent) return false;
else if (event instanceof DateTimeChangedEvent)
processDateTimeChangedEvent((DateTimeChangedEvent) event);
else if (event instanceof CannulaFilledEvent)
processCannulaFilledEvent((CannulaFilledEvent) event);
else if (event instanceof TotalDailyDoseEvent)
processTotalDailyDoseEvent((TotalDailyDoseEvent) event);
else if (event instanceof TubeFilledEvent) processTubeFilledEvent((TubeFilledEvent) event);
else if (event instanceof SniffingDoneEvent)
processSniffingDoneEvent((SniffingDoneEvent) event);
else if (event instanceof PowerUpEvent) processPowerUpEvent((PowerUpEvent) event);
else if (event instanceof OperatingModeChangedEvent)
processOperatingModeChangedEvent(serial, pumpStartedEvents, (OperatingModeChangedEvent) event);
else if (event instanceof StartOfTBREvent)
processStartOfTBREvent(serial, temporaryBasals, (StartOfTBREvent) event);
else if (event instanceof EndOfTBREvent)
processEndOfTBREvent(serial, temporaryBasals, (EndOfTBREvent) event);
else if (event instanceof BolusProgrammedEvent)
processBolusProgrammedEvent(serial, (BolusProgrammedEvent) event);
else if (event instanceof BolusDeliveredEvent)
processBolusDeliveredEvent(serial, (BolusDeliveredEvent) event);
else if (event instanceof OccurrenceOfAlertEvent)
processOccurrenceOfAlertEvent((OccurrenceOfAlertEvent) event);
return true;
}
private void processDateTimeChangedEvent(DateTimeChangedEvent event) {
long timeAfter = parseDate(event.getEventYear(), event.getEventMonth(), event.getEventDay(), event.getEventHour(), event.getEventMinute(), event.getEventSecond());
long timeBefore = parseDate(event.getBeforeYear(), event.getBeforeMonth(), event.getBeforeDay(), event.getBeforeHour(), event.getBeforeMinute(), event.getBeforeSecond());
timeOffset -= timeAfter - timeBefore;
}
private void processCannulaFilledEvent(CannulaFilledEvent event) {
if (!SP.getBoolean("insight_log_site_changes", false)) return;
long timestamp = parseDate(event.getEventYear(), event.getEventMonth(), event.getEventDay(),
event.getEventHour(), event.getEventMinute(), event.getEventSecond()) + timeOffset;
uploadCareportalEvent(timestamp, CareportalEvent.SITECHANGE);
}
private void processTotalDailyDoseEvent(TotalDailyDoseEvent event) {
Calendar calendar = Calendar.getInstance();
calendar.setTime(new Date(0));
calendar.set(Calendar.YEAR, event.getTotalYear());
calendar.set(Calendar.MONTH, event.getTotalMonth() - 1);
calendar.set(Calendar.DAY_OF_MONTH, event.getTotalDay());
TDD tdd = new TDD();
tdd.basal = event.getBasalTotal();
tdd.bolus = event.getBolusTotal();
tdd.total = tdd.basal + tdd.bolus;
tdd.date = calendar.getTimeInMillis();
MainApp.getDbHelper().createOrUpdateTDD(tdd);
}
private void processTubeFilledEvent(TubeFilledEvent event) {
if (!SP.getBoolean("insight_log_tube_changes", false)) return;
long timestamp = parseDate(event.getEventYear(), event.getEventMonth(), event.getEventDay(),
event.getEventHour(), event.getEventMinute(), event.getEventSecond()) + timeOffset;
logNote(timestamp, MainApp.gs(R.string.tube_changed));
}
private void processSniffingDoneEvent(SniffingDoneEvent event) {
if (!SP.getBoolean("insight_log_reservoir_changes", false)) return;
long timestamp = parseDate(event.getEventYear(), event.getEventMonth(), event.getEventDay(),
event.getEventHour(), event.getEventMinute(), event.getEventSecond()) + timeOffset;
uploadCareportalEvent(timestamp, CareportalEvent.INSULINCHANGE);
}
private void processPowerUpEvent(PowerUpEvent event) {
if (!SP.getBoolean("insight_log_battery_changes", false)) return;
long timestamp = parseDate(event.getEventYear(), event.getEventMonth(), event.getEventDay(),
event.getEventHour(), event.getEventMinute(), event.getEventSecond()) + timeOffset;
uploadCareportalEvent(timestamp, CareportalEvent.PUMPBATTERYCHANGE);
}
private void processOperatingModeChangedEvent(String serial, List<InsightPumpID> pumpStartedEvents, OperatingModeChangedEvent event) {
long timestamp = parseDate(event.getEventYear(), event.getEventMonth(), event.getEventDay(),
event.getEventHour(), event.getEventMinute(), event.getEventSecond()) + timeOffset;
InsightPumpID pumpID = new InsightPumpID();
pumpID.eventID = event.getEventPosition();
pumpID.pumpSerial = serial;
pumpID.timestamp = timestamp;
switch (event.getNewValue()) {
case STARTED:
pumpID.eventType = "PumpStarted";
pumpStartedEvents.add(pumpID);
if (SP.getBoolean("insight_log_operating_mode_changes", false))
logNote(timestamp, MainApp.gs(R.string.pump_started));
break;
case STOPPED:
pumpID.eventType = "PumpStopped";
if (SP.getBoolean("insight_log_operating_mode_changes", false))
logNote(timestamp, MainApp.gs(R.string.pump_stopped));
break;
case PAUSED:
pumpID.eventType = "PumpPaused";
if (SP.getBoolean("insight_log_operating_mode_changes", false))
logNote(timestamp, MainApp.gs(R.string.pump_paused));
break;
}
MainApp.getDbHelper().createOrUpdate(pumpID);
}
private void processStartOfTBREvent(String serial, List<TemporaryBasal> temporaryBasals, StartOfTBREvent event) {
long timestamp = parseDate(event.getEventYear(), event.getEventMonth(), event.getEventDay(),
event.getEventHour(), event.getEventMinute(), event.getEventSecond()) + timeOffset;
InsightPumpID pumpID = new InsightPumpID();
pumpID.eventID = event.getEventPosition();
pumpID.pumpSerial = serial;
pumpID.timestamp = timestamp;
pumpID.eventType = "StartOfTBR";
MainApp.getDbHelper().createOrUpdate(pumpID);
TemporaryBasal temporaryBasal = new TemporaryBasal();
temporaryBasal.durationInMinutes = event.getDuration();
temporaryBasal.source = Source.PUMP;
temporaryBasal.pumpId = pumpID.id;
temporaryBasal.percentRate = event.getAmount();
temporaryBasal.isAbsolute = false;
temporaryBasal.date = timestamp;
temporaryBasals.add(temporaryBasal);
}
private void processEndOfTBREvent(String serial, List<TemporaryBasal> temporaryBasals, EndOfTBREvent event) {
long timestamp = parseDate(event.getEventYear(), event.getEventMonth(), event.getEventDay(),
event.getEventHour(), event.getEventMinute(), event.getEventSecond()) + timeOffset;
InsightPumpID pumpID = new InsightPumpID();
pumpID.eventID = event.getEventPosition();
pumpID.pumpSerial = serial;
pumpID.eventType = "EndOfTBR";
pumpID.timestamp = timestamp;
MainApp.getDbHelper().createOrUpdate(pumpID);
TemporaryBasal temporaryBasal = new TemporaryBasal();
temporaryBasal.durationInMinutes = 0;
temporaryBasal.source = Source.PUMP;
temporaryBasal.pumpId = pumpID.id;
temporaryBasal.date = timestamp;
temporaryBasals.add(temporaryBasal);
}
private void processBolusProgrammedEvent(String serial, BolusProgrammedEvent event) {
long timestamp = parseDate(event.getEventYear(), event.getEventMonth(), event.getEventDay(),
event.getEventHour(), event.getEventMinute(), event.getEventSecond()) + timeOffset;
InsightBolusID bolusID = MainApp.getDbHelper().getInsightBolusID(serial, event.getBolusID(), timestamp);
if (bolusID != null && bolusID.endID != null) {
bolusID.startID = event.getEventPosition();
MainApp.getDbHelper().createOrUpdate(bolusID);
return;
}
if (bolusID == null || bolusID.startID != null) {
bolusID = new InsightBolusID();
bolusID.timestamp = timestamp;
bolusID.bolusID = event.getBolusID();
bolusID.pumpSerial = serial;
}
bolusID.startID = event.getEventPosition();
MainApp.getDbHelper().createOrUpdate(bolusID);
if (event.getBolusType() == BolusType.STANDARD || event.getBolusType() == BolusType.MULTIWAVE) {
DetailedBolusInfo detailedBolusInfo = new DetailedBolusInfo();
detailedBolusInfo.date = bolusID.timestamp;
detailedBolusInfo.source = Source.PUMP;
detailedBolusInfo.pumpId = bolusID.id;
detailedBolusInfo.insulin = event.getImmediateAmount();
TreatmentsPlugin.getPlugin().addToHistoryTreatment(detailedBolusInfo, true);
}
if ((event.getBolusType() == BolusType.EXTENDED || event.getBolusType() == BolusType.MULTIWAVE)) {
ExtendedBolus extendedBolus = new ExtendedBolus();
extendedBolus.date = bolusID.timestamp;
extendedBolus.source = Source.PUMP;
extendedBolus.durationInMinutes = event.getDuration();
extendedBolus.insulin = event.getExtendedAmount();
extendedBolus.pumpId = bolusID.id;
if (ProfileFunctions.getInstance().getProfile(extendedBolus.date) != null)
TreatmentsPlugin.getPlugin().addToHistoryExtendedBolus(extendedBolus);
}
}
private void processBolusDeliveredEvent(String serial, BolusDeliveredEvent event) {
long timestamp = parseDate(event.getEventYear(), event.getEventMonth(), event.getEventDay(),
event.getEventHour(), event.getEventMinute(), event.getEventSecond()) + timeOffset;
long startTimestamp = parseRelativeDate(event.getEventYear(), event.getEventMonth(), event.getEventDay(), event.getEventHour(),
event.getEventMinute(), event.getEventSecond(), event.getStartHour(), event.getStartMinute(), event.getStartSecond()) + timeOffset;
InsightBolusID bolusID = MainApp.getDbHelper().getInsightBolusID(serial, event.getBolusID(), timestamp);
if (bolusID == null || bolusID.endID != null) {
bolusID = new InsightBolusID();
bolusID.timestamp = startTimestamp;
bolusID.bolusID = event.getBolusID();
bolusID.pumpSerial = serial;
}
bolusID.endID = event.getEventPosition();
MainApp.getDbHelper().createOrUpdate(bolusID);
if (event.getBolusType() == BolusType.STANDARD || event.getBolusType() == BolusType.MULTIWAVE) {
DetailedBolusInfo detailedBolusInfo = new DetailedBolusInfo();
detailedBolusInfo.date = bolusID.timestamp;
detailedBolusInfo.source = Source.PUMP;
detailedBolusInfo.pumpId = bolusID.id;
detailedBolusInfo.insulin = event.getImmediateAmount();
TreatmentsPlugin.getPlugin().addToHistoryTreatment(detailedBolusInfo, true);
}
if (event.getBolusType() == BolusType.EXTENDED || event.getBolusType() == BolusType.MULTIWAVE) {
if (event.getDuration() == 0) {
ExtendedBolus extendedBolus = MainApp.getDbHelper().getExtendedBolusByPumpId(bolusID.id);
if (extendedBolus != null) {
final String _id = extendedBolus._id;
if (NSUpload.isIdValid(_id)) NSUpload.removeCareportalEntryFromNS(_id);
else UploadQueue.removeID("dbAdd", _id);
MainApp.getDbHelper().delete(extendedBolus);
}
} else {
ExtendedBolus extendedBolus = new ExtendedBolus();
extendedBolus.date = bolusID.timestamp;
extendedBolus.source = Source.PUMP;
extendedBolus.durationInMinutes = event.getDuration();
extendedBolus.insulin = event.getExtendedAmount();
extendedBolus.pumpId = bolusID.id;
if (ProfileFunctions.getInstance().getProfile(extendedBolus.date) != null)
TreatmentsPlugin.getPlugin().addToHistoryExtendedBolus(extendedBolus);
}
}
}
private void processOccurrenceOfAlertEvent(OccurrenceOfAlertEvent event) {
if (!SP.getBoolean("insight_log_alerts", false)) return;
long timestamp = parseDate(event.getEventYear(), event.getEventMonth(), event.getEventDay(),
event.getEventHour(), event.getEventMinute(), event.getEventSecond()) + timeOffset;
Integer code = null;
Integer title = null;
switch (event.getAlertType()) {
case ERROR_6:
code = R.string.alert_e6_code;
title = R.string.alert_e6_title;
break;
case ERROR_10:
code = R.string.alert_e10_code;
title = R.string.alert_e10_title;
break;
case ERROR_13:
code = R.string.alert_e13_code;
title = R.string.alert_e13_title;
break;
case MAINTENANCE_20:
code = R.string.alert_m20_code;
title = R.string.alert_m20_title;
break;
case MAINTENANCE_21:
code = R.string.alert_m21_code;
title = R.string.alert_m21_title;
break;
case MAINTENANCE_22:
code = R.string.alert_m22_code;
title = R.string.alert_m22_title;
break;
case MAINTENANCE_23:
code = R.string.alert_m23_code;
title = R.string.alert_m23_title;
break;
case MAINTENANCE_24:
code = R.string.alert_m24_code;
title = R.string.alert_m24_title;
break;
case MAINTENANCE_25:
code = R.string.alert_m25_code;
title = R.string.alert_m25_title;
break;
case MAINTENANCE_26:
code = R.string.alert_m26_code;
title = R.string.alert_m26_title;
break;
case MAINTENANCE_27:
code = R.string.alert_m27_code;
title = R.string.alert_m27_title;
break;
case MAINTENANCE_28:
code = R.string.alert_m28_code;
title = R.string.alert_m28_title;
break;
case MAINTENANCE_29:
code = R.string.alert_m29_code;
title = R.string.alert_m29_title;
break;
case MAINTENANCE_30:
code = R.string.alert_m30_code;
title = R.string.alert_m30_title;
break;
case WARNING_31:
code = R.string.alert_w31_code;
title = R.string.alert_w31_title;
break;
case WARNING_32:
code = R.string.alert_w32_code;
title = R.string.alert_w32_title;
break;
case WARNING_33:
code = R.string.alert_w33_code;
title = R.string.alert_w33_title;
break;
case WARNING_34:
code = R.string.alert_w34_code;
title = R.string.alert_w34_title;
break;
case WARNING_39:
code = R.string.alert_w39_code;
title = R.string.alert_w39_title;
break;
}
if (code != null)
logNote(timestamp, MainApp.gs(R.string.insight_alert_formatter, MainApp.gs(code), MainApp.gs(title)));
}
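    // The pump reports naive wall-clock values; they are interpreted as UTC here and
    // later corrected with timeOffset to obtain real epoch timestamps.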
private long parseDate(int year, int month, int day, int hour, int minute, int second) {
Calendar calendar = Calendar.getInstance(TimeZone.getTimeZone("UTC"));
calendar.set(Calendar.YEAR, year);
calendar.set(Calendar.MONTH, month - 1);
calendar.set(Calendar.DAY_OF_MONTH, day);
calendar.set(Calendar.HOUR_OF_DAY, hour);
calendar.set(Calendar.MINUTE, minute);
calendar.set(Calendar.SECOND, second);
return calendar.getTimeInMillis();
}
private void logNote(long date, String note) {
try {
if (MainApp.getDbHelper().getCareportalEventFromTimestamp(date) != null)
return;
JSONObject data = new JSONObject();
String enteredBy = SP.getString("careportal_enteredby", "");
if (!enteredBy.equals("")) data.put("enteredBy", enteredBy);
data.put("created_at", DateUtil.toISOString(date));
data.put("eventType", CareportalEvent.NOTE);
data.put("notes", note);
CareportalEvent careportalEvent = new CareportalEvent();
careportalEvent.date = date;
careportalEvent.source = Source.USER;
careportalEvent.eventType = CareportalEvent.NOTE;
careportalEvent.json = data.toString();
MainApp.getDbHelper().createOrUpdate(careportalEvent);
NSUpload.uploadCareportalEntryToNS(data);
} catch (JSONException e) {
log.error("Unhandled exception", e);
}
}
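    // Combines an event date with a start time of day; if the start time of day is not
    // earlier than the event's time of day, the start happened on the previous day.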
private long parseRelativeDate(int year, int month, int day, int hour, int minute, int second, int relativeHour, int relativeMinute, int relativeSecond) {
        if (relativeHour * 60 * 60 + relativeMinute * 60 + relativeSecond >= hour * 60 * 60 + minute * 60 + second)
day--;
Calendar calendar = Calendar.getInstance(TimeZone.getTimeZone("UTC"));
calendar.set(Calendar.YEAR, year);
calendar.set(Calendar.MONTH, month - 1);
calendar.set(Calendar.DAY_OF_MONTH, day);
calendar.set(Calendar.HOUR_OF_DAY, relativeHour);
calendar.set(Calendar.MINUTE, relativeMinute);
calendar.set(Calendar.SECOND, relativeSecond);
return calendar.getTimeInMillis();
}
private void uploadCareportalEvent(long date, String event) {
if (MainApp.getDbHelper().getCareportalEventFromTimestamp(date) != null)
return;
try {
JSONObject data = new JSONObject();
String enteredBy = SP.getString("careportal_enteredby", "");
if (!enteredBy.equals("")) data.put("enteredBy", enteredBy);
data.put("created_at", DateUtil.toISOString(date));
data.put("eventType", event);
CareportalEvent careportalEvent = new CareportalEvent();
careportalEvent.date = date;
careportalEvent.source = Source.USER;
careportalEvent.eventType = event;
careportalEvent.json = data.toString();
MainApp.getDbHelper().createOrUpdate(careportalEvent);
NSUpload.uploadCareportalEntryToNS(data);
} catch (JSONException e) {
log.error("Unhandled exception", e);
}
}
@Override
public Constraint<Integer> applyBasalPercentConstraints(Constraint<Integer> percentRate, Profile profile) {
percentRate.setIfGreater(0, String.format(MainApp.gs(R.string.limitingpercentrate), 0, MainApp.gs(R.string.itmustbepositivevalue)), this);
percentRate.setIfSmaller(getPumpDescription().maxTempPercent, String.format(MainApp.gs(R.string.limitingpercentrate), getPumpDescription().maxTempPercent, MainApp.gs(R.string.pumplimit)), this);
return percentRate;
}
@Override
public Constraint<Double> applyBolusConstraints(Constraint<Double> insulin) {
if (!limitsFetched) return insulin;
insulin.setIfSmaller(maximumBolusAmount, String.format(MainApp.gs(R.string.limitingbolus), maximumBolusAmount, MainApp.gs(R.string.pumplimit)), this);
if (insulin.value() < minimumBolusAmount) {
//TODO: Add function to Constraints or use different approach
// This only works if the interface of the InsightPlugin is called last.
// If not, another constraint could theoretically set the value between 0 and minimumBolusAmount
insulin.set(0d, String.format(MainApp.gs(R.string.limitingbolus), minimumBolusAmount, MainApp.gs(R.string.pumplimit)), this);
}
return insulin;
}
@Override
public Constraint<Double> applyExtendedBolusConstraints(Constraint<Double> insulin) {
return applyBolusConstraints(insulin);
}
@Override
public void onStateChanged(InsightState state) {
if (state == InsightState.CONNECTED) {
statusLoaded = false;
new Handler(Looper.getMainLooper()).post(() -> RxBus.INSTANCE.send(new EventDismissNotification(Notification.INSIGHT_TIMEOUT_DURING_HANDSHAKE)));
} else if (state == InsightState.NOT_PAIRED) {
connectionService.withdrawConnectionRequest(this);
statusLoaded = false;
profileBlocks = null;
operatingMode = null;
batteryStatus = null;
cartridgeStatus = null;
totalDailyDose = null;
activeBasalRate = null;
activeTBR = null;
activeBoluses = null;
tbrOverNotificationBlock = null;
new Handler(Looper.getMainLooper()).post(() -> RxBus.INSTANCE.send(new EventRefreshOverview("LocalInsightPlugin::onStateChanged")));
}
new Handler(Looper.getMainLooper()).post(() -> RxBus.INSTANCE.send(new EventLocalInsightUpdateGUI()));
}
@Override
public void onPumpPaired() {
ConfigBuilderPlugin.getPlugin().getCommandQueue().readStatus("Pump paired", null);
}
@Override
public void onTimeoutDuringHandshake() {
Notification notification = new Notification(Notification.INSIGHT_TIMEOUT_DURING_HANDSHAKE, MainApp.gs(R.string.timeout_during_handshake), Notification.URGENT);
new Handler(Looper.getMainLooper()).post(() -> RxBus.INSTANCE.send(new EventNewNotification(notification)));
}
@Override
public boolean canHandleDST() {
return true;
}
@Override
public void timeDateOrTimeZoneChanged() {
}
} | 1 | 32,144 | You didn't set a source here, so it will probably fail. | MilosKozak-AndroidAPS | java |
@@ -52,7 +52,7 @@ func TestHandlerSuccess(t *testing.T) {
headers.Set(BaggageHeaderPrefix+"BAR", "baz")
rpcHandler := transporttest.NewMockHandler(mockCtrl)
- httpHandler := handler{rpcHandler}
+ httpHandler := handler{rpcHandler, transport.NoDeps}
rpcHandler.EXPECT().Handle(
transporttest.NewContextMatcher(t, | 1 | // Copyright (c) 2016 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
package http
import (
"bytes"
"fmt"
"io/ioutil"
"net/http"
"net/http/httptest"
"testing"
"time"
"github.com/yarpc/yarpc-go/encoding/raw"
"github.com/yarpc/yarpc-go/transport"
"github.com/yarpc/yarpc-go/transport/transporttest"
"github.com/golang/mock/gomock"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestHandlerSuccess(t *testing.T) {
mockCtrl := gomock.NewController(t)
defer mockCtrl.Finish()
headers := make(http.Header)
headers.Set(CallerHeader, "moe")
headers.Set(EncodingHeader, "raw")
headers.Set(TTLMSHeader, "1000")
headers.Set(ProcedureHeader, "nyuck")
headers.Set(ServiceHeader, "curly")
headers.Set(BaggageHeaderPrefix+"Foo", "bar")
headers.Set(BaggageHeaderPrefix+"BAR", "baz")
rpcHandler := transporttest.NewMockHandler(mockCtrl)
httpHandler := handler{rpcHandler}
rpcHandler.EXPECT().Handle(
transporttest.NewContextMatcher(t,
transporttest.ContextTTL(time.Second),
transporttest.ContextBaggage{
"foo": "bar",
"bar": "baz",
},
),
transport.Options{},
transporttest.NewRequestMatcher(
t, &transport.Request{
Caller: "moe",
Service: "curly",
Encoding: raw.Encoding,
Procedure: "nyuck",
Body: bytes.NewReader([]byte("Nyuck Nyuck")),
},
),
gomock.Any(),
).Return(nil)
req := &http.Request{
Method: "POST",
Header: headers,
Body: ioutil.NopCloser(bytes.NewReader([]byte("Nyuck Nyuck"))),
}
rw := httptest.NewRecorder()
httpHandler.ServeHTTP(rw, req)
code := rw.Code
	assert.Equal(t, 200, code, "expected 200 code")
	assert.Equal(t, "", rw.Body.String())
}
func TestHandlerHeaders(t *testing.T) {
mockCtrl := gomock.NewController(t)
defer mockCtrl.Finish()
tests := []struct {
giveHeaders http.Header
wantTTL time.Duration
wantHeaders map[string]string
wantBaggage map[string]string
}{
{
giveHeaders: http.Header{
TTLMSHeader: {"1000"},
"Rpc-Header-Foo": {"bar"},
"Context-Foo": {"Baz"},
},
wantTTL: time.Second,
wantHeaders: map[string]string{
"foo": "bar",
},
wantBaggage: map[string]string{
"foo": "Baz",
},
},
{
giveHeaders: http.Header{
TTLMSHeader: {"100"},
"Rpc-Foo": {"ignored"},
"ContextFoo": {"ignored"},
"Context-Rpc-Service": {"hello"},
},
wantTTL: 100 * time.Millisecond,
wantHeaders: map[string]string{},
wantBaggage: map[string]string{"rpc-service": "hello"},
},
}
for _, tt := range tests {
rpcHandler := transporttest.NewMockHandler(mockCtrl)
httpHandler := handler{rpcHandler}
rpcHandler.EXPECT().Handle(
transporttest.NewContextMatcher(t,
transporttest.ContextTTL(tt.wantTTL),
transporttest.ContextBaggage(tt.wantBaggage),
),
transport.Options{},
transporttest.NewRequestMatcher(t,
&transport.Request{
Caller: "caller",
Service: "service",
Encoding: raw.Encoding,
Procedure: "hello",
Headers: transport.HeadersFromMap(tt.wantHeaders),
Body: bytes.NewReader([]byte("world")),
}),
gomock.Any(),
).Return(nil)
headers := http.Header{}
for k, vs := range tt.giveHeaders {
for _, v := range vs {
headers.Add(k, v)
}
}
headers.Set(CallerHeader, "caller")
headers.Set(ServiceHeader, "service")
headers.Set(EncodingHeader, "raw")
headers.Set(ProcedureHeader, "hello")
req := &http.Request{
Method: "POST",
Header: headers,
Body: ioutil.NopCloser(bytes.NewReader([]byte("world"))),
}
rw := httptest.NewRecorder()
httpHandler.ServeHTTP(rw, req)
assert.Equal(t, 200, rw.Code, "expected 200 status code")
}
}
func TestHandlerFailures(t *testing.T) {
mockCtrl := gomock.NewController(t)
defer mockCtrl.Finish()
baseHeaders := make(http.Header)
baseHeaders.Set(CallerHeader, "somecaller")
baseHeaders.Set(EncodingHeader, "raw")
baseHeaders.Set(TTLMSHeader, "1000")
baseHeaders.Set(ProcedureHeader, "hello")
baseHeaders.Set(ServiceHeader, "fake")
headersWithBadTTL := headerCopyWithout(baseHeaders, TTLMSHeader)
headersWithBadTTL.Set(TTLMSHeader, "not a number")
tests := []struct {
req *http.Request
msg string
}{
{&http.Request{Method: "GET"}, "404 page not found\n"},
{
&http.Request{
Method: "POST",
Header: headerCopyWithout(baseHeaders, CallerHeader),
},
"BadRequest: missing caller name\n",
},
{
&http.Request{
Method: "POST",
Header: headerCopyWithout(baseHeaders, ServiceHeader),
},
"BadRequest: missing service name\n",
},
{
&http.Request{
Method: "POST",
Header: headerCopyWithout(baseHeaders, ProcedureHeader),
},
"BadRequest: missing procedure\n",
},
{
&http.Request{
Method: "POST",
Header: headerCopyWithout(baseHeaders, TTLMSHeader),
},
"BadRequest: missing TTL\n",
},
{
&http.Request{
Method: "POST",
},
"BadRequest: missing service name, procedure, caller name, TTL, and encoding\n",
},
{
&http.Request{
Method: "POST",
Header: headersWithBadTTL,
},
`BadRequest: invalid TTL "not a number" for procedure "hello" of service "fake": must be positive integer` + "\n",
},
}
for _, tt := range tests {
req := tt.req
if req.Body == nil {
req.Body = ioutil.NopCloser(bytes.NewReader([]byte{}))
}
h := handler{transporttest.NewMockHandler(mockCtrl)}
rw := httptest.NewRecorder()
h.ServeHTTP(rw, tt.req)
code := rw.Code
assert.True(t, code >= 400 && code < 500, "expected 400 level code")
		assert.Equal(t, tt.msg, rw.Body.String())
}
}
func TestHandlerInternalFailure(t *testing.T) {
mockCtrl := gomock.NewController(t)
defer mockCtrl.Finish()
headers := make(http.Header)
headers.Set(CallerHeader, "somecaller")
headers.Set(EncodingHeader, "raw")
headers.Set(TTLMSHeader, "1000")
headers.Set(ProcedureHeader, "hello")
headers.Set(ServiceHeader, "fake")
request := http.Request{
Method: "POST",
Header: headers,
Body: ioutil.NopCloser(bytes.NewReader([]byte{})),
}
rpcHandler := transporttest.NewMockHandler(mockCtrl)
rpcHandler.EXPECT().Handle(
transporttest.NewContextMatcher(t, transporttest.ContextTTL(time.Second)),
transport.Options{},
transporttest.NewRequestMatcher(
t, &transport.Request{
Caller: "somecaller",
Service: "fake",
Encoding: raw.Encoding,
Procedure: "hello",
Body: bytes.NewReader([]byte{}),
},
),
gomock.Any(),
).Return(fmt.Errorf("great sadness"))
httpHandler := handler{rpcHandler}
httpResponse := httptest.NewRecorder()
httpHandler.ServeHTTP(httpResponse, &request)
code := httpResponse.Code
assert.True(t, code >= 500 && code < 600, "expected 500 level response")
assert.Equal(t,
`UnexpectedError: error for procedure "hello" of service "fake": great sadness`+"\n",
httpResponse.Body.String())
}
func headerCopyWithout(headers http.Header, names ...string) http.Header {
newHeaders := make(http.Header)
for k, vs := range headers {
for _, v := range vs {
newHeaders.Add(k, v)
}
}
for _, k := range names {
newHeaders.Del(k)
}
return newHeaders
}
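// Illustrative trace (editor's note, hypothetical call): headerCopyWithout(baseHeaders, TTLMSHeader)
// deep-copies every key/value pair and then deletes only the TTL header, so each
// failure case in TestHandlerFailures starts from the same untouched base headers.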
func TestResponseWriter(t *testing.T) {
recorder := httptest.NewRecorder()
writer := newResponseWriter(recorder)
headers := transport.HeadersFromMap(map[string]string{
"foo": "bar",
"shard-key": "123",
})
writer.AddHeaders(headers)
_, err := writer.Write([]byte("hello"))
require.NoError(t, err)
assert.Equal(t, "bar", recorder.Header().Get("rpc-header-foo"))
assert.Equal(t, "123", recorder.Header().Get("rpc-header-shard-key"))
assert.Equal(t, "hello", recorder.Body.String())
}
| 1 | 10,478 | Might be easier to just do `handler{Handler: rpcHandler}` since zero-value of `Deps` is now valid. | yarpc-yarpc-go | go |
@@ -15,6 +15,10 @@ module Faker
def quote
fetch('movie.quote')
end
+
+ def title
+ fetch('movie.title')
+ end
end
end
end | 1 | # frozen_string_literal: true
module Faker
class Movie < Base
class << self
##
# Produces a quote from a movie.
#
# @return [String]
#
# @example
# Faker::Movie.quote #=> "Bumble bee tuna"
#
# @faker.version 1.8.1
def quote
fetch('movie.quote')
end
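      # Doc sketch for the #title method added in the patch above (the example
      # output and version tag are placeholders, not taken from the gem):
      #
      # @return [String]
      #
      # @example
      #   Faker::Movie.title #=> "The Lord of the Rings: The Fellowship of the Ring"
      #
      # @faker.version next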
end
end
end
| 1 | 9,360 | Could you please add docs for this method? | faker-ruby-faker | rb |
@@ -132,9 +132,16 @@ public abstract class AbstractApexNode<T extends AstNode> extends AbstractNode i
@Override
public String toString() {
+ return getXPathNodeName();
+ }
+
+
+ @Override
+ public final String getXPathNodeName() {
return this.getClass().getSimpleName().replaceFirst("^AST", "");
}
+
public String getLocation() {
if (hasRealLoc()) {
return String.valueOf(node.getLoc()); | 1 | /**
* BSD-style license; for more info see http://pmd.sourceforge.net/license.html
*/
package net.sourceforge.pmd.lang.apex.ast;
import net.sourceforge.pmd.lang.ast.AbstractNode;
import net.sourceforge.pmd.lang.ast.Node;
import net.sourceforge.pmd.lang.ast.SourceCodePositioner;
import apex.jorje.data.Location;
import apex.jorje.data.Locations;
import apex.jorje.semantic.ast.AstNode;
import apex.jorje.semantic.exception.UnexpectedCodePathException;
public abstract class AbstractApexNode<T extends AstNode> extends AbstractNode implements ApexNode<T> {
protected final T node;
public AbstractApexNode(T node) {
super(node.getClass().hashCode());
this.node = node;
}
void calculateLineNumbers(SourceCodePositioner positioner) {
if (!hasRealLoc()) {
return;
}
Location loc = node.getLoc();
int startOffset = loc.getStartIndex();
int endOffset = loc.getEndIndex();
// end column will be interpreted as inclusive, while endOffset/endIndex
// is exclusive
endOffset -= 1;
this.beginLine = positioner.lineNumberFromOffset(startOffset);
this.beginColumn = positioner.columnFromOffset(this.beginLine, startOffset);
this.endLine = positioner.lineNumberFromOffset(endOffset);
this.endColumn = positioner.columnFromOffset(this.endLine, endOffset);
if (this.endColumn < 0) {
this.endColumn = 0;
}
}
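    // Worked example (hypothetical source "ab\ncd"): a node covering "cd" has
    // startIndex 3 and an exclusive endIndex of 5; subtracting 1 yields offset 4,
    // so endColumn points at 'd' and the reported range is inclusive on both ends.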
protected void handleSourceCode(String source) {
// default implementation does nothing
}
@Override
public int getBeginLine() {
if (this.beginLine > 0) {
return this.beginLine;
}
Node parent = jjtGetParent();
if (parent != null) {
return parent.getBeginLine();
}
throw new RuntimeException("Unable to determine beginning line of Node.");
}
@Override
public int getBeginColumn() {
if (this.beginColumn > 0) {
return this.beginColumn;
}
Node parent = jjtGetParent();
if (parent != null) {
return parent.getBeginColumn();
}
throw new RuntimeException("Unable to determine beginning column of Node.");
}
@Override
public int getEndLine() {
if (this.endLine > 0) {
return this.endLine;
}
Node parent = jjtGetParent();
if (parent != null) {
return parent.getEndLine();
}
throw new RuntimeException("Unable to determine ending line of Node.");
}
@Override
public int getEndColumn() {
if (this.endColumn > 0) {
return this.endColumn;
}
Node parent = jjtGetParent();
if (parent != null) {
return parent.getEndColumn();
}
throw new RuntimeException("Unable to determine ending column of Node.");
}
/**
     * Accept the visitor.
*/
public Object childrenAccept(ApexParserVisitor visitor, Object data) {
if (children != null) {
for (int i = 0; i < children.length; ++i) {
@SuppressWarnings("unchecked")
// we know that the children here are all ApexNodes
ApexNode<T> apexNode = (ApexNode<T>) children[i];
apexNode.jjtAccept(visitor, data);
}
}
return data;
}
public T getNode() {
return node;
}
protected boolean hasRealLoc() {
try {
Location loc = node.getLoc();
return loc != null && Locations.isReal(loc);
} catch (UnexpectedCodePathException e) {
return false;
} catch (IndexOutOfBoundsException e) {
// bug in apex-jorje? happens on some ReferenceExpression nodes
return false;
} catch (NullPointerException e) {
// bug in apex-jorje?
return false;
}
}
@Override
public String toString() {
return this.getClass().getSimpleName().replaceFirst("^AST", "");
}
public String getLocation() {
if (hasRealLoc()) {
return String.valueOf(node.getLoc());
} else {
return "no location";
}
}
}
| 1 | 13,515 | we should probably `@Deprecated` this implementation to be removed in PMD 7.0.0 | pmd-pmd | java |
@@ -242,7 +242,11 @@ module Bolt
end
def shell
- @shell ||= Bolt::Shell::Bash.new(target, self)
+ @shell ||= if target.options['login-shell'] == 'powershell'
+ Bolt::Shell::Powershell.new(target, self)
+ else
+ Bolt::Shell::Bash.new(target, self)
+ end
end
# This is used by the Bash shell to decide whether to `cd` before | 1 | # frozen_string_literal: true
require 'logging'
require 'shellwords'
require 'bolt/node/errors'
require 'bolt/node/output'
require 'bolt/util'
module Bolt
module Transport
class SSH < Simple
class Connection
attr_reader :logger, :user, :target
def initialize(target, transport_logger)
# lazy-load expensive gem code
require 'net/ssh'
require 'net/ssh/proxy/jump'
raise Bolt::ValidationError, "Target #{target.safe_name} does not have a host" unless target.host
@target = target
@load_config = target.options['load-config']
ssh_config = @load_config ? Net::SSH::Config.for(target.host) : {}
@user = @target.user || ssh_config[:user] || Etc.getlogin
@strict_host_key_checking = ssh_config[:strict_host_key_checking]
@logger = Logging.logger[@target.safe_name]
@transport_logger = transport_logger
@logger.debug("Initializing ssh connection to #{@target.safe_name}")
if target.options['private-key']&.instance_of?(String)
begin
Bolt::Util.validate_file('ssh key', target.options['private-key'])
rescue Bolt::FileError => e
@logger.warn(e.msg)
end
end
end
PAGEANT_NAME = "Pageant\0".encode(Encoding::UTF_16LE)
def connect
options = {
logger: @transport_logger,
non_interactive: true
}
if (key = target.options['private-key'])
if key.instance_of?(String)
options[:keys] = key
else
options[:key_data] = [key['key-data']]
end
end
options[:port] = target.port if target.port
options[:password] = target.password if target.password
# Support both net-ssh 4 and 5. We use 5 in packaging, but Beaker pins to 4 so we
# want the gem to be compatible with version 4.
options[:verify_host_key] = if target.options['host-key-check'].nil?
# Fall back to SSH behavior. This variable will only be set in net-ssh 5.3+.
if @strict_host_key_checking.nil? || @strict_host_key_checking
net_ssh_verifier(:always)
else
# SSH's behavior with StrictHostKeyChecking=no: adds new keys to known_hosts.
# If known_hosts points to /dev/null, then equivalent to :never where it
                                             # accepts any key because they're all new.
net_ssh_verifier(:accept_new_or_tunnel_local)
end
elsif target.options['host-key-check']
net_ssh_verifier(:always)
else
net_ssh_verifier(:never)
end
options[:timeout] = target.options['connect-timeout'] if target.options['connect-timeout']
options[:proxy] = Net::SSH::Proxy::Jump.new(target.options['proxyjump']) if target.options['proxyjump']
          # This option was added to address a discrepancy between net-ssh host-key-check and ssh(1)
# For the net-ssh 5.x series it defaults to true, in 6.x it will default to false, and will be removed in 7.x
# https://github.com/net-ssh/net-ssh/pull/663#issuecomment-469979931
options[:check_host_ip] = false if Net::SSH::VALID_OPTIONS.include?(:check_host_ip)
if @load_config
# Mirroring:
# https://github.com/net-ssh/net-ssh/blob/master/lib/net/ssh/authentication/agent.rb#L80
# https://github.com/net-ssh/net-ssh/blob/master/lib/net/ssh/authentication/pageant.rb#L403
if defined?(UNIXSocket) && UNIXSocket
if ENV['SSH_AUTH_SOCK'].to_s.empty?
@logger.debug { "Disabling use_agent in net-ssh: ssh-agent is not available" }
options[:use_agent] = false
end
elsif Bolt::Util.windows?
require 'Win32API' # case matters in this require!
# https://docs.microsoft.com/en-us/windows/desktop/api/winuser/nf-winuser-findwindoww
@find_window ||= Win32API.new('user32', 'FindWindowW', %w[P P], 'L')
if @find_window.call(nil, PAGEANT_NAME).to_i == 0
@logger.debug { "Disabling use_agent in net-ssh: pageant process not running" }
options[:use_agent] = false
end
end
else
# Disable ssh config and ssh-agent if requested via load_config
options[:config] = false
options[:use_agent] = false
end
@session = Net::SSH.start(target.host, @user, options)
@logger.debug { "Opened session" }
rescue Net::SSH::AuthenticationFailed => e
raise Bolt::Node::ConnectError.new(
e.message,
'AUTH_ERROR'
)
rescue Net::SSH::HostKeyError => e
raise Bolt::Node::ConnectError.new(
"Host key verification failed for #{target.safe_name}: #{e.message}",
'HOST_KEY_ERROR'
)
rescue Net::SSH::ConnectionTimeout
raise Bolt::Node::ConnectError.new(
"Timeout after #{target.options['connect-timeout']} seconds connecting to #{target.safe_name}",
'CONNECT_ERROR'
)
rescue StandardError => e
raise Bolt::Node::ConnectError.new(
"Failed to connect to #{target.safe_name}: #{e.message}",
'CONNECT_ERROR'
)
end
def disconnect
if @session && !@session.closed?
begin
Timeout.timeout(@target.options['disconnect-timeout']) { @session.close }
rescue Timeout::Error
@session.shutdown!
end
@logger.debug { "Closed session" }
end
end
def execute(command_str)
in_rd, in_wr = IO.pipe
out_rd, out_wr = IO.pipe
err_rd, err_wr = IO.pipe
th = Thread.new do
exit_code = nil
session_channel = @session.open_channel do |channel|
# Request a pseudo tty
channel.request_pty if target.options['tty']
channel.exec(command_str) do |_, success|
unless success
raise Bolt::Node::ConnectError.new(
"Could not execute command: #{command_str.inspect}",
'EXEC_ERROR'
)
end
channel.on_data do |_, data|
out_wr << data
end
channel.on_extended_data do |_, _, data|
err_wr << data
end
channel.on_request("exit-status") do |_, data|
exit_code = data.read_long
end
end
end
write_th = Thread.new do
chunk_size = 4096
eof = false
active = true
readable = false
while active && !eof
@session.loop(0.1) do
active = session_channel.active?
readable = select([in_rd], [], [], 0)
# Loop as long as the channel is still live and there's nothing to be written
active && !readable
end
if readable
if in_rd.eof?
session_channel.eof!
eof = true
else
to_write = in_rd.readpartial(chunk_size)
session_channel.send_data(to_write)
end
end
end
session_channel.wait
end
write_th.join
exit_code
ensure
write_th.terminate
in_rd.close
out_wr.close
err_wr.close
end
[in_wr, out_rd, err_rd, th]
end
def copy_file(source, destination)
# Do not log wrapper script content
@logger.debug { "Uploading #{source}, to #{destination}" } unless source.is_a?(StringIO)
@session.scp.upload!(source, destination, recursive: true)
rescue StandardError => e
raise Bolt::Node::FileError.new(e.message, 'WRITE_ERROR')
end
# This handles renaming Net::SSH verifiers between version 4.x and 5.x
# of the gem
def net_ssh_verifier(verifier)
case verifier
when :always
if defined?(Net::SSH::Verifiers::Always)
Net::SSH::Verifiers::Always.new
else
Net::SSH::Verifiers::Secure.new
end
when :never
if defined?(Net::SSH::Verifiers::Never)
Net::SSH::Verifiers::Never.new
else
Net::SSH::Verifiers::Null.new
end
when :accept_new_or_tunnel_local
if defined?(Net::SSH::Verifiers::AcceptNewOrLocalTunnel)
Net::SSH::Verifiers::AcceptNewOrLocalTunnel.new
else
Net::SSH::Verifiers::Lenient.new
end
end
end
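        # Usage sketch (editor's note): on net-ssh 5.x,
        #   options[:verify_host_key] = net_ssh_verifier(:always)
        # resolves to Net::SSH::Verifiers::Always, while the same call on net-ssh 4.x
        # falls back to Net::SSH::Verifiers::Secure, keeping connect() version-agnostic.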
def shell
@shell ||= Bolt::Shell::Bash.new(target, self)
end
# This is used by the Bash shell to decide whether to `cd` before
# executing commands as a run-as user
def reset_cwd?
true
end
end
end
end
end
| 1 | 14,458 | I wonder if 'unix' makes more sense for this now? I was thinking of putting up a 'cleanup' PR - I think I missed a few "remote" var names in the first PR, and want to reassess unifying the transport classes that just define `with_connection`. Renaming this could be part of that. | puppetlabs-bolt | rb |
@@ -52,8 +52,8 @@ namespace Microsoft.CodeAnalysis.Sarif.Converters
{
const string source = "<results> <cppcheck version=\"12.34\" /> <errors> </errors> </results>";
const string expected = @"{
- ""$schema"": ""http://json.schemastore.org/sarif-1.0.0-beta.5"",
- ""version"": ""1.0.0-beta.5"",
+ ""$schema"": ""http://json.schemastore.org/sarif-1.0.0"",
+ ""version"": ""1.0.0"",
""runs"": [
{
""tool"": { | 1 | // Copyright (c) Microsoft. All rights reserved.
// Licensed under the MIT license. See LICENSE file in the project root for full license information.
using System;
using System.IO;
using System.Xml;
using Microsoft.VisualStudio.TestTools.UnitTesting;
using Microsoft.CodeAnalysis.Sarif.Writers;
namespace Microsoft.CodeAnalysis.Sarif.Converters
{
[TestClass]
public class CppCheckConverterTests
{
[TestMethod]
[ExpectedException(typeof(ArgumentNullException))]
public void CppCheckConverter_Convert_NullInput()
{
CppCheckConverter converter = new CppCheckConverter();
converter.Convert(null, null);
}
[TestMethod]
[ExpectedException(typeof(ArgumentNullException))]
public void CppCheckConverter_Convert_NullOutput()
{
CppCheckConverter converter = new CppCheckConverter();
converter.Convert(new MemoryStream(), null);
}
[TestMethod]
[ExpectedException(typeof(ArgumentNullException))]
public void CppCheckConverter_Convert_NullLogTest()
{
CppCheckConverter converter = new CppCheckConverter();
converter.Convert(null, new ResultLogObjectWriter());
}
[TestMethod]
public void CppCheckConverter_ExtractsCppCheckVersion()
{
ResultLogObjectWriter results = Utilities.GetConverterObjects(new CppCheckConverter(),
"<results> <cppcheck version=\"12.34\" /> <errors /> </results>");
// We will transform the version above to a Semantic Versioning 2.0 form
Assert.AreEqual("12.34.0", results.Tool.Version);
}
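        // Editor's note with hypothetical inputs: the converter pads CppCheck's
        // two-part versions to Semantic Versioning 2.0 form, so "12.34" becomes
        // "12.34.0", while an already three-part "1.2.3" would pass through unchanged.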
[TestMethod]
public void CppCheckConverter_HandlesEmptyErrorsElement()
{
const string source = "<results> <cppcheck version=\"12.34\" /> <errors> </errors> </results>";
const string expected = @"{
""$schema"": ""http://json.schemastore.org/sarif-1.0.0-beta.5"",
""version"": ""1.0.0-beta.5"",
""runs"": [
{
""tool"": {
""name"": ""CppCheck"",
""version"": ""12.34.0""
},
""results"": []
}
]
}";
string resultJson = Utilities.GetConverterJson(new CppCheckConverter(), source);
Assert.AreEqual(expected, resultJson);
}
[TestMethod]
[ExpectedException(typeof(XmlException))]
public void CppCheckConverter_Invalid_RootNodeNotResults()
{
Utilities.GetConverterJson(new CppCheckConverter(), "<bad_root_node />");
}
[TestMethod]
[ExpectedException(typeof(XmlException))]
public void CppCheckConverter_Invalid_FirstFollowingNodeNotCppCheck()
{
Utilities.GetConverterJson(new CppCheckConverter(), "<results> <a_different_node /> </results>");
}
[TestMethod]
[ExpectedException(typeof(XmlException))]
public void CppCheckConverter_Invalid_MissingErrorsElement()
{
Utilities.GetConverterJson(new CppCheckConverter(), "<results> <cppcheck version=\"12.34\" /> </results>");
}
[TestMethod]
[ExpectedException(typeof(XmlException))]
public void CppCheckConverter_Invalid_MissingVersion()
{
Utilities.GetConverterJson(new CppCheckConverter(), "<results> <cppcheck /> <errors /> </results>");
}
}
}
| 1 | 10,874 | Do something similar to what was done in JsonTests (can you derive from JsonTests?) | microsoft-sarif-sdk | .cs |
@@ -15,4 +15,4 @@
#
# https://packaging.python.org/guides/packaging-namespace-packages/#pkgutil-style-namespace-packages
-__path__ = __import__('pkgutil').extend_path(__path__, __name__)
+__path__ = __import__('pkgutil').extend_path(__path__, __name__) # type: ignore | 1 | #
# Copyright (C) 2019 Databricks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# https://packaging.python.org/guides/packaging-namespace-packages/#pkgutil-style-namespace-packages
__path__ = __import__('pkgutil').extend_path(__path__, __name__)
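# Editor's note: extend_path scans sys.path for other installed distributions that
# declare this same package name and appends their directories, so several wheels
# can contribute submodules to one logical namespace package.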
| 1 | 8,527 | Yes, not much we can do here. | databricks-koalas | py |
@@ -24,16 +24,16 @@ import (
"strings"
"time"
- "go.uber.org/zap"
+ _ "knative.dev/pkg/metrics/testing"
cev2 "github.com/cloudevents/sdk-go/v2"
-
"github.com/cloudevents/sdk-go/v2/binding"
"github.com/cloudevents/sdk-go/v2/binding/transformer"
"github.com/cloudevents/sdk-go/v2/protocol"
"github.com/cloudevents/sdk-go/v2/protocol/http"
"github.com/google/knative-gcp/pkg/metrics"
"github.com/google/wire"
+ "go.uber.org/zap"
"knative.dev/eventing/pkg/kncloudevents"
"knative.dev/eventing/pkg/logging"
) | 1 | /*
Copyright 2020 Google LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package ingress
import (
"context"
"errors"
"fmt"
nethttp "net/http"
"strings"
"time"
"go.uber.org/zap"
cev2 "github.com/cloudevents/sdk-go/v2"
"github.com/cloudevents/sdk-go/v2/binding"
"github.com/cloudevents/sdk-go/v2/binding/transformer"
"github.com/cloudevents/sdk-go/v2/protocol"
"github.com/cloudevents/sdk-go/v2/protocol/http"
"github.com/google/knative-gcp/pkg/metrics"
"github.com/google/wire"
"knative.dev/eventing/pkg/kncloudevents"
"knative.dev/eventing/pkg/logging"
)
const (
// TODO(liu-cong) configurable timeout
decoupleSinkTimeout = 30 * time.Second
// EventArrivalTime is used to access the metadata stored on a
// CloudEvent to measure the time difference between when an event is
// received on a broker and before it is dispatched to the trigger function.
// The format is an RFC3339 time in string format. For example: 2019-08-26T23:38:17.834384404Z.
EventArrivalTime = "knativearrivaltime"
)
// HandlerSet provides a handler with a real HTTPMessageReceiver and pubsub MultiTopicDecoupleSink.
var HandlerSet wire.ProviderSet = wire.NewSet(
NewHandler,
NewHTTPMessageReceiver,
wire.Bind(new(HttpMessageReceiver), new(*kncloudevents.HttpMessageReceiver)),
NewMultiTopicDecoupleSink,
wire.Bind(new(DecoupleSink), new(*multiTopicDecoupleSink)),
NewPubsubClient,
NewPubsubDecoupleClient,
metrics.NewIngressReporter,
)
// DecoupleSink is an interface to send events to a decoupling sink (e.g., pubsub).
type DecoupleSink interface {
// Send sends the event from a broker to the corresponding decoupling sink.
Send(ctx context.Context, ns, broker string, event cev2.Event) protocol.Result
}
// HttpMessageReceiver is an interface to listen on http requests.
type HttpMessageReceiver interface {
StartListen(ctx context.Context, handler nethttp.Handler) error
}
// handler receives events and persists them to storage (pubsub).
type Handler struct {
// httpReceiver is an HTTP server to receive events.
httpReceiver HttpMessageReceiver
// decouple is the client to send events to a decouple sink.
decouple DecoupleSink
logger *zap.Logger
reporter *metrics.IngressReporter
}
// NewHandler creates a new ingress handler.
func NewHandler(ctx context.Context, httpReceiver HttpMessageReceiver, decouple DecoupleSink, reporter *metrics.IngressReporter) *Handler {
return &Handler{
httpReceiver: httpReceiver,
decouple: decouple,
reporter: reporter,
logger: logging.FromContext(ctx),
}
}
// Start blocks to receive events over HTTP.
func (h *Handler) Start(ctx context.Context) error {
return h.httpReceiver.StartListen(ctx, h)
}
// ServeHTTP implements net/http Handler interface method.
// 1. Performs basic validation of the request.
// 2. Parse request URL to get namespace and broker.
// 3. Convert request to event.
// 4. Send event to decouple sink.
func (h *Handler) ServeHTTP(response nethttp.ResponseWriter, request *nethttp.Request) {
h.logger.Debug("Serving http", zap.Any("headers", request.Header))
startTime := time.Now()
if request.Method != nethttp.MethodPost {
response.WriteHeader(nethttp.StatusMethodNotAllowed)
return
}
// Path should be in the form of "/<ns>/<broker>".
pieces := strings.Split(request.URL.Path, "/")
if len(pieces) != 3 {
msg := fmt.Sprintf("Malformed request path. want: '/<ns>/<broker>'; got: %v..", request.URL.Path)
h.logger.Info(msg)
nethttp.Error(response, msg, nethttp.StatusNotFound)
return
}
ns, broker := pieces[1], pieces[2]
event, err := h.toEvent(request)
if err != nil {
nethttp.Error(response, err.Error(), nethttp.StatusBadRequest)
return
}
event.SetExtension(EventArrivalTime, cev2.Timestamp{Time: time.Now()})
// Optimistically set status code to StatusAccepted. It will be updated if there is an error.
// According to the data plane spec (https://github.com/knative/eventing/blob/master/docs/spec/data-plane.md), a
// non-callable SINK (which broker is) MUST respond with 202 Accepted if the request is accepted.
statusCode := nethttp.StatusAccepted
ctx, cancel := context.WithTimeout(request.Context(), decoupleSinkTimeout)
defer cancel()
defer func() { h.reportMetrics(request.Context(), ns, broker, event, statusCode, startTime) }()
if res := h.decouple.Send(ctx, ns, broker, *event); !cev2.IsACK(res) {
msg := fmt.Sprintf("Error publishing to PubSub for broker %v/%v. event: %+v, err: %v.", ns, broker, event, res)
h.logger.Error(msg)
statusCode = nethttp.StatusInternalServerError
if errors.Is(res, ErrNotFound) {
statusCode = nethttp.StatusNotFound
}
nethttp.Error(response, msg, statusCode)
return
}
response.WriteHeader(statusCode)
}
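// Path-parsing example (editor's note, hypothetical request): POST /default/my-broker
// splits into ["", "default", "my-broker"], giving ns="default" and
// broker="my-broker"; anything else (e.g. /default/my-broker/extra) produces
// len(pieces) != 3 and is rejected with a 404 above.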
// toEvent converts an http request to an event.
func (h *Handler) toEvent(request *nethttp.Request) (*cev2.Event, error) {
message := http.NewMessageFromHttpRequest(request)
defer func() {
if err := message.Finish(nil); err != nil {
h.logger.Error("Failed to close message", zap.Any("message", message), zap.Error(err))
}
}()
// If encoding is unknown, the message is not an event.
if message.ReadEncoding() == binding.EncodingUnknown {
msg := fmt.Sprintf("Encoding is unknown. Not a cloud event? request: %+v", request)
h.logger.Debug(msg)
return nil, errors.New(msg)
}
event, err := binding.ToEvent(request.Context(), message, transformer.AddTimeNow)
if err != nil {
msg := fmt.Sprintf("Failed to convert request to event: %v", err)
h.logger.Error(msg)
return nil, errors.New(msg)
}
return event, nil
}
func (h *Handler) reportMetrics(ctx context.Context, ns, broker string, event *cev2.Event, statusCode int, start time.Time) {
args := metrics.IngressReportArgs{
Namespace: ns,
Broker: broker,
EventType: event.Type(),
ResponseCode: statusCode,
}
if err := h.reporter.ReportEventDispatchTime(ctx, args, time.Since(start)); err != nil {
h.logger.Warn("Failed to record metrics.", zap.Any("namespace", ns), zap.Any("broker", broker), zap.Error(err))
}
}
| 1 | 13,686 | Why do we need this? | google-knative-gcp | go |
@@ -0,0 +1,17 @@
+var bitcore = require('../bitcore');
+var Address = bitcore.Address;
+var bitcoreUtil = bitcore.util;
+var Script = bitcore.Script;
+var network = bitcore.networks.livenet;
+
+
+var script = ''; // write down your script here
+var s = Script.fromHumanReadable(script);
+var hash = bitcoreUtil.sha256ripe160(s.getBuffer());
+var version = network.addressScript;
+
+var addr = new Address(version, hash);
+var addrStr = addr.as('base58');
+
+// This outputs the "address" of thescript
+console.log(addrStr); | 1 | 1 | 12,483 | Would prefer to see an actual script here. | bitpay-bitcore | js |
|
@@ -191,7 +191,7 @@ public class FileHandler {
final long copied = Files.copy(from.toPath(), out);
final long length = from.length();
if (copied != length) {
- throw new IOException("Could not transfer all bytes.");
+ throw new IOException("Could not transfer all bytes of " + from.toPath());
}
}
} | 1 | // Licensed to the Software Freedom Conservancy (SFC) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The SFC licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.openqa.selenium.io;
import com.google.common.collect.Lists;
import com.google.common.io.Closeables;
import org.openqa.selenium.Platform;
import java.io.BufferedReader;
import java.io.File;
import java.io.FileOutputStream;
import java.io.FileReader;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.io.Reader;
import java.nio.file.Files;
import java.util.List;
/**
* Utility methods for common filesystem activities
*/
public class FileHandler {
public static File unzip(InputStream resource) throws IOException {
File output = TemporaryFilesystem.getDefaultTmpFS().createTempDir("unzip", "stream");
new Zip().unzip(resource, output);
return output;
}
public static void copyResource(File outputDir, Class<?> forClassLoader, String... names)
throws IOException {
Zip zip = new Zip();
for (String name : names) {
InputStream is = locateResource(forClassLoader, name);
try {
zip.unzipFile(outputDir, is, name);
} finally {
is.close();
}
}
}
private static InputStream locateResource(Class<?> forClassLoader, String name)
throws IOException {
String arch = System.getProperty("os.arch").toLowerCase() + "/";
List<String> alternatives =
Lists.newArrayList(name, "/" + name, arch + name, "/" + arch + name);
if (Platform.getCurrent().is(Platform.MAC)) {
alternatives.add("mac/" + name);
alternatives.add("/mac/" + name);
}
// First look using our own classloader
for (String possibility : alternatives) {
InputStream stream = FileHandler.class.getResourceAsStream(possibility);
if (stream != null) {
return stream;
}
stream = forClassLoader.getResourceAsStream(possibility);
if (stream != null) {
return stream;
}
}
throw new IOException("Unable to locate: " + name);
}
public static boolean createDir(File dir) throws IOException {
if ((dir.exists() || dir.mkdirs()) && dir.canWrite())
return true;
if (dir.exists()) {
FileHandler.makeWritable(dir);
return dir.canWrite();
}
    // Iterate up through the parent directories until we find one that exists,
    // then sink back down.
return createDir(dir.getParentFile());
}
public static boolean makeWritable(File file) throws IOException {
if (file.canWrite()) {
return true;
}
return file.setWritable(true);
}
public static boolean makeExecutable(File file) throws IOException {
if (canExecute(file)) {
return true;
}
return file.setExecutable(true);
}
public static Boolean canExecute(File file) {
return file.canExecute();
}
public static boolean isZipped(String fileName) {
return fileName.endsWith(".zip") || fileName.endsWith(".xpi");
}
public static boolean delete(File toDelete) {
boolean deleted = true;
if (toDelete.isDirectory()) {
File[] children = toDelete.listFiles();
if (children != null) {
for (File child : children) {
deleted &= child.canWrite() && delete(child);
}
}
}
return deleted && toDelete.canWrite() && toDelete.delete();
}
public static void copy(File from, File to) throws IOException {
copy(from, to, new NoFilter());
}
public static void copy(File source, File dest, String suffix) throws IOException {
copy(source, dest, suffix == null ? new NoFilter() : new FileSuffixFilter(suffix));
}
private static void copy(File source, File dest, Filter onlyCopy) throws IOException {
if (!source.exists()) {
return;
}
if (source.isDirectory()) {
copyDir(source, dest, onlyCopy);
} else {
copyFile(source, dest, onlyCopy);
}
}
private static void copyDir(File from, File to, Filter onlyCopy) throws IOException {
if (!onlyCopy.isRequired(from)) {
return;
}
// Create the target directory.
createDir(to);
// List children.
String[] children = from.list();
if (children == null) {
throw new IOException("Could not copy directory " + from.getPath());
}
for (String child : children) {
if (!".parentlock".equals(child) && !"parent.lock".equals(child)) {
copy(new File(from, child), new File(to, child), onlyCopy);
}
}
}
private static void copyFile(File from, File to, Filter onlyCopy) throws IOException {
if (!onlyCopy.isRequired(from)) {
return;
}
try (OutputStream out = new FileOutputStream(to)) {
final long copied = Files.copy(from.toPath(), out);
final long length = from.length();
if (copied != length) {
throw new IOException("Could not transfer all bytes.");
}
}
}
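  // Editor's note (hypothetical scenario): if another process truncates the source
  // while Files.copy is streaming it, fewer bytes than from.length() arrive and the
  // IOException above fires; the patch at the top of this record extends that message
  // with the offending path, e.g. "Could not transfer all bytes of /tmp/source.bin".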
/**
* Used by file operations to determine whether or not to make use of a file.
*/
public interface Filter {
/**
* @param file File to be considered.
     * @return Whether or not to make use of the file in this operation.
*/
boolean isRequired(File file);
}
private static class FileSuffixFilter implements Filter {
private final String suffix;
public FileSuffixFilter(String suffix) {
this.suffix = suffix;
}
public boolean isRequired(File file) {
return file.isDirectory() || file.getAbsolutePath().endsWith(suffix);
}
}
private static class NoFilter implements Filter {
public boolean isRequired(File file) {
return true;
}
}
public static String readAsString(File toRead) throws IOException {
Reader reader = null;
try {
reader = new BufferedReader(new FileReader(toRead));
StringBuilder builder = new StringBuilder();
char[] buffer = new char[4096];
int read;
while ((read = reader.read(buffer)) != -1) {
char[] target = new char[read];
System.arraycopy(buffer, 0, target, 0, read);
builder.append(target);
}
return builder.toString();
} finally {
Closeables.close(reader, false);
}
}
}
| 1 | 13,214 | seems reasonable to also want to include the 'to' location? | SeleniumHQ-selenium | java |
@@ -12,5 +12,4 @@ __all__ = [
'CustomDataset', 'XMLDataset', 'CocoDataset', 'VOCDataset', 'GroupSampler',
'DistributedGroupSampler', 'build_dataloader', 'to_tensor', 'random_scale',
'show_ann', 'get_dataset', 'ConcatDataset', 'RepeatDataset',
- 'ExtraAugmentation'
-]
+ 'ExtraAugmentation'] | 1 | from .custom import CustomDataset
from .xml_style import XMLDataset
from .coco import CocoDataset
from .voc import VOCDataset
from .loader import GroupSampler, DistributedGroupSampler, build_dataloader
from .utils import to_tensor, random_scale, show_ann, get_dataset
from .concat_dataset import ConcatDataset
from .repeat_dataset import RepeatDataset
from .extra_aug import ExtraAugmentation
__all__ = [
'CustomDataset', 'XMLDataset', 'CocoDataset', 'VOCDataset', 'GroupSampler',
'DistributedGroupSampler', 'build_dataloader', 'to_tensor', 'random_scale',
'show_ann', 'get_dataset', 'ConcatDataset', 'RepeatDataset',
'ExtraAugmentation'
]
| 1 | 17,418 | This change is unnecessary. | open-mmlab-mmdetection | py |
@@ -61,7 +61,11 @@ public final class MiniatureListWidget<E> extends ScrollPane {
this.mappedElements = new MappedList<Element<E>, E>(items, value -> {
Element newElement = converter.apply(value);
- newElement.setOnMouseClicked(event -> setOnMouseClicked.accept(newElement, event));
+ newElement.setOnMouseClicked(event -> {
+ unselectAll();
+ setOnMouseClicked.accept(newElement, event);
+ select(newElement);
+ });
return newElement;
}); | 1 | /*
* Copyright (C) 2015-2017 PÂRIS Quentin
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
package org.phoenicis.javafx.views.common.widget;
import javafx.beans.binding.Bindings;
import javafx.collections.FXCollections;
import javafx.collections.ObservableList;
import javafx.geometry.Pos;
import javafx.scene.CacheHint;
import javafx.scene.Node;
import javafx.scene.control.Label;
import javafx.scene.control.ScrollPane;
import javafx.scene.control.Tooltip;
import javafx.scene.effect.ColorAdjust;
import javafx.scene.image.Image;
import javafx.scene.input.MouseEvent;
import javafx.scene.layout.FlowPane;
import javafx.scene.layout.Pane;
import javafx.scene.layout.VBox;
import javafx.scene.shape.Rectangle;
import org.apache.commons.lang.builder.EqualsBuilder;
import org.apache.commons.lang.builder.HashCodeBuilder;
import org.phoenicis.apps.dto.ApplicationDTO;
import org.phoenicis.engines.dto.EngineVersionDTO;
import org.phoenicis.javafx.views.common.MappedList;
import org.phoenicis.library.dto.ShortcutDTO;
import java.io.ByteArrayInputStream;
import java.util.List;
import java.util.function.BiConsumer;
import java.util.function.Function;
public final class MiniatureListWidget<E> extends ScrollPane {
private final Pane content;
private Element selectedItem;
private ObservableList<E> items;
private ObservableList<Element<E>> mappedElements;
private MiniatureListWidget(Pane content, Function<E, Element> converter, BiConsumer<Element<E>, MouseEvent> setOnMouseClicked) {
super(content);
this.content = content;
this.items = FXCollections.observableArrayList();
this.mappedElements = new MappedList<Element<E>, E>(items, value -> {
Element newElement = converter.apply(value);
newElement.setOnMouseClicked(event -> setOnMouseClicked.accept(newElement, event));
return newElement;
});
Bindings.bindContent(content.getChildren(), this.mappedElements);
this.getStyleClass().add("rightPane");
this.content.getStyleClass().addAll("miniatureList");
this.getChildren().add(this.content);
this.setCache(true);
this.setCacheHint(CacheHint.QUALITY);
this.content.prefWidthProperty().bind(this.widthProperty());
this.setHbarPolicy(ScrollBarPolicy.NEVER);
}
    /**
     * Creates a new {@link MiniatureListWidget} of type <T>.
     *
     * @param converter A converter function that converts values of type T to Element
     * @param setOnMouseClicked A mouse listener function that is called whenever a user clicks on an element.
     *                          This listener receives the clicked element and the mouse event as parameters
     * @param <T> The type of items to be added to this MiniatureListWidget
     * @return The newly created MiniatureListWidget
     */
public static <T> MiniatureListWidget<T> create(Function<T, Element> converter, BiConsumer<Element<T>, MouseEvent> setOnMouseClicked) {
return new MiniatureListWidget<T>(new FlowPane(), converter, setOnMouseClicked);
}
public ObservableList<E> getItems() {
return this.items;
}
public void setItems(List<E> items) {
this.items.setAll(items);
}
public List<Element<E>> getElements() {
return this.mappedElements;
}
public void unselectAll() {
getElements().forEach(element -> element.getStyleClass().remove("selected"));
this.selectedItem = null;
}
public void select(Element selectedItem) {
selectedItem.getStyleClass().add("selected");
this.selectedItem = selectedItem;
}
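    // Editor's note on the patch above: each element's click handler now runs
    // unselectAll() before the user callback and select(newElement) after it, so at
    // most one element carries the "selected" style class at any time.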
public Element getSelectedItem() {
return selectedItem;
}
public static class Element<E> extends VBox {
private final String elementName;
private final E value;
public Element(E value, String elementName, Node miniature) {
super();
this.getStyleClass().add("miniatureListElement");
this.setAlignment(Pos.CENTER);
this.elementName = elementName;
this.value = value;
this.widthProperty().addListener((observable, oldValue, newValue) -> {
final Rectangle clip = new Rectangle(this.getWidth(), this.getHeight());
this.setClip(clip);
});
this.heightProperty().addListener((observable, oldValue, newValue) -> {
final Rectangle clip = new Rectangle(this.getWidth(), this.getHeight());
this.setClip(clip);
});
final Label label = new Label(elementName);
label.getStyleClass().add("miniatureText");
this.getChildren().add(miniature);
this.getChildren().add(label);
final Tooltip tooltip = new Tooltip(elementName);
Tooltip.install(miniature, tooltip);
}
public Element(String appsItem, Node miniature) {
this(null, appsItem, miniature);
}
public Element(String elementName) {
this(elementName, new StaticMiniature());
}
public static Element<ApplicationDTO> create(ApplicationDTO application) {
return new Element<ApplicationDTO>(application, application.getName(), application.getMiniatures().isEmpty() ? new StaticMiniature() : new StaticMiniature(new Image(new ByteArrayInputStream(application.getMiniatures().get(0)))));
}
public static Element<ShortcutDTO> create(ShortcutDTO shortcut) {
return new Element<ShortcutDTO>(shortcut, shortcut.getName(), shortcut.getMiniature() == null ? new StaticMiniature() : new StaticMiniature(new Image(new ByteArrayInputStream(shortcut.getMiniature()))));
}
public static Element<EngineVersionDTO> create(EngineVersionDTO engineVersion, boolean installed) {
Element<EngineVersionDTO> result = new Element<EngineVersionDTO>(engineVersion, engineVersion.getVersion(), new StaticMiniature(StaticMiniature.WINE_MINIATURE));
if (!installed) {
ColorAdjust grayscale = new ColorAdjust();
grayscale.setSaturation(-1);
result.setEffect(grayscale);
}
return result;
}
public E getValue() {
return this.value;
}
public String getName() {
return elementName;
}
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (o == null || getClass() != o.getClass()) {
return false;
}
Element<?> that = (Element<?>) o;
EqualsBuilder builder = new EqualsBuilder();
builder.append(value, that.value);
return builder.isEquals();
}
@Override
public int hashCode() {
HashCodeBuilder builder = new HashCodeBuilder();
builder.append(value);
return builder.toHashCode();
}
}
}
| 1 | 9,619 | Just as a note: Is it possible that the select css class is the same as the hover one? If this is true we should choose a different style for the selection css class, to make it more clear to the user I think. | PhoenicisOrg-phoenicis | java |
@@ -106,6 +106,11 @@ type ControllerOptions struct {
EnablePprof bool
DNS01CheckRetryPeriod time.Duration
+
+	// Annotations copied from Certificate -> CertificateRequest and from
+	// CertificateRequest -> Order. Each entry in the slice is treated as a
+	// prefix to match against annotation keys.
+ CopiedAnnotations []string
}
const ( | 1 | /*
Copyright 2020 The cert-manager Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package options
import (
"fmt"
"net"
"strings"
"time"
"github.com/spf13/pflag"
"k8s.io/apimachinery/pkg/util/sets"
cm "github.com/jetstack/cert-manager/pkg/apis/certmanager"
challengescontroller "github.com/jetstack/cert-manager/pkg/controller/acmechallenges"
orderscontroller "github.com/jetstack/cert-manager/pkg/controller/acmeorders"
shimgatewaycontroller "github.com/jetstack/cert-manager/pkg/controller/certificate-shim/gateways"
shimingresscontroller "github.com/jetstack/cert-manager/pkg/controller/certificate-shim/ingresses"
cracmecontroller "github.com/jetstack/cert-manager/pkg/controller/certificaterequests/acme"
crapprovercontroller "github.com/jetstack/cert-manager/pkg/controller/certificaterequests/approver"
crcacontroller "github.com/jetstack/cert-manager/pkg/controller/certificaterequests/ca"
crselfsignedcontroller "github.com/jetstack/cert-manager/pkg/controller/certificaterequests/selfsigned"
crvaultcontroller "github.com/jetstack/cert-manager/pkg/controller/certificaterequests/vault"
crvenaficontroller "github.com/jetstack/cert-manager/pkg/controller/certificaterequests/venafi"
"github.com/jetstack/cert-manager/pkg/controller/certificates/issuing"
"github.com/jetstack/cert-manager/pkg/controller/certificates/keymanager"
certificatesmetricscontroller "github.com/jetstack/cert-manager/pkg/controller/certificates/metrics"
"github.com/jetstack/cert-manager/pkg/controller/certificates/readiness"
"github.com/jetstack/cert-manager/pkg/controller/certificates/requestmanager"
"github.com/jetstack/cert-manager/pkg/controller/certificates/revisionmanager"
"github.com/jetstack/cert-manager/pkg/controller/certificates/trigger"
csracmecontroller "github.com/jetstack/cert-manager/pkg/controller/certificatesigningrequests/acme"
csrcacontroller "github.com/jetstack/cert-manager/pkg/controller/certificatesigningrequests/ca"
csrselfsignedcontroller "github.com/jetstack/cert-manager/pkg/controller/certificatesigningrequests/selfsigned"
csrvaultcontroller "github.com/jetstack/cert-manager/pkg/controller/certificatesigningrequests/vault"
csrvenaficontroller "github.com/jetstack/cert-manager/pkg/controller/certificatesigningrequests/venafi"
clusterissuerscontroller "github.com/jetstack/cert-manager/pkg/controller/clusterissuers"
issuerscontroller "github.com/jetstack/cert-manager/pkg/controller/issuers"
"github.com/jetstack/cert-manager/pkg/feature"
logf "github.com/jetstack/cert-manager/pkg/logs"
"github.com/jetstack/cert-manager/pkg/util"
utilfeature "github.com/jetstack/cert-manager/pkg/util/feature"
)
type ControllerOptions struct {
APIServerHost string
Kubeconfig string
KubernetesAPIQPS float32
KubernetesAPIBurst int
ClusterResourceNamespace string
Namespace string
LeaderElect bool
LeaderElectionNamespace string
LeaderElectionLeaseDuration time.Duration
LeaderElectionRenewDeadline time.Duration
LeaderElectionRetryPeriod time.Duration
controllers []string
ACMEHTTP01SolverImage string
ACMEHTTP01SolverResourceRequestCPU string
ACMEHTTP01SolverResourceRequestMemory string
ACMEHTTP01SolverResourceLimitsCPU string
ACMEHTTP01SolverResourceLimitsMemory string
ClusterIssuerAmbientCredentials bool
IssuerAmbientCredentials bool
// Default issuer/certificates details consumed by ingress-shim
DefaultIssuerName string
DefaultIssuerKind string
DefaultIssuerGroup string
DefaultAutoCertificateAnnotations []string
// Allows specifying a list of custom nameservers to perform DNS checks on.
DNS01RecursiveNameservers []string
// Allows controlling if recursive nameservers are only used for all checks.
// Normally authoritative nameservers are used for checking propagation.
DNS01RecursiveNameserversOnly bool
EnableCertificateOwnerRef bool
MaxConcurrentChallenges int
// The host and port address, separated by a ':', that the Prometheus server
// should expose metrics on.
MetricsListenAddress string
// EnablePprof controls whether net/http/pprof handlers are registered with
// the HTTP listener.
EnablePprof bool
DNS01CheckRetryPeriod time.Duration
}
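// shouldCopyAnnotation is an illustrative helper sketched by the editor (not part of
// upstream cert-manager): it shows how the CopiedAnnotations field introduced in the
// patch above could be matched, treating each configured entry as a key prefix.
func shouldCopyAnnotation(key string, prefixes []string) bool {
	for _, prefix := range prefixes {
		if strings.HasPrefix(key, prefix) {
			return true
		}
	}
	return false
}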
const (
defaultAPIServerHost = ""
defaultKubeconfig = ""
defaultKubernetesAPIQPS float32 = 20
defaultKubernetesAPIBurst = 50
defaultClusterResourceNamespace = "kube-system"
defaultNamespace = ""
defaultLeaderElect = true
defaultLeaderElectionNamespace = "kube-system"
defaultLeaderElectionLeaseDuration = 60 * time.Second
defaultLeaderElectionRenewDeadline = 40 * time.Second
defaultLeaderElectionRetryPeriod = 15 * time.Second
defaultClusterIssuerAmbientCredentials = true
defaultIssuerAmbientCredentials = false
defaultTLSACMEIssuerName = ""
defaultTLSACMEIssuerKind = "Issuer"
defaultTLSACMEIssuerGroup = cm.GroupName
defaultEnableCertificateOwnerRef = false
defaultDNS01RecursiveNameserversOnly = false
defaultMaxConcurrentChallenges = 60
defaultPrometheusMetricsServerAddress = "0.0.0.0:9402"
defaultDNS01CheckRetryPeriod = 10 * time.Second
)
var (
defaultACMEHTTP01SolverImage = fmt.Sprintf("quay.io/jetstack/cert-manager-acmesolver:%s", util.AppVersion)
defaultACMEHTTP01SolverResourceRequestCPU = "10m"
defaultACMEHTTP01SolverResourceRequestMemory = "64Mi"
defaultACMEHTTP01SolverResourceLimitsCPU = "100m"
defaultACMEHTTP01SolverResourceLimitsMemory = "64Mi"
defaultAutoCertificateAnnotations = []string{"kubernetes.io/tls-acme"}
allControllers = []string{
issuerscontroller.ControllerName,
clusterissuerscontroller.ControllerName,
certificatesmetricscontroller.ControllerName,
shimingresscontroller.ControllerName,
shimgatewaycontroller.ControllerName,
orderscontroller.ControllerName,
challengescontroller.ControllerName,
cracmecontroller.CRControllerName,
crapprovercontroller.ControllerName,
crcacontroller.CRControllerName,
crselfsignedcontroller.CRControllerName,
crvaultcontroller.CRControllerName,
crvenaficontroller.CRControllerName,
// certificate controllers
trigger.ControllerName,
issuing.ControllerName,
keymanager.ControllerName,
requestmanager.ControllerName,
readiness.ControllerName,
revisionmanager.ControllerName,
}
defaultEnabledControllers = []string{
issuerscontroller.ControllerName,
clusterissuerscontroller.ControllerName,
certificatesmetricscontroller.ControllerName,
shimingresscontroller.ControllerName,
orderscontroller.ControllerName,
challengescontroller.ControllerName,
cracmecontroller.CRControllerName,
crapprovercontroller.ControllerName,
crcacontroller.CRControllerName,
crselfsignedcontroller.CRControllerName,
crvaultcontroller.CRControllerName,
crvenaficontroller.CRControllerName,
// certificate controllers
trigger.ControllerName,
issuing.ControllerName,
keymanager.ControllerName,
requestmanager.ControllerName,
readiness.ControllerName,
revisionmanager.ControllerName,
}
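// Note: allControllers above is a superset of this default list; the gateway
// shim controller (shimgatewaycontroller) is registered but not enabled by
// default, so it only runs when explicitly requested via --controllers.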
experimentalCertificateSigningRequestControllers = []string{
csracmecontroller.CSRControllerName,
csrcacontroller.CSRControllerName,
csrselfsignedcontroller.CSRControllerName,
csrvenaficontroller.CSRControllerName,
csrvaultcontroller.CSRControllerName,
}
)
func NewControllerOptions() *ControllerOptions {
return &ControllerOptions{
APIServerHost: defaultAPIServerHost,
ClusterResourceNamespace: defaultClusterResourceNamespace,
KubernetesAPIQPS: defaultKubernetesAPIQPS,
KubernetesAPIBurst: defaultKubernetesAPIBurst,
Namespace: defaultNamespace,
LeaderElect: defaultLeaderElect,
LeaderElectionNamespace: defaultLeaderElectionNamespace,
LeaderElectionLeaseDuration: defaultLeaderElectionLeaseDuration,
LeaderElectionRenewDeadline: defaultLeaderElectionRenewDeadline,
LeaderElectionRetryPeriod: defaultLeaderElectionRetryPeriod,
controllers: defaultEnabledControllers,
ClusterIssuerAmbientCredentials: defaultClusterIssuerAmbientCredentials,
IssuerAmbientCredentials: defaultIssuerAmbientCredentials,
DefaultIssuerName: defaultTLSACMEIssuerName,
DefaultIssuerKind: defaultTLSACMEIssuerKind,
DefaultIssuerGroup: defaultTLSACMEIssuerGroup,
DefaultAutoCertificateAnnotations: defaultAutoCertificateAnnotations,
DNS01RecursiveNameservers: []string{},
DNS01RecursiveNameserversOnly: defaultDNS01RecursiveNameserversOnly,
EnableCertificateOwnerRef: defaultEnableCertificateOwnerRef,
MetricsListenAddress: defaultPrometheusMetricsServerAddress,
DNS01CheckRetryPeriod: defaultDNS01CheckRetryPeriod,
EnablePprof: false,
}
}
func (s *ControllerOptions) AddFlags(fs *pflag.FlagSet) {
fs.StringVar(&s.APIServerHost, "master", defaultAPIServerHost, ""+
"Optional apiserver host address to connect to. If not specified, autoconfiguration "+
"will be attempted.")
fs.StringVar(&s.Kubeconfig, "kubeconfig", defaultKubeconfig, ""+
"Paths to a kubeconfig. Only required if out-of-cluster.")
fs.Float32Var(&s.KubernetesAPIQPS, "kube-api-qps", defaultKubernetesAPIQPS, "indicates the maximum queries-per-second requests to the Kubernetes apiserver")
fs.IntVar(&s.KubernetesAPIBurst, "kube-api-burst", defaultKubernetesAPIBurst, "the maximum burst queries-per-second of requests sent to the Kubernetes apiserver")
fs.StringVar(&s.ClusterResourceNamespace, "cluster-resource-namespace", defaultClusterResourceNamespace, ""+
"Namespace to store resources owned by cluster scoped resources such as ClusterIssuer in. "+
"This must be specified if ClusterIssuers are enabled.")
fs.StringVar(&s.Namespace, "namespace", defaultNamespace, ""+
"If set, this limits the scope of cert-manager to a single namespace and ClusterIssuers are disabled. "+
"If not specified, all namespaces will be watched")
fs.BoolVar(&s.LeaderElect, "leader-elect", true, ""+
"If true, cert-manager will perform leader election between instances to ensure no more "+
"than one instance of cert-manager operates at a time")
fs.StringVar(&s.LeaderElectionNamespace, "leader-election-namespace", defaultLeaderElectionNamespace, ""+
"Namespace used to perform leader election. Only used if leader election is enabled")
fs.DurationVar(&s.LeaderElectionLeaseDuration, "leader-election-lease-duration", defaultLeaderElectionLeaseDuration, ""+
"The duration that non-leader candidates will wait after observing a leadership "+
"renewal until attempting to acquire leadership of a led but unrenewed leader "+
"slot. This is effectively the maximum duration that a leader can be stopped "+
"before it is replaced by another candidate. This is only applicable if leader "+
"election is enabled.")
fs.DurationVar(&s.LeaderElectionRenewDeadline, "leader-election-renew-deadline", defaultLeaderElectionRenewDeadline, ""+
"The interval between attempts by the acting master to renew a leadership slot "+
"before it stops leading. This must be less than or equal to the lease duration. "+
"This is only applicable if leader election is enabled.")
fs.DurationVar(&s.LeaderElectionRetryPeriod, "leader-election-retry-period", defaultLeaderElectionRetryPeriod, ""+
"The duration the clients should wait between attempting acquisition and renewal "+
"of a leadership. This is only applicable if leader election is enabled.")
fs.StringSliceVar(&s.controllers, "controllers", []string{"*"}, fmt.Sprintf(""+
"A list of controllers to enable. '--controllers=*' enables all "+
"on-by-default controllers, '--controllers=foo' enables just the controller "+
"named 'foo', '--controllers=*,-foo' disables the controller named "+
"'foo'.\nAll controllers: %s",
strings.Join(allControllers, ", ")))
fs.StringVar(&s.ACMEHTTP01SolverImage, "acme-http01-solver-image", defaultACMEHTTP01SolverImage, ""+
"The docker image to use to solve ACME HTTP01 challenges. You most likely will not "+
"need to change this parameter unless you are testing a new feature or developing cert-manager.")
fs.StringVar(&s.ACMEHTTP01SolverResourceRequestCPU, "acme-http01-solver-resource-request-cpu", defaultACMEHTTP01SolverResourceRequestCPU, ""+
"Defines the resource request CPU size when spawning new ACME HTTP01 challenge solver pods.")
fs.StringVar(&s.ACMEHTTP01SolverResourceRequestMemory, "acme-http01-solver-resource-request-memory", defaultACMEHTTP01SolverResourceRequestMemory, ""+
"Defines the resource request Memory size when spawning new ACME HTTP01 challenge solver pods.")
fs.StringVar(&s.ACMEHTTP01SolverResourceLimitsCPU, "acme-http01-solver-resource-limits-cpu", defaultACMEHTTP01SolverResourceLimitsCPU, ""+
"Defines the resource limits CPU size when spawning new ACME HTTP01 challenge solver pods.")
fs.StringVar(&s.ACMEHTTP01SolverResourceLimitsMemory, "acme-http01-solver-resource-limits-memory", defaultACMEHTTP01SolverResourceLimitsMemory, ""+
"Defines the resource limits Memory size when spawning new ACME HTTP01 challenge solver pods.")
fs.BoolVar(&s.ClusterIssuerAmbientCredentials, "cluster-issuer-ambient-credentials", defaultClusterIssuerAmbientCredentials, ""+
"Whether a cluster-issuer may make use of ambient credentials for issuers. 'Ambient Credentials' are credentials drawn from the environment, metadata services, or local files which are not explicitly configured in the ClusterIssuer API object. "+
"When this flag is enabled, the following sources for credentials are also used: "+
"AWS - All sources the Go SDK defaults to, notably including any EC2 IAM roles available via instance metadata.")
fs.BoolVar(&s.IssuerAmbientCredentials, "issuer-ambient-credentials", defaultIssuerAmbientCredentials, ""+
"Whether an issuer may make use of ambient credentials. 'Ambient Credentials' are credentials drawn from the environment, metadata services, or local files which are not explicitly configured in the Issuer API object. "+
"When this flag is enabled, the following sources for credentials are also used: "+
"AWS - All sources the Go SDK defaults to, notably including any EC2 IAM roles available via instance metadata.")
fs.StringSliceVar(&s.DefaultAutoCertificateAnnotations, "auto-certificate-annotations", defaultAutoCertificateAnnotations, ""+
"The annotation consumed by the ingress-shim controller to indicate a ingress is requesting a certificate")
fs.StringVar(&s.DefaultIssuerName, "default-issuer-name", defaultTLSACMEIssuerName, ""+
"Name of the Issuer to use when the tls is requested but issuer name is not specified on the ingress resource.")
fs.StringVar(&s.DefaultIssuerKind, "default-issuer-kind", defaultTLSACMEIssuerKind, ""+
"Kind of the Issuer to use when the tls is requested but issuer kind is not specified on the ingress resource.")
fs.StringVar(&s.DefaultIssuerGroup, "default-issuer-group", defaultTLSACMEIssuerGroup, ""+
"Group of the Issuer to use when the tls is requested but issuer group is not specified on the ingress resource.")
fs.StringSliceVar(&s.DNS01RecursiveNameservers, "dns01-recursive-nameservers",
[]string{}, "A list of comma separated dns server endpoints used for "+
"DNS01 check requests. This should be a list containing host and "+
"port, for example 8.8.8.8:53,8.8.4.4:53")
fs.BoolVar(&s.DNS01RecursiveNameserversOnly, "dns01-recursive-nameservers-only",
defaultDNS01RecursiveNameserversOnly,
"When true, cert-manager will only ever query the configured DNS resolvers "+
"to perform the ACME DNS01 self check. This is useful in DNS constrained "+
"environments, where access to authoritative nameservers is restricted. "+
"Enabling this option could cause the DNS01 self check to take longer "+
"due to caching performed by the recursive nameservers.")
fs.StringSliceVar(&s.DNS01RecursiveNameservers, "dns01-self-check-nameservers",
[]string{}, "A list of comma separated dns server endpoints used for "+
"DNS01 check requests. This should be a list containing host and port, "+
"for example 8.8.8.8:53,8.8.4.4:53")
fs.MarkDeprecated("dns01-self-check-nameservers", "Deprecated in favour of dns01-recursive-nameservers")
fs.BoolVar(&s.EnableCertificateOwnerRef, "enable-certificate-owner-ref", defaultEnableCertificateOwnerRef, ""+
"Whether to set the certificate resource as an owner of secret where the tls certificate is stored. "+
"When this flag is enabled, the secret will be automatically removed when the certificate resource is deleted.")
fs.IntVar(&s.MaxConcurrentChallenges, "max-concurrent-challenges", defaultMaxConcurrentChallenges, ""+
"The maximum number of challenges that can be scheduled as 'processing' at once.")
fs.DurationVar(&s.DNS01CheckRetryPeriod, "dns01-check-retry-period", defaultDNS01CheckRetryPeriod, ""+
"The duration the controller should wait between checking if a ACME dns entry exists."+
"This should be a valid duration string, for example 180s or 1h")
fs.StringVar(&s.MetricsListenAddress, "metrics-listen-address", defaultPrometheusMetricsServerAddress, ""+
"The host and port that the metrics endpoint should listen on.")
fs.BoolVar(&s.EnablePprof, "enable-profiling", false, ""+
"Enable profiling for controller.")
}
func (o *ControllerOptions) Validate() error {
switch o.DefaultIssuerKind {
case "Issuer":
case "ClusterIssuer":
default:
return fmt.Errorf("invalid default issuer kind: %v", o.DefaultIssuerKind)
}
if o.KubernetesAPIBurst <= 0 {
return fmt.Errorf("invalid value for kube-api-burst: %v must be higher than 0", o.KubernetesAPIBurst)
}
if o.KubernetesAPIQPS <= 0 {
return fmt.Errorf("invalid value for kube-api-qps: %v must be higher than 0", o.KubernetesAPIQPS)
}
if float32(o.KubernetesAPIBurst) < o.KubernetesAPIQPS {
return fmt.Errorf("invalid value for kube-api-burst: %v must be higher or equal to kube-api-qps: %v", o.KubernetesAPIQPS, o.KubernetesAPIQPS)
}
for _, server := range o.DNS01RecursiveNameservers {
// ensure all servers have a port number
_, _, err := net.SplitHostPort(server)
if err != nil {
return fmt.Errorf("invalid DNS server (%v): %v", err, server)
}
}
errs := []error{}
allControllersSet := sets.NewString(allControllers...)
for _, controller := range o.controllers {
if controller == "*" {
continue
}
controller = strings.TrimPrefix(controller, "-")
if !allControllersSet.Has(controller) {
errs = append(errs, fmt.Errorf("%q is not in the list of known controllers", controller))
}
}
if len(errs) > 0 {
return fmt.Errorf("validation failed for '--controllers': %v", errs)
}
return nil
}
func (o *ControllerOptions) EnabledControllers() sets.String {
var disabled []string
enabled := sets.NewString()
for _, controller := range o.controllers {
switch {
case controller == "*":
enabled = enabled.Insert(defaultEnabledControllers...)
case strings.HasPrefix(controller, "-"):
disabled = append(disabled, strings.TrimPrefix(controller, "-"))
default:
enabled = enabled.Insert(controller)
}
}
enabled = enabled.Delete(disabled...)
if utilfeature.DefaultFeatureGate.Enabled(feature.ExperimentalCertificateSigningRequestControllers) {
logf.Log.Info("enabling all experimental certificatesigningrequest controllers")
enabled = enabled.Insert(experimentalCertificateSigningRequestControllers...)
}
return enabled
}
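// For example (hypothetical invocation, not taken from this file):
//
//	--controllers=*,-clusterissuers
//
// "*" inserts every name in defaultEnabledControllers, then the "-" prefix
// marks "clusterissuers" as disabled, so the final set is the defaults minus
// the ClusterIssuers controller (plus the experimental CSR controllers when
// that feature gate is enabled).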
| 1 | 28,583 | What about Secrets, are we not also trying to cover those? | jetstack-cert-manager | go |
@@ -37,7 +37,7 @@
#include <jansson.h>
#include "src/common/libutil/log.h"
-#include "src/common/libkvs/jansson_dirent.h"
+#include "src/common/libkvs/treeobj.h"
#include "commit.h"
#include "kvs_util.h" | 1 | /*****************************************************************************\
* Copyright (c) 2015 Lawrence Livermore National Security, LLC. Produced at
* the Lawrence Livermore National Laboratory (cf, AUTHORS, DISCLAIMER.LLNS).
* LLNL-CODE-658032 All rights reserved.
*
* This file is part of the Flux resource manager framework.
* For details, see https://github.com/flux-framework.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
* Software Foundation; either version 2 of the license, or (at your option)
* any later version.
*
* Flux is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the IMPLIED WARRANTY OF MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the terms and conditions of the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
* See also: http://www.gnu.org/licenses/
\*****************************************************************************/
#if HAVE_CONFIG_H
#include "config.h"
#endif
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <unistd.h>
#include <stdbool.h>
#include <ctype.h>
#include <czmq.h>
#include <flux/core.h>
#include <jansson.h>
#include "src/common/libutil/log.h"
#include "src/common/libkvs/jansson_dirent.h"
#include "commit.h"
#include "kvs_util.h"
struct commit_mgr {
struct cache *cache;
const char *hash_name;
int noop_stores; /* for kvs.stats.get, etc. */
zhash_t *fences;
zlist_t *ready;
void *aux;
};
struct commit {
int errnum;
int aux_errnum;
fence_t *f;
int blocked:1;
json_t *rootcpy; /* working copy of root dir */
href_t newroot;
zlist_t *item_callback_list;
commit_mgr_t *cm;
enum {
COMMIT_STATE_INIT = 1,
COMMIT_STATE_LOAD_ROOT = 2,
COMMIT_STATE_APPLY_OPS = 3,
COMMIT_STATE_STORE = 4,
COMMIT_STATE_PRE_FINISHED = 5,
COMMIT_STATE_FINISHED = 6,
} state;
};
static void commit_destroy (commit_t *c)
{
if (c) {
json_decref (c->rootcpy);
if (c->item_callback_list)
zlist_destroy (&c->item_callback_list);
/* fence destroyed through management of fence, not commit_t's
* responsibility */
free (c);
}
}
static commit_t *commit_create (fence_t *f, commit_mgr_t *cm)
{
commit_t *c;
int saved_errno;
if (!(c = calloc (1, sizeof (*c)))) {
saved_errno = ENOMEM;
goto error;
}
c->f = f;
if (!(c->item_callback_list = zlist_new ())) {
saved_errno = ENOMEM;
goto error;
}
c->cm = cm;
c->state = COMMIT_STATE_INIT;
return c;
error:
commit_destroy (c);
errno = saved_errno;
return NULL;
}
int commit_get_errnum (commit_t *c)
{
return c->errnum;
}
int commit_get_aux_errnum (commit_t *c)
{
return c->aux_errnum;
}
int commit_set_aux_errnum (commit_t *c, int errnum)
{
c->aux_errnum = errnum;
return c->aux_errnum;
}
fence_t *commit_get_fence (commit_t *c)
{
return c->f;
}
void *commit_get_aux (commit_t *c)
{
return c->cm->aux;
}
const char *commit_get_newroot_ref (commit_t *c)
{
if (c->state == COMMIT_STATE_FINISHED)
return c->newroot;
return NULL;
}
/* On error we should cleanup anything on the dirty cache list
* that has not yet been passed to the user. Because this has not
* been passed to the user, there should be no waiters and the
* cache_entry_clear_dirty() should always succeed in clearing the
* bit.
*
* As of the writing of this code, it should also be impossible
* for cache_remove_entry() to fail. In the rare case of two
* callers kvs.get-ing and kvs.put-ing items that end up at the
* same blobref in the cache, any waiters for a valid cache entry would
* have been satisfied when the dirty cache entry was put onto
* this dirty cache list (i.e. in store_cache() below when
* cache_entry_set_json() was called).
*/
void commit_cleanup_dirty_cache_entry (commit_t *c, struct cache_entry *hp)
{
if (c->state == COMMIT_STATE_STORE
|| c->state == COMMIT_STATE_PRE_FINISHED) {
href_t ref;
int ret;
assert (cache_entry_get_dirty (hp) == true);
ret = cache_entry_clear_dirty (hp);
assert (ret == 0);
if (kvs_util_json_hash (c->cm->hash_name,
cache_entry_get_json (hp),
ref) < 0)
log_err ("kvs_util_json_hash");
else {
ret = cache_remove_entry (c->cm->cache, ref);
assert (ret == 1);
}
}
}
static void cleanup_dirty_cache_list (commit_t *c)
{
struct cache_entry *hp;
while ((hp = zlist_pop (c->item_callback_list)))
commit_cleanup_dirty_cache_entry (c, hp);
}
/* Store object 'o' under key 'ref' in local cache.
* The object reference is given to this function; it will either hand
* it to the cache or decref it.
* Returns -1 on error, 0 on success (entry already present), 1 on
* success (entry needs to be flushed to the content store)
*/
static int store_cache (commit_t *c, int current_epoch, json_t *o,
href_t ref, struct cache_entry **hpp)
{
struct cache_entry *hp;
int saved_errno, rc = -1;
if (kvs_util_json_hash (c->cm->hash_name, o, ref) < 0) {
saved_errno = errno;
log_err ("kvs_util_json_hash");
goto decref_done;
}
if (!(hp = cache_lookup (c->cm->cache, ref, current_epoch))) {
if (!(hp = cache_entry_create (NULL))) {
saved_errno = ENOMEM;
goto decref_done;
}
cache_insert (c->cm->cache, ref, hp);
}
if (cache_entry_get_valid (hp)) {
c->cm->noop_stores++;
json_decref (o);
rc = 0;
} else {
if (cache_entry_set_json (hp, o) < 0) {
int ret;
saved_errno = errno;
ret = cache_remove_entry (c->cm->cache, ref);
assert (ret == 1);
goto decref_done;
}
if (cache_entry_set_dirty (hp, true) < 0) {
/* cache_remove_entry will decref object */
int ret;
saved_errno = errno;
ret = cache_remove_entry (c->cm->cache, ref);
assert (ret == 1);
goto done;
}
rc = 1;
}
*hpp = hp;
return rc;
decref_done:
json_decref (o);
done:
errno = saved_errno;
return rc;
}
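/* Callers act on the tri-state return above: on 1 the dirty cache entry
* in *hpp must be pushed onto item_callback_list so it is eventually
* flushed (see commit_unroll() below and COMMIT_STATE_STORE); on 0 the
* entry was already valid and no flush is needed.
*/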
/* Store DIRVAL objects, converting them to DIRREFs.
* Store (large) FILEVAL objects, converting them to FILEREFs.
* Return 0 on success, -1 on error
*/
static int commit_unroll (commit_t *c, int current_epoch, json_t *dir)
{
json_t *value;
json_t *subdir, *key_value;
json_t *tmpdirent;
href_t ref;
int ret;
struct cache_entry *hp;
void *iter = json_object_iter (dir);
/* Do not use json_object_foreach(), unsafe to modify via
* json_object_set() while iterating.
*/
while (iter) {
value = json_object_iter_value (iter);
if ((subdir = json_object_get (value, "DIRVAL"))) {
if (commit_unroll (c, current_epoch, subdir) < 0) /* depth first */
return -1;
json_incref (subdir);
if ((ret = store_cache (c, current_epoch, subdir, ref, &hp)) < 0)
return -1;
if (ret) {
if (zlist_push (c->item_callback_list, hp) < 0) {
commit_cleanup_dirty_cache_entry (c, hp);
errno = ENOMEM;
return -1;
}
}
if (!(tmpdirent = j_dirent_create ("DIRREF", ref)))
return -1;
if (json_object_iter_set_new (dir, iter, tmpdirent) < 0) {
json_decref (tmpdirent);
errno = ENOMEM;
return -1;
}
}
else if ((key_value = json_object_get (value, "FILEVAL"))) {
size_t size;
if (kvs_util_json_encoded_size (key_value, &size) < 0)
return -1;
if (size > BLOBREF_MAX_STRING_SIZE) {
json_incref (key_value);
if ((ret = store_cache (c, current_epoch, key_value,
ref, &hp)) < 0)
return -1;
if (ret) {
if (zlist_push (c->item_callback_list, hp) < 0) {
commit_cleanup_dirty_cache_entry (c, hp);
errno = ENOMEM;
return -1;
}
}
if (!(tmpdirent = j_dirent_create ("FILEREF", ref)))
return -1;
if (json_object_iter_set_new (dir, iter, tmpdirent) < 0) {
json_decref (tmpdirent);
errno = ENOMEM;
return -1;
}
}
}
iter = json_object_iter_next (dir, iter);
}
return 0;
}
/* link (key, dirent) into directory 'dir'.
*/
static int commit_link_dirent (commit_t *c, int current_epoch,
json_t *rootdir, const char *key,
json_t *dirent, const char **missing_ref)
{
char *cpy = strdup (key);
char *next, *name = cpy;
json_t *dir = rootdir;
json_t *o, *subdir = NULL, *subdirent;
json_t *tmpdirent;
int saved_errno, rc = -1;
if (!cpy) {
saved_errno = ENOMEM;
goto done;
}
/* Special case root
*/
if (strcmp (name, ".") == 0) {
saved_errno = EINVAL;
goto done;
}
/* This is the first part of a key with multiple path components.
* Make sure that it is a directory in DIRVAL form, then recurse
* on the remaining path components.
*/
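/* Illustrative example: for the key "a.b.c", the loop below consumes "a"
* and then "b" - creating each as a DIRVAL, inflating a DIRREF into a
* DIRVAL copy, or following a LINKVAL - and leaves "c" to the
* final-path-component handling after the loop.
*/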
while ((next = strchr (name, '.'))) {
*next++ = '\0';
if (!(subdirent = json_object_get (dir, name))) {
if (json_is_null (dirent)) /* key deletion - it doesn't exist so return */
goto success;
if (!(subdir = json_object ())) {
saved_errno = ENOMEM;
goto done;
}
if (!(tmpdirent = j_dirent_create ("DIRVAL", subdir))) {
saved_errno = errno;
json_decref (subdir);
goto done;
}
if (json_object_set_new (dir, name, tmpdirent) < 0) {
json_decref (tmpdirent);
json_decref (subdir);
saved_errno = ENOMEM;
goto done;
}
json_decref (subdir);
} else if ((o = json_object_get (subdirent, "DIRVAL"))) {
subdir = o;
} else if ((o = json_object_get (subdirent, "DIRREF"))) {
assert (json_is_string (o));
const char *ref = json_string_value (o);
if (!(subdir = cache_lookup_and_get_json (c->cm->cache,
ref,
current_epoch))) {
*missing_ref = ref;
goto success; /* stall */
}
/* do not corrupt the store by modifying the original */
if (!(subdir = json_copy (subdir))) {
saved_errno = ENOMEM;
goto done;
}
if (!(tmpdirent = j_dirent_create ("DIRVAL", subdir))) {
saved_errno = errno;
json_decref (subdir);
goto done;
}
if (json_object_set_new (dir, name, tmpdirent) < 0) {
json_decref (tmpdirent);
json_decref (subdir);
saved_errno = ENOMEM;
goto done;
}
json_decref (subdir);
} else if ((o = json_object_get (subdirent, "LINKVAL"))) {
assert (json_is_string (o));
char *nkey = NULL;
if (asprintf (&nkey, "%s.%s", json_string_value (o), next) < 0) {
saved_errno = ENOMEM;
goto done;
}
if (commit_link_dirent (c,
current_epoch,
rootdir,
nkey,
dirent,
missing_ref) < 0) {
saved_errno = errno;
free (nkey);
goto done;
}
free (nkey);
goto success;
} else {
if (json_is_null (dirent)) /* key deletion - it doesn't exist so return */
goto success;
if (!(subdir = json_object ())) {
saved_errno = ENOMEM;
goto done;
}
if (!(tmpdirent = j_dirent_create ("DIRVAL", subdir))) {
saved_errno = errno;
json_decref (subdir);
goto done;
}
if (json_object_set_new (dir, name, tmpdirent) < 0) {
json_decref (tmpdirent);
json_decref (subdir);
saved_errno = ENOMEM;
goto done;
}
json_decref (subdir);
}
name = next;
dir = subdir;
}
/* This is the final path component of the key. Add it to the directory.
*/
if (!json_is_null (dirent)) {
if (json_object_set_new (dir, name, json_incref (dirent)) < 0) {
saved_errno = errno;
json_decref (dirent);
goto done;
}
}
else
json_object_del (dir, name);
success:
rc = 0;
done:
free (cpy);
if (rc < 0)
errno = saved_errno;
return rc;
}
commit_process_t commit_process (commit_t *c,
int current_epoch,
const href_t rootdir_ref)
{
/* In case the user calls commit_process() again */
if (c->errnum)
return COMMIT_PROCESS_ERROR;
switch (c->state) {
case COMMIT_STATE_INIT:
case COMMIT_STATE_LOAD_ROOT:
{
/* Make a copy of the root directory.
*/
json_t *rootdir;
/* Caller didn't call commit_iter_missing_refs() */
if (zlist_first (c->item_callback_list))
goto stall_load;
c->state = COMMIT_STATE_LOAD_ROOT;
if (!(rootdir = cache_lookup_and_get_json (c->cm->cache,
rootdir_ref,
current_epoch))) {
if (zlist_push (c->item_callback_list,
(void *)rootdir_ref) < 0) {
c->errnum = ENOMEM;
return COMMIT_PROCESS_ERROR;
}
goto stall_load;
}
if (!(c->rootcpy = json_copy (rootdir))) {
c->errnum = ENOMEM;
return COMMIT_PROCESS_ERROR;
}
c->state = COMMIT_STATE_APPLY_OPS;
/* fallthrough */
}
case COMMIT_STATE_APPLY_OPS:
{
/* Apply each op (e.g. key = val) in sequence to the root
* copy. A side effect of walking key paths is to convert
* DIRREFs to DIRVALs in the copy. This allows the commit
* to be self-contained in the rootcpy until it is
* unrolled later on.
*/
if (fence_get_json_ops (c->f)) {
json_t *op, *key, *dirent;
const char *missing_ref = NULL;
json_t *ops = fence_get_json_ops (c->f);
int i, len = json_array_size (ops);
/* Caller didn't call commit_iter_missing_refs() */
if (zlist_first (c->item_callback_list))
goto stall_load;
for (i = 0; i < len; i++) {
missing_ref = NULL;
if (!(op = json_array_get (ops, i))
|| !(key = json_object_get (op, "key"))
|| !(dirent = json_object_get (op, "dirent")))
continue;
if (commit_link_dirent (c,
current_epoch,
c->rootcpy,
json_string_value (key),
dirent,
&missing_ref) < 0) {
c->errnum = errno;
break;
}
if (missing_ref) {
if (zlist_push (c->item_callback_list,
(void *)missing_ref) < 0) {
c->errnum = ENOMEM;
break;
}
}
}
if (c->errnum != 0) {
/* empty item_callback_list to prevent mistakes later */
while ((missing_ref = zlist_pop (c->item_callback_list)));
return COMMIT_PROCESS_ERROR;
}
if (zlist_first (c->item_callback_list))
goto stall_load;
}
c->state = COMMIT_STATE_STORE;
/* fallthrough */
}
case COMMIT_STATE_STORE:
{
/* Unroll the root copy.
* When a DIRVAL is found, store an object and replace it
* with a DIRREF. Finally, store the unrolled root copy
* as an object and keep its reference in c->newroot.
* Flushes to content cache are asynchronous but we don't
* proceed until they are completed.
*/
struct cache_entry *hp;
int sret;
if (commit_unroll (c, current_epoch, c->rootcpy) < 0)
c->errnum = errno;
else if ((sret = store_cache (c,
current_epoch,
c->rootcpy,
c->newroot,
&hp)) < 0)
c->errnum = errno;
else if (sret
&& zlist_push (c->item_callback_list, hp) < 0) {
commit_cleanup_dirty_cache_entry (c, hp);
c->errnum = ENOMEM;
}
if (c->errnum) {
cleanup_dirty_cache_list (c);
return COMMIT_PROCESS_ERROR;
}
/* cache took ownership of rootcpy, we're done, but
* may still need to stall user.
*/
c->state = COMMIT_STATE_PRE_FINISHED;
c->rootcpy = NULL;
/* fallthrough */
}
case COMMIT_STATE_PRE_FINISHED:
/* If we did not fall through to here, caller didn't call
* commit_iter_dirty_cache_entries()
*/
if (zlist_first (c->item_callback_list))
goto stall_store;
c->state = COMMIT_STATE_FINISHED;
/* fallthrough */
case COMMIT_STATE_FINISHED:
break;
default:
log_msg ("invalid commit state: %d", c->state);
c->errnum = EPERM;
return COMMIT_PROCESS_ERROR;
}
return COMMIT_PROCESS_FINISHED;
stall_load:
c->blocked = 1;
return COMMIT_PROCESS_LOAD_MISSING_REFS;
stall_store:
c->blocked = 1;
return COMMIT_PROCESS_DIRTY_CACHE_ENTRIES;
}
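/* A sketch of a typical caller loop (load_cb and flush_cb are hypothetical
* caller-supplied callbacks, not functions from this module; a real caller
* re-enters commit_process() only after the requested work completes):
*
*   commit_process_t ret = commit_process (c, epoch, rootref);
*   if (ret == COMMIT_PROCESS_LOAD_MISSING_REFS)
*       commit_iter_missing_refs (c, load_cb, arg);         // then retry
*   else if (ret == COMMIT_PROCESS_DIRTY_CACHE_ENTRIES)
*       commit_iter_dirty_cache_entries (c, flush_cb, arg); // then retry
*   else if (ret == COMMIT_PROCESS_ERROR)
*       handle (commit_get_errnum (c));
*/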
int commit_iter_missing_refs (commit_t *c, commit_ref_cb cb, void *data)
{
const char *ref;
int saved_errno, rc = 0;
if (c->state != COMMIT_STATE_LOAD_ROOT
&& c->state != COMMIT_STATE_APPLY_OPS) {
errno = EINVAL;
return -1;
}
while ((ref = zlist_pop (c->item_callback_list))) {
if (cb (c, ref, data) < 0) {
saved_errno = errno;
rc = -1;
break;
}
}
if (rc < 0) {
while ((ref = zlist_pop (c->item_callback_list)));
errno = saved_errno;
}
return rc;
}
int commit_iter_dirty_cache_entries (commit_t *c,
commit_cache_entry_cb cb,
void *data)
{
struct cache_entry *hp;
int saved_errno, rc = 0;
if (c->state != COMMIT_STATE_PRE_FINISHED) {
errno = EINVAL;
return -1;
}
while ((hp = zlist_pop (c->item_callback_list))) {
if (cb (c, hp, data) < 0) {
saved_errno = errno;
rc = -1;
break;
}
}
if (rc < 0) {
cleanup_dirty_cache_list (c);
errno = saved_errno;
}
return rc;
}
commit_mgr_t *commit_mgr_create (struct cache *cache,
const char *hash_name,
void *aux)
{
commit_mgr_t *cm;
int saved_errno;
if (!(cm = calloc (1, sizeof (*cm)))) {
saved_errno = ENOMEM;
goto error;
}
cm->cache = cache;
cm->hash_name = hash_name;
if (!(cm->fences = zhash_new ())) {
saved_errno = ENOMEM;
goto error;
}
if (!(cm->ready = zlist_new ())) {
saved_errno = ENOMEM;
goto error;
}
cm->aux = aux;
return cm;
error:
commit_mgr_destroy (cm);
errno = saved_errno;
return NULL;
}
void commit_mgr_destroy (commit_mgr_t *cm)
{
if (cm) {
if (cm->fences)
zhash_destroy (&cm->fences);
if (cm->ready)
zlist_destroy (&cm->ready);
free (cm);
}
}
int commit_mgr_add_fence (commit_mgr_t *cm, fence_t *f)
{
json_t *name;
if (!(name = json_array_get (fence_get_json_names (f), 0))) {
errno = EINVAL;
goto error;
}
if (zhash_insert (cm->fences, json_string_value (name), f) < 0) {
errno = EEXIST;
goto error;
}
zhash_freefn (cm->fences,
json_string_value (name),
(zhash_free_fn *)fence_destroy);
return 0;
error:
return -1;
}
fence_t *commit_mgr_lookup_fence (commit_mgr_t *cm, const char *name)
{
return zhash_lookup (cm->fences, name);
}
int commit_mgr_process_fence_request (commit_mgr_t *cm, fence_t *f)
{
if (fence_count_reached (f)) {
commit_t *c;
if (!(c = commit_create (f, cm)))
return -1;
if (zlist_append (cm->ready, c) < 0) {
commit_destroy (c);
errno = ENOMEM;
return -1;
}
zlist_freefn (cm->ready, c, (zlist_free_fn *)commit_destroy, true);
}
return 0;
}
bool commit_mgr_commits_ready (commit_mgr_t *cm)
{
commit_t *c;
if ((c = zlist_first (cm->ready)) && !c->blocked)
return true;
return false;
}
commit_t *commit_mgr_get_ready_commit (commit_mgr_t *cm)
{
if (commit_mgr_commits_ready (cm))
return zlist_first (cm->ready);
return NULL;
}
void commit_mgr_remove_commit (commit_mgr_t *cm, commit_t *c)
{
zlist_remove (cm->ready, c);
}
void commit_mgr_remove_fence (commit_mgr_t *cm, const char *name)
{
zhash_delete (cm->fences, name);
}
int commit_mgr_get_noop_stores (commit_mgr_t *cm)
{
return cm->noop_stores;
}
void commit_mgr_clear_noop_stores (commit_mgr_t *cm)
{
cm->noop_stores = 0;
}
/* Merge ready commits that are mergeable, where merging consists of
* popping the "donor" commit off the ready list, and appending its
* ops to the top commit. The top commit can be appended to if it
* hasn't started, or is still building the rootcpy, e.g. stalled
* walking the namespace.
*
* Break when an unmergeable commit is discovered. We do not wish to
* merge non-adjacent fences, as it can create undesirable out of
* order scenarios. e.g.
*
* commit #1 is mergeable: set A=1
* commit #2 is non-mergeable: set A=2
* commit #3 is mergeable: set A=3
*
* If we were to merge commit #1 and commit #3, A=2 would be set after
* A=3.
*/
int commit_mgr_merge_ready_commits (commit_mgr_t *cm)
{
commit_t *c = zlist_first (cm->ready);
/* commit must still be in state where merged in ops can be
* applied */
if (c
&& c->errnum == 0
&& c->state <= COMMIT_STATE_APPLY_OPS
&& !(fence_get_flags (c->f) & FLUX_KVS_NO_MERGE)) {
commit_t *nc;
while ((nc = zlist_next (cm->ready))) {
int ret;
if ((ret = fence_merge (c->f, nc->f)) < 0)
return -1;
/* if return == 0, we've merged as many as we currently
* can */
if (!ret)
break;
/* Merged fence, remove off ready list */
zlist_remove (cm->ready, nc);
}
}
return 0;
}
| 1 | 18,807 | (commit messge): how about > update internal commit API for RFC 11 | flux-framework-flux-core | c |
@@ -27,7 +27,7 @@ var eventErrorStates = []string{
}
var waiters = []request.WaiterOption{
- request.WithWaiterDelay(request.ConstantWaiterDelay(3 * time.Second)), // Poll for cfn updates every 3 seconds.
+ request.WithWaiterDelay(request.ConstantWaiterDelay(5 * time.Second)), // How long to wait in between polling cfn for updates.
request.WithWaiterMaxAttempts(1800), // Wait for at most 90 mins for any cfn action.
}
| 1 | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
// Package cloudformation provides a client to make API requests to AWS CloudFormation.
package cloudformation
import (
"context"
"errors"
"fmt"
"time"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/request"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/cloudformation"
)
type eventMatcher func(*cloudformation.StackEvent) bool
var eventErrorStates = []string{
cloudformation.ResourceStatusCreateFailed,
cloudformation.ResourceStatusDeleteFailed,
cloudformation.ResourceStatusImportFailed,
cloudformation.ResourceStatusUpdateFailed,
cloudformation.ResourceStatusImportRollbackFailed,
}
var waiters = []request.WaiterOption{
request.WithWaiterDelay(request.ConstantWaiterDelay(3 * time.Second)), // Poll for cfn updates every 3 seconds.
request.WithWaiterMaxAttempts(1800), // Wait for at most 90 mins for any cfn action.
}
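// The effective ceiling on any wait is delay x attempts: 3s x 1800 = 5400s
// (90 minutes) as configured here; a 5s delay with the same attempt count
// would raise it to 9000s (2.5 hours).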
// CloudFormation represents a client to make requests to AWS CloudFormation.
type CloudFormation struct {
client
}
// New creates a new CloudFormation client.
func New(s *session.Session) *CloudFormation {
return &CloudFormation{
cloudformation.New(s),
}
}
// Create deploys a new CloudFormation stack using Change Sets.
// If the stack already exists in a failed state, deletes the stack and re-creates it.
func (c *CloudFormation) Create(stack *Stack) (changeSetID string, err error) {
descr, err := c.Describe(stack.Name)
if err != nil {
var stackNotFound *ErrStackNotFound
if !errors.As(err, &stackNotFound) {
return "", err
}
// If the stack does not exist, create it.
return c.create(stack)
}
status := StackStatus(aws.StringValue(descr.StackStatus))
if status.requiresCleanup() {
// If the stack exists, but failed to create, we'll clean it up and then re-create it.
if err := c.Delete(stack.Name); err != nil {
return "", fmt.Errorf("clean up previously failed stack %s: %w", stack.Name, err)
}
return c.create(stack)
}
if status.InProgress() {
return "", &ErrStackUpdateInProgress{
Name: stack.Name,
}
}
return "", &ErrStackAlreadyExists{
Name: stack.Name,
Stack: descr,
}
}
// CreateAndWait calls Create and then WaitForCreate.
func (c *CloudFormation) CreateAndWait(stack *Stack) error {
if _, err := c.Create(stack); err != nil {
return err
}
return c.WaitForCreate(context.Background(), stack.Name)
}
// DescribeChangeSet gathers and returns all changes for a change set.
func (c *CloudFormation) DescribeChangeSet(changeSetID, stackName string) (*ChangeSetDescription, error) {
cs := &changeSet{name: changeSetID, stackName: stackName, client: c.client}
out, err := cs.describe()
if err != nil {
return nil, err
}
return out, nil
}
// WaitForCreate blocks until the stack is created or until the max attempt window expires.
func (c *CloudFormation) WaitForCreate(ctx context.Context, stackName string) error {
err := c.client.WaitUntilStackCreateCompleteWithContext(ctx, &cloudformation.DescribeStacksInput{
StackName: aws.String(stackName),
}, waiters...)
if err != nil {
return fmt.Errorf("wait until stack %s create is complete: %w", stackName, err)
}
return nil
}
// Update updates an existing CloudFormation with the new configuration.
// If there are no changes for the stack, deletes the empty change set and returns ErrChangeSetEmpty.
func (c *CloudFormation) Update(stack *Stack) error {
descr, err := c.Describe(stack.Name)
if err != nil {
return err
}
status := StackStatus(aws.StringValue(descr.StackStatus))
if status.InProgress() {
return &ErrStackUpdateInProgress{
Name: stack.Name,
}
}
return c.update(stack)
}
// UpdateAndWait calls Update and then blocks until the stack is updated or until the max attempt window expires.
func (c *CloudFormation) UpdateAndWait(stack *Stack) error {
if err := c.Update(stack); err != nil {
return err
}
return c.WaitForUpdate(context.Background(), stack.Name)
}
// WaitForUpdate blocks until the stack is updated or until the max attempt window expires.
func (c *CloudFormation) WaitForUpdate(ctx context.Context, stackName string) error {
err := c.client.WaitUntilStackUpdateCompleteWithContext(ctx, &cloudformation.DescribeStacksInput{
StackName: aws.String(stackName),
}, waiters...)
if err != nil {
return fmt.Errorf("wait until stack %s update is complete: %w", stackName, err)
}
return nil
}
// Delete removes an existing CloudFormation stack.
// If the stack doesn't exist then do nothing.
func (c *CloudFormation) Delete(stackName string) error {
_, err := c.client.DeleteStack(&cloudformation.DeleteStackInput{
StackName: aws.String(stackName),
})
if err != nil {
if !stackDoesNotExist(err) {
return fmt.Errorf("delete stack %s: %w", stackName, err)
}
// Move on if stack is already deleted.
}
return nil
}
// DeleteAndWait calls Delete then blocks until the stack is deleted or until the max attempt window expires.
func (c *CloudFormation) DeleteAndWait(stackName string) error {
return c.deleteAndWait(&cloudformation.DeleteStackInput{
StackName: aws.String(stackName),
})
}
// DeleteAndWaitWithRoleARN is DeleteAndWait but with a role ARN that AWS CloudFormation assumes to delete the stack.
func (c *CloudFormation) DeleteAndWaitWithRoleARN(stackName, roleARN string) error {
return c.deleteAndWait(&cloudformation.DeleteStackInput{
StackName: aws.String(stackName),
RoleARN: aws.String(roleARN),
})
}
// Describe returns a description of an existing stack.
// If the stack does not exist, returns ErrStackNotFound.
func (c *CloudFormation) Describe(name string) (*StackDescription, error) {
out, err := c.client.DescribeStacks(&cloudformation.DescribeStacksInput{
StackName: aws.String(name),
})
if err != nil {
if stackDoesNotExist(err) {
return nil, &ErrStackNotFound{name: name}
}
return nil, fmt.Errorf("describe stack %s: %w", name, err)
}
if len(out.Stacks) == 0 {
return nil, &ErrStackNotFound{name: name}
}
descr := StackDescription(*out.Stacks[0])
return &descr, nil
}
// TemplateBody returns the template body of an existing stack.
// If the stack does not exist, returns ErrStackNotFound.
func (c *CloudFormation) TemplateBody(name string) (string, error) {
out, err := c.client.GetTemplate(&cloudformation.GetTemplateInput{
StackName: aws.String(name),
})
if err != nil {
if stackDoesNotExist(err) {
return "", &ErrStackNotFound{name: name}
}
return "", fmt.Errorf("get template %s: %w", name, err)
}
return aws.StringValue(out.TemplateBody), nil
}
// Events returns the list of stack events in **chronological** order.
func (c *CloudFormation) Events(stackName string) ([]StackEvent, error) {
return c.events(stackName, func(in *cloudformation.StackEvent) bool { return true })
}
func (c *CloudFormation) events(stackName string, match eventMatcher) ([]StackEvent, error) {
var nextToken *string
var events []StackEvent
for {
out, err := c.client.DescribeStackEvents(&cloudformation.DescribeStackEventsInput{
NextToken: nextToken,
StackName: aws.String(stackName),
})
if err != nil {
return nil, fmt.Errorf("describe stack events for stack %s: %w", stackName, err)
}
for _, event := range out.StackEvents {
if match(event) {
events = append(events, StackEvent(*event))
}
}
nextToken = out.NextToken
if nextToken == nil {
break
}
}
// Reverse the events so that they're returned in chronological order.
// Taken from https://github.com/golang/go/wiki/SliceTricks#reversing.
for i := len(events)/2 - 1; i >= 0; i-- {
opp := len(events) - 1 - i
events[i], events[opp] = events[opp], events[i]
}
return events, nil
}
// ErrorEvents returns the list of events with "failed" status in **chronological order**
func (c *CloudFormation) ErrorEvents(stackName string) ([]StackEvent, error) {
return c.events(stackName, func(in *cloudformation.StackEvent) bool {
for _, status := range eventErrorStates {
if aws.StringValue(in.ResourceStatus) == status {
return true
}
}
return false
})
}
func (c *CloudFormation) create(stack *Stack) (string, error) {
cs, err := newCreateChangeSet(c.client, stack.Name)
if err != nil {
return "", err
}
if err := cs.createAndExecute(stack.stackConfig); err != nil {
return "", err
}
return cs.name, nil
}
func (c *CloudFormation) update(stack *Stack) error {
cs, err := newUpdateChangeSet(c.client, stack.Name)
if err != nil {
return err
}
return cs.createAndExecute(stack.stackConfig)
}
func (c *CloudFormation) deleteAndWait(in *cloudformation.DeleteStackInput) error {
_, err := c.client.DeleteStack(in)
if err != nil {
if !stackDoesNotExist(err) {
return fmt.Errorf("delete stack %s: %w", aws.StringValue(in.StackName), err)
}
return nil // If the stack is already deleted, don't wait for it.
}
err = c.client.WaitUntilStackDeleteCompleteWithContext(context.Background(), &cloudformation.DescribeStacksInput{
StackName: in.StackName,
}, waiters...)
if err != nil {
return fmt.Errorf("wait until stack %s delete is complete: %w", aws.StringValue(in.StackName), err)
}
return nil
}
| 1 | 16,114 | This will now wait for 2.5 hours, not 90 minutes, if the waiter delay is 5s | aws-copilot-cli | go |
@@ -14,6 +14,7 @@ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
const EventUtil = {
trapEvent(event){
+ if(!event) return
event.preventDefault();
event.stopPropagation();
if(event.nativeEvent && event.nativeEvent.preventDefault){ | 1 | /*
Copyright (c) 2015, salesforce.com, inc. All rights reserved.
Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
Neither the name of salesforce.com, inc. nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
'use strict';
const EventUtil = {
trapEvent(event){
event.preventDefault();
event.stopPropagation();
if(event.nativeEvent && event.nativeEvent.preventDefault){
event.nativeEvent.preventDefault();
}
if (event.nativeEvent && event.nativeEvent.stopPropagation){
event.nativeEvent.stopPropagation();
}
},
trap(event){
return EventUtil.trapEvent( event );
},
trapImmediate(event){
if(event.stopImmediatePropagation){
event.stopImmediatePropagation();
}
if(event.nativeEvent && event.nativeEvent.stopImmediatePropagation){
event.nativeEvent.stopImmediatePropagation();
}
EventUtil.trap(event);
}
};
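// Illustrative usage inside a React handler (hypothetical component code):
//
//   <a href="#" onClick={(event) => {
//     EventUtil.trapImmediate(event);
//     // ...handler logic that must not reach any other listeners...
//   }} />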
module.exports = EventUtil;
| 1 | 9,389 | See this error on console too so I don't think it's just test simulation. For some reason event doesn't show up sometimes | salesforce-design-system-react | js |
@@ -44,7 +44,7 @@ func main() {
cmds.NewCRICTL(externalCLIAction("crictl", dataDir)),
cmds.NewCtrCommand(externalCLIAction("ctr", dataDir)),
cmds.NewCheckConfigCommand(externalCLIAction("check-config", dataDir)),
- cmds.NewEtcdSnapshotCommand(etcdsnapshotCommand, cmds.NewEtcdSnapshotSubcommands(etcdsnapshotCommand)),
+ cmds.NewEtcdSnapshotCommand(etcdsnapshotCommand, cmds.NewEtcdSnapshotSubcommands(etcdsnapshotCommand, etcdsnapshotCommand)),
}
if err := app.Run(os.Args); err != nil { | 1 | package main
import (
"bytes"
"os"
"os/exec"
"path/filepath"
"strings"
"syscall"
"github.com/pkg/errors"
"github.com/rancher/k3s/pkg/cli/cmds"
"github.com/rancher/k3s/pkg/configfilearg"
"github.com/rancher/k3s/pkg/data"
"github.com/rancher/k3s/pkg/datadir"
"github.com/rancher/k3s/pkg/dataverify"
"github.com/rancher/k3s/pkg/flock"
"github.com/rancher/k3s/pkg/untar"
"github.com/rancher/k3s/pkg/version"
"github.com/rancher/wrangler/pkg/resolvehome"
"github.com/sirupsen/logrus"
"github.com/urfave/cli"
)
var criDefaultConfigPath = "/etc/crictl.yaml"
// main entrypoint for the k3s multicall binary
func main() {
dataDir := findDataDir()
// Handle direct invocation via symlink alias (multicall binary behavior)
if runCLIs(dataDir) {
return
}
etcdsnapshotCommand := internalCLIAction(version.Program+"-"+cmds.EtcdSnapshotCommand, dataDir, os.Args)
// Handle subcommand invocation (k3s server, k3s crictl, etc)
app := cmds.NewApp()
app.Commands = []cli.Command{
cmds.NewServerCommand(internalCLIAction(version.Program+"-server", dataDir, os.Args)),
cmds.NewAgentCommand(internalCLIAction(version.Program+"-agent", dataDir, os.Args)),
cmds.NewKubectlCommand(externalCLIAction("kubectl", dataDir)),
cmds.NewCRICTL(externalCLIAction("crictl", dataDir)),
cmds.NewCtrCommand(externalCLIAction("ctr", dataDir)),
cmds.NewCheckConfigCommand(externalCLIAction("check-config", dataDir)),
cmds.NewEtcdSnapshotCommand(etcdsnapshotCommand, cmds.NewEtcdSnapshotSubcommands(etcdsnapshotCommand)),
}
if err := app.Run(os.Args); err != nil {
logrus.Fatal(err)
}
}
// findDataDir reads data-dir settings from the CLI args and config file.
// If not found, the default will be used, which varies depending on whether
// k3s is being run as root or not.
func findDataDir() string {
for i, arg := range os.Args {
for _, flagName := range []string{"--data-dir", "-d"} {
if flagName == arg {
if len(os.Args) > i+1 {
return os.Args[i+1]
}
} else if strings.HasPrefix(arg, flagName+"=") {
return arg[len(flagName)+1:]
}
}
}
dataDir := configfilearg.MustFindString(os.Args, "data-dir")
if d, err := datadir.Resolve(dataDir); err == nil {
dataDir = d
} else {
logrus.Warnf("Failed to resolve user home directory: %s", err)
}
return dataDir
}
// runCLIs handles the case where the binary is being executed as a symlink alias,
// /usr/local/bin/crictl for example. If the executable name is one of the external
// binaries, it calls it directly and returns true. If it's not an external binary,
// it returns false so that standard CLI wrapping can occur.
func runCLIs(dataDir string) bool {
progName := filepath.Base(os.Args[0])
switch progName {
case "crictl", "ctr", "kubectl":
if err := externalCLI(progName, dataDir, os.Args[1:]); err != nil {
logrus.Fatal(err)
}
return true
}
return false
}
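// For example (hypothetical setup): `ln -s /usr/local/bin/k3s /usr/local/bin/kubectl`
// makes `kubectl get nodes` enter runCLIs with progName "kubectl", so the
// embedded kubectl is staged and exec'd directly instead of going through the
// urfave/cli command tree.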
// externalCLIAction returns a function that will call an external binary, be used as the Action of a cli.Command.
func externalCLIAction(cmd, dataDir string) func(cli *cli.Context) error {
return func(cli *cli.Context) error {
return externalCLI(cmd, dataDir, cli.Args())
}
}
// externalCLI calls an external binary, fixing up argv[0] to the correct name.
// crictl needs extra help to find its config file so we do that here too.
func externalCLI(cli, dataDir string, args []string) error {
if cli == "crictl" {
if os.Getenv("CRI_CONFIG_FILE") == "" {
os.Setenv("CRI_CONFIG_FILE", findCriConfig(dataDir))
}
}
return stageAndRun(dataDir, cli, append([]string{cli}, args...))
}
// internalCLIAction returns a function that will call a K3s internal command, be used as the Action of a cli.Command.
func internalCLIAction(cmd, dataDir string, args []string) func(ctx *cli.Context) error {
return func(ctx *cli.Context) error {
return stageAndRunCLI(ctx, cmd, dataDir, args)
}
}
// stageAndRunCLI calls an external binary.
func stageAndRunCLI(cli *cli.Context, cmd string, dataDir string, args []string) error {
return stageAndRun(dataDir, cmd, args)
}
// stageAndRun does the actual work of setting up and calling an external binary.
func stageAndRun(dataDir, cmd string, args []string) error {
dir, err := extract(dataDir)
if err != nil {
return errors.Wrap(err, "extracting data")
}
logrus.Debugf("Asset dir %s", dir)
if err := os.Setenv("PATH", filepath.Join(dir, "bin")+":"+os.Getenv("PATH")+":"+filepath.Join(dir, "bin/aux")); err != nil {
return err
}
if err := os.Setenv(version.ProgramUpper+"_DATA_DIR", dir); err != nil {
return err
}
cmd, err = exec.LookPath(cmd)
if err != nil {
return err
}
logrus.Debugf("Running %s %v", cmd, args)
return syscall.Exec(cmd, args, os.Environ())
}
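// Note that on success syscall.Exec replaces the current process image, so
// stageAndRun only ever returns when staging or the exec itself fails.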
// getAssetAndDir returns the name of the bindata asset, along with a directory path
// derived from the data-dir and bindata asset name.
func getAssetAndDir(dataDir string) (string, string) {
asset := data.AssetNames()[0]
dir := filepath.Join(dataDir, "data", strings.SplitN(filepath.Base(asset), ".", 2)[0])
return asset, dir
}
// extract checks for and if necessary unpacks the bindata archive, returning the unique path
// to the extracted bindata asset.
func extract(dataDir string) (string, error) {
// first look for global asset folder so we don't create a HOME version if not needed
_, dir := getAssetAndDir(datadir.DefaultDataDir)
if _, err := os.Stat(filepath.Join(dir, "bin", "containerd")); err == nil {
return dir, nil
}
asset, dir := getAssetAndDir(dataDir)
// check if target content already exists
if _, err := os.Stat(filepath.Join(dir, "bin", "containerd")); err == nil {
return dir, nil
}
// acquire a data directory lock
os.MkdirAll(filepath.Join(dataDir, "data"), 0755)
lockFile := filepath.Join(dataDir, "data", ".lock")
logrus.Infof("Acquiring lock file %s", lockFile)
lock, err := flock.Acquire(lockFile)
if err != nil {
return "", err
}
defer flock.Release(lock)
// check again if target directory exists
if _, err := os.Stat(dir); err == nil {
return dir, nil
}
logrus.Infof("Preparing data dir %s", dir)
content, err := data.Asset(asset)
if err != nil {
return "", err
}
buf := bytes.NewBuffer(content)
tempDest := dir + "-tmp"
defer os.RemoveAll(tempDest)
os.RemoveAll(tempDest)
if err := untar.Untar(buf, tempDest); err != nil {
return "", err
}
if err := dataverify.Verify(filepath.Join(tempDest, "bin")); err != nil {
return "", err
}
currentSymLink := filepath.Join(dataDir, "data", "current")
previousSymLink := filepath.Join(dataDir, "data", "previous")
if _, err := os.Lstat(currentSymLink); err == nil {
if err := os.Rename(currentSymLink, previousSymLink); err != nil {
return "", err
}
}
if err := os.Symlink(dir, currentSymLink); err != nil {
return "", err
}
return dir, os.Rename(tempDest, dir)
}
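// The staging above is deliberately crash-safe: the archive is untarred and
// verified in dir+"-tmp" before the final rename, so a partially extracted
// data directory is never exposed at the destination, and the
// current/previous symlinks always track fully verified asset dirs.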
// findCriConfig returns the path to crictl.yaml
// crictl won't search multiple locations for a config file. It will fall back to looking in
// the same directory as the crictl binary, but that's it. We need to check the various possible
// data-dir locations ourselves and then point it at the right one. We check:
// - the configured data-dir
// - the default user data-dir (assuming we can find the user's home directory)
// - the default system data-dir
// - the default path from upstream crictl
func findCriConfig(dataDir string) string {
searchList := []string{filepath.Join(dataDir, "agent", criDefaultConfigPath)}
if homeDataDir, err := resolvehome.Resolve(datadir.DefaultHomeDataDir); err == nil {
searchList = append(searchList, filepath.Join(homeDataDir, "agent", criDefaultConfigPath))
} else {
logrus.Warnf("Failed to resolve user home directory: %s", err)
}
searchList = append(searchList, filepath.Join(datadir.DefaultDataDir, "agent", criDefaultConfigPath))
searchList = append(searchList, criDefaultConfigPath)
for _, path := range searchList {
_, err := os.Stat(path)
if err == nil {
return path
}
if !errors.Is(err, os.ErrNotExist) {
logrus.Warnf("Failed to %s", err)
}
}
return ""
}
| 1 | 9,525 | We're passing the same thing twice? | k3s-io-k3s | go |
@@ -245,7 +245,8 @@ func TestCreateInstance(t *testing.T) {
},
Spec: clusterv1.MachineSpec{
Bootstrap: clusterv1.Bootstrap{
- Data: pointer.StringPtr("user-data"),
+ // echo "user-data" | base64
+ Data: pointer.StringPtr("dXNlci1kYXRhCg=="),
},
},
}, | 1 | /*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package ec2
import (
"testing"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/service/ec2"
"github.com/golang/mock/gomock"
"github.com/pkg/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/utils/pointer"
"sigs.k8s.io/cluster-api-provider-aws/pkg/apis/infrastructure/v1alpha2"
"sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/awserrors"
"sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/scope"
"sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/services/ec2/mock_ec2iface"
"sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/services/elb/mock_elbiface"
clusterv1 "sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha2"
"sigs.k8s.io/controller-runtime/pkg/client/fake"
)
func TestInstanceIfExists(t *testing.T) {
mockCtrl := gomock.NewController(t)
defer mockCtrl.Finish()
testCases := []struct {
name string
instanceID string
expect func(m *mock_ec2iface.MockEC2APIMockRecorder)
check func(instance *v1alpha2.Instance, err error)
}{
{
name: "does not exist",
instanceID: "hello",
expect: func(m *mock_ec2iface.MockEC2APIMockRecorder) {
m.DescribeInstances(gomock.Eq(&ec2.DescribeInstancesInput{
InstanceIds: []*string{aws.String("hello")},
})).
Return(nil, awserrors.NewNotFound(errors.New("not found")))
},
check: func(instance *v1alpha2.Instance, err error) {
if err != nil {
t.Fatalf("did not expect error: %v", err)
}
if instance != nil {
t.Fatalf("Did not expect anything but got something: %+v", instance)
}
},
},
{
name: "instance exists",
instanceID: "id-1",
expect: func(m *mock_ec2iface.MockEC2APIMockRecorder) {
m.DescribeInstances(gomock.Eq(&ec2.DescribeInstancesInput{
InstanceIds: []*string{aws.String("id-1")},
})).
Return(&ec2.DescribeInstancesOutput{
Reservations: []*ec2.Reservation{
{
Instances: []*ec2.Instance{
{
InstanceId: aws.String("id-1"),
InstanceType: aws.String("m5.large"),
SubnetId: aws.String("subnet-1"),
ImageId: aws.String("ami-1"),
IamInstanceProfile: &ec2.IamInstanceProfile{
Arn: aws.String("arn:aws:iam::123456789012:instance-profile/foo"),
},
State: &ec2.InstanceState{
Code: aws.Int64(16),
Name: aws.String(ec2.StateAvailable),
},
},
},
},
},
}, nil)
},
check: func(instance *v1alpha2.Instance, err error) {
if err != nil {
t.Fatalf("did not expect error: %v", err)
}
if instance == nil {
t.Fatalf("expected instance but got nothing")
}
if instance.ID != "id-1" {
t.Fatalf("expected id-1 but got: %v", instance.ID)
}
},
},
{
name: "error describing instances",
instanceID: "one",
expect: func(m *mock_ec2iface.MockEC2APIMockRecorder) {
m.DescribeInstances(&ec2.DescribeInstancesInput{
InstanceIds: []*string{aws.String("one")},
}).
Return(nil, errors.New("some unknown error"))
},
check: func(i *v1alpha2.Instance, err error) {
if err == nil {
t.Fatalf("expected an error but got none.")
}
},
},
}
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
ec2Mock := mock_ec2iface.NewMockEC2API(mockCtrl)
elbMock := mock_elbiface.NewMockELBAPI(mockCtrl)
scope, err := scope.NewClusterScope(scope.ClusterScopeParams{
Cluster: &clusterv1.Cluster{},
AWSClients: scope.AWSClients{
EC2: ec2Mock,
ELB: elbMock,
},
AWSCluster: &v1alpha2.AWSCluster{
Spec: v1alpha2.AWSClusterSpec{
NetworkSpec: v1alpha2.NetworkSpec{
VPC: v1alpha2.VPCSpec{
ID: "test-vpc",
},
},
},
},
})
if err != nil {
t.Fatalf("Failed to create test context: %v", err)
}
tc.expect(ec2Mock.EXPECT())
s := NewService(scope)
instance, err := s.InstanceIfExists(&tc.instanceID)
tc.check(instance, err)
})
}
}
func TestTerminateInstance(t *testing.T) {
mockCtrl := gomock.NewController(t)
defer mockCtrl.Finish()
instanceNotFoundError := errors.New("instance not found")
testCases := []struct {
name string
instanceID string
expect func(m *mock_ec2iface.MockEC2APIMockRecorder)
check func(err error)
}{
{
name: "instance exists",
instanceID: "i-exist",
expect: func(m *mock_ec2iface.MockEC2APIMockRecorder) {
m.TerminateInstances(gomock.Eq(&ec2.TerminateInstancesInput{
InstanceIds: []*string{aws.String("i-exist")},
})).
Return(&ec2.TerminateInstancesOutput{}, nil)
},
check: func(err error) {
if err != nil {
t.Fatalf("did not expect error: %v", err)
}
},
},
{
name: "instance does not exist",
instanceID: "i-donotexist",
expect: func(m *mock_ec2iface.MockEC2APIMockRecorder) {
m.TerminateInstances(gomock.Eq(&ec2.TerminateInstancesInput{
InstanceIds: []*string{aws.String("i-donotexist")},
})).
Return(&ec2.TerminateInstancesOutput{}, instanceNotFoundError)
},
check: func(err error) {
if err == nil {
t.Fatalf("did not expect error: %v", err)
}
},
},
}
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
ec2Mock := mock_ec2iface.NewMockEC2API(mockCtrl)
elbMock := mock_elbiface.NewMockELBAPI(mockCtrl)
scope, err := scope.NewClusterScope(scope.ClusterScopeParams{
AWSClients: scope.AWSClients{
EC2: ec2Mock,
ELB: elbMock,
},
Cluster: &clusterv1.Cluster{},
AWSCluster: &v1alpha2.AWSCluster{},
})
if err != nil {
t.Fatalf("Failed to create test context: %v", err)
}
tc.expect(ec2Mock.EXPECT())
s := NewService(scope)
err = s.TerminateInstance(tc.instanceID)
tc.check(err)
})
}
}
func TestCreateInstance(t *testing.T) {
testcases := []struct {
name string
machine clusterv1.Machine
machineConfig *v1alpha2.AWSMachineSpec
awsCluster *v1alpha2.AWSCluster
expect func(m *mock_ec2iface.MockEC2APIMockRecorder)
check func(instance *v1alpha2.Instance, err error)
}{
{
name: "simple",
machine: clusterv1.Machine{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{"set": "node"},
},
Spec: clusterv1.MachineSpec{
Bootstrap: clusterv1.Bootstrap{
Data: pointer.StringPtr("user-data"),
},
},
},
machineConfig: &v1alpha2.AWSMachineSpec{
AMI: v1alpha2.AWSResourceReference{
ID: aws.String("abc"),
},
InstanceType: "m5.large",
},
awsCluster: &v1alpha2.AWSCluster{
Spec: v1alpha2.AWSClusterSpec{
NetworkSpec: v1alpha2.NetworkSpec{
Subnets: v1alpha2.Subnets{
&v1alpha2.SubnetSpec{
ID: "subnet-1",
IsPublic: false,
},
&v1alpha2.SubnetSpec{
IsPublic: false,
},
},
},
},
Status: v1alpha2.AWSClusterStatus{
Network: v1alpha2.Network{
SecurityGroups: map[v1alpha2.SecurityGroupRole]v1alpha2.SecurityGroup{
v1alpha2.SecurityGroupControlPlane: {
ID: "1",
},
v1alpha2.SecurityGroupNode: {
ID: "2",
},
v1alpha2.SecurityGroupLB: {
ID: "3",
},
},
APIServerELB: v1alpha2.ClassicELB{
DNSName: "test-apiserver.us-east-1.aws",
},
},
},
},
expect: func(m *mock_ec2iface.MockEC2APIMockRecorder) {
m.
DescribeImages(gomock.Any()).
Return(&ec2.DescribeImagesOutput{
Images: []*ec2.Image{
{
Name: aws.String("ami-1"),
},
},
}, nil)
m. // TODO: Restore these parameters, but with the tags as well
RunInstances(gomock.Any()).
Return(&ec2.Reservation{
Instances: []*ec2.Instance{
{
State: &ec2.InstanceState{
Name: aws.String(ec2.InstanceStateNamePending),
},
IamInstanceProfile: &ec2.IamInstanceProfile{
Arn: aws.String("arn:aws:iam::123456789012:instance-profile/foo"),
},
InstanceId: aws.String("two"),
InstanceType: aws.String("m5.large"),
SubnetId: aws.String("subnet-1"),
ImageId: aws.String("ami-1"),
},
},
}, nil)
m.WaitUntilInstanceRunningWithContext(gomock.Any(), gomock.Any(), gomock.Any()).
Return(nil)
},
check: func(instance *v1alpha2.Instance, err error) {
if err != nil {
t.Fatalf("did not expect error: %v", err)
}
},
},
{
name: "with availability zone",
machine: clusterv1.Machine{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{"set": "node"},
},
Spec: clusterv1.MachineSpec{
Bootstrap: clusterv1.Bootstrap{
Data: pointer.StringPtr("user-data"),
},
},
},
machineConfig: &v1alpha2.AWSMachineSpec{
AMI: v1alpha2.AWSResourceReference{
ID: aws.String("abc"),
},
InstanceType: "m5.2xlarge",
AvailabilityZone: aws.String("us-east-1c"),
},
awsCluster: &v1alpha2.AWSCluster{
Spec: v1alpha2.AWSClusterSpec{
NetworkSpec: v1alpha2.NetworkSpec{
Subnets: v1alpha2.Subnets{
&v1alpha2.SubnetSpec{
ID: "subnet-1",
AvailabilityZone: "us-east-1a",
IsPublic: false,
},
&v1alpha2.SubnetSpec{
ID: "subnet-2",
AvailabilityZone: "us-east-1b",
IsPublic: false,
},
&v1alpha2.SubnetSpec{
ID: "subnet-3",
AvailabilityZone: "us-east-1c",
IsPublic: false,
},
&v1alpha2.SubnetSpec{
ID: "subnet-3-public",
AvailabilityZone: "us-east-1c",
IsPublic: true,
},
},
},
},
Status: v1alpha2.AWSClusterStatus{
Network: v1alpha2.Network{
SecurityGroups: map[v1alpha2.SecurityGroupRole]v1alpha2.SecurityGroup{
v1alpha2.SecurityGroupControlPlane: {
ID: "1",
},
v1alpha2.SecurityGroupNode: {
ID: "2",
},
v1alpha2.SecurityGroupLB: {
ID: "3",
},
},
APIServerELB: v1alpha2.ClassicELB{
DNSName: "test-apiserver.us-east-1.aws",
},
},
},
},
expect: func(m *mock_ec2iface.MockEC2APIMockRecorder) {
m.
DescribeImages(gomock.Any()).
Return(&ec2.DescribeImagesOutput{
Images: []*ec2.Image{
{
Name: aws.String("ami-1"),
},
},
}, nil)
m.
RunInstances(gomock.Any()).
Return(&ec2.Reservation{
Instances: []*ec2.Instance{
{
State: &ec2.InstanceState{
Name: aws.String(ec2.InstanceStateNamePending),
},
IamInstanceProfile: &ec2.IamInstanceProfile{
Arn: aws.String("arn:aws:iam::123456789012:instance-profile/foo"),
},
InstanceId: aws.String("two"),
InstanceType: aws.String("m5.large"),
SubnetId: aws.String("subnet-3"),
ImageId: aws.String("ami-1"),
},
},
}, nil)
m.WaitUntilInstanceRunningWithContext(gomock.Any(), gomock.Any(), gomock.Any()).
Return(nil)
},
check: func(instance *v1alpha2.Instance, err error) {
if err != nil {
t.Fatalf("did not expect error: %v", err)
}
if instance.SubnetID != "subnet-3" {
t.Fatalf("expected subnet-3 from availability zone us-east-1c, got %q", instance.SubnetID)
}
},
},
}
for _, tc := range testcases {
t.Run(tc.name, func(t *testing.T) {
mockCtrl := gomock.NewController(t)
// defer mockCtrl.Finish()
ec2Mock := mock_ec2iface.NewMockEC2API(mockCtrl)
elbMock := mock_elbiface.NewMockELBAPI(mockCtrl)
cluster := &clusterv1.Cluster{
ObjectMeta: metav1.ObjectMeta{
Name: "test1",
},
Spec: clusterv1.ClusterSpec{
ClusterNetwork: &clusterv1.ClusterNetworkingConfig{
ServiceDomain: "cluster.local",
Services: clusterv1.NetworkRanges{
CIDRBlocks: []string{"192.168.0.0/16"},
},
Pods: clusterv1.NetworkRanges{
CIDRBlocks: []string{"192.168.0.0/16"},
},
},
},
}
machine := &clusterv1.Machine{
ObjectMeta: metav1.ObjectMeta{
Name: "test1",
Labels: map[string]string{
"set": "node",
clusterv1.MachineClusterLabelName: "test1",
},
},
Spec: clusterv1.MachineSpec{
Bootstrap: clusterv1.Bootstrap{
Data: pointer.StringPtr("user-data"),
},
},
}
awsMachine := &v1alpha2.AWSMachine{
ObjectMeta: metav1.ObjectMeta{
OwnerReferences: []metav1.OwnerReference{
{
APIVersion: clusterv1.SchemeGroupVersion.String(),
Kind: "Machine",
Name: "test1",
},
},
},
}
machineScope, err := scope.NewMachineScope(scope.MachineScopeParams{
Client: fake.NewFakeClient(cluster, machine),
AWSClients: scope.AWSClients{
EC2: ec2Mock,
ELB: elbMock,
},
Cluster: cluster,
Machine: machine,
AWSMachine: awsMachine,
})
if err != nil {
t.Fatalf("Failed to create test context: %v", err)
}
machineScope.AWSMachine.Spec = *tc.machineConfig
tc.expect(ec2Mock.EXPECT())
clusterScope, err := scope.NewClusterScope(scope.ClusterScopeParams{
Client: fake.NewFakeClient(cluster, machine),
AWSClients: scope.AWSClients{
EC2: ec2Mock,
ELB: elbMock,
},
Cluster: cluster,
AWSCluster: tc.awsCluster,
})
		if err != nil {
			t.Fatalf("Failed to create test context: %v", err)
		}
		s := NewService(clusterScope)
instance, err := s.CreateInstance(machineScope)
tc.check(instance, err)
})
}
}
| 1 | 10,497 | Shouldn't the value in Bootstrap.Data be just a plain string? | kubernetes-sigs-cluster-api-provider-aws | go |
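For context, a minimal sketch of the contrast behind this question: whether Bootstrap.Data is a *string (which forces the pointer.StringPtr wrapping seen in the fixtures above and lets "unset" be distinguished from empty) or a plain string. The struct types below are hypothetical stand-ins, not the real clusterv1 API:

package main

import "fmt"

// BootstrapPtr mirrors the shape implied by pointer.StringPtr("user-data"):
// an optional field where nil means "no bootstrap data supplied".
type BootstrapPtr struct {
	Data *string
}

// BootstrapPlain is the shape the reviewer is asking about: a plain string,
// where "unset" and "" collapse into the same value.
type BootstrapPlain struct {
	Data string
}

func main() {
	s := "user-data"
	p := BootstrapPtr{Data: &s}            // literals need a pointer helper
	q := BootstrapPlain{Data: "user-data"} // assigns directly
	fmt.Println(*p.Data, q.Data)
}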
@@ -209,7 +209,9 @@ namespace Datadog.Trace.AppSec
private void Report(ITransport transport, Span span, WafMatch[] results, bool blocked)
{
span.SetTag(Tags.AppSecEvent, "true");
- span.SetTraceSamplingPriority(SamplingPriority.UserKeep);
+            var samplingPriority = _settings.KeepTraces
+                ? SamplingPriority.UserKeep : SamplingPriority.AutoReject;
+            span.SetTraceSamplingPriority(samplingPriority);
LogMatchesIfDebugEnabled(results, blocked);
| 1 | // <copyright file="Security.cs" company="Datadog">
// Unless explicitly stated otherwise all files in this repository are licensed under the Apache 2 License.
// This product includes software developed at Datadog (https://www.datadoghq.com/). Copyright 2017 Datadog, Inc.
// </copyright>
using System;
using System.Collections.Generic;
using System.Linq;
using System.Threading;
using Datadog.Trace.AppSec.EventModel;
using Datadog.Trace.AppSec.Transport;
using Datadog.Trace.AppSec.Transports.Http;
using Datadog.Trace.AppSec.Waf;
using Datadog.Trace.AppSec.Waf.ReturnTypes.Managed;
using Datadog.Trace.ExtensionMethods;
using Datadog.Trace.Headers;
using Datadog.Trace.Logging;
using Datadog.Trace.Vendors.Newtonsoft.Json;
using Datadog.Trace.Vendors.Serilog.Events;
namespace Datadog.Trace.AppSec
{
/// <summary>
    /// The Security class is responsible for coordinating app sec
/// </summary>
internal class Security : IDatadogSecurity, IDisposable
{
private static readonly IDatadogLogger Log = DatadogLogging.GetLoggerFor<Security>();
private static readonly Dictionary<string, string> RequestHeaders;
private static readonly Dictionary<string, string> ResponseHeaders;
private static Security _instance;
private static bool _globalInstanceInitialized;
private static object _globalInstanceLock = new();
private readonly IWaf _waf;
private readonly InstrumentationGateway _instrumentationGateway;
private readonly SecuritySettings _settings;
static Security()
{
RequestHeaders = new Dictionary<string, string>()
{
{ "X-FORWARDED-FOR", string.Empty },
{ "X-CLIENT-IP", string.Empty },
{ "X-REAL-IP", string.Empty },
{ "X-FORWARDED", string.Empty },
{ "X-CLUSTER-CLIENT-IP", string.Empty },
{ "FORWARDED-FOR", string.Empty },
{ "FORWARDED", string.Empty },
{ "VIA", string.Empty },
{ "TRUE-CLIENT-IP", string.Empty },
{ "Content-Length", string.Empty },
{ "Content-Type", string.Empty },
{ "Content-Encoding", string.Empty },
{ "Content-Language", string.Empty },
{ "Host", string.Empty },
{ "user-agent", string.Empty },
{ "Accept", string.Empty },
{ "Accept-Encoding", string.Empty },
{ "Accept-Language", string.Empty },
};
ResponseHeaders = new Dictionary<string, string>()
{
{ "content-length", string.Empty },
{ "content-type", string.Empty },
{ "Content-Encoding", string.Empty },
{ "Content-Language", string.Empty },
};
}
/// <summary>
/// Initializes a new instance of the <see cref="Security"/> class with default settings.
/// </summary>
public Security()
: this(null, null)
{
}
private Security(SecuritySettings settings = null, InstrumentationGateway instrumentationGateway = null, IWaf waf = null)
{
try
{
_settings = settings ?? SecuritySettings.FromDefaultSources();
_instrumentationGateway = instrumentationGateway ?? new InstrumentationGateway();
_settings.Enabled = _settings.Enabled && AreArchitectureAndOsSupported();
if (_settings.Enabled)
{
_waf = waf ?? Waf.Waf.Create(_settings.Rules);
if (_waf != null)
{
_instrumentationGateway.InstrumentationGatewayEvent += InstrumentationGatewayInstrumentationGatewayEvent;
}
else
{
_settings.Enabled = false;
}
LifetimeManager.Instance.AddShutdownTask(RunShutdown);
}
}
catch (Exception ex)
{
_settings.Enabled = false;
Log.Error(ex, "AppSec could not start because of an unexpected error. No security activities will be collected. Please contact support at https://docs.datadoghq.com/help/ for help.");
}
}
/// <summary>
/// Gets or sets the global <see cref="Security"/> instance.
/// </summary>
public static Security Instance
{
get
{
return LazyInitializer.EnsureInitialized(ref _instance, ref _globalInstanceInitialized, ref _globalInstanceLock);
}
set
{
lock (_globalInstanceLock)
{
_instance = value;
_globalInstanceInitialized = true;
}
}
}
/// <summary>
/// Gets <see cref="InstrumentationGateway"/> instance
/// </summary>
InstrumentationGateway IDatadogSecurity.InstrumentationGateway => _instrumentationGateway;
internal InstrumentationGateway InstrumentationGateway => _instrumentationGateway;
/// <summary>
/// Gets <see cref="SecuritySettings"/> instance
/// </summary>
SecuritySettings IDatadogSecurity.Settings => _settings;
internal SecuritySettings Settings => _settings;
internal Version DdlibWafVersion => _waf?.Version;
private static void AnnotateSpan(Span span)
{
// we should only tag service entry span, the first span opened for a
// service. For WAF it's safe to assume we always have service entry spans
// we'll need to revisit this for RASP.
if (span != null)
{
span.SetMetric(Metrics.AppSecEnabled, 1.0);
span.SetTag(Tags.RuntimeFamily, TracerConstants.Language);
}
}
private static void LogMatchesIfDebugEnabled(WafMatch[] results, bool blocked)
{
if (Log.IsEnabled(LogEventLevel.Debug))
{
for (var i = 0; i < results.Length; i++)
{
var result = results[i];
if (blocked)
{
Log.Debug("Blocking current transaction (rule: {RuleId})", result.Rule);
}
else
{
Log.Debug("Detecting an attack from rule {RuleId}", result.Rule);
}
}
}
}
private static void AddHeaderTags(Span span, IHeadersCollection headers, Dictionary<string, string> headersToCollect)
{
var tags = SpanContextPropagator.Instance.ExtractHeaderTags(headers, headersToCollect, defaultTagPrefix: SpanContextPropagator.HttpResponseHeadersTagPrefix);
foreach (var tag in tags)
{
span.SetTag(tag.Key, tag.Value);
}
}
private static Span GetLocalRootSpan(Span span)
{
var localRootSpan = span.Context.TraceContext?.RootSpan;
return localRootSpan ?? span;
}
private static void TryAddEndPoint(Span span)
{
var route = span.GetTag(Tags.AspNetCoreRoute) ?? span.GetTag(Tags.AspNetRoute);
if (route != null)
{
span.SetTag(Tags.HttpEndpoint, route);
}
}
/// <summary>
        /// Frees resources
/// </summary>
public void Dispose() => _waf?.Dispose();
private void Report(ITransport transport, Span span, WafMatch[] results, bool blocked)
{
span.SetTag(Tags.AppSecEvent, "true");
span.SetTraceSamplingPriority(SamplingPriority.UserKeep);
LogMatchesIfDebugEnabled(results, blocked);
var json = JsonConvert.SerializeObject(new AppSecJson { Triggers = results });
span.SetTag(Tags.AppSecJson, json);
span.SetTag(Tags.Origin, "appsec");
span.SetTag(Tags.HttpUserAgent, transport.GetUserAget());
var reportedIpInfo = transport.GetReportedIpInfo();
span.SetTag(Tags.NetworkClientIp, reportedIpInfo.IpAddress);
var ipInfo = RequestHeadersHelper.ExtractIpAndPort(transport.GetHeader, _settings.CustomIpHeader, _settings.ExtraHeaders, transport.IsSecureConnection, reportedIpInfo);
span.SetTag(Tags.ActorIp, ipInfo.IpAddress);
var headers = transport.GetRequestHeaders();
AddHeaderTags(span, headers, RequestHeaders);
transport.OnCompleted(() =>
{
TryAddEndPoint(span);
var headers = transport.GetResponseHeaders();
AddHeaderTags(span, headers, ResponseHeaders);
});
}
private void RunWafAndReact(IDictionary<string, object> args, ITransport transport, Span span)
{
span = Security.GetLocalRootSpan(span);
AnnotateSpan(span);
var additiveContext = transport.GetAdditiveContext();
if (additiveContext == null)
{
additiveContext = _waf.CreateContext();
transport.SetAdditiveContext(additiveContext);
}
// run the WAF and execute the results
using var wafResult = additiveContext.Run(args);
if (wafResult.ReturnCode == ReturnCode.Monitor || wafResult.ReturnCode == ReturnCode.Block)
{
var block = _settings.BlockingEnabled && wafResult.ReturnCode == ReturnCode.Block;
if (block)
{
                    // blocking has been removed, awaiting a better implementation
}
var resultData = JsonConvert.DeserializeObject<WafMatch[]>(wafResult.Data);
Report(transport, span, resultData, block);
}
}
private void InstrumentationGatewayInstrumentationGatewayEvent(object sender, InstrumentationGatewayEventArgs e)
{
try
{
RunWafAndReact(e.EventData, e.Transport, e.RelatedSpan);
}
catch (Exception ex)
{
Log.Error(ex, "Call into the security module failed");
}
}
private bool AreArchitectureAndOsSupported()
{
var frameworkDescription = FrameworkDescription.Instance;
var osSupported = false;
var supportedOs = new[] { OSPlatform.Linux, OSPlatform.MacOS, OSPlatform.Windows };
if (supportedOs.Contains(frameworkDescription.OSPlatform))
{
osSupported = true;
}
var archSupported = false;
var supportedArchs = new[] { ProcessArchitecture.Arm, ProcessArchitecture.X64, ProcessArchitecture.X86 };
if (supportedArchs.Contains(frameworkDescription.ProcessArchitecture))
{
archSupported = true;
}
if (!osSupported || !archSupported)
{
Log.Error(
"AppSec could not start because the current environment is not supported. No security activities will be collected. Please contact support at https://docs.datadoghq.com/help/ for help. Host information: operating_system: {{ {OSPlatform} }}, arch: {{ {ProcessArchitecture} }}, runtime_infos: {{ {ProductVersion} }}",
frameworkDescription.OSPlatform,
frameworkDescription.ProcessArchitecture,
frameworkDescription.ProductVersion);
}
return osSupported && archSupported;
}
private void RunShutdown()
{
if (_instrumentationGateway != null)
{
_instrumentationGateway.InstrumentationGatewayEvent -= InstrumentationGatewayInstrumentationGatewayEvent;
}
Dispose();
}
}
}
| 1 | 25,056 | @robertpi Isn't this changing the sampling priority when `KeepTraces==false`? The sampling priority could be `AutoReject` or `AutoKeep`, based on the sampling decision (or the user may have specified something else). Seems like we shouldn't be changing it in this case? | DataDog-dd-trace-dotnet | .cs |
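A minimal sketch of the alternative this comment implies: force UserKeep only when KeepTraces is enabled, and otherwise leave the sampler's (or the user's) decision untouched rather than forcing AutoReject as the patch does. The types below are hypothetical stand-ins, not the tracer's real classes:

enum SamplingPriority { UserReject = -1, AutoReject = 0, AutoKeep = 1, UserKeep = 2 }

class SpanStub
{
    // null means "leave whatever the sampler or the user already decided".
    public SamplingPriority? Priority;

    public void SetTraceSamplingPriority(SamplingPriority p) => Priority = p;
}

class Reporter
{
    private readonly bool _keepTraces;

    public Reporter(bool keepTraces) => _keepTraces = keepTraces;

    public void Report(SpanStub span)
    {
        if (_keepTraces)
        {
            span.SetTraceSamplingPriority(SamplingPriority.UserKeep);
        }
        // When KeepTraces == false, nothing is overridden: an existing
        // AutoKeep/AutoReject (or user-set) priority survives.
    }
}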
@@ -17,6 +17,11 @@ const (
DefaultCADir = "/etc/kubeedge/ca"
DefaultCertDir = "/etc/kubeedge/certs"
+ DefaultCAURL = "/ca.crt"
+ DefaultCertURL = "/edge.crt"
+
+ DefaultCloudCoreReadyCheckURL = "/readyz"
+
DefaultStreamCAFile = "/etc/kubeedge/ca/streamCA.crt"
DefaultStreamCertFile = "/etc/kubeedge/certs/stream.crt"
DefaultStreamKeyFile = "/etc/kubeedge/certs/stream.key" | 1 | package constants
import (
"time"
v1 "k8s.io/api/core/v1"
)
const (
DefaultConfigDir = "/etc/kubeedge/config/"
DefaultCAFile = "/etc/kubeedge/ca/rootCA.crt"
DefaultCAKeyFile = "/etc/kubeedge/ca/rootCA.key"
DefaultCertFile = "/etc/kubeedge/certs/server.crt"
DefaultKeyFile = "/etc/kubeedge/certs/server.key"
DefaultEdgeCertFile = "/etc/kubeedge/certs/edge.crt"
DefaultEdgeKeyFile = "/etc/kubeedge/certs/edge.key"
DefaultCADir = "/etc/kubeedge/ca"
DefaultCertDir = "/etc/kubeedge/certs"
DefaultStreamCAFile = "/etc/kubeedge/ca/streamCA.crt"
DefaultStreamCertFile = "/etc/kubeedge/certs/stream.crt"
DefaultStreamKeyFile = "/etc/kubeedge/certs/stream.key"
)
const (
DefaultDockerAddress = "unix:///var/run/docker.sock"
DefaultRuntimeType = "docker"
DefaultEdgedMemoryCapacity = 7852396000
DefaultRemoteRuntimeEndpoint = "unix:///var/run/dockershim.sock"
DefaultRemoteImageEndpoint = "unix:///var/run/dockershim.sock"
DefaultPodSandboxImage = "kubeedge/pause:3.1"
DefaultArmPodSandboxImage = "kubeedge/pause-arm:3.1"
DefaultArm64PodSandboxImage = "kubeedge/pause-arm64:3.1"
DefaultNodeStatusUpdateFrequency = 10
DefaultImagePullProgressDeadline = 60
DefaultRuntimeRequestTimeout = 2
DefaultImageGCHighThreshold = 80
DefaultImageGCLowThreshold = 40
DefaultMaximumDeadContainersPerPod = 1
DefaultHostnameOverride = "default-edge-node"
DefaultRegisterNodeNamespace = "default"
DefaultInterfaceName = "eth0"
DefaultCNIConfDir = "/etc/cni/net.d"
DefaultCNIBinDir = "/opt/cni/bin"
DefaultCNICacheDir = "/var/lib/cni/cache"
DefaultNetworkPluginMTU = 1500
DefaultConcurrentConsumers = 5
DefaultCgroupRoot = ""
DefaultVolumeStatsAggPeriod = time.Minute
DefaultTunnelPort = 10004
)
const (
DefaultPodStatusSyncInterval = 60
)
// Config
const (
DefaultKubeContentType = "application/vnd.kubernetes.protobuf"
DefaultKubeConfig = "/root/.kube/config"
DefaultKubeNamespace = v1.NamespaceAll
DefaultKubeQPS = 100.0
DefaultKubeBurst = 200
DefaultKubeUpdateNodeFrequency = 20
DefaultUpdatePodStatusWorkers = 1
DefaultUpdateNodeStatusWorkers = 1
DefaultQueryConfigMapWorkers = 4
DefaultQuerySecretWorkers = 4
DefaultQueryServiceWorkers = 4
DefaultQueryEndpointsWorkers = 4
DefaultQueryPersistentVolumeWorkers = 4
DefaultQueryPersistentVolumeClaimWorkers = 4
DefaultQueryVolumeAttachmentWorkers = 4
DefaultQueryNodeWorkers = 4
DefaultUpdateNodeWorkers = 4
DefaultDeletePodWorkers = 4
DefaultUpdatePodStatusBuffer = 1024
DefaultUpdateNodeStatusBuffer = 1024
DefaultQueryConfigMapBuffer = 1024
DefaultQuerySecretBuffer = 1024
DefaultQueryServiceBuffer = 1024
DefaultQueryEndpointsBuffer = 1024
DefaultQueryPersistentVolumeBuffer = 1024
DefaultQueryPersistentVolumeClaimBuffer = 1024
DefaultQueryVolumeAttachmentBuffer = 1024
DefaultQueryNodeBuffer = 1024
DefaultUpdateNodeBuffer = 1024
DefaultDeletePodBuffer = 1024
DefaultETCDTimeout = 10
DefaultEnableElection = false
DefaultElectionTTL = 30
DefaultElectionPrefix = "/controller/leader"
DefaultMessageLayer = "context"
DefaultContextSendModuleName = "cloudhub"
DefaultContextReceiveModuleName = "edgecontroller"
DefaultContextResponseModuleName = "cloudhub"
DefaultPodEventBuffer = 1
DefaultConfigMapEventBuffer = 1
DefaultSecretEventBuffer = 1
DefaultServiceEventBuffer = 1
DefaultEndpointsEventBuffer = 1
// Resource sep
ResourceSep = "/"
ResourceTypeService = "service"
ResourceTypeServiceList = "servicelist"
ResourceTypeEndpoints = "endpoints"
ResourceTypeEndpointsList = "endpointslist"
ResourceTypeListener = "listener"
ResourceTypePersistentVolume = "persistentvolume"
ResourceTypePersistentVolumeClaim = "persistentvolumeclaim"
ResourceTypeVolumeAttachment = "volumeattachment"
CSIResourceTypeVolume = "volume"
CSIOperationTypeCreateVolume = "createvolume"
CSIOperationTypeDeleteVolume = "deletevolume"
CSIOperationTypeControllerPublishVolume = "controllerpublishvolume"
CSIOperationTypeControllerUnpublishVolume = "controllerunpublishvolume"
CSISyncMsgRespTimeout = 1 * time.Minute
CurrentSupportK8sVersion = "v1.17.1"
)
const (
DefaultUpdateDeviceStatusBuffer = 1024
DefaultDeviceEventBuffer = 1
DefaultDeviceModelEventBuffer = 1
DefaultUpdateDeviceStatusWorkers = 1
)
const (
// TODO put all modulename and group name together @kadisi
DeviceTwinModuleName = "twin"
)
const (
// ServerPort is the default port for the edgecore server on each host machine.
// May be overridden by a flag at startup in the future.
ServerPort = 10350
)
| 1 | 17,235 | The const can also be used in cloud/pkg/cloudhub/servers/httpserver/server.go L46-47? | kubeedge-kubeedge | go |
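A sketch of the reuse this comment suggests for the cloudhub HTTP server: registering the certificate endpoints via the new constants instead of repeating the string literals. The mux and handler names are assumptions, not taken from the actual server.go:

package httpserver

import "net/http"

const (
	DefaultCAURL   = "/ca.crt"
	DefaultCertURL = "/edge.crt"
)

// registerCertRoutes wires the endpoints through the shared constants, so
// edgecore (the client) and cloudcore (the server) agree on the paths by
// construction rather than by duplicated literals.
func registerCertRoutes(mux *http.ServeMux, caHandler, certHandler http.HandlerFunc) {
	mux.HandleFunc(DefaultCAURL, caHandler)
	mux.HandleFunc(DefaultCertURL, certHandler)
}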
@@ -82,7 +82,7 @@ Blockly.Procedures.allProcedureMutations = function(root) {
var blocks = root.getAllBlocks();
var mutations = [];
for (var i = 0; i < blocks.length; i++) {
- if (blocks[i].type === 'procedures_prototype') {
+ if (blocks[i].type == 'procedures_prototype') {
var mutation = blocks[i].mutationToDom();
if (mutation) {
mutations.push(mutation); | 1 | /**
* @license
* Visual Blocks Editor
*
* Copyright 2012 Google Inc.
* https://developers.google.com/blockly/
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* @fileoverview Utility functions for handling procedures.
* @author fraser@google.com (Neil Fraser)
*/
'use strict';
/**
* @name Blockly.Procedures
* @namespace
**/
goog.provide('Blockly.Procedures');
goog.require('Blockly.Blocks');
goog.require('Blockly.constants');
goog.require('Blockly.Field');
goog.require('Blockly.Names');
goog.require('Blockly.Workspace');
/**
* Constant to separate procedure names from variables and generated functions
* when running generators.
* @deprecated Use Blockly.PROCEDURE_CATEGORY_NAME
*/
Blockly.Procedures.NAME_TYPE = Blockly.PROCEDURE_CATEGORY_NAME;
/**
* Find all user-created procedure definitions in a workspace.
* @param {!Blockly.Workspace} root Root workspace.
* @return {!Array.<!Array.<!Array>>} Pair of arrays, the
* first contains procedures without return variables, the second with.
* Each procedure is defined by a three-element list of name, parameter
* list, and return value boolean.
*/
Blockly.Procedures.allProcedures = function(root) {
var blocks = root.getAllBlocks();
var proceduresReturn = [];
var proceduresNoReturn = [];
for (var i = 0; i < blocks.length; i++) {
if (blocks[i].getProcedureDef) {
var tuple = blocks[i].getProcedureDef();
if (tuple) {
if (tuple[2]) {
proceduresReturn.push(tuple);
} else {
proceduresNoReturn.push(tuple);
}
}
}
}
proceduresNoReturn.sort(Blockly.Procedures.procTupleComparator_);
proceduresReturn.sort(Blockly.Procedures.procTupleComparator_);
return [proceduresNoReturn, proceduresReturn];
};
/**
* Find all user-created procedure definition mutations in a workspace.
* @param {!Blockly.Workspace} root Root workspace.
* @return {!Array.<Element>} Array of mutation xml elements.
*/
Blockly.Procedures.allProcedureMutations = function(root) {
var blocks = root.getAllBlocks();
var mutations = [];
for (var i = 0; i < blocks.length; i++) {
if (blocks[i].type === 'procedures_prototype') {
var mutation = blocks[i].mutationToDom();
if (mutation) {
mutations.push(mutation);
}
}
}
return mutations;
};
/**
* Comparison function for case-insensitive sorting of the first element of
* a tuple.
* @param {!Array} ta First tuple.
* @param {!Array} tb Second tuple.
 * @return {number} -1, 0, or 1 to signify less than, equality, or greater than.
* @private
*/
Blockly.Procedures.procTupleComparator_ = function(ta, tb) {
return ta[0].toLowerCase().localeCompare(tb[0].toLowerCase());
};
/**
* Ensure two identically-named procedures don't exist.
* @param {string} name Proposed procedure name.
* @param {!Blockly.Block} block Block to disambiguate.
* @return {string} Non-colliding name.
*/
Blockly.Procedures.findLegalName = function(name, block) {
if (block.isInFlyout) {
// Flyouts can have multiple procedures called 'do something'.
return name;
}
while (!Blockly.Procedures.isLegalName_(name, block.workspace, block)) {
// Collision with another procedure.
var r = name.match(/^(.*?)(\d+)$/);
if (!r) {
name += '2';
} else {
name = r[1] + (parseInt(r[2], 10) + 1);
}
}
return name;
};
/**
* Does this procedure have a legal name? Illegal names include names of
* procedures already defined.
* @param {string} name The questionable name.
* @param {!Blockly.Workspace} workspace The workspace to scan for collisions.
* @param {Blockly.Block=} opt_exclude Optional block to exclude from
* comparisons (one doesn't want to collide with oneself).
* @return {boolean} True if the name is legal.
* @private
*/
Blockly.Procedures.isLegalName_ = function(name, workspace, opt_exclude) {
return !Blockly.Procedures.isNameUsed(name, workspace, opt_exclude);
};
/**
* Return if the given name is already a procedure name.
* @param {string} name The questionable name.
* @param {!Blockly.Workspace} workspace The workspace to scan for collisions.
* @param {Blockly.Block=} opt_exclude Optional block to exclude from
* comparisons (one doesn't want to collide with oneself).
* @return {boolean} True if the name is used, otherwise return false.
*/
Blockly.Procedures.isNameUsed = function(name, workspace, opt_exclude) {
var blocks = workspace.getAllBlocks();
// Iterate through every block and check the name.
for (var i = 0; i < blocks.length; i++) {
if (blocks[i] == opt_exclude) {
continue;
}
if (blocks[i].getProcedureDef) {
var procName = blocks[i].getProcedureDef();
if (Blockly.Names.equals(procName[0], name)) {
        return true;
}
}
}
  return false;
};
/**
* Rename a procedure. Called by the editable field.
* @param {string} name The proposed new name.
* @return {string} The accepted name.
* @this {Blockly.Field}
*/
Blockly.Procedures.rename = function(name) {
// Strip leading and trailing whitespace. Beyond this, all names are legal.
name = name.replace(/^[\s\xa0]+|[\s\xa0]+$/g, '');
// Ensure two identically-named procedures don't exist.
var legalName = Blockly.Procedures.findLegalName(name, this.sourceBlock_);
var oldName = this.text_;
if (oldName != name && oldName != legalName) {
// Rename any callers.
var blocks = this.sourceBlock_.workspace.getAllBlocks();
for (var i = 0; i < blocks.length; i++) {
if (blocks[i].renameProcedure) {
blocks[i].renameProcedure(oldName, legalName);
}
}
}
return legalName;
};
/**
* Construct the blocks required by the flyout for the procedure category.
 * @param {!Blockly.Workspace} workspace The workspace containing procedures.
* @return {!Array.<!Element>} Array of XML block elements.
*/
Blockly.Procedures.flyoutCategory = function(workspace) {
var xmlList = [];
Blockly.Procedures.addCreateButton_(workspace, xmlList);
// Create call blocks for each procedure defined in the workspace
var mutations = Blockly.Procedures.allProcedureMutations(workspace);
for (var i = 0; i < mutations.length; i++) {
var mutation = mutations[i];
// <block type="procedures_call">
// <mutation ...></mutation>
// </block>
var block = goog.dom.createDom('block');
block.setAttribute('type', 'procedures_call');
block.setAttribute('gap', 16);
block.appendChild(mutation);
xmlList.push(block);
}
return xmlList;
};
/**
* Create the "Make a Block..." button.
 * @param {!Blockly.Workspace} workspace The workspace containing procedures.
* @param {!Array.<!Element>} xmlList Array of XML block elements to add to.
* @private
*/
Blockly.Procedures.addCreateButton_ = function(workspace, xmlList) {
var button = goog.dom.createDom('button');
var msg = Blockly.Msg.NEW_PROCEDURE;
var callbackKey = 'CREATE_PROCEDURE';
var callback = function() {
Blockly.Procedures.createProcedureDefCallback_();
};
button.setAttribute('text', msg);
button.setAttribute('callbackKey', callbackKey);
workspace.registerButtonCallback(callbackKey, callback);
xmlList.push(button);
};
/**
* Find all callers of a named procedure.
* @param {string} name Name of procedure (procCode in scratch-blocks).
* @param {!Blockly.Workspace} ws The workspace to find callers in.
* @param {!Blockly.Block} definitionRoot The root of the stack where the
* procedure is defined.
* @param {boolean} allowRecursive True if the search should include recursive
* procedure calls. False if the search should ignore the stack starting
* with definitionRoot.
* @return {!Array.<!Blockly.Block>} Array of caller blocks.
* @package
*/
Blockly.Procedures.getCallers = function(name, ws, definitionRoot,
allowRecursive) {
var allBlocks = [];
var topBlocks = ws.getTopBlocks();
// Start by deciding which stacks to investigate.
for (var i = 0; i < topBlocks.length; i++) {
var block = topBlocks[i];
if (block.id == definitionRoot.id && !allowRecursive) {
continue;
}
allBlocks.push.apply(allBlocks, block.getDescendants());
}
var callers = [];
for (var i = 0; i < allBlocks.length; i++) {
var block = allBlocks[i];
    if (block.type == Blockly.PROCEDURES_CALL_BLOCK_TYPE) {
var procCode = block.getProcCode();
if (procCode && procCode == name) {
callers.push(block);
}
}
}
return callers;
};
/**
* When a procedure definition changes its parameters, find and edit all its
* callers.
* @param {!Blockly.Block} defBlock Procedure definition block.
*/
Blockly.Procedures.mutateCallers = function(defBlock) {
// TODO(#1143) Update this for scratch procedures.
var oldRecordUndo = Blockly.Events.recordUndo;
var name = defBlock.getProcedureDef()[0];
var xmlElement = defBlock.mutationToDom(true);
var callers = Blockly.Procedures.getCallers(name, defBlock.workspace);
for (var i = 0, caller; caller = callers[i]; i++) {
var oldMutationDom = caller.mutationToDom();
var oldMutation = oldMutationDom && Blockly.Xml.domToText(oldMutationDom);
caller.domToMutation(xmlElement);
var newMutationDom = caller.mutationToDom();
var newMutation = newMutationDom && Blockly.Xml.domToText(newMutationDom);
if (oldMutation != newMutation) {
// Fire a mutation on every caller block. But don't record this as an
// undo action since it is deterministically tied to the procedure's
// definition mutation.
Blockly.Events.recordUndo = false;
Blockly.Events.fire(new Blockly.Events.BlockChange(
caller, 'mutation', null, oldMutation, newMutation));
Blockly.Events.recordUndo = oldRecordUndo;
}
}
};
/**
* Find the definition block for the named procedure.
* @param {string} name Name of procedure.
* @param {!Blockly.Workspace} workspace The workspace to search.
 * @return {Blockly.Block} The procedure definition block, or null if not found.
*/
Blockly.Procedures.getDefinition = function(name, workspace) {
// Assume that a procedure definition is a top block.
var blocks = workspace.getTopBlocks(false);
for (var i = 0; i < blocks.length; i++) {
if (blocks[i].getProcedureDef) {
var tuple = blocks[i].getProcedureDef();
if (tuple && Blockly.Names.equals(tuple[0], name)) {
return blocks[i];
}
}
}
return null;
};
/**
* Callback to create a new procedure custom command block.
* TODO(#1299): Implement.
* @private
*/
Blockly.Procedures.createProcedureDefCallback_ = function() {
alert('TODO(#1299) Implement procedure creation callback.');
};
/**
* Callback to open the modal for editing custom procedures.
* TODO(#603): Implement.
* @param {!Blockly.Block} block The block that was right-clicked.
* @private
*/
Blockly.Procedures.editProcedureCallback_ = function(block) {
if (block.type == Blockly.PROCEDURES_DEFINITION_BLOCK_TYPE) {
var input = block.getInput('custom_block');
if (!input) {
alert('Bad input'); // TODO: Decide what to do about this.
return;
}
var conn = input.connection;
if (!conn) {
alert('Bad connection'); // TODO: Decide what to do about this.
return;
}
var innerBlock = conn.targetBlock();
    if (!innerBlock ||
        innerBlock.type != Blockly.PROCEDURES_PROTOTYPE_BLOCK_TYPE) {
alert('Bad inner block'); // TODO: Decide what to do about this.
return;
}
block = innerBlock;
}
alert('TODO(#603): implement function editing (procCode was "' +
block.procCode_ + '")');
};
/**
* Make a context menu option for editing a custom procedure.
* This appears in the context menu for procedure definitions and procedure
* calls.
* @param {!Blockly.BlockSvg} block The block where the right-click originated.
* @return {!Object} A menu option, containing text, enabled, and a callback.
* @package
*/
Blockly.Procedures.makeEditOption = function(block) {
var editOption = {
enabled: true,
text: Blockly.Msg.EDIT_PROCEDURE,
callback: function() {
Blockly.Procedures.editProcedureCallback_(block);
}
};
return editOption;
};
/**
* Callback to show the procedure definition corresponding to a custom command
* block.
* TODO(#1136): Implement.
* @param {!Blockly.Block} block The block that was right-clicked.
* @private
*/
Blockly.Procedures.showProcedureDefCallback_ = function(block) {
alert('TODO(#1136): implement showing procedure definition (procCode was "' +
block.procCode_ + '")');
};
/**
* Make a context menu option for showing the definition for a custom procedure,
* based on a right-click on a custom command block.
* @param {!Blockly.BlockSvg} block The block where the right-click originated.
* @return {!Object} A menu option, containing text, enabled, and a callback.
* @package
*/
Blockly.Procedures.makeShowDefinitionOption = function(block) {
var option = {
enabled: true,
text: Blockly.Msg.SHOW_PROCEDURE_DEFINITION,
callback: function() {
Blockly.Procedures.showProcedureDefCallback_(block);
}
};
return option;
};
/**
 * Callback to try to delete a custom block definition.
* @param {string} procCode The identifier of the procedure to delete.
* @param {!Blockly.Block} definitionRoot The root block of the stack that
* defines the custom procedure.
* @return {boolean} True if the custom procedure was deleted, false otherwise.
* @package
*/
Blockly.Procedures.deleteProcedureDefCallback = function(procCode,
definitionRoot) {
var callers = Blockly.Procedures.getCallers(procCode,
definitionRoot.workspace, definitionRoot, false /* allowRecursive */);
if (callers.length > 0) {
return false;
}
// Delete the whole stack.
Blockly.Events.setGroup(true);
definitionRoot.dispose();
Blockly.Events.setGroup(false);
return true;
};
| 1 | 8,913 | Use a constant for `procedures_prototype`. | LLK-scratch-blocks | js |
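The file already uses Blockly.PROCEDURES_PROTOTYPE_BLOCK_TYPE in editProcedureCallback_, so the suggestion amounts to reusing that constant in allProcedureMutations instead of the string literal; a sketch of the loop with that change applied:

  for (var i = 0; i < blocks.length; i++) {
    if (blocks[i].type == Blockly.PROCEDURES_PROTOTYPE_BLOCK_TYPE) {
      var mutation = blocks[i].mutationToDom();
      if (mutation) {
        mutations.push(mutation);
      }
    }
  }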
@@ -12,7 +12,7 @@ def bad_percent(arg):
def good_percent(arg):
'''Instead of passing multiple arguments, format the message'''
- raise KeyError('Bad key: %r' % arg)
+ raise KeyError(f'Bad key: {arg!r}')
def bad_multiarg(name, value):
'''Raising a formatted string and multiple additional arguments''' | 1 | '''
Complain about multi-argument exception constructors where the first argument
contains a percent sign, thus suggesting a % string formatting was intended
instead. The same holds for a string containing {...} suggesting str.format()
was intended.
'''
# pylint: disable=redundant-u-string-prefix
def bad_percent(arg):
'''Raising a percent-formatted string and an argument'''
raise KeyError('Bad key: %r', arg) # [raising-format-tuple]
def good_percent(arg):
'''Instead of passing multiple arguments, format the message'''
raise KeyError('Bad key: %r' % arg)
def bad_multiarg(name, value):
'''Raising a formatted string and multiple additional arguments'''
raise ValueError('%s measures %.2f', name, value) # [raising-format-tuple]
def good_multiarg(name, value):
'''The arguments have to be written as a tuple for formatting'''
raise ValueError('%s measures %.2f' % (name, value))
def bad_braces(arg):
'''Curly braces as placeholders'''
raise KeyError('Bad key: {:r}', arg) # [raising-format-tuple]
def good_braces(arg):
'''Call str.format() instead'''
raise KeyError('Bad key: {:r}'.format(arg))
def bad_multistring(arg):
'''Multiple adjacent string literals'''
raise AssertionError( # [raising-format-tuple]
'Long message about %s '
"split over several adjacent literals", arg)
def bad_triplequote(arg):
'''String literals with triple quotes'''
raise AssertionError( # [raising-format-tuple]
'''Long message about %s
split over several adjacent literals''', arg)
def bad_unicode(arg):
'''Unicode string literal'''
raise ValueError(u'Bad %s', arg) # [raising-format-tuple]
def raise_something_without_name(arg):
'''Regression test for nodes without .node attribute'''
import standard_exceptions # pylint: disable=import-error,import-outside-toplevel
raise standard_exceptions.MyException(u'An %s', arg) # [raising-format-tuple]
| 1 | 15,061 | This file should probably not be touched as the formatting of the string seems to be a test in itself ? | PyCQA-pylint | py |
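For context, the contrast the fixture is built around, which an f-string rewrite of the "good" cases would stop exercising (a sketch, not part of the fixture):

def bad(arg):
    '''Flagged: the extra positional argument suggests %-formatting was intended.'''
    raise KeyError('Bad key: %r', arg)  # raising-format-tuple

def good(arg):
    '''Accepted: the message is %-formatted before raising; this is exactly
    the pattern the fixture means to keep covering.'''
    raise KeyError('Bad key: %r' % arg)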
@@ -276,8 +276,11 @@ is_sys_kill(dcontext_t *dcontext, byte *pc, byte *xsp, siginfo_t *info);
int
sigaction_syscall(int sig, kernel_sigaction_t *act, kernel_sigaction_t *oact)
{
-#if defined(X64) && !defined(VMX86_SERVER) && defined(LINUX)
+#if !defined(VMX86_SERVER) && defined(LINUX)
/* PR 305020: must have SA_RESTORER for x64 */
+    /* Must have SA_RESTORER to handle vsyscall32 being disabled
+     * (see https://github.com/DynamoRIO/dynamorio/issues/2812).
+     */
if (act != NULL && !TEST(SA_RESTORER, act->flags)) {
act->flags |= SA_RESTORER;
act->restorer = (void (*)(void)) dynamorio_sigreturn; | 1 | /* **********************************************************
* Copyright (c) 2011-2017 Google, Inc. All rights reserved.
* Copyright (c) 2000-2010 VMware, Inc. All rights reserved.
* **********************************************************/
/*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* * Neither the name of VMware, Inc. nor the names of its contributors may be
* used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL VMWARE, INC. OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
* DAMAGE.
*/
/* Copyright (c) 2003-2007 Determina Corp. */
/* Copyright (c) 2001-2003 Massachusetts Institute of Technology */
/* Copyright (c) 2000-2001 Hewlett-Packard Company */
/*
* signal.c - dynamorio signal handler
*/
#include <errno.h>
#undef errno
#include "signal_private.h" /* pulls in globals.h for us, in right order */
#ifdef LINUX
/* We want to build on older toolchains so we have our own copy of signal
* data structures
*/
# include "include/sigcontext.h"
# include "include/signalfd.h"
# include "../globals.h" /* after our sigcontext.h, to preclude bits/sigcontext.h */
#elif defined(MACOS)
# include "../globals.h" /* this defines _XOPEN_SOURCE for Mac */
# include <signal.h> /* after globals.h, for _XOPEN_SOURCE from os_exports.h */
#endif
#ifdef LINUX
# include <linux/sched.h>
#endif
#include <sys/time.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <ucontext.h>
#include <string.h> /* for memcpy and memset */
#include "os_private.h"
#include "../fragment.h"
#include "../fcache.h"
#include "../perfctr.h"
#include "arch.h"
#include "../monitor.h" /* for trace_abort */
#include "../link.h" /* for linking interrupted fragment_t */
#include "instr.h" /* to find target of SIGSEGV */
#include "decode.h" /* to find target of SIGSEGV */
#include "decode_fast.h" /* to handle self-mod code */
#include "../synch.h"
#include "../nudge.h"
#include "disassemble.h"
#include "ksynch.h"
#include "tls.h" /* tls_reinstate_selector */
#include "../translate.h"
#ifdef LINUX
# include "include/syscall.h"
#else
# include <sys/syscall.h>
#endif
#ifdef CLIENT_INTERFACE
# include "instrument.h"
#endif
#ifdef VMX86_SERVER
# include <errno.h>
#endif
#ifdef MACOS
/* Define the Linux names, which the code is already using */
# define SA_NOMASK SA_NODEFER
# define SA_ONESHOT SA_RESETHAND
#endif
/**** data structures ***************************************************/
/* The signal numbers are slightly different between operating systems.
* To support differing default actions, we have separate arrays, rather
* than indirecting to a single all-signals array.
*/
extern int default_action[];
/* We know that many signals are always asynchronous.
* Others, however, may be synchronous or may not -- e.g., another process
* could send us a SIGSEGV, and there is no way we can tell whether it
* was generated by a real memory fault or not. Thus we have to assume
* that we must not delay any SIGSEGV deliveries.
*/
extern bool can_always_delay[];
static inline bool
sig_is_alarm_signal(int sig)
{
return (sig == SIGALRM || sig == SIGVTALRM || sig == SIGPROF);
}
/* we do not use SIGSTKSZ b/c for things like code modification
* we end up calling many core routines and so want more space
* (though currently non-debug stack size == SIGSTKSZ (8KB))
*/
#define SIGSTACK_SIZE (DYNAMO_OPTION(signal_stack_size))
/* this flag not defined in our headers */
#define SA_RESTORER 0x04000000
/* if no app sigaction, it's RT, since that's our handler */
#ifdef LINUX
# define IS_RT_FOR_APP(info, sig) \
IF_X64_ELSE(true, ((info)->app_sigaction[(sig)] == NULL ? true : \
(TEST(SA_SIGINFO, (info)->app_sigaction[(sig)]->flags))))
#elif defined(MACOS)
# define IS_RT_FOR_APP(info, sig) (true)
#endif
/* kernel sets size and sp to 0 for SS_DISABLE
* when asked, will hand back SS_ONSTACK only if current xsp is inside the
* alt stack; otherwise, if an alt stack is registered, it will give flags of 0
* We do not support the "legacy stack switching" that uses the restorer field
* as seen in kernel sources.
*/
#define APP_HAS_SIGSTACK(info) \
((info)->app_sigstack.ss_sp != NULL && (info)->app_sigstack.ss_flags != SS_DISABLE)
/* If we only intercept a few signals, we leave whether un-intercepted signals
* are blocked unchanged and stored in the kernel. If we intercept all (not
* quite yet: PR 297033, hence the need for this macro) we emulate the mask for
* all.
*/
#define EMULATE_SIGMASK(info, sig) \
(DYNAMO_OPTION(intercept_all_signals) || (info)->we_intercept[(sig)])
/* i#27: custom data to pass to the child of a clone */
/* PR i#149/403015: clone record now passed via a new dstack */
typedef struct _clone_record_t {
byte *dstack; /* dstack for new thread - allocated by parent thread */
#ifdef MACOS
/* XXX i#1403: once we have lower-level, earlier thread interception we can
* likely switch to something closer to what we do on Linux.
* This is used for bsdthread_create, where app_thread_xsp is NULL;
* for vfork, app_thread_xsp is non-NULL and this is unused.
*/
void *thread_arg;
#endif
reg_t app_thread_xsp; /* app xsp preserved for new thread to use */
app_pc continuation_pc;
thread_id_t caller_id;
int clone_sysnum;
uint clone_flags;
thread_sig_info_t info;
thread_sig_info_t *parent_info;
void *pcprofile_info;
#ifdef AARCHXX
/* To ensure we have the right value as of the point of the clone, we
* store it here (we'll have races if we try to get it during new thread
* init).
*/
reg_t app_stolen_value;
# ifndef AARCH64
dr_isa_mode_t isa_mode;
# endif
/* To ensure we have the right app lib tls base in child thread,
* we store it here if necessary (clone w/o CLONE_SETTLS or vfork).
*/
void *app_lib_tls_base;
#endif
/* we leave some padding at base of stack for dynamorio_clone
* to store values
*/
reg_t for_dynamorio_clone[4];
} clone_record_t;
/* i#350: set up signal handler for safe_read/faults during init */
static thread_sig_info_t init_info;
static kernel_sigset_t init_sigmask;
#ifdef DEBUG
static bool removed_sig_handler;
#endif
os_cxt_ptr_t osc_empty;
/**** function prototypes ***********************************************/
/* in x86.asm */
void
master_signal_handler(int sig, siginfo_t *siginfo, kernel_ucontext_t *ucxt);
static void
set_handler_and_record_app(dcontext_t *dcontext, thread_sig_info_t *info, int sig,
kernel_sigaction_t *act);
static void
intercept_signal(dcontext_t *dcontext, thread_sig_info_t *info, int sig);
static void
signal_info_init_sigaction(dcontext_t *dcontext, thread_sig_info_t *info);
static void
signal_info_exit_sigaction(dcontext_t *dcontext, thread_sig_info_t *info,
bool other_thread);
static bool
execute_handler_from_cache(dcontext_t *dcontext, int sig, sigframe_rt_t *our_frame,
sigcontext_t *sc_orig, fragment_t *f
_IF_CLIENT(byte *access_address));
static bool
execute_handler_from_dispatch(dcontext_t *dcontext, int sig);
/* Execute default action from code cache and may terminate the process.
* If returns, the return value decides if caller should restore
* the untranslated context.
*/
static bool
execute_default_from_cache(dcontext_t *dcontext, int sig, sigframe_rt_t *frame,
sigcontext_t *sc_orig);
static void
execute_default_from_dispatch(dcontext_t *dcontext, int sig, sigframe_rt_t *frame);
static bool
handle_alarm(dcontext_t *dcontext, int sig, kernel_ucontext_t *ucxt);
static bool
handle_suspend_signal(dcontext_t *dcontext, kernel_ucontext_t *ucxt,
sigframe_rt_t *frame);
static bool
handle_nudge_signal(dcontext_t *dcontext, siginfo_t *siginfo, kernel_ucontext_t *ucxt);
static void
init_itimer(dcontext_t *dcontext, bool first);
static bool
set_actual_itimer(dcontext_t *dcontext, int which, thread_sig_info_t *info,
bool enable);
#ifdef DEBUG
static void
dump_sigset(dcontext_t *dcontext, kernel_sigset_t *set);
#endif
static bool
is_sys_kill(dcontext_t *dcontext, byte *pc, byte *xsp, siginfo_t *info);
int
sigaction_syscall(int sig, kernel_sigaction_t *act, kernel_sigaction_t *oact)
{
#if defined(X64) && !defined(VMX86_SERVER) && defined(LINUX)
/* PR 305020: must have SA_RESTORER for x64 */
if (act != NULL && !TEST(SA_RESTORER, act->flags)) {
act->flags |= SA_RESTORER;
act->restorer = (void (*)(void)) dynamorio_sigreturn;
}
#endif
return dynamorio_syscall(IF_MACOS_ELSE(SYS_sigaction,SYS_rt_sigaction),
4, sig, act, oact, sizeof(kernel_sigset_t));
}
static inline bool
signal_is_interceptable(int sig)
{
return (sig != SIGKILL && sig != SIGSTOP);
}
static inline int
sigaltstack_syscall(const stack_t *newstack, stack_t *oldstack)
{
return dynamorio_syscall(SYS_sigaltstack, 2, newstack, oldstack);
}
static inline int
getitimer_syscall(int which, struct itimerval *val)
{
return dynamorio_syscall(SYS_getitimer, 2, which, val);
}
static inline int
setitimer_syscall(int which, struct itimerval *val, struct itimerval *old)
{
return dynamorio_syscall(SYS_setitimer, 3, which, val, old);
}
static inline int
sigprocmask_syscall(int how, kernel_sigset_t *set, kernel_sigset_t *oset,
size_t sigsetsize)
{
return dynamorio_syscall(IF_MACOS_ELSE(SYS_sigprocmask,SYS_rt_sigprocmask),
4, how, set, oset, sigsetsize);
}
static void
unblock_all_signals(kernel_sigset_t *oset)
{
kernel_sigset_t set;
kernel_sigemptyset(&set);
sigprocmask_syscall(SIG_SETMASK, &set, oset, sizeof(set));
}
/* exported for stackdump.c */
bool
set_default_signal_action(int sig)
{
kernel_sigset_t set;
kernel_sigaction_t act;
int rc;
memset(&act, 0, sizeof(act));
act.handler = (handler_t) SIG_DFL;
/* arm the signal */
rc = sigaction_syscall(sig, &act, NULL);
DODEBUG({ removed_sig_handler = true; });
/* If we're in our handler now, we have to unblock */
kernel_sigemptyset(&set);
kernel_sigaddset(&set, sig);
sigprocmask_syscall(SIG_UNBLOCK, &set, NULL, sizeof(set));
return (rc == 0);
}
static bool
set_ignore_signal_action(int sig)
{
kernel_sigaction_t act;
int rc;
memset(&act, 0, sizeof(act));
act.handler = (handler_t) SIG_IGN;
/* arm the signal */
rc = sigaction_syscall(sig, &act, NULL);
return (rc == 0);
}
/* We assume that signal handlers will be shared most of the time
* (pthreads shares them)
* Rather than start out with the handler table in local memory and then
* having to transfer to global, we just always use global
*/
static void
handler_free(dcontext_t *dcontext, void *p, size_t size)
{
global_heap_free(p, size HEAPACCT(ACCT_OTHER));
}
static void *
handler_alloc(dcontext_t *dcontext, size_t size)
{
return global_heap_alloc(size HEAPACCT(ACCT_OTHER));
}
/**** top-level routines ***********************************************/
static bool
os_itimers_thread_shared(void)
{
static bool itimers_shared;
static bool cached = false;
if (!cached) {
file_t f = os_open("/proc/version", OS_OPEN_READ);
if (f != INVALID_FILE) {
char buf[128];
int major, minor, rel;
os_read(f, buf, BUFFER_SIZE_ELEMENTS(buf));
NULL_TERMINATE_BUFFER(buf);
if (sscanf(buf, "%*s %*s %d.%d.%d", &major, &minor, &rel) == 3) {
/* Linux NPTL in kernel 2.6.12+ has POSIX-style itimers shared
* among threads.
*/
LOG(GLOBAL, LOG_ASYNCH, 1, "kernel version = %d.%d.%d\n",
major, minor, rel);
itimers_shared = ((major == 2 && minor >= 6 && rel >= 12) ||
(major >= 3 /* linux-3.0 or above */));
cached = true;
}
os_close(f);
}
if (!cached) {
/* assume not shared */
itimers_shared = false;
cached = true;
}
LOG(GLOBAL, LOG_ASYNCH, 1, "itimers are %s\n",
itimers_shared ? "thread-shared" : "thread-private");
}
return itimers_shared;
}
static void
unset_initial_crash_handlers(dcontext_t *dcontext)
{
ASSERT(init_info.app_sigaction != NULL);
signal_info_exit_sigaction(GLOBAL_DCONTEXT, &init_info,
false/*!other_thread*/);
/* Undo the unblock-all */
sigprocmask_syscall(SIG_SETMASK, &init_sigmask, NULL, sizeof(init_sigmask));
DOLOG(2, LOG_ASYNCH, {
LOG(THREAD, LOG_ASYNCH, 2, "initial app signal mask:\n");
dump_sigset(dcontext, &init_sigmask);
});
}
void
signal_init(void)
{
kernel_sigset_t set;
IF_LINUX(IF_X86_64(ASSERT(ALIGNED(offsetof(sigpending_t, xstate), AVX_ALIGNMENT))));
IF_MACOS(ASSERT(sizeof(kernel_sigset_t) == sizeof(__darwin_sigset_t)));
os_itimers_thread_shared();
/* Set up a handler for safe_read (or other fault detection) during
* DR init before thread is initialized.
*
* XXX: could set up a clone_record_t and pass to the initial
* signal_thread_inherit() but that would require further code changes.
* Could also call signal_thread_inherit to init this, but we don't want
* to intercept timer signals, etc. before we're ready to handle them,
* so we do a partial init.
*/
signal_info_init_sigaction(GLOBAL_DCONTEXT, &init_info);
intercept_signal(GLOBAL_DCONTEXT, &init_info, SIGSEGV);
intercept_signal(GLOBAL_DCONTEXT, &init_info, SIGBUS);
kernel_sigemptyset(&set);
kernel_sigaddset(&set, SIGSEGV);
kernel_sigaddset(&set, SIGBUS);
sigprocmask_syscall(SIG_UNBLOCK, &set, &init_sigmask, sizeof(set));
IF_LINUX(signalfd_init());
signal_arch_init();
}
void
signal_exit()
{
IF_LINUX(signalfd_exit());
#ifdef DEBUG
if (stats->loglevel > 0 && (stats->logmask & (LOG_ASYNCH|LOG_STATS)) != 0) {
LOG(GLOBAL, LOG_ASYNCH|LOG_STATS, 1,
"Total signals delivered: %d\n", GLOBAL_STAT(num_signals));
}
#endif
}
#ifdef HAVE_SIGALTSTACK
/* Separated out to run from the dstack (i#2016: see below). */
static void
set_our_alt_stack(void *arg)
{
thread_sig_info_t *info = (thread_sig_info_t *) arg;
DEBUG_DECLARE(int rc =)
sigaltstack_syscall(&info->sigstack, &info->app_sigstack);
ASSERT(rc == 0);
}
#endif
void
signal_thread_init(dcontext_t *dcontext)
{
thread_sig_info_t *info = HEAP_TYPE_ALLOC(dcontext, thread_sig_info_t,
ACCT_OTHER, PROTECTED);
size_t pend_unit_size = sizeof(sigpending_t) +
/* include alignment for xsave on xstate */
signal_frame_extra_size(true)
/* sigpending_t has xstate inside it already */
IF_LINUX(IF_X86(- sizeof(kernel_xstate_t)));
IF_LINUX(IF_X86(ASSERT(ALIGNED(pend_unit_size, AVX_ALIGNMENT))));
/* all fields want to be initialized to 0 */
memset(info, 0, sizeof(thread_sig_info_t));
dcontext->signal_field = (void *) info;
/* our special heap to avoid reentrancy problems
* composed entirely of sigpending_t units
* Note that it's fine to have the special heap do page-at-a-time
* committing, which does not use locks (unless triggers reset!),
* but if we need a new unit that will grab a lock: we try to
* avoid that by limiting the # of pending alarm signals (PR 596768).
*/
info->sigheap =
special_heap_init_aligned(pend_unit_size,
IF_X86_ELSE(AVX_ALIGNMENT, 0),
false /* cannot have any locking */,
false /* -x */,
true /* persistent */,
pend_unit_size * DYNAMO_OPTION(max_pending_signals));
#ifdef HAVE_SIGALTSTACK
/* set up alternate stack
* i#552 we may terminate the process without freeing the stack, so we
* stack_alloc it to exempt from the memory leak check.
*/
info->sigstack.ss_sp = (char *) stack_alloc(SIGSTACK_SIZE, NULL) - SIGSTACK_SIZE;
info->sigstack.ss_size = SIGSTACK_SIZE;
/* kernel will set xsp to sp+size to grow down from there, we don't have to */
info->sigstack.ss_flags = 0;
/* i#2016: for late takeover, this app thread may already be on its own alt
* stack. Not setting SA_ONSTACK for SUSPEND_SIGNAL is not sufficient to avoid
* this, as our SUSPEND_SIGNAL can interrupt the app inside its own signal
* handler. Thus, we simply swap to another stack temporarily to avoid the
* kernel complaining. The dstack is set up but it has the clone record and
* initial mcxt, so we use the new alt stack.
*/
call_switch_stack((void *)info,
(byte *)info->sigstack.ss_sp + info->sigstack.ss_size,
set_our_alt_stack, NULL, true/*return*/);
LOG(THREAD, LOG_ASYNCH, 1, "signal stack is "PFX" - "PFX"\n",
info->sigstack.ss_sp, info->sigstack.ss_sp + info->sigstack.ss_size);
/* app_sigstack dealt with below, based on parentage */
#endif
kernel_sigemptyset(&info->app_sigblocked);
ASSIGN_INIT_LOCK_FREE(info->child_lock, child_lock);
/* someone must call signal_thread_inherit() to finish initialization:
* for first thread, called from initial setup; else, from new_thread_setup
* or share_siginfo_after_take_over.
*/
}
bool
is_thread_signal_info_initialized(dcontext_t *dcontext)
{
thread_sig_info_t *info = (thread_sig_info_t*)dcontext->signal_field;
return info->fully_initialized;
}
/* i#27: create custom data to pass to the child of a clone
* since we can't rely on being able to find the caller, or that
* its syscall data is still valid, once in the child.
*
* i#149/ PR 403015: The clone record is passed to the new thread via the dstack
* created for it. Unlike before, where the child thread would create its own
* dstack, now the parent thread creates the dstack. Also, switches app stack
* to dstack.
*
* XXX i#1403: for Mac we want to eventually do lower-level earlier interception
* of threads, but for now we're later and higher-level, intercepting the user
* thread function on the new thread's stack. We ignore app_thread_xsp.
*/
void *
#ifdef MACOS
create_clone_record(dcontext_t *dcontext, reg_t *app_thread_xsp,
app_pc thread_func, void *thread_arg)
#else
create_clone_record(dcontext_t *dcontext, reg_t *app_thread_xsp)
#endif
{
clone_record_t *record;
byte *dstack = stack_alloc(DYNAMORIO_STACK_SIZE, NULL);
LOG(THREAD, LOG_ASYNCH, 1,
"create_clone_record: dstack for new thread is "PFX"\n", dstack);
#ifdef MACOS
if (app_thread_xsp == NULL) {
record = HEAP_TYPE_ALLOC(GLOBAL_DCONTEXT, clone_record_t,
ACCT_THREAD_MGT, true/*prot*/);
record->app_thread_xsp = 0;
record->continuation_pc = thread_func;
record->thread_arg = thread_arg;
record->clone_flags = CLONE_THREAD | CLONE_VM | CLONE_SIGHAND | SIGCHLD;
} else {
#endif
    /* Note that the stack grows toward low memory addresses, so dstack points
     * to the high end of the allocated stack region and we must subtract to
     * get space for the clone record.
     */
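    /* Resulting layout (a sketch matching the arithmetic below):
     *    dstack -> +----------------------+  <- top of the stack_alloc region
     *              |    clone_record_t    |
     *    record -> +----------------------+  <- dstack - sizeof(clone_record_t)
     *              |  new thread's stack  |
     *              |    (grows downward)  |
     */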
record = (clone_record_t *) (dstack - sizeof(clone_record_t));
record->app_thread_xsp = *app_thread_xsp;
/* asynch_target is set in dispatch() prior to calling pre_system_call(). */
record->continuation_pc = dcontext->asynch_target;
record->clone_flags = dcontext->sys_param0;
#ifdef MACOS
}
#endif
LOG(THREAD, LOG_ASYNCH, 1, "allocated clone record: "PFX"\n", record);
record->dstack = dstack;
record->caller_id = dcontext->owning_thread;
record->clone_sysnum = dcontext->sys_num;
record->info = *((thread_sig_info_t *)dcontext->signal_field);
record->parent_info = (thread_sig_info_t *) dcontext->signal_field;
record->pcprofile_info = dcontext->pcprofile_field;
#ifdef AARCHXX
record->app_stolen_value = get_stolen_reg_val(get_mcontext(dcontext));
# ifndef AARCH64
record->isa_mode = dr_get_isa_mode(dcontext);
# endif
/* If the child thread shares the same TLS with parent by not setting
* CLONE_SETTLS or vfork, we put the TLS base here and clear the
* thread register in new_thread_setup, so that DR can distinguish
* this case from normal pthread thread creation.
*/
record->app_lib_tls_base = (!TEST(CLONE_SETTLS, record->clone_flags)) ?
os_get_app_tls_base(dcontext, TLS_REG_LIB) : NULL;
#endif
LOG(THREAD, LOG_ASYNCH, 1,
"create_clone_record: thread "TIDFMT", pc "PFX"\n",
record->caller_id, record->continuation_pc);
#ifdef MACOS
if (app_thread_xsp != NULL) {
#endif
/* Set the thread stack to point to the dstack, below the clone record.
* Note: it's glibc who sets up the arg to the thread start function;
* the kernel just does a fork + stack swap, so we can get away w/ our
* own stack swap if we restore before the glibc asm code takes over.
*/
/* i#754: set stack to be XSTATE aligned for saving YMM registers */
ASSERT(ALIGNED(XSTATE_ALIGNMENT, REGPARM_END_ALIGN));
*app_thread_xsp = ALIGN_BACKWARD(record, XSTATE_ALIGNMENT);
#ifdef MACOS
}
#endif
return (void *) record;
}
/* This is to support dr_create_client_thread() */
void
set_clone_record_fields(void *record, reg_t app_thread_xsp, app_pc continuation_pc,
uint clone_sysnum, uint clone_flags)
{
clone_record_t *rec = (clone_record_t *) record;
ASSERT(rec != NULL);
rec->app_thread_xsp = app_thread_xsp;
rec->continuation_pc = continuation_pc;
rec->clone_sysnum = clone_sysnum;
rec->clone_flags = clone_flags;
}
/* i#149/PR 403015: The clone record is passed to the new thread by placing it
* at the bottom of the dstack, i.e., the high memory. So the new thread gets
* it from the base of the dstack. The dstack is then set as the app stack.
*
* CAUTION: don't use a lot of stack in this routine as it gets invoked on the
* dstack from new_thread_setup - this is because this routine assumes
* no more than a page of dstack has been used so far since the clone
* system call was done.
*/
void *
get_clone_record(reg_t xsp)
{
clone_record_t *record;
byte *dstack_base;
/* xsp should be in a dstack, i.e., dynamorio heap. */
ASSERT(is_dynamo_address((app_pc) xsp));
/* The (size of the clone record +
* stack used by new_thread_start (only for setting up priv_mcontext_t) +
* stack used by new_thread_setup before calling get_clone_record())
* is less than a page. This is verified by the assert below. If it does
* exceed a page, it won't happen at random during runtime, but in a
* predictable way during development, which will be caught by the assert.
* The current usage is about 800 bytes for clone_record +
* sizeof(priv_mcontext_t) + few words in new_thread_setup before
* get_clone_record() is called.
*/
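    /* For example, with a 4K page and xsp at 0x7f001f80, dstack_base computes
     * to 0x7f002000 and the record sits immediately below it.
     */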
dstack_base = (byte *) ALIGN_FORWARD(xsp, PAGE_SIZE);
record = (clone_record_t *) (dstack_base - sizeof(clone_record_t));
/* dstack_base and the dstack in the clone record should be the same. */
ASSERT(dstack_base == record->dstack);
#ifdef MACOS
ASSERT(record->app_thread_xsp != 0); /* else it's not in dstack */
#endif
return (void *) record;
}
/* i#149/PR 403015: App xsp is passed to the new thread via the clone record. */
reg_t
get_clone_record_app_xsp(void *record)
{
ASSERT(record != NULL);
return ((clone_record_t *) record)->app_thread_xsp;
}
#ifdef MACOS
void *
get_clone_record_thread_arg(void *record)
{
ASSERT(record != NULL);
return ((clone_record_t *) record)->thread_arg;
}
#endif
byte *
get_clone_record_dstack(void *record)
{
ASSERT(record != NULL);
return ((clone_record_t *) record)->dstack;
}
#ifdef AARCHXX
reg_t
get_clone_record_stolen_value(void *record)
{
ASSERT(record != NULL);
return ((clone_record_t *) record)->app_stolen_value;
}
# ifndef AARCH64
uint /* dr_isa_mode_t but we have a header ordering problem */
get_clone_record_isa_mode(void *record)
{
ASSERT(record != NULL);
return ((clone_record_t *) record)->isa_mode;
}
# endif
void
set_thread_register_from_clone_record(void *record)
{
    /* If record->app_lib_tls_base is not NULL, it means the parent
     * thread did not set up TLS for the child, and we need to clear the
     * thread register.
     */
if (((clone_record_t *)record)->app_lib_tls_base != NULL)
write_thread_register(NULL);
}
void
set_app_lib_tls_base_from_clone_record(dcontext_t *dcontext, void *record)
{
if (((clone_record_t *)record)->app_lib_tls_base != NULL) {
/* child and parent share the same TLS */
os_set_app_tls_base(dcontext, TLS_REG_LIB,
((clone_record_t *)record)->app_lib_tls_base);
}
}
#endif
/* Initializes info's app_sigaction, restorer_valid, and we_intercept fields */
static void
signal_info_init_sigaction(dcontext_t *dcontext, thread_sig_info_t *info)
{
info->app_sigaction = (kernel_sigaction_t **)
handler_alloc(dcontext, SIGARRAY_SIZE * sizeof(kernel_sigaction_t *));
memset(info->app_sigaction, 0, SIGARRAY_SIZE * sizeof(kernel_sigaction_t *));
memset(&info->restorer_valid, -1, SIGARRAY_SIZE * sizeof(info->restorer_valid[0]));
info->we_intercept = (bool *)
handler_alloc(dcontext, SIGARRAY_SIZE * sizeof(bool));
memset(info->we_intercept, 0, SIGARRAY_SIZE * sizeof(bool));
}
/* Cleans up info's app_sigaction and we_intercept entries */
static void
signal_info_exit_sigaction(dcontext_t *dcontext, thread_sig_info_t *info,
bool other_thread)
{
int i;
kernel_sigaction_t act;
memset(&act, 0, sizeof(act));
act.handler = (handler_t) SIG_DFL;
kernel_sigemptyset(&act.mask); /* does mask matter for SIG_DFL? */
for (i = 1; i <= MAX_SIGNUM; i++) {
if (!other_thread) {
if (info->app_sigaction[i] != NULL) {
/* Restore to old handler, but not if exiting whole
* process: else may get itimer during cleanup, so we
* set to SIG_IGN. We do this for detach in
* signal_remove_alarm_handlers().
*/
if (dynamo_exited && !doing_detach) {
info->app_sigaction[i]->handler = (handler_t) SIG_IGN;
sigaction_syscall(i, info->app_sigaction[i], NULL);
}
LOG(THREAD, LOG_ASYNCH, 2, "\trestoring "PFX" as handler for %d\n",
info->app_sigaction[i]->handler, i);
sigaction_syscall(i, info->app_sigaction[i], NULL);
} else if (info->we_intercept[i]) {
/* restore to default */
LOG(THREAD, LOG_ASYNCH, 2, "\trestoring SIG_DFL as handler for %d\n", i);
sigaction_syscall(i, &act, NULL);
}
}
if (info->app_sigaction[i] != NULL) {
handler_free(dcontext, info->app_sigaction[i],
sizeof(kernel_sigaction_t));
}
}
handler_free(dcontext, info->app_sigaction,
SIGARRAY_SIZE * sizeof(kernel_sigaction_t *));
info->app_sigaction = NULL;
handler_free(dcontext, info->we_intercept, SIGARRAY_SIZE * sizeof(bool));
info->we_intercept = NULL;
}
/* Called once a new thread's dcontext is created.
* Inherited and shared fields are set up here.
* The clone_record contains the continuation pc, which is returned.
*/
app_pc
signal_thread_inherit(dcontext_t *dcontext, void *clone_record)
{
app_pc res = NULL;
clone_record_t *record = (clone_record_t *) clone_record;
thread_sig_info_t *info = (thread_sig_info_t *) dcontext->signal_field;
int i;
if (record != NULL) {
app_pc continuation_pc = record->continuation_pc;
LOG(THREAD, LOG_ASYNCH, 1,
"continuation pc is "PFX"\n", continuation_pc);
LOG(THREAD, LOG_ASYNCH, 1,
"parent tid is "TIDFMT", parent sysnum is %d(%s), clone flags="PIFX"\n",
record->caller_id, record->clone_sysnum,
#ifdef SYS_vfork
(record->clone_sysnum == SYS_vfork) ? "vfork" :
#endif
(IF_LINUX(record->clone_sysnum == SYS_clone ? "clone" :)
IF_MACOS(record->clone_sysnum == SYS_bsdthread_create ? "bsdthread_create":)
"unexpected"), record->clone_flags);
#ifdef SYS_vfork
if (record->clone_sysnum == SYS_vfork) {
            /* The above clone_flags argument is bogus:
             * SYS_vfork doesn't have a free register to keep the hardcoded value;
             * see /usr/src/linux/arch/i386/kernel/process.c.
             */
/* CHECK: is this the only place real clone flags are needed? */
record->clone_flags = CLONE_VFORK | CLONE_VM | SIGCHLD;
}
#endif
/* handlers are either inherited or shared */
if (TEST(CLONE_SIGHAND, record->clone_flags)) {
/* need to share table of handlers! */
LOG(THREAD, LOG_ASYNCH, 2, "sharing signal handlers with parent\n");
info->shared_app_sigaction = true;
info->shared_refcount = record->info.shared_refcount;
info->shared_lock = record->info.shared_lock;
info->app_sigaction = record->info.app_sigaction;
info->we_intercept = record->info.we_intercept;
mutex_lock(info->shared_lock);
(*info->shared_refcount)++;
#ifdef DEBUG
for (i = 1; i <= MAX_SIGNUM; i++) {
if (info->app_sigaction[i] != NULL) {
LOG(THREAD, LOG_ASYNCH, 2, "\thandler for signal %d is "PFX"\n",
i, info->app_sigaction[i]->handler);
}
}
#endif
mutex_unlock(info->shared_lock);
} else {
/* copy handlers */
LOG(THREAD, LOG_ASYNCH, 2, "inheriting signal handlers from parent\n");
info->app_sigaction = (kernel_sigaction_t **)
handler_alloc(dcontext, SIGARRAY_SIZE * sizeof(kernel_sigaction_t *));
memset(info->app_sigaction, 0, SIGARRAY_SIZE * sizeof(kernel_sigaction_t *));
for (i = 1; i <= MAX_SIGNUM; i++) {
info->restorer_valid[i] = -1; /* clear cache */
if (record->info.app_sigaction[i] != NULL) {
info->app_sigaction[i] = (kernel_sigaction_t *)
handler_alloc(dcontext, sizeof(kernel_sigaction_t));
memcpy(info->app_sigaction[i], record->info.app_sigaction[i],
sizeof(kernel_sigaction_t));
LOG(THREAD, LOG_ASYNCH, 2, "\thandler for signal %d is "PFX"\n",
i, info->app_sigaction[i]->handler);
}
}
info->we_intercept = (bool *)
handler_alloc(dcontext, SIGARRAY_SIZE * sizeof(bool));
memcpy(info->we_intercept, record->info.we_intercept,
SIGARRAY_SIZE * sizeof(bool));
mutex_lock(&record->info.child_lock);
record->info.num_unstarted_children--;
mutex_unlock(&record->info.child_lock);
/* this should be safe since parent should wait for us */
mutex_lock(&record->parent_info->child_lock);
record->parent_info->num_unstarted_children--;
mutex_unlock(&record->parent_info->child_lock);
}
/* itimers are either private or shared */
if (TEST(CLONE_THREAD, record->clone_flags) && os_itimers_thread_shared()) {
ASSERT(record->info.shared_itimer);
LOG(THREAD, LOG_ASYNCH, 2, "sharing itimers with parent\n");
info->shared_itimer = true;
info->shared_itimer_refcount = record->info.shared_itimer_refcount;
info->shared_itimer_underDR = record->info.shared_itimer_underDR;
info->shared_itimer_lock = record->info.shared_itimer_lock;
info->itimer = record->info.itimer;
acquire_recursive_lock(info->shared_itimer_lock);
(*info->shared_itimer_refcount)++;
release_recursive_lock(info->shared_itimer_lock);
/* shared_itimer_underDR will be incremented in start_itimer() */
} else {
info->shared_itimer = false;
init_itimer(dcontext, false/*!first thread*/);
}
if (APP_HAS_SIGSTACK(info)) {
/* parent was under our control, so the real sigstack we see is just
* the parent's being inherited -- clear it now
*/
memset(&info->app_sigstack, 0, sizeof(stack_t));
info->app_sigstack.ss_flags |= SS_DISABLE;
}
        /* The rest of the state is never shared.
         * app_sigstack should already be in place: when we set up our
         * sigstack we asked for the old sigstack.
         * FIXME: are current pending or blocked signals inherited?
         */
res = continuation_pc;
#ifdef MACOS
if (record->app_thread_xsp != 0) {
HEAP_TYPE_FREE(GLOBAL_DCONTEXT, record, clone_record_t,
ACCT_THREAD_MGT, true/*prot*/);
}
#endif
} else {
/* Initialize in isolation */
if (APP_HAS_SIGSTACK(info)) {
/* parent was NOT under our control, so the real sigstack we see is
* a real sigstack that was present before we took control
*/
LOG(THREAD, LOG_ASYNCH, 1, "app already has signal stack "PFX" - "PFX"\n",
info->app_sigstack.ss_sp,
info->app_sigstack.ss_sp + info->app_sigstack.ss_size);
}
signal_info_init_sigaction(dcontext, info);
info->shared_itimer = false; /* we'll set to true if a child is created */
init_itimer(dcontext, true/*first*/);
/* We split init vs start for the signal handlers and mask. We do not
* install ours until we start running the app, to avoid races like
* i#2335. We'll set them up when os_process_under_dynamorio_*() invokes
* signal_reinstate_handlers(). All we do now is mark which signals we
* want to intercept.
*/
if (DYNAMO_OPTION(intercept_all_signals)) {
/* PR 304708: to support client signal handlers without
* the complexity of per-thread and per-signal callbacks
* we always intercept all signals. We also check here
* for handlers the app registered before our init.
*/
for (i=1; i<=MAX_SIGNUM; i++) {
/* cannot intercept KILL or STOP */
if (signal_is_interceptable(i) &&
/* FIXME PR 297033: we don't support intercepting DEFAULT_STOP /
                     * DEFAULT_CONTINUE signals. Once we add support, update
* dr_register_signal_event() comments.
*/
default_action[i] != DEFAULT_STOP &&
default_action[i] != DEFAULT_CONTINUE)
info->we_intercept[i] = true;
}
} else {
/* we intercept the following signals ourselves: */
info->we_intercept[SIGSEGV] = true;
/* PR 313665: look for DR crashes on unaligned memory or mmap bounds */
info->we_intercept[SIGBUS] = true;
/* PR 212090: the signal we use to suspend threads */
info->we_intercept[SUSPEND_SIGNAL] = true;
#ifdef PAPI
/* use SIGPROF for updating gui so it can be distinguished from SIGVTALRM */
info->we_intercept[SIGPROF] = true;
#endif
            /* SIGVTALRM is only used with pc profiling. It interferes w/ PAPI,
             * so arm this signal only if necessary.
             */
if (INTERNAL_OPTION(profile_pcs)) {
info->we_intercept[SIGVTALRM] = true;
}
#ifdef CLIENT_INTERFACE
info->we_intercept[SIGALRM] = true;
#endif
#ifdef SIDELINE
info->we_intercept[SIGCHLD] = true;
#endif
/* i#61/PR 211530: the signal we use for nudges */
info->we_intercept[NUDGESIG_SIGNUM] = true;
}
/* should be 1st thread */
if (get_num_threads() > 1)
ASSERT_NOT_REACHED();
/* FIXME: any way to recover if not 1st thread? */
res = NULL;
}
/* only when SIGVTALRM handler is in place should we start itimer (PR 537743) */
if (INTERNAL_OPTION(profile_pcs)) {
/* even if the parent thread exits, we can use a pointer to its
* pcprofile_info b/c when shared it's process-shared and is not freed
* until the entire process exits
*/
pcprofile_thread_init(dcontext, info->shared_itimer,
(record == NULL) ? NULL : record->pcprofile_info);
}
/* Assumed to be async safe. */
info->fully_initialized = true;
return res;
}
/* When taking over existing app threads, we assume they're using pthreads and
* expect to share signal handlers, memory, thread group id, etc.
*/
void
share_siginfo_after_take_over(dcontext_t *dcontext, dcontext_t *takeover_dc)
{
clone_record_t crec = {0,};
thread_sig_info_t *parent_siginfo =
(thread_sig_info_t*)takeover_dc->signal_field;
/* Create a fake clone record with the given siginfo. All threads in the
* same thread group must share signal handlers since Linux 2.5.35, but we
* have to guess at the other flags.
* FIXME i#764: If we take over non-pthreads threads, we'll need some way to
* tell if they're sharing signal handlers or not.
*/
crec.caller_id = takeover_dc->owning_thread;
#ifdef LINUX
crec.clone_sysnum = SYS_clone;
#else
ASSERT_NOT_IMPLEMENTED(false); /* FIXME i#58: NYI on Mac */
#endif
crec.clone_flags = PTHREAD_CLONE_FLAGS;
crec.parent_info = parent_siginfo;
crec.info = *parent_siginfo;
crec.pcprofile_info = takeover_dc->pcprofile_field;
signal_thread_inherit(dcontext, &crec);
}
/* This is split from os_fork_init() so the new logfiles are available
* (xref i#189/PR 452168). It had to be after dynamo_other_thread_exit()
* called in dynamorio_fork_init() after os_fork_init() else we clean
* up data structs used in signal_thread_exit().
*/
void
signal_fork_init(dcontext_t *dcontext)
{
thread_sig_info_t *info = (thread_sig_info_t *) dcontext->signal_field;
int i;
/* Child of fork is a single thread in a new process so should
* start over w/ no sharing (xref i#190/PR 452178)
*/
if (info->shared_app_sigaction) {
info->shared_app_sigaction = false;
if (info->shared_lock != NULL) {
DELETE_LOCK(*info->shared_lock);
global_heap_free(info->shared_lock, sizeof(mutex_t) HEAPACCT(ACCT_OTHER));
}
if (info->shared_refcount != NULL)
global_heap_free(info->shared_refcount, sizeof(int) HEAPACCT(ACCT_OTHER));
info->shared_lock = NULL;
info->shared_refcount = NULL;
}
if (info->shared_itimer) {
/* itimers are not inherited across fork */
info->shared_itimer = false;
if (os_itimers_thread_shared())
global_heap_free(info->itimer, sizeof(*info->itimer) HEAPACCT(ACCT_OTHER));
else
heap_free(dcontext, info->itimer, sizeof(*info->itimer) HEAPACCT(ACCT_OTHER));
info->itimer = NULL; /* reset by init_itimer */
ASSERT(info->shared_itimer_lock != NULL);
DELETE_RECURSIVE_LOCK(*info->shared_itimer_lock);
global_heap_free(info->shared_itimer_lock, sizeof(*info->shared_itimer_lock)
HEAPACCT(ACCT_OTHER));
info->shared_itimer_lock = NULL;
ASSERT(info->shared_itimer_refcount != NULL);
global_heap_free(info->shared_itimer_refcount, sizeof(int) HEAPACCT(ACCT_OTHER));
info->shared_itimer_refcount = NULL;
ASSERT(info->shared_itimer_underDR != NULL);
global_heap_free(info->shared_itimer_underDR, sizeof(int) HEAPACCT(ACCT_OTHER));
info->shared_itimer_underDR = NULL;
init_itimer(dcontext, true/*first*/);
}
info->num_unstarted_children = 0;
for (i = 1; i <= MAX_SIGNUM; i++) {
/* "A child created via fork(2) initially has an empty pending signal set" */
dcontext->signals_pending = 0;
while (info->sigpending[i] != NULL) {
sigpending_t *temp = info->sigpending[i];
info->sigpending[i] = temp->next;
special_heap_free(info->sigheap, temp);
}
info->num_pending = 0;
}
if (INTERNAL_OPTION(profile_pcs)) {
pcprofile_fork_init(dcontext);
}
/* Assumed to be async safe. */
info->fully_initialized = true;
}
#ifdef DEBUG
static bool
sigsegv_handler_is_ours(void)
{
int rc;
kernel_sigaction_t oldact;
rc = sigaction_syscall(SIGSEGV, NULL, &oldact);
return (rc == 0 && oldact.handler == (handler_t)master_signal_handler);
}
#endif /* DEBUG */
#if defined(X86) && defined(LINUX)
static byte *
get_xstate_buffer(dcontext_t *dcontext)
{
/* See thread_sig_info_t.xstate_buf comments for why this is in TLS. */
thread_sig_info_t *info = (thread_sig_info_t *) dcontext->signal_field;
if (info->xstate_buf == NULL) {
info->xstate_alloc =
heap_alloc(dcontext, signal_frame_extra_size(true) HEAPACCT(ACCT_OTHER));
info->xstate_buf = (byte *) ALIGN_FORWARD(info->xstate_alloc, XSTATE_ALIGNMENT);
ASSERT(info->xstate_alloc + signal_frame_extra_size(true) >=
info->xstate_buf + signal_frame_extra_size(false));
}
return info->xstate_buf;
}
#endif
void
signal_thread_exit(dcontext_t *dcontext, bool other_thread)
{
thread_sig_info_t *info = (thread_sig_info_t *) dcontext->signal_field;
int i;
/* i#1012: DR's signal handler should always be installed before this point.
*/
ASSERT(sigsegv_handler_is_ours() || removed_sig_handler);
while (info->num_unstarted_children > 0) {
/* must wait for children to start and copy our state
* before we destroy it!
*/
os_thread_yield();
}
if (dynamo_exited) {
/* stop itimers before removing signal handlers */
for (i = 0; i < NUM_ITIMERS; i++)
set_actual_itimer(dcontext, i, info, false/*disable*/);
}
#if defined(X86) && defined(LINUX)
if (info->xstate_alloc != NULL) {
heap_free(dcontext, info->xstate_alloc, signal_frame_extra_size(true)
HEAPACCT(ACCT_OTHER));
}
#endif
/* FIXME: w/ shared handlers, if parent (the owner here) dies,
* can children keep living w/ a copy of the handlers?
*/
if (info->shared_app_sigaction) {
mutex_lock(info->shared_lock);
(*info->shared_refcount)--;
mutex_unlock(info->shared_lock);
}
if (!info->shared_app_sigaction || *info->shared_refcount == 0) {
LOG(THREAD, LOG_ASYNCH, 2, "signal handler cleanup:\n");
signal_info_exit_sigaction(dcontext, info, other_thread);
if (info->shared_lock != NULL) {
DELETE_LOCK(*info->shared_lock);
global_heap_free(info->shared_lock, sizeof(mutex_t) HEAPACCT(ACCT_OTHER));
}
if (info->shared_refcount != NULL)
global_heap_free(info->shared_refcount, sizeof(int) HEAPACCT(ACCT_OTHER));
}
if (info->shared_itimer) {
acquire_recursive_lock(info->shared_itimer_lock);
(*info->shared_itimer_refcount)--;
release_recursive_lock(info->shared_itimer_lock);
}
if (!info->shared_itimer || *info->shared_itimer_refcount == 0) {
if (INTERNAL_OPTION(profile_pcs)) {
/* no cleanup needed for non-final thread in group */
pcprofile_thread_exit(dcontext);
}
if (os_itimers_thread_shared())
global_heap_free(info->itimer, sizeof(*info->itimer) HEAPACCT(ACCT_OTHER));
else
heap_free(dcontext, info->itimer, sizeof(*info->itimer) HEAPACCT(ACCT_OTHER));
if (info->shared_itimer_lock != NULL) {
DELETE_RECURSIVE_LOCK(*info->shared_itimer_lock);
global_heap_free(info->shared_itimer_lock, sizeof(recursive_lock_t)
HEAPACCT(ACCT_OTHER));
ASSERT(info->shared_itimer_refcount != NULL);
global_heap_free(info->shared_itimer_refcount, sizeof(int)
HEAPACCT(ACCT_OTHER));
ASSERT(info->shared_itimer_underDR != NULL);
global_heap_free(info->shared_itimer_underDR, sizeof(int)
HEAPACCT(ACCT_OTHER));
}
}
for (i = 1; i <= MAX_SIGNUM; i++) {
/* pending queue is per-thread and not shared */
while (info->sigpending[i] != NULL) {
sigpending_t *temp = info->sigpending[i];
info->sigpending[i] = temp->next;
special_heap_free(info->sigheap, temp);
}
info->num_pending = 0;
}
signal_swap_mask(dcontext, true/*to_app*/);
#ifdef HAVE_SIGALTSTACK
/* Remove our sigstack and restore the app sigstack if it had one. */
if (!other_thread) {
LOG(THREAD, LOG_ASYNCH, 2, "removing our signal stack "PFX" - "PFX"\n",
info->sigstack.ss_sp, info->sigstack.ss_sp + info->sigstack.ss_size);
if (APP_HAS_SIGSTACK(info)) {
LOG(THREAD, LOG_ASYNCH, 2, "restoring app signal stack "PFX" - "PFX"\n",
info->app_sigstack.ss_sp,
info->app_sigstack.ss_sp + info->app_sigstack.ss_size);
} else {
ASSERT(TEST(SS_DISABLE, info->app_sigstack.ss_flags));
}
if (info->sigstack.ss_sp != NULL) {
            /* i#552: to raise the client exit event, we may call
             * dynamo_process_exit on the sigstack in the signal handler.
             * In that case we set sigstack (ss_sp) to NULL to avoid the stack swap.
*/
# ifdef MACOS
if (info->app_sigstack.ss_sp == NULL) {
/* Kernel fails w/ ENOMEM (even for SS_DISABLE) if ss_size is too small */
info->sigstack.ss_flags = SS_DISABLE;
i = sigaltstack_syscall(&info->sigstack, NULL);
/* i#1814: kernel gives EINVAL if last handler didn't call sigreturn! */
ASSERT(i == 0 || i == -EINVAL);
} else {
i = sigaltstack_syscall(&info->app_sigstack, NULL);
/* i#1814: kernel gives EINVAL if last handler didn't call sigreturn! */
ASSERT(i == 0 || i == -EINVAL);
}
# else
i = sigaltstack_syscall(&info->app_sigstack, NULL);
ASSERT(i == 0);
# endif
}
}
#endif
IF_LINUX(signalfd_thread_exit(dcontext, info));
special_heap_exit(info->sigheap);
DELETE_LOCK(info->child_lock);
#ifdef DEBUG
/* for non-debug we do fast exit path and don't free local heap */
# ifdef HAVE_SIGALTSTACK
if (info->sigstack.ss_sp != NULL) {
        /* i#552: to raise the client exit event, we may call
         * dynamo_process_exit on the sigstack in the signal handler.
         * In that case we set sigstack (ss_sp) to NULL to avoid the stack free.
*/
stack_free(info->sigstack.ss_sp + info->sigstack.ss_size,
info->sigstack.ss_size);
}
# endif
HEAP_TYPE_FREE(dcontext, info, thread_sig_info_t, ACCT_OTHER, PROTECTED);
#endif
#ifdef PAPI
/* use SIGPROF for updating gui so it can be distinguished from SIGVTALRM */
    set_itimer_callback(dcontext, ITIMER_PROF, 500,
                        (void (*)(dcontext_t *, priv_mcontext_t *))
                        perfctr_update_gui);
#endif
}
void
set_handler_sigact(kernel_sigaction_t *act, int sig, handler_t handler)
{
act->handler = handler;
#ifdef MACOS
/* This is the real target */
act->tramp = (tramp_t) handler;
#endif
act->flags = SA_SIGINFO; /* send 3 args to handler */
#ifdef HAVE_SIGALTSTACK
act->flags |= SA_ONSTACK; /* use our sigstack */
#endif
/* We want the kernel to help us auto-restart syscalls, esp. when our signals
* interrupt native code such as during attach or in client or DR code (i#2659).
*/
act->flags |= SA_RESTART;
#if defined(X64) && !defined(VMX86_SERVER) && defined(LINUX)
/* PR 305020: must have SA_RESTORER for x64 */
act->flags |= SA_RESTORER;
act->restorer = (void (*)(void)) dynamorio_sigreturn;
#endif
/* We block most signals within our handler */
kernel_sigfillset(&act->mask);
/* i#184/PR 450670: we let our suspend signal interrupt our own handler
* We never send more than one before resuming, so no danger to stack usage
* from our own: but app could pile them up.
*/
kernel_sigdelset(&act->mask, SUSPEND_SIGNAL);
/* i#193/PR 287309: we need to NOT suppress further SIGSEGV, for decode faults,
* for try/except, and for !HAVE_MEMINFO probes.
* Just like SUSPEND_SIGNAL, if app sends repeated SEGV, could run out of
* alt stack: seems too corner-case to be worth increasing stack size.
*/
kernel_sigdelset(&act->mask, SIGSEGV);
if (sig == SUSPEND_SIGNAL || sig == SIGSEGV)
act->flags |= SA_NODEFER;
/* Sigset is a 1 or 2 elt array of longs on X64/X86. Treat as 2 elt of
* uint32. */
IF_DEBUG(uint32 *mask_sig = (uint32*)&act->mask.sig[0]);
LOG(THREAD_GET, LOG_ASYNCH, 3,
"mask for our handler is "PFX" "PFX"\n", mask_sig[0], mask_sig[1]);
}
static void
set_our_handler_sigact(kernel_sigaction_t *act, int sig)
{
set_handler_sigact(act, sig, (handler_t) master_signal_handler);
}
static void
set_handler_and_record_app(dcontext_t *dcontext, thread_sig_info_t *info, int sig,
kernel_sigaction_t *act)
{
int rc;
kernel_sigaction_t oldact;
ASSERT(sig <= MAX_SIGNUM);
/* arm the signal */
rc = sigaction_syscall(sig, act, &oldact);
ASSERT(rc == 0
/* Workaround for PR 223720, which was fixed in ESX4.0 but
* is present in ESX3.5 and earlier: vmkernel treats
* 63 and 64 as invalid signal numbers.
*/
IF_VMX86(|| (sig >= 63 && rc == -EINVAL))
);
if (rc != 0) /* be defensive: app will probably still work */
return;
if (oldact.handler != (handler_t) SIG_DFL &&
oldact.handler != (handler_t) master_signal_handler) {
/* save the app's action for sig */
if (info->shared_app_sigaction) {
/* app_sigaction structure is shared */
mutex_lock(info->shared_lock);
}
if (info->app_sigaction[sig] != NULL) {
/* go ahead and toss the old one, it's up to the app to store
* and then restore later if it wants to
*/
handler_free(dcontext, info->app_sigaction[sig], sizeof(kernel_sigaction_t));
}
info->app_sigaction[sig] = (kernel_sigaction_t *)
handler_alloc(dcontext, sizeof(kernel_sigaction_t));
memcpy(info->app_sigaction[sig], &oldact, sizeof(kernel_sigaction_t));
/* clear cache */
info->restorer_valid[sig] = -1;
if (info->shared_app_sigaction)
mutex_unlock(info->shared_lock);
#ifdef DEBUG
if (oldact.handler == (handler_t) SIG_IGN) {
LOG(THREAD, LOG_ASYNCH, 2,
"app already installed SIG_IGN as sigaction for signal %d\n", sig);
} else {
LOG(THREAD, LOG_ASYNCH, 2,
"app already installed "PFX" as sigaction flags=0x%x for signal %d\n",
oldact.handler, oldact.flags, sig);
}
#endif
} else {
LOG(THREAD, LOG_ASYNCH, 2,
"prior handler is "PFX" vs master "PFX" with flags=0x%x for signal %d\n",
oldact.handler, master_signal_handler, oldact.flags, sig);
}
LOG(THREAD, LOG_ASYNCH, 3, "\twe intercept signal %d\n", sig);
}
/* Set up master_signal_handler as the handler for signal "sig",
* for the current thread. Since we deal with kernel data structures
* in our interception of system calls, we use them here as well,
* to avoid having to translate to/from libc data structures.
*/
static void
intercept_signal(dcontext_t *dcontext, thread_sig_info_t *info, int sig)
{
kernel_sigaction_t act;
ASSERT(sig <= MAX_SIGNUM);
set_our_handler_sigact(&act, sig);
set_handler_and_record_app(dcontext, info, sig, &act);
}
static void
intercept_signal_ignore_initially(dcontext_t *dcontext, thread_sig_info_t *info, int sig)
{
kernel_sigaction_t act;
ASSERT(sig <= MAX_SIGNUM);
memset(&act, 0, sizeof(act));
act.handler = (handler_t) SIG_IGN;
set_handler_and_record_app(dcontext, info, sig, &act);
}
static void
intercept_signal_no_longer_ignore(dcontext_t *dcontext, thread_sig_info_t *info, int sig)
{
kernel_sigaction_t act;
int rc;
ASSERT(sig <= MAX_SIGNUM);
set_our_handler_sigact(&act, sig);
rc = sigaction_syscall(sig, &act, NULL);
ASSERT(rc == 0);
}
/* i#1921: For proper single-threaded native execution with re-takeover we need
* to propagate signals. For now we only support going completely native in
* this thread but without a full detach, so we abandon our signal handlers w/o
* freeing memory up front.
* We also use this for the start/stop interface where we are going fully native
* for all threads.
*/
void
signal_remove_handlers(dcontext_t *dcontext)
{
thread_sig_info_t *info = (thread_sig_info_t *) dcontext->signal_field;
int i;
kernel_sigaction_t act;
memset(&act, 0, sizeof(act));
act.handler = (handler_t) SIG_DFL;
kernel_sigemptyset(&act.mask);
for (i = 1; i <= MAX_SIGNUM; i++) {
if (info->app_sigaction[i] != NULL) {
LOG(THREAD, LOG_ASYNCH, 2, "\trestoring "PFX" as handler for %d\n",
info->app_sigaction[i]->handler, i);
sigaction_syscall(i, info->app_sigaction[i], NULL);
} else if (info->we_intercept[i]) {
/* restore to default */
LOG(THREAD, LOG_ASYNCH, 2, "\trestoring SIG_DFL as handler for %d\n", i);
sigaction_syscall(i, &act, NULL);
}
}
DODEBUG({ removed_sig_handler = true; });
}
void
signal_remove_alarm_handlers(dcontext_t *dcontext)
{
thread_sig_info_t *info = (thread_sig_info_t *) dcontext->signal_field;
int i;
for (i = 1; i <= MAX_SIGNUM; i++) {
if (!info->we_intercept[i])
continue;
if (sig_is_alarm_signal(i)) {
set_ignore_signal_action(i);
}
}
}
/* For attaching mid-run, we assume regular POSIX with handlers global to just one
* thread group in the process.
* We also use this routine for the initial setup of our handlers, which we
* split from signal_thread_inherit() to support start/stop.
*/
void
signal_reinstate_handlers(dcontext_t *dcontext, bool ignore_alarm)
{
thread_sig_info_t *info = (thread_sig_info_t *) dcontext->signal_field;
int i;
for (i = 1; i <= MAX_SIGNUM; i++) {
bool skip = false;
if (!info->we_intercept[i]) {
skip = true;
if (signal_is_interceptable(i)) {
/* We do have to intercept everything the app does.
* If the app removes its handler, we'll never remove ours, which we
* can live with.
*/
kernel_sigaction_t oldact;
int rc = sigaction_syscall(i, NULL, &oldact);
ASSERT(rc == 0);
if (rc == 0 &&
oldact.handler != (handler_t) SIG_DFL &&
oldact.handler != (handler_t) master_signal_handler) {
skip = false;
}
}
}
if (skip)
continue;
if (sig_is_alarm_signal(i) && ignore_alarm) {
LOG(THREAD, LOG_ASYNCH, 2, "\tignoring %d initially\n", i);
intercept_signal_ignore_initially(dcontext, info, i);
} else {
LOG(THREAD, LOG_ASYNCH, 2, "\trestoring DR handler for %d\n", i);
intercept_signal(dcontext, info, i);
}
}
DODEBUG({ removed_sig_handler = false; });
}
void
signal_reinstate_alarm_handlers(dcontext_t *dcontext)
{
thread_sig_info_t *info = (thread_sig_info_t *) dcontext->signal_field;
int i;
for (i = 1; i <= MAX_SIGNUM; i++) {
if (!info->we_intercept[i] || !sig_is_alarm_signal(i))
continue;
LOG(THREAD, LOG_ASYNCH, 2, "\trestoring DR handler for %d\n", i);
intercept_signal_no_longer_ignore(dcontext, info, i);
}
}
/**** system call handlers ***********************************************/
/* FIXME: an invalid pointer passed to the kernel will currently show up,
 * probably as a segfault, in our handlers below... we need to make them
 * behave like the kernel and pass an error code back to os.c
*/
void
handle_clone(dcontext_t *dcontext, uint flags)
{
thread_sig_info_t *info = (thread_sig_info_t *) dcontext->signal_field;
if ((flags & CLONE_VM) == 0) {
/* separate process not sharing memory */
if ((flags & CLONE_SIGHAND) != 0) {
/* FIXME: how deal with this?
* "man clone" says: "Since Linux 2.6.0-test6, flags must also
* include CLONE_VM if CLONE_SIGHAND is specified"
*/
LOG(THREAD, LOG_ASYNCH, 1, "WARNING: !CLONE_VM but CLONE_SIGHAND!\n");
ASSERT_NOT_IMPLEMENTED(false);
}
return;
}
pre_second_thread();
if ((flags & CLONE_SIGHAND) != 0) {
/* need to share table of handlers! */
LOG(THREAD, LOG_ASYNCH, 2, "handle_clone: CLONE_SIGHAND set!\n");
if (!info->shared_app_sigaction) {
/* this is the start of a chain of sharing
* no synch needed here, child not created yet
*/
info->shared_app_sigaction = true;
info->shared_refcount = (int *) global_heap_alloc(sizeof(int)
HEAPACCT(ACCT_OTHER));
*info->shared_refcount = 1;
info->shared_lock = (mutex_t *) global_heap_alloc(sizeof(mutex_t)
HEAPACCT(ACCT_OTHER));
ASSIGN_INIT_LOCK_FREE(*info->shared_lock, shared_lock);
} /* else, some ancestor is already owner */
} else {
/* child will inherit copy of current table -> cannot modify it
* until child is scheduled! FIXME: any other way?
*/
mutex_lock(&info->child_lock);
info->num_unstarted_children++;
mutex_unlock(&info->child_lock);
}
if (TEST(CLONE_THREAD, flags) && os_itimers_thread_shared()) {
if (!info->shared_itimer) {
/* this is the start of a chain of sharing
* no synch needed here, child not created yet
*/
info->shared_itimer = true;
info->shared_itimer_refcount = (int *)
global_heap_alloc(sizeof(int) HEAPACCT(ACCT_OTHER));
*info->shared_itimer_refcount = 1;
info->shared_itimer_underDR = (int *)
global_heap_alloc(sizeof(int) HEAPACCT(ACCT_OTHER));
*info->shared_itimer_underDR = 1;
info->shared_itimer_lock = (recursive_lock_t *)
global_heap_alloc(sizeof(*info->shared_itimer_lock) HEAPACCT(ACCT_OTHER));
ASSIGN_INIT_RECURSIVE_LOCK_FREE(*info->shared_itimer_lock,
shared_itimer_lock);
} /* else, some ancestor already created */
}
}
/* Returns false if should NOT issue syscall.
* In such a case, the result is in "result".
* We could instead issue the syscall and expect it to fail, which would have a more
* accurate error code, but that risks missing a failure (e.g., RT on Android
 * which in some cases returns success on bogus params).
* It seems better to err on the side of the wrong error code or failing when
* we shouldn't, than to think it failed when it didn't, which is more complex
* to deal with.
*/
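/* A sketch of the expected pre-syscall call pattern (hypothetical caller: the
 * real dispatch lives in os.c's pre-syscall handler):
 *
 *     uint result;
 *     if (!handle_sigaction(dcontext, sig, act, oact, sigsetsize, &result))
 *         skip the syscall and make the app see -result (or 0 for success);
 *     else
 *         issue the syscall as usual;
 */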
bool
handle_sigaction(dcontext_t *dcontext, int sig, const kernel_sigaction_t *act,
prev_sigaction_t *oact, size_t sigsetsize, OUT uint *result)
{
thread_sig_info_t *info = (thread_sig_info_t *) dcontext->signal_field;
kernel_sigaction_t *save;
kernel_sigaction_t local_act;
if (sigsetsize != sizeof(kernel_sigset_t)) {
*result = EINVAL;
return false;
}
if (act != NULL) {
/* Linux checks readability before checking the signal number. */
if (!safe_read(act, sizeof(local_act), &local_act)) {
*result = EFAULT;
return false;
}
}
/* i#1135: app may pass invalid signum to find MAX_SIGNUM */
if (sig <= 0 || sig > MAX_SIGNUM ||
(act != NULL && !signal_is_interceptable(sig))) {
*result = EINVAL;
return false;
}
if (act != NULL) {
/* app is installing a new action */
while (info->num_unstarted_children > 0) {
/* must wait for children to start and copy our state
* before we modify it!
*/
os_thread_yield();
}
info->sigaction_param = act;
}
if (info->shared_app_sigaction) {
/* app_sigaction structure is shared */
mutex_lock(info->shared_lock);
}
if (oact != NULL) {
/* Keep a copy of the prior one for post-syscall to hand to the app. */
info->use_kernel_prior_sigaction = false;
if (info->app_sigaction[sig] == NULL) {
if (info->we_intercept[sig]) {
/* need to pretend there is no handler */
memset(&info->prior_app_sigaction, 0, sizeof(info->prior_app_sigaction));
info->prior_app_sigaction.handler = (handler_t) SIG_DFL;
} else {
info->use_kernel_prior_sigaction = true;
}
} else {
memcpy(&info->prior_app_sigaction, info->app_sigaction[sig],
sizeof(info->prior_app_sigaction));
}
}
if (act != NULL) {
if (local_act.handler == (handler_t) SIG_IGN ||
local_act.handler == (handler_t) SIG_DFL) {
LOG(THREAD, LOG_ASYNCH, 2,
"app installed %s as sigaction for signal %d\n",
(local_act.handler == (handler_t) SIG_IGN) ? "SIG_IGN" : "SIG_DFL", sig);
if (!info->we_intercept[sig]) {
/* let the SIG_IGN/SIG_DFL go through, we want to remove our
* handler. we delete the stored app_sigaction in post_
*/
if (info->shared_app_sigaction)
mutex_unlock(info->shared_lock);
return true;
}
} else {
LOG(THREAD, LOG_ASYNCH, 2,
"app installed "PFX" as sigaction for signal %d\n",
local_act.handler, sig);
DOLOG(2, LOG_ASYNCH, {
LOG(THREAD, LOG_ASYNCH, 2, "signal mask for handler:\n");
dump_sigset(dcontext, (kernel_sigset_t *) &local_act.mask);
});
}
/* save app's entire sigaction struct */
save = (kernel_sigaction_t *) handler_alloc(dcontext, sizeof(kernel_sigaction_t));
memcpy(save, &local_act, sizeof(kernel_sigaction_t));
/* Remove the unblockable sigs */
kernel_sigdelset(&save->mask, SIGKILL);
kernel_sigdelset(&save->mask, SIGSTOP);
if (info->app_sigaction[sig] != NULL) {
/* go ahead and toss the old one, it's up to the app to store
* and then restore later if it wants to
*/
handler_free(dcontext, info->app_sigaction[sig], sizeof(kernel_sigaction_t));
}
info->app_sigaction[sig] = save;
LOG(THREAD, LOG_ASYNCH, 3, "\tflags = "PFX", %s = "PFX"\n",
local_act.flags, IF_MACOS_ELSE("tramp","restorer"),
IF_MACOS_ELSE(local_act.tramp, local_act.restorer));
/* clear cache */
info->restorer_valid[sig] = -1;
}
if (info->shared_app_sigaction)
mutex_unlock(info->shared_lock);
if (info->we_intercept[sig]) {
/* cancel the syscall */
*result = handle_post_sigaction(dcontext, true, sig, act, oact, sigsetsize);
return false;
}
if (act != NULL) {
/* Now hand kernel our master handler instead of app's. */
set_our_handler_sigact(&info->our_sigaction, sig);
set_syscall_param(dcontext, 1, (reg_t)&info->our_sigaction);
/* FIXME PR 297033: we don't support intercepting DEFAULT_STOP /
* DEFAULT_CONTINUE signals b/c we can't generate the default
* action: if the app registers a handler, though, we should work
* properly if we never see SIG_DFL.
*/
}
return true;
}
/* os.c thinks it's passing us struct_sigaction, really it's kernel_sigaction_t,
* which has fields in different order.
* Only called on success.
* Returns the desired app return value (caller will negate if nec).
*/
uint
handle_post_sigaction(dcontext_t *dcontext, bool success, int sig,
const kernel_sigaction_t *act,
prev_sigaction_t *oact,
size_t sigsetsize)
{
thread_sig_info_t *info = (thread_sig_info_t *) dcontext->signal_field;
if (act != NULL) {
/* Restore app register value, in case we changed it. */
set_syscall_param(dcontext, 1, (reg_t)info->sigaction_param);
}
if (!success)
return 0; /* don't change return value */
ASSERT(sig <= MAX_SIGNUM && sig > 0);
if (oact != NULL) {
if (info->use_kernel_prior_sigaction) {
/* Real syscall succeeded with oact so it must be readable, barring races. */
ASSERT(oact->handler == (handler_t) SIG_IGN ||
oact->handler == (handler_t) SIG_DFL);
} else {
/* We may have skipped the syscall so we have to check writability */
#ifdef MACOS
/* On MacOS prev_sigaction_t is a different type (i#2105) */
bool fault = true;
TRY_EXCEPT(dcontext, {
oact->handler = info->prior_app_sigaction.handler;
oact->mask = info->prior_app_sigaction.mask;
oact->flags = info->prior_app_sigaction.flags;
fault = false;
} , { /* EXCEPT */
/* nothing: fault is already true */
});
if (fault)
return EFAULT;
#else
if (!safe_write_ex(oact, sizeof(*oact), &info->prior_app_sigaction, NULL)) {
/* We actually don't have to undo installing any passed action
* b/c the Linux kernel does that *before* checking oact perms.
*/
return EFAULT;
}
#endif
}
}
/* If installing IGN or DFL, delete ours.
* XXX: This is racy. We can't hold the lock across the syscall, though.
* What we should do is just drop support for -no_intercept_all_signals,
* which is off by default anyway and never turned off.
*/
if (act != NULL &&
/* De-ref here should work barring races: already racy and non-default so not
* bothering with safe_read.
*/
((act->handler == (handler_t) SIG_IGN ||
act->handler == (handler_t) SIG_DFL) &&
!info->we_intercept[sig]) &&
info->app_sigaction[sig] != NULL) {
if (info->shared_app_sigaction)
mutex_lock(info->shared_lock);
/* remove old stored app action */
handler_free(dcontext, info->app_sigaction[sig],
sizeof(kernel_sigaction_t));
info->app_sigaction[sig] = NULL;
if (info->shared_app_sigaction)
mutex_unlock(info->shared_lock);
}
return 0;
}
#ifdef LINUX
static bool
convert_old_sigaction_to_kernel(dcontext_t *dcontext, kernel_sigaction_t *ks,
const old_sigaction_t *os)
{
bool res = false;
TRY_EXCEPT(dcontext, {
ks->handler = os->handler;
ks->flags = os->flags;
ks->restorer = os->restorer;
kernel_sigemptyset(&ks->mask);
ks->mask.sig[0] = os->mask;
res = true;
} , { /* EXCEPT */
/* nothing: res is already false */
});
return res;
}
static bool
convert_kernel_sigaction_to_old(dcontext_t *dcontext, old_sigaction_t *os,
const kernel_sigaction_t *ks)
{
bool res = false;
TRY_EXCEPT(dcontext, {
os->handler = ks->handler;
os->flags = ks->flags;
os->restorer = ks->restorer;
os->mask = ks->mask.sig[0];
res = true;
} , { /* EXCEPT */
/* nothing: res is already false */
});
return res;
}
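/* Note that old_sigaction_t carries only a single-word mask, so the
 * conversions above preserve just the first word of the sigset; this
 * presumably matches what the kernel's legacy sigaction syscall handles.
 */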
/* Returns false (and "result") if should NOT issue syscall. */
bool
handle_old_sigaction(dcontext_t *dcontext, int sig, const old_sigaction_t *act,
old_sigaction_t *oact, OUT uint *result)
{
kernel_sigaction_t kact;
kernel_sigaction_t okact;
bool res;
if (act != NULL) {
if (!convert_old_sigaction_to_kernel(dcontext, &kact, act)) {
*result = EFAULT;
return false;
}
}
res = handle_sigaction(dcontext, sig, act == NULL ? NULL : &kact,
oact == NULL ? NULL : &okact, sizeof(kernel_sigset_t), result);
if (!res)
*result = handle_post_old_sigaction(dcontext, true, sig, act, oact);
return res;
}
/* Returns the desired app return value (caller will negate if nec). */
uint
handle_post_old_sigaction(dcontext_t *dcontext, bool success, int sig,
const old_sigaction_t *act, old_sigaction_t *oact)
{
kernel_sigaction_t kact;
kernel_sigaction_t okact;
ptr_uint_t res;
if (act != NULL && success) {
if (!convert_old_sigaction_to_kernel(dcontext, &kact, act)) {
ASSERT(!success);
return EFAULT;
}
}
if (oact != NULL && success) {
if (!convert_old_sigaction_to_kernel(dcontext, &okact, oact)) {
ASSERT(!success);
return EFAULT;
}
}
res = handle_post_sigaction(dcontext, success, sig, act == NULL ? NULL : &kact,
oact == NULL ? NULL : &okact, sizeof(kernel_sigset_t));
if (res == 0 && oact != NULL) {
if (!convert_kernel_sigaction_to_old(dcontext, oact, &okact)) {
return EFAULT;
}
}
return res;
}
#endif /* LINUX */
/* Returns false if should NOT issue syscall */
bool
handle_sigaltstack(dcontext_t *dcontext, const stack_t *stack,
stack_t *old_stack)
{
thread_sig_info_t *info = (thread_sig_info_t *) dcontext->signal_field;
if (old_stack != NULL) {
*old_stack = info->app_sigstack;
}
if (stack != NULL) {
info->app_sigstack = *stack;
LOG(THREAD, LOG_ASYNCH, 2, "app set up signal stack "PFX" - "PFX" %s\n",
stack->ss_sp, stack->ss_sp + stack->ss_size - 1,
(APP_HAS_SIGSTACK(info)) ? "enabled" : "disabled");
return false; /* always cancel syscall */
}
return true;
}
/* Blocked signals:
* In general, we don't need to keep track of blocked signals.
* We only need to do so for those signals we intercept ourselves.
* Thus, info->app_sigblocked ONLY contains entries for signals
* we intercept ourselves.
* PR 304708: we now intercept all signals.
*/
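/* For example, under the default -intercept_all_signals, EMULATE_SIGMASK()
 * is presumably true for every signal, so an app SIG_BLOCK of SIGUSR2 only
 * sets the bit in info->app_sigblocked; delivery of an intercepted pending
 * SIGUSR2 is then deferred until the app unblocks it (see
 * check_signals_pending()).
 */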
static void
set_blocked(dcontext_t *dcontext, kernel_sigset_t *set, bool absolute)
{
thread_sig_info_t *info = (thread_sig_info_t *) dcontext->signal_field;
int i;
if (absolute) {
/* discard current blocked signals, re-set from new mask */
kernel_sigemptyset(&info->app_sigblocked);
} /* else, OR in the new set */
for (i=1; i<=MAX_SIGNUM; i++) {
if (EMULATE_SIGMASK(info, i) && kernel_sigismember(set, i)) {
kernel_sigaddset(&info->app_sigblocked, i);
}
}
#ifdef DEBUG
if (stats->loglevel >= 3 && (stats->logmask & LOG_ASYNCH) != 0) {
LOG(THREAD, LOG_ASYNCH, 3, "blocked signals are now:\n");
dump_sigset(dcontext, &info->app_sigblocked);
}
#endif
}
void
signal_set_mask(dcontext_t *dcontext, kernel_sigset_t *sigset)
{
set_blocked(dcontext, sigset, true/*absolute*/);
}
void
signal_swap_mask(dcontext_t *dcontext, bool to_app)
{
thread_sig_info_t *info = (thread_sig_info_t *) dcontext->signal_field;
if (to_app) {
if (init_info.app_sigaction != NULL) {
/* This is the first execution of the app.
* We need to remove our own init-time handler and mask.
*/
unset_initial_crash_handlers(dcontext);
return;
}
sigprocmask_syscall(SIG_SETMASK, &info->app_sigblocked, NULL,
sizeof(info->app_sigblocked));
} else {
unblock_all_signals(&info->app_sigblocked);
DOLOG(2, LOG_ASYNCH, {
LOG(THREAD, LOG_ASYNCH, 2, "thread %d's initial app signal mask:\n",
get_thread_id());
dump_sigset(dcontext, &info->app_sigblocked);
});
}
}
/* Scans over info->sigpending to see if there are any unblocked, pending
* signals, and sets dcontext->signals_pending if there are. Do this after
* modifying the set of signals blocked by the application.
*/
static void
check_signals_pending(dcontext_t *dcontext, thread_sig_info_t *info)
{
int i;
if (dcontext->signals_pending != 0)
return;
for (i=1; i<=MAX_SIGNUM; i++) {
if (info->sigpending[i] != NULL &&
!kernel_sigismember(&info->app_sigblocked, i) &&
!dcontext->signals_pending) {
/* We only update the application's set of blocked signals from
* syscall handlers, so we know we'll go back to dispatch and see
* this flag right away.
*/
LOG(THREAD, LOG_ASYNCH, 3, "\tsetting signals_pending flag\n");
dcontext->signals_pending = 1;
break;
}
}
}
/* Returns whether to execute the syscall */
bool
handle_sigprocmask(dcontext_t *dcontext, int how, kernel_sigset_t *app_set,
kernel_sigset_t *oset, size_t sigsetsize)
{
thread_sig_info_t *info = (thread_sig_info_t *) dcontext->signal_field;
int i;
kernel_sigset_t safe_set;
/* If we're intercepting all, we emulate the whole thing */
bool execute_syscall = !DYNAMO_OPTION(intercept_all_signals);
LOG(THREAD, LOG_ASYNCH, 2, "handle_sigprocmask\n");
if (oset != NULL)
info->pre_syscall_app_sigblocked = info->app_sigblocked;
if (app_set != NULL && safe_read(app_set, sizeof(safe_set), &safe_set)) {
if (execute_syscall) {
/* The syscall will execute, so remove from the set passed
* to it. We restore post-syscall.
* XXX i#1187: we could crash here touching app memory -- could
* use TRY, but the app could pass read-only memory and it
* would work natively! Better to swap in our own
* allocated data struct. There's a transparency issue w/
* races too if another thread looks at this memory. This
* won't happen by default b/c -intercept_all_signals is
* on by default so we don't try to solve all these
* issues.
*/
info->pre_syscall_app_sigprocmask = safe_set;
}
if (how == SIG_BLOCK) {
/* The set of blocked signals is the union of the current
* set and the set argument.
*/
for (i=1; i<=MAX_SIGNUM; i++) {
if (EMULATE_SIGMASK(info, i) && kernel_sigismember(&safe_set, i)) {
kernel_sigaddset(&info->app_sigblocked, i);
if (execute_syscall)
kernel_sigdelset(app_set, i);
}
}
} else if (how == SIG_UNBLOCK) {
/* The signals in set are removed from the current set of
* blocked signals.
*/
for (i=1; i<=MAX_SIGNUM; i++) {
if (EMULATE_SIGMASK(info, i) && kernel_sigismember(&safe_set, i)) {
kernel_sigdelset(&info->app_sigblocked, i);
if (execute_syscall)
kernel_sigdelset(app_set, i);
}
}
} else if (how == SIG_SETMASK) {
/* The set of blocked signals is set to the argument set. */
kernel_sigemptyset(&info->app_sigblocked);
for (i=1; i<=MAX_SIGNUM; i++) {
if (EMULATE_SIGMASK(info, i) && kernel_sigismember(&safe_set, i)) {
kernel_sigaddset(&info->app_sigblocked, i);
if (execute_syscall)
kernel_sigdelset(app_set, i);
}
}
}
#ifdef DEBUG
if (stats->loglevel >= 3 && (stats->logmask & LOG_ASYNCH) != 0) {
LOG(THREAD, LOG_ASYNCH, 3, "blocked signals are now:\n");
dump_sigset(dcontext, &info->app_sigblocked);
}
#endif
/* make sure we deliver pending signals that are now unblocked
* FIXME: consider signal #S, which we intercept ourselves.
         * If S arrives and the app then blocks it prior to our delivering it,
         * we won't deliver it until the app unblocks it... is this a
         * problem? It could have arrived a little later and then we would do
         * the same thing, but this way the kernel may send one more than the
         * app would get w/o dynamo? This goes away if we deliver signals
* prior to letting app do a syscall.
*/
check_signals_pending(dcontext, info);
}
if (!execute_syscall) {
handle_post_sigprocmask(dcontext, how, app_set, oset, sigsetsize);
return false; /* skip syscall */
} else
return true;
}
/* need to add in our signals that the app thinks are blocked */
void
handle_post_sigprocmask(dcontext_t *dcontext, int how, kernel_sigset_t *app_set,
kernel_sigset_t *oset, size_t sigsetsize)
{
thread_sig_info_t *info = (thread_sig_info_t *) dcontext->signal_field;
int i;
if (!DYNAMO_OPTION(intercept_all_signals)) {
/* Restore app memory */
safe_write_ex(app_set, sizeof(*app_set), &info->pre_syscall_app_sigprocmask,
NULL);
}
if (oset != NULL) {
if (DYNAMO_OPTION(intercept_all_signals))
safe_write_ex(oset, sizeof(*oset), &info->pre_syscall_app_sigblocked, NULL);
else {
/* the syscall wrote to oset already, so just add any additional */
for (i=1; i<=MAX_SIGNUM; i++) {
if (EMULATE_SIGMASK(info, i) &&
/* use the pre-syscall value: do not take into account changes
* from this syscall itself! (PR 523394)
*/
kernel_sigismember(&info->pre_syscall_app_sigblocked, i)) {
kernel_sigaddset(oset, i);
}
}
}
}
}
void
handle_sigsuspend(dcontext_t *dcontext, kernel_sigset_t *set,
size_t sigsetsize)
{
thread_sig_info_t *info = (thread_sig_info_t *) dcontext->signal_field;
int i;
ASSERT(set != NULL);
LOG(THREAD, LOG_ASYNCH, 2, "handle_sigsuspend\n");
info->in_sigsuspend = true;
info->app_sigblocked_save = info->app_sigblocked;
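    /* The in_sigsuspend flag set above is presumably consulted at signal
     * delivery time elsewhere in this file, so that the mask saved here can
     * be restored once the sigsuspend is interrupted.
     */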
kernel_sigemptyset(&info->app_sigblocked);
for (i=1; i<=MAX_SIGNUM; i++) {
if (EMULATE_SIGMASK(info, i) && kernel_sigismember(set, i)) {
kernel_sigaddset(&info->app_sigblocked, i);
kernel_sigdelset(set, i);
}
}
#ifdef DEBUG
if (stats->loglevel >= 3 && (stats->logmask & LOG_ASYNCH) != 0) {
LOG(THREAD, LOG_ASYNCH, 3, "in sigsuspend, blocked signals are now:\n");
dump_sigset(dcontext, &info->app_sigblocked);
}
#endif
}
/**** utility routines ***********************************************/
#ifdef DEBUG
static void
dump_sigset(dcontext_t *dcontext, kernel_sigset_t *set)
{
int sig;
for (sig=1; sig<=MAX_SIGNUM; sig++) {
if (kernel_sigismember(set, sig))
LOG(THREAD, LOG_ASYNCH, 1, "\t%d = blocked\n", sig);
}
}
#endif /* DEBUG */
/* PR 205795: to avoid lock problems w/ in_fcache (it grabs a lock, we
* could have interrupted someone holding that), we first check
* whereami --- if whereami is WHERE_FCACHE we still check the pc
* to distinguish generated routines, but at least we're certain
* it's not in DR where it could own a lock.
* We can't use is_on_dstack() here b/c we need to handle clean call
* arg crashes -- which is too bad since checking client dll and DR dll is
* not sufficient due to calls to ntdll, libc, or pc being in gencode.
*/
static bool
safe_is_in_fcache(dcontext_t *dcontext, app_pc pc, app_pc xsp)
{
if (dcontext->whereami != WHERE_FCACHE ||
IF_CLIENT_INTERFACE(is_in_client_lib(pc) ||)
is_in_dynamo_dll(pc) ||
is_on_initstack(xsp))
return false;
/* Reasonably certain not in DR code, so no locks should be held */
return in_fcache(pc);
}
static bool
safe_is_in_coarse_stubs(dcontext_t *dcontext, app_pc pc, app_pc xsp)
{
if (dcontext->whereami != WHERE_FCACHE ||
IF_CLIENT_INTERFACE(is_in_client_lib(pc) ||)
is_in_dynamo_dll(pc) ||
is_on_initstack(xsp))
return false;
/* Reasonably certain not in DR code, so no locks should be held */
return in_coarse_stubs(pc);
}
static bool
is_on_alt_stack(dcontext_t *dcontext, byte *sp)
{
#ifdef HAVE_SIGALTSTACK
thread_sig_info_t *info = (thread_sig_info_t *) dcontext->signal_field;
return (sp >= (byte *) info->sigstack.ss_sp &&
/* deliberate equality check since stacks often init to top */
sp <= (byte *) (info->sigstack.ss_sp + info->sigstack.ss_size));
#else
return false;
#endif
}
/* The caller must initialize ucxt, including its fpstate pointer for x86 Linux. */
static void
sig_full_initialize(sig_full_cxt_t *sc_full, kernel_ucontext_t *ucxt)
{
sc_full->sc = SIGCXT_FROM_UCXT(ucxt);
#ifdef X86
sc_full->fp_simd_state = NULL; /* we have a ptr inside sigcontext_t */
#elif defined(ARM)
sc_full->fp_simd_state = &ucxt->coproc.uc_vfp;
#elif defined(AARCH64)
sc_full->fp_simd_state = &ucxt->uc_mcontext.__reserved;
#else
ASSERT_NOT_IMPLEMENTED(false);
#endif
}
void
sigcontext_to_mcontext(priv_mcontext_t *mc, sig_full_cxt_t *sc_full,
dr_mcontext_flags_t flags)
{
sigcontext_t *sc = sc_full->sc;
ASSERT(mc != NULL && sc != NULL);
#ifdef X86
if (TEST(DR_MC_INTEGER, flags)) {
mc->xax = sc->SC_XAX;
mc->xbx = sc->SC_XBX;
mc->xcx = sc->SC_XCX;
mc->xdx = sc->SC_XDX;
mc->xsi = sc->SC_XSI;
mc->xdi = sc->SC_XDI;
mc->xbp = sc->SC_XBP;
# ifdef X64
mc->r8 = sc->SC_FIELD(r8);
mc->r9 = sc->SC_FIELD(r9);
mc->r10 = sc->SC_FIELD(r10);
mc->r11 = sc->SC_FIELD(r11);
mc->r12 = sc->SC_FIELD(r12);
mc->r13 = sc->SC_FIELD(r13);
mc->r14 = sc->SC_FIELD(r14);
mc->r15 = sc->SC_FIELD(r15);
# endif /* X64 */
}
if (TEST(DR_MC_CONTROL, flags)) {
mc->xsp = sc->SC_XSP;
mc->xflags = sc->SC_XFLAGS;
mc->pc = (app_pc) sc->SC_XIP;
}
#elif defined(AARCH64)
if (TEST(DR_MC_INTEGER, flags))
memcpy(&mc->r0, &sc->SC_FIELD(regs[0]), sizeof(mc->r0) * 31);
if (TEST(DR_MC_CONTROL, flags)) {
/* XXX i#2710: the link register should be under DR_MC_CONTROL */
mc->sp = sc->SC_FIELD(sp);
mc->pc = (void *)sc->SC_FIELD(pc);
mc->nzcv = sc->SC_FIELD(pstate);
}
#elif defined (ARM)
if (TEST(DR_MC_INTEGER, flags)) {
mc->r0 = sc->SC_FIELD(arm_r0);
mc->r1 = sc->SC_FIELD(arm_r1);
mc->r2 = sc->SC_FIELD(arm_r2);
mc->r3 = sc->SC_FIELD(arm_r3);
mc->r4 = sc->SC_FIELD(arm_r4);
mc->r5 = sc->SC_FIELD(arm_r5);
mc->r6 = sc->SC_FIELD(arm_r6);
mc->r7 = sc->SC_FIELD(arm_r7);
mc->r8 = sc->SC_FIELD(arm_r8);
mc->r9 = sc->SC_FIELD(arm_r9);
mc->r10 = sc->SC_FIELD(arm_r10);
mc->r11 = sc->SC_FIELD(arm_fp);
mc->r12 = sc->SC_FIELD(arm_ip);
/* XXX i#2710: the link register should be under DR_MC_CONTROL */
mc->r14 = sc->SC_FIELD(arm_lr);
}
if (TEST(DR_MC_CONTROL, flags)) {
mc->r13 = sc->SC_FIELD(arm_sp);
mc->r15 = sc->SC_FIELD(arm_pc);
mc->cpsr= sc->SC_FIELD(arm_cpsr);
}
# ifdef X64
# error NYI on AArch64
# endif /* X64 */
#endif /* X86/ARM */
if (TEST(DR_MC_MULTIMEDIA, flags))
sigcontext_to_mcontext_simd(mc, sc_full);
}
/* Note that unlike mcontext_to_context(), this routine does not fill in
* any state that is not present in the mcontext: in particular, it assumes
* the sigcontext already contains the native fpstate. If the caller
* is generating a synthetic sigcontext, the caller should call
* save_fpstate() before calling this routine.
*/
/* XXX: on ARM, sigreturn needs the T bit set in the sigcontext_t cpsr field in
* order to return to Thumb mode. But, our mcontext doesn't have the T bit (b/c
* usermode can't read it). Thus callers must either modify an mcontext
* obtained from sigcontext_to_mcontext() or must call set_pc_mode_in_cpsr() in
* order to create a proper sigcontext for sigreturn. All callers here do so.
* The only external non-Windows caller of thread_set_mcontext() is
* translate_from_synchall_to_dispatch() who first does a thread_get_mcontext()
* and tweaks that context, so cpsr should be there.
*/
void
mcontext_to_sigcontext(sig_full_cxt_t *sc_full, priv_mcontext_t *mc,
dr_mcontext_flags_t flags)
{
sigcontext_t *sc = sc_full->sc;
ASSERT(mc != NULL && sc != NULL);
#ifdef X86
if (TEST(DR_MC_INTEGER, flags)) {
sc->SC_XAX = mc->xax;
sc->SC_XBX = mc->xbx;
sc->SC_XCX = mc->xcx;
sc->SC_XDX = mc->xdx;
sc->SC_XSI = mc->xsi;
sc->SC_XDI = mc->xdi;
sc->SC_XBP = mc->xbp;
# ifdef X64
sc->SC_FIELD(r8) = mc->r8;
sc->SC_FIELD(r9) = mc->r9;
sc->SC_FIELD(r10) = mc->r10;
sc->SC_FIELD(r11) = mc->r11;
sc->SC_FIELD(r12) = mc->r12;
sc->SC_FIELD(r13) = mc->r13;
sc->SC_FIELD(r14) = mc->r14;
sc->SC_FIELD(r15) = mc->r15;
# endif /* X64 */
}
if (TEST(DR_MC_CONTROL, flags)) {
sc->SC_XSP = mc->xsp;
sc->SC_XFLAGS = mc->xflags;
sc->SC_XIP = (ptr_uint_t) mc->pc;
}
#elif defined(AARCH64)
if (TEST(DR_MC_INTEGER, flags)) {
memcpy(&sc->SC_FIELD(regs[0]), &mc->r0, sizeof(mc->r0) * 31);
}
if (TEST(DR_MC_CONTROL, flags)) {
/* XXX i#2710: the link register should be under DR_MC_CONTROL */
sc->SC_FIELD(sp) = mc->sp;
sc->SC_FIELD(pc) = (ptr_uint_t)mc->pc;
sc->SC_FIELD(pstate) = mc->nzcv;
}
#elif defined(ARM)
if (TEST(DR_MC_INTEGER, flags)) {
sc->SC_FIELD(arm_r0) = mc->r0;
sc->SC_FIELD(arm_r1) = mc->r1;
sc->SC_FIELD(arm_r2) = mc->r2;
sc->SC_FIELD(arm_r3) = mc->r3;
sc->SC_FIELD(arm_r4) = mc->r4;
sc->SC_FIELD(arm_r5) = mc->r5;
sc->SC_FIELD(arm_r6) = mc->r6;
sc->SC_FIELD(arm_r7) = mc->r7;
sc->SC_FIELD(arm_r8) = mc->r8;
sc->SC_FIELD(arm_r9) = mc->r9;
sc->SC_FIELD(arm_r10) = mc->r10;
sc->SC_FIELD(arm_fp) = mc->r11;
sc->SC_FIELD(arm_ip) = mc->r12;
/* XXX i#2710: the link register should be under DR_MC_CONTROL */
sc->SC_FIELD(arm_lr) = mc->r14;
}
if (TEST(DR_MC_CONTROL, flags)) {
sc->SC_FIELD(arm_sp) = mc->r13;
sc->SC_FIELD(arm_pc) = mc->r15;
        sc->SC_FIELD(arm_cpsr) = mc->cpsr;
}
# ifdef X64
# error NYI on AArch64
# endif /* X64 */
#endif /* X86/ARM */
if (TEST(DR_MC_MULTIMEDIA, flags))
mcontext_to_sigcontext_simd(sc_full, mc);
}
static void
ucontext_to_mcontext(priv_mcontext_t *mc, kernel_ucontext_t *uc)
{
sig_full_cxt_t sc_full;
sig_full_initialize(&sc_full, uc);
sigcontext_to_mcontext(mc, &sc_full, DR_MC_ALL);
}
static void
mcontext_to_ucontext(kernel_ucontext_t *uc, priv_mcontext_t *mc)
{
sig_full_cxt_t sc_full;
sig_full_initialize(&sc_full, uc);
mcontext_to_sigcontext(&sc_full, mc, DR_MC_ALL);
}
#ifdef AARCHXX
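/* The GPR fields in sigcontext_t are contiguous, so we locate the stolen
 * register's slot by indexing from SC_R0 by its distance from DR_REG_R0.
 */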
static void
set_sigcxt_stolen_reg(sigcontext_t *sc, reg_t val)
{
*(&sc->SC_R0 + (dr_reg_stolen - DR_REG_R0)) = val;
}
static reg_t
get_sigcxt_stolen_reg(sigcontext_t *sc)
{
return *(&sc->SC_R0 + (dr_reg_stolen - DR_REG_R0));
}
# ifndef AARCH64
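/* On 32-bit ARM the T (Thumb) bit in the cpsr selects the ISA mode of the
 * interrupted pc, so we translate it to and from dr_isa_mode_t here.
 */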
static dr_isa_mode_t
get_pc_mode_from_cpsr(sigcontext_t *sc)
{
return TEST(EFLAGS_T, sc->SC_XFLAGS) ? DR_ISA_ARM_THUMB : DR_ISA_ARM_A32;
}
static void
set_pc_mode_in_cpsr(sigcontext_t *sc, dr_isa_mode_t isa_mode)
{
if (isa_mode == DR_ISA_ARM_THUMB)
sc->SC_XFLAGS |= EFLAGS_T;
else
sc->SC_XFLAGS &= ~EFLAGS_T;
}
# endif
#endif
/* Returns whether successful. If avoid_failure, tries to translate
* at least pc if not successful. Pass f if known.
*/
static bool
translate_sigcontext(dcontext_t *dcontext, kernel_ucontext_t *uc, bool avoid_failure,
fragment_t *f)
{
bool success = false;
priv_mcontext_t mcontext;
sigcontext_t *sc = SIGCXT_FROM_UCXT(uc);
ucontext_to_mcontext(&mcontext, uc);
/* FIXME: if cannot find exact match, we're in trouble!
* probably ok to delay, since that indicates not a synchronous
* signal.
*/
    /* FIXME : in_fcache() (called by recreate_app_state) grabs fcache's
     * fcache_unit_areas.lock: we could deadlock! Also on initexit_lock
* == PR 205795/1317
*/
/* For safe recreation we need to either be couldbelinking or hold the
     * initexit lock (to keep someone from flushing the current fragment);
     * the initexit lock is easier.
*/
mutex_lock(&thread_initexit_lock);
/* PR 214962: we assume we're going to relocate to this stored context,
* so we restore memory now
*/
if (translate_mcontext(dcontext->thread_record, &mcontext,
true/*restore memory*/, f)) {
mcontext_to_ucontext(uc, &mcontext);
success = true;
} else {
if (avoid_failure) {
ASSERT_NOT_REACHED(); /* is ok to break things, is UNIX :) */
            /* FIXME : what to do? reg state might be wrong; at least get the pc */
if (safe_is_in_fcache(dcontext, (cache_pc)sc->SC_XIP, (app_pc)sc->SC_XSP)) {
sc->SC_XIP = (ptr_uint_t)recreate_app_pc(dcontext, mcontext.pc, f);
ASSERT(sc->SC_XIP != (ptr_uint_t)NULL);
} else {
/* FIXME : can't even get pc right, what do we do here? */
sc->SC_XIP = 0;
}
}
}
mutex_unlock(&thread_initexit_lock);
/* FIXME i#2095: restore the app's segment register value(s). */
LOG(THREAD, LOG_ASYNCH, 3,
"\ttranslate_sigcontext: just set frame's eip to "PFX"\n", sc->SC_XIP);
return success;
}
/* Takes an os-specific context */
void
thread_set_self_context(void *cxt)
{
#ifdef X86
if (!INTERNAL_OPTION(use_sigreturn_setcontext)) {
sigcontext_t *sc = (sigcontext_t *) cxt;
dr_jmp_buf_t buf;
buf.xbx = sc->SC_XBX;
buf.xcx = sc->SC_XCX;
buf.xdi = sc->SC_XDI;
buf.xsi = sc->SC_XSI;
buf.xbp = sc->SC_XBP;
/* XXX: this is not fully transparent: it assumes the target stack
* is valid and that we can clobber the slot beyond TOS.
* Using this instead of sigreturn is meant mainly as a diagnostic
* to help debug future issues with sigreturn (xref i#2080).
*/
buf.xsp = sc->SC_XSP - XSP_SZ; /* extra slot for retaddr */
buf.xip = sc->SC_XIP;
# ifdef X64
buf.r8 = sc->r8;
buf.r9 = sc->r9;
buf.r10 = sc->r10;
buf.r11 = sc->r11;
buf.r12 = sc->r12;
buf.r13 = sc->r13;
buf.r14 = sc->r14;
buf.r15 = sc->r15;
# endif
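        /* dr_longjmp's return value is placed in xax, which conveniently
         * restores the app's xax as we jump.
         */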
dr_longjmp(&buf, sc->SC_XAX);
return;
}
#endif
dcontext_t *dcontext = get_thread_private_dcontext();
/* Unlike Windows we can't say "only set this subset of the
     * full machine state", so we need to get the rest of the state.
*/
sigframe_rt_t frame;
#if defined(LINUX) || defined(DEBUG)
sigcontext_t *sc = (sigcontext_t *) cxt;
#endif
app_pc xsp_for_sigreturn;
#ifdef VMX86_SERVER
ASSERT_NOT_IMPLEMENTED(false); /* PR 405694: can't use regular sigreturn! */
#endif
memset(&frame, 0, sizeof(frame));
#ifdef LINUX
# ifdef X86
byte *xstate = get_xstate_buffer(dcontext);
frame.uc.uc_mcontext.fpstate = &((kernel_xstate_t *)xstate)->fpstate;
# endif /* X86 */
frame.uc.uc_mcontext = *sc;
#endif
save_fpstate(dcontext, &frame);
/* The kernel calls do_sigaltstack on sys_rt_sigreturn primarily to ensure
* the frame is ok, but the side effect is we can mess up our own altstack
     * settings if we're not careful. An invalid ss_size appears to be
     * accepted, on kernel 2.6.23.9 at least, so we leave frame.uc.uc_stack
     * as all zeros.
*/
/* make sure sigreturn's mask setting doesn't change anything */
sigprocmask_syscall(SIG_SETMASK, NULL, (kernel_sigset_t *) &frame.uc.uc_sigmask,
sizeof(frame.uc.uc_sigmask));
LOG(THREAD_GET, LOG_ASYNCH, 2, "thread_set_self_context: pc="PFX"\n", sc->SC_XIP);
LOG(THREAD_GET, LOG_ASYNCH, 3, "full sigcontext\n");
DOLOG(LOG_ASYNCH, 3, {
dump_sigcontext(dcontext, get_sigcontext_from_rt_frame(&frame));
});
    /* Set up xsp to point at &frame + sizeof(char*): on x86 this skips the
     * pretcode slot, leaving xsp where the kernel expects it for sigreturn.
     */
xsp_for_sigreturn = ((app_pc)&frame) + sizeof(char*);
#ifdef X86
asm("mov %0, %%"ASM_XSP : : "m"(xsp_for_sigreturn));
# ifdef MACOS
ASSERT_NOT_IMPLEMENTED(false && "need to pass 2 params to SYS_sigreturn");
asm("jmp _dynamorio_sigreturn");
# else
/* i#2632: recent clang for 32-bit annoyingly won't do the right thing for
* "jmp dynamorio_sigreturn" and leaves relocs so we ensure it's PIC:
*/
void (*asm_jmp_tgt)() = dynamorio_sigreturn;
asm("mov %0, %%"ASM_XCX : : "m"(asm_jmp_tgt));
asm("jmp *%"ASM_XCX);
# endif /* MACOS/LINUX */
#elif defined(AARCH64)
ASSERT_NOT_IMPLEMENTED(false); /* FIXME i#1569 */
#elif defined(ARM)
asm("ldr "ASM_XSP", %0" : : "m"(xsp_for_sigreturn));
asm("b dynamorio_sigreturn");
#endif /* X86/ARM */
ASSERT_NOT_REACHED();
}
static void
thread_set_segment_registers(sigcontext_t *sc)
{
#ifdef X86
/* Fill in the segment registers */
__asm__ __volatile__("mov %%cs, %%ax; mov %%ax, %0" : "=m"(sc->SC_FIELD(cs))
: : "eax");
# ifndef X64
__asm__ __volatile__("mov %%ss, %%ax; mov %%ax, %0" : "=m"(sc->SC_FIELD(ss))
: : "eax");
__asm__ __volatile__("mov %%ds, %%ax; mov %%ax, %0" : "=m"(sc->SC_FIELD(ds))
: : "eax");
__asm__ __volatile__("mov %%es, %%ax; mov %%ax, %0" : "=m"(sc->SC_FIELD(es))
: : "eax");
# endif
__asm__ __volatile__("mov %%fs, %%ax; mov %%ax, %0" : "=m"(sc->SC_FIELD(fs))
: : "eax");
__asm__ __volatile__("mov %%gs, %%ax; mov %%ax, %0" : "=m"(sc->SC_FIELD(gs))
: : "eax");
#endif
}
/* Takes a priv_mcontext_t */
void
thread_set_self_mcontext(priv_mcontext_t *mc)
{
kernel_ucontext_t ucxt;
sig_full_cxt_t sc_full;
sig_full_initialize(&sc_full, &ucxt);
#if defined(LINUX) && defined(X86)
sc_full.sc->fpstate = NULL; /* for mcontext_to_sigcontext */
#endif
mcontext_to_sigcontext(&sc_full, mc, DR_MC_ALL);
thread_set_segment_registers(sc_full.sc);
/* sigreturn takes the mode from cpsr */
IF_ARM(set_pc_mode_in_cpsr(sc_full.sc,
dr_get_isa_mode(get_thread_private_dcontext())));
/* thread_set_self_context will fill in the real fp/simd state for x86 */
thread_set_self_context((void *)sc_full.sc);
ASSERT_NOT_REACHED();
}
#ifdef LINUX
static bool
sig_has_restorer(thread_sig_info_t *info, int sig)
{
# ifdef VMX86_SERVER
/* vmkernel ignores SA_RESTORER (PR 405694) */
return false;
# endif
if (info->app_sigaction[sig] == NULL)
return false;
if (TEST(SA_RESTORER, info->app_sigaction[sig]->flags))
return true;
if (info->app_sigaction[sig]->restorer == NULL)
return false;
/* we cache the result due to the safe_read cost */
if (info->restorer_valid[sig] == -1) {
/* With older kernels, don't seem to need flag: if sa_restorer !=
* NULL kernel will use it. But with newer kernels that's not
* true, and sometimes libc does pass non-NULL.
*/
# ifdef X86
/* Signal restorer code for Ubuntu 7.04:
* 0xffffe420 <__kernel_sigreturn+0>: pop %eax
* 0xffffe421 <__kernel_sigreturn+1>: mov $0x77,%eax
* 0xffffe426 <__kernel_sigreturn+6>: int $0x80
*
* 0xffffe440 <__kernel_rt_sigreturn+0>: mov $0xad,%eax
* 0xffffe445 <__kernel_rt_sigreturn+5>: int $0x80
*/
static const byte SIGRET_NONRT[8] =
{0x58, 0xb8, 0x77, 0x00, 0x00, 0x00, 0xcd, 0x80};
static const byte SIGRET_RT[8] =
{0xb8, 0xad, 0x00, 0x00, 0x00, 0xcd, 0x80};
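        /* N.B.: SIGRET_RT has only 7 code bytes; the zero-initialized 8th byte
         * is compared as well, relying on the byte after the restorer being
         * zero (which appears to hold for the vsyscall page).
         */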
# elif defined(ARM)
static const byte SIGRET_NONRT[8] =
{0x77, 0x70, 0xa0, 0xe3, 0x00, 0x00, 0x00, 0xef};
static const byte SIGRET_RT[8] =
{0xad, 0x70, 0xa0, 0xe3, 0x00, 0x00, 0x00, 0xef};
# elif defined(AARCH64)
static const byte SIGRET_NONRT[8] = { 0 }; /* unused */
static const byte SIGRET_RT[8] =
/* FIXME i#1569: untested */
/* mov w8, #139 ; svc #0 */
{0x68, 0x11, 0x80, 0x52, 0x01, 0x00, 0x00, 0xd4};
# endif
        byte buf[MAX(sizeof(SIGRET_NONRT), sizeof(SIGRET_RT))] = {0};
# ifdef AARCH64
ASSERT_NOT_TESTED(); /* See SIGRET_RT, above. */
# endif
if (safe_read(info->app_sigaction[sig]->restorer, sizeof(buf), buf) &&
((IS_RT_FOR_APP(info, sig) &&
memcmp(buf, SIGRET_RT, sizeof(SIGRET_RT)) == 0) ||
(!IS_RT_FOR_APP(info, sig) &&
memcmp(buf, SIGRET_NONRT, sizeof(SIGRET_NONRT)) == 0))) {
LOG(THREAD_GET, LOG_ASYNCH, 2,
"sig_has_restorer %d: "PFX" looks like restorer, using w/o flag\n",
sig, info->app_sigaction[sig]->restorer);
info->restorer_valid[sig] = 1;
} else
info->restorer_valid[sig] = 0;
}
return (info->restorer_valid[sig] == 1);
}
#endif
/* Returns the size of the frame for delivering to the app.
* For x64 this does NOT include kernel_fpstate_t.
*/
static uint
get_app_frame_size(thread_sig_info_t *info, int sig)
{
if (IS_RT_FOR_APP(info, sig))
return sizeof(sigframe_rt_t);
#ifdef LINUX
else
return sizeof(sigframe_plain_t);
#endif
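    /* On MACOS all app frames are rt, so the fall-through here is unreachable. */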
}
static kernel_ucontext_t *
get_ucontext_from_rt_frame(sigframe_rt_t *frame)
{
#if defined(MACOS) && !defined(X64)
/* Padding makes it unsafe to access uc on frame from kernel */
return frame->puc;
#else
return &frame->uc;
#endif
}
sigcontext_t *
get_sigcontext_from_rt_frame(sigframe_rt_t *frame)
{
return SIGCXT_FROM_UCXT(get_ucontext_from_rt_frame(frame));
}
static sigcontext_t *
get_sigcontext_from_app_frame(thread_sig_info_t *info, int sig, void *frame)
{
sigcontext_t *sc = NULL; /* initialize to satisfy Mac clang */
bool rtframe = IS_RT_FOR_APP(info, sig);
if (rtframe)
sc = get_sigcontext_from_rt_frame((sigframe_rt_t *)frame);
#ifdef LINUX
else {
# ifdef X86
sc = (sigcontext_t *) &(((sigframe_plain_t *)frame)->sc);
# elif defined(ARM)
sc = SIGCXT_FROM_UCXT(&(((sigframe_plain_t *)frame)->uc));
# else
ASSERT_NOT_REACHED();
# endif
}
#endif
return sc;
}
static sigcontext_t *
get_sigcontext_from_pending(thread_sig_info_t *info, int sig)
{
ASSERT(info->sigpending[sig] != NULL);
return get_sigcontext_from_rt_frame(&info->sigpending[sig]->rt_frame);
}
/* Returns the address on the appropriate signal stack where we should copy
* the frame.
* If frame is NULL, assumes signal happened while in DR and has been delayed,
* and thus we need to provide fpstate regardless of whether the original
* had it. If frame is non-NULL, matches frame's amount of fpstate.
*/
static byte *
get_sigstack_frame_ptr(dcontext_t *dcontext, int sig, sigframe_rt_t *frame)
{
thread_sig_info_t *info = (thread_sig_info_t *) dcontext->signal_field;
sigcontext_t *sc = (frame == NULL) ?
get_sigcontext_from_pending(info, sig) :
get_sigcontext_from_rt_frame(frame);
byte *sp;
if (frame != NULL) {
/* signal happened while in cache, grab interrupted xsp */
sp = (byte *) sc->SC_XSP;
LOG(THREAD, LOG_ASYNCH, 3,
"get_sigstack_frame_ptr: using frame's xsp "PFX"\n", sp);
} else {
/* signal happened while in DR, use stored xsp */
sp = (byte *) get_mcontext(dcontext)->xsp;
LOG(THREAD, LOG_ASYNCH, 3, "get_sigstack_frame_ptr: using app xsp "PFX"\n", sp);
}
if (APP_HAS_SIGSTACK(info)) {
/* app has own signal stack */
LOG(THREAD, LOG_ASYNCH, 3,
"get_sigstack_frame_ptr: app has own stack "PFX"\n",
info->app_sigstack.ss_sp);
LOG(THREAD, LOG_ASYNCH, 3,
"\tcur sp="PFX" vs app stack "PFX"-"PFX"\n",
sp, info->app_sigstack.ss_sp,
info->app_sigstack.ss_sp + info->app_sigstack.ss_size);
if (sp > (byte *)info->app_sigstack.ss_sp &&
sp - (byte *)info->app_sigstack.ss_sp < info->app_sigstack.ss_size) {
/* we're currently in the alt stack, so use current xsp */
LOG(THREAD, LOG_ASYNCH, 3,
"\tinside alt stack, so using current xsp "PFX"\n", sp);
} else {
/* need to go to top, stack grows down */
sp = info->app_sigstack.ss_sp + info->app_sigstack.ss_size;
LOG(THREAD, LOG_ASYNCH, 3,
"\tnot inside alt stack, so using base xsp "PFX"\n", sp);
}
}
/* now get frame pointer: need to go down to first field of frame */
sp -= get_app_frame_size(info, sig);
#if defined(LINUX) && defined(X86)
if (frame == NULL) {
/* XXX i#641: we always include space for full xstate,
* even if we don't use it all, which does not match what the
* kernel does, but we're not tracking app actions to know whether
* we can skip lazy fpstate on the delay
*/
sp -= signal_frame_extra_size(true);
} else {
if (sc->fpstate != NULL) {
/* The kernel doesn't seem to lazily include avx, so we don't either,
* which simplifies all our frame copying: if YMM_ENABLED() and the
* fpstate pointer is non-NULL, then we assume there's space for
* full xstate
*/
sp -= signal_frame_extra_size(true);
DOCHECK(1, {
if (YMM_ENABLED()) {
ASSERT_CURIOSITY(sc->fpstate->sw_reserved.magic1 == FP_XSTATE_MAGIC1);
ASSERT(sc->fpstate->sw_reserved.extended_size <=
signal_frame_extra_size(true));
}
});
}
}
#endif /* LINUX && X86 */
/* PR 369907: don't forget the redzone */
sp -= REDZONE_SIZE;
/* Align to 16-bytes. The kernel does this for both 32 and 64-bit code
* these days, so we do as well.
*/
sp = (byte *) ALIGN_BACKWARD(sp, 16);
IF_X86(sp -= sizeof(reg_t)); /* Model retaddr. */
LOG(THREAD, LOG_ASYNCH, 3, "\tplacing frame at "PFX"\n", sp);
return sp;
}
#if defined(LINUX) && !defined(X64)
static void
convert_frame_to_nonrt(dcontext_t *dcontext, int sig, sigframe_rt_t *f_old,
sigframe_plain_t *f_new)
{
# ifdef X86
sigcontext_t *sc_old = get_sigcontext_from_rt_frame(f_old);
f_new->pretcode = f_old->pretcode;
f_new->sig = f_old->sig;
memcpy(&f_new->sc, get_sigcontext_from_rt_frame(f_old), sizeof(sigcontext_t));
if (sc_old->fpstate != NULL) {
/* up to caller to include enough space for fpstate at end */
byte *new_fpstate = (byte *)
ALIGN_FORWARD(((byte *)f_new) + sizeof(*f_new), XSTATE_ALIGNMENT);
memcpy(new_fpstate, sc_old->fpstate, signal_frame_extra_size(false));
f_new->sc.fpstate = (kernel_fpstate_t *) new_fpstate;
}
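    /* The plain frame splits the blocked-signal mask: word 0 lives in
     * sc.oldmask and the remaining words in extramask.
     */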
f_new->sc.oldmask = f_old->uc.uc_sigmask.sig[0];
memcpy(&f_new->extramask, &f_old->uc.uc_sigmask.sig[1],
(_NSIG_WORDS-1) * sizeof(uint));
memcpy(&f_new->retcode, &f_old->retcode, RETCODE_SIZE);
/* now fill in our extra field */
f_new->sig_noclobber = f_new->sig;
# elif defined(ARM)
memcpy(&f_new->uc, &f_old->uc, sizeof(f_new->uc));
memcpy(f_new->retcode, f_old->retcode, sizeof(f_new->retcode));
/* now fill in our extra field */
f_new->sig_noclobber = f_old->info.si_signo;
# endif /* X86 */
LOG(THREAD, LOG_ASYNCH, 3, "\tconverted sig=%d rt frame to non-rt frame\n",
f_new->sig_noclobber);
}
/* separated out to avoid the stack size cost on the common path */
static void
convert_frame_to_nonrt_partial(dcontext_t *dcontext, int sig, sigframe_rt_t *f_old,
sigframe_plain_t *f_new, size_t size)
{
# ifdef X86
/* We create a full-size buffer for conversion and then copy the partial amount. */
byte *frame_and_xstate =
heap_alloc(dcontext, sizeof(sigframe_plain_t) + signal_frame_extra_size(true)
HEAPACCT(ACCT_OTHER));
sigframe_plain_t *f_plain = (sigframe_plain_t *) frame_and_xstate;
    ASSERT_NOT_TESTED(); /* XXX: we have no test of this since the change to heap_alloc */
convert_frame_to_nonrt(dcontext, sig, f_old, f_plain);
memcpy(f_new, f_plain, size);
heap_free(dcontext, frame_and_xstate, sizeof(sigframe_plain_t) +
signal_frame_extra_size(true) HEAPACCT(ACCT_OTHER));
# elif defined(ARM)
/* FIXME i#1551: NYI on ARM */
ASSERT_NOT_IMPLEMENTED(false);
# endif /* X86/ARM */
}
#endif
/* Exported for call from master_signal_handler asm routine.
* For the rt signal frame f_old that was copied to f_new, updates
* the intra-frame absolute pointers to point to the new addresses
* in f_new.
* Only updates the pretcode to the stored app restorer if for_app.
*/
void
fixup_rtframe_pointers(dcontext_t *dcontext, int sig,
sigframe_rt_t *f_old, sigframe_rt_t *f_new, bool for_app)
{
if (dcontext == NULL)
dcontext = get_thread_private_dcontext();
ASSERT(dcontext != NULL);
#if defined(X86) && defined(LINUX)
thread_sig_info_t *info = (thread_sig_info_t *) dcontext->signal_field;
bool has_restorer = sig_has_restorer(info, sig);
# ifdef DEBUG
uint level = 3;
# if !defined(HAVE_MEMINFO)
/* avoid logging every single TRY probe fault */
if (!dynamo_initialized)
level = 5;
# endif
# endif
if (has_restorer && for_app)
f_new->pretcode = (char *) info->app_sigaction[sig]->restorer;
else {
# ifdef VMX86_SERVER
/* PR 404712: skip kernel's restorer code */
if (for_app)
f_new->pretcode = (char *) dynamorio_sigreturn;
# else
# ifdef X64
ASSERT(!for_app || doing_detach); /* detach uses a frame to go native */
# else
/* only point at retcode if old one was -- with newer OS, points at
* vsyscall page and there is no restorer, yet stack restorer code left
* there for gdb compatibility
*/
if (f_old->pretcode == f_old->retcode)
f_new->pretcode = f_new->retcode;
/* else, pointing at vsyscall, or we set it to dynamorio_sigreturn in
* master_signal_handler
*/
LOG(THREAD, LOG_ASYNCH, level, "\tleaving pretcode with old value\n");
# endif
# endif
}
# ifndef X64
f_new->pinfo = &(f_new->info);
f_new->puc = &(f_new->uc);
# endif
if (f_old->uc.uc_mcontext.fpstate != NULL) {
uint frame_size = get_app_frame_size(info, sig);
byte *frame_end = ((byte *)f_new) + frame_size;
byte *tgt = (byte *) ALIGN_FORWARD(frame_end, XSTATE_ALIGNMENT);
ASSERT(tgt - frame_end <= signal_frame_extra_size(true));
memcpy(tgt, f_old->uc.uc_mcontext.fpstate, sizeof(kernel_fpstate_t));
f_new->uc.uc_mcontext.fpstate = (kernel_fpstate_t *) tgt;
if (YMM_ENABLED()) {
kernel_xstate_t *xstate_new = (kernel_xstate_t *) tgt;
kernel_xstate_t *xstate_old =
(kernel_xstate_t *) f_old->uc.uc_mcontext.fpstate;
memcpy(&xstate_new->xstate_hdr, &xstate_old->xstate_hdr,
sizeof(xstate_new->xstate_hdr));
memcpy(&xstate_new->ymmh, &xstate_old->ymmh, sizeof(xstate_new->ymmh));
}
LOG(THREAD, LOG_ASYNCH, level+1, "\tfpstate old="PFX" new="PFX"\n",
f_old->uc.uc_mcontext.fpstate, f_new->uc.uc_mcontext.fpstate);
} else {
/* if fpstate is not set up, we're delivering signal immediately,
* and we shouldn't need an fpstate since DR code won't modify it;
* only if we delayed will we need it, and when delaying we make
* room and set up the pointer in copy_frame_to_pending.
* xref i#641.
*/
LOG(THREAD, LOG_ASYNCH, level+1, "\tno fpstate needed\n");
}
LOG(THREAD, LOG_ASYNCH, level, "\tretaddr = "PFX"\n", f_new->pretcode);
# ifdef RETURN_AFTER_CALL
info->signal_restorer_retaddr = (app_pc) f_new->pretcode;
# endif
/* 32-bit kernel copies to aligned buf first */
IF_X64(ASSERT(ALIGNED(f_new->uc.uc_mcontext.fpstate, 16)));
#elif defined(MACOS)
# ifndef X64
f_new->pinfo = &(f_new->info);
f_new->puc = &(f_new->uc);
# endif
f_new->puc->uc_mcontext = (IF_X64_ELSE(_STRUCT_MCONTEXT64, _STRUCT_MCONTEXT32) *)
&f_new->mc;
LOG(THREAD, LOG_ASYNCH, 3, "\tf_new="PFX", &handler="PFX"\n", f_new, &f_new->handler);
ASSERT(!for_app || ALIGNED(&f_new->handler, 16));
#endif /* X86 && LINUX */
}
static void
memcpy_rt_frame(sigframe_rt_t *frame, byte *dst, bool from_pending)
{
#if defined(MACOS) && !defined(X64)
if (!from_pending) {
/* The kernel puts padding in the middle. We collapse that padding here
* and re-align when we copy to the app stack.
* We should not reference fields from mc onward in what the kernel put
* on the stack, as our sigframe_rt_t layout does not match the kernel's
* variable mid-struct padding.
*/
sigcontext_t *sc = SIGCXT_FROM_UCXT(frame->puc);
memcpy(dst, frame, offsetof(sigframe_rt_t, puc) + sizeof(frame->puc));
memcpy(&((sigframe_rt_t*)dst)->mc, sc,
sizeof(sigframe_rt_t) - offsetof(sigframe_rt_t, mc));
return;
}
#endif
memcpy(dst, frame, sizeof(sigframe_rt_t));
}
/* Copies frame to sp.
 * PR 304708: we now leave in rt form right up until we copy to the
 * app stack, so that we can deliver to a client at a safe spot
 * in rt form; thus this routine converts to a plain frame if necessary.
 * If there is no restorer, touches up pretcode
 * (and, for an rt frame, touches up pinfo and puc).
 * Also touches up the fpstate pointer.
*/
static void
copy_frame_to_stack(dcontext_t *dcontext, int sig, sigframe_rt_t *frame, byte *sp,
bool from_pending)
{
thread_sig_info_t *info = (thread_sig_info_t *) dcontext->signal_field;
bool rtframe = IS_RT_FOR_APP(info, sig);
uint frame_size = get_app_frame_size(info, sig);
#if defined(LINUX) && defined(X86_32)
bool has_restorer = sig_has_restorer(info, sig);
#endif
byte *check_pc;
uint size = frame_size;
#if defined(LINUX) && defined(X86)
sigcontext_t *sc = get_sigcontext_from_rt_frame(frame);
size += (sc->fpstate == NULL ? 0 : signal_frame_extra_size(true));
#endif /* LINUX && X86 */
LOG(THREAD, LOG_ASYNCH, 3, "copy_frame_to_stack: rt=%d, src="PFX", sp="PFX"\n",
rtframe, frame, sp);
/* before we write to the app's stack we need to see if it's writable */
check_pc = (byte *) ALIGN_BACKWARD(sp, PAGE_SIZE);
while (check_pc < (byte *)sp + size) {
uint prot;
DEBUG_DECLARE(bool ok = )
get_memory_info(check_pc, NULL, NULL, &prot);
ASSERT(ok);
if (!TEST(MEMPROT_WRITE, prot)) {
size_t rest = (byte *)sp + size - check_pc;
if (is_executable_area_writable(check_pc)) {
LOG(THREAD, LOG_ASYNCH, 2,
"\tcopy_frame_to_stack: part of stack is unwritable-by-us @"PFX"\n",
check_pc);
flush_fragments_and_remove_region(dcontext, check_pc, rest,
false /* don't own initexit_lock */,
false /* keep futures */);
} else {
LOG(THREAD, LOG_ASYNCH, 2,
"\tcopy_frame_to_stack: part of stack is unwritable @"PFX"\n",
check_pc);
/* copy what we can */
if (rtframe)
memcpy(sp, frame, rest);
#if defined(LINUX) && !defined(X64)
else {
convert_frame_to_nonrt_partial(dcontext, sig, frame,
(sigframe_plain_t *) sp, rest);
}
#endif
/* now throw exception
             * FIXME: what to give as the address? what does the kernel use?
* If the app intercepts SIGSEGV then we'll come right back
* here, so we terminate explicitly instead. FIXME: set exit
* code properly: xref PR 205310.
*/
if (info->app_sigaction[SIGSEGV] == NULL)
os_forge_exception(0, UNREADABLE_MEMORY_EXECUTION_EXCEPTION);
else
os_terminate(dcontext, TERMINATE_PROCESS);
ASSERT_NOT_REACHED();
}
}
check_pc += PAGE_SIZE;
}
if (rtframe) {
ASSERT(frame_size == sizeof(*frame));
memcpy_rt_frame(frame, sp, from_pending);
}
#if defined(LINUX) && !defined(X64)
else
convert_frame_to_nonrt(dcontext, sig, frame, (sigframe_plain_t *) sp);
#endif
/* if !has_restorer we do NOT add the restorer code to the exec list here,
* to avoid removal problems (if handler never returns) and consistency problems
* (would have to mark as selfmod right now if on stack).
* for PROGRAM_SHEPHERDING we recognize as a pattern, and for consistency we
* allow entire region once try to execute -- not a performance worry since should
* very rarely be on the stack: should either be libc restorer code or with recent
* OS in rx vsyscall page.
*/
/* fix up pretcode, pinfo, puc, fpstate */
if (rtframe) {
fixup_rtframe_pointers(dcontext, sig, frame, (sigframe_rt_t *) sp,
true/*for app*/);
}
#if defined(X86) && defined(LINUX)
else {
# ifdef X64
ASSERT_NOT_REACHED();
# else
sigframe_plain_t *f_new = (sigframe_plain_t *) sp;
# ifndef VMX86_SERVER
sigframe_plain_t *f_old = (sigframe_plain_t *) frame;
# endif
if (has_restorer)
f_new->pretcode = (char *) info->app_sigaction[sig]->restorer;
else {
# ifdef VMX86_SERVER
/* PR 404712: skip kernel's restorer code */
f_new->pretcode = (char *) dynamorio_nonrt_sigreturn;
# else
/* see comments in rt case above */
if (f_old->pretcode == f_old->retcode)
f_new->pretcode = f_new->retcode;
else {
/* whether we set to dynamorio_sigreturn in master_signal_handler
* or it's still vsyscall page, we have to convert to non-rt
*/
f_new->pretcode = (char *) dynamorio_nonrt_sigreturn;
} /* else, pointing at vsyscall most likely */
LOG(THREAD, LOG_ASYNCH, 3, "\tleaving pretcode with old value\n");
# endif
}
/* convert_frame_to_nonrt*() should have updated fpstate pointer.
* The inlined fpstate is no longer used on new kernels, and we do that
* as well on older kernels.
*/
ASSERT(f_new->sc.fpstate != &f_new->fpstate);
LOG(THREAD, LOG_ASYNCH, 3, "\tretaddr = "PFX"\n", f_new->pretcode);
# ifdef RETURN_AFTER_CALL
info->signal_restorer_retaddr = (app_pc) f_new->pretcode;
# endif
/* 32-bit kernel copies to aligned buf so no assert on fpstate alignment */
# endif /* X64 */
}
#endif /* X86 && LINUX */
#ifdef MACOS
/* Update handler field, which is passed to the libc trampoline, to app */
ASSERT(info->app_sigaction[sig] != NULL);
((sigframe_rt_t *)sp)->handler = (app_pc) info->app_sigaction[sig]->handler;
#endif
}
/* Copies frame to pending slot.
* PR 304708: we now leave in rt form right up until we copy to the
* app stack, so that we can deliver to a client at a safe spot
* in rt form.
*/
static void
copy_frame_to_pending(dcontext_t *dcontext, int sig, sigframe_rt_t *frame
_IF_CLIENT(byte *access_address))
{
thread_sig_info_t *info = (thread_sig_info_t *) dcontext->signal_field;
sigframe_rt_t *dst = &(info->sigpending[sig]->rt_frame);
memcpy_rt_frame(frame, (byte *)dst, false/*!already pending*/);
#if defined(LINUX) && defined(X86)
/* For lazy fpstate, it's possible there was no fpstate when the kernel
* sent us the frame, but in between then and now the app executed some
* fp or xmm/ymm instrs. Today we always add fpstate just in case.
* XXX i#641 optimization: track whether any fp/xmm/ymm
* instrs happened and avoid this.
*/
/* we'll fill in updated fpstate at delivery time, but we go ahead and
* copy now in case our own retrieval somehow misses some fields
*/
if (frame->uc.uc_mcontext.fpstate != NULL) {
memcpy(&info->sigpending[sig]->xstate, frame->uc.uc_mcontext.fpstate,
/* XXX: assuming full xstate if avx is enabled */
signal_frame_extra_size(false));
}
/* we must set the pointer now so that later save_fpstate, etc. work */
dst->uc.uc_mcontext.fpstate = (kernel_fpstate_t *)&info->sigpending[sig]->xstate;
#endif /* LINUX && X86 */
#ifdef CLIENT_INTERFACE
info->sigpending[sig]->access_address = access_address;
#endif
info->sigpending[sig]->use_sigcontext = false;
#ifdef MACOS
    /* We rely on puc to find sc so we have to fix it up */
fixup_rtframe_pointers(dcontext, sig, frame, dst, false/*!for app*/);
#endif
LOG(THREAD, LOG_ASYNCH, 3, "copy_frame_to_pending from "PFX"\n", frame);
DOLOG(3, LOG_ASYNCH, {
LOG(THREAD, LOG_ASYNCH, 3, "sigcontext:\n");
dump_sigcontext(dcontext, get_sigcontext_from_rt_frame(dst));
});
}
/**** real work ***********************************************/
/* transfer control from signal handler to fcache return routine */
static void
transfer_from_sig_handler_to_fcache_return(dcontext_t *dcontext, kernel_ucontext_t *uc,
sigcontext_t *sc_interrupted, int sig,
app_pc next_pc,
linkstub_t *last_exit, bool is_kernel_xfer)
{
sigcontext_t *sc = SIGCXT_FROM_UCXT(uc);
#ifdef CLIENT_INTERFACE
if (is_kernel_xfer) {
sig_full_cxt_t sc_interrupted_full = { sc_interrupted, NULL/*not provided*/ };
sig_full_cxt_t sc_full;
sig_full_initialize(&sc_full, uc);
sc->SC_XIP = (ptr_uint_t) next_pc;
if (instrument_kernel_xfer(dcontext, DR_XFER_SIGNAL_DELIVERY, sc_interrupted_full,
NULL, NULL, next_pc, sc->SC_XSP, sc_full, NULL, sig))
next_pc = canonicalize_pc_target(dcontext, (app_pc)sc->SC_XIP);
}
#endif
dcontext->next_tag = canonicalize_pc_target(dcontext, next_pc);
IF_ARM(dr_set_isa_mode(dcontext, get_pc_mode_from_cpsr(sc), NULL));
/* Set our sigreturn context to point to fcache_return!
* Then we'll go back through kernel, appear in fcache_return,
* and go through dispatch & interp, without messing up dynamo stack.
* Note that even if this is a write in the shared cache, we
* still go to the private fcache_return for simplicity.
*/
sc->SC_XIP = (ptr_uint_t) fcache_return_routine(dcontext);
#ifdef AARCHXX
/* We do not have to set dr_reg_stolen in dcontext's mcontext here
* because dcontext's mcontext is stale and we used the mcontext
* created from recreate_app_state_internal with the original sigcontext.
*/
/* We restore dr_reg_stolen's app value in recreate_app_state_internal,
* so now we need set dr_reg_stolen to hold DR's TLS before sigreturn
* from DR's handler.
*/
ASSERT(get_sigcxt_stolen_reg(sc) != (reg_t) *get_dr_tls_base_addr());
set_sigcxt_stolen_reg(sc, (reg_t) *get_dr_tls_base_addr());
# ifndef AARCH64
/* We're going to our fcache_return gencode which uses DEFAULT_ISA_MODE */
set_pc_mode_in_cpsr(sc, DEFAULT_ISA_MODE);
# endif
#endif
#if defined(X64) || defined(ARM)
/* x64 always uses shared gencode */
get_local_state_extended()->spill_space.IF_X86_ELSE(xax, r0) =
sc->IF_X86_ELSE(SC_XAX, SC_R0);
# ifdef AARCH64
/* X1 needs to be spilled because of br x1 in exit stubs. */
get_local_state_extended()->spill_space.r1 = sc->SC_R1;
# endif
#else
get_mcontext(dcontext)->IF_X86_ELSE(xax, r0) = sc->IF_X86_ELSE(SC_XAX, SC_R0);
#endif
LOG(THREAD, LOG_ASYNCH, 2, "\tsaved xax "PFX"\n", sc->IF_X86_ELSE(SC_XAX, SC_R0));
sc->IF_X86_ELSE(SC_XAX, SC_R0) = (ptr_uint_t) last_exit;
LOG(THREAD, LOG_ASYNCH, 2,
"\tset next_tag to "PFX", resuming in fcache_return\n", next_pc);
LOG(THREAD, LOG_ASYNCH, 3, "transfer_from_sig_handler_to_fcache_return\n");
DOLOG(3, LOG_ASYNCH, {
LOG(THREAD, LOG_ASYNCH, 3, "sigcontext:\n");
dump_sigcontext(dcontext, sc);
});
}
#ifdef CLIENT_INTERFACE
static dr_signal_action_t
send_signal_to_client(dcontext_t *dcontext, int sig, sigframe_rt_t *frame,
sigcontext_t *raw_sc, byte *access_address,
bool blocked, fragment_t *fragment)
{
kernel_ucontext_t *uc = get_ucontext_from_rt_frame(frame);
dr_siginfo_t si;
dr_signal_action_t action;
/* XXX #1615: we need a full ucontext to store pre-xl8 simd values.
* Right now we share the same simd values with post-xl8.
*/
sig_full_cxt_t raw_sc_full;
sig_full_initialize(&raw_sc_full, uc);
raw_sc_full.sc = raw_sc;
if (!dr_signal_hook_exists())
return DR_SIGNAL_DELIVER;
LOG(THREAD, LOG_ASYNCH, 2, "sending signal to client\n");
si.sig = sig;
si.drcontext = (void *) dcontext;
/* It's safe to allocate since we do not send signals that interrupt DR.
* With priv_mcontext_t x2 that's a little big for stack alloc.
*/
si.mcontext = heap_alloc(dcontext, sizeof(*si.mcontext) HEAPACCT(ACCT_OTHER));
si.raw_mcontext = heap_alloc(dcontext, sizeof(*si.raw_mcontext) HEAPACCT(ACCT_OTHER));
dr_mcontext_init(si.mcontext);
dr_mcontext_init(si.raw_mcontext);
/* i#207: fragment tag and fcache start pc on fault. */
si.fault_fragment_info.tag = NULL;
si.fault_fragment_info.cache_start_pc = NULL;
/* i#182/PR 449996: we provide the pre-translation context */
if (raw_sc != NULL) {
fragment_t wrapper;
si.raw_mcontext_valid = true;
sigcontext_to_mcontext(dr_mcontext_as_priv_mcontext(si.raw_mcontext),
&raw_sc_full, si.raw_mcontext->flags);
/* i#207: fragment tag and fcache start pc on fault. */
/* FIXME: we should avoid the fragment_pclookup since it is expensive
* and since we already did the work of a lookup when translating
*/
if (fragment == NULL)
fragment = fragment_pclookup(dcontext, si.raw_mcontext->pc, &wrapper);
if (fragment != NULL && !hide_tag_from_client(fragment->tag)) {
si.fault_fragment_info.tag = fragment->tag;
si.fault_fragment_info.cache_start_pc = FCACHE_ENTRY_PC(fragment);
si.fault_fragment_info.is_trace = TEST(FRAG_IS_TRACE,
fragment->flags);
si.fault_fragment_info.app_code_consistent =
!TESTANY(FRAG_WAS_DELETED|FRAG_SELFMOD_SANDBOXED,
fragment->flags);
}
} else
si.raw_mcontext_valid = false;
/* The client has no way to calculate this when using
* instrumentation that deliberately faults (to shift a rare event
* out of the fastpath) so we provide it. When raw_mcontext is
* available the client can calculate it, but we provide it as a
* convenience anyway.
*/
si.access_address = access_address;
si.blocked = blocked;
ucontext_to_mcontext(dr_mcontext_as_priv_mcontext(si.mcontext), uc);
/* We disallow the client calling dr_redirect_execution(), so we
* will not leak si
*/
action = instrument_signal(dcontext, &si);
if (action == DR_SIGNAL_DELIVER ||
action == DR_SIGNAL_REDIRECT) {
/* propagate client changes */
CLIENT_ASSERT(si.mcontext->flags == DR_MC_ALL,
"signal mcontext flags cannot be changed");
mcontext_to_ucontext(uc, dr_mcontext_as_priv_mcontext(si.mcontext));
} else if (action == DR_SIGNAL_SUPPRESS && raw_sc != NULL) {
/* propagate client changes */
CLIENT_ASSERT(si.raw_mcontext->flags == DR_MC_ALL,
"signal mcontext flags cannot be changed");
mcontext_to_sigcontext(&raw_sc_full,
dr_mcontext_as_priv_mcontext(si.raw_mcontext),
si.raw_mcontext->flags);
}
heap_free(dcontext, si.mcontext, sizeof(*si.mcontext) HEAPACCT(ACCT_OTHER));
heap_free(dcontext, si.raw_mcontext, sizeof(*si.raw_mcontext) HEAPACCT(ACCT_OTHER));
return action;
}
/* Returns false if caller should exit */
static bool
handle_client_action_from_cache(dcontext_t *dcontext, int sig, dr_signal_action_t action,
sigframe_rt_t *our_frame, sigcontext_t *sc_orig,
sigcontext_t *sc_interrupted, bool blocked)
{
thread_sig_info_t *info = (thread_sig_info_t *) dcontext->signal_field;
kernel_ucontext_t *uc = get_ucontext_from_rt_frame(our_frame);
sigcontext_t *sc = SIGCXT_FROM_UCXT(uc);
/* in order to pass to the client, we come all the way here for signals
* the app has no handler for
*/
if (action == DR_SIGNAL_REDIRECT) {
/* send_signal_to_client copied mcontext into our
* master_signal_handler frame, so we set up for fcache_return w/
* our frame's state
*/
transfer_from_sig_handler_to_fcache_return
(dcontext, uc, sc_interrupted, sig,
(app_pc) sc->SC_XIP, (linkstub_t *) get_asynch_linkstub(), true);
if (is_building_trace(dcontext)) {
LOG(THREAD, LOG_ASYNCH, 3, "\tsquashing trace-in-progress\n");
trace_abort(dcontext);
}
return false;
    } else if (action == DR_SIGNAL_SUPPRESS ||
(!blocked && info->app_sigaction[sig] != NULL &&
info->app_sigaction[sig]->handler == (handler_t)SIG_IGN)) {
LOG(THREAD, LOG_ASYNCH, 2, "%s: not delivering!\n",
(action == DR_SIGNAL_SUPPRESS) ?
"client suppressing signal" :
"app signal handler is SIG_IGN");
/* restore original (untranslated) sc */
*get_sigcontext_from_rt_frame(our_frame) = *sc_orig;
return false;
    } else if (!blocked && /* no BYPASS for blocked */
(action == DR_SIGNAL_BYPASS ||
(info->app_sigaction[sig] == NULL ||
info->app_sigaction[sig]->handler == (handler_t)SIG_DFL))) {
LOG(THREAD, LOG_ASYNCH, 2, "%s: executing default action\n",
(action == DR_SIGNAL_BYPASS) ?
"client forcing default" :
"app signal handler is SIG_DFL");
if (execute_default_from_cache(dcontext, sig, our_frame, sc_orig)) {
/* if we haven't terminated, restore original (untranslated) sc
* on request.
*/
*get_sigcontext_from_rt_frame(our_frame) = *sc_orig;
LOG(THREAD, LOG_ASYNCH, 2, "%s: restored xsp="PFX", xip="PFX"\n",
__FUNCTION__, get_sigcontext_from_rt_frame(our_frame)->SC_XSP,
get_sigcontext_from_rt_frame(our_frame)->SC_XIP);
}
return false;
}
CLIENT_ASSERT(action == DR_SIGNAL_DELIVER, "invalid signal event return value");
return true;
}
#endif
static void
abort_on_fault(dcontext_t *dcontext, uint dumpcore_flag, app_pc pc, byte *target,
int sig, sigframe_rt_t *frame,
const char *prefix, const char *signame, const char *where)
{
kernel_ucontext_t *ucxt = &frame->uc;
sigcontext_t *sc = SIGCXT_FROM_UCXT(ucxt);
bool stack_overflow = (sig == SIGSEGV && is_stack_overflow(dcontext, target));
#if defined(STATIC_LIBRARY) && defined(LINUX)
thread_sig_info_t *info = (thread_sig_info_t *) dcontext->signal_field;
uint orig_dumpcore_flag = dumpcore_flag;
if (init_info.app_sigaction != NULL)
info = &init_info; /* use init-time handler */
ASSERT(info->app_sigaction != NULL);
#endif
const char *fmt =
"%s %s at PC "PFX"\n"
"Received SIG%s at%s pc "PFX" in thread "TIDFMT"\n"
"Base: "PFX"\n"
"Registers:"
#ifdef X86
"eax="PFX" ebx="PFX" ecx="PFX" edx="PFX"\n"
"\tesi="PFX" edi="PFX" esp="PFX" ebp="PFX"\n"
# ifdef X64
"\tr8 ="PFX" r9 ="PFX" r10="PFX" r11="PFX"\n"
"\tr12="PFX" r13="PFX" r14="PFX" r15="PFX"\n"
# endif /* X64 */
#elif defined(ARM)
# ifndef X64
" r0 ="PFX" r1 ="PFX" r2 ="PFX" r3 ="PFX"\n"
"\tr4 ="PFX" r5 ="PFX" r6 ="PFX" r7 ="PFX"\n"
"\tr8 ="PFX" r9 ="PFX" r10="PFX" r11="PFX"\n"
"\tr12="PFX" r13="PFX" r14="PFX" r15="PFX"\n"
# else
# error NYI on AArch64
# endif
#endif /* X86/ARM */
"\teflags="PFX;
#if defined(STATIC_LIBRARY) && defined(LINUX)
/* i#2119: if we're invoking an app handler, disable a fatal coredump. */
if (INTERNAL_OPTION(invoke_app_on_crash) &&
info->app_sigaction[sig] != NULL && IS_RT_FOR_APP(info, sig) &&
TEST(dumpcore_flag, DYNAMO_OPTION(dumpcore_mask)) &&
!DYNAMO_OPTION(live_dump))
dumpcore_flag = 0;
#endif
report_dynamorio_problem(dcontext, dumpcore_flag |
(stack_overflow ? DUMPCORE_STACK_OVERFLOW : 0),
pc, (app_pc) sc->SC_FP,
fmt, prefix,
stack_overflow ? STACK_OVERFLOW_NAME : CRASH_NAME,
pc, signame, where, pc, get_thread_id(),
get_dynamorio_dll_start(),
#ifdef X86
sc->SC_XAX, sc->SC_XBX, sc->SC_XCX, sc->SC_XDX,
sc->SC_XSI, sc->SC_XDI, sc->SC_XSP, sc->SC_XBP,
# ifdef X64
sc->SC_FIELD(r8), sc->SC_FIELD(r9),
sc->SC_FIELD(r10), sc->SC_FIELD(r11),
sc->SC_FIELD(r12), sc->SC_FIELD(r13),
sc->SC_FIELD(r14), sc->SC_FIELD(r15),
# endif /* X64 */
#elif defined(ARM)
# ifndef X64
sc->SC_FIELD(arm_r0), sc->SC_FIELD(arm_r1),
sc->SC_FIELD(arm_r2), sc->SC_FIELD(arm_r3),
sc->SC_FIELD(arm_r4), sc->SC_FIELD(arm_r5),
sc->SC_FIELD(arm_r6), sc->SC_FIELD(arm_r7),
sc->SC_FIELD(arm_r8), sc->SC_FIELD(arm_r9),
sc->SC_FIELD(arm_r10), sc->SC_FIELD(arm_fp),
sc->SC_FIELD(arm_ip), sc->SC_FIELD(arm_sp),
sc->SC_FIELD(arm_lr), sc->SC_FIELD(arm_pc),
# else
# error NYI on AArch64
# endif /* X64 */
#endif /* X86/ARM */
sc->SC_XFLAGS);
#if defined(STATIC_LIBRARY) && defined(LINUX)
/* i#2119: For static DR, the surrounding app's handler may well be
* safe to invoke even when DR state is messed up: it's worth a try, as it
* likely has useful reporting features for users of the app.
* We limit to Linux and RT for simplicity: it can be expanded later if static
* library use expands.
*/
if (INTERNAL_OPTION(invoke_app_on_crash) &&
info->app_sigaction[sig] != NULL && IS_RT_FOR_APP(info, sig)) {
SYSLOG(SYSLOG_WARNING, INVOKING_APP_HANDLER, 2,
get_application_name(), get_application_pid());
(*info->app_sigaction[sig]->handler)(sig, &frame->info, ucxt);
/* If the app handler didn't terminate, now get a fatal core. */
if (TEST(orig_dumpcore_flag, DYNAMO_OPTION(dumpcore_mask)) &&
!DYNAMO_OPTION(live_dump))
os_dump_core("post-app-handler attempt at core dump");
}
#endif
os_terminate(dcontext, TERMINATE_PROCESS);
ASSERT_NOT_REACHED();
}
static void
abort_on_DR_fault(dcontext_t *dcontext, app_pc pc, byte *target, int sig,
sigframe_rt_t *frame, const char *signame, const char *where)
{
abort_on_fault(dcontext, DUMPCORE_INTERNAL_EXCEPTION, pc, target, sig, frame,
exception_label_core, signame, where);
ASSERT_NOT_REACHED();
}
/* Returns whether unlinked or mangled syscall.
* Restored in receive_pending_signal.
*/
static bool
unlink_fragment_for_signal(dcontext_t *dcontext, fragment_t *f,
byte *pc/*interruption pc*/)
{
/* We only come here if we interrupted a fragment in the cache,
* or interrupted transition gencode (i#2019),
* which means that this thread's DR state is safe, and so it
* should be ok to acquire a lock. xref PR 596069.
*
* There is a race where if two threads hit a signal in the same
* shared fragment, the first could re-link after the second
* un-links but before the second exits, and the second could then
* execute the syscall, resulting in arbitrary delay prior to
* signal delivery. We don't want to allocate global memory,
* but we could use a static array of counters (since should
* be small # of interrupted shared fragments at any one time)
* used as refcounts so we only unlink when all are done.
* Not bothering to implement now: going to live w/ chance of
* long signal delays. xref PR 596069.
*/
bool changed = false;
bool waslinking = is_couldbelinking(dcontext);
if (!waslinking)
enter_couldbelinking(dcontext, NULL, false);
/* may not be linked if trace_relink or something */
if (TEST(FRAG_COARSE_GRAIN, f->flags)) {
/* XXX PR 213040: we don't support unlinking coarse, so we try
* not to come here, but for indirect branch and other spots
* where we don't yet support translation (since can't fault)
* we end up w/ no bound on delivery...
*/
} else if (TEST(FRAG_LINKED_OUTGOING, f->flags)) {
LOG(THREAD, LOG_ASYNCH, 3,
"\tunlinking outgoing for interrupted F%d\n", f->id);
SHARED_FLAGS_RECURSIVE_LOCK(f->flags, acquire, change_linking_lock);
unlink_fragment_outgoing(dcontext, f);
SHARED_FLAGS_RECURSIVE_LOCK(f->flags, release, change_linking_lock);
changed = true;
} else {
LOG(THREAD, LOG_ASYNCH, 3,
"\toutgoing already unlinked for interrupted F%d\n", f->id);
}
if (TEST(FRAG_HAS_SYSCALL, f->flags)) {
/* Syscalls are signal barriers!
* Make sure the next syscall (if any) in f is not executed!
* instead go back to dispatch right before the syscall
*/
/* syscall mangling does a bunch of decodes but only one write,
* changing the target of a short jmp, which is atomic
* since a one-byte write, so we don't need the change_linking_lock.
*/
if (mangle_syscall_code(dcontext, f, pc, false/*do not skip exit cti*/))
changed = true;
}
if (!waslinking)
enter_nolinking(dcontext, NULL, false);
return changed;
}
static bool
interrupted_inlined_syscall(dcontext_t *dcontext, fragment_t *f,
byte *pc/*interruption pc*/)
{
bool pre_or_post_syscall = false;
if (TEST(FRAG_HAS_SYSCALL, f->flags)) {
/* PR 596147: if the thread is currently in an inlined
* syscall when a signal comes in, we can't delay and bound the
* delivery time: we need to deliver now. Should decode
* backward and see if syscall. We assume our translation of
* the interruption state is fine to re-start: i.e., the syscall
* is complete if kernel has pc at post-syscall point, and
* kernel set EINTR in eax if necessary.
*/
/* Interrupted fcache, so ok to alloc memory for decode */
instr_t instr;
byte *nxt_pc;
instr_init(dcontext, &instr);
nxt_pc = decode(dcontext, pc, &instr);
if (nxt_pc != NULL && instr_valid(&instr) &&
instr_is_syscall(&instr)) {
/* pre-syscall but post-jmp so can't skip syscall */
pre_or_post_syscall = true;
} else {
size_t syslen = syscall_instr_length(FRAG_ISA_MODE(f->flags));
instr_reset(dcontext, &instr);
nxt_pc = decode(dcontext, pc - syslen, &instr);
if (nxt_pc != NULL && instr_valid(&instr) &&
instr_is_syscall(&instr)) {
#if defined(X86) && !defined(MACOS)
/* decoding backward so check for exit cti jmp prior
* to syscall to ensure no mismatch
*/
instr_reset(dcontext, &instr);
nxt_pc = decode(dcontext, pc - syslen - JMP_LONG_LENGTH, &instr);
if (nxt_pc != NULL && instr_valid(&instr) &&
instr_get_opcode(&instr) == OP_jmp) {
/* post-inlined-syscall */
pre_or_post_syscall = true;
}
#else
/* On Mac and ARM we have some TLS spills in between so we just
* trust that this is a syscall (esp on ARM w/ aligned instrs).
*/
pre_or_post_syscall = true;
#endif
}
}
instr_free(dcontext, &instr);
}
return pre_or_post_syscall;
}
/* i#1145: auto-restart syscalls interrupted by signals */
static bool
adjust_syscall_for_restart(dcontext_t *dcontext, thread_sig_info_t *info, int sig,
sigcontext_t *sc, fragment_t *f, reg_t orig_retval_reg)
{
byte *pc = (byte *) sc->SC_XIP;
int sys_inst_len;
if (sc->IF_X86_ELSE(SC_XAX, SC_R0) != -EINTR) {
        /* The syscall did not return -EINTR, so there is no reason to restart.
* Some syscalls succeed on a signal coming in.
* E.g., SYS_wait4 on SIGCHLD, or reading from a slow device.
* XXX: Now that we pass SA_RESTART we should never get here?
*/
return false;
}
/* Don't restart if the app's handler says not to */
if (info->app_sigaction[sig] != NULL &&
!TEST(SA_RESTART, info->app_sigaction[sig]->flags)) {
return false;
}
/* XXX i#1145: some syscalls are never restarted when interrupted by a signal.
* We check those that are simple to distinguish below, but not all are. We have
* this under an option so it can be disabled if necessary.
*/
if (!DYNAMO_OPTION(restart_syscalls))
return false;
/* Now that we use SA_RESTART we rely on that and ignore our own
* inaccurate check sysnum_is_not_restartable(sysnum).
* SA_RESTART also means we can just be passed in the register value to restore.
*/
LOG(THREAD, LOG_ASYNCH, 2, "%s: restored xax/r0 to %ld\n", __FUNCTION__,
orig_retval_reg);
#ifdef X86
sc->SC_XAX = orig_retval_reg;
#elif defined(AARCHXX)
sc->SC_R0 = orig_retval_reg;
#else
# error NYI
#endif
/* Now adjust the pc to point at the syscall instruction instead of after it,
* so when we resume we'll go back to the syscall.
* Adjusting solves transparency as well: natively the kernel adjusts
* the pc before setting up the signal frame.
* We don't pass in the post-syscall pc provided by the kernel because
* we want the app pc, not the raw pc.
*/
dr_isa_mode_t isa_mode;
if (is_after_syscall_address(dcontext, pc) ||
pc == vsyscall_sysenter_return_pc) {
isa_mode = dr_get_isa_mode(dcontext);
} else {
/* We're going to walk back in the fragment, not gencode */
ASSERT(f != NULL);
isa_mode = FRAG_ISA_MODE(f->flags);
}
sys_inst_len = syscall_instr_length(isa_mode);
if (pc == vsyscall_sysenter_return_pc) {
#ifdef X86
sc->SC_XIP = (ptr_uint_t) (vsyscall_syscall_end_pc - sys_inst_len);
/* To restart sysenter we must re-copy xsp into xbp, as xbp is
* clobbered by the kernel.
* XXX: The kernel points at the int 0x80 in vsyscall on a restart
* and so doesn't have to do this: should we do that too? If so we'll
* have to avoid interpreting our own hook which is right after the
* int 0x80.
*/
sc->SC_XBP = sc->SC_XSP;
#else
ASSERT_NOT_REACHED();
#endif
} else if (is_after_syscall_address(dcontext, pc)) {
/* We're at do_syscall: point at app syscall instr. We want an app
* address b/c this signal will be delayed and the delivery will use
* a direct app context: no translation from the cache.
* The caller sets info->sigpending[sig]->use_sigcontext for us.
*/
sc->SC_XIP = (ptr_uint_t) (dcontext->asynch_target - sys_inst_len);
DODEBUG({
instr_t instr;
dr_isa_mode_t old_mode;
dr_set_isa_mode(dcontext, isa_mode, &old_mode);
instr_init(dcontext, &instr);
ASSERT(decode(dcontext, (app_pc) sc->SC_XIP, &instr) != NULL &&
instr_is_syscall(&instr));
instr_free(dcontext, &instr);
dr_set_isa_mode(dcontext, old_mode, NULL);
});
} else {
ASSERT_NOT_REACHED(); /* Inlined syscalls no longer come here. */
}
LOG(THREAD, LOG_ASYNCH, 2, "%s: sigreturn pc is now "PFX"\n", __FUNCTION__,
sc->SC_XIP);
return true;
}
static void
record_pending_signal(dcontext_t *dcontext, int sig, kernel_ucontext_t *ucxt,
sigframe_rt_t *frame, bool forged
_IF_CLIENT(byte *access_address))
{
thread_sig_info_t *info = (thread_sig_info_t *) dcontext->signal_field;
os_thread_data_t *ostd = (os_thread_data_t *) dcontext->os_field;
sigcontext_t *sc = SIGCXT_FROM_UCXT(ucxt);
/* XXX #1615: we need a full ucontext to store pre-xl8 simd values */
sigcontext_t sc_orig;
byte *pc = (byte *) sc->SC_XIP;
byte *xsp = (byte*) sc->SC_XSP;
bool receive_now = false;
bool blocked = false;
bool handled = false;
bool at_auto_restart_syscall = false;
int syslen = 0;
reg_t orig_retval_reg = sc->IF_X86_ELSE(SC_XAX, SC_R0);
sigpending_t *pend;
fragment_t *f = NULL;
fragment_t wrapper;
/* We no longer block SUSPEND_SIGNAL (i#184/PR 450670) or SIGSEGV (i#193/PR 287309).
* But we can have re-entrancy issues in this routine if the app uses the same
* SUSPEND_SIGNAL, or the nested SIGSEGV needs to be sent to the app. The
* latter shouldn't happen unless the app sends SIGSEGV via SYS_kill().
*/
if (ostd->processing_signal > 0 ||
/* If we interrupted receive_pending_signal() we can't prepend a new
* pending or delete an old b/c we might mess up the state so we
* just drop this one: should only happen for alarm signal
*/
(info->accessing_sigpending &&
/* we do want to report a crash in receive_pending_signal() */
(can_always_delay[sig] ||
is_sys_kill(dcontext, pc, (byte*)sc->SC_XSP, &frame->info)))) {
LOG(THREAD, LOG_ASYNCH, 1, "nested signal %d\n", sig);
ASSERT(ostd->processing_signal == 0 || sig == SUSPEND_SIGNAL || sig == SIGSEGV);
ASSERT(can_always_delay[sig] ||
is_sys_kill(dcontext, pc, (byte*)sc->SC_XSP, &frame->info));
/* To avoid re-entrant execution of special_heap_alloc() and of
* prepending to the pending list we just drop this signal.
* FIXME i#194/PR 453996: do better.
*/
STATS_INC(num_signals_dropped);
SYSLOG_INTERNAL_WARNING_ONCE("dropping nested signal");
return;
}
ostd->processing_signal++; /* no need for atomicity: thread-private */
/* First, check whether blocked, before we restore for sigsuspend (i#1340). */
if (kernel_sigismember(&info->app_sigblocked, sig))
blocked = true;
if (info->in_sigsuspend) {
/* sigsuspend ends when a signal is received, so restore the
* old blocked set
*/
info->app_sigblocked = info->app_sigblocked_save;
info->in_sigsuspend = false;
/* update the set to restore to post-signal-delivery */
#ifdef MACOS
ucxt->uc_sigmask = *(__darwin_sigset_t *) &info->app_sigblocked;
#else
ucxt->uc_sigmask = info->app_sigblocked;
#endif
#ifdef DEBUG
if (stats->loglevel >= 3 && (stats->logmask & LOG_ASYNCH) != 0) {
LOG(THREAD, LOG_ASYNCH, 3, "after sigsuspend, blocked signals are now:\n");
dump_sigset(dcontext, &info->app_sigblocked);
}
#endif
}
if (get_at_syscall(dcontext))
syslen = syscall_instr_length(dr_get_isa_mode(dcontext));
if (info->app_sigaction[sig] != NULL &&
info->app_sigaction[sig]->handler == (handler_t)SIG_IGN
/* If a client registered a handler, put this in the queue.
* Races between registering, queueing, and delivering are fine.
*/
IF_CLIENT_INTERFACE(&& !dr_signal_hook_exists())) {
LOG(THREAD, LOG_ASYNCH, 3,
"record_pending_signal (%d at pc "PFX"): action is SIG_IGN!\n",
sig, pc);
ostd->processing_signal--;
return;
} else if (blocked) {
/* signal is blocked by app, so just record it, don't receive now */
LOG(THREAD, LOG_ASYNCH, 2,
"record_pending_signal(%d at pc "PFX"): signal is currently blocked\n",
sig, pc);
IF_LINUX(handled = notify_signalfd(dcontext, info, sig, frame));
} else if (safe_is_in_fcache(dcontext, pc, xsp)) {
LOG(THREAD, LOG_ASYNCH, 2,
"record_pending_signal(%d) from cache pc "PFX"\n", sig, pc);
if (forged || can_always_delay[sig]) {
/* to make translation easier, want to delay if can until dispatch
* unlink cur frag, wait for dispatch
*/
/* check for coarse first to avoid cost of coarse pclookup */
if (get_fcache_coarse_info(pc) != NULL) {
/* PR 213040: we can't unlink coarse. If we fail to translate
* we'll switch back to delaying, below.
*/
if (sig_is_alarm_signal(sig) &&
info->sigpending[sig] != NULL &&
info->sigpending[sig]->next != NULL &&
info->skip_alarm_xl8 > 0) {
/* Translating coarse fragments is very expensive so we
* avoid doing it when we're having trouble keeping up w/
* the alarm frequency (PR 213040), but we make sure we try
* every once in a while to avoid unbounded signal delay
*/
info->skip_alarm_xl8--;
STATS_INC(num_signals_coarse_delayed);
} else {
if (sig_is_alarm_signal(sig))
info->skip_alarm_xl8 = SKIP_ALARM_XL8_MAX;
receive_now = true;
LOG(THREAD, LOG_ASYNCH, 2,
"signal interrupted coarse fragment so delivering now\n");
}
} else {
f = fragment_pclookup(dcontext, pc, &wrapper);
ASSERT(f != NULL);
ASSERT(!TEST(FRAG_COARSE_GRAIN, f->flags)); /* checked above */
LOG(THREAD, LOG_ASYNCH, 2, "\tdelaying until exit F%d\n", f->id);
if (interrupted_inlined_syscall(dcontext, f, pc)) {
/* PR 596147: if delayable signal arrives after syscall-skipping
* jmp, either at syscall or post-syscall, we deliver
* immediately, since we can't bound the delay
*/
receive_now = true;
LOG(THREAD, LOG_ASYNCH, 2,
"signal interrupted pre/post syscall itself so delivering now\n");
/* We don't set at_auto_restart_syscall because we just leave
* the SA_RESTART kernel-supplied resumption point: with no
* post-syscall handler to worry about we have no need to
* change anything.
*/
} else {
/* could get another signal but should be in same fragment */
ASSERT(info->interrupted == NULL || info->interrupted == f);
if (unlink_fragment_for_signal(dcontext, f, pc)) {
info->interrupted = f;
info->interrupted_pc = pc;
} else {
/* either was unlinked for trace creation, or we got another
* signal before exiting cache to handle 1st
*/
ASSERT(info->interrupted == NULL ||
info->interrupted == f);
}
}
}
} else {
/* the signal interrupted code cache => run handler now! */
receive_now = true;
LOG(THREAD, LOG_ASYNCH, 2, "\tnot certain can delay so handling now\n");
}
} else if (in_generated_routine(dcontext, pc) ||
/* XXX: should also check fine stubs */
safe_is_in_coarse_stubs(dcontext, pc, xsp)) {
/* Assumption: dynamo errors have been caught already inside
* the master_signal_handler, thus any error in a generated routine
* is an asynch signal that can be delayed
*/
LOG(THREAD, LOG_ASYNCH, 2,
"record_pending_signal(%d) from gen routine or stub "PFX"\n", sig, pc);
if (get_at_syscall(dcontext)) {
/* i#1206: the syscall was interrupted, so we can go back to dispatch
* and don't need to receive it now (which complicates post-syscall handling)
* w/o any extra delay.
*/
/* i#2659: we now use SA_RESTART to handle interrupting native
* auto-restart syscalls. That means we have to adjust do_syscall
* interruption to give us control so we can deliver the signal. Due to
* needing to run post-syscall handlers (we don't want to get into nested
* dcontexts like on Windows) it's simplest to go back to dispatch, which
* is most easily done by emulating the non-SA_RESTART behavior.
* XXX: This all seems backward: we should revisit this model and see if
* we can get rid of this emulation and the auto-restart emulation.
*/
/* The get_at_syscall() check above distinguishes from just having
* arrived at the syscall instr.
*/
if (is_after_syscall_address(dcontext, pc + syslen)) {
LOG(THREAD, LOG_ASYNCH, 2,
"Adjusting interrupted auto-restart syscall from "PFX" to "PFX"\n",
pc, pc + syslen);
at_auto_restart_syscall = true;
sc->SC_XIP += syslen;
sc->IF_X86_ELSE(SC_XAX, SC_R0) = -EINTR;
pc = (byte *) sc->SC_XIP;
}
}
/* This could come from another thread's SYS_kill (via our gen do_syscall) */
DOLOG(1, LOG_ASYNCH, {
if (!is_after_syscall_address(dcontext, pc) &&
!forged && !can_always_delay[sig]) {
LOG(THREAD, LOG_ASYNCH, 1,
"WARNING: signal %d in gen routine: may cause problems!\n", sig);
}
});
/* i#2019: for a signal arriving in gencode before entry to a fragment,
* we need to unlink the fragment just like for a signal arriving inside
* the fragment itself.
* Multiple signals should all have the same asynch_target so we should
* only need a single info->interrupted.
*/
if (info->interrupted == NULL && !get_at_syscall(dcontext)) {
/* Try to find the target if the signal arrived in the IBL.
* We could try to be a lot more precise by hardcoding the IBL
* sequence here but that would make the code less maintainable.
* Instead we try the registers that hold the target app address.
*
* FIXME i#2042: we'll still fail if the signal arrives at the
* actual jmp* in the hit path b/c the reg holding the target is
* restored on the prior instr.
*
* XXX: better to get this code inside arch/ but we'd have to
* convert to an mcontext which seems overkill.
*/
#ifdef AARCHXX
/* The target is in r2 the whole time, w/ or w/o Thumb LSB. */
if (sc->SC_R2 != 0)
f = fragment_lookup(dcontext, ENTRY_PC_TO_DECODE_PC(sc->SC_R2));
#elif defined(X86)
/* The target is initially in xcx but is then copied to xbx. */
if (sc->SC_XBX != 0)
f = fragment_lookup(dcontext, (app_pc)sc->SC_XBX);
if (f == NULL && sc->SC_XCX != 0)
f = fragment_lookup(dcontext, (app_pc)sc->SC_XCX);
#else
# error Unsupported arch.
#endif
/* If in fcache_enter, we stored the next_tag in asynch_target in dispatch. */
if (f == NULL && dcontext->asynch_target != NULL)
f = fragment_lookup(dcontext, dcontext->asynch_target);
if (f != NULL && !TEST(FRAG_COARSE_GRAIN, f->flags)) {
if (unlink_fragment_for_signal(dcontext, f, FCACHE_ENTRY_PC(f))) {
info->interrupted = f;
info->interrupted_pc = FCACHE_ENTRY_PC(f);
}
}
}
} else if (get_at_syscall(dcontext) && pc == vsyscall_sysenter_return_pc - syslen) {
LOG(THREAD, LOG_ASYNCH, 2,
"record_pending_signal(%d) from restart-vsyscall "PFX"\n", sig, pc);
/* While the kernel points at int 0x80 for a restart, we leverage our
* existing sysenter restart mechanism.
*/
at_auto_restart_syscall = true;
sc->SC_XIP = (reg_t) vsyscall_sysenter_return_pc;
sc->IF_X86_ELSE(SC_XAX, SC_R0) = -EINTR;
pc = (byte *) sc->SC_XIP;
} else if (pc == vsyscall_sysenter_return_pc) {
LOG(THREAD, LOG_ASYNCH, 2,
"record_pending_signal(%d) from vsyscall "PFX"\n", sig, pc);
/* i#1206: the syscall was interrupted but is not auto-restart, so we can go
* back to dispatch and don't need to receive it now (which complicates
* post-syscall handling)
*/
} else if (thread_synch_check_state(dcontext, THREAD_SYNCH_NO_LOCKS)) {
/* The signal interrupted DR or the client but it's at a safe spot so
* deliver it now.
*/
receive_now = true;
} else {
/* the signal interrupted DR itself => do not run handler now! */
LOG(THREAD, LOG_ASYNCH, 2,
"record_pending_signal(%d) from DR at pc "PFX"\n", sig, pc);
if (!forged &&
!can_always_delay[sig] &&
!is_sys_kill(dcontext, pc, (byte*)sc->SC_XSP, &frame->info)) {
/* i#195/PR 453964: don't re-execute if will just re-fault.
* Our checks for dstack, etc. in master_signal_handler should
* have accounted for everything
*/
ASSERT_NOT_REACHED();
abort_on_DR_fault(dcontext, pc, NULL, sig, frame,
(sig == SIGSEGV) ? "SEGV" : "other", " unknown");
}
}
LOG(THREAD, LOG_ASYNCH, 3, "\taction is not SIG_IGN\n");
#if defined(X86) && defined(LINUX)
LOG(THREAD, LOG_ASYNCH, 3, "\tretaddr = "PFX"\n",
frame->pretcode); /* pretcode has same offs for plain */
#endif
if (receive_now) {
/* we need to translate sc before we know whether client wants to
* suppress, so we need a backup copy
*/
bool xl8_success;
ASSERT(!at_auto_restart_syscall); /* only used for delayed delivery */
sc_orig = *sc;
ASSERT(!forged);
/* cache the fragment since pclookup is expensive for coarse (i#658) */
f = fragment_pclookup(dcontext, (cache_pc)sc->SC_XIP, &wrapper);
xl8_success = translate_sigcontext(dcontext, ucxt, !can_always_delay[sig], f);
if (can_always_delay[sig] && !xl8_success) {
/* delay: we expect this for coarse fragments if alarm arrives
             * in the middle of an ind branch region or something (PR 213040)
*/
LOG(THREAD, LOG_ASYNCH, 2,
"signal is in un-translatable spot in coarse fragment: delaying\n");
receive_now = false;
}
}
if (receive_now) {
/* N.B.: since we abandon the old context for synchronous signals,
* we do not need to mark this fragment as FRAG_CANNOT_DELETE
*/
#ifdef DEBUG
if (stats->loglevel >= 2 && (stats->logmask & LOG_ASYNCH) != 0 &&
safe_is_in_fcache(dcontext, pc, xsp)) {
ASSERT(f != NULL);
LOG(THREAD, LOG_ASYNCH, 2,
"Got signal at pc "PFX" in this fragment:\n", pc);
disassemble_fragment(dcontext, f, false);
}
#endif
LOG(THREAD, LOG_ASYNCH, 2, "Going to receive signal now\n");
/* If we end up executing the default action, we'll go native
* since we translated the context. If there's a handler,
* we'll copy the context to the app stack and then adjust the
* original on our stack so we take over.
*/
execute_handler_from_cache(dcontext, sig, frame, &sc_orig, f
_IF_CLIENT(access_address));
} else if (!handled) {
#ifdef CLIENT_INTERFACE
/* i#182/PR 449996: must let client act on blocked non-delayable signals to
* handle instrumentation faults. Make sure we're at a safe spot: i.e.,
* only raise for in-cache faults. Checking forged and no-delay
* to avoid the in-cache check for delayable signals => safer.
*/
if (blocked && !forged && !can_always_delay[sig] &&
safe_is_in_fcache(dcontext, pc, xsp)) {
dr_signal_action_t action;
/* cache the fragment since pclookup is expensive for coarse (i#658) */
f = fragment_pclookup(dcontext, (cache_pc)sc->SC_XIP, &wrapper);
sc_orig = *sc;
translate_sigcontext(dcontext, ucxt, true/*shouldn't fail*/, f);
/* make a copy before send_signal_to_client() tweaks it */
sigcontext_t sc_interrupted = *sc;
action = send_signal_to_client(dcontext, sig, frame, &sc_orig,
access_address, true/*blocked*/, f);
/* For blocked signal early event we disallow BYPASS (xref i#182/PR 449996) */
CLIENT_ASSERT(action != DR_SIGNAL_BYPASS,
"cannot bypass a blocked signal event");
if (!handle_client_action_from_cache(dcontext, sig, action, frame,
&sc_orig, &sc_interrupted,
true/*blocked*/)) {
ostd->processing_signal--;
return;
}
/* restore original (untranslated) sc */
*get_sigcontext_from_rt_frame(frame) = sc_orig;
}
#endif
/* i#196/PR 453847: avoid infinite loop of signals if try to re-execute */
if (blocked && !forged && !can_always_delay[sig] &&
!is_sys_kill(dcontext, pc, (byte*)sc->SC_XSP, &frame->info)) {
ASSERT(default_action[sig] == DEFAULT_TERMINATE ||
default_action[sig] == DEFAULT_TERMINATE_CORE);
LOG(THREAD, LOG_ASYNCH, 1,
"blocked fatal signal %d cannot be delayed: terminating\n", sig);
sc_orig = *sc;
translate_sigcontext(dcontext, ucxt, true/*shouldn't fail*/, NULL);
/* the process should be terminated */
execute_default_from_cache(dcontext, sig, frame, &sc_orig);
ASSERT_NOT_REACHED();
}
/* Happened in DR, do not translate context. Record for later processing
* at a safe point with a clean app state.
*/
if (!blocked || sig >= OFFS_RT ||
(blocked && info->sigpending[sig] == NULL)) {
/* only have 1 pending for blocked non-rt signals */
                /* to avoid accumulating signals if we're slow in the presence
                 * of a high-rate itimer we only keep 2 alarm signals (PR 596768)
*/
if (sig_is_alarm_signal(sig)) {
if (info->sigpending[sig] != NULL &&
info->sigpending[sig]->next != NULL) {
ASSERT(info->sigpending[sig]->next->next == NULL);
/* keep the oldest, replace newer w/ brand-new one, for
* more spread-out alarms
*/
sigpending_t *temp = info->sigpending[sig];
info->sigpending[sig] = temp->next;
special_heap_free(info->sigheap, temp);
info->num_pending--;
LOG(THREAD, LOG_ASYNCH, 2,
"3rd pending alarm %d => dropping 2nd\n", sig);
STATS_INC(num_signals_dropped);
SYSLOG_INTERNAL_WARNING_ONCE("dropping 3rd pending alarm signal");
}
}
/* special heap alloc always uses sizeof(sigpending_t) blocks */
pend = special_heap_alloc(info->sigheap);
ASSERT(sig > 0 && sig <= MAX_SIGNUM);
info->num_pending++;
if (info->num_pending > DYNAMO_OPTION(max_pending_signals) &&
!info->multiple_pending_units)
info->multiple_pending_units = true;
if (info->num_pending >= DYNAMO_OPTION(max_pending_signals)) {
/* We're at the limit of our special heap: one more and it will try to
* allocate a new unit, which is unsafe as it acquires locks. We take
* several steps: we notify the user; we check for this on delivery as
* well and proactively allocate a new unit in a safer context.
* XXX: Perhaps we should drop some signals here?
*/
DO_ONCE({
char max_string[32];
snprintf(max_string, BUFFER_SIZE_ELEMENTS(max_string), "%d",
DYNAMO_OPTION(max_pending_signals));
NULL_TERMINATE_BUFFER(max_string);
SYSLOG(SYSLOG_WARNING, MAX_PENDING_SIGNALS, 3,
get_application_name(), get_application_pid(), max_string);
});
}
pend->next = info->sigpending[sig];
info->sigpending[sig] = pend;
pend->unblocked = !blocked;
            /* FIXME: note that for asynchronous signals we don't need to
             * bother to record the exact machine context, or even the entire
             * frame, since we don't want to pass a dynamo pc context to the
             * app handler. Only copy the frame for synchronous signals? Those
             * only happen while in the cache? But for asynch, we would have
             * to construct our own frame... kind of a pain.
*/
copy_frame_to_pending(dcontext, sig, frame _IF_CLIENT(access_address));
/* i#1145: check whether we should auto-restart an interrupted syscall */
if (at_auto_restart_syscall) {
/* Adjust the pending frame to restart the syscall, if applicable */
sigframe_rt_t *frame = &(info->sigpending[sig]->rt_frame);
sigcontext_t *sc_pend = get_sigcontext_from_rt_frame(frame);
if (adjust_syscall_for_restart(dcontext, info, sig, sc_pend, f,
orig_retval_reg)) {
/* We're going to re-start this syscall after we go
* back to dispatch, run the post-syscall handler (for -EINTR),
* and deliver the signal. We've adjusted the sigcontext
* for re-start on the sigreturn, but we need to tell
* execute_handler_from_dispatch() to use our sigcontext
* and not the mcontext.
* A client will see a second set of pre + post handlers for
* the restart, which seems reasonable, given the signal in
* between.
*/
info->sigpending[sig]->use_sigcontext = true;
}
}
} else {
/* For clients, we document that we do not pass to them
* unless we're prepared to deliver to app. We would have
* to change our model to pass them non-final-translated
* contexts for delayable signals in order to give them
* signals as soon as they come in. Xref i#182/PR 449996.
*/
LOG(THREAD, LOG_ASYNCH, 3,
"\tnon-rt signal already in queue, ignoring this one!\n");
}
if (!blocked && !dcontext->signals_pending)
dcontext->signals_pending = 1;
}
ostd->processing_signal--;
}
/* Distinguish SYS_kill-generated from instruction-generated signals.
* If sent from another process we can't tell, but if sent from this
* thread the interruption point should be our own post-syscall.
* FIXME PR 368277: for other threads in same process we should set a flag
* and identify them as well.
* FIXME: for faults like SIGILL we could examine the interrupted pc
* to see whether it is capable of generating such a fault (see code
* used in handle_nudge_signal()).
*/
static bool
is_sys_kill(dcontext_t *dcontext, byte *pc, byte *xsp, siginfo_t *info)
{
#ifndef VMX86_SERVER /* does not properly set si_code */
/* i#133: use si_code to distinguish user-sent signals.
     * Even the 2.2 Linux kernel supports <=0 meaning user-sent (except
     * for SIGIO), so we assume we can rely on it.
*/
if (info->si_code <= 0)
return true;
#endif
return (is_at_do_syscall(dcontext, pc, xsp) &&
(dcontext->sys_num == SYS_kill ||
#ifdef LINUX
dcontext->sys_num == SYS_tkill ||
dcontext->sys_num == SYS_tgkill ||
dcontext->sys_num == SYS_rt_sigqueueinfo
#elif defined (MACOS)
dcontext->sys_num == SYS___pthread_kill
#endif
));
}
static byte *
compute_memory_target(dcontext_t *dcontext, cache_pc instr_cache_pc,
kernel_ucontext_t *uc, siginfo_t *si, bool *write)
{
sigcontext_t *sc = SIGCXT_FROM_UCXT(uc);
byte *target = NULL;
instr_t instr;
priv_mcontext_t mc;
uint memopidx, memoppos, memopsize;
opnd_t memop;
bool found_target = false;
bool in_maps;
bool use_allmem = false;
uint prot;
IF_ARM(dr_isa_mode_t old_mode;)
LOG(THREAD, LOG_ALL, 2,
"computing memory target for "PFX" causing SIGSEGV, kernel claims it is "PFX"\n",
instr_cache_pc, (byte*)si->si_addr);
/* ARM's sigcontext_t has a "fault_address" field but it also seems unreliable */
IF_ARM(LOG(THREAD, LOG_ALL, 2, "fault_address: "PFX"\n", sc->fault_address));
/* We used to do a memory query to check if instr_cache_pc is readable, but
* now we use TRY/EXCEPT because we don't have the instr length and the OS
* query is expensive. If decoding faults, the signal handler will longjmp
* out before it calls us recursively.
*/
instr_init(dcontext, &instr);
IF_ARM({
/* Be sure to use the interrupted mode and not the last-dispatch mode */
dr_set_isa_mode(dcontext, get_pc_mode_from_cpsr(sc), &old_mode);
});
TRY_EXCEPT(dcontext, {
decode(dcontext, instr_cache_pc, &instr);
}, {
return NULL; /* instr_cache_pc was unreadable */
});
IF_ARM(dr_set_isa_mode(dcontext, old_mode, NULL));
if (!instr_valid(&instr)) {
LOG(THREAD, LOG_ALL, 2,
"WARNING: got SIGSEGV for invalid instr at cache pc "PFX"\n", instr_cache_pc);
ASSERT_NOT_REACHED();
instr_free(dcontext, &instr);
return NULL;
}
ucontext_to_mcontext(&mc, uc);
ASSERT(write != NULL);
/* i#1009: If si_addr is plausibly one of the memory operands of the
* faulting instruction, assume the target was si_addr. If none of the
* memops match, fall back to checking page protections, which can be racy.
* For si_addr == NULL, we fall back to the protection check because it's
* too likely to be a valid memop and we can live with a race on a page that
* is typically unmapped.
*/
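    /* The overlap test below is a range check rather than an equality check
     * because si_addr can point into the middle of an operand: e.g., a wide
     * store straddling a page boundary may fault with si_addr at the start of
     * the second page, past the operand's base address.
     */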
if (si->si_code == SEGV_ACCERR && si->si_addr != NULL) {
for (memopidx = 0;
instr_compute_address_ex_priv(&instr, &mc, memopidx,
&target, write, &memoppos);
memopidx++) {
/* i#1045: check whether operand and si_addr overlap */
memop = *write ? instr_get_dst(&instr, memoppos) :
instr_get_src(&instr, memoppos);
memopsize = opnd_size_in_bytes(opnd_get_size(memop));
LOG(THREAD, LOG_ALL, 2,
"memory operand %u has address "PFX" and size %u\n",
memopidx, target, memopsize);
if ((byte*)si->si_addr >= target &&
(byte*)si->si_addr < target + memopsize) {
target = (byte*)si->si_addr;
found_target = true;
break;
}
}
}
/* For fcache faults, use all_memory_areas, which is faster but acquires
* locks. If it's possible we're in DR, go to the OS to avoid deadlock.
*/
if (DYNAMO_OPTION(use_all_memory_areas)) {
use_allmem = safe_is_in_fcache(dcontext, instr_cache_pc,
(byte *)sc->SC_XSP);
}
if (!found_target) {
if (si->si_addr != NULL) {
LOG(THREAD, LOG_ALL, 3,
"%s: falling back to racy protection checks\n", __FUNCTION__);
}
/* i#115/PR 394984: consider all memops */
for (memopidx = 0;
instr_compute_address_ex_priv(&instr, &mc, memopidx,
&target, write, NULL);
memopidx++) {
if (use_allmem) {
in_maps = get_memory_info(target, NULL, NULL, &prot);
} else {
in_maps = get_memory_info_from_os(target, NULL, NULL, &prot);
}
if ((!in_maps || !TEST(MEMPROT_READ, prot)) ||
(*write && !TEST(MEMPROT_WRITE, prot))) {
found_target = true;
break;
}
}
}
if (!found_target) {
        /* probably an NX fault: how can we tell whether the kernel is enforcing? */
in_maps = get_memory_info_from_os(instr_cache_pc, NULL, NULL, &prot);
if (!in_maps || !TEST(MEMPROT_EXEC, prot)) {
target = instr_cache_pc;
found_target = true;
}
}
/* we may still not find target, e.g. for SYS_kill(SIGSEGV) */
if (!found_target)
target = NULL;
DOLOG(2, LOG_ALL, {
LOG(THREAD, LOG_ALL, 2,
"For SIGSEGV at cache pc "PFX", computed target %s "PFX"\n",
instr_cache_pc, *write ? "write" : "read", target);
loginst(dcontext, 2, &instr, "\tfaulting instr");
});
instr_free(dcontext, &instr);
return target;
}
/* If native_state is true, assumes the fault is not in the cache and thus
* does not need translation but rather should always be re-executed.
*/
static bool
check_for_modified_code(dcontext_t *dcontext, cache_pc instr_cache_pc,
kernel_ucontext_t *uc, byte *target, bool native_state)
{
/* special case: we expect a seg fault for executable regions
* that were writable and marked read-only by us.
* have to figure out the target address!
* unfortunately the OS doesn't tell us, nor whether it's a write.
* FIXME: if sent from SYS_kill(SIGSEGV), the pc will be post-syscall,
* and if that post-syscall instr is a write that could have faulted,
* how can we tell the difference?
*/
if (was_executable_area_writable(target)) {
/* translate instr_cache_pc to original app pc
* DO NOT use translate_sigcontext, don't want to change the
* signal frame or else we'll lose control when we try to
* return to signal pc!
*/
app_pc next_pc, translated_pc = NULL;
fragment_t *f = NULL;
fragment_t wrapper;
ASSERT((cache_pc)SIGCXT_FROM_UCXT(uc)->SC_XIP == instr_cache_pc);
if (!native_state) {
/* For safe recreation we need to either be couldbelinking or hold
* the initexit lock (to keep someone from flushing current
* fragment), the initexit lock is easier
*/
mutex_lock(&thread_initexit_lock);
/* cache the fragment since pclookup is expensive for coarse units (i#658) */
f = fragment_pclookup(dcontext, instr_cache_pc, &wrapper);
translated_pc = recreate_app_pc(dcontext, instr_cache_pc, f);
ASSERT(translated_pc != NULL);
mutex_unlock(&thread_initexit_lock);
}
next_pc =
handle_modified_code(dcontext, instr_cache_pc, translated_pc,
target, f);
if (!native_state) {
/* going to exit from middle of fragment (at the write) so will mess up
* trace building
*/
if (is_building_trace(dcontext)) {
LOG(THREAD, LOG_ASYNCH, 3, "\tsquashing trace-in-progress\n");
trace_abort(dcontext);
}
}
if (next_pc == NULL) {
/* re-execute the write -- just have master_signal_handler return */
return true;
} else {
ASSERT(!native_state);
/* Do not resume execution in cache, go back to dispatch. */
transfer_from_sig_handler_to_fcache_return
(dcontext, uc, NULL, SIGSEGV, next_pc,
(linkstub_t *) get_selfmod_linkstub(), false);
/* now have master_signal_handler return */
return true;
}
}
return false;
}
#ifndef HAVE_SIGALTSTACK
/* The exact layout of this struct is relied on in master_signal_handler()
* in x86.asm.
*/
struct clone_and_swap_args {
byte *stack;
byte *tos;
};
/* Helper function for swapping handler to dstack */
bool
sig_should_swap_stack(struct clone_and_swap_args *args, kernel_ucontext_t *ucxt)
{
byte *cur_esp;
dcontext_t *dcontext = get_thread_private_dcontext();
if (dcontext == NULL)
return false;
GET_STACK_PTR(cur_esp);
if (!is_on_dstack(dcontext, cur_esp)) {
sigcontext_t *sc = SIGCXT_FROM_UCXT(ucxt);
/* Pass back the proper args to clone_and_swap_stack: we want to
* copy to dstack from the tos at the signal interruption point.
*/
args->stack = dcontext->dstack;
/* leave room for fpstate */
args->stack -= signal_frame_extra_size(true);
args->stack = (byte *) ALIGN_BACKWARD(args->stack, XSTATE_ALIGNMENT);
args->tos = (byte *) sc->SC_XSP;
return true;
} else
return false;
}
#endif
/* Helper that takes over the current thread signaled via SUSPEND_SIGNAL. Kept
* separate mostly to keep the priv_mcontext_t allocation out of
* master_signal_handler_C.
* If it returns, it returns false, and the signal should be squashed.
*/
static bool
sig_take_over(kernel_ucontext_t *uc)
{
priv_mcontext_t mc;
ucontext_to_mcontext(&mc, uc);
/* We don't want our own blocked signals: we want the app's, stored in the frame. */
if (!os_thread_take_over(&mc, SIGMASK_FROM_UCXT(uc)))
return false;
ASSERT_NOT_REACHED(); /* shouldn't return */
return true; /* make compiler happy */
}
static bool
is_safe_read_ucxt(kernel_ucontext_t *ucxt)
{
app_pc pc = (app_pc) SIGCXT_FROM_UCXT(ucxt)->SC_XIP;
return is_safe_read_pc(pc);
}
/* the master signal handler
* WARNING: behavior varies with different versions of the kernel!
* sigaction support was only added with 2.2
*/
#ifndef X86_32
/* stub in x86.asm passes our xsp to us */
# ifdef MACOS
void
master_signal_handler_C(handler_t handler, int style, int sig, siginfo_t *info,
kernel_ucontext_t *ucxt, byte *xsp)
# else
void
master_signal_handler_C(int sig, siginfo_t *siginfo, kernel_ucontext_t *ucxt,
byte *xsp)
# endif
#else
/* On ia32, adding a parameter disturbs the frame we're trying to capture, so we
* add an intermediate frame and read the normal params off the stack directly.
*/
void
master_signal_handler_C(byte *xsp)
#endif
{
sigframe_rt_t *frame = (sigframe_rt_t *) xsp;
#ifdef X86_32
/* Read the normal arguments from the frame. */
int sig = frame->sig;
siginfo_t *siginfo = frame->pinfo;
kernel_ucontext_t *ucxt = frame->puc;
#endif /* !X64 */
sigcontext_t *sc = SIGCXT_FROM_UCXT(ucxt);
thread_record_t *tr;
#ifdef DEBUG
uint level = 2;
# if !defined(HAVE_MEMINFO)
/* avoid logging every single TRY probe fault */
if (!dynamo_initialized)
level = 5;
# endif
#endif
bool local;
#if defined(MACOS) && !defined(X64)
/* The kernel clears fs, so we have to re-instate our selector, if
* it was set in the first place.
*/
if (sc->__ss.__fs != 0)
tls_reinstate_selector(sc->__ss.__fs);
#endif
#ifdef X86
/* i#2089: For is_thread_tls_initialized() we need a safe_read path that does not
* do any logging or call get_thread_private_dcontext() as those will recurse.
* This path is global so there's no SELF_PROTECT_LOCAL and we also bypass
* the ENTERING_DR() for this short path.
*/
if (sig == SIGSEGV && sc->SC_XIP == (ptr_uint_t)safe_read_tls_magic) {
sc->SC_RETURN_REG = 0;
sc->SC_XIP = (reg_t) safe_read_tls_magic_recover;
return;
} else if (sig == SIGSEGV && sc->SC_XIP == (ptr_uint_t)safe_read_tls_self) {
sc->SC_RETURN_REG = 0;
sc->SC_XIP = (reg_t) safe_read_tls_self_recover;
return;
} else if (sig == SIGSEGV && sc->SC_XIP == (ptr_uint_t)safe_read_tls_app_self) {
sc->SC_RETURN_REG = 0;
sc->SC_XIP = (reg_t) safe_read_tls_app_self_recover;
return;
}
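    /* Each magic-pc check above emulates a failed safe read without touching
     * TLS: the return-value register is zeroed and execution resumes at the
     * corresponding *_recover label, just as a TRY/EXCEPT longjmp would.
     */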
#endif
dcontext_t *dcontext = get_thread_private_dcontext();
#ifdef MACOS
# ifdef X64
ASSERT((YMM_ENABLED() && ucxt->uc_mcsize == sizeof(_STRUCT_MCONTEXT_AVX64)) ||
(!YMM_ENABLED() && ucxt->uc_mcsize == sizeof(_STRUCT_MCONTEXT64)));
# else
ASSERT((YMM_ENABLED() && ucxt->uc_mcsize == sizeof(_STRUCT_MCONTEXT_AVX32)) ||
(!YMM_ENABLED() && ucxt->uc_mcsize == sizeof(_STRUCT_MCONTEXT)));
# endif
#endif
/* i#350: To support safe_read or TRY_EXCEPT without a dcontext, use the
* global dcontext
* when handling safe_read faults. This lets us pass the check for a
* dcontext below and causes us to use the global log.
*/
if (dcontext == NULL && (sig == SIGSEGV || sig == SIGBUS) &&
(is_safe_read_ucxt(ucxt) ||
(!dynamo_initialized && global_try_except.try_except_state != NULL))) {
dcontext = GLOBAL_DCONTEXT;
}
if (dynamo_exited && get_num_threads() > 1 && sig == SIGSEGV) {
/* PR 470957: this is almost certainly a race so just squelch it.
* We live w/ the risk that it was holding a lock our release-build
* exit code needs.
*/
exit_thread_syscall(1);
}
/* FIXME: ensure the path for recording a pending signal does not grab any DR locks
* that could have been interrupted
* e.g., synchronize_dynamic_options grabs the stats_lock!
*/
if (dcontext == NULL && sig == SUSPEND_SIGNAL) {
/* Check for a temporarily-native thread we're synch-ing with. */
tr = thread_lookup(get_sys_thread_id());
if (tr != NULL)
dcontext = tr->dcontext;
}
if (dcontext == NULL ||
(dcontext != GLOBAL_DCONTEXT &&
(dcontext->signal_field == NULL ||
!((thread_sig_info_t*)dcontext->signal_field)->fully_initialized))) {
/* FIXME: || !intercept_asynch, or maybe !under_our_control */
/* FIXME i#26: this could be a signal arbitrarily sent to this thread.
* We could try to route it to another thread, using a global queue
* of pending signals. But what if it was targeted to this thread
* via SYS_{tgkill,tkill}? Can we tell the difference, even if
* we watch the kill syscalls: could come from another process?
*/
if (sig_is_alarm_signal(sig)) {
/* assuming an alarm during thread exit or init (xref PR 596127,
* i#359): suppressing is fine
*/
} else if (sig == SUSPEND_SIGNAL && dcontext == NULL) {
/* We sent SUSPEND_SIGNAL to a thread we don't control (no
* dcontext), which means we want to take over.
*/
ASSERT(!doing_detach);
if (!sig_take_over(ucxt))
return;
ASSERT_NOT_REACHED(); /* else, shouldn't return */
} else {
/* Using global dcontext because dcontext is NULL here. */
DOLOG(1, LOG_ASYNCH, { dump_sigcontext(GLOBAL_DCONTEXT, sc); });
SYSLOG_INTERNAL_ERROR("ERROR: master_signal_handler with no siginfo "
"(i#26?): tid=%d, sig=%d", get_sys_thread_id(), sig);
}
/* see FIXME comments above.
* workaround for now: suppressing is better than dying.
*/
if (can_always_delay[sig])
return;
REPORT_FATAL_ERROR_AND_EXIT(dcontext, FAILED_TO_HANDLE_SIGNAL,
2, get_application_name(),
get_application_pid());
}
/* we may be entering dynamo from code cache! */
/* Note that this is unsafe if -single_thread_in_DR => we grab a lock =>
* hang if signal interrupts DR: but we don't really support that option
*/
ENTERING_DR();
if (dcontext == GLOBAL_DCONTEXT) {
local = false;
tr = thread_lookup(get_sys_thread_id());
} else {
tr = dcontext->thread_record;
local = local_heap_protected(dcontext);
if (local)
SELF_PROTECT_LOCAL(dcontext, WRITABLE);
}
/* i#1921: For proper native execution with re-takeover we need to propagate
* signals to app handlers while native. For now we do not support re-takeover
* and we give up our handlers via signal_remove_handlers().
*/
ASSERT(tr == NULL || tr->under_dynamo_control || IS_CLIENT_THREAD(dcontext) ||
sig == SUSPEND_SIGNAL);
LOG(THREAD, LOG_ASYNCH, level, "\nmaster_signal_handler: sig=%d, retaddr="PFX"\n",
sig, *((byte **)xsp));
LOG(THREAD, LOG_ASYNCH, level+1,
"siginfo: sig = %d, pid = %d, status = %d, errno = %d, si_code = %d\n",
siginfo->si_signo, siginfo->si_pid, siginfo->si_status, siginfo->si_errno,
siginfo->si_code);
DOLOG(level+1, LOG_ASYNCH, { dump_sigcontext(dcontext, sc); });
#if defined(X86_32) && !defined(VMX86_SERVER) && defined(LINUX)
/* FIXME case 6700: 2.6.9 (FC3) kernel sets up our frame with a pretcode
* of 0x440. This happens if our restorer is unspecified (though 2.6.9
* src code shows setting the restorer to a default value in that case...)
* or if we explicitly point at dynamorio_sigreturn. I couldn't figure
* out why it kept putting 0x440 there. So we fix the issue w/ this
* hardcoded return.
* This hack causes vmkernel to kill the process on sigreturn due to
* vmkernel's non-standard sigreturn semantics. PR 404712.
*/
*((byte **)xsp) = (byte *) dynamorio_sigreturn;
#endif
/* N.B.:
* ucontext_t is defined in two different places. The one we get
* included is /usr/include/sys/ucontext.h, which would have us
* doing this:
* void *pc = (void *) ucxt->uc_mcontext.gregs[EIP];
     * However, EIP is not defined for us (it used to be in older
     * RedHat versions) unless we define __USE_GNU, which we don't want to do
* for other reasons, so we'd have to also say:
* #define EIP 14
* Instead we go by the ucontext_t definition in
* /usr/include/asm/ucontext.h, which has it containing a sigcontext struct,
* defined in /usr/include/asm/sigcontext.h. This is the definition used
* by the kernel. The two definitions are field-for-field
* identical except that the sys one has an fpstate struct at the end --
* but the next field in the frame is an fpstate. The only mystery
* is why the rt frame is declared as ucontext instead of sigcontext.
* The kernel's version of ucontext must be the asm one!
* And the sys one grabs the next field of the frame.
* Also note that mcontext_t.fpregs == sigcontext.fpstate is NULL if
* floating point operations have not been used (lazy fp state saving).
* Also, sigset_t has different sizes according to kernel (8 bytes) vs.
* glibc (128 bytes?).
*/
switch (sig) {
case SIGBUS: /* PR 313665: look for DR crashes on unaligned memory or mmap bounds */
case SIGSEGV: {
/* Older kernels do NOT fill out the signal-specific fields of siginfo,
* except for SIGCHLD. Thus we cannot do this:
* void *pc = (void*) siginfo->si_addr;
* Thus we must use the third argument, which is a ucontext_t (see above)
*/
void *pc = (void *) sc->SC_XIP;
bool syscall_signal = false; /* signal came from syscall? */
bool is_write = false;
byte *target;
bool is_DR_exception = false;
#ifdef SIDELINE
if (dcontext == NULL) {
SYSLOG_INTERNAL_ERROR("seg fault in sideline thread -- NULL dcontext!");
ASSERT_NOT_REACHED();
}
#endif
if (is_safe_read_ucxt(ucxt) ||
(!dynamo_initialized && global_try_except.try_except_state != NULL) ||
dcontext->try_except.try_except_state != NULL) {
/* handle our own TRY/EXCEPT */
try_except_context_t *try_cxt;
#ifdef HAVE_MEMINFO
/* our probe produces many of these every run */
            /* since we use this for safe_*, we make it a _ONCE */
SYSLOG_INTERNAL_WARNING_ONCE("(1+x) Handling our fault in a TRY at "PFX, pc);
#endif
LOG(THREAD, LOG_ALL, level, "TRY fault at "PFX"\n", pc);
if (TEST(DUMPCORE_TRY_EXCEPT, DYNAMO_OPTION(dumpcore_mask)))
os_dump_core("try/except fault");
if (is_safe_read_ucxt(ucxt)) {
sc->SC_XIP = (reg_t) safe_read_resume_pc();
/* Break out to log the normal return from the signal handler.
*/
break;
}
try_cxt = (dcontext != NULL) ? dcontext->try_except.try_except_state :
global_try_except.try_except_state;
ASSERT(try_cxt != NULL);
/* The exception interception code did an ENTER so we must EXIT here */
EXITING_DR();
/* Since we have no sigreturn we have to restore the mask
* manually, just like siglongjmp(). i#226/PR 492568: we rely
* on the kernel storing the prior mask in ucxt, so we do not
* need to store it on every setjmp.
*/
/* Verify that there's no scenario where the mask gets changed prior
* to a fault inside a try. This relies on dr_setjmp_sigmask() filling
* in the mask, which we only bother to do in debug build.
*/
ASSERT(memcmp(&try_cxt->context.sigmask,
&ucxt->uc_sigmask, sizeof(ucxt->uc_sigmask)) == 0);
sigprocmask_syscall(SIG_SETMASK, SIGMASK_FROM_UCXT(ucxt), NULL,
sizeof(ucxt->uc_sigmask));
DR_LONGJMP(&try_cxt->context, LONGJMP_EXCEPTION);
ASSERT_NOT_REACHED();
}
target = compute_memory_target(dcontext, pc, ucxt, siginfo, &is_write);
#ifdef CLIENT_INTERFACE
if (CLIENTS_EXIST() && is_in_client_lib(pc)) {
/* i#1354: client might write to a page we made read-only.
* If so, handle the fault and re-execute it, if it's safe to do so
* (we document these criteria under DR_MEMPROT_PRETEND_WRITE).
*/
if (is_write && !is_couldbelinking(dcontext) &&
OWN_NO_LOCKS(dcontext) &&
check_for_modified_code(dcontext, pc, ucxt, target, true/*native*/))
break;
abort_on_fault(dcontext, DUMPCORE_CLIENT_EXCEPTION, pc, target, sig, frame,
exception_label_client, (sig == SIGSEGV) ? "SEGV" : "BUS",
" client library");
ASSERT_NOT_REACHED();
}
#endif
/* For !HAVE_MEMINFO, we cannot compute the target until
* after the try/except check b/c compute_memory_target()
* calls get_memory_info_from_os() which does a probe: and the
* try/except could be from a probe itself. A try/except that
* triggers a stack overflow should recover on the longjmp, so
* this order should be fine.
*/
/* FIXME: share code with Windows callback.c */
/* FIXME PR 205795: in_fcache and is_dynamo_address do grab locks! */
if ((is_on_dstack(dcontext, (byte *)sc->SC_XSP)
/* PR 302951: clean call arg processing => pass to app/client.
* Rather than call the risky in_fcache we check whereami. */
IF_CLIENT_INTERFACE(&& (dcontext->whereami != WHERE_FCACHE))) ||
is_on_alt_stack(dcontext, (byte *)sc->SC_XSP) ||
is_on_initstack((byte *)sc->SC_XSP)) {
/* Checks here need to cover everything that record_pending_signal()
* thinks is non-fcache, non-gencode: else that routine will kill
* process since can't delay or re-execute (i#195/PR 453964).
*/
is_DR_exception = true;
} else if (!safe_is_in_fcache(dcontext, pc, (byte*)sc->SC_XSP) &&
(in_generated_routine(dcontext, pc) ||
is_at_do_syscall(dcontext, pc, (byte*)sc->SC_XSP) ||
is_dynamo_address(pc))) {
#ifdef CLIENT_INTERFACE
if (!in_generated_routine(dcontext, pc) &&
!is_at_do_syscall(dcontext, pc, (byte*)sc->SC_XSP)) {
/* PR 451074: client needs a chance to handle exceptions in its
* own gencode. client_exception_event() won't return if client
* wants to re-execute faulting instr.
*/
sigcontext_t sc_interrupted = *get_sigcontext_from_rt_frame(frame);
dr_signal_action_t action =
send_signal_to_client(dcontext, sig, frame, sc,
target, false/*!blocked*/, NULL);
if (action != DR_SIGNAL_DELIVER && /* for delivery, continue below */
!handle_client_action_from_cache(dcontext, sig, action, frame,
sc, &sc_interrupted,
false/*!blocked*/)) {
/* client handled fault */
break;
}
}
#endif
is_DR_exception = true;
}
if (is_DR_exception) {
/* kill(getpid(), SIGSEGV) looks just like a SIGSEGV in the store of eax
* to mcontext after the syscall instr in do_syscall -- try to distinguish:
*/
if (is_sys_kill(dcontext, pc, (byte*)sc->SC_XSP, siginfo)) {
LOG(THREAD, LOG_ALL, 2,
"assuming SIGSEGV at post-do-syscall is kill, not our write fault\n");
syscall_signal = true;
}
if (!syscall_signal) {
if (check_in_last_thread_vm_area(dcontext, target)) {
/* See comments in callback.c as well.
* FIXME: try to share code
*/
SYSLOG_INTERNAL_WARNING("(decode) exception in last area, "
"DR pc="PFX", app pc="PFX, pc, target);
STATS_INC(num_exceptions_decode);
if (is_building_trace(dcontext)) {
LOG(THREAD, LOG_ASYNCH, 2, "intercept_exception: "
"squashing old trace\n");
trace_abort(dcontext);
}
/* we do get faults when not building a bb: e.g.,
* ret_after_call_check does decoding (case 9396) */
if (dcontext->bb_build_info != NULL) {
/* must have been building a bb at the time */
bb_build_abort(dcontext, true/*clean vm area*/, true/*unlock*/);
}
/* Since we have no sigreturn we have to restore the mask manually */
unblock_all_signals(NULL);
/* Let's pass it back to the application - memory is unreadable */
if (TEST(DUMPCORE_FORGE_UNREAD_EXEC, DYNAMO_OPTION(dumpcore_mask)))
os_dump_core("Warning: Racy app execution (decode unreadable)");
os_forge_exception(target, UNREADABLE_MEMORY_EXECUTION_EXCEPTION);
ASSERT_NOT_REACHED();
} else {
abort_on_DR_fault(dcontext, pc, target, sig, frame,
(sig == SIGSEGV) ? "SEGV" : "BUS",
in_generated_routine(dcontext, pc) ?
" generated" : "");
}
}
}
/* if get here, pass the signal to the app */
ASSERT(pc != 0); /* shouldn't get here */
if (sig == SIGSEGV && !syscall_signal/*only for in-cache signals*/) {
/* special case: we expect a seg fault for executable regions
* that were writable and marked read-only by us.
*/
if (is_write &&
check_for_modified_code(dcontext, pc, ucxt, target, false/*!native*/)) {
/* it was our signal, so don't pass to app -- return now */
break;
}
}
/* pass it to the application (or client) */
LOG(THREAD, LOG_ALL, 1,
"** Received SIG%s at cache pc "PFX" in thread "TIDFMT"\n",
(sig == SIGSEGV) ? "SEGV" : "BUS", pc, get_thread_id());
ASSERT(syscall_signal || safe_is_in_fcache(dcontext, pc, (byte *)sc->SC_XSP));
/* we do not call trace_abort() here since we may need to
* translate from a temp private bb (i#376): but all paths
* that deliver the signal or redirect will call it
*/
record_pending_signal(dcontext, sig, ucxt, frame, false _IF_CLIENT(target));
break;
}
/* PR 212090: the signal we use to suspend threads */
case SUSPEND_SIGNAL:
if (handle_suspend_signal(dcontext, ucxt, frame)) {
/* i#1921: see comment above */
ASSERT(tr == NULL || tr->under_dynamo_control || IS_CLIENT_THREAD(dcontext));
record_pending_signal(dcontext, sig, ucxt, frame, false _IF_CLIENT(NULL));
}
/* else, don't deliver to app */
break;
/* i#61/PR 211530: the signal we use for nudges */
case NUDGESIG_SIGNUM:
if (handle_nudge_signal(dcontext, siginfo, ucxt))
record_pending_signal(dcontext, sig, ucxt, frame, false _IF_CLIENT(NULL));
/* else, don't deliver to app */
break;
case SIGALRM:
case SIGVTALRM:
case SIGPROF:
if (handle_alarm(dcontext, sig, ucxt))
record_pending_signal(dcontext, sig, ucxt, frame, false _IF_CLIENT(NULL));
/* else, don't deliver to app */
break;
#ifdef SIDELINE
case SIGCHLD: {
int status = siginfo->si_status;
if (siginfo->si_pid == 0) {
        /* FIXME: with older versions of Linux the sigchld fields of
         * siginfo are not filled in properly!
         * This is my attempt to handle that: si_pid seems to be 0.
*/
break;
}
if (status != 0) {
LOG(THREAD, LOG_ALL, 0, "*** Child thread died with error %d\n",
status);
ASSERT_NOT_REACHED();
}
break;
}
#endif
default: {
record_pending_signal(dcontext, sig, ucxt, frame, false _IF_CLIENT(NULL));
break;
}
} /* end switch */
LOG(THREAD, LOG_ASYNCH, level,
"\tmaster_signal_handler %d returning now to "PFX"\n\n", sig, sc->SC_XIP);
/* restore protections */
if (local)
SELF_PROTECT_LOCAL(dcontext, READONLY);
EXITING_DR();
}
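/* Delivers signal sig to the app's handler from a code cache interruption
 * point: the translated frame is copied to the app stack and our own
 * sigreturn context is redirected through fcache_return so that dispatch
 * regains control afterward. Returns false if the client suppressed or
 * redirected the signal, or if the default action was executed instead.
 */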
static bool
execute_handler_from_cache(dcontext_t *dcontext, int sig, sigframe_rt_t *our_frame,
sigcontext_t *sc_orig, fragment_t *f
_IF_CLIENT(byte *access_address))
{
thread_sig_info_t *info = (thread_sig_info_t *) dcontext->signal_field;
/* we want to modify the sc in DR's frame */
kernel_ucontext_t *uc = get_ucontext_from_rt_frame(our_frame);
sigcontext_t *sc = SIGCXT_FROM_UCXT(uc);
kernel_sigset_t blocked;
/* Need to get xsp now before get new dcontext.
* This is the translated xsp, so we avoid PR 306410 (cleancall arg fault
* on dstack => handler run on dstack) that Windows hit.
*/
byte *xsp = get_sigstack_frame_ptr(dcontext, sig,
our_frame/* take xsp from (translated)
* interruption point */);
#ifdef CLIENT_INTERFACE
sigcontext_t sc_interrupted = *sc;
dr_signal_action_t action =
send_signal_to_client(dcontext, sig, our_frame, sc_orig, access_address,
false/*not blocked*/, f);
if (!handle_client_action_from_cache(dcontext, sig, action, our_frame, sc_orig,
&sc_interrupted, false/*!blocked*/))
return false;
#else
if (info->app_sigaction[sig] == NULL ||
info->app_sigaction[sig]->handler == (handler_t)SIG_DFL) {
LOG(THREAD, LOG_ASYNCH, 3, "\taction is SIG_DFL\n");
if (execute_default_from_cache(dcontext, sig, our_frame, sc_orig)) {
/* if we haven't terminated, restore original (untranslated) sc
* on request.
* XXX i#1615: this doesn't restore SIMD regs, if client translated them!
*/
*get_sigcontext_from_rt_frame(our_frame) = *sc_orig;
}
return false;
}
ASSERT(info->app_sigaction[sig] != NULL &&
info->app_sigaction[sig]->handler != (handler_t)SIG_IGN &&
info->app_sigaction[sig]->handler != (handler_t)SIG_DFL);
#endif
LOG(THREAD, LOG_ASYNCH, 2, "execute_handler_from_cache for signal %d\n", sig);
RSTATS_INC(num_signals);
/* now that we know it's not a client-involved fault, dump as app fault */
report_app_problem(dcontext, APPFAULT_FAULT, (byte *)sc->SC_XIP, (byte *)sc->SC_FP,
"\nSignal %d delivered to application handler.\n", sig);
LOG(THREAD, LOG_ASYNCH, 3, "\txsp is "PFX"\n", xsp);
/* copy frame to appropriate stack and convert to non-rt if necessary */
copy_frame_to_stack(dcontext, sig, our_frame, (void *)xsp, false/*!pending*/);
LOG(THREAD, LOG_ASYNCH, 3, "\tcopied frame from "PFX" to "PFX"\n", our_frame, xsp);
sigcontext_t *app_sc = get_sigcontext_from_app_frame(info, sig, (void *)xsp);
/* Because of difficulties determining when/if a signal handler
* returns, we do what the kernel does: abandon all of our current
* state, copy what we might need to the handler frame if we come back,
* and then it's ok if the handler doesn't return.
* If it does, we start interpreting afresh when we see sigreturn().
* This routine assumes anything needed to return has been put in the
* frame (only needed for signals queued up while in dynamo), and goes
* ahead and trashes the current dcontext.
*/
/* if we were building a trace, kill it */
if (is_building_trace(dcontext)) {
LOG(THREAD, LOG_ASYNCH, 3, "\tsquashing trace-in-progress\n");
trace_abort(dcontext);
}
/* add to set of blocked signals those in sigaction mask */
blocked = info->app_sigaction[sig]->mask;
/* SA_NOMASK says whether to block sig itself or not */
if ((info->app_sigaction[sig]->flags & SA_NOMASK) == 0)
kernel_sigaddset(&blocked, sig);
set_blocked(dcontext, &blocked, false/*relative: OR these in*/);
/* Doesn't matter what most app registers are, signal handler doesn't
* expect anything except the frame on the stack. We do need to set xsp,
* only because if app wants special signal stack we need to point xsp
* there. (If no special signal stack, this is a nop.)
*/
sc->SC_XSP = (ptr_uint_t) xsp;
/* Set up args to handler: int sig, siginfo_t *siginfo, kernel_ucontext_t *ucxt */
#ifdef X86_64
sc->SC_XDI = sig;
sc->SC_XSI = (reg_t) &((sigframe_rt_t *)xsp)->info;
sc->SC_XDX = (reg_t) &((sigframe_rt_t *)xsp)->uc;
#elif defined(AARCHXX)
sc->SC_R0 = sig;
if (IS_RT_FOR_APP(info, sig)) {
sc->SC_R1 = (reg_t) &((sigframe_rt_t *)xsp)->info;
sc->SC_R2 = (reg_t) &((sigframe_rt_t *)xsp)->uc;
}
if (sig_has_restorer(info, sig))
sc->SC_LR = (reg_t) info->app_sigaction[sig]->restorer;
else
sc->SC_LR = (reg_t) dynamorio_sigreturn;
# ifndef AARCH64
/* We're going to our fcache_return gencode which uses DEFAULT_ISA_MODE */
set_pc_mode_in_cpsr(sc, DEFAULT_ISA_MODE);
# endif
#endif
/* Set our sigreturn context (NOT for the app: we already copied the
* translated context to the app stack) to point to fcache_return!
* Then we'll go back through kernel, appear in fcache_return,
* and go through dispatch & interp, without messing up DR stack.
*/
transfer_from_sig_handler_to_fcache_return
(dcontext, uc, app_sc, sig,
/* Make sure handler is next thing we execute */
(app_pc) SIGACT_PRIMARY_HANDLER(info->app_sigaction[sig]),
(linkstub_t *) get_asynch_linkstub(), true);
if ((info->app_sigaction[sig]->flags & SA_ONESHOT) != 0) {
/* clear handler now -- can't delete memory since sigreturn,
* others may look at sigaction struct, so we just set to default
*/
info->app_sigaction[sig]->handler = (handler_t) SIG_DFL;
}
LOG(THREAD, LOG_ASYNCH, 3, "\tset next_tag to handler "PFX", xsp to "PFX"\n",
SIGACT_PRIMARY_HANDLER(info->app_sigaction[sig]), xsp);
return true;
}
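/* Delivers a queued signal from dispatch: the pending frame (or the saved
 * sigcontext when use_sigcontext is set for a syscall restart) is finalized,
 * shown to the client, and copied to the app stack, with next_tag pointed at
 * the app's handler. Returns false when the signal was not delivered
 * (suppressed or SIG_IGN) so the caller can try another pending signal.
 */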
static bool
execute_handler_from_dispatch(dcontext_t *dcontext, int sig)
{
thread_sig_info_t *info = (thread_sig_info_t *) dcontext->signal_field;
byte *xsp = get_sigstack_frame_ptr(dcontext, sig, NULL);
sigframe_rt_t *frame = &(info->sigpending[sig]->rt_frame);
priv_mcontext_t *mcontext = get_mcontext(dcontext);
sigcontext_t *sc;
kernel_ucontext_t *uc;
kernel_sigset_t blocked;
#ifdef CLIENT_INTERFACE
dr_signal_action_t action;
#else
if (info->app_sigaction[sig] == NULL ||
info->app_sigaction[sig]->handler == (handler_t)SIG_DFL) {
LOG(THREAD, LOG_ASYNCH, 3, "\taction is SIG_DFL\n");
execute_default_from_dispatch(dcontext, sig, frame);
return true;
}
ASSERT(info->app_sigaction[sig] != NULL &&
info->app_sigaction[sig]->handler != (handler_t)SIG_IGN &&
info->app_sigaction[sig]->handler != (handler_t)SIG_DFL);
#endif
LOG(THREAD, LOG_ASYNCH, 2, "execute_handler_from_dispatch for signal %d\n", sig);
RSTATS_INC(num_signals);
/* modify the rtframe before copying to stack so we can pass final
* version to client, and propagate its mods
*/
uc = get_ucontext_from_rt_frame(frame);
sc = SIGCXT_FROM_UCXT(uc);
/* Because of difficulties determining when/if a signal handler
* returns, we do what the kernel does: abandon all of our current
* state, copy what we might need to the handler frame if we come back,
* and then it's ok if the handler doesn't return.
* If it does, we start interpreting afresh when we see sigreturn().
*/
#ifdef DEBUG
if (stats->loglevel >= 3 && (stats->logmask & LOG_ASYNCH) != 0) {
LOG(THREAD, LOG_ASYNCH, 3, "original sigcontext "PFX":\n", sc);
dump_sigcontext(dcontext, sc);
}
#endif
if (info->sigpending[sig]->use_sigcontext) {
LOG(THREAD, LOG_ASYNCH, 2,
"%s: using sigcontext, not mcontext (syscall restart)\n", __FUNCTION__);
} else {
/* copy currently-interrupted-context to frame's context, so we can
* abandon the currently-interrupted context.
*/
mcontext_to_ucontext(uc, mcontext);
}
/* Sigreturn needs the target ISA mode to be set in the T bit in cpsr.
* Since we came from dispatch, the post-signal target's mode is in dcontext.
*/
IF_ARM(set_pc_mode_in_cpsr(sc, dr_get_isa_mode(dcontext)));
/* mcontext does not contain fp or mmx or xmm state, which may have
* changed since the frame was created (while finishing up interrupted
* fragment prior to returning to dispatch). Since DR does not touch
* this state except for xmm on x64, we go ahead and copy the
* current state into the frame, and then touch up xmm for x64.
*/
    /* FIXME: should this be done for all pending signals as soon as we reach
     * dispatch? What if we get two asynch signals inside the same frag prior
     * to exiting the cache? We have issues with fpstate, but also probably
     * with next_tag? FIXME
*/
/* FIXME: we should clear fpstate for app handler itself as that's
* how our own handler is executed.
*/
#if defined(LINUX) && defined(X86)
ASSERT(sc->fpstate != NULL); /* not doing i#641 yet */
save_fpstate(dcontext, frame);
#endif /* LINUX && X86 */
#ifdef DEBUG
if (stats->loglevel >= 3 && (stats->logmask & LOG_ASYNCH) != 0) {
LOG(THREAD, LOG_ASYNCH, 3, "new sigcontext "PFX":\n", sc);
dump_sigcontext(dcontext, sc);
LOG(THREAD, LOG_ASYNCH, 3, "\n");
}
#endif
/* FIXME: other state? debug regs?
* if no syscall allowed between master_ (when frame created) and
* receiving, then don't have to worry about debug regs, etc.
* check for syscall when record pending, if it exists, try to
* receive in pre_system_call or something? what if ignorable? FIXME!
*/
if (!info->sigpending[sig]->use_sigcontext) {
/* for the pc we want the app pc not the cache pc */
sc->SC_XIP = (ptr_uint_t) dcontext->next_tag;
LOG(THREAD, LOG_ASYNCH, 3, "\tset frame's eip to "PFX"\n", sc->SC_XIP);
}
#ifdef CLIENT_INTERFACE
sigcontext_t sc_interrupted = *sc;
action = send_signal_to_client(dcontext, sig, frame, NULL,
info->sigpending[sig]->access_address,
false/*not blocked*/, NULL);
/* in order to pass to the client, we come all the way here for signals
* the app has no handler for
*/
if (action == DR_SIGNAL_REDIRECT) {
/* send_signal_to_client copied mcontext into frame's sc */
priv_mcontext_t *mcontext = get_mcontext(dcontext);
ucontext_to_mcontext(mcontext, uc);
dcontext->next_tag = canonicalize_pc_target(dcontext, (app_pc) sc->SC_XIP);
if (is_building_trace(dcontext)) {
LOG(THREAD, LOG_ASYNCH, 3, "\tsquashing trace-in-progress\n");
trace_abort(dcontext);
}
IF_ARM(dr_set_isa_mode(dcontext, get_pc_mode_from_cpsr(sc), NULL));
mcontext->pc = dcontext->next_tag;
sig_full_cxt_t sc_interrupted_full = { &sc_interrupted, NULL/*not provided*/ };
if (instrument_kernel_xfer(dcontext, DR_XFER_CLIENT_REDIRECT, sc_interrupted_full,
NULL, NULL, dcontext->next_tag, mcontext->xsp,
osc_empty, mcontext, sig))
dcontext->next_tag = canonicalize_pc_target(dcontext, mcontext->pc);
return true; /* don't try another signal */
}
else if (action == DR_SIGNAL_SUPPRESS ||
(info->app_sigaction[sig] != NULL &&
info->app_sigaction[sig]->handler == (handler_t)SIG_IGN)) {
LOG(THREAD, LOG_ASYNCH, 2, "%s: not delivering!\n",
(action == DR_SIGNAL_SUPPRESS) ?
"client suppressing signal" :
"app signal handler is SIG_IGN");
return false;
}
else if (action == DR_SIGNAL_BYPASS ||
(info->app_sigaction[sig] == NULL ||
info->app_sigaction[sig]->handler == (handler_t)SIG_DFL)) {
LOG(THREAD, LOG_ASYNCH, 2, "%s: executing default action\n",
(action == DR_SIGNAL_BYPASS) ?
"client forcing default" :
"app signal handler is SIG_DFL");
if (info->sigpending[sig]->use_sigcontext) {
/* after the default action we want to go to the sigcontext */
dcontext->next_tag = canonicalize_pc_target(dcontext, (app_pc) sc->SC_XIP);
ucontext_to_mcontext(get_mcontext(dcontext), uc);
IF_ARM(dr_set_isa_mode(dcontext, get_pc_mode_from_cpsr(sc), NULL));
}
execute_default_from_dispatch(dcontext, sig, frame);
return true;
}
CLIENT_ASSERT(action == DR_SIGNAL_DELIVER, "invalid signal event return value");
#endif
/* now that we've made all our changes and given the client a
* chance to make changes, copy the frame to the appropriate stack
* location and convert to non-rt if necessary
*/
copy_frame_to_stack(dcontext, sig, frame, xsp, true/*pending*/);
/* now point at the app's frame */
sc = get_sigcontext_from_app_frame(info, sig, (void *) xsp);
ASSERT(info->app_sigaction[sig] != NULL);
/* add to set of blocked signals those in sigaction mask */
blocked = info->app_sigaction[sig]->mask;
/* SA_NOMASK says whether to block sig itself or not */
if ((info->app_sigaction[sig]->flags & SA_NOMASK) == 0)
kernel_sigaddset(&blocked, sig);
set_blocked(dcontext, &blocked, false/*relative: OR these in*/);
/* if we were building a trace, kill it */
if (is_building_trace(dcontext)) {
LOG(THREAD, LOG_ASYNCH, 3, "\tsquashing trace-in-progress\n");
trace_abort(dcontext);
}
/* Doesn't matter what most app registers are, signal handler doesn't
* expect anything except the frame on the stack. We do need to set xsp.
*/
mcontext->xsp = (ptr_uint_t) xsp;
/* Set up args to handler: int sig, siginfo_t *siginfo, kernel_ucontext_t *ucxt */
#ifdef X86_64
mcontext->xdi = sig;
mcontext->xsi = (reg_t) &((sigframe_rt_t *)xsp)->info;
mcontext->xdx = (reg_t) &((sigframe_rt_t *)xsp)->uc;
#elif defined(AARCHXX)
mcontext->r0 = sig;
if (IS_RT_FOR_APP(info, sig)) {
mcontext->r1 = (reg_t) &((sigframe_rt_t *)xsp)->info;
mcontext->r2 = (reg_t) &((sigframe_rt_t *)xsp)->uc;
}
if (sig_has_restorer(info, sig))
mcontext->lr = (reg_t) info->app_sigaction[sig]->restorer;
else
mcontext->lr = (reg_t) dynamorio_sigreturn;
#endif
#ifdef X86
/* Clear eflags DF (signal handler should match function entry ABI) */
mcontext->xflags &= ~EFLAGS_DF;
#endif
/* Make sure handler is next thing we execute */
dcontext->next_tag = canonicalize_pc_target
(dcontext, (app_pc) SIGACT_PRIMARY_HANDLER(info->app_sigaction[sig]));
if ((info->app_sigaction[sig]->flags & SA_ONESHOT) != 0) {
/* clear handler now -- can't delete memory since sigreturn,
* others may look at sigaction struct, so we just set to default
*/
info->app_sigaction[sig]->handler = (handler_t) SIG_DFL;
}
#ifdef CLIENT_INTERFACE
mcontext->pc = dcontext->next_tag;
sig_full_cxt_t sc_full = { sc, NULL/*not provided*/ };
if (instrument_kernel_xfer(dcontext, DR_XFER_SIGNAL_DELIVERY, sc_full, NULL, NULL,
dcontext->next_tag, mcontext->xsp, osc_empty, mcontext,
sig))
dcontext->next_tag = canonicalize_pc_target(dcontext, mcontext->pc);
#endif
LOG(THREAD, LOG_ASYNCH, 3, "\tset xsp to "PFX"\n", xsp);
return true;
}
/* The arg to SYS_kill, i.e., the signal number, should be in dcontext->sys_param0 */
static void
terminate_via_kill(dcontext_t *dcontext)
{
ASSERT(dcontext == get_thread_private_dcontext());
/* FIXME PR 541760: there can be multiple thread groups and thus
* this may not exit all threads in the address space
*/
cleanup_and_terminate(dcontext, SYS_kill,
/* Pass -pid in case main thread has exited
* in which case will get -ESRCH
*/
IF_VMX86(os_in_vmkernel_userworld() ?
-(int)get_process_id() :)
get_process_id(),
dcontext->sys_param0, true, 0, 0);
ASSERT_NOT_REACHED();
}
bool
is_currently_on_sigaltstack(dcontext_t *dcontext)
{
thread_sig_info_t *info = (thread_sig_info_t *) dcontext->signal_field;
byte *cur_esp;
GET_STACK_PTR(cur_esp);
return (cur_esp >= (byte *)info->sigstack.ss_sp &&
cur_esp < (byte *)info->sigstack.ss_sp + info->sigstack.ss_size);
}
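/* Terminates by sending sig to ourselves via SYS_kill, switching off the
 * sigaltstack onto dstack first when necessary since we cannot clean up the
 * sigstack while running on it (i#1160).
 */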
static void
terminate_via_kill_from_anywhere(dcontext_t *dcontext, int sig)
{
dcontext->sys_param0 = sig; /* store arg to SYS_kill */
if (is_currently_on_sigaltstack(dcontext)) {
/* We can't clean up our sigstack properly when we're on it
* (i#1160) so we terminate on the dstack.
*/
call_switch_stack(dcontext, dcontext->dstack, (void(*)(void*))terminate_via_kill,
NULL/*!initstack */, false/*no return */);
} else {
terminate_via_kill(dcontext);
}
ASSERT_NOT_REACHED();
}
/* xref os_request_fatal_coredump() */
void
os_terminate_via_signal(dcontext_t *dcontext, terminate_flags_t flags, int sig)
{
if (signal_is_interceptable(sig)) {
bool set_action = false;
#if defined(STATIC_LIBRARY) && defined(LINUX)
if (INTERNAL_OPTION(invoke_app_on_crash)) {
/* We come here for asserts. Faults already bypass this routine. */
dcontext_t *my_dc = get_thread_private_dcontext();
if (my_dc != NULL) {
thread_sig_info_t *info = (thread_sig_info_t *) my_dc->signal_field;
if (info != NULL && info->app_sigaction[sig] != NULL &&
IS_RT_FOR_APP(info, sig)) {
set_action = true;
sigaction_syscall(sig, info->app_sigaction[sig], NULL);
}
}
}
#endif
if (!set_action) {
DEBUG_DECLARE(bool res =)
set_default_signal_action(sig);
ASSERT(res);
}
}
if (TEST(TERMINATE_CLEANUP, flags)) {
/* we enter from several different places, so rewind until top-level kstat */
KSTOP_REWIND_UNTIL(thread_measured);
ASSERT(dcontext != NULL);
dcontext->sys_param0 = sig;
        /* XXX: the comment in the else below implies that on some systems
         * SYS_kill of SIGSEGV w/ no handler on oneself actually returns.
* cleanup_and_terminate won't return to us and will use global_do_syscall
* to invoke SYS_kill, which in debug will do an inf loop (good!) but
* in release will do SYS_exit_group -- oh well, the systems I'm testing
* on do an immediate exit.
*/
terminate_via_kill_from_anywhere(dcontext, sig);
} else {
/* general clean up is unsafe: just remove .1config file */
config_exit();
dynamorio_syscall(SYS_kill, 2, get_process_id(), sig);
/* We try both the SYS_kill and the immediate crash since on some platforms
* the SIGKILL is delayed and on others the *-1 is hanging(?): should investigate
*/
if (sig == SIGSEGV) /* make doubly-sure */
*((int *)PTR_UINT_MINUS_1) = 0;
while (true) {
/* in case signal delivery is delayed we wait...forever */
os_thread_yield();
}
}
ASSERT_NOT_REACHED();
}
static bool
execute_default_action(dcontext_t *dcontext, int sig, sigframe_rt_t *frame,
sigcontext_t *sc_orig, bool from_dispatch)
{
thread_sig_info_t *info = (thread_sig_info_t *) dcontext->signal_field;
sigcontext_t *sc = get_sigcontext_from_rt_frame(frame);
byte *pc = (byte *) sc->SC_XIP;
LOG(THREAD, LOG_ASYNCH, 3, "execute_default_action for signal %d\n", sig);
/* should only come here for signals we catch, or signal with ONESHOT
* that didn't sigreturn
*/
ASSERT(info->we_intercept[sig] ||
(info->app_sigaction[sig]->flags & SA_ONESHOT) != 0);
if (info->app_sigaction[sig] != NULL &&
(info->app_sigaction[sig]->flags & SA_ONESHOT) != 0) {
if (!info->we_intercept[sig]) {
handler_free(dcontext, info->app_sigaction[sig], sizeof(kernel_sigaction_t));
info->app_sigaction[sig] = NULL;
}
}
/* FIXME PR 205310: we can't always perfectly emulate the default
* behavior. To execute the default action, we have to un-register our
* handler, if we have one, for signals whose default action is not
* ignore or that will just be re-raised upon returning to the
* interrupted context -- FIXME: are any of the ignores repeated?
* SIGURG?
*
* If called from execute_handler_from_cache(), our master_signal_handler()
* is going to return directly to the translated context: which means we
* go native to re-execute the instr, which if it does in fact generate
* the signal again means we have a nice transparent core dump.
*
* If called from execute_handler_from_dispatch(), we need to generate
* the signal ourselves.
*/
if (default_action[sig] != DEFAULT_IGNORE) {
DEBUG_DECLARE(bool ok =)
set_default_signal_action(sig);
ASSERT(ok);
/* FIXME: to avoid races w/ shared handlers should set a flag to
* prevent another thread from re-enabling.
* Perhaps worse: what if this signal arrives for another thread
* in the meantime (and the default is not terminate)?
*/
if (info->shared_app_sigaction) {
LOG(THREAD, LOG_ASYNCH, 1,
"WARNING: having to install SIG_DFL for thread "TIDFMT", but will be "
"shared!\n", get_thread_id());
}
if (default_action[sig] == DEFAULT_TERMINATE ||
default_action[sig] == DEFAULT_TERMINATE_CORE) {
report_app_problem(dcontext, APPFAULT_CRASH, pc, (byte *)sc->SC_FP,
"\nSignal %d delivered to application as default "
"action.\n", sig);
/* App may call sigaction to set handler SIG_DFL (unnecessary but legal),
* in which case DR will put a handler in info->app_sigaction[sig].
* We must clear it, otherwise, signal_thread_exit may cleanup the
* handler and set it to SIG_IGN instead.
*/
if (info->app_sigaction[sig] != NULL) {
ASSERT(info->we_intercept[sig]);
handler_free(dcontext, info->app_sigaction[sig],
sizeof(kernel_sigaction_t));
info->app_sigaction[sig] = NULL;
}
/* N.B.: we don't have to restore our handler because the
* default action is for the process (entire thread group for NPTL) to die!
*/
if (from_dispatch ||
can_always_delay[sig] ||
is_sys_kill(dcontext, pc, (byte*)sc->SC_XSP, &frame->info)) {
/* This must have come from SYS_kill rather than raised by
* a faulting instruction. Thus we can't go re-execute the
* instr in order to re-raise the signal (if from_dispatch,
* we delayed and can't re-execute anyway). Instead we
* re-generate via SYS_kill. An alternative, if we don't
* care about generating a core dump, is to use SYS_exit
* and pass the right exit code to indicate the signal
* number: that would avoid races w/ the sigaction.
*
* FIXME: should have app make the syscall to get a more
* transparent core dump!
*/
if (!from_dispatch)
KSTOP_NOT_MATCHING_NOT_PROPAGATED(fcache_default);
KSTOP_NOT_MATCHING_NOT_PROPAGATED(dispatch_num_exits);
if (is_couldbelinking(dcontext)) /* won't be for SYS_kill (i#1159) */
enter_nolinking(dcontext, NULL, false);
/* we could be on sigstack so call this version: */
terminate_via_kill_from_anywhere(dcontext, sig);
ASSERT_NOT_REACHED();
} else {
/* We assume that re-executing the interrupted instr will
* re-raise the fault. We could easily be wrong:
* xref PR 363811 infinite loop due to memory we
* thought was unreadable and thus thought would raise
* a signal; xref PR 368277 to improve is_sys_kill().
* FIXME PR 205310: we should check whether we come out of
* the cache when we expected to terminate!
*
* An alternative is to abandon transparent core dumps and
* do the same explicit SYS_kill we do for from_dispatch.
* That would let us clean up DR as well.
* FIXME: currently we do not clean up DR for a synchronous
* signal death, but we do for asynch.
*/
/* i#552: cleanup and raise client exit event */
int instr_sz;
thread_sig_info_t *info;
/* We are on the sigstack now, so assign it to NULL to avoid being
* freed during process exit cleanup
*/
info = (thread_sig_info_t *)dcontext->signal_field;
info->sigstack.ss_sp = NULL;
/* We enter from several different places, so rewind until
* top-level kstat.
*/
KSTOP_REWIND_UNTIL(thread_measured);
/* We try to raise the same signal in app's context so a correct
* coredump can be generated. However, the client might change
* the code in a way that the corresponding app code won't
* raise the signal, so we first check if the app instr is the
* same as instr in the cache, and raise the signal (by return).
* Otherwise, we kill the process instead.
*/
ASSERT(sc_orig != NULL);
instr_sz = decode_sizeof(dcontext, (byte *) sc_orig->SC_XIP,
NULL _IF_X86_64(NULL));
if (instr_sz != 0 &&
pc != NULL && /* avoid crash on xl8 failure (i#1699) */
instr_sz == decode_sizeof(dcontext, pc, NULL _IF_X86_64(NULL)) &&
memcmp(pc, (byte *) sc_orig->SC_XIP, instr_sz) == 0) {
                    /* the app instr matches the cache instr; clean up and
                     * raise the signal in the app context
*/
dynamo_process_exit();
/* we cannot re-enter the cache, which is freed by now */
ASSERT(!from_dispatch);
return false;
} else {
/* mismatch, cleanup and terminate */
dcontext->sys_param0 = sig;
terminate_via_kill(dcontext);
ASSERT_NOT_REACHED();
}
}
} else {
/* FIXME PR 297033: in order to intercept DEFAULT_STOP /
* DEFAULT_CONTINUE signals, we need to set sigcontext to point
* to some kind of regain-control routine, so that when our
* thread gets to run again we can reset our handler. So far
* we have no signals that fall here that we intercept.
*/
CLIENT_ASSERT(false, "STOP/CONT signals not supported");
}
#if defined(DEBUG) && defined(INTERNAL)
if (sig == SIGSEGV && !dynamo_exited) {
/* pc should be an app pc at this point (it was translated) --
* check for bad cases here
*/
if (safe_is_in_fcache(dcontext, pc, (byte *)sc->SC_XSP)) {
fragment_t wrapper;
fragment_t *f;
LOG(THREAD, LOG_ALL, 1,
"Received SIGSEGV at pc "PFX" in thread "TIDFMT"\n",
pc, get_thread_id());
f = fragment_pclookup(dcontext, pc, &wrapper);
if (f)
disassemble_fragment(dcontext, f, false);
ASSERT_NOT_REACHED();
} else if (in_generated_routine(dcontext, pc)) {
LOG(THREAD, LOG_ALL, 1,
"Received SIGSEGV at generated non-code-cache pc "PFX"\n", pc);
ASSERT_NOT_REACHED();
}
}
#endif
}
/* now continue at the interruption point and re-raise the signal */
return true;
}
static bool
execute_default_from_cache(dcontext_t *dcontext, int sig, sigframe_rt_t *frame,
sigcontext_t *sc_orig)
{
return execute_default_action(dcontext, sig, frame, sc_orig, false);
}
static void
execute_default_from_dispatch(dcontext_t *dcontext, int sig, sigframe_rt_t *frame)
{
execute_default_action(dcontext, sig, frame, NULL, true);
}
void
receive_pending_signal(dcontext_t *dcontext)
{
thread_sig_info_t *info = (thread_sig_info_t *) dcontext->signal_field;
sigpending_t *temp;
int sig;
LOG(THREAD, LOG_ASYNCH, 3, "receive_pending_signal\n");
if (info->interrupted != NULL) {
/* i#2066: if we were building a trace, it may already be re-linked */
if (!TEST(FRAG_LINKED_OUTGOING, info->interrupted->flags)) {
LOG(THREAD, LOG_ASYNCH, 3, "\tre-linking outgoing for interrupted F%d\n",
info->interrupted->id);
SHARED_FLAGS_RECURSIVE_LOCK(info->interrupted->flags, acquire,
change_linking_lock);
link_fragment_outgoing(dcontext, info->interrupted, false);
SHARED_FLAGS_RECURSIVE_LOCK(info->interrupted->flags, release,
change_linking_lock);
}
if (TEST(FRAG_HAS_SYSCALL, info->interrupted->flags)) {
/* restore syscall (they're a barrier to signals, so signal
* handler has cur frag exit before it does a syscall)
*/
if (info->interrupted_pc != NULL) {
mangle_syscall_code(dcontext, info->interrupted,
info->interrupted_pc, true/*skip exit cti*/);
}
}
info->interrupted = NULL;
info->interrupted_pc = NULL;
}
/* grab first pending signal
* XXX: start with real-time ones?
*/
/* "lock" the array to prevent a new signal that interrupts this bit of
     * code from prepending to or deleting from the array while we're accessing it
*/
info->accessing_sigpending = true;
/* barrier to prevent compiler from moving the above write below the loop */
__asm__ __volatile__("" : : : "memory");
if (!info->multiple_pending_units &&
info->num_pending + 2 >= DYNAMO_OPTION(max_pending_signals)) {
/* We're close to the limit: proactively get a new unit while it's safe
* to acquire locks. We do that by pushing over the edge.
* We assume that filling up a 2nd unit is too pathological to plan for.
*/
info->multiple_pending_units = true;
SYSLOG_INTERNAL_WARNING("many pending signals: asking for 2nd special unit");
sigpending_t *temp1 = special_heap_alloc(info->sigheap);
sigpending_t *temp2 = special_heap_alloc(info->sigheap);
sigpending_t *temp3 = special_heap_alloc(info->sigheap);
special_heap_free(info->sigheap, temp1);
special_heap_free(info->sigheap, temp2);
special_heap_free(info->sigheap, temp3);
}
for (sig = 1; sig <= MAX_SIGNUM; sig++) {
if (info->sigpending[sig] != NULL) {
bool executing = true;
/* We do not re-check whether blocked if it was unblocked at
* receive time, to properly handle sigsuspend (i#1340).
*/
if (!info->sigpending[sig]->unblocked &&
kernel_sigismember(&info->app_sigblocked, sig)) {
LOG(THREAD, LOG_ASYNCH, 3, "\tsignal %d is blocked!\n", sig);
continue;
}
LOG(THREAD, LOG_ASYNCH, 3, "\treceiving signal %d\n", sig);
executing = execute_handler_from_dispatch(dcontext, sig);
temp = info->sigpending[sig];
info->sigpending[sig] = temp->next;
special_heap_free(info->sigheap, temp);
info->num_pending--;
/* only one signal at a time! */
if (executing) {
/* Make negative so our fcache_enter check makes progress but
* our C code still considers there to be pending signals.
*/
dcontext->signals_pending = -1;
break;
}
}
}
/* barrier to prevent compiler from moving the below write above the loop */
__asm__ __volatile__("" : : : "memory");
info->accessing_sigpending = false;
/* we only clear this on a call to us where we find NO pending signals */
if (sig > MAX_SIGNUM) {
LOG(THREAD, LOG_ASYNCH, 3, "\tclearing signals_pending flag\n");
dcontext->signals_pending = 0;
}
}
/* Returns false if should NOT issue syscall. */
bool
#ifdef LINUX
handle_sigreturn(dcontext_t *dcontext, bool rt)
#else
handle_sigreturn(dcontext_t *dcontext, void *ucxt_param, int style)
#endif
{
thread_sig_info_t *info = (thread_sig_info_t *) dcontext->signal_field;
sigcontext_t *sc = NULL; /* initialize to satisfy Mac clang */
kernel_ucontext_t *ucxt = NULL;
int sig = 0;
app_pc next_pc;
/* xsp was put in mcontext prior to pre_system_call() */
reg_t xsp = get_mcontext(dcontext)->xsp;
#ifdef MACOS
bool rt = true;
#endif
LOG(THREAD, LOG_ASYNCH, 3, "%ssigreturn()\n", rt?"rt_":"");
LOG(THREAD, LOG_ASYNCH, 3, "\txsp is "PFX"\n", xsp);
#ifdef PROGRAM_SHEPHERDING
    /* if !sig_has_restorer, the region was never added to the exec list:
     * it is allowed as pattern only and kicked off at first write via
     * selfmod detection, or otherwise if vsyscall, so no worries
     * about having to remove it here
*/
#endif
/* get sigframe: it's the top thing on the stack, except the ret
* popped off pretcode.
* WARNING: handler for tcsh's window_change (SIGWINCH) clobbers its
* signal # arg, so don't use frame->sig! (kernel doesn't look at sig
* so app can get away with it)
*/
if (rt) {
#ifdef LINUX
sigframe_rt_t *frame = (sigframe_rt_t *) (xsp IF_X86(- sizeof(char*)));
/* use si_signo instead of sig, less likely to be clobbered by app */
sig = frame->info.si_signo;
# ifdef X86_32
LOG(THREAD, LOG_ASYNCH, 3, "\tsignal was %d (did == param %d)\n",
sig, frame->sig);
if (frame->sig != sig)
LOG(THREAD, LOG_ASYNCH, 1, "WARNING: app sig handler clobbered sig param\n");
# endif
sc = get_sigcontext_from_app_frame(info, sig, (void *) frame);
ucxt = &frame->uc;
#elif defined(MACOS)
/* The initial frame fields on the stack are messed up due to
* params to handler from tramp, so use params to syscall.
* XXX: we don't have signal # though: so we have to rely on app
* not clobbering the sig param field.
*/
sig = *(int*)xsp;
LOG(THREAD, LOG_ASYNCH, 3, "\tsignal was %d\n", sig);
ucxt = (kernel_ucontext_t *) ucxt_param;
if (ucxt == NULL) {
/* On Mac the kernel seems to store state on whether the process is
* on the altstack, so longjmp calls _sigunaltstack() which issues a
* sigreturn syscall telling the kernel about the altstack change,
* with a NULL context.
*/
            LOG(THREAD, LOG_ASYNCH, 3, "\t_sigunaltstack sigreturn: no context\n");
return true;
}
sc = SIGCXT_FROM_UCXT(ucxt);
#endif
ASSERT(sig > 0 && sig <= MAX_SIGNUM && IS_RT_FOR_APP(info, sig));
/* FIXME: what if handler called sigaction and requested rt
* when itself was non-rt?
*/
/* discard blocked signals, re-set from prev mask stored in frame */
set_blocked(dcontext, SIGMASK_FROM_UCXT(ucxt), true/*absolute*/);
}
#ifdef LINUX
else {
        /* FIXME: libc's restorer pops prior to calling sigreturn; I have
         * no idea why, but the kernel asks for xsp-8, not xsp-4...weird!
*/
kernel_sigset_t prevset;
sigframe_plain_t *frame = (sigframe_plain_t *) (xsp IF_X86(-8));
/* We don't trust frame->sig (app sometimes clobbers it), and for
* plain frame there's no other place that sig is stored,
* so as a hack we added a new frame!
* FIXME: this means we won't support nonstandard use of SYS_sigreturn,
* e.g., as NtContinue, if frame didn't come from a real signal and so
* wasn't copied to stack by us.
*/
sig = frame->sig_noclobber;
LOG(THREAD, LOG_ASYNCH, 3, "\tsignal was %d (did == param %d)\n",
sig, IF_X86_ELSE(frame->sig, 0));
# ifdef X86_32
if (frame->sig != sig)
LOG(THREAD, LOG_ASYNCH, 1, "WARNING: app sig handler clobbered sig param\n");
# endif
ASSERT(sig > 0 && sig <= MAX_SIGNUM && !IS_RT_FOR_APP(info, sig));
sc = get_sigcontext_from_app_frame(info, sig, (void *) frame);
/* discard blocked signals, re-set from prev mask stored in frame */
# ifdef AARCH64
ASSERT_NOT_IMPLEMENTED(false); /* FIXME i#1569 */
/* Avoid build failure with GCC 7 due to uninitialized value */
prevset.sig[0] = 0;
# else
prevset.sig[0] = frame->IF_X86_ELSE(sc.oldmask, uc.uc_mcontext.oldmask);
if (_NSIG_WORDS > 1) {
memcpy(&prevset.sig[1], &frame->IF_X86_ELSE(extramask, uc.sigset_ex),
sizeof(prevset.sig[1]));
}
# ifdef ARM
ucxt = &frame->uc; /* we leave ucxt NULL for x86: not needed there */
# endif
# endif
set_blocked(dcontext, &prevset, true/*absolute*/);
}
#endif /* LINUX */
/* Make sure we deliver pending signals that are now unblocked.
*/
check_signals_pending(dcontext, info);
/* We abandoned the previous context, so we need to start
* interpreting anew. Regardless of whether we handled the signal
* from dispatch or the fcache, we want to go to the context
* stored in the frame. So we have the kernel send us to
* fcache_return and set up for dispatch to use the frame's
* context.
*/
/* if we were building a trace, kill it */
if (is_building_trace(dcontext)) {
LOG(THREAD, LOG_ASYNCH, 3, "\tsquashing trace-in-progress\n");
trace_abort(dcontext);
}
if ((info->app_sigaction[sig]->flags & SA_ONESHOT) != 0) {
ASSERT(info->app_sigaction[sig]->handler == (handler_t) SIG_DFL);
if (!info->we_intercept[sig]) {
/* let kernel do default independent of us */
handler_free(dcontext, info->app_sigaction[sig], sizeof(kernel_sigaction_t));
info->app_sigaction[sig] = NULL;
}
}
ASSERT(!safe_is_in_fcache(dcontext, (app_pc) sc->SC_XIP, (byte *)sc->SC_XSP));
#ifdef CLIENT_INTERFACE
sig_full_cxt_t sc_full = { sc, NULL/*not provided*/ };
get_mcontext(dcontext)->pc = dcontext->next_tag;
instrument_kernel_xfer(dcontext, DR_XFER_SIGNAL_RETURN, osc_empty, NULL,
get_mcontext(dcontext), (app_pc)sc->SC_XIP, sc->SC_XSP,
sc_full, NULL, sig);
#endif
#ifdef DEBUG
if (stats->loglevel >= 3 && (stats->logmask & LOG_ASYNCH) != 0) {
LOG(THREAD, LOG_ASYNCH, 3, "returning-to sigcontext "PFX":\n", sc);
dump_sigcontext(dcontext, sc);
}
#endif
/* XXX i#1206: if we interrupted a non-ignorable syscall to run the app's
* handler, and we set up to restart the syscall, we'll come here with the
* translated syscall pc -- thus we can't distinguish from a signal interrupting
* the prior app instr. So we can't simply point at do_syscall and call
* set_at_syscall -- we have to re-interpret the syscall and re-run the
* pre-syscall handler. Hopefully all our pre-syscall handlers can handle that.
*/
/* set up for dispatch */
/* we have to use a different slot since next_tag ends up holding the do_syscall
* entry when entered from dispatch (we're called from
* pre_syscall, prior to entering cache)
*/
dcontext->asynch_target = canonicalize_pc_target
(dcontext, (app_pc)(sc->SC_XIP IF_ARM(|(TEST(EFLAGS_T, sc->SC_XFLAGS) ? 1 : 0))));
next_pc = dcontext->asynch_target;
#ifdef VMX86_SERVER
/* PR 404712: kernel only restores gp regs so we do it ourselves and avoid
* complexities of kernel's non-linux-like sigreturn semantics
*/
sig_full_cxt_t sc_full = {sc, NULL}; /* non-ARM so NULL ok */
sigcontext_to_mcontext(get_mcontext(dcontext), &sc_full, DR_MC_ALL);
#else
/* HACK to get eax put into mcontext AFTER do_syscall */
dcontext->next_tag = (app_pc) sc->IF_X86_ELSE(SC_XAX, SC_R0);
/* use special linkstub so we know why we came out of the cache */
sc->IF_X86_ELSE(SC_XAX, SC_R0) = (ptr_uint_t) get_asynch_linkstub();
/* set our sigreturn context to point to fcache_return */
/* We don't need PC_AS_JMP_TGT b/c the kernel uses EFLAGS_T for the mode */
sc->SC_XIP = (ptr_uint_t) fcache_return_routine(dcontext);
    /* if we overlaid an inner frame on a nested signal, we will end up with this
     * error -- disabled in release build since this is often the app's fault (stack
     * too small)
     * FIXME: how to make this transparent? what ends up happening is that we
     * get a segfault when we start interpreting dispatch; we want to make it
     * look like whatever would happen to the app...
*/
ASSERT((app_pc)sc->SC_XIP != next_pc);
# ifdef AARCHXX
set_stolen_reg_val(get_mcontext(dcontext), get_sigcxt_stolen_reg(sc));
set_sigcxt_stolen_reg(sc, (reg_t) *get_dr_tls_base_addr());
# ifdef AARCH64
/* On entry to the do_syscall gencode, we save X1 into TLS_REG1_SLOT.
     * The sigreturn then redirects the flow to the fcache_return gencode,
     * which recovers the values of x0 and x1 from TLS_SLOT 0 and 1.
*/
get_mcontext(dcontext)->r1 = sc->regs[1];
# else
/* We're going to our fcache_return gencode which uses DEFAULT_ISA_MODE */
set_pc_mode_in_cpsr(sc, DEFAULT_ISA_MODE);
# endif
# endif
#endif
LOG(THREAD, LOG_ASYNCH, 3, "set next tag to "PFX", sc->SC_XIP to "PFX"\n",
next_pc, sc->SC_XIP);
return IF_VMX86_ELSE(false, true);
}
bool
is_signal_restorer_code(byte *pc, size_t *len)
{
/* is this a sigreturn pattern placed by kernel on the stack or vsyscall page?
* for non-rt frame:
* 0x58 popl %eax
* 0xb8 <sysnum> movl SYS_sigreturn, %eax
* 0xcd 0x80 int 0x80
* for rt frame:
* 0xb8 <sysnum> movl SYS_rt_sigreturn, %eax
* 0xcd 0x80 int 0x80
*/
    /* optimized: we only need two uint reads, but we have to do
* some little-endian byte-order reverses to get the right result
*/
# define reverse(x) ((((x) & 0xff) << 24) | (((x) & 0xff00) << 8) | \
(((x) & 0xff0000) >> 8) | (((x) & 0xff000000) >> 24))
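    /* Illustrative note (not from the original source): on 32-bit x86 Linux,
     * SYS_sigreturn is 119 (0x77), so the non-rt pattern matched below is the
     * byte sequence 58 b8 77 00 00 00 cd 80, i.e.
     * "popl %eax; movl $119,%eax; int $0x80".
     */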
#ifdef MACOS
# define SYS_RT_SIGRET SYS_sigreturn
#else
# define SYS_RT_SIGRET SYS_rt_sigreturn
#endif
#ifndef X64
/* 58 b8 s4 s3 s2 s1 cd 80 */
static const uint non_rt_1w = reverse(0x58b80000 | (reverse(SYS_sigreturn) >> 16));
static const uint non_rt_2w = reverse((reverse(SYS_sigreturn) << 16) | 0xcd80);
#endif
/* b8 s4 s3 s2 s1 cd 80 XX */
static const uint rt_1w = reverse(0xb8000000 | (reverse(SYS_RT_SIGRET) >> 8));
static const uint rt_2w = reverse((reverse(SYS_RT_SIGRET) << 24) | 0x00cd8000);
/* test rt first as it's the most common
* only 7 bytes here so we ignore the last one (becomes msb since little-endian)
*/
if (*((uint *)pc) == rt_1w && (*((uint *)(pc+4)) & 0x00ffffff) == rt_2w) {
if (len != NULL)
*len = 7;
return true;
}
#ifndef X64
if (*((uint *)pc) == non_rt_1w && *((uint *)(pc+4)) == non_rt_2w) {
if (len != NULL)
*len = 8;
return true;
}
#endif
return false;
}
void
os_forge_exception(app_pc target_pc, dr_exception_type_t type)
{
/* PR 205136:
* We want to deliver now, and the caller expects us not to return.
* We have two alternatives:
* 1) Emulate stack frame, and call transfer_to_dispatch() for delivery. We
* may not know how to fill out every field of the frame (cr2, etc.). Plus,
* we have problems w/ default actions (PR 205310) but we have to solve
* those long-term anyway. We also have to create different frames based on
* whether app intercepts via rt or not.
* 2) Call SYS_tgkill from a special location that our handler can
* recognize and know it's a signal meant for the app and that the
* interrupted DR can be discarded. We'd then essentially repeat 1,
* but modifying the kernel-generated frame. We'd have to always
* intercept SIGILL.
* I'm going with #1 for now b/c the common case is simpler.
*/
dcontext_t *dcontext = get_thread_private_dcontext();
#if defined(LINUX) && defined(X86)
thread_sig_info_t *info = (thread_sig_info_t *) dcontext->signal_field;
#endif
char frame_no_xstate[sizeof(sigframe_rt_t)];
sigframe_rt_t *frame = (sigframe_rt_t *) frame_no_xstate;
int sig;
where_am_i_t cur_whereami = dcontext->whereami;
kernel_ucontext_t *uc = get_ucontext_from_rt_frame(frame);
sigcontext_t *sc = SIGCXT_FROM_UCXT(uc);
switch (type) {
case ILLEGAL_INSTRUCTION_EXCEPTION: sig = SIGILL; break;
case UNREADABLE_MEMORY_EXECUTION_EXCEPTION: sig = SIGSEGV; break;
case SINGLE_STEP_EXCEPTION: ASSERT_NOT_IMPLEMENTED(false); /* FIXME: i#2144 */
case IN_PAGE_ERROR_EXCEPTION: /* fall-through: Windows only */
default: ASSERT_NOT_REACHED(); sig = SIGSEGV; break;
}
LOG(GLOBAL, LOG_ASYNCH, 1, "os_forge_exception sig=%d\n", sig);
/* since we always delay delivery, we always want an rt frame. we'll convert
* to a plain frame on delivery.
*/
memset(frame, 0, sizeof(*frame));
frame->info.si_signo = sig;
#ifdef X86_32
frame->sig = sig;
frame->pinfo = &frame->info;
frame->puc = (void *) &frame->uc;
#endif
#if defined(LINUX) && defined(X86)
/* We use a TLS buffer to avoid too much stack space here. */
sc->fpstate = (kernel_fpstate_t *) get_xstate_buffer(dcontext);
#endif
mcontext_to_ucontext(uc, get_mcontext(dcontext));
sc->SC_XIP = (reg_t) target_pc;
/* We'll fill in fpstate at delivery time.
* We fill in segment registers to their current values and assume they won't
* change and that these are the right values.
*
* FIXME i#2095: restore the app's segment register value(s).
*
* XXX: it seems to work w/o filling in the other state:
* I'm leaving cr2 and other fields all zero.
* If this gets problematic we could switch to approach #2.
*/
thread_set_segment_registers(sc);
#if defined(X86) && defined(LINUX)
if (sig_has_restorer(info, sig))
frame->pretcode = (char *) info->app_sigaction[sig]->restorer;
else
frame->pretcode = (char *) dynamorio_sigreturn;
#endif
/* We assume that we do not need to translate the context when forged.
* If we did, we'd move this below enter_nolinking() (and update
* record_pending_signal() to do the translation).
*/
record_pending_signal(dcontext, sig, &frame->uc, frame, true/*forged*/
_IF_CLIENT(NULL));
/* For most callers this is not necessary and we only do it to match
* the Windows usage model: but for forging from our own handler,
* this is good b/c it resets us to the base of dstack.
*/
/* tell dispatch() why we're coming there */
dcontext->whereami = WHERE_TRAMPOLINE;
KSTART(dispatch_num_exits);
set_last_exit(dcontext, (linkstub_t *) get_asynch_linkstub());
if (is_couldbelinking(dcontext))
enter_nolinking(dcontext, NULL, false);
transfer_to_dispatch(dcontext, get_mcontext(dcontext),
cur_whereami != WHERE_FCACHE &&
cur_whereami != WHERE_SIGNAL_HANDLER
/*full_DR_state*/);
ASSERT_NOT_REACHED();
}
void
os_request_fatal_coredump(const char *msg)
{
/* To enable getting a coredump just make sure that rlimits are
* not preventing getting one, e.g. ulimit -c unlimited
*/
SYSLOG_INTERNAL_ERROR("Crashing the process deliberately for a core dump!");
os_terminate_via_signal(NULL, 0/*no cleanup*/, SIGSEGV);
ASSERT_NOT_REACHED();
}
void
os_request_live_coredump(const char *msg)
{
#ifdef VMX86_SERVER
if (os_in_vmkernel_userworld()) {
vmk_request_live_coredump(msg);
return;
}
#endif
LOG(GLOBAL, LOG_ASYNCH, 1, "LiveCoreDump unsupported (PR 365105). "
"Continuing execution without a core.\n");
return;
}
void
os_dump_core(const char *msg)
{
    /* FIXME Case 3408: fork stack dump crashes on 2.6 kernel, so we move the getchar
* ahead to aid in debugging */
if (TEST(DUMPCORE_WAIT_FOR_DEBUGGER, dynamo_options.dumpcore_mask)) {
SYSLOG_INTERNAL_ERROR("looping so you can use gdb to attach to pid %s",
get_application_pid());
IF_CLIENT_INTERFACE(SYSLOG(SYSLOG_CRITICAL, WAITING_FOR_DEBUGGER, 2,
get_application_name(), get_application_pid()));
/* getchar() can hit our own vsyscall hook (from PR 212570); typically we
* want to attach and not continue anyway, so doing an infinite loop:
*/
while (true)
os_thread_yield();
}
if (DYNAMO_OPTION(live_dump)) {
os_request_live_coredump(msg);
}
if (TEST(DUMPCORE_INCLUDE_STACKDUMP, dynamo_options.dumpcore_mask)) {
/* fork, dump core, then use gdb to get a stack dump
* we can get into an infinite loop if there's a seg fault
* in the process of doing this -- so we have a do-once test,
* and if it failed we do the no-symbols dr callstack dump
*/
static bool tried_stackdump = false;
if (!tried_stackdump) {
tried_stackdump = true;
stackdump();
} else {
static bool tried_calldump = false;
if (!tried_calldump) {
tried_calldump = true;
dump_dr_callstack(STDERR);
}
}
}
if (!DYNAMO_OPTION(live_dump)) {
os_request_fatal_coredump(msg);
ASSERT_NOT_REACHED();
}
}
#ifdef RETURN_AFTER_CALL
bool
at_known_exception(dcontext_t *dcontext, app_pc target_pc, app_pc source_fragment)
{
/* There is a known exception in signal restorers and the Linux
     * dynamic symbol resolution.
     * We assume the latter is the only other recurring known exception,
* so the first time we pattern match to help make sure it is indeed
* _dl_runtime_resolve (since with LD_BIND_NOW it will never be called).
* After that we compare with the known value.
*/
static app_pc known_exception = 0;
thread_sig_info_t *info = (thread_sig_info_t *) dcontext->signal_field;
LOG(THREAD, LOG_INTERP, 1, "RCT: testing for KNOWN exception "PFX" "PFX"\n",
target_pc, source_fragment);
    /* Check if this is a signal return.
     * FIXME: we should really get that from the frame itself.
     * Since currently we grab the restorer only when copying a frame,
     * this will work with nested signals only if they all have the same restorer
     * (I haven't seen restorers other than the one in libc).
     */
if (target_pc == info->signal_restorer_retaddr) {
LOG(THREAD, LOG_INTERP, 1,
"RCT: KNOWN exception this is a signal restorer --ok \n");
STATS_INC(ret_after_call_signal_restorer);
return true;
}
if (source_fragment == known_exception) {
LOG(THREAD, LOG_INTERP, 1,
"RCT: KNOWN exception again _dl_runtime_resolve --ok\n");
return true;
}
if (known_exception == 0) {
int ret_imm;
return at_dl_runtime_resolve_ret(dcontext, source_fragment, &ret_imm);
}
return false;
}
#endif /* RETURN_AFTER_CALL */
/***************************************************************************
* ITIMERS
*
* We support combining an app itimer with a DR itimer for each of the 3 types
* (PR 204556).
*/
static inline uint64
timeval_to_usec(struct timeval *t1)
{
return ((uint64)(t1->tv_sec))*1000000 + t1->tv_usec;
}
static inline void
usec_to_timeval(uint64 usec, struct timeval *t1)
{
    t1->tv_sec = (long) (usec / 1000000);
    t1->tv_usec = (long) (usec % 1000000);
}
static void
init_itimer(dcontext_t *dcontext, bool first)
{
thread_sig_info_t *info = (thread_sig_info_t *) dcontext->signal_field;
ASSERT(info != NULL);
ASSERT(!info->shared_itimer); /* else inherit */
LOG(THREAD, LOG_ASYNCH, 2, "thread has private itimers%s\n",
os_itimers_thread_shared() ? " (for now)" : "");
if (os_itimers_thread_shared()) {
/* we have to allocate now even if no itimer is installed until later,
* so that all child threads point to the same data
*/
info->itimer = (thread_itimer_info_t (*)[NUM_ITIMERS])
global_heap_alloc(sizeof(*info->itimer) HEAPACCT(ACCT_OTHER));
} else {
/* for simplicity and parallel w/ shared we allocate proactively */
info->itimer = (thread_itimer_info_t (*)[NUM_ITIMERS])
heap_alloc(dcontext, sizeof(*info->itimer) HEAPACCT(ACCT_OTHER));
}
memset(info->itimer, 0, sizeof(*info->itimer));
if (first) {
/* see if app has set up an itimer before we were loaded */
struct itimerval prev;
int rc;
int which;
for (which = 0; which < NUM_ITIMERS; which++) {
rc = getitimer_syscall(which, &prev);
ASSERT(rc == SUCCESS);
(*info->itimer)[which].app.interval = timeval_to_usec(&prev.it_interval);
(*info->itimer)[which].app.value = timeval_to_usec(&prev.it_value);
}
}
}
/* Up to caller to hold lock for shared itimers */
static bool
set_actual_itimer(dcontext_t *dcontext, int which, thread_sig_info_t *info,
bool enable)
{
struct itimerval val;
int rc;
ASSERT(info != NULL && info->itimer != NULL);
ASSERT(which >= 0 && which < NUM_ITIMERS);
if (enable) {
ASSERT(!info->shared_itimer ||
self_owns_recursive_lock(info->shared_itimer_lock));
usec_to_timeval((*info->itimer)[which].actual.interval, &val.it_interval);
usec_to_timeval((*info->itimer)[which].actual.value, &val.it_value);
LOG(THREAD, LOG_ASYNCH, 2, "installing itimer %d interval="INT64_FORMAT_STRING
", value="INT64_FORMAT_STRING"\n", which,
(*info->itimer)[which].actual.interval, (*info->itimer)[which].actual.value);
} else {
LOG(THREAD, LOG_ASYNCH, 2, "disabling itimer %d\n", which);
memset(&val, 0, sizeof(val));
(*info->itimer)[which].actual.value = 0;
(*info->itimer)[which].actual.interval = 0;
}
rc = setitimer_syscall(which, &val, NULL);
return (rc == SUCCESS);
}
/* Caller should hold lock */
static bool
itimer_new_settings(dcontext_t *dcontext, int which, bool app_changed)
{
struct itimerval val;
bool res = true;
int rc;
thread_sig_info_t *info = (thread_sig_info_t *) dcontext->signal_field;
ASSERT(info != NULL && info->itimer != NULL);
ASSERT(which >= 0 && which < NUM_ITIMERS);
ASSERT(!info->shared_itimer || self_owns_recursive_lock(info->shared_itimer_lock));
/* the general strategy is to set the actual value to the smaller,
* update the larger on each signal, and when the larger becomes
* smaller do a one-time swap for the remaining
*/
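    /* Illustrative example (not in the original comment): with an app period of
     * 100ms and a DR period of 30ms, the actual timer runs at 30ms; each alarm
     * subtracts 30ms from the app's remaining time, and once that remainder
     * drops below the DR period, the smaller remainder is installed directly.
     */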
if ((*info->itimer)[which].dr.interval > 0 &&
((*info->itimer)[which].app.interval == 0 ||
(*info->itimer)[which].dr.interval < (*info->itimer)[which].app.interval))
(*info->itimer)[which].actual.interval = (*info->itimer)[which].dr.interval;
else
(*info->itimer)[which].actual.interval = (*info->itimer)[which].app.interval;
if ((*info->itimer)[which].actual.value > 0) {
if ((*info->itimer)[which].actual.interval == 0 &&
(*info->itimer)[which].dr.value == 0 &&
(*info->itimer)[which].app.value == 0) {
(*info->itimer)[which].actual.value = 0;
res = set_actual_itimer(dcontext, which, info, false/*disabled*/);
} else {
            /* One of the app or us has an in-flight timer which we should not
             * interrupt. But we already set the new requested value (for the app
             * or us), so we need to update the actual value so we subtract
             * properly.
             */
rc = getitimer_syscall(which, &val);
ASSERT(rc == SUCCESS);
uint64 left = timeval_to_usec(&val.it_value);
if (!app_changed &&
(*info->itimer)[which].actual.value == (*info->itimer)[which].app.value)
(*info->itimer)[which].app.value = left;
if (app_changed &&
(*info->itimer)[which].actual.value == (*info->itimer)[which].dr.value)
(*info->itimer)[which].dr.value = left;
(*info->itimer)[which].actual.value = left;
}
} else {
if ((*info->itimer)[which].dr.value > 0 &&
((*info->itimer)[which].app.value == 0 ||
(*info->itimer)[which].dr.value < (*info->itimer)[which].app.value))
(*info->itimer)[which].actual.value = (*info->itimer)[which].dr.value;
else {
(*info->itimer)[which].actual.value = (*info->itimer)[which].app.value;
}
res = set_actual_itimer(dcontext, which, info, true/*enable*/);
}
return res;
}
bool
set_itimer_callback(dcontext_t *dcontext, int which, uint millisec,
void (*func)(dcontext_t *, priv_mcontext_t *),
void (*func_api)(dcontext_t *, dr_mcontext_t *))
{
thread_sig_info_t *info = (thread_sig_info_t *) dcontext->signal_field;
bool rc;
if (which < 0 || which >= NUM_ITIMERS) {
CLIENT_ASSERT(false, "invalid itimer type");
return false;
}
if (func == NULL && func_api == NULL && millisec != 0) {
CLIENT_ASSERT(false, "invalid function");
return false;
}
ASSERT(info != NULL && info->itimer != NULL);
if (info->shared_itimer)
acquire_recursive_lock(info->shared_itimer_lock);
(*info->itimer)[which].dr.interval = ((uint64)millisec)*1000;
(*info->itimer)[which].dr.value = (*info->itimer)[which].dr.interval;
(*info->itimer)[which].cb = func;
(*info->itimer)[which].cb_api = func_api;
rc = itimer_new_settings(dcontext, which, false/*us*/);
if (info->shared_itimer)
release_recursive_lock(info->shared_itimer_lock);
return rc;
}
uint
get_itimer_frequency(dcontext_t *dcontext, int which)
{
thread_sig_info_t *info = (thread_sig_info_t *) dcontext->signal_field;
uint ms = 0;
if (which < 0 || which >= NUM_ITIMERS) {
CLIENT_ASSERT(false, "invalid itimer type");
return 0;
}
ASSERT(info != NULL && info->itimer != NULL);
if (info->shared_itimer)
acquire_recursive_lock(info->shared_itimer_lock);
ms = (*info->itimer)[which].dr.interval / 1000;
if (info->shared_itimer)
release_recursive_lock(info->shared_itimer_lock);
return ms;
}
static bool
handle_alarm(dcontext_t *dcontext, int sig, kernel_ucontext_t *ucxt)
{
thread_sig_info_t *info = (thread_sig_info_t *) dcontext->signal_field;
ASSERT(info != NULL && info->itimer != NULL);
int which = 0;
bool invoke_cb = false, pass_to_app = false, reset_timer_manually = false;
bool should_release_lock = false;
/* i#471: suppress alarms coming in after exit */
if (dynamo_exited)
return pass_to_app;
if (sig == SIGALRM)
which = ITIMER_REAL;
else if (sig == SIGVTALRM)
which = ITIMER_VIRTUAL;
else if (sig == SIGPROF)
which = ITIMER_PROF;
else
ASSERT_NOT_REACHED();
LOG(THREAD, LOG_ASYNCH, 2, "received alarm %d @"PFX"\n", which,
SIGCXT_FROM_UCXT(ucxt)->SC_XIP);
/* This alarm could have interrupted an app thread making an itimer syscall */
if (info->shared_itimer) {
#ifdef DEADLOCK_AVOIDANCE
/* i#2061: in debug build we can get an alarm while in deadlock handling
* code that holds innermost_lock. We just drop such alarms.
*/
if (OWN_MUTEX(&innermost_lock))
return pass_to_app;
#endif
if (self_owns_recursive_lock(info->shared_itimer_lock)) {
/* What can we do? We just go ahead and hope conflicting writes work out.
* We don't re-acquire in case app was in middle of acquiring.
*/
} else if (try_recursive_lock(info->shared_itimer_lock) ||
try_recursive_lock(info->shared_itimer_lock)) {
should_release_lock = true;
} else {
            /* Heuristic: if we fail twice, assume we interrupted the lock routine.
* What can we do? Just continue and hope conflicting writes work out.
*/
}
}
if ((*info->itimer)[which].app.value > 0) {
/* Alarm could have been on its way when app value changed */
if ((*info->itimer)[which].app.value >= (*info->itimer)[which].actual.value) {
(*info->itimer)[which].app.value -= (*info->itimer)[which].actual.value;
LOG(THREAD, LOG_ASYNCH, 2,
"\tapp value is now %d\n", (*info->itimer)[which].app.value);
if ((*info->itimer)[which].app.value == 0) {
pass_to_app = true;
(*info->itimer)[which].app.value = (*info->itimer)[which].app.interval;
} else
reset_timer_manually = true;
}
}
if ((*info->itimer)[which].dr.value > 0) {
/* Alarm could have been on its way when DR value changed */
if ((*info->itimer)[which].dr.value >= (*info->itimer)[which].actual.value) {
(*info->itimer)[which].dr.value -= (*info->itimer)[which].actual.value;
LOG(THREAD, LOG_ASYNCH, 2,
"\tdr value is now %d\n", (*info->itimer)[which].dr.value);
if ((*info->itimer)[which].dr.value == 0) {
invoke_cb = true;
(*info->itimer)[which].dr.value = (*info->itimer)[which].dr.interval;
} else
reset_timer_manually = true;
}
}
/* for efficiency we let the kernel reset the value to interval if
* there's only one timer
*/
if (reset_timer_manually) {
(*info->itimer)[which].actual.value = 0;
itimer_new_settings(dcontext, which, true/*doesn't matter: actual.value==0*/);
} else
(*info->itimer)[which].actual.value = (*info->itimer)[which].actual.interval;
if (invoke_cb) {
/* invoke after setting new itimer value */
/* we save stack space by allocating superset dr_mcontext_t */
dr_mcontext_t dmc;
dr_mcontext_init(&dmc);
void (*cb)(dcontext_t *, priv_mcontext_t *) = (*info->itimer)[which].cb;
void (*cb_api)(dcontext_t *, dr_mcontext_t *) = (*info->itimer)[which].cb_api;
if (which == ITIMER_VIRTUAL && info->shared_itimer && should_release_lock) {
release_recursive_lock(info->shared_itimer_lock);
should_release_lock = false;
}
if (cb != NULL) {
priv_mcontext_t *mc = dr_mcontext_as_priv_mcontext(&dmc);
ucontext_to_mcontext(mc, ucxt);
cb(dcontext, mc);
} else {
cb_api(dcontext, &dmc);
}
}
if (info->shared_itimer && should_release_lock)
release_recursive_lock(info->shared_itimer_lock);
return pass_to_app;
}
/* Starts itimer if stopped, or increases refcount of existing itimer if already
* started. It is *not* safe to call this more than once for the same thread,
* since it will inflate the refcount and prevent cleanup.
*/
void
start_itimer(dcontext_t *dcontext)
{
thread_sig_info_t *info = (thread_sig_info_t *) dcontext->signal_field;
ASSERT(info != NULL && info->itimer != NULL);
bool start = false;
if (info->shared_itimer) {
acquire_recursive_lock(info->shared_itimer_lock);
(*info->shared_itimer_underDR)++;
start = (*info->shared_itimer_underDR == 1);
} else
start = true;
if (start) {
/* Enable all DR itimers b/c at least one thread in this set of threads
* sharing itimers is under DR control
*/
int which;
LOG(THREAD, LOG_ASYNCH, 2, "starting DR itimers from thread "TIDFMT"\n",
get_thread_id());
for (which = 0; which < NUM_ITIMERS; which++) {
/* May have already been started if there was no stop_itimer() since
* init time
*/
if ((*info->itimer)[which].dr.value == 0 &&
(*info->itimer)[which].dr.interval > 0) {
(*info->itimer)[which].dr.value = (*info->itimer)[which].dr.interval;
itimer_new_settings(dcontext, which, false/*!app*/);
}
}
}
if (info->shared_itimer)
release_recursive_lock(info->shared_itimer_lock);
}
/* Decrements the itimer refcount, and turns off the itimer once there are no
* more threads listening for it. It is not safe to call this more than once on
* the same thread.
*/
void
stop_itimer(dcontext_t *dcontext)
{
thread_sig_info_t *info = (thread_sig_info_t *) dcontext->signal_field;
ASSERT(info != NULL && info->itimer != NULL);
bool stop = false;
if (info->shared_itimer) {
acquire_recursive_lock(info->shared_itimer_lock);
ASSERT(*info->shared_itimer_underDR > 0);
(*info->shared_itimer_underDR)--;
stop = (*info->shared_itimer_underDR == 0);
} else
stop = true;
if (stop) {
/* Disable all DR itimers b/c this set of threads sharing this
* itimer is now completely native
*/
int which;
LOG(THREAD, LOG_ASYNCH, 2, "stopping DR itimers from thread "TIDFMT"\n",
get_thread_id());
for (which = 0; which < NUM_ITIMERS; which++) {
if ((*info->itimer)[which].dr.value > 0) {
(*info->itimer)[which].dr.value = 0;
if ((*info->itimer)[which].app.value > 0) {
(*info->itimer)[which].actual.interval =
(*info->itimer)[which].app.interval;
} else
set_actual_itimer(dcontext, which, info, false/*disable*/);
}
}
}
if (info->shared_itimer)
release_recursive_lock(info->shared_itimer_lock);
}
/* handle app itimer syscalls */
/* handle_pre_alarm also calls this function and passes NULL as prev_timer */
void
handle_pre_setitimer(dcontext_t *dcontext,
int which, const struct itimerval *new_timer,
struct itimerval *prev_timer)
{
if (new_timer == NULL || which < 0 || which >= NUM_ITIMERS)
return;
thread_sig_info_t *info = (thread_sig_info_t *) dcontext->signal_field;
ASSERT(info != NULL && info->itimer != NULL);
struct itimerval val;
if (safe_read(new_timer, sizeof(val), &val)) {
if (info->shared_itimer)
acquire_recursive_lock(info->shared_itimer_lock);
/* save a copy in case the syscall fails */
(*info->itimer)[which].app_saved = (*info->itimer)[which].app;
(*info->itimer)[which].app.interval = timeval_to_usec(&val.it_interval);
(*info->itimer)[which].app.value = timeval_to_usec(&val.it_value);
LOG(THREAD, LOG_ASYNCH, 2,
"app setitimer type=%d interval="SZFMT" value="SZFMT"\n",
which, (*info->itimer)[which].app.interval,
(*info->itimer)[which].app.value);
itimer_new_settings(dcontext, which, true/*app*/);
if (info->shared_itimer)
release_recursive_lock(info->shared_itimer_lock);
}
}
void
handle_post_setitimer(dcontext_t *dcontext, bool success,
int which, const struct itimerval *new_timer,
struct itimerval *prev_timer)
{
if (new_timer == NULL || which < 0 || which >= NUM_ITIMERS) {
ASSERT(new_timer == NULL || !success);
return;
}
thread_sig_info_t *info = (thread_sig_info_t *) dcontext->signal_field;
ASSERT(info != NULL && info->itimer != NULL);
ASSERT(which >= 0 && which < NUM_ITIMERS);
if (!success && new_timer != NULL) {
if (info->shared_itimer)
acquire_recursive_lock(info->shared_itimer_lock);
/* restore saved pre-syscall settings */
(*info->itimer)[which].app = (*info->itimer)[which].app_saved;
itimer_new_settings(dcontext, which, true/*app*/);
if (info->shared_itimer)
release_recursive_lock(info->shared_itimer_lock);
}
if (success && prev_timer != NULL)
handle_post_getitimer(dcontext, success, which, prev_timer);
}
void
handle_post_getitimer(dcontext_t *dcontext, bool success,
int which, struct itimerval *cur_timer)
{
thread_sig_info_t *info = (thread_sig_info_t *) dcontext->signal_field;
ASSERT(info != NULL && info->itimer != NULL);
if (success) {
/* write succeeded for kernel but we're user and can have races */
struct timeval val;
DEBUG_DECLARE(bool ok;)
ASSERT(which >= 0 && which < NUM_ITIMERS);
ASSERT(cur_timer != NULL);
if (info->shared_itimer)
acquire_recursive_lock(info->shared_itimer_lock);
usec_to_timeval((*info->itimer)[which].app.interval, &val);
IF_DEBUG(ok = )
safe_write_ex(&cur_timer->it_interval, sizeof(val), &val, NULL);
ASSERT(ok);
if (safe_read(&cur_timer->it_value, sizeof(val), &val)) {
/* subtract the difference between last-asked-for value
* and current value to reflect elapsed time
*/
uint64 left = (*info->itimer)[which].app.value -
((*info->itimer)[which].actual.value - timeval_to_usec(&val));
usec_to_timeval(left, &val);
IF_DEBUG(ok = )
safe_write_ex(&cur_timer->it_value, sizeof(val), &val, NULL);
ASSERT(ok);
} else
ASSERT_NOT_REACHED();
if (info->shared_itimer)
release_recursive_lock(info->shared_itimer_lock);
}
}
/* handle app alarm syscall */
/* alarm uses the same itimer and could be defined in terms of setitimer */
void
handle_pre_alarm(dcontext_t *dcontext, unsigned int sec)
{
struct itimerval val;
val.it_interval.tv_usec = 0;
val.it_interval.tv_sec = 0;
val.it_value.tv_usec = 0;
val.it_value.tv_sec = sec;
handle_pre_setitimer(dcontext, ITIMER_REAL, &val, NULL);
}
void
handle_post_alarm(dcontext_t *dcontext, bool success, unsigned int sec)
{
/* alarm is always successful, so do nothing in post */
ASSERT(success);
return;
}
/***************************************************************************
* Internal DR communication
*/
typedef struct _sig_detach_info_t {
KSYNCH_TYPE *detached;
byte *sigframe_xsp;
#ifdef HAVE_SIGALTSTACK
stack_t *app_sigstack;
#endif
} sig_detach_info_t;
/* xsp is only set for X86 */
static void
notify_and_jmp_without_stack(KSYNCH_TYPE *notify_var, byte *continuation, byte *xsp)
{
if (ksynch_kernel_support()) {
/* Can't use dstack once we signal so in asm we do:
* futex/semaphore = 1;
* %xsp = xsp;
* dynamorio_condvar_wake_and_jmp(notify_var, continuation);
*/
#ifdef MACOS
ASSERT(sizeof(notify_var->sem) == 4);
#endif
#ifdef X86
# ifndef MACOS
/* i#2632: recent clang for 32-bit annoyingly won't do the right thing for
* "jmp dynamorio_condvar_wake_and_jmp" and leaves relocs so we ensure it's PIC.
* We do this first as it may end up clobbering a scratch reg like xax.
*/
void (*asm_jmp_tgt)() = dynamorio_condvar_wake_and_jmp;
asm("mov %0, %%"ASM_XDX : : "m"(asm_jmp_tgt));
# endif
asm("mov %0, %%"ASM_XAX : : "m"(notify_var));
asm("mov %0, %%"ASM_XCX : : "m"(continuation));
asm("mov %0, %%"ASM_XSP : : "m"(xsp));
# ifdef MACOS
asm("movl $1,4(%"ASM_XAX")");
asm("jmp _dynamorio_condvar_wake_and_jmp");
# else
asm("movl $1,(%"ASM_XAX")");
asm("jmp *%"ASM_XDX);
# endif
#elif defined(AARCHXX)
asm("ldr "ASM_R0", %0" : : "m"(notify_var));
asm("mov "ASM_R1", #1");
asm("str "ASM_R1",["ASM_R0"]");
asm("ldr "ASM_R1", %0" : : "m"(continuation));
asm("b dynamorio_condvar_wake_and_jmp");
#endif
} else {
ksynch_set_value(notify_var, 1);
#ifdef X86
asm("mov %0, %%"ASM_XSP : : "m"(xsp));
asm("mov %0, %%"ASM_XAX : : "m"(continuation));
asm("jmp *%"ASM_XAX);
#elif defined(AARCHXX)
asm("ldr "ASM_R0", %0" : : "m"(continuation));
asm(ASM_INDJMP" "ASM_R0);
#endif /* X86/ARM */
}
}
/* Go native from detach. This is executed on the app stack. */
static void
sig_detach_go_native(sig_detach_info_t *info)
{
byte *xsp = info->sigframe_xsp;
#ifdef HAVE_SIGALTSTACK
/* Restore the app signal stack. */
DEBUG_DECLARE(int rc =)
sigaltstack_syscall(info->app_sigstack, NULL);
ASSERT(rc == 0);
#endif
#ifdef X86
/* Skip pretcode */
xsp += sizeof(char *);
#endif
notify_and_jmp_without_stack(info->detached, (byte *)dynamorio_sigreturn, xsp);
ASSERT_NOT_REACHED();
}
/* Sets this (slave) thread to detach by directly returning from the signal. */
static void
sig_detach(dcontext_t *dcontext, sigframe_rt_t *frame, KSYNCH_TYPE *detached)
{
thread_sig_info_t *info = (thread_sig_info_t *)dcontext->signal_field;
byte *xsp;
sig_detach_info_t detach_info;
LOG(THREAD, LOG_ASYNCH, 1, "%s: detaching\n", __FUNCTION__);
/* Update the mask of the signal frame so that the later sigreturn will
* restore the app signal mask.
*/
memcpy(&frame->uc.uc_sigmask, &info->app_sigblocked,
sizeof(info->app_sigblocked));
/* Copy the signal frame to the app stack.
* XXX: We live with the transparency risk of storing the signal frame on
* the app stack: we assume the app stack is writable where we need it to be,
* and that we're not clobbering any app data beyond TOS.
*/
xsp = get_sigstack_frame_ptr(dcontext, SUSPEND_SIGNAL, frame);
copy_frame_to_stack(dcontext, SUSPEND_SIGNAL, frame, xsp, false/*!pending*/);
#ifdef HAVE_SIGALTSTACK
/* Make sure the frame's sigstack reflects the app stack. */
frame = (sigframe_rt_t *) xsp;
frame->uc.uc_stack = info->app_sigstack;
#endif
/* Restore app segment registers. */
os_thread_not_under_dynamo(dcontext);
os_tls_thread_exit(dcontext->local_state);
#ifdef HAVE_SIGALTSTACK
/* We can't restore the app's sigstack here as that will invalidate the
* sigstack we're currently on.
*/
detach_info.app_sigstack = &info->app_sigstack;
#endif
detach_info.detached = detached;
detach_info.sigframe_xsp = xsp;
call_switch_stack(&detach_info, xsp, (void(*)(void*))sig_detach_go_native,
false/*free_initstack*/, false/*do not return*/);
ASSERT_NOT_REACHED();
}
/* Returns whether to pass on to app */
static bool
handle_suspend_signal(dcontext_t *dcontext, kernel_ucontext_t *ucxt,
sigframe_rt_t *frame)
{
os_thread_data_t *ostd = (os_thread_data_t *) dcontext->os_field;
kernel_sigset_t prevmask;
sig_full_cxt_t sc_full;
ASSERT(ostd != NULL);
if (ostd->terminate) {
/* PR 297902: exit this thread, without using the dstack */
/* For MacOS, we need a stack as 32-bit syscalls take args on the stack.
* We go ahead and use it for x86 too for simpler sysenter return.
* We don't have a lot of options: we're terminating, so we go ahead
* and use the app stack.
*/
byte *app_xsp;
if (IS_CLIENT_THREAD(dcontext))
app_xsp = (byte *) SIGCXT_FROM_UCXT(ucxt)->SC_XSP;
else
app_xsp = (byte *) get_mcontext(dcontext)->xsp;
LOG(THREAD, LOG_ASYNCH, 2, "handle_suspend_signal: exiting\n");
ASSERT(app_xsp != NULL);
notify_and_jmp_without_stack(&ostd->terminated, (byte*)dynamorio_sys_exit,
app_xsp);
ASSERT_NOT_REACHED();
return false;
}
if (!doing_detach &&
is_thread_currently_native(dcontext->thread_record) &&
!IS_CLIENT_THREAD(dcontext)
IF_APP_EXPORTS(&& !dr_api_exit)) {
if (!sig_take_over(ucxt))
return false;
ASSERT_NOT_REACHED(); /* else, shouldn't return */
}
/* If suspend_count is 0, we are not trying to suspend this thread
* (os_thread_resume() may have already decremented suspend_count to 0, but
* os_thread_suspend() will not send a signal until this thread unsets
* ostd->suspended, so not having a lock around the suspend_count read is
* ok), so pass signal to app.
* If we are trying or have already suspended this thread, our own
* os_thread_suspend() will not send a 2nd suspend signal until we are
* completely resumed, so we can distinguish app uses of SUSPEND_SIGNAL. We
* can't have a race between the read and write of suspended_sigcxt b/c
* signals are blocked. It's fine to have a race and reorder the app's
* signal w/ DR's.
*/
if (ostd->suspend_count == 0 || ostd->suspended_sigcxt != NULL)
return true; /* pass to app */
sig_full_initialize(&sc_full, ucxt);
ostd->suspended_sigcxt = &sc_full;
/* We're sitting on our sigaltstack w/ all signals blocked. We're
* going to stay here but unblock all signals so we don't lose any
* delivered while we're waiting. We're at a safe enough point to
* re-enter master_signal_handler(). We use a mutex in
* thread_{suspend,resume} to prevent our own re-suspension signal
* from arriving before we've re-blocked on the resume.
*/
sigprocmask_syscall(SIG_SETMASK, SIGMASK_FROM_UCXT(ucxt), &prevmask,
sizeof(ucxt->uc_sigmask));
LOG(THREAD, LOG_ASYNCH, 2, "handle_suspend_signal: suspended now\n");
/* We cannot use mutexes here as we have interrupted DR at an
* arbitrary point! Thus we can't use the event_t routines.
* However, the existing synch and check above prevent any
* re-entrance here, and our cond vars target just a single thread,
* so we can get away w/o a mutex.
*/
/* Notify os_thread_suspend that it can now return, as this thread is
* officially suspended now and is ready for thread_{get,set}_mcontext.
*/
ASSERT(ksynch_get_value(&ostd->suspended) == 0);
ksynch_set_value(&ostd->suspended, 1);
ksynch_wake_all(&ostd->suspended);
/* i#96/PR 295561: use futex(2) if available */
while (ksynch_get_value(&ostd->wakeup) == 0) {
        /* Wait only if the wakeup flag is not set to 1. The return value
         * doesn't matter because the flag will be re-checked.
*/
ksynch_wait(&ostd->wakeup, 0, 0);
if (ksynch_get_value(&ostd->wakeup) == 0) {
/* If it still has to wait, give up the cpu. */
os_thread_yield();
}
}
LOG(THREAD, LOG_ASYNCH, 2, "handle_suspend_signal: awake now\n");
/* re-block so our exit from master_signal_handler is not interrupted */
sigprocmask_syscall(SIG_SETMASK, &prevmask, NULL, sizeof(prevmask));
ostd->suspended_sigcxt = NULL;
/* Notify os_thread_resume that it can return now, which (assuming
* suspend_count is back to 0) means it's then safe to re-suspend.
*/
ksynch_set_value(&ostd->suspended, 0); /*reset prior to signalling os_thread_resume*/
ksynch_set_value(&ostd->resumed, 1);
ksynch_wake_all(&ostd->resumed);
if (ostd->retakeover) {
ostd->retakeover = false;
sig_take_over(ucxt); /* shouldn't return for this case */
ASSERT_NOT_REACHED();
} else if (ostd->do_detach) {
ostd->do_detach = false;
sig_detach(dcontext, frame, &ostd->detached); /* no return */
ASSERT_NOT_REACHED();
}
return false; /* do not pass to app */
}
/* PR 206278: for try/except we need to save the signal mask */
void
dr_setjmp_sigmask(dr_jmp_buf_t *buf)
{
/* i#226/PR 492568: we rely on the kernel storing the prior mask in the
* signal frame, so we do not need to store it on every setjmp, which
* can be a performance hit.
*/
#ifdef DEBUG
sigprocmask_syscall(SIG_SETMASK, NULL, &buf->sigmask, sizeof(buf->sigmask));
#endif
}
/* i#61/PR 211530: nudge on Linux.
* Determines whether this is a nudge signal, and if so queues up a nudge,
* or is an app signal. Returns whether to pass the signal on to the app.
*/
static bool
handle_nudge_signal(dcontext_t *dcontext, siginfo_t *siginfo, kernel_ucontext_t *ucxt)
{
sigcontext_t *sc = SIGCXT_FROM_UCXT(ucxt);
nudge_arg_t *arg = (nudge_arg_t *) siginfo;
instr_t instr;
char buf[MAX_INSTR_LENGTH];
/* Distinguish a nudge from an app signal. An app using libc sigqueue()
* will never have its signal mistaken as libc does not expose the siginfo_t
* and always passes 0 for si_errno, so we're only worried beyond our
* si_code check about an app using a raw syscall that is deliberately
* trying to fool us.
* While there is a lot of padding space in siginfo_t, the kernel doesn't
* copy it through on SYS_rt_sigqueueinfo so we don't have room for any
* dedicated magic numbers. The client id could function as a magic
* number for client nudges, but I don't think we want to kill the app
* if an external nudger types the client id wrong.
*/
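    /* Restating the check below (descriptive note, not from the original
     * source): on non-VMX86 builds a signal is treated as a nudge only if
     * si_signo == NUDGESIG_SIGNUM, si_code == SI_QUEUE, and si_errno != 0,
     * since libc's sigqueue() always sends si_errno == 0.
     */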
LOG(THREAD, LOG_ASYNCH, 2, "%s: sig=%d code=%d errno=%d\n", __FUNCTION__,
siginfo->si_signo, siginfo->si_code, siginfo->si_errno);
if (siginfo->si_signo != NUDGESIG_SIGNUM
/* PR 477454: remove the IF_NOT_VMX86 once we have nudge-arg support */
IF_NOT_VMX86(|| siginfo->si_code != SI_QUEUE
|| siginfo->si_errno == 0)) {
return true; /* pass to app */
}
#if defined(CLIENT_INTERFACE) && !defined(VMX86_SERVER)
DODEBUG({
if (TEST(NUDGE_GENERIC(client), arg->nudge_action_mask) &&
!is_valid_client_id(arg->client_id)) {
SYSLOG_INTERNAL_WARNING("received client nudge for invalid id=0x%x",
arg->client_id);
}
});
#endif
if (dynamo_exited || !dynamo_initialized || dcontext == NULL) {
/* Ignore the nudge: too early, or too late.
* Xref Windows handling of such cases in nudge.c: old case 5702, etc.
* We do this before the illegal-instr check b/c it's unsafe to decode
* if too early or too late.
*/
SYSLOG_INTERNAL_WARNING("too-early or too-late nudge: ignoring");
return false; /* do not pass to app */
}
/* As a further check, try to detect whether this was raised synchronously
* from a real illegal instr: though si_code for that should not be
* SI_QUEUE. It's possible a nudge happened to come at a bad instr before
* it faulted, or maybe the instr after a syscall or other wait spot is
* illegal, but we'll live with that risk.
*/
ASSERT(NUDGESIG_SIGNUM == SIGILL); /* else this check makes no sense */
instr_init(dcontext, &instr);
if (safe_read((byte *)sc->SC_XIP, sizeof(buf), buf) &&
(decode(dcontext, (byte *)buf, &instr) == NULL ||
/* check for ud2 (xref PR 523161) */
instr_is_undefined(&instr))) {
LOG(THREAD, LOG_ASYNCH, 2, "%s: real illegal instr @"PFX"\n", __FUNCTION__,
sc->SC_XIP);
DOLOG(2, LOG_ASYNCH, {
disassemble_with_bytes(dcontext, (byte *)sc->SC_XIP, THREAD);
});
instr_free(dcontext, &instr);
return true; /* pass to app */
}
instr_free(dcontext, &instr);
#ifdef VMX86_SERVER
/* Treat as a client nudge until we have PR 477454 */
if (siginfo->si_errno == 0) {
arg->version = NUDGE_ARG_CURRENT_VERSION;
arg->flags = 0;
arg->nudge_action_mask = NUDGE_GENERIC(client);
arg->client_id = 0;
arg->client_arg = 0;
}
#endif
LOG(THREAD, LOG_ASYNCH, 1,
"received nudge version=%u flags=0x%x mask=0x%x id=0x%08x arg=0x"
ZHEX64_FORMAT_STRING"\n",
arg->version, arg->flags, arg->nudge_action_mask,
arg->client_id, arg->client_arg);
SYSLOG_INTERNAL_INFO("received nudge mask=0x%x id=0x%08x arg=0x"ZHEX64_FORMAT_STRING,
arg->nudge_action_mask, arg->client_id, arg->client_arg);
/* We need to handle the nudge at a safe, nolinking spot */
if (safe_is_in_fcache(dcontext, (byte *)sc->SC_XIP, (byte*)sc->SC_XSP) &&
dcontext->interrupted_for_nudge == NULL) {
/* We unlink the interrupted fragment and skip any inlined syscalls to
* bound the nudge delivery time. If we already unlinked one we assume
* that's sufficient.
*/
fragment_t wrapper;
fragment_t *f = fragment_pclookup(dcontext, (byte *)sc->SC_XIP, &wrapper);
if (f != NULL) {
if (unlink_fragment_for_signal(dcontext, f, (byte *)sc->SC_XIP))
dcontext->interrupted_for_nudge = f;
}
}
/* No lock is needed since thread-private and this signal is blocked now */
nudge_add_pending(dcontext, arg);
return false; /* do not pass to app */
}
| 1 | 12,881 | style nit: missing leading '*' | DynamoRIO-dynamorio | c |
@@ -184,7 +184,7 @@ public class PrivateTransactionProcessor {
messageFrameStack.addFirst(initialFrame);
while (!messageFrameStack.isEmpty()) {
- process(messageFrameStack.peekFirst(), operationTracer);
+ process(messageFrameStack.peekFirst(), OperationTracer.NO_TRACING);
}
if (initialFrame.getState() == MessageFrame.State.COMPLETED_SUCCESS) { | 1 | /*
* Copyright ConsenSys AG.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*
* SPDX-License-Identifier: Apache-2.0
*/
package org.hyperledger.besu.ethereum.privacy;
import org.hyperledger.besu.ethereum.chain.Blockchain;
import org.hyperledger.besu.ethereum.core.Account;
import org.hyperledger.besu.ethereum.core.AccountState;
import org.hyperledger.besu.ethereum.core.Address;
import org.hyperledger.besu.ethereum.core.EvmAccount;
import org.hyperledger.besu.ethereum.core.Gas;
import org.hyperledger.besu.ethereum.core.Hash;
import org.hyperledger.besu.ethereum.core.MutableAccount;
import org.hyperledger.besu.ethereum.core.ProcessableBlockHeader;
import org.hyperledger.besu.ethereum.core.Transaction;
import org.hyperledger.besu.ethereum.core.Wei;
import org.hyperledger.besu.ethereum.core.WorldUpdater;
import org.hyperledger.besu.ethereum.mainnet.AbstractMessageProcessor;
import org.hyperledger.besu.ethereum.mainnet.MainnetTransactionValidator;
import org.hyperledger.besu.ethereum.mainnet.ValidationResult;
import org.hyperledger.besu.ethereum.processing.TransactionProcessingResult;
import org.hyperledger.besu.ethereum.transaction.TransactionInvalidReason;
import org.hyperledger.besu.ethereum.vm.BlockHashLookup;
import org.hyperledger.besu.ethereum.vm.Code;
import org.hyperledger.besu.ethereum.vm.GasCalculator;
import org.hyperledger.besu.ethereum.vm.MessageFrame;
import org.hyperledger.besu.ethereum.vm.OperationTracer;
import org.hyperledger.besu.ethereum.worldstate.DefaultMutablePrivateWorldStateUpdater;
import java.util.ArrayDeque;
import java.util.Deque;
import java.util.Optional;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.apache.tuweni.bytes.Bytes;
public class PrivateTransactionProcessor {
private static final Logger LOG = LogManager.getLogger();
@SuppressWarnings("unused")
private final GasCalculator gasCalculator;
@SuppressWarnings("unused")
private final MainnetTransactionValidator transactionValidator;
private final PrivateTransactionValidator privateTransactionValidator;
private final AbstractMessageProcessor contractCreationProcessor;
private final AbstractMessageProcessor messageCallProcessor;
private final int maxStackSize;
private final int createContractAccountVersion;
@SuppressWarnings("unused")
private final boolean clearEmptyAccounts;
public PrivateTransactionProcessor(
final GasCalculator gasCalculator,
final MainnetTransactionValidator transactionValidator,
final AbstractMessageProcessor contractCreationProcessor,
final AbstractMessageProcessor messageCallProcessor,
final boolean clearEmptyAccounts,
final int maxStackSize,
final int createContractAccountVersion,
final PrivateTransactionValidator privateTransactionValidator) {
this.gasCalculator = gasCalculator;
this.transactionValidator = transactionValidator;
this.contractCreationProcessor = contractCreationProcessor;
this.messageCallProcessor = messageCallProcessor;
this.clearEmptyAccounts = clearEmptyAccounts;
this.maxStackSize = maxStackSize;
this.createContractAccountVersion = createContractAccountVersion;
this.privateTransactionValidator = privateTransactionValidator;
}
@SuppressWarnings("unused")
public TransactionProcessingResult processTransaction(
final Blockchain blockchain,
final WorldUpdater publicWorldState,
final WorldUpdater privateWorldState,
final ProcessableBlockHeader blockHeader,
final Hash pmtHash,
final PrivateTransaction transaction,
final Address miningBeneficiary,
final OperationTracer operationTracer,
final BlockHashLookup blockHashLookup,
final Bytes privacyGroupId) {
try {
LOG.trace("Starting private execution of {}", transaction);
final Address senderAddress = transaction.getSender();
final EvmAccount maybePrivateSender = privateWorldState.getAccount(senderAddress);
final MutableAccount sender =
maybePrivateSender != null
? maybePrivateSender.getMutable()
: privateWorldState.createAccount(senderAddress, 0, Wei.ZERO).getMutable();
final ValidationResult<TransactionInvalidReason> validationResult =
privateTransactionValidator.validate(transaction, sender.getNonce(), false);
if (!validationResult.isValid()) {
return TransactionProcessingResult.invalid(validationResult);
}
final long previousNonce = sender.incrementNonce();
LOG.trace(
"Incremented private sender {} nonce ({} -> {})",
senderAddress,
previousNonce,
sender.getNonce());
final WorldUpdater mutablePrivateWorldStateUpdater =
new DefaultMutablePrivateWorldStateUpdater(publicWorldState, privateWorldState);
final Deque<MessageFrame> messageFrameStack = new ArrayDeque<>();
final MessageFrame.Builder commonMessageFrameBuilder =
MessageFrame.builder()
.messageFrameStack(messageFrameStack)
.maxStackSize(maxStackSize)
.blockchain(blockchain)
.worldState(mutablePrivateWorldStateUpdater)
.initialGas(Gas.MAX_VALUE)
.originator(senderAddress)
.gasPrice(transaction.getGasPrice())
.sender(senderAddress)
.value(transaction.getValue())
.apparentValue(transaction.getValue())
.blockHeader(blockHeader)
.depth(0)
.completer(__ -> {})
.miningBeneficiary(miningBeneficiary)
.blockHashLookup(blockHashLookup)
.transactionHash(pmtHash);
final MessageFrame initialFrame;
if (transaction.isContractCreation()) {
final Address privateContractAddress =
Address.privateContractAddress(senderAddress, previousNonce, privacyGroupId);
LOG.debug(
"Calculated contract address {} from sender {} with nonce {} and privacy group {}",
privateContractAddress.toString(),
senderAddress,
previousNonce,
privacyGroupId.toString());
initialFrame =
commonMessageFrameBuilder
.type(MessageFrame.Type.CONTRACT_CREATION)
.address(privateContractAddress)
.contract(privateContractAddress)
.contractAccountVersion(createContractAccountVersion)
.inputData(Bytes.EMPTY)
.code(new Code(transaction.getPayload()))
.build();
} else {
final Address to = transaction.getTo().get();
final Optional<Account> maybeContract = Optional.ofNullable(privateWorldState.get(to));
initialFrame =
commonMessageFrameBuilder
.type(MessageFrame.Type.MESSAGE_CALL)
.address(to)
.contract(to)
.contractAccountVersion(
maybeContract.map(AccountState::getVersion).orElse(Account.DEFAULT_VERSION))
.inputData(transaction.getPayload())
.code(new Code(maybeContract.map(AccountState::getCode).orElse(Bytes.EMPTY)))
.build();
}
messageFrameStack.addFirst(initialFrame);
while (!messageFrameStack.isEmpty()) {
process(messageFrameStack.peekFirst(), operationTracer);
}
if (initialFrame.getState() == MessageFrame.State.COMPLETED_SUCCESS) {
mutablePrivateWorldStateUpdater.commit();
}
if (initialFrame.getState() == MessageFrame.State.COMPLETED_SUCCESS) {
return TransactionProcessingResult.successful(
initialFrame.getLogs(), 0, 0, initialFrame.getOutputData(), ValidationResult.valid());
} else {
return TransactionProcessingResult.failed(
0,
0,
ValidationResult.invalid(TransactionInvalidReason.PRIVATE_TRANSACTION_FAILED),
initialFrame.getRevertReason());
}
} catch (final RuntimeException re) {
LOG.error("Critical Exception Processing Transaction", re);
return TransactionProcessingResult.invalid(
ValidationResult.invalid(
TransactionInvalidReason.INTERNAL_ERROR,
"Internal Error in Besu - " + re.toString()));
}
}
@SuppressWarnings("unused")
private static void clearEmptyAccounts(final WorldUpdater worldState) {
worldState.getTouchedAccounts().stream()
.filter(Account::isEmpty)
.forEach(a -> worldState.deleteAccount(a.getAddress()));
}
private void process(final MessageFrame frame, final OperationTracer operationTracer) {
final AbstractMessageProcessor executor = getMessageProcessor(frame.getType());
executor.process(frame, operationTracer);
}
private AbstractMessageProcessor getMessageProcessor(final MessageFrame.Type type) {
switch (type) {
case MESSAGE_CALL:
return messageCallProcessor;
case CONTRACT_CREATION:
return contractCreationProcessor;
default:
throw new IllegalStateException("Request for unsupported message processor type " + type);
}
}
@SuppressWarnings("unused")
private Gas refunded(final Transaction transaction, final Gas gasRemaining, final Gas gasRefund) {
    // Integer truncation takes care of the floor calculation needed after the divide.
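    // Worked example (illustrative numbers, not from the source): with
    // gasLimit = 100_000, gasRemaining = 40_000 and maxRefundQuotient = 2,
    // maxRefundAllowance = (100_000 - 40_000) / 2 = 30_000, so at most
    // 30_000 of the accumulated gasRefund is credited back on top of the
    // remaining gas.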
final Gas maxRefundAllowance =
Gas.of(transaction.getGasLimit())
.minus(gasRemaining)
.dividedBy(gasCalculator.getMaxRefundQuotient());
final Gas refundAllowance = maxRefundAllowance.min(gasRefund);
return gasRemaining.plus(refundAllowance);
}
}
 | 1 | 25,479 | The operationTracer is passed into the processTransaction() method. We should probably pass the no-tracing one in from where it is called. | hyperledger-besu | java |
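A minimal sketch of the reviewer's suggestion, assuming the no-op `OperationTracer.NO_TRACING` constant that upstream Besu provides; the `privateTransactionProcessor` variable and the argument names are illustrative, taken from the processTransaction() signature above:

    // the call site, not the processor, decides that no tracing is wanted
    final TransactionProcessingResult result =
        privateTransactionProcessor.processTransaction(
            blockchain,
            publicWorldState,
            privateWorldState,
            blockHeader,
            pmtHash,
            privateTransaction,
            miningBeneficiary,
            OperationTracer.NO_TRACING, // no-op tracer supplied by the caller instead of being hard-wired here
            blockHashLookup,
            privacyGroupId);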
@@ -125,13 +125,10 @@ const (
// SPI on the Arduino Nano 33.
var (
- SPI0 = SPI{Bus: sam.SERCOM0_SPI,
- SCK: SPI0_SCK_PIN,
- MOSI: SPI0_MOSI_PIN,
- MISO: SPI0_MISO_PIN,
- DOpad: spiTXPad2SCK3,
- DIpad: sercomRXPad0,
- PinMode: PinSERCOM}
+ SPI0 = SPI{
+ Bus: sam.SERCOM0_SPI,
+ SERCOM: 0,
+ }
)
// I2S pins | 1 | // +build sam,atsamd21,arduino_nano33
// This contains the pin mappings for the Arduino Nano33 IoT board.
//
// For more information, see: https://store.arduino.cc/nano-33-iot
//
package machine
import "device/sam"
// used to reset into bootloader
const RESET_MAGIC_VALUE = 0x07738135
// GPIO Pins
const (
RX0 Pin = PB23 // UART2 RX
TX1 Pin = PB22 // UART2 TX
D2 Pin = PB10 // PWM available
D3 Pin = PB11 // PWM available
D4 Pin = PA07
D5 Pin = PA05 // PWM available
D6 Pin = PA04 // PWM available
D7 Pin = PA06
D8 Pin = PA18
D9 Pin = PA20 // PWM available
D10 Pin = PA21 // PWM available
D11 Pin = PA16 // PWM available
D12 Pin = PA19 // PWM available
D13 Pin = PA17
)
// Analog pins
const (
A0 Pin = PA02 // ADC/AIN[0]
A1 Pin = PB02 // ADC/AIN[10]
A2 Pin = PA11 // ADC/AIN[19]
A3 Pin = PA10 // ADC/AIN[18],
A4 Pin = PB08 // ADC/AIN[2], SCL: SERCOM2/PAD[1]
A5 Pin = PB09 // ADC/AIN[3], SDA: SERCOM2/PAD[1]
A6 Pin = PA09 // ADC/AIN[17]
A7 Pin = PB03 // ADC/AIN[11]
)
const (
LED = D13
)
// NINA-W102 Pins
const (
NINA_MOSI Pin = PA12
NINA_MISO Pin = PA13
NINA_CS Pin = PA14
NINA_SCK Pin = PA15
NINA_GPIO0 Pin = PA27
NINA_RESETN Pin = PA08
NINA_ACK Pin = PA28
)
// UART0 aka USBCDC pins
const (
USBCDC_DM_PIN Pin = PA24
USBCDC_DP_PIN Pin = PA25
)
// UART1 on the Arduino Nano 33 connects to the onboard NINA-W102 WiFi chip.
var (
UART1 = UART{Bus: sam.SERCOM5_USART,
Buffer: NewRingBuffer(),
Mode: PinSERCOMAlt,
IRQVal: sam.IRQ_SERCOM5,
}
)
// UART1 pins
const (
UART_TX_PIN Pin = PA22
UART_RX_PIN Pin = PA23
)
//go:export SERCOM5_IRQHandler
func handleUART1() {
defaultUART1Handler()
}
// UART2 on the Arduino Nano 33 connects to the normal TX/RX pins.
var (
UART2 = UART{Bus: sam.SERCOM3_USART,
Buffer: NewRingBuffer(),
Mode: PinSERCOMAlt,
IRQVal: sam.IRQ_SERCOM3,
}
)
//go:export SERCOM3_IRQHandler
func handleUART2() {
// should reset IRQ
UART2.Receive(byte((UART2.Bus.DATA.Get() & 0xFF)))
UART2.Bus.INTFLAG.SetBits(sam.SERCOM_USART_INTFLAG_RXC)
}
// I2C pins
const (
SDA_PIN Pin = A4 // SDA: SERCOM4/PAD[1]
SCL_PIN Pin = A5 // SCL: SERCOM4/PAD[1]
)
// I2C on the Arduino Nano 33.
var (
I2C0 = I2C{Bus: sam.SERCOM4_I2CM,
SDA: SDA_PIN,
SCL: SCL_PIN,
PinMode: PinSERCOMAlt}
)
// SPI pins
const (
SPI0_SCK_PIN Pin = A2 // SCK: SERCOM0/PAD[3]
SPI0_MOSI_PIN Pin = A3 // MOSI: SERCOM0/PAD[2]
SPI0_MISO_PIN Pin = A6 // MISO: SERCOM0/PAD[1]
)
// SPI on the Arduino Nano 33.
var (
SPI0 = SPI{Bus: sam.SERCOM0_SPI,
SCK: SPI0_SCK_PIN,
MOSI: SPI0_MOSI_PIN,
MISO: SPI0_MISO_PIN,
DOpad: spiTXPad2SCK3,
DIpad: sercomRXPad0,
PinMode: PinSERCOM}
)
// I2S pins
const (
I2S_SCK_PIN Pin = PA10
I2S_SD_PIN Pin = PA08
I2S_WS_PIN = NoPin // TODO: figure out what this is on Arduino Nano 33.
)
// I2S on the Arduino Nano 33.
var (
I2S0 = I2S{Bus: sam.I2S}
)
| 1 | 7,752 | I believe this line was an error, it should have been `sercomRXPad1` to be consistent with the pin numbers. Resolving this ambiguity by following the pin numbers. | tinygo-org-tinygo | go |
@@ -0,0 +1,12 @@
+<div>
+ <p>
+ Hello,<br/>
+ The request, <%= @proposal.name %> (<%= @proposal.public_id %>), has been cancelled.
+ </p>
+</div>
+
+<p>
+ <strong>
+ <%= link_to "View This Request", proposal_url(@proposal), {target: 'C2'} %>
+ </strong>
+</p> | 1 | 1 | 13,389 | May be useful to have the reason here, though clearly not pressing. | 18F-C2 | rb |
|
@@ -136,7 +136,7 @@ class GridInterface(DictInterface):
'of arrays must match. %s found that arrays '
'along the %s dimension do not match.' %
(cls.__name__, vdim.name))
- stack = np.stack if any(is_dask(arr) for arr in arrays) else dask_array_module().stack
+ stack = dask_array_module().stack if any(is_dask(arr) for arr in arrays) else np.stack
new_data[vdim.name] = stack(arrays, -1)
return new_data
| 1 | from __future__ import absolute_import
import sys
import datetime as dt
from collections import OrderedDict, defaultdict, Iterable
try:
import itertools.izip as zip
except ImportError:
pass
import numpy as np
from .dictionary import DictInterface
from .interface import Interface, DataError
from ..dimension import dimension_name
from ..element import Element
from ..dimension import OrderedDict as cyODict
from ..ndmapping import NdMapping, item_check, sorted_context
from .. import util
from .interface import is_dask, dask_array_module, get_array_types
class GridInterface(DictInterface):
"""
Interface for simple dictionary-based dataset format using a
compressed representation that uses the cartesian product between
key dimensions. As with DictInterface, the dictionary keys correspond
    to the column (i.e. dimension) names and the values are NumPy arrays
representing the values in that column.
To use this compressed format, the key dimensions must be orthogonal
to one another with each key dimension specifying an axis of the
multidimensional space occupied by the value dimension data. For
    instance, given temperature recordings sampled regularly across
    the earth's surface, a list of N unique latitudes and M unique
longitudes can specify the position of NxM temperature samples.
"""
types = (dict, OrderedDict, cyODict)
datatype = 'grid'
gridded = True
@classmethod
def init(cls, eltype, data, kdims, vdims):
if kdims is None:
kdims = eltype.kdims
if vdims is None:
vdims = eltype.vdims
if not vdims:
raise ValueError('GridInterface interface requires at least '
'one value dimension.')
ndims = len(kdims)
dimensions = [dimension_name(d) for d in kdims+vdims]
if isinstance(data, tuple):
data = {d: v for d, v in zip(dimensions, data)}
elif isinstance(data, list) and data == []:
data = OrderedDict([(d, []) for d in dimensions])
elif not any(isinstance(data, tuple(t for t in interface.types if t is not None))
for interface in cls.interfaces.values()):
data = {k: v for k, v in zip(dimensions, zip(*data))}
elif isinstance(data, np.ndarray):
if data.ndim == 1:
if eltype._auto_indexable_1d and len(kdims)+len(vdims)>1:
data = np.column_stack([np.arange(len(data)), data])
else:
data = np.atleast_2d(data).T
data = {k: data[:,i] for i,k in enumerate(dimensions)}
elif isinstance(data, list) and data == []:
data = {d: np.array([]) for d in dimensions[:ndims]}
data.update({d: np.empty((0,) * ndims) for d in dimensions[ndims:]})
elif not isinstance(data, dict):
raise TypeError('GridInterface must be instantiated as a '
'dictionary or tuple')
for dim in kdims+vdims:
name = dimension_name(dim)
if name not in data:
raise ValueError("Values for dimension %s not found" % dim)
if not isinstance(data[name], get_array_types()):
data[name] = np.array(data[name])
kdim_names = [dimension_name(d) for d in kdims]
vdim_names = [dimension_name(d) for d in vdims]
expected = tuple([len(data[kd]) for kd in kdim_names])
irregular_shape = data[kdim_names[0]].shape if kdim_names else ()
valid_shape = irregular_shape if len(irregular_shape) > 1 else expected[::-1]
shapes = tuple([data[kd].shape for kd in kdim_names])
for vdim in vdim_names:
shape = data[vdim].shape
error = DataError if len(shape) > 1 else ValueError
if (not expected and shape == (1,)) or (len(set((shape,)+shapes)) == 1 and len(shape) > 1):
# If empty or an irregular mesh
pass
elif len(shape) != len(expected):
raise error('The shape of the %s value array does not '
'match the expected dimensionality indicated '
'by the key dimensions. Expected %d-D array, '
'found %d-D array.' % (vdim, len(expected), len(shape)))
elif any((s!=e and (s+1)!=e) for s, e in zip(shape, valid_shape)):
raise error('Key dimension values and value array %s '
'shapes do not match. Expected shape %s, '
'actual shape: %s' % (vdim, valid_shape, shape), cls)
return data, {'kdims':kdims, 'vdims':vdims}, {}
@classmethod
def concat(cls, datasets, dimensions, vdims):
from . import Dataset
with sorted_context(False):
datasets = NdMapping(datasets, kdims=dimensions)
datasets = datasets.clone([(k, v.data if isinstance(v, Dataset) else v)
for k, v in datasets.data.items()])
if len(datasets.kdims) > 1:
items = datasets.groupby(datasets.kdims[:-1]).data.items()
return cls.concat([(k, cls.concat(v, v.kdims, vdims=vdims)) for k, v in items],
datasets.kdims[:-1], vdims)
return cls.concat_dim(datasets, datasets.kdims[0], vdims)
@classmethod
def concat_dim(cls, datasets, dim, vdims):
values, grids = zip(*datasets.items())
new_data = {k: v for k, v in grids[0].items() if k not in vdims}
new_data[dim.name] = np.array(values)
for vdim in vdims:
arrays = [grid[vdim.name] for grid in grids]
shapes = set(arr.shape for arr in arrays)
if len(shapes) > 1:
raise DataError('When concatenating gridded data the shape '
'of arrays must match. %s found that arrays '
'along the %s dimension do not match.' %
(cls.__name__, vdim.name))
stack = np.stack if any(is_dask(arr) for arr in arrays) else dask_array_module().stack
new_data[vdim.name] = stack(arrays, -1)
return new_data
@classmethod
def irregular(cls, dataset, dim):
return dataset.data[dimension_name(dim)].ndim > 1
@classmethod
def isscalar(cls, dataset, dim):
values = cls.values(dataset, dim, expanded=False)
return values.shape in ((), (1,)) or len(np.unique(values)) == 1
@classmethod
def validate(cls, dataset, vdims=True):
Interface.validate(dataset, vdims)
@classmethod
def dimension_type(cls, dataset, dim):
if dim in dataset.dimensions():
arr = cls.values(dataset, dim, False, False)
else:
return None
return arr.dtype.type
@classmethod
def shape(cls, dataset, gridded=False):
shape = dataset.data[dataset.vdims[0].name].shape
if gridded:
return shape
else:
return (np.product(shape, dtype=np.intp), len(dataset.dimensions()))
@classmethod
def length(cls, dataset):
return cls.shape(dataset)[0]
@classmethod
def _infer_interval_breaks(cls, coord, axis=0):
"""
>>> GridInterface._infer_interval_breaks(np.arange(5))
array([-0.5, 0.5, 1.5, 2.5, 3.5, 4.5])
>>> GridInterface._infer_interval_breaks([[0, 1], [3, 4]], axis=1)
array([[-0.5, 0.5, 1.5],
[ 2.5, 3.5, 4.5]])
"""
coord = np.asarray(coord)
if sys.version_info.major == 2 and len(coord) and isinstance(coord[0], (dt.datetime, dt.date)):
# np.diff does not work on datetimes in python 2
coord = coord.astype('datetime64')
deltas = 0.5 * np.diff(coord, axis=axis)
first = np.take(coord, [0], axis=axis) - np.take(deltas, [0], axis=axis)
last = np.take(coord, [-1], axis=axis) + np.take(deltas, [-1], axis=axis)
trim_last = tuple(slice(None, -1) if n == axis else slice(None)
for n in range(coord.ndim))
return np.concatenate([first, coord[trim_last] + deltas, last], axis=axis)
@classmethod
def coords(cls, dataset, dim, ordered=False, expanded=False, edges=False):
"""
Returns the coordinates along a dimension. Ordered ensures
coordinates are in ascending order and expanded creates
ND-array matching the dimensionality of the dataset.
"""
dim = dataset.get_dimension(dim, strict=True)
irregular = cls.irregular(dataset, dim)
if irregular or expanded:
if irregular:
data = dataset.data[dim.name]
else:
data = util.expand_grid_coords(dataset, dim)
if edges and data.shape == dataset.data[dataset.vdims[0].name].shape:
data = cls._infer_interval_breaks(data, axis=1)
data = cls._infer_interval_breaks(data, axis=0)
return data
data = dataset.data[dim.name]
if ordered and np.all(data[1:] < data[:-1]):
data = data[::-1]
shape = cls.shape(dataset, True)
if dim in dataset.kdims:
idx = dataset.get_dimension_index(dim)
isedges = (dim in dataset.kdims and len(shape) == dataset.ndims
and len(data) == (shape[dataset.ndims-idx-1]+1))
else:
isedges = False
if edges and not isedges:
data = cls._infer_interval_breaks(data)
elif not edges and isedges:
data = data[:-1] + np.diff(data)/2.
return data
@classmethod
def canonicalize(cls, dataset, data, data_coords=None, virtual_coords=[]):
"""
Canonicalize takes an array of values as input and reorients
and transposes it to match the canonical format expected by
plotting functions. In certain cases the dimensions defined
via the kdims of an Element may not match the dimensions of
the underlying data. A set of data_coords may be passed in to
define the dimensionality of the data, which can then be used
to np.squeeze the data to remove any constant dimensions. If
the data is also irregular, i.e. contains multi-dimensional
coordinates, a set of virtual_coords can be supplied, required
by some interfaces (e.g. xarray) to index irregular datasets
with a virtual integer index. This ensures these coordinates
are not simply dropped.
"""
if data_coords is None:
data_coords = dataset.dimensions('key', label='name')[::-1]
# Transpose data
dims = [name for name in data_coords
if isinstance(cls.coords(dataset, name), get_array_types())]
dropped = [dims.index(d) for d in dims
if d not in dataset.kdims+virtual_coords]
if dropped:
data = np.squeeze(data, axis=tuple(dropped))
if not any(cls.irregular(dataset, d) for d in dataset.kdims):
inds = [dims.index(kd.name) for kd in dataset.kdims]
inds = [i - sum([1 for d in dropped if i>=d]) for i in inds]
if inds:
data = data.transpose(inds[::-1])
# Reorient data
invert = False
slices = []
for d in dataset.kdims[::-1]:
coords = cls.coords(dataset, d)
if np.all(coords[1:] < coords[:-1]) and not coords.ndim > 1:
slices.append(slice(None, None, -1))
invert = True
else:
slices.append(slice(None))
data = data[tuple(slices)] if invert else data
# Allow lower dimensional views into data
if len(dataset.kdims) < 2:
data = data.flatten()
return data
@classmethod
def invert_index(cls, index, length):
if np.isscalar(index):
return length - index
elif isinstance(index, slice):
start, stop = index.start, index.stop
new_start, new_stop = None, None
if start is not None:
new_stop = length - start
if stop is not None:
new_start = length - stop
return slice(new_start-1, new_stop-1)
elif isinstance(index, Iterable):
new_index = []
for ind in index:
new_index.append(length-ind)
return new_index
@classmethod
def ndloc(cls, dataset, indices):
selected = {}
adjusted_inds = []
all_scalar = True
for i, (kd, ind) in enumerate(zip(dataset.kdims[::-1], indices)):
coords = cls.coords(dataset, kd.name, True)
if np.isscalar(ind):
ind = [ind]
else:
all_scalar = False
selected[kd.name] = coords[ind]
adjusted_inds.append(ind)
for kd in dataset.kdims:
if kd.name not in selected:
coords = cls.coords(dataset, kd.name)
selected[kd.name] = coords
all_scalar = False
for d in dataset.dimensions():
if d in dataset.kdims and not cls.irregular(dataset, d):
continue
arr = cls.values(dataset, d, flat=False, compute=False)
if all_scalar and len(dataset.vdims) == 1:
return arr[tuple(ind[0] for ind in adjusted_inds)]
selected[d.name] = arr[tuple(adjusted_inds)]
return tuple(selected[d.name] for d in dataset.dimensions())
@classmethod
def values(cls, dataset, dim, expanded=True, flat=True, compute=True):
dim = dataset.get_dimension(dim, strict=True)
if dim in dataset.vdims or dataset.data[dim.name].ndim > 1:
data = dataset.data[dim.name]
data = cls.canonicalize(dataset, data)
da = dask_array_module()
if compute and da and isinstance(data, da.Array):
data = data.compute()
return data.T.flatten() if flat else data
elif expanded:
data = cls.coords(dataset, dim.name, expanded=True)
return data.T.flatten() if flat else data
else:
return cls.coords(dataset, dim.name, ordered=True)
@classmethod
def groupby(cls, dataset, dim_names, container_type, group_type, **kwargs):
# Get dimensions information
dimensions = [dataset.get_dimension(d, strict=True) for d in dim_names]
if 'kdims' in kwargs:
kdims = kwargs['kdims']
else:
kdims = [kdim for kdim in dataset.kdims if kdim not in dimensions]
kwargs['kdims'] = kdims
invalid = [d for d in dimensions if dataset.data[d.name].ndim > 1]
if invalid:
if len(invalid) == 1: invalid = "'%s'" % invalid[0]
raise ValueError("Cannot groupby irregularly sampled dimension(s) %s."
% invalid)
# Update the kwargs appropriately for Element group types
group_kwargs = {}
group_type = dict if group_type == 'raw' else group_type
if issubclass(group_type, Element):
group_kwargs.update(util.get_param_values(dataset))
else:
kwargs.pop('kdims')
group_kwargs.update(kwargs)
drop_dim = any(d not in group_kwargs['kdims'] for d in kdims)
# Find all the keys along supplied dimensions
keys = [cls.coords(dataset, d.name) for d in dimensions]
transpose = [dataset.ndims-dataset.kdims.index(kd)-1 for kd in kdims]
transpose += [i for i in range(dataset.ndims) if i not in transpose]
# Iterate over the unique entries applying selection masks
grouped_data = []
for unique_key in zip(*util.cartesian_product(keys)):
select = dict(zip(dim_names, unique_key))
if drop_dim:
group_data = dataset.select(**select)
group_data = group_data if np.isscalar(group_data) else group_data.columns()
else:
group_data = cls.select(dataset, **select)
if np.isscalar(group_data) or (isinstance(group_data, get_array_types()) and group_data.shape == ()):
group_data = {dataset.vdims[0].name: np.atleast_1d(group_data)}
for dim, v in zip(dim_names, unique_key):
group_data[dim] = np.atleast_1d(v)
elif not drop_dim:
if isinstance(group_data, get_array_types()):
group_data = {dataset.vdims[0].name: group_data}
for vdim in dataset.vdims:
data = group_data[vdim.name]
data = data.transpose(transpose[::-1])
group_data[vdim.name] = np.squeeze(data)
group_data = group_type(group_data, **group_kwargs)
grouped_data.append((tuple(unique_key), group_data))
if issubclass(container_type, NdMapping):
with item_check(False):
return container_type(grouped_data, kdims=dimensions)
else:
return container_type(grouped_data)
@classmethod
def key_select_mask(cls, dataset, values, ind):
if isinstance(ind, tuple):
ind = slice(*ind)
if isinstance(ind, get_array_types()):
mask = ind
elif isinstance(ind, slice):
mask = True
if ind.start is not None:
mask &= ind.start <= values
if ind.stop is not None:
mask &= values < ind.stop
# Expand empty mask
if mask is True:
mask = np.ones(values.shape, dtype=np.bool)
elif isinstance(ind, (set, list)):
iter_slcs = []
for ik in ind:
iter_slcs.append(values == ik)
mask = np.logical_or.reduce(iter_slcs)
elif callable(ind):
mask = ind(values)
elif ind is None:
mask = None
else:
index_mask = values == ind
if (dataset.ndims == 1 or dataset._binned) and np.sum(index_mask) == 0:
data_index = np.argmin(np.abs(values - ind))
mask = np.zeros(len(values), dtype=np.bool)
mask[data_index] = True
else:
mask = index_mask
if mask is None:
mask = np.ones(values.shape, dtype=bool)
return mask
@classmethod
def select(cls, dataset, selection_mask=None, **selection):
dimensions = dataset.kdims
val_dims = [vdim for vdim in dataset.vdims if vdim in selection]
if val_dims:
raise IndexError('Cannot slice value dimensions in compressed format, '
'convert to expanded format before slicing.')
indexed = cls.indexed(dataset, selection)
full_selection = [(d, selection.get(d.name, selection.get(d.label)))
for d in dimensions]
data = {}
value_select = []
for i, (dim, ind) in enumerate(full_selection):
irregular = cls.irregular(dataset, dim)
values = cls.coords(dataset, dim, irregular)
mask = cls.key_select_mask(dataset, values, ind)
if irregular:
if np.isscalar(ind) or isinstance(ind, (set, list)):
raise IndexError("Indexing not supported for irregularly "
"sampled data. %s value along %s dimension."
"must be a slice or 2D boolean mask."
% (ind, dim))
mask = mask.max(axis=i)
elif dataset._binned:
edges = cls.coords(dataset, dim, False, edges=True)
inds = np.argwhere(mask)
if np.isscalar(ind):
emin, emax = edges.min(), edges.max()
if ind < emin:
raise IndexError("Index %s less than lower bound "
"of %s for %s dimension." % (ind, emin, dim))
elif ind >= emax:
raise IndexError("Index %s more than or equal to upper bound "
"of %s for %s dimension." % (ind, emax, dim))
idx = max([np.digitize([ind], edges)[0]-1, 0])
mask = np.zeros(len(values), dtype=np.bool)
mask[idx] = True
values = edges[idx:idx+2]
elif len(inds):
values = edges[inds.min(): inds.max()+2]
else:
values = edges[0:0]
else:
values = values[mask]
values, mask = np.asarray(values), np.asarray(mask)
value_select.append(mask)
data[dim.name] = np.array([values]) if np.isscalar(values) else values
int_inds = [np.argwhere(v) for v in value_select][::-1]
index = np.ix_(*[np.atleast_1d(np.squeeze(ind)) if ind.ndim > 1 else np.atleast_1d(ind)
for ind in int_inds])
for kdim in dataset.kdims:
if cls.irregular(dataset, dim):
da = dask_array_module()
if da and isinstance(dataset.data[kdim.name], da.Array):
data[kdim.name] = dataset.data[kdim.name].vindex[index]
else:
data[kdim.name] = np.asarray(data[kdim.name])[index]
for vdim in dataset.vdims:
da = dask_array_module()
if da and isinstance(dataset.data[vdim.name], da.Array):
data[vdim.name] = dataset.data[vdim.name].vindex[index]
else:
data[vdim.name] = np.asarray(dataset.data[vdim.name])[index]
if indexed:
if len(dataset.vdims) == 1:
da = dask_array_module()
arr = np.squeeze(data[dataset.vdims[0].name])
if da and isinstance(arr, da.Array):
arr = arr.compute()
return arr if np.isscalar(arr) else arr[()]
else:
return np.array([np.squeeze(data[vd.name])
for vd in dataset.vdims])
return data
@classmethod
def sample(cls, dataset, samples=[]):
"""
Samples the gridded data into dataset of samples.
"""
ndims = dataset.ndims
dimensions = dataset.dimensions(label='name')
arrays = [dataset.data[vdim.name] for vdim in dataset.vdims]
data = defaultdict(list)
for sample in samples:
if np.isscalar(sample): sample = [sample]
if len(sample) != ndims:
sample = [sample[i] if i < len(sample) else None
for i in range(ndims)]
sampled, int_inds = [], []
for d, ind in zip(dimensions, sample):
cdata = dataset.data[d]
mask = cls.key_select_mask(dataset, cdata, ind)
inds = np.arange(len(cdata)) if mask is None else np.argwhere(mask)
int_inds.append(inds)
sampled.append(cdata[mask])
for d, arr in zip(dimensions, np.meshgrid(*sampled)):
data[d].append(arr)
for vdim, array in zip(dataset.vdims, arrays):
da = dask_array_module()
flat_index = np.ravel_multi_index(tuple(int_inds)[::-1], array.shape)
if da and isinstance(array, da.Array):
data[vdim.name].append(array.flatten().vindex[tuple(flat_index)])
else:
data[vdim.name].append(array.flat[flat_index])
concatenated = {d: np.concatenate(arrays).flatten() for d, arrays in data.items()}
return concatenated
@classmethod
def aggregate(cls, dataset, kdims, function, **kwargs):
kdims = [dimension_name(kd) for kd in kdims]
data = {kdim: dataset.data[kdim] for kdim in kdims}
axes = tuple(dataset.ndims-dataset.get_dimension_index(kdim)-1
for kdim in dataset.kdims if kdim not in kdims)
da = dask_array_module()
dropped = []
for vdim in dataset.vdims:
values = dataset.data[vdim.name]
atleast_1d = da.atleast_1d if is_dask(values) else np.atleast_1d
try:
data[vdim.name] = atleast_1d(function(values, axis=axes, **kwargs))
except TypeError:
dropped.append(vdim)
return data, dropped
@classmethod
def reindex(cls, dataset, kdims, vdims):
dropped_kdims = [kd for kd in dataset.kdims if kd not in kdims]
dropped_vdims = ([vdim for vdim in dataset.vdims
if vdim not in vdims] if vdims else [])
constant = {}
for kd in dropped_kdims:
vals = cls.values(dataset, kd.name, expanded=False)
if len(vals) == 1:
constant[kd.name] = vals[0]
data = {k: values for k, values in dataset.data.items()
if k not in dropped_kdims+dropped_vdims}
if len(constant) == len(dropped_kdims):
joined_dims = kdims+dropped_kdims
axes = tuple(dataset.ndims-dataset.kdims.index(d)-1
for d in joined_dims)
dropped_axes = tuple(dataset.ndims-joined_dims.index(d)-1
for d in dropped_kdims)
for vdim in vdims:
vdata = data[vdim.name]
if len(axes) > 1:
vdata = vdata.transpose(axes[::-1])
if dropped_axes:
vdata = np.squeeze(vdata, axis=dropped_axes)
data[vdim.name] = vdata
return data
elif dropped_kdims:
return tuple(dataset.columns(kdims+vdims).values())
return data
@classmethod
def add_dimension(cls, dataset, dimension, dim_pos, values, vdim):
if not vdim:
raise Exception("Cannot add key dimension to a dense representation.")
dim = dimension_name(dimension)
return dict(dataset.data, **{dim: values})
@classmethod
def sort(cls, dataset, by=[], reverse=False):
if not by or by in [dataset.kdims, dataset.dimensions()]:
return dataset.data
else:
raise Exception('Compressed format cannot be sorted, either instantiate '
'in the desired order or use the expanded format.')
@classmethod
def iloc(cls, dataset, index):
rows, cols = index
scalar = False
if np.isscalar(cols):
scalar = np.isscalar(rows)
cols = [dataset.get_dimension(cols, strict=True)]
elif isinstance(cols, slice):
cols = dataset.dimensions()[cols]
else:
cols = [dataset.get_dimension(d, strict=True) for d in cols]
if np.isscalar(rows):
rows = [rows]
new_data = []
for d in cols:
new_data.append(cls.values(dataset, d, compute=False)[rows])
if scalar:
da = dask_array_module()
if new_data and isinstance(new_data[0], da.Array):
return new_data[0].compute()[0]
return new_data[0][0]
return tuple(new_data)
@classmethod
def range(cls, dataset, dimension):
if dataset._binned and dimension in dataset.kdims:
expanded = cls.irregular(dataset, dimension)
column = cls.coords(dataset, dimension, expanded=expanded, edges=True)
else:
column = cls.values(dataset, dimension, expanded=False, flat=False)
da = dask_array_module()
if column.dtype.kind == 'M':
dmin, dmax = column.min(), column.max()
if da and isinstance(column, da.Array):
return da.compute(dmin, dmax)
return dmin, dmax
elif len(column) == 0:
return np.NaN, np.NaN
else:
try:
dmin, dmax = (np.nanmin(column), np.nanmax(column))
if da and isinstance(column, da.Array):
return da.compute(dmin, dmax)
return dmin, dmax
except TypeError:
column.sort()
return column[0], column[-1]
Interface.register(GridInterface)
 | 1 | 21,958 | These were inverted before: `np.stack` was being selected when the arrays were dask arrays, and the dask `stack` when they were plain NumPy arrays. | holoviz-holoviews | py |
| 1 | 21,958 | These were inverted before | holoviz-holoviews | py |
@@ -4227,6 +4227,11 @@ public class DBService {
return null;
}
+ public Map<String, DomainRoleMember> getReviewMembers() {
+ // Currently unimplemented
+ return new HashMap<>();
+ }
+
public void processExpiredPendingMembers(int pendingRoleMemberLifespan, final String monitorIdentity) {
final String auditRef = "Expired - auto reject"; | 1 | /*
* Copyright 2016 Yahoo Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.yahoo.athenz.zms;
import com.google.common.cache.Cache;
import com.google.common.cache.CacheBuilder;
import com.yahoo.athenz.auth.AuthorityConsts;
import com.yahoo.athenz.auth.Principal;
import com.yahoo.athenz.auth.util.AthenzUtils;
import com.yahoo.athenz.common.server.audit.AuditReferenceValidator;
import com.yahoo.athenz.common.server.log.AuditLogMsgBuilder;
import com.yahoo.athenz.common.server.log.AuditLogger;
import com.yahoo.athenz.common.server.util.StringUtils;
import com.yahoo.athenz.zms.store.AthenzDomain;
import com.yahoo.athenz.zms.store.ObjectStore;
import com.yahoo.athenz.zms.store.ObjectStoreConnection;
import com.yahoo.athenz.zms.utils.ZMSUtils;
import com.yahoo.rdl.JSON;
import com.yahoo.rdl.Timestamp;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.*;
import java.util.concurrent.TimeUnit;
import java.util.stream.Collectors;
public class DBService {
ObjectStore store;
BitSet auditRefSet;
AuditLogger auditLogger;
Cache<String, DataCache> cacheStore;
QuotaChecker quotaCheck;
int retrySleepTime;
int defaultRetryCount;
int defaultOpTimeout;
ZMSConfig zmsConfig;
private static final Logger LOG = LoggerFactory.getLogger(DBService.class);
public static int AUDIT_TYPE_ROLE = 0;
public static int AUDIT_TYPE_POLICY = 1;
public static int AUDIT_TYPE_SERVICE = 2;
public static int AUDIT_TYPE_DOMAIN = 3;
public static int AUDIT_TYPE_ENTITY = 4;
public static int AUDIT_TYPE_TENANCY = 5;
public static int AUDIT_TYPE_TEMPLATE = 6;
private static final String ROLE_PREFIX = "role.";
private static final String POLICY_PREFIX = "policy.";
private static final String TEMPLATE_DOMAIN_NAME = "_domain_";
AuditReferenceValidator auditReferenceValidator;
public DBService(ObjectStore store, AuditLogger auditLogger, ZMSConfig zmsConfig, AuditReferenceValidator auditReferenceValidator) {
this.store = store;
this.zmsConfig = zmsConfig;
this.auditLogger = auditLogger;
cacheStore = CacheBuilder.newBuilder().concurrencyLevel(25).build();
// default timeout in seconds for object store commands
defaultOpTimeout = Integer.parseInt(System.getProperty(ZMSConsts.ZMS_PROP_STORE_OP_TIMEOUT, "60"));
if (defaultOpTimeout < 0) {
defaultOpTimeout = 60;
}
if (this.store != null) {
this.store.setOperationTimeout(defaultOpTimeout);
}
// retrieve the concurrent update retry count. If we're given an invalid negative
// value for count, we'll default back to our default configured value of 120 retries
        // which would result in up to 30 seconds of sleeping, 250ms each time
defaultRetryCount = Integer.parseInt(System.getProperty(ZMSConsts.ZMS_PROP_CONFLICT_RETRY_COUNT, "120"));
if (defaultRetryCount < 0) {
defaultRetryCount = 120;
}
retrySleepTime = Integer.parseInt(System.getProperty(ZMSConsts.ZMS_PROP_CONFLICT_RETRY_SLEEP_TIME, "250"));
if (retrySleepTime < 0) {
retrySleepTime = 250;
}
// check what objects we're going to enforce audit reference flag
setAuditRefObjectBits();
this.auditReferenceValidator = auditReferenceValidator;
// create our quota checker class
quotaCheck = new QuotaChecker();
}
void setAuditRefObjectBits() {
auditRefSet = new BitSet();
// by default we're only going to handle audit enabled roles
// the value is a comma separated list of supported objects:
// role, policy, service, domain, entity
final String auditCheck = System.getProperty(ZMSConsts.ZMS_PROP_AUDIT_REF_CHECK_OBJECTS, "role");
String[] objects = auditCheck.split(",");
for (String object : objects) {
switch (object) {
case ZMSConsts.ZMS_AUDIT_TYPE_ROLE:
auditRefSet.set(AUDIT_TYPE_ROLE);
break;
case ZMSConsts.ZMS_AUDIT_TYPE_POLICY:
auditRefSet.set(AUDIT_TYPE_POLICY);
break;
case ZMSConsts.ZMS_AUDIT_TYPE_SERVICE:
auditRefSet.set(AUDIT_TYPE_SERVICE);
break;
case ZMSConsts.ZMS_AUDIT_TYPE_DOMAIN:
auditRefSet.set(AUDIT_TYPE_DOMAIN);
break;
case ZMSConsts.ZMS_AUDIT_TYPE_ENTITY:
auditRefSet.set(AUDIT_TYPE_ENTITY);
break;
case ZMSConsts.ZMS_AUDIT_TYPE_TENANCY:
auditRefSet.set(AUDIT_TYPE_TENANCY);
break;
case ZMSConsts.ZMS_AUDIT_TYPE_TEMPLATE:
auditRefSet.set(AUDIT_TYPE_TEMPLATE);
break;
}
}
}
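    // Usage sketch (illustrative, not from the source): the enforced object
    // types come from a single comma-separated JVM property, so, assuming the
    // ZMS_AUDIT_TYPE_* constants hold the lowercase names listed above, a
    // deployment could enable checks for roles, policies and domains with:
    //
    //     System.setProperty(ZMSConsts.ZMS_PROP_AUDIT_REF_CHECK_OBJECTS, "role,policy,domain");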
static class DataCache {
AthenzDomain athenzDomain;
long modTime;
DataCache(AthenzDomain athenzDomain, long modTime) {
this.athenzDomain = athenzDomain;
this.modTime = modTime;
}
AthenzDomain getAthenzDomain() {
return athenzDomain;
}
long getModTime() {
return modTime;
}
}
AthenzDomain getAthenzDomainFromCache(ObjectStoreConnection con, String domainName) {
DataCache data = cacheStore.getIfPresent(domainName);
if (data == null) {
return null;
}
// if we have a match for a given domain name then we're going
        // to check if the last modified domain timestamp matches what's
        // in the db. If there is no match, then we'll take the hit
        // of an extra db read; however, in most cases the domain data is not
        // changed that often, so we'll satisfy the request with just
        // verifying the last modification time as opposed to reading the
        // full domain data from db
long modTime = 0;
try {
modTime = con.getDomainModTimestamp(domainName);
} catch (ResourceException ignored) {
// if the exception is due to timeout or we were not able
// to get a connection to the object store then we're
// going to use our cache as is instead of rejecting
// the operation
}
        // if our cache data is the same as or newer than the db then return
// data from the cache (it could be newer if we just updated
// the cache based on write db but during read, the server
// hasn't replicated the data yet)
if (data.getModTime() >= modTime) {
return data.getAthenzDomain();
}
cacheStore.invalidate(domainName);
return null;
}
String getPrincipalName(ResourceContext ctx) {
if (ctx == null) {
return null;
}
Principal principal = ((RsrcCtxWrapper) ctx).principal();
if (principal == null) {
return null;
}
return principal.getFullName();
}
void saveChanges(ObjectStoreConnection con, String domainName) {
// we're first going to commit our changes which will
// also set the connection in auto-commit mode. we are
// going to change the domain timestamp in auto-commit
        // mode so that we don't have contention
con.commitChanges();
con.updateDomainModTimestamp(domainName);
cacheStore.invalidate(domainName);
}
void auditLogRequest(ResourceContext ctx, String domainName, String auditRef,
String caller, String operation, String entityName, String auditDetails) {
auditLogger.log(getAuditLogMsgBuilder(ctx, domainName, auditRef, caller, operation, entityName, auditDetails));
}
void auditLogRequest(String principal, String domainName, String auditRef,
String caller, String operation, String entityName, String auditDetails) {
AuditLogMsgBuilder msgBldr = getAuditLogMsgBuilder(null, domainName, auditRef, caller, operation, entityName, auditDetails);
msgBldr.who(principal);
auditLogger.log(msgBldr);
}
private AuditLogMsgBuilder getAuditLogMsgBuilder(ResourceContext ctx, String domainName,
String auditRef, String caller, String operation, String entityName, String auditDetails) {
AuditLogMsgBuilder msgBldr = ZMSUtils.getAuditLogMsgBuilder(ctx, auditLogger,
domainName, auditRef, caller, operation);
msgBldr.when(Timestamp.fromCurrentTime().toString()).whatEntity(entityName);
if (auditDetails != null) {
msgBldr.whatDetails(auditDetails);
}
return msgBldr;
}
Domain makeDomain(ResourceContext ctx, Domain domain, List<String> adminUsers,
List<String> solutionTemplates, String auditRef) {
final String caller = "makedomain";
final String domainName = domain.getName();
String principalName = getPrincipalName(ctx);
if (principalName == null) {
principalName = "system-account";
}
// our exception handling code does the check for retry count
// and throws the exception it had received when the retry
// count reaches 0
for (int retryCount = defaultRetryCount; ; retryCount--) {
// get our connection object
try (ObjectStoreConnection con = store.getConnection(false, true)) {
// before adding this domain we need to verify our
// quota check for sub-domains
quotaCheck.checkSubdomainQuota(con, domainName, caller);
boolean objectsInserted = con.insertDomain(domain);
if (!objectsInserted) {
con.rollbackChanges();
throw ZMSUtils.requestError("makeDomain: Cannot create domain: " +
domainName + " - already exists", caller);
}
StringBuilder auditDetails = new StringBuilder(ZMSConsts.STRING_BLDR_SIZE_DEFAULT);
auditDetails.append("{\"domain\": ");
auditLogDomain(auditDetails, domain);
// first create and process the admin role
Role adminRole = ZMSUtils.makeAdminRole(domainName, adminUsers);
auditDetails.append(", \"role\": ");
if (!processRole(con, null, domainName, ZMSConsts.ADMIN_ROLE_NAME, adminRole,
principalName, auditRef, false, auditDetails)) {
con.rollbackChanges();
throw ZMSUtils.internalServerError("makeDomain: Cannot process role: '" +
adminRole.getName(), caller);
}
// now create and process the admin policy
Policy adminPolicy = ZMSUtils.makeAdminPolicy(domainName, adminRole);
auditDetails.append(", \"policy\": ");
if (!processPolicy(con, null, domainName, ZMSConsts.ADMIN_POLICY_NAME, adminPolicy,
false, auditDetails)) {
con.rollbackChanges();
throw ZMSUtils.internalServerError("makeDomain: Cannot process policy: '" +
adminPolicy.getName(), caller);
}
// go through our list of templates and add the specified
                // roles and policies to our domain
if (solutionTemplates != null) {
for (String templateName : solutionTemplates) {
auditDetails.append(", \"template\": ");
if (!addSolutionTemplate(con, domainName, templateName, principalName,
null, auditRef, auditDetails)) {
con.rollbackChanges();
throw ZMSUtils.internalServerError("makeDomain: Cannot apply templates: '" +
domain, caller);
}
}
}
auditDetails.append("}");
// update our domain time-stamp and save changes
saveChanges(con, domainName);
// audit log entry
auditLogRequest(ctx, domainName, auditRef, caller, ZMSConsts.HTTP_POST,
domainName, auditDetails.toString());
return domain;
} catch (ResourceException ex) {
if (!shouldRetryOperation(ex, retryCount)) {
throw ex;
}
}
}
}
boolean processPolicy(ObjectStoreConnection con, Policy originalPolicy, String domainName,
String policyName, Policy policy, boolean ignoreDeletes, StringBuilder auditDetails) {
// check to see if we need to insert the policy or update it
boolean requestSuccess;
if (originalPolicy == null) {
requestSuccess = con.insertPolicy(domainName, policy);
} else {
requestSuccess = con.updatePolicy(domainName, policy);
}
// if we didn't update any policies then we need to return failure
if (!requestSuccess) {
return false;
}
// open our audit record
auditDetails.append("{\"name\": \"").append(policyName).append('\"');
        // now we need to process our policy assertions depending on whether
        // this is a new insert operation or an update
List<Assertion> newAssertions = policy.getAssertions();
if (originalPolicy == null) {
// we're just going to process our new assertions
if (newAssertions != null) {
for (Assertion assertion : newAssertions) {
if (!con.insertAssertion(domainName, policyName, assertion)) {
return false;
}
}
auditLogAssertions(auditDetails, "added-assertions", newAssertions);
}
} else {
// first we need to retrieve the current set of assertions
List<Assertion> curAssertions = originalPolicy.getAssertions();
if (curAssertions == null) {
curAssertions = new ArrayList<>();
}
List<Assertion> addAssertions = new ArrayList<>();
List<Assertion> delAssertions = new ArrayList<>();
policyAssertionChanges(newAssertions, curAssertions, addAssertions, delAssertions);
if (!ignoreDeletes) {
for (Assertion assertion : delAssertions) {
if (!con.deleteAssertion(domainName, policyName, assertion.getId())) {
return false;
}
}
auditLogAssertions(auditDetails, "deleted-assertions", delAssertions);
}
for (Assertion assertion : addAssertions) {
if (!con.insertAssertion(domainName, policyName, assertion)) {
return false;
}
}
auditLogAssertions(auditDetails, "added-assertions", addAssertions);
}
auditDetails.append('}');
return true;
}
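    // helper for policyAssertionChanges: returns true if an assertion with the
    // same action, resource, role and effect exists in the given list; the
    // match is removed from the list and moved into matchedAssertions so the
    // caller can compute the add/delete sets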
boolean removeMatchedAssertion(Assertion assertion, List<Assertion> assertions, List<Assertion> matchedAssertions) {
AssertionEffect effect = AssertionEffect.ALLOW;
if (assertion.getEffect() != null) {
effect = assertion.getEffect();
}
Iterator<Assertion> itr = assertions.iterator();
while (itr.hasNext()) {
Assertion checkAssertion = itr.next();
if (!assertion.getAction().equals(checkAssertion.getAction())) {
continue;
}
if (!assertion.getResource().equals(checkAssertion.getResource())) {
continue;
}
if (!assertion.getRole().equals(checkAssertion.getRole())) {
continue;
}
AssertionEffect checkEffect = AssertionEffect.ALLOW;
if (checkAssertion.getEffect() != null) {
checkEffect = checkAssertion.getEffect();
}
if (effect != checkEffect) {
continue;
}
itr.remove();
matchedAssertions.add(checkAssertion);
return true;
}
return false;
}
void policyAssertionChanges(List<Assertion> newAssertions, List<Assertion> curAssertions,
List<Assertion> addAssertions, List<Assertion> delAssertions) {
// let's iterate through the new list and the ones that are
// not in the current list should be added to the add list
List<Assertion> matchedAssertions = new ArrayList<>();
if (newAssertions != null) {
for (Assertion assertion : newAssertions) {
if (!removeMatchedAssertion(assertion, curAssertions, matchedAssertions)) {
addAssertions.add(assertion);
}
}
}
// now our current list has been updated as well and
        // all the assertions that were present were moved to the
        // matched assertion list, so whatever is left in the
        // current list must be deleted
delAssertions.addAll(curAssertions);
// now let's go back and re-add the matched assertions
// back to our list so we can get the right audit data
curAssertions.addAll(matchedAssertions);
}
boolean processRole(ObjectStoreConnection con, Role originalRole, String domainName,
String roleName, Role role, String admin, String auditRef, boolean ignoreDeletes,
StringBuilder auditDetails) {
// check to see if we need to insert the role or update it
boolean requestSuccess;
if (originalRole == null) {
// auditEnabled can only be set with system admin privileges
role.setAuditEnabled(false);
requestSuccess = con.insertRole(domainName, role);
} else {
// carrying over auditEnabled from original role
role.setAuditEnabled(originalRole.getAuditEnabled());
requestSuccess = con.updateRole(domainName, role);
}
// if we didn't update any roles then we need to return failure
if (!requestSuccess) {
return false;
}
// open our audit record and log our trust field if one is available
auditDetails.append("{\"name\": \"").append(roleName)
.append("\", \"trust\": \"").append(role.getTrust()).append('\"');
        // now we need to process our role members depending on whether
        // this is a new insert operation or an update
List<RoleMember> roleMembers = role.getRoleMembers();
        // support older clients which might send the members field.
        // at this point, we expect either roleMembers or members,
        // but we can't have both
List<String> members = role.getMembers();
if (members != null && !members.isEmpty()) {
roleMembers = ZMSUtils.convertMembersToRoleMembers(members);
}
if (originalRole == null) {
// we are just going to process all members as new inserts
if (roleMembers != null) {
for (RoleMember member : roleMembers) {
if (!con.insertRoleMember(domainName, roleName, member, admin, auditRef)) {
return false;
}
}
auditLogRoleMembers(auditDetails, "added-members", roleMembers);
}
} else {
processUpdateRoleMembers(con, originalRole, roleMembers, ignoreDeletes,
domainName, roleName, admin, auditRef, auditDetails);
}
auditDetails.append('}');
return true;
}
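    // reconciles the requested member list against the original role: members
    // that are no longer present are deleted (unless ignoreDeletes is set) and
    // brand new ones are inserted, with both sides audit-logged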
private boolean processUpdateRoleMembers(ObjectStoreConnection con, Role originalRole,
List<RoleMember> roleMembers, boolean ignoreDeletes, String domainName,
String roleName, String admin, String auditRef, StringBuilder auditDetails) {
// first we need to retrieve the current set of members
List<RoleMember> originalMembers = originalRole.getRoleMembers();
List<RoleMember> curMembers = (null == originalMembers) ? new ArrayList<>() : new ArrayList<>(originalMembers);
List<RoleMember> delMembers = new ArrayList<>(curMembers);
ArrayList<RoleMember> newMembers = (null == roleMembers) ? new ArrayList<>() : new ArrayList<>(roleMembers);
// remove current members from new members
ZMSUtils.removeMembers(newMembers, curMembers);
// remove new members from current members
// which leaves the deleted members.
ZMSUtils.removeMembers(delMembers, roleMembers);
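        // worked example (illustrative): curMembers = [a, b], roleMembers = [b, c]
        //   after removeMembers(newMembers, curMembers): newMembers = [c] (to insert)
        //   after removeMembers(delMembers, roleMembers): delMembers = [a] (to delete)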
if (!ignoreDeletes) {
for (RoleMember member : delMembers) {
if (!con.deleteRoleMember(domainName, roleName, member.getMemberName(), admin, auditRef)) {
return false;
}
}
auditLogRoleMembers(auditDetails, "deleted-members", delMembers);
}
for (RoleMember member : newMembers) {
if (!con.insertRoleMember(domainName, roleName, member, admin, auditRef)) {
return false;
}
}
auditLogRoleMembers(auditDetails, "added-members", newMembers);
return true;
}
boolean processServiceIdentity(ObjectStoreConnection con, ServiceIdentity originalService,
String domainName, String serviceName, ServiceIdentity service,
boolean ignoreDeletes, StringBuilder auditDetails) {
boolean requestSuccess;
if (originalService == null) {
// provider endpoint can only be set with system admin privileges
service.setProviderEndpoint(null);
requestSuccess = con.insertServiceIdentity(domainName, service);
} else {
// carrying over provider endpoint from original service
service.setProviderEndpoint(originalService.getProviderEndpoint());
requestSuccess = con.updateServiceIdentity(domainName, service);
}
// if we didn't update any services then we need to return failure
if (!requestSuccess) {
return false;
}
// open our audit record and log our service details
auditDetails.append("{\"name\": \"").append(serviceName).append('\"')
.append(", \"executable\": \"").append(service.getExecutable()).append('\"')
.append(", \"user\": \"").append(service.getUser()).append('\"')
.append(", \"group\": \"").append(service.getGroup()).append('\"')
.append(", \"description\": \"").append(service.getDescription()).append('\"');
        // now we need to process our public keys depending on whether
        // this is a new insert operation or an update
List<PublicKeyEntry> publicKeys = service.getPublicKeys();
if (originalService == null) {
// we are just going to process all public keys as new inserts
if (publicKeys != null) {
for (PublicKeyEntry publicKey : publicKeys) {
if (!con.insertPublicKeyEntry(domainName, serviceName, publicKey)) {
return false;
}
}
auditLogPublicKeyEntries(auditDetails, "added-publickeys", publicKeys);
}
} else {
// first we need to retrieve the current set of public keys
List<PublicKeyEntry> curPublicKeys = originalService.getPublicKeys();
Map<String, PublicKeyEntry> curPublicKeysMap = new HashMap<>();
if (curPublicKeys != null) {
for (PublicKeyEntry publicKey : curPublicKeys) {
curPublicKeysMap.put(publicKey.getId(), publicKey);
}
}
Map<String, PublicKeyEntry> publicKeysMap = new HashMap<>();
if (publicKeys != null) {
for (PublicKeyEntry publicKey : publicKeys) {
publicKeysMap.put(publicKey.getId(), publicKey);
}
}
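            // set arithmetic on the key ids: ids only present in the request
            // become inserts, ids only present in the current service become
            // deletes, and ids in both are left untouched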
Set<String> curPublicKeysSet = new HashSet<>(curPublicKeysMap.keySet());
Set<String> delPublicKeysSet = new HashSet<>(curPublicKeysSet);
Set<String> newPublicKeysSet = new HashSet<>(publicKeysMap.keySet());
newPublicKeysSet.removeAll(curPublicKeysSet);
delPublicKeysSet.removeAll(new HashSet<>(publicKeysMap.keySet()));
if (!ignoreDeletes) {
for (String publicKey : delPublicKeysSet) {
if (!con.deletePublicKeyEntry(domainName, serviceName, publicKey)) {
return false;
}
}
auditLogPublicKeyEntries(auditDetails, "deleted-publickeys", delPublicKeysSet);
}
for (String publicKey : newPublicKeysSet) {
if (!con.insertPublicKeyEntry(domainName, serviceName, publicKeysMap.get(publicKey))) {
return false;
}
}
auditLogPublicKeyEntries(auditDetails, "added-publickeys", newPublicKeysSet, publicKeysMap);
}
// now we need to process the hosts defined for this service
Set<String> curHosts;
if (originalService != null && originalService.getHosts() != null) {
curHosts = new HashSet<>(originalService.getHosts());
} else {
curHosts = new HashSet<>();
}
Set<String> newHosts;
if (service.getHosts() != null) {
newHosts = new HashSet<>(service.getHosts());
} else {
newHosts = new HashSet<>();
}
Set<String> delHosts = new HashSet<>(curHosts);
delHosts.removeAll(newHosts);
newHosts.removeAll(curHosts);
for (String host : delHosts) {
if (!con.deleteServiceHost(domainName, serviceName, host)) {
return false;
}
}
auditLogStrings(auditDetails, "deleted-hosts", delHosts);
for (String host : newHosts) {
if (!con.insertServiceHost(domainName, serviceName, host)) {
return false;
}
}
auditLogStrings(auditDetails, "added-hosts", newHosts);
auditDetails.append('}');
return true;
}
boolean shouldRetryOperation(ResourceException ex, int retryCount) {
// before doing anything else let's check to see if
// we still have the option to retry the operation
if (retryCount <= 1) {
return false;
}
// if we got a conflict result it means we either had
// no connection or deadlock was detected and as such
// the changes were aborted
boolean retry = false;
switch (ex.getCode()) {
case ResourceException.CONFLICT:
retry = true;
break;
case ResourceException.GONE:
// this error indicates that the server is reporting it is in
// read-only mode which indicates a fail-over has taken place
// and we need to clear all connections and start new ones
// this could only happen with write operations against the
// read-write object store
store.clearConnections();
retry = true;
break;
}
// if we're asked to retry then we're going to
// wait for a short period of time to allow the other
// connection to finish its work
if (retry) {
if (LOG.isDebugEnabled()) {
LOG.debug(": possible deadlock, retries available: " + retryCount);
}
ZMSUtils.threadSleep(retrySleepTime);
}
// return our response
return retry;
}
void executePutPolicy(ResourceContext ctx, String domainName, String policyName, Policy policy,
String auditRef, String caller) {
// our exception handling code does the check for retry count
// and throws the exception it had received when the retry
// count reaches 0
for (int retryCount = defaultRetryCount; ; retryCount--) {
try (ObjectStoreConnection con = store.getConnection(false, true)) {
// first verify that auditing requirements are met
checkDomainAuditEnabled(con, domainName, auditRef, caller, getPrincipalName(ctx), AUDIT_TYPE_POLICY);
// check that quota is not exceeded
quotaCheck.checkPolicyQuota(con, domainName, policy, caller);
// retrieve our original policy
Policy originalPolicy = getPolicy(con, domainName, policyName);
// now process the request
StringBuilder auditDetails = new StringBuilder(ZMSConsts.STRING_BLDR_SIZE_DEFAULT);
if (!processPolicy(con, originalPolicy, domainName, policyName, policy, false, auditDetails)) {
con.rollbackChanges();
throw ZMSUtils.internalServerError("unable to put policy: " + policy.getName(), caller);
}
// update our domain time-stamp and save changes
saveChanges(con, domainName);
// audit log the request
auditLogRequest(ctx, domainName, auditRef, caller, ZMSConsts.HTTP_PUT,
policyName, auditDetails.toString());
return;
} catch (ResourceException ex) {
if (!shouldRetryOperation(ex, retryCount)) {
throw ex;
}
}
}
}
void executePutRole(ResourceContext ctx, String domainName, String roleName, Role role,
String auditRef, String caller) {
// our exception handling code does the check for retry count
// and throws the exception it had received when the retry
// count reaches 0
for (int retryCount = defaultRetryCount; ; retryCount--) {
try (ObjectStoreConnection con = store.getConnection(false, true)) {
String principal = getPrincipalName(ctx);
// first verify that auditing requirements are met
checkDomainAuditEnabled(con, domainName, auditRef, caller, principal, AUDIT_TYPE_ROLE);
// check that quota is not exceeded
quotaCheck.checkRoleQuota(con, domainName, role, caller);
// retrieve our original role
Role originalRole = getRole(con, domainName, roleName, false, false, false);
if (originalRole != null &&
(originalRole.getAuditEnabled() == Boolean.TRUE || originalRole.getReviewEnabled() == Boolean.TRUE)) {
throw ZMSUtils.requestError("Can not update auditEnabled and/or reviewEnabled roles", caller);
}
// now process the request
StringBuilder auditDetails = new StringBuilder(ZMSConsts.STRING_BLDR_SIZE_DEFAULT);
if (!processRole(con, originalRole, domainName, roleName, role,
principal, auditRef, false, auditDetails)) {
con.rollbackChanges();
throw ZMSUtils.internalServerError("unable to put role: " + role.getName(), caller);
}
// update our domain time-stamp and save changes
saveChanges(con, domainName);
// audit log the request
auditLogRequest(ctx, domainName, auditRef, caller, ZMSConsts.HTTP_PUT,
roleName, auditDetails.toString());
return;
} catch (ResourceException ex) {
if (!shouldRetryOperation(ex, retryCount)) {
throw ex;
}
}
}
}
void executePutServiceIdentity(ResourceContext ctx, String domainName, String serviceName,
ServiceIdentity service, String auditRef, String caller) {
// our exception handling code does the check for retry count
// and throws the exception it had received when the retry
// count reaches 0
for (int retryCount = defaultRetryCount; ; retryCount--) {
try (ObjectStoreConnection con = store.getConnection(false, true)) {
// first verify that auditing requirements are met
checkDomainAuditEnabled(con, domainName, auditRef, caller, getPrincipalName(ctx), AUDIT_TYPE_SERVICE);
// check that quota is not exceeded
quotaCheck.checkServiceIdentityQuota(con, domainName, service, caller);
// retrieve our original service identity object
ServiceIdentity originalService = getServiceIdentity(con, domainName, serviceName, false);
// now process the request
StringBuilder auditDetails = new StringBuilder(ZMSConsts.STRING_BLDR_SIZE_DEFAULT);
if (!processServiceIdentity(con, originalService, domainName, serviceName,
service, false, auditDetails)) {
con.rollbackChanges();
throw ZMSUtils.internalServerError("unable to put service: " + service.getName(), caller);
}
// update our domain time-stamp and save changes
saveChanges(con, domainName);
// audit log the request
auditLogRequest(ctx, domainName, auditRef, caller, ZMSConsts.HTTP_PUT,
serviceName, auditDetails.toString());
return;
} catch (ResourceException ex) {
if (!shouldRetryOperation(ex, retryCount)) {
throw ex;
}
}
}
}
void executePutPublicKeyEntry(ResourceContext ctx, String domainName, String serviceName,
PublicKeyEntry keyEntry, String auditRef, String caller) {
// our exception handling code does the check for retry count
// and throws the exception it had received when the retry
// count reaches 0
for (int retryCount = defaultRetryCount; ; retryCount--) {
try (ObjectStoreConnection con = store.getConnection(false, true)) {
// first verify that auditing requirements are met
checkDomainAuditEnabled(con, domainName, auditRef, caller, getPrincipalName(ctx), AUDIT_TYPE_SERVICE);
// check to see if this key already exists or not
PublicKeyEntry originalKeyEntry = con.getPublicKeyEntry(domainName, serviceName,
keyEntry.getId(), false);
                // now we need to verify our quota check if we know
                // that we'll be adding another public key
if (originalKeyEntry == null) {
quotaCheck.checkServiceIdentityPublicKeyQuota(con, domainName, serviceName, caller);
}
// now process the request
boolean requestSuccess;
StringBuilder auditDetails = new StringBuilder(ZMSConsts.STRING_BLDR_SIZE_DEFAULT);
if (originalKeyEntry == null) {
requestSuccess = con.insertPublicKeyEntry(domainName, serviceName, keyEntry);
auditDetails.append("{\"added-publicKeys\": [");
} else {
requestSuccess = con.updatePublicKeyEntry(domainName, serviceName, keyEntry);
auditDetails.append("{\"updated-publicKeys\": [");
}
if (!requestSuccess) {
con.rollbackChanges();
throw ZMSUtils.internalServerError("unable to put public key: " + keyEntry.getId() +
" in service " + ZMSUtils.serviceResourceName(domainName, serviceName), caller);
}
// update our service and domain time-stamp and save changes
con.updateServiceIdentityModTimestamp(domainName, serviceName);
saveChanges(con, domainName);
// audit log the request
auditLogPublicKeyEntry(auditDetails, keyEntry, true);
auditDetails.append("]}");
auditLogRequest(ctx, domainName, auditRef, caller, ZMSConsts.HTTP_PUT,
serviceName, auditDetails.toString());
return;
} catch (ResourceException ex) {
if (!shouldRetryOperation(ex, retryCount)) {
throw ex;
}
}
}
}
void executeDeletePublicKeyEntry(ResourceContext ctx, String domainName, String serviceName,
String keyId, String auditRef, String caller) {
// our exception handling code does the check for retry count
// and throws the exception it had received when the retry
// count reaches 0
for (int retryCount = defaultRetryCount; ; retryCount--) {
try (ObjectStoreConnection con = store.getConnection(false, true)) {
// first verify that auditing requirements are met
checkDomainAuditEnabled(con, domainName, auditRef, caller, getPrincipalName(ctx), AUDIT_TYPE_SERVICE);
// now process the request
if (!con.deletePublicKeyEntry(domainName, serviceName, keyId)) {
con.rollbackChanges();
throw ZMSUtils.notFoundError("unable to delete public key: " + keyId +
" in service " + ZMSUtils.serviceResourceName(domainName, serviceName), caller);
}
// update our service and domain time-stamp and save changes
con.updateServiceIdentityModTimestamp(domainName, serviceName);
saveChanges(con, domainName);
// audit log the request
StringBuilder auditDetails = new StringBuilder(ZMSConsts.STRING_BLDR_SIZE_DEFAULT);
auditDetails.append("{\"deleted-publicKeys\": [{\"id\": \"").append(keyId).append("\"}]}");
auditLogRequest(ctx, domainName, auditRef, caller, ZMSConsts.HTTP_DELETE,
serviceName, auditDetails.toString());
return;
} catch (ResourceException ex) {
if (!shouldRetryOperation(ex, retryCount)) {
throw ex;
}
}
}
}
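/**
 * Returns true if the given role is a delegated role - i.e. it has
 * a non-empty trust domain configured instead of its own member list
 **/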
boolean isTrustRole(Role role) {
if (role == null) {
return false;
}
return role.getTrust() != null && !role.getTrust().isEmpty();
}
void executePutMembership(ResourceContext ctx, String domainName, String roleName,
RoleMember roleMember, String auditRef, String caller) {
// our exception handling code does the check for retry count
// and throws the exception it had received when the retry
// count reaches 0
for (int retryCount = defaultRetryCount; ; retryCount--) {
try (ObjectStoreConnection con = store.getConnection(true, true)) {
String principal = getPrincipalName(ctx);
// first verify that auditing requirements are met
checkDomainAuditEnabled(con, domainName, auditRef, caller, principal, AUDIT_TYPE_ROLE);
// make sure the role auditing requirements are met
Role originalRole = con.getRole(domainName, roleName);
if (originalRole == null) {
con.rollbackChanges();
throw ZMSUtils.notFoundError(caller + ": Unknown role: " + roleName, caller);
}
checkRoleAuditEnabled(con, originalRole, auditRef, caller, principal);
// before inserting a member we need to verify that
// this is a group role and not a delegated one.
if (isTrustRole(originalRole)) {
con.rollbackChanges();
throw ZMSUtils.requestError(caller + ": " + roleName +
" is a delegated role", caller);
}
// now we need to verify our quota check
quotaCheck.checkRoleMembershipQuota(con, domainName, roleName, caller);
// process our insert role member operation. since this is a "single"
// operation, we are not using any transactions.
if (!con.insertRoleMember(domainName, roleName, roleMember,
principal, auditRef)) {
con.rollbackChanges();
throw ZMSUtils.requestError(caller + ": unable to insert role member: " +
roleMember.getMemberName() + " to role: " + roleName, caller);
}
// update our role and domain time-stamps, and invalidate local cache entry
con.updateRoleModTimestamp(domainName, roleName);
con.updateDomainModTimestamp(domainName);
cacheStore.invalidate(domainName);
// audit log the request
StringBuilder auditDetails = new StringBuilder(ZMSConsts.STRING_BLDR_SIZE_DEFAULT);
auditLogRoleMember(auditDetails, roleMember, true);
auditLogRequest(ctx, domainName, auditRef, caller, ZMSConsts.HTTP_PUT, roleName,
auditDetails.toString());
return;
} catch (ResourceException ex) {
// otherwise check if we need to retry or return failure
if (!shouldRetryOperation(ex, retryCount)) {
throw ex;
}
}
}
}
void executePutEntity(ResourceContext ctx, String domainName, String entityName,
Entity entity, String auditRef, String caller) {
// our exception handling code does the check for retry count
// and throws the exception it had received when the retry
// count reaches 0
for (int retryCount = defaultRetryCount; ; retryCount--) {
try (ObjectStoreConnection con = store.getConnection(false, true)) {
// first verify that auditing requirements are met
checkDomainAuditEnabled(con, domainName, auditRef, caller, getPrincipalName(ctx), AUDIT_TYPE_ENTITY);
// check that quota is not exceeded
quotaCheck.checkEntityQuota(con, domainName, entity, caller);
// check to see if this key already exists or not
Entity originalEntity = con.getEntity(domainName, entityName);
// now process the request
boolean requestSuccess;
if (originalEntity == null) {
requestSuccess = con.insertEntity(domainName, entity);
} else {
requestSuccess = con.updateEntity(domainName, entity);
}
if (!requestSuccess) {
con.rollbackChanges();
throw ZMSUtils.internalServerError("unable to put entity: "
+ entity.getName(), caller);
}
// update our domain time-stamp and save changes
saveChanges(con, domainName);
// audit log the request
auditLogRequest(ctx, domainName, auditRef, caller, ZMSConsts.HTTP_PUT,
entity.getName(), JSON.string(entity.getValue()));
return;
} catch (ResourceException ex) {
if (!shouldRetryOperation(ex, retryCount)) {
throw ex;
}
}
}
}
void executeDeleteMembership(ResourceContext ctx, String domainName, String roleName,
String normalizedMember, String auditRef, String caller) {
// our exception handling code does the check for retry count
// and throws the exception it had received when the retry
// count reaches 0
for (int retryCount = defaultRetryCount; ; retryCount--) {
try (ObjectStoreConnection con = store.getConnection(true, true)) {
String principal = getPrincipalName(ctx);
// first verify that auditing requirements are met
checkDomainAuditEnabled(con, domainName, auditRef, caller, principal, AUDIT_TYPE_ROLE);
// if this is the admin role then we need to make sure
// we are not deleting the last remaining member
// of the role
if (ZMSConsts.ADMIN_ROLE_NAME.equals(roleName)) {
List<RoleMember> members = con.listRoleMembers(domainName, roleName, false);
if (members.size() == 1 && members.get(0).getMemberName().equals(normalizedMember)) {
throw ZMSUtils.forbiddenError(caller +
": Cannot delete last member of 'admin' role", caller);
}
}
// process our delete role member operation
if (!con.deleteRoleMember(domainName, roleName, normalizedMember,
principal, auditRef)) {
con.rollbackChanges();
throw ZMSUtils.notFoundError(caller + ": unable to delete role member: " +
normalizedMember + " from role: " + roleName, caller);
}
// update our role and domain time-stamps, and invalidate local cache entry
con.updateRoleModTimestamp(domainName, roleName);
con.updateDomainModTimestamp(domainName);
cacheStore.invalidate(domainName);
// audit log the request
auditLogRequest(ctx, domainName, auditRef, caller, ZMSConsts.HTTP_DELETE,
roleName, "{\"member\": \"" + normalizedMember + "\"}");
return;
} catch (ResourceException ex) {
if (!shouldRetryOperation(ex, retryCount)) {
throw ex;
}
}
}
}
void executeDeletePendingMembership(ResourceContext ctx, String domainName, String roleName,
String normalizedMember, String auditRef, String caller) {
// our exception handling code does the check for retry count
// and throws the exception it had received when the retry
// count reaches 0
for (int retryCount = defaultRetryCount; ; retryCount--) {
try (ObjectStoreConnection con = store.getConnection(true, true)) {
String principal = getPrincipalName(ctx);
// first verify that auditing requirements are met
checkDomainAuditEnabled(con, domainName, auditRef, caller, principal, AUDIT_TYPE_ROLE);
// process our delete role member operation
if (!con.deletePendingRoleMember(domainName, roleName, normalizedMember,
principal, auditRef)) {
con.rollbackChanges();
throw ZMSUtils.notFoundError(caller + ": unable to delete pending role member: " +
normalizedMember + " from role: " + roleName, caller);
}
// update our role and domain time-stamps, and invalidate local cache entry
con.updateRoleModTimestamp(domainName, roleName);
con.updateDomainModTimestamp(domainName);
cacheStore.invalidate(domainName);
// audit log the request
auditLogRequest(ctx, domainName, auditRef, caller, ZMSConsts.HTTP_DELETE,
roleName, "{\"pending-member\": \"" + normalizedMember + "\"}");
return;
} catch (ResourceException ex) {
if (!shouldRetryOperation(ex, retryCount)) {
throw ex;
}
}
}
}
void executeDeleteServiceIdentity(ResourceContext ctx, String domainName, String serviceName,
String auditRef, String caller) {
// our exception handling code does the check for retry count
// and throws the exception it had received when the retry
// count reaches 0
for (int retryCount = defaultRetryCount; ; retryCount--) {
try (ObjectStoreConnection con = store.getConnection(false, true)) {
// first verify that auditing requirements are met
checkDomainAuditEnabled(con, domainName, auditRef, caller, getPrincipalName(ctx), AUDIT_TYPE_SERVICE);
// process our delete service request
if (!con.deleteServiceIdentity(domainName, serviceName)) {
con.rollbackChanges();
throw ZMSUtils.notFoundError(caller + ": unable to delete service: " + serviceName, caller);
}
// update our domain time-stamp and save changes
saveChanges(con, domainName);
// audit log the request
auditLogRequest(ctx, domainName, auditRef, caller, ZMSConsts.HTTP_DELETE,
serviceName, null);
return;
} catch (ResourceException ex) {
if (!shouldRetryOperation(ex, retryCount)) {
throw ex;
}
}
}
}
void executeDeleteEntity(ResourceContext ctx, String domainName, String entityName,
String auditRef, String caller) {
// our exception handling code does the check for retry count
// and throws the exception it had received when the retry
// count reaches 0
for (int retryCount = defaultRetryCount; ; retryCount--) {
try (ObjectStoreConnection con = store.getConnection(false, true)) {
// first verify that auditing requirements are met
checkDomainAuditEnabled(con, domainName, auditRef, caller, getPrincipalName(ctx), AUDIT_TYPE_ENTITY);
// process our delete role request
if (!con.deleteEntity(domainName, entityName)) {
con.rollbackChanges();
throw ZMSUtils.notFoundError(caller + ": unable to delete entity: " + entityName, caller);
}
// update our domain time-stamp and save changes
saveChanges(con, domainName);
// audit log the request
auditLogRequest(ctx, domainName, auditRef, caller, ZMSConsts.HTTP_DELETE,
entityName, null);
return;
} catch (ResourceException ex) {
if (!shouldRetryOperation(ex, retryCount)) {
throw ex;
}
}
}
}
void executeDeleteRole(ResourceContext ctx, String domainName, String roleName,
String auditRef, String caller) {
// our exception handling code does the check for retry count
// and throws the exception it had received when the retry
// count reaches 0
for (int retryCount = defaultRetryCount; ; retryCount--) {
try (ObjectStoreConnection con = store.getConnection(false, true)) {
// first verify that auditing requirements are met
checkDomainAuditEnabled(con, domainName, auditRef, caller, getPrincipalName(ctx), AUDIT_TYPE_ROLE);
// process our delete role request
if (!con.deleteRole(domainName, roleName)) {
con.rollbackChanges();
throw ZMSUtils.notFoundError(caller + ": unable to delete role: " + roleName, caller);
}
// update our domain time-stamp and save changes
saveChanges(con, domainName);
// audit log the request
auditLogRequest(ctx, domainName, auditRef, caller, ZMSConsts.HTTP_DELETE,
roleName, null);
return;
} catch (ResourceException ex) {
if (!shouldRetryOperation(ex, retryCount)) {
throw ex;
}
}
}
}
void executeDeletePolicy(ResourceContext ctx, String domainName, String policyName,
String auditRef, String caller) {
// our exception handling code does the check for retry count
// and throws the exception it had received when the retry
// count reaches 0
for (int retryCount = defaultRetryCount; ; retryCount--) {
try (ObjectStoreConnection con = store.getConnection(false, true)) {
// first verify that auditing requirements are met
checkDomainAuditEnabled(con, domainName, auditRef, caller, getPrincipalName(ctx), AUDIT_TYPE_POLICY);
// process our delete policy request
if (!con.deletePolicy(domainName, policyName)) {
con.rollbackChanges();
throw ZMSUtils.notFoundError(caller + ": unable to delete policy: " + policyName, caller);
}
// update our domain time-stamp and save changes
saveChanges(con, domainName);
// audit log the request
auditLogRequest(ctx, domainName, auditRef, caller, ZMSConsts.HTTP_DELETE,
policyName, null);
return;
} catch (ResourceException ex) {
if (!shouldRetryOperation(ex, retryCount)) {
throw ex;
}
}
}
}
/**
* If the domain has audit enabled and the user did not provide an
* auditRef, an exception will be thrown
**/
void checkDomainAuditEnabled(ObjectStoreConnection con, final String domainName,
final String auditRef, final String caller, final String principal, int objectType) {
// before retrieving the domain details make sure we are
// configured to enforce audit reference field on the given
// object type
if (!auditRefSet.get(objectType)) {
return;
}
Domain domain = con.getDomain(domainName);
if (domain == null) {
con.rollbackChanges();
throw ZMSUtils.notFoundError(caller + ": Unknown domain: " + domainName, caller);
}
auditReferenceCheck(con, domain, auditRef, caller, principal);
}
void checkDomainAuditEnabled(ObjectStoreConnection con, Domain domain,
final String auditRef, final String caller, final String principal, int objectType) {
if (!auditRefSet.get(objectType)) {
return;
}
auditReferenceCheck(con, domain, auditRef, caller, principal);
}
void auditReferenceCheck(ObjectStoreConnection con, Domain domain, final String auditRef,
final String caller, final String principal) {
if (domain.getAuditEnabled() == Boolean.TRUE) {
if (auditRef == null || auditRef.length() == 0) {
con.rollbackChanges();
throw ZMSUtils.requestError(caller + ": Audit reference required for domain: " + domain.getName(), caller);
}
if (auditReferenceValidator != null && !auditReferenceValidator.validateReference(auditRef, principal, caller)) {
con.rollbackChanges();
throw ZMSUtils.requestError(caller + ": Audit reference validation failed for domain: " + domain.getName() + ", auditRef: " + auditRef, caller);
}
}
}
void executeDeleteDomain(ResourceContext ctx, String domainName, String auditRef, String caller) {
// our exception handling code does the check for retry count
// and throws the exception it had received when the retry
// count reaches 0
for (int retryCount = defaultRetryCount; ; retryCount--) {
try (ObjectStoreConnection con = store.getConnection(false, true)) {
// first verify that auditing requirements are met
checkDomainAuditEnabled(con, domainName, auditRef, caller, getPrincipalName(ctx), AUDIT_TYPE_DOMAIN);
// now process the request
con.deleteDomain(domainName);
con.commitChanges();
cacheStore.invalidate(domainName);
// audit log the request
auditLogRequest(ctx, domainName, auditRef, caller, ZMSConsts.HTTP_DELETE,
domainName, null);
return;
} catch (ResourceException ex) {
if (!shouldRetryOperation(ex, retryCount)) {
throw ex;
}
}
}
}
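/**
 * Returns the list of principals known to the store. If domainOnly is
 * true, the response is filtered so that only principals with a single
 * name component below the domain (i.e. actual users, not services or
 * sub-domains) are returned
 **/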
List<String> listPrincipals(String domainName, boolean domainOnly) {
try (ObjectStoreConnection con = store.getConnection(true, false)) {
List<String> principals = con.listPrincipals(domainName);
// if no further filtering is necessary, return the data
// right away
if (!domainOnly) {
return principals;
}
// generate our return list
List<String> users = new ArrayList<>();
// if we're asked for domain only then we need to match
// the domain name, if specified, and make sure the response
// only includes a single period/domain separator.
// we need to skip an extra character to account for the
// domain separator (e.g. <domainName>.<userName>)
int prefixLength = 0;
if (domainName != null) {
prefixLength = domainName.length() + 1;
}
for (String principal : principals) {
// make sure the principal name doesn't have multiple
// components - e.g. user.joe.test since it represents
// a service or a sub-domain and we're only interested
// in actual users
if (prefixLength > 0) {
if (principal.substring(prefixLength).indexOf('.') == -1) {
users.add(principal);
}
} else {
// we have a single separator when the first index
// and the last index are the same
if (principal.indexOf('.') == principal.lastIndexOf('.')) {
users.add(principal);
}
}
}
return users;
}
}
void removePrincipalFromDomainRoles(ObjectStoreConnection con, String domainName, String principalName,
String adminUser, String auditRef) {
// extract all the roles that this principal is member of
// we have to do this here so that there are records of
// entries in the role member audit logs and the domain
// entries are properly invalidated
List<PrincipalRole> roles = con.listPrincipalRoles(domainName, principalName);
// we want to check if we had any roles otherwise
// we don't want to update the domain mod timestamp
if (roles.isEmpty()) {
return;
}
for (PrincipalRole role : roles) {
final String roleName = role.getRoleName();
// process our delete role member operation
if (LOG.isDebugEnabled()) {
LOG.debug("removePrincipalFromDomainRoles: removing member {} from {}:role.{}",
principalName, domainName, roleName);
}
// we are going to ignore all errors here rather than
// rejecting the full operation
try {
con.deleteRoleMember(domainName, roleName, principalName, adminUser, auditRef);
} catch (ResourceException ex) {
LOG.error("removePrincipalFromDomainRoles: unable to remove {} from {}:role.{} - error {}",
principalName, domainName, roleName, ex.getMessage());
}
// update our role and domain time-stamps, and invalidate local cache entry
con.updateRoleModTimestamp(domainName, roleName);
}
con.updateDomainModTimestamp(domainName);
}
void removePrincipalFromAllRoles(ObjectStoreConnection con, String principalName,
String adminUser, String auditRef) {
// extract all the roles that this principal is member of
// we have to do this here so that there are records of
// entries in the role member audit logs and the domain
// entries are properly invalidated
List<PrincipalRole> roles;
try {
roles = con.listPrincipalRoles(null, principalName);
} catch (ResourceException ex) {
// if there is no such principal then we have nothing to do
if (ex.getCode() == ResourceException.NOT_FOUND) {
return;
} else {
throw ex;
}
}
for (PrincipalRole role : roles) {
final String domainName = role.getDomainName();
final String roleName = role.getRoleName();
// process our delete role member operation
if (LOG.isDebugEnabled()) {
LOG.debug("removePrincipalFromAllRoles: removing member {} from {}:role.{}",
principalName, domainName, roleName);
}
// we are going to ignore all errors here rather than
// rejecting the full operation. our delete user will
// eventually remove all these principals
try {
con.deleteRoleMember(domainName, roleName, principalName, adminUser, auditRef);
} catch (ResourceException ex) {
LOG.error("removePrincipalFromAllRoles: unable to remove {} from {}:role.{} - error {}",
principalName, domainName, roleName, ex.getMessage());
}
// update our role and domain time-stamps, and invalidate local cache entry
con.updateRoleModTimestamp(domainName, roleName);
con.updateDomainModTimestamp(domainName);
}
}
void removePrincipalDomains(ObjectStoreConnection con, String principalName) {
// first we're going to retrieve the list of domains for
// the given user
final String domainPrefix = principalName + ".";
List<String> subDomains = con.listDomains(domainPrefix, 0);
// first we're going to delete the user domain if
// one exists and then all the sub-domains. We're not
// going to fail the operation for these steps - only
// if the actual user is not deleted
con.deleteDomain(principalName);
cacheStore.invalidate(principalName);
for (String subDomain : subDomains) {
con.deleteDomain(subDomain);
cacheStore.invalidate(subDomain);
}
}
void executeDeleteDomainRoleMember(ResourceContext ctx, String domainName,
String memberName, String auditRef, String caller) {
// our exception handling code does the check for retry count
// and throws the exception it had received when the retry
// count reaches 0
for (int retryCount = defaultRetryCount; ; retryCount--) {
try (ObjectStoreConnection con = store.getConnection(true, true)) {
// remove this user from all roles manually so that we
// can have an audit log record for each role
removePrincipalFromDomainRoles(con, domainName, memberName,
getPrincipalName(ctx), auditRef);
// audit log the request
auditLogRequest(ctx, domainName, auditRef, caller, ZMSConsts.HTTP_DELETE,
memberName, null);
return;
} catch (ResourceException ex) {
if (!shouldRetryOperation(ex, retryCount)) {
throw ex;
}
}
}
}
void executeDeleteUser(ResourceContext ctx, String userName, String domainName,
String auditRef, String caller) {
// our exception handling code does the check for retry count
// and throws the exception it had received when the retry
// count reaches 0
for (int retryCount = defaultRetryCount; ; retryCount--) {
try (ObjectStoreConnection con = store.getConnection(true, true)) {
// remove all principal domains
removePrincipalDomains(con, domainName);
// extract all principals that this user has - this would
// include the user itself plus all services this user
// has created in the personal domain and its sub-domains
List<String> userSvcPrincipals = con.listPrincipals(domainName);
// remove this user from all roles manually so that we
// can have an audit log record for each role
final String adminPrincipal = getPrincipalName(ctx);
removePrincipalFromAllRoles(con, userName, adminPrincipal, auditRef);
for (String userSvcPrincipal : userSvcPrincipals) {
removePrincipalFromAllRoles(con, userSvcPrincipal, adminPrincipal, auditRef);
}
// finally delete the principal object. any roles that were
// left behind will be cleaned up from this operation
if (!con.deletePrincipal(userName, true)) {
throw ZMSUtils.notFoundError(caller + ": unable to delete user: "
+ userName, caller);
}
// audit log the request
auditLogRequest(ctx, userName, auditRef, caller, ZMSConsts.HTTP_DELETE,
userName, null);
return;
} catch (ResourceException ex) {
if (!shouldRetryOperation(ex, retryCount)) {
throw ex;
}
}
}
}
ServiceIdentity getServiceIdentity(String domainName, String serviceName, boolean attrsOnly) {
try (ObjectStoreConnection con = store.getConnection(true, false)) {
return getServiceIdentity(con, domainName, serviceName, attrsOnly);
}
}
DomainTemplateList listDomainTemplates(String domainName) {
try (ObjectStoreConnection con = store.getConnection(true, false)) {
DomainTemplateList domainTemplateList = new DomainTemplateList();
domainTemplateList.setTemplateNames(con.listDomainTemplates(domainName));
return domainTemplateList;
}
}
ServiceIdentity getServiceIdentity(ObjectStoreConnection con, String domainName,
String serviceName, boolean attrsOnly) {
ServiceIdentity service = con.getServiceIdentity(domainName, serviceName);
if (service != null && !attrsOnly) {
service.setPublicKeys(con.listPublicKeys(domainName, serviceName));
List<String> hosts = con.listServiceHosts(domainName, serviceName);
if (hosts != null && !hosts.isEmpty()) {
service.setHosts(hosts);
}
}
return service;
}
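/**
 * Looks up the given public key in our local domain cache. Returns null
 * if the domain, service or key id is not present in the cache
 **/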
PublicKeyEntry getPublicKeyFromCache(String domainName, String serviceName, String keyId) {
DataCache data = cacheStore.getIfPresent(domainName);
if (data == null) {
return null;
}
AthenzDomain athenzDomain = data.getAthenzDomain();
if (athenzDomain == null) {
return null;
}
List<ServiceIdentity> services = athenzDomain.getServices();
if (services == null) {
return null;
}
final String fullServiceName = ZMSUtils.serviceResourceName(domainName, serviceName);
for (ServiceIdentity service : services) {
if (fullServiceName.equals(service.getName())) {
List<PublicKeyEntry> publicKeys = service.getPublicKeys();
if (publicKeys != null) {
for (PublicKeyEntry publicKey : publicKeys) {
if (keyId.equals(publicKey.getId())) {
return publicKey;
}
}
}
break;
}
}
return null;
}
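/**
 * Retrieves the requested public key entry from the DB store. If the
 * store is unavailable, we fall back to our local domain cache before
 * failing the request
 **/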
PublicKeyEntry getServicePublicKeyEntry(String domainName, String serviceName,
String keyId, boolean domainStateCheck) {
try (ObjectStoreConnection con = store.getConnection(true, false)) {
return con.getPublicKeyEntry(domainName, serviceName, keyId, domainStateCheck);
} catch (ResourceException ex) {
if (ex.getCode() != ResourceException.SERVICE_UNAVAILABLE) {
throw ex;
}
}
// if we got this far it means we couldn't get our public key
// from our DB store either due to timeout or communication
// error so we're going to see if we have the public key in
// our cache and use that for our requests
PublicKeyEntry keyEntry = getPublicKeyFromCache(domainName, serviceName, keyId);
if (keyEntry == null) {
throw new ResourceException(ResourceException.SERVICE_UNAVAILABLE,
"Unable to retrieve public key from DB store");
}
return keyEntry;
}
public ResourceAccessList getResourceAccessList(String principal, String action) {
// this command takes quite a bit of time due to joining tables
// and needs to be optimized. For now we'll configure it with
// a default timeout of 30 minutes to avoid any issues
try (ObjectStoreConnection con = store.getConnection(true, false)) {
con.setOperationTimeout(1800);
return con.listResourceAccess(principal, action, zmsConfig.getUserDomain());
}
}
Domain getDomain(String domainName, boolean masterCopy) {
try (ObjectStoreConnection con = store.getConnection(true, masterCopy)) {
return con.getDomain(domainName);
}
}
List<String> listDomains(String prefix, long modifiedSince) {
try (ObjectStoreConnection con = store.getConnection(true, false)) {
return con.listDomains(prefix, modifiedSince);
}
}
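/**
 * Looks up the domain configured with the given account and/or product
 * id and returns it as a single-entry domain list (empty if no match)
 **/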
DomainList lookupDomainById(String account, int productId) {
DomainList domList = new DomainList();
try (ObjectStoreConnection con = store.getConnection(true, false)) {
String domain = con.lookupDomainById(account, productId);
if (domain != null) {
List<String> list = Collections.singletonList(domain);
domList.setNames(list);
}
}
return domList;
}
DomainList lookupDomainByAccount(String account) {
return lookupDomainById(account, 0);
}
DomainList lookupDomainByProductId(Integer productId) {
return lookupDomainById(null, productId);
}
DomainList lookupDomainByRole(String roleMember, String roleName) {
DomainList domList = new DomainList();
try (ObjectStoreConnection con = store.getConnection(true, false)) {
List<String> domains = con.lookupDomainByRole(roleMember, roleName);
if (domains != null) {
domList.setNames(domains);
}
}
return domList;
}
List<String> listRoles(String domainName) {
try (ObjectStoreConnection con = store.getConnection(true, false)) {
return con.listRoles(domainName);
}
}
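/**
 * Returns the membership details for the given principal in the role.
 * If the membership has an expiration in the past, isMember is set
 * to false
 **/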
Membership getMembership(String domainName, String roleName, String principal,
long expiryTimestamp, boolean pending) {
try (ObjectStoreConnection con = store.getConnection(true, false)) {
Membership membership = con.getRoleMember(domainName, roleName, principal, expiryTimestamp, pending);
Timestamp expiration = membership.getExpiration();
// need to check expiration and set isMember to false if expired
if (expiration != null && expiration.millis() < System.currentTimeMillis()) {
membership.setIsMember(false);
}
return membership;
}
}
DomainRoleMembers listDomainRoleMembers(String domainName) {
try (ObjectStoreConnection con = store.getConnection(true, false)) {
return con.listDomainRoleMembers(domainName);
}
}
Role getRole(String domainName, String roleName, Boolean auditLog, Boolean expand, Boolean pending) {
try (ObjectStoreConnection con = store.getConnection(true, false)) {
return getRole(con, domainName, roleName, auditLog, expand, pending);
}
}
Role getRole(ObjectStoreConnection con, String domainName, String roleName,
Boolean auditLog, Boolean expand, Boolean pending) {
Role role = con.getRole(domainName, roleName);
if (role != null) {
if (role.getTrust() == null) {
// if we have no trust field specified then we need to
// retrieve our standard group role members
role.setRoleMembers(con.listRoleMembers(domainName, roleName, pending));
// still populate the members for old clients
role.setMembers(ZMSUtils.convertRoleMembersToMembers(
role.getRoleMembers()));
if (auditLog == Boolean.TRUE) {
role.setAuditLog(con.listRoleAuditLogs(domainName, roleName));
}
} else if (expand == Boolean.TRUE) {
// otherwise, if asked, let's expand the delegated
// membership and return the list of members
role.setRoleMembers(getDelegatedRoleMembers(con, domainName, role.getTrust(), roleName));
// still populate the members for old clients
role.setMembers(ZMSUtils.convertRoleMembersToMembers(role.getRoleMembers()));
}
}
return role;
}
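/**
 * Expands a delegated role by scanning the trust domain's policies for
 * matching assume_role assertions and collecting the members of all
 * roles that satisfy those assertions, skipping duplicates
 **/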
List<RoleMember> getDelegatedRoleMembers(ObjectStoreConnection con, final String domainName,
final String trustDomain, final String roleName) {
// verify that the domain and trust domain are not the same
if (domainName.equals(trustDomain)) {
return null;
}
// retrieve our trust domain
AthenzDomain domain = null;
try {
domain = getAthenzDomain(con, trustDomain);
} catch (ResourceException ex) {
LOG.error("unable to fetch domain {}: {}", trustDomain, ex.getMessage());
}
if (domain == null) {
return null;
}
// we need to use a map keyed by member name since we
// might be matching multiple assertions and we want to
// automatically skip any duplicate members
Map<String, RoleMember> roleMembers = new HashMap<>();
// generate our full role name
String fullRoleName = ZMSUtils.roleResourceName(domainName, roleName);
// iterate through all policies to see which one has the
// assume_role assertion for the given role
for (Policy policy : domain.getPolicies()) {
List<Assertion> assertions = policy.getAssertions();
if (assertions == null) {
continue;
}
for (Assertion assertion : assertions) {
if (!ZMSUtils.assumeRoleResourceMatch(fullRoleName, assertion)) {
continue;
}
String rolePattern = StringUtils.patternFromGlob(assertion.getRole());
for (Role role : domain.getRoles()) {
// make sure we have members before trying to match the name
List<RoleMember> members = role.getRoleMembers();
if (members == null || members.isEmpty()) {
continue;
}
if (!role.getName().matches(rolePattern)) {
continue;
}
for (RoleMember member : members) {
String memberName = member.getMemberName();
if (!roleMembers.containsKey(memberName)) {
roleMembers.put(memberName, member);
}
}
}
}
}
return new ArrayList<>(roleMembers.values());
}
Policy getPolicy(String domainName, String policyName) {
try (ObjectStoreConnection con = store.getConnection(true, false)) {
return getPolicy(con, domainName, policyName);
}
}
Assertion getAssertion(String domainName, String policyName, Long assertionId) {
try (ObjectStoreConnection con = store.getConnection(true, false)) {
return con.getAssertion(domainName, policyName, assertionId);
}
}
void executePutAssertion(ResourceContext ctx, String domainName, String policyName,
Assertion assertion, String auditRef, String caller) {
// our exception handling code does the check for retry count
// and throws the exception it had received when the retry
// count reaches 0
for (int retryCount = defaultRetryCount; ; retryCount--) {
try (ObjectStoreConnection con = store.getConnection(true, true)) {
// first verify that auditing requirements are met
checkDomainAuditEnabled(con, domainName, auditRef, caller, getPrincipalName(ctx), AUDIT_TYPE_POLICY);
// now we need to verify our quota check
quotaCheck.checkPolicyAssertionQuota(con, domainName, policyName, caller);
// process our insert assertion. since this is a "single"
// operation, we are not using any transactions.
if (!con.insertAssertion(domainName, policyName, assertion)) {
throw ZMSUtils.requestError(caller + ": unable to insert assertion" +
" to policy: " + policyName, caller);
}
// update our policy and domain time-stamps, and invalidate local cache entry
con.updatePolicyModTimestamp(domainName, policyName);
con.updateDomainModTimestamp(domainName);
cacheStore.invalidate(domainName);
// audit log the request
StringBuilder auditDetails = new StringBuilder(ZMSConsts.STRING_BLDR_SIZE_DEFAULT);
auditLogAssertion(auditDetails, assertion, true);
auditLogRequest(ctx, domainName, auditRef, caller, ZMSConsts.HTTP_PUT,
policyName, auditDetails.toString());
return;
} catch (ResourceException ex) {
// otherwise check if we need to retry or return failure
if (!shouldRetryOperation(ex, retryCount)) {
throw ex;
}
}
}
}
void executeDeleteAssertion(ResourceContext ctx, String domainName, String policyName,
Long assertionId, String auditRef, String caller) {
// our exception handling code does the check for retry count
// and throws the exception it had received when the retry
// count reaches 0
for (int retryCount = defaultRetryCount; ; retryCount--) {
try (ObjectStoreConnection con = store.getConnection(true, true)) {
// first verify that auditing requirements are met
checkDomainAuditEnabled(con, domainName, auditRef, caller, getPrincipalName(ctx), AUDIT_TYPE_POLICY);
// process our delete assertion. since this is a "single"
// operation, we are not using any transactions.
if (!con.deleteAssertion(domainName, policyName, assertionId)) {
throw ZMSUtils.requestError(caller + ": unable to delete assertion: " +
assertionId + " from policy: " + policyName, caller);
}
// update our policy and domain time-stamps, and invalidate local cache entry
con.updatePolicyModTimestamp(domainName, policyName);
con.updateDomainModTimestamp(domainName);
cacheStore.invalidate(domainName);
// audit log the request
final String auditDetails = "{\"policy\": \"" + policyName +
"\", \"assertionId\": \"" + assertionId + "\"}";
auditLogRequest(ctx, domainName, auditRef, caller, ZMSConsts.HTTP_DELETE,
policyName, auditDetails);
return;
} catch (ResourceException ex) {
// otherwise check if we need to retry or return failure
if (!shouldRetryOperation(ex, retryCount)) {
throw ex;
}
}
}
}
List<String> listEntities(String domainName) {
try (ObjectStoreConnection con = store.getConnection(true, false)) {
return con.listEntities(domainName);
}
}
Entity getEntity(String domainName, String entityName) {
try (ObjectStoreConnection con = store.getConnection(true, false)) {
return con.getEntity(domainName, entityName);
}
}
Policy getPolicy(ObjectStoreConnection con, String domainName, String policyName) {
Policy policy = con.getPolicy(domainName, policyName);
if (policy != null) {
policy.setAssertions(con.listAssertions(domainName, policyName));
}
return policy;
}
List<String> listPolicies(String domainName) {
try (ObjectStoreConnection con = store.getConnection(true, false)) {
return con.listPolicies(domainName, null);
}
}
List<String> listServiceIdentities(String domainName) {
try (ObjectStoreConnection con = store.getConnection(true, false)) {
return con.listServiceIdentities(domainName);
}
}
void executePutDomainMeta(ResourceContext ctx, String domainName, DomainMeta meta,
final String systemAttribute, boolean deleteAllowed, String auditRef, String caller) {
// our exception handling code does the check for retry count
// and throws the exception it had received when the retry
// count reaches 0
for (int retryCount = defaultRetryCount; ; retryCount--) {
try (ObjectStoreConnection con = store.getConnection(false, true)) {
Domain domain = con.getDomain(domainName);
if (domain == null) {
con.rollbackChanges();
throw ZMSUtils.notFoundError(caller + ": Unknown domain: " + domainName, caller);
}
// first verify that auditing requirements are met
checkDomainAuditEnabled(con, domain, auditRef, caller, getPrincipalName(ctx), AUDIT_TYPE_DOMAIN);
// now process the request. first we're going to make a
// copy of our domain
Domain updatedDomain = new Domain()
.setName(domain.getName())
.setEnabled(domain.getEnabled())
.setId(domain.getId())
.setAuditEnabled(domain.getAuditEnabled())
.setDescription(domain.getDescription())
.setOrg(domain.getOrg())
.setApplicationId(domain.getApplicationId())
.setAccount(domain.getAccount())
.setYpmId(domain.getYpmId())
.setCertDnsDomain(domain.getCertDnsDomain())
.setMemberExpiryDays(domain.getMemberExpiryDays())
.setServiceExpiryDays(domain.getServiceExpiryDays())
.setTokenExpiryMins(domain.getTokenExpiryMins())
.setRoleCertExpiryMins(domain.getRoleCertExpiryMins())
.setServiceCertExpiryMins(domain.getServiceCertExpiryMins())
.setSignAlgorithm(domain.getSignAlgorithm());
// then we're going to apply the updated fields
// from the given object
if (systemAttribute != null) {
updateSystemMetaFields(updatedDomain, systemAttribute, deleteAllowed, meta);
} else {
updateDomainMetaFields(updatedDomain, meta);
}
con.updateDomain(updatedDomain);
con.commitChanges();
cacheStore.invalidate(domainName);
// audit log the request
StringBuilder auditDetails = new StringBuilder(ZMSConsts.STRING_BLDR_SIZE_DEFAULT);
auditLogDomain(auditDetails, updatedDomain);
auditLogRequest(ctx, domainName, auditRef, caller, ZMSConsts.HTTP_PUT,
domainName, auditDetails.toString());
// if the domain member expiry date has changed then we're going
// to process all the members in the domain and update the expiration
// date accordingly
updateDomainMembersExpiration(ctx, con, domain, updatedDomain, auditRef, caller);
return;
} catch (ResourceException ex) {
if (!shouldRetryOperation(ex, retryCount)) {
throw ex;
}
}
}
}
void updateDomainMembersExpiration(ResourceContext ctx, ObjectStoreConnection con, Domain domain,
Domain updatedDomain, String auditRef, String caller) {
// we only need to process the domain role members if the new expiration
// is more restrictive than what we had before
boolean userMemberExpiryDayReduced = memberExpiryDaysReduced(domain.getMemberExpiryDays(),
updatedDomain.getMemberExpiryDays());
boolean serviceMemberExpiryDayReduced = memberExpiryDaysReduced(domain.getServiceExpiryDays(),
updatedDomain.getServiceExpiryDays());
if (!userMemberExpiryDayReduced && !serviceMemberExpiryDayReduced) {
return;
}
AthenzDomain athenzDomain;
try {
athenzDomain = getAthenzDomain(con, domain.getName());
} catch (ResourceException ex) {
LOG.error("unable to fetch domain {}: {}", domain.getName(), ex.getMessage());
return;
}
long userMillis = userMemberExpiryDayReduced ? System.currentTimeMillis()
+ TimeUnit.MILLISECONDS.convert(updatedDomain.getMemberExpiryDays(), TimeUnit.DAYS) : 0;
long serviceMillis = serviceMemberExpiryDayReduced ? System.currentTimeMillis()
+ TimeUnit.MILLISECONDS.convert(updatedDomain.getServiceExpiryDays(), TimeUnit.DAYS) : 0;
Timestamp userExpiration = Timestamp.fromMillis(userMillis);
Timestamp serviceExpiration = Timestamp.fromMillis(serviceMillis);
boolean bDataChanged = false;
final String principal = getPrincipalName(ctx);
for (Role role : athenzDomain.getRoles()) {
// if the role already has a specific expiry date set then we
// will automatically skip this role
if (role.getMemberExpiryDays() != null || role.getServiceExpiryDays() != null) {
continue;
}
// if it's a delegated role then we have nothing to do
if (role.getTrust() != null && !role.getTrust().isEmpty()) {
continue;
}
// if no role members, then there is nothing to do
final List<RoleMember> roleMembers = role.getRoleMembers();
if (roleMembers == null || roleMembers.isEmpty()) {
continue;
}
// process our role members and if there were any changes processed then update
// our role and domain time-stamps, and invalidate local cache entry
final String roleName = AthenzUtils.extractRoleName(role.getName());
if (setRoleMemberExpiration(ctx, con, roleMembers, userExpiration, userMillis, serviceExpiration,
serviceMillis, domain.getName(), roleName, principal, auditRef, caller)) {
con.updateRoleModTimestamp(domain.getName(), roleName);
bDataChanged = true;
}
}
// update our role and domain time-stamps, and invalidate local cache entry
if (bDataChanged) {
con.updateDomainModTimestamp(domain.getName());
cacheStore.invalidate(domain.getName());
}
}
void updateDomainMetaFields(Domain domain, DomainMeta meta) {
domain.setApplicationId(meta.getApplicationId());
domain.setDescription(meta.getDescription());
if (meta.getMemberExpiryDays() != null) {
domain.setMemberExpiryDays(meta.getMemberExpiryDays());
}
if (meta.getServiceExpiryDays() != null) {
domain.setServiceExpiryDays(meta.getServiceExpiryDays());
}
if (meta.getRoleCertExpiryMins() != null) {
domain.setRoleCertExpiryMins(meta.getRoleCertExpiryMins());
}
if (meta.getServiceCertExpiryMins() != null) {
domain.setServiceCertExpiryMins(meta.getServiceCertExpiryMins());
}
if (meta.getTokenExpiryMins() != null) {
domain.setTokenExpiryMins(meta.getTokenExpiryMins());
}
if (meta.getSignAlgorithm() != null) {
domain.setSignAlgorithm(meta.getSignAlgorithm());
}
}
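/**
 * Determines whether a system meta attribute can be reset or deleted.
 * The operation is allowed if the caller is authorized for deletes, the
 * old value is not set, or the new value is identical to the old one
 **/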
boolean isDeleteSystemMetaAllowed(boolean deleteAllowed, Object oldValue, Object newValue) {
// if authorized or old value is not set, then there is
// no need to check any value
if (deleteAllowed || oldValue == null) {
return true;
}
// since our old value is not null we will only
// allow the operation if the new value is identical
return newValue != null && oldValue.equals(newValue);
}
void updateSystemMetaFields(Domain domain, final String attribute, boolean deleteAllowed,
DomainMeta meta) {
final String caller = "putdomainsystemmeta";
// system attributes we'll only set if they're available
// in the given object
switch (attribute) {
case ZMSConsts.SYSTEM_META_ACCOUNT:
if (!isDeleteSystemMetaAllowed(deleteAllowed, domain.getAccount(), meta.getAccount())) {
throw ZMSUtils.forbiddenError("unauthorized to reset system meta attribute: " + attribute, caller);
}
domain.setAccount(meta.getAccount());
break;
case ZMSConsts.SYSTEM_META_PRODUCT_ID:
if (!isDeleteSystemMetaAllowed(deleteAllowed, domain.getYpmId(), meta.getYpmId())) {
throw ZMSUtils.forbiddenError("unauthorized to reset system meta attribute: " + attribute, caller);
}
domain.setYpmId(meta.getYpmId());
break;
case ZMSConsts.SYSTEM_META_CERT_DNS_DOMAIN:
if (!isDeleteSystemMetaAllowed(deleteAllowed, domain.getCertDnsDomain(), meta.getCertDnsDomain())) {
throw ZMSUtils.forbiddenError("unauthorized to reset system meta attribute: " + attribute, caller);
}
domain.setCertDnsDomain(meta.getCertDnsDomain());
break;
case ZMSConsts.SYSTEM_META_ORG:
if (!isDeleteSystemMetaAllowed(deleteAllowed, domain.getOrg(), meta.getOrg())) {
throw ZMSUtils.forbiddenError("unauthorized to reset system meta attribute: " + attribute, caller);
}
domain.setOrg(meta.getOrg());
break;
case ZMSConsts.SYSTEM_META_AUDIT_ENABLED:
domain.setAuditEnabled(meta.getAuditEnabled());
break;
case ZMSConsts.SYSTEM_META_ENABLED:
domain.setEnabled(meta.getEnabled());
break;
default:
throw ZMSUtils.requestError("unknown system meta attribute: " + attribute, caller);
}
}
void updateRoleSystemMetaFields(Role role, final String attribute, boolean deleteAllowed, RoleSystemMeta meta) {
final String caller = "putrolesystemmeta";
// system attributes we'll only set if they're available
// in the given object
switch (attribute) {
case ZMSConsts.SYSTEM_META_AUDIT_ENABLED:
role.setAuditEnabled(meta.getAuditEnabled());
break;
default:
throw ZMSUtils.requestError("unknown role system meta attribute: " + attribute, caller);
}
}
void updateServiceIdentitySystemMetaFields(ServiceIdentity service, final String attribute,
boolean deleteAllowed, ServiceIdentitySystemMeta meta) {
final String caller = "putserviceidentitysystemmeta";
// system attributes we'll only set if they're available
// in the given object
switch (attribute) {
case ZMSConsts.SYSTEM_META_PROVIDER_ENDPOINT:
service.setProviderEndpoint(meta.getProviderEndpoint());
break;
default:
throw ZMSUtils.requestError("unknown service system meta attribute: " + attribute, caller);
}
}
void executePutDomainTemplate(ResourceContext ctx, String domainName, DomainTemplate domainTemplate,
String auditRef, String caller) {
// our exception handling code does the check for retry count
// and throws the exception it had received when the retry
// count reaches 0
for (int retryCount = defaultRetryCount; ; retryCount--) {
try (ObjectStoreConnection con = store.getConnection(false, true)) {
// first verify that auditing requirements are met
checkDomainAuditEnabled(con, domainName, auditRef, caller, getPrincipalName(ctx), AUDIT_TYPE_TEMPLATE);
// go through our list of templates and add the specified
// roles and policies to our domain
StringBuilder auditDetails = new StringBuilder(ZMSConsts.STRING_BLDR_SIZE_DEFAULT);
auditDetails.append("{\"add-templates\": ");
boolean firstEntry = true;
for (String templateName : domainTemplate.getTemplateNames()) {
firstEntry = auditLogSeparator(auditDetails, firstEntry);
if (!addSolutionTemplate(con, domainName, templateName, getPrincipalName(ctx),
domainTemplate.getParams(), auditRef, auditDetails)) {
con.rollbackChanges();
throw ZMSUtils.internalServerError("unable to put domain templates: " + domainName, caller);
}
}
auditDetails.append("}");
// update our domain time-stamp and save changes
saveChanges(con, domainName);
// audit log the request
auditLogRequest(ctx, domainName, auditRef, caller, ZMSConsts.HTTP_PUT,
domainName, auditDetails.toString());
return;
} catch (ResourceException ex) {
if (!shouldRetryOperation(ex, retryCount)) {
throw ex;
}
}
}
}
void executeDeleteDomainTemplate(ResourceContext ctx, String domainName, String templateName,
String auditRef, String caller) {
// our exception handling code does the check for retry count
// and throws the exception it had received when the retry
// count reaches 0
for (int retryCount = defaultRetryCount; ; retryCount--) {
try (ObjectStoreConnection con = store.getConnection(false, true)) {
// first verify that auditing requirements are met
checkDomainAuditEnabled(con, domainName, auditRef, caller, getPrincipalName(ctx), AUDIT_TYPE_TEMPLATE);
// go through our list of templates and delete the specified
// roles and policies from our domain
StringBuilder auditDetails = new StringBuilder(ZMSConsts.STRING_BLDR_SIZE_DEFAULT);
auditDetails.append("{\"templates\": ");
Template template = zmsConfig.getServerSolutionTemplates().get(templateName);
if (!deleteSolutionTemplate(con, domainName, templateName, template, auditDetails)) {
con.rollbackChanges();
throw ZMSUtils.internalServerError("unable to delete domain template: " + domainName, caller);
}
auditDetails.append("}");
// update our domain time-stamp and save changes
saveChanges(con, domainName);
// audit log the request
auditLogRequest(ctx, domainName, auditRef, caller, ZMSConsts.HTTP_DELETE,
domainName, auditDetails.toString());
return;
} catch (ResourceException ex) {
if (!shouldRetryOperation(ex, retryCount)) {
throw ex;
}
}
}
}
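/**
 * Applies the given solution template to the domain by processing all
 * template roles, policies and services, carrying out any parameter
 * substitutions, and registering the template name for the domain
 **/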
boolean addSolutionTemplate(ObjectStoreConnection con, String domainName, String templateName,
String admin, List<TemplateParam> templateParams, String auditRef, StringBuilder auditDetails) {
auditDetails.append("{\"name\": \"").append(templateName).append('\"');
// we have already verified that our template is valid but
// we'll just double check to make sure it's not null
Template template = zmsConfig.getServerSolutionTemplates().get(templateName);
if (template == null) {
auditDetails.append("}");
return true;
}
boolean firstEntry = true;
// iterate through roles in the list.
// When adding a template, if the role does not exist in our domain
// then insert it otherwise only apply the changes to the member list.
List<Role> templateRoles = template.getRoles();
if (templateRoles != null) {
for (Role role : templateRoles) {
Role templateRole = updateTemplateRole(role, domainName, templateParams);
String roleName = ZMSUtils.removeDomainPrefix(templateRole.getName(),
domainName, ROLE_PREFIX);
// retrieve our original role
Role originalRole = getRole(con, domainName, roleName, false, false, false);
// now process the request
firstEntry = auditLogSeparator(auditDetails, firstEntry);
auditDetails.append(" \"add-role\": ");
if (!processRole(con, originalRole, domainName, roleName, templateRole,
admin, auditRef, true, auditDetails)) {
return false;
}
}
}
// iterate through policies in the list.
// When adding a template, if the policy does not exist in our domain
// then insert it otherwise only apply the changes to the assertions
List<Policy> templatePolicies = template.getPolicies();
if (templatePolicies != null) {
for (Policy policy : templatePolicies) {
Policy templatePolicy = updateTemplatePolicy(policy, domainName, templateParams);
String policyName = ZMSUtils.removeDomainPrefix(templatePolicy.getName(),
domainName, POLICY_PREFIX);
// retrieve our original policy
Policy originalPolicy = getPolicy(con, domainName, policyName);
// now process the request
firstEntry = auditLogSeparator(auditDetails, firstEntry);
auditDetails.append(" \"add-policy\": ");
if (!processPolicy(con, originalPolicy, domainName, policyName, templatePolicy,
true, auditDetails)) {
return false;
}
}
}
// iterate through service identities in the list.
// When adding a template, if the service identity does not exist in our domain
// then insert it otherwise only apply the changes
List<ServiceIdentity> templateServiceIdentities = template.getServices();
if (templateServiceIdentities != null) {
for (ServiceIdentity serviceIdentity : templateServiceIdentities) {
ServiceIdentity templateServiceIdentity = updateTemplateServiceIdentity(
serviceIdentity, domainName, templateParams);
String serviceIdentityName = ZMSUtils.removeDomainPrefixForService(
templateServiceIdentity.getName(), domainName);
// retrieve our original service
ServiceIdentity originalServiceIdentity = getServiceIdentity(con, domainName,
serviceIdentityName, false);
// now process the request
firstEntry = auditLogSeparator(auditDetails, firstEntry);
auditDetails.append(" \"add-service\": ");
if (!processServiceIdentity(con, originalServiceIdentity, domainName,
serviceIdentityName, templateServiceIdentity, true, auditDetails)) {
return false;
}
}
}
// if adding a template, only add if it is not in our current list
// check to see if the template is already listed for the domain
List<String> currentTemplateList = con.listDomainTemplates(domainName);
if (!currentTemplateList.contains(templateName)) {
con.insertDomainTemplate(domainName, templateName, null);
}
auditDetails.append("}");
return true;
}
boolean deleteSolutionTemplate(ObjectStoreConnection con, String domainName, String templateName,
Template template, StringBuilder auditDetails) {
// currently there is no support for dynamic templates since the
// DELETE request has no payload and we can't pass our parameters
auditDetails.append("{\"name\": \"").append(templateName).append('\"');
// we have already verified that our template is valid but
// we'll just double check to make sure it's not null
if (template == null) {
auditDetails.append("}");
return true;
}
boolean firstEntry = true;
// iterate through roles in the list and delete the role
List<Role> templateRoles = template.getRoles();
if (templateRoles != null) {
for (Role role : templateRoles) {
String roleName = ZMSUtils.removeDomainPrefix(role.getName(),
TEMPLATE_DOMAIN_NAME, ROLE_PREFIX);
con.deleteRole(domainName, roleName);
firstEntry = auditLogSeparator(auditDetails, firstEntry);
auditDetails.append(" \"delete-role\": \"").append(roleName).append('\"');
}
}
// iterate through policies in the list and delete the policy
List<Policy> templatePolicies = template.getPolicies();
if (templatePolicies != null) {
for (Policy policy : templatePolicies) {
String policyName = ZMSUtils.removeDomainPrefix(policy.getName(),
TEMPLATE_DOMAIN_NAME, POLICY_PREFIX);
con.deletePolicy(domainName, policyName);
firstEntry = auditLogSeparator(auditDetails, firstEntry);
auditDetails.append(" \"delete-policy\": \"").append(policyName).append('\"');
}
}
// iterate through services in the list and delete the service
List<ServiceIdentity> templateServices = template.getServices();
if (templateServices != null) {
for (ServiceIdentity serviceIdentity : templateServices) {
String serviceName = ZMSUtils.removeDomainPrefixForService(serviceIdentity.getName(),
TEMPLATE_DOMAIN_NAME);
con.deleteServiceIdentity(domainName, serviceName);
firstEntry = auditLogSeparator(auditDetails, firstEntry);
auditDetails.append(" \"delete-service\": \"").append(serviceName).append('\"');
}
}
// delete the template from the current list
con.deleteDomainTemplate(domainName, templateName, null);
auditDetails.append("}");
return true;
}
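/**
 * Generates a domain-specific copy of the template role by substituting
 * the template domain name and any template parameters in the role
 * name and its member names
 **/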
Role updateTemplateRole(Role role, String domainName, List<TemplateParam> params) {
// first process our given role name and carry out any
// requested substitutions
String templateRoleName = role.getName().replace(TEMPLATE_DOMAIN_NAME, domainName);
if (params != null) {
for (TemplateParam param : params) {
final String paramKey = "_" + param.getName() + "_";
templateRoleName = templateRoleName.replace(paramKey, param.getValue());
}
}
Role templateRole = new Role()
.setName(templateRoleName)
.setTrust(role.getTrust());
List<RoleMember> roleMembers = role.getRoleMembers();
List<RoleMember> newMembers = new ArrayList<>();
if (roleMembers != null && !roleMembers.isEmpty()) {
for (RoleMember roleMember : roleMembers) {
RoleMember newRoleMember = new RoleMember();
// process our role members for any requested substitutions
String memberName = roleMember.getMemberName().replace(TEMPLATE_DOMAIN_NAME, domainName);
if (params != null) {
for (TemplateParam param : params) {
final String paramKey = "_" + param.getName() + "_";
memberName = memberName.replace(paramKey, param.getValue());
}
}
newRoleMember.setMemberName(memberName);
newRoleMember.setExpiration(roleMember.getExpiration());
newMembers.add(newRoleMember);
}
}
templateRole.setRoleMembers(newMembers);
return templateRole;
}
Policy updateTemplatePolicy(Policy policy, String domainName, List<TemplateParam> params) {
// first process our given policy name and carry out any
// requested substitutions
String templatePolicyName = policy.getName().replace(TEMPLATE_DOMAIN_NAME, domainName);
if (params != null) {
for (TemplateParam param : params) {
final String paramKey = "_" + param.getName() + "_";
templatePolicyName = templatePolicyName.replace(paramKey, param.getValue());
}
}
Policy templatePolicy = new Policy().setName(templatePolicyName);
List<Assertion> assertions = policy.getAssertions();
List<Assertion> newAssertions = new ArrayList<>();
if (assertions != null && !assertions.isEmpty()) {
for (Assertion assertion : assertions) {
Assertion newAssertion = new Assertion();
newAssertion.setAction(assertion.getAction());
newAssertion.setEffect(assertion.getEffect());
// process our assertion resource and role for any requested substitutions
String resource = assertion.getResource().replace(TEMPLATE_DOMAIN_NAME, domainName);
String role = assertion.getRole().replace(TEMPLATE_DOMAIN_NAME, domainName);
if (params != null) {
for (TemplateParam param : params) {
final String paramKey = "_" + param.getName() + "_";
resource = resource.replace(paramKey, param.getValue());
role = role.replace(paramKey, param.getValue());
}
}
newAssertion.setResource(resource);
newAssertion.setRole(role);
newAssertions.add(newAssertion);
}
}
templatePolicy.setAssertions(newAssertions);
return templatePolicy;
}
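/**
 * Generates a domain-specific copy of the template service identity by
 * substituting the template domain name and any template parameters in
 * the service name, and copying its attributes, public keys and hosts
 **/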
ServiceIdentity updateTemplateServiceIdentity(ServiceIdentity serviceIdentity,
String domainName, List<TemplateParam> params) {
String templateServiceName = serviceIdentity.getName().replace(TEMPLATE_DOMAIN_NAME, domainName);
if (params != null) {
for (TemplateParam param : params) {
final String paramKey = "_" + param.getName() + "_";
templateServiceName = templateServiceName.replace(paramKey, param.getValue());
}
}
ServiceIdentity templateServiceIdentity = new ServiceIdentity().setName(templateServiceName);
templateServiceIdentity.setDescription(serviceIdentity.getDescription());
templateServiceIdentity.setExecutable(serviceIdentity.getExecutable());
templateServiceIdentity.setGroup(serviceIdentity.getGroup());
templateServiceIdentity.setUser(serviceIdentity.getUser());
templateServiceIdentity.setProviderEndpoint(serviceIdentity.getProviderEndpoint());
List<PublicKeyEntry> publicKeyEntries = serviceIdentity.getPublicKeys();
List<PublicKeyEntry> newPublicKeyEntries = new ArrayList<>();
if (publicKeyEntries != null && !publicKeyEntries.isEmpty()) {
for (PublicKeyEntry publicKeyEntry : publicKeyEntries) {
PublicKeyEntry newPublicKeyEntry = new PublicKeyEntry();
newPublicKeyEntry.setId(publicKeyEntry.getId());
newPublicKeyEntry.setKey(publicKeyEntry.getKey());
newPublicKeyEntries.add(newPublicKeyEntry);
}
}
templateServiceIdentity.setPublicKeys(newPublicKeyEntries);
List<String> hosts = serviceIdentity.getHosts();
if (hosts != null) {
templateServiceIdentity.setHosts(new ArrayList<>(hosts));
}
return templateServiceIdentity;
}
void setupTenantAdminPolicy(String tenantDomain, String provSvcDomain,
String provSvcName, String auditRef, String caller) {
// our exception handling code does the check for retry count
// and throws the exception it had received when the retry
// count reaches 0
for (int retryCount = defaultRetryCount; ; retryCount--) {
try (ObjectStoreConnection con = store.getConnection(false, true)) {
// first verify that auditing requirements are met
checkDomainAuditEnabled(con, tenantDomain, auditRef, caller, provSvcDomain + "." + provSvcName, AUDIT_TYPE_TENANCY);
String domainAdminRole = ZMSUtils.roleResourceName(tenantDomain, ZMSConsts.ADMIN_ROLE_NAME);
String serviceRoleResourceName = ZMSUtils.getTrustedResourceGroupRolePrefix(provSvcDomain,
provSvcName, tenantDomain, null) + ZMSConsts.ADMIN_ROLE_NAME;
// our tenant admin role/policy name
final String tenancyResource = "tenancy." + provSvcDomain + '.' + provSvcName;
String adminName = tenancyResource + ".admin";
String tenantAdminRole = ZMSUtils.roleResourceName(tenantDomain, adminName);
// tenant admin role - if it already exists then we skip it
// by default it has no members.
if (con.getRole(tenantDomain, adminName) == null) {
con.insertRole(tenantDomain, new Role().setName(tenantAdminRole));
}
// tenant admin policy - check to see if this already exists. If it does
// then we don't have anything to do
if (con.getPolicy(tenantDomain, adminName) == null) {
Policy adminPolicy = new Policy().setName(ZMSUtils.policyResourceName(tenantDomain, adminName));
con.insertPolicy(tenantDomain, adminPolicy);
// we are going to create 2 assertions - one for the domain admin role
// and another for the tenant admin role
Assertion assertion = new Assertion().setRole(domainAdminRole)
.setResource(serviceRoleResourceName).setAction(ZMSConsts.ACTION_ASSUME_ROLE)
.setEffect(AssertionEffect.ALLOW);
con.insertAssertion(tenantDomain, adminName, assertion);
assertion = new Assertion().setRole(tenantAdminRole)
.setResource(serviceRoleResourceName).setAction(ZMSConsts.ACTION_ASSUME_ROLE)
.setEffect(AssertionEffect.ALLOW);
con.insertAssertion(tenantDomain, adminName, assertion);
// the tenant admin role must have the capability to provision
// new resource groups in the domain which requires update
// action capability on resource tenancy.<prov_domain>.<prov_svc>
String tenantResourceName = tenantDomain + ":" + tenancyResource;
assertion = new Assertion().setRole(tenantAdminRole)
.setResource(tenantResourceName).setAction(ZMSConsts.ACTION_UPDATE)
.setEffect(AssertionEffect.ALLOW);
con.insertAssertion(tenantDomain, adminName, assertion);
}
// update our domain time-stamp and save changes
saveChanges(con, tenantDomain);
return;
} catch (ResourceException ex) {
if (!shouldRetryOperation(ex, retryCount)) {
throw ex;
}
}
}
}
void executePutTenantRoles(ResourceContext ctx, String provSvcDomain, String provSvcName, String tenantDomain,
String resourceGroup, List<TenantRoleAction> roles, String auditRef, String caller) {
// our exception handling code does the check for retry count
// and throws the exception it had received when the retry
// count reaches 0
for (int retryCount = defaultRetryCount; ; retryCount--) {
try (ObjectStoreConnection con = store.getConnection(false, true)) {
// first verify that auditing requirements are met
checkDomainAuditEnabled(con, provSvcDomain, auditRef, caller, getPrincipalName(ctx), AUDIT_TYPE_TENANCY);
String trustedRolePrefix = ZMSUtils.getTrustedResourceGroupRolePrefix(provSvcDomain,
provSvcName, tenantDomain, resourceGroup);
StringBuilder auditDetails = new StringBuilder(ZMSConsts.STRING_BLDR_SIZE_DEFAULT);
auditDetails.append("{\"put-tenant-roles\": [");
boolean firstEntry = true;
for (TenantRoleAction ra : roles) {
String tenantRole = ra.getRole();
String tenantAction = ra.getAction();
String trustedRole = trustedRolePrefix + tenantRole;
String trustedName = trustedRole.substring((provSvcDomain + AuthorityConsts.ROLE_SEP).length());
Role role = new Role().setName(trustedRole).setTrust(tenantDomain);
if (LOG.isInfoEnabled()) {
LOG.info(caller + ": add trusted Role to domain " + provSvcDomain +
": " + trustedRole + " -> " + role);
}
// retrieve our original role in case one exists
Role originalRole = getRole(con, provSvcDomain, trustedName, false, false, false);
// now process the request
firstEntry = auditLogSeparator(auditDetails, firstEntry);
auditDetails.append("{\"role\": ");
if (!processRole(con, originalRole, provSvcDomain, trustedName, role,
getPrincipalName(ctx), auditRef, false, auditDetails)) {
con.rollbackChanges();
throw ZMSUtils.internalServerError("unable to put role: " + trustedRole, caller);
}
String policyResourceName = ZMSUtils.policyResourceName(provSvcDomain, trustedName);
final String resourceName = provSvcDomain + ":service." +
ZMSUtils.getTenantResourceGroupRolePrefix(provSvcName, tenantDomain, resourceGroup) + '*';
List<Assertion> assertions = Collections.singletonList(
new Assertion().setRole(trustedRole)
.setResource(resourceName)
.setAction(tenantAction));
Policy policy = new Policy().setName(policyResourceName).setAssertions(assertions);
if (LOG.isInfoEnabled()) {
LOG.info(caller + ": add trust policy to domain " + provSvcDomain +
": " + trustedRole + " -> " + policy);
}
// retrieve our original policy
Policy originalPolicy = getPolicy(con, provSvcDomain, trustedName);
// now process the request
auditDetails.append(", \"policy\": ");
if (!processPolicy(con, originalPolicy, provSvcDomain, trustedName, policy, false, auditDetails)) {
con.rollbackChanges();
throw ZMSUtils.internalServerError("unable to put policy: " + policy.getName(), caller);
}
auditDetails.append('}');
}
// update our domain time-stamp and save changes
saveChanges(con, provSvcDomain);
// audit log the request
auditLogRequest(ctx, provSvcDomain, auditRef, caller, ZMSConsts.HTTP_PUT,
tenantDomain, auditDetails.toString());
return;
} catch (ResourceException ex) {
if (!shouldRetryOperation(ex, retryCount)) {
throw ex;
}
}
}
}
void addAssumeRolePolicy(ObjectStoreConnection con, String rolePrefix,
String trustedRolePrefix, String role, List<RoleMember> roleMembers,
String tenantDomain, String admin, String auditRef,
StringBuilder auditDetails, String caller) {
// first create the role in the domain. We're going to create it
// only if the role does not already exist
String roleName = rolePrefix + role;
String roleResourceName = ZMSUtils.roleResourceName(tenantDomain, roleName);
// retrieve our original role in case one exists
Role originalRole = getRole(con, tenantDomain, roleName, false, false, false);
// we need to add the original role members to the new one
if (originalRole != null && originalRole.getRoleMembers() != null) {
roleMembers.addAll(originalRole.getRoleMembers());
}
// now process the request
Role roleObj = new Role().setName(roleResourceName).setRoleMembers(roleMembers);
auditDetails.append("{\"role\": ");
if (!processRole(con, originalRole, tenantDomain, roleName, roleObj,
admin, auditRef, false, auditDetails)) {
con.rollbackChanges();
throw ZMSUtils.internalServerError("unable to put role: " + roleName, caller);
}
// now create the corresponding policy. We're going to create it
// only if the policy does not exist; otherwise we'll just
// add a new assertion
String policyName = "tenancy." + roleName;
String policyResourceName = ZMSUtils.policyResourceName(tenantDomain, policyName);
String serviceRoleResourceName = trustedRolePrefix + role;
Assertion assertion = new Assertion().setRole(roleResourceName)
.setResource(serviceRoleResourceName).setAction(ZMSConsts.ACTION_ASSUME_ROLE)
.setEffect(AssertionEffect.ALLOW);
if (LOG.isInfoEnabled()) {
LOG.info("executePutProviderRoles: ---- ASSUME_ROLE policyName is " + policyName);
}
// retrieve our original policy
Policy originalPolicy = getPolicy(con, tenantDomain, policyName);
// we need to add the original policy assertions to the new one
List<Assertion> newAssertions = new ArrayList<>();
if (originalPolicy != null && originalPolicy.getAssertions() != null) {
newAssertions.addAll(originalPolicy.getAssertions());
}
// if our new assertion is not already in the list then we'll add it
if (!newAssertions.contains(assertion)) {
newAssertions.add(assertion);
}
// now process the request
Policy assumeRolePolicy = new Policy().setName(policyResourceName).setAssertions(newAssertions);
auditDetails.append(", \"policy\": ");
if (!processPolicy(con, originalPolicy, tenantDomain, policyName, assumeRolePolicy,
false, auditDetails)) {
con.rollbackChanges();
throw ZMSUtils.internalServerError("unable to put policy: " +
assumeRolePolicy.getName(), caller);
}
auditDetails.append('}');
}
void executePutProviderRoles(ResourceContext ctx, String tenantDomain, String provSvcDomain,
String provSvcName, String resourceGroup, List<String> roles, String auditRef, String caller) {
// our exception handling code does the check for retry count
// and throws the exception it had received when the retry
// count reaches 0
for (int retryCount = defaultRetryCount; ; retryCount--) {
try (ObjectStoreConnection con = store.getConnection(false, true)) {
// first verify that auditing requirements are met
checkDomainAuditEnabled(con, tenantDomain, auditRef, caller, getPrincipalName(ctx), AUDIT_TYPE_TENANCY);
// we're going to create a separate role for each one of the tenant roles returned
// based on its action and set the caller as a member in each role
final String principalName = getPrincipalName(ctx);
// now set up the roles and policies for all the provider roles returned.
final String rolePrefix = ZMSUtils.getProviderResourceGroupRolePrefix(provSvcDomain,
provSvcName, resourceGroup);
final String trustedRolePrefix = ZMSUtils.getTrustedResourceGroupRolePrefix(provSvcDomain,
provSvcName, tenantDomain, resourceGroup);
StringBuilder auditDetails = new StringBuilder(ZMSConsts.STRING_BLDR_SIZE_DEFAULT);
auditDetails.append("{\"put-provider-roles\": [");
boolean firstEntry = true;
for (String role : roles) {
// we need to create a new object for each role since the list is updated
// in case the role already has existing members, but we don't want to
// add those members to other roles in our list
List<RoleMember> roleMembers = new ArrayList<>();
if (principalName != null) {
RoleMember roleMember = new RoleMember();
roleMember.setMemberName(principalName);
roleMembers.add(roleMember);
}
role = role.toLowerCase();
if (LOG.isInfoEnabled()) {
LOG.info("executePutProviderRoles: provision ASSUME_ROLE policy for access remote role in "
+ provSvcDomain + "." + provSvcName + ": " + resourceGroup + "." + role);
}
firstEntry = auditLogSeparator(auditDetails, firstEntry);
addAssumeRolePolicy(con, rolePrefix, trustedRolePrefix, role, roleMembers,
tenantDomain, principalName, auditRef, auditDetails, caller);
}
auditDetails.append("]}");
// update our domain time-stamp and save changes
saveChanges(con, tenantDomain);
// audit log the request
auditLogRequest(ctx, tenantDomain, auditRef, caller, ZMSConsts.HTTP_PUT,
provSvcDomain, auditDetails.toString());
return;
} catch (ResourceException ex) {
if (!shouldRetryOperation(ex, retryCount)) {
throw ex;
}
}
}
}
void executeDeleteTenancy(ResourceContext ctx, String tenantDomain, String provSvcDomain,
String provSvcName, String resourceGroup, String auditRef, String caller) {
// create list of policies and delete them from the tenant domain
// have to get all policies that match "tenancy.<provider>.*"
// ex: tenancy.weather.storage.admin
String rnamePrefix = ZMSUtils.getProviderResourceGroupRolePrefix(provSvcDomain, provSvcName,
resourceGroup);
final String pnamePrefix = "tenancy." + rnamePrefix;
// our exception handling code does the check for retry count
// and throws the exception it had received when the retry
// count reaches 0
for (int retryCount = defaultRetryCount; ; retryCount--) {
try (ObjectStoreConnection con = store.getConnection(false, true)) {
// first verify that auditing requirements are met
checkDomainAuditEnabled(con, tenantDomain, auditRef, caller, getPrincipalName(ctx), AUDIT_TYPE_TENANCY);
// first let's process and remove any policies that start with our
// provider prefix
List<String> pnames = con.listPolicies(tenantDomain, null);
for (String pname : pnames) {
if (!validResourceGroupObjectToDelete(pname, pnamePrefix)) {
if (LOG.isDebugEnabled()) {
LOG.debug(caller + ": --ignore policy " + pname);
}
continue;
}
if (LOG.isInfoEnabled()) {
LOG.info(caller + ": --delete policy " + pname);
}
con.deletePolicy(tenantDomain, pname);
}
// now we're going to find any roles that have the provider prefix as
// well but we're going to be careful about removing them. We'll check
// and if we have no more policies referencing them then we'll remove them
List<String> rnames = con.listRoles(tenantDomain);
for (String rname : rnames) {
if (!validResourceGroupObjectToDelete(rname, rnamePrefix)) {
if (LOG.isDebugEnabled()) {
LOG.debug(caller + ": --ignore role " + rname);
}
continue;
}
if (!con.listPolicies(tenantDomain, rname).isEmpty()) {
if (LOG.isDebugEnabled()) {
LOG.debug(caller + ": --ignore role " + rname + " due to active references");
}
continue;
}
if (LOG.isInfoEnabled()) {
LOG.info(caller + ": --delete role " + rname);
}
con.deleteRole(tenantDomain, rname);
}
// update our domain time-stamp and save changes
saveChanges(con, tenantDomain);
// audit log the request
auditLogRequest(ctx, tenantDomain, auditRef, caller, ZMSConsts.HTTP_DELETE,
ZMSUtils.entityResourceName(provSvcDomain, provSvcName), null);
return;
} catch (ResourceException ex) {
if (!shouldRetryOperation(ex, retryCount)) {
throw ex;
}
}
}
}
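// a role/policy object is only safe to delete if it starts with our
// resource group prefix and the remaining suffix is a simple action name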
boolean validResourceGroupObjectToDelete(String name, String prefix) {
if (!name.startsWith(prefix)) {
return false;
}
// the suffix must be the action which should only be
// a simple name, thus it cannot contain any more '.'s;
// otherwise we could make a mistake
// and match substring resource groups - e.g:
// system.engine and system.engine.test
return (name.indexOf('.', prefix.length()) == -1);
}
void executeDeleteTenantRoles(ResourceContext ctx, String provSvcDomain, String provSvcName,
String tenantDomain, String resourceGroup, String auditRef, String caller) {
// look for this tenants roles, ex: storage.tenant.sports.reader
String rolePrefix = ZMSUtils.getTenantResourceGroupRolePrefix(provSvcName, tenantDomain, resourceGroup);
// our exception handling code does the check for retry count
// and throws the exception it had received when the retry
// count reaches 0
for (int retryCount = defaultRetryCount; ; retryCount--) {
try (ObjectStoreConnection con = store.getConnection(false, true)) {
// first verify that auditing requirements are met
checkDomainAuditEnabled(con, provSvcDomain, auditRef, caller, getPrincipalName(ctx), AUDIT_TYPE_TENANCY);
// find roles and policies matching the prefix
List<String> rnames = con.listRoles(provSvcDomain);
StringBuilder auditDetails = new StringBuilder(ZMSConsts.STRING_BLDR_SIZE_DEFAULT);
auditDetails.append("{\"tenant-roles\": [");
boolean firstEntry = true;
for (String rname : rnames) {
if (isTrustRoleForTenant(con, provSvcDomain, rname, rolePrefix,
resourceGroup, tenantDomain)) {
// good, it's exactly what we are looking for
con.deleteRole(provSvcDomain, rname);
con.deletePolicy(provSvcDomain, rname);
firstEntry = auditLogString(auditDetails, rname, firstEntry);
}
}
auditDetails.append("]}");
// update our domain time-stamp and save changes
saveChanges(con, provSvcDomain);
// audit log the request
auditLogRequest(ctx, tenantDomain, auditRef, caller, ZMSConsts.HTTP_DELETE,
provSvcDomain, auditDetails.toString());
return;
} catch (ResourceException ex) {
if (!shouldRetryOperation(ex, retryCount)) {
throw ex;
}
}
}
}
boolean isTrustRoleForTenant(ObjectStoreConnection con, String provSvcDomain, String roleName,
String rolePrefix, String resourceGroup, String tenantDomain) {
// first make sure the role name starts with the given prefix
if (!isTenantRolePrefixMatch(con, roleName, rolePrefix, resourceGroup, tenantDomain)) {
return false;
}
Role role = con.getRole(provSvcDomain, roleName);
if (role == null) {
return false;
}
// ensure it is a trust role for the tenant
String trustDom = role.getTrust();
return trustDom != null && trustDom.equals(tenantDomain);
}
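// convenience overloads that obtain their own read-only store connection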
boolean isTrustRoleForTenant(String provSvcDomain, String roleName, String rolePrefix,
String resourceGroup, String tenantDomain) {
try (ObjectStoreConnection con = store.getConnection(true, false)) {
return isTrustRoleForTenant(con, provSvcDomain, roleName, rolePrefix, resourceGroup, tenantDomain);
}
}
boolean isTenantRolePrefixMatch(String roleName, String rolePrefix, String resourceGroup,
String tenantDomain) {
try (ObjectStoreConnection con = store.getConnection(true, false)) {
return isTenantRolePrefixMatch(con, roleName, rolePrefix, resourceGroup, tenantDomain);
}
}
boolean isTenantRolePrefixMatch(ObjectStoreConnection con, String roleName, String rolePrefix,
String resourceGroup, String tenantDomain) {
if (LOG.isDebugEnabled()) {
LOG.debug("isTenantRolePrefixMatch: role-name=" + roleName + ", role-prefix=" +
rolePrefix + ", reosurce-group=" + resourceGroup + ", tenant-domain=" + tenantDomain);
}
// first make sure the role name starts with the given prefix
if (!roleName.startsWith(rolePrefix)) {
return false;
}
// if we're dealing with a resource group then we need
// to make sure we're not going to match a substring
// resource group. Since we expect to see a SimpleName
// action after the prefix, if we get another '.' then
// we're dealing with a substring so the role does
// not match the expected format
if (resourceGroup != null) {
return (roleName.indexOf('.', rolePrefix.length()) == -1);
}
// otherwise we're going to split the remaining value
// into components. If we have 2 components then we'll
// check if we have a domain for the first component
// if we don't then it's a resource group and as such
// it can be removed otherwise, we'll leave it alone
String[] comps = roleName.substring(rolePrefix.length()).split("\\.");
if (comps.length == 2) {
// check to see if we have a subdomain - if we do then
// we're not going to include this role as we don't know
// for sure if this for a resource group or not
String subDomain = tenantDomain + "." + comps[0];
if (LOG.isDebugEnabled()) {
LOG.debug("isTenantRolePrefixMatch: verifying tenant subdomain: " + subDomain);
}
return con.getDomain(subDomain) == null;
} else {
// if we have more than 2 subcomponents then we're
// definitely not dealing with resource groups; with
// fewer than 2 components we still treat it as a match
return comps.length <= 2;
}
}
public AthenzDomain getAthenzDomain(final String domainName, boolean masterCopy) {
try (ObjectStoreConnection con = store.getConnection(true, masterCopy)) {
return getAthenzDomain(con, domainName);
}
}
AthenzDomain getAthenzDomain(ObjectStoreConnection con, final String domainName) {
// first check to see if our data is in the cache
AthenzDomain athenzDomain = getAthenzDomainFromCache(con, domainName);
if (athenzDomain != null) {
return athenzDomain;
}
athenzDomain = con.getAthenzDomain(domainName);
setMembersInDomain(athenzDomain);
DataCache dataCache = new DataCache(athenzDomain,
athenzDomain.getDomain().getModified().millis());
cacheStore.put(domainName, dataCache);
return athenzDomain;
}
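// flatten each role's structured member entries into the plain member-name
// list so the returned domain carries both representations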
private void setMembersInDomain(AthenzDomain athenzDomain) {
List<Role> roleList = athenzDomain.getRoles();
if (roleList != null) {
for (Role role: roleList) {
List<RoleMember> roleMembers = role.getRoleMembers();
if (roleMembers != null) {
List<String> members = role.getMembers();
if (members == null) {
members = new ArrayList<>();
role.setMembers(members);
}
for (RoleMember roleMember: roleMembers) {
members.add(roleMember.getMemberName());
}
}
}
}
}
DomainMetaList listModifiedDomains(long modifiedSince) {
// since this is the operation executed by ZTS servers to
// retrieve latest domain changes, we're going to use
// the read-write store as opposed to the read-only store to
// get our up-to-date data
try (ObjectStoreConnection con = store.getConnection(true, true)) {
return con.listModifiedDomains(modifiedSince);
}
}
boolean auditLogSeparator(StringBuilder auditDetails, boolean firstEntry) {
if (!firstEntry) {
auditDetails.append(',');
}
// regardless of the current state, the new state is no
// longer the first entry so we return false
return false;
}
void auditLogStrings(StringBuilder auditDetails, String label, Collection<String> values) {
auditDetails.append(", \"").append(label).append("\": [");
boolean firstEntry = true;
for (String value : values) {
firstEntry = auditLogString(auditDetails, value, firstEntry);
}
auditDetails.append(']');
}
boolean auditLogString(StringBuilder auditDetails, String value, boolean firstEntry) {
firstEntry = auditLogSeparator(auditDetails, firstEntry);
auditDetails.append('\"').append(value).append('\"');
return firstEntry;
}
void auditLogRoleMembers(StringBuilder auditDetails, String label,
Collection<RoleMember> values) {
auditDetails.append(", \"").append(label).append("\": [");
boolean firstEntry = true;
for (RoleMember value : values) {
firstEntry = auditLogRoleMember(auditDetails, value, firstEntry);
}
auditDetails.append(']');
}
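// each member is logged as one JSON object, e.g.
// {"member": "user.joe", "expiration": "2020-01-01T00:00:00.000Z", "approved": true}
// (member name and date here are illustrative)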
boolean auditLogRoleMember(StringBuilder auditDetails, RoleMember roleMember, boolean firstEntry) {
firstEntry = auditLogSeparator(auditDetails, firstEntry);
auditDetails.append("{\"member\": \"").append(roleMember.getMemberName()).append('"');
if (roleMember.getExpiration() != null) {
auditDetails.append(", \"expiration\": \"").append(roleMember.getExpiration().toString()).append('"');
}
auditDetails.append(", \"approved\": ");
auditDetails.append(roleMember.getApproved() == Boolean.FALSE ? "false}" : "true}");
return firstEntry;
}
void auditLogPublicKeyEntries(StringBuilder auditDetails, String label,
List<PublicKeyEntry> values) {
auditDetails.append(", \"").append(label).append("\": [");
boolean firstEntry = true;
for (PublicKeyEntry value : values) {
firstEntry = auditLogPublicKeyEntry(auditDetails, value, firstEntry);
}
auditDetails.append(']');
}
void auditLogPublicKeyEntries(StringBuilder auditDetails, String label, Set<String> values,
Map<String, PublicKeyEntry> publicKeysMap) {
auditDetails.append(", \"").append(label).append("\": [");
boolean firstEntry = true;
for (String value : values) {
firstEntry = auditLogPublicKeyEntry(auditDetails, publicKeysMap.get(value), firstEntry);
}
auditDetails.append(']');
}
void auditLogPublicKeyEntries(StringBuilder auditDetails, String label, Set<String> values) {
auditDetails.append(", \"").append(label).append("\": [");
boolean firstEntry = true;
for (String value : values) {
firstEntry = auditLogPublicKeyEntry(auditDetails, value, firstEntry);
}
auditDetails.append(']');
}
boolean auditLogPublicKeyEntry(StringBuilder auditDetails, PublicKeyEntry publicKey, boolean firstEntry) {
firstEntry = auditLogSeparator(auditDetails, firstEntry);
auditDetails.append("{\"key\": \"").append(publicKey.getKey())
.append("\", \"id\": \"").append(publicKey.getId()).append("\"}");
return firstEntry;
}
boolean auditLogPublicKeyEntry(StringBuilder auditDetails, String publicKeyId, boolean firstEntry) {
firstEntry = auditLogSeparator(auditDetails, firstEntry);
auditDetails.append("{\"id\": \"").append(publicKeyId).append("\"}");
return firstEntry;
}
void auditLogAssertions(StringBuilder auditDetails, String label, Collection<Assertion> values) {
auditDetails.append(", \"").append(label).append("\": [");
boolean firstEntry = true;
for (Assertion value : values) {
firstEntry = auditLogAssertion(auditDetails, value, firstEntry);
}
auditDetails.append(']');
}
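// each assertion is logged as one JSON object, e.g.
// {"role": "sports:role.readers", "action": "read", "effect": "ALLOW", "resource": "sports:articles"}
// (values illustrative)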
boolean auditLogAssertion(StringBuilder auditDetails, Assertion assertion, boolean firstEntry) {
firstEntry = auditLogSeparator(auditDetails, firstEntry);
String assertionEffect = "ALLOW";
if (assertion.getEffect() != null) {
assertionEffect = assertion.getEffect().toString();
}
auditDetails.append("{\"role\": \"").append(assertion.getRole())
.append("\", \"action\": \"").append(assertion.getAction())
.append("\", \"effect\": \"").append(assertionEffect)
.append("\", \"resource\": \"").append(assertion.getResource())
.append("\"}");
return firstEntry;
}
void auditLogDomain(StringBuilder auditDetails, Domain domain) {
auditDetails.append("{\"description\": \"").append(domain.getDescription())
.append("\", \"org\": \"").append(domain.getOrg())
.append("\", \"auditEnabled\": \"").append(domain.getAuditEnabled())
.append("\", \"enabled\": \"").append(domain.getEnabled())
.append("\", \"account\": \"").append(domain.getAccount())
.append("\", \"acctId\": \"").append(domain.getApplicationId())
.append("\", \"ypmid\": \"").append(domain.getYpmId())
.append("\", \"id\": \"").append(domain.getId())
.append("\", \"memberExpiryDays\": \"").append(domain.getMemberExpiryDays())
.append("\", \"serviceExpiryDays\": \"").append(domain.getServiceExpiryDays())
.append("\", \"tokenExpiryMins\": \"").append(domain.getTokenExpiryMins())
.append("\", \"serviceCertExpiryMins\": \"").append(domain.getServiceCertExpiryMins())
.append("\", \"roleCertExpiryMins\": \"").append(domain.getRoleCertExpiryMins())
.append("\"}");
}
void auditLogRoleSystemMeta(StringBuilder auditDetails, Role role, String roleName) {
auditDetails.append("{\"name\": \"").append(roleName)
.append("\", \"auditEnabled\": \"").append(role.getAuditEnabled())
.append("\"}");
}
void auditLogServiceIdentitySystemMeta(StringBuilder auditDetails, ServiceIdentity service, String serviceName) {
auditDetails.append("{\"name\": \"").append(serviceName)
.append("\", \"providerEndpoint\": \"").append(service.getProviderEndpoint())
.append("\"}");
}
void auditLogRoleMeta(StringBuilder auditDetails, Role role, String roleName) {
auditDetails.append("{\"name\": \"").append(roleName)
.append("\", \"selfServe\": \"").append(role.getSelfServe())
.append("\", \"memberExpiryDays\": \"").append(role.getMemberExpiryDays())
.append("\", \"serviceExpiryDays\": \"").append(role.getServiceExpiryDays())
.append("\", \"tokenExpiryMins\": \"").append(role.getTokenExpiryMins())
.append("\", \"certExpiryMins\": \"").append(role.getCertExpiryMins())
.append("\", \"reviewEnabled\": \"").append(role.getReviewEnabled())
.append("\", \"notifyRoles\": \"").append(role.getNotifyRoles())
.append("\"}");
}
void executePutQuota(ResourceContext ctx, String domainName, Quota quota,
String auditRef, String caller) {
// our exception handling code does the check for retry count
// and throws the exception it had received when the retry
// count reaches 0
for (int retryCount = defaultRetryCount; ; retryCount--) {
try (ObjectStoreConnection con = store.getConnection(true, true)) {
// process our insert quota. since this is a "single"
// operation, we are not using any transactions.
if (con.getQuota(domainName) != null) {
con.updateQuota(domainName, quota);
} else {
con.insertQuota(domainName, quota);
}
auditLogRequest(ctx, domainName, auditRef, caller, ZMSConsts.HTTP_PUT,
domainName, null);
return;
} catch (ResourceException ex) {
// otherwise check if we need to retry or return failure
if (!shouldRetryOperation(ex, retryCount)) {
throw ex;
}
}
}
}
void executeDeleteQuota(ResourceContext ctx, String domainName, String auditRef, String caller) {
// our exception handling code does the check for retry count
// and throws the exception it had received when the retry
// count reaches 0
for (int retryCount = defaultRetryCount; ; retryCount--) {
try (ObjectStoreConnection con = store.getConnection(true, true)) {
// process our delete quota request - it's a single
// operation so no need to make it a transaction
if (!con.deleteQuota(domainName)) {
throw ZMSUtils.notFoundError(caller + ": unable to delete quota: " + domainName, caller);
}
// audit log the request
auditLogRequest(ctx, domainName, auditRef, caller, ZMSConsts.HTTP_DELETE,
domainName, null);
return;
} catch (ResourceException ex) {
if (!shouldRetryOperation(ex, retryCount)) {
throw ex;
}
}
}
}
public Quota getQuota(String domainName) {
try (ObjectStoreConnection con = store.getConnection(true, false)) {
return quotaCheck.getDomainQuota(con, domainName);
}
}
public void executePutRoleSystemMeta(ResourceContext ctx, String domainName, String roleName,
RoleSystemMeta meta, String attribute, boolean deleteAllowed, String auditRef, String caller) {
// our exception handling code does the check for retry count
// and throws the exception it had received when the retry
// count reaches 0
for (int retryCount = defaultRetryCount; ; retryCount--) {
try (ObjectStoreConnection con = store.getConnection(false, true)) {
Domain domain = con.getDomain(domainName);
if (domain == null) {
con.rollbackChanges();
throw ZMSUtils.notFoundError(caller + ": Unknown domain: " + domainName, caller);
}
// first verify that auditing requirements are met
checkDomainAuditEnabled(con, domain, auditRef, caller, getPrincipalName(ctx), AUDIT_TYPE_ROLE);
if (domain.getAuditEnabled() != Boolean.TRUE) {
throw ZMSUtils.requestError(caller + ": auditEnabled flag not set for domain: " + domainName + " to add it on the role: " + roleName, caller);
}
Role originalRole = getRole(con, domainName, roleName, false, false, false);
// now process the request. first we're going to make a
// copy of our role
Role updatedRole = new Role()
.setName(originalRole.getName())
.setAuditEnabled(originalRole.getAuditEnabled())
.setTrust(originalRole.getTrust())
.setSelfServe(originalRole.getSelfServe())
.setMemberExpiryDays(originalRole.getMemberExpiryDays())
.setServiceExpiryDays(originalRole.getServiceExpiryDays())
.setTokenExpiryMins(originalRole.getTokenExpiryMins())
.setCertExpiryMins(originalRole.getCertExpiryMins())
.setSignAlgorithm(originalRole.getSignAlgorithm())
.setReviewEnabled(originalRole.getReviewEnabled())
.setNotifyRoles(originalRole.getNotifyRoles());
// then we're going to apply the updated fields
// from the given object
updateRoleSystemMetaFields(updatedRole, attribute, deleteAllowed, meta);
con.updateRole(domainName, updatedRole);
saveChanges(con, domainName);
// audit log the request
StringBuilder auditDetails = new StringBuilder(ZMSConsts.STRING_BLDR_SIZE_DEFAULT);
auditLogRoleSystemMeta(auditDetails, updatedRole, roleName);
auditLogRequest(ctx, domainName, auditRef, caller, ZMSConsts.HTTP_PUT,
domainName, auditDetails.toString());
return;
} catch (ResourceException ex) {
if (!shouldRetryOperation(ex, retryCount)) {
throw ex;
}
}
}
}
public void executePutServiceIdentitySystemMeta(ResourceContext ctx, String domainName, String serviceName,
ServiceIdentitySystemMeta meta, String attribute, boolean deleteAllowed, String auditRef, String caller) {
// our exception handling code does the check for retry count
// and throws the exception it had received when the retry
// count reaches 0
for (int retryCount = defaultRetryCount; ; retryCount--) {
try (ObjectStoreConnection con = store.getConnection(false, true)) {
Domain domain = con.getDomain(domainName);
if (domain == null) {
con.rollbackChanges();
throw ZMSUtils.notFoundError(caller + ": Unknown domain: " + domainName, caller);
}
// first verify that auditing requirements are met
checkDomainAuditEnabled(con, domain, auditRef, caller, getPrincipalName(ctx), AUDIT_TYPE_SERVICE);
// retrieve our original service identity object
ServiceIdentity serviceIdentity = getServiceIdentity(con, domainName, serviceName, false);
// then we're going to apply the updated fields
// from the given object
updateServiceIdentitySystemMetaFields(serviceIdentity, attribute, deleteAllowed, meta);
con.updateServiceIdentity(domainName, serviceIdentity);
saveChanges(con, domainName);
// audit log the request
StringBuilder auditDetails = new StringBuilder(ZMSConsts.STRING_BLDR_SIZE_DEFAULT);
auditLogServiceIdentitySystemMeta(auditDetails, serviceIdentity, serviceName);
auditLogRequest(ctx, domainName, auditRef, caller, ZMSConsts.HTTP_PUT,
domainName, auditDetails.toString());
return;
} catch (ResourceException ex) {
if (!shouldRetryOperation(ex, retryCount)) {
throw ex;
}
}
}
}
void updateRoleMetaFields(Role role, RoleMeta meta) {
if (meta.getSelfServe() != null) {
role.setSelfServe(meta.getSelfServe());
}
if (meta.getMemberExpiryDays() != null) {
role.setMemberExpiryDays(meta.getMemberExpiryDays());
}
if (meta.getServiceExpiryDays() != null) {
role.setServiceExpiryDays(meta.getServiceExpiryDays());
}
if (meta.getTokenExpiryMins() != null) {
role.setTokenExpiryMins(meta.getTokenExpiryMins());
}
if (meta.getCertExpiryMins() != null) {
role.setCertExpiryMins(meta.getCertExpiryMins());
}
if (meta.getSignAlgorithm() != null) {
role.setSignAlgorithm(meta.getSignAlgorithm());
}
if (meta.getReviewEnabled() != null) {
role.setReviewEnabled(meta.getReviewEnabled());
}
if (meta.getNotifyRoles() != null) {
role.setNotifyRoles(meta.getNotifyRoles());
}
}
public void executePutRoleMeta(ResourceContext ctx, String domainName, String roleName, RoleMeta meta,
String auditRef, String caller) {
// our exception handling code does the check for retry count
// and throws the exception it had received when the retry
// count reaches 0
for (int retryCount = defaultRetryCount; ; retryCount--) {
try (ObjectStoreConnection con = store.getConnection(false, true)) {
Role originalRole = getRole(con, domainName, roleName, false, false, false);
if (originalRole == null) {
con.rollbackChanges();
throw ZMSUtils.notFoundError(caller + ": Unknown role: " + roleName, caller);
}
checkRoleAuditEnabled(con, originalRole, auditRef, caller, getPrincipalName(ctx));
// now process the request. first we're going to make a
// copy of our role
Role updatedRole = new Role()
.setName(originalRole.getName())
.setAuditEnabled(originalRole.getAuditEnabled())
.setTrust(originalRole.getTrust())
.setSelfServe(originalRole.getSelfServe())
.setMemberExpiryDays(originalRole.getMemberExpiryDays())
.setServiceExpiryDays(originalRole.getServiceExpiryDays())
.setTokenExpiryMins(originalRole.getTokenExpiryMins())
.setCertExpiryMins(originalRole.getCertExpiryMins())
.setSignAlgorithm(originalRole.getSignAlgorithm())
.setReviewEnabled(originalRole.getReviewEnabled())
.setNotifyRoles(originalRole.getNotifyRoles());
// then we're going to apply the updated fields
// from the given object
updateRoleMetaFields(updatedRole, meta);
con.updateRole(domainName, updatedRole);
saveChanges(con, domainName);
// audit log the request
StringBuilder auditDetails = new StringBuilder(ZMSConsts.STRING_BLDR_SIZE_DEFAULT);
auditLogRoleMeta(auditDetails, updatedRole, roleName);
auditLogRequest(ctx, domainName, auditRef, caller, ZMSConsts.HTTP_PUT,
domainName, auditDetails.toString());
// if the role member expiry date has changed then we're going
// to process all the members in the role and update the expiration
// date accordingly
updateRoleMembersExpiration(ctx, con, domainName, roleName, originalRole, updatedRole,
auditRef, caller);
return;
} catch (ResourceException ex) {
if (!shouldRetryOperation(ex, retryCount)) {
throw ex;
}
}
}
}
boolean setRoleMemberExpiration(ResourceContext ctx, ObjectStoreConnection con, List<RoleMember> roleMembers,
Timestamp userExpiration, long userMillis, Timestamp serviceExpiration, long serviceMillis,
final String domainName, final String roleName, final String principal, final String auditRef,
final String caller) {
boolean bDataChanged = false;
for (RoleMember roleMember : roleMembers) {
boolean bUser = ZMSUtils.isUserDomainPrincipal(roleMember.getMemberName(), zmsConfig.getUserDomainPrefix(),
zmsConfig.getAddlUserCheckDomainPrefixList());
if (bUser && userMillis != 0) {
if (roleMember.getExpiration() != null && roleMember.getExpiration().millis() < userMillis) {
continue;
}
roleMember.setExpiration(userExpiration);
} else if (!bUser && serviceMillis != 0) {
if (roleMember.getExpiration() != null && roleMember.getExpiration().millis() < serviceMillis) {
continue;
}
roleMember.setExpiration(serviceExpiration);
}
try {
if (!con.insertRoleMember(domainName, roleName, roleMember, principal, auditRef)) {
LOG.error("unable to update member {} expiration", roleMember.getMemberName());
continue;
}
} catch (Exception ex) {
LOG.error("unable to update member {} expiration: {}", roleMember.getMemberName(), ex.getMessage());
continue;
}
// audit log the request
StringBuilder auditDetails = new StringBuilder(ZMSConsts.STRING_BLDR_SIZE_DEFAULT);
auditLogRoleMember(auditDetails, roleMember, true);
auditLogRequest(ctx, domainName, auditRef, caller, ZMSConsts.HTTP_PUT, roleName,
auditDetails.toString());
bDataChanged = true;
}
return bDataChanged;
}
void updateRoleMembersExpiration(ResourceContext ctx, ObjectStoreConnection con, final String domainName,
final String roleName, Role originalRole, Role updatedRole, final String auditRef, final String caller) {
// if it's a delegated role then we have nothing to do
if (originalRole.getTrust() != null && !originalRole.getTrust().isEmpty()) {
return;
}
// if no role members, then there is nothing to do
final List<RoleMember> roleMembers = originalRole.getRoleMembers();
if (roleMembers == null || roleMembers.isEmpty()) {
return;
}
// we only need to process the role members if the new expiration
// is more restrictive than what we had before
boolean userMemberExpiryDayReduced = memberExpiryDaysReduced(originalRole.getMemberExpiryDays(),
updatedRole.getMemberExpiryDays());
boolean serviceMemberExpiryDayReduced = memberExpiryDaysReduced(originalRole.getServiceExpiryDays(),
updatedRole.getServiceExpiryDays());
if (!userMemberExpiryDayReduced && !serviceMemberExpiryDayReduced) {
return;
}
// we're only going to process those role members whose
// expiration is either not set or longer than the new limit
long userMillis = userMemberExpiryDayReduced ? System.currentTimeMillis()
+ TimeUnit.MILLISECONDS.convert(updatedRole.getMemberExpiryDays(), TimeUnit.DAYS) : 0;
long serviceMillis = serviceMemberExpiryDayReduced ? System.currentTimeMillis()
+ TimeUnit.MILLISECONDS.convert(updatedRole.getServiceExpiryDays(), TimeUnit.DAYS) : 0;
Timestamp userExpiration = Timestamp.fromMillis(userMillis);
Timestamp serviceExpiration = Timestamp.fromMillis(serviceMillis);
final String principal = getPrincipalName(ctx);
// process our role members and if there were any changes processed then update
// our role and domain time-stamps, and invalidate local cache entry
if (setRoleMemberExpiration(ctx, con, roleMembers, userExpiration, userMillis, serviceExpiration, serviceMillis,
domainName, roleName, principal, auditRef, caller)) {
con.updateRoleModTimestamp(domainName, roleName);
con.updateDomainModTimestamp(domainName);
cacheStore.invalidate(domainName);
}
}
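// an expiry setting is considered reduced (more restrictive) only when the
// new value is positive and the old value was unset, non-positive, or larger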
boolean memberExpiryDaysReduced(Integer oldMemberExpiryDays, Integer newMemberExpiryDays) {
if (newMemberExpiryDays == null || newMemberExpiryDays <= 0) {
return false;
}
if (oldMemberExpiryDays == null || oldMemberExpiryDays <= 0) {
return true;
}
return newMemberExpiryDays < oldMemberExpiryDays;
}
/**
* If the role has audit enabled, and user did not provide the auditRef,
* an exception will be thrown.
**/
void checkRoleAuditEnabled(ObjectStoreConnection con, Role role, final String auditRef,
final String caller, final String principal) {
if (role.getAuditEnabled() == Boolean.TRUE) {
if (auditRef == null || auditRef.length() == 0) {
con.rollbackChanges();
throw ZMSUtils.requestError(caller + ": Audit reference required for role: " + role.getName(), caller);
}
if (auditReferenceValidator != null && !auditReferenceValidator.validateReference(auditRef, principal, caller)) {
con.rollbackChanges();
throw ZMSUtils.requestError(caller + ": Audit reference validation failed for role: " + role.getName() +
", auditRef: " + auditRef, caller);
}
}
}
void executePutMembershipDecision(ResourceContext ctx, String domainName, String roleName,
RoleMember roleMember, String auditRef, String caller) {
// our exception handling code does the check for retry count
// and throws the exception it had received when the retry
// count reaches 0
for (int retryCount = defaultRetryCount; ; retryCount--) {
try (ObjectStoreConnection con = store.getConnection(false, true)) {
String principal = getPrincipalName(ctx);
// make sure the role auditing requirements are met
Role originalRole = con.getRole(domainName, roleName);
if (originalRole == null) {
con.rollbackChanges();
throw ZMSUtils.notFoundError(caller + ": Unknown role: " + roleName, caller);
}
checkRoleAuditEnabled(con, originalRole, auditRef, caller, principal);
// process our confirm role member support
if (!con.confirmRoleMember(domainName, roleName, roleMember,
principal, auditRef)) {
con.rollbackChanges();
throw ZMSUtils.requestError(caller + ": unable to apply role membership decision for member: " +
roleMember.getMemberName() + " and role: " + roleName, caller);
}
// update our domain time-stamp and save changes
con.updateRoleModTimestamp(domainName, roleName);
saveChanges(con, domainName);
// audit log the request
StringBuilder auditDetails = new StringBuilder(ZMSConsts.STRING_BLDR_SIZE_DEFAULT);
auditLogRoleMember(auditDetails, roleMember, true);
auditLogRequest(ctx, domainName, auditRef, caller, ZMSConsts.HTTP_PUT,
roleName, auditDetails.toString());
return;
} catch (ResourceException ex) {
// otherwise check if we need to retry or return failure
if (!shouldRetryOperation(ex, retryCount)) {
throw ex;
}
}
}
}
DomainRoleMembership getPendingDomainRoleMembers(final String principal) {
DomainRoleMembership domainRoleMembership = new DomainRoleMembership();
List<DomainRoleMembers> domainRoleMembersList = new ArrayList<>();
DomainRoleMembers domainRoleMembers;
try (ObjectStoreConnection con = store.getConnection(true, false)) {
Map<String, List<DomainRoleMember>> domainRoleMembersMap = con.getPendingDomainRoleMembers(principal);
if (domainRoleMembersMap != null) {
for (String domain : domainRoleMembersMap.keySet()) {
domainRoleMembers = new DomainRoleMembers();
domainRoleMembers.setDomainName(domain);
domainRoleMembers.setMembers(domainRoleMembersMap.get(domain));
domainRoleMembersList.add(domainRoleMembers);
}
domainRoleMembership.setDomainRoleMembersList(domainRoleMembersList);
}
}
return domainRoleMembership;
}
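// the timestamp update acts as a claim: approver roles are returned only if
// this host successfully records the notification timestamp, otherwise null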
public Set<String> getPendingMembershipApproverRoles() {
try (ObjectStoreConnection con = store.getConnection(true, true)) {
long updateTs = System.currentTimeMillis();
if (con.updatePendingRoleMembersNotificationTimestamp(zmsConfig.getServerHostName(), updateTs)) {
return con.getPendingMembershipApproverRoles(zmsConfig.getServerHostName(), updateTs);
}
}
return null;
}
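// same claim-by-timestamp pattern: expiring members are returned only when
// this host records the notification timestamp first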
public Map<String, DomainRoleMember> getRoleExpiryMembers() {
try (ObjectStoreConnection con = store.getConnection(true, true)) {
long updateTs = System.currentTimeMillis();
if (con.updateRoleMemberExpirationNotificationTimestamp(zmsConfig.getServerHostName(), updateTs)) {
return con.getNotifyTemporaryRoleMembers(zmsConfig.getServerHostName(), updateTs);
}
}
return null;
}
public void processExpiredPendingMembers(int pendingRoleMemberLifespan, final String monitorIdentity) {
final String auditRef = "Expired - auto reject";
final String caller = "processExpiredPendingMembers";
Map<String, List<DomainRoleMember>> memberList;
try (ObjectStoreConnection con = store.getConnection(true, false)) {
memberList = con.getExpiredPendingDomainRoleMembers(pendingRoleMemberLifespan);
}
// delete each member and record each expired member in audit log in a transaction
for (String domainName : memberList.keySet()) {
for (DomainRoleMember domainRoleMember : memberList.get(domainName)) {
final String principalName = domainRoleMember.getMemberName();
for (MemberRole memberRole : domainRoleMember.getMemberRoles()) {
try (ObjectStoreConnection con = store.getConnection(true, true)) {
if (con.deletePendingRoleMember(domainName, memberRole.getRoleName(),
principalName, monitorIdentity, auditRef)) {
auditLogRequest(monitorIdentity, domainName, auditRef, caller,
"REJECT", memberRole.getRoleName(),
"{\"member\": \"" + principalName + "\"}");
}
}
}
}
}
}
void executePutRoleReview(ResourceContext ctx, String domainName, String roleName, Role role,
String auditRef, String caller) {
// our exception handling code does the check for retry count
// and throws the exception it had received when the retry
// count reaches 0
for (int retryCount = defaultRetryCount; ; retryCount--) {
try (ObjectStoreConnection con = store.getConnection(false, true)) {
final String principal = getPrincipalName(ctx);
// first verify that auditing requirements are met
checkDomainAuditEnabled(con, domainName, auditRef, caller, principal, AUDIT_TYPE_ROLE);
// retrieve our original role
Role originalRole = getRole(con, domainName, roleName, false, false, false);
if (originalRole.getTrust() != null && !originalRole.getTrust().isEmpty()) {
throw ZMSUtils.requestError(caller + ": role " + roleName + " is delegated. Review should happen on the trusted role. ", caller);
}
// now process the request. first we're going to make a copy of our role
Role updatedRole = new Role()
.setName(originalRole.getName());
// then we're going to apply the updated expiry and/or active status from the incoming role
List<RoleMember> noactionMembers = applyMembershipChanges(updatedRole, originalRole, role, auditRef);
StringBuilder auditDetails = new StringBuilder(ZMSConsts.STRING_BLDR_SIZE_DEFAULT);
List<RoleMember> deletedMembers = new ArrayList<>();
List<RoleMember> extendedMembers = new ArrayList<>();
auditDetails.append("{\"name\": \"").append(roleName).append('\"')
.append(", \"selfServe\": ").append(originalRole.getSelfServe() == Boolean.TRUE ? "true" : "false")
.append(", \"auditEnabled\": ").append(originalRole.getAuditEnabled() == Boolean.TRUE ? "true" : "false");
for (RoleMember member : updatedRole.getRoleMembers()) {
// if active flag is coming as false for the member, that means it's flagged for deletion
if (member.getActive() == Boolean.FALSE) {
if (!con.deleteRoleMember(domainName, roleName, member.getMemberName(), principal, auditRef)) {
con.rollbackChanges();
throw ZMSUtils.notFoundError(caller + ": unable to delete role member: " +
member.getMemberName() + " from role: " + roleName, caller);
}
deletedMembers.add(member);
} else {
// if not marked for deletion, then we are going to extend the member
if (!con.insertRoleMember(domainName, roleName, member, principal, auditRef)) {
con.rollbackChanges();
throw ZMSUtils.notFoundError(caller + ": unable to extend role member: " +
member.getMemberName() + " for the role: " + roleName, caller);
}
extendedMembers.add(member);
}
}
// construct audit log details
auditLogRoleMembers(auditDetails, "deleted-members", deletedMembers);
auditLogRoleMembers(auditDetails, "extended-members", extendedMembers);
auditLogRoleMembers(auditDetails, "no-action-members", noactionMembers);
auditDetails.append("}");
if (!deletedMembers.isEmpty() || !extendedMembers.isEmpty()) {
// we have one or more changes to the role. We should update both lastReviewed as well as modified timestamps
con.updateRoleModTimestamp(domainName, roleName);
con.updateRoleReviewTimestamp(domainName, roleName);
} else {
// since "no-action" is still a review, we are updating lastReviewed timestamp
con.updateRoleReviewTimestamp(domainName, roleName);
}
saveChanges(con, domainName);
// audit log the request
auditLogRequest(ctx, domainName, auditRef, caller, "REVIEW", roleName, auditDetails.toString());
return;
} catch (ResourceException ex) {
if (!shouldRetryOperation(ex, retryCount)) {
throw ex;
}
}
}
}
/**
* This method takes the input role, creates a map using memberName as key,
* copies members from original role from DB and only adds deleted / extended members to the updatedRole.
* @param updatedRole updated role to be sent to DB to record changes
* @param originalRole original role from DB
* @param role incoming role containing changes from domain admin
* @param auditRef audit ref for the change
* @return List of role members where no action was taken
*/
List<RoleMember> applyMembershipChanges(Role updatedRole, Role originalRole, Role role, String auditRef) {
Map<String, RoleMember> incomingMemberMap =
role.getRoleMembers().stream().collect(Collectors.toMap(RoleMember::getMemberName, item -> item));
List<RoleMember> noActionMembers = new ArrayList<>(originalRole.getRoleMembers().size());
// updatedMembers size is driven by input
List<RoleMember> updatedMembers = new ArrayList<>(incomingMemberMap.size());
updatedRole.setRoleMembers(updatedMembers);
RoleMember updatedMember;
// if original role is auditEnabled then all the extensions should be sent for approval again.
boolean approvalStatus = originalRole.getAuditEnabled() != Boolean.TRUE;
RoleMember tempMemberFromMap;
for (RoleMember originalMember : originalRole.getRoleMembers()) {
// we are only going to update the changed members
if (incomingMemberMap.containsKey(originalMember.getMemberName())) {
updatedMember = new RoleMember();
updatedMember.setMemberName(originalMember.getMemberName());
tempMemberFromMap = incomingMemberMap.get(updatedMember.getMemberName());
// member's approval status is determined by auditEnabled flag set on original role
updatedMember.setApproved(approvalStatus);
// member's active status is determined by action taken in UI
updatedMember.setActive(tempMemberFromMap.getActive());
// member's new expiration is set by role / domain level expiration setting
updatedMember.setExpiration(tempMemberFromMap.getExpiration());
updatedMember.setAuditRef(auditRef);
updatedMembers.add(updatedMember);
} else {
noActionMembers.add(originalMember);
}
}
return noActionMembers;
}
void updateDomainModTimestamp(final String domainName) {
try (ObjectStoreConnection con = store.getConnection(true, true)) {
// update domain time-stamps, and invalidate local cache entry
con.updateDomainModTimestamp(domainName);
cacheStore.invalidate(domainName);
}
}
}
| 1 | 5,050 | DB is implemented in PR 951 | AthenZ-athenz | java |
@@ -15,6 +15,16 @@ import (
"github.com/prometheus/client_golang/prometheus"
)
+const (
+ bytesToGB = 1073741824
+ bytesToMB = 1048576
+ micSec = 1000000
+ bytesToKB = 1024
+ minwidth = 0
+ maxwidth = 0
+ padding = 3
+)
+
// A gauge is a metric that represents a single numerical value that can
// arbitrarily go up and down.
| 1 | // Package collector is used to collect metrics by implementing
// prometheus.Collector interface. See function level comments
// for more details.
package collector
import (
"encoding/json"
"log"
"net/http"
"net/url"
"strconv"
"time"
"github.com/openebs/maya/types/v1"
"github.com/prometheus/client_golang/prometheus"
)
// A gauge is a metric that represents a single numerical value that can
// arbitrarily go up and down.
// Gauges are typically used for measured values like temperatures or current
// memory usage, but also "counts" that can go up and down, like the number of
// running goroutines.
// GaugeOpts is the alias for Opts, which is used to create different
// types of metrics.
// All the stats exposed from jiva will be collected by the GaugeOpts.
var (
usedLogicalBlocks = prometheus.NewGauge(prometheus.GaugeOpts{
Namespace: "OpenEBS",
Name: "used_logical_blocks",
Help: "Used Logical Blocks of volume",
})
usedBlocks = prometheus.NewGauge(prometheus.GaugeOpts{
Namespace: "OpenEBS",
Name: "used_blocks",
Help: "Used Blocks of volume",
})
sectorSize = prometheus.NewGauge(prometheus.GaugeOpts{
Namespace: "OpenEBS",
Name: "sector_size",
Help: "sector size of volume",
})
readIOPS = prometheus.NewGauge(prometheus.GaugeOpts{
Namespace: "OpenEBS",
Name: "read_iops",
Help: "Read Input/Outputs on Volume",
})
totalReadTime = prometheus.NewGauge(prometheus.GaugeOpts{
Namespace: "OpenEBS",
Name: "total_read_time",
Help: "Total Read time on volume",
})
totalReadBlockCount = prometheus.NewGauge(prometheus.GaugeOpts{
Namespace: "OpenEBS",
Name: "total_read_block_count",
Help: "Total Read Block count of volume",
})
writeIOPS = prometheus.NewGauge(prometheus.GaugeOpts{
Namespace: "OpenEBS",
Name: "write_iops",
Help: "Write Input/Outputs on Volume",
})
totalWriteTime = prometheus.NewGauge(prometheus.GaugeOpts{
Namespace: "OpenEBS",
Name: "total_write_time",
Help: "Total Write time on volume",
})
totalWriteBlockCount = prometheus.NewGauge(prometheus.GaugeOpts{
Namespace: "OpenEBS",
Name: "total_write_block_count",
Help: "Total Write Block count of volume",
})
)
// Collector is the interface implemented by anything that can be used by
// Prometheus to collect metrics. A Collector has to be registered for
// collection of metrics. Basically it has two methods Describe and Collect.
// VolumeExporter implements the prometheus.Collector interface. It exposes
// the metrics of a OpenEBS (Jiva) volume.
type VolumeExporter struct {
VolumeControllerURL string
}
// NewExporter returns a new VolumeExporter wired to the Jiva volume controller's /v1/stats endpoint.
func NewExporter(volumeControllerURL *url.URL) *VolumeExporter {
volumeControllerURL.Path = "/v1/stats"
return &VolumeExporter{
VolumeControllerURL: volumeControllerURL.String(),
}
}
// Describe sends the super-set of all possible descriptors of metrics
// collected by this Collector to the provided channel and returns once
// the last descriptor has been sent. The sent descriptors fulfill the
// consistency and uniqueness requirements described in the Desc
// documentation. (It is valid if one and the same Collector sends
// duplicate descriptors. Those duplicates are simply ignored. However,
// two different Collectors must not send duplicate descriptors.) This
// method idempotently sends the same descriptors throughout the
// lifetime of the Collector. If a Collector encounters an error while
// executing this method, it must send an invalid descriptor (created
// with NewInvalidDesc) to signal the error to the registry.
// Describe describes all the registered stats metrics from the OpenEBS volumes.
func (e *VolumeExporter) Describe(ch chan<- *prometheus.Desc) {
readIOPS.Describe(ch)
totalReadTime.Describe(ch)
totalReadBlockCount.Describe(ch)
writeIOPS.Describe(ch)
totalWriteTime.Describe(ch)
totalWriteBlockCount.Describe(ch)
usedLogicalBlocks.Describe(ch)
usedBlocks.Describe(ch)
sectorSize.Describe(ch)
}
// Collect is called by the Prometheus registry when collecting
// metrics. The implementation sends each collected metric via the
// provided channel and returns once the last metric has been sent. The
// descriptor of each sent metric is one of those returned by
// Describe. Returned metrics that share the same descriptor must differ
// in their variable label values. This method may be called
// concurrently and must therefore be implemented in a concurrency safe
// way. Blocking occurs at the expense of total performance of rendering
// all registered metrics. Ideally, Collector implementations support
// concurrent readers.
// Collect collects all the registered stats metrics from the OpenEBS volumes.
func (e *VolumeExporter) Collect(ch chan<- prometheus.Metric) {
if err := e.collect(); err != nil {
return
}
readIOPS.Collect(ch)
totalReadTime.Collect(ch)
totalReadBlockCount.Collect(ch)
writeIOPS.Collect(ch)
totalWriteTime.Collect(ch)
totalWriteBlockCount.Collect(ch)
usedLogicalBlocks.Collect(ch)
usedBlocks.Collect(ch)
sectorSize.Collect(ch)
}
// collect is used to set the values gathered from OpenEBS volume controller
func (e *VolumeExporter) collect() error {
var metrics v1.VolumeMetrics
// use a dedicated client so we don't mutate the shared http.DefaultClient
httpClient := &http.Client{Timeout: 1 * time.Second}
resp, err := httpClient.Get(e.VolumeControllerURL)
if err != nil {
log.Printf("could not retrieve OpenEBS Volume controller metrics: %v", err)
return err
}
// close the response body once decoding is done
defer resp.Body.Close()
err = json.NewDecoder(resp.Body).Decode(&metrics)
if err != nil {
log.Printf("could not decode OpenEBS Volume controller metrics: %v", err)
return err
}
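// parse errors below are ignored; strconv.ParseFloat returns 0 on failure,
// so an unparsable stat simply shows up as a zero gauge value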
rIOPS, _ := strconv.ParseFloat(metrics.ReadIOPS, 64)
readIOPS.Set(rIOPS)
totRTime, _ := strconv.ParseFloat(metrics.TotalReadTime, 64)
totalReadTime.Set(totRTime)
totRBCount, _ := strconv.ParseFloat(metrics.TotalReadBlockCount, 64)
totalReadBlockCount.Set(totRBCount)
wIOPS, _ := strconv.ParseFloat(metrics.WriteIOPS, 64)
writeIOPS.Set(wIOPS)
totWTime, _ := strconv.ParseFloat(metrics.TotalWriteTime, 64)
totalWriteTime.Set(totWTime)
totWBCount, _ := strconv.ParseFloat(metrics.TotalWriteBlockCount, 64)
totalWriteBlockCount.Set(totWBCount)
uLBlocks, _ := strconv.ParseFloat(metrics.UsedLogicalBlocks, 64)
usedLogicalBlocks.Set(uLBlocks)
uBlocks, _ := strconv.ParseFloat(metrics.UsedBlocks, 64)
usedBlocks.Set(uBlocks)
sSize, _ := strconv.ParseFloat(metrics.SectorSize, 64)
sectorSize.Set(sSize)
return nil
}
| 1 | 7,106 | move all constants to `pkg/util/constants.go`, these constants had been used in `volume_stats.go` file too. So better to import them. | openebs-maya | go |
@@ -1,8 +1,8 @@
#appModules/winword.py
#A part of NonVisual Desktop Access (NVDA)
-#Copyright (C) 2006-2017 NV Access Limited, Manish Agrawal, Derek Riemer, Babbage B.V.
-#This file is covered by the GNU General Public License.
-#See the file COPYING for more details.
+# Copyright (C) 2006-2020 NV Access Limited, Manish Agrawal, Derek Riemer, Babbage B.V.
+# This file is covered by the GNU General Public License.
+# See the file COPYING for more details.
import ctypes
import time | 1 | #appModules/winword.py
#A part of NonVisual Desktop Access (NVDA)
#Copyright (C) 2006-2017 NV Access Limited, Manish Agrawal, Derek Riemer, Babbage B.V.
#This file is covered by the GNU General Public License.
#See the file COPYING for more details.
import ctypes
import time
from comtypes import COMError, GUID, BSTR
import comtypes.client
import comtypes.automation
import uuid
import operator
import locale
import collections
import colorsys
import sayAllHandler
import eventHandler
import braille
from scriptHandler import script
import languageHandler
import ui
import NVDAHelper
import XMLFormatting
from logHandler import log
import winUser
import oleacc
import globalVars
import speech
import config
import textInfos
import textInfos.offsets
import colors
import controlTypes
import treeInterceptorHandler
import browseMode
import review
from cursorManager import CursorManager, ReviewCursorManager
from tableUtils import HeaderCellInfo, HeaderCellTracker
from . import Window
from ..behaviors import EditableTextWithoutAutoSelectDetection
from . import _msOfficeChart
import locationHelper
#Word constants
#wdLineSpacing rules
wdLineSpaceSingle=0
wdLineSpace1pt5=1
wdLineSpaceDouble=2
wdLineSpaceAtLeast=3
wdLineSpaceExactly=4
wdLineSpaceMultiple=5
# wdMeasurementUnits
wdInches=0
wdCentimeters=1
wdMillimeters=2
wdPoints=3
wdPicas=4
wdCollapseEnd=0
wdCollapseStart=1
#Indexing
wdActiveEndAdjustedPageNumber=1
wdActiveEndPageNumber=3
wdNumberOfPagesInDocument=4
wdHorizontalPositionRelativeToPage=5
wdVerticalPositionRelativeToPage=6
wdFirstCharacterLineNumber=10
wdWithInTable=12
wdStartOfRangeRowNumber=13
wdMaximumNumberOfRows=15
wdStartOfRangeColumnNumber=16
wdMaximumNumberOfColumns=18
#Horizontal alignment
wdAlignParagraphLeft=0
wdAlignParagraphCenter=1
wdAlignParagraphRight=2
wdAlignParagraphJustify=3
#Units
wdCharacter=1
wdWord=2
wdSentence=3
wdParagraph=4
wdLine=5
wdStory=6
wdColumn=9
wdRow=10
wdWindow=11
wdCell=12
wdCharFormat=13
wdParaFormat=14
wdTable=15
#GoTo - direction
wdGoToAbsolute=1
wdGoToRelative=2
wdGoToNext=2
wdGoToPrevious=3
#GoTo - units
wdGoToBookmark=-1
wdGoToSection=0
wdGoToPage=1
wdGoToTable=2
wdGoToLine=3
wdGoToFootnote=4
wdGoToEndnote=5
wdGoToComment=6
wdGoToField=7
wdGoToGraphic=8
wdGoToObject=9
wdGoToEquation=10
wdGoToHeading=11
wdGoToPercent=12
wdGoToSpellingError=13
wdGoToGrammaticalError=14
wdGoToProofreadingError=15
wdCommentsStory=4
wdEndnotesStory=3
wdEvenPagesFooterStory=8
wdEvenPagesHeaderStory=6
wdFirstPageFooterStory=11
wdFirstPageHeaderStory=10
wdFootnotesStory=2
wdMainTextStory=1
wdPrimaryFooterStory=9
wdPrimaryHeaderStory=7
wdTextFrameStory=5
wdFieldFormTextInput=70
wdFieldFormCheckBox=71
wdFieldFormDropDown=83
wdContentControlRichText=0
wdContentControlText=1
wdContentControlPicture=2
wdContentControlComboBox=3
wdContentControlDropdownList=4
wdContentControlBuildingBlockGallery=5
wdContentControlDate=6
wdContentControlGroup=7
wdContentControlCheckBox=8
wdInlineShapeChart=12
wdNoRevision=0
wdRevisionInsert=1
wdRevisionDelete=2
wdRevisionProperty=3
wdRevisionParagraphNumber=4
wdRevisionDisplayField=5
wdRevisionReconcile=6
wdRevisionConflict=7
wdRevisionStyle=8
wdRevisionReplace=9
wdRevisionParagraphProperty=10
wdRevisionTableProperty=11
wdRevisionSectionProperty=12
wdRevisionStyleDefinition=13
wdRevisionMovedFrom=14
wdRevisionMovedTo=15
wdRevisionCellInsertion=16
wdRevisionCellDeletion=17
wdRevisionCellMerge=18
# MsoThemeColorSchemeIndex
msoThemeAccent1=5
msoThemeAccent2=6
msoThemeAccent3=7
msoThemeAccent4=8
msoThemeAccent5=9
msoThemeAccent6=10
msoThemeDark1=1
msoThemeDark2=3
msoThemeFollowedHyperlink=12
msoThemeHyperlink=11
msoThemeLight1=2
msoThemeLight2=4
# WdThemeColorIndex
wdNotThemeColor=-1
wdThemeColorAccent1=4
wdThemeColorAccent2=5
wdThemeColorAccent3=6
wdThemeColorAccent4=7
wdThemeColorAccent5=8
wdThemeColorAccent6=9
wdThemeColorBackground1=12
wdThemeColorBackground2=14
wdThemeColorHyperlink=10
wdThemeColorHyperlinkFollowed=11
wdThemeColorMainDark1=0
wdThemeColorMainDark2=2
wdThemeColorMainLight1=1
wdThemeColorMainLight2=3
wdThemeColorText1=13
wdThemeColorText2=15
# Word Field types
FIELD_TYPE_REF = 3 # cross reference field
FIELD_TYPE_HYPERLINK = 88 # hyperlink field
# Mapping from http://www.wordarticles.com/Articles/Colours/2007.php#UIConsiderations
WdThemeColorIndexToMsoThemeColorSchemeIndex={
wdThemeColorMainDark1:msoThemeDark1,
wdThemeColorMainLight1:msoThemeLight1,
wdThemeColorMainDark2:msoThemeDark2,
wdThemeColorMainLight2:msoThemeLight2,
wdThemeColorAccent1:msoThemeAccent1,
wdThemeColorAccent2:msoThemeAccent2,
wdThemeColorAccent3:msoThemeAccent3,
wdThemeColorAccent4:msoThemeAccent4,
wdThemeColorAccent5:msoThemeAccent5,
wdThemeColorAccent6:msoThemeAccent6,
wdThemeColorHyperlink:msoThemeHyperlink,
wdThemeColorHyperlinkFollowed:msoThemeFollowedHyperlink,
wdThemeColorBackground1:msoThemeLight1,
wdThemeColorText1:msoThemeDark1,
wdThemeColorBackground2:msoThemeLight2,
wdThemeColorText2:msoThemeDark2,
}
wdRevisionTypeLabels={
# Translators: a Microsoft Word revision type (inserted content)
wdRevisionInsert:_("insertion"),
# Translators: a Microsoft Word revision type (deleted content)
wdRevisionDelete:_("deletion"),
# Translators: a Microsoft Word revision type (changed content property, e.g. font, color)
wdRevisionProperty:_("property"),
# Translators: a Microsoft Word revision type (changed paragraph number)
wdRevisionParagraphNumber:_("paragraph number"),
# Translators: a Microsoft Word revision type (display field)
wdRevisionDisplayField:_("display field"),
# Translators: a Microsoft Word revision type (reconcile)
wdRevisionReconcile:_("reconcile"),
# Translators: a Microsoft Word revision type (conflicting revision)
wdRevisionConflict:_("conflict"),
# Translators: a Microsoft Word revision type (style change)
wdRevisionStyle:_("style"),
# Translators: a Microsoft Word revision type (replaced content)
wdRevisionReplace:_("replace"),
# Translators: a Microsoft Word revision type (changed paragraph property, e.g. alignment)
wdRevisionParagraphProperty:_("paragraph property"),
# Translators: a Microsoft Word revision type (table)
wdRevisionTableProperty:_("table property"),
# Translators: a Microsoft Word revision type (section property)
wdRevisionSectionProperty:_("section property"),
# Translators: a Microsoft Word revision type (style definition)
wdRevisionStyleDefinition:_("style definition"),
# Translators: a Microsoft Word revision type (moved from)
wdRevisionMovedFrom:_("moved from"),
# Translators: a Microsoft Word revision type (moved to)
wdRevisionMovedTo:_("moved to"),
# Translators: a Microsoft Word revision type (inserted table cell)
wdRevisionCellInsertion:_("cell insertion"),
# Translators: a Microsoft Word revision type (deleted table cell)
wdRevisionCellDeletion:_("cell deletion"),
# Translators: a Microsoft Word revision type (merged table cells)
wdRevisionCellMerge:_("cell merge"),
}
storyTypeLocalizedLabels={
wdCommentsStory:_("Comments"),
wdEndnotesStory:_("Endnotes"),
wdEvenPagesFooterStory:_("Even pages footer"),
wdEvenPagesHeaderStory:_("Even pages header"),
wdFirstPageFooterStory:_("First page footer"),
wdFirstPageHeaderStory:_("First page header"),
wdFootnotesStory:_("Footnotes"),
wdPrimaryFooterStory:_("Primary footer"),
wdPrimaryHeaderStory:_("Primary header"),
wdTextFrameStory:_("Text frame"),
}
wdFieldTypesToNVDARoles={
wdFieldFormTextInput:controlTypes.ROLE_EDITABLETEXT,
wdFieldFormCheckBox:controlTypes.ROLE_CHECKBOX,
wdFieldFormDropDown:controlTypes.ROLE_COMBOBOX,
}
wdContentControlTypesToNVDARoles={
wdContentControlRichText:controlTypes.ROLE_EDITABLETEXT,
wdContentControlText:controlTypes.ROLE_EDITABLETEXT,
wdContentControlPicture:controlTypes.ROLE_GRAPHIC,
wdContentControlComboBox:controlTypes.ROLE_COMBOBOX,
wdContentControlDropdownList:controlTypes.ROLE_COMBOBOX,
wdContentControlDate:controlTypes.ROLE_EDITABLETEXT,
wdContentControlGroup:controlTypes.ROLE_GROUPING,
wdContentControlCheckBox:controlTypes.ROLE_CHECKBOX,
}
winwordWindowIid=GUID('{00020962-0000-0000-C000-000000000046}')
wm_winword_expandToLine=ctypes.windll.user32.RegisterWindowMessageW(u"wm_winword_expandToLine")
NVDAUnitsToWordUnits={
textInfos.UNIT_CHARACTER:wdCharacter,
textInfos.UNIT_WORD:wdWord,
textInfos.UNIT_LINE:wdLine,
textInfos.UNIT_SENTENCE:wdSentence,
textInfos.UNIT_PARAGRAPH:wdParagraph,
textInfos.UNIT_TABLE:wdTable,
textInfos.UNIT_CELL:wdCell,
textInfos.UNIT_ROW:wdRow,
textInfos.UNIT_COLUMN:wdColumn,
textInfos.UNIT_STORY:wdStory,
textInfos.UNIT_READINGCHUNK:wdSentence,
}
formatConfigFlagsMap={
"reportFontName":0x1,
"reportFontSize":0x2,
"reportFontAttributes":0x4,
"reportColor":0x8,
"reportAlignment":0x10,
"reportStyle":0x20,
"reportSpellingErrors":0x40,
"reportPage":0x80,
"reportLineNumber":0x100,
"reportTables":0x200,
"reportLists":0x400,
"reportLinks":0x800,
"reportComments":0x1000,
"reportHeadings":0x2000,
"autoLanguageSwitching":0x4000,
"reportRevisions":0x8000,
"reportParagraphIndentation":0x10000,
"reportLineSpacing":0x40000,
}
formatConfigFlag_includeLayoutTables=0x20000
# Map some characters from the Private Use Area (PUA) to Unicode. Meant to be used with bullets only.
# Doesn't care about the actual font, so can give incorrect Unicode in rare cases.
mapPUAToUnicode = {
# from : to # fontname
u'\uF06E': u'\u25A0', # Wingdings (black square)
u'\uF076': u'\u2756', # Wingdings (black diamond minus white x)
u'\uF0A7': u'\u25AA', # Symbol (black small square)
u'\uF0A8': u'\u2666', # Symbol (black diamond suit)
u'\uF0B7': u'\u2022', # Symbol (bullet)
u'\uF0D8': u'\u2B9A', # Wingdings (three-D top-lighted RIGHTWARDS equilateral arrowhead)
u'\uF0E8': u'\U0001f87a', # Wingdings (wide-headed rightwards heavy barb arrow)
u'\uF0F0': u'\u21E8', # Wingdings (right white arrow)
u'\uF0FC': u'\u2714', # Wingdings (heavy check mark)
}
class WordDocumentHeadingQuickNavItem(browseMode.TextInfoQuickNavItem):
def __init__(self,nodeType,document,textInfo,level):
self.level=level
super(WordDocumentHeadingQuickNavItem,self).__init__(nodeType,document,textInfo)
def isChild(self,parent):
if not isinstance(parent,WordDocumentHeadingQuickNavItem):
return False
return self.level>parent.level
class WordDocumentCollectionQuickNavItem(browseMode.TextInfoQuickNavItem):
"""
A QuickNavItem representing an item that MS Word stores as a collection (e.g. link, table, etc.).
"""
def rangeFromCollectionItem(self,item):
"""
Fetches a Microsoft Word range object from a Microsoft Word item in a collection. E.g. a HyperLink object.
@param item: an item from a collection (E.g. a HyperLink object).
"""
return item.range
def __init__(self,itemType,document,collectionItem):
"""
See L{TextInfoQuickNavItem} for itemType and document argument definitions.
@param collectionItem: an item from an MS Word collection e.g. HyperLink object.
"""
self.collectionItem=collectionItem
self.rangeObj=self.rangeFromCollectionItem(collectionItem)
textInfo=BrowseModeWordDocumentTextInfo(document,None,_rangeObj=self.rangeObj)
super(WordDocumentCollectionQuickNavItem,self).__init__(itemType,document,textInfo)
class WordDocumentCommentQuickNavItem(WordDocumentCollectionQuickNavItem):
@property
def label(self):
author=self.collectionItem.author
date=self.collectionItem.date
text=self.collectionItem.range.text
# Translators: The label shown for a comment in the NVDA Elements List dialog in Microsoft Word.
# {text}, {author} and {date} will be replaced by the corresponding details about the comment.
return _(u"comment: {text} by {author} on {date}").format(author=author,text=text,date=date)
def rangeFromCollectionItem(self,item):
return item.scope
class WordDocumentFieldQuickNavItem(WordDocumentCollectionQuickNavItem):
def rangeFromCollectionItem(self,item):
return item.result
class WordDocumentRevisionQuickNavItem(WordDocumentCollectionQuickNavItem):
@property
def label(self):
revisionType=wdRevisionTypeLabels.get(self.collectionItem.type)
author=self.collectionItem.author or ""
date=self.collectionItem.date
description=self.collectionItem.formatDescription or ""
text=(self.collectionItem.range.text or "")[:100]
# Translators: The label shown for an editor revision (tracked change) in the NVDA Elements List dialog in Microsoft Word.
# {revisionType} will be replaced with the type of revision; e.g. insertion, deletion or property.
# {description} will be replaced with a description of the formatting changes, if any.
# {text}, {author} and {date} will be replaced by the corresponding details about the revision.
return _(u"{revisionType} {description}: {text} by {author} on {date}").format(revisionType=revisionType,author=author,text=text,date=date,description=description)
class WordDocumentChartQuickNavItem(WordDocumentCollectionQuickNavItem):
@property
def label(self):
text=""
if self.collectionItem.Chart.HasTitle:
text=self.collectionItem.Chart.ChartTitle.Text
else:
text=self.collectionItem.Chart.Name
return u"{text}".format(text=text)
def moveTo(self):
chartNVDAObj = _msOfficeChart.OfficeChart(windowHandle= self.document.rootNVDAObject.windowHandle, officeApplicationObject=self.rangeObj.Document.Application, officeChartObject=self.collectionItem.Chart , initialDocument = self.document.rootNVDAObject )
eventHandler.queueEvent("gainFocus",chartNVDAObj)
class WordDocumentSpellingErrorQuickNavItem(WordDocumentCollectionQuickNavItem):
def rangeFromCollectionItem(self,item):
return item
@property
def label(self):
text=self.collectionItem.text
# Translators: The label shown for a spelling error in the NVDA Elements List dialog in Microsoft Word.
# {text} will be replaced with the text of the spelling error.
return _(u"spelling: {text}").format(text=text)
class WinWordCollectionQuicknavIterator(object):
"""
Allows iterating over an MS Word collection (e.g. HyperLinks) emitting L{QuickNavItem} objects.
"""
quickNavItemClass=WordDocumentCollectionQuickNavItem #: the QuickNavItem class that should be instantiated and emitted.
def __init__(self,itemType,document,direction,rangeObj,includeCurrent):
"""
See L{QuickNavItemIterator} for itemType, document and direction definitions.
@param rangeObj: a Microsoft Word range object where the collection should be fetched from.
@param includeCurrent: if true then any item at the initial position will also be emitted
rather than just further ones.
"""
self.document=document
self.itemType=itemType
self.direction=direction if direction else "next"
self.rangeObj=rangeObj
self.includeCurrent=includeCurrent
def collectionFromRange(self,rangeObj):
"""
Fetches a Microsoft Word collection object from a Microsoft Word range object. E.g. HyperLinks from a range.
@param rangeObj: a Microsoft Word range object.
@return: a Microsoft Word collection object.
"""
raise NotImplementedError
def filter(self,item):
"""
Only allows certain items from a collection to be emitted. E.g. a table whose borders are enabled.
@param item: an item from a Microsoft Word collection (e.g. HyperLink object).
@return True if this item should be allowed, false otherwise.
@rtype: bool
"""
return True
def iterate(self):
"""
returns a generator that emits L{QuickNavItem} objects for this collection.
"""
if self.direction=="next":
self.rangeObj.moveEnd(wdStory,1)
elif self.direction=="previous":
self.rangeObj.collapse(wdCollapseStart)
self.rangeObj.moveStart(wdStory,-1)
items=self.collectionFromRange(self.rangeObj)
itemCount=items.count
isFirst=True
for index in range(1,itemCount+1):
if self.direction=="previous":
index=itemCount-(index-1)
collectionItem=items[index]
try:
item=self.quickNavItemClass(self.itemType,self.document,collectionItem)
except COMError:
message = ("Error iterating over item with "
"type: {type}, iteration direction: {dir}, total item count: {count}, item at index: {index}"
"\nThis could be caused by an issue with some element within or a corruption of the word document."
).format(type=self.itemType, dir=self.direction, count=itemCount, index=index)
log.debugWarning(message, exc_info=True)
continue
itemRange=item.rangeObj
# Skip over the item we're already on.
if not self.includeCurrent and isFirst and ((self.direction=="next" and itemRange.start<=self.rangeObj.start) or (self.direction=="previous" and itemRange.end>self.rangeObj.end)):
continue
if not self.filter(collectionItem):
continue
yield item
isFirst=False
class LinkWinWordCollectionQuicknavIterator(WinWordCollectionQuicknavIterator):
quickNavItemClass=WordDocumentFieldQuickNavItem
def collectionFromRange(self,rangeObj):
return rangeObj.fields
def filter(self, item):
t = item.type
if t == FIELD_TYPE_REF:
fieldText = item.code.text.strip().split(' ')
# ensure that the text has a \\h in it
return any( fieldText[i] == '\\h' for i in range(2, len(fieldText)) )
return t == FIELD_TYPE_HYPERLINK
class CommentWinWordCollectionQuicknavIterator(WinWordCollectionQuicknavIterator):
quickNavItemClass=WordDocumentCommentQuickNavItem
def collectionFromRange(self,rangeObj):
return rangeObj.comments
class RevisionWinWordCollectionQuicknavIterator(WinWordCollectionQuicknavIterator):
quickNavItemClass=WordDocumentRevisionQuickNavItem
def collectionFromRange(self,rangeObj):
return rangeObj.revisions
class SpellingErrorWinWordCollectionQuicknavIterator(WinWordCollectionQuicknavIterator):
quickNavItemClass=WordDocumentSpellingErrorQuickNavItem
def collectionFromRange(self,rangeObj):
return rangeObj.spellingErrors
class GraphicWinWordCollectionQuicknavIterator(WinWordCollectionQuicknavIterator):
def collectionFromRange(self,rangeObj):
return rangeObj.inlineShapes
def filter(self,item):
return 2<item.type<5
class TableWinWordCollectionQuicknavIterator(WinWordCollectionQuicknavIterator):
def collectionFromRange(self,rangeObj):
return rangeObj.tables
def filter(self,item):
return item.borders.enable
class ChartWinWordCollectionQuicknavIterator(WinWordCollectionQuicknavIterator):
quickNavItemClass=WordDocumentChartQuickNavItem
def collectionFromRange(self,rangeObj):
return rangeObj.inlineShapes
def filter(self,item):
return item.type==wdInlineShapeChart
class WordDocumentTextInfo(textInfos.TextInfo):
# #4852: temporary fix.
# force mouse reading chunk to sentence to make it what it used to be in 2014.4.
# We do however need to fix line so it does not accidentally scroll.
def _get_unit_mouseChunk(self):
unit=super(WordDocumentTextInfo,self).unit_mouseChunk
if unit==textInfos.UNIT_LINE:
unit=textInfos.UNIT_SENTENCE
return unit
def _get_locationText(self):
textList=[]
# #8994: MS Word can only give accurate distances (taking paragraph indenting into account) when directly querying the selection.
r=self._rangeObj
s=self.obj.WinwordSelectionObject
if s.isEqual(r):
r=s
else:
return super(WordDocumentTextInfo,self).locationText
offset=r.information(wdHorizontalPositionRelativeToPage)
distance=self.obj.getLocalizedMeasurementTextForPointSize(offset)
# Translators: a distance from the left edge of the page in Microsoft Word
textList.append(_("{distance} from left edge of page").format(distance=distance))
offset=r.information(wdVerticalPositionRelativeToPage)
distance=self.obj.getLocalizedMeasurementTextForPointSize(offset)
# Translators: a distance from the top edge of the page in Microsoft Word
textList.append(_("{distance} from top edge of page").format(distance=distance))
return ", ".join(textList)
def copyToClipboard(self):
self._rangeObj.copy()
return True
def find(self,text,caseSensitive=False,reverse=False):
f=self._rangeObj.find
f.text=text
f.matchCase=caseSensitive
f.forward=not reverse
return f.execute()
shouldIncludeLayoutTables=True #: layout tables should always be included (no matter the user's browse mode setting).
def activate(self):
import mathPres
mathMl=mathPres.getMathMlFromTextInfo(self)
if mathMl:
return mathPres.interactWithMathMl(mathMl)
newRng=self._rangeObj.Duplicate
newRng.End=newRng.End+1
if newRng.InlineShapes.Count >= 1:
if newRng.InlineShapes[1].Type==wdInlineShapeChart:
return eventHandler.queueEvent('gainFocus',_msOfficeChart.OfficeChart(windowHandle= self.obj.windowHandle, officeApplicationObject=self.obj.WinwordDocumentObject.Application, officeChartObject=newRng.InlineShapes[1].Chart , initialDocument = self.obj ))
# Handle activating links.
# It is necessary to expand to word to get a link as the link's first character is never actually in the link!
tempRange=self._rangeObj.duplicate
tempRange.expand(wdWord)
links=tempRange.hyperlinks
if links.count>0:
links[1].follow()
return
tempRange.expand(wdParagraph)
fields=tempRange.fields
for field in (fields.item(i) for i in range(1, fields.count+1)):
if field.type != FIELD_TYPE_REF:
continue
fResult = field.result
fResult.moveStart(wdCharacter,-1) # move back one visible character (past the hidden text, e.g. the code for the reference).
fResStart = fResult.start +1 # don't include the character before the hidden text.
fResEnd = fResult.end
rObjStart = self._rangeObj.start
rObjEnd = self._rangeObj.end
# check to see if the _rangeObj is inside the fResult range
if not (fResStart <= rObjStart and fResEnd >= rObjEnd):
continue
# text will be something like ' REF _Ref457210120 \\h '
fieldText = field.code.text.strip().split(' ')
# the \\h field indicates that the field is a link
if not any( fieldText[i] == '\\h' for i in range(2, len(fieldText)) ):
log.debugWarning("no \\h for field xref: %s" % field.code.text)
continue
bookmarkKey = fieldText[1] # we want the _Ref12345 part
# get bookmark start; we need to look at the whole document to find the bookmark.
tempRange.Expand(wdStory)
bMark = tempRange.bookmarks(bookmarkKey)
self._rangeObj.setRange(bMark.start, bMark.start)
self.updateCaret()
tiCopy = self.copy()
tiCopy.expand(textInfos.UNIT_LINE)
speech.speakTextInfo(tiCopy,reason=controlTypes.REASON_FOCUS)
braille.handler.handleCaretMove(self)
return
def _expandToLineAtCaret(self):
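# Expand this range to the full on-screen line via the in-process helper
# nvdaInProcUtils_winword_expandToLine; if the helper fails or returns an
# invalid span, fall back to expanding by paragraph.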
lineStart=ctypes.c_int()
lineEnd=ctypes.c_int()
res=NVDAHelper.localLib.nvdaInProcUtils_winword_expandToLine(self.obj.appModule.helperLocalBindingHandle,self.obj.documentWindowHandle,self._rangeObj.start,ctypes.byref(lineStart),ctypes.byref(lineEnd))
if res!=0 or lineStart.value==lineEnd.value or lineStart.value==-1 or lineEnd.value==-1:
log.debugWarning("winword_expandToLine failed")
self._rangeObj.expand(wdParagraph)
return
self._rangeObj.setRange(lineStart.value,lineEnd.value)
def __init__(self,obj,position,_rangeObj=None):
super(WordDocumentTextInfo,self).__init__(obj,position)
if _rangeObj:
self._rangeObj=_rangeObj.Duplicate
return
if isinstance(position, locationHelper.Point):
try:
self._rangeObj=self.obj.WinwordDocumentObject.activeWindow.RangeFromPoint(position.x,position.y)
except COMError:
raise NotImplementedError
elif position==textInfos.POSITION_SELECTION:
self._rangeObj=self.obj.WinwordSelectionObject.range
elif position==textInfos.POSITION_CARET:
self._rangeObj=self.obj.WinwordSelectionObject.range
self._rangeObj.Collapse()
elif position==textInfos.POSITION_ALL:
self._rangeObj=self.obj.WinwordSelectionObject.range
self._rangeObj.Expand(wdStory)
elif position==textInfos.POSITION_FIRST:
self._rangeObj=self.obj.WinwordSelectionObject.range
self._rangeObj.SetRange(0,0)
elif position==textInfos.POSITION_LAST:
self._rangeObj=self.obj.WinwordSelectionObject.range
self._rangeObj.endOf(wdStory)
self._rangeObj.move(wdCharacter,-1)
elif isinstance(position,textInfos.offsets.Offsets):
self._rangeObj=self.obj.WinwordSelectionObject.range
self._rangeObj.SetRange(position.startOffset,position.endOffset)
elif isinstance(position,WordDocumentTextInfo):
# copying from one textInfo to another
self._rangeObj=position._rangeObj.duplicate
else:
raise NotImplementedError("position: %s"%position)
def getTextWithFields(self,formatConfig=None):
if self.isCollapsed: return []
if self.obj.ignoreFormatting:
return [self.text]
extraDetail=formatConfig.get('extraDetail',False) if formatConfig else False
if not formatConfig:
formatConfig=config.conf['documentFormatting']
formatConfig['autoLanguageSwitching']=config.conf['speech'].get('autoLanguageSwitching',False)
startOffset=self._rangeObj.start
endOffset=self._rangeObj.end
text=BSTR()
# #9067: format config flags map is a dictionary.
formatConfigFlags=sum(y for x,y in formatConfigFlagsMap.items() if formatConfig.get(x,False))
if self.shouldIncludeLayoutTables:
formatConfigFlags+=formatConfigFlag_includeLayoutTables
if self.obj.ignoreEditorRevisions:
formatConfigFlags&=~formatConfigFlagsMap['reportRevisions']
if self.obj.ignorePageNumbers:
formatConfigFlags&=~formatConfigFlagsMap['reportPage']
res=NVDAHelper.localLib.nvdaInProcUtils_winword_getTextInRange(self.obj.appModule.helperLocalBindingHandle,self.obj.documentWindowHandle,startOffset,endOffset,formatConfigFlags,ctypes.byref(text))
if res or not text:
log.debugWarning("winword_getTextInRange failed with %d"%res)
return [self.text]
commandList=XMLFormatting.XMLTextParser().parse(text.value)
for index,item in enumerate(commandList):
if isinstance(item,textInfos.FieldCommand):
field=item.field
if isinstance(field,textInfos.ControlField):
item.field=self._normalizeControlField(field)
elif isinstance(field,textInfos.FormatField):
item.field=self._normalizeFormatField(field,extraDetail=extraDetail)
elif index>0 and isinstance(item,str) and item.isspace():
#2047: don't expose language for whitespace as it's incorrect for East Asian languages
lastItem=commandList[index-1]
if isinstance(lastItem,textInfos.FieldCommand) and isinstance(lastItem.field,textInfos.FormatField):
try:
del lastItem.field['language']
except KeyError:
pass
return commandList
def _normalizeControlField(self,field):
role=field.pop('role',None)
if role=="heading":
role=controlTypes.ROLE_HEADING
elif role=="table":
role=controlTypes.ROLE_TABLE
field['table-rowcount']=int(field.get('table-rowcount',0))
field['table-columncount']=int(field.get('table-columncount',0))
elif role=="tableCell":
role=controlTypes.ROLE_TABLECELL
field['table-rownumber']=int(field.get('table-rownumber',0))
field['table-columnnumber']=int(field.get('table-columnnumber',0))
elif role=="footnote":
role=controlTypes.ROLE_FOOTNOTE
elif role=="endnote":
role=controlTypes.ROLE_ENDNOTE
elif role=="graphic":
role=controlTypes.ROLE_GRAPHIC
elif role=="chart":
role=controlTypes.ROLE_CHART
elif role=="object":
progid=field.get("progid")
if progid and progid.startswith("Equation.DSMT"):
# MathType.
role=controlTypes.ROLE_MATH
else:
role=controlTypes.ROLE_EMBEDDEDOBJECT
else:
fieldType=int(field.pop('wdFieldType',-1))
if fieldType!=-1:
role=wdFieldTypesToNVDARoles.get(fieldType,controlTypes.ROLE_UNKNOWN)
if fieldType==wdFieldFormCheckBox and int(field.get('wdFieldResult','0'))>0:
field['states']=set([controlTypes.STATE_CHECKED])
elif fieldType==wdFieldFormDropDown:
field['value']=field.get('wdFieldResult',None)
fieldStatusText=field.pop('wdFieldStatusText',None)
if fieldStatusText:
field['name']=fieldStatusText
field['alwaysReportName']=True
else:
fieldType=int(field.get('wdContentControlType',-1))
if fieldType!=-1:
role=wdContentControlTypesToNVDARoles.get(fieldType,controlTypes.ROLE_UNKNOWN)
if role==controlTypes.ROLE_CHECKBOX:
fieldChecked=bool(int(field.get('wdContentControlChecked','0')))
if fieldChecked:
field['states']=set([controlTypes.STATE_CHECKED])
fieldTitle=field.get('wdContentControlTitle',None)
if fieldTitle:
field['name']=fieldTitle
field['alwaysReportName']=True
if role is not None: field['role']=role
if role==controlTypes.ROLE_TABLE and field.get('longdescription'):
field['states']=set([controlTypes.STATE_HASLONGDESC])
storyType=int(field.pop('wdStoryType',0))
if storyType:
name=storyTypeLocalizedLabels.get(storyType,None)
if name:
field['name']=name
field['alwaysReportName']=True
field['role']=controlTypes.ROLE_FRAME
# Hack support for lazy fetching of row and column header text values
class ControlField(textInfos.ControlField):
def get(d,name,default=None):
if name=="table-rowheadertext":
try:
cell=self._rangeObj.cells[1]
except IndexError:
log.debugWarning("no cells for table row, possibly on end of cell mark")
return super(ControlField,d).get(name,default)
return self.obj.fetchAssociatedHeaderCellText(cell,False)
elif name=="table-columnheadertext":
try:
cell=self._rangeObj.cells[1]
except IndexError:
log.debugWarning("no cells for table row, possibly on end of cell mark")
return super(ControlField,d).get(name,default)
return self.obj.fetchAssociatedHeaderCellText(cell,True)
else:
return super(ControlField,d).get(name,default)
newField=ControlField()
newField.update(field)
return newField
def _normalizeFormatField(self,field,extraDetail=False):
_startOffset=int(field.pop('_startOffset'))
_endOffset=int(field.pop('_endOffset'))
lineSpacingRule=field.pop('wdLineSpacingRule',None)
lineSpacingVal=field.pop('wdLineSpacing',None)
if lineSpacingRule is not None:
lineSpacingRule=int(lineSpacingRule)
if lineSpacingRule==wdLineSpaceSingle:
# Translators: single line spacing
field['line-spacing']=pgettext('line spacing value',"single")
elif lineSpacingRule==wdLineSpaceDouble:
# Translators: double line spacing
field['line-spacing']=pgettext('line spacing value',"double")
elif lineSpacingRule==wdLineSpace1pt5:
# Translators: line spacing of 1.5 lines
field['line-spacing']=pgettext('line spacing value',"1.5 lines")
elif lineSpacingRule==wdLineSpaceExactly:
# Translators: exact (minimum) line spacing
field['line-spacing']=pgettext('line spacing value',"exact")
elif lineSpacingRule==wdLineSpaceAtLeast:
# Translators: line spacing of at least x point
field['line-spacing']=pgettext('line spacing value',"at least %.1f pt")%float(lineSpacingVal)
elif lineSpacingRule==wdLineSpaceMultiple:
# Translators: line spacing of x lines
field['line-spacing']=pgettext('line spacing value',"%.1f lines")%(float(lineSpacingVal)/12.0)
revisionType=int(field.pop('wdRevisionType',0))
if revisionType==wdRevisionInsert:
field['revision-insertion']=True
elif revisionType==wdRevisionDelete:
field['revision-deletion']=True
elif revisionType:
revisionLabel=wdRevisionTypeLabels.get(revisionType,None)
if revisionLabel:
field['revision']=revisionLabel
color=field.pop('color',None)
if color is not None:
field['color']=self.obj.winwordColorToNVDAColor(int(color))
try:
languageId = int(field.pop('wdLanguageId',0))
if languageId:
field['language']=languageHandler.windowsLCIDToLocaleName(languageId)
except:
log.debugWarning("language error",exc_info=True)
pass
for x in ("first-line-indent","left-indent","right-indent","hanging-indent"):
v=field.get(x)
if not v: continue
v=float(v)
if abs(v)<0.001:
v=None
else:
v=self.obj.getLocalizedMeasurementTextForPointSize(v)
field[x]=v
bullet=field.get('line-prefix')
if bullet and len(bullet)==1:
field['line-prefix']=mapPUAToUnicode.get(bullet,bullet)
return field
def expand(self,unit):
if unit==textInfos.UNIT_LINE:
try:
if self._rangeObj.tables.count>0 and self._rangeObj.cells.count==0:
unit=textInfos.UNIT_CHARACTER
except COMError:
pass
if unit==textInfos.UNIT_LINE:
self._expandToLineAtCaret()
elif unit==textInfos.UNIT_CHARACTER:
self._rangeObj.moveEnd(wdCharacter,1)
elif unit in NVDAUnitsToWordUnits:
self._rangeObj.Expand(NVDAUnitsToWordUnits[unit])
else:
raise NotImplementedError("unit: %s"%unit)
def compareEndPoints(self,other,which):
if which=="startToStart":
diff=self._rangeObj.Start-other._rangeObj.Start
elif which=="startToEnd":
diff=self._rangeObj.Start-other._rangeObj.End
elif which=="endToStart":
diff=self._rangeObj.End-other._rangeObj.Start
elif which=="endToEnd":
diff=self._rangeObj.End-other._rangeObj.End
else:
raise ValueError("bad argument - which: %s"%which)
if diff<0:
diff=-1
elif diff>0:
diff=1
return diff
def setEndPoint(self,other,which):
if which=="startToStart":
self._rangeObj.Start=other._rangeObj.Start
elif which=="startToEnd":
self._rangeObj.Start=other._rangeObj.End
elif which=="endToStart":
self._rangeObj.End=other._rangeObj.Start
elif which=="endToEnd":
self._rangeObj.End=other._rangeObj.End
else:
raise ValueError("bad argument - which: %s"%which)
def _get_isCollapsed(self):
if self._rangeObj.Start==self._rangeObj.End:
return True
else:
return False
def collapse(self,end=False):
if end:
oldEndOffset=self._rangeObj.end
self._rangeObj.collapse(wdCollapseEnd if end else wdCollapseStart)
if end:
newEndOffset = self._rangeObj.end
# The new endOffset should not have become smaller than the old endOffset; this could cause an infinite loop in
# a case where you call move end then collapse until the size of the range is no longer being reduced.
# For an example of this see sayAll (specifically readTextHelper_generator in sayAllHandler.py).
if newEndOffset < oldEndOffset :
raise RuntimeError
def copy(self):
return WordDocumentTextInfo(self.obj,None,_rangeObj=self._rangeObj)
def _get_text(self):
text=self._rangeObj.text
if not text:
text=""
return text
def _move(self,unit,direction,endPoint=None,_rangeObj=None):
if not _rangeObj:
_rangeObj=self._rangeObj
if unit in NVDAUnitsToWordUnits:
unit=NVDAUnitsToWordUnits[unit]
else:
raise NotImplementedError("unit: %s"%unit)
if endPoint=="start":
moveFunc=_rangeObj.MoveStart
elif endPoint=="end":
moveFunc=_rangeObj.MoveEnd
else:
moveFunc=_rangeObj.Move
res=moveFunc(unit,direction)
# Units higher than character and word expand to contain the last text plus the insertion point offset in the document.
# However, moving from a character before will incorrectly land on this offset, which makes move and expand contradict each other.
# Make sure that move fails if it lands on the final offset but the unit is bigger than character/word.
if (direction>0 and endPoint!="end"
and unit not in (wdCharacter,wdWord) # moving by units of line or more
and (_rangeObj.start+1) == self.obj.WinwordDocumentObject.range().end # character after the range start is the end of the document range
):
return 0
return res
def move(self,unit,direction,endPoint=None):
if unit!=textInfos.UNIT_LINE:
return self._move(unit,direction,endPoint)
if direction==0 or direction>1 or direction<-1:
raise NotImplementedError("moving by line is only supported collapsed and with a count of 1 or -1")
oldOffset=self._rangeObj.end if endPoint=="end" else self._rangeObj.start
newOffset=ctypes.c_long()
# Try moving by line making use of the selection temporarily
res=NVDAHelper.localLib.nvdaInProcUtils_winword_moveByLine(self.obj.appModule.helperLocalBindingHandle,self.obj.documentWindowHandle,oldOffset,1 if direction<0 else 0,ctypes.byref(newOffset))
if res==0:
res=direction
newOffset=newOffset.value
if direction<0 and not endPoint and newOffset==oldOffset:
# Moving backwards by line seemed to not move.
# Therefore fall back to moving back a character, expanding to line and collapsing to start instead.
self.move(textInfos.UNIT_CHARACTER,-1)
self.expand(unit)
self.collapse()
elif direction>0 and not endPoint and newOffset<oldOffset:
# Moving forward by line seems to have wrapped back before the original position
# This can happen in some tables with merged rows.
# Try moving forward by cell, but if that fails, jump past the entire table.
res=self.move(textInfos.UNIT_CELL,direction,endPoint)
if res==0:
self.expand(textInfos.UNIT_TABLE)
self.collapse(end=True)
else:
# the move by line using the selection succeeded. Therefore update this TextInfo's position.
if not endPoint:
self._rangeObj.setRange(newOffset,newOffset)
elif endPoint=="start":
self._rangeObj.start=newOffset
elif endPoint=="end":
self._rangeObj.end=newOffset
return res
def _get_bookmark(self):
return textInfos.offsets.Offsets(self._rangeObj.Start,self._rangeObj.End)
def _get_pointAtStart(self):
left = ctypes.c_int()
top = ctypes.c_int()
width = ctypes.c_int()
height = ctypes.c_int()
try:
self.obj.WinwordWindowObject.GetPoint(ctypes.byref(left), ctypes.byref(top), ctypes.byref(width), ctypes.byref(height), self._rangeObj)
except COMError:
raise LookupError
if not any((left.value, top.value, width.value, height.value)):
raise LookupError
return locationHelper.Point(left.value, top.value)
def updateCaret(self):
self.obj.WinwordWindowObject.ScrollIntoView(self._rangeObj)
self.obj.WinwordSelectionObject.SetRange(self._rangeObj.Start,self._rangeObj.Start)
def updateSelection(self):
self.obj.WinwordWindowObject.ScrollIntoView(self._rangeObj)
self.obj.WinwordSelectionObject.SetRange(self._rangeObj.Start,self._rangeObj.End)
def getMathMl(self, field):
try:
import mathType
except:
raise LookupError("MathType not installed")
rangeObj = self._rangeObj.Duplicate
rangeObj.Start = int(field["shapeoffset"])
obj = rangeObj.InlineShapes[0].OLEFormat
try:
return mathType.getMathMl(obj)
except:
raise LookupError("Couldn't get MathML from MathType")
class BrowseModeWordDocumentTextInfo(browseMode.BrowseModeDocumentTextInfo,treeInterceptorHandler.RootProxyTextInfo):
def __init__(self,obj,position,_rangeObj=None):
if isinstance(position,WordDocument):
position=textInfos.POSITION_CARET
super(BrowseModeWordDocumentTextInfo,self).__init__(obj,position,_rangeObj=_rangeObj)
def _get_focusableNVDAObjectAtStart(self):
return self.obj.rootNVDAObject
class WordDocumentTreeInterceptor(browseMode.BrowseModeDocumentTreeInterceptor):
TextInfo=BrowseModeWordDocumentTextInfo
def _activateLongDesc(self,controlField):
longDesc=controlField.get('longdescription')
# Translators: the title of the message dialog displaying an MS Word table description.
ui.browseableMessage(longDesc,_("Table description"))
def _get_isAlive(self):
return winUser.isWindow(self.rootNVDAObject.windowHandle)
def __contains__(self,obj):
return obj==self.rootNVDAObject
def _get_ElementsListDialog(self):
return ElementsListDialog
def _iterHeadings(self,nodeType,direction,rangeObj,includeCurrent):
neededLevel=int(nodeType[7:]) if len(nodeType)>7 else 0
isFirst=True
while True:
if not isFirst or includeCurrent:
level=rangeObj.paragraphs[1].outlineLevel
if level and 0<level<10 and (not neededLevel or neededLevel==level):
rangeObj.expand(wdParagraph)
yield WordDocumentHeadingQuickNavItem(nodeType,self,BrowseModeWordDocumentTextInfo(self,None,_rangeObj=rangeObj),level)
isFirst=False
if direction=="next":
newRangeObj=rangeObj.gotoNext(wdGoToHeading)
if not newRangeObj or newRangeObj.start<=rangeObj.start:
break
elif direction=="previous":
newRangeObj=rangeObj.gotoPrevious(wdGoToHeading)
if not newRangeObj or newRangeObj.start>=rangeObj.start:
break
rangeObj=newRangeObj
def _iterNodesByType(self,nodeType,direction="next",pos=None):
if pos:
rangeObj=pos.innerTextInfo._rangeObj
else:
rangeObj=self.rootNVDAObject.WinwordDocumentObject.range(0,0)
includeCurrent=False if pos else True
if nodeType=="link":
return LinkWinWordCollectionQuicknavIterator(nodeType,self,direction,rangeObj,includeCurrent).iterate()
elif nodeType=="annotation":
comments=CommentWinWordCollectionQuicknavIterator(nodeType,self,direction,rangeObj,includeCurrent).iterate()
revisions=RevisionWinWordCollectionQuicknavIterator(nodeType,self,direction,rangeObj,includeCurrent).iterate()
return browseMode.mergeQuickNavItemIterators([comments,revisions],direction)
elif nodeType in ("table","container"):
return TableWinWordCollectionQuicknavIterator(nodeType,self,direction,rangeObj,includeCurrent).iterate()
elif nodeType=="error":
return SpellingErrorWinWordCollectionQuicknavIterator(nodeType,self,direction,rangeObj,includeCurrent).iterate()
elif nodeType=="graphic":
return GraphicWinWordCollectionQuicknavIterator(nodeType,self,direction,rangeObj,includeCurrent).iterate()
elif nodeType=="chart":
return ChartWinWordCollectionQuicknavIterator(nodeType,self,direction,rangeObj,includeCurrent).iterate()
elif nodeType.startswith('heading'):
return self._iterHeadings(nodeType,direction,rangeObj,includeCurrent)
else:
raise NotImplementedError
def _activatePosition(self, info=None):
if not info:
info=self.makeTextInfo(textInfos.POSITION_CARET)
info.activate()
def script_nextRow(self,gesture):
self.rootNVDAObject._moveInTable(row=True,forward=True)
braille.handler.handleCaretMove(self)
def script_previousRow(self,gesture):
self.rootNVDAObject._moveInTable(row=True,forward=False)
braille.handler.handleCaretMove(self)
def script_nextColumn(self,gesture):
self.rootNVDAObject._moveInTable(row=False,forward=True)
braille.handler.handleCaretMove(self)
def script_previousColumn(self,gesture):
self.rootNVDAObject._moveInTable(row=False,forward=False)
braille.handler.handleCaretMove(self)
__gestures={
"kb:tab":"trapNonCommandGesture",
"kb:shift+tab":"trapNonCommandGesture",
"kb:control+alt+upArrow": "previousRow",
"kb:control+alt+downArrow": "nextRow",
"kb:control+alt+leftArrow": "previousColumn",
"kb:control+alt+rightArrow": "nextColumn",
# We want to fall back to MS Word's real page up and page down, rather than browseMode's faked 25 lines
"kb:pageUp":None,
"kb:pageDown":None,
"kb:shift+pageUp":None,
"kb:shift+pageDown":None,
}
class WordDocument(Window):
def winwordColorToNVDAColor(self,val):
if val>=0:
# normal RGB value
return colors.RGB.fromCOLORREF(val).name
elif (val&0xffffffff)==0xff000000:
# Translators: the default (automatic) color in Microsoft Word
return _("default color")
elif ((val>>28)&0xf)==0xd and ((val>>16)&0xff)==0x00:
# An MS Word color index plus intensity,
# made up of an MS Word theme color index, an hsv value ratio (MS Word darker percentage) and an hsv saturation ratio (MS Word lighter percentage)
# Info: http://www.wordarticles.com/Articles/Colours/2007.php#UIConsiderations
saturationRatio=(val&0xff)/255.0
valueRatio=((val>>8)&0xff)/255.0
themeColorIndex=(val>>24)&0x0f
# Convert the MS Word theme color index to an MS Office color scheme index
schemeColorIndex=WdThemeColorIndexToMsoThemeColorSchemeIndex[themeColorIndex]
# Lookup the rgb value for the MS Office scheme color index based on the current theme
colorref=self.WinwordDocumentObject.documentTheme.themeColorScheme(schemeColorIndex).rgb
# Convert the rgb value to hsv and apply the saturation and value ratios
rgb=tuple(x/255.0 for x in colors.RGB.fromCOLORREF(colorref))
hsv=colorsys.rgb_to_hsv(*rgb)
hsv=(hsv[0],hsv[1]*saturationRatio,hsv[2]*valueRatio)
rgb=colorsys.hsv_to_rgb(*hsv)
name=colors.RGB(rgb[0]*255,rgb[1]*255,rgb[2]*255).name
return name
else:
raise ValueError("Unknown color format %x %x %x %x"%((val>>24)&0xff,(val>>16)&0xff,(val>>8)&0xff,val&0xff))
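# Worked example (illustrative): a theme color whose unsigned 32-bit form is
# 0xD400BFFF (arriving here as a negative int) has 0xD in the top nibble,
# theme color index (0xD4 & 0x0f) == 4 (wdThemeColorAccent1), value ratio
# 0xBF / 255 ~= 0.75 (Word's "darker 25%") and saturation ratio
# 0xFF / 255 == 1.0 (no lightening).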
def _get_WinwordVersion(self):
if not hasattr(self,'_WinwordVersion'):
self._WinwordVersion=float(self.WinwordApplicationObject.version)
return self._WinwordVersion
def _get_documentWindowHandle(self):
return self.windowHandle
def _get_WinwordWindowObject(self):
if not getattr(self,'_WinwordWindowObject',None):
try:
pDispatch=oleacc.AccessibleObjectFromWindow(self.documentWindowHandle,winUser.OBJID_NATIVEOM,interface=comtypes.automation.IDispatch)
except (COMError, WindowsError):
log.debugWarning("Could not get MS Word object model from window %s with class %s"%(self.documentWindowHandle,winUser.getClassName(self.documentWindowHandle)),exc_info=True)
return None
self._WinwordWindowObject=comtypes.client.dynamic.Dispatch(pDispatch)
return self._WinwordWindowObject
def _get_WinwordDocumentObject(self):
if not getattr(self,'_WinwordDocumentObject',None):
windowObject=self.WinwordWindowObject
if not windowObject: return None
self._WinwordDocumentObject=windowObject.document
return self._WinwordDocumentObject
def _get_WinwordApplicationObject(self):
if not getattr(self,'_WinwordApplicationObject',None):
self._WinwordApplicationObject=self.WinwordWindowObject.application
return self._WinwordApplicationObject
def _get_WinwordSelectionObject(self):
if not getattr(self,'_WinwordSelectionObject',None):
windowObject=self.WinwordWindowObject
if not windowObject: return None
self._WinwordSelectionObject=windowObject.selection
return self._WinwordSelectionObject
def _WaitForValueChangeForAction(self,action,fetcher,timeout=0.15):
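# Poll the fetched value until it differs from the value captured before the
# action, or until the timeout expires; the latest value is returned either way.
# Word applies many formatting commands asynchronously, so a single fetch
# straight after sending the gesture may still observe the old state.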
oldVal=fetcher()
action()
startTime=curTime=time.time()
curVal=fetcher()
while curVal==oldVal and (curTime-startTime)<timeout:
time.sleep(0.01)
curVal=fetcher()
curTime=time.time()
return curVal
def script_toggleBold(self,gesture):
if not self.WinwordSelectionObject:
# We cannot fetch the Word object model, and therefore cannot report the format change.
# The object model may be unavailable because this is a pure UIA implementation such as Windows 10 Mail, or it's within Windows Defender Application Guard.
# Eventually UIA will have its own way of detecting format changes at the cursor. For now, just let the gesture through and don't report anything.
return gesture.send()
val=self._WaitForValueChangeForAction(lambda: gesture.send(),lambda: self.WinwordSelectionObject.font.bold)
if val:
# Translators: a message when toggling formatting in Microsoft word
ui.message(_("Bold on"))
else:
# Translators: a message when toggling formatting in Microsoft word
ui.message(_("Bold off"))
def script_toggleItalic(self,gesture):
if not self.WinwordSelectionObject:
# We cannot fetch the Word object model, and therefore cannot report the format change.
# The object model may be unavailable because this is a pure UIA implementation such as Windows 10 Mail, or it's within Windows Defender Application Guard.
# Eventually UIA will have its own way of detecting format changes at the cursor. For now, just let the gesture through and don't report anything.
return gesture.send()
val=self._WaitForValueChangeForAction(lambda: gesture.send(),lambda: self.WinwordSelectionObject.font.italic)
if val:
# Translators: a message when toggling formatting in Microsoft word
ui.message(_("Italic on"))
else:
# Translators: a message when toggling formatting in Microsoft word
ui.message(_("Italic off"))
def script_toggleUnderline(self,gesture):
if not self.WinwordSelectionObject:
# We cannot fetch the Word object model, so we therefore cannot report the format change.
# The object model may be unavailable because this is a pure UIA implementation such as Windows 10 Mail, or it's within Windows Defender Application Guard.
# Eventually UIA will have its own way of detecting format changes at the cursor. For now, just let the gesture through and don't report anything.
return gesture.send()
val=self._WaitForValueChangeForAction(lambda: gesture.send(),lambda: self.WinwordSelectionObject.font.underline)
if val:
# Translators: a message when toggling formatting in Microsoft word
ui.message(_("Underline on"))
else:
# Translators: a message when toggling formatting in Microsoft word
ui.message(_("Underline off"))
def script_toggleAlignment(self,gesture):
if not self.WinwordSelectionObject:
# We cannot fetch the Word object model, and therefore cannot report the format change.
# The object model may be unavailable because this is a pure UIA implementation such as Windows 10 Mail, or it's within Windows Defender Application Guard.
# Eventually UIA will have its own way of detecting format changes at the cursor. For now, just let the gesture through and don't report anything.
return gesture.send()
val=self._WaitForValueChangeForAction(lambda: gesture.send(),lambda: self.WinwordSelectionObject.paragraphFormat.alignment)
alignmentMessages={
# Translators: an alignment in Microsoft Word
wdAlignParagraphLeft:_("Left aligned"),
# Translators: an alignment in Microsoft Word
wdAlignParagraphCenter:_("centered"),
# Translators: an alignment in Microsoft Word
wdAlignParagraphRight:_("Right aligned"),
# Translators: an alignment in Microsoft Word
wdAlignParagraphJustify:_("Justified"),
}
msg=alignmentMessages.get(val)
if msg:
ui.message(msg)
def script_toggleSuperscriptSubscript(self,gesture):
if not self.WinwordSelectionObject:
# We cannot fetch the Word object model, and therefore cannot report the format change.
# The object model may be unavailable because this is a pure UIA implementation such as Windows 10 Mail, or it's within Windows Defender Application Guard.
# Eventually UIA will have its own way of detecting format changes at the cursor. For now, just let the gesture through and don't report anything.
return gesture.send()
val=self._WaitForValueChangeForAction(lambda: gesture.send(),lambda: (self.WinwordSelectionObject.font.superscript,self.WinwordSelectionObject.font.subscript))
if val[0]:
# Translators: a message when toggling formatting in Microsoft word
ui.message(_("Superscript"))
elif val[1]:
# Translators: a message when toggling formatting in Microsoft word
ui.message(_("Subscript"))
else:
# Translators: a message when toggling formatting in Microsoft word
ui.message(_("Baseline"))
def script_moveParagraphDown(self,gesture):
oldBookmark=self.makeTextInfo(textInfos.POSITION_CARET).bookmark
gesture.send()
if self._hasCaretMoved(oldBookmark)[0]:
info=self.makeTextInfo(textInfos.POSITION_SELECTION)
info.collapse()
info.move(textInfos.UNIT_PARAGRAPH,-1,endPoint="start")
lastParaText=info.text.strip()
if lastParaText:
# Translators: a message reported when a paragraph is moved below another paragraph
ui.message(_("Moved below %s")%lastParaText)
else:
# Translators: a message reported when a paragraph is moved below a blank paragraph
ui.message(_("Moved below blank paragraph"))
def script_moveParagraphUp(self,gesture):
oldBookmark=self.makeTextInfo(textInfos.POSITION_CARET).bookmark
gesture.send()
if self._hasCaretMoved(oldBookmark)[0]:
info=self.makeTextInfo(textInfos.POSITION_SELECTION)
info.collapse()
info.move(textInfos.UNIT_PARAGRAPH,1)
info.expand(textInfos.UNIT_PARAGRAPH)
lastParaText=info.text.strip()
if lastParaText:
# Translators: a message reported when a paragraph is moved above another paragraph
ui.message(_("Moved above %s")%lastParaText)
else:
# Translators: a message reported when a paragraph is moved above a blank paragraph
ui.message(_("Moved above blank paragraph"))
def script_increaseDecreaseOutlineLevel(self,gesture):
if not self.WinwordSelectionObject:
# We cannot fetch the Word object model, and therefore cannot report the format change.
# The object model may be unavailable because this is a pure UIA implementation such as Windows 10 Mail, or it's within Windows Defender Application Guard.
# Eventually UIA will have its own way of detecting format changes at the cursor. For now, just let the gesture through and don't report anything.
return gesture.send()
val=self._WaitForValueChangeForAction(lambda: gesture.send(),lambda: self.WinwordSelectionObject.paragraphFormat.outlineLevel)
style=self.WinwordSelectionObject.style.nameLocal
# Translators: the message when the outline level / style is changed in Microsoft word
ui.message(_("{styleName} style, outline level {outlineLevel}").format(styleName=style,outlineLevel=val))
def script_increaseDecreaseFontSize(self,gesture):
if not self.WinwordSelectionObject:
# We cannot fetch the Word object model, and therefore cannot report the format change.
# The object model may be unavailable because this is a pure UIA implementation such as Windows 10 Mail, or it's within Windows Defender Application Guard.
# Eventually UIA will have its own way of detecting format changes at the cursor. For now, just let the gesture through and don't report anything.
return gesture.send()
val=self._WaitForValueChangeForAction(lambda: gesture.send(),lambda: self.WinwordSelectionObject.font.size)
# Translators: a message when increasing or decreasing font size in Microsoft Word
ui.message(_("{size:g} point font").format(size=val))
def script_toggleChangeTracking(self, gesture):
if not self.WinwordDocumentObject:
# We cannot fetch the Word object model, so we therefore cannot report the status change.
# The object model may be unavailable because this is a pure UIA implementation such as Windows 10 Mail,
# or it's within Windows Defender Application Guard.
# In this case, just let the gesture through and don't report anything.
return gesture.send()
val = self._WaitForValueChangeForAction(
lambda: gesture.send(),
lambda: self.WinwordDocumentObject.TrackRevisions
)
if val:
# Translators: a message when toggling change tracking in Microsoft word
ui.message(_("Change tracking on"))
else:
# Translators: a message when toggling change tracking in Microsoft word
ui.message(_("Change tracking off"))
@script(gesture="kb:control+shift+8")
def script_toggleDisplayNonprintingCharacters(self, gesture):
if not self.WinwordWindowObject:
# We cannot fetch the Word object model, so we therefore cannot report the status change.
# The object model may be unavailable because this is a pure UIA implementation such as Windows 10 Mail,
# or it's within Windows Defender Application Guard.
# In this case, just let the gesture through and don't report anything.
return gesture.send()
val = self._WaitForValueChangeForAction(
lambda: gesture.send(),
lambda: self.WinwordWindowObject.ActivePane.View.ShowAll
)
if val:
# Translators: a message when toggling Display Nonprinting Characters in Microsoft word
ui.message(_("Display nonprinting characters"))
else:
# Translators: a message when toggling Display Nonprinting Characters in Microsoft word
ui.message(_("Hide nonprinting characters"))
@script(gestures=["kb:tab", "kb:shift+tab"])
def script_tab(self,gesture):
"""
A script for the tab key which:
* if in a table, announces the newly selected cell or new cell where the caret is, or
* if not in a table, announces the distance of the caret from the left edge of the document, and any remaining text on that line.
"""
gesture.send()
selectionObj=self.WinwordSelectionObject
inTable=selectionObj.tables.count>0 if selectionObj else False
info=self.makeTextInfo(textInfos.POSITION_SELECTION)
isCollapsed=info.isCollapsed
if inTable and isCollapsed:
info.expand(textInfos.UNIT_PARAGRAPH)
isCollapsed=info.isCollapsed
if not isCollapsed:
speech.speakTextInfo(info,reason=controlTypes.REASON_FOCUS)
braille.handler.handleCaretMove(self)
if selectionObj and isCollapsed:
offset=selectionObj.information(wdHorizontalPositionRelativeToPage)
msg=self.getLocalizedMeasurementTextForPointSize(offset)
ui.message(msg)
if selectionObj.paragraphs[1].range.start==selectionObj.start:
info.expand(textInfos.UNIT_LINE)
speech.speakTextInfo(info,unit=textInfos.UNIT_LINE,reason=controlTypes.REASON_CARET)
def getLocalizedMeasurementTextForPointSize(self,offset):
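# Convert an offset in points to the user's configured measurement unit;
# for example, with inches selected an offset of 36 points is reported as
# "0.5 inches" (72 points per inch, 28.35 points per centimeter).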
options=self.WinwordApplicationObject.options
useCharacterUnit=options.useCharacterUnit
if useCharacterUnit:
offset=offset/self.WinwordSelectionObject.font.size
# Translators: a measurement in Microsoft Word
return _("{offset:.3g} characters").format(offset=offset)
else:
unit=options.measurementUnit
if unit==wdInches:
offset=offset/72.0
# Translators: a measurement in Microsoft Word
return _("{offset:.3g} inches").format(offset=offset)
elif unit==wdCentimeters:
offset=offset/28.35
# Translators: a measurement in Microsoft Word
return _("{offset:.3g} centimeters").format(offset=offset)
elif unit==wdMillimeters:
offset=offset/2.835
# Translators: a measurement in Microsoft Word
return _("{offset:.3g} millimeters").format(offset=offset)
elif unit==wdPoints:
# Translators: a measurement in Microsoft Word
return _("{offset:.3g} points").format(offset=offset)
elif unit==wdPicas:
offset=offset/12.0
# Translators: a measurement in Microsoft Word
# See http://support.microsoft.com/kb/76388 for details.
return _("{offset:.3g} picas").format(offset=offset)
def script_changeLineSpacing(self,gesture):
if not self.WinwordSelectionObject:
# We cannot fetch the Word object model, and therefore cannot report the format change.
# The object model may be unavailable because this is a pure UIA implementation such as Windows 10 Mail, or it's within Windows Defender Application Guard.
# Eventually UIA will have its own way of detecting format changes at the cursor. For now, just let the gesture through and don't report anything.
return gesture.send()
val=self._WaitForValueChangeForAction(lambda: gesture.send(),lambda:self.WinwordSelectionObject.ParagraphFormat.LineSpacingRule)
if val == wdLineSpaceSingle:
# Translators: a message when switching to single line spacing in Microsoft word
ui.message(_("Single line spacing"))
elif val == wdLineSpaceDouble:
# Translators: a message when switching to double line spacing in Microsoft word
ui.message(_("Double line spacing"))
elif val == wdLineSpace1pt5:
# Translators: a message when switching to 1.5 line spacing in Microsoft word
ui.message(_("1.5 line spacing"))
def initOverlayClass(self):
if isinstance(self, EditableTextWithoutAutoSelectDetection):
self.bindGesture("kb:alt+shift+home", "caret_changeSelection")
self.bindGesture("kb:alt+shift+end", "caret_changeSelection")
self.bindGesture("kb:alt+shift+pageUp", "caret_changeSelection",)
self.bindGesture("kb:alt+shift+pageDown", "caret_changeSelection",)
__gestures = {
"kb:control+[":"increaseDecreaseFontSize",
"kb:control+]":"increaseDecreaseFontSize",
"kb:control+shift+,":"increaseDecreaseFontSize",
"kb:control+shift+.":"increaseDecreaseFontSize",
"kb:control+b":"toggleBold",
"kb:control+i":"toggleItalic",
"kb:control+u":"toggleUnderline",
"kb:control+=":"toggleSuperscriptSubscript",
"kb:control+shift+=":"toggleSuperscriptSubscript",
"kb:control+l":"toggleAlignment",
"kb:control+e":"toggleAlignment",
"kb:control+r":"toggleAlignment",
"kb:control+j":"toggleAlignment",
"kb:alt+shift+downArrow":"moveParagraphDown",
"kb:alt+shift+upArrow":"moveParagraphUp",
"kb:alt+shift+rightArrow":"increaseDecreaseOutlineLevel",
"kb:alt+shift+leftArrow":"increaseDecreaseOutlineLevel",
"kb:control+shift+n":"increaseDecreaseOutlineLevel",
"kb:control+alt+1":"increaseDecreaseOutlineLevel",
"kb:control+alt+2":"increaseDecreaseOutlineLevel",
"kb:control+alt+3":"increaseDecreaseOutlineLevel",
"kb:control+1":"changeLineSpacing",
"kb:control+2":"changeLineSpacing",
"kb:control+5":"changeLineSpacing",
"kb:control+shift+e": "toggleChangeTracking",
"kb:control+pageUp": "caret_moveByLine",
"kb:control+pageDown": "caret_moveByLine",
}
class WordDocument_WwN(WordDocument):
def _get_documentWindowHandle(self):
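# The real document lives in a child window with the _WwG window class;
# fall back to the default handle if it cannot be located.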
w=NVDAHelper.localLib.findWindowWithClassInThread(self.windowThreadID,u"_WwG",True)
if not w:
log.debugWarning("Could not find window for class _WwG in thread.")
w=super(WordDocument_WwN,self).documentWindowHandle
return w
def _get_WinwordWindowObject(self):
window=super(WordDocument_WwN,self).WinwordWindowObject
if not window: return None
try:
return window.application.activeWindow.activePane
except COMError:
log.debugWarning("Unable to get activePane")
return window.application.windows[1].activePane
__gestures={
"kb:tab":None,
"kb:shift+tab":None,
}
class ElementsListDialog(browseMode.ElementsListDialog):
ELEMENT_TYPES=(browseMode.ElementsListDialog.ELEMENT_TYPES[0],browseMode.ElementsListDialog.ELEMENT_TYPES[1],
# Translators: The label of a radio button to select the type of element
# in the browse mode Elements List dialog.
("annotation", _("&Annotations")),
# Translators: The label of a radio button to select the type of element
# in the browse mode Elements List dialog.
("chart", _("&Charts")),
# Translators: The label of a radio button to select the type of element
# in the browse mode Elements List dialog.
("error", _("&Errors")),
)
| 1 | 28,667 | Could you revisit the full header and add appropriate spaces after the hashes? | nvaccess-nvda | py |
@@ -102,6 +102,11 @@ class CodeEditor extends FormWidgetBase
*/
public $showPrintMargin = false;
+ /**
+ * @var string Hint to show above the code editor
+ */
+ public $codeHint = '';
+
//
// Object properties
// | 1 | <?php namespace Backend\FormWidgets;
use Backend\Models\Preference as BackendPreference;
use Backend\Classes\FormWidgetBase;
/**
* Code Editor
* Renders a code editor field.
*
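 * A typical field definition using this widget might look like the following
 * (an illustrative sketch only; the option keys mirror the public properties below):
 *
 *     code:
 *         label: Code
 *         type: codeeditor
 *         language: php
 *         showGutter: true
 *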
* @package october\backend
* @author Alexey Bobkov, Samuel Georges
*/
class CodeEditor extends FormWidgetBase
{
//
// Configurable properties
//
/**
* @var string Code language to display (php, twig)
*/
public $language = 'php';
/**
* @var boolean Determines whether the gutter is visible.
*/
public $showGutter = true;
/**
 * @var boolean Indicates whether word wrapping is enabled.
*/
public $wordWrap = true;
/**
 * @var string Code folding mode: manual, markbegin, markbeginend.
*/
public $codeFolding = 'manual';
/**
* @var boolean Automatically close tags and special characters,
 * like quotation marks, parentheses, or brackets.
*/
public $autoClosing = true;
/**
 * @var boolean Indicates whether the editor uses spaces for indentation.
*/
public $useSoftTabs = true;
/**
 * @var integer Sets the size of the indentation.
*/
public $tabSize = 4;
/**
* @var integer Sets the font size.
*/
public $fontSize = 12;
/**
* @var integer Sets the editor margin size.
*/
public $margin = 0;
/**
* @var string Ace Editor theme to use.
*/
public $theme = 'twilight';
/**
* @var bool Show invisible characters.
*/
public $showInvisibles = false;
/**
* @var bool Highlight the active line.
*/
public $highlightActiveLine = true;
/**
* @var boolean If true, the editor is set to read-only mode
*/
public $readOnly = false;
/**
* @var string Autocomplete mode: manual, basic, live.
*/
public $autocompletion = 'manual';
/**
 * @var boolean If true, the editor enables snippets
*/
public $enableSnippets = true;
/**
 * @var boolean If true, the editor shows indent guides
*/
public $displayIndentGuides = true;
/**
 * @var boolean If true, the editor shows the print margin
*/
public $showPrintMargin = false;
//
// Object properties
//
/**
* @inheritDoc
*/
protected $defaultAlias = 'codeeditor';
/**
* @inheritDoc
*/
public function init()
{
$this->applyEditorPreferences();
if ($this->formField->disabled) {
$this->readOnly = true;
}
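// Field-level configuration overrides the user preferences applied above.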
$this->fillFromConfig([
'language',
'showGutter',
'wordWrap',
'codeFolding',
'autoClosing',
'useSoftTabs',
'tabSize',
'fontSize',
'margin',
'theme',
'showInvisibles',
'highlightActiveLine',
'readOnly',
'autocompletion',
'enableSnippets',
'displayIndentGuides',
'showPrintMargin'
]);
}
/**
* @inheritDoc
*/
public function render()
{
$this->prepareVars();
return $this->makePartial('codeeditor');
}
/**
* Prepares the widget data
*/
public function prepareVars()
{
$this->vars['fontSize'] = $this->fontSize;
$this->vars['wordWrap'] = $this->wordWrap;
$this->vars['codeFolding'] = $this->codeFolding;
$this->vars['autoClosing'] = $this->autoClosing;
$this->vars['tabSize'] = $this->tabSize;
$this->vars['theme'] = $this->theme;
$this->vars['showInvisibles'] = $this->showInvisibles;
$this->vars['highlightActiveLine'] = $this->highlightActiveLine;
$this->vars['useSoftTabs'] = $this->useSoftTabs;
$this->vars['showGutter'] = $this->showGutter;
$this->vars['language'] = $this->language;
$this->vars['margin'] = $this->margin;
$this->vars['stretch'] = $this->formField->stretch;
$this->vars['size'] = $this->formField->size;
$this->vars['readOnly'] = $this->readOnly;
$this->vars['autocompletion'] = $this->autocompletion;
$this->vars['enableSnippets'] = $this->enableSnippets;
$this->vars['displayIndentGuides'] = $this->displayIndentGuides;
$this->vars['showPrintMargin'] = $this->showPrintMargin;
// Double encode when escaping
$this->vars['value'] = htmlentities($this->getLoadValue(), ENT_QUOTES, 'UTF-8', true);
$this->vars['name'] = $this->getFieldName();
}
/**
* @inheritDoc
*/
protected function loadAssets()
{
$this->addCss('css/codeeditor.css', 'core');
$this->addJs('js/build-min.js', 'core');
}
/**
* Looks at the user preferences and overrides any set values.
* @return void
*/
protected function applyEditorPreferences()
{
// Load the editor system settings
$preferences = BackendPreference::instance();
$this->fontSize = $preferences->editor_font_size;
$this->wordWrap = $preferences->editor_word_wrap;
$this->codeFolding = $preferences->editor_code_folding;
$this->autoClosing = $preferences->editor_auto_closing;
$this->tabSize = $preferences->editor_tab_size;
$this->theme = $preferences->editor_theme;
$this->showInvisibles = $preferences->editor_show_invisibles;
$this->highlightActiveLine = $preferences->editor_highlight_active_line;
$this->useSoftTabs = !$preferences->editor_use_hard_tabs;
$this->showGutter = $preferences->editor_show_gutter;
$this->autocompletion = $preferences->editor_autocompletion;
$this->enableSnippets = $preferences->editor_enable_snippets;
$this->displayIndentGuides = $preferences->editor_display_indent_guides;
$this->showPrintMargin = $preferences->editor_show_print_margin;
}
}
| 1 | 17,410 | This should either be `hint` that takes a string to render inside of the partial container or probably better you should just render a separate hint field above the code field in the same tab. | octobercms-october | php |
@@ -4330,7 +4330,7 @@ func TestJetStreamCrossAccountMirrorsAndSources(t *testing.T) {
t.Fatalf("Did not receive correct response: %+v", scResp.Error)
}
- checkFor(t, 2*time.Second, 100*time.Millisecond, func() error {
+ checkFor(t, 4*time.Second, 100*time.Millisecond, func() error {
si, err := js2.StreamInfo("MY_SOURCE_TEST")
if err != nil {
t.Fatalf("Could not retrieve stream info") | 1 | // Copyright 2020-2021 The NATS Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package server
import (
"bytes"
"encoding/json"
"fmt"
"io/ioutil"
"math/rand"
"os"
"reflect"
"strings"
"sync"
"testing"
"time"
"github.com/nats-io/nats.go"
)
func TestJetStreamClusterConfig(t *testing.T) {
conf := createConfFile(t, []byte(`
listen: 127.0.0.1:-1
jetstream: {max_mem_store: 16GB, max_file_store: 10TB, store_dir: "%s"}
cluster { listen: 127.0.0.1:-1 }
`))
defer os.Remove(conf)
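// check reprocesses the config file and asserts that server creation fails
// with the expected error.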
check := func(errStr string) {
t.Helper()
opts, err := ProcessConfigFile(conf)
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
if _, err := NewServer(opts); err == nil || !strings.Contains(err.Error(), errStr) {
t.Fatalf("Expected an error of `%s`, got `%v`", errStr, err)
}
}
check("requires `server_name`")
conf = createConfFile(t, []byte(`
listen: 127.0.0.1:-1
server_name: "TEST"
jetstream: {max_mem_store: 16GB, max_file_store: 10TB, store_dir: "%s"}
cluster { listen: 127.0.0.1:-1 }
`))
defer os.Remove(conf)
check("requires `cluster.name`")
}
func TestJetStreamClusterLeader(t *testing.T) {
c := createJetStreamClusterExplicit(t, "JSC", 3)
defer c.shutdown()
// Kill our current leader and force an election.
c.leader().Shutdown()
c.waitOnLeader()
// Now killing our current leader should leave us leaderless.
c.leader().Shutdown()
c.expectNoLeader()
}
func TestJetStreamExpandCluster(t *testing.T) {
c := createJetStreamClusterExplicit(t, "JSC", 2)
defer c.shutdown()
c.addInNewServer()
c.waitOnPeerCount(3)
}
func TestJetStreamClusterAccountInfo(t *testing.T) {
c := createJetStreamClusterExplicit(t, "JSC", 3)
defer c.shutdown()
nc := clientConnectToServer(t, c.randomServer())
defer nc.Close()
reply := nats.NewInbox()
sub, _ := nc.SubscribeSync(reply)
if err := nc.PublishRequest(JSApiAccountInfo, reply, nil); err != nil {
t.Fatalf("Unexpected error: %v", err)
}
checkSubsPending(t, sub, 1)
resp, _ := sub.NextMsg(0)
var info JSApiAccountInfoResponse
if err := json.Unmarshal(resp.Data, &info); err != nil {
t.Fatalf("Unexpected error: %v", err)
}
if info.JetStreamAccountStats == nil || info.Error != nil {
t.Fatalf("Did not receive correct response: %+v", info.Error)
}
// Make sure we only got 1 response.
// Technically this will always work since it's a singleton service export.
if nmsgs, _, _ := sub.Pending(); nmsgs > 0 {
t.Fatalf("Expected only a single response, got %d more", nmsgs)
}
}
func TestJetStreamClusterSingleReplicaStreams(t *testing.T) {
c := createJetStreamClusterExplicit(t, "R1S", 3)
defer c.shutdown()
// Client based API
s := c.randomNonLeader()
nc, js := jsClientConnect(t, s)
defer nc.Close()
si, err := js.AddStream(&nats.StreamConfig{
Name: "TEST",
Subjects: []string{"foo", "bar"},
})
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
if si.Cluster == nil {
t.Fatalf("Expected si to have cluster info")
}
// Send in 10 messages.
msg, toSend := []byte("Hello JS Clustering"), 10
for i := 0; i < toSend; i++ {
if _, err = js.Publish("foo", msg); err != nil {
t.Fatalf("Unexpected publish error: %v", err)
}
}
// Now grab info for this stream.
si, err = js.StreamInfo("TEST")
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
if si == nil || si.Config.Name != "TEST" {
t.Fatalf("StreamInfo is not correct %+v", si)
}
// Check active state as well, shows that the owner answered.
if si.State.Msgs != uint64(toSend) {
t.Fatalf("Expected %d msgs, got bad state: %+v", toSend, si.State)
}
// Now create a consumer. This should be pinned to the same server that our stream was allocated to.
// First do a normal sub.
sub, err := js.SubscribeSync("foo")
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
checkSubsPending(t, sub, toSend)
// Now create a consumer as well.
ci, err := js.AddConsumer("TEST", &nats.ConsumerConfig{Durable: "dlc", AckPolicy: nats.AckExplicitPolicy})
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
if ci == nil || ci.Name != "dlc" || ci.Stream != "TEST" {
t.Fatalf("ConsumerInfo is not correct %+v", ci)
}
// Now make sure that if we kill and restart the server that this stream and consumer return.
sl := c.streamLeader("$G", "TEST")
sl.Shutdown()
c.restartServer(sl)
c.waitOnStreamLeader("$G", "TEST")
si, err = js.StreamInfo("TEST")
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
if si == nil || si.Config.Name != "TEST" {
t.Fatalf("StreamInfo is not correct %+v", si)
}
// Now durable consumer.
c.waitOnConsumerLeader("$G", "TEST", "dlc")
if _, err = js.ConsumerInfo("TEST", "dlc"); err != nil {
t.Fatalf("Unexpected error: %v", err)
}
}
func TestJetStreamClusterMultiReplicaStreams(t *testing.T) {
c := createJetStreamClusterExplicit(t, "RNS", 5)
defer c.shutdown()
// Client based API
s := c.randomServer()
nc, js := jsClientConnect(t, s)
defer nc.Close()
_, err := js.AddStream(&nats.StreamConfig{
Name: "TEST",
Subjects: []string{"foo", "bar"},
Replicas: 3,
})
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
// Send in 10 messages.
msg, toSend := []byte("Hello JS Clustering"), 10
for i := 0; i < toSend; i++ {
if _, err = js.Publish("foo", msg); err != nil {
t.Fatalf("Unexpected publish error: %v", err)
}
}
// Now grab info for this stream.
si, err := js.StreamInfo("TEST")
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
if si == nil || si.Config.Name != "TEST" {
t.Fatalf("StreamInfo is not correct %+v", si)
}
// Check active state as well, shows that the owner answered.
if si.State.Msgs != uint64(toSend) {
t.Fatalf("Expected %d msgs, got bad state: %+v", toSend, si.State)
}
// Now create a consumer. This should be affinitized to the same set of servers as the stream.
// First do a normal sub.
sub, err := js.SubscribeSync("foo")
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
checkSubsPending(t, sub, toSend)
// Now create a consumer as well.
ci, err := js.AddConsumer("TEST", &nats.ConsumerConfig{Durable: "dlc", AckPolicy: nats.AckExplicitPolicy})
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
if ci == nil || ci.Name != "dlc" || ci.Stream != "TEST" || ci.NumPending != uint64(toSend) {
t.Fatalf("ConsumerInfo is not correct %+v", ci)
}
}
func TestJetStreamClusterMemoryStore(t *testing.T) {
c := createJetStreamClusterExplicit(t, "R3M", 3)
defer c.shutdown()
// Client based API
nc, js := jsClientConnect(t, c.randomServer())
defer nc.Close()
_, err := js.AddStream(&nats.StreamConfig{
Name: "TEST",
Subjects: []string{"foo", "bar"},
Replicas: 3,
Storage: nats.MemoryStorage,
})
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
// Send in 100 messages.
msg, toSend := []byte("Hello MemoryStore"), 100
for i := 0; i < toSend; i++ {
if _, err = js.Publish("foo", msg); err != nil {
t.Fatalf("Unexpected publish error: %v", err)
}
}
// Now grab info for this stream.
si, err := js.StreamInfo("TEST")
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
if si == nil || si.Config.Name != "TEST" {
t.Fatalf("StreamInfo is not correct %+v", si)
}
if si.Cluster == nil || len(si.Cluster.Replicas) != 2 {
t.Fatalf("Cluster info is incorrect: %+v", si.Cluster)
}
// Check active state as well, shows that the owner answered.
if si.State.Msgs != uint64(toSend) {
t.Fatalf("Expected %d msgs, got bad state: %+v", toSend, si.State)
}
// Do a normal sub.
sub, err := js.SubscribeSync("foo")
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
checkSubsPending(t, sub, toSend)
}
func TestJetStreamClusterDelete(t *testing.T) {
c := createJetStreamClusterExplicit(t, "RNS", 3)
defer c.shutdown()
s := c.randomServer()
// Client for API requests.
nc := clientConnectToServer(t, s)
defer nc.Close()
cfg := StreamConfig{
Name: "C22",
Subjects: []string{"foo", "bar", "baz"},
Replicas: 2,
Storage: FileStorage,
MaxMsgs: 100,
}
req, err := json.Marshal(cfg)
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
resp, err := nc.Request(fmt.Sprintf(JSApiStreamCreateT, cfg.Name), req, time.Second)
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
var scResp JSApiStreamCreateResponse
if err := json.Unmarshal(resp.Data, &scResp); err != nil {
t.Fatalf("Unexpected error: %v", err)
}
if scResp.StreamInfo == nil || scResp.Error != nil {
t.Fatalf("Did not receive correct response: %+v", scResp.Error)
}
// Now create a consumer.
obsReq := CreateConsumerRequest{
Stream: cfg.Name,
Config: ConsumerConfig{Durable: "dlc", AckPolicy: AckExplicit},
}
req, err = json.Marshal(obsReq)
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
resp, err = nc.Request(fmt.Sprintf(JSApiDurableCreateT, cfg.Name, "dlc"), req, time.Second)
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
var ccResp JSApiConsumerCreateResponse
if err = json.Unmarshal(resp.Data, &ccResp); err != nil {
t.Fatalf("Unexpected error: %v", err)
}
if ccResp.ConsumerInfo == nil || ccResp.Error != nil {
t.Fatalf("Did not receive correct response: %+v", ccResp.Error)
}
// Now delete the consumer.
resp, _ = nc.Request(fmt.Sprintf(JSApiConsumerDeleteT, cfg.Name, "dlc"), nil, time.Second)
var cdResp JSApiConsumerDeleteResponse
if err = json.Unmarshal(resp.Data, &cdResp); err != nil {
t.Fatalf("Unexpected error: %v", err)
}
if !cdResp.Success || cdResp.Error != nil {
t.Fatalf("Got a bad response %+v", cdResp)
}
// Now delete the stream.
resp, _ = nc.Request(fmt.Sprintf(JSApiStreamDeleteT, cfg.Name), nil, time.Second)
var dResp JSApiStreamDeleteResponse
if err = json.Unmarshal(resp.Data, &dResp); err != nil {
t.Fatalf("Unexpected error: %v", err)
}
if !dResp.Success || dResp.Error != nil {
t.Fatalf("Got a bad response %+v", dResp.Error)
}
// This will get the current information about usage and limits for this account.
resp, err = nc.Request(JSApiAccountInfo, nil, time.Second)
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
var info JSApiAccountInfoResponse
if err := json.Unmarshal(resp.Data, &info); err != nil {
t.Fatalf("Unexpected error: %v", err)
}
if info.Streams != 0 {
t.Fatalf("Expected no remaining streams, got %d", info.Streams)
}
}
func TestJetStreamClusterStreamPurge(t *testing.T) {
c := createJetStreamClusterExplicit(t, "R5S", 5)
defer c.shutdown()
s := c.randomServer()
// Client based API
nc, js := jsClientConnect(t, s)
defer nc.Close()
_, err := js.AddStream(&nats.StreamConfig{
Name: "TEST",
Subjects: []string{"foo", "bar"},
Replicas: 3,
})
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
msg, toSend := []byte("Hello JS Clustering"), 100
for i := 0; i < toSend; i++ {
if _, err = js.Publish("foo", msg); err != nil {
t.Fatalf("Unexpected publish error: %v", err)
}
}
// Now grab info for this stream.
si, err := js.StreamInfo("TEST")
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
// Check active state as well, shows that the owner answered.
if si.State.Msgs != uint64(toSend) {
t.Fatalf("Expected %d msgs, got bad state: %+v", toSend, si.State)
}
// Now purge the stream.
if err := js.PurgeStream("TEST"); err != nil {
t.Fatalf("Unexpected purge error: %v", err)
}
si, err = js.StreamInfo("TEST")
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
if si.State.Msgs != 0 || si.State.FirstSeq != uint64(toSend+1) {
t.Fatalf("Expected no msgs, got: %+v", si.State)
}
}
func TestJetStreamClusterStreamUpdateSubjects(t *testing.T) {
c := createJetStreamClusterExplicit(t, "R3S", 3)
defer c.shutdown()
// Client based API
s := c.randomServer()
nc, js := jsClientConnect(t, s)
defer nc.Close()
cfg := &nats.StreamConfig{
Name: "TEST",
Subjects: []string{"foo", "bar"},
Replicas: 3,
}
if _, err := js.AddStream(cfg); err != nil {
t.Fatalf("Unexpected error: %v", err)
}
// Make sure we can update subjects.
cfg.Subjects = []string{"bar", "baz"}
si, err := js.UpdateStream(cfg)
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
if si == nil {
t.Fatalf("Expected a stream info, got none")
}
if !reflect.DeepEqual(si.Config.Subjects, cfg.Subjects) {
t.Fatalf("Expected subjects to be updated: got %+v", si.Config.Subjects)
}
// Make sure it registered
js2, err := nc.JetStream(nats.MaxWait(50 * time.Millisecond))
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
if _, err = js2.Publish("foo", nil); err == nil {
t.Fatalf("Expected this to fail")
}
if _, err = js2.Publish("baz", nil); err != nil {
t.Fatalf("Unexpected publish error: %v", err)
}
}
func TestJetStreamClusterConsumerRedeliveredInfo(t *testing.T) {
c := createJetStreamClusterExplicit(t, "R3S", 3)
defer c.shutdown()
// Client based API
nc, js := jsClientConnect(t, c.randomServer())
defer nc.Close()
cfg := &nats.StreamConfig{Name: "TEST"}
if _, err := js.AddStream(cfg); err != nil {
t.Fatalf("Unexpected error: %v", err)
}
if _, err := js.Publish("TEST", []byte("CI")); err != nil {
t.Fatalf("Unexpected publish error: %v", err)
}
sub, _ := nc.SubscribeSync("R")
sub.AutoUnsubscribe(2)
ci, err := js.AddConsumer("TEST", &nats.ConsumerConfig{
DeliverSubject: "R",
AckPolicy: nats.AckExplicitPolicy,
AckWait: 100 * time.Millisecond,
})
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
checkSubsPending(t, sub, 2)
sub.Unsubscribe()
ci, err = js.ConsumerInfo("TEST", ci.Name)
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
if ci.NumRedelivered != 1 {
t.Fatalf("Expected 1 redelivered, got %d", ci.NumRedelivered)
}
}
func TestJetStreamClusterConsumerState(t *testing.T) {
c := createJetStreamClusterExplicit(t, "R3S", 3)
defer c.shutdown()
s := c.randomServer()
// Client based API
nc, js := jsClientConnect(t, s)
defer nc.Close()
_, err := js.AddStream(&nats.StreamConfig{
Name: "TEST",
Subjects: []string{"foo", "bar"},
Replicas: 3,
})
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
msg, toSend := []byte("Hello JS Clustering"), 10
for i := 0; i < toSend; i++ {
if _, err = js.Publish("foo", msg); err != nil {
t.Fatalf("Unexpected publish error: %v", err)
}
}
sub, err := js.SubscribeSync("foo", nats.Durable("dlc"), nats.Pull(1))
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
checkSubsPending(t, sub, 1)
// Pull 5 messages and ack.
for i := 0; i < 5; i++ {
m, err := sub.NextMsg(time.Second)
if err != nil {
t.Fatalf("Unexpected error getting msg %d: %v", i+1, err)
}
m.Ack()
}
// Let state propagate for exact comparison below.
time.Sleep(200 * time.Millisecond)
ci, err := sub.ConsumerInfo()
if err != nil {
t.Fatalf("Unexpected error getting consumer info: %v", err)
}
if ci.AckFloor.Consumer != 5 {
t.Fatalf("Expected ack floor of %d, got %d", 5, ci.AckFloor.Consumer)
}
c.consumerLeader("$G", "TEST", "dlc").Shutdown()
c.waitOnConsumerLeader("$G", "TEST", "dlc")
nci, err := sub.ConsumerInfo()
if err != nil {
t.Fatalf("Unexpected error getting consumer info: %v", err)
}
if nci.Delivered != ci.Delivered {
t.Fatalf("Consumer delivered did not match after leader switch, wanted %+v, got %+v", ci.Delivered, nci.Delivered)
}
if nci.AckFloor != ci.AckFloor {
t.Fatalf("Consumer ackfloor did not match after leader switch, wanted %+v, got %+v", ci.AckFloor, nci.AckFloor)
}
// Now make sure we can receive new messages.
// Pull last 5.
for i := 0; i < 5; i++ {
m, err := sub.NextMsg(time.Second)
if err != nil {
t.Fatalf("Unexpected error getting msg %d: %v", i+1, err)
}
m.Ack()
}
nci, _ = sub.ConsumerInfo()
if nci.Delivered.Consumer != 10 || nci.Delivered.Stream != 10 {
t.Fatalf("Received bad delivered: %+v", nci.Delivered)
}
if nci.AckFloor.Consumer != 10 || nci.AckFloor.Stream != 10 {
t.Fatalf("Received bad ackfloor: %+v", nci.AckFloor)
}
if nci.NumAckPending != 0 {
t.Fatalf("Received bad ackpending: %+v", nci.NumAckPending)
}
}
func TestJetStreamClusterFullConsumerState(t *testing.T) {
c := createJetStreamClusterExplicit(t, "R3S", 3)
defer c.shutdown()
s := c.randomServer()
// Client based API
nc, js := jsClientConnect(t, s)
defer nc.Close()
_, err := js.AddStream(&nats.StreamConfig{
Name: "TEST",
Subjects: []string{"foo", "bar"},
Replicas: 3,
})
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
msg, toSend := []byte("Hello JS Clustering"), 10
for i := 0; i < toSend; i++ {
if _, err = js.Publish("foo", msg); err != nil {
t.Fatalf("Unexpected publish error: %v", err)
}
}
sub, err := js.SubscribeSync("foo", nats.Durable("dlc"), nats.Pull(1))
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
checkSubsPending(t, sub, 1)
// Now purge the stream.
if err := js.PurgeStream("TEST"); err != nil {
t.Fatalf("Unexpected purge error: %v", err)
}
}
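// Verify that a server that was offline across a meta snapshot catches up
// with both stream additions and deletions when it rejoins.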
func TestJetStreamClusterMetaSnapshotsAndCatchup(t *testing.T) {
c := createJetStreamClusterExplicit(t, "R3S", 3)
defer c.shutdown()
// Shut one down.
rs := c.randomServer()
rs.Shutdown()
c.waitOnLeader()
s := c.leader()
// Client based API
nc, js := jsClientConnect(t, s)
defer nc.Close()
numStreams := 4
// Create 4 streams
// FIXME(dlc) - R2 make sure we place properly.
for i := 0; i < numStreams; i++ {
sn := fmt.Sprintf("T-%d", i+1)
_, err := js.AddStream(&nats.StreamConfig{Name: sn})
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
}
c.leader().JetStreamSnapshotMeta()
rs = c.restartServer(rs)
c.checkClusterFormed()
c.waitOnServerCurrent(rs)
rs.Shutdown()
c.waitOnLeader()
for i := 0; i < numStreams; i++ {
sn := fmt.Sprintf("T-%d", i+1)
err := js.DeleteStream(sn)
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
}
rs = c.restartServer(rs)
c.checkClusterFormed()
c.waitOnServerCurrent(rs)
}
func TestJetStreamClusterMetaSnapshotsMultiChange(t *testing.T) {
c := createJetStreamClusterExplicit(t, "R3S", 2)
defer c.shutdown()
s := c.leader()
// Client based API
nc, js := jsClientConnect(t, s)
defer nc.Close()
// Add in 2 streams with 1 consumer each.
if _, err := js.AddStream(&nats.StreamConfig{Name: "S1"}); err != nil {
t.Fatalf("Unexpected error: %v", err)
}
_, err := js.AddConsumer("S1", &nats.ConsumerConfig{Durable: "S1C1", AckPolicy: nats.AckExplicitPolicy})
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
if _, err = js.AddStream(&nats.StreamConfig{Name: "S2"}); err != nil {
t.Fatalf("Unexpected error: %v", err)
}
_, err = js.AddConsumer("S2", &nats.ConsumerConfig{Durable: "S2C1", AckPolicy: nats.AckExplicitPolicy})
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
// Add in a new server to the group. This way we know we can delete the original streams and consumers.
rs := c.addInNewServer()
c.waitOnServerCurrent(rs)
rsn := rs.Name()
// Shut it down.
rs.Shutdown()
// Wait for the peer to be removed.
checkFor(t, 2*time.Second, 100*time.Millisecond, func() error {
for _, p := range s.JetStreamClusterPeers() {
if p == rsn {
return fmt.Errorf("Old server still in peer set")
}
}
return nil
})
// We want to make changes here that test each delta scenario for the meta snapshots.
// Add new stream and consumer.
if _, err = js.AddStream(&nats.StreamConfig{Name: "S3"}); err != nil {
t.Fatalf("Unexpected error: %v", err)
}
_, err = js.AddConsumer("S3", &nats.ConsumerConfig{Durable: "S3C1", AckPolicy: nats.AckExplicitPolicy})
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
// Delete stream S2
resp, _ := nc.Request(fmt.Sprintf(JSApiStreamDeleteT, "S2"), nil, time.Second)
var dResp JSApiStreamDeleteResponse
if err := json.Unmarshal(resp.Data, &dResp); err != nil {
t.Fatalf("Unexpected error: %v", err)
}
if !dResp.Success || dResp.Error != nil {
t.Fatalf("Got a bad response %+v", dResp.Error)
}
// Delete the consumer on S1 but add another.
resp, _ = nc.Request(fmt.Sprintf(JSApiConsumerDeleteT, "S1", "S1C1"), nil, time.Second)
var cdResp JSApiConsumerDeleteResponse
if err = json.Unmarshal(resp.Data, &cdResp); err != nil {
t.Fatalf("Unexpected error: %v", err)
}
if !cdResp.Success || cdResp.Error != nil {
t.Fatalf("Got a bad response %+v", cdResp)
}
// Add new consumer on S1
_, err = js.AddConsumer("S1", &nats.ConsumerConfig{Durable: "S1C2", AckPolicy: nats.AckExplicitPolicy})
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
cl := c.leader()
cl.JetStreamSnapshotMeta()
c.waitOnServerCurrent(cl)
rs = c.restartServer(rs)
c.checkClusterFormed()
c.waitOnServerCurrent(rs)
}
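// Verify that a message's stored timestamp is identical across replicas,
// even after the stream leader changes.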
func TestJetStreamClusterStreamSynchedTimeStamps(t *testing.T) {
c := createJetStreamClusterExplicit(t, "R3S", 3)
defer c.shutdown()
// Client based API
s := c.randomServer()
nc, js := jsClientConnect(t, s)
defer nc.Close()
_, err := js.AddStream(&nats.StreamConfig{Name: "foo", Storage: nats.MemoryStorage, Replicas: 3})
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
if _, err = js.Publish("foo", []byte("TSS")); err != nil {
t.Fatalf("Unexpected publish error: %v", err)
}
// Grab the message and timestamp from our current leader
sub, err := js.SubscribeSync("foo")
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
m, err := sub.NextMsg(time.Second)
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
meta, _ := m.MetaData()
sub.Unsubscribe()
sl := c.streamLeader("$G", "foo")
sl.Shutdown()
c.waitOnLeader()
c.waitOnStreamLeader("$G", "foo")
nc, js = jsClientConnect(t, c.leader())
defer nc.Close()
sm, err := js.GetMsg("foo", 1)
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
if !sm.Time.Equal(meta.Timestamp) {
t.Fatalf("Expected same timestamps, got %v vs %v", sm.Time, meta.Timestamp)
}
}
// Test to mimic what R.I. was seeing.
func TestJetStreamClusterRestoreSingleConsumer(t *testing.T) {
c := createJetStreamClusterExplicit(t, "R3S", 3)
defer c.shutdown()
// Client based API
s := c.randomServer()
nc, js := jsClientConnect(t, s)
defer nc.Close()
_, err := js.AddStream(&nats.StreamConfig{Name: "foo"})
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
if _, err = js.Publish("foo", []byte("TSS")); err != nil {
t.Fatalf("Unexpected publish error: %v", err)
}
sub, err := js.SubscribeSync("foo", nats.Durable("dlc"))
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
if m, err := sub.NextMsg(time.Second); err != nil {
t.Fatalf("Unexpected error: %v", err)
} else {
m.Ack()
}
c.stopAll()
c.restartAll()
c.waitOnLeader()
s = c.randomServer()
nc, js = jsClientConnect(t, s)
defer nc.Close()
resp, err := nc.Request(JSApiStreams, nil, time.Second)
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
var streams JSApiStreamNamesResponse
if err = json.Unmarshal(resp.Data, &streams); err != nil {
t.Fatalf("Unexpected error: %v", err)
}
if len(streams.Streams) != 1 {
t.Fatalf("Expected only 1 stream but got %d", len(streams.Streams))
}
// Now do detailed version.
resp, err = nc.Request(JSApiStreamList, nil, 5*time.Second)
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
var listResponse JSApiStreamListResponse
if err = json.Unmarshal(resp.Data, &listResponse); err != nil {
t.Fatalf("Unexpected error: %v", err)
}
if len(listResponse.Streams) != 1 {
t.Fatalf("Expected 1 stream but got %d", len(listResponse.Streams))
}
si, err := js.StreamInfo("foo")
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
if si == nil || si.Config.Name != "foo" {
t.Fatalf("StreamInfo is not correct %+v", si)
}
// Now check for consumer.
resp, err = nc.Request(fmt.Sprintf(JSApiConsumersT, "foo"), nil, time.Second)
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
var clResponse JSApiConsumerNamesResponse
if err = json.Unmarshal(resp.Data, &clResponse); err != nil {
t.Fatalf("Unexpected error: %v", err)
}
if len(clResponse.Consumers) != 1 {
t.Fatalf("Expected 1 consumer but got %d", len(clResponse.Consumers))
}
}
func TestJetStreamClusterStreamPublishWithActiveConsumers(t *testing.T) {
c := createJetStreamClusterExplicit(t, "R3S", 3)
defer c.shutdown()
s := c.randomServer()
// Client based API
nc, js := jsClientConnect(t, s)
defer nc.Close()
_, err := js.AddStream(&nats.StreamConfig{Name: "foo", Replicas: 3})
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
if _, err = js.Publish("foo", []byte("TSS")); err != nil {
t.Fatalf("Unexpected publish error: %v", err)
}
sub, err := js.SubscribeSync("foo", nats.Durable("dlc"))
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
if m, err := sub.NextMsg(time.Second); err != nil {
t.Fatalf("Unexpected error: %v", err)
} else {
m.Ack()
}
// Send 10 messages.
for i := 1; i <= 10; i++ {
payload := []byte(fmt.Sprintf("MSG-%d", i))
if _, err = js.Publish("foo", payload); err != nil {
t.Fatalf("Unexpected publish error: %v", err)
}
}
checkSubsPending(t, sub, 10)
// Sanity check for duplicate deliveries.
if nmsgs, _, _ := sub.Pending(); nmsgs > 10 {
t.Fatalf("Expected only %d responses, got %d more", 10, nmsgs)
}
for i := 1; i <= 10; i++ {
m, err := sub.NextMsg(time.Second)
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
payload := []byte(fmt.Sprintf("MSG-%d", i))
if !bytes.Equal(m.Data, payload) {
t.Fatalf("Did not get expected msg, expected %q, got %q", payload, m.Data)
}
}
ci, err := sub.ConsumerInfo()
if err != nil {
t.Fatalf("Unexpected error getting consumer info: %v", err)
}
c.consumerLeader("$G", "foo", "dlc").Shutdown()
c.waitOnConsumerLeader("$G", "foo", "dlc")
ci2, err := sub.ConsumerInfo()
if err != nil {
t.Fatalf("Unexpected error getting consumer info: %v", err)
}
ci.Cluster = nil
ci2.Cluster = nil
if !reflect.DeepEqual(ci, ci2) {
t.Fatalf("Consumer info did not match: %+v vs %+v", ci, ci2)
}
// In case the server above was also stream leader.
c.waitOnStreamLeader("$G", "foo")
// Now send more..
// Send 10 more messages.
for i := 11; i <= 20; i++ {
payload := []byte(fmt.Sprintf("MSG-%d", i))
if _, err = js.Publish("foo", payload); err != nil {
t.Fatalf("Unexpected publish error: %v", err)
}
}
checkSubsPending(t, sub, 10)
// Sanity check for duplicate deliveries.
if nmsgs, _, _ := sub.Pending(); nmsgs > 10 {
t.Fatalf("Expected only %d responses, got %d more", 10, nmsgs)
}
for i := 11; i <= 20; i++ {
m, err := sub.NextMsg(time.Second)
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
payload := []byte(fmt.Sprintf("MSG-%d", i))
if !bytes.Equal(m.Data, payload) {
t.Fatalf("Did not get expected msg, expected %q, got %q", payload, m.Data)
}
}
}
func TestJetStreamClusterStreamOverlapSubjects(t *testing.T) {
c := createJetStreamClusterExplicit(t, "R32", 2)
defer c.shutdown()
// Client based API
s := c.randomServer()
nc, js := jsClientConnect(t, s)
defer nc.Close()
if _, err := js.AddStream(&nats.StreamConfig{Name: "TEST", Subjects: []string{"foo"}, Replicas: 2}); err != nil {
t.Fatalf("Unexpected error: %v", err)
}
if _, err := js.AddStream(&nats.StreamConfig{Name: "TEST2", Subjects: []string{"foo"}, Replicas: 2}); err == nil || err == nats.ErrTimeout {
t.Fatalf("Expected error but got none or timeout")
}
// Now grab list of streams and make sure the second is not there.
resp, err := nc.Request(JSApiStreams, nil, time.Second)
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
var streams JSApiStreamNamesResponse
if err = json.Unmarshal(resp.Data, &streams); err != nil {
t.Fatalf("Unexpected error: %v", err)
}
if len(streams.Streams) != 1 {
t.Fatalf("Expected only 1 stream but got %d", len(streams.Streams))
}
// Now do detailed version.
resp, err = nc.Request(JSApiStreamList, nil, 5*time.Second)
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
var listResponse JSApiStreamListResponse
if err = json.Unmarshal(resp.Data, &listResponse); err != nil {
t.Fatalf("Unexpected error: %v", err)
}
}
func TestJetStreamClusterStreamInfoList(t *testing.T) {
c := createJetStreamClusterExplicit(t, "R3S", 3)
defer c.shutdown()
// Client based API
s := c.randomServer()
nc, js := jsClientConnect(t, s)
defer nc.Close()
createStream := func(name string) {
t.Helper()
if _, err := js.AddStream(&nats.StreamConfig{Name: name}); err != nil {
t.Fatalf("Unexpected error: %v", err)
}
}
createStream("foo")
createStream("bar")
createStream("baz")
sendBatch := func(subject string, n int) {
t.Helper()
// Send a batch to a given subject.
for i := 0; i < n; i++ {
if _, err := js.Publish(subject, []byte("OK")); err != nil {
t.Fatalf("Unexpected publish error: %v", err)
}
}
}
sendBatch("foo", 10)
sendBatch("bar", 22)
sendBatch("baz", 33)
// Now get the stream list info.
sl := js.NewStreamLister()
if !sl.Next() {
t.Fatalf("Unexpected error: %v", sl.Err())
}
p := sl.Page()
if len(p) != 3 {
t.Fatalf("StreamInfo expected 3 results, got %d", len(p))
}
for _, si := range p {
switch si.Config.Name {
case "foo":
if si.State.Msgs != 10 {
t.Fatalf("Expected %d msgs but got %d", 10, si.State.Msgs)
}
case "bar":
if si.State.Msgs != 22 {
t.Fatalf("Expected %d msgs but got %d", 22, si.State.Msgs)
}
case "baz":
if si.State.Msgs != 33 {
t.Fatalf("Expected %d msgs but got %d", 33, si.State.Msgs)
}
}
}
}
func TestJetStreamClusterConsumerInfoList(t *testing.T) {
c := createJetStreamClusterExplicit(t, "R3S", 3)
defer c.shutdown()
// Client based API
s := c.randomServer()
nc, js := jsClientConnect(t, s)
defer nc.Close()
if _, err := js.AddStream(&nats.StreamConfig{Name: "TEST", Replicas: 3}); err != nil {
t.Fatalf("Unexpected error: %v", err)
}
// Place messages so we can generate consumer state.
for i := 0; i < 10; i++ {
if _, err := js.Publish("TEST", []byte("OK")); err != nil {
t.Fatalf("Unexpected publish error: %v", err)
}
}
createConsumer := func(name string) *nats.Subscription {
t.Helper()
sub, err := js.SubscribeSync("TEST", nats.Durable(name), nats.Pull(2))
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
checkSubsPending(t, sub, 2)
return sub
}
subFoo := createConsumer("foo")
subBar := createConsumer("bar")
subBaz := createConsumer("baz")
// Place consumers in various states.
for _, ss := range []struct {
sub *nats.Subscription
fetch int
ack int
}{
{subFoo, 4, 2},
{subBar, 2, 0},
{subBaz, 8, 6},
} {
for i := 0; i < ss.fetch; i++ {
if m, err := ss.sub.NextMsg(time.Second); err != nil {
t.Fatalf("Unexpected error getting message %d: %v", i, err)
} else if i < ss.ack {
m.Ack()
}
}
}
// Now get the consumer list info.
cl := js.NewConsumerLister("TEST")
if !cl.Next() {
t.Fatalf("Unexpected error: %v", cl.Err())
}
p := cl.Page()
if len(p) != 3 {
t.Fatalf("ConsumerInfo expected 3 results, got %d", len(p))
}
for _, ci := range p {
switch ci.Name {
case "foo":
if ci.Delivered.Consumer != 4 {
t.Fatalf("Expected %d delivered but got %d", 4, ci.Delivered.Consumer)
}
if ci.AckFloor.Consumer != 2 {
t.Fatalf("Expected %d for ack floor but got %d", 2, ci.AckFloor.Consumer)
}
case "bar":
if ci.Delivered.Consumer != 2 {
t.Fatalf("Expected %d delivered but got %d", 2, ci.Delivered.Consumer)
}
if ci.AckFloor.Consumer != 0 {
t.Fatalf("Expected %d for ack floor but got %d", 0, ci.AckFloor.Consumer)
}
case "baz":
if ci.Delivered.Consumer != 8 {
t.Fatalf("Expected %d delivered but got %d", 8, ci.Delivered.Consumer)
}
if ci.AckFloor.Consumer != 6 {
t.Fatalf("Expected %d for ack floor but got %d", 6, ci.AckFloor.Consumer)
}
}
}
}
func TestJetStreamClusterStreamUpdate(t *testing.T) {
c := createJetStreamClusterExplicit(t, "R3S", 3)
defer c.shutdown()
// Client based API
s := c.randomServer()
nc, js := jsClientConnect(t, s)
defer nc.Close()
sc := &nats.StreamConfig{
Name: "TEST",
Subjects: []string{"foo"},
Replicas: 3,
MaxMsgs: 10,
Discard: DiscardNew,
}
if _, err := js.AddStream(sc); err != nil {
t.Fatalf("Unexpected error: %v", err)
}
for i := 1; i <= int(sc.MaxMsgs); i++ {
msg := []byte(fmt.Sprintf("HELLO JSC-%d", i))
if _, err := js.Publish("foo", msg); err != nil {
t.Fatalf("Unexpected publish error: %v", err)
}
}
// Expect error here.
if _, err := js.Publish("foo", []byte("fail")); err == nil {
t.Fatalf("Expected publish to fail")
}
// Now update MaxMsgs, select non-leader
s = c.randomNonStreamLeader("$G", "TEST")
nc, js = jsClientConnect(t, s)
defer nc.Close()
sc.MaxMsgs = 20
si, err := js.UpdateStream(sc)
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
if si.Config.MaxMsgs != 20 {
t.Fatalf("Expected to have config updated with max msgs of %d, got %d", 20, si.Config.MaxMsgs)
}
// Do one that will fail. Wait and make sure we only are getting one response.
sc.Name = "TEST22"
rsub, _ := nc.SubscribeSync(nats.NewInbox())
defer rsub.Unsubscribe()
nc.Flush()
req, _ := json.Marshal(sc)
if err := nc.PublishRequest(fmt.Sprintf(JSApiStreamUpdateT, "TEST"), rsub.Subject, req); err != nil {
t.Fatalf("Unexpected error: %v", err)
}
// Wait in case more than one reply was sent.
time.Sleep(250 * time.Millisecond)
if nmsgs, _, _ := rsub.Pending(); nmsgs != 1 {
t.Fatalf("Expected only one response, got %d", nmsgs)
}
m, err := rsub.NextMsg(time.Second)
if err != nil {
t.Fatalf("Error getting message: %v", err)
}
var scResp JSApiStreamCreateResponse
if err := json.Unmarshal(m.Data, &scResp); err != nil {
t.Fatalf("Unexpected error: %v", err)
}
if scResp.StreamInfo != nil || scResp.Error == nil {
t.Fatalf("Did not receive correct response: %+v", scResp)
}
}
func TestJetStreamClusterStreamExtendedUpdates(t *testing.T) {
c := createJetStreamClusterExplicit(t, "R3S", 3)
defer c.shutdown()
// Client based API
s := c.randomServer()
nc, js := jsClientConnect(t, s)
defer nc.Close()
cfg := &nats.StreamConfig{
Name: "TEST",
Subjects: []string{"foo"},
Replicas: 3,
}
if _, err := js.AddStream(cfg); err != nil {
t.Fatalf("Unexpected error: %v", err)
}
updateStream := func() *nats.StreamInfo {
si, err := js.UpdateStream(cfg)
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
return si
}
expectError := func() {
if _, err := js.UpdateStream(cfg); err == nil {
t.Fatalf("Expected error and got none")
}
}
// Subjects
cfg.Subjects = []string{"bar", "baz"}
if si := updateStream(); !reflect.DeepEqual(si.Config.Subjects, cfg.Subjects) {
t.Fatalf("Did not get expected stream info: %+v", si)
}
// Make sure these error for now.
// R factor changes
cfg.Replicas = 1
expectError()
// Mirror changes
cfg.Replicas = 3
cfg.Mirror = &nats.StreamSource{Name: "ORDERS"}
expectError()
}
func TestJetStreamClusterDoubleAdd(t *testing.T) {
c := createJetStreamClusterExplicit(t, "R32", 2)
defer c.shutdown()
s := c.randomServer()
// Client based API
nc, js := jsClientConnect(t, s)
defer nc.Close()
if _, err := js.AddStream(&nats.StreamConfig{Name: "TEST", Replicas: 2}); err != nil {
t.Fatalf("Unexpected error: %v", err)
}
// Check double add fails.
if _, err := js.AddStream(&nats.StreamConfig{Name: "TEST", Replicas: 2}); err == nil || err == nats.ErrTimeout {
t.Fatalf("Expected error but got none or timeout")
}
// Do Consumers too.
cfg := &nats.ConsumerConfig{Durable: "dlc", AckPolicy: nats.AckExplicitPolicy}
if _, err := js.AddConsumer("TEST", cfg); err != nil {
t.Fatalf("Unexpected error: %v", err)
}
// Check double add fails.
if _, err := js.AddConsumer("TEST", cfg); err == nil || err == nats.ErrTimeout {
t.Fatalf("Expected error but got none or timeout")
}
}
func TestJetStreamClusterStreamNormalCatchup(t *testing.T) {
c := createJetStreamClusterExplicit(t, "R3S", 3)
defer c.shutdown()
// Client based API
s := c.randomServer()
nc, js := jsClientConnect(t, s)
defer nc.Close()
_, err := js.AddStream(&nats.StreamConfig{
Name: "TEST",
Subjects: []string{"foo", "bar"},
Replicas: 3,
})
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
toSend := 10
for i := 1; i <= toSend; i++ {
msg := []byte(fmt.Sprintf("HELLO JSC-%d", i))
if _, err = js.Publish("foo", msg); err != nil {
t.Fatalf("Unexpected publish error: %v", err)
}
}
sl := c.streamLeader("$G", "TEST")
sl.Shutdown()
c.waitOnStreamLeader("$G", "TEST")
// Send 10 more while one replica is offline.
for i := toSend + 1; i <= toSend*2; i++ {
msg := []byte(fmt.Sprintf("HELLO JSC-%d", i))
if _, err = js.Publish("foo", msg); err != nil {
t.Fatalf("Unexpected publish error: %v", err)
}
}
// Delete the last message from the first batch.
dreq := JSApiMsgDeleteRequest{Seq: uint64(toSend)}
dreqj, err := json.Marshal(dreq)
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
resp, _ := nc.Request(fmt.Sprintf(JSApiMsgDeleteT, "TEST"), dreqj, time.Second)
var delMsgResp JSApiMsgDeleteResponse
if err = json.Unmarshal(resp.Data, &delMsgResp); err != nil {
t.Fatalf("Unexpected error: %v", err)
}
if !delMsgResp.Success || delMsgResp.Error != nil {
t.Fatalf("Got a bad response %+v", delMsgResp.Error)
}
sl = c.restartServer(sl)
c.checkClusterFormed()
c.waitOnServerCurrent(sl)
c.waitOnStreamCurrent(sl, "$G", "TEST")
}
func TestJetStreamClusterStreamSnapshotCatchup(t *testing.T) {
c := createJetStreamClusterExplicit(t, "R3S", 3)
defer c.shutdown()
s := c.randomServer()
// Client based API
nc, js := jsClientConnect(t, s)
defer nc.Close()
_, err := js.AddStream(&nats.StreamConfig{
Name: "TEST",
Subjects: []string{"foo"},
Replicas: 3,
})
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
pseq := uint64(1)
sendBatch := func(n int) {
t.Helper()
// Send a batch.
for i := 0; i < n; i++ {
msg := []byte(fmt.Sprintf("HELLO JSC-%d", pseq))
if _, err = js.Publish("foo", msg); err != nil {
t.Fatalf("Unexpected publish error: %v", err)
}
pseq++
}
}
sendBatch(2)
sl := c.streamLeader("$G", "TEST")
sl.Shutdown()
c.waitOnStreamLeader("$G", "TEST")
sendBatch(100)
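// deleteMsg removes a message by sequence, creating interior gaps that the
// snapshot and catch-up logic must reproduce on the restarted replica.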
deleteMsg := func(seq uint64) {
if err := js.DeleteMsg("TEST", seq); err != nil {
t.Fatalf("Unexpected error: %v", err)
}
}
// Delete a message from the middle of the stream.
deleteMsg(pseq / 2)
// Delete the next one too.
deleteMsg(pseq/2 + 1)
nsl := c.streamLeader("$G", "TEST")
nsl.JetStreamSnapshotStream("$G", "TEST")
// Do some activity post snapshot as well.
// Delete next to last.
deleteMsg(pseq - 2)
// Send another batch.
sendBatch(100)
sl = c.restartServer(sl)
c.checkClusterFormed()
c.waitOnServerCurrent(sl)
c.waitOnStreamCurrent(sl, "$G", "TEST")
}
func TestJetStreamClusterDeleteMsg(t *testing.T) {
c := createJetStreamClusterExplicit(t, "R3S", 3)
defer c.shutdown()
// Client based API
s := c.randomServer()
nc, js := jsClientConnect(t, s)
defer nc.Close()
// R=1 make sure delete works.
_, err := js.AddStream(&nats.StreamConfig{Name: "TEST"})
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
toSend := 10
for i := 1; i <= toSend; i++ {
msg := []byte(fmt.Sprintf("HELLO JSC-%d", i))
if _, err = js.Publish("TEST", msg); err != nil {
t.Fatalf("Unexpected publish error: %v", err)
}
}
deleteMsg := func(seq uint64) {
if err := js.DeleteMsg("TEST", seq); err != nil {
t.Fatalf("Unexpected error: %v", err)
}
}
deleteMsg(1)
// Also make sure purge of R=1 works too.
if err := js.PurgeStream("TEST"); err != nil {
t.Fatalf("Unexpected purge error: %v", err)
}
}
func TestJetStreamClusterDeleteMsgAndRestart(t *testing.T) {
c := createJetStreamClusterExplicit(t, "R3S", 3)
defer c.shutdown()
// Client based API
s := c.randomServer()
nc, js := jsClientConnect(t, s)
defer nc.Close()
// R=1 make sure delete works.
_, err := js.AddStream(&nats.StreamConfig{Name: "TEST", Replicas: 2})
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
toSend := 10
for i := 1; i <= toSend; i++ {
msg := []byte(fmt.Sprintf("HELLO JSC-%d", i))
if _, err = js.Publish("TEST", msg); err != nil {
t.Fatalf("Unexpected publish error: %v", err)
}
}
deleteMsg := func(seq uint64) {
if err := js.DeleteMsg("TEST", seq); err != nil {
t.Fatalf("Unexpected error: %v", err)
}
}
deleteMsg(1)
c.stopAll()
c.restartAll()
c.waitOnStreamLeader("$G", "TEST")
}
func TestJetStreamClusterStreamSnapshotCatchupWithPurge(t *testing.T) {
c := createJetStreamClusterExplicit(t, "R5S", 5)
defer c.shutdown()
s := c.randomServer()
// Client based API
nc, js := jsClientConnect(t, s)
defer nc.Close()
_, err := js.AddStream(&nats.StreamConfig{
Name: "TEST",
Subjects: []string{"foo"},
Replicas: 3,
})
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
sl := c.streamLeader("$G", "TEST")
sl.Shutdown()
c.waitOnStreamLeader("$G", "TEST")
toSend := 10
for i := 0; i < toSend; i++ {
if _, err = js.Publish("foo", []byte("OK")); err != nil {
t.Fatalf("Unexpected publish error: %v", err)
}
}
nsl := c.streamLeader("$G", "TEST")
if err := nsl.JetStreamSnapshotStream("$G", "TEST"); err != nil {
t.Fatalf("Error snapshotting stream: %v", err)
}
time.Sleep(250 * time.Millisecond)
sl = c.restartServer(sl)
c.checkClusterFormed()
// Now purge the stream while we are recovering.
if err := js.PurgeStream("TEST"); err != nil {
t.Fatalf("Unexpected purge error: %v", err)
}
c.waitOnServerCurrent(sl)
c.waitOnStreamCurrent(sl, "$G", "TEST")
nsl.Shutdown()
c.waitOnStreamLeader("$G", "TEST")
if _, err := js.StreamInfo("TEST"); err != nil {
t.Fatalf("Unexpected error: %v", err)
}
}
func TestJetStreamClusterExtendedStreamInfo(t *testing.T) {
c := createJetStreamClusterExplicit(t, "R3S", 3)
defer c.shutdown()
// Client based API
s := c.randomServer()
nc, js := jsClientConnect(t, s)
defer nc.Close()
_, err := js.AddStream(&nats.StreamConfig{
Name: "TEST",
Subjects: []string{"foo"},
Replicas: 3,
})
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
toSend := 50
for i := 0; i < toSend; i++ {
if _, err = js.Publish("foo", []byte("OK")); err != nil {
t.Fatalf("Unexpected publish error: %v", err)
}
}
leader := c.streamLeader("$G", "TEST").Name()
si, err := js.StreamInfo("TEST")
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
if si.Cluster == nil {
t.Fatalf("Expected cluster info")
}
if si.Cluster.Name != c.name {
t.Fatalf("Expected cluster name of %q, got %q", c.name, si.Cluster.Name)
}
if si.Cluster.Leader != leader {
t.Fatalf("Expected leader of %q, got %q", leader, si.Cluster.Leader)
}
if len(si.Cluster.Replicas) != 2 {
t.Fatalf("Expected %d replicas, got %d", 2, len(si.Cluster.Replicas))
}
// Faster timeout since we loop below checking for condition.
js2, err := nc.JetStream(nats.MaxWait(50 * time.Millisecond))
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
// We may need to wait a bit for peers to catch up.
checkFor(t, 2*time.Second, 100*time.Millisecond, func() error {
for _, peer := range si.Cluster.Replicas {
if !peer.Current {
if si, err = js2.StreamInfo("TEST"); err != nil {
t.Fatalf("Could not retrieve stream info")
}
return fmt.Errorf("Expected replica to be current: %+v", peer)
}
}
return nil
})
// Shutdown the leader.
oldLeader := c.streamLeader("$G", "TEST")
oldLeader.Shutdown()
c.waitOnStreamLeader("$G", "TEST")
// Re-request.
leader = c.streamLeader("$G", "TEST").Name()
si, err = js.StreamInfo("TEST")
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
if si.Cluster == nil {
t.Fatalf("Expected cluster info")
}
if si.Cluster.Leader != leader {
t.Fatalf("Expected leader of %q, got %q", leader, si.Cluster.Leader)
}
if len(si.Cluster.Replicas) != 2 {
t.Fatalf("Expected %d replicas, got %d", 2, len(si.Cluster.Replicas))
}
for _, peer := range si.Cluster.Replicas {
if peer.Name == oldLeader.Name() {
if peer.Current {
t.Fatalf("Expected old leader to be reported as not current: %+v", peer)
}
} else if !peer.Current {
t.Fatalf("Expected replica to be current: %+v", peer)
}
}
// Now send a few more messages then restart the oldLeader.
for i := 0; i < 10; i++ {
if _, err = js.Publish("foo", []byte("OK")); err != nil {
t.Fatalf("Unexpected publish error: %v", err)
}
}
oldLeader = c.restartServer(oldLeader)
c.waitOnStreamCurrent(oldLeader, "$G", "TEST")
// Re-request.
leader = c.streamLeader("$G", "TEST").Name()
si, err = js.StreamInfo("TEST")
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
if si.Cluster == nil {
t.Fatalf("Expected cluster info")
}
if si.Cluster.Leader != leader {
t.Fatalf("Expected leader of %q, got %q", leader, si.Cluster.Leader)
}
if len(si.Cluster.Replicas) != 2 {
t.Fatalf("Expected %d replicas, got %d", 2, len(si.Cluster.Replicas))
}
// We may need to wait a bit for peers to catch up.
checkFor(t, 2*time.Second, 100*time.Millisecond, func() error {
for _, peer := range si.Cluster.Replicas {
if !peer.Current {
if si, err = js2.StreamInfo("TEST"); err != nil {
t.Fatalf("Could not retrieve stream info")
}
return fmt.Errorf("Expected replica to be current: %+v", peer)
}
}
return nil
})
// Now do consumer.
sub, err := js.SubscribeSync("foo", nats.Durable("dlc"), nats.Pull(10))
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
defer sub.Unsubscribe()
checkSubsPending(t, sub, 10)
leader = c.consumerLeader("$G", "TEST", "dlc").Name()
ci, err := sub.ConsumerInfo()
if err != nil {
t.Fatalf("Unexpected error getting consumer info: %v", err)
}
if ci.Cluster.Leader != leader {
t.Fatalf("Expected leader of %q, got %q", leader, ci.Cluster.Leader)
}
if len(ci.Cluster.Replicas) != 2 {
t.Fatalf("Expected %d replicas, got %d", 2, len(ci.Cluster.Replicas))
}
for _, peer := range ci.Cluster.Replicas {
if !peer.Current {
t.Fatalf("Expected replica to be current: %+v", peer)
}
}
}
func TestJetStreamClusterExtendedStreamInfoSingleReplica(t *testing.T) {
c := createJetStreamClusterExplicit(t, "R3S", 3)
defer c.shutdown()
// Client based API
s := c.randomServer()
nc, js := jsClientConnect(t, s)
defer nc.Close()
_, err := js.AddStream(&nats.StreamConfig{
Name: "TEST",
Subjects: []string{"foo"},
})
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
toSend := 50
for i := 0; i < toSend; i++ {
if _, err = js.Publish("foo", []byte("OK")); err != nil {
t.Fatalf("Unexpected publish error: %v", err)
}
}
leader := c.streamLeader("$G", "TEST").Name()
si, err := js.StreamInfo("TEST")
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
if si.Cluster == nil {
t.Fatalf("Expected cluster info")
}
if si.Cluster.Name != c.name {
t.Fatalf("Expected cluster name of %q, got %q", c.name, si.Cluster.Name)
}
if si.Cluster.Leader != leader {
t.Fatalf("Expected leader of %q, got %q", leader, si.Cluster.Leader)
}
if len(si.Cluster.Replicas) != 0 {
t.Fatalf("Expected no replicas but got %d", len(si.Cluster.Replicas))
}
// Make sure we can grab consumer lists from any
cl := js.NewConsumerLister("TEST")
if !cl.Next() {
t.Fatalf("Unexpected error: %v", cl.Err())
}
p := cl.Page()
if len(p) != 0 {
t.Fatalf("ConsumerInfo expected no paged results, got %d", len(p))
}
// Now add in a consumer.
cfg := &nats.ConsumerConfig{Durable: "dlc", AckPolicy: nats.AckExplicitPolicy}
if _, err := js.AddConsumer("TEST", cfg); err != nil {
t.Fatalf("Unexpected error: %v", err)
}
cl = js.NewConsumerLister("TEST")
if !cl.Next() {
t.Fatalf("Unexpected error: %v", cl.Err())
}
p = cl.Page()
if len(p) != 1 {
t.Fatalf("ConsumerInfo expected 1 result, got %d", len(p))
}
// Now do direct names list as well.
resp, err := nc.Request(fmt.Sprintf(JSApiConsumersT, "TEST"), nil, time.Second)
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
var clResponse JSApiConsumerNamesResponse
if err = json.Unmarshal(resp.Data, &clResponse); err != nil {
t.Fatalf("Unexpected error: %v", err)
}
if len(clResponse.Consumers) != 1 {
t.Fatalf("Expected only 1 consumer but got %d", len(clResponse.Consumers))
}
}
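// With interest-based retention, acknowledged messages should be removed from
// the stream once all interested consumers have processed them.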
func TestJetStreamClusterInterestRetention(t *testing.T) {
c := createJetStreamClusterExplicit(t, "R3S", 3)
defer c.shutdown()
// Client based API
s := c.randomServer()
nc, js := jsClientConnect(t, s)
defer nc.Close()
_, err := js.AddStream(&nats.StreamConfig{Name: "foo", Retention: nats.InterestPolicy, Replicas: 3})
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
sub, err := js.SubscribeSync("foo", nats.Durable("dlc"))
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
sl := c.streamLeader("$G", "foo")
cl := c.consumerLeader("$G", "foo", "dlc")
if sl == cl {
_, err := nc.Request(fmt.Sprintf(JSApiStreamLeaderStepDownT, "foo"), nil, time.Second)
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
c.waitOnStreamLeader("$G", "foo")
}
if _, err = js.Publish("foo", []byte("OK")); err != nil {
t.Fatalf("Unexpected publish error: %v", err)
}
m, err := sub.NextMsg(time.Second)
if err != nil {
t.Fatalf("Unexpected error getting msg: %v", err)
}
m.Ack()
js, err = nc.JetStream(nats.MaxWait(50 * time.Millisecond))
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
checkFor(t, 2*time.Second, 100*time.Millisecond, func() error {
si, err := js.StreamInfo("foo")
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
if si.State.Msgs != 0 {
return fmt.Errorf("Expected 0 msgs, got state: %+v", si.State)
}
return nil
})
}
func TestJetStreamClusterInterestRetentionWithFilteredConsumers(t *testing.T) {
// Flaky for the time being.
skip(t)
c := createJetStreamClusterExplicit(t, "R3S", 3)
defer c.shutdown()
// Client based API
s := c.randomServer()
nc, js := jsClientConnect(t, s)
defer nc.Close()
_, err := js.AddStream(&nats.StreamConfig{Name: "TEST", Subjects: []string{"*"}, Retention: nats.InterestPolicy, Replicas: 3})
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
fsub, err := js.SubscribeSync("foo", nats.Durable("d1"))
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
defer fsub.Unsubscribe()
bsub, err := js.SubscribeSync("bar", nats.Durable("d2"))
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
defer bsub.Unsubscribe()
msg := []byte("FILTERED")
sendMsg := func(subj string) {
t.Helper()
if _, err = js.Publish(subj, msg); err != nil {
t.Fatalf("Unexpected publish error: %v", err)
}
}
getAndAck := func(sub *nats.Subscription) {
t.Helper()
m, err := sub.NextMsg(time.Second)
if err != nil {
t.Fatalf("Unexpected error getting msg: %v", err)
}
m.Ack()
}
jsq, err := nc.JetStream(nats.MaxWait(50 * time.Millisecond))
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
checkState := func(expected uint64) {
t.Helper()
checkFor(t, 5*time.Second, 100*time.Millisecond, func() error {
si, err := jsq.StreamInfo("TEST")
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
if si.State.Msgs != expected {
return fmt.Errorf("Expected %d msgs, got %d", expected, si.State.Msgs)
}
return nil
})
}
sendMsg("foo")
checkState(1)
getAndAck(fsub)
checkState(0)
sendMsg("bar")
sendMsg("foo")
checkState(2)
getAndAck(bsub)
checkState(1)
getAndAck(fsub)
checkState(0)
}
func TestJetStreamClusterEphemeralConsumerCleanup(t *testing.T) {
c := createJetStreamClusterExplicit(t, "R3S", 3)
defer c.shutdown()
// Client based API
s := c.randomServer()
nc, js := jsClientConnect(t, s)
defer nc.Close()
_, err := js.AddStream(&nats.StreamConfig{Name: "foo", Replicas: 2})
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
sub, err := js.Subscribe("foo", func(m *nats.Msg) {})
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
ci, _ := sub.ConsumerInfo()
if ci == nil {
t.Fatalf("Unexpected error: no consumer info")
}
// We will look up by hand this consumer to set inactive threshold lower for this test.
cl := c.consumerLeader("$G", "foo", ci.Name)
if cl == nil {
t.Fatalf("Could not find consumer leader")
}
mset, err := cl.GlobalAccount().lookupStream("foo")
if err != nil {
t.Fatalf("Expected to find a stream for %q", "foo")
}
o := mset.lookupConsumer(ci.Name)
if o == nil {
t.Fatalf("Error looking up consumer %q", ci.Name)
}
o.setInActiveDeleteThreshold(10 * time.Millisecond)
msg, toSend := []byte("Hello JS Clustering"), 10
for i := 0; i < toSend; i++ {
if _, err = js.Publish("foo", msg); err != nil {
t.Fatalf("Unexpected publish error: %v", err)
}
}
getConsumers := func() []string {
resp, err := nc.Request(fmt.Sprintf(JSApiConsumersT, "foo"), nil, time.Second)
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
var clResponse JSApiConsumerNamesResponse
if err = json.Unmarshal(resp.Data, &clResponse); err != nil {
t.Fatalf("Unexpected error: %v", err)
}
return clResponse.Consumers
}
	checkConsumer := func(expected int) {
		t.Helper()
consumers := getConsumers()
if len(consumers) != expected {
t.Fatalf("Expected %d consumers but got %d", expected, len(consumers))
}
}
checkConsumer(1)
	// Now unsubscribe. Since this consumer is ephemeral, this will make it go away.
sub.Unsubscribe()
checkFor(t, 2*time.Second, 100*time.Millisecond, func() error {
if consumers := getConsumers(); len(consumers) == 0 {
return nil
} else {
return fmt.Errorf("Still %d consumers remaining", len(consumers))
}
})
}
func TestJetStreamClusterEphemeralConsumersNotReplicated(t *testing.T) {
c := createJetStreamClusterExplicit(t, "R3S", 3)
defer c.shutdown()
// Client based API
s := c.randomServer()
nc, js := jsClientConnect(t, s)
defer nc.Close()
_, err := js.AddStream(&nats.StreamConfig{Name: "foo", Replicas: 3})
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
sub, err := js.SubscribeSync("foo")
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
ci, _ := sub.ConsumerInfo()
if ci == nil {
t.Fatalf("Unexpected error: no consumer info")
}
if _, err = js.Publish("foo", []byte("OK")); err != nil {
t.Fatalf("Unexpected publish error: %v", err)
}
checkSubsPending(t, sub, 1)
sub.NextMsg(0)
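	// Ephemeral consumers should not be replicated, so expect R=1 with no replicas.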
if ci.Cluster == nil || len(ci.Cluster.Replicas) != 0 {
t.Fatalf("Expected ephemeral to be R=1, got %+v", ci.Cluster)
}
scl := c.serverByName(ci.Cluster.Leader)
if scl == nil {
t.Fatalf("Could not select server where ephemeral consumer is running")
}
// Test migrations. If we are also metadata leader will not work so skip.
if scl == c.leader() {
return
}
scl.Shutdown()
c.waitOnStreamLeader("$G", "foo")
if _, err = js.Publish("foo", []byte("OK")); err != nil {
t.Fatalf("Unexpected publish error: %v", err)
}
if _, err := sub.NextMsg(500 * time.Millisecond); err != nil {
t.Logf("Expected to see another message, but behavior is optimistic so can fail")
}
}
func TestJetStreamClusterUserSnapshotAndRestore(t *testing.T) {
c := createJetStreamClusterExplicit(t, "R3S", 3)
defer c.shutdown()
// Client based API
s := c.randomServer()
nc, js := jsClientConnect(t, s)
defer nc.Close()
_, err := js.AddStream(&nats.StreamConfig{
Name: "TEST",
Subjects: []string{"foo"},
Replicas: 2,
})
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
toSend, batchSize := 200, 50
for i := 0; i < toSend; i++ {
if _, err = js.Publish("foo", []byte("OK")); err != nil {
t.Fatalf("Unexpected publish error: %v", err)
}
}
// Create consumer with no state.
_, err = js.AddConsumer("TEST", &nats.ConsumerConfig{Durable: "rip", AckPolicy: nats.AckExplicitPolicy, AckWait: time.Second})
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
	// Create another consumer as well and give it non-trivial state.
_, err = js.AddConsumer("TEST", &nats.ConsumerConfig{Durable: "dlc", AckPolicy: nats.AckExplicitPolicy, AckWait: time.Second})
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
jsub, err := js.SubscribeSync("foo", nats.Durable("dlc"), nats.Pull(batchSize))
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
checkSubsPending(t, jsub, batchSize)
// Ack first 50.
for i := 1; i <= 50; i++ {
m, err := jsub.NextMsg(time.Second)
if err != nil {
t.Fatalf("Unexpected error getting msg %d: %v", i, err)
}
m.Ack()
}
// Now ack every third message for next 50.
for i := 51; i <= 100; i++ {
m, err := jsub.NextMsg(time.Second)
if err != nil {
t.Fatalf("Unexpected error getting msg %d: %v", i, err)
}
if i%3 == 0 {
m.Ack()
}
}
nc.Flush()
time.Sleep(200 * time.Millisecond)
// Snapshot consumer info.
ci, err := jsub.ConsumerInfo()
if err != nil {
t.Fatalf("Unexpected error getting consumer info: %v", err)
}
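	// Request a snapshot. Chunks arrive on DeliverSubject, an empty message
	// marks EOF, and each chunk is flow-control acked by responding to it.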
sreq := &JSApiStreamSnapshotRequest{
DeliverSubject: nats.NewInbox(),
ChunkSize: 512,
}
req, _ := json.Marshal(sreq)
rmsg, err := nc.Request(fmt.Sprintf(JSApiStreamSnapshotT, "TEST"), req, time.Second)
if err != nil {
t.Fatalf("Unexpected error on snapshot request: %v", err)
}
var resp JSApiStreamSnapshotResponse
json.Unmarshal(rmsg.Data, &resp)
if resp.Error != nil {
t.Fatalf("Did not get correct error response: %+v", resp.Error)
}
// Grab state for comparison.
state := *resp.State
config := *resp.Config
var snapshot []byte
done := make(chan bool)
sub, _ := nc.Subscribe(sreq.DeliverSubject, func(m *nats.Msg) {
// EOF
if len(m.Data) == 0 {
done <- true
return
}
// Could be writing to a file here too.
snapshot = append(snapshot, m.Data...)
// Flow ack
m.Respond(nil)
})
defer sub.Unsubscribe()
// Wait to receive the snapshot.
select {
case <-done:
case <-time.After(5 * time.Second):
t.Fatalf("Did not receive our snapshot in time")
}
var rresp JSApiStreamRestoreResponse
rreq := &JSApiStreamRestoreRequest{
Config: config,
State: state,
}
req, _ = json.Marshal(rreq)
// Make sure a restore to an existing stream fails.
rmsg, err = nc.Request(fmt.Sprintf(JSApiStreamRestoreT, "TEST"), req, time.Second)
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
json.Unmarshal(rmsg.Data, &rresp)
if rresp.Error == nil || rresp.Error.Code != 500 || !strings.Contains(rresp.Error.Description, "already in use") {
t.Fatalf("Did not get correct error response: %+v", rresp.Error)
}
if _, err := js.StreamInfo("TEST"); err != nil {
t.Fatalf("Unexpected error: %v", err)
}
// Now make sure a restore will work.
// Delete our stream first.
if err := js.DeleteStream("TEST"); err != nil {
t.Fatalf("Unexpected error: %v", err)
}
// This should work properly.
rmsg, err = nc.Request(fmt.Sprintf(JSApiStreamRestoreT, "TEST"), req, 5*time.Second)
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
rresp.Error = nil
json.Unmarshal(rmsg.Data, &rresp)
if rresp.Error != nil {
t.Fatalf("Got an unexpected error response: %+v", rresp.Error)
}
	// Send our snapshot back in to restore the stream.
	// Chunks can be any size.
var chunk [1024]byte
for r := bytes.NewReader(snapshot); ; {
n, err := r.Read(chunk[:])
if err != nil {
break
}
nc.Request(rresp.DeliverSubject, chunk[:n], time.Second)
}
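	// An empty request signals EOF; the reply carries the final restore status.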
rmsg, err = nc.Request(rresp.DeliverSubject, nil, time.Second)
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
rresp.Error = nil
json.Unmarshal(rmsg.Data, &rresp)
if rresp.Error != nil {
t.Fatalf("Got an unexpected error response: %+v", rresp.Error)
}
si, err := js.StreamInfo("TEST")
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
if si == nil || si.Config.Name != "TEST" || si.State.Msgs != uint64(toSend) {
t.Fatalf("StreamInfo is not correct %+v", si)
}
// Make sure the replicas become current eventually. They will be doing catchup.
checkFor(t, 5*time.Second, 100*time.Millisecond, func() error {
si, _ := js.StreamInfo("TEST")
if si == nil || si.Cluster == nil {
t.Fatalf("Did not get stream info")
}
for _, pi := range si.Cluster.Replicas {
if !pi.Current {
return fmt.Errorf("Peer not current: %+v", pi)
}
}
return nil
})
// Wait on the system to elect a leader for the restored consumer.
c.waitOnConsumerLeader("$G", "TEST", "dlc")
// Now check for the consumer being recreated.
nci, err := js.ConsumerInfo("TEST", "dlc")
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
if nci.Delivered != ci.Delivered {
t.Fatalf("Delivered states do not match %+v vs %+v", nci.Delivered, ci.Delivered)
}
if nci.AckFloor != ci.AckFloor {
t.Fatalf("Ack floors did not match %+v vs %+v", nci.AckFloor, ci.AckFloor)
}
// Make sure consumer works.
// It should pick up with the next delivery spot, so check for that as first message.
// We should have all the messages for first delivery delivered.
start := 101
end := toSend
for i := start; i <= end; i++ {
m, err := jsub.NextMsg(2 * time.Second)
if err != nil {
t.Fatalf("Unexpected error getting msg [%d]: %v", i, err)
}
meta, err := m.MetaData()
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
if meta.Stream != uint64(i) {
t.Fatalf("Expected stream sequence of %d, but got %d", i, meta.Stream)
}
m.Ack()
}
// Check that redelivered come in now..
redelivered := 50/3 + 1
checkSubsPending(t, jsub, redelivered)
	// Now make sure the other server was properly caught up.
// Need to call this by hand for now.
rmsg, err = nc.Request(fmt.Sprintf(JSApiStreamLeaderStepDownT, "TEST"), nil, time.Second)
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
var sdResp JSApiStreamLeaderStepDownResponse
if err := json.Unmarshal(rmsg.Data, &sdResp); err != nil {
t.Fatalf("Unexpected error: %v", err)
}
if sdResp.Error != nil {
t.Fatalf("Unexpected error: %+v", sdResp.Error)
}
c.waitOnStreamLeader("$G", "TEST")
si, err = js.StreamInfo("TEST")
if err != nil {
t.Fatalf("Unexpected error: %+v", err)
}
if si.State.Msgs != uint64(toSend) {
t.Fatalf("Unexpected stream info: %+v", si)
}
// Check idle consumer
c.waitOnConsumerLeader("$G", "TEST", "rip")
// Now check for the consumer being recreated.
if _, err := js.ConsumerInfo("TEST", "rip"); err != nil {
t.Fatalf("Unexpected error: %+v", err)
}
}
func TestJetStreamClusterUserSnapshotAndRestoreConfigChanges(t *testing.T) {
c := createJetStreamClusterExplicit(t, "R3S", 3)
defer c.shutdown()
// Client based API
nc, js := jsClientConnect(t, c.randomServer())
defer nc.Close()
// FIXME(dlc) - Do case with R=1
cfg := &nats.StreamConfig{
Name: "TEST",
Subjects: []string{"foo"},
Replicas: 2,
}
if _, err := js.AddStream(cfg); err != nil {
t.Fatalf("Unexpected error: %v", err)
}
toSend := 10
for i := 0; i < toSend; i++ {
if _, err := js.Publish("foo", []byte("OK")); err != nil {
t.Fatalf("Unexpected publish error: %v", err)
}
}
getSnapshot := func() ([]byte, *StreamState) {
t.Helper()
sreq := &JSApiStreamSnapshotRequest{
DeliverSubject: nats.NewInbox(),
ChunkSize: 1024,
}
req, _ := json.Marshal(sreq)
rmsg, err := nc.Request(fmt.Sprintf(JSApiStreamSnapshotT, "TEST"), req, time.Second)
if err != nil {
t.Fatalf("Unexpected error on snapshot request: %v", err)
}
var resp JSApiStreamSnapshotResponse
json.Unmarshal(rmsg.Data, &resp)
if resp.Error != nil {
t.Fatalf("Did not get correct error response: %+v", resp.Error)
}
var snapshot []byte
done := make(chan bool)
sub, _ := nc.Subscribe(sreq.DeliverSubject, func(m *nats.Msg) {
// EOF
if len(m.Data) == 0 {
done <- true
return
}
// Could be writing to a file here too.
snapshot = append(snapshot, m.Data...)
// Flow ack
m.Respond(nil)
})
defer sub.Unsubscribe()
// Wait to receive the snapshot.
select {
case <-done:
case <-time.After(5 * time.Second):
t.Fatalf("Did not receive our snapshot in time")
}
return snapshot, resp.State
}
	restore := func(cfg *StreamConfig, state *StreamState, snap []byte) *nats.StreamInfo {
		t.Helper()
rreq := &JSApiStreamRestoreRequest{
Config: *cfg,
State: *state,
}
req, err := json.Marshal(rreq)
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
rmsg, err := nc.Request(fmt.Sprintf(JSApiStreamRestoreT, cfg.Name), req, 5*time.Second)
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
var rresp JSApiStreamRestoreResponse
json.Unmarshal(rmsg.Data, &rresp)
if rresp.Error != nil {
t.Fatalf("Got an unexpected error response: %+v", rresp.Error)
}
		// Send our snapshot back in to restore the stream.
		// Chunks can be any size.
var chunk [1024]byte
for r := bytes.NewReader(snap); ; {
n, err := r.Read(chunk[:])
if err != nil {
break
}
nc.Request(rresp.DeliverSubject, chunk[:n], time.Second)
}
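		// An empty request signals EOF; the reply carries the final restore status.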
rmsg, err = nc.Request(rresp.DeliverSubject, nil, time.Second)
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
rresp.Error = nil
json.Unmarshal(rmsg.Data, &rresp)
if rresp.Error != nil {
t.Fatalf("Got an unexpected error response: %+v", rresp.Error)
}
si, err := js.StreamInfo(cfg.Name)
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
return si
}
snap, state := getSnapshot()
if err := js.DeleteStream("TEST"); err != nil {
t.Fatalf("Unexpected error: %v", err)
}
// Change name.
ncfg := &StreamConfig{
Name: "TEST2",
Subjects: []string{"foo"},
Storage: FileStorage,
Replicas: 2,
}
if si := restore(ncfg, state, snap); si.Config.Name != "TEST2" {
t.Fatalf("Did not get expected stream info: %+v", si)
}
if err := js.DeleteStream("TEST2"); err != nil {
t.Fatalf("Unexpected error: %v", err)
}
// Now change subjects.
ncfg.Subjects = []string{"bar", "baz"}
if si := restore(ncfg, state, snap); !reflect.DeepEqual(si.Config.Subjects, ncfg.Subjects) {
t.Fatalf("Did not get expected stream info: %+v", si)
}
if err := js.DeleteStream("TEST2"); err != nil {
t.Fatalf("Unexpected error: %v", err)
}
// Storage
ncfg.Storage = MemoryStorage
if si := restore(ncfg, state, snap); !reflect.DeepEqual(si.Config.Subjects, ncfg.Subjects) {
t.Fatalf("Did not get expected stream info: %+v", si)
}
if err := js.DeleteStream("TEST2"); err != nil {
t.Fatalf("Unexpected error: %v", err)
}
// Now replicas
ncfg.Replicas = 3
if si := restore(ncfg, state, snap); !reflect.DeepEqual(si.Config.Subjects, ncfg.Subjects) {
t.Fatalf("Did not get expected stream info: %+v", si)
}
}
func TestJetStreamClusterAccountInfoAndLimits(t *testing.T) {
c := createJetStreamClusterExplicit(t, "R5S", 5)
defer c.shutdown()
// Adjust our limits.
c.updateLimits("$G", &JetStreamAccountLimits{
MaxMemory: 1024,
MaxStore: 8000,
MaxStreams: 3,
MaxConsumers: 1,
})
// Client based API
s := c.randomServer()
nc, js := jsClientConnect(t, s)
defer nc.Close()
if _, err := js.AddStream(&nats.StreamConfig{Name: "foo", Replicas: 1}); err != nil {
t.Fatalf("Unexpected error: %v", err)
}
if _, err := js.AddStream(&nats.StreamConfig{Name: "bar", Replicas: 2}); err != nil {
t.Fatalf("Unexpected error: %v", err)
}
if _, err := js.AddStream(&nats.StreamConfig{Name: "baz", Replicas: 3}); err != nil {
t.Fatalf("Unexpected error: %v", err)
}
sendBatch := func(subject string, n int) {
t.Helper()
for i := 0; i < n; i++ {
if _, err := js.Publish(subject, []byte("JSC-OK")); err != nil {
t.Fatalf("Unexpected publish error: %v", err)
}
}
}
sendBatch("foo", 25)
sendBatch("bar", 75)
sendBatch("baz", 10)
accountStats := func() *JetStreamAccountStats {
t.Helper()
resp, err := nc.Request(JSApiAccountInfo, nil, time.Second)
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
var info JSApiAccountInfoResponse
if err := json.Unmarshal(resp.Data, &info); err != nil {
t.Fatalf("Unexpected error: %v", err)
}
if info.Error != nil {
t.Fatalf("Unexpected error: %+v", info.Error)
}
if info.JetStreamAccountStats == nil {
t.Fatalf("AccountStats missing")
}
return info.JetStreamAccountStats
}
	// If the subject is not 3 letters or the payload ("JSC-OK") not 6 bytes, this needs to change.
	const msgSize = uint64(22 + 3 + 6 + 8)
stats := accountStats()
if stats.Streams != 3 {
t.Fatalf("Should have been tracking 3 streams, found %d", stats.Streams)
}
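	// Storage is counted once per replica: foo is R1, bar is R2 and baz is R3.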
expectedSize := 25*msgSize + 75*msgSize*2 + 10*msgSize*3
// This may lag.
checkFor(t, 5*time.Second, 500*time.Millisecond, func() error {
if stats.Store != expectedSize {
			err := fmt.Errorf("Expected store size to be %d, got %+v", expectedSize, stats)
stats = accountStats()
return err
}
return nil
})
// Check limit enforcement.
if _, err := js.AddStream(&nats.StreamConfig{Name: "fail", Replicas: 3}); err == nil {
t.Fatalf("Expected an error but got none")
}
// We should be at 7995 at the moment with a limit of 8000, so any message will go over.
if _, err := js.Publish("baz", []byte("JSC-NOT-OK")); err == nil {
t.Fatalf("Expected publish error but got none")
}
// Check consumers
_, err := js.AddConsumer("foo", &nats.ConsumerConfig{Durable: "dlc", AckPolicy: nats.AckExplicitPolicy})
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
// This should fail.
_, err = js.AddConsumer("foo", &nats.ConsumerConfig{Durable: "dlc22", AckPolicy: nats.AckExplicitPolicy})
if err == nil {
t.Fatalf("Expected error but got none")
}
}
func TestJetStreamClusterStreamLimits(t *testing.T) {
c := createJetStreamClusterExplicit(t, "R3S", 3)
defer c.shutdown()
// Client based API
s := c.randomServer()
nc, js := jsClientConnect(t, s)
defer nc.Close()
// Check that large R will fail.
if _, err := js.AddStream(&nats.StreamConfig{Name: "foo", Replicas: 5}); err == nil {
t.Fatalf("Expected error but got none")
}
maxMsgs := 5
_, err := js.AddStream(&nats.StreamConfig{
Name: "foo",
Replicas: 3,
Retention: nats.LimitsPolicy,
		Discard:    nats.DiscardNew,
MaxMsgSize: 11,
MaxMsgs: int64(maxMsgs),
})
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
// Large message should fail.
if _, err := js.Publish("foo", []byte("0123456789ZZZ")); err == nil {
t.Fatalf("Expected publish to fail")
}
for i := 0; i < maxMsgs; i++ {
if _, err := js.Publish("foo", []byte("JSC-OK")); err != nil {
t.Fatalf("Unexpected publish error: %v", err)
}
}
// These should fail.
if _, err := js.Publish("foo", []byte("JSC-OK")); err == nil {
t.Fatalf("Expected publish to fail")
}
}
func TestJetStreamClusterStreamInterestOnlyPolicy(t *testing.T) {
c := createJetStreamClusterExplicit(t, "R3S", 3)
defer c.shutdown()
// Client based API
s := c.randomServer()
nc, js := jsClientConnect(t, s)
defer nc.Close()
_, err := js.AddStream(&nats.StreamConfig{
Name: "foo",
Replicas: 3,
Retention: nats.InterestPolicy,
})
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
toSend := 10
// With no interest these should be no-ops.
for i := 0; i < toSend; i++ {
if _, err := js.Publish("foo", []byte("JSC-OK")); err != nil {
t.Fatalf("Unexpected publish error: %v", err)
}
}
si, err := js.StreamInfo("foo")
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
if si.State.Msgs != 0 {
t.Fatalf("Expected no messages with no interest, got %d", si.State.Msgs)
}
// Now create a consumer.
sub, err := js.SubscribeSync("foo", nats.Durable("dlc"))
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
for i := 0; i < toSend; i++ {
if _, err := js.Publish("foo", []byte("JSC-OK")); err != nil {
t.Fatalf("Unexpected publish error: %v", err)
}
}
checkSubsPending(t, sub, toSend)
si, err = js.StreamInfo("foo")
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
if si.State.Msgs != uint64(toSend) {
t.Fatalf("Expected %d messages with interest, got %d", toSend, si.State.Msgs)
}
if si.State.FirstSeq != uint64(toSend+1) {
t.Fatalf("Expected first sequence of %d, got %d", toSend+1, si.State.FirstSeq)
}
// Now delete the consumer.
sub.Unsubscribe()
// That should make it go away.
if _, err := js.ConsumerInfo("foo", "dlc"); err == nil {
t.Fatalf("Expected not found error, got none")
}
// Wait for the messages to be purged.
checkFor(t, 5*time.Second, 20*time.Millisecond, func() error {
si, err := js.StreamInfo("foo")
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
if si.State.Msgs == 0 {
return nil
}
return fmt.Errorf("Wanted 0 messages, got %d", si.State.Msgs)
})
}
// Stream templates are not currently supported in clustered mode.
func TestJetStreamClusterStreamTemplates(t *testing.T) {
c := createJetStreamClusterExplicit(t, "R3S", 3)
defer c.shutdown()
// Client based API
s := c.randomServer()
nc, _ := jsClientConnect(t, s)
defer nc.Close()
// List API
var tListResp JSApiStreamTemplateNamesResponse
resp, err := nc.Request(JSApiTemplates, nil, time.Second)
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
if err := json.Unmarshal(resp.Data, &tListResp); err != nil {
t.Fatalf("Unexpected error: %v", err)
}
if tListResp.Error == nil {
t.Fatalf("Expected an unsupported error, got none")
}
if !strings.Contains(tListResp.Error.Description, "not currently supported in clustered mode") {
t.Fatalf("Did not get correct error response: %+v", tListResp.Error)
}
// Create
// Now do templates.
mcfg := &StreamConfig{
Subjects: []string{"kv.*"},
Storage: MemoryStorage,
}
template := &StreamTemplateConfig{
Name: "kv",
Config: mcfg,
MaxStreams: 4,
}
req, err := json.Marshal(template)
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
var stResp JSApiStreamTemplateCreateResponse
resp, err = nc.Request(fmt.Sprintf(JSApiTemplateCreateT, template.Name), req, time.Second)
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
if err = json.Unmarshal(resp.Data, &stResp); err != nil {
t.Fatalf("Unexpected error: %v", err)
}
if stResp.Error == nil {
t.Fatalf("Expected an unsupported error, got none")
}
if !strings.Contains(stResp.Error.Description, "not currently supported in clustered mode") {
t.Fatalf("Did not get correct error response: %+v", stResp.Error)
}
}
func TestJetStreamClusterExtendedAccountInfo(t *testing.T) {
c := createJetStreamClusterExplicit(t, "R3S", 3)
defer c.shutdown()
// Client based API
s := c.randomServer()
nc, js := jsClientConnect(t, s)
defer nc.Close()
sendBatch := func(subject string, n int) {
t.Helper()
for i := 0; i < n; i++ {
if _, err := js.Publish(subject, []byte("JSC-OK")); err != nil {
t.Fatalf("Unexpected publish error: %v", err)
}
}
}
// Add in some streams with msgs and consumers.
if _, err := js.AddStream(&nats.StreamConfig{Name: "TEST-1", Replicas: 2}); err != nil {
t.Fatalf("Unexpected error: %v", err)
}
if _, err := js.SubscribeSync("TEST-1"); err != nil {
t.Fatalf("Unexpected error: %v", err)
}
sendBatch("TEST-1", 25)
if _, err := js.AddStream(&nats.StreamConfig{Name: "TEST-2", Replicas: 2}); err != nil {
t.Fatalf("Unexpected error: %v", err)
}
if _, err := js.SubscribeSync("TEST-2"); err != nil {
t.Fatalf("Unexpected error: %v", err)
}
sendBatch("TEST-2", 50)
if _, err := js.AddStream(&nats.StreamConfig{Name: "TEST-3", Replicas: 3, Storage: nats.MemoryStorage}); err != nil {
t.Fatalf("Unexpected error: %v", err)
}
if _, err := js.SubscribeSync("TEST-3"); err != nil {
t.Fatalf("Unexpected error: %v", err)
}
sendBatch("TEST-3", 100)
	// The Go client may lag, so use the direct API for now.
getAccountInfo := func() *JetStreamAccountStats {
resp, err := nc.Request(JSApiAccountInfo, nil, time.Second)
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
var info JSApiAccountInfoResponse
if err := json.Unmarshal(resp.Data, &info); err != nil {
t.Fatalf("Unexpected error: %v", err)
}
return info.JetStreamAccountStats
}
// Wait to accumulate.
time.Sleep(250 * time.Millisecond)
ai := getAccountInfo()
if ai.Streams != 3 || ai.Consumers != 3 {
t.Fatalf("AccountInfo not correct: %+v", ai)
}
if ai.API.Total < 8 {
t.Fatalf("Expected at least 8 total API calls, got %d", ai.API.Total)
}
// Now do a failure to make sure we track API errors.
js.StreamInfo("NO-STREAM")
js.ConsumerInfo("TEST-1", "NO-CONSUMER")
js.ConsumerInfo("TEST-2", "NO-CONSUMER")
js.ConsumerInfo("TEST-3", "NO-CONSUMER")
ai = getAccountInfo()
if ai.API.Errors != 4 {
t.Fatalf("Expected 4 API calls to be errors, got %d", ai.API.Errors)
}
}
func TestJetStreamClusterPeerRemovalAPI(t *testing.T) {
c := createJetStreamClusterExplicit(t, "R5S", 5)
defer c.shutdown()
// Client based API
ml := c.leader()
nc, err := nats.Connect(ml.ClientURL(), nats.UserInfo("admin", "s3cr3t!"))
if err != nil {
t.Fatalf("Failed to create system client: %v", err)
}
defer nc.Close()
// Expect error if unknown peer
req := &JSApiMetaServerRemoveRequest{Server: "S-9"}
jsreq, err := json.Marshal(req)
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
rmsg, err := nc.Request(JSApiRemoveServer, jsreq, time.Second)
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
var resp JSApiMetaServerRemoveResponse
if err := json.Unmarshal(rmsg.Data, &resp); err != nil {
t.Fatalf("Unexpected error: %v", err)
}
if resp.Error == nil {
t.Fatalf("Expected an error, got none")
}
sub, err := nc.SubscribeSync(JSAdvisoryServerRemoved)
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
req = &JSApiMetaServerRemoveRequest{Server: c.serverByName("S-2").ID()}
jsreq, err = json.Marshal(req)
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
rmsg, err = nc.Request(JSApiRemoveServer, jsreq, time.Second)
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
resp.Error = nil
if err := json.Unmarshal(rmsg.Data, &resp); err != nil {
t.Fatalf("Unexpected error: %v", err)
}
if resp.Error != nil {
t.Fatalf("Unexpected error: %+v", resp.Error)
}
c.waitOnLeader()
ml = c.leader()
checkSubsPending(t, sub, 1)
madv, _ := sub.NextMsg(0)
var adv JSServerRemovedAdvisory
if err := json.Unmarshal(madv.Data, &adv); err != nil {
t.Fatalf("Unexpected error: %v", err)
}
if adv.Server != "S-2" {
t.Fatalf("Expected advisory about S-2 being removed, got %+v", adv)
}
for _, s := range ml.JetStreamClusterPeers() {
if s == "S-2" {
t.Fatalf("Still in the peer list")
}
}
}
func TestJetStreamClusterNoQuorumStepdown(t *testing.T) {
c := createJetStreamClusterExplicit(t, "R3S", 3)
defer c.shutdown()
// Client based API
s := c.randomServer()
nc, js := jsClientConnect(t, s)
defer nc.Close()
// Setup subscription for leader elected.
lesub, err := nc.SubscribeSync(JSAdvisoryStreamLeaderElectedPre + ".*")
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
if _, err := js.AddStream(&nats.StreamConfig{Name: "NO-Q", Replicas: 2}); err != nil {
t.Fatalf("Unexpected error: %v", err)
}
// Make sure we received our leader elected advisory.
leadv, _ := lesub.NextMsg(0)
if leadv == nil {
t.Fatalf("Expected to receive a leader elected advisory")
}
var le JSStreamLeaderElectedAdvisory
if err := json.Unmarshal(leadv.Data, &le); err != nil {
t.Fatalf("Unexpected error: %v", err)
}
if ln := c.streamLeader("$G", "NO-Q").Name(); le.Leader != ln {
t.Fatalf("Expected to have leader %q in elect advisory, got %q", ln, le.Leader)
}
payload := []byte("Hello JSC")
for i := 0; i < 10; i++ {
if _, err := js.Publish("NO-Q", payload); err != nil {
t.Fatalf("Unexpected error: %v", err)
}
}
// Setup subscription for leader elected.
clesub, err := nc.SubscribeSync(JSAdvisoryConsumerLeaderElectedPre + ".*.*")
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
// Make durable to have R match Stream.
sub, err := js.SubscribeSync("NO-Q", nats.Durable("rr"))
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
ci, err := sub.ConsumerInfo()
if err != nil || ci == nil {
t.Fatalf("Unexpected error: %v", err)
}
// Make sure we received our consumer leader elected advisory.
leadv, _ = clesub.NextMsg(0)
if leadv == nil {
t.Fatalf("Expected to receive a consumer leader elected advisory")
}
// Shutdown the non-leader.
c.randomNonStreamLeader("$G", "NO-Q").Shutdown()
	// This should eventually have us step down as leader since we would have lost quorum with R=2.
checkFor(t, 2*time.Second, 100*time.Millisecond, func() error {
if sl := c.streamLeader("$G", "NO-Q"); sl == nil {
return nil
}
return fmt.Errorf("Still have leader for stream")
})
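	// With quorum lost, JetStream API calls should report 'unavailable'.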
notAvailableErr := func(err error) bool {
return err != nil && strings.Contains(err.Error(), "unavailable")
}
// Expect to get errors here.
if _, err := js.StreamInfo("NO-Q"); !notAvailableErr(err) {
t.Fatalf("Expected an 'unavailable' error, got %v", err)
}
checkFor(t, 2*time.Second, 100*time.Millisecond, func() error {
if cl := c.consumerLeader("$G", "NO-Q", ci.Name); cl == nil {
return nil
}
return fmt.Errorf("Still have leader for consumer")
})
if _, err = js.ConsumerInfo("NO-Q", ci.Name); !notAvailableErr(err) {
t.Fatalf("Expected an 'unavailable' error, got %v", err)
}
if _, err := sub.ConsumerInfo(); !notAvailableErr(err) {
t.Fatalf("Expected an 'unavailable' error, got %v", err)
}
// Now let's take out the other non meta-leader
// We should get same error for general API calls.
c.randomNonLeader().Shutdown()
c.expectNoLeader()
// Now make sure the general JS API responds with system unavailable.
if _, err = js.AccountInfo(); !notAvailableErr(err) {
t.Fatalf("Expected an 'unavailable' error, got %v", err)
}
if _, err := js.AddStream(&nats.StreamConfig{Name: "NO-Q33", Replicas: 2}); !notAvailableErr(err) {
t.Fatalf("Expected an 'unavailable' error, got %v", err)
}
if _, err := js.UpdateStream(&nats.StreamConfig{Name: "NO-Q33", Replicas: 2}); !notAvailableErr(err) {
t.Fatalf("Expected an 'unavailable' error, got %v", err)
}
if err := js.DeleteStream("NO-Q"); !notAvailableErr(err) {
t.Fatalf("Expected an 'unavailable' error, got %v", err)
}
if _, err := js.StreamInfo("NO-Q"); !notAvailableErr(err) {
t.Fatalf("Expected an 'unavailable' error, got %v", err)
}
if err := js.PurgeStream("NO-Q"); !notAvailableErr(err) {
t.Fatalf("Expected an 'unavailable' error, got %v", err)
}
if err := js.DeleteMsg("NO-Q", 1); !notAvailableErr(err) {
t.Fatalf("Expected an 'unavailable' error, got %v", err)
}
// Consumer
if _, err := js.AddConsumer("NO-Q", &nats.ConsumerConfig{Durable: "dlc", AckPolicy: nats.AckExplicitPolicy}); !notAvailableErr(err) {
t.Fatalf("Expected an 'unavailable' error, got %v", err)
}
if err := js.DeleteConsumer("NO-Q", "dlc"); !notAvailableErr(err) {
t.Fatalf("Expected an 'unavailable' error, got %v", err)
}
if _, err := js.ConsumerInfo("NO-Q", "dlc"); !notAvailableErr(err) {
t.Fatalf("Expected an 'unavailable' error, got %v", err)
}
// Listers
if sl := js.NewStreamLister(); sl.Next() || !notAvailableErr(sl.Err()) {
t.Fatalf("Expected an 'unavailable' error, got %v", sl.Err())
}
if cl := js.NewConsumerLister("NO-Q"); cl.Next() || !notAvailableErr(cl.Err()) {
t.Fatalf("Expected an 'unavailable' error, got %v", cl.Err())
}
}
func TestJetStreamClusterCreateResponseAdvisoriesHaveSubject(t *testing.T) {
c := createJetStreamClusterExplicit(t, "R3S", 3)
defer c.shutdown()
// Client based API
s := c.randomServer()
nc, js := jsClientConnect(t, s)
defer nc.Close()
sub, err := nc.SubscribeSync("$JS.EVENT.ADVISORY.API")
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
defer sub.Unsubscribe()
if _, err := js.AddStream(&nats.StreamConfig{Name: "TEST", Replicas: 2}); err != nil {
t.Fatalf("Unexpected error: %v", err)
}
if _, err := js.SubscribeSync("TEST", nats.Durable("DLC")); err != nil {
t.Fatalf("Unexpected error: %v", err)
}
if err := js.PurgeStream("TEST"); err != nil {
t.Fatalf("Unexpected error: %v", err)
}
if err := js.DeleteStream("TEST"); err != nil {
t.Fatalf("Unexpected error: %v", err)
}
checkSubsPending(t, sub, 6)
for m, err := sub.NextMsg(0); err == nil; m, err = sub.NextMsg(0) {
var audit JSAPIAudit
if err := json.Unmarshal(m.Data, &audit); err != nil {
t.Fatalf("Unexpected error: %v", err)
}
if audit.Subject == "" {
t.Fatalf("Expected subject, got nothing")
}
}
}
func TestJetStreamClusterRestartAndRemoveAdvisories(t *testing.T) {
// FIXME(dlc) - Flaky on Travis, skip for now.
skip(t)
c := createJetStreamClusterExplicit(t, "R3S", 3)
defer c.shutdown()
// Client based API
s := c.randomServer()
nc, js := jsClientConnect(t, s)
defer nc.Close()
sub, err := nc.SubscribeSync("$JS.EVENT.ADVISORY.API")
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
defer sub.Unsubscribe()
csub, err := nc.SubscribeSync("$JS.EVENT.ADVISORY.*.CREATED.>")
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
defer csub.Unsubscribe()
nc.Flush()
sendBatch := func(subject string, n int) {
t.Helper()
for i := 0; i < n; i++ {
if _, err := js.Publish(subject, []byte("JSC-OK")); err != nil {
t.Fatalf("Unexpected publish error: %v", err)
}
}
}
// Add in some streams with msgs and consumers.
if _, err := js.AddStream(&nats.StreamConfig{Name: "TEST-1", Replicas: 2}); err != nil {
t.Fatalf("Unexpected error: %v", err)
}
if _, err := js.SubscribeSync("TEST-1", nats.Durable("DC")); err != nil {
t.Fatalf("Unexpected error: %v", err)
}
sendBatch("TEST-1", 25)
if _, err := js.AddStream(&nats.StreamConfig{Name: "TEST-2", Replicas: 2}); err != nil {
t.Fatalf("Unexpected error: %v", err)
}
if _, err := js.SubscribeSync("TEST-2", nats.Durable("DC")); err != nil {
t.Fatalf("Unexpected error: %v", err)
}
sendBatch("TEST-2", 50)
if _, err := js.AddStream(&nats.StreamConfig{Name: "TEST-3", Replicas: 3, Storage: nats.MemoryStorage}); err != nil {
t.Fatalf("Unexpected error: %v", err)
}
if _, err := js.SubscribeSync("TEST-3", nats.Durable("DC")); err != nil {
t.Fatalf("Unexpected error: %v", err)
}
sendBatch("TEST-3", 100)
drainSub := func(sub *nats.Subscription) {
for _, err := sub.NextMsg(0); err == nil; _, err = sub.NextMsg(0) {
}
}
// Wait for the advisories for all streams and consumers.
	checkSubsPending(t, sub, 12) // 3 streams, 3*2 consumers, 3 stream name lookups for creating consumers.
drainSub(sub)
// Created audit events.
checkSubsPending(t, csub, 6)
drainSub(csub)
usub, err := nc.SubscribeSync("$JS.EVENT.ADVISORY.*.UPDATED.>")
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
defer usub.Unsubscribe()
nc.Flush()
checkSubsPending(t, csub, 0)
checkSubsPending(t, sub, 0)
checkSubsPending(t, usub, 0)
// Now restart the other two servers we are not connected to.
for _, cs := range c.servers {
if cs != s {
cs.Shutdown()
c.restartServer(cs)
}
}
c.waitOnAllCurrent()
checkSubsPending(t, csub, 0)
checkSubsPending(t, sub, 0)
checkSubsPending(t, usub, 0)
dsub, err := nc.SubscribeSync("$JS.EVENT.ADVISORY.*.DELETED.>")
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
defer dsub.Unsubscribe()
nc.Flush()
c.waitOnConsumerLeader("$G", "TEST-1", "DC")
c.waitOnLeader()
// Now check delete advisories as well.
if err := js.DeleteConsumer("TEST-1", "DC"); err != nil {
t.Fatalf("Unexpected error: %v", err)
}
checkSubsPending(t, csub, 0)
checkSubsPending(t, dsub, 1)
checkSubsPending(t, sub, 1)
checkSubsPending(t, usub, 0)
drainSub(dsub)
if err := js.DeleteStream("TEST-3"); err != nil {
t.Fatalf("Unexpected error: %v", err)
}
checkSubsPending(t, dsub, 2) // Stream and the consumer underneath.
checkSubsPending(t, sub, 2)
}
func TestJetStreamClusterNoDuplicateOnNodeRestart(t *testing.T) {
c := createJetStreamClusterExplicit(t, "ND", 2)
defer c.shutdown()
// Client based API
s := c.randomServer()
nc, js := jsClientConnect(t, s)
defer nc.Close()
_, err := js.AddStream(&nats.StreamConfig{
Name: "TEST",
Subjects: []string{"foo"},
})
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
sub, err := js.SubscribeSync("foo", nats.Durable("dlc"))
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
js.Publish("foo", []byte("msg1"))
if m, err := sub.NextMsg(time.Second); err != nil {
t.Fatalf("Unexpected error: %v", err)
} else {
m.Ack()
}
sl := c.streamLeader("$G", "TEST")
sl.Shutdown()
c.restartServer(sl)
c.waitOnStreamLeader("$G", "TEST")
// Send second msg
js.Publish("foo", []byte("msg2"))
msg, err := sub.NextMsg(time.Second)
if err != nil {
t.Fatalf("Error getting message: %v", err)
}
if string(msg.Data) != "msg2" {
t.Fatalf("Unexpected message: %s", msg.Data)
}
msg.Ack()
// Make sure we don't get a duplicate.
msg, err = sub.NextMsg(250 * time.Millisecond)
if err == nil {
t.Fatalf("Should have gotten an error, got %s", msg.Data)
}
}
func TestJetStreamClusterNoDupePeerSelection(t *testing.T) {
c := createJetStreamClusterExplicit(t, "NDP", 3)
defer c.shutdown()
// Client based API
s := c.randomServer()
nc, js := jsClientConnect(t, s)
defer nc.Close()
// Create 10 streams. Make sure none of them have a replica
// that is the same as the leader.
for i := 1; i <= 10; i++ {
si, err := js.AddStream(&nats.StreamConfig{
Name: fmt.Sprintf("TEST-%d", i),
Replicas: 3,
})
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
if si.Cluster == nil || si.Cluster.Leader == "" || len(si.Cluster.Replicas) != 2 {
t.Fatalf("Unexpected cluster state for stream info: %+v\n", si.Cluster)
}
// Make sure that the replicas are not same as the leader.
for _, pi := range si.Cluster.Replicas {
if pi.Name == si.Cluster.Leader {
t.Fatalf("Found replica that is same as leader, meaning 2 nodes placed on same server")
}
}
// Now do a consumer and check same thing.
sub, err := js.SubscribeSync(si.Config.Name)
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
ci, err := sub.ConsumerInfo()
if err != nil {
t.Fatalf("Unexpected error getting consumer info: %v", err)
}
for _, pi := range ci.Cluster.Replicas {
if pi.Name == ci.Cluster.Leader {
t.Fatalf("Found replica that is same as leader, meaning 2 nodes placed on same server")
}
}
}
}
func TestJetStreamClusterRemovePeer(t *testing.T) {
c := createJetStreamClusterExplicit(t, "RNS", 5)
defer c.shutdown()
// Client based API
s := c.randomServer()
nc, js := jsClientConnect(t, s)
defer nc.Close()
_, err := js.AddStream(&nats.StreamConfig{Name: "TEST", Replicas: 3})
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
// Send in 10 messages.
msg, toSend := []byte("Hello JS Clustering"), 10
for i := 0; i < toSend; i++ {
if _, err = js.Publish("TEST", msg); err != nil {
t.Fatalf("Unexpected publish error: %v", err)
}
}
sub, err := js.SubscribeSync("TEST", nats.Durable("cat"))
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
checkSubsPending(t, sub, toSend)
// Grab stream info.
si, err := js.StreamInfo("TEST")
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
peers := []string{si.Cluster.Leader}
for _, p := range si.Cluster.Replicas {
peers = append(peers, p.Name)
}
rand.Shuffle(len(peers), func(i, j int) { peers[i], peers[j] = peers[j], peers[i] })
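	// Pick a random member, which may be the current leader, to remove.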
toRemove := peers[0]
// First test bad peer.
req := &JSApiStreamRemovePeerRequest{Peer: "NOT VALID"}
jsreq, err := json.Marshal(req)
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
// Need to call this by hand for now.
resp, err := nc.Request(fmt.Sprintf(JSApiStreamRemovePeerT, "TEST"), jsreq, time.Second)
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
var rpResp JSApiStreamRemovePeerResponse
if err := json.Unmarshal(resp.Data, &rpResp); err != nil {
t.Fatalf("Unexpected error: %v", err)
}
if rpResp.Error == nil || !strings.Contains(rpResp.Error.Description, "peer not a member") {
t.Fatalf("Expected error for bad peer, got %+v", rpResp.Error)
}
rpResp.Error = nil
req = &JSApiStreamRemovePeerRequest{Peer: toRemove}
jsreq, err = json.Marshal(req)
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
resp, err = nc.Request(fmt.Sprintf(JSApiStreamRemovePeerT, "TEST"), jsreq, time.Second)
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
if err := json.Unmarshal(resp.Data, &rpResp); err != nil {
t.Fatalf("Unexpected error: %v", err)
}
if rpResp.Error != nil {
t.Fatalf("Unexpected error: %+v", rpResp.Error)
}
// Grab shorter timeout jetstream context.
js, err = nc.JetStream(nats.MaxWait(100 * time.Millisecond))
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
checkFor(t, 5*time.Second, 100*time.Millisecond, func() error {
si, err := js.StreamInfo("TEST")
if err != nil {
return fmt.Errorf("Could not fetch stream info: %v", err)
}
if len(si.Cluster.Replicas) != 2 {
return fmt.Errorf("Expected 2 replicas, got %d", len(si.Cluster.Replicas))
}
for _, peer := range si.Cluster.Replicas {
if !peer.Current {
return fmt.Errorf("Expected replica to be current: %+v", peer)
}
}
if si.Cluster.Leader == toRemove {
return fmt.Errorf("Peer not removed yet: %+v", toRemove)
}
for _, p := range si.Cluster.Replicas {
if p.Name == toRemove {
return fmt.Errorf("Peer not removed yet: %+v", toRemove)
}
}
return nil
})
// Now check consumer info as well.
checkFor(t, 5*time.Second, 100*time.Millisecond, func() error {
ci, err := js.ConsumerInfo("TEST", "cat")
if err != nil {
return fmt.Errorf("Could not fetch consumer info: %v", err)
}
if len(ci.Cluster.Replicas) != 2 {
return fmt.Errorf("Expected 2 replicas, got %d", len(ci.Cluster.Replicas))
}
for _, peer := range ci.Cluster.Replicas {
if !peer.Current {
return fmt.Errorf("Expected replica to be current: %+v", peer)
}
}
if ci.Cluster.Leader == toRemove {
return fmt.Errorf("Peer not removed yet: %+v", toRemove)
}
for _, p := range ci.Cluster.Replicas {
if p.Name == toRemove {
return fmt.Errorf("Peer not removed yet: %+v", toRemove)
}
}
return nil
})
}
func TestJetStreamClusterStreamLeaderStepDown(t *testing.T) {
c := createJetStreamClusterExplicit(t, "RNS", 3)
defer c.shutdown()
// Client based API
s := c.randomServer()
nc, js := jsClientConnect(t, s)
defer nc.Close()
_, err := js.AddStream(&nats.StreamConfig{Name: "TEST", Replicas: 3})
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
// Send in 10 messages.
msg, toSend := []byte("Hello JS Clustering"), 10
for i := 0; i < toSend; i++ {
if _, err = js.Publish("TEST", msg); err != nil {
t.Fatalf("Unexpected publish error: %v", err)
}
}
sub, err := js.SubscribeSync("TEST", nats.Durable("cat"))
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
defer sub.Unsubscribe()
oldLeader := c.streamLeader("$G", "TEST").Name()
// Need to call this by hand for now.
resp, err := nc.Request(fmt.Sprintf(JSApiStreamLeaderStepDownT, "TEST"), nil, time.Second)
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
var sdResp JSApiStreamLeaderStepDownResponse
if err := json.Unmarshal(resp.Data, &sdResp); err != nil {
t.Fatalf("Unexpected error: %v", err)
}
if sdResp.Error != nil {
t.Fatalf("Unexpected error: %+v", sdResp.Error)
}
// Grab shorter timeout jetstream context.
js, err = nc.JetStream(nats.MaxWait(50 * time.Millisecond))
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
checkFor(t, 2*time.Second, 50*time.Millisecond, func() error {
si, err := js.StreamInfo("TEST")
if err != nil {
return fmt.Errorf("Could not fetch stream info: %v", err)
}
if si.Cluster.Leader == oldLeader {
return fmt.Errorf("Still have old leader")
}
if len(si.Cluster.Replicas) != 2 {
return fmt.Errorf("Expected 2 replicas, got %d", len(si.Cluster.Replicas))
}
for _, peer := range si.Cluster.Replicas {
if !peer.Current {
return fmt.Errorf("Expected replica to be current: %+v", peer)
}
}
return nil
})
// Now do consumer.
oldLeader = c.consumerLeader("$G", "TEST", "cat").Name()
// Need to call this by hand for now.
resp, err = nc.Request(fmt.Sprintf(JSApiConsumerLeaderStepDownT, "TEST", "cat"), nil, time.Second)
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
var cdResp JSApiConsumerLeaderStepDownResponse
if err := json.Unmarshal(resp.Data, &cdResp); err != nil {
t.Fatalf("Unexpected error: %v", err)
}
if cdResp.Error != nil {
t.Fatalf("Unexpected error: %+v", sdResp.Error)
}
checkFor(t, 2*time.Second, 50*time.Millisecond, func() error {
ci, err := js.ConsumerInfo("TEST", "cat")
if err != nil {
return fmt.Errorf("Could not fetch consumer info: %v", err)
}
if ci.Cluster.Leader == oldLeader {
return fmt.Errorf("Still have old leader")
}
if len(ci.Cluster.Replicas) != 2 {
return fmt.Errorf("Expected 2 replicas, got %d", len(ci.Cluster.Replicas))
}
for _, peer := range ci.Cluster.Replicas {
if !peer.Current {
return fmt.Errorf("Expected replica to be current: %+v", peer)
}
}
return nil
})
}
func TestJetStreamClusterRemoveServer(t *testing.T) {
c := createJetStreamClusterExplicit(t, "RNS", 5)
defer c.shutdown()
// Client based API
s := c.randomServer()
nc, js := jsClientConnect(t, s)
defer nc.Close()
_, err := js.AddStream(&nats.StreamConfig{Name: "TEST", Replicas: 3})
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
// Send in 10 messages.
msg, toSend := []byte("Hello JS Clustering"), 10
for i := 0; i < toSend; i++ {
if _, err = js.Publish("TEST", msg); err != nil {
t.Fatalf("Unexpected publish error: %v", err)
}
}
sub, err := js.SubscribeSync("TEST")
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
checkSubsPending(t, sub, toSend)
ci, err := sub.ConsumerInfo()
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
cname := ci.Name
sl := c.streamLeader("$G", "TEST")
c.removeJetStream(sl)
c.waitOnStreamLeader("$G", "TEST")
	// Faster timeout since we loop below checking for the condition.
js, err = nc.JetStream(nats.MaxWait(50 * time.Millisecond))
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
// Check the stream info is eventually correct.
checkFor(t, 5*time.Second, 100*time.Millisecond, func() error {
si, err := js.StreamInfo("TEST")
if err != nil {
return fmt.Errorf("Could not fetch stream info: %v", err)
}
if len(si.Cluster.Replicas) != 2 {
return fmt.Errorf("Expected 2 replicas, got %d", len(si.Cluster.Replicas))
}
for _, peer := range si.Cluster.Replicas {
if !peer.Current {
return fmt.Errorf("Expected replica to be current: %+v", peer)
}
}
return nil
})
// Now do consumer.
c.waitOnConsumerLeader("$G", "TEST", cname)
checkFor(t, 5*time.Second, 50*time.Millisecond, func() error {
ci, err := js.ConsumerInfo("TEST", cname)
if err != nil {
return fmt.Errorf("Could not fetch consumer info: %v", err)
}
if len(ci.Cluster.Replicas) != 2 {
return fmt.Errorf("Expected 2 replicas, got %d", len(ci.Cluster.Replicas))
}
for _, peer := range ci.Cluster.Replicas {
if !peer.Current {
return fmt.Errorf("Expected replica to be current: %+v", peer)
}
}
return nil
})
}
func TestJetStreamClusterPurgeReplayAfterRestart(t *testing.T) {
c := createJetStreamClusterExplicit(t, "P3F", 3)
defer c.shutdown()
// Client based API
s := c.randomNonLeader()
nc, js := jsClientConnect(t, s)
defer nc.Close()
if _, err := js.AddStream(&nats.StreamConfig{Name: "TEST", Replicas: 3}); err != nil {
t.Fatalf("Unexpected error: %v", err)
}
sendBatch := func(n int) {
t.Helper()
// Send a batch to a given subject.
for i := 0; i < n; i++ {
if _, err := js.Publish("TEST", []byte("OK")); err != nil {
t.Fatalf("Unexpected publish error: %v", err)
}
}
}
sendBatch(10)
if err := js.PurgeStream("TEST"); err != nil {
t.Fatalf("Unexpected purge error: %v", err)
}
sendBatch(10)
c.stopAll()
c.restartAll()
c.waitOnStreamLeader("$G", "TEST")
s = c.randomServer()
nc, js = jsClientConnect(t, s)
defer nc.Close()
si, err := js.StreamInfo("TEST")
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
if si.State.Msgs != 10 {
t.Fatalf("Expected 10 msgs after restart, got %d", si.State.Msgs)
}
}
func TestJetStreamClusterStreamGetMsg(t *testing.T) {
c := createJetStreamClusterExplicit(t, "R3F", 3)
defer c.shutdown()
// Client based API
s := c.randomServer()
nc, js := jsClientConnect(t, s)
defer nc.Close()
if _, err := js.AddStream(&nats.StreamConfig{Name: "TEST"}); err != nil {
t.Fatalf("Unexpected error: %v", err)
}
if _, err := js.Publish("TEST", []byte("OK")); err != nil {
t.Fatalf("Unexpected publish error: %v", err)
}
mreq := &JSApiMsgGetRequest{Seq: 1}
req, err := json.Marshal(mreq)
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
rmsg, err := nc.Request(fmt.Sprintf(JSApiMsgGetT, "TEST"), req, time.Second)
if err != nil {
t.Fatalf("Could not retrieve stream message: %v", err)
}
var resp JSApiMsgGetResponse
err = json.Unmarshal(rmsg.Data, &resp)
if err != nil {
t.Fatalf("Could not parse stream message: %v", err)
}
if resp.Message == nil || resp.Error != nil {
t.Fatalf("Did not receive correct response: %+v", resp.Error)
}
}
func TestJetStreamClusterMetaPlacement(t *testing.T) {
sc := createJetStreamSuperCluster(t, 3, 3)
defer sc.shutdown()
// We want to influence where the meta leader will place itself when we ask the
	// current leader to step down.
ml := sc.leader()
cn := ml.ClusterName()
var pcn string
for _, c := range sc.clusters {
if c.name != cn {
pcn = c.name
break
}
}
// Client based API
s := sc.randomCluster().randomServer()
nc, err := nats.Connect(s.ClientURL(), nats.UserInfo("admin", "s3cr3t!"))
if err != nil {
t.Fatalf("Failed to create system client: %v", err)
}
defer nc.Close()
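	// The stepdown request carries a placement hint to steer where the new metaleader lands.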
stepdown := func(cn string) *JSApiLeaderStepDownResponse {
req := &JSApiLeaderStepdownRequest{Placement: &Placement{Cluster: cn}}
jreq, err := json.Marshal(req)
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
resp, err := nc.Request(JSApiLeaderStepDown, jreq, time.Second)
if err != nil {
t.Fatalf("Error on stepdown request: %v", err)
}
var sdr JSApiLeaderStepDownResponse
if err := json.Unmarshal(resp.Data, &sdr); err != nil {
t.Fatalf("Unexpected error: %v", err)
}
return &sdr
}
// Make sure we get correct errors for tags and bad or unavailable cluster placement.
sdr := stepdown("C22")
if sdr.Error == nil || !strings.Contains(sdr.Error.Description, "no suitable peers") {
t.Fatalf("Got incorrect error result: %+v", sdr.Error)
}
// Should work.
sdr = stepdown(pcn)
if sdr.Error != nil {
t.Fatalf("Got an error on stepdown: %+v", sdr.Error)
}
sc.waitOnLeader()
ml = sc.leader()
cn = ml.ClusterName()
if cn != pcn {
t.Fatalf("Expected new metaleader to be in cluster %q, got %q", pcn, cn)
}
}
func TestJetStreamClusterSuperClusterBasics(t *testing.T) {
sc := createJetStreamSuperCluster(t, 3, 3)
defer sc.shutdown()
// Client based API
s := sc.randomCluster().randomServer()
nc, js := jsClientConnect(t, s)
defer nc.Close()
_, err := js.AddStream(&nats.StreamConfig{Name: "TEST", Replicas: 3})
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
// Send in 10 messages.
msg, toSend := []byte("Hello JS Clustering"), 10
for i := 0; i < toSend; i++ {
if _, err = js.Publish("TEST", msg); err != nil {
t.Fatalf("Unexpected publish error: %v", err)
}
}
// Now grab info for this stream.
si, err := js.StreamInfo("TEST")
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
if si == nil || si.Config.Name != "TEST" {
t.Fatalf("StreamInfo is not correct %+v", si)
}
// Check active state as well, shows that the owner answered.
if si.State.Msgs != uint64(toSend) {
t.Fatalf("Expected %d msgs, got bad state: %+v", toSend, si.State)
}
// Check request origin placement.
if si.Cluster.Name != s.ClusterName() {
t.Fatalf("Expected stream to be placed in %q, but got %q", s.ClusterName(), si.Cluster.Name)
}
// Check consumers.
sub, err := js.SubscribeSync("TEST")
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
checkSubsPending(t, sub, toSend)
ci, err := sub.ConsumerInfo()
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
if ci.Delivered.Consumer != uint64(toSend) || ci.NumAckPending != toSend {
t.Fatalf("ConsumerInfo is not correct: %+v", ci)
}
// Now check we can place a stream.
// Need to do this by hand for now until Go client catches up.
pcn := "C3"
cfg := StreamConfig{
Name: "TEST2",
Storage: FileStorage,
Placement: &Placement{Cluster: pcn},
}
req, err := json.Marshal(cfg)
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
	resp, err := nc.Request(fmt.Sprintf(JSApiStreamCreateT, cfg.Name), req, time.Second)
	if err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}
var scResp JSApiStreamCreateResponse
if err := json.Unmarshal(resp.Data, &scResp); err != nil {
t.Fatalf("Unexpected error: %v", err)
}
if scResp.StreamInfo == nil || scResp.Error != nil {
t.Fatalf("Did not receive correct response: %+v", scResp.Error)
}
if scResp.StreamInfo.Cluster.Name != pcn {
t.Fatalf("Expected the stream to be placed in %q, got %q", pcn, scResp.StreamInfo.Cluster.Name)
}
}
// Test that consumer interest across gateways and superclusters is properly identified in a remote cluster.
func TestJetStreamClusterSuperClusterCrossClusterConsumerInterest(t *testing.T) {
sc := createJetStreamSuperCluster(t, 3, 3)
defer sc.shutdown()
// Client based API - Connect to Cluster C1. Stream and consumer will live in C2.
s := sc.clusterForName("C1").randomServer()
nc, js := jsClientConnect(t, s)
defer nc.Close()
pcn := "C2"
_, err := js.AddStream(&nats.StreamConfig{Name: "foo", Replicas: 3, Placement: &nats.Placement{Cluster: pcn}})
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
// Pull based first.
sub, err := js.SubscribeSync("foo", nats.Durable("dlc"), nats.Pull(1))
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
// Send a message.
if _, err = js.Publish("foo", []byte("CCI")); err != nil {
t.Fatalf("Unexpected publish error: %v", err)
}
checkSubsPending(t, sub, 1)
// Now check push based delivery.
sub, err = js.SubscribeSync("foo", nats.Durable("rip"))
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
checkSubsPending(t, sub, 1)
// Send another message.
if _, err = js.Publish("foo", []byte("CCI")); err != nil {
t.Fatalf("Unexpected publish error: %v", err)
}
checkSubsPending(t, sub, 2)
}
func TestJetStreamNextReqFromMsg(t *testing.T) {
bef := time.Now()
expires, _, _, err := nextReqFromMsg([]byte(`{"expires":5000000000}`)) // nanoseconds
require_NoError(t, err)
now := time.Now()
if expires.Before(bef.Add(5*time.Second)) || expires.After(now.Add(5*time.Second)) {
t.Fatal("Expires out of expected range")
}
}
func TestJetStreamClusterSuperClusterPeerReassign(t *testing.T) {
sc := createJetStreamSuperCluster(t, 3, 3)
defer sc.shutdown()
// Client based API
s := sc.randomCluster().randomServer()
nc, js := jsClientConnect(t, s)
defer nc.Close()
pcn := "C2"
cfg := StreamConfig{
Name: "TEST",
Replicas: 3,
Storage: FileStorage,
Placement: &Placement{Cluster: pcn},
}
req, err := json.Marshal(cfg)
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
resp, err := nc.Request(fmt.Sprintf(JSApiStreamCreateT, cfg.Name), req, time.Second)
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
var scResp JSApiStreamCreateResponse
if err := json.Unmarshal(resp.Data, &scResp); err != nil {
t.Fatalf("Unexpected error: %v", err)
}
if scResp.StreamInfo == nil || scResp.Error != nil {
t.Fatalf("Did not receive correct response: %+v", scResp.Error)
}
// Send in 10 messages.
msg, toSend := []byte("Hello JS Clustering"), 10
for i := 0; i < toSend; i++ {
if _, err = js.Publish("TEST", msg); err != nil {
t.Fatalf("Unexpected publish error: %v", err)
}
}
// Now grab info for this stream.
si, err := js.StreamInfo("TEST")
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
if si == nil || si.Config.Name != "TEST" {
t.Fatalf("StreamInfo is not correct %+v", si)
}
// Check active state as well, shows that the owner answered.
if si.State.Msgs != uint64(toSend) {
t.Fatalf("Expected %d msgs, got bad state: %+v", toSend, si.State)
}
// Check request origin placement.
if si.Cluster.Name != pcn {
t.Fatalf("Expected stream to be placed in %q, but got %q", s.ClusterName(), si.Cluster.Name)
}
// Now remove a peer that is assigned to the stream.
rc := sc.clusterForName(pcn)
rs := rc.randomNonStreamLeader("$G", "TEST")
rc.removeJetStream(rs)
// Check the stream info is eventually correct.
checkFor(t, 2*time.Second, 50*time.Millisecond, func() error {
si, err := js.StreamInfo("TEST")
if err != nil {
return fmt.Errorf("Could not fetch stream info: %v", err)
}
if len(si.Cluster.Replicas) != 2 {
return fmt.Errorf("Expected 2 replicas, got %d", len(si.Cluster.Replicas))
}
for _, peer := range si.Cluster.Replicas {
if !peer.Current {
return fmt.Errorf("Expected replica to be current: %+v", peer)
}
if !strings.HasPrefix(peer.Name, pcn) {
t.Fatalf("Stream peer reassigned to wrong cluster: %q", peer.Name)
}
}
return nil
})
}
func TestJetStreamClusterStreamPerf(t *testing.T) {
	// Comment out the skip below to run; this is a placeholder benchmark for now.
skip(t)
c := createJetStreamClusterExplicit(t, "R3S", 3)
defer c.shutdown()
// Client based API
s := c.randomServer()
nc, js := jsClientConnect(t, s)
defer nc.Close()
_, err := js.AddStream(&nats.StreamConfig{
Name: "TEST",
Subjects: []string{"foo"},
Replicas: 3,
})
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
numConnections := 4
var conns []nats.JetStream
for i := 0; i < numConnections; i++ {
s := c.randomServer()
_, js := jsClientConnect(t, s)
conns = append(conns, js)
}
toSend := 100000
numProducers := 8
payload := []byte("Hello JSC")
startCh := make(chan bool)
var wg sync.WaitGroup
for n := 0; n < numProducers; n++ {
wg.Add(1)
go func() {
defer wg.Done()
js := conns[rand.Intn(numConnections)]
<-startCh
			for i := 0; i < toSend/numProducers; i++ {
if _, err = js.Publish("foo", payload); err != nil {
t.Errorf("Unexpected publish error: %v", err)
}
}
}()
}
	// Give the producer goroutines time to start and block on startCh.
time.Sleep(250 * time.Millisecond)
start := time.Now()
close(startCh)
wg.Wait()
tt := time.Since(start)
fmt.Printf("Took %v to send %d msgs with %d producers and R=3!\n", tt, toSend, numProducers)
fmt.Printf("%.0f msgs/sec\n\n", float64(toSend)/tt.Seconds())
}
// This test creates a queue consumer for the delivery subject,
// and makes sure it connects to the server that is not the leader
// of the stream. A bug was not stripping the $JS.ACK reply subject
// correctly, which meant that the ack sent on the reply subject was
// dropped by the route.
func TestJetStreamClusterQueueSubConsumer(t *testing.T) {
c := createJetStreamClusterExplicit(t, "R2S", 2)
defer c.shutdown()
// Client based API
s := c.randomServer()
nc, js := jsClientConnect(t, s)
defer nc.Close()
_, err := js.AddStream(&nats.StreamConfig{
Name: "TEST",
Subjects: []string{"foo.>"},
Replicas: 1,
})
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
inbox := nats.NewInbox()
ci, err := js.AddConsumer("TEST", &nats.ConsumerConfig{
Durable: "ivan",
DeliverSubject: inbox,
AckPolicy: nats.AckExplicitPolicy,
AckWait: 100 * time.Millisecond,
})
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
// Now create a client that does NOT connect to the stream leader.
// Start with url from first server in the cluster.
u := c.servers[0].ClientURL()
// If leader is "S-1", then use S-2 to connect to, which is at servers[1].
if ci.Cluster.Leader == "S-1" {
u = c.servers[1].ClientURL()
}
qsubnc, err := nats.Connect(u)
if err != nil {
t.Fatalf("Error connecting: %v", err)
}
defer qsubnc.Close()
ch := make(chan struct{}, 2)
if _, err := qsubnc.QueueSubscribe(inbox, "queue", func(m *nats.Msg) {
m.Respond(nil)
ch <- struct{}{}
}); err != nil {
t.Fatalf("Error creating sub: %v", err)
}
// Use the other connection to publish a message
if _, err := js.Publish("foo.bar", []byte("hello")); err != nil {
t.Fatalf("Error on publish: %v", err)
}
	// Wait until we receive the message first.
select {
case <-ch:
case <-time.After(time.Second):
t.Fatal("Did not receive message")
}
// Message should be ack'ed and not redelivered.
select {
case <-ch:
t.Fatal("Message redelivered!!!")
case <-time.After(250 * time.Millisecond):
// OK
}
}
func TestJetStreamClusterLeaderStepdown(t *testing.T) {
c := createJetStreamClusterExplicit(t, "JSC", 3)
defer c.shutdown()
c.waitOnLeader()
cl := c.leader()
	// Now use the system account to ask the leader to step down.
s := c.randomNonLeader()
nc, err := nats.Connect(s.ClientURL(), nats.UserInfo("admin", "s3cr3t!"))
if err != nil {
t.Fatalf("Failed to create system client: %v", err)
}
defer nc.Close()
resp, err := nc.Request(JSApiLeaderStepDown, nil, time.Second)
if err != nil {
t.Fatalf("Error on stepdown request: %v", err)
}
var sdr JSApiLeaderStepDownResponse
if err := json.Unmarshal(resp.Data, &sdr); err != nil {
t.Fatalf("Unexpected error: %v", err)
}
if sdr.Error != nil || !sdr.Success {
t.Fatalf("Unexpected error for leader stepdown: %+v", sdr.Error)
}
c.waitOnLeader()
if cl == c.leader() {
t.Fatalf("Expected a new metaleader, got same")
}
}
func TestJetStreamCrossAccountMirrorsAndSources(t *testing.T) {
c := createJetStreamClusterWithTemplate(t, jsClusterMirrorSourceImportsTempl, "C1", 3)
defer c.shutdown()
// Create source stream under RI account.
s := c.randomServer()
nc, js := jsClientConnect(t, s, nats.UserInfo("rip", "pass"))
defer nc.Close()
if _, err := js.AddStream(&nats.StreamConfig{Name: "TEST", Replicas: 2}); err != nil {
t.Fatalf("Unexpected error: %v", err)
}
toSend := 100
for i := 0; i < toSend; i++ {
if _, err := js.Publish("TEST", []byte("OK")); err != nil {
t.Fatalf("Unexpected publish error: %v", err)
}
}
nc2, _ := jsClientConnect(t, s)
defer nc2.Close()
	// Have to do this directly until we get Go client support.
// Need to match jsClusterMirrorSourceImportsTempl imports.
cfg := StreamConfig{
Name: "MY_MIRROR_TEST",
Storage: FileStorage,
Mirror: &StreamSource{
Name: "TEST",
External: &ExternalStream{
ApiPrefix: "RI.JS.API",
DeliverPrefix: "RI.DELIVER.SYNC.MIRRORS",
},
},
}
req, err := json.Marshal(cfg)
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
resp, err := nc2.Request(fmt.Sprintf(JSApiStreamCreateT, cfg.Name), req, time.Second)
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
var scResp JSApiStreamCreateResponse
if err := json.Unmarshal(resp.Data, &scResp); err != nil {
t.Fatalf("Unexpected error: %v", err)
}
if scResp.StreamInfo == nil || scResp.Error != nil {
t.Fatalf("Did not receive correct response: %+v", scResp.Error)
}
js2, err := nc2.JetStream(nats.MaxWait(50 * time.Millisecond))
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
checkFor(t, 2*time.Second, 100*time.Millisecond, func() error {
si, err := js2.StreamInfo("MY_MIRROR_TEST")
		if err != nil {
			return fmt.Errorf("Could not retrieve stream info: %v", err)
		}
if si.State.Msgs != uint64(toSend) {
return fmt.Errorf("Expected %d msgs, got state: %+v", toSend, si.State)
}
return nil
})
// Now do sources as well.
cfg = StreamConfig{
Name: "MY_SOURCE_TEST",
Storage: FileStorage,
Sources: []*StreamSource{
&StreamSource{
Name: "TEST",
External: &ExternalStream{
ApiPrefix: "RI.JS.API",
DeliverPrefix: "RI.DELIVER.SYNC.SOURCES",
},
},
},
}
req, err = json.Marshal(cfg)
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
resp, err = nc2.Request(fmt.Sprintf(JSApiStreamCreateT, cfg.Name), req, time.Second)
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
scResp.Error = nil
if err := json.Unmarshal(resp.Data, &scResp); err != nil {
t.Fatalf("Unexpected error: %v", err)
}
if scResp.StreamInfo == nil || scResp.Error != nil {
t.Fatalf("Did not receive correct response: %+v", scResp.Error)
}
checkFor(t, 2*time.Second, 100*time.Millisecond, func() error {
si, err := js2.StreamInfo("MY_SOURCE_TEST")
		if err != nil {
			return fmt.Errorf("Could not retrieve stream info: %v", err)
		}
if si.State.Msgs != uint64(toSend) {
return fmt.Errorf("Expected %d msgs, got state: %+v", toSend, si.State)
}
return nil
})
}
func TestJetStreamClusterJSAPIImport(t *testing.T) {
c := createJetStreamClusterWithTemplate(t, jsClusterImportsTempl, "C1", 3)
defer c.shutdown()
// Client based API - This will connect to the non-js account which imports JS.
// Connect below does an AccountInfo call.
s := c.randomNonLeader()
nc, js := jsClientConnect(t, s)
defer nc.Close()
if _, err := js.AddStream(&nats.StreamConfig{Name: "TEST", Replicas: 2}); err != nil {
t.Fatalf("Unexpected error: %v", err)
}
	// Note if this was ephemeral we would need to set up export/import for that subject.
sub, err := js.SubscribeSync("TEST", nats.Durable("dlc"))
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
// Make sure we can look up both.
if _, err := js.StreamInfo("TEST"); err != nil {
t.Fatalf("Unexpected error: %v", err)
}
if _, err := sub.ConsumerInfo(); err != nil {
t.Fatalf("Unexpected error: %v", err)
}
	// Stream names list.
resp, err := nc.Request(JSApiStreams, nil, time.Second)
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
var streams JSApiStreamNamesResponse
if err = json.Unmarshal(resp.Data, &streams); err != nil {
t.Fatalf("Unexpected error: %v", err)
}
if len(streams.Streams) != 1 {
t.Fatalf("Expected only 1 stream but got %d", len(streams.Streams))
}
// Now send to stream.
if _, err := js.Publish("TEST", []byte("OK")); err != nil {
t.Fatalf("Unexpected publish error: %v", err)
}
sub, err = js.SubscribeSync("TEST", nats.Durable("tr"), nats.Pull(1))
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
checkSubsPending(t, sub, 1)
m, err := sub.NextMsg(0)
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
if m.Subject != "TEST" {
t.Fatalf("Expected subject of %q, got %q", "TEST", m.Subject)
}
if m.Header != nil {
t.Fatalf("Expected no header on the message, got: %v", m.Header)
}
meta, err := m.MetaData()
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
if meta.Consumer != 1 || meta.Stream != 1 || meta.Delivered != 1 || meta.Pending != 0 {
t.Fatalf("Bad meta: %+v", meta)
}
}
// Support functions
// Used to set up superclusters for tests.
type supercluster struct {
t *testing.T
clusters []*cluster
}
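// shutdown stops every cluster in the supercluster. It is safe to call on a
// nil receiver, so tests can defer it before creation has succeeded.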
func (sc *supercluster) shutdown() {
if sc == nil {
return
}
for _, c := range sc.clusters {
shutdownCluster(c)
}
}
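// jsClusterTempl placeholders, in order: server name, JetStream store
// directory, cluster name, cluster listen port, and the route list.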
var jsClusterTempl = `
listen: 127.0.0.1:-1
server_name: %s
jetstream: {max_mem_store: 256MB, max_file_store: 2GB, store_dir: "%s"}
cluster {
name: %s
listen: 127.0.0.1:%d
routes = [%s]
}
# For access to system account.
accounts { $SYS { users = [ { user: "admin", pass: "s3cr3t!" } ] } }
`
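// jsSuperClusterTempl wraps a base server config with a gateway block.
// Placeholders: base config, gateway name, gateway listen port, gateway list.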
var jsSuperClusterTempl = `
%s
gateway {
name: %s
listen: 127.0.0.1:%d
gateways = [%s
]
}
`
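// jsGWTempl placeholders: leading indentation, gateway name, URL list.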
var jsGWTempl = `%s{name: %s, urls: [%s]}`
func createJetStreamSuperCluster(t *testing.T, numServersPer, numClusters int) *supercluster {
t.Helper()
if numServersPer < 1 {
t.Fatalf("Number of servers must be >= 1")
}
if numClusters <= 1 {
t.Fatalf("Number of clusters must be > 1")
}
const (
startClusterPort = 33222
startGWPort = 11222
)
// Make the GWs form faster for the tests.
SetGatewaysSolicitDelay(10 * time.Millisecond)
defer ResetGatewaysSolicitDelay()
cp, gp := startClusterPort, startGWPort
var clusters []*cluster
var gws []string
	// Build gateway configs first; they are the same for every server.
for i, port := 1, gp; i <= numClusters; i++ {
cn := fmt.Sprintf("C%d", i)
var urls []string
for n := 0; n < numServersPer; n++ {
urls = append(urls, fmt.Sprintf("nats-route://127.0.0.1:%d", port))
port++
}
gws = append(gws, fmt.Sprintf(jsGWTempl, "\n\t\t\t", cn, strings.Join(urls, ",")))
}
gwconf := strings.Join(gws, "")
for i := 1; i <= numClusters; i++ {
cn := fmt.Sprintf("C%d", i)
// Go ahead and build configurations.
c := &cluster{servers: make([]*Server, 0, numServersPer), opts: make([]*Options, 0, numServersPer), name: cn}
// Build out the routes that will be shared with all configs.
var routes []string
for port := cp; port < cp+numServersPer; port++ {
routes = append(routes, fmt.Sprintf("nats-route://127.0.0.1:%d", port))
}
routeConfig := strings.Join(routes, ",")
for si := 0; si < numServersPer; si++ {
storeDir, _ := ioutil.TempDir("", JetStreamStoreDir)
sn := fmt.Sprintf("%s-S%d", cn, si+1)
bconf := fmt.Sprintf(jsClusterTempl, sn, storeDir, cn, cp+si, routeConfig)
conf := fmt.Sprintf(jsSuperClusterTempl, bconf, cn, gp, gwconf)
gp++
s, o := RunServerWithConfig(createConfFile(t, []byte(conf)))
c.servers = append(c.servers, s)
c.opts = append(c.opts, o)
}
checkClusterFormed(t, c.servers...)
clusters = append(clusters, c)
cp += numServersPer
c.t = t
}
// Wait for the supercluster to be formed.
egws := numClusters - 1
for _, c := range clusters {
for _, s := range c.servers {
waitForOutboundGateways(t, s, egws, 2*time.Second)
}
}
sc := &supercluster{t, clusters}
sc.waitOnLeader()
sc.waitOnAllCurrent()
// Wait for all the peer nodes to be registered.
checkFor(t, 5*time.Second, 100*time.Millisecond, func() error {
var peers []string
if ml := sc.leader(); ml != nil {
peers = ml.ActivePeers()
if len(peers) == numClusters*numServersPer {
return nil
}
}
return fmt.Errorf("Not correct number of peers, expected %d, got %d", numClusters*numServersPer, len(peers))
})
if sc.leader() == nil {
sc.t.Fatalf("Expected a cluster leader, got none")
}
return sc
}
func (sc *supercluster) leader() *Server {
for _, c := range sc.clusters {
if leader := c.leader(); leader != nil {
return leader
}
}
return nil
}
func (sc *supercluster) waitOnLeader() {
expires := time.Now().Add(5 * time.Second)
for time.Now().Before(expires) {
for _, c := range sc.clusters {
if leader := c.leader(); leader != nil {
time.Sleep(200 * time.Millisecond)
return
}
}
time.Sleep(25 * time.Millisecond)
}
sc.t.Fatalf("Expected a cluster leader, got none")
}
func (sc *supercluster) waitOnAllCurrent() {
for _, c := range sc.clusters {
c.waitOnAllCurrent()
}
}
func (sc *supercluster) clusterForName(name string) *cluster {
for _, c := range sc.clusters {
if c.name == name {
return c
}
}
return nil
}
func (sc *supercluster) randomCluster() *cluster {
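	// Copy the slice first so shuffling does not reorder sc.clusters itself.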
clusters := append(sc.clusters[:0:0], sc.clusters...)
rand.Shuffle(len(clusters), func(i, j int) { clusters[i], clusters[j] = clusters[j], clusters[i] })
return clusters[0]
}
var jsClusterMirrorSourceImportsTempl = `
listen: 127.0.0.1:-1
server_name: %s
jetstream: {max_mem_store: 256MB, max_file_store: 2GB, store_dir: "%s"}
cluster {
name: %s
listen: 127.0.0.1:%d
routes = [%s]
}
no_auth_user: dlc
accounts {
JS {
jetstream: enabled
users = [ { user: "rip", pass: "pass" } ]
exports [
{ service: "$JS.API.CONSUMER.>" } # To create internal consumers to mirror/source.
{ stream: "RI.DELIVER.SYNC.>" } # For the mirror/source consumers sending to IA via delivery subject.
]
}
IA {
jetstream: enabled
users = [ { user: "dlc", pass: "pass" } ]
imports [
{ service: { account: JS, subject: "$JS.API.CONSUMER.>"}, to: "RI.JS.API.CONSUMER.>" }
{ stream: { account: JS, subject: "RI.DELIVER.SYNC.>"} }
]
}
$SYS { users = [ { user: "admin", pass: "s3cr3t!" } ] }
}
`
var jsClusterImportsTempl = `
listen: 127.0.0.1:-1
server_name: %s
jetstream: {max_mem_store: 256MB, max_file_store: 2GB, store_dir: "%s"}
cluster {
name: %s
listen: 127.0.0.1:%d
routes = [%s]
}
no_auth_user: dlc
accounts {
JS {
jetstream: enabled
users = [ { user: "rip", pass: "pass" } ]
exports [
{ service: "$JS.API.>" }
{ service: "TEST" } # For publishing to the stream.
]
}
IA {
users = [ { user: "dlc", pass: "pass" } ]
imports [
{ service: { subject: "$JS.API.>", account: JS }}
{ service: { subject: "TEST", account: JS }}
]
}
$SYS { users = [ { user: "admin", pass: "s3cr3t!" } ] }
}
`
// This will create a cluster that is explicitly configured for the routes, etc.,
// and also has a defined cluster name. All configs share the same routes and cluster name.
func createJetStreamClusterExplicit(t *testing.T, clusterName string, numServers int) *cluster {
return createJetStreamClusterWithTemplate(t, jsClusterTempl, clusterName, numServers)
}
func createJetStreamClusterWithTemplate(t *testing.T, tmpl string, clusterName string, numServers int) *cluster {
t.Helper()
if clusterName == "" || numServers < 1 {
t.Fatalf("Bad params")
}
const startClusterPort = 22332
// Build out the routes that will be shared with all configs.
var routes []string
for cp := startClusterPort; cp < startClusterPort+numServers; cp++ {
routes = append(routes, fmt.Sprintf("nats-route://127.0.0.1:%d", cp))
}
routeConfig := strings.Join(routes, ",")
// Go ahead and build configurations and start servers.
c := &cluster{servers: make([]*Server, 0, numServers), opts: make([]*Options, 0, numServers), name: clusterName}
for cp := startClusterPort; cp < startClusterPort+numServers; cp++ {
storeDir, _ := ioutil.TempDir("", JetStreamStoreDir)
sn := fmt.Sprintf("S-%d", cp-startClusterPort+1)
conf := fmt.Sprintf(tmpl, sn, storeDir, clusterName, cp, routeConfig)
s, o := RunServerWithConfig(createConfFile(t, []byte(conf)))
c.servers = append(c.servers, s)
c.opts = append(c.opts, o)
}
c.t = t
	// Wait until we are formed and have a leader.
c.checkClusterFormed()
c.waitOnClusterReady()
return c
}
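// Typical usage from a test:
//
//	c := createJetStreamClusterExplicit(t, "R3S", 3)
//	defer c.shutdown()
//	nc, js := jsClientConnect(t, c.randomServer())
//	defer nc.Close()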
func (c *cluster) addInNewServer() *Server {
c.t.Helper()
sn := fmt.Sprintf("S-%d", len(c.servers)+1)
storeDir, _ := ioutil.TempDir("", JetStreamStoreDir)
seedRoute := fmt.Sprintf("nats-route://127.0.0.1:%d", c.opts[0].Cluster.Port)
conf := fmt.Sprintf(jsClusterTempl, sn, storeDir, c.name, -1, seedRoute)
s, o := RunServerWithConfig(createConfFile(c.t, []byte(conf)))
c.servers = append(c.servers, s)
c.opts = append(c.opts, o)
c.checkClusterFormed()
return s
}
// Adjust limits for the given account.
func (c *cluster) updateLimits(account string, newLimits *JetStreamAccountLimits) {
c.t.Helper()
for _, s := range c.servers {
acc, err := s.LookupAccount(account)
if err != nil {
c.t.Fatalf("Unexpected error: %v", err)
}
if err := acc.UpdateJetStreamLimits(newLimits); err != nil {
c.t.Fatalf("Unexpected error: %v", err)
}
}
}
// Hack for staticcheck
var skip = func(t *testing.T) {
t.SkipNow()
}
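// jsClientConnect connects to the given server and returns both the raw
// connection and a JetStream context with a 5 second max wait.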
func jsClientConnect(t *testing.T, s *Server, opts ...nats.Option) (*nats.Conn, nats.JetStreamContext) {
t.Helper()
nc, err := nats.Connect(s.ClientURL(), opts...)
if err != nil {
t.Fatalf("Failed to create client: %v", err)
}
js, err := nc.JetStream(nats.MaxWait(5 * time.Second))
if err != nil {
t.Fatalf("Unexpected error getting JetStream context: %v", err)
}
return nc, js
}
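// checkSubsPending waits until the subscription reports exactly numExpected
// pending messages, failing the test after the checkFor timeout.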
func checkSubsPending(t *testing.T, sub *nats.Subscription, numExpected int) {
t.Helper()
checkFor(t, 4*time.Second, 20*time.Millisecond, func() error {
if nmsgs, _, err := sub.Pending(); err != nil || nmsgs != numExpected {
return fmt.Errorf("Did not receive correct number of messages: %d vs %d", nmsgs, numExpected)
}
return nil
})
}
func (c *cluster) restartServer(rs *Server) *Server {
c.t.Helper()
index := -1
var opts *Options
for i, s := range c.servers {
if s == rs {
index = i
break
}
}
if index < 0 {
c.t.Fatalf("Could not find server %v to restart", rs)
}
opts = c.opts[index]
s, o := RunServerWithConfig(opts.ConfigFile)
c.servers[index] = s
c.opts[index] = o
return s
}
func (c *cluster) checkClusterFormed() {
c.t.Helper()
checkClusterFormed(c.t, c.servers...)
}
func (c *cluster) waitOnPeerCount(n int) {
c.t.Helper()
c.waitOnLeader()
leader := c.leader()
expires := time.Now().Add(10 * time.Second)
for time.Now().Before(expires) {
peers := leader.JetStreamClusterPeers()
if len(peers) == n {
return
}
time.Sleep(100 * time.Millisecond)
}
c.t.Fatalf("Expected a cluster peer count of %d, got %d", n, len(leader.JetStreamClusterPeers()))
}
func (c *cluster) waitOnConsumerLeader(account, stream, consumer string) {
c.t.Helper()
expires := time.Now().Add(10 * time.Second)
for time.Now().Before(expires) {
if leader := c.consumerLeader(account, stream, consumer); leader != nil {
time.Sleep(100 * time.Millisecond)
return
}
time.Sleep(100 * time.Millisecond)
}
c.t.Fatalf("Expected a consumer leader for %q %q %q, got none", account, stream, consumer)
}
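// consumerLeader returns the server currently acting as leader for the
// given consumer, or nil if there is none.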
func (c *cluster) consumerLeader(account, stream, consumer string) *Server {
c.t.Helper()
for _, s := range c.servers {
if s.JetStreamIsConsumerLeader(account, stream, consumer) {
return s
}
}
return nil
}
func (c *cluster) waitOnStreamLeader(account, stream string) {
c.t.Helper()
expires := time.Now().Add(10 * time.Second)
for time.Now().Before(expires) {
if leader := c.streamLeader(account, stream); leader != nil {
time.Sleep(100 * time.Millisecond)
return
}
time.Sleep(100 * time.Millisecond)
}
c.t.Fatalf("Expected a stream leader for %q %q, got none", account, stream)
}
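// randomNonStreamLeader returns a server that has the stream assigned but is
// not the stream leader, or nil if none qualifies.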
func (c *cluster) randomNonStreamLeader(account, stream string) *Server {
c.t.Helper()
for _, s := range c.servers {
if s.JetStreamIsStreamAssigned(account, stream) && !s.JetStreamIsStreamLeader(account, stream) {
return s
}
}
return nil
}
func (c *cluster) streamLeader(account, stream string) *Server {
c.t.Helper()
for _, s := range c.servers {
if s.JetStreamIsStreamLeader(account, stream) {
return s
}
}
return nil
}
func (c *cluster) waitOnStreamCurrent(s *Server, account, stream string) {
c.t.Helper()
expires := time.Now().Add(10 * time.Second)
for time.Now().Before(expires) {
if s.JetStreamIsStreamCurrent(account, stream) {
time.Sleep(100 * time.Millisecond)
return
}
time.Sleep(100 * time.Millisecond)
}
c.t.Fatalf("Expected server %q to eventually be current for stream %q", s, stream)
}
func (c *cluster) waitOnServerCurrent(s *Server) {
c.t.Helper()
expires := time.Now().Add(5 * time.Second)
for time.Now().Before(expires) {
if s.JetStreamIsCurrent() {
time.Sleep(100 * time.Millisecond)
return
}
time.Sleep(100 * time.Millisecond)
}
c.t.Fatalf("Expected server %q to eventually be current", s)
}
func (c *cluster) waitOnAllCurrent() {
for _, cs := range c.servers {
c.waitOnServerCurrent(cs)
}
}
func (c *cluster) serverByName(sname string) *Server {
for _, s := range c.servers {
if s.Name() == sname {
return s
}
}
return nil
}
func (c *cluster) randomNonLeader() *Server {
	// Ranging over a slice is deterministic, so this returns the first running non-leader.
for _, s := range c.servers {
if s.Running() && !s.JetStreamIsLeader() {
return s
}
}
return nil
}
func (c *cluster) leader() *Server {
for _, s := range c.servers {
if s.JetStreamIsLeader() {
return s
}
}
return nil
}
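// expectNoLeader fails the test if a meta leader appears before the maximum
// election timeout elapses.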
func (c *cluster) expectNoLeader() {
c.t.Helper()
expires := time.Now().Add(maxElectionTimeout)
for time.Now().Before(expires) {
if c.leader() == nil {
return
}
time.Sleep(10 * time.Millisecond)
}
c.t.Fatalf("Expected no leader but have one")
}
func (c *cluster) waitOnLeader() {
c.t.Helper()
expires := time.Now().Add(5 * time.Second)
for time.Now().Before(expires) {
if leader := c.leader(); leader != nil {
time.Sleep(100 * time.Millisecond)
return
}
time.Sleep(25 * time.Millisecond)
}
c.t.Fatalf("Expected a cluster leader, got none")
}
// Helper function to wait until the cluster has a leader and all peers are registered.
func (c *cluster) waitOnClusterReady() {
c.t.Helper()
var leader *Server
expires := time.Now().Add(10 * time.Second)
for time.Now().Before(expires) {
if leader = c.leader(); leader != nil {
break
}
time.Sleep(50 * time.Millisecond)
}
// Now make sure we have all peers.
for leader != nil && time.Now().Before(expires) {
if len(leader.JetStreamClusterPeers()) == len(c.servers) {
time.Sleep(100 * time.Millisecond)
return
}
time.Sleep(50 * time.Millisecond)
}
c.shutdown()
c.t.Fatalf("Expected a cluster leader and fully formed cluster")
}
// Helper function to remove JetStream from a server by rewriting its config
// without the jetstream block and reloading.
func (c *cluster) removeJetStream(s *Server) {
c.t.Helper()
index := -1
for i, cs := range c.servers {
if cs == s {
index = i
break
}
}
	if index < 0 {
		c.t.Fatalf("Could not find server %v to remove JetStream from", s)
	}
	cf := c.opts[index].ConfigFile
cb, _ := ioutil.ReadFile(cf)
var sb strings.Builder
for _, l := range strings.Split(string(cb), "\n") {
if !strings.HasPrefix(strings.TrimSpace(l), "jetstream") {
sb.WriteString(l + "\n")
}
}
if err := ioutil.WriteFile(cf, []byte(sb.String()), 0644); err != nil {
c.t.Fatalf("Error writing updated config file: %v", err)
}
if err := s.Reload(); err != nil {
c.t.Fatalf("Error on server reload: %v", err)
}
time.Sleep(100 * time.Millisecond)
}
// Helper function to stop all servers in the cluster.
func (c *cluster) stopAll() {
c.t.Helper()
for _, s := range c.servers {
s.Shutdown()
}
}
func (c *cluster) restartAll() {
c.t.Helper()
for i, s := range c.servers {
if !s.Running() {
opts := c.opts[i]
s, o := RunServerWithConfig(opts.ConfigFile)
c.servers[i] = s
c.opts[i] = o
}
}
c.waitOnClusterReady()
c.waitOnLeader()
}