buggy_function | fixed_function |
---|---|
protected byte readByte () throws DRDAProtocolException
{
ensureBLayerDataInBuffer (1, ADJUST_LENGTHS);
return (byte) (buffer[pos++] & 0xff);
}
| protected byte readByte () throws DRDAProtocolException
{
ensureBLayerDataInBuffer (1, ADJUST_LENGTHS);
return buffer[pos++];
}
|
private boolean readBoolean(int codepoint) throws DRDAProtocolException
{
checkLength(codepoint, 1);
int val = reader.readByte();
if (val == CodePoint.TRUE)
return true;
else if (val == CodePoint.FALSE)
return false;
else
invalidValue(codepoint);
return false; //to shut the compiler up
}
| private boolean readBoolean(int codepoint) throws DRDAProtocolException
{
checkLength(codepoint, 1);
byte val = reader.readByte();
if (val == CodePoint.TRUE)
return true;
else if (val == CodePoint.FALSE)
return false;
else
invalidValue(codepoint);
return false; //to shut the compiler up
}
|
public int run(String[] args) throws IOException, ClassNotFoundException, InterruptedException {
addInputOption();
addOutputOption();
addOption("numRecommendations", "n", "Number of recommendations per user",
String.valueOf(AggregateAndRecommendReducer.DEFAULT_NUM_RECOMMENDATIONS));
addOption("usersFile", "u", "File of users to recommend for", null);
addOption("itemsFile", "i", "File of items to recommend for", null);
addOption("filterFile", "f", "File containing comma-separated userID,itemID pairs. Used to exclude the item from "
+ "the recommendations for that user (optional)", null);
addOption("booleanData", "b", "Treat input as without pref values", Boolean.FALSE.toString());
addOption("maxPrefsPerUser", "mp",
"Maximum number of preferences considered per user in final recommendation phase",
String.valueOf(UserVectorSplitterMapper.DEFAULT_MAX_PREFS_PER_USER_CONSIDERED));
addOption("minPrefsPerUser", "mp", "ignore users with less preferences than this in the similarity computation "
+ "(default: " + DEFAULT_MIN_PREFS_PER_USER + ')', String.valueOf(DEFAULT_MIN_PREFS_PER_USER));
addOption("maxSimilaritiesPerItem", "m", "Maximum number of similarities considered per item ",
String.valueOf(DEFAULT_MAX_SIMILARITIES_PER_ITEM));
addOption("maxCooccurrencesPerItem", "mo", "try to cap the number of cooccurrences per item to this "
+ "number (default: " + DEFAULT_MAX_COOCCURRENCES_PER_ITEM + ')',
String.valueOf(DEFAULT_MAX_COOCCURRENCES_PER_ITEM));
addOption("similarityClassname", "s", "Name of distributed similarity class to instantiate, alternatively use "
+ "one of the predefined similarities (" + SimilarityType.listEnumNames() + ')',
String.valueOf(SimilarityType.SIMILARITY_COOCCURRENCE));
Map<String,String> parsedArgs = parseArguments(args);
if (parsedArgs == null) {
return -1;
}
Path inputPath = getInputPath();
Path outputPath = getOutputPath();
int numRecommendations = Integer.parseInt(parsedArgs.get("--numRecommendations"));
String usersFile = parsedArgs.get("--usersFile");
String itemsFile = parsedArgs.get("--itemsFile");
String filterFile = parsedArgs.get("--filterFile");
boolean booleanData = Boolean.valueOf(parsedArgs.get("--booleanData"));
int maxPrefsPerUser = Integer.parseInt(parsedArgs.get("--maxPrefsPerUser"));
int minPrefsPerUser = Integer.parseInt(parsedArgs.get("--minPrefsPerUser"));
int maxSimilaritiesPerItem = Integer.parseInt(parsedArgs.get("--maxSimilaritiesPerItem"));
int maxCooccurrencesPerItem = Integer.parseInt(parsedArgs.get("--maxCooccurrencesPerItem"));
String similarityClassname = parsedArgs.get("--similarityClassname");
Path userVectorPath = getTempPath("userVectors");
Path itemIDIndexPath = getTempPath("itemIDIndex");
Path countUsersPath = getTempPath("countUsers");
Path itemUserMatrixPath = getTempPath("itemUserMatrix");
Path similarityMatrixPath = getTempPath("similarityMatrix");
Path prePartialMultiplyPath1 = getTempPath("prePartialMultiply1");
Path prePartialMultiplyPath2 = getTempPath("prePartialMultiply2");
Path explicitFilterPath = getTempPath("explicitFilterPath");
Path partialMultiplyPath = getTempPath("partialMultiply");
AtomicInteger currentPhase = new AtomicInteger();
if (shouldRunNextPhase(parsedArgs, currentPhase)) {
Job itemIDIndex = prepareJob(
inputPath, itemIDIndexPath, TextInputFormat.class,
ItemIDIndexMapper.class, VarIntWritable.class, VarLongWritable.class,
ItemIDIndexReducer.class, VarIntWritable.class, VarLongWritable.class,
SequenceFileOutputFormat.class);
itemIDIndex.setCombinerClass(ItemIDIndexReducer.class);
itemIDIndex.waitForCompletion(true);
}
int numberOfUsers = 0;
if (shouldRunNextPhase(parsedArgs, currentPhase)) {
Job toUserVector = prepareJob(
inputPath, userVectorPath, TextInputFormat.class,
ToItemPrefsMapper.class, VarLongWritable.class, booleanData ? VarLongWritable.class : EntityPrefWritable.class,
ToUserVectorReducer.class, VarLongWritable.class, VectorWritable.class,
SequenceFileOutputFormat.class);
toUserVector.getConfiguration().setBoolean(BOOLEAN_DATA, booleanData);
toUserVector.getConfiguration().setInt(ToUserVectorReducer.MIN_PREFERENCES_PER_USER, minPrefsPerUser);
toUserVector.waitForCompletion(true);
numberOfUsers = (int) toUserVector.getCounters().findCounter(ToUserVectorReducer.Counters.USERS).getValue();
}
if (shouldRunNextPhase(parsedArgs, currentPhase)) {
Job maybePruneAndTransponse = prepareJob(userVectorPath,
itemUserMatrixPath,
SequenceFileInputFormat.class,
MaybePruneRowsMapper.class,
IntWritable.class,
DistributedRowMatrix.MatrixEntryWritable.class,
ToItemVectorsReducer.class,
IntWritable.class,
VectorWritable.class,
SequenceFileOutputFormat.class);
maybePruneAndTransponse.getConfiguration().setInt(MaybePruneRowsMapper.MAX_COOCCURRENCES,
maxCooccurrencesPerItem);
maybePruneAndTransponse.waitForCompletion(true);
}
if (shouldRunNextPhase(parsedArgs, currentPhase)) {
/* Once DistributedRowMatrix uses the hadoop 0.20 API, we should refactor this call to something like
* new DistributedRowMatrix(...).rowSimilarity(...) */
try {
ToolRunner.run(getConf(), new RowSimilarityJob(), new String[] {
"-Dmapred.input.dir=" + itemUserMatrixPath,
"-Dmapred.output.dir=" + similarityMatrixPath,
"--numberOfColumns", String.valueOf(numberOfUsers),
"--similarityClassname", similarityClassname,
"--maxSimilaritiesPerRow", String.valueOf(maxSimilaritiesPerItem + 1),
"--tempDir", getTempPath().toString() });
} catch (Exception e) {
throw new IllegalStateException("item-item-similarity computation failed", e);
}
}
if (shouldRunNextPhase(parsedArgs, currentPhase)) {
Job prePartialMultiply1 = prepareJob(
similarityMatrixPath, prePartialMultiplyPath1, SequenceFileInputFormat.class,
SimilarityMatrixRowWrapperMapper.class, VarIntWritable.class, VectorOrPrefWritable.class,
Reducer.class, VarIntWritable.class, VectorOrPrefWritable.class,
SequenceFileOutputFormat.class);
prePartialMultiply1.waitForCompletion(true);
Job prePartialMultiply2 = prepareJob(
userVectorPath, prePartialMultiplyPath2, SequenceFileInputFormat.class,
UserVectorSplitterMapper.class, VarIntWritable.class, VectorOrPrefWritable.class,
Reducer.class, VarIntWritable.class, VectorOrPrefWritable.class,
SequenceFileOutputFormat.class);
if (usersFile != null) {
prePartialMultiply2.getConfiguration().set(UserVectorSplitterMapper.USERS_FILE, usersFile);
}
prePartialMultiply2.getConfiguration().setInt(UserVectorSplitterMapper.MAX_PREFS_PER_USER_CONSIDERED,
maxPrefsPerUser);
prePartialMultiply2.waitForCompletion(true);
Job partialMultiply = prepareJob(
new Path(prePartialMultiplyPath1 + "," + prePartialMultiplyPath2), partialMultiplyPath,
SequenceFileInputFormat.class, Mapper.class, VarIntWritable.class, VectorOrPrefWritable.class,
ToVectorAndPrefReducer.class, VarIntWritable.class, VectorAndPrefsWritable.class,
SequenceFileOutputFormat.class);
setS3SafeCombinedInputPath(partialMultiply, getTempPath(), prePartialMultiplyPath1, prePartialMultiplyPath2);
partialMultiply.waitForCompletion(true);
}
if (shouldRunNextPhase(parsedArgs, currentPhase)) {
/* convert the user/item pairs to filter if a filterfile has been specified */
if (filterFile != null) {
Job itemFiltering = prepareJob(new Path(filterFile), explicitFilterPath, TextInputFormat.class,
ItemFilterMapper.class, VarLongWritable.class, VarLongWritable.class,
ItemFilterAsVectorAndPrefsReducer.class, VarIntWritable.class, VectorAndPrefsWritable.class,
SequenceFileOutputFormat.class);
itemFiltering.waitForCompletion(true);
}
String aggregateAndRecommendInput = partialMultiplyPath.toString();
if (filterFile != null) {
aggregateAndRecommendInput += "," + explicitFilterPath;
}
Job aggregateAndRecommend = prepareJob(
new Path(aggregateAndRecommendInput), outputPath, SequenceFileInputFormat.class,
PartialMultiplyMapper.class, VarLongWritable.class, PrefAndSimilarityColumnWritable.class,
AggregateAndRecommendReducer.class, VarLongWritable.class, RecommendedItemsWritable.class,
TextOutputFormat.class);
Configuration aggregateAndRecommendConf = aggregateAndRecommend.getConfiguration();
if (itemsFile != null) {
aggregateAndRecommendConf.set(AggregateAndRecommendReducer.ITEMS_FILE, itemsFile);
}
if (filterFile != null) {
setS3SafeCombinedInputPath(aggregateAndRecommend, getTempPath(), partialMultiplyPath, explicitFilterPath);
}
setIOSort(aggregateAndRecommend);
aggregateAndRecommendConf.set(AggregateAndRecommendReducer.ITEMID_INDEX_PATH, itemIDIndexPath.toString());
aggregateAndRecommendConf.setInt(AggregateAndRecommendReducer.NUM_RECOMMENDATIONS, numRecommendations);
aggregateAndRecommendConf.setBoolean(BOOLEAN_DATA, booleanData);
aggregateAndRecommend.waitForCompletion(true);
}
return 0;
}
| public int run(String[] args) throws IOException, ClassNotFoundException, InterruptedException {
addInputOption();
addOutputOption();
addOption("numRecommendations", "n", "Number of recommendations per user",
String.valueOf(AggregateAndRecommendReducer.DEFAULT_NUM_RECOMMENDATIONS));
addOption("usersFile", "u", "File of users to recommend for", null);
addOption("itemsFile", "i", "File of items to recommend for", null);
addOption("filterFile", "f", "File containing comma-separated userID,itemID pairs. Used to exclude the item from "
+ "the recommendations for that user (optional)", null);
addOption("booleanData", "b", "Treat input as without pref values", Boolean.FALSE.toString());
addOption("maxPrefsPerUser", "mxp",
"Maximum number of preferences considered per user in final recommendation phase",
String.valueOf(UserVectorSplitterMapper.DEFAULT_MAX_PREFS_PER_USER_CONSIDERED));
addOption("minPrefsPerUser", "mp", "ignore users with less preferences than this in the similarity computation "
+ "(default: " + DEFAULT_MIN_PREFS_PER_USER + ')', String.valueOf(DEFAULT_MIN_PREFS_PER_USER));
addOption("maxSimilaritiesPerItem", "m", "Maximum number of similarities considered per item ",
String.valueOf(DEFAULT_MAX_SIMILARITIES_PER_ITEM));
addOption("maxCooccurrencesPerItem", "mo", "try to cap the number of cooccurrences per item to this "
+ "number (default: " + DEFAULT_MAX_COOCCURRENCES_PER_ITEM + ')',
String.valueOf(DEFAULT_MAX_COOCCURRENCES_PER_ITEM));
addOption("similarityClassname", "s", "Name of distributed similarity class to instantiate, alternatively use "
+ "one of the predefined similarities (" + SimilarityType.listEnumNames() + ')',
String.valueOf(SimilarityType.SIMILARITY_COOCCURRENCE));
Map<String,String> parsedArgs = parseArguments(args);
if (parsedArgs == null) {
return -1;
}
Path inputPath = getInputPath();
Path outputPath = getOutputPath();
int numRecommendations = Integer.parseInt(parsedArgs.get("--numRecommendations"));
String usersFile = parsedArgs.get("--usersFile");
String itemsFile = parsedArgs.get("--itemsFile");
String filterFile = parsedArgs.get("--filterFile");
boolean booleanData = Boolean.valueOf(parsedArgs.get("--booleanData"));
int maxPrefsPerUser = Integer.parseInt(parsedArgs.get("--maxPrefsPerUser"));
int minPrefsPerUser = Integer.parseInt(parsedArgs.get("--minPrefsPerUser"));
int maxSimilaritiesPerItem = Integer.parseInt(parsedArgs.get("--maxSimilaritiesPerItem"));
int maxCooccurrencesPerItem = Integer.parseInt(parsedArgs.get("--maxCooccurrencesPerItem"));
String similarityClassname = parsedArgs.get("--similarityClassname");
Path userVectorPath = getTempPath("userVectors");
Path itemIDIndexPath = getTempPath("itemIDIndex");
Path countUsersPath = getTempPath("countUsers");
Path itemUserMatrixPath = getTempPath("itemUserMatrix");
Path similarityMatrixPath = getTempPath("similarityMatrix");
Path prePartialMultiplyPath1 = getTempPath("prePartialMultiply1");
Path prePartialMultiplyPath2 = getTempPath("prePartialMultiply2");
Path explicitFilterPath = getTempPath("explicitFilterPath");
Path partialMultiplyPath = getTempPath("partialMultiply");
AtomicInteger currentPhase = new AtomicInteger();
if (shouldRunNextPhase(parsedArgs, currentPhase)) {
Job itemIDIndex = prepareJob(
inputPath, itemIDIndexPath, TextInputFormat.class,
ItemIDIndexMapper.class, VarIntWritable.class, VarLongWritable.class,
ItemIDIndexReducer.class, VarIntWritable.class, VarLongWritable.class,
SequenceFileOutputFormat.class);
itemIDIndex.setCombinerClass(ItemIDIndexReducer.class);
itemIDIndex.waitForCompletion(true);
}
int numberOfUsers = 0;
if (shouldRunNextPhase(parsedArgs, currentPhase)) {
Job toUserVector = prepareJob(
inputPath, userVectorPath, TextInputFormat.class,
ToItemPrefsMapper.class, VarLongWritable.class, booleanData ? VarLongWritable.class : EntityPrefWritable.class,
ToUserVectorReducer.class, VarLongWritable.class, VectorWritable.class,
SequenceFileOutputFormat.class);
toUserVector.getConfiguration().setBoolean(BOOLEAN_DATA, booleanData);
toUserVector.getConfiguration().setInt(ToUserVectorReducer.MIN_PREFERENCES_PER_USER, minPrefsPerUser);
toUserVector.waitForCompletion(true);
numberOfUsers = (int) toUserVector.getCounters().findCounter(ToUserVectorReducer.Counters.USERS).getValue();
}
if (shouldRunNextPhase(parsedArgs, currentPhase)) {
Job maybePruneAndTransponse = prepareJob(userVectorPath,
itemUserMatrixPath,
SequenceFileInputFormat.class,
MaybePruneRowsMapper.class,
IntWritable.class,
DistributedRowMatrix.MatrixEntryWritable.class,
ToItemVectorsReducer.class,
IntWritable.class,
VectorWritable.class,
SequenceFileOutputFormat.class);
maybePruneAndTransponse.getConfiguration().setInt(MaybePruneRowsMapper.MAX_COOCCURRENCES,
maxCooccurrencesPerItem);
maybePruneAndTransponse.waitForCompletion(true);
}
if (shouldRunNextPhase(parsedArgs, currentPhase)) {
/* Once DistributedRowMatrix uses the hadoop 0.20 API, we should refactor this call to something like
* new DistributedRowMatrix(...).rowSimilarity(...) */
try {
ToolRunner.run(getConf(), new RowSimilarityJob(), new String[] {
"-Dmapred.input.dir=" + itemUserMatrixPath,
"-Dmapred.output.dir=" + similarityMatrixPath,
"--numberOfColumns", String.valueOf(numberOfUsers),
"--similarityClassname", similarityClassname,
"--maxSimilaritiesPerRow", String.valueOf(maxSimilaritiesPerItem + 1),
"--tempDir", getTempPath().toString() });
} catch (Exception e) {
throw new IllegalStateException("item-item-similarity computation failed", e);
}
}
if (shouldRunNextPhase(parsedArgs, currentPhase)) {
Job prePartialMultiply1 = prepareJob(
similarityMatrixPath, prePartialMultiplyPath1, SequenceFileInputFormat.class,
SimilarityMatrixRowWrapperMapper.class, VarIntWritable.class, VectorOrPrefWritable.class,
Reducer.class, VarIntWritable.class, VectorOrPrefWritable.class,
SequenceFileOutputFormat.class);
prePartialMultiply1.waitForCompletion(true);
Job prePartialMultiply2 = prepareJob(
userVectorPath, prePartialMultiplyPath2, SequenceFileInputFormat.class,
UserVectorSplitterMapper.class, VarIntWritable.class, VectorOrPrefWritable.class,
Reducer.class, VarIntWritable.class, VectorOrPrefWritable.class,
SequenceFileOutputFormat.class);
if (usersFile != null) {
prePartialMultiply2.getConfiguration().set(UserVectorSplitterMapper.USERS_FILE, usersFile);
}
prePartialMultiply2.getConfiguration().setInt(UserVectorSplitterMapper.MAX_PREFS_PER_USER_CONSIDERED,
maxPrefsPerUser);
prePartialMultiply2.waitForCompletion(true);
Job partialMultiply = prepareJob(
new Path(prePartialMultiplyPath1 + "," + prePartialMultiplyPath2), partialMultiplyPath,
SequenceFileInputFormat.class, Mapper.class, VarIntWritable.class, VectorOrPrefWritable.class,
ToVectorAndPrefReducer.class, VarIntWritable.class, VectorAndPrefsWritable.class,
SequenceFileOutputFormat.class);
setS3SafeCombinedInputPath(partialMultiply, getTempPath(), prePartialMultiplyPath1, prePartialMultiplyPath2);
partialMultiply.waitForCompletion(true);
}
if (shouldRunNextPhase(parsedArgs, currentPhase)) {
/* convert the user/item pairs to filter if a filterfile has been specified */
if (filterFile != null) {
Job itemFiltering = prepareJob(new Path(filterFile), explicitFilterPath, TextInputFormat.class,
ItemFilterMapper.class, VarLongWritable.class, VarLongWritable.class,
ItemFilterAsVectorAndPrefsReducer.class, VarIntWritable.class, VectorAndPrefsWritable.class,
SequenceFileOutputFormat.class);
itemFiltering.waitForCompletion(true);
}
String aggregateAndRecommendInput = partialMultiplyPath.toString();
if (filterFile != null) {
aggregateAndRecommendInput += "," + explicitFilterPath;
}
Job aggregateAndRecommend = prepareJob(
new Path(aggregateAndRecommendInput), outputPath, SequenceFileInputFormat.class,
PartialMultiplyMapper.class, VarLongWritable.class, PrefAndSimilarityColumnWritable.class,
AggregateAndRecommendReducer.class, VarLongWritable.class, RecommendedItemsWritable.class,
TextOutputFormat.class);
Configuration aggregateAndRecommendConf = aggregateAndRecommend.getConfiguration();
if (itemsFile != null) {
aggregateAndRecommendConf.set(AggregateAndRecommendReducer.ITEMS_FILE, itemsFile);
}
if (filterFile != null) {
setS3SafeCombinedInputPath(aggregateAndRecommend, getTempPath(), partialMultiplyPath, explicitFilterPath);
}
setIOSort(aggregateAndRecommend);
aggregateAndRecommendConf.set(AggregateAndRecommendReducer.ITEMID_INDEX_PATH, itemIDIndexPath.toString());
aggregateAndRecommendConf.setInt(AggregateAndRecommendReducer.NUM_RECOMMENDATIONS, numRecommendations);
aggregateAndRecommendConf.setBoolean(BOOLEAN_DATA, booleanData);
aggregateAndRecommend.waitForCompletion(true);
}
return 0;
}
|
public void testFSDirectoryFilter() throws IOException {
checkDirectoryFilter(FSDirectory.open(new File("test")));
}
| public void testFSDirectoryFilter() throws IOException {
checkDirectoryFilter(FSDirectory.open(new File(System.getProperty("tempDir"),"test")));
}
|
private void schedule()
{
requestScheduler.queue(Thread.currentThread(), clientState.getSchedulingId());
}
| private void schedule()
{
requestScheduler.queue(Thread.currentThread(), clientState.getSchedulingValue());
}
|
private void syncRequest(Node node, UpdateRequestExt ureq) {
Request sreq = new Request();
sreq.node = node;
sreq.ureq = ureq;
String url = node.getUrl();
String fullUrl;
if (!url.startsWith("http://") && !url.startsWith("https://")) {
fullUrl = "http://" + url;
} else {
fullUrl = url;
}
HttpSolrServer server = new HttpSolrServer(fullUrl,
updateShardHandler.getHttpClient());
try {
sreq.ursp = server.request(ureq);
} catch (Exception e) {
throw new SolrException(ErrorCode.SERVER_ERROR, "Failed synchronous update on shard " + sreq.node, sreq.exception);
}
}
| private void syncRequest(Node node, UpdateRequestExt ureq) {
Request sreq = new Request();
sreq.node = node;
sreq.ureq = ureq;
String url = node.getUrl();
String fullUrl;
if (!url.startsWith("http://") && !url.startsWith("https://")) {
fullUrl = "http://" + url;
} else {
fullUrl = url;
}
HttpSolrServer server = new HttpSolrServer(fullUrl,
updateShardHandler.getHttpClient());
try {
sreq.ursp = server.request(ureq);
} catch (Exception e) {
throw new SolrException(ErrorCode.SERVER_ERROR, "Failed synchronous update on shard " + sreq.node + " update: " + ureq , e);
}
}
|
public String init(NamedList config, SolrCore core) {
LOG.info("init: " + config);
String name = super.init(config, core);
threshold = config.get(THRESHOLD_TOKEN_FREQUENCY) == null ? 0.0f
: (Float) config.get(THRESHOLD_TOKEN_FREQUENCY);
sourceLocation = (String) config.get(LOCATION);
field = (String)config.get(FIELD);
lookupImpl = (String)config.get(LOOKUP_IMPL);
if (lookupImpl == null) {
lookupImpl = JaspellLookup.class.getName();
}
String store = (String)config.get(STORE_DIR);
if (store != null) {
storeDir = new File(store);
if (!storeDir.isAbsolute()) {
storeDir = new File(core.getDataDir() + File.separator + storeDir);
}
if (!storeDir.exists()) {
storeDir.mkdirs();
}
}
return name;
}
| public String init(NamedList config, SolrCore core) {
LOG.info("init: " + config);
String name = super.init(config, core);
threshold = config.get(THRESHOLD_TOKEN_FREQUENCY) == null ? 0.0f
: Float.valueOf((String)config.get(THRESHOLD_TOKEN_FREQUENCY));
sourceLocation = (String) config.get(LOCATION);
field = (String)config.get(FIELD);
lookupImpl = (String)config.get(LOOKUP_IMPL);
if (lookupImpl == null) {
lookupImpl = JaspellLookup.class.getName();
}
String store = (String)config.get(STORE_DIR);
if (store != null) {
storeDir = new File(store);
if (!storeDir.isAbsolute()) {
storeDir = new File(core.getDataDir() + File.separator + storeDir);
}
if (!storeDir.exists()) {
storeDir.mkdirs();
}
}
return name;
}
|
public void cacheCurrentTerm(SegmentTermEnum enumerator) {
termsCache.put(new CloneableTerm(enumerator.term()),
new TermInfoAndOrd(enumerator.termInfo,
enumerator.position));
}
TermInfo seekEnum(SegmentTermEnum enumerator, Term term, boolean useCache) throws IOException {
if (useCache) {
return seekEnum(enumerator, term, termsCache.get(new CloneableTerm(term)), useCache);
} else {
return seekEnum(enumerator, term, null, useCache);
}
}
TermInfo seekEnum(SegmentTermEnum enumerator, Term term, TermInfoAndOrd tiOrd, boolean useCache) throws IOException {
if (size == 0) {
return null;
}
// optimize sequential access: first try scanning cached enum w/o seeking
if (enumerator.term() != null // term is at or past current
&& ((enumerator.prev() != null && term.compareToUTF16(enumerator.prev())> 0)
|| term.compareToUTF16(enumerator.term()) >= 0)) {
int enumOffset = (int)(enumerator.position/totalIndexInterval)+1;
if (indexTerms.length == enumOffset // but before end of block
|| term.compareToUTF16(indexTerms[enumOffset]) < 0) {
// no need to seek
final TermInfo ti;
int numScans = enumerator.scanTo(term);
if (enumerator.term() != null && term.compareToUTF16(enumerator.term()) == 0) {
ti = enumerator.termInfo;
if (numScans > 1) {
// we only want to put this TermInfo into the cache if
// scanEnum skipped more than one dictionary entry.
// This prevents RangeQueries or WildcardQueries to
// wipe out the cache when they iterate over a large numbers
// of terms in order
if (tiOrd == null) {
if (useCache) {
termsCache.put(new CloneableTerm(term), new TermInfoAndOrd(ti, enumerator.position));
}
} else {
assert sameTermInfo(ti, tiOrd, enumerator);
assert (int) enumerator.position == tiOrd.termOrd;
}
}
} else {
ti = null;
}
return ti;
}
}
// random-access: must seek
final int indexPos;
if (tiOrd != null) {
indexPos = (int) (tiOrd.termOrd / totalIndexInterval);
} else {
// Must do binary search:
indexPos = getIndexOffset(term);
}
seekEnum(enumerator, indexPos);
enumerator.scanTo(term);
final TermInfo ti;
if (enumerator.term() != null && term.compareToUTF16(enumerator.term()) == 0) {
ti = enumerator.termInfo;
if (tiOrd == null) {
if (useCache) {
termsCache.put(new CloneableTerm(term), new TermInfoAndOrd(ti, enumerator.position));
}
} else {
assert sameTermInfo(ti, tiOrd, enumerator);
assert (int) enumerator.position == tiOrd.termOrd;
}
} else {
ti = null;
}
return ti;
}
| public void cacheCurrentTerm(SegmentTermEnum enumerator) {
termsCache.put(new CloneableTerm(enumerator.term()),
new TermInfoAndOrd(enumerator.termInfo,
enumerator.position));
}
TermInfo seekEnum(SegmentTermEnum enumerator, Term term, boolean useCache) throws IOException {
if (useCache) {
return seekEnum(enumerator, term, termsCache.get(new CloneableTerm(term)), useCache);
} else {
return seekEnum(enumerator, term, null, useCache);
}
}
TermInfo seekEnum(SegmentTermEnum enumerator, Term term, TermInfoAndOrd tiOrd, boolean useCache) throws IOException {
if (size == 0) {
return null;
}
// optimize sequential access: first try scanning cached enum w/o seeking
if (enumerator.term() != null // term is at or past current
&& ((enumerator.prev() != null && term.compareToUTF16(enumerator.prev())> 0)
|| term.compareToUTF16(enumerator.term()) >= 0)) {
int enumOffset = (int)(enumerator.position/totalIndexInterval)+1;
if (indexTerms.length == enumOffset // but before end of block
|| term.compareToUTF16(indexTerms[enumOffset]) < 0) {
// no need to seek
final TermInfo ti;
int numScans = enumerator.scanTo(term);
if (enumerator.term() != null && term.compareToUTF16(enumerator.term()) == 0) {
ti = enumerator.termInfo;
if (numScans > 1) {
// we only want to put this TermInfo into the cache if
// scanEnum skipped more than one dictionary entry.
// This prevents RangeQueries or WildcardQueries to
// wipe out the cache when they iterate over a large numbers
// of terms in order
if (tiOrd == null) {
if (useCache) {
termsCache.put(new CloneableTerm(term), new TermInfoAndOrd(ti, enumerator.position));
}
} else {
assert sameTermInfo(ti, tiOrd, enumerator);
assert (int) enumerator.position == tiOrd.termOrd;
}
}
} else {
ti = null;
}
return ti;
}
}
// random-access: must seek
final int indexPos;
if (tiOrd != null) {
indexPos = (int) (tiOrd.termOrd / totalIndexInterval);
} else {
// Must do binary search:
indexPos = getIndexOffset(term);
}
seekEnum(enumerator, indexPos);
enumerator.scanTo(term);
final TermInfo ti;
if (enumerator.term() != null && term.compareToUTF16(enumerator.term()) == 0) {
ti = enumerator.termInfo;
if (tiOrd == null) {
if (useCache) {
termsCache.put(new CloneableTerm(term), new TermInfoAndOrd(ti, enumerator.position));
}
} else {
assert sameTermInfo(ti, tiOrd, enumerator);
assert enumerator.position == tiOrd.termOrd;
}
} else {
ti = null;
}
return ti;
}
|
private static final char UNABLE_TO_PROXY = '#';
public static Class<?> getProxySubclass(Class<?> aClass) throws UnableToProxyException
{
LOGGER.debug(AsmInterceptorWrapper.LOG_ENTRY, "getProxySubclass", new Object[] { aClass });
ClassLoader loader = aClass.getClassLoader();
// in the special case where the loader is null we use the thread
// ContextClassLoader
// this is for subclassing java.* or javax.* packages
if (loader == null) loader = Thread.currentThread().getContextClassLoader();
ConcurrentMap<String, String> proxyMap;
synchronized (loader) {
proxyMap = proxyClassesByClassLoader.get(loader);
if (proxyMap == null) {
proxyMap = new ConcurrentHashMap<String, String>();
proxyClassesByClassLoader.put(loader, proxyMap);
}
}
// check the map to see if we have already generated a subclass for this
// class
// if we have return the mapped class object
// if we haven't generate the subclass and return it
Class<?> classToReturn = null;
synchronized (aClass) {
String key = aClass.getName();
String className = proxyMap.get(key);
if (className != null) {
LOGGER.debug("Found proxy subclass with key {} and name {}.", key, className);
if (className.charAt(0) == FINAL_MODIFIER) {
String[] exceptionParts = className.substring(1).split(":");
if (exceptionParts.length == 1) {
throw new FinalModifierException(aClass);
} else {
throw new FinalModifierException(aClass, exceptionParts[1]);
}
} else if (className.charAt(0) == UNABLE_TO_PROXY) {
throw new UnableToProxyException(aClass);
}
try {
classToReturn = loader.loadClass(className);
} catch (ClassNotFoundException cnfe) {
LOGGER.debug(AsmInterceptorWrapper.LOG_EXCEPTION, cnfe);
throw new UnableToLoadProxyException(className, cnfe);
}
} else {
LOGGER.debug("Need to generate subclass. Using key {}.", key);
try {
scanForFinalModifiers(aClass);
classToReturn = generateAndLoadSubclass(aClass, loader);
if (classToReturn != null) {
proxyMap.put(key, classToReturn.getName());
} else {
proxyMap.put(key, UNABLE_TO_PROXY + aClass.getName());
throw new UnableToProxyException(aClass);
}
} catch (FinalModifierException e) {
if (e.isFinalClass()) {
proxyMap.put(key, FINAL_MODIFIER + e.getClassName());
throw e;
} else {
proxyMap.put(key, FINAL_MODIFIER + e.getClassName() + ':' + e.getFinalMethods());
throw e;
}
}
}
}
LOGGER.debug(AsmInterceptorWrapper.LOG_EXIT, "getProxySubclass", classToReturn);
return classToReturn;
}
public static Object newProxySubclassInstance(Class<?> classToProxy, InvocationHandler ih)
throws UnableToProxyException
{
LOGGER.debug(AsmInterceptorWrapper.LOG_ENTRY, "newProxySubclassInstance", new Object[] {
classToProxy, ih });
Object proxySubclassInstance = null;
try {
Class<?> generatedProxySubclass = getProxySubclass(classToProxy);
LOGGER.debug("Getting the proxy subclass constructor");
Constructor<?> subclassConstructor = generatedProxySubclass
.getConstructor(new Class[] { InvocationHandler.class });
LOGGER.debug("Invoking the proxy subclass constructor");
proxySubclassInstance = subclassConstructor.newInstance(ih);
LOGGER.debug("Invoked proxy subclass constructor");
} catch (NoSuchMethodException nsme) {
LOGGER.debug(AsmInterceptorWrapper.LOG_EXCEPTION, nsme);
throw new ProxyClassInstantiationException(classToProxy, nsme);
} catch (InvocationTargetException ite) {
LOGGER.debug(AsmInterceptorWrapper.LOG_EXCEPTION, ite);
throw new ProxyClassInstantiationException(classToProxy, ite);
} catch (InstantiationException ie) {
LOGGER.debug(AsmInterceptorWrapper.LOG_EXCEPTION, ie);
throw new ProxyClassInstantiationException(classToProxy, ie);
} catch (IllegalAccessException iae) {
LOGGER.debug(AsmInterceptorWrapper.LOG_EXCEPTION, iae);
throw new ProxyClassInstantiationException(classToProxy, iae);
}
LOGGER.debug(AsmInterceptorWrapper.LOG_EXIT, "newProxySubclassInstance", proxySubclassInstance);
return proxySubclassInstance;
}
private static Class<?> generateAndLoadSubclass(Class<?> aClass, ClassLoader loader)
throws UnableToProxyException
{
LOGGER.debug(AsmInterceptorWrapper.LOG_ENTRY, "generateAndLoadSubclass", new Object[] { aClass,
loader });
// set the newClassName
String newClassName = "$" + aClass.getSimpleName() + aClass.hashCode();
String packageName = aClass.getPackage().getName();
if (packageName.startsWith("java.") || packageName.startsWith("javax.")) {
packageName = "com.ibm.osgi.blueprint.proxy." + packageName;
}
String fullNewClassName = (packageName + "." + newClassName).replaceAll("\\.", "/");
LOGGER.debug("New class name: {}", newClassName);
LOGGER.debug("Full new class name: {}", fullNewClassName);
Class<?> clazz = null;
try {
ClassReader cReader = new ClassReader(loader.getResourceAsStream(aClass.getName().replaceAll(
"\\.", "/")
+ ".class"));
ClassWriter cWriter = new ClassWriter(ClassWriter.COMPUTE_MAXS);
ClassVisitor dynamicSubclassAdapter = new ProxySubclassAdapter(cWriter, fullNewClassName,
loader);
byte[] byteClassData = processClass(cReader, cWriter, dynamicSubclassAdapter);
clazz = loadClassFromBytes(loader, getBinaryName(fullNewClassName), byteClassData, aClass
.getName());
} catch (IOException ioe) {
LOGGER.debug(AsmInterceptorWrapper.LOG_EXCEPTION, ioe);
throw new ProxyClassBytecodeGenerationException(aClass.getName(), ioe);
} catch (TypeNotPresentException tnpe) {
LOGGER.debug(AsmInterceptorWrapper.LOG_EXCEPTION, tnpe);
throw new ProxyClassBytecodeGenerationException(tnpe.typeName(), tnpe.getCause());
}
LOGGER.debug(AsmInterceptorWrapper.LOG_EXIT, "generateAndLoadSubclass", clazz);
return clazz;
}
| private static final char UNABLE_TO_PROXY = '#';
public static Class<?> getProxySubclass(Class<?> aClass) throws UnableToProxyException
{
LOGGER.debug(AsmInterceptorWrapper.LOG_ENTRY, "getProxySubclass", new Object[] { aClass });
ClassLoader loader = aClass.getClassLoader();
// in the special case where the loader is null we use the thread
// ContextClassLoader
// this is for subclassing java.* or javax.* packages
if (loader == null) loader = Thread.currentThread().getContextClassLoader();
ConcurrentMap<String, String> proxyMap;
synchronized (loader) {
proxyMap = proxyClassesByClassLoader.get(loader);
if (proxyMap == null) {
proxyMap = new ConcurrentHashMap<String, String>();
proxyClassesByClassLoader.put(loader, proxyMap);
}
}
// check the map to see if we have already generated a subclass for this
// class
// if we have return the mapped class object
// if we haven't generate the subclass and return it
Class<?> classToReturn = null;
synchronized (aClass) {
String key = aClass.getName();
String className = proxyMap.get(key);
if (className != null) {
LOGGER.debug("Found proxy subclass with key {} and name {}.", key, className);
if (className.charAt(0) == FINAL_MODIFIER) {
String[] exceptionParts = className.substring(1).split(":");
if (exceptionParts.length == 1) {
throw new FinalModifierException(aClass);
} else {
throw new FinalModifierException(aClass, exceptionParts[1]);
}
} else if (className.charAt(0) == UNABLE_TO_PROXY) {
throw new UnableToProxyException(aClass);
}
try {
classToReturn = loader.loadClass(className);
} catch (ClassNotFoundException cnfe) {
LOGGER.debug(AsmInterceptorWrapper.LOG_EXCEPTION, cnfe);
throw new UnableToLoadProxyException(className, cnfe);
}
} else {
LOGGER.debug("Need to generate subclass. Using key {}.", key);
try {
scanForFinalModifiers(aClass);
classToReturn = generateAndLoadSubclass(aClass, loader);
if (classToReturn != null) {
proxyMap.put(key, classToReturn.getName());
} else {
proxyMap.put(key, UNABLE_TO_PROXY + aClass.getName());
throw new UnableToProxyException(aClass);
}
} catch (FinalModifierException e) {
if (e.isFinalClass()) {
proxyMap.put(key, FINAL_MODIFIER + e.getClassName());
throw e;
} else {
proxyMap.put(key, FINAL_MODIFIER + e.getClassName() + ':' + e.getFinalMethods());
throw e;
}
}
}
}
LOGGER.debug(AsmInterceptorWrapper.LOG_EXIT, "getProxySubclass", classToReturn);
return classToReturn;
}
public static Object newProxySubclassInstance(Class<?> classToProxy, InvocationHandler ih)
throws UnableToProxyException
{
LOGGER.debug(AsmInterceptorWrapper.LOG_ENTRY, "newProxySubclassInstance", new Object[] {
classToProxy, ih });
Object proxySubclassInstance = null;
try {
Class<?> generatedProxySubclass = getProxySubclass(classToProxy);
LOGGER.debug("Getting the proxy subclass constructor");
Constructor<?> subclassConstructor = generatedProxySubclass
.getConstructor(new Class[] { InvocationHandler.class });
LOGGER.debug("Invoking the proxy subclass constructor");
proxySubclassInstance = subclassConstructor.newInstance(ih);
LOGGER.debug("Invoked proxy subclass constructor");
} catch (NoSuchMethodException nsme) {
LOGGER.debug(AsmInterceptorWrapper.LOG_EXCEPTION, nsme);
throw new ProxyClassInstantiationException(classToProxy, nsme);
} catch (InvocationTargetException ite) {
LOGGER.debug(AsmInterceptorWrapper.LOG_EXCEPTION, ite);
throw new ProxyClassInstantiationException(classToProxy, ite);
} catch (InstantiationException ie) {
LOGGER.debug(AsmInterceptorWrapper.LOG_EXCEPTION, ie);
throw new ProxyClassInstantiationException(classToProxy, ie);
} catch (IllegalAccessException iae) {
LOGGER.debug(AsmInterceptorWrapper.LOG_EXCEPTION, iae);
throw new ProxyClassInstantiationException(classToProxy, iae);
}
LOGGER.debug(AsmInterceptorWrapper.LOG_EXIT, "newProxySubclassInstance", proxySubclassInstance);
return proxySubclassInstance;
}
private static Class<?> generateAndLoadSubclass(Class<?> aClass, ClassLoader loader)
throws UnableToProxyException
{
LOGGER.debug(AsmInterceptorWrapper.LOG_ENTRY, "generateAndLoadSubclass", new Object[] { aClass,
loader });
// set the newClassName
String newClassName = "$" + aClass.getSimpleName() + aClass.hashCode();
String packageName = aClass.getPackage().getName();
if (packageName.startsWith("java.") || packageName.startsWith("javax.")) {
packageName = "org.apache.aries.blueprint.proxy." + packageName;
}
String fullNewClassName = (packageName + "." + newClassName).replaceAll("\\.", "/");
LOGGER.debug("New class name: {}", newClassName);
LOGGER.debug("Full new class name: {}", fullNewClassName);
Class<?> clazz = null;
try {
ClassReader cReader = new ClassReader(loader.getResourceAsStream(aClass.getName().replaceAll(
"\\.", "/")
+ ".class"));
ClassWriter cWriter = new ClassWriter(ClassWriter.COMPUTE_MAXS);
ClassVisitor dynamicSubclassAdapter = new ProxySubclassAdapter(cWriter, fullNewClassName,
loader);
byte[] byteClassData = processClass(cReader, cWriter, dynamicSubclassAdapter);
clazz = loadClassFromBytes(loader, getBinaryName(fullNewClassName), byteClassData, aClass
.getName());
} catch (IOException ioe) {
LOGGER.debug(AsmInterceptorWrapper.LOG_EXCEPTION, ioe);
throw new ProxyClassBytecodeGenerationException(aClass.getName(), ioe);
} catch (TypeNotPresentException tnpe) {
LOGGER.debug(AsmInterceptorWrapper.LOG_EXCEPTION, tnpe);
throw new ProxyClassBytecodeGenerationException(tnpe.typeName(), tnpe.getCause());
}
LOGGER.debug(AsmInterceptorWrapper.LOG_EXIT, "generateAndLoadSubclass", clazz);
return clazz;
}
|
public void testSize() {
assertEquals("size", 7, getTestVector().getNumNondefaultElements());
}
| public void testSize() {
assertEquals("size", 3, getTestVector().getNumNondefaultElements());
}
|
private void verifyCachedSchema(Connection c) throws SQLException {
if (c instanceof org.apache.derby.client.am.Connection) {
String cached =
((org.apache.derby.client.am.Connection) c).
getCurrentSchemaName();
Statement s = c.createStatement();
ResultSet rs = s.executeQuery("VALUES CURRENT SCHEMA");
rs.next();
String reported = rs.getString(1);
assertEquals(reported, cached);
} else {
println("Cannot verify cached schema for "+c.getClass());
}
}
| private void verifyCachedSchema(Connection c) throws SQLException {
if (usingDerbyNetClient()) {
String cached =
((org.apache.derby.client.am.Connection) c).
getCurrentSchemaName();
Statement s = c.createStatement();
ResultSet rs = s.executeQuery("VALUES CURRENT SCHEMA");
rs.next();
String reported = rs.getString(1);
assertEquals(reported, cached);
} else {
println("Cannot verify cached schema for "+c.getClass());
}
}
|
public void run() {
try {
TermEnum termEnum = s.getIndexReader().terms(new Term("body", ""));
int seenTermCount = 0;
int shift;
int trigger;
if (totTermCount.get() == 0) {
shift = 0;
trigger = 1;
} else {
shift = random.nextInt(totTermCount.get()/10);
trigger = totTermCount.get()/10;
}
while(System.currentTimeMillis() < searchStopTime) {
Term term = termEnum.term();
if (term == null) {
if (seenTermCount == 0) {
break;
}
totTermCount.set(seenTermCount);
seenTermCount = 0;
trigger = totTermCount.get()/10;
//System.out.println("trigger " + trigger);
shift = random.nextInt(totTermCount.get()/10);
termEnum = s.getIndexReader().terms(new Term("body", ""));
continue;
}
seenTermCount++;
// search 10 terms
if (trigger == 0) {
trigger = 1;
}
if ((seenTermCount + shift) % trigger == 0) {
//if (VERBOSE) {
//System.out.println(Thread.currentThread().getName() + " now search body:" + term.utf8ToString());
//}
totHits.addAndGet(runQuery(s, new TermQuery(term)));
}
}
if (VERBOSE) {
System.out.println(Thread.currentThread().getName() + ": search done");
}
} catch (Throwable t) {
failed.set(true);
t.printStackTrace(System.out);
throw new RuntimeException(t);
}
}
};
searchThreads[thread].setDaemon(true);
searchThreads[thread].start();
}
for(int thread=0;thread<NUM_SEARCH_THREADS;thread++) {
searchThreads[thread].join();
}
if (VERBOSE) {
System.out.println("TEST: DONE search: totHits=" + totHits);
}
} else {
Thread.sleep(100);
}
}
if (VERBOSE) {
System.out.println("TEST: all searching done [" + (System.currentTimeMillis()-t0) + " ms]");
}
//System.out.println("numDocs=" + r.numDocs() + " openDelFileCount=" + dir.openDeleteFileCount());
r.close();
final Set<String> openDeletedFiles = dir.getOpenDeletedFiles();
if (openDeletedFiles.size() > 0) {
System.out.println("OBD files: " + openDeletedFiles);
}
any |= openDeletedFiles.size() > 0;
assertFalse("saw non-zero open-but-deleted count", any);
if (VERBOSE) {
System.out.println("TEST: now join");
}
for(int thread=0;thread<NUM_INDEX_THREADS;thread++) {
threads[thread].join();
}
if (VERBOSE) {
System.out.println("TEST: done join [" + (System.currentTimeMillis()-t0) + " ms]; addCount=" + addCount + " delCount=" + delCount);
}
writer.commit();
assertEquals(addCount.get() - delCount.get(), writer.numDocs());
writer.close(false);
dir.close();
_TestUtil.rmDir(tempDir);
docs.close();
if (VERBOSE) {
System.out.println("TEST: done [" + (System.currentTimeMillis()-t0) + " ms]");
}
}
| public void run() {
try {
TermEnum termEnum = s.getIndexReader().terms(new Term("body", ""));
int seenTermCount = 0;
int shift;
int trigger;
if (totTermCount.get() == 0) {
shift = 0;
trigger = 1;
} else {
shift = random.nextInt(totTermCount.get()/10);
trigger = totTermCount.get()/10;
}
while(System.currentTimeMillis() < searchStopTime) {
Term term = termEnum.term();
if (term == null) {
if (seenTermCount == 0) {
break;
}
totTermCount.set(seenTermCount);
seenTermCount = 0;
trigger = totTermCount.get()/10;
//System.out.println("trigger " + trigger);
shift = random.nextInt(totTermCount.get()/10);
termEnum = s.getIndexReader().terms(new Term("body", ""));
continue;
}
seenTermCount++;
// search 10 terms
if (trigger == 0) {
trigger = 1;
}
if ((seenTermCount + shift) % trigger == 0) {
//if (VERBOSE) {
//System.out.println(Thread.currentThread().getName() + " now search body:" + term.utf8ToString());
//}
totHits.addAndGet(runQuery(s, new TermQuery(term)));
}
}
if (VERBOSE) {
System.out.println(Thread.currentThread().getName() + ": search done");
}
} catch (Throwable t) {
failed.set(true);
t.printStackTrace(System.out);
throw new RuntimeException(t);
}
}
};
searchThreads[thread].setDaemon(true);
searchThreads[thread].start();
}
for(int thread=0;thread<NUM_SEARCH_THREADS;thread++) {
searchThreads[thread].join();
}
if (VERBOSE) {
System.out.println("TEST: DONE search: totHits=" + totHits);
}
} else {
Thread.sleep(100);
}
}
if (VERBOSE) {
System.out.println("TEST: all searching done [" + (System.currentTimeMillis()-t0) + " ms]");
}
//System.out.println("numDocs=" + r.numDocs() + " openDelFileCount=" + dir.openDeleteFileCount());
r.close();
final Set<String> openDeletedFiles = dir.getOpenDeletedFiles();
if (openDeletedFiles.size() > 0) {
System.out.println("OBD files: " + openDeletedFiles);
}
any |= openDeletedFiles.size() > 0;
assertFalse("saw non-zero open-but-deleted count", any);
if (VERBOSE) {
System.out.println("TEST: now join");
}
for(int thread=0;thread<NUM_INDEX_THREADS;thread++) {
threads[thread].join();
}
if (VERBOSE) {
System.out.println("TEST: done join [" + (System.currentTimeMillis()-t0) + " ms]; addCount=" + addCount + " delCount=" + delCount);
}
writer.commit();
assertEquals("index=" + writer.segString(), addCount.get() - delCount.get(), writer.numDocs());
writer.close(false);
dir.close();
_TestUtil.rmDir(tempDir);
docs.close();
if (VERBOSE) {
System.out.println("TEST: done [" + (System.currentTimeMillis()-t0) + " ms]");
}
}
|
public synchronized int numDocs() throws IOException {
int count;
if (docWriter != null)
count = docWriter.getNumDocsInRAM();
else
count = 0;
for (int i = 0; i < segmentInfos.size(); i++) {
final SegmentInfo info = segmentInfos.info(i);
count += info.docCount - info.getDelCount();
}
return count;
}
| public synchronized int numDocs() throws IOException {
int count;
if (docWriter != null)
count = docWriter.getNumDocsInRAM();
else
count = 0;
for (int i = 0; i < segmentInfos.size(); i++) {
final SegmentInfo info = segmentInfos.info(i);
count += info.docCount - numDeletedDocs(info);
}
return count;
}
|
public static Test suite()
{
TestSuite suite = new TestSuite("errorcode Test");
suite.addTest(TestConfiguration.embeddedSuite(ErrorCodeTest.class));
return new LocaleTestSetup(suite, Locale.ENGLISH);
}
| public static Test suite()
{
TestSuite suite = new TestSuite("errorcode Test");
suite.addTest(TestConfiguration.defaultSuite(ErrorCodeTest.class));
return new LocaleTestSetup(suite, Locale.ENGLISH);
}
|
private static List<Row> strongRead(List<ReadCommand> commands) throws IOException, TimeoutException, InvalidRequestException, UnavailableException
{
List<QuorumResponseHandler<Row>> quorumResponseHandlers = new ArrayList<QuorumResponseHandler<Row>>();
List<EndPoint[]> commandEndPoints = new ArrayList<EndPoint[]>();
List<Row> rows = new ArrayList<Row>();
int commandIndex = 0;
for (ReadCommand command: commands)
{
// TODO: throw a thrift exception if we do not have N nodes
assert !command.isDigestQuery();
ReadCommand readMessageDigestOnly = command.copy();
readMessageDigestOnly.setDigestQuery(true);
Message message = command.makeReadMessage();
Message messageDigestOnly = readMessageDigestOnly.makeReadMessage();
QuorumResponseHandler<Row> quorumResponseHandler = new QuorumResponseHandler<Row>(DatabaseDescriptor.getQuorum(), new ReadResponseResolver());
EndPoint dataPoint = StorageService.instance().findSuitableEndPoint(command.key);
List<EndPoint> endpointList = new ArrayList<EndPoint>(Arrays.asList(StorageService.instance().getReadStorageEndPoints(command.key)));
/* Remove the local storage endpoint from the list. */
endpointList.remove(dataPoint);
EndPoint[] endPoints = new EndPoint[endpointList.size() + 1];
Message messages[] = new Message[endpointList.size() + 1];
/*
* First message is sent to the node that will actually get
* the data for us. The other two replicas are only sent a
* digest query.
*/
endPoints[0] = dataPoint;
messages[0] = message;
if (logger.isDebugEnabled())
logger.debug("strongread reading data for " + command + " from " + message.getMessageId() + "@" + dataPoint);
for (int i = 1; i < endPoints.length; i++)
{
EndPoint digestPoint = endpointList.get(i - 1);
endPoints[i] = digestPoint;
messages[i] = messageDigestOnly;
if (logger.isDebugEnabled())
logger.debug("strongread reading digest for " + command + " from " + messageDigestOnly.getMessageId() + "@" + digestPoint);
}
MessagingService.getMessagingInstance().sendRR(messages, endPoints, quorumResponseHandler);
quorumResponseHandlers.add(quorumResponseHandler);
commandEndPoints.add(endPoints);
}
for (QuorumResponseHandler<Row> quorumResponseHandler: quorumResponseHandlers)
{
Row row = null;
ReadCommand command = commands.get(commandIndex);
try
{
long startTime2 = System.currentTimeMillis();
row = quorumResponseHandler.get();
if (row != null)
rows.add(row);
if (logger.isDebugEnabled())
logger.debug("quorumResponseHandler: " + (System.currentTimeMillis() - startTime2) + " ms.");
}
catch (DigestMismatchException ex)
{
if (DatabaseDescriptor.getConsistencyCheck())
{
IResponseResolver<Row> readResponseResolverRepair = new ReadResponseResolver();
QuorumResponseHandler<Row> quorumResponseHandlerRepair = new QuorumResponseHandler<Row>(
DatabaseDescriptor.getQuorum(),
readResponseResolverRepair);
logger.info("DigestMismatchException: " + command.key);
Message messageRepair = command.makeReadMessage();
MessagingService.getMessagingInstance().sendRR(messageRepair, commandEndPoints.get(commandIndex), quorumResponseHandlerRepair);
try
{
row = quorumResponseHandlerRepair.get();
if (row != null)
rows.add(row);
}
catch (DigestMismatchException e)
{
// TODO should this be a thrift exception?
throw new RuntimeException("digest mismatch reading key " + command.key, e);
}
}
}
commandIndex++;
}
return rows;
}
| private static List<Row> strongRead(List<ReadCommand> commands) throws IOException, TimeoutException, InvalidRequestException, UnavailableException
{
List<QuorumResponseHandler<Row>> quorumResponseHandlers = new ArrayList<QuorumResponseHandler<Row>>();
List<EndPoint[]> commandEndPoints = new ArrayList<EndPoint[]>();
List<Row> rows = new ArrayList<Row>();
int commandIndex = 0;
for (ReadCommand command: commands)
{
// TODO: throw a thrift exception if we do not have N nodes
assert !command.isDigestQuery();
ReadCommand readMessageDigestOnly = command.copy();
readMessageDigestOnly.setDigestQuery(true);
Message message = command.makeReadMessage();
Message messageDigestOnly = readMessageDigestOnly.makeReadMessage();
QuorumResponseHandler<Row> quorumResponseHandler = new QuorumResponseHandler<Row>(DatabaseDescriptor.getQuorum(), new ReadResponseResolver());
EndPoint dataPoint = StorageService.instance().findSuitableEndPoint(command.key);
List<EndPoint> endpointList = new ArrayList<EndPoint>(Arrays.asList(StorageService.instance().getReadStorageEndPoints(command.key)));
/* Remove the local storage endpoint from the list. */
endpointList.remove(dataPoint);
EndPoint[] endPoints = new EndPoint[endpointList.size() + 1];
Message messages[] = new Message[endpointList.size() + 1];
/*
* First message is sent to the node that will actually get
* the data for us. The other two replicas are only sent a
* digest query.
*/
endPoints[0] = dataPoint;
messages[0] = message;
if (logger.isDebugEnabled())
logger.debug("strongread reading data for " + command + " from " + message.getMessageId() + "@" + dataPoint);
for (int i = 1; i < endPoints.length; i++)
{
EndPoint digestPoint = endpointList.get(i - 1);
endPoints[i] = digestPoint;
messages[i] = messageDigestOnly;
if (logger.isDebugEnabled())
logger.debug("strongread reading digest for " + command + " from " + messageDigestOnly.getMessageId() + "@" + digestPoint);
}
MessagingService.getMessagingInstance().sendRR(messages, endPoints, quorumResponseHandler);
quorumResponseHandlers.add(quorumResponseHandler);
commandEndPoints.add(endPoints);
}
for (QuorumResponseHandler<Row> quorumResponseHandler: quorumResponseHandlers)
{
Row row = null;
ReadCommand command = commands.get(commandIndex);
try
{
long startTime2 = System.currentTimeMillis();
row = quorumResponseHandler.get();
if (row != null)
rows.add(row);
if (logger.isDebugEnabled())
logger.debug("quorumResponseHandler: " + (System.currentTimeMillis() - startTime2) + " ms.");
}
catch (DigestMismatchException ex)
{
if (DatabaseDescriptor.getConsistencyCheck())
{
IResponseResolver<Row> readResponseResolverRepair = new ReadResponseResolver();
QuorumResponseHandler<Row> quorumResponseHandlerRepair = new QuorumResponseHandler<Row>(
DatabaseDescriptor.getQuorum(),
readResponseResolverRepair);
logger.info("DigestMismatchException: " + ex.getMessage());
Message messageRepair = command.makeReadMessage();
MessagingService.getMessagingInstance().sendRR(messageRepair, commandEndPoints.get(commandIndex), quorumResponseHandlerRepair);
try
{
row = quorumResponseHandlerRepair.get();
if (row != null)
rows.add(row);
}
catch (DigestMismatchException e)
{
// TODO should this be a thrift exception?
throw new RuntimeException("digest mismatch reading key " + command.key, e);
}
}
}
commandIndex++;
}
return rows;
}
|
private FST<Object> buildAutomaton(BytesRefSorter sorter) throws IOException {
// Build the automaton.
final Outputs<Object> outputs = NoOutputs.getSingleton();
final Object empty = outputs.getNoOutput();
final Builder<Object> builder = new Builder<Object>(
FST.INPUT_TYPE.BYTE1, 0, 0, true, true,
shareMaxTailLength, outputs, null, false);
BytesRef scratch = new BytesRef();
BytesRef entry;
final IntsRef scratchIntsRef = new IntsRef();
int count = 0;
BytesRefIterator iter = sorter.iterator();
while((entry = iter.next()) != null) {
count++;
if (scratch.compareTo(entry) != 0) {
builder.add(Util.toIntsRef(entry, scratchIntsRef), empty);
scratch.copyBytes(entry);
}
}
return count == 0 ? null : builder.finish();
}
| private FST<Object> buildAutomaton(BytesRefSorter sorter) throws IOException {
// Build the automaton.
final Outputs<Object> outputs = NoOutputs.getSingleton();
final Object empty = outputs.getNoOutput();
final Builder<Object> builder = new Builder<Object>(
FST.INPUT_TYPE.BYTE1, 0, 0, true, true,
shareMaxTailLength, outputs, null, false, true);
BytesRef scratch = new BytesRef();
BytesRef entry;
final IntsRef scratchIntsRef = new IntsRef();
int count = 0;
BytesRefIterator iter = sorter.iterator();
while((entry = iter.next()) != null) {
count++;
if (scratch.compareTo(entry) != 0) {
builder.add(Util.toIntsRef(entry, scratchIntsRef), empty);
scratch.copyBytes(entry);
}
}
return count == 0 ? null : builder.finish();
}
|
public int compare(String[] left, String[] right) {
return left[0].compareTo(right[0]);
}
});
System.out.println(" encode...");
PositiveIntOutputs fstOutput = PositiveIntOutputs.getSingleton(true);
Builder<Long> fstBuilder = new Builder<Long>(FST.INPUT_TYPE.BYTE2, 0, 0, true, true, Integer.MAX_VALUE, fstOutput, null, true);
IntsRef scratch = new IntsRef();
long ord = -1; // first ord will be 0
String lastValue = null;
// build tokeninfo dictionary
for (String[] entry : lines) {
int next = dictionary.put(entry);
if(next == offset){
System.out.println("Failed to process line: " + Arrays.toString(entry));
continue;
}
String token = entry[0];
if (!token.equals(lastValue)) {
// new word to add to fst
ord++;
lastValue = token;
scratch.grow(token.length());
scratch.length = token.length();
for (int i = 0; i < token.length(); i++) {
scratch.ints[i] = (int) token.charAt(i);
}
fstBuilder.add(scratch, ord);
}
dictionary.addMapping((int)ord, offset);
offset = next;
}
final FST<Long> fst = fstBuilder.finish();
System.out.print(" " + fst.getNodeCount() + " nodes, " + fst.getArcCount() + " arcs, " + fst.sizeInBytes() + " bytes... ");
dictionary.setFST(fst);
System.out.println(" done");
return dictionary;
}
| public int compare(String[] left, String[] right) {
return left[0].compareTo(right[0]);
}
});
System.out.println(" encode...");
PositiveIntOutputs fstOutput = PositiveIntOutputs.getSingleton(true);
Builder<Long> fstBuilder = new Builder<Long>(FST.INPUT_TYPE.BYTE2, 0, 0, true, true, Integer.MAX_VALUE, fstOutput, null, true, true);
IntsRef scratch = new IntsRef();
long ord = -1; // first ord will be 0
String lastValue = null;
// build tokeninfo dictionary
for (String[] entry : lines) {
int next = dictionary.put(entry);
if(next == offset){
System.out.println("Failed to process line: " + Arrays.toString(entry));
continue;
}
String token = entry[0];
if (!token.equals(lastValue)) {
// new word to add to fst
ord++;
lastValue = token;
scratch.grow(token.length());
scratch.length = token.length();
for (int i = 0; i < token.length(); i++) {
scratch.ints[i] = (int) token.charAt(i);
}
fstBuilder.add(scratch, ord);
}
dictionary.addMapping((int)ord, offset);
offset = next;
}
final FST<Long> fst = fstBuilder.finish();
System.out.print(" " + fst.getNodeCount() + " nodes, " + fst.getArcCount() + " arcs, " + fst.sizeInBytes() + " bytes... ");
dictionary.setFST(fst);
System.out.println(" done");
return dictionary;
}
|
public void setExclusionTable( Map<?,?> exclusiontable ) {
exclusions = new HashSet(exclusiontable.keySet());
}
| public void setExclusionTable( Map<?,?> exclusiontable ) {
exclusions = exclusiontable.keySet();
}
|
public boolean validateData(QualityQuery[] qq, PrintWriter logger) {
HashMap<String,QRelJudgement> missingQueries = (HashMap<String, QRelJudgement>) judgements.clone();
ArrayList<String> missingJudgements = new ArrayList<String>();
for (int i=0; i<qq.length; i++) {
String id = qq[i].getQueryID();
if (missingQueries.containsKey(id)) {
missingQueries.remove(id);
} else {
missingJudgements.add(id);
}
}
boolean isValid = true;
if (missingJudgements.size()>0) {
isValid = false;
if (logger!=null) {
logger.println("WARNING: "+missingJudgements.size()+" queries have no judgments! - ");
for (int i=0; i<missingJudgements.size(); i++) {
logger.println(" "+ missingJudgements.get(i));
}
}
}
if (missingQueries.size()>0) {
isValid = false;
if (logger!=null) {
logger.println("WARNING: "+missingQueries.size()+" judgments match no query! - ");
for (final String id : missingQueries.keySet()) {
logger.println(" "+id);
}
}
}
return isValid;
}
| public boolean validateData(QualityQuery[] qq, PrintWriter logger) {
HashMap<String,QRelJudgement> missingQueries = new HashMap<String, QRelJudgement>(judgements);
ArrayList<String> missingJudgements = new ArrayList<String>();
for (int i=0; i<qq.length; i++) {
String id = qq[i].getQueryID();
if (missingQueries.containsKey(id)) {
missingQueries.remove(id);
} else {
missingJudgements.add(id);
}
}
boolean isValid = true;
if (missingJudgements.size()>0) {
isValid = false;
if (logger!=null) {
logger.println("WARNING: "+missingJudgements.size()+" queries have no judgments! - ");
for (int i=0; i<missingJudgements.size(); i++) {
logger.println(" "+ missingJudgements.get(i));
}
}
}
if (missingQueries.size()>0) {
isValid = false;
if (logger!=null) {
logger.println("WARNING: "+missingQueries.size()+" judgments match no query! - ");
for (final String id : missingQueries.keySet()) {
logger.println(" "+id);
}
}
}
return isValid;
}
|
public void testScheduledExecMemoryLeak() throws Exception {
Fixture jar = ArchiveFixture.newJar()
.manifest().symbolicName("test.bundle").end()
.file("OSGI-INF/blueprint/blueprint.xml")
.line("<blueprint xmlns=\"http://www.osgi.org/xmlns/blueprint/v1.0.0\">")
.line("<reference interface=\"java.util.List\" />")
.line("</blueprint>").end().end();
ByteArrayOutputStream bout = new ByteArrayOutputStream();
jar.writeOut(bout);
Bundle b = bundleContext.installBundle("test.bundle", new ByteArrayInputStream(bout.toByteArray()));
for (int i=0; i<16; i++) System.gc();
long startFreeMemory = Runtime.getRuntime().freeMemory();
// 3000 iterations on a Mac 1.6 JVM leaks 30+ mb, 2000 leaks a bit more than 20,
// 10000 iterations would be close to OutOfMemory however by that stage the test runs very slowly
for (int i=0; i<3000; i++) {
b.start();
// give the container some time to operate, otherwise it probably won't even get to create a future
Thread.sleep(10);
b.stop();
}
for (int i=0; i<16; i++) System.gc();
long endFreeMemory = Runtime.getRuntime().freeMemory();
long lossage = startFreeMemory - endFreeMemory;
assertTrue("We lost: "+lossage, lossage < 10000000);
}
| public void testScheduledExecMemoryLeak() throws Exception {
Fixture jar = ArchiveFixture.newJar()
.manifest().symbolicName("test.bundle").end()
.file("OSGI-INF/blueprint/blueprint.xml")
.line("<blueprint xmlns=\"http://www.osgi.org/xmlns/blueprint/v1.0.0\">")
.line("<reference interface=\"java.util.List\" />")
.line("</blueprint>").end().end();
ByteArrayOutputStream bout = new ByteArrayOutputStream();
jar.writeOut(bout);
Bundle b = bundleContext.installBundle("test.bundle", new ByteArrayInputStream(bout.toByteArray()));
for (int i=0; i<16; i++) System.gc();
long startFreeMemory = Runtime.getRuntime().freeMemory();
// 3000 iterations on a Mac 1.6 JVM leaks 30+ mb, 2000 leaks a bit more than 20,
// 10000 iterations would be close to OutOfMemory however by that stage the test runs very slowly
for (int i=0; i<3000; i++) {
b.start();
// give the container some time to operate, otherwise it probably won't even get to create a future
Thread.sleep(10);
b.stop();
}
for (int i=0; i<16; i++) System.gc();
long endFreeMemory = Runtime.getRuntime().freeMemory();
long lossage = startFreeMemory - endFreeMemory;
assertTrue("We lost: "+lossage, lossage < 20000000);
}
|
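The only change in the test pair above is the allowed free-memory delta. Assertions of this kind are inherently noisy because System.gc() is only a hint to the JVM; one common way to make the reading a little more stable is to compare used heap (total minus free) before and after. A rough, framework-independent sketch:
public class HeapProbe {
    // best-effort used-heap reading; System.gc() is advisory, so this is approximate
    static long usedHeap() {
        for (int i = 0; i < 16; i++) {
            System.gc();
        }
        Runtime rt = Runtime.getRuntime();
        return rt.totalMemory() - rt.freeMemory();
    }
    public static void main(String[] args) {
        long before = usedHeap();
        byte[][] garbage = new byte[100][];
        for (int i = 0; i < garbage.length; i++) {
            garbage[i] = new byte[1024 * 1024]; // allocate roughly 100 MB
        }
        garbage = null; // drop the references so the arrays can be collected
        long after = usedHeap();
        System.out.println("used-heap delta in bytes: " + (after - before));
    }
}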
String PERSISTENTLY_STARTED = "PersistentlyStarted";
/*
* Copyright (c) OSGi Alliance (2009). All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.osgi.jmx.framework;
import java.io.IOException;
import javax.management.openmbean.CompositeType;
import javax.management.openmbean.SimpleType;
import javax.management.openmbean.TabularData;
import javax.management.openmbean.TabularType;
import org.osgi.jmx.Item;
import org.osgi.jmx.JmxConstants;
/**
* This MBean represents the Bundle state of the framework. This MBean also
* emits events that clients can use to get notified of the changes in the
* bundle state of the framework.
*
* @version $Rev$
*/
public interface BundleStateMBean {
/**
* The Object Name for a Bundle State MBean.
*/
String OBJECTNAME = JmxConstants.OSGI_CORE
+ ":type=bundleState,version=1.5";
/**
* The key KEY, used in {@link #KEY_ITEM}.
*/
String KEY = "Key";
/**
* The item describing the key of a bundle header entry. The key is
* {@link #KEY} and the type is {@link SimpleType#STRING}.
*/
Item KEY_ITEM = new Item(KEY, "The bundle header key", SimpleType.STRING);
/**
* The key VALUE, used in {@link #VALUE_ITEM}.
*/
String VALUE = "Value";
/**
* The item describing the value of a bundle header entry. The key is
* {@link #VALUE} and the type is {@link SimpleType#STRING}.
*/
Item VALUE_ITEM = new Item(VALUE, "The bundle header value",
SimpleType.STRING);
/**
* The Composite Type describing an entry in bundle headers. It consists of
* {@link #KEY_ITEM} and {@link #VALUE_ITEM}.
*/
CompositeType HEADER_TYPE = Item.compositeType("HEADER",
"This type encapsulates OSGi bundle header key/value pairs",
KEY_ITEM, VALUE_ITEM);
/**
* The Tabular Type describing the type of the Tabular Data value that is
* returned from {@link #getHeaders(long)} method. The primary item is
* {@link #KEY_ITEM}.
*/
TabularType HEADERS_TYPE = Item.tabularType("HEADERS",
"The table of bundle headers", HEADER_TYPE, new String[] { KEY });
/**
* The key LOCATION, used in {@link #LOCATION_ITEM}.
*/
String LOCATION = "Location";
/**
* The item containing the bundle location in {@link #BUNDLE_TYPE}. The key
* is {@link #LOCATION} and the the type is {@link SimpleType#STRING}.
*/
Item LOCATION_ITEM = new Item(LOCATION, "The location of the bundle",
SimpleType.STRING);
/**
* The key IDENTIFIER, used in {@link #IDENTIFIER_ITEM}.
*/
String IDENTIFIER = "Identifier";
/**
* The item containing the bundle identifier in {@link #BUNDLE_TYPE}. The
* key is {@link #IDENTIFIER} and the the type is {@link SimpleType#LONG}.
*/
Item IDENTIFIER_ITEM = new Item(IDENTIFIER, "The id of the bundle",
SimpleType.LONG);
/**
* The key SYMBOLIC_NAME, used in {@link #SYMBOLIC_NAME_ITEM}.
*/
String SYMBOLIC_NAME = "SymbolicName";
/**
* The item containing the symbolic name in {@link #BUNDLE_TYPE}. The key is
* {@link #SYMBOLIC_NAME} and the the type is {@link SimpleType#STRING}.
*/
Item SYMBOLIC_NAME_ITEM = new Item(SYMBOLIC_NAME,
"The symbolic name of the bundle", SimpleType.STRING);
/**
* The key VERSION, used in {@link #VERSION_ITEM}.
*/
String VERSION = "Version";
/**
 * The item containing the version in {@link #BUNDLE_TYPE}. The key is
 * {@link #VERSION} and the type is {@link SimpleType#STRING}.
*/
Item VERSION_ITEM = new Item(VERSION, "The version of the bundle",
SimpleType.STRING);
/**
* The key START_LEVEL, used in {@link #START_LEVEL_ITEM}.
*/
String START_LEVEL = "StartLevel";
/**
* The item containing the start level in {@link #BUNDLE_TYPE}. The key is
* {@link #START_LEVEL} and the the type is {@link SimpleType#INTEGER}.
*/
Item START_LEVEL_ITEM = new Item(START_LEVEL,
"The start level of the bundle", SimpleType.INTEGER);
/**
* The key STATE, used in {@link #STATE_ITEM}.
*/
String STATE = "State";
/**
* Constant INSTALLED for the {@link #STATE}
*/
String INSTALLED = "INSTALLED";
/**
* Constant RESOLVED for the {@link #STATE}
*/
String RESOLVED = "RESOLVED";
/**
* Constant STARTING for the {@link #STATE}
*/
String STARTING = "STARTING";
/**
* Constant ACTIVE for the {@link #STATE}
*/
String ACTIVE = "ACTIVE";
/**
* Constant STOPPING for the {@link #STATE}
*/
String STOPPING = "STOPPING";
/**
* Constant UNINSTALLED for the {@link #STATE}
*/
String UNINSTALLED = "UNINSTALLED";
/**
* Constant UNKNOWN for the {@link #STATE}
*/
String UNKNOWN = "UNKNOWN";
/**
* The item containing the bundle state in {@link #BUNDLE_TYPE}. The key is
* {@link #STATE} and the the type is {@link SimpleType#STRING}. The
* returned values must be one of the following strings:
* <ul>
* <li>{@link #INSTALLED}</li>
* <li>{@link #RESOLVED}</li>
* <li>{@link #STARTING}</li>
* <li>{@link #ACTIVE}</li>
* <li>{@link #STOPPING}</li>
* <li>{@link #UNINSTALLED}</li>
* <li>{@link #UNKNOWN}</li>
* </ul>
*/
Item STATE_ITEM = new Item(STATE, "The state of the bundle",
SimpleType.STRING, INSTALLED, RESOLVED, STARTING, ACTIVE, STOPPING,
UNINSTALLED, UNKNOWN);
/**
* The key LAST_MODIFIED, used in {@link #LAST_MODIFIED_ITEM}.
*/
String LAST_MODIFIED = "LastModified";
/**
* The item containing the last modified time in the {@link #BUNDLE_TYPE}.
* The key is {@link #LAST_MODIFIED} and the the type is
* {@link SimpleType#LONG}.
*/
Item LAST_MODIFIED_ITEM = new Item(LAST_MODIFIED,
"The last modification time of the bundle", SimpleType.LONG);
/**
* The key PERSISTENTLY_STARTED, used in {@link #PERSISTENTLY_STARTED_ITEM}.
*/
String PERSISTENTLY_STARTED = "PeristentlyStarted";
/**
* The item containing the indication of persistently started in
* {@link #BUNDLE_TYPE}. The key is {@link #PERSISTENTLY_STARTED} and the
* the type is {@link SimpleType#BOOLEAN}.
*/
Item PERSISTENTLY_STARTED_ITEM = new Item(PERSISTENTLY_STARTED,
"Whether the bundle is persistently started", SimpleType.BOOLEAN);
/**
* The key REMOVAL_PENDING, used in {@link #REMOVAL_PENDING_ITEM}.
*/
String REMOVAL_PENDING = "RemovalPending";
/**
* The item containing the indication of removal pending in
* {@link #BUNDLE_TYPE}. The key is {@link #REMOVAL_PENDING} and the type is
* {@link SimpleType#BOOLEAN}.
*/
Item REMOVAL_PENDING_ITEM = new Item(REMOVAL_PENDING,
"Whether the bundle is pending removal", SimpleType.BOOLEAN);
/**
* The key REQUIRED, used in {@value #REQUIRED_ITEM}.
*/
String REQUIRED = "Required";
/**
* The item containing the required status in {@link #BUNDLE_TYPE}. The key
* is {@link #REQUIRED} and the the type is {@link SimpleType#BOOLEAN}.
*/
Item REQUIRED_ITEM = new Item(REQUIRED, "Whether the bundle is required",
SimpleType.BOOLEAN);
/**
* The key FRAGMENT, used in {@value #FRAGMENT_ITEM}.
*/
String FRAGMENT = "Fragment";
/**
* The item containing the fragment status in {@link #BUNDLE_TYPE}. The key
* is {@link #FRAGMENT} and the the type is {@link SimpleType#BOOLEAN}.
*/
Item FRAGMENT_ITEM = new Item(FRAGMENT, "Whether the bundle is a fragment",
SimpleType.BOOLEAN);
/**
* The key REGISTERED_SERVICES, used in {@value #REGISTERED_SERVICES_ITEM}.
*/
String REGISTERED_SERVICES = "RegisteredServices";
/**
* The item containing the registered services of the bundle in
* {@link #BUNDLE_TYPE}. The key is {@link #REGISTERED_SERVICES} and the the
* type is {@link JmxConstants#LONG_ARRAY_TYPE}.
*/
Item REGISTERED_SERVICES_ITEM = new Item(REGISTERED_SERVICES,
"The registered services of the bundle",
JmxConstants.LONG_ARRAY_TYPE);
/**
* The key SERVICES_IN_USE, used in {@value #SERVICES_IN_USE_ITEM}.
*/
String SERVICES_IN_USE = "ServicesInUse";
/**
* The item containing the services in use by this bundle in
* {@link #BUNDLE_TYPE}. The key is {@link #SERVICES_IN_USE} and the the
* type is {@link JmxConstants#LONG_ARRAY_TYPE}.
*/
Item SERVICES_IN_USE_ITEM = new Item(SERVICES_IN_USE,
"The services in use by the bundle", JmxConstants.LONG_ARRAY_TYPE);
/**
* The key HEADERS, used in {@link #HEADERS_ITEM}.
*/
String HEADERS = "Headers";
/**
* The item containing the bundle headers in {@link #BUNDLE_TYPE}. The key
* is {@link #HEADERS} and the the type is {@link #HEADERS_TYPE}.
*/
Item HEADERS_ITEM = new Item(HEADERS, "The headers of the bundle",
HEADERS_TYPE);
/**
* The key EXPORTED_PACKAGES, used in {@link #EXPORTED_PACKAGES_ITEM}.
*/
String EXPORTED_PACKAGES = "ExportedPackages";
/**
* The item containing the exported package names in {@link #BUNDLE_TYPE}
* .The key is {@link #EXPORTED_PACKAGES} and the the type is
* {@link JmxConstants#STRING_ARRAY_TYPE}.
*/
Item EXPORTED_PACKAGES_ITEM = new Item(EXPORTED_PACKAGES,
"The exported packages of the bundle",
JmxConstants.STRING_ARRAY_TYPE);
/**
* The key IMPORTED_PACKAGES, used in {@link #EXPORTED_PACKAGES_ITEM}.
*/
String IMPORTED_PACKAGES = "ImportedPackages";
/**
* The item containing the imported package names in {@link #BUNDLE_TYPE}
* .The key is {@link #IMPORTED_PACKAGES} and the the type is
* {@link JmxConstants#STRING_ARRAY_TYPE}.
*/
Item IMPORTED_PACKAGES_ITEM = new Item(IMPORTED_PACKAGES,
"The imported packages of the bundle",
JmxConstants.STRING_ARRAY_TYPE);
/**
* The key FRAGMENTS, used in {@link #FRAGMENTS_ITEM}.
*/
String FRAGMENTS = "Fragments";
/**
* The item containing the list of fragments the bundle is host to in
* {@link #BUNDLE_TYPE}. The key is {@link #FRAGMENTS} and the type is
* {@link JmxConstants#LONG_ARRAY_TYPE}.
*/
Item FRAGMENTS_ITEM = new Item(FRAGMENTS,
"The fragments of which the bundle is host",
JmxConstants.LONG_ARRAY_TYPE);
/**
* The key HOSTS, used in {@link #HOSTS_ITEM}.
*/
String HOSTS = "Hosts";
/**
* The item containing the bundle identifiers representing the hosts in
* {@link #BUNDLE_TYPE}. The key is {@link #HOSTS} and the type is
* {@link JmxConstants#LONG_ARRAY_TYPE}
*/
Item HOSTS_ITEM = new Item(HOSTS,
"The fragments of which the bundle is host",
JmxConstants.LONG_ARRAY_TYPE);
/**
* The key REQUIRED_BUNDLES, used in {@link #REQUIRED_BUNDLES_ITEM}.
*/
String REQUIRED_BUNDLES = "RequiredBundles";
/**
* The item containing the required bundles in {@link #BUNDLE_TYPE}. The key
* is {@link #REQUIRED_BUNDLES} and the type is
* {@link JmxConstants#LONG_ARRAY_TYPE}
*/
Item REQUIRED_BUNDLES_ITEM = new Item(REQUIRED_BUNDLES,
"The required bundles the bundle", JmxConstants.LONG_ARRAY_TYPE);
/**
* The key REQUIRING_BUNDLES, used in {@link #REQUIRING_BUNDLES_ITEM}.
*/
String REQUIRING_BUNDLES = "RequiringBundles";
/**
* The item containing the bundles requiring this bundle in
* {@link #BUNDLE_TYPE}. The key is {@link #REQUIRING_BUNDLES} and the type
* is {@link JmxConstants#LONG_ARRAY_TYPE}
*/
Item REQUIRING_BUNDLES_ITEM = new Item(REQUIRING_BUNDLES,
"The bundles requiring the bundle", JmxConstants.LONG_ARRAY_TYPE);
/**
* The key EVENT, used in {@link #EVENT_ITEM}.
*/
String EVENT = "BundleEvent";
/**
* The item containing the event type. The key is {@link #EVENT} and the type is {@link SimpleType#INTEGER}
*/
Item EVENT_ITEM = new Item(
EVENT,
"The type of the event: {INSTALLED=1, STARTED=2, STOPPED=4, UPDATED=8, UNINSTALLED=16}",
SimpleType.INTEGER);
/**
* The Composite Type that represents a bundle event. This composite consists of:
* <ul>
* <li>{@link #IDENTIFIER}</li>
* <li>{@link #LOCATION}</li>
* <li>{@link #SYMBOLIC_NAME}</li>
* <li>{@link #EVENT}</li>
* </ul>
*/
CompositeType BUNDLE_EVENT_TYPE = Item.compositeType("BUNDLE_EVENT",
"This type encapsulates OSGi bundle events", IDENTIFIER_ITEM,
LOCATION_ITEM, SYMBOLIC_NAME_ITEM, EVENT_ITEM);
/**
* The Composite Type that represents a bundle. This composite consist of:
* <ul>
* <li>{@link #EXPORTED_PACKAGES}</li>
* <li>{@link #FRAGMENT}</li>
* <li>{@link #FRAGMENTS}</li>
* <li>{@link #HEADERS}</li>
* <li>{@link #HOSTS}</li>
* <li>{@link #IDENTIFIER}</li>
* <li>{@link #IMPORTED_PACKAGES}</li>
* <li>{@link #LAST_MODIFIED}</li>
* <li>{@link #LOCATION}</li>
* <li>{@link #PERSISTENTLY_STARTED}</li>
* <li>{@link #REGISTERED_SERVICES}</li>
* <li>{@link #REMOVAL_PENDING}</li>
* <li>{@link #REQUIRED}</li>
* <li>{@link #REQUIRED_BUNDLES}</li>
* <li>{@link #REQUIRING_BUNDLES}</li>
* <li>{@link #START_LEVEL}</li>
* <li>{@link #STATE}</li>
* <li>{@link #SERVICES_IN_USE}</li>
* <li>{@link #SYMBOLIC_NAME}</li>
* <li>{@link #VERSION}</li>
* </ul>
* It is used by {@link #BUNDLES_TYPE}.
*/
CompositeType BUNDLE_TYPE = Item.compositeType("BUNDLE",
"This type encapsulates OSGi bundles", EXPORTED_PACKAGES_ITEM,
FRAGMENT_ITEM, FRAGMENTS_ITEM, HEADERS_ITEM, HOSTS_ITEM,
IDENTIFIER_ITEM, IMPORTED_PACKAGES_ITEM, LAST_MODIFIED_ITEM,
LOCATION_ITEM, PERSISTENTLY_STARTED_ITEM, REGISTERED_SERVICES_ITEM,
REMOVAL_PENDING_ITEM, REQUIRED_ITEM, REQUIRED_BUNDLES_ITEM,
REQUIRING_BUNDLES_ITEM, START_LEVEL_ITEM, STATE_ITEM,
SERVICES_IN_USE_ITEM, SYMBOLIC_NAME_ITEM, VERSION_ITEM);
/**
* The Tabular Type for a list of bundles. The row type is
* {@link #BUNDLE_TYPE}.
*/
TabularType BUNDLES_TYPE = Item.tabularType("BUNDLES", "A list of bundles",
BUNDLE_TYPE, new String[] { IDENTIFIER });
/**
* Answer the list of identifiers of the bundles this bundle depends upon
*
* @param bundleIdentifier
* the bundle identifier
* @return the list of bundle identifiers
* @throws IOException
* if the operation fails
* @throws IllegalArgumentException
* if the bundle indicated does not exist
*/
long[] getRequiredBundles(long bundleIdentifier) throws IOException;
/**
* Answer the bundle state of the system in tabular form.
*
* Each row of the returned table represents a single bundle. The Tabular
* Data consists of Composite Data that is type by {@link #BUNDLES_TYPE}.
*
* @return the tabular representation of the bundle state
* @throws IOException
*/
TabularData listBundles() throws IOException;
/**
* Answer the list of exported packages for this bundle.
*
* @param bundleId
* @return the array of package names, combined with their version in the
* format <packageName;version>
* @throws IOException
* if the operation fails
* @throws IllegalArgumentException
* if the bundle indicated does not exist
*/
String[] getExportedPackages(long bundleId) throws IOException;
/**
* Answer the list of the bundle ids of the fragments associated with this
* bundle
*
* @param bundleId
* @return the array of bundle identifiers
* @throws IOException
* if the operation fails
* @throws IllegalArgumentException
* if the bundle indicated does not exist
*/
long[] getFragments(long bundleId) throws IOException;
/**
* Answer the headers for the bundle uniquely identified by the bundle id.
* The Tabular Data is typed by the {@link #HEADERS_TYPE}.
*
* @param bundleId
* the unique identifier of the bundle
* @return the table of associated header key and values
* @throws IOException
* if the operation fails
* @throws IllegalArgumentException
* if the bundle indicated does not exist
*/
TabularData getHeaders(long bundleId) throws IOException;
/**
* Answer the list of bundle ids of the bundles which host a fragment
*
* @param fragment
* the bundle id of the fragment
* @return the array of bundle identifiers
* @throws IOException
* if the operation fails
* @throws IllegalArgumentException
* if the bundle indicated does not exist
*/
long[] getHosts(long fragment) throws IOException;
/**
* Answer the array of the packages imported by this bundle
*
* @param bundleId
* the bundle identifier
* @return the array of package names, combined with their version in the
* format <packageName;version>
* @throws IOException
* if the operation fails
* @throws IllegalArgumentException
* if the bundle indicated does not exist
*/
String[] getImportedPackages(long bundleId) throws IOException;
/**
* Answer the last modified time of a bundle
*
* @param bundleId
* the unique identifier of a bundle
* @return the last modified time
* @throws IOException
* if the operation fails
* @throws IllegalArgumentException
* if the bundle indicated does not exist
*/
long getLastModified(long bundleId) throws IOException;
/**
* Answer the list of service identifiers representing the services this
* bundle exports
*
* @param bundleId
* the bundle identifier
* @return the list of service identifiers
* @throws IOException
* if the operation fails
* @throws IllegalArgumentException
* if the bundle indicated does not exist
*/
long[] getRegisteredServices(long bundleId) throws IOException;
/**
* Answer the list of identifiers of the bundles which require this bundle
*
* @param bundleIdentifier
* the bundle identifier
* @return the list of bundle identifiers
* @throws IOException
* if the operation fails
* @throws IllegalArgumentException
* if the bundle indicated does not exist
*/
long[] getRequiringBundles(long bundleIdentifier) throws IOException;
/**
* Answer the list of service identifiers which refer to the the services
* this bundle is using
*
* @param bundleIdentifier
* the bundle identifier
* @return the list of service identifiers
* @throws IOException
* if the operation fails
* @throws IllegalArgumentException
* if the bundle indicated does not exist
*/
long[] getServicesInUse(long bundleIdentifier) throws IOException;
/**
* Answer the start level of the bundle
*
* @param bundleId
* the identifier of the bundle
* @return the start level
* @throws IOException
* if the operation fails
* @throws IllegalArgumentException
* if the bundle indicated does not exist
*/
int getStartLevel(long bundleId) throws IOException;
/**
* Answer the symbolic name of the state of the bundle
*
* @param bundleId
* the identifier of the bundle
* @return the string name of the bundle state
* @throws IOException
* if the operation fails
* @throws IllegalArgumentException
* if the bundle indicated does not exist
*/
String getState(long bundleId) throws IOException;
/**
* Answer the symbolic name of the bundle
*
* @param bundleId
* the identifier of the bundle
* @return the symbolic name
* @throws IOException
* if the operation fails
* @throws IllegalArgumentException
* if the bundle indicated does not exist
*/
String getSymbolicName(long bundleId) throws IOException;
/**
* Answer if the bundle is persistently started when its start level is
* reached
*
* @param bundleId
* the identifier of the bundle
* @return true if the bundle is persistently started
* @throws IOException
* if the operation fails
* @throws IllegalArgumentException
* if the bundle indicated does not exist
*/
boolean isPersistentlyStarted(long bundleId) throws IOException;
/**
* Answer whether the bundle is a fragment or not
*
* @param bundleId
* the identifier of the bundle
* @return true if the bundle is a fragment
* @throws IOException
* if the operation fails
* @throws IllegalArgumentException
* if the bundle indicated does not exist
*/
boolean isFragment(long bundleId) throws IOException;
/**
* Answer true if the bundle is pending removal
*
* @param bundleId
* the identifier of the bundle
* @return true if the bundle is pending removal
* @throws IOException
* if the operation fails
* @throws IllegalArgumentException
* if the bundle indicated does not exist
*/
boolean isRemovalPending(long bundleId) throws IOException;
/**
* Answer true if the bundle is required by another bundle
*
* @param bundleId
* the identifier of the bundle
* @return true if the bundle is required by another bundle
* @throws IOException
* if the operation fails
* @throws IllegalArgumentException
* if the bundle indicated does not exist
*/
boolean isRequired(long bundleId) throws IOException;
/**
* Answer the location of the bundle.
*
* @param bundleId
* the identifier of the bundle
* @return The location string of this bundle
* @throws IOException
* if the operation fails
* @throws IllegalArgumentException
* if the bundle indicated does not exist
*/
String getLocation(long bundleId) throws IOException;
/**
 * Answer the version of the bundle.
*
* @param bundleId
* the identifier of the bundle
 * @return The version string of this bundle
* @throws IOException
* if the operation fails
* @throws IllegalArgumentException
* if the bundle indicated does not exist
*/
String getVersion(long bundleId) throws IOException;
} | String PERSISTENTLY_STARTED = "PersistentlyStarted";
/*
* Copyright (c) OSGi Alliance (2009). All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.osgi.jmx.framework;
import java.io.IOException;
import javax.management.openmbean.CompositeType;
import javax.management.openmbean.SimpleType;
import javax.management.openmbean.TabularData;
import javax.management.openmbean.TabularType;
import org.osgi.jmx.Item;
import org.osgi.jmx.JmxConstants;
/**
* This MBean represents the Bundle state of the framework. This MBean also
* emits events that clients can use to get notified of the changes in the
* bundle state of the framework.
*
* @version $Rev$
*/
public interface BundleStateMBean {
/**
* The Object Name for a Bundle State MBean.
*/
String OBJECTNAME = JmxConstants.OSGI_CORE
+ ":type=bundleState,version=1.5";
/**
* The key KEY, used in {@link #KEY_ITEM}.
*/
String KEY = "Key";
/**
* The item describing the key of a bundle header entry. The key is
* {@link #KEY} and the type is {@link SimpleType#STRING}.
*/
Item KEY_ITEM = new Item(KEY, "The bundle header key", SimpleType.STRING);
/**
* The key VALUE, used in {@link #VALUE_ITEM}.
*/
String VALUE = "Value";
/**
* The item describing the value of a bundle header entry. The key is
* {@link #VALUE} and the type is {@link SimpleType#STRING}.
*/
Item VALUE_ITEM = new Item(VALUE, "The bundle header value",
SimpleType.STRING);
/**
* The Composite Type describing an entry in bundle headers. It consists of
* {@link #KEY_ITEM} and {@link #VALUE_ITEM}.
*/
CompositeType HEADER_TYPE = Item.compositeType("HEADER",
"This type encapsulates OSGi bundle header key/value pairs",
KEY_ITEM, VALUE_ITEM);
/**
* The Tabular Type describing the type of the Tabular Data value that is
* returned from {@link #getHeaders(long)} method. The primary item is
* {@link #KEY_ITEM}.
*/
TabularType HEADERS_TYPE = Item.tabularType("HEADERS",
"The table of bundle headers", HEADER_TYPE, new String[] { KEY });
/**
* The key LOCATION, used in {@link #LOCATION_ITEM}.
*/
String LOCATION = "Location";
/**
* The item containing the bundle location in {@link #BUNDLE_TYPE}. The key
* is {@link #LOCATION} and the the type is {@link SimpleType#STRING}.
*/
Item LOCATION_ITEM = new Item(LOCATION, "The location of the bundle",
SimpleType.STRING);
/**
* The key IDENTIFIER, used in {@link #IDENTIFIER_ITEM}.
*/
String IDENTIFIER = "Identifier";
/**
* The item containing the bundle identifier in {@link #BUNDLE_TYPE}. The
* key is {@link #IDENTIFIER} and the the type is {@link SimpleType#LONG}.
*/
Item IDENTIFIER_ITEM = new Item(IDENTIFIER, "The id of the bundle",
SimpleType.LONG);
/**
* The key SYMBOLIC_NAME, used in {@link #SYMBOLIC_NAME_ITEM}.
*/
String SYMBOLIC_NAME = "SymbolicName";
/**
* The item containing the symbolic name in {@link #BUNDLE_TYPE}. The key is
* {@link #SYMBOLIC_NAME} and the the type is {@link SimpleType#STRING}.
*/
Item SYMBOLIC_NAME_ITEM = new Item(SYMBOLIC_NAME,
"The symbolic name of the bundle", SimpleType.STRING);
/**
* The key VERSION, used in {@link #VERSION_ITEM}.
*/
String VERSION = "Version";
/**
 * The item containing the version in {@link #BUNDLE_TYPE}. The key is
 * {@link #VERSION} and the type is {@link SimpleType#STRING}.
*/
Item VERSION_ITEM = new Item(VERSION, "The version of the bundle",
SimpleType.STRING);
/**
* The key START_LEVEL, used in {@link #START_LEVEL_ITEM}.
*/
String START_LEVEL = "StartLevel";
/**
* The item containing the start level in {@link #BUNDLE_TYPE}. The key is
* {@link #START_LEVEL} and the the type is {@link SimpleType#INTEGER}.
*/
Item START_LEVEL_ITEM = new Item(START_LEVEL,
"The start level of the bundle", SimpleType.INTEGER);
/**
* The key STATE, used in {@link #STATE_ITEM}.
*/
String STATE = "State";
/**
* Constant INSTALLED for the {@link #STATE}
*/
String INSTALLED = "INSTALLED";
/**
* Constant RESOLVED for the {@link #STATE}
*/
String RESOLVED = "RESOLVED";
/**
* Constant STARTING for the {@link #STATE}
*/
String STARTING = "STARTING";
/**
* Constant ACTIVE for the {@link #STATE}
*/
String ACTIVE = "ACTIVE";
/**
* Constant STOPPING for the {@link #STATE}
*/
String STOPPING = "STOPPING";
/**
* Constant UNINSTALLED for the {@link #STATE}
*/
String UNINSTALLED = "UNINSTALLED";
/**
* Constant UNKNOWN for the {@link #STATE}
*/
String UNKNOWN = "UNKNOWN";
/**
* The item containing the bundle state in {@link #BUNDLE_TYPE}. The key is
* {@link #STATE} and the the type is {@link SimpleType#STRING}. The
* returned values must be one of the following strings:
* <ul>
* <li>{@link #INSTALLED}</li>
* <li>{@link #RESOLVED}</li>
* <li>{@link #STARTING}</li>
* <li>{@link #ACTIVE}</li>
* <li>{@link #STOPPING}</li>
* <li>{@link #UNINSTALLED}</li>
* <li>{@link #UNKNOWN}</li>
* </ul>
*/
Item STATE_ITEM = new Item(STATE, "The state of the bundle",
SimpleType.STRING, INSTALLED, RESOLVED, STARTING, ACTIVE, STOPPING,
UNINSTALLED, UNKNOWN);
/**
* The key LAST_MODIFIED, used in {@link #LAST_MODIFIED_ITEM}.
*/
String LAST_MODIFIED = "LastModified";
/**
* The item containing the last modified time in the {@link #BUNDLE_TYPE}.
* The key is {@link #LAST_MODIFIED} and the the type is
* {@link SimpleType#LONG}.
*/
Item LAST_MODIFIED_ITEM = new Item(LAST_MODIFIED,
"The last modification time of the bundle", SimpleType.LONG);
/**
* The key PERSISTENTLY_STARTED, used in {@link #PERSISTENTLY_STARTED_ITEM}.
*/
String PERSISTENTLY_STARTED = "PersistentlyStarted";
/**
* The item containing the indication of persistently started in
* {@link #BUNDLE_TYPE}. The key is {@link #PERSISTENTLY_STARTED} and the
* the type is {@link SimpleType#BOOLEAN}.
*/
Item PERSISTENTLY_STARTED_ITEM = new Item(PERSISTENTLY_STARTED,
"Whether the bundle is persistently started", SimpleType.BOOLEAN);
/**
* The key REMOVAL_PENDING, used in {@link #REMOVAL_PENDING_ITEM}.
*/
String REMOVAL_PENDING = "RemovalPending";
/**
* The item containing the indication of removal pending in
* {@link #BUNDLE_TYPE}. The key is {@link #REMOVAL_PENDING} and the type is
* {@link SimpleType#BOOLEAN}.
*/
Item REMOVAL_PENDING_ITEM = new Item(REMOVAL_PENDING,
"Whether the bundle is pending removal", SimpleType.BOOLEAN);
/**
* The key REQUIRED, used in {@value #REQUIRED_ITEM}.
*/
String REQUIRED = "Required";
/**
* The item containing the required status in {@link #BUNDLE_TYPE}. The key
* is {@link #REQUIRED} and the the type is {@link SimpleType#BOOLEAN}.
*/
Item REQUIRED_ITEM = new Item(REQUIRED, "Whether the bundle is required",
SimpleType.BOOLEAN);
/**
* The key FRAGMENT, used in {@value #FRAGMENT_ITEM}.
*/
String FRAGMENT = "Fragment";
/**
* The item containing the fragment status in {@link #BUNDLE_TYPE}. The key
* is {@link #FRAGMENT} and the the type is {@link SimpleType#BOOLEAN}.
*/
Item FRAGMENT_ITEM = new Item(FRAGMENT, "Whether the bundle is a fragment",
SimpleType.BOOLEAN);
/**
* The key REGISTERED_SERVICES, used in {@value #REGISTERED_SERVICES_ITEM}.
*/
String REGISTERED_SERVICES = "RegisteredServices";
/**
* The item containing the registered services of the bundle in
* {@link #BUNDLE_TYPE}. The key is {@link #REGISTERED_SERVICES} and the the
* type is {@link JmxConstants#LONG_ARRAY_TYPE}.
*/
Item REGISTERED_SERVICES_ITEM = new Item(REGISTERED_SERVICES,
"The registered services of the bundle",
JmxConstants.LONG_ARRAY_TYPE);
/**
* The key SERVICES_IN_USE, used in {@value #SERVICES_IN_USE_ITEM}.
*/
String SERVICES_IN_USE = "ServicesInUse";
/**
* The item containing the services in use by this bundle in
* {@link #BUNDLE_TYPE}. The key is {@link #SERVICES_IN_USE} and the the
* type is {@link JmxConstants#LONG_ARRAY_TYPE}.
*/
Item SERVICES_IN_USE_ITEM = new Item(SERVICES_IN_USE,
"The services in use by the bundle", JmxConstants.LONG_ARRAY_TYPE);
/**
* The key HEADERS, used in {@link #HEADERS_ITEM}.
*/
String HEADERS = "Headers";
/**
* The item containing the bundle headers in {@link #BUNDLE_TYPE}. The key
* is {@link #HEADERS} and the the type is {@link #HEADERS_TYPE}.
*/
Item HEADERS_ITEM = new Item(HEADERS, "The headers of the bundle",
HEADERS_TYPE);
/**
* The key EXPORTED_PACKAGES, used in {@link #EXPORTED_PACKAGES_ITEM}.
*/
String EXPORTED_PACKAGES = "ExportedPackages";
/**
* The item containing the exported package names in {@link #BUNDLE_TYPE}
* .The key is {@link #EXPORTED_PACKAGES} and the the type is
* {@link JmxConstants#STRING_ARRAY_TYPE}.
*/
Item EXPORTED_PACKAGES_ITEM = new Item(EXPORTED_PACKAGES,
"The exported packages of the bundle",
JmxConstants.STRING_ARRAY_TYPE);
/**
* The key IMPORTED_PACKAGES, used in {@link #EXPORTED_PACKAGES_ITEM}.
*/
String IMPORTED_PACKAGES = "ImportedPackages";
/**
* The item containing the imported package names in {@link #BUNDLE_TYPE}
* .The key is {@link #IMPORTED_PACKAGES} and the the type is
* {@link JmxConstants#STRING_ARRAY_TYPE}.
*/
Item IMPORTED_PACKAGES_ITEM = new Item(IMPORTED_PACKAGES,
"The imported packages of the bundle",
JmxConstants.STRING_ARRAY_TYPE);
/**
* The key FRAGMENTS, used in {@link #FRAGMENTS_ITEM}.
*/
String FRAGMENTS = "Fragments";
/**
* The item containing the list of fragments the bundle is host to in
* {@link #BUNDLE_TYPE}. The key is {@link #FRAGMENTS} and the type is
* {@link JmxConstants#LONG_ARRAY_TYPE}.
*/
Item FRAGMENTS_ITEM = new Item(FRAGMENTS,
"The fragments of which the bundle is host",
JmxConstants.LONG_ARRAY_TYPE);
/**
* The key HOSTS, used in {@link #HOSTS_ITEM}.
*/
String HOSTS = "Hosts";
/**
* The item containing the bundle identifiers representing the hosts in
* {@link #BUNDLE_TYPE}. The key is {@link #HOSTS} and the type is
* {@link JmxConstants#LONG_ARRAY_TYPE}
*/
Item HOSTS_ITEM = new Item(HOSTS,
"The fragments of which the bundle is host",
JmxConstants.LONG_ARRAY_TYPE);
/**
* The key REQUIRED_BUNDLES, used in {@link #REQUIRED_BUNDLES_ITEM}.
*/
String REQUIRED_BUNDLES = "RequiredBundles";
/**
* The item containing the required bundles in {@link #BUNDLE_TYPE}. The key
* is {@link #REQUIRED_BUNDLES} and the type is
* {@link JmxConstants#LONG_ARRAY_TYPE}
*/
Item REQUIRED_BUNDLES_ITEM = new Item(REQUIRED_BUNDLES,
"The required bundles the bundle", JmxConstants.LONG_ARRAY_TYPE);
/**
* The key REQUIRING_BUNDLES, used in {@link #REQUIRING_BUNDLES_ITEM}.
*/
String REQUIRING_BUNDLES = "RequiringBundles";
/**
* The item containing the bundles requiring this bundle in
* {@link #BUNDLE_TYPE}. The key is {@link #REQUIRING_BUNDLES} and the type
* is {@link JmxConstants#LONG_ARRAY_TYPE}
*/
Item REQUIRING_BUNDLES_ITEM = new Item(REQUIRING_BUNDLES,
"The bundles requiring the bundle", JmxConstants.LONG_ARRAY_TYPE);
/**
* The key EVENT, used in {@link #EVENT_ITEM}.
*/
String EVENT = "BundleEvent";
/**
* The item containing the event type. The key is {@link #EVENT} and the type is {@link SimpleType#INTEGER}
*/
Item EVENT_ITEM = new Item(
EVENT,
"The type of the event: {INSTALLED=1, STARTED=2, STOPPED=4, UPDATED=8, UNINSTALLED=16}",
SimpleType.INTEGER);
/**
* The Composite Type that represents a bundle event. This composite consists of:
* <ul>
* <li>{@link #IDENTIFIER}</li>
* <li>{@link #LOCATION}</li>
* <li>{@link #SYMBOLIC_NAME}</li>
* <li>{@link #EVENT}</li>
* </ul>
*/
CompositeType BUNDLE_EVENT_TYPE = Item.compositeType("BUNDLE_EVENT",
"This type encapsulates OSGi bundle events", IDENTIFIER_ITEM,
LOCATION_ITEM, SYMBOLIC_NAME_ITEM, EVENT_ITEM);
/**
* The Composite Type that represents a bundle. This composite consist of:
* <ul>
* <li>{@link #EXPORTED_PACKAGES}</li>
* <li>{@link #FRAGMENT}</li>
* <li>{@link #FRAGMENTS}</li>
* <li>{@link #HEADERS}</li>
* <li>{@link #HOSTS}</li>
* <li>{@link #IDENTIFIER}</li>
* <li>{@link #IMPORTED_PACKAGES}</li>
* <li>{@link #LAST_MODIFIED}</li>
* <li>{@link #LOCATION}</li>
* <li>{@link #PERSISTENTLY_STARTED}</li>
* <li>{@link #REGISTERED_SERVICES}</li>
* <li>{@link #REMOVAL_PENDING}</li>
* <li>{@link #REQUIRED}</li>
* <li>{@link #REQUIRED_BUNDLES}</li>
* <li>{@link #REQUIRING_BUNDLES}</li>
* <li>{@link #START_LEVEL}</li>
* <li>{@link #STATE}</li>
* <li>{@link #SERVICES_IN_USE}</li>
* <li>{@link #SYMBOLIC_NAME}</li>
* <li>{@link #VERSION}</li>
* </ul>
* It is used by {@link #BUNDLES_TYPE}.
*/
CompositeType BUNDLE_TYPE = Item.compositeType("BUNDLE",
"This type encapsulates OSGi bundles", EXPORTED_PACKAGES_ITEM,
FRAGMENT_ITEM, FRAGMENTS_ITEM, HEADERS_ITEM, HOSTS_ITEM,
IDENTIFIER_ITEM, IMPORTED_PACKAGES_ITEM, LAST_MODIFIED_ITEM,
LOCATION_ITEM, PERSISTENTLY_STARTED_ITEM, REGISTERED_SERVICES_ITEM,
REMOVAL_PENDING_ITEM, REQUIRED_ITEM, REQUIRED_BUNDLES_ITEM,
REQUIRING_BUNDLES_ITEM, START_LEVEL_ITEM, STATE_ITEM,
SERVICES_IN_USE_ITEM, SYMBOLIC_NAME_ITEM, VERSION_ITEM);
/**
* The Tabular Type for a list of bundles. The row type is
* {@link #BUNDLE_TYPE}.
*/
TabularType BUNDLES_TYPE = Item.tabularType("BUNDLES", "A list of bundles",
BUNDLE_TYPE, new String[] { IDENTIFIER });
/**
* Answer the list of identifiers of the bundles this bundle depends upon
*
* @param bundleIdentifier
* the bundle identifier
* @return the list of bundle identifiers
* @throws IOException
* if the operation fails
* @throws IllegalArgumentException
* if the bundle indicated does not exist
*/
long[] getRequiredBundles(long bundleIdentifier) throws IOException;
/**
* Answer the bundle state of the system in tabular form.
*
* Each row of the returned table represents a single bundle. The Tabular
* Data consists of Composite Data that is type by {@link #BUNDLES_TYPE}.
*
* @return the tabular representation of the bundle state
* @throws IOException
*/
TabularData listBundles() throws IOException;
/**
* Answer the list of exported packages for this bundle.
*
* @param bundleId
* @return the array of package names, combined with their version in the
* format <packageName;version>
* @throws IOException
* if the operation fails
* @throws IllegalArgumentException
* if the bundle indicated does not exist
*/
String[] getExportedPackages(long bundleId) throws IOException;
/**
* Answer the list of the bundle ids of the fragments associated with this
* bundle
*
* @param bundleId
* @return the array of bundle identifiers
* @throws IOException
* if the operation fails
* @throws IllegalArgumentException
* if the bundle indicated does not exist
*/
long[] getFragments(long bundleId) throws IOException;
/**
* Answer the headers for the bundle uniquely identified by the bundle id.
* The Tabular Data is typed by the {@link #HEADERS_TYPE}.
*
* @param bundleId
* the unique identifier of the bundle
* @return the table of associated header key and values
* @throws IOException
* if the operation fails
* @throws IllegalArgumentException
* if the bundle indicated does not exist
*/
TabularData getHeaders(long bundleId) throws IOException;
/**
* Answer the list of bundle ids of the bundles which host a fragment
*
* @param fragment
* the bundle id of the fragment
* @return the array of bundle identifiers
* @throws IOException
* if the operation fails
* @throws IllegalArgumentException
* if the bundle indicated does not exist
*/
long[] getHosts(long fragment) throws IOException;
/**
* Answer the array of the packages imported by this bundle
*
* @param bundleId
* the bundle identifier
* @return the array of package names, combined with their version in the
* format <packageName;version>
* @throws IOException
* if the operation fails
* @throws IllegalArgumentException
* if the bundle indicated does not exist
*/
String[] getImportedPackages(long bundleId) throws IOException;
/**
* Answer the last modified time of a bundle
*
* @param bundleId
* the unique identifier of a bundle
* @return the last modified time
* @throws IOException
* if the operation fails
* @throws IllegalArgumentException
* if the bundle indicated does not exist
*/
long getLastModified(long bundleId) throws IOException;
/**
* Answer the list of service identifiers representing the services this
* bundle exports
*
* @param bundleId
* the bundle identifier
* @return the list of service identifiers
* @throws IOException
* if the operation fails
* @throws IllegalArgumentException
* if the bundle indicated does not exist
*/
long[] getRegisteredServices(long bundleId) throws IOException;
/**
* Answer the list of identifiers of the bundles which require this bundle
*
* @param bundleIdentifier
* the bundle identifier
* @return the list of bundle identifiers
* @throws IOException
* if the operation fails
* @throws IllegalArgumentException
* if the bundle indicated does not exist
*/
long[] getRequiringBundles(long bundleIdentifier) throws IOException;
/**
* Answer the list of service identifiers which refer to the the services
* this bundle is using
*
* @param bundleIdentifier
* the bundle identifier
* @return the list of service identifiers
* @throws IOException
* if the operation fails
* @throws IllegalArgumentException
* if the bundle indicated does not exist
*/
long[] getServicesInUse(long bundleIdentifier) throws IOException;
/**
* Answer the start level of the bundle
*
* @param bundleId
* the identifier of the bundle
* @return the start level
* @throws IOException
* if the operation fails
* @throws IllegalArgumentException
* if the bundle indicated does not exist
*/
int getStartLevel(long bundleId) throws IOException;
/**
* Answer the symbolic name of the state of the bundle
*
* @param bundleId
* the identifier of the bundle
* @return the string name of the bundle state
* @throws IOException
* if the operation fails
* @throws IllegalArgumentException
* if the bundle indicated does not exist
*/
String getState(long bundleId) throws IOException;
/**
* Answer the symbolic name of the bundle
*
* @param bundleId
* the identifier of the bundle
* @return the symbolic name
* @throws IOException
* if the operation fails
* @throws IllegalArgumentException
* if the bundle indicated does not exist
*/
String getSymbolicName(long bundleId) throws IOException;
/**
* Answer if the bundle is persistently started when its start level is
* reached
*
* @param bundleId
* the identifier of the bundle
* @return true if the bundle is persistently started
* @throws IOException
* if the operation fails
* @throws IllegalArgumentException
* if the bundle indicated does not exist
*/
boolean isPersistentlyStarted(long bundleId) throws IOException;
/**
* Answer whether the bundle is a fragment or not
*
* @param bundleId
* the identifier of the bundle
* @return true if the bundle is a fragment
* @throws IOException
* if the operation fails
* @throws IllegalArgumentException
* if the bundle indicated does not exist
*/
boolean isFragment(long bundleId) throws IOException;
/**
* Answer true if the bundle is pending removal
*
* @param bundleId
* the identifier of the bundle
* @return true if the bundle is pending removal
* @throws IOException
* if the operation fails
* @throws IllegalArgumentException
* if the bundle indicated does not exist
*/
boolean isRemovalPending(long bundleId) throws IOException;
/**
* Answer true if the bundle is required by another bundle
*
* @param bundleId
* the identifier of the bundle
* @return true if the bundle is required by another bundle
* @throws IOException
* if the operation fails
* @throws IllegalArgumentException
* if the bundle indicated does not exist
*/
boolean isRequired(long bundleId) throws IOException;
/**
* Answer the location of the bundle.
*
* @param bundleId
* the identifier of the bundle
* @return The location string of this bundle
* @throws IOException
* if the operation fails
* @throws IllegalArgumentException
* if the bundle indicated does not exist
*/
String getLocation(long bundleId) throws IOException;
/**
 * Answer the version of the bundle.
*
* @param bundleId
* the identifier of the bundle
 * @return The version string of this bundle
* @throws IOException
* if the operation fails
* @throws IllegalArgumentException
* if the bundle indicated does not exist
*/
String getVersion(long bundleId) throws IOException;
} |
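The two copies of the interface above differ in the String value behind the PERSISTENTLY_STARTED key ("PeristentlyStarted" versus "PersistentlyStarted"). Open MBean composite items are looked up by that exact string, so a misspelled key only surfaces when a client asks for the item at runtime. A small self-contained illustration (the type name, item name and values below are invented for the demo and are not part of the interface above):
import javax.management.openmbean.CompositeData;
import javax.management.openmbean.CompositeDataSupport;
import javax.management.openmbean.CompositeType;
import javax.management.openmbean.InvalidKeyException;
import javax.management.openmbean.OpenType;
import javax.management.openmbean.SimpleType;
public class KeyMismatchDemo {
    public static void main(String[] args) throws Exception {
        CompositeType type = new CompositeType(
                "BUNDLE_DEMO", "demo composite type",
                new String[] { "PersistentlyStarted" },
                new String[] { "whether the bundle is persistently started" },
                new OpenType<?>[] { SimpleType.BOOLEAN });
        CompositeData data = new CompositeDataSupport(
                type, new String[] { "PersistentlyStarted" }, new Object[] { Boolean.TRUE });
        System.out.println(data.get("PersistentlyStarted")); // true
        try {
            data.get("PeristentlyStarted"); // misspelled key: no such item
        } catch (InvalidKeyException expected) {
            System.out.println("lookup failed: " + expected.getMessage());
        }
    }
}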
private String getRemotCoreUrl(CoreContainer cores, String collectionName, String origCorename) {
ClusterState clusterState = cores.getZkController().getClusterState();
Collection<Slice> slices = clusterState.getActiveSlices(collectionName);
boolean byCoreName = false;
if (slices == null) {
// look by core name
byCoreName = true;
Set<String> collections = clusterState.getCollections();
for (String collection : collections) {
slices = new ArrayList<Slice>();
slices.addAll(clusterState.getActiveSlices(collection));
}
}
if (slices == null || slices.size() == 0) {
return null;
}
Set<String> liveNodes = clusterState.getLiveNodes();
Iterator<Slice> it = slices.iterator();
while (it.hasNext()) {
Slice slice = it.next();
Map<String,Replica> sliceShards = slice.getReplicasMap();
for (ZkNodeProps nodeProps : sliceShards.values()) {
ZkCoreNodeProps coreNodeProps = new ZkCoreNodeProps(nodeProps);
if (liveNodes.contains(coreNodeProps.getNodeName())
&& coreNodeProps.getState().equals(ZkStateReader.ACTIVE)) {
if (byCoreName && !collectionName.equals(coreNodeProps.getCoreName())) {
// if it's by core name, make sure they match
continue;
}
if (coreNodeProps.getBaseUrl().equals(cores.getZkController().getBaseUrl())) {
// don't count a local core
continue;
}
String coreUrl;
if (origCorename != null) {
coreUrl = coreNodeProps.getBaseUrl() + "/" + origCorename;
} else {
coreUrl = coreNodeProps.getCoreUrl();
if (coreUrl.endsWith("/")) {
coreUrl = coreUrl.substring(0, coreUrl.length() - 1);
}
}
return coreUrl;
}
}
}
return null;
}
| private String getRemotCoreUrl(CoreContainer cores, String collectionName, String origCorename) {
ClusterState clusterState = cores.getZkController().getClusterState();
Collection<Slice> slices = clusterState.getActiveSlices(collectionName);
boolean byCoreName = false;
if (slices == null) {
// look by core name
byCoreName = true;
Set<String> collections = clusterState.getCollections();
for (String collection : collections) {
slices = new ArrayList<Slice>();
slices.addAll(clusterState.getSlices(collection));
}
}
if (slices == null || slices.size() == 0) {
return null;
}
Set<String> liveNodes = clusterState.getLiveNodes();
Iterator<Slice> it = slices.iterator();
while (it.hasNext()) {
Slice slice = it.next();
Map<String,Replica> sliceShards = slice.getReplicasMap();
for (ZkNodeProps nodeProps : sliceShards.values()) {
ZkCoreNodeProps coreNodeProps = new ZkCoreNodeProps(nodeProps);
if (liveNodes.contains(coreNodeProps.getNodeName())
&& coreNodeProps.getState().equals(ZkStateReader.ACTIVE)) {
if (byCoreName && !collectionName.equals(coreNodeProps.getCoreName())) {
// if it's by core name, make sure they match
continue;
}
if (coreNodeProps.getBaseUrl().equals(cores.getZkController().getBaseUrl())) {
// don't count a local core
continue;
}
String coreUrl;
if (origCorename != null) {
coreUrl = coreNodeProps.getBaseUrl() + "/" + origCorename;
} else {
coreUrl = coreNodeProps.getCoreUrl();
if (coreUrl.endsWith("/")) {
coreUrl = coreUrl.substring(0, coreUrl.length() - 1);
}
}
return coreUrl;
}
}
}
return null;
}
|
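Apart from the slice lookup that changed between the two versions above, note that both re-create the slices list inside the collections loop, so only the last collection's slices survive the loop. If the intent is to gather slices from every collection, the accumulator has to be created once, before the loop; a generic sketch with plain strings (nothing Solr-specific is used here):
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
public class AccumulateDemo {
    public static void main(String[] args) {
        Map<String, List<String>> slicesByCollection = new HashMap<String, List<String>>();
        slicesByCollection.put("collection1", Arrays.asList("shard1"));
        slicesByCollection.put("collection2", Arrays.asList("shard2", "shard3"));
        // create the accumulator once, outside the loop, so earlier collections are kept
        List<String> all = new ArrayList<String>();
        for (List<String> slices : slicesByCollection.values()) {
            all.addAll(slices);
        }
        System.out.println(all.size()); // 3
        // re-assigning "all = new ArrayList<String>()" inside the loop would instead
        // leave only the slices of whichever collection happened to be visited last
    }
}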
private SolrZkClient electNewOverseer(String address) throws InterruptedException,
TimeoutException, IOException, KeeperException {
SolrZkClient zkClient = new SolrZkClient(address, TIMEOUT);
ZkStateReader reader = new ZkStateReader(zkClient);
LeaderElector overseerElector = new LeaderElector(zkClient);
ElectionContext ec = new OverseerElectionContext(address, zkClient, reader);
overseerElector.setup(ec);
overseerElector.joinElection(ec);
return zkClient;
}
| private SolrZkClient electNewOverseer(String address) throws InterruptedException,
TimeoutException, IOException, KeeperException {
SolrZkClient zkClient = new SolrZkClient(address, TIMEOUT);
ZkStateReader reader = new ZkStateReader(zkClient);
LeaderElector overseerElector = new LeaderElector(zkClient);
ElectionContext ec = new OverseerElectionContext(address.replaceAll("/", "_"), zkClient, reader);
overseerElector.setup(ec);
overseerElector.joinElection(ec);
return zkClient;
}
|
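The change above is confined to the first argument of OverseerElectionContext: every '/' in the address is replaced before it is used, presumably because '/' is the ZooKeeper path separator and therefore cannot appear inside a single node name. A tiny sketch of that sanitisation (the helper name is made up):
public class NodeNames {
    // illustrative helper: make an address usable as a single ZooKeeper node name
    static String toNodeName(String address) {
        return address.replaceAll("/", "_");
    }
    public static void main(String[] args) {
        System.out.println(toNodeName("127.0.0.1:8983/solr")); // 127.0.0.1:8983_solr
    }
}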
public void handleRequestBody(SolrQueryRequest req, SolrQueryResponse rsp) throws Exception, ParseException, InstantiationException, IllegalAccessException
{
// int sleep = req.getParams().getInt("sleep",0);
// if (sleep > 0) {log.error("SLEEPING for " + sleep); Thread.sleep(sleep);}
ResponseBuilder rb = new ResponseBuilder(req, rsp, components);
if (rb.requestInfo != null) {
rb.requestInfo.setResponseBuilder(rb);
}
boolean dbg = req.getParams().getBool(CommonParams.DEBUG_QUERY, false);
rb.setDebug(dbg);
if (dbg == false){//if it's true, we are doing everything anyway.
SolrPluginUtils.getDebugInterests(req.getParams().getParams(CommonParams.DEBUG), rb);
}
final RTimer timer = rb.isDebug() ? new RTimer() : null;
ShardHandler shardHandler1 = shardHandlerFactory.getShardHandler();
shardHandler1.checkDistributed(rb);
if (timer == null) {
// non-debugging prepare phase
for( SearchComponent c : components ) {
c.prepare(rb);
}
} else {
// debugging prepare phase
RTimer subt = timer.sub( "prepare" );
for( SearchComponent c : components ) {
rb.setTimer( subt.sub( c.getName() ) );
c.prepare(rb);
rb.getTimer().stop();
}
subt.stop();
}
if (!rb.isDistrib) {
// a normal non-distributed request
// The semantics of debugging vs not debugging are different enough that
// it makes sense to have two control loops
if(!rb.isDebug()) {
// Process
for( SearchComponent c : components ) {
c.process(rb);
}
}
else {
// Process
RTimer subt = timer.sub( "process" );
for( SearchComponent c : components ) {
rb.setTimer( subt.sub( c.getName() ) );
c.process(rb);
rb.getTimer().stop();
}
subt.stop();
timer.stop();
// add the timing info
if (rb.isDebugTimings()) {
rb.addDebugInfo("timing", timer.asNamedList() );
}
}
} else {
// a distributed request
if (rb.outgoing == null) {
rb.outgoing = new LinkedList<ShardRequest>();
}
rb.finished = new ArrayList<ShardRequest>();
int nextStage = 0;
do {
rb.stage = nextStage;
nextStage = ResponseBuilder.STAGE_DONE;
// call all components
for( SearchComponent c : components ) {
// the next stage is the minimum of what all components report
nextStage = Math.min(nextStage, c.distributedProcess(rb));
}
// check the outgoing queue and send requests
while (rb.outgoing.size() > 0) {
// submit all current request tasks at once
while (rb.outgoing.size() > 0) {
ShardRequest sreq = rb.outgoing.remove(0);
sreq.actualShards = sreq.shards;
if (sreq.actualShards==ShardRequest.ALL_SHARDS) {
sreq.actualShards = rb.shards;
}
sreq.responses = new ArrayList<ShardResponse>();
// TODO: map from shard to address[]
for (String shard : sreq.actualShards) {
ModifiableSolrParams params = new ModifiableSolrParams(sreq.params);
params.remove(ShardParams.SHARDS); // not a top-level request
params.remove("distrib"); // not a top-level request
params.remove("indent");
params.remove(CommonParams.HEADER_ECHO_PARAMS);
params.set(ShardParams.IS_SHARD, true); // a sub (shard) request
params.set(ShardParams.SHARD_URL, shard); // so the shard knows what was asked
if (rb.requestInfo != null) {
// we could try and detect when this is needed, but it could be tricky
params.set("NOW", Long.toString(rb.requestInfo.getNOW().getTime()));
}
String shardQt = req.getParams().get(ShardParams.SHARDS_QT);
if (shardQt == null) {
params.remove(CommonParams.QT);
} else {
params.set(CommonParams.QT, shardQt);
}
shardHandler1.submit(sreq, shard, params);
}
}
// now wait for replies, but if anyone puts more requests on
// the outgoing queue, send them out immediately (by exiting
// this loop)
while (rb.outgoing.size() == 0) {
ShardResponse srsp = shardHandler1.takeCompletedOrError();
if (srsp == null) break; // no more requests to wait for
// Was there an exception? If so, abort everything and
// rethrow
if (srsp.getException() != null) {
shardHandler1.cancelAll();
if (srsp.getException() instanceof SolrException) {
throw (SolrException)srsp.getException();
} else {
throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, srsp.getException());
}
}
rb.finished.add(srsp.getShardRequest());
// let the components see the responses to the request
for(SearchComponent c : components) {
c.handleResponses(rb, srsp.getShardRequest());
}
}
}
for(SearchComponent c : components) {
c.finishStage(rb);
}
// we are done when the next stage is MAX_VALUE
} while (nextStage != Integer.MAX_VALUE);
}
}
| public void handleRequestBody(SolrQueryRequest req, SolrQueryResponse rsp) throws Exception, ParseException, InstantiationException, IllegalAccessException
{
// int sleep = req.getParams().getInt("sleep",0);
// if (sleep > 0) {log.error("SLEEPING for " + sleep); Thread.sleep(sleep);}
ResponseBuilder rb = new ResponseBuilder(req, rsp, components);
if (rb.requestInfo != null) {
rb.requestInfo.setResponseBuilder(rb);
}
boolean dbg = req.getParams().getBool(CommonParams.DEBUG_QUERY, false);
rb.setDebug(dbg);
if (dbg == false){//if it's true, we are doing everything anyway.
SolrPluginUtils.getDebugInterests(req.getParams().getParams(CommonParams.DEBUG), rb);
}
final RTimer timer = rb.isDebug() ? new RTimer() : null;
ShardHandler shardHandler1 = shardHandlerFactory.getShardHandler();
shardHandler1.checkDistributed(rb);
if (timer == null) {
// non-debugging prepare phase
for( SearchComponent c : components ) {
c.prepare(rb);
}
} else {
// debugging prepare phase
RTimer subt = timer.sub( "prepare" );
for( SearchComponent c : components ) {
rb.setTimer( subt.sub( c.getName() ) );
c.prepare(rb);
rb.getTimer().stop();
}
subt.stop();
}
if (!rb.isDistrib) {
// a normal non-distributed request
// The semantics of debugging vs not debugging are different enough that
// it makes sense to have two control loops
if(!rb.isDebug()) {
// Process
for( SearchComponent c : components ) {
c.process(rb);
}
}
else {
// Process
RTimer subt = timer.sub( "process" );
for( SearchComponent c : components ) {
rb.setTimer( subt.sub( c.getName() ) );
c.process(rb);
rb.getTimer().stop();
}
subt.stop();
timer.stop();
// add the timing info
if (rb.isDebugTimings()) {
rb.addDebugInfo("timing", timer.asNamedList() );
}
}
} else {
// a distributed request
if (rb.outgoing == null) {
rb.outgoing = new LinkedList<ShardRequest>();
}
rb.finished = new ArrayList<ShardRequest>();
int nextStage = 0;
do {
rb.stage = nextStage;
nextStage = ResponseBuilder.STAGE_DONE;
// call all components
for( SearchComponent c : components ) {
// the next stage is the minimum of what all components report
nextStage = Math.min(nextStage, c.distributedProcess(rb));
}
// check the outgoing queue and send requests
while (rb.outgoing.size() > 0) {
// submit all current request tasks at once
while (rb.outgoing.size() > 0) {
ShardRequest sreq = rb.outgoing.remove(0);
sreq.actualShards = sreq.shards;
if (sreq.actualShards==ShardRequest.ALL_SHARDS) {
sreq.actualShards = rb.shards;
}
sreq.responses = new ArrayList<ShardResponse>();
// TODO: map from shard to address[]
for (String shard : sreq.actualShards) {
ModifiableSolrParams params = new ModifiableSolrParams(sreq.params);
params.remove(ShardParams.SHARDS); // not a top-level request
params.set("distrib", "false"); // not a top-level request
params.remove("indent");
params.remove(CommonParams.HEADER_ECHO_PARAMS);
params.set(ShardParams.IS_SHARD, true); // a sub (shard) request
params.set(ShardParams.SHARD_URL, shard); // so the shard knows what was asked
if (rb.requestInfo != null) {
// we could try and detect when this is needed, but it could be tricky
params.set("NOW", Long.toString(rb.requestInfo.getNOW().getTime()));
}
String shardQt = req.getParams().get(ShardParams.SHARDS_QT);
if (shardQt == null) {
params.remove(CommonParams.QT);
} else {
params.set(CommonParams.QT, shardQt);
}
shardHandler1.submit(sreq, shard, params);
}
}
// now wait for replies, but if anyone puts more requests on
// the outgoing queue, send them out immediately (by exiting
// this loop)
while (rb.outgoing.size() == 0) {
ShardResponse srsp = shardHandler1.takeCompletedOrError();
if (srsp == null) break; // no more requests to wait for
// Was there an exception? If so, abort everything and
// rethrow
if (srsp.getException() != null) {
shardHandler1.cancelAll();
if (srsp.getException() instanceof SolrException) {
throw (SolrException)srsp.getException();
} else {
throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, srsp.getException());
}
}
rb.finished.add(srsp.getShardRequest());
// let the components see the responses to the request
for(SearchComponent c : components) {
c.handleResponses(rb, srsp.getShardRequest());
}
}
}
for(SearchComponent c : components) {
c.finishStage(rb);
}
// we are done when the next stage is MAX_VALUE
} while (nextStage != Integer.MAX_VALUE);
}
}
|
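In the distributed branch above, the two versions handle the sub-request's distrib parameter differently: removing it leaves the receiving core to fall back on whatever default it applies, while setting it to "false" pins the value even if the incoming parameters carried distrib=true. A schematic illustration with a plain parameter map (the fallback default of "true" below is an assumption made for the demo, not Solr behaviour):
import java.util.HashMap;
import java.util.Map;
public class ParamOverrideDemo {
    // stand-in for the receiving side: falls back to an assumed default when the parameter is absent
    static boolean isDistrib(Map<String, String> params) {
        return Boolean.parseBoolean(params.getOrDefault("distrib", "true"));
    }
    public static void main(String[] args) {
        Map<String, String> inherited = new HashMap<String, String>();
        inherited.put("distrib", "true"); // copied from the top-level request
        Map<String, String> removed = new HashMap<String, String>(inherited);
        removed.remove("distrib");        // relies on the receiver's default
        Map<String, String> pinned = new HashMap<String, String>(inherited);
        pinned.put("distrib", "false");   // explicit, independent of any default
        System.out.println(isDistrib(removed)); // true  - the assumed default wins
        System.out.println(isDistrib(pinned));  // false - the explicit value wins
    }
}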
public void testFeature1() throws Exception {
Subsystem subsystem = installSubsystemFromFile("feature1.ssa");
try {
assertSymbolicName("org.apache.aries.subsystem.feature1", subsystem);
assertVersion("1.0.0", subsystem);
assertConstituents(2, subsystem);
// TODO Test internal events for installation.
startSubsystem(subsystem);
// TODO Test internal events for starting.
stopSubsystem(subsystem);
// TODO Test internal events for stopping.
}
finally {
uninstallSubsystem(subsystem);
// TODO Test internal events for uninstalling.
}
}
| public void testFeature1() throws Exception {
Subsystem subsystem = installSubsystemFromFile("feature1.ssa");
try {
assertSymbolicName("org.apache.aries.subsystem.feature1", subsystem);
assertVersion("1.0.0", subsystem);
assertConstituents(3, subsystem);
// TODO Test internal events for installation.
startSubsystem(subsystem);
// TODO Test internal events for starting.
stopSubsystem(subsystem);
// TODO Test internal events for stopping.
}
finally {
uninstallSubsystem(subsystem);
// TODO Test internal events for uninstalling.
}
}
|
public static Option[] configuration() {
Option[] options = options(
// Log
mavenBundle("org.ops4j.pax.logging", "pax-logging-api"),
mavenBundle("org.ops4j.pax.logging", "pax-logging-service"),
// Felix Config Admin
mavenBundle("org.apache.felix", "org.apache.felix.configadmin"),
// Felix mvn url handler
mavenBundle("org.ops4j.pax.url", "pax-url-mvn"),
// this is how you set the default log level when using pax
// logging (logProfile)
systemProperty("org.ops4j.pax.logging.DefaultServiceLog.level").value("DEBUG"),
systemProperty("org.osgi.framework.bsnversion").value("multiple"),
// Bundles
mavenBundle("org.eclipse.osgi", "services").version("3.3.0-v20110523"),
mavenBundle("org.eclipse.equinox", "region").version("1.0.0.v20110518"),
mavenBundle("org.apache.aries.testsupport", "org.apache.aries.testsupport.unit"),
mavenBundle("org.apache.aries.application", "org.apache.aries.application.api"),
mavenBundle("org.apache.aries", "org.apache.aries.util"),
mavenBundle("org.apache.aries.application", "org.apache.aries.application.utils"),
mavenBundle("org.apache.felix", "org.apache.felix.bundlerepository"),
mavenBundle("org.eclipse.equinox", "coordinator"),
mavenBundle("org.eclipse.equinox", "org.eclipse.equinox.event"),
mavenBundle("org.apache.aries.subsystem", "org.apache.aries.subsystem.api"),
mavenBundle("org.apache.aries.subsystem", "org.apache.aries.subsystem.core"),
mavenBundle("org.apache.aries.subsystem", "org.apache.aries.subsystem.executor"),
// org.ops4j.pax.exam.container.def.PaxRunnerOptions.vmOption("-Xdebug -Xrunjdwp:transport=dt_socket,server=y,suspend=y,address=5005"),
PaxRunnerOptions.rawPaxRunnerOption("config", "classpath:ss-runner.properties"),
equinox().version("3.8.0.v20110621"));
options = updateOptions(options);
| public static Option[] configuration() {
Option[] options = options(
// Log
mavenBundle("org.ops4j.pax.logging", "pax-logging-api"),
mavenBundle("org.ops4j.pax.logging", "pax-logging-service"),
// Felix Config Admin
mavenBundle("org.apache.felix", "org.apache.felix.configadmin"),
// Felix mvn url handler
mavenBundle("org.ops4j.pax.url", "pax-url-mvn"),
// this is how you set the default log level when using pax
// logging (logProfile)
systemProperty("org.ops4j.pax.logging.DefaultServiceLog.level").value("DEBUG"),
systemProperty("org.osgi.framework.bsnversion").value("multiple"),
// Bundles
mavenBundle("org.eclipse.osgi", "services").version("3.3.0-v20110523"),
mavenBundle("org.eclipse.equinox", "region").version("1.0.0.v20110518"),
mavenBundle("org.apache.aries.testsupport", "org.apache.aries.testsupport.unit"),
mavenBundle("org.apache.aries.application", "org.apache.aries.application.api"),
mavenBundle("org.apache.aries", "org.apache.aries.util"),
mavenBundle("org.apache.aries.application", "org.apache.aries.application.utils"),
mavenBundle("org.apache.felix", "org.apache.felix.bundlerepository"),
mavenBundle("org.eclipse.equinox", "coordinator"),
mavenBundle("org.eclipse.equinox", "org.eclipse.equinox.event"),
mavenBundle("org.apache.aries.subsystem", "org.apache.aries.subsystem.api"),
mavenBundle("org.apache.aries.subsystem", "org.apache.aries.subsystem.core"),
mavenBundle("org.apache.aries.subsystem", "org.apache.aries.subsystem.executor"),
// org.ops4j.pax.exam.container.def.PaxRunnerOptions.vmOption("-Xdebug -Xrunjdwp:transport=dt_socket,server=y,suspend=y,address=5005"),
PaxRunnerOptions.rawPaxRunnerOption("config", "classpath:ss-runner.properties"),
equinox().version("3.8.0-SNAPSHOT"));
options = updateOptions(options);
|
public static DeploymentManifest newInstance(SubsystemManifest manifest, SubsystemEnvironment environment) {
DeploymentManifest result = new DeploymentManifest();
result.headers.put(ManifestVersionHeader.NAME, manifest.getManifestVersion());
Collection<Requirement> requirements = new ArrayList<Requirement>();
for (SubsystemContentHeader.Content content : manifest.getSubsystemContent().getContents()) {
Requirement requirement = OsgiIdentityRequirement.newInstance(content);
requirements.add(requirement);
}
// TODO This does not validate that all content bundles were found.
Map<Resource, List<Wire>> resolution = Activator.getResolver().resolve(environment, requirements.toArray(new Requirement[requirements.size()]));
// TODO Once we have a resolver that actually returns lists of wires, we can use them to compute other manifest headers such as Import-Package.
Collection<Resource> deployedContent = new ArrayList<Resource>();
Collection<Resource> provisionResource = new ArrayList<Resource>();
for (Resource resource : resolution.keySet()) {
if (environment.isContentResource(resource))
deployedContent.add(resource);
else
provisionResource.add(resource);
}
result.headers.put(DeployedContentHeader.NAME, DeployedContentHeader.newInstance(deployedContent));
if (!provisionResource.isEmpty())
result.headers.put(ProvisionResourceHeader.NAME, ProvisionResourceHeader.newInstance(provisionResource));
result.headers.put(SubsystemSymbolicNameHeader.NAME, manifest.getSubsystemSymbolicName());
result.headers.put(SubsystemVersionHeader.NAME, manifest.getSubsystemVersion());
SubsystemTypeHeader typeHeader = manifest.getSubsystemType();
result.headers.put(SubsystemTypeHeader.NAME, typeHeader);
// TODO Add to constants.
if ("osgi.application".equals(typeHeader.getValue())) {
// TODO Compute additional headers for an application.
}
// TODO Add to constants.
else if ("osgi.composite".equals(typeHeader.getValue())) {
// TODO Compute additional headers for a composite.
}
// Features require no additional headers.
return result;
}
| public static DeploymentManifest newInstance(SubsystemManifest manifest, SubsystemEnvironment environment) {
DeploymentManifest result = new DeploymentManifest();
result.headers.put(ManifestVersionHeader.NAME, manifest.getManifestVersion());
Collection<Requirement> requirements = new ArrayList<Requirement>();
for (SubsystemContentHeader.Content content : manifest.getSubsystemContent().getContents()) {
Requirement requirement = new OsgiIdentityRequirement(content.getName(), content.getVersionRange(), content.getType(), false);
requirements.add(requirement);
}
// TODO This does not validate that all content bundles were found.
Map<Resource, List<Wire>> resolution = Activator.getResolver().resolve(environment, requirements.toArray(new Requirement[requirements.size()]));
// TODO Once we have a resolver that actually returns lists of wires, we can use them to compute other manifest headers such as Import-Package.
Collection<Resource> deployedContent = new ArrayList<Resource>();
Collection<Resource> provisionResource = new ArrayList<Resource>();
for (Resource resource : resolution.keySet()) {
if (environment.isContentResource(resource))
deployedContent.add(resource);
else
provisionResource.add(resource);
}
result.headers.put(DeployedContentHeader.NAME, DeployedContentHeader.newInstance(deployedContent));
if (!provisionResource.isEmpty())
result.headers.put(ProvisionResourceHeader.NAME, ProvisionResourceHeader.newInstance(provisionResource));
result.headers.put(SubsystemSymbolicNameHeader.NAME, manifest.getSubsystemSymbolicName());
result.headers.put(SubsystemVersionHeader.NAME, manifest.getSubsystemVersion());
SubsystemTypeHeader typeHeader = manifest.getSubsystemType();
result.headers.put(SubsystemTypeHeader.NAME, typeHeader);
// TODO Add to constants.
if ("osgi.application".equals(typeHeader.getValue())) {
// TODO Compute additional headers for an application.
}
// TODO Add to constants.
else if ("osgi.composite".equals(typeHeader.getValue())) {
// TODO Compute additional headers for a composite.
}
// Features require no additional headers.
return result;
}
|
public void renameCf() throws ConfigurationException, IOException, ExecutionException, InterruptedException
{
DecoratedKey dk = Util.dk("key0");
final KSMetaData ks = DatabaseDescriptor.getTableDefinition("Keyspace2");
assert ks != null;
final CFMetaData oldCfm = ks.cfMetaData().get("Standard1");
assert oldCfm != null;
// write some data, force a flush, then verify that files exist on disk.
RowMutation rm = new RowMutation(ks.name, dk.key);
for (int i = 0; i < 100; i++)
rm.add(new QueryPath(oldCfm.cfName, null, ("col" + i).getBytes()), "anyvalue".getBytes(), new TimestampClock(1L));
rm.apply();
ColumnFamilyStore store = Table.open(oldCfm.tableName).getColumnFamilyStore(oldCfm.cfName);
assert store != null;
store.forceBlockingFlush();
int fileCount = DefsTable.getFiles(oldCfm.tableName, oldCfm.cfName).size();
assert fileCount > 0;
final String cfName = "St4ndard1Replacement";
new RenameColumnFamily(oldCfm.tableName, oldCfm.cfName, cfName).apply();
assert !DatabaseDescriptor.getTableDefinition(ks.name).cfMetaData().containsKey(oldCfm.cfName);
assert DatabaseDescriptor.getTableDefinition(ks.name).cfMetaData().containsKey(cfName);
// verify that new files are there.
assert DefsTable.getFiles(oldCfm.tableName, cfName).size() == fileCount;
// do some reads.
store = Table.open(oldCfm.tableName).getColumnFamilyStore(cfName);
assert store != null;
ColumnFamily cfam = store.getColumnFamily(QueryFilter.getSliceFilter(dk, new QueryPath(cfName), "".getBytes(), "".getBytes(), null, false, 1000));
assert cfam.getSortedColumns().size() == 100; // should be good enough?
// do some writes
rm = new RowMutation(ks.name, dk.key);
rm.add(new QueryPath(cfName, null, "col5".getBytes()), "updated".getBytes(), new TimestampClock(2L));
rm.apply();
store.forceBlockingFlush();
cfam = store.getColumnFamily(QueryFilter.getNamesFilter(dk, new QueryPath(cfName), "col5".getBytes()));
assert cfam.getColumnCount() == 1;
assert Arrays.equals(cfam.getColumn("col5".getBytes()).value(), "updated".getBytes());
}
| public void renameCf() throws ConfigurationException, IOException, ExecutionException, InterruptedException
{
DecoratedKey dk = Util.dk("key0");
final KSMetaData ks = DatabaseDescriptor.getTableDefinition("Keyspace2");
assert ks != null;
final CFMetaData oldCfm = ks.cfMetaData().get("Standard1");
assert oldCfm != null;
// write some data, force a flush, then verify that files exist on disk.
RowMutation rm = new RowMutation(ks.name, dk.key);
for (int i = 0; i < 100; i++)
rm.add(new QueryPath(oldCfm.cfName, null, ("col" + i).getBytes()), "anyvalue".getBytes(), new TimestampClock(1L));
rm.apply();
ColumnFamilyStore store = Table.open(oldCfm.tableName).getColumnFamilyStore(oldCfm.cfName);
assert store != null;
store.forceBlockingFlush();
int fileCount = DefsTable.getFiles(oldCfm.tableName, oldCfm.cfName).size();
assert fileCount > 0;
final String cfName = "St4ndard1Replacement";
new RenameColumnFamily(oldCfm.tableName, oldCfm.cfName, cfName).apply();
assert !DatabaseDescriptor.getTableDefinition(ks.name).cfMetaData().containsKey(oldCfm.cfName);
assert DatabaseDescriptor.getTableDefinition(ks.name).cfMetaData().containsKey(cfName);
// verify that new files are there.
assert DefsTable.getFiles(oldCfm.tableName, cfName).size() == fileCount;
// do some reads.
store = Table.open(oldCfm.tableName).getColumnFamilyStore(cfName);
assert store != null;
ColumnFamily cfam = store.getColumnFamily(QueryFilter.getSliceFilter(dk, new QueryPath(cfName), "".getBytes(), "".getBytes(), false, 1000));
assert cfam.getSortedColumns().size() == 100; // should be good enough?
// do some writes
rm = new RowMutation(ks.name, dk.key);
rm.add(new QueryPath(cfName, null, "col5".getBytes()), "updated".getBytes(), new TimestampClock(2L));
rm.apply();
store.forceBlockingFlush();
cfam = store.getColumnFamily(QueryFilter.getNamesFilter(dk, new QueryPath(cfName), "col5".getBytes()));
assert cfam.getColumnCount() == 1;
assert Arrays.equals(cfam.getColumn("col5".getBytes()).value(), "updated".getBytes());
}
|
public static ColumnFamily getDroppedCFs() throws IOException
{
ColumnFamilyStore cfs = Table.open(Table.SYSTEM_TABLE).getColumnFamilyStore(SystemTable.STATUS_CF);
return cfs.getColumnFamily(QueryFilter.getSliceFilter(decorate(GRAVEYARD_KEY), new QueryPath(STATUS_CF), "".getBytes(), "".getBytes(), null, false, 100));
}
| public static ColumnFamily getDroppedCFs() throws IOException
{
ColumnFamilyStore cfs = Table.open(Table.SYSTEM_TABLE).getColumnFamilyStore(SystemTable.STATUS_CF);
return cfs.getColumnFamily(QueryFilter.getSliceFilter(decorate(GRAVEYARD_KEY), new QueryPath(STATUS_CF), "".getBytes(), "".getBytes(), false, 100));
}
|
public static Collection<IColumn> getLocalMigrations(UUID start, UUID end)
{
DecoratedKey dkey = StorageService.getPartitioner().decorateKey(MIGRATIONS_KEY);
Table defs = Table.open(Table.SYSTEM_TABLE);
ColumnFamilyStore cfStore = defs.getColumnFamilyStore(Migration.MIGRATIONS_CF);
QueryFilter filter = QueryFilter.getSliceFilter(dkey, new QueryPath(MIGRATIONS_CF), UUIDGen.decompose(start), UUIDGen.decompose(end), null, false, 1000);
ColumnFamily cf = cfStore.getColumnFamily(filter);
return cf.getSortedColumns();
}
| public static Collection<IColumn> getLocalMigrations(UUID start, UUID end)
{
DecoratedKey dkey = StorageService.getPartitioner().decorateKey(MIGRATIONS_KEY);
Table defs = Table.open(Table.SYSTEM_TABLE);
ColumnFamilyStore cfStore = defs.getColumnFamilyStore(Migration.MIGRATIONS_CF);
QueryFilter filter = QueryFilter.getSliceFilter(dkey, new QueryPath(MIGRATIONS_CF), UUIDGen.decompose(start), UUIDGen.decompose(end), false, 1000);
ColumnFamily cf = cfStore.getColumnFamily(filter);
return cf.getSortedColumns();
}
|
public SnowballFilter(TokenStream in, String name) {
super(in);
try {
Class<? extends SnowballProgram> stemClass =
Class.forName("org.tartarus.snowball.ext." + name + "Stemmer").asSubclass(SnowballProgram.class);
stemmer = stemClass.newInstance();
} catch (Exception e) {
throw new RuntimeException(e.toString());
}
}
| public SnowballFilter(TokenStream in, String name) {
super(in);
try {
Class<? extends SnowballProgram> stemClass =
Class.forName("org.tartarus.snowball.ext." + name + "Stemmer").asSubclass(SnowballProgram.class);
stemmer = stemClass.newInstance();
} catch (Exception e) {
throw new IllegalArgumentException("Invalid stemmer class specified: " + name, e);
}
}
|
protected void copyState(SQLChar other) {
this.value = other.value;
this.rawData = other.rawData;
this.rawLength = other.rawLength;
this.cKey = other.cKey;
this.stream = other.stream;
this._clobValue = other._clobValue;
this.localeFinder = localeFinder;
}
| protected void copyState(SQLChar other) {
this.value = other.value;
this.rawData = other.rawData;
this.rawLength = other.rawLength;
this.cKey = other.cKey;
this.stream = other.stream;
this._clobValue = other._clobValue;
this.localeFinder = other.localeFinder;
}
|
public NamedSPILoader(Class<S> clazz) {
this(clazz, Thread.currentThread().getContextClassLoader());
}
public NamedSPILoader(Class<S> clazz, ClassLoader classloader) {
this.clazz = clazz;
reload(classloader);
}
/**
* Reloads the internal SPI list from the given {@link ClassLoader}.
* Changes to the service list are visible after the method ends; all
* iterators ({@link #iterator()},...) stay consistent.
*
* <p><b>NOTE:</b> Only new service providers are added, existing ones are
* never removed or replaced.
*
* <p><em>This method is expensive and should only be called for discovery
* of new service providers on the given classpath/classloader!</em>
*/
public void reload(ClassLoader classloader) {
final LinkedHashMap<String,S> services = new LinkedHashMap<String,S>(this.services);
final SPIClassIterator<S> loader = SPIClassIterator.get(clazz, classloader);
while (loader.hasNext()) {
final Class<? extends S> c = loader.next();
try {
final S service = c.newInstance();
final String name = service.getName();
// only add the first one for each name, later services will be ignored
// this allows placing services before others on the classpath so that
// they are used instead of the others
if (!services.containsKey(name)) {
checkServiceName(name);
services.put(name, service);
}
} catch (Exception e) {
throw new ServiceConfigurationError("Cannot instantiate SPI class: " + c.getName(), e);
}
}
this.services = Collections.unmodifiableMap(services);
}
| public NamedSPILoader(Class<S> clazz) {
this(clazz, Thread.currentThread().getContextClassLoader());
}
public NamedSPILoader(Class<S> clazz, ClassLoader classloader) {
this.clazz = clazz;
reload(classloader);
}
/**
* Reloads the internal SPI list from the given {@link ClassLoader}.
* Changes to the service list are visible after the method ends; all
* iterators ({@link #iterator()},...) stay consistent.
*
* <p><b>NOTE:</b> Only new service providers are added, existing ones are
* never removed or replaced.
*
* <p><em>This method is expensive and should only be called for discovery
* of new service providers on the given classpath/classloader!</em>
*/
public synchronized void reload(ClassLoader classloader) {
final LinkedHashMap<String,S> services = new LinkedHashMap<String,S>(this.services);
final SPIClassIterator<S> loader = SPIClassIterator.get(clazz, classloader);
while (loader.hasNext()) {
final Class<? extends S> c = loader.next();
try {
final S service = c.newInstance();
final String name = service.getName();
// only add the first one for each name, later services will be ignored
// this allows placing services before others on the classpath so that
// they are used instead of the others
if (!services.containsKey(name)) {
checkServiceName(name);
services.put(name, service);
}
} catch (Exception e) {
throw new ServiceConfigurationError("Cannot instantiate SPI class: " + c.getName(), e);
}
}
this.services = Collections.unmodifiableMap(services);
}
|
private void doTest(final SpatialOperation operation) throws IOException {
//first show that when there's no data, a query will result in no results
{
Query query = strategy.makeQuery(new SpatialArgs(operation, randomRectangle()));
SearchResults searchResults = executeQuery(query, 1);
assertEquals(0, searchResults.numFound);
}
final boolean biasContains = (operation == SpatialOperation.Contains);
//Main index loop:
Map<String, Shape> indexedShapes = new LinkedHashMap<>();
Map<String, Shape> indexedShapesGS = new LinkedHashMap<>();//grid snapped
final int numIndexedShapes = randomIntBetween(1, 6);
boolean indexedAtLeastOneShapePair = false;
for (int i = 0; i < numIndexedShapes; i++) {
String id = "" + i;
Shape indexedShape;
int R = random().nextInt(12);
if (R == 0) {//1 in 12
indexedShape = null;
} else if (R == 1) {//1 in 12
indexedShape = randomPoint();//just one point
} else if (R <= 4) {//3 in 12
//comprised of more than one shape
indexedShape = randomShapePairRect(biasContains);
indexedAtLeastOneShapePair = true;
} else {
indexedShape = randomRectangle();//just one rect
}
indexedShapes.put(id, indexedShape);
indexedShapesGS.put(id, gridSnap(indexedShape));
adoc(id, indexedShape);
if (random().nextInt(10) == 0)
commit();//intermediate commit, produces extra segments
}
//delete some documents randomly
Iterator<String> idIter = indexedShapes.keySet().iterator();
while (idIter.hasNext()) {
String id = idIter.next();
if (random().nextInt(10) == 0) {
deleteDoc(id);
idIter.remove();
indexedShapesGS.remove(id);
}
}
commit();
//Main query loop:
final int numQueryShapes = atLeast(20);
for (int i = 0; i < numQueryShapes; i++) {
int scanLevel = randomInt(grid.getMaxLevels());
((RecursivePrefixTreeStrategy) strategy).setPrefixGridScanLevel(scanLevel);
final Shape queryShape;
switch (randomInt(10)) {
case 0: queryShape = randomPoint(); break;
case 1:case 2:case 3:
if (!indexedAtLeastOneShapePair) { // avoids ShapePair.relate(ShapePair), which isn't reliable
queryShape = randomShapePairRect(biasContains);
break;
}
default: queryShape = randomRectangle();
}
final Shape queryShapeGS = gridSnap(queryShape);
final boolean opIsDisjoint = operation == SpatialOperation.IsDisjointTo;
//Generate truth via brute force:
// We ensure true-positive matches (if the predicate on the raw shapes matches,
// then the search should find those same matches).
// Grid approximations may also produce false-positive matches.
Set<String> expectedIds = new LinkedHashSet<>();//true-positives
Set<String> secondaryIds = new LinkedHashSet<>();//false-positives (unless disjoint)
for (Map.Entry<String, Shape> entry : indexedShapes.entrySet()) {
String id = entry.getKey();
Shape indexedShapeCompare = entry.getValue();
if (indexedShapeCompare == null)
continue;
Shape queryShapeCompare = queryShape;
if (operation.evaluate(indexedShapeCompare, queryShapeCompare)) {
expectedIds.add(id);
if (opIsDisjoint) {
//if no longer intersect after buffering them, for disjoint, remember this
indexedShapeCompare = indexedShapesGS.get(id);
queryShapeCompare = queryShapeGS;
if (!operation.evaluate(indexedShapeCompare, queryShapeCompare))
secondaryIds.add(id);
}
} else if (!opIsDisjoint) {
//buffer either the indexed or query shape (via gridSnap) and try again
if (operation == SpatialOperation.Intersects) {
indexedShapeCompare = indexedShapesGS.get(id);
queryShapeCompare = queryShapeGS;
//TODO Unfortunately, grid-snapping both can result in intersections that otherwise
// wouldn't happen when the grids are adjacent. Not a big deal but our test is just a
// bit more lenient.
} else if (operation == SpatialOperation.Contains) {
indexedShapeCompare = indexedShapesGS.get(id);
} else if (operation == SpatialOperation.IsWithin) {
queryShapeCompare = queryShapeGS;
}
if (operation.evaluate(indexedShapeCompare, queryShapeCompare))
secondaryIds.add(id);
}
}
//Search and verify results
SpatialArgs args = new SpatialArgs(operation, queryShape);
if (queryShape instanceof ShapePair)
args.setDistErrPct(0.0);//a hack; we want to be more detailed than gridSnap(queryShape)
Query query = strategy.makeQuery(args);
SearchResults got = executeQuery(query, 100);
Set<String> remainingExpectedIds = new LinkedHashSet<>(expectedIds);
for (SearchResult result : got.results) {
String id = result.getId();
boolean removed = remainingExpectedIds.remove(id);
if (!removed && (!opIsDisjoint && !secondaryIds.contains(id))) {
fail("Shouldn't match", id, indexedShapes, indexedShapesGS, queryShape);
}
}
if (opIsDisjoint)
remainingExpectedIds.removeAll(secondaryIds);
if (!remainingExpectedIds.isEmpty()) {
String id = remainingExpectedIds.iterator().next();
fail("Should have matched", id, indexedShapes, indexedShapesGS, queryShape);
}
}
}
| private void doTest(final SpatialOperation operation) throws IOException {
//first show that when there's no data, a query will result in no results
{
Query query = strategy.makeQuery(new SpatialArgs(operation, randomRectangle()));
SearchResults searchResults = executeQuery(query, 1);
assertEquals(0, searchResults.numFound);
}
final boolean biasContains = (operation == SpatialOperation.Contains);
//Main index loop:
Map<String, Shape> indexedShapes = new LinkedHashMap<>();
Map<String, Shape> indexedShapesGS = new LinkedHashMap<>();//grid snapped
final int numIndexedShapes = randomIntBetween(1, 6);
boolean indexedAtLeastOneShapePair = false;
for (int i = 0; i < numIndexedShapes; i++) {
String id = "" + i;
Shape indexedShape;
int R = random().nextInt(12);
if (R == 0) {//1 in 12
indexedShape = null;
} else if (R == 1) {//1 in 12
indexedShape = randomPoint();//just one point
} else if (R <= 4) {//3 in 12
//comprised of more than one shape
indexedShape = randomShapePairRect(biasContains);
indexedAtLeastOneShapePair = true;
} else {
indexedShape = randomRectangle();//just one rect
}
indexedShapes.put(id, indexedShape);
indexedShapesGS.put(id, gridSnap(indexedShape));
adoc(id, indexedShape);
if (random().nextInt(10) == 0)
commit();//intermediate commit, produces extra segments
}
//delete some documents randomly
Iterator<String> idIter = indexedShapes.keySet().iterator();
while (idIter.hasNext()) {
String id = idIter.next();
if (random().nextInt(10) == 0) {
deleteDoc(id);
idIter.remove();
indexedShapesGS.remove(id);
}
}
commit();
//Main query loop:
final int numQueryShapes = atLeast(20);
for (int i = 0; i < numQueryShapes; i++) {
int scanLevel = randomInt(grid.getMaxLevels());
((RecursivePrefixTreeStrategy) strategy).setPrefixGridScanLevel(scanLevel);
final Shape queryShape;
switch (randomInt(10)) {
case 0: queryShape = randomPoint(); break;
case 1:case 2:case 3:
if (!indexedAtLeastOneShapePair) { // avoids ShapePair.relate(ShapePair), which isn't reliable
queryShape = randomShapePairRect(!biasContains);//invert biasContains for query side
break;
}
default: queryShape = randomRectangle();
}
final Shape queryShapeGS = gridSnap(queryShape);
final boolean opIsDisjoint = operation == SpatialOperation.IsDisjointTo;
//Generate truth via brute force:
// We ensure true-positive matches (if the predicate on the raw shapes matches,
// then the search should find those same matches).
// Grid approximations may also produce false-positive matches.
Set<String> expectedIds = new LinkedHashSet<>();//true-positives
Set<String> secondaryIds = new LinkedHashSet<>();//false-positives (unless disjoint)
for (Map.Entry<String, Shape> entry : indexedShapes.entrySet()) {
String id = entry.getKey();
Shape indexedShapeCompare = entry.getValue();
if (indexedShapeCompare == null)
continue;
Shape queryShapeCompare = queryShape;
if (operation.evaluate(indexedShapeCompare, queryShapeCompare)) {
expectedIds.add(id);
if (opIsDisjoint) {
//if no longer intersect after buffering them, for disjoint, remember this
indexedShapeCompare = indexedShapesGS.get(id);
queryShapeCompare = queryShapeGS;
if (!operation.evaluate(indexedShapeCompare, queryShapeCompare))
secondaryIds.add(id);
}
} else if (!opIsDisjoint) {
//buffer either the indexed or query shape (via gridSnap) and try again
if (operation == SpatialOperation.Intersects) {
indexedShapeCompare = indexedShapesGS.get(id);
queryShapeCompare = queryShapeGS;
//TODO Unfortunately, grid-snapping both can result in intersections that otherwise
// wouldn't happen when the grids are adjacent. Not a big deal but our test is just a
// bit more lenient.
} else if (operation == SpatialOperation.Contains) {
indexedShapeCompare = indexedShapesGS.get(id);
} else if (operation == SpatialOperation.IsWithin) {
queryShapeCompare = queryShapeGS;
}
if (operation.evaluate(indexedShapeCompare, queryShapeCompare))
secondaryIds.add(id);
}
}
//Search and verify results
SpatialArgs args = new SpatialArgs(operation, queryShape);
if (queryShape instanceof ShapePair)
args.setDistErrPct(0.0);//a hack; we want to be more detailed than gridSnap(queryShape)
Query query = strategy.makeQuery(args);
SearchResults got = executeQuery(query, 100);
Set<String> remainingExpectedIds = new LinkedHashSet<>(expectedIds);
for (SearchResult result : got.results) {
String id = result.getId();
boolean removed = remainingExpectedIds.remove(id);
if (!removed && (!opIsDisjoint && !secondaryIds.contains(id))) {
fail("Shouldn't match", id, indexedShapes, indexedShapesGS, queryShape);
}
}
if (opIsDisjoint)
remainingExpectedIds.removeAll(secondaryIds);
if (!remainingExpectedIds.isEmpty()) {
String id = remainingExpectedIds.iterator().next();
fail("Should have matched", id, indexedShapes, indexedShapesGS, queryShape);
}
}
}
|
public synchronized final long getRecomputedSizeInBytes() {
long size = 0;
Iterator it = fileMap.values().iterator();
while (it.hasNext())
size += ((RAMFile) it.next()).getSizeInBytes();
return size;
}
/** Like getRecomputedSizeInBytes(), but uses actual file
* lengths rather than buffer allocations (which are
* quantized up to the nearest
* RAMOutputStream.BUFFER_SIZE (now 1024) bytes).
*/
final long getRecomputedActualSizeInBytes() {
long size = 0;
Iterator it = fileMap.values().iterator();
while (it.hasNext())
size += ((RAMFile) it.next()).length;
return size;
}
| public synchronized final long getRecomputedSizeInBytes() {
long size = 0;
Iterator it = fileMap.values().iterator();
while (it.hasNext())
size += ((RAMFile) it.next()).getSizeInBytes();
return size;
}
/** Like getRecomputedSizeInBytes(), but uses actual file
* lengths rather than buffer allocations (which are
* quantized up to the nearest
* RAMOutputStream.BUFFER_SIZE (now 1024) bytes).
*/
final synchronized long getRecomputedActualSizeInBytes() {
long size = 0;
Iterator it = fileMap.values().iterator();
while (it.hasNext())
size += ((RAMFile) it.next()).length;
return size;
}
|
public static SegmentInfo writeDoc(Directory dir, Analyzer analyzer, Similarity similarity, Document doc) throws IOException
{
IndexWriter writer = new IndexWriter(dir, analyzer);
writer.setSimilarity(similarity);
//writer.setUseCompoundFile(false);
writer.addDocument(doc);
writer.flush();
SegmentInfo info = writer.segmentInfos.info(writer.segmentInfos.size()-1);
writer.close();
return info;
}
| public static SegmentInfo writeDoc(Directory dir, Analyzer analyzer, Similarity similarity, Document doc) throws IOException
{
IndexWriter writer = new IndexWriter(dir, analyzer);
writer.setSimilarity(similarity);
//writer.setUseCompoundFile(false);
writer.addDocument(doc);
writer.flush();
SegmentInfo info = writer.newestSegment();
writer.close();
return info;
}
|
private SegmentInfo indexDoc(IndexWriter writer, String fileName)
throws Exception
{
File file = new File(workDir, fileName);
Document doc = FileDocument.Document(file);
writer.addDocument(doc);
writer.flush();
return writer.segmentInfos.info(writer.segmentInfos.size()-1);
}
| private SegmentInfo indexDoc(IndexWriter writer, String fileName)
throws Exception
{
File file = new File(workDir, fileName);
Document doc = FileDocument.Document(file);
writer.addDocument(doc);
writer.flush();
return writer.newestSegment();
}
|
public DecoratedKey getKey()
{
return filter.key;
}
};
ColumnFamily returnCF = container.cloneMeShallow();
filter.collateColumns(returnCF, Collections.singletonList(toCollate), cfs.metadata.comparator, gcBefore);
// "hoist up" the requested data into a more recent sstable
if (sstablesIterated >= cfs.getMinimumCompactionThreshold() && cfs.getCompactionStrategy() instanceof SizeTieredCompactionStrategy)
{
RowMutation rm = new RowMutation(cfs.table.name, new Row(filter.key, returnCF));
try
{
rm.applyUnsafe(); // skipping commitlog is fine since we're just de-fragmenting existing data
}
catch (IOException e)
{
// log and allow the result to be returned
logger.error("Error re-writing read results", e);
}
}
// Caller is responsible for final removeDeletedCF. This is important for cacheRow to work correctly:
return returnCF;
}
finally
{
for (IColumnIterator iter : iterators)
FileUtils.closeQuietly(iter);
SSTableReader.releaseReferences(view.sstables);
}
}
| public DecoratedKey getKey()
{
return filter.key;
}
};
ColumnFamily returnCF = container.cloneMeShallow();
filter.collateColumns(returnCF, Collections.singletonList(toCollate), cfs.metadata.comparator, gcBefore);
// "hoist up" the requested data into a more recent sstable
if (sstablesIterated >= cfs.getMinimumCompactionThreshold() && cfs.getCompactionStrategy() instanceof SizeTieredCompactionStrategy)
{
RowMutation rm = new RowMutation(cfs.table.name, new Row(filter.key, returnCF.cloneMe()));
try
{
rm.applyUnsafe(); // skipping commitlog is fine since we're just de-fragmenting existing data
}
catch (IOException e)
{
// log and allow the result to be returned
logger.error("Error re-writing read results", e);
}
}
// Caller is responsible for final removeDeletedCF. This is important for cacheRow to work correctly:
return returnCF;
}
finally
{
for (IColumnIterator iter : iterators)
FileUtils.closeQuietly(iter);
SSTableReader.releaseReferences(view.sstables);
}
}
|
public void testInvalidLDAPServerConnectionError() throws SQLException {
// setup
Connection conn = getConnection();
// set the ldap properties
setDatabaseProperty("derby.connection.requireAuthentication", "true", conn);
setDatabaseProperty("derby.authentication.provider", "LDAP", conn);
setDatabaseProperty("derby.authentication.server", "noSuchServer", conn);
setDatabaseProperty("derby.authentication.ldap.searchBase", "o=dnString", conn);
setDatabaseProperty("derby.authentication.ldap.searchFilter","(&(objectClass=inetOrgPerson)(uid=%USERNAME%))", conn);
commit();
conn.setAutoCommit(true);
conn.close();
// shutdown the database as system, so the properties take effect
TestConfiguration.getCurrent().shutdownDatabase();
String dbName = TestConfiguration.getCurrent().getDefaultDatabaseName();
// actual test.
// first, try datasource connection
DataSource ds = JDBCDataSource.getDataSource(dbName);
try {
ds.getConnection();
fail("expected java.net.UnknownHostException for datasource");
} catch (SQLException se) {
assertSQLState("08004", se);
// with network server, the java.net.UnknownHostException will be in
// derby.log, the client only gets a 08004 and a somewhat misleading
// warning ('Reason: userid or password invalid')
if (usingEmbedded())
assertTrue(se.getMessage().indexOf("java.net.UnknownHostException")>1);
}
// driver manager connection
String url2 = TestConfiguration.getCurrent().getJDBCUrl(dbName);
try {
DriverManager.getConnection(url2,"user","password").close();
fail("expected java.net.UnknownHostException for driver");
} catch (SQLException se) {
assertSQLState("08004", se);
// with network server, the java.net.UnknownHostException will be in
// derby.log, the client only gets a 08004 and a somewhat misleading
// warning ('Reason: userid or password invalid')
if (usingEmbedded())
assertTrue(se.getMessage().indexOf("java.net.UnknownHostException")>1);
}
// we need to shutdown the system, or the failed connections
// cling to db.lck causing cleanup to fail.
// we *can* shutdown because we don't have authentication required
// set at system level (only database level).
shutdownSystem();
}
| public void testInvalidLDAPServerConnectionError() throws SQLException {
// setup
Connection conn = getConnection();
// set the ldap properties
setDatabaseProperty("derby.connection.requireAuthentication", "true", conn);
setDatabaseProperty("derby.authentication.provider", "LDAP", conn);
setDatabaseProperty("derby.authentication.server", "noSuchServer.invalid", conn);
setDatabaseProperty("derby.authentication.ldap.searchBase", "o=dnString", conn);
setDatabaseProperty("derby.authentication.ldap.searchFilter","(&(objectClass=inetOrgPerson)(uid=%USERNAME%))", conn);
commit();
conn.setAutoCommit(true);
conn.close();
// shutdown the database as system, so the properties take effect
TestConfiguration.getCurrent().shutdownDatabase();
String dbName = TestConfiguration.getCurrent().getDefaultDatabaseName();
// actual test.
// first, try datasource connection
DataSource ds = JDBCDataSource.getDataSource(dbName);
try {
ds.getConnection();
fail("expected java.net.UnknownHostException for datasource");
} catch (SQLException se) {
assertSQLState("08004", se);
// with network server, the java.net.UnknownHostException will be in
// derby.log, the client only gets a 08004 and a somewhat misleading
// warning ('Reason: userid or password invalid')
if (usingEmbedded())
assertTrue(se.getMessage().indexOf("java.net.UnknownHostException")>1);
}
// driver manager connection
String url2 = TestConfiguration.getCurrent().getJDBCUrl(dbName);
try {
DriverManager.getConnection(url2,"user","password").close();
fail("expected java.net.UnknownHostException for driver");
} catch (SQLException se) {
assertSQLState("08004", se);
// with network server, the java.net.UnknownHostException will be in
// derby.log, the client only gets a 08004 and a somewhat misleading
// warning ('Reason: userid or password invalid')
if (usingEmbedded())
assertTrue(se.getMessage().indexOf("java.net.UnknownHostException")>1);
}
// we need to shutdown the system, or the failed connections
// cling to db.lck causing cleanup to fail.
// we *can* shutdown because we don't have authentication required
// set at system level (only database level).
shutdownSystem();
}
|
public List<ColumnOrSuperColumn> get_slice(String keyspace, String key, ColumnParent column_parent, SlicePredicate predicate, int consistency_level)
throws InvalidRequestException, NotFoundException
{
if (logger.isDebugEnabled())
logger.debug("get_slice_from");
ThriftValidation.validateColumnParent(keyspace, column_parent);
if (predicate.column_names != null)
{
return getSlice(new SliceByNamesReadCommand(keyspace, key, column_parent, predicate.column_names), consistency_level);
}
else
{
SliceRange range = predicate.slice_range;
if (range.count < 0)
throw new InvalidRequestException("get_slice requires non-negative count");
return getSlice(new SliceFromReadCommand(keyspace, key, column_parent, range.start, range.finish, range.is_ascending, range.count), consistency_level);
}
}
| public List<ColumnOrSuperColumn> get_slice(String keyspace, String key, ColumnParent column_parent, SlicePredicate predicate, int consistency_level)
throws InvalidRequestException, NotFoundException
{
if (logger.isDebugEnabled())
logger.debug("get_slice_from");
ThriftValidation.validateColumnParent(keyspace, column_parent);
if (predicate.column_names != null)
{
return getSlice(new SliceByNamesReadCommand(keyspace, key, column_parent, predicate.column_names), consistency_level);
}
else
{
SliceRange range = predicate.slice_range;
if (range.count < 0)
throw new InvalidRequestException("get_slice requires non-negative count");
return getSlice(new SliceFromReadCommand(keyspace, key, column_parent, range.start, range.finish, range.reversed, range.count), consistency_level);
}
}
|
private void doTestExactScore(String field, boolean inOrder) throws Exception {
IndexReader r = DirectoryReader.open(dir);
IndexSearcher s = new IndexSearcher(r);
ValueSource vs;
if (inOrder) {
vs = new OrdFieldSource(field);
} else {
vs = new ReverseOrdFieldSource(field);
}
Query q = new FunctionQuery(vs);
TopDocs td = s.search(q, null, 1000);
assertEquals("All docs should be matched!", N_DOCS, td.totalHits);
ScoreDoc sd[] = td.scoreDocs;
for (int i = 0; i < sd.length; i++) {
float score = sd[i].score;
String id = s.getIndexReader().document(sd[i].doc).get(ID_FIELD);
log("-------- " + i + ". Explain doc " + id);
log(s.explain(q, sd[i].doc));
float expectedScore = N_DOCS - i;
assertEquals("score of result " + i + " shuould be " + expectedScore + " != " + score, expectedScore, score, TEST_SCORE_TOLERANCE_DELTA);
String expectedId = inOrder
? id2String(N_DOCS - i) // in-order ==> larger values first
: id2String(i + 1); // reverse ==> smaller values first
assertTrue("id of result " + i + " shuould be " + expectedId + " != " + score, expectedId.equals(id));
}
r.close();
}
| private void doTestExactScore(String field, boolean inOrder) throws Exception {
IndexReader r = DirectoryReader.open(dir);
IndexSearcher s = new IndexSearcher(r);
ValueSource vs;
if (inOrder) {
vs = new OrdFieldSource(field);
} else {
vs = new ReverseOrdFieldSource(field);
}
Query q = new FunctionQuery(vs);
TopDocs td = s.search(q, null, 1000);
assertEquals("All docs should be matched!", N_DOCS, td.totalHits);
ScoreDoc sd[] = td.scoreDocs;
for (int i = 0; i < sd.length; i++) {
float score = sd[i].score;
String id = s.getIndexReader().document(sd[i].doc).get(ID_FIELD);
log("-------- " + i + ". Explain doc " + id);
log(s.explain(q, sd[i].doc));
float expectedScore = N_DOCS - i - 1;
assertEquals("score of result " + i + " shuould be " + expectedScore + " != " + score, expectedScore, score, TEST_SCORE_TOLERANCE_DELTA);
String expectedId = inOrder
? id2String(N_DOCS - i) // in-order ==> larger values first
: id2String(i + 1); // reverse ==> smaller values first
assertTrue("id of result " + i + " shuould be " + expectedId + " != " + score, expectedId.equals(id));
}
r.close();
}
|
public int intVal(int doc) {
return (end - sindex.getOrd(doc+off));
}
};
}
| public int intVal(int doc) {
return (end - sindex.getOrd(doc+off) - 1);
}
};
}
|
public void testBackCompatXml() throws Exception {
setMeUp();
addSolrPropertiesFile();
addSolrXml();
addConfigsForBackCompat();
CoreContainer cc = init();
try {
Properties props = cc.getContainerProperties();
assertEquals("/admin/cores", cc.getAdminPath());
assertEquals("collectionLazy2", cc.getDefaultCoreName());
// Shouldn't get these in properties at this point
assertNull(props.getProperty("cores.adminPath"));
assertNull(props.getProperty("cores.defaultCoreName"));
assertNull(props.getProperty("host"));
assertNull(props.getProperty("port")); // getProperty actually looks at original props.
assertNull(props.getProperty("cores.hostContext"));
assertNull(props.getProperty("cores.zkClientTimeout"));
SolrCore core1 = cc.getCore("collection1");
CoreDescriptor desc = core1.getCoreDescriptor();
assertEquals("collection1", desc.getProperty("solr.core.name"));
// This is too long and ugly to put in. Besides, it varies.
assertNotNull(desc.getProperty("solr.core.instanceDir"));
assertEquals("data/", desc.getProperty("solr.core.dataDir"));
assertEquals("solrconfig-minimal.xml", desc.getProperty("solr.core.configName"));
assertEquals("schema-tiny.xml", desc.getProperty("solr.core.schemaName"));
core1.close();
} finally {
cc.shutdown();
}
}
| public void testBackCompatXml() throws Exception {
setMeUp();
addSolrPropertiesFile();
addSolrXml();
addConfigsForBackCompat();
CoreContainer cc = init();
try {
Properties props = cc.getContainerProperties();
assertEquals("/admin/cores", cc.getAdminPath());
assertEquals("collectionLazy2", cc.getDefaultCoreName());
// Shouldn't get these in properties at this point
assertNull(props.getProperty("cores.adminPath"));
assertNull(props.getProperty("cores.defaultCoreName"));
assertNull(props.getProperty("host"));
assertNull(props.getProperty("port")); // getProperty actually looks at original props.
assertNull(props.getProperty("cores.hostContext"));
assertNull(props.getProperty("cores.zkClientTimeout"));
SolrCore core1 = cc.getCore("collection1");
CoreDescriptor desc = core1.getCoreDescriptor();
assertEquals("collection1", desc.getProperty("solr.core.name"));
// This is too long and ugly to put in. Besides, it varies.
assertNotNull(desc.getProperty("solr.core.instanceDir"));
assertEquals("data" + File.separator, desc.getProperty("solr.core.dataDir"));
assertEquals("solrconfig-minimal.xml", desc.getProperty("solr.core.configName"));
assertEquals("schema-tiny.xml", desc.getProperty("solr.core.schemaName"));
core1.close();
} finally {
cc.shutdown();
}
}
|
protected AbstractCompactedRow getReduced()
{
assert rows.size() > 0;
try
{
AbstractCompactedRow compactedRow = controller.getCompactedRow(rows);
if (compactedRow.isEmpty())
{
controller.invalidateCachedRow(compactedRow.key);
return null;
}
// If the row is cached, we call removeDeleted on it to keep query results coherent. However, it would look
// like some deleted columns lived longer than gc_grace + compaction. This can also free up a large amount of
// memory on long-running instances
controller.removeDeletedInCache(compactedRow.key);
return compactedRow;
}
finally
{
rows.clear();
if ((row++ % controller.getThrottleResolution()) == 0)
{
bytesRead = 0;
for (SSTableScanner scanner : getScanners())
bytesRead += scanner.getFilePointer();
throttle();
}
}
}
| protected AbstractCompactedRow getReduced()
{
assert rows.size() > 0;
try
{
AbstractCompactedRow compactedRow = controller.getCompactedRow(new ArrayList<SSTableIdentityIterator>(rows));
if (compactedRow.isEmpty())
{
controller.invalidateCachedRow(compactedRow.key);
return null;
}
// If the row is cached, we call removeDeleted on it to keep query results coherent. However, it would look
// like some deleted columns lived longer than gc_grace + compaction. This can also free up a large amount of
// memory on long-running instances
controller.removeDeletedInCache(compactedRow.key);
return compactedRow;
}
finally
{
rows.clear();
if ((row++ % controller.getThrottleResolution()) == 0)
{
bytesRead = 0;
for (SSTableScanner scanner : getScanners())
bytesRead += scanner.getFilePointer();
throttle();
}
}
}
|
protected IColumn getReduced()
{
assert container != null;
IColumn reduced = container.iterator().next();
ColumnFamily purged = shouldPurge ? ColumnFamilyStore.removeDeleted(container, controller.gcBefore) : container;
if (purged != null && purged.metadata().getDefaultValidator().isCommutative())
{
CounterColumn.removeOldShards(purged, controller.gcBefore);
}
if (purged == null || !purged.iterator().hasNext())
{
container.clear();
return null;
}
container.clear();
serializedSize += reduced.serializedSize();
size++;
return reduced;
}
}
| protected IColumn getReduced()
{
assert container != null;
IColumn reduced = container.iterator().next();
ColumnFamily purged = shouldPurge ? ColumnFamilyStore.removeDeleted(container, controller.gcBefore) : container;
if (shouldPurge && purged != null && purged.metadata().getDefaultValidator().isCommutative())
{
CounterColumn.removeOldShards(purged, controller.gcBefore);
}
if (purged == null || !purged.iterator().hasNext())
{
container.clear();
return null;
}
container.clear();
serializedSize += reduced.serializedSize();
size++;
return reduced;
}
}
|
final private int mergeMiddle(MergePolicy.OneMerge merge)
throws CorruptIndexException, IOException {
merge.checkAborted(directory);
final String mergedName = merge.info.name;
int mergedDocCount = 0;
SegmentInfos sourceSegments = merge.segments;
SegmentMerger merger = new SegmentMerger(directory, config.getTermIndexInterval(), mergedName, merge,
payloadProcessorProvider,
((FieldInfos) docWriter.getFieldInfos().clone()));
if (infoStream != null) {
message("merging " + merge.segString(directory) + " mergeVectors=" + merge.info.getHasVectors());
}
merge.readers = new ArrayList<SegmentReader>();
merge.readerClones = new ArrayList<SegmentReader>();
merge.info.setHasVectors(merger.fieldInfos().hasVectors());
// This is try/finally to make sure merger's readers are
// closed:
boolean success = false;
try {
int totDocCount = 0;
int segUpto = 0;
while(segUpto < sourceSegments.size()) {
final SegmentInfo info = sourceSegments.info(segUpto);
// Hold onto the "live" reader; we will use this to
// commit merged deletes
final SegmentReader reader = readerPool.get(info, true,
MERGE_READ_BUFFER_SIZE,
-1);
merge.readers.add(reader);
// We clone the segment readers because other
// deletes may come in while we're merging so we
// need readers that will not change
final SegmentReader clone = (SegmentReader) reader.clone(true);
merge.readerClones.add(clone);
if (clone.numDocs() > 0) {
merger.add(clone);
totDocCount += clone.numDocs();
}
segUpto++;
}
if (infoStream != null) {
message("merge: total " + totDocCount + " docs");
}
merge.checkAborted(directory);
// This is where all the work happens:
mergedDocCount = merge.info.docCount = merger.merge();
assert mergedDocCount == totDocCount;
if (infoStream != null) {
message("merge store matchedCount=" + merger.getMatchedSubReaderCount() + " vs " + merge.readers.size());
}
anyNonBulkMerges |= merger.getMatchedSubReaderCount() != merge.readers.size();
assert mergedDocCount == totDocCount: "mergedDocCount=" + mergedDocCount + " vs " + totDocCount;
// Very important to do this before opening the reader
// because SegmentReader must know if prox was written for
// this segment:
merge.info.setHasProx(merger.fieldInfos().hasProx());
boolean useCompoundFile;
synchronized (this) { // Guard segmentInfos
useCompoundFile = mergePolicy.useCompoundFile(segmentInfos, merge.info);
}
if (useCompoundFile) {
success = false;
final String compoundFileName = IndexFileNames.segmentFileName(mergedName, IndexFileNames.COMPOUND_FILE_EXTENSION);
try {
if (infoStream != null) {
message("create compound file " + compoundFileName);
}
merger.createCompoundFile(compoundFileName, merge.info);
success = true;
} catch (IOException ioe) {
synchronized(this) {
if (merge.isAborted()) {
// This can happen if rollback or close(false)
// is called -- fall through to logic below to
// remove the partially created CFS:
} else {
handleMergeException(ioe, merge);
}
}
} catch (Throwable t) {
handleMergeException(t, merge);
} finally {
if (!success) {
if (infoStream != null) {
message("hit exception creating compound file during merge");
}
synchronized(this) {
deleter.deleteFile(compoundFileName);
deleter.deleteNewFiles(merge.info.files());
}
}
}
success = false;
synchronized(this) {
// delete new non cfs files directly: they were never
// registered with IFD
deleter.deleteNewFiles(merge.info.files());
if (merge.isAborted()) {
if (infoStream != null) {
message("abort merge after building CFS");
}
deleter.deleteFile(compoundFileName);
return 0;
}
}
merge.info.setUseCompoundFile(true);
}
final IndexReaderWarmer mergedSegmentWarmer = config.getMergedSegmentWarmer();
final int termsIndexDivisor;
final boolean loadDocStores;
if (mergedSegmentWarmer != null) {
// Load terms index & doc stores so the segment
// warmer can run searches, load documents/term
// vectors
termsIndexDivisor = config.getReaderTermsIndexDivisor();
loadDocStores = true;
} else {
termsIndexDivisor = -1;
loadDocStores = false;
}
// TODO: in the non-realtime case, we may want to only
// keep deletes (it's costly to open entire reader
// when we just need deletes)
final SegmentReader mergedReader = readerPool.get(merge.info, loadDocStores, BufferedIndexInput.BUFFER_SIZE, termsIndexDivisor);
try {
if (poolReaders && mergedSegmentWarmer != null) {
mergedSegmentWarmer.warm(mergedReader);
}
if (!commitMerge(merge, mergedReader)) {
// commitMerge will return false if this merge was aborted
return 0;
}
} finally {
synchronized(this) {
if (readerPool.release(mergedReader)) {
// Must checkpoint after releasing the
// mergedReader since it may have written a new
// deletes file:
checkpoint();
}
}
}
success = true;
} finally {
// Readers are already closed in commitMerge if we didn't hit
// an exc:
if (!success) {
closeMergeReaders(merge, true);
}
}
return mergedDocCount;
}
| final private int mergeMiddle(MergePolicy.OneMerge merge)
throws CorruptIndexException, IOException {
merge.checkAborted(directory);
final String mergedName = merge.info.name;
int mergedDocCount = 0;
SegmentInfos sourceSegments = merge.segments;
SegmentMerger merger = new SegmentMerger(directory, config.getTermIndexInterval(), mergedName, merge,
payloadProcessorProvider,
((FieldInfos) docWriter.getFieldInfos().clone()));
if (infoStream != null) {
message("merging " + merge.segString(directory) + " mergeVectors=" + merge.info.getHasVectors());
}
merge.readers = new ArrayList<SegmentReader>();
merge.readerClones = new ArrayList<SegmentReader>();
merge.info.setHasVectors(merger.fieldInfos().hasVectors());
// This is try/finally to make sure merger's readers are
// closed:
boolean success = false;
try {
int totDocCount = 0;
int segUpto = 0;
while(segUpto < sourceSegments.size()) {
final SegmentInfo info = sourceSegments.info(segUpto);
// Hold onto the "live" reader; we will use this to
// commit merged deletes
final SegmentReader reader = readerPool.get(info, true,
MERGE_READ_BUFFER_SIZE,
-1);
merge.readers.add(reader);
// We clone the segment readers because other
// deletes may come in while we're merging so we
// need readers that will not change
final SegmentReader clone = (SegmentReader) reader.clone(true);
merge.readerClones.add(clone);
if (clone.numDocs() > 0) {
merger.add(clone);
totDocCount += clone.numDocs();
}
segUpto++;
}
if (infoStream != null) {
message("merge: total " + totDocCount + " docs");
}
merge.checkAborted(directory);
// This is where all the work happens:
mergedDocCount = merge.info.docCount = merger.merge();
assert mergedDocCount == totDocCount;
if (infoStream != null) {
message("merge store matchedCount=" + merger.getMatchedSubReaderCount() + " vs " + merge.readers.size());
}
anyNonBulkMerges |= merger.getAnyNonBulkMerges();
assert mergedDocCount == totDocCount: "mergedDocCount=" + mergedDocCount + " vs " + totDocCount;
// Very important to do this before opening the reader
// because SegmentReader must know if prox was written for
// this segment:
merge.info.setHasProx(merger.fieldInfos().hasProx());
boolean useCompoundFile;
synchronized (this) { // Guard segmentInfos
useCompoundFile = mergePolicy.useCompoundFile(segmentInfos, merge.info);
}
if (useCompoundFile) {
success = false;
final String compoundFileName = IndexFileNames.segmentFileName(mergedName, IndexFileNames.COMPOUND_FILE_EXTENSION);
try {
if (infoStream != null) {
message("create compound file " + compoundFileName);
}
merger.createCompoundFile(compoundFileName, merge.info);
success = true;
} catch (IOException ioe) {
synchronized(this) {
if (merge.isAborted()) {
// This can happen if rollback or close(false)
// is called -- fall through to logic below to
// remove the partially created CFS:
} else {
handleMergeException(ioe, merge);
}
}
} catch (Throwable t) {
handleMergeException(t, merge);
} finally {
if (!success) {
if (infoStream != null) {
message("hit exception creating compound file during merge");
}
synchronized(this) {
deleter.deleteFile(compoundFileName);
deleter.deleteNewFiles(merge.info.files());
}
}
}
success = false;
synchronized(this) {
// delete new non cfs files directly: they were never
// registered with IFD
deleter.deleteNewFiles(merge.info.files());
if (merge.isAborted()) {
if (infoStream != null) {
message("abort merge after building CFS");
}
deleter.deleteFile(compoundFileName);
return 0;
}
}
merge.info.setUseCompoundFile(true);
}
final IndexReaderWarmer mergedSegmentWarmer = config.getMergedSegmentWarmer();
final int termsIndexDivisor;
final boolean loadDocStores;
if (mergedSegmentWarmer != null) {
// Load terms index & doc stores so the segment
// warmer can run searches, load documents/term
// vectors
termsIndexDivisor = config.getReaderTermsIndexDivisor();
loadDocStores = true;
} else {
termsIndexDivisor = -1;
loadDocStores = false;
}
// TODO: in the non-realtime case, we may want to only
// keep deletes (it's costly to open entire reader
// when we just need deletes)
final SegmentReader mergedReader = readerPool.get(merge.info, loadDocStores, BufferedIndexInput.BUFFER_SIZE, termsIndexDivisor);
try {
if (poolReaders && mergedSegmentWarmer != null) {
mergedSegmentWarmer.warm(mergedReader);
}
if (!commitMerge(merge, mergedReader)) {
// commitMerge will return false if this merge was aborted
return 0;
}
} finally {
synchronized(this) {
if (readerPool.release(mergedReader)) {
// Must checkpoint after releasing the
// mergedReader since it may have written a new
// deletes file:
checkpoint();
}
}
}
success = true;
} finally {
// Readers are already closed in commitMerge if we didn't hit
// an exc:
if (!success) {
closeMergeReaders(merge, true);
}
}
return mergedDocCount;
}
|
public void testTrecFeedDirAllTypes() throws Exception {
File dataDir = _TestUtil.getTempDir("trecFeedAllTypes");
_TestUtil.unzip(getDataFile("trecdocs.zip"), dataDir);
TrecContentSource tcs = new TrecContentSource();
Properties props = new Properties();
props.setProperty("print.props", "false");
props.setProperty("content.source.verbose", "false");
props.setProperty("content.source.excludeIteration", "true");
props.setProperty("doc.maker.forever", "false");
props.setProperty("docs.dir", dataDir.getCanonicalPath().replace('\\','/'));
props.setProperty("trec.doc.parser", TrecParserByPath.class.getName());
props.setProperty("content.source.forever", "false");
tcs.setConfig(new Config(props));
tcs.resetInputs();
DocData dd = new DocData();
int n = 0;
boolean gotExpectedException = false;
HashSet<ParsePathType> unseenTypes = new HashSet<ParsePathType>(Arrays.asList(ParsePathType.values()));
try {
while (n<100) { // arbitrary limit to prevent looping forever in case of test failure
dd = tcs.getNextDocData(dd);
++n;
assertNotNull("doc data "+n+" should not be null!", dd);
unseenTypes.remove(tcs.currPathType);
switch(tcs.currPathType) {
case GOV2:
assertDocData(dd, "TEST-000", "TEST-000 title", "TEST-000 text", tcs.parseDate("Sun, 11 Jan 2009 08:00:00 GMT"));
break;
case FBIS:
assertDocData(dd, "TEST-001", "TEST-001 Title", "TEST-001 text", tcs.parseDate("1 January 1991"));
break;
case FR94:
// no title extraction in this source for now
assertDocData(dd, "TEST-002", null, "DEPARTMENT OF SOMETHING", tcs.parseDate("February 3, 1994"));
break;
case FT:
assertDocData(dd, "TEST-003", "Test-003 title", "Some pub text", tcs.parseDate("980424"));
break;
case LATIMES:
assertDocData(dd, "TEST-004", "Test-004 Title", "Some paragraph", tcs.parseDate("January 17, 1997, Sunday"));
break;
default:
assertTrue("Should never get here!", false);
}
}
} catch (NoMoreDataException e) {
gotExpectedException = true;
}
assertTrue("Should have gotten NoMoreDataException!", gotExpectedException);
assertEquals("Wrong numbre of documents created by osurce!",5,n);
assertTrue("Did not see all types!",unseenTypes.isEmpty());
| public void testTrecFeedDirAllTypes() throws Exception {
File dataDir = _TestUtil.getTempDir("trecFeedAllTypes");
_TestUtil.unzip(getDataFile("trecdocs.zip"), dataDir);
TrecContentSource tcs = new TrecContentSource();
Properties props = new Properties();
props.setProperty("print.props", "false");
props.setProperty("content.source.verbose", "false");
props.setProperty("content.source.excludeIteration", "true");
props.setProperty("doc.maker.forever", "false");
props.setProperty("docs.dir", dataDir.getCanonicalPath().replace('\\','/'));
props.setProperty("trec.doc.parser", TrecParserByPath.class.getName());
props.setProperty("content.source.forever", "false");
tcs.setConfig(new Config(props));
tcs.resetInputs();
DocData dd = new DocData();
int n = 0;
boolean gotExpectedException = false;
HashSet<ParsePathType> unseenTypes = new HashSet<ParsePathType>(Arrays.asList(ParsePathType.values()));
try {
while (n<100) { // arbitrary limit to prevent looping forever in case of test failure
dd = tcs.getNextDocData(dd);
++n;
assertNotNull("doc data "+n+" should not be null!", dd);
unseenTypes.remove(tcs.currPathType);
switch(tcs.currPathType) {
case GOV2:
assertDocData(dd, "TEST-000", "TEST-000 title", "TEST-000 text", tcs.parseDate("Sun, 11 Jan 2009 08:00:00 GMT"));
break;
case FBIS:
assertDocData(dd, "TEST-001", "TEST-001 Title", "TEST-001 text", tcs.parseDate("1 January 1991"));
break;
case FR94:
// no title extraction in this source for now
assertDocData(dd, "TEST-002", null, "DEPARTMENT OF SOMETHING", tcs.parseDate("February 3, 1994"));
break;
case FT:
assertDocData(dd, "TEST-003", "Test-003 title", "Some pub text", tcs.parseDate("980424"));
break;
case LATIMES:
assertDocData(dd, "TEST-004", "Test-004 Title", "Some paragraph", tcs.parseDate("January 17, 1997, Sunday"));
break;
default:
assertTrue("Should never get here!", false);
}
}
} catch (NoMoreDataException e) {
gotExpectedException = true;
}
assertTrue("Should have gotten NoMoreDataException!", gotExpectedException);
assertEquals("Wrong number of documents created by source!",5,n);
assertTrue("Did not see all types!",unseenTypes.isEmpty());
|
public void testBasicUsage() throws Exception {
checkCorrectClassification(new KNearestNeighborClassifier(1), new BytesRef("technology"), new MockAnalyzer(random()), categoryFieldName);
}
| public void testBasicUsage() throws Exception {
checkCorrectClassification(new KNearestNeighborClassifier(1), TECHNOLOGY_INPUT, TECHNOLOGY_RESULT, new MockAnalyzer(random()), categoryFieldName);
}
|
public ClassificationResult<BytesRef> assignClass(String inputDocument) throws IOException {
if (atomicReader == null) {
throw new IOException("You must first call Classifier#train");
}
double max = 0d;
BytesRef foundClass = new BytesRef();
Terms terms = MultiFields.getTerms(atomicReader, classFieldName);
TermsEnum termsEnum = terms.iterator(null);
BytesRef next;
String[] tokenizedDoc = tokenizeDoc(inputDocument);
while ((next = termsEnum.next()) != null) {
// TODO : turn it to be in log scale
double clVal = calculatePrior(next) * calculateLikelihood(tokenizedDoc, next);
if (clVal > max) {
max = clVal;
foundClass = next.clone();
}
}
return new ClassificationResult<BytesRef>(foundClass, max);
}
| public ClassificationResult<BytesRef> assignClass(String inputDocument) throws IOException {
if (atomicReader == null) {
throw new IOException("You must first call Classifier#train");
}
double max = 0d;
BytesRef foundClass = new BytesRef();
Terms terms = MultiFields.getTerms(atomicReader, classFieldName);
TermsEnum termsEnum = terms.iterator(null);
BytesRef next;
String[] tokenizedDoc = tokenizeDoc(inputDocument);
while ((next = termsEnum.next()) != null) {
// TODO : turn it to be in log scale
double clVal = calculatePrior(next) * calculateLikelihood(tokenizedDoc, next);
if (clVal > max) {
max = clVal;
foundClass = BytesRef.deepCopyOf(next);
}
}
return new ClassificationResult<BytesRef>(foundClass, max);
}
|
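A note on the pair above, as a minimal sketch (class name and sample values are illustrative, not from the dataset): TermsEnum.next() hands back a reused BytesRef, and BytesRef.clone() is a shallow copy that shares the backing byte[], so a class label saved via clone() silently changes once the enum advances. BytesRef.deepCopyOf() copies the bytes and keeps the saved label stable.

import org.apache.lucene.util.BytesRef;

public class BytesRefCopySketch {
  public static void main(String[] args) {
    // pretend this is the single BytesRef instance a TermsEnum keeps reusing
    BytesRef reused = new BytesRef("technology");
    BytesRef shallow = reused.clone();            // shares reused.bytes
    BytesRef deep = BytesRef.deepCopyOf(reused);  // owns a private copy of the bytes
    reused.bytes[reused.offset] = (byte) 'X';     // simulate the enum moving to the next term
    System.out.println(shallow.utf8ToString());   // corrupted: "Xechnology"
    System.out.println(deep.utf8ToString());      // intact:    "technology"
  }
}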
private void verifyStatistics()
throws SQLException {
// DERBY-5097: On machines with a single core/CPU the load generated
// by the test threads may cause the index statistics daemon worker
// thread to be "starved". Add a timeout to give it a chance to do
// what it has been told to do.
IndexStatsUtil stats = new IndexStatsUtil(getConnection(), 2000);
IdxStats[] myStats = stats.getStatsTable(TAB, 2);
for (int i=0; i < myStats.length; i++) {
IdxStats s = myStats[i];
assertEquals(_100K, s.rows);
switch (s.lcols) {
case 1:
assertEquals(10, s.card);
break;
case 2:
assertEquals(_100K, s.card);
break;
default:
fail("unexpected number of leading columns: " + s.lcols);
}
}
}
| private void verifyStatistics()
throws SQLException {
// DERBY-5097: On machines with a single core/CPU the load generated
// by the test threads may cause the index statistics daemon worker
// thread to be "starved". Add a timeout to give it a chance to do
// what it has been told to do.
IndexStatsUtil stats = new IndexStatsUtil(getConnection(), 5000);
IdxStats[] myStats = stats.getStatsTable(TAB, 2);
for (int i=0; i < myStats.length; i++) {
IdxStats s = myStats[i];
assertEquals(_100K, s.rows);
switch (s.lcols) {
case 1:
assertEquals(10, s.card);
break;
case 2:
assertEquals(_100K, s.card);
break;
default:
fail("unexpected number of leading columns: " + s.lcols);
}
}
}
|
public Query parse() throws ParseException {
SolrParams localParams = getLocalParams();
SolrParams params = getParams();
solrParams = SolrParams.wrapDefaults(localParams, params);
userFields = new UserFields(U.parseFieldBoosts(solrParams.getParams(DMP.UF)));
queryFields = SolrPluginUtils.parseFieldBoosts(solrParams.getParams(DisMaxParams.QF));
if (0 == queryFields.size()) {
queryFields.put(req.getSchema().getDefaultSearchFieldName(), 1.0f);
}
// Boosted phrase of the full query string
Map<String,Float> phraseFields =
SolrPluginUtils.parseFieldBoosts(solrParams.getParams(DisMaxParams.PF));
// Boosted Bi-Term Shingles from the query string
Map<String,Float> phraseFields2 =
SolrPluginUtils.parseFieldBoosts(solrParams.getParams("pf2"));
// Boosted Tri-Term Shingles from the query string
Map<String,Float> phraseFields3 =
SolrPluginUtils.parseFieldBoosts(solrParams.getParams("pf3"));
float tiebreaker = solrParams.getFloat(DisMaxParams.TIE, 0.0f);
int pslop = solrParams.getInt(DisMaxParams.PS, 0);
int qslop = solrParams.getInt(DisMaxParams.QS, 0);
// remove stopwords from mandatory "matching" component?
boolean stopwords = solrParams.getBool("stopwords", true);
/* the main query we will execute. we disable the coord because
* this query is an artificial construct
*/
BooleanQuery query = new BooleanQuery(true);
/* * * Main User Query * * */
parsedUserQuery = null;
String userQuery = getString();
altUserQuery = null;
if( userQuery == null || userQuery.length() < 1 ) {
// If no query is specified, we may have an alternate
String altQ = solrParams.get( DisMaxParams.ALTQ );
if (altQ != null) {
altQParser = subQuery(altQ, null);
altUserQuery = altQParser.getQuery();
query.add( altUserQuery , BooleanClause.Occur.MUST );
} else {
throw new SolrException( SolrException.ErrorCode.BAD_REQUEST, "missing query string" );
}
}
else {
// There is a valid query string
// userQuery = partialEscape(U.stripUnbalancedQuotes(userQuery)).toString();
boolean lowercaseOperators = solrParams.getBool("lowercaseOperators", true);
String mainUserQuery = userQuery;
ExtendedSolrQueryParser up =
new ExtendedSolrQueryParser(this, IMPOSSIBLE_FIELD_NAME);
up.addAlias(IMPOSSIBLE_FIELD_NAME,
tiebreaker, queryFields);
addAliasesFromRequest(up, tiebreaker);
up.setPhraseSlop(qslop); // slop for explicit user phrase queries
up.setAllowLeadingWildcard(true);
// defer escaping, and only do it if lucene parsing fails or we need phrase
// queries; the clauses are needed for sloppy phrase queries anyway.
List<Clause> clauses = null;
int numPluses = 0;
int numMinuses = 0;
int numOR = 0;
int numNOT = 0;
clauses = splitIntoClauses(userQuery, false);
for (Clause clause : clauses) {
if (clause.must == '+') numPluses++;
if (clause.must == '-') numMinuses++;
if (clause.isBareWord()) {
String s = clause.val;
if ("OR".equals(s)) {
numOR++;
} else if ("NOT".equals(s)) {
numNOT++;
} else if (lowercaseOperators && "or".equals(s)) {
numOR++;
}
}
}
// Always rebuild mainUserQuery from clauses to catch modifications from splitIntoClauses
// This was necessary for userFields modifications to get propagated into the query.
// Convert lower or mixed case operators to uppercase if we saw them.
// only do this for the lucene query part and not for phrase query boosting
// since some fields might not be case insensitive.
// We don't use a regex for this because it might change an AND or OR in
// a phrase query in a case sensitive field.
StringBuilder sb = new StringBuilder();
for (int i=0; i<clauses.size(); i++) {
Clause clause = clauses.get(i);
String s = clause.raw;
// AND and OR won't be operators at the start or end
if (i>0 && i+1<clauses.size()) {
if ("AND".equalsIgnoreCase(s)) {
s="AND";
} else if ("OR".equalsIgnoreCase(s)) {
s="OR";
}
}
sb.append(s);
sb.append(' ');
}
mainUserQuery = sb.toString();
// For correct lucene queries, turn off mm processing if there
// were explicit operators (except for AND).
boolean doMinMatched = (numOR + numNOT + numPluses + numMinuses) == 0;
try {
up.setRemoveStopFilter(!stopwords);
up.exceptions = true;
parsedUserQuery = up.parse(mainUserQuery);
if (stopwords && isEmpty(parsedUserQuery)) {
// if the query was all stop words, remove none of them
up.setRemoveStopFilter(true);
parsedUserQuery = up.parse(mainUserQuery);
}
} catch (Exception e) {
// ignore failure and reparse later after escaping reserved chars
up.exceptions = false;
}
if (parsedUserQuery != null && doMinMatched) {
String minShouldMatch = solrParams.get(DisMaxParams.MM, "100%");
if (parsedUserQuery instanceof BooleanQuery) {
SolrPluginUtils.setMinShouldMatch((BooleanQuery)parsedUserQuery, minShouldMatch);
}
}
if (parsedUserQuery == null) {
sb = new StringBuilder();
for (Clause clause : clauses) {
boolean doQuote = clause.isPhrase;
String s=clause.val;
if (!clause.isPhrase && ("OR".equals(s) || "AND".equals(s) || "NOT".equals(s))) {
doQuote=true;
}
if (clause.must != 0) {
sb.append(clause.must);
}
if (clause.field != null) {
sb.append(clause.field);
sb.append(':');
}
if (doQuote) {
sb.append('"');
}
sb.append(clause.val);
if (doQuote) {
sb.append('"');
}
if (clause.field != null) {
// Add the default user field boost, if any
Float boost = userFields.getBoost(clause.field);
if(boost != null)
sb.append("^").append(boost);
}
sb.append(' ');
}
String escapedUserQuery = sb.toString();
parsedUserQuery = up.parse(escapedUserQuery);
// Only do minimum-match logic
String minShouldMatch = solrParams.get(DisMaxParams.MM, "100%");
if (parsedUserQuery instanceof BooleanQuery) {
BooleanQuery t = new BooleanQuery();
SolrPluginUtils.flattenBooleanQuery(t, (BooleanQuery)parsedUserQuery);
SolrPluginUtils.setMinShouldMatch(t, minShouldMatch);
parsedUserQuery = t;
}
}
query.add(parsedUserQuery, BooleanClause.Occur.MUST);
// sloppy phrase queries for proximity
if (phraseFields.size() > 0 ||
phraseFields2.size() > 0 ||
phraseFields3.size() > 0) {
// find non-field clauses
List<Clause> normalClauses = new ArrayList<Clause>(clauses.size());
for (Clause clause : clauses) {
if (clause.field != null || clause.isPhrase) continue;
// check for keywords "AND,OR,TO"
if (clause.isBareWord()) {
String s = clause.val.toString();
// avoid putting explicit operators in the phrase query
if ("OR".equals(s) || "AND".equals(s) || "NOT".equals(s) || "TO".equals(s)) continue;
}
normalClauses.add(clause);
}
// full phrase...
addShingledPhraseQueries(query, normalClauses, phraseFields, 0,
tiebreaker, pslop);
// shingles...
addShingledPhraseQueries(query, normalClauses, phraseFields2, 2,
tiebreaker, pslop);
addShingledPhraseQueries(query, normalClauses, phraseFields3, 3,
tiebreaker, pslop);
}
}
/* * * Boosting Query * * */
boostParams = solrParams.getParams(DisMaxParams.BQ);
//List<Query> boostQueries = U.parseQueryStrings(req, boostParams);
boostQueries=null;
if (boostParams!=null && boostParams.length>0) {
boostQueries = new ArrayList<Query>();
for (String qs : boostParams) {
if (qs.trim().length()==0) continue;
Query q = subQuery(qs, null).getQuery();
boostQueries.add(q);
}
}
if (null != boostQueries) {
for(Query f : boostQueries) {
query.add(f, BooleanClause.Occur.SHOULD);
}
}
/* * * Boosting Functions * * */
String[] boostFuncs = solrParams.getParams(DisMaxParams.BF);
if (null != boostFuncs && 0 != boostFuncs.length) {
for (String boostFunc : boostFuncs) {
if(null == boostFunc || "".equals(boostFunc)) continue;
Map<String,Float> ff = SolrPluginUtils.parseFieldBoosts(boostFunc);
for (String f : ff.keySet()) {
Query fq = subQuery(f, FunctionQParserPlugin.NAME).getQuery();
Float b = ff.get(f);
if (null != b) {
fq.setBoost(b);
}
query.add(fq, BooleanClause.Occur.SHOULD);
}
}
}
//
// create a boosted query (scores multiplied by boosts)
//
Query topQuery = query;
multBoosts = solrParams.getParams("boost");
if (multBoosts!=null && multBoosts.length>0) {
List<ValueSource> boosts = new ArrayList<ValueSource>();
for (String boostStr : multBoosts) {
if (boostStr==null || boostStr.length()==0) continue;
Query boost = subQuery(boostStr, FunctionQParserPlugin.NAME).getQuery();
ValueSource vs;
if (boost instanceof FunctionQuery) {
vs = ((FunctionQuery)boost).getValueSource();
} else {
vs = new QueryValueSource(boost, 1.0f);
}
boosts.add(vs);
}
if (boosts.size()>1) {
ValueSource prod = new ProductFloatFunction(boosts.toArray(new ValueSource[boosts.size()]));
topQuery = new BoostedQuery(query, prod);
} else if (boosts.size() == 1) {
topQuery = new BoostedQuery(query, boosts.get(0));
}
}
return topQuery;
}
| public Query parse() throws ParseException {
SolrParams localParams = getLocalParams();
SolrParams params = getParams();
solrParams = SolrParams.wrapDefaults(localParams, params);
userFields = new UserFields(U.parseFieldBoosts(solrParams.getParams(DMP.UF)));
queryFields = SolrPluginUtils.parseFieldBoosts(solrParams.getParams(DisMaxParams.QF));
if (0 == queryFields.size()) {
queryFields.put(req.getSchema().getDefaultSearchFieldName(), 1.0f);
}
// Boosted phrase of the full query string
Map<String,Float> phraseFields =
SolrPluginUtils.parseFieldBoosts(solrParams.getParams(DisMaxParams.PF));
// Boosted Bi-Term Shingles from the query string
Map<String,Float> phraseFields2 =
SolrPluginUtils.parseFieldBoosts(solrParams.getParams("pf2"));
// Boosted Tri-Term Shingles from the query string
Map<String,Float> phraseFields3 =
SolrPluginUtils.parseFieldBoosts(solrParams.getParams("pf3"));
float tiebreaker = solrParams.getFloat(DisMaxParams.TIE, 0.0f);
int pslop = solrParams.getInt(DisMaxParams.PS, 0);
int qslop = solrParams.getInt(DisMaxParams.QS, 0);
// remove stopwords from mandatory "matching" component?
boolean stopwords = solrParams.getBool("stopwords", true);
/* the main query we will execute. we disable the coord because
* this query is an artificial construct
*/
BooleanQuery query = new BooleanQuery(true);
/* * * Main User Query * * */
parsedUserQuery = null;
String userQuery = getString();
altUserQuery = null;
if( userQuery == null || userQuery.trim().length() == 0 ) {
// If no query is specified, we may have an alternate
String altQ = solrParams.get( DisMaxParams.ALTQ );
if (altQ != null) {
altQParser = subQuery(altQ, null);
altUserQuery = altQParser.getQuery();
query.add( altUserQuery , BooleanClause.Occur.MUST );
} else {
throw new SolrException( SolrException.ErrorCode.BAD_REQUEST, "missing query string" );
}
}
else {
// There is a valid query string
// userQuery = partialEscape(U.stripUnbalancedQuotes(userQuery)).toString();
boolean lowercaseOperators = solrParams.getBool("lowercaseOperators", true);
String mainUserQuery = userQuery;
ExtendedSolrQueryParser up =
new ExtendedSolrQueryParser(this, IMPOSSIBLE_FIELD_NAME);
up.addAlias(IMPOSSIBLE_FIELD_NAME,
tiebreaker, queryFields);
addAliasesFromRequest(up, tiebreaker);
up.setPhraseSlop(qslop); // slop for explicit user phrase queries
up.setAllowLeadingWildcard(true);
// defer escaping, and only do it if lucene parsing fails or we need phrase
// queries; the clauses are needed for sloppy phrase queries anyway.
List<Clause> clauses = null;
int numPluses = 0;
int numMinuses = 0;
int numOR = 0;
int numNOT = 0;
clauses = splitIntoClauses(userQuery, false);
for (Clause clause : clauses) {
if (clause.must == '+') numPluses++;
if (clause.must == '-') numMinuses++;
if (clause.isBareWord()) {
String s = clause.val;
if ("OR".equals(s)) {
numOR++;
} else if ("NOT".equals(s)) {
numNOT++;
} else if (lowercaseOperators && "or".equals(s)) {
numOR++;
}
}
}
// Always rebuild mainUserQuery from clauses to catch modifications from splitIntoClauses
// This was necessary for userFields modifications to get propagated into the query.
// Convert lower or mixed case operators to uppercase if we saw them.
// only do this for the lucene query part and not for phrase query boosting
// since some fields might not be case insensitive.
// We don't use a regex for this because it might change an AND or OR in
// a phrase query in a case sensitive field.
StringBuilder sb = new StringBuilder();
for (int i=0; i<clauses.size(); i++) {
Clause clause = clauses.get(i);
String s = clause.raw;
// AND and OR won't be operators at the start or end
if (i>0 && i+1<clauses.size()) {
if ("AND".equalsIgnoreCase(s)) {
s="AND";
} else if ("OR".equalsIgnoreCase(s)) {
s="OR";
}
}
sb.append(s);
sb.append(' ');
}
mainUserQuery = sb.toString();
// For correct lucene queries, turn off mm processing if there
// were explicit operators (except for AND).
boolean doMinMatched = (numOR + numNOT + numPluses + numMinuses) == 0;
try {
up.setRemoveStopFilter(!stopwords);
up.exceptions = true;
parsedUserQuery = up.parse(mainUserQuery);
if (stopwords && isEmpty(parsedUserQuery)) {
// if the query was all stop words, remove none of them
up.setRemoveStopFilter(true);
parsedUserQuery = up.parse(mainUserQuery);
}
} catch (Exception e) {
// ignore failure and reparse later after escaping reserved chars
up.exceptions = false;
}
if (parsedUserQuery != null && doMinMatched) {
String minShouldMatch = solrParams.get(DisMaxParams.MM, "100%");
if (parsedUserQuery instanceof BooleanQuery) {
SolrPluginUtils.setMinShouldMatch((BooleanQuery)parsedUserQuery, minShouldMatch);
}
}
if (parsedUserQuery == null) {
sb = new StringBuilder();
for (Clause clause : clauses) {
boolean doQuote = clause.isPhrase;
String s=clause.val;
if (!clause.isPhrase && ("OR".equals(s) || "AND".equals(s) || "NOT".equals(s))) {
doQuote=true;
}
if (clause.must != 0) {
sb.append(clause.must);
}
if (clause.field != null) {
sb.append(clause.field);
sb.append(':');
}
if (doQuote) {
sb.append('"');
}
sb.append(clause.val);
if (doQuote) {
sb.append('"');
}
if (clause.field != null) {
// Add the default user field boost, if any
Float boost = userFields.getBoost(clause.field);
if(boost != null)
sb.append("^").append(boost);
}
sb.append(' ');
}
String escapedUserQuery = sb.toString();
parsedUserQuery = up.parse(escapedUserQuery);
// Only do minimum-match logic
String minShouldMatch = solrParams.get(DisMaxParams.MM, "100%");
if (parsedUserQuery instanceof BooleanQuery) {
BooleanQuery t = new BooleanQuery();
SolrPluginUtils.flattenBooleanQuery(t, (BooleanQuery)parsedUserQuery);
SolrPluginUtils.setMinShouldMatch(t, minShouldMatch);
parsedUserQuery = t;
}
}
query.add(parsedUserQuery, BooleanClause.Occur.MUST);
// sloppy phrase queries for proximity
if (phraseFields.size() > 0 ||
phraseFields2.size() > 0 ||
phraseFields3.size() > 0) {
// find non-field clauses
List<Clause> normalClauses = new ArrayList<Clause>(clauses.size());
for (Clause clause : clauses) {
if (clause.field != null || clause.isPhrase) continue;
// check for keywords "AND,OR,TO"
if (clause.isBareWord()) {
String s = clause.val.toString();
// avoid putting explicit operators in the phrase query
if ("OR".equals(s) || "AND".equals(s) || "NOT".equals(s) || "TO".equals(s)) continue;
}
normalClauses.add(clause);
}
// full phrase...
addShingledPhraseQueries(query, normalClauses, phraseFields, 0,
tiebreaker, pslop);
// shingles...
addShingledPhraseQueries(query, normalClauses, phraseFields2, 2,
tiebreaker, pslop);
addShingledPhraseQueries(query, normalClauses, phraseFields3, 3,
tiebreaker, pslop);
}
}
/* * * Boosting Query * * */
boostParams = solrParams.getParams(DisMaxParams.BQ);
//List<Query> boostQueries = U.parseQueryStrings(req, boostParams);
boostQueries=null;
if (boostParams!=null && boostParams.length>0) {
boostQueries = new ArrayList<Query>();
for (String qs : boostParams) {
if (qs.trim().length()==0) continue;
Query q = subQuery(qs, null).getQuery();
boostQueries.add(q);
}
}
if (null != boostQueries) {
for(Query f : boostQueries) {
query.add(f, BooleanClause.Occur.SHOULD);
}
}
/* * * Boosting Functions * * */
String[] boostFuncs = solrParams.getParams(DisMaxParams.BF);
if (null != boostFuncs && 0 != boostFuncs.length) {
for (String boostFunc : boostFuncs) {
if(null == boostFunc || "".equals(boostFunc)) continue;
Map<String,Float> ff = SolrPluginUtils.parseFieldBoosts(boostFunc);
for (String f : ff.keySet()) {
Query fq = subQuery(f, FunctionQParserPlugin.NAME).getQuery();
Float b = ff.get(f);
if (null != b) {
fq.setBoost(b);
}
query.add(fq, BooleanClause.Occur.SHOULD);
}
}
}
//
// create a boosted query (scores multiplied by boosts)
//
Query topQuery = query;
multBoosts = solrParams.getParams("boost");
if (multBoosts!=null && multBoosts.length>0) {
List<ValueSource> boosts = new ArrayList<ValueSource>();
for (String boostStr : multBoosts) {
if (boostStr==null || boostStr.length()==0) continue;
Query boost = subQuery(boostStr, FunctionQParserPlugin.NAME).getQuery();
ValueSource vs;
if (boost instanceof FunctionQuery) {
vs = ((FunctionQuery)boost).getValueSource();
} else {
vs = new QueryValueSource(boost, 1.0f);
}
boosts.add(vs);
}
if (boosts.size()>1) {
ValueSource prod = new ProductFloatFunction(boosts.toArray(new ValueSource[boosts.size()]));
topQuery = new BoostedQuery(query, prod);
} else if (boosts.size() == 1) {
topQuery = new BoostedQuery(query, boosts.get(0));
}
}
return topQuery;
}
|
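The only behavioral change in the pair above is the empty-query guard, so a tiny sketch is enough (class and variable names are illustrative): a whitespace-only q passes a bare length check and is parsed as a real query, while the trimmed check treats it as missing so the q.alt fallback can fire.

public class EmptyQueryGuardSketch {
  public static void main(String[] args) {
    String userQuery = "   ";  // a whitespace-only q parameter
    boolean oldGuard = userQuery == null || userQuery.length() < 1;          // false: parsed as a query
    boolean newGuard = userQuery == null || userQuery.trim().length() == 0;  // true: fall back to q.alt
    System.out.println(oldGuard + " " + newGuard);                           // prints: false true
  }
}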
public final boolean isProxy(Object proxy)
{
return (getInvocationHandler(proxy) instanceof ProxyHandler);
}
| public final boolean isProxy(Object proxy)
{
return (proxy != null && getInvocationHandler(proxy) instanceof ProxyHandler);
}
|
private final CSVLoader.FieldAdder base;
FieldSplitter(CSVStrategy strategy, CSVLoader.FieldAdder base) {
this.strategy = strategy;
this.base = base;
}
void add(SolrInputDocument doc, int line, int column, String val) {
CSVParser parser = new CSVParser(new StringReader(val), strategy);
try {
String[] vals = parser.getLine();
if (vals!=null) {
for (String v: vals) base.add(doc,line,column,v);
} else {
base.add(doc,line,column,val);
}
} catch (IOException e) {
throw new SolrException( SolrException.ErrorCode.BAD_REQUEST,e);
}
}
}
String errHeader="CSVLoader:";
CSVLoader(SolrQueryRequest req, UpdateRequestProcessor processor) {
this.processor = processor;
this.params = req.getParams();
schema = req.getSchema();
templateAdd = new AddUpdateCommand();
templateAdd.allowDups=false;
templateAdd.overwriteCommitted=true;
templateAdd.overwritePending=true;
if (params.getBool(OVERWRITE,true)) {
templateAdd.allowDups=false;
templateAdd.overwriteCommitted=true;
templateAdd.overwritePending=true;
} else {
templateAdd.allowDups=true;
templateAdd.overwriteCommitted=false;
templateAdd.overwritePending=false;
}
strategy = new CSVStrategy(',', '"', CSVStrategy.COMMENTS_DISABLED, CSVStrategy.ESCAPE_DISABLED, false, false, false, true);
String sep = params.get(SEPARATOR);
if (sep!=null) {
if (sep.length()!=1) throw new SolrException( SolrException.ErrorCode.BAD_REQUEST,"Invalid separator:'"+sep+"'");
strategy.setDelimiter(sep.charAt(0));
}
String encapsulator = params.get(ENCAPSULATOR);
if (encapsulator!=null) {
if (encapsulator.length()!=1) throw new SolrException( SolrException.ErrorCode.BAD_REQUEST,"Invalid encapsulator:'"+encapsulator+"'");
}
String escape = params.get(ESCAPE);
if (escape!=null) {
if (escape.length()!=1) throw new SolrException( SolrException.ErrorCode.BAD_REQUEST,"Invalid escape:'"+escape+"'");
}
// if only encapsulator or escape is set, disable the other escaping mechanism
if (encapsulator == null && escape != null) {
strategy.setEncapsulator( CSVStrategy.ENCAPSULATOR_DISABLED);
strategy.setEscape(escape.charAt(0));
} else {
if (encapsulator != null) {
strategy.setEncapsulator(encapsulator.charAt(0));
}
if (escape != null) {
char ch = escape.charAt(0);
strategy.setEscape(ch);
if (ch == '\\') {
// If the escape is the standard backslash, then also enable
// unicode escapes (it's harmless since 'u' would not otherwise
// be escaped).
strategy.setUnicodeEscapeInterpretation(true);
}
}
}
String fn = params.get(FIELDNAMES);
fieldnames = fn != null ? commaSplit.split(fn,-1) : null;
Boolean hasHeader = params.getBool(HEADER);
skipLines = params.getInt(SKIPLINES,0);
if (fieldnames==null) {
if (null == hasHeader) {
// assume the file has the headers if they aren't supplied in the args
hasHeader=true;
} else if (!hasHeader) {
throw new SolrException( SolrException.ErrorCode.BAD_REQUEST,"CSVLoader: must specify fieldnames=<fields>* or header=true");
}
} else {
// if the fieldnames were supplied and the file has a header, we need to
// skip over that header.
if (hasHeader!=null && hasHeader) skipLines++;
prepareFields();
}
}
/** create the FieldAdders that control how each field is indexed */
void prepareFields() {
// Possible future optimization: for really rapid incremental indexing
// from a POST, one could cache all of this setup info based on the params.
// The link from FieldAdder to this would need to be severed for that to happen.
fields = new SchemaField[fieldnames.length];
adders = new CSVLoader.FieldAdder[fieldnames.length];
String skipStr = params.get(SKIP);
List<String> skipFields = skipStr==null ? null : StrUtils.splitSmart(skipStr,',');
CSVLoader.FieldAdder adder = new CSVLoader.FieldAdder();
CSVLoader.FieldAdder adderKeepEmpty = new CSVLoader.FieldAdderEmpty();
for (int i=0; i<fields.length; i++) {
String fname = fieldnames[i];
// to skip a field, leave the entries in fields and adders null
if (fname.length()==0 || (skipFields!=null && skipFields.contains(fname))) continue;
fields[i] = schema.getField(fname);
boolean keepEmpty = params.getFieldBool(fname,EMPTY,false);
adders[i] = keepEmpty ? adderKeepEmpty : adder;
// Order that operations are applied: split -> trim -> map -> add
// so create in reverse order.
// Creation of FieldAdders could be optimized and shared among fields
String[] fmap = params.getFieldParams(fname,MAP);
if (fmap!=null) {
for (String mapRule : fmap) {
String[] mapArgs = colonSplit.split(mapRule,-1);
if (mapArgs.length!=2)
throw new SolrException( SolrException.ErrorCode.BAD_REQUEST, "Map rules must be of the form 'from:to' ,got '"+mapRule+"'");
adders[i] = new CSVLoader.FieldMapperSingle(mapArgs[0], mapArgs[1], adders[i]);
}
}
if (params.getFieldBool(fname,TRIM,false)) {
adders[i] = new CSVLoader.FieldTrimmer(adders[i]);
}
if (params.getFieldBool(fname,SPLIT,false)) {
String sepStr = params.getFieldParam(fname,SEPARATOR);
char fsep = sepStr==null || sepStr.length()==0 ? ',' : sepStr.charAt(0);
String encStr = params.getFieldParam(fname,ENCAPSULATOR);
char fenc = encStr==null || encStr.length()==0 ? (char)-2 : encStr.charAt(0);
String escStr = params.getFieldParam(fname,ESCAPE);
char fesc = escStr==null || encStr.length()==0 ? CSVStrategy.ESCAPE_DISABLED : escStr.charAt(0);
CSVStrategy fstrat = new CSVStrategy(fsep,fenc,CSVStrategy.COMMENTS_DISABLED,fesc, false, false, false, false);
adders[i] = new CSVLoader.FieldSplitter(fstrat, adders[i]);
}
}
}
| private final CSVLoader.FieldAdder base;
FieldSplitter(CSVStrategy strategy, CSVLoader.FieldAdder base) {
this.strategy = strategy;
this.base = base;
}
void add(SolrInputDocument doc, int line, int column, String val) {
CSVParser parser = new CSVParser(new StringReader(val), strategy);
try {
String[] vals = parser.getLine();
if (vals!=null) {
for (String v: vals) base.add(doc,line,column,v);
} else {
base.add(doc,line,column,val);
}
} catch (IOException e) {
throw new SolrException( SolrException.ErrorCode.BAD_REQUEST,e);
}
}
}
String errHeader="CSVLoader:";
CSVLoader(SolrQueryRequest req, UpdateRequestProcessor processor) {
this.processor = processor;
this.params = req.getParams();
schema = req.getSchema();
templateAdd = new AddUpdateCommand();
templateAdd.allowDups=false;
templateAdd.overwriteCommitted=true;
templateAdd.overwritePending=true;
if (params.getBool(OVERWRITE,true)) {
templateAdd.allowDups=false;
templateAdd.overwriteCommitted=true;
templateAdd.overwritePending=true;
} else {
templateAdd.allowDups=true;
templateAdd.overwriteCommitted=false;
templateAdd.overwritePending=false;
}
strategy = new CSVStrategy(',', '"', CSVStrategy.COMMENTS_DISABLED, CSVStrategy.ESCAPE_DISABLED, false, false, false, true);
String sep = params.get(SEPARATOR);
if (sep!=null) {
if (sep.length()!=1) throw new SolrException( SolrException.ErrorCode.BAD_REQUEST,"Invalid separator:'"+sep+"'");
strategy.setDelimiter(sep.charAt(0));
}
String encapsulator = params.get(ENCAPSULATOR);
if (encapsulator!=null) {
if (encapsulator.length()!=1) throw new SolrException( SolrException.ErrorCode.BAD_REQUEST,"Invalid encapsulator:'"+encapsulator+"'");
}
String escape = params.get(ESCAPE);
if (escape!=null) {
if (escape.length()!=1) throw new SolrException( SolrException.ErrorCode.BAD_REQUEST,"Invalid escape:'"+escape+"'");
}
// if only encapsulator or escape is set, disable the other escaping mechanism
if (encapsulator == null && escape != null) {
strategy.setEncapsulator( CSVStrategy.ENCAPSULATOR_DISABLED);
strategy.setEscape(escape.charAt(0));
} else {
if (encapsulator != null) {
strategy.setEncapsulator(encapsulator.charAt(0));
}
if (escape != null) {
char ch = escape.charAt(0);
strategy.setEscape(ch);
if (ch == '\\') {
// If the escape is the standard backslash, then also enable
// unicode escapes (it's harmless since 'u' would not otherwise
// be escaped).
strategy.setUnicodeEscapeInterpretation(true);
}
}
}
String fn = params.get(FIELDNAMES);
fieldnames = fn != null ? commaSplit.split(fn,-1) : null;
Boolean hasHeader = params.getBool(HEADER);
skipLines = params.getInt(SKIPLINES,0);
if (fieldnames==null) {
if (null == hasHeader) {
// assume the file has the headers if they aren't supplied in the args
hasHeader=true;
} else if (!hasHeader) {
throw new SolrException( SolrException.ErrorCode.BAD_REQUEST,"CSVLoader: must specify fieldnames=<fields>* or header=true");
}
} else {
// if the fieldnames were supplied and the file has a header, we need to
// skip over that header.
if (hasHeader!=null && hasHeader) skipLines++;
prepareFields();
}
}
/** create the FieldAdders that control how each field is indexed */
void prepareFields() {
// Possible future optimization: for really rapid incremental indexing
// from a POST, one could cache all of this setup info based on the params.
// The link from FieldAdder to this would need to be severed for that to happen.
fields = new SchemaField[fieldnames.length];
adders = new CSVLoader.FieldAdder[fieldnames.length];
String skipStr = params.get(SKIP);
List<String> skipFields = skipStr==null ? null : StrUtils.splitSmart(skipStr,',');
CSVLoader.FieldAdder adder = new CSVLoader.FieldAdder();
CSVLoader.FieldAdder adderKeepEmpty = new CSVLoader.FieldAdderEmpty();
for (int i=0; i<fields.length; i++) {
String fname = fieldnames[i];
// to skip a field, leave the entries in fields and adders null
if (fname.length()==0 || (skipFields!=null && skipFields.contains(fname))) continue;
fields[i] = schema.getField(fname);
boolean keepEmpty = params.getFieldBool(fname,EMPTY,false);
adders[i] = keepEmpty ? adderKeepEmpty : adder;
// Order that operations are applied: split -> trim -> map -> add
// so create in reverse order.
// Creation of FieldAdders could be optimized and shared among fields
String[] fmap = params.getFieldParams(fname,MAP);
if (fmap!=null) {
for (String mapRule : fmap) {
String[] mapArgs = colonSplit.split(mapRule,-1);
if (mapArgs.length!=2)
throw new SolrException( SolrException.ErrorCode.BAD_REQUEST, "Map rules must be of the form 'from:to' ,got '"+mapRule+"'");
adders[i] = new CSVLoader.FieldMapperSingle(mapArgs[0], mapArgs[1], adders[i]);
}
}
if (params.getFieldBool(fname,TRIM,false)) {
adders[i] = new CSVLoader.FieldTrimmer(adders[i]);
}
if (params.getFieldBool(fname,SPLIT,false)) {
String sepStr = params.getFieldParam(fname,SEPARATOR);
char fsep = sepStr==null || sepStr.length()==0 ? ',' : sepStr.charAt(0);
String encStr = params.getFieldParam(fname,ENCAPSULATOR);
char fenc = encStr==null || encStr.length()==0 ? (char)-2 : encStr.charAt(0);
String escStr = params.getFieldParam(fname,ESCAPE);
char fesc = escStr==null || escStr.length()==0 ? CSVStrategy.ESCAPE_DISABLED : escStr.charAt(0);
CSVStrategy fstrat = new CSVStrategy(fsep,fenc,CSVStrategy.COMMENTS_DISABLED,fesc, false, false, false, false);
adders[i] = new CSVLoader.FieldSplitter(fstrat, adders[i]);
}
}
}
|
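The per-field split handling in the pair above fixes a copy/paste slip: when f.<field>.escape is supplied but f.<field>.encapsulator is not, the buggy guard reads encStr.length() and throws a NullPointerException before the field's CSVStrategy is built. A minimal repro of just that guard (plain locals stand in for the request parameters, and a local sentinel stands in for CSVStrategy.ESCAPE_DISABLED; its value here is illustrative):

public class PerFieldEscapeGuardSketch {
  static final char ESCAPE_DISABLED = (char) -2;  // placeholder for CSVStrategy.ESCAPE_DISABLED

  public static void main(String[] args) {
    String encStr = null;  // f.<field>.encapsulator not supplied
    String escStr = "\\";  // f.<field>.escape supplied
    // buggy guard:  escStr == null || encStr.length() == 0   -> NullPointerException on encStr
    char fesc = (escStr == null || escStr.length() == 0) ? ESCAPE_DISABLED : escStr.charAt(0);
    System.out.println((int) fesc);  // 92, the backslash escape character
  }
}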
protected SolrServer createNewSolrServer()
{
try {
// setup the server...
String url = "http://localhost:"+port+context;
CommonsHttpSolrServer s = new CommonsHttpSolrServer( url );
s.setConnectionTimeout(5);
s.setDefaultMaxConnectionsPerHost(100);
s.setMaxTotalConnections(100);
return s;
}
catch( Exception ex ) {
throw new RuntimeException( ex );
}
}
| protected SolrServer createNewSolrServer()
{
try {
// setup the server...
String url = "http://localhost:"+port+context;
CommonsHttpSolrServer s = new CommonsHttpSolrServer( url );
s.setConnectionTimeout(100); // 1/10th sec
s.setDefaultMaxConnectionsPerHost(100);
s.setMaxTotalConnections(100);
return s;
}
catch( Exception ex ) {
throw new RuntimeException( ex );
}
}
|
public HMMChineseTokenizer(AttributeFactory factory) {
super((BreakIterator)sentenceProto.clone());
}
| public HMMChineseTokenizer(AttributeFactory factory) {
super(factory, (BreakIterator)sentenceProto.clone());
}
|
public ThaiTokenizer(AttributeFactory factory) {
super((BreakIterator)sentenceProto.clone());
if (!DBBI_AVAILABLE) {
throw new UnsupportedOperationException("This JRE does not have support for Thai segmentation");
}
wordBreaker = (BreakIterator)proto.clone();
}
| public ThaiTokenizer(AttributeFactory factory) {
super(factory, (BreakIterator)sentenceProto.clone());
if (!DBBI_AVAILABLE) {
throw new UnsupportedOperationException("This JRE does not have support for Thai segmentation");
}
wordBreaker = (BreakIterator)proto.clone();
}
|
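Both tokenizer pairs above fix the same shape of bug: the constructor accepts an AttributeFactory but never forwards it to super, so a caller's custom factory is silently replaced by the default one. A generic sketch of the pattern (the classes and the String-typed "factory" are illustrative, not Lucene's types):

public class ForwardCtorArgSketch {
  static class Base {
    final String factory;
    Base(String factory) { this.factory = factory; }
    Base() { this("DEFAULT"); }
  }
  static class Broken extends Base {
    Broken(String factory) { super(); }        // drops the caller's factory
  }
  static class Fixed extends Base {
    Fixed(String factory) { super(factory); }  // forwards it, as the fixed constructors now do
  }
  public static void main(String[] args) {
    System.out.println(new Broken("CUSTOM").factory);  // DEFAULT (the bug)
    System.out.println(new Fixed("CUSTOM").factory);   // CUSTOM
  }
}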
public static synchronized Collection<KSMetaData> loadFromStorage(UUID version) throws IOException
{
DecoratedKey vkey = StorageService.getPartitioner().decorateKey(Migration.toUTF8Bytes(version));
Table defs = Table.open(Table.SYSTEM_TABLE);
ColumnFamilyStore cfStore = defs.getColumnFamilyStore(Migration.SCHEMA_CF);
QueryFilter filter = QueryFilter.getIdentityFilter(vkey, new QueryPath(Migration.SCHEMA_CF));
ColumnFamily cf = cfStore.getColumnFamily(filter);
IColumn avroschema = cf.getColumn(DEFINITION_SCHEMA_COLUMN_NAME);
if (avroschema == null)
// TODO: more polite way to handle this?
throw new RuntimeException("Cannot read system table! Are you upgrading a pre-release version?");
Schema schema = Schema.parse(new String(avroschema.value()));
// deserialize keyspaces using schema
Collection<KSMetaData> keyspaces = new ArrayList<KSMetaData>();
for (IColumn column : cf.getSortedColumns())
{
if (Arrays.equals(column.name(), DEFINITION_SCHEMA_COLUMN_NAME))
continue;
org.apache.cassandra.config.avro.KsDef ks = SerDeUtils.<org.apache.cassandra.config.avro.KsDef>deserialize(schema, column.value());
keyspaces.add(KSMetaData.inflate(ks));
}
return keyspaces;
}
| public static synchronized Collection<KSMetaData> loadFromStorage(UUID version) throws IOException
{
DecoratedKey vkey = StorageService.getPartitioner().decorateKey(Migration.toUTF8Bytes(version));
Table defs = Table.open(Table.SYSTEM_TABLE);
ColumnFamilyStore cfStore = defs.getColumnFamilyStore(Migration.SCHEMA_CF);
QueryFilter filter = QueryFilter.getIdentityFilter(vkey, new QueryPath(Migration.SCHEMA_CF));
ColumnFamily cf = cfStore.getColumnFamily(filter);
IColumn avroschema = cf.getColumn(DEFINITION_SCHEMA_COLUMN_NAME);
if (avroschema == null)
// TODO: more polite way to handle this?
throw new RuntimeException("Cannot read system table! Are you upgrading a pre-release version?");
Schema schema = Schema.parse(new String(avroschema.value()));
// deserialize keyspaces using schema
Collection<KSMetaData> keyspaces = new ArrayList<KSMetaData>();
for (IColumn column : cf.getSortedColumns())
{
if (Arrays.equals(column.name(), DEFINITION_SCHEMA_COLUMN_NAME))
continue;
org.apache.cassandra.config.avro.KsDef ks = SerDeUtils.deserialize(schema, column.value(), new org.apache.cassandra.config.avro.KsDef());
keyspaces.add(KSMetaData.inflate(ks));
}
return keyspaces;
}
|
public static Migration deserialize(byte[] bytes) throws IOException
{
// deserialize
org.apache.cassandra.db.migration.avro.Migration mi = SerDeUtils.deserializeWithSchema(bytes);
// create an instance of the migration subclass
Migration migration;
try
{
Class migrationClass = Class.forName(mi.classname.toString());
Constructor migrationConstructor = migrationClass.getDeclaredConstructor();
migrationConstructor.setAccessible(true);
migration = (Migration)migrationConstructor.newInstance();
}
catch (Exception e)
{
throw new RuntimeException("Invalid migration class: " + mi.classname.toString(), e);
}
// super inflate
migration.lastVersion = UUIDGen.makeType1UUID(mi.old_version.bytes());
migration.newVersion = UUIDGen.makeType1UUID(mi.new_version.bytes());
try
{
migration.rm = RowMutation.serializer().deserialize(SerDeUtils.createDataInputStream(mi.row_mutation));
}
catch (IOException e)
{
throw new RuntimeException(e);
}
// sub inflate
migration.subinflate(mi);
return migration;
}
| public static Migration deserialize(byte[] bytes) throws IOException
{
// deserialize
org.apache.cassandra.db.migration.avro.Migration mi = SerDeUtils.deserializeWithSchema(bytes, new org.apache.cassandra.db.migration.avro.Migration());
// create an instance of the migration subclass
Migration migration;
try
{
Class migrationClass = Class.forName(mi.classname.toString());
Constructor migrationConstructor = migrationClass.getDeclaredConstructor();
migrationConstructor.setAccessible(true);
migration = (Migration)migrationConstructor.newInstance();
}
catch (Exception e)
{
throw new RuntimeException("Invalid migration class: " + mi.classname.toString(), e);
}
// super inflate
migration.lastVersion = UUIDGen.makeType1UUID(mi.old_version.bytes());
migration.newVersion = UUIDGen.makeType1UUID(mi.new_version.bytes());
try
{
migration.rm = RowMutation.serializer().deserialize(SerDeUtils.createDataInputStream(mi.row_mutation));
}
catch (IOException e)
{
throw new RuntimeException(e);
}
// sub inflate
migration.subinflate(mi);
return migration;
}
|
public void connect() {
if (zkController != null) return;
synchronized(this) {
if (zkController != null) return;
try {
ZkController zk = new ZkController(zkHost, zkConnectTimeout, zkClientTimeout, null, null, null);
zk.addShardZkNodeWatches();
zk.updateCloudState(true);
zkController = zk;
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
throw new ZooKeeperException(SolrException.ErrorCode.SERVER_ERROR, "", e);
} catch (KeeperException e) {
throw new ZooKeeperException(SolrException.ErrorCode.SERVER_ERROR, "", e);
} catch (IOException e) {
throw new ZooKeeperException(SolrException.ErrorCode.SERVER_ERROR, "", e);
} catch (TimeoutException e) {
throw new ZooKeeperException(SolrException.ErrorCode.SERVER_ERROR, "", e);
}
}
}
| public void connect() {
if (zkController != null) return;
synchronized(this) {
if (zkController != null) return;
try {
ZkController zk = new ZkController(zkHost, zkConnectTimeout, zkClientTimeout, null, null, null);
zk.addShardZkNodeWatches();
zk.getZkStateReader().updateCloudState(true);
zkController = zk;
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
throw new ZooKeeperException(SolrException.ErrorCode.SERVER_ERROR, "", e);
} catch (KeeperException e) {
throw new ZooKeeperException(SolrException.ErrorCode.SERVER_ERROR, "", e);
} catch (IOException e) {
throw new ZooKeeperException(SolrException.ErrorCode.SERVER_ERROR, "", e);
} catch (TimeoutException e) {
throw new ZooKeeperException(SolrException.ErrorCode.SERVER_ERROR, "", e);
}
}
}
|
public void afterExecute(Runnable r, Throwable t)
{
super.afterExecute(r,t);
// exceptions wrapped by FutureTask
if (r instanceof FutureTask)
{
try
{
((FutureTask) r).get();
}
catch (InterruptedException e)
{
throw new AssertionError(e);
}
catch (ExecutionException e)
{
Thread.getDefaultUncaughtExceptionHandler().uncaughtException(Thread.currentThread(), e);
}
}
// exceptions for non-FutureTask runnables [i.e., added via execute() instead of submit()]
if (t != null)
{
logger.error("Error in ThreadPoolExecutor", t);
}
}
| public void afterExecute(Runnable r, Throwable t)
{
super.afterExecute(r,t);
// exceptions wrapped by FutureTask
if (r instanceof FutureTask)
{
try
{
((FutureTask) r).get();
}
catch (InterruptedException e)
{
throw new AssertionError(e);
}
catch (ExecutionException e)
{
Thread.getDefaultUncaughtExceptionHandler().uncaughtException(Thread.currentThread(), e.getCause());
}
}
// exceptions for non-FutureTask runnables [i.e., added via execute() instead of submit()]
if (t != null)
{
logger.error("Error in ThreadPoolExecutor", t);
}
}
|
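The pair above changes what reaches the uncaught-exception handler: ExecutionException is only the FutureTask wrapper, and the exception the task actually threw is its cause. A self-contained illustration (the task and its exception are made up for the sketch):

import java.util.concurrent.Callable;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.FutureTask;

public class UnwrapCauseSketch {
  public static void main(String[] args) throws InterruptedException {
    FutureTask<Void> task = new FutureTask<Void>(new Callable<Void>() {
      public Void call() { throw new IllegalStateException("boom"); }
    });
    task.run();  // the task fails; get() wraps its exception
    try {
      task.get();
    } catch (ExecutionException e) {
      System.out.println(e.getClass().getSimpleName());            // ExecutionException (the wrapper)
      System.out.println(e.getCause().getClass().getSimpleName()); // IllegalStateException (what the fix reports)
    }
  }
}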
private RefCount getRefCount(String fileName) {
RefCount rc;
if (!refCounts.containsKey(fileName)) {
rc = new RefCount(fileName);
refCounts.put(fileName, rc);
} else {
rc = refCounts.get(fileName);
}
return rc;
}
void deleteFiles(List<String> files) throws IOException {
for(final String file: files)
deleteFile(file);
}
/** Deletes the specified files, but only if they are new
* (have not yet been incref'd). */
void deleteNewFiles(Collection<String> files) throws IOException {
for (final String fileName: files) {
if (!refCounts.containsKey(fileName))
deleteFile(fileName);
}
}
void deleteFile(String fileName)
throws IOException {
try {
if (infoStream != null) {
message("delete \"" + fileName + "\"");
}
directory.deleteFile(fileName);
} catch (IOException e) { // if delete fails
if (directory.fileExists(fileName)) {
// Some operating systems (e.g. Windows) don't
// permit a file to be deleted while it is opened
// for read (e.g. by another process or thread). So
// we assume that when a delete fails it is because
// the file is open in another process, and queue
// the file for subsequent deletion.
if (infoStream != null) {
message("IndexFileDeleter: unable to remove file \"" + fileName + "\": " + e.toString() + "; Will re-try later.");
}
if (deletable == null) {
deletable = new ArrayList<String>();
}
deletable.add(fileName); // add to deletable
}
}
}
| private RefCount getRefCount(String fileName) {
RefCount rc;
if (!refCounts.containsKey(fileName)) {
rc = new RefCount(fileName);
refCounts.put(fileName, rc);
} else {
rc = refCounts.get(fileName);
}
return rc;
}
void deleteFiles(List<String> files) throws IOException {
for(final String file: files)
deleteFile(file);
}
/** Deletes the specified files, but only if they are new
* (have not yet been incref'd). */
void deleteNewFiles(Collection<String> files) throws IOException {
for (final String fileName: files) {
if (!refCounts.containsKey(fileName))
deleteFile(fileName);
}
}
void deleteFile(String fileName)
throws IOException {
try {
if (infoStream != null) {
message("delete \"" + fileName + "\"");
}
directory.deleteFile(fileName);
} catch (IOException e) { // if delete fails
if (directory.fileExists(fileName)) {
// Some operating systems (e.g. Windows) don't
// permit a file to be deleted while it is opened
// for read (e.g. by another process or thread). So
// we assume that when a delete fails it is because
// the file is open in another process, and queue
// the file for subsequent deletion.
if (infoStream != null) {
message("unable to remove file \"" + fileName + "\": " + e.toString() + "; Will re-try later.");
}
if (deletable == null) {
deletable = new ArrayList<String>();
}
deletable.add(fileName); // add to deletable
}
}
}
|
public void _testStressLocks(LockFactory lockFactory, String indexDirName) throws IOException {
FSDirectory fs1 = FSDirectory.getDirectory(indexDirName, lockFactory);
// First create a 1 doc index:
IndexWriter w = new IndexWriter(fs1, new WhitespaceAnalyzer(), true);
addDoc(w);
w.close();
WriterThread writer = new WriterThread(100, fs1);
SearcherThread searcher = new SearcherThread(100, fs1);
writer.start();
searcher.start();
while(writer.isAlive() || searcher.isAlive()) {
try {
Thread.sleep(1000);
} catch (InterruptedException e) {
}
}
assertTrue("IndexWriter hit unexpected exceptions", !writer.hitException);
assertTrue("IndexSearcher hit unexpected exceptions", !searcher.hitException);
// Cleanup
rmDir(indexDirName);
}
| public void _testStressLocks(LockFactory lockFactory, String indexDirName) throws IOException {
FSDirectory fs1 = FSDirectory.getDirectory(indexDirName, lockFactory, false);
// First create a 1 doc index:
IndexWriter w = new IndexWriter(fs1, new WhitespaceAnalyzer(), true);
addDoc(w);
w.close();
WriterThread writer = new WriterThread(100, fs1);
SearcherThread searcher = new SearcherThread(100, fs1);
writer.start();
searcher.start();
while(writer.isAlive() || searcher.isAlive()) {
try {
Thread.sleep(1000);
} catch (InterruptedException e) {
}
}
assertTrue("IndexWriter hit unexpected exceptions", !writer.hitException);
assertTrue("IndexSearcher hit unexpected exceptions", !searcher.hitException);
// Cleanup
rmDir(indexDirName);
}
|
private void testIndexInternal(int maxWait) throws IOException {
final boolean create = true;
//Directory rd = new RAMDirectory();
// work on disk to make sure potential lock problems are tested:
String tempDir = System.getProperty("java.io.tmpdir");
if (tempDir == null)
throw new IOException("java.io.tmpdir undefined, cannot run test");
File indexDir = new File(tempDir, "lucenetestindex");
Directory rd = FSDirectory.getDirectory(indexDir);
IndexThread.id = 0;
IndexThread.idStack.clear();
IndexModifier index = new IndexModifier(rd, new StandardAnalyzer(), create);
IndexThread thread1 = new IndexThread(index, maxWait, 1);
thread1.start();
IndexThread thread2 = new IndexThread(index, maxWait, 2);
thread2.start();
while(thread1.isAlive() || thread2.isAlive()) {
try {
Thread.sleep(100);
} catch (InterruptedException e) {
throw new RuntimeException(e);
}
}
index.optimize();
int added = thread1.added + thread2.added;
int deleted = thread1.deleted + thread2.deleted;
assertEquals(added-deleted, index.docCount());
index.close();
try {
index.close();
fail();
} catch(IllegalStateException e) {
// expected exception
}
rmDir(indexDir);
}
| private void testIndexInternal(int maxWait) throws IOException {
final boolean create = true;
//Directory rd = new RAMDirectory();
// work on disk to make sure potential lock problems are tested:
String tempDir = System.getProperty("java.io.tmpdir");
if (tempDir == null)
throw new IOException("java.io.tmpdir undefined, cannot run test");
File indexDir = new File(tempDir, "lucenetestindex");
Directory rd = FSDirectory.getDirectory(indexDir, null, false);
IndexThread.id = 0;
IndexThread.idStack.clear();
IndexModifier index = new IndexModifier(rd, new StandardAnalyzer(), create);
IndexThread thread1 = new IndexThread(index, maxWait, 1);
thread1.start();
IndexThread thread2 = new IndexThread(index, maxWait, 2);
thread2.start();
while(thread1.isAlive() || thread2.isAlive()) {
try {
Thread.sleep(100);
} catch (InterruptedException e) {
throw new RuntimeException(e);
}
}
index.optimize();
int added = thread1.added + thread2.added;
int deleted = thread1.deleted + thread2.deleted;
assertEquals(added-deleted, index.docCount());
index.close();
try {
index.close();
fail();
} catch(IllegalStateException e) {
// expected exception
}
rmDir(indexDir);
}
|
public void testThreadedOptimize() throws Exception {
Directory directory = new MockRAMDirectory();
runTest(directory, false, null);
runTest(directory, true, null);
runTest(directory, false, new ConcurrentMergeScheduler());
runTest(directory, true, new ConcurrentMergeScheduler());
directory.close();
String tempDir = System.getProperty("tempDir");
if (tempDir == null)
throw new IOException("tempDir undefined, cannot run test");
String dirName = tempDir + "/luceneTestThreadedOptimize";
directory = FSDirectory.getDirectory(dirName);
runTest(directory, false, null);
runTest(directory, true, null);
runTest(directory, false, new ConcurrentMergeScheduler());
runTest(directory, true, new ConcurrentMergeScheduler());
directory.close();
_TestUtil.rmDir(dirName);
}
| public void testThreadedOptimize() throws Exception {
Directory directory = new MockRAMDirectory();
runTest(directory, false, null);
runTest(directory, true, null);
runTest(directory, false, new ConcurrentMergeScheduler());
runTest(directory, true, new ConcurrentMergeScheduler());
directory.close();
String tempDir = System.getProperty("tempDir");
if (tempDir == null)
throw new IOException("tempDir undefined, cannot run test");
String dirName = tempDir + "/luceneTestThreadedOptimize";
directory = FSDirectory.getDirectory(dirName, null, false);
runTest(directory, false, null);
runTest(directory, true, null);
runTest(directory, false, new ConcurrentMergeScheduler());
runTest(directory, true, new ConcurrentMergeScheduler());
directory.close();
_TestUtil.rmDir(dirName);
}
|
public void testAtomicUpdates() throws Exception {
Directory directory;
// First in a RAM directory:
directory = new MockRAMDirectory();
runTest(directory);
directory.close();
// Second in an FSDirectory:
String tempDir = System.getProperty("java.io.tmpdir");
File dirPath = new File(tempDir, "lucene.test.atomic");
directory = FSDirectory.getDirectory(dirPath);
runTest(directory);
directory.close();
_TestUtil.rmDir(dirPath);
}
| public void testAtomicUpdates() throws Exception {
Directory directory;
// First in a RAM directory:
directory = new MockRAMDirectory();
runTest(directory);
directory.close();
// Second in an FSDirectory:
String tempDir = System.getProperty("java.io.tmpdir");
File dirPath = new File(tempDir, "lucene.test.atomic");
directory = FSDirectory.getDirectory(dirPath, null, false);
runTest(directory);
directory.close();
_TestUtil.rmDir(dirPath);
}
|
public void setUp() throws Exception {
super.setUp();
File file = new File(System.getProperty("tempDir"), "testIndex");
_TestUtil.rmDir(file);
dir = FSDirectory.getDirectory(file);
}
| public void setUp() throws Exception {
super.setUp();
File file = new File(System.getProperty("tempDir"), "testIndex");
_TestUtil.rmDir(file);
dir = FSDirectory.getDirectory(file, null, false);
}
|
public IndexableField createField(SchemaField field, Object val, float boost) {
if (val == null) return null;
if (!field.stored()) {
log.trace("Ignoring unstored binary field: " + field);
return null;
}
byte[] buf = null;
int offset = 0, len = 0;
if (val instanceof byte[]) {
buf = (byte[]) val;
len = buf.length;
} else if (val instanceof ByteBuffer && ((ByteBuffer)val).hasArray()) {
ByteBuffer byteBuf = (ByteBuffer) val;
buf = byteBuf.array();
offset = byteBuf.position();
len = byteBuf.limit() - byteBuf.position();
} else {
String strVal = val.toString();
//the string has to be a base64 encoded string
buf = Base64.base64ToByteArray(strVal);
offset = 0;
len = buf.length;
}
Field f = new org.apache.lucene.document.BinaryField(field.getName(), buf, offset, len);
f.setBoost(boost);
return f;
}
| public IndexableField createField(SchemaField field, Object val, float boost) {
if (val == null) return null;
if (!field.stored()) {
log.trace("Ignoring unstored binary field: " + field);
return null;
}
byte[] buf = null;
int offset = 0, len = 0;
if (val instanceof byte[]) {
buf = (byte[]) val;
len = buf.length;
} else if (val instanceof ByteBuffer && ((ByteBuffer)val).hasArray()) {
ByteBuffer byteBuf = (ByteBuffer) val;
buf = byteBuf.array();
offset = byteBuf.position();
len = byteBuf.limit() - byteBuf.position();
} else {
String strVal = val.toString();
//the string has to be a base64 encoded string
buf = Base64.base64ToByteArray(strVal);
offset = 0;
len = buf.length;
}
Field f = new org.apache.lucene.document.StoredField(field.getName(), buf, offset, len);
f.setBoost(boost);
return f;
}
|
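For the pair above: in the Lucene 4.x field API this code targets, stored-only binary content goes into a StoredField (BinaryField was removed), and the (name, bytes, offset, length) constructor carries the same slice the loader computes. A minimal sketch with a made-up field name and payload:

import org.apache.lucene.document.StoredField;

public class StoredBinarySketch {
  public static void main(String[] args) {
    byte[] payload = {0x01, 0x02, 0x03, 0x04};
    // store bytes 1..2 of the buffer under a stored-only binary field
    StoredField f = new StoredField("payload_bin", payload, 1, 2);
    System.out.println(f.name() + " " + f.binaryValue().length);  // payload_bin 2
  }
}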
public static void beforeClass() throws Exception {
NUM_DOCS = atLeast(500);
NUM_ORDS = atLeast(2);
directory = newDirectory();
RandomIndexWriter writer= new RandomIndexWriter(random, directory, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMergePolicy(newLogMergePolicy()));
long theLong = Long.MAX_VALUE;
double theDouble = Double.MAX_VALUE;
byte theByte = Byte.MAX_VALUE;
short theShort = Short.MAX_VALUE;
int theInt = Integer.MAX_VALUE;
float theFloat = Float.MAX_VALUE;
unicodeStrings = new String[NUM_DOCS];
multiValued = new BytesRef[NUM_DOCS][NUM_ORDS];
if (VERBOSE) {
System.out.println("TEST: setUp");
}
for (int i = 0; i < NUM_DOCS; i++){
Document doc = new Document();
doc.add(newField("theLong", String.valueOf(theLong--), StringField.TYPE_UNSTORED));
doc.add(newField("theDouble", String.valueOf(theDouble--), StringField.TYPE_UNSTORED));
doc.add(newField("theByte", String.valueOf(theByte--), StringField.TYPE_UNSTORED));
doc.add(newField("theShort", String.valueOf(theShort--), StringField.TYPE_UNSTORED));
doc.add(newField("theInt", String.valueOf(theInt--), StringField.TYPE_UNSTORED));
doc.add(newField("theFloat", String.valueOf(theFloat--), StringField.TYPE_UNSTORED));
if (i%2 == 0) {
doc.add(newField("sparse", String.valueOf(i), StringField.TYPE_UNSTORED));
}
if (i%2 == 0) {
doc.add(new NumericField("numInt").setIntValue(i));
}
// sometimes skip the field:
if (random.nextInt(40) != 17) {
unicodeStrings[i] = generateString(i);
doc.add(newField("theRandomUnicodeString", unicodeStrings[i], StringField.TYPE_STORED));
}
// sometimes skip the field:
if (random.nextInt(10) != 8) {
for (int j = 0; j < NUM_ORDS; j++) {
String newValue = generateString(i);
multiValued[i][j] = new BytesRef(newValue);
doc.add(newField("theRandomUnicodeMultiValuedField", newValue, StringField.TYPE_STORED));
}
Arrays.sort(multiValued[i]);
}
writer.addDocument(doc);
}
reader = writer.getReader();
writer.close();
}
| public static void beforeClass() throws Exception {
NUM_DOCS = atLeast(500);
NUM_ORDS = atLeast(2);
directory = newDirectory();
RandomIndexWriter writer= new RandomIndexWriter(random, directory, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMergePolicy(newLogMergePolicy()));
long theLong = Long.MAX_VALUE;
double theDouble = Double.MAX_VALUE;
byte theByte = Byte.MAX_VALUE;
short theShort = Short.MAX_VALUE;
int theInt = Integer.MAX_VALUE;
float theFloat = Float.MAX_VALUE;
unicodeStrings = new String[NUM_DOCS];
multiValued = new BytesRef[NUM_DOCS][NUM_ORDS];
if (VERBOSE) {
System.out.println("TEST: setUp");
}
for (int i = 0; i < NUM_DOCS; i++){
Document doc = new Document();
doc.add(newField("theLong", String.valueOf(theLong--), StringField.TYPE_UNSTORED));
doc.add(newField("theDouble", String.valueOf(theDouble--), StringField.TYPE_UNSTORED));
doc.add(newField("theByte", String.valueOf(theByte--), StringField.TYPE_UNSTORED));
doc.add(newField("theShort", String.valueOf(theShort--), StringField.TYPE_UNSTORED));
doc.add(newField("theInt", String.valueOf(theInt--), StringField.TYPE_UNSTORED));
doc.add(newField("theFloat", String.valueOf(theFloat--), StringField.TYPE_UNSTORED));
if (i%2 == 0) {
doc.add(newField("sparse", String.valueOf(i), StringField.TYPE_UNSTORED));
}
if (i%2 == 0) {
doc.add(new NumericField("numInt", i));
}
// sometimes skip the field:
if (random.nextInt(40) != 17) {
unicodeStrings[i] = generateString(i);
doc.add(newField("theRandomUnicodeString", unicodeStrings[i], StringField.TYPE_STORED));
}
// sometimes skip the field:
if (random.nextInt(10) != 8) {
for (int j = 0; j < NUM_ORDS; j++) {
String newValue = generateString(i);
multiValued[i][j] = new BytesRef(newValue);
doc.add(newField("theRandomUnicodeMultiValuedField", newValue, StringField.TYPE_STORED));
}
Arrays.sort(multiValued[i]);
}
writer.addDocument(doc);
}
reader = writer.getReader();
writer.close();
}
|
public void testMultiValuedNRQ() throws Exception {
Directory directory = newDirectory();
RandomIndexWriter writer = new RandomIndexWriter(random, directory,
newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random))
.setMaxBufferedDocs(_TestUtil.nextInt(random, 50, 1000)));
DecimalFormat format = new DecimalFormat("00000000000", new DecimalFormatSymbols(Locale.US));
int num = atLeast(500);
for (int l = 0; l < num; l++) {
Document doc = new Document();
for (int m=0, c=random.nextInt(10); m<=c; m++) {
int value = random.nextInt(Integer.MAX_VALUE);
doc.add(newField("asc", format.format(value), StringField.TYPE_UNSTORED));
doc.add(new NumericField("trie").setIntValue(value));
}
writer.addDocument(doc);
}
IndexReader reader = writer.getReader();
writer.close();
IndexSearcher searcher=newSearcher(reader);
num = atLeast(50);
for (int i = 0; i < num; i++) {
int lower=random.nextInt(Integer.MAX_VALUE);
int upper=random.nextInt(Integer.MAX_VALUE);
if (lower>upper) {
int a=lower; lower=upper; upper=a;
}
TermRangeQuery cq=TermRangeQuery.newStringRange("asc", format.format(lower), format.format(upper), true, true);
NumericRangeQuery<Integer> tq=NumericRangeQuery.newIntRange("trie", lower, upper, true, true);
TopDocs trTopDocs = searcher.search(cq, 1);
TopDocs nrTopDocs = searcher.search(tq, 1);
assertEquals("Returned count for NumericRangeQuery and TermRangeQuery must be equal", trTopDocs.totalHits, nrTopDocs.totalHits );
}
reader.close();
directory.close();
}
| public void testMultiValuedNRQ() throws Exception {
Directory directory = newDirectory();
RandomIndexWriter writer = new RandomIndexWriter(random, directory,
newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random))
.setMaxBufferedDocs(_TestUtil.nextInt(random, 50, 1000)));
DecimalFormat format = new DecimalFormat("00000000000", new DecimalFormatSymbols(Locale.US));
int num = atLeast(500);
for (int l = 0; l < num; l++) {
Document doc = new Document();
for (int m=0, c=random.nextInt(10); m<=c; m++) {
int value = random.nextInt(Integer.MAX_VALUE);
doc.add(newField("asc", format.format(value), StringField.TYPE_UNSTORED));
doc.add(new NumericField("trie", value));
}
writer.addDocument(doc);
}
IndexReader reader = writer.getReader();
writer.close();
IndexSearcher searcher=newSearcher(reader);
num = atLeast(50);
for (int i = 0; i < num; i++) {
int lower=random.nextInt(Integer.MAX_VALUE);
int upper=random.nextInt(Integer.MAX_VALUE);
if (lower>upper) {
int a=lower; lower=upper; upper=a;
}
TermRangeQuery cq=TermRangeQuery.newStringRange("asc", format.format(lower), format.format(upper), true, true);
NumericRangeQuery<Integer> tq=NumericRangeQuery.newIntRange("trie", lower, upper, true, true);
TopDocs trTopDocs = searcher.search(cq, 1);
TopDocs nrTopDocs = searcher.search(tq, 1);
assertEquals("Returned count for NumericRangeQuery and TermRangeQuery must be equal", trTopDocs.totalHits, nrTopDocs.totalHits );
}
reader.close();
directory.close();
}
|
private void addDoc(RandomIndexWriter w, Collection<String> terms, Map<BytesRef,Integer> termToID, int id) throws IOException {
Document doc = new Document();
doc.add(new NumericField("id").setIntValue(id));
if (VERBOSE) {
System.out.println("TEST: addDoc id:" + id + " terms=" + terms);
}
for (String s2 : terms) {
doc.add(newField("f", s2, StringField.TYPE_UNSTORED));
termToID.put(new BytesRef(s2), id);
}
w.addDocument(doc);
terms.clear();
}
| private void addDoc(RandomIndexWriter w, Collection<String> terms, Map<BytesRef,Integer> termToID, int id) throws IOException {
Document doc = new Document();
doc.add(new NumericField("id", id));
if (VERBOSE) {
System.out.println("TEST: addDoc id:" + id + " terms=" + terms);
}
for (String s2 : terms) {
doc.add(newField("f", s2, StringField.TYPE_UNSTORED));
termToID.put(new BytesRef(s2), id);
}
w.addDocument(doc);
terms.clear();
}
|
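The two pairs above make the same change: the builder-style new NumericField(name).setIntValue(value) call is replaced by a constructor that takes the value directly. A minimal hedged sketch of that pattern, assuming the trunk-era NumericField(String, int) constructor the fixed versions already use; the helper name and the "rating" field are illustrative only, not from the original code.

// Hedged sketch: constructor-style NumericField creation, mirroring the fixed code above.
import org.apache.lucene.document.Document;
import org.apache.lucene.document.NumericField;

class NumericFieldSketch {
  static Document buildDoc(int id, int rating) {
    Document doc = new Document();
    doc.add(new NumericField("id", id));          // was: new NumericField("id").setIntValue(id)
    doc.add(new NumericField("rating", rating));  // value supplied at construction time
    return doc;
  }
}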
public Query rewrite(IndexReader reader) throws IOException {
if (clauses.size() == 1) { // optimize 1-clause queries
BooleanClause c = (BooleanClause)clauses.elementAt(0);
if (!c.prohibited) { // just return clause
Query query = c.query;
if (getBoost() != 1.0f) { // have to clone to boost
query = (Query)query.clone();
query.setBoost(getBoost() * query.getBoost());
}
return query;
}
}
BooleanQuery clone = null; // recursively rewrite
for (int i = 0 ; i < clauses.size(); i++) {
BooleanClause c = (BooleanClause)clauses.elementAt(i);
Query query = c.query.rewrite(reader);
if (query != c.query) { // clause rewrote: must clone
if (clone == null)
clone = (BooleanQuery)this.clone();
clone.clauses.setElementAt
(new BooleanClause(query, c.required, c.prohibited), i);
}
}
if (clone != null) {
return clone; // some clauses rewrote
} else
return this; // no clauses rewrote
}
| public Query rewrite(IndexReader reader) throws IOException {
if (clauses.size() == 1) { // optimize 1-clause queries
BooleanClause c = (BooleanClause)clauses.elementAt(0);
if (!c.prohibited) { // just return clause
Query query = c.query.rewrite(reader); // rewrite first
if (getBoost() != 1.0f) { // have to clone to boost
query = (Query)query.clone();
query.setBoost(getBoost() * query.getBoost());
}
return query;
}
}
BooleanQuery clone = null; // recursively rewrite
for (int i = 0 ; i < clauses.size(); i++) {
BooleanClause c = (BooleanClause)clauses.elementAt(i);
Query query = c.query.rewrite(reader);
if (query != c.query) { // clause rewrote: must clone
if (clone == null)
clone = (BooleanQuery)this.clone();
clone.clauses.setElementAt
(new BooleanClause(query, c.required, c.prohibited), i);
}
}
if (clone != null) {
return clone; // some clauses rewrote
} else
return this; // no clauses rewrote
}
|
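The rewrite() fix above changes only the single-clause fast path: the lone clause's query is itself rewritten before the boost is folded in, so the caller receives the query in rewritten form rather than a boosted copy of the raw clause. A hedged usage sketch against the same vintage API as the code above; the add(Query, required, prohibited) overload is inferred from the BooleanClause constructor used there, and the method and field names are made up.

// Sketch only: a one-clause boosted BooleanQuery wrapping a multi-term query.
static Query boostedSingleClause(IndexReader reader) throws IOException {
  BooleanQuery outer = new BooleanQuery();
  outer.add(new PrefixQuery(new Term("body", "luc")), false, false); // single optional clause
  outer.setBoost(2.0f);
  // Fixed behaviour: the prefix query is rewritten first, then cloned with the 2.0
  // boost applied, so the returned query is already in its rewritten form.
  return outer.rewrite(reader);
}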
public static void removePersistentService(String name)
throws StandardException
{
// For now we only allow dropping in-memory databases.
// This is mostly due to the fact that the current implementation for
// the on-disk back end doesn't handle logDevice when dropping.
// Security is another concern.
if (!name.startsWith(PersistentService.INMEMORY)) {
throw StandardException.newException(
SQLState.SERVICE_DIRECTORY_REMOVE_ERROR, name);
}
monitor.removePersistentService(name);
}
| public static void removePersistentService(String name)
throws StandardException
{
// For now we only allow dropping in-memory databases.
// This is mostly due to the fact that the current implementation for
// the on-disk back end doesn't handle logDevice when dropping.
// Security is another concern.
if (!name.startsWith(PersistentService.INMEMORY + ":")) {
throw StandardException.newException(
SQLState.SERVICE_DIRECTORY_REMOVE_ERROR, name);
}
monitor.removePersistentService(name);
}
|
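The only change in the pair above is the ":" appended to the prefix check, so a database counts as in-memory only when its name starts with the full subsubprotocol prefix plus the separator, not merely with the prefix characters. A small hedged illustration; it assumes INMEMORY is the usual in-memory subsubprotocol constant and the example names are invented.

// name = PersistentService.INMEMORY + ":salesDB"  -> startsWith(INMEMORY + ":") holds, removal proceeds
// name = PersistentService.INMEMORY + "Backup"    -> old check let this through; the new check throws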
public void normalTest1(String query, int[] expdnrs) throws Exception {
BooleanQueryTest bqt = new BooleanQueryTest( query, expdnrs, db1, fieldName, this,
new BasicQueryFactory(maxBasicQueries));
bqt.setVerbose(verbose);
bqt.doTest();
}
| public void normalTest1(String query, int[] expdnrs) throws Exception {
BooleanQueryTst bqt = new BooleanQueryTst( query, expdnrs, db1, fieldName, this,
new BasicQueryFactory(maxBasicQueries));
bqt.setVerbose(verbose);
bqt.doTest();
}
|
public void test01Exceptions() throws Exception {
String m = ExceptionQueryTest.getFailQueries(exceptionQueries, verbose);
if (m.length() > 0) {
fail("No ParseException for:\n" + m);
}
}
| public void test01Exceptions() throws Exception {
String m = ExceptionQueryTst.getFailQueries(exceptionQueries, verbose);
if (m.length() > 0) {
fail("No ParseException for:\n" + m);
}
}
|
protected Query newFieldQuery(Analyzer analyzer, String field, String queryText, boolean quoted) throws SyntaxError {
// Use the analyzer to get all the tokens, and then build a TermQuery,
// PhraseQuery, or nothing based on the term count
TokenStream source;
try {
source = analyzer.tokenStream(field, new StringReader(queryText));
source.reset();
} catch (IOException e) {
throw new SyntaxError("Unable to initialize TokenStream to analyze query text", e);
}
CachingTokenFilter buffer = new CachingTokenFilter(source);
TermToBytesRefAttribute termAtt = null;
PositionIncrementAttribute posIncrAtt = null;
int numTokens = 0;
buffer.reset();
if (buffer.hasAttribute(TermToBytesRefAttribute.class)) {
termAtt = buffer.getAttribute(TermToBytesRefAttribute.class);
}
if (buffer.hasAttribute(PositionIncrementAttribute.class)) {
posIncrAtt = buffer.getAttribute(PositionIncrementAttribute.class);
}
int positionCount = 0;
boolean severalTokensAtSamePosition = false;
boolean hasMoreTokens = false;
if (termAtt != null) {
try {
hasMoreTokens = buffer.incrementToken();
while (hasMoreTokens) {
numTokens++;
int positionIncrement = (posIncrAtt != null) ? posIncrAtt.getPositionIncrement() : 1;
if (positionIncrement != 0) {
positionCount += positionIncrement;
} else {
severalTokensAtSamePosition = true;
}
hasMoreTokens = buffer.incrementToken();
}
} catch (IOException e) {
// ignore
}
}
try {
// rewind the buffer stream
buffer.reset();
// close original stream - all tokens buffered
source.close();
}
catch (IOException e) {
throw new SyntaxError("Cannot close TokenStream analyzing query text", e);
}
BytesRef bytes = termAtt == null ? null : termAtt.getBytesRef();
if (numTokens == 0)
return null;
else if (numTokens == 1) {
try {
boolean hasNext = buffer.incrementToken();
assert hasNext == true;
termAtt.fillBytesRef();
} catch (IOException e) {
// safe to ignore, because we know the number of tokens
}
return newTermQuery(new Term(field, BytesRef.deepCopyOf(bytes)));
} else {
if (severalTokensAtSamePosition || (!quoted && !autoGeneratePhraseQueries)) {
if (positionCount == 1 || (!quoted && !autoGeneratePhraseQueries)) {
// no phrase query:
BooleanQuery q = newBooleanQuery(positionCount == 1);
BooleanClause.Occur occur = positionCount > 1 && operator == AND_OPERATOR ?
BooleanClause.Occur.MUST : BooleanClause.Occur.SHOULD;
for (int i = 0; i < numTokens; i++) {
try {
boolean hasNext = buffer.incrementToken();
assert hasNext == true;
termAtt.fillBytesRef();
} catch (IOException e) {
// safe to ignore, because we know the number of tokens
}
Query currentQuery = newTermQuery(
new Term(field, BytesRef.deepCopyOf(bytes)));
q.add(currentQuery, occur);
}
return q;
}
else {
// phrase query:
MultiPhraseQuery mpq = newMultiPhraseQuery();
mpq.setSlop(phraseSlop);
List<Term> multiTerms = new ArrayList<Term>();
int position = -1;
for (int i = 0; i < numTokens; i++) {
int positionIncrement = 1;
try {
boolean hasNext = buffer.incrementToken();
assert hasNext == true;
termAtt.fillBytesRef();
if (posIncrAtt != null) {
positionIncrement = posIncrAtt.getPositionIncrement();
}
} catch (IOException e) {
// safe to ignore, because we know the number of tokens
}
if (positionIncrement > 0 && multiTerms.size() > 0) {
if (enablePositionIncrements) {
mpq.add(multiTerms.toArray(new Term[0]),position);
} else {
mpq.add(multiTerms.toArray(new Term[0]));
}
multiTerms.clear();
}
position += positionIncrement;
multiTerms.add(new Term(field, BytesRef.deepCopyOf(bytes)));
}
if (enablePositionIncrements) {
mpq.add(multiTerms.toArray(new Term[0]),position);
} else {
mpq.add(multiTerms.toArray(new Term[0]));
}
return mpq;
}
}
else {
PhraseQuery pq = newPhraseQuery();
pq.setSlop(phraseSlop);
int position = -1;
for (int i = 0; i < numTokens; i++) {
int positionIncrement = 1;
try {
boolean hasNext = buffer.incrementToken();
assert hasNext == true;
termAtt.fillBytesRef();
if (posIncrAtt != null) {
positionIncrement = posIncrAtt.getPositionIncrement();
}
} catch (IOException e) {
// safe to ignore, because we know the number of tokens
}
if (enablePositionIncrements) {
position += positionIncrement;
pq.add(new Term(field, BytesRef.deepCopyOf(bytes)),position);
} else {
pq.add(new Term(field, BytesRef.deepCopyOf(bytes)));
}
}
return pq;
}
}
}
| protected Query newFieldQuery(Analyzer analyzer, String field, String queryText, boolean quoted) throws SyntaxError {
// Use the analyzer to get all the tokens, and then build a TermQuery,
// PhraseQuery, or nothing based on the term count
TokenStream source;
try {
source = analyzer.tokenStream(field, queryText);
source.reset();
} catch (IOException e) {
throw new SyntaxError("Unable to initialize TokenStream to analyze query text", e);
}
CachingTokenFilter buffer = new CachingTokenFilter(source);
TermToBytesRefAttribute termAtt = null;
PositionIncrementAttribute posIncrAtt = null;
int numTokens = 0;
buffer.reset();
if (buffer.hasAttribute(TermToBytesRefAttribute.class)) {
termAtt = buffer.getAttribute(TermToBytesRefAttribute.class);
}
if (buffer.hasAttribute(PositionIncrementAttribute.class)) {
posIncrAtt = buffer.getAttribute(PositionIncrementAttribute.class);
}
int positionCount = 0;
boolean severalTokensAtSamePosition = false;
boolean hasMoreTokens = false;
if (termAtt != null) {
try {
hasMoreTokens = buffer.incrementToken();
while (hasMoreTokens) {
numTokens++;
int positionIncrement = (posIncrAtt != null) ? posIncrAtt.getPositionIncrement() : 1;
if (positionIncrement != 0) {
positionCount += positionIncrement;
} else {
severalTokensAtSamePosition = true;
}
hasMoreTokens = buffer.incrementToken();
}
} catch (IOException e) {
// ignore
}
}
try {
// rewind the buffer stream
buffer.reset();
// close original stream - all tokens buffered
source.close();
}
catch (IOException e) {
throw new SyntaxError("Cannot close TokenStream analyzing query text", e);
}
BytesRef bytes = termAtt == null ? null : termAtt.getBytesRef();
if (numTokens == 0)
return null;
else if (numTokens == 1) {
try {
boolean hasNext = buffer.incrementToken();
assert hasNext == true;
termAtt.fillBytesRef();
} catch (IOException e) {
// safe to ignore, because we know the number of tokens
}
return newTermQuery(new Term(field, BytesRef.deepCopyOf(bytes)));
} else {
if (severalTokensAtSamePosition || (!quoted && !autoGeneratePhraseQueries)) {
if (positionCount == 1 || (!quoted && !autoGeneratePhraseQueries)) {
// no phrase query:
BooleanQuery q = newBooleanQuery(positionCount == 1);
BooleanClause.Occur occur = positionCount > 1 && operator == AND_OPERATOR ?
BooleanClause.Occur.MUST : BooleanClause.Occur.SHOULD;
for (int i = 0; i < numTokens; i++) {
try {
boolean hasNext = buffer.incrementToken();
assert hasNext == true;
termAtt.fillBytesRef();
} catch (IOException e) {
// safe to ignore, because we know the number of tokens
}
Query currentQuery = newTermQuery(
new Term(field, BytesRef.deepCopyOf(bytes)));
q.add(currentQuery, occur);
}
return q;
}
else {
// phrase query:
MultiPhraseQuery mpq = newMultiPhraseQuery();
mpq.setSlop(phraseSlop);
List<Term> multiTerms = new ArrayList<Term>();
int position = -1;
for (int i = 0; i < numTokens; i++) {
int positionIncrement = 1;
try {
boolean hasNext = buffer.incrementToken();
assert hasNext == true;
termAtt.fillBytesRef();
if (posIncrAtt != null) {
positionIncrement = posIncrAtt.getPositionIncrement();
}
} catch (IOException e) {
// safe to ignore, because we know the number of tokens
}
if (positionIncrement > 0 && multiTerms.size() > 0) {
if (enablePositionIncrements) {
mpq.add(multiTerms.toArray(new Term[0]),position);
} else {
mpq.add(multiTerms.toArray(new Term[0]));
}
multiTerms.clear();
}
position += positionIncrement;
multiTerms.add(new Term(field, BytesRef.deepCopyOf(bytes)));
}
if (enablePositionIncrements) {
mpq.add(multiTerms.toArray(new Term[0]),position);
} else {
mpq.add(multiTerms.toArray(new Term[0]));
}
return mpq;
}
}
else {
PhraseQuery pq = newPhraseQuery();
pq.setSlop(phraseSlop);
int position = -1;
for (int i = 0; i < numTokens; i++) {
int positionIncrement = 1;
try {
boolean hasNext = buffer.incrementToken();
assert hasNext == true;
termAtt.fillBytesRef();
if (posIncrAtt != null) {
positionIncrement = posIncrAtt.getPositionIncrement();
}
} catch (IOException e) {
// safe to ignore, because we know the number of tokens
}
if (enablePositionIncrements) {
position += positionIncrement;
pq.add(new Term(field, BytesRef.deepCopyOf(bytes)),position);
} else {
pq.add(new Term(field, BytesRef.deepCopyOf(bytes)));
}
}
return pq;
}
}
}
|
public void testLUCENE_3042() throws Exception {
String testString = "t";
Analyzer analyzer = new MockAnalyzer(random());
TokenStream stream = analyzer.tokenStream("dummy", new StringReader(testString));
stream.reset();
while (stream.incrementToken()) {
// consume
}
stream.end();
stream.close();
assertAnalyzesToReuse(analyzer, testString, new String[] { "t" });
}
| public void testLUCENE_3042() throws Exception {
String testString = "t";
Analyzer analyzer = new MockAnalyzer(random());
TokenStream stream = analyzer.tokenStream("dummy", testString);
stream.reset();
while (stream.incrementToken()) {
// consume
}
stream.end();
stream.close();
assertAnalyzesToReuse(analyzer, testString, new String[] { "t" });
}
|
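Several pairs in this section (newFieldQuery, testLUCENE_3042, and the test methods that follow) make the same substitution: analyzer.tokenStream(field, new StringReader(text)) becomes analyzer.tokenStream(field, text), using the String-accepting overload while the reset/incrementToken/end/close consumption pattern stays unchanged. A minimal hedged sketch of that pattern; the helper name and the "dummy" field are illustrative, while the overload and attribute classes are the ones the fixed code above already calls.

// Sketch of the consume loop used by the fixed versions above.
static List<String> collectTokens(Analyzer analyzer, String text) throws IOException {
  TokenStream ts = analyzer.tokenStream("dummy", text); // String overload, no StringReader
  CharTermAttribute termAtt = ts.addAttribute(CharTermAttribute.class);
  List<String> tokens = new ArrayList<String>();
  ts.reset();
  while (ts.incrementToken()) {
    tokens.add(termAtt.toString());
  }
  ts.end();
  ts.close();
  return tokens;
}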
public void testRandomPhrases() throws Exception {
Directory dir = newDirectory();
Analyzer analyzer = new MockAnalyzer(random());
RandomIndexWriter w = new RandomIndexWriter(random(), dir, newIndexWriterConfig(TEST_VERSION_CURRENT, analyzer).setMergePolicy(newLogMergePolicy()));
List<List<String>> docs = new ArrayList<List<String>>();
Document d = new Document();
Field f = newTextField("f", "", Field.Store.NO);
d.add(f);
Random r = random();
int NUM_DOCS = atLeast(10);
for (int i = 0; i < NUM_DOCS; i++) {
// must be > 4096 so it spans multiple chunks
int termCount = _TestUtil.nextInt(random(), 4097, 8200);
List<String> doc = new ArrayList<String>();
StringBuilder sb = new StringBuilder();
while(doc.size() < termCount) {
if (r.nextInt(5) == 1 || docs.size() == 0) {
// make new non-empty-string term
String term;
while(true) {
term = _TestUtil.randomUnicodeString(r);
if (term.length() > 0) {
break;
}
}
TokenStream ts = analyzer.tokenStream("ignore", new StringReader(term));
CharTermAttribute termAttr = ts.addAttribute(CharTermAttribute.class);
ts.reset();
while(ts.incrementToken()) {
String text = termAttr.toString();
doc.add(text);
sb.append(text).append(' ');
}
ts.end();
ts.close();
} else {
// pick existing sub-phrase
List<String> lastDoc = docs.get(r.nextInt(docs.size()));
int len = _TestUtil.nextInt(r, 1, 10);
int start = r.nextInt(lastDoc.size()-len);
for(int k=start;k<start+len;k++) {
String t = lastDoc.get(k);
doc.add(t);
sb.append(t).append(' ');
}
}
}
docs.add(doc);
f.setStringValue(sb.toString());
w.addDocument(d);
}
IndexReader reader = w.getReader();
IndexSearcher s = newSearcher(reader);
w.close();
// now search
int num = atLeast(10);
for(int i=0;i<num;i++) {
int docID = r.nextInt(docs.size());
List<String> doc = docs.get(docID);
final int numTerm = _TestUtil.nextInt(r, 2, 20);
final int start = r.nextInt(doc.size()-numTerm);
PhraseQuery pq = new PhraseQuery();
StringBuilder sb = new StringBuilder();
for(int t=start;t<start+numTerm;t++) {
pq.add(new Term("f", doc.get(t)));
sb.append(doc.get(t)).append(' ');
}
TopDocs hits = s.search(pq, NUM_DOCS);
boolean found = false;
for(int j=0;j<hits.scoreDocs.length;j++) {
if (hits.scoreDocs[j].doc == docID) {
found = true;
break;
}
}
assertTrue("phrase '" + sb + "' not found; start=" + start, found);
}
reader.close();
dir.close();
}
| public void testRandomPhrases() throws Exception {
Directory dir = newDirectory();
Analyzer analyzer = new MockAnalyzer(random());
RandomIndexWriter w = new RandomIndexWriter(random(), dir, newIndexWriterConfig(TEST_VERSION_CURRENT, analyzer).setMergePolicy(newLogMergePolicy()));
List<List<String>> docs = new ArrayList<List<String>>();
Document d = new Document();
Field f = newTextField("f", "", Field.Store.NO);
d.add(f);
Random r = random();
int NUM_DOCS = atLeast(10);
for (int i = 0; i < NUM_DOCS; i++) {
// must be > 4096 so it spans multiple chunks
int termCount = _TestUtil.nextInt(random(), 4097, 8200);
List<String> doc = new ArrayList<String>();
StringBuilder sb = new StringBuilder();
while(doc.size() < termCount) {
if (r.nextInt(5) == 1 || docs.size() == 0) {
// make new non-empty-string term
String term;
while(true) {
term = _TestUtil.randomUnicodeString(r);
if (term.length() > 0) {
break;
}
}
TokenStream ts = analyzer.tokenStream("ignore", term);
CharTermAttribute termAttr = ts.addAttribute(CharTermAttribute.class);
ts.reset();
while(ts.incrementToken()) {
String text = termAttr.toString();
doc.add(text);
sb.append(text).append(' ');
}
ts.end();
ts.close();
} else {
// pick existing sub-phrase
List<String> lastDoc = docs.get(r.nextInt(docs.size()));
int len = _TestUtil.nextInt(r, 1, 10);
int start = r.nextInt(lastDoc.size()-len);
for(int k=start;k<start+len;k++) {
String t = lastDoc.get(k);
doc.add(t);
sb.append(t).append(' ');
}
}
}
docs.add(doc);
f.setStringValue(sb.toString());
w.addDocument(d);
}
IndexReader reader = w.getReader();
IndexSearcher s = newSearcher(reader);
w.close();
// now search
int num = atLeast(10);
for(int i=0;i<num;i++) {
int docID = r.nextInt(docs.size());
List<String> doc = docs.get(docID);
final int numTerm = _TestUtil.nextInt(r, 2, 20);
final int start = r.nextInt(doc.size()-numTerm);
PhraseQuery pq = new PhraseQuery();
StringBuilder sb = new StringBuilder();
for(int t=start;t<start+numTerm;t++) {
pq.add(new Term("f", doc.get(t)));
sb.append(doc.get(t)).append(' ');
}
TopDocs hits = s.search(pq, NUM_DOCS);
boolean found = false;
for(int j=0;j<hits.scoreDocs.length;j++) {
if (hits.scoreDocs[j].doc == docID) {
found = true;
break;
}
}
assertTrue("phrase '" + sb + "' not found; start=" + start, found);
}
reader.close();
dir.close();
}
|
public void testEndOffsetPositionWithTeeSinkTokenFilter() throws Exception {
Directory dir = newDirectory();
Analyzer analyzer = new MockAnalyzer(random(), MockTokenizer.WHITESPACE, false);
IndexWriter w = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, analyzer));
Document doc = new Document();
TokenStream tokenStream = analyzer.tokenStream("field", new StringReader("abcd "));
TeeSinkTokenFilter tee = new TeeSinkTokenFilter(tokenStream);
TokenStream sink = tee.newSinkTokenStream();
FieldType ft = new FieldType(TextField.TYPE_NOT_STORED);
ft.setStoreTermVectors(true);
ft.setStoreTermVectorOffsets(true);
ft.setStoreTermVectorPositions(true);
Field f1 = new Field("field", tee, ft);
Field f2 = new Field("field", sink, ft);
doc.add(f1);
doc.add(f2);
w.addDocument(doc);
w.close();
IndexReader r = DirectoryReader.open(dir);
Terms vector = r.getTermVectors(0).terms("field");
assertEquals(1, vector.size());
TermsEnum termsEnum = vector.iterator(null);
termsEnum.next();
assertEquals(2, termsEnum.totalTermFreq());
DocsAndPositionsEnum positions = termsEnum.docsAndPositions(null, null);
assertTrue(positions.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
assertEquals(2, positions.freq());
positions.nextPosition();
assertEquals(0, positions.startOffset());
assertEquals(4, positions.endOffset());
positions.nextPosition();
assertEquals(8, positions.startOffset());
assertEquals(12, positions.endOffset());
assertEquals(DocIdSetIterator.NO_MORE_DOCS, positions.nextDoc());
r.close();
dir.close();
}
| public void testEndOffsetPositionWithTeeSinkTokenFilter() throws Exception {
Directory dir = newDirectory();
Analyzer analyzer = new MockAnalyzer(random(), MockTokenizer.WHITESPACE, false);
IndexWriter w = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, analyzer));
Document doc = new Document();
TokenStream tokenStream = analyzer.tokenStream("field", "abcd ");
TeeSinkTokenFilter tee = new TeeSinkTokenFilter(tokenStream);
TokenStream sink = tee.newSinkTokenStream();
FieldType ft = new FieldType(TextField.TYPE_NOT_STORED);
ft.setStoreTermVectors(true);
ft.setStoreTermVectorOffsets(true);
ft.setStoreTermVectorPositions(true);
Field f1 = new Field("field", tee, ft);
Field f2 = new Field("field", sink, ft);
doc.add(f1);
doc.add(f2);
w.addDocument(doc);
w.close();
IndexReader r = DirectoryReader.open(dir);
Terms vector = r.getTermVectors(0).terms("field");
assertEquals(1, vector.size());
TermsEnum termsEnum = vector.iterator(null);
termsEnum.next();
assertEquals(2, termsEnum.totalTermFreq());
DocsAndPositionsEnum positions = termsEnum.docsAndPositions(null, null);
assertTrue(positions.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
assertEquals(2, positions.freq());
positions.nextPosition();
assertEquals(0, positions.startOffset());
assertEquals(4, positions.endOffset());
positions.nextPosition();
assertEquals(8, positions.startOffset());
assertEquals(12, positions.endOffset());
assertEquals(DocIdSetIterator.NO_MORE_DOCS, positions.nextDoc());
r.close();
dir.close();
}
|
public void testTokenAttributes() throws Exception {
TokenStream ts = a.tokenStream("dummy", new StringReader("This is a test"));
ScriptAttribute scriptAtt = ts.addAttribute(ScriptAttribute.class);
ts.reset();
while (ts.incrementToken()) {
assertEquals(UScript.LATIN, scriptAtt.getCode());
assertEquals(UScript.getName(UScript.LATIN), scriptAtt.getName());
assertEquals(UScript.getShortName(UScript.LATIN), scriptAtt.getShortName());
assertTrue(ts.reflectAsString(false).contains("script=Latin"));
}
ts.end();
ts.close();
}
| public void testTokenAttributes() throws Exception {
TokenStream ts = a.tokenStream("dummy", "This is a test");
ScriptAttribute scriptAtt = ts.addAttribute(ScriptAttribute.class);
ts.reset();
while (ts.incrementToken()) {
assertEquals(UScript.LATIN, scriptAtt.getCode());
assertEquals(UScript.getName(UScript.LATIN), scriptAtt.getName());
assertEquals(UScript.getShortName(UScript.LATIN), scriptAtt.getShortName());
assertTrue(ts.reflectAsString(false).contains("script=Latin"));
}
ts.end();
ts.close();
}
|
public int read(byte[] b, int off, int len) throws IOException {
int actualLength = 0;
updateIfRequired();
//If maxPos is not invalid then
//ensure that the length(len)
//that is requested falls within
//the restriction set by maxPos.
if(maxPos != -1) {
actualLength
= (int )Math.min(len, maxPos - pos);
}
else {
//maxPos has not been set. Make
//maxPos the length requested.
actualLength = len;
}
int retValue = super.read(b, off, actualLength);
if (retValue > 0)
pos += retValue;
return retValue;
}
| public int read(byte[] b, int off, int len) throws IOException {
int actualLength = 0;
updateIfRequired();
//If maxPos is not invalid then
//ensure that the length(len)
//that is requested falls within
//the restriction set by maxPos.
if(maxPos != -1) {
actualLength
= (int )Math.min(len, maxPos - pos);
}
else {
//maxPos has not been set. Make
//maxPos the length requested.
actualLength = len;
}
int retValue = stream.read(b, off, actualLength);
if (retValue > 0)
pos += retValue;
return retValue;
}
|
public void apply(org.apache.cassandra.db.migration.avro.CfDef cf_def) throws ConfigurationException
{
// validate
if (!cf_def.keyspace.toString().equals(ksName))
throw new ConfigurationException(String.format("Keyspace mismatch (found %s; expected %s)",
cf_def.keyspace, tableName));
if (!cf_def.name.toString().equals(cfName))
throw new ConfigurationException(String.format("Column family mismatch (found %s; expected %s)",
cf_def.name, cfName));
if (!cf_def.id.equals(cfId))
throw new ConfigurationException(String.format("Column family ID mismatch (found %s; expected %s)",
cf_def.id, cfId));
if (!cf_def.column_type.toString().equals(cfType.name()))
throw new ConfigurationException("types do not match.");
if (comparator != TypeParser.parse(cf_def.comparator_type))
throw new ConfigurationException("comparators do not match.");
if (cf_def.subcomparator_type == null || cf_def.subcomparator_type.equals(""))
{
if (subcolumnComparator != null)
throw new ConfigurationException("subcolumncomparators do not match.");
// else, it's null and we're good.
}
else if (subcolumnComparator != TypeParser.parse(cf_def.subcomparator_type))
throw new ConfigurationException("subcolumncomparators do not match.");
validateMinMaxCompactionThresholds(cf_def);
validateMemtableSettings(cf_def);
comment = enforceCommentNotNull(cf_def.comment);
rowCacheSize = cf_def.row_cache_size;
keyCacheSize = cf_def.key_cache_size;
readRepairChance = cf_def.read_repair_chance;
replicateOnWrite = cf_def.replicate_on_write;
gcGraceSeconds = cf_def.gc_grace_seconds;
defaultValidator = TypeParser.parse(cf_def.default_validation_class);
keyValidator = TypeParser.parse(cf_def.key_validation_class);
minCompactionThreshold = cf_def.min_compaction_threshold;
maxCompactionThreshold = cf_def.max_compaction_threshold;
rowCacheSavePeriodInSeconds = cf_def.row_cache_save_period_in_seconds;
keyCacheSavePeriodInSeconds = cf_def.key_cache_save_period_in_seconds;
memtableFlushAfterMins = cf_def.memtable_flush_after_mins;
memtableThroughputInMb = cf_def.memtable_throughput_in_mb;
memtableOperationsInMillions = cf_def.memtable_operations_in_millions;
mergeShardsChance = cf_def.merge_shards_chance;
if (cf_def.row_cache_provider != null)
rowCacheProvider = FBUtilities.newCacheProvider(cf_def.row_cache_provider.toString());
keyAlias = cf_def.key_alias;
// adjust column definitions. figure out who is coming and going.
Set<ByteBuffer> toRemove = new HashSet<ByteBuffer>();
Set<ByteBuffer> newColumns = new HashSet<ByteBuffer>();
Set<org.apache.cassandra.db.migration.avro.ColumnDef> toAdd = new HashSet<org.apache.cassandra.db.migration.avro.ColumnDef>();
for (org.apache.cassandra.db.migration.avro.ColumnDef def : cf_def.column_metadata)
{
newColumns.add(def.name);
if (!column_metadata.containsKey(def.name))
toAdd.add(def);
}
for (ByteBuffer name : column_metadata.keySet())
if (!newColumns.contains(name))
toRemove.add(name);
// remove the ones leaving.
for (ByteBuffer indexName : toRemove)
column_metadata.remove(indexName);
// update the ones staying
for (org.apache.cassandra.db.migration.avro.ColumnDef def : cf_def.column_metadata)
{
ColumnDefinition oldDef = column_metadata.get(def.name);
if (oldDef == null)
continue;
oldDef.setValidator(TypeParser.parse(def.validation_class));
oldDef.setIndexType(def.index_type == null ? null : org.apache.cassandra.thrift.IndexType.valueOf(def.index_type.name()));
oldDef.setIndexName(def.index_name == null ? null : def.index_name.toString());
}
// add the new ones coming in.
for (org.apache.cassandra.db.migration.avro.ColumnDef def : toAdd)
{
AbstractType dValidClass = TypeParser.parse(def.validation_class);
ColumnDefinition cd = new ColumnDefinition(def.name,
dValidClass,
def.index_type == null ? null : org.apache.cassandra.thrift.IndexType.valueOf(def.index_type.toString()),
def.index_name == null ? null : def.index_name.toString());
column_metadata.put(cd.name, cd);
}
}
| public void apply(org.apache.cassandra.db.migration.avro.CfDef cf_def) throws ConfigurationException
{
// validate
if (!cf_def.keyspace.toString().equals(ksName))
throw new ConfigurationException(String.format("Keyspace mismatch (found %s; expected %s)",
cf_def.keyspace, ksName));
if (!cf_def.name.toString().equals(cfName))
throw new ConfigurationException(String.format("Column family mismatch (found %s; expected %s)",
cf_def.name, cfName));
if (!cf_def.id.equals(cfId))
throw new ConfigurationException(String.format("Column family ID mismatch (found %s; expected %s)",
cf_def.id, cfId));
if (!cf_def.column_type.toString().equals(cfType.name()))
throw new ConfigurationException("types do not match.");
if (comparator != TypeParser.parse(cf_def.comparator_type))
throw new ConfigurationException("comparators do not match.");
if (cf_def.subcomparator_type == null || cf_def.subcomparator_type.equals(""))
{
if (subcolumnComparator != null)
throw new ConfigurationException("subcolumncomparators do not match.");
// else, it's null and we're good.
}
else if (subcolumnComparator != TypeParser.parse(cf_def.subcomparator_type))
throw new ConfigurationException("subcolumncomparators do not match.");
validateMinMaxCompactionThresholds(cf_def);
validateMemtableSettings(cf_def);
comment = enforceCommentNotNull(cf_def.comment);
rowCacheSize = cf_def.row_cache_size;
keyCacheSize = cf_def.key_cache_size;
readRepairChance = cf_def.read_repair_chance;
replicateOnWrite = cf_def.replicate_on_write;
gcGraceSeconds = cf_def.gc_grace_seconds;
defaultValidator = TypeParser.parse(cf_def.default_validation_class);
keyValidator = TypeParser.parse(cf_def.key_validation_class);
minCompactionThreshold = cf_def.min_compaction_threshold;
maxCompactionThreshold = cf_def.max_compaction_threshold;
rowCacheSavePeriodInSeconds = cf_def.row_cache_save_period_in_seconds;
keyCacheSavePeriodInSeconds = cf_def.key_cache_save_period_in_seconds;
memtableFlushAfterMins = cf_def.memtable_flush_after_mins;
memtableThroughputInMb = cf_def.memtable_throughput_in_mb;
memtableOperationsInMillions = cf_def.memtable_operations_in_millions;
mergeShardsChance = cf_def.merge_shards_chance;
if (cf_def.row_cache_provider != null)
rowCacheProvider = FBUtilities.newCacheProvider(cf_def.row_cache_provider.toString());
keyAlias = cf_def.key_alias;
// adjust column definitions. figure out who is coming and going.
Set<ByteBuffer> toRemove = new HashSet<ByteBuffer>();
Set<ByteBuffer> newColumns = new HashSet<ByteBuffer>();
Set<org.apache.cassandra.db.migration.avro.ColumnDef> toAdd = new HashSet<org.apache.cassandra.db.migration.avro.ColumnDef>();
for (org.apache.cassandra.db.migration.avro.ColumnDef def : cf_def.column_metadata)
{
newColumns.add(def.name);
if (!column_metadata.containsKey(def.name))
toAdd.add(def);
}
for (ByteBuffer name : column_metadata.keySet())
if (!newColumns.contains(name))
toRemove.add(name);
// remove the ones leaving.
for (ByteBuffer indexName : toRemove)
column_metadata.remove(indexName);
// update the ones staying
for (org.apache.cassandra.db.migration.avro.ColumnDef def : cf_def.column_metadata)
{
ColumnDefinition oldDef = column_metadata.get(def.name);
if (oldDef == null)
continue;
oldDef.setValidator(TypeParser.parse(def.validation_class));
oldDef.setIndexType(def.index_type == null ? null : org.apache.cassandra.thrift.IndexType.valueOf(def.index_type.name()));
oldDef.setIndexName(def.index_name == null ? null : def.index_name.toString());
}
// add the new ones coming in.
for (org.apache.cassandra.db.migration.avro.ColumnDef def : toAdd)
{
AbstractType dValidClass = TypeParser.parse(def.validation_class);
ColumnDefinition cd = new ColumnDefinition(def.name,
dValidClass,
def.index_type == null ? null : org.apache.cassandra.thrift.IndexType.valueOf(def.index_type.toString()),
def.index_name == null ? null : def.index_name.toString());
column_metadata.put(cd.name, cd);
}
}
|
public SegmentInfoWriter getSegmentInfosWriter() {
return writer;
}
} | public SegmentInfoWriter getSegmentInfoWriter() {
return writer;
}
} |
public final void read(Directory directory, String segmentFileName) throws CorruptIndexException, IOException {
boolean success = false;
// Clear any previous segments:
this.clear();
generation = generationFromSegmentsFileName(segmentFileName);
lastGeneration = generation;
ChecksumIndexInput input = new ChecksumIndexInput(directory.openInput(segmentFileName, IOContext.READ));
try {
final int format = input.readInt();
if (format == CodecUtil.CODEC_MAGIC) {
// 4.0+
CodecUtil.checkHeaderNoMagic(input, "segments", VERSION_40, VERSION_40);
version = input.readLong();
counter = input.readInt();
int numSegments = input.readInt();
for(int seg=0;seg<numSegments;seg++) {
String segName = input.readString();
Codec codec = Codec.forName(input.readString());
//System.out.println("SIS.read seg=" + seg + " codec=" + codec);
SegmentInfo info = codec.segmentInfoFormat().getSegmentInfosReader().read(directory, segName, IOContext.READ);
info.setCodec(codec);
long delGen = input.readLong();
int delCount = input.readInt();
assert delCount <= info.getDocCount();
add(new SegmentInfoPerCommit(info, delCount, delGen));
}
userData = input.readStringStringMap();
} else {
Lucene3xSegmentInfoReader.readLegacyInfos(this, directory, input, format);
Codec codec = Codec.forName("Lucene3x");
for (SegmentInfoPerCommit info : this) {
info.info.setCodec(codec);
}
}
final long checksumNow = input.getChecksum();
final long checksumThen = input.readLong();
if (checksumNow != checksumThen) {
throw new CorruptIndexException("checksum mismatch in segments file (resource: " + input + ")");
}
success = true;
} finally {
if (!success) {
// Clear any segment infos we had loaded so we
// have a clean slate on retry:
this.clear();
IOUtils.closeWhileHandlingException(input);
} else {
input.close();
}
}
}
| public final void read(Directory directory, String segmentFileName) throws CorruptIndexException, IOException {
boolean success = false;
// Clear any previous segments:
this.clear();
generation = generationFromSegmentsFileName(segmentFileName);
lastGeneration = generation;
ChecksumIndexInput input = new ChecksumIndexInput(directory.openInput(segmentFileName, IOContext.READ));
try {
final int format = input.readInt();
if (format == CodecUtil.CODEC_MAGIC) {
// 4.0+
CodecUtil.checkHeaderNoMagic(input, "segments", VERSION_40, VERSION_40);
version = input.readLong();
counter = input.readInt();
int numSegments = input.readInt();
for(int seg=0;seg<numSegments;seg++) {
String segName = input.readString();
Codec codec = Codec.forName(input.readString());
//System.out.println("SIS.read seg=" + seg + " codec=" + codec);
SegmentInfo info = codec.segmentInfoFormat().getSegmentInfoReader().read(directory, segName, IOContext.READ);
info.setCodec(codec);
long delGen = input.readLong();
int delCount = input.readInt();
assert delCount <= info.getDocCount();
add(new SegmentInfoPerCommit(info, delCount, delGen));
}
userData = input.readStringStringMap();
} else {
Lucene3xSegmentInfoReader.readLegacyInfos(this, directory, input, format);
Codec codec = Codec.forName("Lucene3x");
for (SegmentInfoPerCommit info : this) {
info.info.setCodec(codec);
}
}
final long checksumNow = input.getChecksum();
final long checksumThen = input.readLong();
if (checksumNow != checksumThen) {
throw new CorruptIndexException("checksum mismatch in segments file (resource: " + input + ")");
}
success = true;
} finally {
if (!success) {
// Clear any segment infos we had loaded so we
// have a clean slate on retry:
this.clear();
IOUtils.closeWhileHandlingException(input);
} else {
input.close();
}
}
}
|
private void go()
throws Exception
{
try
{
// Connect to the database, prepare statements,
// and load id-to-name mappings.
this.conn = DriverManager.getConnection(sourceDBUrl);
prepForDump();
boolean at10_6 = atVersion( conn, 10, 6 );
// Generate DDL.
// Start with schemas, since we might need them to
// exist for jars to load properly.
DB_Schema.doSchemas(this.conn,
(tableList != null) && (targetSchema == null));
DB_Sequence.doSequences( conn );
if (tableList == null) {
// Don't do these if user just wants table-related objects.
DB_Jar.doJars(sourceDBName, this.conn);
DB_Alias.doProceduresFunctionsAndUDTs(this.conn, at10_6 );
}
DB_Table.doTables(this.conn, tableIdToNameMap);
DB_Index.doIndexes(this.conn);
DB_Alias.doSynonyms(this.conn);
DB_Key.doKeys(this.conn);
DB_Check.doChecks(this.conn);
if (!skipViews)
DB_View.doViews(this.conn);
DB_Trigger.doTriggers(this.conn);
DB_Roles.doRoles(this.conn);
DB_GrantRevoke.doAuthorizations(this.conn, at10_6);
// That's it; we're done.
if (getColNameFromNumberQuery != null)
getColNameFromNumberQuery.close();
Logs.cleanup();
}
catch (SQLException sqlE)
{
Logs.debug(sqlE);
Logs.debug(Logs.unRollExceptions(sqlE), (String)null);
Logs.cleanup();
return;
}
catch (Exception e)
{
Logs.debug(e);
Logs.cleanup();
return;
}
finally {
// Close our connection.
if (conn != null) {
conn.commit();
conn.close();
}
}
}
| private void go()
throws Exception
{
try
{
// Connect to the database, prepare statements,
// and load id-to-name mappings.
this.conn = DriverManager.getConnection(sourceDBUrl);
prepForDump();
boolean at10_6 = atVersion( conn, 10, 6 );
// Generate DDL.
// Start with schemas, since we might need them to
// exist for jars to load properly.
DB_Schema.doSchemas(this.conn,
(tableList != null) && (targetSchema == null));
if ( at10_6 ) { DB_Sequence.doSequences( conn ); }
if (tableList == null) {
// Don't do these if user just wants table-related objects.
DB_Jar.doJars(sourceDBName, this.conn);
DB_Alias.doProceduresFunctionsAndUDTs(this.conn, at10_6 );
}
DB_Table.doTables(this.conn, tableIdToNameMap);
DB_Index.doIndexes(this.conn);
DB_Alias.doSynonyms(this.conn);
DB_Key.doKeys(this.conn);
DB_Check.doChecks(this.conn);
if (!skipViews)
DB_View.doViews(this.conn);
DB_Trigger.doTriggers(this.conn);
DB_Roles.doRoles(this.conn);
DB_GrantRevoke.doAuthorizations(this.conn, at10_6);
// That's it; we're done.
if (getColNameFromNumberQuery != null)
getColNameFromNumberQuery.close();
Logs.cleanup();
}
catch (SQLException sqlE)
{
Logs.debug(sqlE);
Logs.debug(Logs.unRollExceptions(sqlE), (String)null);
Logs.cleanup();
return;
}
catch (Exception e)
{
Logs.debug(e);
Logs.cleanup();
return;
}
finally {
// Close our connection.
if (conn != null) {
conn.commit();
conn.close();
}
}
}
|
public static long absoluteFromFraction(double fractOrAbs, long total)
{
if (fractOrAbs < 0)
throw new UnsupportedOperationException("unexpected negative value " + fractOrAbs);
if (0 < fractOrAbs && fractOrAbs < 1)
{
// fraction
return Math.max(1, (long)(fractOrAbs * total));
}
// absolute
assert fractOrAbs >= 1 || fractOrAbs == 0;
return (long)fractOrAbs;
}
| public static long absoluteFromFraction(double fractOrAbs, long total)
{
if (fractOrAbs < 0)
throw new UnsupportedOperationException("unexpected negative value " + fractOrAbs);
if (0 < fractOrAbs && fractOrAbs <= 1)
{
// fraction
return Math.max(1, (long)(fractOrAbs * total));
}
// absolute
assert fractOrAbs >= 1 || fractOrAbs == 0;
return (long)fractOrAbs;
}
|
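The only difference in the pair above is the boundary test, fractOrAbs < 1 versus fractOrAbs <= 1, which matters solely for an input of exactly 1.0: the old code fell through to the absolute branch and returned 1, while the fix treats 1.0 as a fraction meaning all of total. A small worked illustration with made-up totals.

// absoluteFromFraction(0.5, 200) -> 100  (fraction branch in both versions)
// absoluteFromFraction(1.0, 200) -> 1 before the fix (absolute branch), 200 after (fraction branch)
// absoluteFromFraction(25,  200) -> 25   (absolute branch in both versions)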
public void init(NamedList args) {
super.init(args);
SolrParams p = SolrParams.toSolrParams(args);
restrictToField = p.get("termSourceField");
spellcheckerIndexDir = p.get("spellcheckerIndexDir");
try {
spellChecker = new SpellChecker(FSDirectory.getDirectory(spellcheckerIndexDir));
} catch (IOException e) {
throw new RuntimeException("Cannot open SpellChecker index", e);
}
}
| public void init(NamedList args) {
super.init(args);
SolrParams p = SolrParams.toSolrParams(args);
termSourceField = p.get("termSourceField");
spellcheckerIndexDir = p.get("spellcheckerIndexDir");
try {
spellChecker = new SpellChecker(FSDirectory.getDirectory(spellcheckerIndexDir));
} catch (IOException e) {
throw new RuntimeException("Cannot open SpellChecker index", e);
}
}
|
public void BlueprintSample() throws Exception {
System.out.println(">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> Start Test Blueprint Sample");
//////////////////////////////
//Test BlueprintStateMBean
//////////////////////////////
//find the Blueprint Sample bundle id and the blueprint extender bundle id
long sampleBundleId = -1;
long extenderBundleId = -1; // the blueprint extender bundle "org.apache.geronimo.blueprint.geronimo-blueprint" is also a blueprint bundle.
for (Bundle bundle : bundleContext.getBundles()){
if (bundle.getSymbolicName().equals("org.apache.aries.blueprint.sample")) sampleBundleId = bundle.getBundleId();
if (bundle.getSymbolicName().equals("org.apache.aries.blueprint")) extenderBundleId = bundle.getBundleId();
}
if (-1==sampleBundleId) fail("Blueprint Sample Bundle is not found!");
if (-1==extenderBundleId) fail("Blueprint Extender Bundle is not found!");
//retrieve the proxy object
BlueprintStateMBean stateProxy = (BlueprintStateMBean) MBeanServerInvocationHandler.newProxyInstance(mbeanServer, new ObjectName(BlueprintStateMBean.OBJECTNAME), BlueprintStateMBean.class, false);
// test getBlueprintBundleIds
long[] bpBundleIds = stateProxy.getBlueprintBundleIds();
assertEquals(2, bpBundleIds.length);
// test getLastEvent
BlueprintEventValidator sampleValidator = new BlueprintEventValidator(sampleBundleId, extenderBundleId, 2);
sampleValidator.validate(stateProxy.getLastEvent(sampleBundleId));
// test getLastEvents
TabularData lastEvents = stateProxy.getLastEvents();
assertEquals(BlueprintStateMBean.OSGI_BLUEPRINT_EVENTS_TYPE,lastEvents.getTabularType());
sampleValidator.validate(lastEvents.get(new Long[]{sampleBundleId}));
//////////////////////////////
//Test BlueprintMetadataMBean
//////////////////////////////
//find the Blueprint Sample bundle's container service id
Bundle sampleBundle = bundleContext.getBundle(sampleBundleId);
String filter = "(&(osgi.blueprint.container.symbolicname=" // no similar one in interfaces
+ sampleBundle.getSymbolicName() + ")(osgi.blueprint.container.version=" + sampleBundle.getVersion() + "))";
ServiceReference[] serviceReferences = null;
try {
serviceReferences = bundleContext.getServiceReferences(BlueprintContainer.class.getName(), filter);
} catch (InvalidSyntaxException e) {
throw new RuntimeException(e);
}
long sampleBlueprintContainerServiceId = (Long) serviceReferences[0].getProperty(Constants.SERVICE_ID);
//retrieve the proxy object
BlueprintMetadataMBean metadataProxy = (BlueprintMetadataMBean) MBeanServerInvocationHandler.newProxyInstance(mbeanServer, new ObjectName(BlueprintMetadataMBean.OBJECTNAME), BlueprintMetadataMBean.class, false);
// test getBlueprintContainerServiceIds
long[] bpContainerServiceIds = metadataProxy.getBlueprintContainerServiceIds();
assertEquals(2, bpContainerServiceIds.length);
// test getBlueprintContainerServiceId
assertEquals(sampleBlueprintContainerServiceId, metadataProxy.getBlueprintContainerServiceId(sampleBundleId));
// test getComponentMetadata
// bean: foo
BeanValidator bv_foo = new BeanValidator("org.apache.aries.blueprint.sample.Foo", "init", "destroy");
BeanPropertyValidator bpv_a = new BeanPropertyValidator("a");
bpv_a.setObjectValueValidator(new ValueValidator("5"));
BeanPropertyValidator bpv_b = new BeanPropertyValidator("b");
bpv_b.setObjectValueValidator(new ValueValidator("-1"));
BeanPropertyValidator bpv_bar = new BeanPropertyValidator("bar");
bpv_bar.setObjectValueValidator(new RefValidator("bar"));
BeanPropertyValidator bpv_currency = new BeanPropertyValidator("currency");
bpv_currency.setObjectValueValidator(new ValueValidator("PLN"));
BeanPropertyValidator bpv_date = new BeanPropertyValidator("date");
bpv_date.setObjectValueValidator(new ValueValidator("2009.04.17"));
bv_foo.addPropertyValidators(bpv_a, bpv_b, bpv_bar, bpv_currency, bpv_date);
bv_foo.validate(metadataProxy.getComponentMetadata(sampleBlueprintContainerServiceId, "foo"));
// bean: bar
BeanPropertyValidator bpv_value = new BeanPropertyValidator("value");
bpv_value.setObjectValueValidator(new ValueValidator("Hello FooBar"));
BeanPropertyValidator bpv_context = new BeanPropertyValidator("context");
bpv_context.setObjectValueValidator(new RefValidator("blueprintBundleContext"));
CollectionValidator cv = new CollectionValidator("java.util.List");
cv.addCollectionValueValidators(new ValueValidator("a list element"), new ValueValidator("5", "java.lang.Integer"));
BeanPropertyValidator bpv_list = new BeanPropertyValidator("list");
bpv_list.setObjectValueValidator(cv);
BeanValidator bv_bar = new BeanValidator("org.apache.aries.blueprint.sample.Bar");
bv_bar.addPropertyValidators(bpv_value, bpv_context, bpv_list);
bv_bar.validate(metadataProxy.getComponentMetadata(sampleBlueprintContainerServiceId, "bar"));
// service: ref=foo, no componentId set. So using it to test getComponentIdsByType.
String[] serviceComponentIds = metadataProxy.getComponentIdsByType(sampleBlueprintContainerServiceId, BlueprintMetadataMBean.SERVICE_METADATA);
assertEquals("There should be only one service component in this sample", 1, serviceComponentIds.length);
MapEntryValidator mev = new MapEntryValidator();
mev.setKeyValueValidator(new ValueValidator("key"), new ValueValidator("value"));
RegistrationListenerValidator rglrv = new RegistrationListenerValidator("serviceRegistered", "serviceUnregistered");
rglrv.setListenerComponentValidator(new RefValidator("fooRegistrationListener"));
ServiceValidator sv = new ServiceValidator(4);
sv.setServiceComponentValidator(new RefValidator("foo"));
sv.addMapEntryValidator(mev);
sv.addRegistrationListenerValidator(rglrv);
sv.validate(metadataProxy.getComponentMetadata(sampleBlueprintContainerServiceId, serviceComponentIds[0]));
// bean: fooRegistrationListener
BeanValidator bv_fooRegistrationListener = new BeanValidator("org.apache.aries.blueprint.sample.FooRegistrationListener");
bv_fooRegistrationListener.validate(metadataProxy.getComponentMetadata(sampleBlueprintContainerServiceId, "fooRegistrationListener"));
// reference: ref2
ReferenceListenerValidator rlrv_1 = new ReferenceListenerValidator("bind", "unbind");
rlrv_1.setListenerComponentValidator(new RefValidator("bindingListener"));
ReferenceValidator rv = new ReferenceValidator("org.apache.aries.blueprint.sample.InterfaceA", 100);
rv.addReferenceListenerValidator(rlrv_1);
rv.validate(metadataProxy.getComponentMetadata(sampleBlueprintContainerServiceId, "ref2"));
// bean: bindingListener
BeanValidator bv_bindingListener = new BeanValidator("org.apache.aries.blueprint.sample.BindingListener");
bv_bindingListener.validate(metadataProxy.getComponentMetadata(sampleBlueprintContainerServiceId, "bindingListener"));
// reference-list: ref-list
ReferenceListenerValidator rlrv_2 = new ReferenceListenerValidator("bind", "unbind");
rlrv_2.setListenerComponentValidator(new RefValidator("listBindingListener"));
ReferenceListValidator rlv_ref_list = new ReferenceListValidator("org.apache.aries.blueprint.sample.InterfaceA");
rlv_ref_list.addReferenceListenerValidator(rlrv_2);
rlv_ref_list.validate(metadataProxy.getComponentMetadata(sampleBlueprintContainerServiceId, "ref-list"));
// bean: listBindingListener
BeanValidator bv_listBindingListener = new BeanValidator("org.apache.aries.blueprint.sample.BindingListener");
bv_listBindingListener.validate(metadataProxy.getComponentMetadata(sampleBlueprintContainerServiceId, "listBindingListener"));
// bean: circularReference
ReferenceListenerValidator rlrv_3 = new ReferenceListenerValidator("bind", "unbind");
rlrv_3.setListenerComponentValidator(new RefValidator("circularReference"));
ReferenceListValidator rlv_2 = new ReferenceListValidator("org.apache.aries.blueprint.sample.InterfaceA", 2);
rlv_2.addReferenceListenerValidator(rlrv_3);
BeanPropertyValidator bpv_list_2 = new BeanPropertyValidator("list");
bpv_list_2.setObjectValueValidator(rlv_2);
BeanValidator bv_circularReference = new BeanValidator("org.apache.aries.blueprint.sample.BindingListener", "init");
bv_circularReference.addPropertyValidators(bpv_list_2);
bv_circularReference.validate(metadataProxy.getComponentMetadata(sampleBlueprintContainerServiceId, "circularReference"));
}
| public void BlueprintSample() throws Exception {
System.out.println(">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> Start Test Blueprint Sample");
//////////////////////////////
//Test BlueprintStateMBean
//////////////////////////////
//find the Blueprint Sample bundle id and the blueprint extender bundle id
long sampleBundleId = -1;
long extenderBundleId = -1; // the blueprint extender bundle "org.apache.geronimo.blueprint.geronimo-blueprint" is also a blueprint bundle.
for (Bundle bundle : bundleContext.getBundles()){
if (bundle.getSymbolicName().equals("org.apache.aries.blueprint.sample")) sampleBundleId = bundle.getBundleId();
if (bundle.getSymbolicName().equals("org.apache.aries.blueprint")) extenderBundleId = bundle.getBundleId();
}
if (-1==sampleBundleId) fail("Blueprint Sample Bundle is not found!");
if (-1==extenderBundleId) fail("Blueprint Extender Bundle is not found!");
//retrieve the proxy object
BlueprintStateMBean stateProxy = (BlueprintStateMBean) MBeanServerInvocationHandler.newProxyInstance(mbeanServer, new ObjectName(BlueprintStateMBean.OBJECTNAME), BlueprintStateMBean.class, false);
// test getBlueprintBundleIds
long[] bpBundleIds = stateProxy.getBlueprintBundleIds();
assertEquals(2, bpBundleIds.length);
// test getLastEvent
BlueprintEventValidator sampleValidator = new BlueprintEventValidator(sampleBundleId, extenderBundleId, 2);
sampleValidator.validate(stateProxy.getLastEvent(sampleBundleId));
// test getLastEvents
TabularData lastEvents = stateProxy.getLastEvents();
assertEquals(BlueprintStateMBean.OSGI_BLUEPRINT_EVENTS_TYPE,lastEvents.getTabularType());
sampleValidator.validate(lastEvents.get(new Long[]{sampleBundleId}));
//////////////////////////////
//Test BlueprintMetadataMBean
//////////////////////////////
//find the Blueprint Sample bundle's container service id
Bundle sampleBundle = bundleContext.getBundle(sampleBundleId);
String filter = "(&(osgi.blueprint.container.symbolicname=" // no similar one in interfaces
+ sampleBundle.getSymbolicName() + ")(osgi.blueprint.container.version=" + sampleBundle.getVersion() + "))";
ServiceReference[] serviceReferences = null;
try {
serviceReferences = bundleContext.getServiceReferences(BlueprintContainer.class.getName(), filter);
} catch (InvalidSyntaxException e) {
throw new RuntimeException(e);
}
long sampleBlueprintContainerServiceId = (Long) serviceReferences[0].getProperty(Constants.SERVICE_ID);
//retrieve the proxy object
BlueprintMetadataMBean metadataProxy = (BlueprintMetadataMBean) MBeanServerInvocationHandler.newProxyInstance(mbeanServer, new ObjectName(BlueprintMetadataMBean.OBJECTNAME), BlueprintMetadataMBean.class, false);
// test getBlueprintContainerServiceIds
long[] bpContainerServiceIds = metadataProxy.getBlueprintContainerServiceIds();
assertEquals(2, bpContainerServiceIds.length);
// test getBlueprintContainerServiceId
assertEquals(sampleBlueprintContainerServiceId, metadataProxy.getBlueprintContainerServiceId(sampleBundleId));
// test getComponentMetadata
// bean: foo
BeanValidator bv_foo = new BeanValidator("org.apache.aries.blueprint.sample.Foo", "init", "destroy");
BeanPropertyValidator bpv_a = new BeanPropertyValidator("a");
bpv_a.setObjectValueValidator(new ValueValidator("5"));
BeanPropertyValidator bpv_b = new BeanPropertyValidator("b");
bpv_b.setObjectValueValidator(new ValueValidator("-1"));
BeanPropertyValidator bpv_bar = new BeanPropertyValidator("bar");
bpv_bar.setObjectValueValidator(new RefValidator("bar"));
BeanPropertyValidator bpv_currency = new BeanPropertyValidator("currency");
bpv_currency.setObjectValueValidator(new ValueValidator("PLN"));
BeanPropertyValidator bpv_date = new BeanPropertyValidator("date");
bpv_date.setObjectValueValidator(new ValueValidator("2009.04.17"));
bv_foo.addPropertyValidators(bpv_a, bpv_b, bpv_bar, bpv_currency, bpv_date);
bv_foo.validate(metadataProxy.getComponentMetadata(sampleBlueprintContainerServiceId, "foo"));
// bean: bar
BeanPropertyValidator bpv_value = new BeanPropertyValidator("value");
bpv_value.setObjectValueValidator(new ValueValidator("Hello FooBar"));
BeanPropertyValidator bpv_context = new BeanPropertyValidator("context");
bpv_context.setObjectValueValidator(new RefValidator("blueprintBundleContext"));
CollectionValidator cv = new CollectionValidator("java.util.List");
cv.addCollectionValueValidators(new ValueValidator("a list element"), new ValueValidator("5", "java.lang.Integer"));
BeanPropertyValidator bpv_list = new BeanPropertyValidator("list");
bpv_list.setObjectValueValidator(cv);
BeanValidator bv_bar = new BeanValidator("org.apache.aries.blueprint.sample.Bar");
bv_bar.addPropertyValidators(bpv_value, bpv_context, bpv_list);
bv_bar.validate(metadataProxy.getComponentMetadata(sampleBlueprintContainerServiceId, "bar"));
// service: ref=foo, no componentId set. So using it to test getComponentIdsByType.
String[] serviceComponentIds = metadataProxy.getComponentIdsByType(sampleBlueprintContainerServiceId, BlueprintMetadataMBean.SERVICE_METADATA);
assertEquals("There should be two service components in this sample", 2, serviceComponentIds.length);
MapEntryValidator mev = new MapEntryValidator();
mev.setKeyValueValidator(new ValueValidator("key"), new ValueValidator("value"));
RegistrationListenerValidator rglrv = new RegistrationListenerValidator("serviceRegistered", "serviceUnregistered");
rglrv.setListenerComponentValidator(new RefValidator("fooRegistrationListener"));
ServiceValidator sv = new ServiceValidator(4);
sv.setServiceComponentValidator(new RefValidator("foo"));
sv.addMapEntryValidator(mev);
sv.addRegistrationListenerValidator(rglrv);
sv.validate(metadataProxy.getComponentMetadata(sampleBlueprintContainerServiceId, serviceComponentIds[0]));
// bean: fooRegistrationListener
BeanValidator bv_fooRegistrationListener = new BeanValidator("org.apache.aries.blueprint.sample.FooRegistrationListener");
bv_fooRegistrationListener.validate(metadataProxy.getComponentMetadata(sampleBlueprintContainerServiceId, "fooRegistrationListener"));
// reference: ref2
ReferenceListenerValidator rlrv_1 = new ReferenceListenerValidator("bind", "unbind");
rlrv_1.setListenerComponentValidator(new RefValidator("bindingListener"));
ReferenceValidator rv = new ReferenceValidator("org.apache.aries.blueprint.sample.InterfaceA", 100);
rv.addReferenceListenerValidator(rlrv_1);
rv.validate(metadataProxy.getComponentMetadata(sampleBlueprintContainerServiceId, "ref2"));
// bean: bindingListener
BeanValidator bv_bindingListener = new BeanValidator("org.apache.aries.blueprint.sample.BindingListener");
bv_bindingListener.validate(metadataProxy.getComponentMetadata(sampleBlueprintContainerServiceId, "bindingListener"));
// reference-list: ref-list
ReferenceListenerValidator rlrv_2 = new ReferenceListenerValidator("bind", "unbind");
rlrv_2.setListenerComponentValidator(new RefValidator("listBindingListener"));
ReferenceListValidator rlv_ref_list = new ReferenceListValidator("org.apache.aries.blueprint.sample.InterfaceA");
rlv_ref_list.addReferenceListenerValidator(rlrv_2);
rlv_ref_list.validate(metadataProxy.getComponentMetadata(sampleBlueprintContainerServiceId, "ref-list"));
// bean: listBindingListener
BeanValidator bv_listBindingListener = new BeanValidator("org.apache.aries.blueprint.sample.BindingListener");
bv_listBindingListener.validate(metadataProxy.getComponentMetadata(sampleBlueprintContainerServiceId, "listBindingListener"));
// bean: circularReference
ReferenceListenerValidator rlrv_3 = new ReferenceListenerValidator("bind", "unbind");
rlrv_3.setListenerComponentValidator(new RefValidator("circularReference"));
ReferenceListValidator rlv_2 = new ReferenceListValidator("org.apache.aries.blueprint.sample.InterfaceA", 2);
rlv_2.addReferenceListenerValidator(rlrv_3);
BeanPropertyValidator bpv_list_2 = new BeanPropertyValidator("list");
bpv_list_2.setObjectValueValidator(rlv_2);
BeanValidator bv_circularReference = new BeanValidator("org.apache.aries.blueprint.sample.BindingListener", "init");
bv_circularReference.addPropertyValidators(bpv_list_2);
bv_circularReference.validate(metadataProxy.getComponentMetadata(sampleBlueprintContainerServiceId, "circularReference"));
}
|
public CompositeData installBundles(String[] locations, String[] urls) throws IOException {
if(locations == null || urls == null){
return new BatchInstallResult("Failed to install bundles arguments can't be null").toCompositeData();
}
if(locations != null && locations != null && locations.length != urls.length){
return new BatchInstallResult("Failed to install bundles size of arguments should be same").toCompositeData();
}
long[] ids = new long[locations.length];
for (int i = 0; i < locations.length; i++) {
try {
long id = installBundle(locations[i], urls[i]);
ids[i] = id;
} catch (Throwable t) {
long[] completed = new long[i];
System.arraycopy(ids, 0, completed, 0, i);
String[] remaining = new String[locations.length - i - 1];
System.arraycopy(locations, i + 1, remaining, 0, remaining.length);
return new BatchInstallResult(completed, t.toString(), remaining, locations[i]).toCompositeData();
}
}
return new BatchInstallResult(ids).toCompositeData();
}
| public CompositeData installBundles(String[] locations, String[] urls) throws IOException {
if(locations == null || urls == null){
return new BatchInstallResult("Failed to install bundles arguments can't be null").toCompositeData();
}
if(locations != null && locations.length != urls.length){
return new BatchInstallResult("Failed to install bundles size of arguments should be same").toCompositeData();
}
long[] ids = new long[locations.length];
for (int i = 0; i < locations.length; i++) {
try {
long id = installBundle(locations[i], urls[i]);
ids[i] = id;
} catch (Throwable t) {
long[] completed = new long[i];
System.arraycopy(ids, 0, completed, 0, i);
String[] remaining = new String[locations.length - i - 1];
System.arraycopy(locations, i + 1, remaining, 0, remaining.length);
return new BatchInstallResult(completed, t.toString(), remaining, locations[i]).toCompositeData();
}
}
return new BatchInstallResult(ids).toCompositeData();
}
|
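The fix in the pair above only drops the duplicated `locations != null` clause from the length check; the partial-failure bookkeeping (ids completed so far, the failing location, the locations never attempted) is unchanged. A minimal standalone sketch of that bookkeeping, with a hypothetical BatchResult record (Java 16+) and installOne installer standing in for the JMX types, might look like this:

import java.util.Arrays;

public class BatchInstallSketch {

    // Hypothetical stand-in for BatchInstallResult: either all ids, or the ids
    // completed so far plus the failing item and the items never attempted.
    record BatchResult(long[] completed, String error, String[] remaining, String failed) {}

    // Hypothetical installer; throws to simulate a failed installation.
    static long installOne(String location, String url) {
        if (url.isEmpty()) {
            throw new IllegalArgumentException("no url for " + location);
        }
        return location.hashCode();
    }

    static BatchResult installAll(String[] locations, String[] urls) {
        if (locations == null || urls == null) {
            return new BatchResult(null, "arguments can't be null", null, null);
        }
        if (locations.length != urls.length) {
            return new BatchResult(null, "arguments must have the same size", null, null);
        }
        long[] ids = new long[locations.length];
        for (int i = 0; i < locations.length; i++) {
            try {
                ids[i] = installOne(locations[i], urls[i]);
            } catch (RuntimeException t) {
                // report what succeeded (0..i-1), what failed (i), what was skipped (i+1..)
                long[] completed = Arrays.copyOf(ids, i);
                String[] remaining = Arrays.copyOfRange(locations, i + 1, locations.length);
                return new BatchResult(completed, t.toString(), remaining, locations[i]);
            }
        }
        return new BatchResult(ids, null, null, null);
    }

    public static void main(String[] args) {
        BatchResult r = installAll(new String[] { "a", "b", "c" },
                                   new String[] { "urlA", "", "urlC" });
        System.out.println(r.failed() + " failed; completed=" + Arrays.toString(r.completed())
                + "; remaining=" + Arrays.toString(r.remaining()));
    }
}

Arrays.copyOf and Arrays.copyOfRange express the same slicing the original code does with System.arraycopy.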
protected int readXaPrepare(NetConnection conn) throws DisconnectException {
startSameIdChainParse();
int synctype = parseSYNCCTLreply(conn);
endOfSameIdChainData();
NetXACallInfo callInfo = conn.xares_.callInfoArray_[conn.currXACallInfoOffset_];
if (synctype == NetXAResource.XARETVAL_XARDONLY) { // xaretval of read-only, make sure flag agrees
callInfo.setReadOnlyTransactionFlag(true);
} else { // xaretval NOT read-only, make sure flag agrees
callInfo.setReadOnlyTransactionFlag(false);
}
return synctype;
}
| protected int readXaPrepare(NetConnection conn) throws DisconnectException {
startSameIdChainParse();
int synctype = parseSYNCCTLreply(conn);
endOfSameIdChainData();
NetXACallInfo callInfo = conn.xares_.callInfoArray_[conn.currXACallInfoOffset_];
if (synctype == XAResource.XA_RDONLY) { // xaretval of read-only, make sure flag agrees
callInfo.setReadOnlyTransactionFlag(true);
} else { // xaretval NOT read-only, make sure flag agrees
callInfo.setReadOnlyTransactionFlag(false);
}
return synctype;
}
|
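The fix above swaps a driver-internal constant (NetXAResource.XARETVAL_XARDONLY) for the standard javax.transaction.xa.XAResource.XA_RDONLY when classifying the prepare reply. The comparison itself is trivial; a self-contained sketch, with a hypothetical isReadOnlyVote helper, is:

import javax.transaction.xa.XAResource;

public class PrepareVoteSketch {

    // XA_RDONLY means the branch did no updates and is already complete,
    // so the transaction manager will skip the commit/rollback for it.
    static boolean isReadOnlyVote(int prepareResult) {
        return prepareResult == XAResource.XA_RDONLY;
    }

    public static void main(String[] args) {
        System.out.println(isReadOnlyVote(XAResource.XA_OK));      // false
        System.out.println(isReadOnlyVote(XAResource.XA_RDONLY));  // true
    }
}

Comparing against the standard constant keeps the check aligned with the XAResource.prepare contract rather than with internal return-code definitions, which is presumably the point of the change.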
private void checkInvariants(IndexWriter writer) throws IOException {
_TestUtil.syncConcurrentMerges(writer);
int maxBufferedDocs = writer.getMaxBufferedDocs();
int mergeFactor = writer.getMergeFactor();
int maxMergeDocs = writer.getMaxMergeDocs();
int ramSegmentCount = writer.getNumBufferedDocuments();
assertTrue(ramSegmentCount < maxBufferedDocs);
int lowerBound = -1;
int upperBound = maxBufferedDocs;
int numSegments = 0;
int segmentCount = writer.getSegmentCount();
for (int i = segmentCount - 1; i >= 0; i--) {
int docCount = writer.getDocCount(i);
assertTrue(docCount > lowerBound);
if (docCount <= upperBound) {
numSegments++;
} else {
if (upperBound * mergeFactor <= maxMergeDocs) {
assertTrue("maxMergeDocs=" + maxMergeDocs + "; numSegments=" + numSegments + "; upperBound=" + upperBound + "; mergeFactor=" + mergeFactor, numSegments < mergeFactor);
}
do {
lowerBound = upperBound;
upperBound *= mergeFactor;
} while (docCount > upperBound);
numSegments = 1;
}
}
if (upperBound * mergeFactor <= maxMergeDocs) {
assertTrue(numSegments < mergeFactor);
}
String[] files = writer.getDirectory().listAll();
int segmentCfsCount = 0;
for (int i = 0; i < files.length; i++) {
if (files[i].endsWith(".cfs")) {
segmentCfsCount++;
}
}
assertEquals(segmentCount, segmentCfsCount);
}
| private void checkInvariants(IndexWriter writer) throws IOException {
_TestUtil.syncConcurrentMerges(writer);
int maxBufferedDocs = writer.getMaxBufferedDocs();
int mergeFactor = writer.getMergeFactor();
int maxMergeDocs = writer.getMaxMergeDocs();
int ramSegmentCount = writer.getNumBufferedDocuments();
assertTrue(ramSegmentCount < maxBufferedDocs);
int lowerBound = -1;
int upperBound = maxBufferedDocs;
int numSegments = 0;
int segmentCount = writer.getSegmentCount();
for (int i = segmentCount - 1; i >= 0; i--) {
int docCount = writer.getDocCount(i);
assertTrue(docCount > lowerBound);
if (docCount <= upperBound) {
numSegments++;
} else {
if (upperBound * mergeFactor <= maxMergeDocs) {
assertTrue("maxMergeDocs=" + maxMergeDocs + "; numSegments=" + numSegments + "; upperBound=" + upperBound + "; mergeFactor=" + mergeFactor + "; segs=" + writer.segString(), numSegments < mergeFactor);
}
do {
lowerBound = upperBound;
upperBound *= mergeFactor;
} while (docCount > upperBound);
numSegments = 1;
}
}
if (upperBound * mergeFactor <= maxMergeDocs) {
assertTrue(numSegments < mergeFactor);
}
String[] files = writer.getDirectory().listAll();
int segmentCfsCount = 0;
for (int i = 0; i < files.length; i++) {
if (files[i].endsWith(".cfs")) {
segmentCfsCount++;
}
}
assertEquals(segmentCount, segmentCfsCount);
}
|
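The only difference between the two versions above is the extra writer.segString() detail in the assertion message; the invariant being asserted is unchanged. As a rough restatement of that invariant, here is a standalone check over an array of per-segment doc counts (oldest segment first), using the same level-bucketing logic; the method name and the plain-array input are assumptions for the sketch, not part of the original test:

public class LogMergeInvariantSketch {

    // Scanning newest -> oldest, each older segment must exceed the current level's
    // lower bound, counts bucket into geometric levels (maxBufferedDocs,
    // maxBufferedDocs*mergeFactor, ...), and any level still below maxMergeDocs
    // may hold fewer than mergeFactor segments.
    static boolean satisfiesInvariant(int[] docCounts, int maxBufferedDocs,
                                      int mergeFactor, int maxMergeDocs) {
        long lowerBound = -1;
        long upperBound = maxBufferedDocs;
        int segmentsInLevel = 0;
        for (int i = docCounts.length - 1; i >= 0; i--) {      // newest -> oldest
            int docCount = docCounts[i];
            if (docCount <= lowerBound) {
                return false;                                   // must exceed the current level's lower bound
            }
            if (docCount <= upperBound) {
                segmentsInLevel++;
            } else {
                if (upperBound * mergeFactor <= maxMergeDocs && segmentsInLevel >= mergeFactor) {
                    return false;                               // too many segments left in one level
                }
                do {                                            // advance to the level this segment belongs to
                    lowerBound = upperBound;
                    upperBound *= mergeFactor;
                } while (docCount > upperBound);
                segmentsInLevel = 1;
            }
        }
        return !(upperBound * mergeFactor <= maxMergeDocs && segmentsInLevel >= mergeFactor);
    }

    public static void main(String[] args) {
        // 10 buffered docs per flush, merge factor 10: a few level-0 segments is fine...
        System.out.println(satisfiesInvariant(new int[] { 100, 10, 10, 10 }, 10, 10, 100_000));  // true
        // ...but eleven segments in the same level should already have been merged.
        int[] tooMany = new int[11];
        java.util.Arrays.fill(tooMany, 10);
        System.out.println(satisfiesInvariant(tooMany, 10, 10, 100_000));                        // false
    }
}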
public long sizeInBytes() {
return file.numBuffers() * BUFFER_SIZE;
}
| public long sizeInBytes() {
return (long) file.numBuffers() * (long) BUFFER_SIZE;
}
|
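The one-line fix above is the classic int-overflow-before-widening bug: numBuffers * BUFFER_SIZE is evaluated in 32-bit arithmetic and only the (possibly wrapped) result is widened to long. A minimal demonstration, with a hypothetical class name and a made-up buffer count:

public class WideningSketch {

    static final int BUFFER_SIZE = 1024;   // assumed buffer size for the demo

    // Buggy: the multiply happens in 32-bit int arithmetic; the wrapped result
    // is then widened to long, so large files report a bogus (even negative) size.
    static long sizeInBytesBuggy(int numBuffers) {
        return numBuffers * BUFFER_SIZE;
    }

    // Fixed: widening one operand first forces a 64-bit multiply.
    static long sizeInBytesFixed(int numBuffers) {
        return (long) numBuffers * BUFFER_SIZE;
    }

    public static void main(String[] args) {
        int numBuffers = 3_000_000;                        // ~3 GB worth of 1 KB buffers
        System.out.println(sizeInBytesBuggy(numBuffers));  // wrapped, negative value
        System.out.println(sizeInBytesFixed(numBuffers));  // 3072000000
    }
}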
private void verifyInterval(Blob blob, long pos, int length,
int testNum, int blobLength) throws Exception {
try {
String subStr = new String(blob.getBytes(pos,length), "US-ASCII");
assertEquals("FAIL - getSubString returned wrong length ",
Math.min((blob.length() - pos) + 1, length),
subStr.length());
assertEquals("FAIL - clob has mismatched lengths",
blobLength, blob.length());
assertFalse("FAIL - NO ERROR ON getSubString POS TOO LARGE",
(pos > blobLength + 1));
// Get expected value using Blob.getBinaryStream()
byte[] value = new byte[length];
String valueString;
InputStream inStream = blob.getBinaryStream();
inStream.skip(pos - 1);
int numBytes = inStream.read(value);
// check that the two values match
if (numBytes >= 0) {
byte[] readBytes = new byte[numBytes];
System.arraycopy(value, 0, readBytes, 0, numBytes);
valueString = new String(readBytes);
assertEquals("FAIL - wrong substring value",
valueString, subStr);
} else {
assertTrue("FAIL - wrong length", subStr.length() == 0);
}
} catch (SQLException e) {
if (pos <= 0) {
checkException(BLOB_BAD_POSITION, e);
} else {
if (pos > blobLength + 1) {
checkException(BLOB_POSITION_TOO_LARGE, e);
} else {
throw e;
}
}
} catch (NegativeArraySizeException nase) {
if (!((pos > blobLength) && usingDerbyNet())) {
throw nase;
}
}
}
| private void verifyInterval(Blob blob, long pos, int length,
int testNum, int blobLength) throws Exception {
try {
String subStr = new String(blob.getBytes(pos,length), "US-ASCII");
assertEquals("FAIL - getSubString returned wrong length ",
Math.min((blob.length() - pos) + 1, length),
subStr.length());
assertEquals("FAIL - clob has mismatched lengths",
blobLength, blob.length());
assertFalse("FAIL - NO ERROR ON getSubString POS TOO LARGE",
(pos > blobLength + 1));
// Get expected value using Blob.getBinaryStream()
byte[] value = new byte[length];
String valueString;
InputStream inStream = blob.getBinaryStream();
inStream.skip(pos - 1);
int numBytes = inStream.read(value);
// check that the two values match
if (numBytes >= 0) {
byte[] readBytes = new byte[numBytes];
System.arraycopy(value, 0, readBytes, 0, numBytes);
valueString = new String(readBytes, "US-ASCII");
assertEquals("FAIL - wrong substring value",
valueString, subStr);
} else {
assertTrue("FAIL - wrong length", subStr.length() == 0);
}
} catch (SQLException e) {
if (pos <= 0) {
checkException(BLOB_BAD_POSITION, e);
} else {
if (pos > blobLength + 1) {
checkException(BLOB_POSITION_TOO_LARGE, e);
} else {
throw e;
}
}
} catch (NegativeArraySizeException nase) {
if (!((pos > blobLength) && usingDerbyNet())) {
throw nase;
}
}
}
|
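Aside from the comment cleanup, the fix above is the new String(readBytes) call: without an explicit charset the decode uses the JVM's default, so the comparison against the US-ASCII substring could fail on a machine with an unusual default encoding. A small demonstration of the difference (the byte value is just an illustrative example):

import java.nio.charset.StandardCharsets;

public class CharsetSketch {
    public static void main(String[] args) {
        byte[] data = { (byte) 0xE9 };   // 'é' in ISO-8859-1, not a valid UTF-8 sequence

        // Depends on the platform default charset:
        // "é" on an ISO-8859-1 system, the replacement character on a UTF-8 system.
        String platformDependent = new String(data);

        // Explicit charset: the same result on every machine.
        String explicit = new String(data, StandardCharsets.ISO_8859_1);

        System.out.println(platformDependent + " vs " + explicit);
    }
}

The fixed test passes the charset by name ("US-ASCII"); the Charset-based overload shown here avoids the checked UnsupportedEncodingException that the name-based constructor declares.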
public void testGetNewNames() throws IOException
{
Descriptor desc = Descriptor.fromFilename(new File("Keyspace1", "Standard1-500-Data.db").toString());
PendingFile inContext = new PendingFile(null, desc, "Data.db", Arrays.asList(new Pair<Long,Long>(0L, 1L)));
PendingFile outContext = StreamIn.getContextMapping(inContext);
// filename and generation are expected to have changed
assert !inContext.getFilename().equals(outContext.getFilename());
// nothing else should
assertEquals(inContext.component, outContext.component);
assertEquals(inContext.desc.ksname, outContext.desc.ksname);
assertEquals(inContext.desc.cfname, outContext.desc.cfname);
}
| public void testGetNewNames() throws IOException
{
Descriptor desc = Descriptor.fromFilename(new File("Keyspace1", "Standard1-500-Data.db").toString());
PendingFile inContext = new PendingFile(null, desc, "Data.db", Arrays.asList(new Pair<Long,Long>(0L, 1L)), OperationType.BOOTSTRAP);
PendingFile outContext = StreamIn.getContextMapping(inContext);
// filename and generation are expected to have changed
assert !inContext.getFilename().equals(outContext.getFilename());
// nothing else should
assertEquals(inContext.component, outContext.component);
assertEquals(inContext.desc.ksname, outContext.desc.ksname);
assertEquals(inContext.desc.cfname, outContext.desc.cfname);
}
|
public void doVerb(Message message)
{
if (logger.isDebugEnabled())
logger.debug("Received a StreamRequestMessage from {}", message.getFrom());
byte[] body = message.getMessageBody();
ByteArrayInputStream bufIn = new ByteArrayInputStream(body);
try
{
StreamRequestMessage srm = StreamRequestMessage.serializer().deserialize(new DataInputStream(bufIn));
if (logger.isDebugEnabled())
logger.debug(srm.toString());
StreamOutSession session = StreamOutSession.create(srm.table, message.getFrom(), srm.sessionId);
StreamOut.transferRangesForRequest(session, srm.ranges);
}
catch (IOException ex)
{
throw new IOError(ex);
}
}
| public void doVerb(Message message)
{
if (logger.isDebugEnabled())
logger.debug("Received a StreamRequestMessage from {}", message.getFrom());
byte[] body = message.getMessageBody();
ByteArrayInputStream bufIn = new ByteArrayInputStream(body);
try
{
StreamRequestMessage srm = StreamRequestMessage.serializer().deserialize(new DataInputStream(bufIn));
if (logger.isDebugEnabled())
logger.debug(srm.toString());
StreamOutSession session = StreamOutSession.create(srm.table, message.getFrom(), srm.sessionId);
StreamOut.transferRangesForRequest(session, srm.ranges, srm.type);
}
catch (IOException ex)
{
throw new IOError(ex);
}
}
|
public void finished(PendingFile remoteFile, PendingFile localFile) throws IOException
{
if (logger.isDebugEnabled())
logger.debug("Finished {}. Sending ack to {}", remoteFile, this);
Future future = CompactionManager.instance.submitSSTableBuild(localFile.desc);
buildFutures.add(future);
files.remove(remoteFile);
if (remoteFile.equals(current))
current = null;
StreamReply reply = new StreamReply(remoteFile.getFilename(), getSessionId(), StreamReply.Status.FILE_FINISHED);
// send a StreamStatus message telling the source node it can delete this file
MessagingService.instance.sendOneWay(reply.createMessage(), getHost());
}
| public void finished(PendingFile remoteFile, PendingFile localFile) throws IOException
{
if (logger.isDebugEnabled())
logger.debug("Finished {}. Sending ack to {}", remoteFile, this);
Future future = CompactionManager.instance.submitSSTableBuild(localFile.desc, remoteFile.type);
buildFutures.add(future);
files.remove(remoteFile);
if (remoteFile.equals(current))
current = null;
StreamReply reply = new StreamReply(remoteFile.getFilename(), getSessionId(), StreamReply.Status.FILE_FINISHED);
// send a StreamStatus message telling the source node it can delete this file
MessagingService.instance.sendOneWay(reply.createMessage(), getHost());
}
|
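The last three pairs (the PendingFile construction, doVerb, and finished) all make the same kind of change: the stream's operation type is carried on the request/file object and passed explicitly to the downstream call instead of being implied. A compact sketch of that pattern with hypothetical types; only BOOTSTRAP is taken from the source, and the record, enum, and method names are made up for illustration:

public class ForwardTypeSketch {

    // Hypothetical operation types; the real enum lives in the streaming code.
    enum OperationType { BOOTSTRAP, REPAIR }

    // Hypothetical request that carries its own type (Java 16+ record).
    record StreamRequest(String table, long sessionId, OperationType type) {}

    // The receiver forwards the type instead of assuming a default.
    static void transferRanges(StreamRequest request) {
        System.out.println("streaming " + request.table()
                + " (session " + request.sessionId() + ") for " + request.type());
    }

    public static void main(String[] args) {
        transferRanges(new StreamRequest("Keyspace1", 42L, OperationType.BOOTSTRAP));
    }
}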
private int getAbsoluteColumnPosition(Optimizable optTable)
{
ColumnReference cr = (ColumnReference) operand;
int columnPosition;
ConglomerateDescriptor bestCD;
/* Column positions are one-based, store is zero-based */
columnPosition = cr.getSource().getColumnPosition();
bestCD =
optTable.getTrulyTheBestAccessPath().getConglomerateDescriptor();
/*
** If it's an index, find the base column position in the index
** and translate it to an index column position.
*/
if (bestCD.isIndex())
{
columnPosition = bestCD.getIndexDescriptor().
getKeyColumnPosition(new Integer(columnPosition)).intValue();
if (SanityManager.DEBUG)
{
SanityManager.ASSERT(columnPosition > 0,
"Base column not found in index");
}
}
// return the 0-based column position
return columnPosition - 1;
}
| private int getAbsoluteColumnPosition(Optimizable optTable)
{
ColumnReference cr = (ColumnReference) operand;
int columnPosition;
ConglomerateDescriptor bestCD;
/* Column positions are one-based, store is zero-based */
columnPosition = cr.getSource().getColumnPosition();
bestCD =
optTable.getTrulyTheBestAccessPath().getConglomerateDescriptor();
/*
** If it's an index, find the base column position in the index
** and translate it to an index column position.
*/
if (bestCD.isIndex())
{
columnPosition = bestCD.getIndexDescriptor().
getKeyColumnPosition(columnPosition);
if (SanityManager.DEBUG)
{
SanityManager.ASSERT(columnPosition > 0,
"Base column not found in index");
}
}
// return the 0-based column position
return columnPosition - 1;
}
|
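The fix above follows an API change from an Integer-based lookup (getKeyColumnPosition(new Integer(pos)).intValue()) to a primitive int one, removing the wrapper allocation and unboxing at the call site. A sketch of what an int-based key-column lookup can look like; the helper, its data, and its 1-based return convention are assumptions for illustration, not Derby's implementation:

public class BoxingSketch {

    // int-based API: no wrapper objects created or unboxed at the call site.
    static int keyColumnPosition(int[] baseColumnPositions, int baseColumn) {
        for (int i = 0; i < baseColumnPositions.length; i++) {
            if (baseColumnPositions[i] == baseColumn) {
                return i + 1;   // key column positions are 1-based in this sketch
            }
        }
        return 0;               // not part of the index key
    }

    public static void main(String[] args) {
        int[] indexedColumns = { 3, 1, 7 };   // base column positions, in key order
        System.out.println(keyColumnPosition(indexedColumns, 7));  // 3
        System.out.println(keyColumnPosition(indexedColumns, 2));  // 0
    }
}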
public void testBoostsSimple() throws Exception {
Map<CharSequence,Float> boosts = new HashMap<CharSequence,Float>();
boosts.put("b", Float.valueOf(5));
boosts.put("t", Float.valueOf(10));
String[] fields = { "b", "t" };
StandardQueryParser mfqp = new StandardQueryParser();
mfqp.setMultiFields(fields);
mfqp.setFieldsBoost(boosts);
mfqp.setAnalyzer(new StandardAnalyzer(TEST_VERSION_CURRENT));
// Check for simple
Query q = mfqp.parse("one", null);
assertEquals("b:one^5.0 t:one^10.0", q.toString());
// Check for AND
q = mfqp.parse("one AND two", null);
assertEquals("+(b:one^5.0 t:one^10.0) +(b:two^5.0 t:two^10.0)", q
.toString());
// Check for OR
q = mfqp.parse("one OR two", null);
assertEquals("(b:one^5.0 t:one^10.0) (b:two^5.0 t:two^10.0)", q.toString());
// Check for AND and a field
q = mfqp.parse("one AND two AND foo:test", null);
assertEquals("+(b:one^5.0 t:one^10.0) +(b:two^5.0 t:two^10.0) +foo:test", q
.toString());
q = mfqp.parse("one^3 AND two^4", null);
assertEquals("+((b:one^5.0 t:one^10.0)^3.0) +((b:two^5.0 t:two^10.0)^4.0)",
q.toString());
}
| public void testBoostsSimple() throws Exception {
Map<String,Float> boosts = new HashMap<String,Float>();
boosts.put("b", Float.valueOf(5));
boosts.put("t", Float.valueOf(10));
String[] fields = { "b", "t" };
StandardQueryParser mfqp = new StandardQueryParser();
mfqp.setMultiFields(fields);
mfqp.setFieldsBoost(boosts);
mfqp.setAnalyzer(new StandardAnalyzer(TEST_VERSION_CURRENT));
// Check for simple
Query q = mfqp.parse("one", null);
assertEquals("b:one^5.0 t:one^10.0", q.toString());
// Check for AND
q = mfqp.parse("one AND two", null);
assertEquals("+(b:one^5.0 t:one^10.0) +(b:two^5.0 t:two^10.0)", q
.toString());
// Check for OR
q = mfqp.parse("one OR two", null);
assertEquals("(b:one^5.0 t:one^10.0) (b:two^5.0 t:two^10.0)", q.toString());
// Check for AND and a field
q = mfqp.parse("one AND two AND foo:test", null);
assertEquals("+(b:one^5.0 t:one^10.0) +(b:two^5.0 t:two^10.0) +foo:test", q
.toString());
q = mfqp.parse("one^3 AND two^4", null);
assertEquals("+((b:one^5.0 t:one^10.0)^3.0) +((b:two^5.0 t:two^10.0)^4.0)",
q.toString());
}
|
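The change above (and in the wrapper-based variant of the same test that follows) tightens the boost map from Map<CharSequence,Float> to Map<String,Float>. CharSequence does not specify content-based equals/hashCode, so mixed key types silently miss in a HashMap; a short demonstration:

import java.util.HashMap;
import java.util.Map;

public class CharSequenceKeySketch {
    public static void main(String[] args) {
        Map<CharSequence, Float> boosts = new HashMap<>();
        boosts.put(new StringBuilder("title"), 10f);

        // StringBuilder uses identity equals()/hashCode(), so a String with the
        // same characters -- or even another equal StringBuilder -- is a different key.
        System.out.println(boosts.get("title"));                      // null
        System.out.println(boosts.get(new StringBuilder("title")));   // null

        Map<String, Float> stringBoosts = new HashMap<>();
        stringBoosts.put("title", 10f);
        System.out.println(stringBoosts.get("title"));                // 10.0
    }
}

With String keys the field names compare by content, which is presumably why the setFieldsBoost signature moved to Map<String,Float>.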
public void testBoostsSimple() throws Exception {
Map<CharSequence,Float> boosts = new HashMap<CharSequence,Float>();
boosts.put("b", Float.valueOf(5));
boosts.put("t", Float.valueOf(10));
String[] fields = { "b", "t" };
MultiFieldQueryParserWrapper mfqp = new MultiFieldQueryParserWrapper(
fields, new StandardAnalyzer(TEST_VERSION_CURRENT), boosts);
// Check for simple
Query q = mfqp.parse("one");
assertEquals("b:one^5.0 t:one^10.0", q.toString());
// Check for AND
q = mfqp.parse("one AND two");
assertEquals("+(b:one^5.0 t:one^10.0) +(b:two^5.0 t:two^10.0)", q
.toString());
// Check for OR
q = mfqp.parse("one OR two");
assertEquals("(b:one^5.0 t:one^10.0) (b:two^5.0 t:two^10.0)", q.toString());
// Check for AND and a field
q = mfqp.parse("one AND two AND foo:test");
assertEquals("+(b:one^5.0 t:one^10.0) +(b:two^5.0 t:two^10.0) +foo:test", q
.toString());
q = mfqp.parse("one^3 AND two^4");
assertEquals("+((b:one^5.0 t:one^10.0)^3.0) +((b:two^5.0 t:two^10.0)^4.0)",
q.toString());
}
| public void testBoostsSimple() throws Exception {
Map<String,Float> boosts = new HashMap<String,Float>();
boosts.put("b", Float.valueOf(5));
boosts.put("t", Float.valueOf(10));
String[] fields = { "b", "t" };
MultiFieldQueryParserWrapper mfqp = new MultiFieldQueryParserWrapper(
fields, new StandardAnalyzer(TEST_VERSION_CURRENT), boosts);
// Check for simple
Query q = mfqp.parse("one");
assertEquals("b:one^5.0 t:one^10.0", q.toString());
// Check for AND
q = mfqp.parse("one AND two");
assertEquals("+(b:one^5.0 t:one^10.0) +(b:two^5.0 t:two^10.0)", q
.toString());
// Check for OR
q = mfqp.parse("one OR two");
assertEquals("(b:one^5.0 t:one^10.0) (b:two^5.0 t:two^10.0)", q.toString());
// Check for AND and a field
q = mfqp.parse("one AND two AND foo:test");
assertEquals("+(b:one^5.0 t:one^10.0) +(b:two^5.0 t:two^10.0) +foo:test", q
.toString());
q = mfqp.parse("one^3 AND two^4");
assertEquals("+((b:one^5.0 t:one^10.0)^3.0) +((b:two^5.0 t:two^10.0)^4.0)",
q.toString());
}
|
private void setupCassandra() throws TException, InvalidRequestException
{
/* Establish a thrift connection to the cassandra instance */
TSocket socket = new TSocket(DatabaseDescriptor.getListenAddress().getHostName(), DatabaseDescriptor.getRpcPort());
TTransport transport = new TFramedTransport(socket);
TBinaryProtocol binaryProtocol = new TBinaryProtocol(transport, false, false);
Cassandra.Client cassandraClient = new Cassandra.Client(binaryProtocol);
transport.open();
thriftClient = cassandraClient;
Set<String> keyspaces = thriftClient.describe_keyspaces();
if (!keyspaces.contains(KEYSPACE))
{
List<CfDef> cfDefs = new ArrayList<CfDef>();
thriftClient.system_add_keyspace(new KsDef(KEYSPACE, "org.apache.cassandra.locator.RackUnawareStrategy", 1, cfDefs));
}
thriftClient.set_keyspace(KEYSPACE);
CfDef cfDef = new CfDef(KEYSPACE, COLUMN_FAMILY);
try
{
thriftClient.system_add_column_family(cfDef);
}
catch (InvalidRequestException e)
{
throw new RuntimeException(e);
}
}
| private void setupCassandra() throws TException, InvalidRequestException
{
/* Establish a thrift connection to the cassandra instance */
TSocket socket = new TSocket(DatabaseDescriptor.getListenAddress().getHostName(), DatabaseDescriptor.getRpcPort());
TTransport transport = new TFramedTransport(socket);
TBinaryProtocol binaryProtocol = new TBinaryProtocol(transport);
Cassandra.Client cassandraClient = new Cassandra.Client(binaryProtocol);
transport.open();
thriftClient = cassandraClient;
Set<String> keyspaces = thriftClient.describe_keyspaces();
if (!keyspaces.contains(KEYSPACE))
{
List<CfDef> cfDefs = new ArrayList<CfDef>();
thriftClient.system_add_keyspace(new KsDef(KEYSPACE, "org.apache.cassandra.locator.RackUnawareStrategy", 1, cfDefs));
}
thriftClient.set_keyspace(KEYSPACE);
CfDef cfDef = new CfDef(KEYSPACE, COLUMN_FAMILY);
try
{
thriftClient.system_add_column_family(cfDef);
}
catch (InvalidRequestException e)
{
throw new RuntimeException(e);
}
}
|
public void testConsumerBundle() throws Exception {
String testClassFileName = TestClass.class.getName().replace('.', '/') + ".class";
URL testClassURL = getClass().getResource("/" + testClassFileName);
String test2ClassFileName = Test2Class.class.getName().replace('.', '/') + ".class";
URL test2ClassURL = getClass().getResource("/" + test2ClassFileName);
String test3ClassFileName = Test3Class.class.getName().replace('.', '/') + ".class";
URL test3ClassURL = getClass().getResource("/" + test3ClassFileName);
File jarFile = new File(System.getProperty("java.io.tmpdir") + "/testjar_" + System.currentTimeMillis() + ".jar");
File expectedFile = null;
try {
// Create the jarfile to be used for testing
Manifest mf = new Manifest();
Attributes mainAttributes = mf.getMainAttributes();
mainAttributes.putValue("Manifest-Version", "1.0");
mainAttributes.putValue("Bundle-ManifestVersion", "2.0");
mainAttributes.putValue("Bundle-SymbolicName", "testbundle");
mainAttributes.putValue("Foo", "Bar Bar");
mainAttributes.putValue("Import-Package", "org.foo.bar");
mainAttributes.putValue(SpiFlyConstants.REQUIRE_CAPABILITY,
"osgi.serviceloader; filter:=\"(osgi.serviceloader=org.apache.aries.spifly.mysvc.SPIProvider)\";cardinality:=multiple," +
"osgi.extender; filter:=\"(osgi.extender=osgi.serviceloader.processor)\"");
JarOutputStream jos = new JarOutputStream(new FileOutputStream(jarFile), mf);
jos.putNextEntry(new ZipEntry(testClassFileName));
Streams.pump(testClassURL.openStream(), jos);
jos.putNextEntry(new ZipEntry(test2ClassFileName));
Streams.pump(test2ClassURL.openStream(), jos);
jos.putNextEntry(new ZipEntry(test3ClassFileName));
Streams.pump(test3ClassURL.openStream(), jos);
jos.close();
Main.main(jarFile.getCanonicalPath());
expectedFile = new File(jarFile.getParent(), jarFile.getName().replaceAll("[.]jar", "_spifly.jar"));
Assert.assertTrue("A processed separate bundle should have been created", expectedFile.exists());
// Check manifest in generated bundle.
JarFile transformedJarFile = new JarFile(expectedFile);
Manifest expectedMF = transformedJarFile.getManifest();
Assert.assertEquals("1.0", expectedMF.getMainAttributes().getValue("Manifest-Version"));
Assert.assertEquals("2.0", expectedMF.getMainAttributes().getValue("Bundle-ManifestVersion"));
Assert.assertEquals("testbundle", expectedMF.getMainAttributes().getValue("Bundle-SymbolicName"));
Assert.assertEquals("Bar Bar", expectedMF.getMainAttributes().getValue("Foo"));
Assert.assertEquals("osgi.serviceloader; filter:=\"(osgi.serviceloader=org.apache.aries.spifly.mysvc.SPIProvider)\";cardinality:=multiple,",
expectedMF.getMainAttributes().getValue(SpiFlyConstants.REQUIRE_CAPABILITY));
String importPackage = expectedMF.getMainAttributes().getValue("Import-Package");
Assert.assertTrue(
"org.foo.bar,org.apache.aries.spifly;version=\"[1.0.0,1.1.0)\"".equals(importPackage) ||
"org.apache.aries.spifly;version=\"[1.0.0,1.1.0)\",org.foo.bar".equals(importPackage));
JarFile initialJarFile = new JarFile(jarFile);
byte[] orgBytes = Streams.suck(initialJarFile.getInputStream(new ZipEntry(testClassFileName)));
byte[] nonTransBytes = Streams.suck(transformedJarFile.getInputStream(new ZipEntry(testClassFileName)));
Assert.assertArrayEquals(orgBytes, nonTransBytes);
byte[] orgBytes2 = Streams.suck(initialJarFile.getInputStream(new ZipEntry(test2ClassFileName)));
byte[] nonTransBytes2 = Streams.suck(transformedJarFile.getInputStream(new ZipEntry(test2ClassFileName)));
Assert.assertArrayEquals(orgBytes2, nonTransBytes2);
byte[] orgBytes3 = Streams.suck(initialJarFile.getInputStream(new ZipEntry(test3ClassFileName)));
byte[] transBytes3 = Streams.suck(transformedJarFile.getInputStream(new ZipEntry(test3ClassFileName)));
Assert.assertFalse("The transformed class should be different", Arrays.equals(orgBytes3, transBytes3));
initialJarFile.close();
transformedJarFile.close();
} finally {
jarFile.delete();
if (expectedFile != null)
expectedFile.delete();
}
}
| public void testConsumerBundle() throws Exception {
String testClassFileName = TestClass.class.getName().replace('.', '/') + ".class";
URL testClassURL = getClass().getResource("/" + testClassFileName);
String test2ClassFileName = Test2Class.class.getName().replace('.', '/') + ".class";
URL test2ClassURL = getClass().getResource("/" + test2ClassFileName);
String test3ClassFileName = Test3Class.class.getName().replace('.', '/') + ".class";
URL test3ClassURL = getClass().getResource("/" + test3ClassFileName);
File jarFile = new File(System.getProperty("java.io.tmpdir") + "/testjar_" + System.currentTimeMillis() + ".jar");
File expectedFile = null;
try {
// Create the jarfile to be used for testing
Manifest mf = new Manifest();
Attributes mainAttributes = mf.getMainAttributes();
mainAttributes.putValue("Manifest-Version", "1.0");
mainAttributes.putValue("Bundle-ManifestVersion", "2.0");
mainAttributes.putValue("Bundle-SymbolicName", "testbundle");
mainAttributes.putValue("Foo", "Bar Bar");
mainAttributes.putValue("Import-Package", "org.foo.bar");
mainAttributes.putValue(SpiFlyConstants.REQUIRE_CAPABILITY,
"osgi.serviceloader; filter:=\"(osgi.serviceloader=org.apache.aries.spifly.mysvc.SPIProvider)\";cardinality:=multiple," +
"osgi.extender; filter:=\"(osgi.extender=osgi.serviceloader.processor)\"");
JarOutputStream jos = new JarOutputStream(new FileOutputStream(jarFile), mf);
jos.putNextEntry(new ZipEntry(testClassFileName));
Streams.pump(testClassURL.openStream(), jos);
jos.putNextEntry(new ZipEntry(test2ClassFileName));
Streams.pump(test2ClassURL.openStream(), jos);
jos.putNextEntry(new ZipEntry(test3ClassFileName));
Streams.pump(test3ClassURL.openStream(), jos);
jos.close();
Main.main(jarFile.getCanonicalPath());
expectedFile = new File(jarFile.getParent(), jarFile.getName().replaceAll("[.]jar", "_spifly.jar"));
Assert.assertTrue("A processed separate bundle should have been created", expectedFile.exists());
// Check manifest in generated bundle.
JarFile transformedJarFile = new JarFile(expectedFile);
Manifest expectedMF = transformedJarFile.getManifest();
Assert.assertEquals("1.0", expectedMF.getMainAttributes().getValue("Manifest-Version"));
Assert.assertEquals("2.0", expectedMF.getMainAttributes().getValue("Bundle-ManifestVersion"));
Assert.assertEquals("testbundle", expectedMF.getMainAttributes().getValue("Bundle-SymbolicName"));
Assert.assertEquals("Bar Bar", expectedMF.getMainAttributes().getValue("Foo"));
Assert.assertEquals("osgi.serviceloader; filter:=\"(osgi.serviceloader=org.apache.aries.spifly.mysvc.SPIProvider)\";cardinality:=multiple",
expectedMF.getMainAttributes().getValue(SpiFlyConstants.REQUIRE_CAPABILITY));
String importPackage = expectedMF.getMainAttributes().getValue("Import-Package");
Assert.assertTrue(
"org.foo.bar,org.apache.aries.spifly;version=\"[1.0.0,1.1.0)\"".equals(importPackage) ||
"org.apache.aries.spifly;version=\"[1.0.0,1.1.0)\",org.foo.bar".equals(importPackage));
JarFile initialJarFile = new JarFile(jarFile);
byte[] orgBytes = Streams.suck(initialJarFile.getInputStream(new ZipEntry(testClassFileName)));
byte[] nonTransBytes = Streams.suck(transformedJarFile.getInputStream(new ZipEntry(testClassFileName)));
Assert.assertArrayEquals(orgBytes, nonTransBytes);
byte[] orgBytes2 = Streams.suck(initialJarFile.getInputStream(new ZipEntry(test2ClassFileName)));
byte[] nonTransBytes2 = Streams.suck(transformedJarFile.getInputStream(new ZipEntry(test2ClassFileName)));
Assert.assertArrayEquals(orgBytes2, nonTransBytes2);
byte[] orgBytes3 = Streams.suck(initialJarFile.getInputStream(new ZipEntry(test3ClassFileName)));
byte[] transBytes3 = Streams.suck(transformedJarFile.getInputStream(new ZipEntry(test3ClassFileName)));
Assert.assertFalse("The transformed class should be different", Arrays.equals(orgBytes3, transBytes3));
initialJarFile.close();
transformedJarFile.close();
} finally {
jarFile.delete();
if (expectedFile != null)
expectedFile.delete();
}
}
|