This article collects typical usage examples of the Java class org.apache.flink.api.common.typeutils.base.array.BytePrimitiveArraySerializer. If you have been wondering what the BytePrimitiveArraySerializer class is used for, how to use it, or where to find usage examples, the curated class code examples below may help.
The BytePrimitiveArraySerializer class belongs to the package org.apache.flink.api.common.typeutils.base.array. A total of 9 code examples of the class are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps our system recommend better Java code examples.
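Before the examples, here is a minimal, self-contained sketch (not drawn from any of the projects below) of what the serializer does: the singleton INSTANCE writes a byte[] to a DataOutputView as a length followed by the raw bytes, and reads it back from a DataInputView. The demo class name and the stream-wrapper round trip are illustrative assumptions, not part of the original article:

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.util.Arrays;

import org.apache.flink.api.common.typeutils.base.array.BytePrimitiveArraySerializer;
import org.apache.flink.core.memory.DataInputViewStreamWrapper;
import org.apache.flink.core.memory.DataOutputViewStreamWrapper;

public class BytePrimitiveArraySerializerDemo {
    public static void main(String[] args) throws Exception {
        byte[] original = {1, 2, 3, 4};

        // Serialize: the serializer writes the array length, then the raw bytes.
        ByteArrayOutputStream bos = new ByteArrayOutputStream();
        BytePrimitiveArraySerializer.INSTANCE.serialize(original, new DataOutputViewStreamWrapper(bos));

        // Deserialize: read the length and the bytes back into a fresh array.
        ByteArrayInputStream bis = new ByteArrayInputStream(bos.toByteArray());
        byte[] restored = BytePrimitiveArraySerializer.INSTANCE.deserialize(new DataInputViewStreamWrapper(bis));

        System.out.println(Arrays.equals(original, restored)); // prints "true"
    }
}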
Example 1: initializeState
Votes: 2
import org.apache.flink.api.common.typeutils.base.array.BytePrimitiveArraySerializer; // import the required package/class
@Override
public void initializeState(StateInitializationContext context) throws Exception {
    super.initializeState(context);
    if (siddhiRuntimeState == null) {
        // Union list state: every parallel subtask receives the complete Siddhi runtime snapshot on restore.
        siddhiRuntimeState = context.getOperatorStateStore().getUnionListState(
            new ListStateDescriptor<>(SIDDHI_RUNTIME_STATE_NAME, new BytePrimitiveArraySerializer()));
    }
    if (queuedRecordsState == null) {
        // Plain list state holding the serialized records that were still queued when the snapshot was taken.
        queuedRecordsState = context.getOperatorStateStore().getListState(
            new ListStateDescriptor<>(QUEUED_RECORDS_STATE_NAME, new BytePrimitiveArraySerializer()));
    }
    if (context.isRestored()) {
        restoreState();
    }
}
Developer: haoch | Project: flink-siddhi | Lines of code: 16 | Source: AbstractSiddhiOperator.java
Example 2: initializeState
Votes: 2
import org.apache.flink.api.common.typeutils.base.array.BytePrimitiveArraySerializer; // import the required package/class
@Override
public void initializeState(StateInitializationContext context) throws Exception {
    super.initializeState(context);
    if (siddhiRuntimeState == null) {
        siddhiRuntimeState = context.getOperatorStateStore().getUnionListState(
            new ListStateDescriptor<>(SIDDHI_RUNTIME_STATE_NAME, new BytePrimitiveArraySerializer()));
    }
    if (queuedRecordsState == null) {
        queuedRecordsState = context.getOperatorStateStore().getListState(
            new ListStateDescriptor<>(QUEUED_RECORDS_STATE_NAME, new BytePrimitiveArraySerializer()));
    }
    if (context.isRestored()) {
        restoreState();
    }
}
Developer: apache | Project: bahir-flink | Lines of code: 16 | Source: AbstractSiddhiOperator.java
Example 3: HashTableTest
Votes: 2
import org.apache.flink.api.common.typeutils.base.array.BytePrimitiveArraySerializer; // import the required package/class
public HashTableTest() {
    TypeSerializer<?>[] fieldSerializers = { LongSerializer.INSTANCE, BytePrimitiveArraySerializer.INSTANCE };
    @SuppressWarnings("unchecked")
    Class<Tuple2<Long, byte[]>> clazz = (Class<Tuple2<Long, byte[]>>) (Class<?>) Tuple2.class;
    this.buildSerializer = new TupleSerializer<Tuple2<Long, byte[]>>(clazz, fieldSerializers);
    this.probeSerializer = LongSerializer.INSTANCE;

    TypeComparator<?>[] comparators = { new LongComparator(true) };
    TypeSerializer<?>[] comparatorSerializers = { LongSerializer.INSTANCE };
    this.buildComparator = new TupleComparator<Tuple2<Long, byte[]>>(new int[] {0}, comparators, comparatorSerializers);
    this.probeComparator = new LongComparator(true);

    this.pairComparator = new TypePairComparator<Long, Tuple2<Long, byte[]>>() {

        private long ref;

        @Override
        public void setReference(Long reference) {
            ref = reference;
        }

        @Override
        public boolean equalToReference(Tuple2<Long, byte[]> candidate) {
            //noinspection UnnecessaryUnboxing
            return candidate.f0.longValue() == ref;
        }

        @Override
        public int compareToReference(Tuple2<Long, byte[]> candidate) {
            long x = ref;
            long y = candidate.f0;
            return (x < y) ? -1 : ((x == y) ? 0 : 1);
        }
    };
}
Developer: axbaretto | Project: flink | Lines of code: 39 | Source: HashTableTest.java
Example 4: writeKeyValuePair
Votes: 2
import org.apache.flink.api.common.typeutils.base.array.BytePrimitiveArraySerializer; // import the required package/class
private void writeKeyValuePair(byte[] key, byte[] value, DataOutputView out) throws IOException {
    BytePrimitiveArraySerializer.INSTANCE.serialize(key, out);
    BytePrimitiveArraySerializer.INSTANCE.serialize(value, out);
}
Developer: axbaretto | Project: flink | Lines of code: 5 | Source: RocksDBKeyedStateBackend.java
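Example 4 shows only the write side. A hypothetical read-side counterpart, not part of the RocksDBKeyedStateBackend snippet above (Example 5 below inlines the equivalent deserialize calls directly in its restore loop), might look as follows, assuming the key is always written before the value:

// Hypothetical helper, shown for symmetry with writeKeyValuePair above.
private Tuple2<byte[], byte[]> readKeyValuePair(DataInputView in) throws IOException {
    // Read back in the order the pair was written: key first, then value.
    byte[] key = BytePrimitiveArraySerializer.INSTANCE.deserialize(in);
    byte[] value = BytePrimitiveArraySerializer.INSTANCE.deserialize(in);
    return Tuple2.of(key, value);
}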
Example 5: restoreKVStateData
Votes: 2
import org.apache.flink.api.common.typeutils.base.array.BytePrimitiveArraySerializer; // import the required package/class
/**
 * Restore the KV-state / ColumnFamily data for all key-groups referenced by the current state handle.
 *
 * @throws IOException
 * @throws RocksDBException
 */
private void restoreKVStateData() throws IOException, RocksDBException {
    //for all key-groups in the current state handle...
    for (Tuple2<Integer, Long> keyGroupOffset : currentKeyGroupsStateHandle.getGroupRangeOffsets()) {
        int keyGroup = keyGroupOffset.f0;

        // Check that restored key groups all belong to the backend
        Preconditions.checkState(rocksDBKeyedStateBackend.getKeyGroupRange().contains(keyGroup),
            "The key group must belong to the backend");

        long offset = keyGroupOffset.f1;
        //not empty key-group?
        if (0L != offset) {
            currentStateHandleInStream.seek(offset);
            try (InputStream compressedKgIn = keygroupStreamCompressionDecorator.decorateWithCompression(currentStateHandleInStream)) {
                DataInputViewStreamWrapper compressedKgInputView = new DataInputViewStreamWrapper(compressedKgIn);
                //TODO this could be aware of keyGroupPrefixBytes and write only one byte if possible
                int kvStateId = compressedKgInputView.readShort();
                ColumnFamilyHandle handle = currentStateHandleKVStateColumnFamilies.get(kvStateId);
                //insert all k/v pairs into DB
                boolean keyGroupHasMoreKeys = true;
                while (keyGroupHasMoreKeys) {
                    byte[] key = BytePrimitiveArraySerializer.INSTANCE.deserialize(compressedKgInputView);
                    byte[] value = BytePrimitiveArraySerializer.INSTANCE.deserialize(compressedKgInputView);
                    if (RocksDBFullSnapshotOperation.hasMetaDataFollowsFlag(key)) {
                        //clear the signal bit in the key to make it ready for insertion again
                        RocksDBFullSnapshotOperation.clearMetaDataFollowsFlag(key);
                        rocksDBKeyedStateBackend.db.put(handle, key, value);
                        //TODO this could be aware of keyGroupPrefixBytes and write only one byte if possible
                        kvStateId = RocksDBFullSnapshotOperation.END_OF_KEY_GROUP_MARK
                            & compressedKgInputView.readShort();
                        if (RocksDBFullSnapshotOperation.END_OF_KEY_GROUP_MARK == kvStateId) {
                            keyGroupHasMoreKeys = false;
                        } else {
                            handle = currentStateHandleKVStateColumnFamilies.get(kvStateId);
                        }
                    } else {
                        rocksDBKeyedStateBackend.db.put(handle, key, value);
                    }
                }
            }
        }
    }
}
Developer: axbaretto | Project: flink | Lines of code: 50 | Source: RocksDBKeyedStateBackend.java
Example 6: createSerializer
Votes: 2
import org.apache.flink.api.common.typeutils.base.array.BytePrimitiveArraySerializer; // import the required package/class
@Override
protected TypeSerializer<byte[]> createSerializer() {
    return new BytePrimitiveArraySerializer();
}
Developer: axbaretto | Project: flink | Lines of code: 5 | Source: BytePrimitiveArraySerializerTest.java
Example 7: testChunkedResponse
Votes: 2
import org.apache.flink.api.common.typeutils.base.array.BytePrimitiveArraySerializer; // import the required package/class
/**
 * Tests that large responses are chunked.
 */
@Test
public void testChunkedResponse() throws Exception {
    KvStateRegistry registry = new KvStateRegistry();
    KvStateRequestStats stats = new AtomicKvStateRequestStats();

    KvStateServerHandler handler = new KvStateServerHandler(registry, TEST_THREAD_POOL, stats);
    EmbeddedChannel channel = new EmbeddedChannel(getFrameDecoder(), handler);

    int numKeyGroups = 1;
    AbstractStateBackend abstractBackend = new MemoryStateBackend();
    DummyEnvironment dummyEnv = new DummyEnvironment("test", 1, 0);
    dummyEnv.setKvStateRegistry(registry);
    AbstractKeyedStateBackend<Integer> backend = abstractBackend.createKeyedStateBackend(
        dummyEnv,
        new JobID(),
        "test_op",
        IntSerializer.INSTANCE,
        numKeyGroups,
        new KeyGroupRange(0, 0),
        registry.createTaskRegistry(dummyEnv.getJobID(), dummyEnv.getJobVertexId()));

    final TestRegistryListener registryListener = new TestRegistryListener();
    registry.registerListener(registryListener);

    // Register state
    ValueStateDescriptor<byte[]> desc = new ValueStateDescriptor<>("any", BytePrimitiveArraySerializer.INSTANCE);
    desc.setQueryable("vanilla");

    ValueState<byte[]> state = backend.getPartitionedState(
        VoidNamespace.INSTANCE,
        VoidNamespaceSerializer.INSTANCE,
        desc);

    // Update KvState
    byte[] bytes = new byte[2 * channel.config().getWriteBufferHighWaterMark()];
    byte current = 0;
    for (int i = 0; i < bytes.length; i++) {
        bytes[i] = current++;
    }

    int key = 99812822;
    backend.setCurrentKey(key);
    state.update(bytes);

    // Request
    byte[] serializedKeyAndNamespace = KvStateRequestSerializer.serializeKeyAndNamespace(
        key,
        IntSerializer.INSTANCE,
        VoidNamespace.INSTANCE,
        VoidNamespaceSerializer.INSTANCE);

    long requestId = Integer.MAX_VALUE + 182828L;

    assertTrue(registryListener.registrationName.equals("vanilla"));
    ByteBuf request = KvStateRequestSerializer.serializeKvStateRequest(
        channel.alloc(),
        requestId,
        registryListener.kvStateId,
        serializedKeyAndNamespace);

    // Write the request and wait for the response
    channel.writeInbound(request);

    Object msg = readInboundBlocking(channel);
    assertTrue("Not ChunkedByteBuf", msg instanceof ChunkedByteBuf);
}
Developer: axbaretto | Project: flink | Lines of code: 72 | Source: KvStateServerHandlerTest.java
Example 8: testSpillingWhenBuildingTableWithoutOverflow
Votes: 2
import org.apache.flink.api.common.typeutils.base.array.BytePrimitiveArraySerializer; // import the required package/class
/**
 * Tests that the MutableHashTable spills its partitions when creating the initial table
 * without overflow segments in the partitions. This means that the records are large.
 */
@Test
public void testSpillingWhenBuildingTableWithoutOverflow() throws Exception {
    final IOManager ioMan = new IOManagerAsync();

    try {
        final TypeSerializer<byte[]> serializer = BytePrimitiveArraySerializer.INSTANCE;
        final TypeComparator<byte[]> buildComparator = new BytePrimitiveArrayComparator(true);
        final TypeComparator<byte[]> probeComparator = new BytePrimitiveArrayComparator(true);

        @SuppressWarnings("unchecked") final TypePairComparator<byte[], byte[]> pairComparator =
            new GenericPairComparator<>(
                new BytePrimitiveArrayComparator(true), new BytePrimitiveArrayComparator(true));

        final int pageSize = 128;
        final int numSegments = 33;

        List<MemorySegment> memory = getMemory(numSegments, pageSize);

        MutableHashTable<byte[], byte[]> table = new MutableHashTable<byte[], byte[]>(
            serializer,
            serializer,
            buildComparator,
            probeComparator,
            pairComparator,
            memory,
            ioMan,
            1,
            false);

        int numElements = 9;

        table.open(
            new CombiningIterator<byte[]>(
                new ByteArrayIterator(numElements, 128, (byte) 0),
                new ByteArrayIterator(numElements, 128, (byte) 1)),
            new CombiningIterator<byte[]>(
                new ByteArrayIterator(1, 128, (byte) 0),
                new ByteArrayIterator(1, 128, (byte) 1)));

        while (table.nextRecord()) {
            MutableObjectIterator<byte[]> iterator = table.getBuildSideIterator();

            int counter = 0;

            while (iterator.next() != null) {
                counter++;
            }

            // check that we retrieve all our elements
            Assert.assertEquals(numElements, counter);
        }

        table.close();
    } finally {
        ioMan.shutdown();
    }
}
Developer: axbaretto | Project: flink | Lines of code: 62 | Source: HashTableTest.java
Example 9: testChunkedResponse
Votes: 2
import org.apache.flink.api.common.typeutils.base.array.BytePrimitiveArraySerializer; // import the required package/class
/**
 * Tests that large responses are chunked.
 */
@Test
public void testChunkedResponse() throws Exception {
    KvStateRegistry registry = new KvStateRegistry();
    KvStateRequestStats stats = new AtomicKvStateRequestStats();

    MessageSerializer<KvStateInternalRequest, KvStateResponse> serializer =
        new MessageSerializer<>(new KvStateInternalRequest.KvStateInternalRequestDeserializer(), new KvStateResponse.KvStateResponseDeserializer());

    KvStateServerHandler handler = new KvStateServerHandler(testServer, registry, serializer, stats);
    EmbeddedChannel channel = new EmbeddedChannel(getFrameDecoder(), handler);

    int numKeyGroups = 1;
    AbstractStateBackend abstractBackend = new MemoryStateBackend();
    DummyEnvironment dummyEnv = new DummyEnvironment("test", 1, 0);
    dummyEnv.setKvStateRegistry(registry);
    AbstractKeyedStateBackend<Integer> backend = abstractBackend.createKeyedStateBackend(
        dummyEnv,
        new JobID(),
        "test_op",
        IntSerializer.INSTANCE,
        numKeyGroups,
        new KeyGroupRange(0, 0),
        registry.createTaskRegistry(dummyEnv.getJobID(), dummyEnv.getJobVertexId()));

    final TestRegistryListener registryListener = new TestRegistryListener();
    registry.registerListener(registryListener);

    // Register state
    ValueStateDescriptor<byte[]> desc = new ValueStateDescriptor<>("any", BytePrimitiveArraySerializer.INSTANCE);
    desc.setQueryable("vanilla");

    ValueState<byte[]> state = backend.getPartitionedState(
        VoidNamespace.INSTANCE,
        VoidNamespaceSerializer.INSTANCE,
        desc);

    // Update KvState
    byte[] bytes = new byte[2 * channel.config().getWriteBufferHighWaterMark()];
    byte current = 0;
    for (int i = 0; i < bytes.length; i++) {
        bytes[i] = current++;
    }

    int key = 99812822;
    backend.setCurrentKey(key);
    state.update(bytes);

    // Request
    byte[] serializedKeyAndNamespace = KvStateSerializer.serializeKeyAndNamespace(
        key,
        IntSerializer.INSTANCE,
        VoidNamespace.INSTANCE,
        VoidNamespaceSerializer.INSTANCE);

    long requestId = Integer.MAX_VALUE + 182828L;

    assertTrue(registryListener.registrationName.equals("vanilla"));

    KvStateInternalRequest request = new KvStateInternalRequest(registryListener.kvStateId, serializedKeyAndNamespace);
    ByteBuf serRequest = MessageSerializer.serializeRequest(channel.alloc(), requestId, request);

    // Write the request and wait for the response
    channel.writeInbound(serRequest);

    Object msg = readInboundBlocking(channel);
    assertTrue("Not ChunkedByteBuf", msg instanceof ChunkedByteBuf);
}
Developer: axbaretto | Project: flink | Lines of code: 72 | Source: KvStateServerHandlerTest.java