[SPARK-34889][SS] Introduce MergingSessionsIterator merging elements directly which belong to the same session

Introduction: this PR is part of SPARK-10816 (`EventTime based sessionization (session window)`). Please refer to #31937 for an overall view of the code change. (Note that the code diff may have diverged a bit.)

### What changes were proposed in this pull request?

This PR introduces MergingSessionsIterator, which merges elements that belong to the same session directly.

MergingSessionsIterator is a variant of SortAggregateIterator which merges session windows based on the fact that input rows are sorted by "group keys + the start time of session window". When merging windows, MergingSessionsIterator also applies aggregations on the merged window, which eliminates the need to buffer inputs (which would require copying rows) and to update the session spec for each input.

MergingSessionsIterator performs considerably better than UpdatingSessionsIterator, introduced by SPARK-34888. Note that MergingSessionsIterator only applies to cases where the aggregation can be applied altogether, so there is still room for UpdatingSessionsIterator to be used.

This issue also introduces MergingSessionsExec, the physical node that sorts the input rows (via its required child ordering) and leverages MergingSessionsIterator to aggregate rows according to the session windows.
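To make the single-pass idea concrete, here is a minimal, self-contained sketch in plain Scala. It is an illustration only, not the PR's code: `Event`, `Session`, and `mergeSortedSessions` are hypothetical names, and the count stands in for arbitrary aggregations.

```scala
// Sketch: merge sessions in one pass over rows sorted by (key, session start),
// aggregating (here: counting) while merging, with no input buffering.
case class Event(key: String, start: Long, end: Long)
case class Session(key: String, start: Long, end: Long, count: Long)

def mergeSortedSessions(rows: Iterator[Event]): Iterator[Session] = new Iterator[Session] {
  private val it = rows.buffered
  def hasNext: Boolean = it.hasNext
  def next(): Session = {
    val first = it.next()
    var cur = Session(first.key, first.start, first.end, 1L)
    // Consume rows while they overlap the current session of the same key.
    while (it.hasNext && it.head.key == cur.key && it.head.start <= cur.end) {
      val e = it.next()
      // Mirrors the iterator's precondition: unsorted input is an error.
      require(e.start >= cur.start, "input rows are not sorted by session start")
      cur = cur.copy(end = math.max(cur.end, e.end), count = cur.count + 1)
    }
    cur
  }
}

// The two overlapping "a" rows collapse into one session with count 2.
mergeSortedSessions(Iterator(
  Event("a", 100, 110), Event("a", 105, 115), Event("a", 125, 135))).foreach(println)
// Session(a,100,115,2)
// Session(a,125,135,1)
```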

### Why are the changes needed?

This part is one of the pieces required to implement SPARK-10816.

### Does this PR introduce _any_ user-facing change?

No.

### How was this patch tested?

New test suite added.

Closes #31987 from HeartSaVioR/SPARK-34889-SPARK-10816-PR-31570-part-2.

Lead-authored-by: Jungtaek Lim (HeartSaVioR) <kabhwan.opensource@gmail.com>
Co-authored-by: Jungtaek Lim <kabhwan.opensource@gmail.com>
Signed-off-by: Liang-Chi Hsieh <viirya@gmail.com>
Jungtaek Lim (HeartSaVioR) 2021-06-23 13:04:37 -07:00 committed by Liang-Chi Hsieh
parent 077cf2acdb
commit 476197791b
3 changed files with 692 additions and 0 deletions

MergingSessionsExec.scala

@@ -0,0 +1,115 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.aggregate
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.catalyst.expressions.{Ascending, Attribute, Expression, MutableProjection, NamedExpression, SortOrder, UnsafeRow}
import org.apache.spark.sql.catalyst.expressions.aggregate.AggregateExpression
import org.apache.spark.sql.catalyst.plans.physical._
import org.apache.spark.sql.execution.SparkPlan
import org.apache.spark.sql.execution.metric.SQLMetrics
/**
* This node is a variant of SortAggregateExec which merges session windows based on the fact
* that the child node provides inputs sorted by group keys + the start time of the session
* window.
*
* When merging windows, it also applies aggregations on the merged window, which eliminates
* the need to buffer inputs (which would require copying rows) and to update the session spec
* for each input.
*
* This class receives requiredChildDistribution from the caller, to enable merging sessions
* in a local partition before shuffling. Setting both parameters to None won't trigger a
* shuffle, but the sort will still happen per local partition.
*
* Refer to [[MergingSessionsIterator]] for more details.
*/
case class MergingSessionsExec(
requiredChildDistributionExpressions: Option[Seq[Expression]],
requiredChildDistributionOption: Option[Seq[Distribution]],
groupingExpressions: Seq[NamedExpression],
sessionExpression: NamedExpression,
aggregateExpressions: Seq[AggregateExpression],
aggregateAttributes: Seq[Attribute],
initialInputBufferOffset: Int,
resultExpressions: Seq[NamedExpression],
child: SparkPlan) extends BaseAggregateExec {
private val keyWithoutSessionExpressions = groupingExpressions.diff(Seq(sessionExpression))
override lazy val metrics = Map(
"numOutputRows" -> SQLMetrics.createMetric(sparkContext, "number of output rows"))
override def output: Seq[Attribute] = child.output
override def outputOrdering: Seq[SortOrder] = child.outputOrdering
override def requiredChildDistribution: List[Distribution] = {
requiredChildDistributionExpressions match {
case Some(exprs) if exprs.isEmpty => AllTuples :: Nil
case Some(exprs) => ClusteredDistribution(exprs) :: Nil
case None => requiredChildDistributionOption match {
case Some(distributions) => distributions.toList
case None => UnspecifiedDistribution :: Nil
}
}
}
override def requiredChildOrdering: Seq[Seq[SortOrder]] = {
Seq((keyWithoutSessionExpressions ++ Seq(sessionExpression)).map(SortOrder(_, Ascending)))
}
override protected def doExecute(): RDD[InternalRow] = {
val numOutputRows = longMetric("numOutputRows")
child.execute().mapPartitionsWithIndexInternal { (partIndex, iter) =>
// Because the constructor of an aggregation iterator will read at least the first row,
// we need to get the value of iter.hasNext first.
val hasInput = iter.hasNext
if (!hasInput && groupingExpressions.nonEmpty) {
// This is a grouped aggregate and the input iterator is empty,
// so return an empty iterator.
Iterator[UnsafeRow]()
} else {
val outputIter = new MergingSessionsIterator(
partIndex,
groupingExpressions,
sessionExpression,
child.output,
iter,
aggregateExpressions,
aggregateAttributes,
initialInputBufferOffset,
resultExpressions,
(expressions, inputSchema) =>
MutableProjection.create(expressions, inputSchema),
numOutputRows)
if (!hasInput && groupingExpressions.isEmpty) {
// There is no input and there are no grouping expressions.
// We need to output a single row as the output.
numOutputRows += 1
Iterator[UnsafeRow](outputIter.outputForEmptyGroupingKeyWithoutInput())
} else {
outputIter
}
}
}
}
override protected def withNewChildInternal(newChild: SparkPlan): MergingSessionsExec =
copy(child = newChild)
}
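The class doc above mentions merging sessions in a local partition before shuffling. Below is a conceptual sketch of that two-phase flow in plain Scala, not the Spark operator itself; `Session` and `mergeSessions` are hypothetical names, and the partial count stands in for any partially aggregated state.

```scala
// Conceptual sketch: merge per partition first, then merge globally after a "shuffle".
case class Session(key: String, start: Long, end: Long, count: Long)

// Merge overlapping sessions of the same key; input must be sorted by (key, start).
def mergeSessions(sorted: Seq[Session]): Seq[Session] = {
  val out = scala.collection.mutable.ArrayBuffer.empty[Session]
  for (s <- sorted) {
    if (out.nonEmpty && out.last.key == s.key && s.start <= out.last.end) {
      val cur = out.remove(out.length - 1) // extend the session and sum partial counts
      out += cur.copy(end = math.max(cur.end, s.end), count = cur.count + s.count)
    } else {
      out += s // a new key or a non-overlapping window starts a new session
    }
  }
  out.toSeq
}

// Two partitions of raw rows (count = 1 each), before any shuffle.
val partitions = Seq(
  Seq(Session("a", 100, 110, 1), Session("a", 105, 115, 1)),
  Seq(Session("a", 108, 120, 1), Session("b", 200, 210, 1)))

// Phase 1: merge within each local partition (no shuffle triggered).
val local = partitions.map(p => mergeSessions(p.sortBy(s => (s.key, s.start))))
// "Shuffle": bring all pre-merged sessions of a key together, then merge globally.
val global = local.flatten.groupBy(_.key).values
  .flatMap(ss => mergeSessions(ss.sortBy(_.start))).toSeq
// global contains Session("a", 100, 120, 3) and Session("b", 200, 210, 1).
```

The point of this arrangement is that the shuffle moves pre-aggregated sessions rather than raw rows, which is what receiving requiredChildDistribution from the caller enables.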

MergingSessionsIterator.scala

@@ -0,0 +1,251 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.aggregate
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.catalyst.expressions.{Attribute, Expression, GenericInternalRow, JoinedRow, MutableProjection, NamedExpression, SpecificInternalRow, UnsafeProjection, UnsafeRow}
import org.apache.spark.sql.catalyst.expressions.aggregate.AggregateExpression
import org.apache.spark.sql.catalyst.expressions.codegen.GenerateUnsafeProjection
import org.apache.spark.sql.execution.metric.SQLMetric
/**
* This is a variant of SortAggregateIterator which merges session windows based on the fact
* that input rows are sorted by "group keys + the start time of session window".
*
* When merging windows, it also applies aggregations on the merged window, which eliminates
* the need to buffer inputs (which would require copying rows) and to update the session spec
* for each input.
*/
class MergingSessionsIterator(
partIndex: Int,
groupingExpressions: Seq[NamedExpression],
sessionExpression: NamedExpression,
valueAttributes: Seq[Attribute],
inputIterator: Iterator[InternalRow],
aggregateExpressions: Seq[AggregateExpression],
aggregateAttributes: Seq[Attribute],
initialInputBufferOffset: Int,
resultExpressions: Seq[NamedExpression],
newMutableProjection: (Seq[Expression], Seq[Attribute]) => MutableProjection,
numOutputRows: SQLMetric)
extends AggregationIterator(
partIndex,
groupingExpressions,
valueAttributes,
aggregateExpressions,
aggregateAttributes,
initialInputBufferOffset,
resultExpressions,
newMutableProjection) {
val groupingWithoutSession: Seq[NamedExpression] =
groupingExpressions.diff(Seq(sessionExpression))
val groupingWithoutSessionAttributes: Seq[Attribute] = groupingWithoutSession.map(_.toAttribute)
/**
* Creates a new aggregation buffer and initializes buffer values
* for all aggregate functions.
*/
private def newBuffer: InternalRow = {
val bufferSchema = aggregateFunctions.flatMap(_.aggBufferAttributes)
val bufferRowSize: Int = bufferSchema.length
val genericMutableBuffer = new GenericInternalRow(bufferRowSize)
val useUnsafeBuffer = bufferSchema.map(_.dataType).forall(UnsafeRow.isMutable)
val buffer = if (useUnsafeBuffer) {
val unsafeProjection =
UnsafeProjection.create(bufferSchema.map(_.dataType))
unsafeProjection.apply(genericMutableBuffer)
} else {
genericMutableBuffer
}
initializeBuffer(buffer)
buffer
}
///////////////////////////////////////////////////////////////////////////
// Mutable states for sort based aggregation.
///////////////////////////////////////////////////////////////////////////
// The grouping key of the current group.
private[this] var currentGroupingKey: UnsafeRow = _
private[this] var currentSession: UnsafeRow = _
// The grouping key of the next group.
private[this] var nextGroupingKey: UnsafeRow = _
private[this] var nextGroupingSession: UnsafeRow = _
// The first row of the next group.
private[this] var firstRowInNextGroup: InternalRow = _
// Indicates if we have a new group of rows from the sorted input iterator
private[this] var sortedInputHasNewGroup: Boolean = false
// The aggregation buffer used by the sort-based aggregation.
private[this] val sortBasedAggregationBuffer: InternalRow = newBuffer
private[this] val groupingWithoutSessionProjection: UnsafeProjection =
UnsafeProjection.create(groupingWithoutSession, valueAttributes)
private[this] val sessionProjection: UnsafeProjection =
UnsafeProjection.create(Seq(sessionExpression), valueAttributes)
// The flag indicating an error on the iterator: the sort precondition is not fulfilled.
private var errorOnIterator: Boolean = false
protected def initialize(): Unit = {
if (inputIterator.hasNext) {
initializeBuffer(sortBasedAggregationBuffer)
val inputRow = inputIterator.next()
nextGroupingKey = groupingWithoutSessionProjection(inputRow).copy()
val session = sessionProjection(inputRow)
nextGroupingSession = session.getStruct(0, 2).copy()
firstRowInNextGroup = inputRow.copy()
sortedInputHasNewGroup = true
} else {
// This inputIter is empty.
sortedInputHasNewGroup = false
}
}
initialize()
/** Processes rows in the current group. It will stop when it finds a new group. */
protected def processCurrentSortedGroup(): Unit = {
currentGroupingKey = nextGroupingKey
currentSession = nextGroupingSession
// Now, we will start to find all rows belonging to this group.
// We create a variable to track if we see the next group.
var findNextGroup = false
// firstRowInNextGroup is the first row of this group. We first process it.
processRow(sortBasedAggregationBuffer, firstRowInNextGroup)
// The search will stop when we see the next group or there is no
// input row left in the iter.
while (!findNextGroup && inputIterator.hasNext) {
// Get the grouping key.
val currentRow = inputIterator.next()
val groupingKey = groupingWithoutSessionProjection(currentRow)
val session = sessionProjection(currentRow)
val sessionStruct = session.getStruct(0, 2)
val sessionStart = getSessionStart(sessionStruct)
val sessionEnd = getSessionEnd(sessionStruct)
// Check if the current row belongs to the current group.
if (currentGroupingKey == groupingKey) {
if (sessionStart < getSessionStart(currentSession)) {
errorOnIterator = true
throw new IllegalStateException("Input iterator is not sorted based on session!")
} else if (sessionStart <= getSessionEnd(currentSession)) {
// expanding session length if needed
expandEndOfCurrentSession(sessionEnd)
processRow(sortBasedAggregationBuffer, currentRow)
} else {
// We find a new session window in the same group.
findNextGroup = true
startNewSession(currentRow, groupingKey, sessionStruct)
}
} else {
// We find a new group.
findNextGroup = true
startNewSession(currentRow, groupingKey, sessionStruct)
}
}
// We have not seen a new group. It means that there is no new row in the input
// iter. The current group is the last group of the iter.
if (!findNextGroup) {
sortedInputHasNewGroup = false
}
}
private def startNewSession(
currentRow: InternalRow,
groupingKey: UnsafeRow,
sessionStruct: UnsafeRow): Unit = {
nextGroupingKey = groupingKey.copy()
nextGroupingSession = sessionStruct.copy()
firstRowInNextGroup = currentRow.copy()
}
private def getSessionStart(sessionStruct: UnsafeRow): Long = {
sessionStruct.getLong(0)
}
private def getSessionEnd(sessionStruct: UnsafeRow): Long = {
sessionStruct.getLong(1)
}
private def expandEndOfCurrentSession(sessionEnd: Long): Unit = {
if (sessionEnd > getSessionEnd(currentSession)) {
currentSession.setLong(1, sessionEnd)
}
}
///////////////////////////////////////////////////////////////////////////
// Iterator's public methods
///////////////////////////////////////////////////////////////////////////
override final def hasNext: Boolean = {
if (errorOnIterator) {
throw new IllegalStateException("The iterator is already corrupted.")
}
sortedInputHasNewGroup
}
override final def next(): UnsafeRow = {
if (hasNext) {
// Process the current group.
processCurrentSortedGroup()
// Generate output row for the current group.
val groupingKey = generateGroupingKey()
val outputRow = generateOutput(groupingKey, sortBasedAggregationBuffer)
// Initialize buffer values for the next group.
initializeBuffer(sortBasedAggregationBuffer)
numOutputRows += 1
outputRow
} else {
// no more result
throw new NoSuchElementException
}
}
private val join = new JoinedRow
private val groupingKeyProj = GenerateUnsafeProjection.generate(groupingExpressions,
groupingWithoutSessionAttributes :+ sessionExpression.toAttribute)
private def generateGroupingKey(): UnsafeRow = {
val sessionStruct = new SpecificInternalRow(Seq(sessionExpression.toAttribute).toStructType)
sessionStruct.update(0, currentSession)
val joined = join(currentGroupingKey, sessionStruct)
groupingKeyProj(joined)
}
def outputForEmptyGroupingKeyWithoutInput(): UnsafeRow = {
initializeBuffer(sortBasedAggregationBuffer)
generateOutput(UnsafeRow.createFromByteArray(0, 0), sortBasedAggregationBuffer)
}
}

MergingSessionsIteratorSuite.scala

@@ -0,0 +1,326 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.streaming
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.catalyst.expressions.{GenericInternalRow, Literal, MutableProjection, UnsafeRow}
import org.apache.spark.sql.catalyst.expressions.aggregate.Count
import org.apache.spark.sql.catalyst.expressions.codegen.GenerateUnsafeProjection
import org.apache.spark.sql.execution.aggregate.MergingSessionsIterator
import org.apache.spark.sql.execution.metric.SQLMetrics
import org.apache.spark.sql.test.SharedSparkSession
import org.apache.spark.sql.types.{IntegerType, LongType, StringType, StructType}
import org.apache.spark.unsafe.types.UTF8String
class MergingSessionsIteratorSuite extends SharedSparkSession {
private val rowSchema = new StructType().add("key1", StringType).add("key2", IntegerType)
.add("session", new StructType().add("start", LongType).add("end", LongType))
.add("count", LongType)
private val rowAttributes = rowSchema.toAttributes
private val keysWithSessionAttributes = rowAttributes.filter { attr =>
List("key1", "key2", "session").contains(attr.name)
}
private val noKeyRowAttributes = rowAttributes.filterNot { attr =>
Seq("key1", "key2").contains(attr.name)
}
private val sessionAttribute = rowAttributes.filter(attr => attr.name == "session").head
test("no row") {
val iterator = createTestIterator(None.iterator)
assert(!iterator.hasNext)
}
test("only one row") {
val rows = List(createRow("a", 1, 100, 110))
val iterator = createTestIterator(rows.iterator)
assert(iterator.hasNext)
val expectedRow = createRow("a", 1, 100, 110, 1)
val retRow = iterator.next()
assertRowsEquals(expectedRow, retRow)
assert(!iterator.hasNext)
}
test("one session per key, one key") {
val row1 = createRow("a", 1, 100, 110)
val row2 = createRow("a", 1, 100, 110)
val row3 = createRow("a", 1, 105, 115)
val row4 = createRow("a", 1, 113, 123)
val rows = List(row1, row2, row3, row4)
val iterator = createTestIterator(rows.iterator)
assert(iterator.hasNext)
val retRow = iterator.next()
val expectedRow = createRow("a", 1, 100, 123, 4)
assertRowsEquals(expectedRow, retRow)
assert(!iterator.hasNext)
}
test("one session per key, multi keys") {
val rows = Seq(
// session 1
createRow("a", 1, 100, 110),
createRow("a", 1, 100, 110),
createRow("a", 1, 105, 115),
createRow("a", 1, 113, 123),
// session 2
createRow("a", 2, 110, 120),
createRow("a", 2, 115, 125),
createRow("a", 2, 117, 127),
createRow("a", 2, 125, 135)
)
val iterator = createTestIterator(rows.iterator)
assert(iterator.hasNext)
val expectedRow1 = createRow("a", 1, 100, 123, 4)
assertRowsEquals(expectedRow1, iterator.next())
assert(iterator.hasNext)
val expectedRow2 = createRow("a", 2, 110, 135, 4)
assertRowsEquals(expectedRow2, iterator.next())
assert(!iterator.hasNext)
}
test("multiple sessions per key, single key") {
val rows = Seq(
// session 1
createRow("a", 1, 100, 110),
createRow("a", 1, 105, 115),
// session 2
createRow("a", 1, 125, 135),
createRow("a", 1, 127, 137)
)
val iterator = createTestIterator(rows.iterator)
assert(iterator.hasNext)
val expectedRow1 = createRow("a", 1, 100, 115, 2)
assertRowsEquals(expectedRow1, iterator.next())
assert(iterator.hasNext)
val expectedRow2 = createRow("a", 1, 125, 137, 2)
assertRowsEquals(expectedRow2, iterator.next())
assert(!iterator.hasNext)
}
test("multiple sessions per key, multi keys") {
val rows = Seq(
// session 1
createRow("a", 1, 100, 110),
createRow("a", 1, 100, 110),
// session 2
createRow("a", 1, 115, 125),
createRow("a", 1, 119, 129),
// session 3
createRow("a", 2, 110, 120),
createRow("a", 2, 115, 125),
// session 4
createRow("a", 2, 127, 137),
createRow("a", 2, 135, 145)
)
val iterator = createTestIterator(rows.iterator)
assert(iterator.hasNext)
val expectedRow1 = createRow("a", 1, 100, 110, 2)
assertRowsEquals(expectedRow1, iterator.next())
assert(iterator.hasNext)
val expectedRow2 = createRow("a", 1, 115, 129, 2)
assertRowsEquals(expectedRow2, iterator.next())
assert(iterator.hasNext)
val expectedRow3 = createRow("a", 2, 110, 125, 2)
assertRowsEquals(expectedRow3, iterator.next())
assert(iterator.hasNext)
val expectedRow4 = createRow("a", 2, 127, 145, 2)
assertRowsEquals(expectedRow4, iterator.next())
assert(!iterator.hasNext)
}
test("throws exception if data is not sorted by session start") {
val rows = Seq(
createRow("a", 1, 100, 110),
createRow("a", 1, 100, 110),
createRow("a", 1, 95, 105),
createRow("a", 1, 113, 123)
)
val iterator = createTestIterator(rows.iterator)
// MergingSessionsIterator can't detect the error on hasNext()
assert(iterator.hasNext)
// when calling next(), it detects the error and throws IllegalStateException
intercept[IllegalStateException] {
iterator.next()
}
// afterwards, calling either hasNext() or next() will throw IllegalStateException
intercept[IllegalStateException] {
iterator.hasNext
}
intercept[IllegalStateException] {
iterator.next()
}
}
test("no key") {
val rows = Seq(
createNoKeyRow(100, 110),
createNoKeyRow(100, 110),
createNoKeyRow(105, 115),
createNoKeyRow(113, 123)
)
val iterator = createNoKeyTestIterator(rows.iterator)
assert(iterator.hasNext)
val expectedRow = createNoKeyRow(100, 123, 4)
assertNoKeyRowsEquals(expectedRow, iterator.next())
assert(!iterator.hasNext)
}
private def createTestIterator(iterator: Iterator[InternalRow]): MergingSessionsIterator = {
createTestIterator(iterator, isNoKey = false)
}
private def createNoKeyTestIterator(iterator: Iterator[InternalRow]): MergingSessionsIterator = {
createTestIterator(iterator, isNoKey = true)
}
private def createTestIterator(
iterator: Iterator[InternalRow],
isNoKey: Boolean): MergingSessionsIterator = {
val countFunc = Count(Literal.create(1L, LongType))
val countAggExpr = countFunc.toAggregateExpression()
val countRetAttr = countAggExpr.resultAttribute
val aggregateExpressions = Seq(countAggExpr)
val aggregateAttributes = Seq(countRetAttr)
val initialInputBufferOffset = 1
val groupingExpressions = if (isNoKey) {
Seq(sessionAttribute)
} else {
keysWithSessionAttributes
}
val resultExpressions = groupingExpressions ++ aggregateAttributes
new MergingSessionsIterator(
partIndex = 0,
groupingExpressions = groupingExpressions,
sessionExpression = sessionAttribute,
valueAttributes = resultExpressions,
inputIterator = iterator,
aggregateExpressions = aggregateExpressions,
aggregateAttributes = aggregateAttributes,
initialInputBufferOffset = initialInputBufferOffset,
resultExpressions = resultExpressions,
newMutableProjection = (expressions, inputSchema) =>
MutableProjection.create(expressions, inputSchema),
numOutputRows = SQLMetrics.createMetric(sparkContext, "output rows")
)
}
private def createRow(
key1: String,
key2: Int,
sessionStart: Long,
sessionEnd: Long,
countValue: Long = 0): UnsafeRow = {
val genericRow = new GenericInternalRow(4)
if (key1 != null) {
genericRow.update(0, UTF8String.fromString(key1))
} else {
genericRow.setNullAt(0)
}
genericRow.setInt(1, key2)
val session: Array[Any] = new Array[Any](2)
session(0) = sessionStart
session(1) = sessionEnd
val sessionRow = new GenericInternalRow(session)
genericRow.update(2, sessionRow)
genericRow.setLong(3, countValue)
val rowProjection = GenerateUnsafeProjection.generate(rowAttributes, rowAttributes)
rowProjection(genericRow)
}
private def assertRowsEquals(expectedRow: InternalRow, retRow: InternalRow): Unit = {
assert(retRow.getString(0) === expectedRow.getString(0))
assert(retRow.getInt(1) === expectedRow.getInt(1))
assert(retRow.getStruct(2, 2).getLong(0) == expectedRow.getStruct(2, 2).getLong(0))
assert(retRow.getStruct(2, 2).getLong(1) == expectedRow.getStruct(2, 2).getLong(1))
assert(retRow.getLong(3) === expectedRow.getLong(3))
}
private def createNoKeyRow(
sessionStart: Long,
sessionEnd: Long,
countValue: Long = 0): UnsafeRow = {
val genericRow = new GenericInternalRow(2)
val session: Array[Any] = new Array[Any](2)
session(0) = sessionStart
session(1) = sessionEnd
val sessionRow = new GenericInternalRow(session)
genericRow.update(0, sessionRow)
genericRow.setLong(1, countValue)
val rowProjection = GenerateUnsafeProjection.generate(noKeyRowAttributes, noKeyRowAttributes)
rowProjection(genericRow)
}
private def assertNoKeyRowsEquals(expectedRow: InternalRow, retRow: InternalRow): Unit = {
assert(retRow.getStruct(0, 2).getLong(0) == expectedRow.getStruct(0, 2).getLong(0))
assert(retRow.getStruct(0, 2).getLong(1) == expectedRow.getStruct(0, 2).getLong(1))
assert(retRow.getLong(1) === expectedRow.getLong(1))
}
}