From 47b7ebad12a17218f6ca0301fc802c0e0a81d873 Mon Sep 17 00:00:00 2001
From: Matei Zaharia
Date: Sat, 28 Jul 2012 20:03:26 -0700
Subject: [PATCH 001/226] Added the Spark Streaming code, ported to Akka 2

---
 core/src/main/scala/spark/BlockRDD.scala | 42 ++
 core/src/main/scala/spark/SparkContext.scala | 5 +
 project/SparkBuild.scala | 6 +-
 run | 2 +
 sentences.txt | 3 +
 startTrigger.sh | 3 +
 .../src/main/scala/spark/stream/BlockID.scala | 20 +
 .../spark/stream/ConnectionHandler.scala | 157 +++++
 .../stream/DumbTopKWordCount2_Special.scala | 138 ++++
 .../spark/stream/DumbWordCount2_Special.scala | 92 +++
 .../spark/stream/FileStreamReceiver.scala | 70 ++
 .../main/scala/spark/stream/GrepCount.scala | 39 ++
 .../main/scala/spark/stream/GrepCount2.scala | 113 ++++
 .../scala/spark/stream/GrepCountApprox.scala | 54 ++
 .../scala/spark/stream/IdealPerformance.scala | 36 ++
 .../main/scala/spark/stream/Interval.scala | 75 +++
 .../src/main/scala/spark/stream/Job.scala | 21 +
 .../main/scala/spark/stream/JobManager.scala | 112 ++++
 .../main/scala/spark/stream/JobManager2.scala | 37 ++
 .../spark/stream/NetworkStreamReceiver.scala | 184 ++++++
 .../src/main/scala/spark/stream/RDS.scala | 607 ++++++++++++++++++
 .../spark/stream/ReducedWindowedRDS.scala | 218 +++++++
 .../main/scala/spark/stream/Scheduler.scala | 181 ++++++
 .../SenGeneratorForPerformanceTest.scala | 78 +++
 .../spark/stream/SenderReceiverTest.scala | 63 ++
 .../spark/stream/SentenceFileGenerator.scala | 92 +++
 .../spark/stream/SentenceGenerator.scala | 103 +++
 .../main/scala/spark/stream/ShuffleTest.scala | 22 +
 .../scala/spark/stream/SimpleWordCount.scala | 30 +
 .../scala/spark/stream/SimpleWordCount2.scala | 51 ++
 .../stream/SimpleWordCount2_Special.scala | 83 +++
 .../spark/stream/SparkStreamContext.scala | 105 +++
 .../scala/spark/stream/TestGenerator.scala | 107 +++
 .../scala/spark/stream/TestGenerator2.scala | 119 ++++
 .../scala/spark/stream/TestGenerator4.scala | 244 +++++++
 .../spark/stream/TestInputBlockTracker.scala | 42 ++
 .../spark/stream/TestStreamCoordinator.scala | 38 ++
 .../spark/stream/TestStreamReceiver3.scala | 420 ++++++++++++
 .../spark/stream/TestStreamReceiver4.scala | 373 +++++++++++
 .../src/main/scala/spark/stream/Time.scala | 85 +++
 .../scala/spark/stream/TopContentCount.scala | 97 +++
 .../scala/spark/stream/TopKWordCount2.scala | 103 +++
 .../spark/stream/TopKWordCount2_Special.scala | 142 ++++
 .../main/scala/spark/stream/WindowedRDS.scala | 68 ++
 .../main/scala/spark/stream/WordCount.scala | 62 ++
 .../main/scala/spark/stream/WordCount1.scala | 46 ++
 .../main/scala/spark/stream/WordCount2.scala | 55 ++
 .../spark/stream/WordCount2_Special.scala | 94 +++
 .../main/scala/spark/stream/WordCount3.scala | 49 ++
 .../scala/spark/stream/WordCountEc2.scala | 41 ++
 .../spark/stream/WordCountTrivialWindow.scala | 51 ++
 .../src/main/scala/spark/stream/WordMax.scala | 64 ++
 52 files changed, 5141 insertions(+), 1 deletion(-)
 create mode 100644 core/src/main/scala/spark/BlockRDD.scala
 create mode 100644 sentences.txt
 create mode 100755 startTrigger.sh
 create mode 100644 streaming/src/main/scala/spark/stream/BlockID.scala
 create mode 100644 streaming/src/main/scala/spark/stream/ConnectionHandler.scala
 create mode 100644 streaming/src/main/scala/spark/stream/DumbTopKWordCount2_Special.scala
 create mode 100644 streaming/src/main/scala/spark/stream/DumbWordCount2_Special.scala
 create mode 100644 streaming/src/main/scala/spark/stream/FileStreamReceiver.scala
 create mode 100644 streaming/src/main/scala/spark/stream/GrepCount.scala
 create mode 100644
streaming/src/main/scala/spark/stream/GrepCount2.scala create mode 100644 streaming/src/main/scala/spark/stream/GrepCountApprox.scala create mode 100644 streaming/src/main/scala/spark/stream/IdealPerformance.scala create mode 100644 streaming/src/main/scala/spark/stream/Interval.scala create mode 100644 streaming/src/main/scala/spark/stream/Job.scala create mode 100644 streaming/src/main/scala/spark/stream/JobManager.scala create mode 100644 streaming/src/main/scala/spark/stream/JobManager2.scala create mode 100644 streaming/src/main/scala/spark/stream/NetworkStreamReceiver.scala create mode 100644 streaming/src/main/scala/spark/stream/RDS.scala create mode 100644 streaming/src/main/scala/spark/stream/ReducedWindowedRDS.scala create mode 100644 streaming/src/main/scala/spark/stream/Scheduler.scala create mode 100644 streaming/src/main/scala/spark/stream/SenGeneratorForPerformanceTest.scala create mode 100644 streaming/src/main/scala/spark/stream/SenderReceiverTest.scala create mode 100644 streaming/src/main/scala/spark/stream/SentenceFileGenerator.scala create mode 100644 streaming/src/main/scala/spark/stream/SentenceGenerator.scala create mode 100644 streaming/src/main/scala/spark/stream/ShuffleTest.scala create mode 100644 streaming/src/main/scala/spark/stream/SimpleWordCount.scala create mode 100644 streaming/src/main/scala/spark/stream/SimpleWordCount2.scala create mode 100644 streaming/src/main/scala/spark/stream/SimpleWordCount2_Special.scala create mode 100644 streaming/src/main/scala/spark/stream/SparkStreamContext.scala create mode 100644 streaming/src/main/scala/spark/stream/TestGenerator.scala create mode 100644 streaming/src/main/scala/spark/stream/TestGenerator2.scala create mode 100644 streaming/src/main/scala/spark/stream/TestGenerator4.scala create mode 100644 streaming/src/main/scala/spark/stream/TestInputBlockTracker.scala create mode 100644 streaming/src/main/scala/spark/stream/TestStreamCoordinator.scala create mode 100644 streaming/src/main/scala/spark/stream/TestStreamReceiver3.scala create mode 100644 streaming/src/main/scala/spark/stream/TestStreamReceiver4.scala create mode 100644 streaming/src/main/scala/spark/stream/Time.scala create mode 100644 streaming/src/main/scala/spark/stream/TopContentCount.scala create mode 100644 streaming/src/main/scala/spark/stream/TopKWordCount2.scala create mode 100644 streaming/src/main/scala/spark/stream/TopKWordCount2_Special.scala create mode 100644 streaming/src/main/scala/spark/stream/WindowedRDS.scala create mode 100644 streaming/src/main/scala/spark/stream/WordCount.scala create mode 100644 streaming/src/main/scala/spark/stream/WordCount1.scala create mode 100644 streaming/src/main/scala/spark/stream/WordCount2.scala create mode 100644 streaming/src/main/scala/spark/stream/WordCount2_Special.scala create mode 100644 streaming/src/main/scala/spark/stream/WordCount3.scala create mode 100644 streaming/src/main/scala/spark/stream/WordCountEc2.scala create mode 100644 streaming/src/main/scala/spark/stream/WordCountTrivialWindow.scala create mode 100644 streaming/src/main/scala/spark/stream/WordMax.scala diff --git a/core/src/main/scala/spark/BlockRDD.scala b/core/src/main/scala/spark/BlockRDD.scala new file mode 100644 index 0000000000..ea009f0f4f --- /dev/null +++ b/core/src/main/scala/spark/BlockRDD.scala @@ -0,0 +1,42 @@ +package spark + +import scala.collection.mutable.HashMap + +class BlockRDDSplit(val blockId: String, idx: Int) extends Split { + val index = idx +} + + +class BlockRDD[T: ClassManifest](sc: SparkContext, 
blockIds: Array[String]) extends RDD[T](sc) { + + @transient + val splits_ = (0 until blockIds.size).map(i => { + new BlockRDDSplit(blockIds(i), i).asInstanceOf[Split] + }).toArray + + @transient + lazy val locations_ = { + val blockManager = SparkEnv.get.blockManager + /*val locations = blockIds.map(id => blockManager.getLocations(id))*/ + val locations = blockManager.getLocations(blockIds) + HashMap(blockIds.zip(locations):_*) + } + + override def splits = splits_ + + override def compute(split: Split): Iterator[T] = { + val blockManager = SparkEnv.get.blockManager + val blockId = split.asInstanceOf[BlockRDDSplit].blockId + blockManager.get(blockId) match { + case Some(block) => block.asInstanceOf[Iterator[T]] + case None => + throw new Exception("Could not compute split, block " + blockId + " not found") + } + } + + override def preferredLocations(split: Split) = + locations_(split.asInstanceOf[BlockRDDSplit].blockId) + + override val dependencies: List[Dependency[_]] = Nil +} + diff --git a/core/src/main/scala/spark/SparkContext.scala b/core/src/main/scala/spark/SparkContext.scala index dd17d4d6b3..78c7618542 100644 --- a/core/src/main/scala/spark/SparkContext.scala +++ b/core/src/main/scala/spark/SparkContext.scala @@ -409,6 +409,11 @@ class SparkContext( * various Spark features. */ object SparkContext { + + // TODO: temporary hack for using HDFS as input in streaing + var inputFile: String = null + var idealPartitions: Int = 1 + implicit object DoubleAccumulatorParam extends AccumulatorParam[Double] { def addInPlace(t1: Double, t2: Double): Double = t1 + t2 def zero(initialValue: Double) = 0.0 diff --git a/project/SparkBuild.scala b/project/SparkBuild.scala index 726d490738..c4ada2bf2a 100644 --- a/project/SparkBuild.scala +++ b/project/SparkBuild.scala @@ -8,7 +8,7 @@ object SparkBuild extends Build { // "1.0.1" for Apache releases, or "0.20.2-cdh3u3" for Cloudera Hadoop. 
val HADOOP_VERSION = "0.20.205.0" - lazy val root = Project("root", file("."), settings = sharedSettings) aggregate(core, repl, examples, bagel) + lazy val root = Project("root", file("."), settings = sharedSettings) aggregate(core, repl, examples, bagel, streaming) lazy val core = Project("core", file("core"), settings = coreSettings) @@ -18,6 +18,8 @@ object SparkBuild extends Build { lazy val bagel = Project("bagel", file("bagel"), settings = bagelSettings) dependsOn (core) + lazy val streaming = Project("streaming", file("streaming"), settings = streamingSettings) dependsOn (core) + def sharedSettings = Defaults.defaultSettings ++ Seq( organization := "org.spark-project", version := "0.6.0-SNAPSHOT", @@ -82,6 +84,8 @@ object SparkBuild extends Build { def bagelSettings = sharedSettings ++ Seq(name := "spark-bagel") + def streamingSettings = sharedSettings ++ Seq(name := "spark-streaming") + def extraAssemblySettings() = Seq(test in assembly := {}) ++ Seq( mergeStrategy in assembly := { case m if m.toLowerCase.endsWith("manifest.mf") => MergeStrategy.discard diff --git a/run b/run index 8f7256b4e5..e3e98f4280 100755 --- a/run +++ b/run @@ -46,6 +46,7 @@ CORE_DIR="$FWDIR/core" REPL_DIR="$FWDIR/repl" EXAMPLES_DIR="$FWDIR/examples" BAGEL_DIR="$FWDIR/bagel" +STREAMING_DIR="$FWDIR/streaming" # Build up classpath CLASSPATH="$SPARK_CLASSPATH" @@ -55,6 +56,7 @@ CLASSPATH+=":$CORE_DIR/target/scala-$SCALA_VERSION/classes" CLASSPATH+=":$CORE_DIR/src/main/resources" CLASSPATH+=":$REPL_DIR/target/scala-$SCALA_VERSION/classes" CLASSPATH+=":$EXAMPLES_DIR/target/scala-$SCALA_VERSION/classes" +CLASSPATH+=":$STREAMING_DIR/target/scala-$SCALA_VERSION/classes" for jar in `find $CORE_DIR/lib -name '*jar'`; do CLASSPATH+=":$jar" done diff --git a/sentences.txt b/sentences.txt new file mode 100644 index 0000000000..fedf96c66e --- /dev/null +++ b/sentences.txt @@ -0,0 +1,3 @@ +Hello world! +What's up? 
+There is no cow level diff --git a/startTrigger.sh b/startTrigger.sh new file mode 100755 index 0000000000..0afce91a3e --- /dev/null +++ b/startTrigger.sh @@ -0,0 +1,3 @@ +#!/bin/bash + +./run spark.stream.SentenceGenerator localhost 7078 sentences.txt 1 diff --git a/streaming/src/main/scala/spark/stream/BlockID.scala b/streaming/src/main/scala/spark/stream/BlockID.scala new file mode 100644 index 0000000000..a3fd046c9a --- /dev/null +++ b/streaming/src/main/scala/spark/stream/BlockID.scala @@ -0,0 +1,20 @@ +package spark.stream + +case class BlockID(sRds: String, sInterval: Interval, sPartition: Int) { + override def toString : String = ( + sRds + BlockID.sConnector + + sInterval.beginTime + BlockID.sConnector + + sInterval.endTime + BlockID.sConnector + + sPartition + ) +} + +object BlockID { + val sConnector = '-' + + def parse(name : String) = BlockID( + name.split(BlockID.sConnector)(0), + new Interval(name.split(BlockID.sConnector)(1).toLong, + name.split(BlockID.sConnector)(2).toLong), + name.split(BlockID.sConnector)(3).toInt) +} \ No newline at end of file diff --git a/streaming/src/main/scala/spark/stream/ConnectionHandler.scala b/streaming/src/main/scala/spark/stream/ConnectionHandler.scala new file mode 100644 index 0000000000..73b82b76b8 --- /dev/null +++ b/streaming/src/main/scala/spark/stream/ConnectionHandler.scala @@ -0,0 +1,157 @@ +package spark.stream + +import spark.Logging + +import scala.collection.mutable.{ArrayBuffer, SynchronizedQueue} + +import java.net._ +import java.io._ +import java.nio._ +import java.nio.charset._ +import java.nio.channels._ +import java.nio.channels.spi._ + +abstract class ConnectionHandler(host: String, port: Int, connect: Boolean) +extends Thread with Logging { + + val selector = SelectorProvider.provider.openSelector() + val interestChangeRequests = new SynchronizedQueue[(SelectionKey, Int)] + + initLogging() + + override def run() { + try { + if (connect) { + connect() + } else { + listen() + } + + var interrupted = false + while(!interrupted) { + + preSelect() + + while(!interestChangeRequests.isEmpty) { + val (key, ops) = interestChangeRequests.dequeue + val lastOps = key.interestOps() + key.interestOps(ops) + + def intToOpStr(op: Int): String = { + val opStrs = new ArrayBuffer[String]() + if ((op & SelectionKey.OP_READ) != 0) opStrs += "READ" + if ((op & SelectionKey.OP_WRITE) != 0) opStrs += "WRITE" + if ((op & SelectionKey.OP_CONNECT) != 0) opStrs += "CONNECT" + if ((op & SelectionKey.OP_ACCEPT) != 0) opStrs += "ACCEPT" + if (opStrs.size > 0) opStrs.reduceLeft(_ + " | " + _) else " " + } + + logTrace("Changed ops from [" + intToOpStr(lastOps) + "] to [" + intToOpStr(ops) + "]") + } + + selector.select() + interrupted = Thread.currentThread.isInterrupted + + val selectedKeys = selector.selectedKeys().iterator() + while (selectedKeys.hasNext) { + val key = selectedKeys.next.asInstanceOf[SelectionKey] + selectedKeys.remove() + if (key.isValid) { + if (key.isAcceptable) { + accept(key) + } else if (key.isConnectable) { + finishConnect(key) + } else if (key.isReadable) { + read(key) + } else if (key.isWritable) { + write(key) + } + } + } + } + } catch { + case e: Exception => { + logError("Error in select loop", e) + } + } + } + + def connect() { + val socketAddress = new InetSocketAddress(host, port) + val channel = SocketChannel.open() + channel.configureBlocking(false) + channel.socket.setReuseAddress(true) + channel.socket.setTcpNoDelay(true) + channel.connect(socketAddress) + channel.register(selector, SelectionKey.OP_CONNECT) + 
logInfo("Initiating connection to [" + socketAddress + "]") + } + + def listen() { + val channel = ServerSocketChannel.open() + channel.configureBlocking(false) + channel.socket.setReuseAddress(true) + channel.socket.setReceiveBufferSize(256 * 1024) + channel.socket.bind(new InetSocketAddress(port)) + channel.register(selector, SelectionKey.OP_ACCEPT) + logInfo("Listening on port " + port) + } + + def finishConnect(key: SelectionKey) { + try { + val channel = key.channel.asInstanceOf[SocketChannel] + val address = channel.socket.getRemoteSocketAddress + channel.finishConnect() + logInfo("Connected to [" + host + ":" + port + "]") + ready(key) + } catch { + case e: IOException => { + logError("Error finishing connect to " + host + ":" + port) + close(key) + } + } + } + + def accept(key: SelectionKey) { + try { + val serverChannel = key.channel.asInstanceOf[ServerSocketChannel] + val channel = serverChannel.accept() + val address = channel.socket.getRemoteSocketAddress + channel.configureBlocking(false) + logInfo("Accepted connection from [" + address + "]") + ready(channel.register(selector, 0)) + } catch { + case e: IOException => { + logError("Error accepting connection", e) + } + } + } + + def changeInterest(key: SelectionKey, ops: Int) { + logTrace("Added request to change ops to " + ops) + interestChangeRequests += ((key, ops)) + } + + def ready(key: SelectionKey) + + def preSelect() { + } + + def read(key: SelectionKey) { + throw new UnsupportedOperationException("Cannot read on connection of type " + this.getClass.toString) + } + + def write(key: SelectionKey) { + throw new UnsupportedOperationException("Cannot write on connection of type " + this.getClass.toString) + } + + def close(key: SelectionKey) { + try { + key.channel.close() + key.cancel() + Thread.currentThread.interrupt + } catch { + case e: Exception => logError("Error closing connection", e) + } + } +} diff --git a/streaming/src/main/scala/spark/stream/DumbTopKWordCount2_Special.scala b/streaming/src/main/scala/spark/stream/DumbTopKWordCount2_Special.scala new file mode 100644 index 0000000000..bd43f44b1a --- /dev/null +++ b/streaming/src/main/scala/spark/stream/DumbTopKWordCount2_Special.scala @@ -0,0 +1,138 @@ +package spark.stream + +import spark.SparkContext +import SparkContext._ +import SparkStreamContext._ + +import spark.storage.StorageLevel + +import scala.util.Sorting +import scala.collection.JavaConversions.mapAsScalaMap +import scala.collection.mutable.Queue + +import java.lang.{Long => JLong} + +object DumbTopKWordCount2_Special { + + def moreWarmup(sc: SparkContext) { + (0 until 20).foreach {i => + sc.parallelize(1 to 20000000, 500) + .map(_ % 100).map(_.toString) + .map(x => (x, 1)).reduceByKey(_ + _, 10) + .collect() + } + } + + def main (args: Array[String]) { + + if (args.length < 2) { + println ("Usage: SparkStreamContext <# sentence streams>") + System.exit(1) + } + + val ssc = new SparkStreamContext(args(0), "WordCount2") + + val numSentenceStreams = if (args.length > 1) args(1).toInt else 1 + if (args.length > 2) { + ssc.setTempDir(args(2)) + } + + GrepCount2.warmConnectionManagers(ssc.sc) + moreWarmup(ssc.sc) + + val sentences = new UnifiedRDS( + (1 to numSentenceStreams).map(i => ssc.readTestStream("Sentences-" + i, 1000)).toArray + ) + + + def add(v1: JLong, v2: JLong) = (v1 + v2) + def subtract(v1: JLong, v2: JLong) = (v1 - v2) + + def splitAndCountPartitions(iter: Iterator[String]): Iterator[(String, JLong)] = { + val map = new java.util.HashMap[String, JLong] + var i = 0 + var j = 0 + while 
(iter.hasNext) { + val s = iter.next() + i = 0 + while (i < s.length) { + j = i + while (j < s.length && s.charAt(j) != ' ') { + j += 1 + } + if (j > i) { + val w = s.substring(i, j) + val c = map.get(w) + if (c == null) { + map.put(w, 1) + } else { + map.put(w, c + 1) + } + } + i = j + while (i < s.length && s.charAt(i) == ' ') { + i += 1 + } + } + } + map.toIterator + } + + + val wordCounts = sentences.mapPartitions(splitAndCountPartitions).reduceByKey(_ + _, 10) + wordCounts.persist(StorageLevel.MEMORY_ONLY) + val windowedCounts = wordCounts.window(Seconds(10), Seconds(1)).reduceByKey(_ + _, 10) + + def topK(data: Iterator[(String, JLong)], k: Int): Iterator[(String, JLong)] = { + val taken = new Array[(String, JLong)](k) + + var i = 0 + var len = 0 + var done = false + var value: (String, JLong) = null + var swap: (String, JLong) = null + var count = 0 + + while(data.hasNext) { + value = data.next + count += 1 + /*println("count = " + count)*/ + if (len == 0) { + taken(0) = value + len = 1 + } else if (len < k || value._2 > taken(len - 1)._2) { + if (len < k) { + len += 1 + } + taken(len - 1) = value + i = len - 1 + while(i > 0 && taken(i - 1)._2 < taken(i)._2) { + swap = taken(i) + taken(i) = taken(i-1) + taken(i - 1) = swap + i -= 1 + } + } + } + println("Took " + len + " out of " + count + " items") + return taken.toIterator + } + + val k = 10 + val partialTopKWindowedCounts = windowedCounts.mapPartitions(topK(_, k)) + partialTopKWindowedCounts.foreachRDD(rdd => { + val collectedCounts = rdd.collect + println("Collected " + collectedCounts.size + " items") + topK(collectedCounts.toIterator, k).foreach(println) + }) + + /* + windowedCounts.filter(_ == null).foreachRDD(rdd => { + val count = rdd.count + println("# of nulls = " + count) + })*/ + + ssc.run + } +} + diff --git a/streaming/src/main/scala/spark/stream/DumbWordCount2_Special.scala b/streaming/src/main/scala/spark/stream/DumbWordCount2_Special.scala new file mode 100644 index 0000000000..31d682348a --- /dev/null +++ b/streaming/src/main/scala/spark/stream/DumbWordCount2_Special.scala @@ -0,0 +1,92 @@ +package spark.stream + +import spark.SparkContext +import SparkContext._ +import SparkStreamContext._ + +import spark.storage.StorageLevel + +import scala.util.Sorting +import scala.collection.mutable.ArrayBuffer +import scala.collection.mutable.HashMap +import scala.collection.mutable.Queue +import scala.collection.JavaConversions.mapAsScalaMap + +import java.lang.{Long => JLong} +import it.unimi.dsi.fastutil.objects.{Object2LongOpenHashMap => OLMap} + +object DumbWordCount2_Special { + + def moreWarmup(sc: SparkContext) { + (0 until 20).foreach {i => + sc.parallelize(1 to 20000000, 500) + .map(_ % 100).map(_.toString) + .map(x => (x, 1)).reduceByKey(_ + _, 10) + .collect() + } + } + + def main (args: Array[String]) { + + if (args.length < 2) { + println ("Usage: SparkStreamContext <# sentence streams>") + System.exit(1) + } + + val ssc = new SparkStreamContext(args(0), "WordCount2") + + val numSentenceStreams = if (args.length > 1) args(1).toInt else 1 + if (args.length > 2) { + ssc.setTempDir(args(2)) + } + + GrepCount2.warmConnectionManagers(ssc.sc) + moreWarmup(ssc.sc) + + val sentences = new UnifiedRDS( + (1 to numSentenceStreams).map(i => ssc.readTestStream("Sentences-" + i, 1000)).toArray + ) + + def add(v1: JLong, v2: JLong) = (v1 + v2) + def subtract(v1: JLong, v2: JLong) = (v1 - v2) + + def splitAndCountPartitions(iter: Iterator[String]): Iterator[(String, JLong)] = { + val map = new java.util.HashMap[String, JLong] 
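+      // Hand-rolled tokenizer: scan each sentence once, slice out the words
+      // between space characters, and accumulate per-word counts in the mutable map.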
+ var i = 0 + var j = 0 + while (iter.hasNext) { + val s = iter.next() + i = 0 + while (i < s.length) { + j = i + while (j < s.length && s.charAt(j) != ' ') { + j += 1 + } + if (j > i) { + val w = s.substring(i, j) + val c = map.get(w) + if (c == null) { + map.put(w, 1) + } else { + map.put(w, c + 1) + } + } + i = j + while (i < s.length && s.charAt(i) == ' ') { + i += 1 + } + } + } + + map.toIterator + } + + val wordCounts = sentences.mapPartitions(splitAndCountPartitions).reduceByKey(_ + _, 10) + wordCounts.persist(StorageLevel.MEMORY_ONLY) + val windowedCounts = wordCounts.window(Seconds(10), Seconds(1)).reduceByKey(_ + _, 10) + windowedCounts.foreachRDD(_.collect) + + ssc.run + } +} + diff --git a/streaming/src/main/scala/spark/stream/FileStreamReceiver.scala b/streaming/src/main/scala/spark/stream/FileStreamReceiver.scala new file mode 100644 index 0000000000..026254d6e1 --- /dev/null +++ b/streaming/src/main/scala/spark/stream/FileStreamReceiver.scala @@ -0,0 +1,70 @@ +package spark.stream + +import spark.Logging + +import scala.collection.mutable.HashSet +import scala.actors._ +import scala.actors.Actor._ +import scala.actors.remote._ +import scala.actors.remote.RemoteActor._ + +import org.apache.hadoop.fs._ +import org.apache.hadoop.conf._ +import org.apache.hadoop.io._ +import org.apache.hadoop.mapred._ +import org.apache.hadoop.util._ + +class FileStreamReceiver ( + inputName: String, + rootDirectory: String, + intervalDuration: Long) + extends Logging { + + val pollInterval = 100 + val sparkstreamScheduler = { + val host = System.getProperty("spark.master.host") + val port = System.getProperty("spark.master.port").toInt + 1 + RemoteActor.select(Node(host, port), 'SparkStreamScheduler) + } + val directory = new Path(rootDirectory) + val fs = directory.getFileSystem(new Configuration()) + val files = new HashSet[String]() + var time: Long = 0 + + def start() { + fs.mkdirs(directory) + files ++= getFiles() + + actor { + logInfo("Monitoring directory - " + rootDirectory) + while(true) { + testFiles(getFiles()) + Thread.sleep(pollInterval) + } + } + } + + def getFiles(): Iterable[String] = { + fs.listStatus(directory).map(_.getPath.toString) + } + + def testFiles(fileList: Iterable[String]) { + fileList.foreach(file => { + if (!files.contains(file)) { + if (!file.endsWith("_tmp")) { + notifyFile(file) + } + files += file + } + }) + } + + def notifyFile(file: String) { + logInfo("Notifying file " + file) + time += intervalDuration + val interval = Interval(LongTime(time), LongTime(time + intervalDuration)) + sparkstreamScheduler ! 
InputGenerated(inputName, interval, file) + } +} + + diff --git a/streaming/src/main/scala/spark/stream/GrepCount.scala b/streaming/src/main/scala/spark/stream/GrepCount.scala new file mode 100644 index 0000000000..45b90d4837 --- /dev/null +++ b/streaming/src/main/scala/spark/stream/GrepCount.scala @@ -0,0 +1,39 @@ +package spark.stream + +import SparkStreamContext._ + +import scala.util.Sorting + +import spark.SparkContext +import spark.storage.StorageLevel + +object GrepCount { + var inputFile : String = null + var HDFS : String = null + var idealPartitions : Int = 0 + + def main (args: Array[String]) { + + if (args.length != 4) { + println ("Usage: GrepCount ") + System.exit(1) + } + + HDFS = args(1) + inputFile = HDFS + args(2) + idealPartitions = args(3).toInt + println ("Input file: " + inputFile) + + val ssc = new SparkStreamContext(args(0), "GrepCount") + + SparkContext.idealPartitions = idealPartitions + SparkContext.inputFile = inputFile + + val sentences = ssc.readNetworkStream[String]("Sentences", Array("localhost:55119"), 1000) + //sentences.print + val matching = sentences.filter(_.contains("light")) + matching.foreachRDD(rdd => println(rdd.count)) + + ssc.run + } +} diff --git a/streaming/src/main/scala/spark/stream/GrepCount2.scala b/streaming/src/main/scala/spark/stream/GrepCount2.scala new file mode 100644 index 0000000000..4eb65ba906 --- /dev/null +++ b/streaming/src/main/scala/spark/stream/GrepCount2.scala @@ -0,0 +1,113 @@ +package spark.stream + +import SparkStreamContext._ + +import scala.util.Sorting + +import spark.SparkEnv +import spark.SparkContext +import spark.storage.StorageLevel +import spark.network.Message +import spark.network.ConnectionManagerId + +import java.nio.ByteBuffer + +object GrepCount2 { + + def startSparkEnvs(sc: SparkContext) { + + val dummy = sc.parallelize(0 to 1000, 100).persist(StorageLevel.DISK_AND_MEMORY) + sc.runJob(dummy, (_: Iterator[Int]) => {}) + + println("SparkEnvs started") + Thread.sleep(1000) + /*sc.runJob(sc.parallelize(0 to 1000, 100), (_: Iterator[Int]) => {})*/ + } + + def warmConnectionManagers(sc: SparkContext) { + val slaveConnManagerIds = sc.parallelize(0 to 100, 100).map( + i => SparkEnv.get.connectionManager.id).collect().distinct + println("\nSlave ConnectionManagerIds") + slaveConnManagerIds.foreach(println) + println + + Thread.sleep(1000) + val numSlaves = slaveConnManagerIds.size + val count = 3 + val size = 5 * 1024 * 1024 + val iterations = (500 * 1024 * 1024 / (numSlaves * size)).toInt + println("count = " + count + ", size = " + size + ", iterations = " + iterations) + + (0 until count).foreach(i => { + val resultStrs = sc.parallelize(0 until numSlaves, numSlaves).map(i => { + val connManager = SparkEnv.get.connectionManager + val thisConnManagerId = connManager.id + /*connManager.onReceiveMessage((msg: Message, id: ConnectionManagerId) => { + println("Received [" + msg + "] from [" + id + "]") + None + })*/ + + + val buffer = ByteBuffer.allocate(size).put(Array.tabulate[Byte](size)(x => x.toByte)) + buffer.flip + + val startTime = System.currentTimeMillis + val futures = (0 until iterations).map(i => { + slaveConnManagerIds.filter(_ != thisConnManagerId).map(slaveConnManagerId => { + val bufferMessage = Message.createBufferMessage(buffer.duplicate) + println("Sending [" + bufferMessage + "] to [" + slaveConnManagerId + "]") + connManager.sendMessageReliably(slaveConnManagerId, bufferMessage) + }) + }).flatMap(x => x) + val results = futures.map(f => f()) + val finishTime = System.currentTimeMillis + + + val 
mb = size * results.size / 1024.0 / 1024.0 + val ms = finishTime - startTime + + val resultStr = "Sent " + mb + " MB in " + ms + " ms at " + (mb / ms * 1000.0) + " MB/s" + println(resultStr) + System.gc() + resultStr + }).collect() + + println("---------------------") + println("Run " + i) + resultStrs.foreach(println) + println("---------------------") + }) + } + + + def main (args: Array[String]) { + + if (args.length < 2) { + println ("Usage: GrepCount2 <# sentence streams>") + System.exit(1) + } + + val ssc = new SparkStreamContext(args(0), "GrepCount2") + + val numSentenceStreams = if (args.length > 1) args(1).toInt else 1 + if (args.length > 2) { + ssc.setTempDir(args(2)) + } + + /*startSparkEnvs(ssc.sc)*/ + warmConnectionManagers(ssc.sc) + + val sentences = new UnifiedRDS( + (1 to numSentenceStreams).map(i => ssc.readTestStream("Sentences-"+i, 500)).toArray + ) + + val matching = sentences.filter(_.contains("light")) + matching.foreachRDD(rdd => println(rdd.count)) + + ssc.run + } +} + + + + diff --git a/streaming/src/main/scala/spark/stream/GrepCountApprox.scala b/streaming/src/main/scala/spark/stream/GrepCountApprox.scala new file mode 100644 index 0000000000..a4be2cc936 --- /dev/null +++ b/streaming/src/main/scala/spark/stream/GrepCountApprox.scala @@ -0,0 +1,54 @@ +package spark.stream + +import SparkStreamContext._ + +import scala.util.Sorting + +import spark.SparkContext +import spark.storage.StorageLevel + +object GrepCountApprox { + var inputFile : String = null + var hdfs : String = null + var idealPartitions : Int = 0 + + def main (args: Array[String]) { + + if (args.length != 5) { + println ("Usage: GrepCountApprox ") + System.exit(1) + } + + hdfs = args(1) + inputFile = hdfs + args(2) + idealPartitions = args(3).toInt + val timeout = args(4).toLong + println ("Input file: " + inputFile) + + val ssc = new SparkStreamContext(args(0), "GrepCount") + + SparkContext.idealPartitions = idealPartitions + SparkContext.inputFile = inputFile + ssc.setTempDir(hdfs + "/tmp") + + val sentences = ssc.readNetworkStream[String]("Sentences", Array("localhost:55119"), 1000) + //sentences.print + val matching = sentences.filter(_.contains("light")) + var i = 0 + val startTime = System.currentTimeMillis + matching.foreachRDD { rdd => + val myNum = i + val result = rdd.countApprox(timeout) + val initialTime = (System.currentTimeMillis - startTime) / 1000.0 + printf("APPROX\t%.2f\t%d\tinitial\t%.1f\t%.1f\n", initialTime, myNum, result.initialValue.mean, + result.initialValue.high - result.initialValue.low) + result.onComplete { r => + val finalTime = (System.currentTimeMillis - startTime) / 1000.0 + printf("APPROX\t%.2f\t%d\tfinal\t%.1f\t0.0\t%.1f\n", finalTime, myNum, r.mean, finalTime - initialTime) + } + i += 1 + } + + ssc.run + } +} diff --git a/streaming/src/main/scala/spark/stream/IdealPerformance.scala b/streaming/src/main/scala/spark/stream/IdealPerformance.scala new file mode 100644 index 0000000000..589fb2def0 --- /dev/null +++ b/streaming/src/main/scala/spark/stream/IdealPerformance.scala @@ -0,0 +1,36 @@ +package spark.stream + +import scala.collection.mutable.Map + +object IdealPerformance { + val base: String = "The medium researcher counts around the pinched troop The empire breaks " + + "Matei Matei announces HY with a theorem " + + def main (args: Array[String]) { + val sentences: String = base * 100000 + + for (i <- 1 to 30) { + val start = System.nanoTime + + val words = sentences.split(" ") + + val pairs = words.map(word => (word, 1)) + + val counts = Map[String, Int]() + + 
println("Job " + i + " position A at " + (System.nanoTime - start) / 1e9) + + pairs.foreach((pair) => { + var t = counts.getOrElse(pair._1, 0) + counts(pair._1) = t + pair._2 + }) + println("Job " + i + " position B at " + (System.nanoTime - start) / 1e9) + + for ((word, count) <- counts) { + print(word + " " + count + "; ") + } + println + println("Job " + i + " finished in " + (System.nanoTime - start) / 1e9) + } + } +} \ No newline at end of file diff --git a/streaming/src/main/scala/spark/stream/Interval.scala b/streaming/src/main/scala/spark/stream/Interval.scala new file mode 100644 index 0000000000..08d0ed95b4 --- /dev/null +++ b/streaming/src/main/scala/spark/stream/Interval.scala @@ -0,0 +1,75 @@ +package spark.stream + +case class Interval (val beginTime: Time, val endTime: Time) { + + def this(beginMs: Long, endMs: Long) = this(new LongTime(beginMs), new LongTime(endMs)) + + def duration(): Time = endTime - beginTime + + def += (time: Time) { + beginTime += time + endTime += time + this + } + + def + (time: Time): Interval = { + new Interval(beginTime + time, endTime + time) + } + + def < (that: Interval): Boolean = { + if (this.duration != that.duration) { + throw new Exception("Comparing two intervals with different durations [" + this + ", " + that + "]") + } + this.endTime < that.endTime + } + + def <= (that: Interval) = (this < that || this == that) + + def > (that: Interval) = !(this <= that) + + def >= (that: Interval) = !(this < that) + + def next(): Interval = { + this + (endTime - beginTime) + } + + def isZero() = (beginTime.isZero && endTime.isZero) + + def toFormattedString = beginTime.toFormattedString + "-" + endTime.toFormattedString + + override def toString = "[" + beginTime + ", " + endTime + "]" +} + +object Interval { + + /* + implicit def longTupleToInterval (longTuple: (Long, Long)) = + Interval(longTuple._1, longTuple._2) + + implicit def intTupleToInterval (intTuple: (Int, Int)) = + Interval(intTuple._1, intTuple._2) + + implicit def string2Interval (str: String): Interval = { + val parts = str.split(",") + if (parts.length == 1) + return Interval.zero + return Interval (parts(0).toInt, parts(1).toInt) + } + + def getInterval (timeMs: Long, intervalDurationMs: Long): Interval = { + val intervalBeginMs = timeMs / intervalDurationMs * intervalDurationMs + Interval(intervalBeginMs, intervalBeginMs + intervalDurationMs) + } + */ + + def zero() = new Interval (Time.zero, Time.zero) + + def currentInterval(intervalDuration: LongTime): Interval = { + val time = LongTime(System.currentTimeMillis) + val intervalBegin = time.floor(intervalDuration) + Interval(intervalBegin, intervalBegin + intervalDuration) + } + +} + + diff --git a/streaming/src/main/scala/spark/stream/Job.scala b/streaming/src/main/scala/spark/stream/Job.scala new file mode 100644 index 0000000000..bfdd5db645 --- /dev/null +++ b/streaming/src/main/scala/spark/stream/Job.scala @@ -0,0 +1,21 @@ +package spark.stream + +class Job(val time: Time, func: () => _) { + val id = Job.getNewId() + + def run() { + func() + } + + override def toString = "SparkStream Job " + id + ":" + time +} + +object Job { + var lastId = 1 + + def getNewId() = synchronized { + lastId += 1 + lastId + } +} + diff --git a/streaming/src/main/scala/spark/stream/JobManager.scala b/streaming/src/main/scala/spark/stream/JobManager.scala new file mode 100644 index 0000000000..5ea80b92aa --- /dev/null +++ b/streaming/src/main/scala/spark/stream/JobManager.scala @@ -0,0 +1,112 @@ +package spark.stream + +import spark.SparkEnv +import 
spark.Logging + +import scala.collection.mutable.PriorityQueue +import scala.actors._ +import scala.actors.Actor._ +import scala.actors.remote._ +import scala.actors.remote.RemoteActor._ +import scala.actors.scheduler.ResizableThreadPoolScheduler +import scala.actors.scheduler.ForkJoinScheduler + +sealed trait JobManagerMessage +case class RunJob(job: Job) extends JobManagerMessage +case class JobCompleted(handlerId: Int) extends JobManagerMessage + +class JobHandler(ssc: SparkStreamContext, val id: Int) extends DaemonActor with Logging { + + var busy = false + + def act() { + loop { + receive { + case job: Job => { + SparkEnv.set(ssc.env) + try { + logInfo("Starting " + job) + job.run() + logInfo("Finished " + job) + if (job.time.isInstanceOf[LongTime]) { + val longTime = job.time.asInstanceOf[LongTime] + logInfo("Total pushing + skew + processing delay for " + longTime + " is " + + (System.currentTimeMillis - longTime.milliseconds) / 1000.0 + " s") + } + } catch { + case e: Exception => logError("SparkStream job failed", e) + } + busy = false + reply(JobCompleted(id)) + } + } + } + } +} + +class JobManager(ssc: SparkStreamContext, numThreads: Int = 2) extends DaemonActor with Logging { + + implicit private val jobOrdering = new Ordering[Job] { + override def compare(job1: Job, job2: Job): Int = { + if (job1.time < job2.time) { + return 1 + } else if (job2.time < job1.time) { + return -1 + } else { + return 0 + } + } + } + + private val jobs = new PriorityQueue[Job]() + private val handlers = (0 until numThreads).map(i => new JobHandler(ssc, i)) + + def act() { + handlers.foreach(_.start) + loop { + receive { + case RunJob(job) => { + jobs += job + logInfo("Job " + job + " submitted") + runJob() + } + case JobCompleted(handlerId) => { + runJob() + } + } + } + } + + def runJob(): Unit = { + logInfo("Attempting to allocate job ") + if (jobs.size > 0) { + handlers.find(!_.busy).foreach(handler => { + val job = jobs.dequeue + logInfo("Allocating job " + job + " to handler " + handler.id) + handler.busy = true + handler ! job + }) + } + } +} + +object JobManager { + def main(args: Array[String]) { + val ssc = new SparkStreamContext("local[4]", "JobManagerTest") + val jobManager = new JobManager(ssc) + jobManager.start() + + val t = System.currentTimeMillis + for (i <- 1 to 10) { + jobManager ! 
RunJob(new Job( + LongTime(i), + () => { + Thread.sleep(500) + println("Job " + i + " took " + (System.currentTimeMillis - t) + " ms") + } + )) + } + Thread.sleep(6000) + } +} + diff --git a/streaming/src/main/scala/spark/stream/JobManager2.scala b/streaming/src/main/scala/spark/stream/JobManager2.scala new file mode 100644 index 0000000000..b69653b9a4 --- /dev/null +++ b/streaming/src/main/scala/spark/stream/JobManager2.scala @@ -0,0 +1,37 @@ +package spark.stream + +import spark.{Logging, SparkEnv} +import java.util.concurrent.Executors + + +class JobManager2(ssc: SparkStreamContext, numThreads: Int = 1) extends Logging { + + class JobHandler(ssc: SparkStreamContext, job: Job) extends Runnable { + def run() { + SparkEnv.set(ssc.env) + try { + logInfo("Starting " + job) + job.run() + logInfo("Finished " + job) + if (job.time.isInstanceOf[LongTime]) { + val longTime = job.time.asInstanceOf[LongTime] + logInfo("Total notification + skew + processing delay for " + longTime + " is " + + (System.currentTimeMillis - longTime.milliseconds) / 1000.0 + " s") + if (System.getProperty("spark.stream.distributed", "false") == "true") { + TestInputBlockTracker.setEndTime(job.time) + } + } + } catch { + case e: Exception => logError("SparkStream job failed", e) + } + } + } + + initLogging() + + val jobExecutor = Executors.newFixedThreadPool(numThreads) + + def runJob(job: Job) { + jobExecutor.execute(new JobHandler(ssc, job)) + } +} diff --git a/streaming/src/main/scala/spark/stream/NetworkStreamReceiver.scala b/streaming/src/main/scala/spark/stream/NetworkStreamReceiver.scala new file mode 100644 index 0000000000..8be46cc927 --- /dev/null +++ b/streaming/src/main/scala/spark/stream/NetworkStreamReceiver.scala @@ -0,0 +1,184 @@ +package spark.stream + +import spark.Logging +import spark.storage.StorageLevel + +import scala.math._ +import scala.collection.mutable.{Queue, HashMap, ArrayBuffer} +import scala.actors._ +import scala.actors.Actor._ +import scala.actors.remote._ +import scala.actors.remote.RemoteActor._ + +import java.io.BufferedWriter +import java.io.OutputStreamWriter + +import org.apache.hadoop.fs._ +import org.apache.hadoop.conf._ +import org.apache.hadoop.io._ +import org.apache.hadoop.mapred._ +import org.apache.hadoop.util._ + +/*import akka.actor.Actor._*/ + +class NetworkStreamReceiver[T: ClassManifest] ( + inputName: String, + intervalDuration: Time, + splitId: Int, + ssc: SparkStreamContext, + tempDirectory: String) + extends DaemonActor + with Logging { + + /** + * Assume all data coming in has non-decreasing timestamp. 
+ */ + final class Inbox[T: ClassManifest] (intervalDuration: Time) { + var currentBucket: (Interval, ArrayBuffer[T]) = null + val filledBuckets = new Queue[(Interval, ArrayBuffer[T])]() + + def += (tuple: (Time, T)) = addTuple(tuple) + + def addTuple(tuple: (Time, T)) { + val (time, data) = tuple + val interval = getInterval (time) + + filledBuckets.synchronized { + if (currentBucket == null) { + currentBucket = (interval, new ArrayBuffer[T]()) + } + + if (interval != currentBucket._1) { + filledBuckets += currentBucket + currentBucket = (interval, new ArrayBuffer[T]()) + } + + currentBucket._2 += data + } + } + + def getInterval(time: Time): Interval = { + val intervalBegin = time.floor(intervalDuration) + Interval (intervalBegin, intervalBegin + intervalDuration) + } + + def hasFilledBuckets(): Boolean = { + filledBuckets.synchronized { + return filledBuckets.size > 0 + } + } + + def popFilledBucket(): (Interval, ArrayBuffer[T]) = { + filledBuckets.synchronized { + if (filledBuckets.size == 0) { + return null + } + return filledBuckets.dequeue() + } + } + } + + val inbox = new Inbox[T](intervalDuration) + lazy val sparkstreamScheduler = { + val host = System.getProperty("spark.master.host") + val port = System.getProperty("spark.master.port").toInt + val url = "akka://spark@%s:%s/user/SparkStreamScheduler".format(host, port) + ssc.actorSystem.actorFor(url) + } + /*sparkstreamScheduler ! Test()*/ + + val intervalDurationMillis = intervalDuration.asInstanceOf[LongTime].milliseconds + val useBlockManager = true + + initLogging() + + override def act() { + // register the InputReceiver + val port = 7078 + RemoteActor.alive(port) + RemoteActor.register(Symbol("NetworkStreamReceiver-"+inputName), self) + logInfo("Registered actor on port " + port) + + loop { + reactWithin (getSleepTime) { + case TIMEOUT => + flushInbox() + case data => + val t = data.asInstanceOf[T] + inbox += (getTimeFromData(t), t) + } + } + } + + def getSleepTime(): Long = { + (System.currentTimeMillis / intervalDurationMillis + 1) * + intervalDurationMillis - System.currentTimeMillis + } + + def getTimeFromData(data: T): Time = { + LongTime(System.currentTimeMillis) + } + + def flushInbox() { + while (inbox.hasFilledBuckets) { + inbox.synchronized { + val (interval, data) = inbox.popFilledBucket() + val dataArray = data.toArray + logInfo("Received " + dataArray.length + " items at interval " + interval) + val reference = { + if (useBlockManager) { + writeToBlockManager(dataArray, interval) + } else { + writeToDisk(dataArray, interval) + } + } + if (reference != null) { + logInfo("Notifying scheduler") + sparkstreamScheduler ! InputGenerated(inputName, interval, reference.toString) + } + } + } + } + + def writeToDisk(data: Array[T], interval: Interval): String = { + try { + // TODO(Haoyuan): For current test, the following writing to file lines could be + // commented. 
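+      // Write this interval's bucket as a text part-file under tempDirectory and
+      // return its path, which flushInbox() reports to the scheduler as the reference.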
+ val fs = new Path(tempDirectory).getFileSystem(new Configuration()) + val inputDir = new Path( + tempDirectory, + inputName + "-" + interval.toFormattedString) + val inputFile = new Path(inputDir, "part-" + splitId) + logInfo("Writing to file " + inputFile) + if (System.getProperty("spark.fake", "false") != "true") { + val writer = new BufferedWriter(new OutputStreamWriter(fs.create(inputFile, true))) + data.foreach(x => writer.write(x.toString + "\n")) + writer.close() + } else { + logInfo("Fake file") + } + inputFile.toString + }catch { + case e: Exception => + logError("Exception writing to file at interval " + interval + ": " + e.getMessage, e) + null + } + } + + def writeToBlockManager(data: Array[T], interval: Interval): String = { + try{ + val blockId = inputName + "-" + interval.toFormattedString + "-" + splitId + if (System.getProperty("spark.fake", "false") != "true") { + logInfo("Writing as block " + blockId ) + ssc.env.blockManager.put(blockId.toString, data.toIterator, StorageLevel.DISK_AND_MEMORY) + } else { + logInfo("Fake block") + } + blockId + } catch { + case e: Exception => + logError("Exception writing to block manager at interval " + interval + ": " + e.getMessage, e) + null + } + } +} diff --git a/streaming/src/main/scala/spark/stream/RDS.scala b/streaming/src/main/scala/spark/stream/RDS.scala new file mode 100644 index 0000000000..b83181b0d1 --- /dev/null +++ b/streaming/src/main/scala/spark/stream/RDS.scala @@ -0,0 +1,607 @@ +package spark.stream + +import spark.stream.SparkStreamContext._ + +import spark.RDD +import spark.BlockRDD +import spark.UnionRDD +import spark.Logging +import spark.SparkContext +import spark.SparkContext._ +import spark.storage.StorageLevel + +import scala.collection.mutable.ArrayBuffer +import scala.collection.mutable.HashMap + +import java.net.InetSocketAddress + +abstract class RDS[T: ClassManifest] (@transient val ssc: SparkStreamContext) +extends Logging with Serializable { + + initLogging() + + /* ---------------------------------------------- */ + /* Methods that must be implemented by subclasses */ + /* ---------------------------------------------- */ + + // Time by which the window slides in this RDS + def slideTime: Time + + // List of parent RDSs on which this RDS depends on + def dependencies: List[RDS[_]] + + // Key method that computes RDD for a valid time + def compute (validTime: Time): Option[RDD[T]] + + /* --------------------------------------- */ + /* Other general fields and methods of RDS */ + /* --------------------------------------- */ + + // Variable to store the RDDs generated earlier in time + @transient private val generatedRDDs = new HashMap[Time, RDD[T]] () + + // Variable to be set to the first time seen by the RDS (effective time zero) + private[stream] var zeroTime: Time = null + + // Variable to specify storage level + private var storageLevel: StorageLevel = StorageLevel.NONE + + // Checkpoint level and checkpoint interval + private var checkpointLevel: StorageLevel = StorageLevel.NONE // NONE means don't checkpoint + private var checkpointInterval: Time = null + + // Change this RDD's storage level + def persist( + storageLevel: StorageLevel, + checkpointLevel: StorageLevel, + checkpointInterval: Time): RDS[T] = { + if (this.storageLevel != StorageLevel.NONE && this.storageLevel != storageLevel) { + // TODO: not sure this is necessary for RDSes + throw new UnsupportedOperationException( + "Cannot change storage level of an RDS after it was already assigned a level") + } + this.storageLevel = 
storageLevel + this.checkpointLevel = checkpointLevel + this.checkpointInterval = checkpointInterval + this + } + + def persist(newLevel: StorageLevel): RDS[T] = persist(newLevel, StorageLevel.NONE, null) + + // Turn on the default caching level for this RDD + def persist(): RDS[T] = persist(StorageLevel.MEMORY_ONLY_DESER) + + // Turn on the default caching level for this RDD + def cache(): RDS[T] = persist() + + def isInitialized = (zeroTime != null) + + // This method initializes the RDS by setting the "zero" time, based on which + // the validity of future times is calculated. This method also recursively initializes + // its parent RDSs. + def initialize(firstInterval: Interval) { + if (zeroTime == null) { + zeroTime = firstInterval.beginTime + } + logInfo(this + " initialized") + dependencies.foreach(_.initialize(firstInterval)) + } + + // This method checks whether the 'time' is valid wrt slideTime for generating RDD + private def isTimeValid (time: Time): Boolean = { + if (!isInitialized) + throw new Exception (this.toString + " has not been initialized") + if ((time - zeroTime).isMultipleOf(slideTime)) { + true + } else { + false + } + } + + // This method either retrieves a precomputed RDD of this RDS, + // or computes the RDD (if the time is valid) + def getOrCompute(time: Time): Option[RDD[T]] = { + + // if RDD was already generated, then retrieve it from HashMap + generatedRDDs.get(time) match { + + // If an RDD was already generated and is being reused, then + // probably all RDDs in this RDS will be reused and hence should be cached + case Some(oldRDD) => Some(oldRDD) + + // if RDD was not generated, and if the time is valid + // (based on sliding time of this RDS), then generate the RDD + case None => + if (isTimeValid(time)) { + compute(time) match { + case Some(newRDD) => + if (System.getProperty("spark.fake", "false") != "true" || + newRDD.getStorageLevel == StorageLevel.NONE) { + if (checkpointInterval != null && (time - zeroTime).isMultipleOf(checkpointInterval)) { + newRDD.persist(checkpointLevel) + logInfo("Persisting " + newRDD + " to " + checkpointLevel + " at time " + time) + } else if (storageLevel != StorageLevel.NONE) { + newRDD.persist(storageLevel) + logInfo("Persisting " + newRDD + " to " + storageLevel + " at time " + time) + } + } + generatedRDDs.put(time.copy(), newRDD) + Some(newRDD) + case None => + None + } + } else { + None + } + } + } + + // This method generates a SparkStream job for the given time + // and may require to be overriden by subclasses + def generateJob(time: Time): Option[Job] = { + getOrCompute(time) match { + case Some(rdd) => { + val jobFunc = () => { + val emptyFunc = { (iterator: Iterator[T]) => {} } + ssc.sc.runJob(rdd, emptyFunc) + } + Some(new Job(time, jobFunc)) + } + case None => None + } + } + + /* -------------- */ + /* RDS operations */ + /* -------------- */ + + def map[U: ClassManifest](mapFunc: T => U) = new MappedRDS(this, ssc.sc.clean(mapFunc)) + + def flatMap[U: ClassManifest](flatMapFunc: T => Traversable[U]) = + new FlatMappedRDS(this, ssc.sc.clean(flatMapFunc)) + + def filter(filterFunc: T => Boolean) = new FilteredRDS(this, filterFunc) + + def glom() = new GlommedRDS(this) + + def mapPartitions[U: ClassManifest](mapPartFunc: Iterator[T] => Iterator[U]) = + new MapPartitionedRDS(this, ssc.sc.clean(mapPartFunc)) + + def reduce(reduceFunc: (T, T) => T) = this.map(x => (1, x)).reduceByKey(reduceFunc, 1).map(_._2) + + def count() = this.map(_ => 1).reduce(_ + _) + + def collect() = this.map(x => (1, 
x)).groupByKey(1).map(_._2) + + def foreach(foreachFunc: T => Unit) = { + val newrds = new PerElementForEachRDS(this, ssc.sc.clean(foreachFunc)) + ssc.registerOutputStream(newrds) + newrds + } + + def foreachRDD(foreachFunc: RDD[T] => Unit) = { + val newrds = new PerRDDForEachRDS(this, ssc.sc.clean(foreachFunc)) + ssc.registerOutputStream(newrds) + newrds + } + + def print() = { + def foreachFunc = (rdd: RDD[T], time: Time) => { + val first11 = rdd.take(11) + println ("-------------------------------------------") + println ("Time: " + time) + println ("-------------------------------------------") + first11.take(10).foreach(println) + if (first11.size > 10) println("...") + println() + } + val newrds = new PerRDDForEachRDS(this, ssc.sc.clean(foreachFunc)) + ssc.registerOutputStream(newrds) + newrds + } + + def window(windowTime: Time, slideTime: Time) = new WindowedRDS(this, windowTime, slideTime) + + def batch(batchTime: Time) = window(batchTime, batchTime) + + def reduceByWindow(reduceFunc: (T, T) => T, windowTime: Time, slideTime: Time) = + this.window(windowTime, slideTime).reduce(reduceFunc) + + def reduceByWindow( + reduceFunc: (T, T) => T, + invReduceFunc: (T, T) => T, + windowTime: Time, + slideTime: Time) = { + this.map(x => (1, x)) + .reduceByKeyAndWindow(reduceFunc, invReduceFunc, windowTime, slideTime, 1) + .map(_._2) + } + + def countByWindow(windowTime: Time, slideTime: Time) = { + def add(v1: Int, v2: Int) = (v1 + v2) + def subtract(v1: Int, v2: Int) = (v1 - v2) + this.map(_ => 1).reduceByWindow(add _, subtract _, windowTime, slideTime) + } + + def union(that: RDS[T]) = new UnifiedRDS(Array(this, that)) + + def register() = ssc.registerOutputStream(this) +} + + +class PairRDSFunctions[K: ClassManifest, V: ClassManifest](rds: RDS[(K,V)]) +extends Serializable { + + def ssc = rds.ssc + + /* ---------------------------------- */ + /* RDS operations for key-value pairs */ + /* ---------------------------------- */ + + def groupByKey(numPartitions: Int = 0): ShuffledRDS[K, V, ArrayBuffer[V]] = { + def createCombiner(v: V) = ArrayBuffer[V](v) + def mergeValue(c: ArrayBuffer[V], v: V) = (c += v) + def mergeCombiner(c1: ArrayBuffer[V], c2: ArrayBuffer[V]) = (c1 ++ c2) + combineByKey[ArrayBuffer[V]](createCombiner, mergeValue, mergeCombiner, numPartitions) + } + + def reduceByKey(reduceFunc: (V, V) => V, numPartitions: Int = 0): ShuffledRDS[K, V, V] = { + val cleanedReduceFunc = ssc.sc.clean(reduceFunc) + combineByKey[V]((v: V) => v, cleanedReduceFunc, cleanedReduceFunc, numPartitions) + } + + private def combineByKey[C: ClassManifest]( + createCombiner: V => C, + mergeValue: (C, V) => C, + mergeCombiner: (C, C) => C, + numPartitions: Int) : ShuffledRDS[K, V, C] = { + new ShuffledRDS[K, V, C](rds, createCombiner, mergeValue, mergeCombiner, numPartitions) + } + + def groupByKeyAndWindow( + windowTime: Time, + slideTime: Time, + numPartitions: Int = 0): ShuffledRDS[K, V, ArrayBuffer[V]] = { + rds.window(windowTime, slideTime).groupByKey(numPartitions) + } + + def reduceByKeyAndWindow( + reduceFunc: (V, V) => V, + windowTime: Time, + slideTime: Time, + numPartitions: Int = 0): ShuffledRDS[K, V, V] = { + rds.window(windowTime, slideTime).reduceByKey(ssc.sc.clean(reduceFunc), numPartitions) + } + + // This method is the efficient sliding window reduce operation, + // which requires the specification of an inverse reduce function, + // so that new elements introduced in the window can be "added" using + // reduceFunc to the previous window's result and old elements can be + // "subtracted 
using invReduceFunc. + def reduceByKeyAndWindow( + reduceFunc: (V, V) => V, + invReduceFunc: (V, V) => V, + windowTime: Time, + slideTime: Time, + numPartitions: Int): ReducedWindowedRDS[K, V] = { + + new ReducedWindowedRDS[K, V]( + rds, + ssc.sc.clean(reduceFunc), + ssc.sc.clean(invReduceFunc), + windowTime, + slideTime, + numPartitions) + } +} + + +abstract class InputRDS[T: ClassManifest] ( + val inputName: String, + val batchDuration: Time, + ssc: SparkStreamContext) +extends RDS[T](ssc) { + + override def dependencies = List() + + override def slideTime = batchDuration + + def setReference(time: Time, reference: AnyRef) +} + + +class FileInputRDS( + val fileInputName: String, + val directory: String, + ssc: SparkStreamContext) +extends InputRDS[String](fileInputName, LongTime(1000), ssc) { + + @transient val generatedFiles = new HashMap[Time,String] + + // TODO(Haoyuan): This is for the performance test. + @transient + val rdd = ssc.sc.textFile(SparkContext.inputFile, + SparkContext.idealPartitions).asInstanceOf[RDD[String]] + + override def compute(validTime: Time): Option[RDD[String]] = { + generatedFiles.get(validTime) match { + case Some(file) => + logInfo("Reading from file " + file + " for time " + validTime) + // Some(ssc.sc.textFile(file).asInstanceOf[RDD[String]]) + // The following line is for HDFS performance test. Sould comment out the above line. + Some(rdd) + case None => + throw new Exception(this.toString + ": Reference missing for time " + validTime + "!!!") + None + } + } + + def setReference(time: Time, reference: AnyRef) { + generatedFiles += ((time, reference.toString)) + logInfo("Reference added for time " + time + " - " + reference.toString) + } +} + +class NetworkInputRDS[T: ClassManifest]( + val networkInputName: String, + val addresses: Array[InetSocketAddress], + batchDuration: Time, + ssc: SparkStreamContext) +extends InputRDS[T](networkInputName, batchDuration, ssc) { + + + // TODO(Haoyuan): This is for the performance test. 
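+  // In fake mode (spark.fake=true), a single cached RDD built from
+  // SparkContext.inputFile is reused for every interval instead of
+  // RDDs built from data received over the network.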
+ @transient var rdd: RDD[T] = null + + if (System.getProperty("spark.fake", "false") == "true") { + logInfo("Running initial count to cache fake RDD") + rdd = ssc.sc.textFile(SparkContext.inputFile, + SparkContext.idealPartitions).asInstanceOf[RDD[T]] + val fakeCacheLevel = System.getProperty("spark.fake.cache", "") + if (fakeCacheLevel == "MEMORY_ONLY_2") { + rdd.persist(StorageLevel.MEMORY_ONLY_2) + } else if (fakeCacheLevel == "MEMORY_ONLY_DESER_2") { + rdd.persist(StorageLevel.MEMORY_ONLY_2) + } else if (fakeCacheLevel != "") { + logError("Invalid fake cache level: " + fakeCacheLevel) + System.exit(1) + } + rdd.count() + } + + @transient val references = new HashMap[Time,String] + + override def compute(validTime: Time): Option[RDD[T]] = { + if (System.getProperty("spark.fake", "false") == "true") { + logInfo("Returning fake RDD at " + validTime) + return Some(rdd) + } + references.get(validTime) match { + case Some(reference) => + if (reference.startsWith("file") || reference.startsWith("hdfs")) { + logInfo("Reading from file " + reference + " for time " + validTime) + Some(ssc.sc.textFile(reference).asInstanceOf[RDD[T]]) + } else { + logInfo("Getting from BlockManager " + reference + " for time " + validTime) + Some(new BlockRDD(ssc.sc, Array(reference))) + } + case None => + throw new Exception(this.toString + ": Reference missing for time " + validTime + "!!!") + None + } + } + + def setReference(time: Time, reference: AnyRef) { + references += ((time, reference.toString)) + logInfo("Reference added for time " + time + " - " + reference.toString) + } +} + + +class TestInputRDS( + val testInputName: String, + batchDuration: Time, + ssc: SparkStreamContext) +extends InputRDS[String](testInputName, batchDuration, ssc) { + + @transient val references = new HashMap[Time,Array[String]] + + override def compute(validTime: Time): Option[RDD[String]] = { + references.get(validTime) match { + case Some(reference) => + Some(new BlockRDD[String](ssc.sc, reference)) + case None => + throw new Exception(this.toString + ": Reference missing for time " + validTime + "!!!") + None + } + } + + def setReference(time: Time, reference: AnyRef) { + references += ((time, reference.asInstanceOf[Array[String]])) + } +} + + +class MappedRDS[T: ClassManifest, U: ClassManifest] ( + parent: RDS[T], + mapFunc: T => U) +extends RDS[U](parent.ssc) { + + override def dependencies = List(parent) + + override def slideTime: Time = parent.slideTime + + override def compute(validTime: Time): Option[RDD[U]] = { + parent.getOrCompute(validTime).map(_.map[U](mapFunc)) + } +} + + +class FlatMappedRDS[T: ClassManifest, U: ClassManifest]( + parent: RDS[T], + flatMapFunc: T => Traversable[U]) +extends RDS[U](parent.ssc) { + + override def dependencies = List(parent) + + override def slideTime: Time = parent.slideTime + + override def compute(validTime: Time): Option[RDD[U]] = { + parent.getOrCompute(validTime).map(_.flatMap(flatMapFunc)) + } +} + + +class FilteredRDS[T: ClassManifest](parent: RDS[T], filterFunc: T => Boolean) +extends RDS[T](parent.ssc) { + + override def dependencies = List(parent) + + override def slideTime: Time = parent.slideTime + + override def compute(validTime: Time): Option[RDD[T]] = { + parent.getOrCompute(validTime).map(_.filter(filterFunc)) + } +} + +class MapPartitionedRDS[T: ClassManifest, U: ClassManifest]( + parent: RDS[T], + mapPartFunc: Iterator[T] => Iterator[U]) +extends RDS[U](parent.ssc) { + + override def dependencies = List(parent) + + override def slideTime: Time = parent.slideTime 
+ + override def compute(validTime: Time): Option[RDD[U]] = { + parent.getOrCompute(validTime).map(_.mapPartitions[U](mapPartFunc)) + } +} + +class GlommedRDS[T: ClassManifest](parent: RDS[T]) extends RDS[Array[T]](parent.ssc) { + + override def dependencies = List(parent) + + override def slideTime: Time = parent.slideTime + + override def compute(validTime: Time): Option[RDD[Array[T]]] = { + parent.getOrCompute(validTime).map(_.glom()) + } +} + + +class ShuffledRDS[K: ClassManifest, V: ClassManifest, C: ClassManifest]( + parent: RDS[(K,V)], + createCombiner: V => C, + mergeValue: (C, V) => C, + mergeCombiner: (C, C) => C, + numPartitions: Int) + extends RDS [(K,C)] (parent.ssc) { + + override def dependencies = List(parent) + + override def slideTime: Time = parent.slideTime + + override def compute(validTime: Time): Option[RDD[(K,C)]] = { + parent.getOrCompute(validTime) match { + case Some(rdd) => + val newrdd = { + if (numPartitions > 0) { + rdd.combineByKey[C](createCombiner, mergeValue, mergeCombiner, numPartitions) + } else { + rdd.combineByKey[C](createCombiner, mergeValue, mergeCombiner) + } + } + Some(newrdd) + case None => None + } + } +} + + +class UnifiedRDS[T: ClassManifest](parents: Array[RDS[T]]) +extends RDS[T](parents(0).ssc) { + + if (parents.length == 0) { + throw new IllegalArgumentException("Empty array of parents") + } + + if (parents.map(_.ssc).distinct.size > 1) { + throw new IllegalArgumentException("Array of parents have different SparkStreamContexts") + } + + if (parents.map(_.slideTime).distinct.size > 1) { + throw new IllegalArgumentException("Array of parents have different slide times") + } + + override def dependencies = parents.toList + + override def slideTime: Time = parents(0).slideTime + + override def compute(validTime: Time): Option[RDD[T]] = { + val rdds = new ArrayBuffer[RDD[T]]() + parents.map(_.getOrCompute(validTime)).foreach(_ match { + case Some(rdd) => rdds += rdd + case None => throw new Exception("Could not generate RDD from a parent for unifying at time " + validTime) + }) + if (rdds.size > 0) { + Some(new UnionRDD(ssc.sc, rdds)) + } else { + None + } + } +} + + +class PerElementForEachRDS[T: ClassManifest] ( + parent: RDS[T], + foreachFunc: T => Unit) +extends RDS[Unit](parent.ssc) { + + override def dependencies = List(parent) + + override def slideTime: Time = parent.slideTime + + override def compute(validTime: Time): Option[RDD[Unit]] = None + + override def generateJob(time: Time): Option[Job] = { + parent.getOrCompute(time) match { + case Some(rdd) => + val jobFunc = () => { + val sparkJobFunc = { + (iterator: Iterator[T]) => iterator.foreach(foreachFunc) + } + ssc.sc.runJob(rdd, sparkJobFunc) + } + Some(new Job(time, jobFunc)) + case None => None + } + } +} + + +class PerRDDForEachRDS[T: ClassManifest] ( + parent: RDS[T], + foreachFunc: (RDD[T], Time) => Unit) +extends RDS[Unit](parent.ssc) { + + def this(parent: RDS[T], altForeachFunc: (RDD[T]) => Unit) = + this(parent, (rdd: RDD[T], time: Time) => altForeachFunc(rdd)) + + override def dependencies = List(parent) + + override def slideTime: Time = parent.slideTime + + override def compute(validTime: Time): Option[RDD[Unit]] = None + + override def generateJob(time: Time): Option[Job] = { + parent.getOrCompute(time) match { + case Some(rdd) => + val jobFunc = () => { + foreachFunc(rdd, time) + } + Some(new Job(time, jobFunc)) + case None => None + } + } +} diff --git a/streaming/src/main/scala/spark/stream/ReducedWindowedRDS.scala 
b/streaming/src/main/scala/spark/stream/ReducedWindowedRDS.scala
new file mode 100644
index 0000000000..d47654ccb9
--- /dev/null
+++ b/streaming/src/main/scala/spark/stream/ReducedWindowedRDS.scala
@@ -0,0 +1,218 @@
+package spark.stream
+
+import spark.stream.SparkStreamContext._
+
+import spark.RDD
+import spark.UnionRDD
+import spark.CoGroupedRDD
+import spark.HashPartitioner
+import spark.SparkContext._
+import spark.storage.StorageLevel
+
+import scala.collection.mutable.ArrayBuffer
+
+class ReducedWindowedRDS[K: ClassManifest, V: ClassManifest](
+    parent: RDS[(K, V)],
+    reduceFunc: (V, V) => V,
+    invReduceFunc: (V, V) => V,
+    _windowTime: Time,
+    _slideTime: Time,
+    numPartitions: Int)
+extends RDS[(K,V)](parent.ssc) {
+
+  if (!_windowTime.isMultipleOf(parent.slideTime))
+    throw new Exception("The window duration of ReducedWindowedRDS (" + _windowTime + ") " +
+      "must be a multiple of the slide duration of the parent RDS (" + parent.slideTime + ")")
+
+  if (!_slideTime.isMultipleOf(parent.slideTime))
+    throw new Exception("The slide duration of ReducedWindowedRDS (" + _slideTime + ") " +
+      "must be a multiple of the slide duration of the parent RDS (" + parent.slideTime + ")")
+
+  val reducedRDS = parent.reduceByKey(reduceFunc, numPartitions)
+  val allowPartialWindows = true
+  //reducedRDS.persist(StorageLevel.MEMORY_ONLY_DESER_2)
+
+  override def dependencies = List(reducedRDS)
+
+  def windowTime: Time = _windowTime
+
+  override def slideTime: Time = _slideTime
+
+  override def persist(
+      storageLevel: StorageLevel,
+      checkpointLevel: StorageLevel,
+      checkpointInterval: Time): RDS[(K,V)] = {
+    super.persist(storageLevel, checkpointLevel, checkpointInterval)
+    reducedRDS.persist(storageLevel, checkpointLevel, checkpointInterval)
+  }
+
+  override def compute(validTime: Time): Option[RDD[(K, V)]] = {
+
+    // Notation:
+    //   _____________________________
+    //  |  previous window   _________|___________________
+    //  |___________________|       current window        |  --------------> Time
+    //                      |_____________________________|
+    //
+    //  |________ _________|          |________ _________|
+    //           |                             |
+    //           V                             V
+    //      old time steps                new time steps
+    //
+    def getAdjustedWindow(endTime: Time, windowTime: Time): Interval = {
+      val beginTime =
+        if (allowPartialWindows && endTime - windowTime < parent.zeroTime) {
+          parent.zeroTime
+        } else {
+          endTime - windowTime
+        }
+      Interval(beginTime, endTime)
+    }
+
+    val currentTime = validTime.copy
+    val currentWindow = getAdjustedWindow(currentTime, windowTime)
+    val previousWindow = getAdjustedWindow(currentTime - slideTime, windowTime)
+
+    logInfo("Current window = " + currentWindow)
+    logInfo("Previous window = " + previousWindow)
+    logInfo("Parent.zeroTime = " + parent.zeroTime)
+
+    if (allowPartialWindows) {
+      if (currentTime - slideTime == parent.zeroTime) {
+        reducedRDS.getOrCompute(currentTime) match {
+          case Some(rdd) => return Some(rdd)
+          case None => throw new Exception("Could not get first reduced RDD for time " + currentTime)
+        }
+      }
+    } else {
+      if (previousWindow.beginTime < parent.zeroTime) {
+        if (currentWindow.beginTime < parent.zeroTime) {
+          return None
+        } else {
+          // If this is the first feasible window, then generate reduced value in the naive manner
+          val reducedRDDs = new ArrayBuffer[RDD[(K, V)]]()
+          var t = currentWindow.endTime
+          while (t > currentWindow.beginTime) {
+            reducedRDS.getOrCompute(t) match {
+              case Some(rdd) => reducedRDDs += rdd
+              case None => throw new Exception("Could not get reduced RDD for time " + t)
+            }
+            t -= reducedRDS.slideTime
+          }
+          if (reducedRDDs.size == 0) {
+            throw new Exception("Could not generate the first RDD for time " + validTime)
+          }
+          return Some(new UnionRDD(ssc.sc, reducedRDDs).reduceByKey(reduceFunc, numPartitions))
+        }
+      }
+    }
+
+    // Get the RDD of the reduced value of the previous window
+    val previousWindowRDD = getOrCompute(previousWindow.endTime) match {
+      case Some(rdd) => rdd.asInstanceOf[RDD[(_, _)]]
+      case None => throw new Exception("Could not get previous RDD for time " + previousWindow.endTime)
+    }
+
+    val oldRDDs = new ArrayBuffer[RDD[(_, _)]]()
+    val newRDDs = new ArrayBuffer[RDD[(_, _)]]()
+
+    // Get the RDDs of the reduced values in "old time steps"
+    var t = currentWindow.beginTime
+    while (t > previousWindow.beginTime) {
+      reducedRDS.getOrCompute(t) match {
+        case Some(rdd) => oldRDDs += rdd.asInstanceOf[RDD[(_, _)]]
+        case None => throw new Exception("Could not get old reduced RDD for time " + t)
+      }
+      t -= reducedRDS.slideTime
+    }
+
+    // Get the RDDs of the reduced values in "new time steps"
+    t = currentWindow.endTime
+    while (t > previousWindow.endTime) {
+      reducedRDS.getOrCompute(t) match {
+        case Some(rdd) => newRDDs += rdd.asInstanceOf[RDD[(_, _)]]
+        case None => throw new Exception("Could not get new reduced RDD for time " + t)
+      }
+      t -= reducedRDS.slideTime
+    }
+
+    val partitioner = new HashPartitioner(numPartitions)
+    val allRDDs = new ArrayBuffer[RDD[(_, _)]]()
+    allRDDs += previousWindowRDD
+    allRDDs ++= oldRDDs
+    allRDDs ++= newRDDs
+
+    val numOldRDDs = oldRDDs.size
+    val numNewRDDs = newRDDs.size
+    logInfo("Generated numOldRDDs = " + numOldRDDs + ", numNewRDDs = " + numNewRDDs)
+    logInfo("Generating CoGroupedRDD with " + allRDDs.size + " RDDs")
+    val newRDD = new CoGroupedRDD[K](allRDDs.toSeq, partitioner).asInstanceOf[RDD[(K,Seq[Seq[V]])]].map(x => {
+      val (key, value) = x
+      logDebug("value.size = " + value.size + ", numOldRDDs = " + numOldRDDs + ", numNewRDDs = " + numNewRDDs)
+      if (value.size != 1 + numOldRDDs + numNewRDDs) {
+        throw new Exception("Unexpected number of cogrouped value sequences")
+      }
+
+      // old values = reduced values of the "old time steps" that are eliminated from current window
+      // new values = reduced values of the "new time steps" that are introduced to the current window
+      // previous value = reduced value of the previous window
+
+      /*val numOldValues = (value.size - 1) / 2*/
+      // Getting reduced values of "old time steps"
+      val oldValues =
+        (0 until numOldRDDs).map(i => value(1 + i)).filter(_.size > 0).map(x => x(0))
+      // Getting reduced values of "new time steps"
+      val newValues =
+        (0 until numNewRDDs).map(i => value(1 + numOldRDDs + i)).filter(_.size > 0).map(x => x(0))
+
+      // If the reduced value for the key does not exist in the previous window, it should not exist in "old time steps" either
+      if (value(0).size == 0 && oldValues.size != 0) {
+        throw new Exception("Unexpected: Key exists in old reduced values but not in previous reduced values")
+      }
+
+      // For the key, at least one of "old time steps", "new time steps" and previous window should have reduced values
+      if (value(0).size == 0 && oldValues.size == 0 && newValues.size == 0) {
+        throw new Exception("Unexpected: Key does not exist in any of old, new, or previous reduced values")
+      }
+
+      // Logic to generate the final reduced value for current window:
+      //
+      // If previous window did not have reduced value for the key
+      //   Then, return reduced value of "new time steps" as the final value
+      // Else, reduced value exists in previous window
+      //   If "old time steps" did not have reduced value for the key
+      //     Then, reduce previous window's reduced value with that of "new time steps" for final value
+      //   Else, reduced value exists in "old time steps"
+      //     If "new time steps" did not have reduced value for the key
+      //       Then, inverse-reduce "old values" from previous window's reduced value for final value
+      //     Else, all 3 values exist, combine all of them together
+      //
+      logDebug("# old values = " + oldValues.size + ", # new values = " + newValues.size)
+      val finalValue = {
+        if (value(0).size == 0) {
+          newValues.reduce(reduceFunc)
+        } else {
+          val prevValue = value(0)(0)
+          logDebug("prev value = " + prevValue)
+          if (oldValues.size == 0) {
+            // assuming newValues.size > 0 (all 3 cannot be zero, as checked earlier)
+            val temp = newValues.reduce(reduceFunc)
+            reduceFunc(prevValue, temp)
+          } else if (newValues.size == 0) {
+            invReduceFunc(prevValue, oldValues.reduce(reduceFunc))
+          } else {
+            val tempValue = invReduceFunc(prevValue, oldValues.reduce(reduceFunc))
+            reduceFunc(tempValue, newValues.reduce(reduceFunc))
+          }
+        }
+      }
+      (key, finalValue)
+    })
+    //newRDD.persist(StorageLevel.MEMORY_ONLY_DESER_2)
+    Some(newRDD)
+  }
+}
+
+
diff --git a/streaming/src/main/scala/spark/stream/Scheduler.scala b/streaming/src/main/scala/spark/stream/Scheduler.scala
new file mode 100644
index 0000000000..38946fef11
--- /dev/null
+++ b/streaming/src/main/scala/spark/stream/Scheduler.scala
@@ -0,0 +1,181 @@
+package spark.stream
+
+import spark.SparkEnv
+import spark.Logging
+
+import scala.collection.mutable.HashMap
+import scala.collection.mutable.HashSet
+import scala.collection.mutable.ArrayBuffer
+
+import akka.actor._
+import akka.actor.Actor
+import akka.actor.Actor._
+import akka.util.duration._
+
+sealed trait SchedulerMessage
+case class InputGenerated(inputName: String, interval: Interval, reference: AnyRef = null) extends SchedulerMessage
+case class Test() extends SchedulerMessage
+
+class Scheduler(
+    ssc: SparkStreamContext,
+    inputRDSs: Array[InputRDS[_]],
+    outputRDSs: Array[RDS[_]])
+extends Actor with Logging {
+
+  class InputState (inputNames: Array[String]) {
+    val inputsLeft = new HashSet[String]()
+    inputsLeft ++= inputNames
+
+    val startTime = System.currentTimeMillis
+
+    def delay() = System.currentTimeMillis - startTime
+
+    def addGeneratedInput(inputName: String) = inputsLeft -= inputName
+
+    def areAllInputsGenerated() = (inputsLeft.size == 0)
+
+    override def toString(): String = {
+      val left = if (inputsLeft.size == 0) "" else inputsLeft.reduceLeft(_ + ", " + _)
+      return "Inputs left = [ " + left + " ]"
+    }
+  }
+
+
+  initLogging()
+
+  val inputNames = inputRDSs.map(_.inputName).toArray
+  val inputStates = new HashMap[Interval, InputState]()
+  val currentJobs = System.getProperty("spark.stream.currentJobs", "1").toInt
+  val jobManager = new JobManager2(ssc, currentJobs)
+
+  // TODO(Haoyuan): The following line is for performance test only.
+ var cnt: Int = System.getProperty("spark.stream.fake.cnt", "60").toInt + var lastInterval: Interval = null + + + /*remote.register("SparkStreamScheduler", actorOf[Scheduler])*/ + logInfo("Registered actor on port ") + + /*jobManager.start()*/ + startStreamReceivers() + + def receive = { + case InputGenerated(inputName, interval, reference) => { + addGeneratedInput(inputName, interval, reference) + } + case Test() => logInfo("TEST PASSED") + } + + def addGeneratedInput(inputName: String, interval: Interval, reference: AnyRef = null) { + logInfo("Input " + inputName + " generated for interval " + interval) + inputStates.get(interval) match { + case None => inputStates.put(interval, new InputState(inputNames)) + case _ => + } + inputStates(interval).addGeneratedInput(inputName) + + inputRDSs.filter(_.inputName == inputName).foreach(inputRDS => { + inputRDS.setReference(interval.endTime, reference) + if (inputRDS.isInstanceOf[TestInputRDS]) { + TestInputBlockTracker.addBlocks(interval.endTime, reference) + } + } + ) + + def getNextInterval(): Option[Interval] = { + logDebug("Last interval is " + lastInterval) + val readyIntervals = inputStates.filter(_._2.areAllInputsGenerated).keys + /*inputState.foreach(println) */ + logDebug("InputState has " + inputStates.size + " intervals, " + readyIntervals.size + " ready intervals") + return readyIntervals.find(lastInterval == null || _.beginTime == lastInterval.endTime) + } + + var nextInterval = getNextInterval() + var count = 0 + while(nextInterval.isDefined) { + val inputState = inputStates.get(nextInterval.get).get + generateRDDsForInterval(nextInterval.get) + logInfo("Skew delay for " + nextInterval.get.endTime + " is " + (inputState.delay / 1000.0) + " s") + inputStates.remove(nextInterval.get) + lastInterval = nextInterval.get + nextInterval = getNextInterval() + count += 1 + /*if (nextInterval.size == 0 && inputState.size > 0) { + logDebug("Next interval not ready, pending intervals " + inputState.size) + }*/ + } + logDebug("RDDs generated for " + count + " intervals") + + /* + if (inputState(interval).areAllInputsGenerated) { + generateRDDsForInterval(interval) + lastInterval = interval + inputState.remove(interval) + } else { + logInfo("All inputs not generated for interval " + interval) + } + */ + } + + def generateRDDsForInterval (interval: Interval) { + logInfo("Generating RDDs for interval " + interval) + outputRDSs.foreach(outputRDS => { + if (!outputRDS.isInitialized) outputRDS.initialize(interval) + outputRDS.generateJob(interval.endTime) match { + case Some(job) => submitJob(job) + case None => + } + } + ) + // TODO(Haoyuan): This comment is for performance test only. + if (System.getProperty("spark.fake", "false") == "true" || System.getProperty("spark.stream.fake", "false") == "true") { + cnt -= 1 + if (cnt <= 0) { + logInfo("My time is up! " + cnt) + System.exit(1) + } + } + } + + def submitJob(job: Job) { + logInfo("Submitting " + job + " to JobManager") + /*jobManager ! 
RunJob(job)*/ + jobManager.runJob(job) + } + + def startStreamReceivers() { + val testStreamReceiverNames = new ArrayBuffer[(String, Long)]() + inputRDSs.foreach (inputRDS => { + inputRDS match { + case fileInputRDS: FileInputRDS => { + val fileStreamReceiver = new FileStreamReceiver( + fileInputRDS.inputName, + fileInputRDS.directory, + fileInputRDS.batchDuration.asInstanceOf[LongTime].milliseconds) + fileStreamReceiver.start() + } + case networkInputRDS: NetworkInputRDS[_] => { + val networkStreamReceiver = new NetworkStreamReceiver( + networkInputRDS.inputName, + networkInputRDS.batchDuration, + 0, + ssc, + if (ssc.tempDir == null) null else ssc.tempDir.toString) + networkStreamReceiver.start() + } + case testInputRDS: TestInputRDS => { + testStreamReceiverNames += + ((testInputRDS.inputName, testInputRDS.batchDuration.asInstanceOf[LongTime].milliseconds)) + } + } + }) + if (testStreamReceiverNames.size > 0) { + /*val testStreamCoordinator = new TestStreamCoordinator(testStreamReceiverNames.toArray)*/ + /*testStreamCoordinator.start()*/ + val actor = ssc.actorSystem.actorOf( + Props(new TestStreamCoordinator(testStreamReceiverNames.toArray)), + name = "TestStreamCoordinator") + } + } +} + diff --git a/streaming/src/main/scala/spark/stream/SenGeneratorForPerformanceTest.scala b/streaming/src/main/scala/spark/stream/SenGeneratorForPerformanceTest.scala new file mode 100644 index 0000000000..74fd54072f --- /dev/null +++ b/streaming/src/main/scala/spark/stream/SenGeneratorForPerformanceTest.scala @@ -0,0 +1,78 @@ +package spark.stream + +import scala.util.Random +import scala.io.Source +import scala.actors._ +import scala.actors.Actor._ +import scala.actors.remote._ +import scala.actors.remote.RemoteActor._ + +import java.net.InetSocketAddress + +/*import akka.actor.Actor._*/ +/*import akka.actor.ActorRef*/ + + +object SenGeneratorForPerformanceTest { + + def printUsage () { + println ("Usage: SentenceGenerator []") + System.exit(0) + } + + def main (args: Array[String]) { + if (args.length < 3) { + printUsage + } + + val inputManagerIP = args(0) + val inputManagerPort = args(1).toInt + val sentenceFile = args(2) + val sentencesPerSecond = { + if (args.length > 3) args(3).toInt + else 10 + } + + val source = Source.fromFile(sentenceFile) + val lines = source.mkString.split ("\n") + source.close () + + try { + /*val inputManager = remote.actorFor("InputReceiver-Sentences",*/ + /* inputManagerIP, inputManagerPort)*/ + val inputManager = select(Node(inputManagerIP, inputManagerPort), Symbol("InputReceiver-Sentences")) + val sleepBetweenSentences = 1000.0 / sentencesPerSecond.toDouble - 1 + val random = new Random () + println ("Sending " + sentencesPerSecond + " sentences per second to " + inputManagerIP + ":" + inputManagerPort) + var lastPrintTime = System.currentTimeMillis() + var count = 0 + + while (true) { + /*if (!inputManager.tryTell (lines (random.nextInt (lines.length))))*/ + /*throw new Exception ("disconnected")*/ +// inputManager ! lines (random.nextInt (lines.length)) + for (i <- 0 to sentencesPerSecond) inputManager ! 
lines (0) + println(System.currentTimeMillis / 1000 + " s") +/* count += 1 + + if (System.currentTimeMillis - lastPrintTime >= 1000) { + println (count + " sentences sent last second") + count = 0 + lastPrintTime = System.currentTimeMillis + } + + Thread.sleep (sleepBetweenSentences.toLong) +*/ + val currentMs = System.currentTimeMillis / 1000; + Thread.sleep ((currentMs * 1000 + 1000) - System.currentTimeMillis) + } + } catch { + case e: Exception => + /*Thread.sleep (1000)*/ + } + } +} + + + + diff --git a/streaming/src/main/scala/spark/stream/SenderReceiverTest.scala b/streaming/src/main/scala/spark/stream/SenderReceiverTest.scala new file mode 100644 index 0000000000..69879b621c --- /dev/null +++ b/streaming/src/main/scala/spark/stream/SenderReceiverTest.scala @@ -0,0 +1,63 @@ +package spark.stream +import java.net.{Socket, ServerSocket} +import java.io.{ByteArrayOutputStream, DataOutputStream, DataInputStream, BufferedInputStream} + +object Receiver { + def main(args: Array[String]) { + val port = args(0).toInt + val lsocket = new ServerSocket(port) + println("Listening on port " + port ) + while(true) { + val socket = lsocket.accept() + (new Thread() { + override def run() { + val buffer = new Array[Byte](100000) + var count = 0 + val time = System.currentTimeMillis + try { + val is = new DataInputStream(new BufferedInputStream(socket.getInputStream)) + var loop = true + var string: String = null + while((string = is.readUTF) != null) { + count += 28 + } + } catch { + case e: Exception => e.printStackTrace + } + val timeTaken = System.currentTimeMillis - time + val tput = (count / 1024.0) / (timeTaken / 1000.0) + println("Data = " + count + " bytes\nTime = " + timeTaken + " ms\nTput = " + tput + " KB/s") + } + }).start() + } + } + +} + +object Sender { + + def main(args: Array[String]) { + try { + val host = args(0) + val port = args(1).toInt + val size = args(2).toInt + + val byteStream = new ByteArrayOutputStream() + val stringDataStream = new DataOutputStream(byteStream) + (0 until size).foreach(_ => stringDataStream.writeUTF("abcdedfghijklmnopqrstuvwxy")) + val bytes = byteStream.toByteArray() + println("Generated array of " + bytes.length + " bytes") + + /*val bytes = new Array[Byte](size)*/ + val socket = new Socket(host, port) + val os = socket.getOutputStream + os.write(bytes) + os.flush + socket.close() + + } catch { + case e: Exception => e.printStackTrace + } + } +} + diff --git a/streaming/src/main/scala/spark/stream/SentenceFileGenerator.scala b/streaming/src/main/scala/spark/stream/SentenceFileGenerator.scala new file mode 100644 index 0000000000..9aa441d9bb --- /dev/null +++ b/streaming/src/main/scala/spark/stream/SentenceFileGenerator.scala @@ -0,0 +1,92 @@ +package spark.stream + +import spark._ + +import scala.collection.mutable.ArrayBuffer +import scala.util.Random +import scala.io.Source + +import java.net.InetSocketAddress + +import org.apache.hadoop.fs._ +import org.apache.hadoop.conf._ +import org.apache.hadoop.io._ +import org.apache.hadoop.mapred._ +import org.apache.hadoop.util._ + +object SentenceFileGenerator { + + def printUsage () { + println ("Usage: SentenceFileGenerator <# partitions> []") + System.exit(0) + } + + def main (args: Array[String]) { + if (args.length < 4) { + printUsage + } + + val master = args(0) + val fs = new Path(args(1)).getFileSystem(new Configuration()) + val targetDirectory = new Path(args(1)).makeQualified(fs) + val numPartitions = args(2).toInt + val sentenceFile = args(3) + val sentencesPerSecond = { + if (args.length > 4) 
args(4).toInt + else 10 + } + + val source = Source.fromFile(sentenceFile) + val lines = source.mkString.split ("\n").toArray + source.close () + println("Read " + lines.length + " lines from file " + sentenceFile) + + val sentences = { + val buffer = ArrayBuffer[String]() + val random = new Random() + var i = 0 + while (i < sentencesPerSecond) { + buffer += lines(random.nextInt(lines.length)) + i += 1 + } + buffer.toArray + } + println("Generated " + sentences.length + " sentences") + + val sc = new SparkContext(master, "SentenceFileGenerator") + val sentencesRDD = sc.parallelize(sentences, numPartitions) + + val tempDirectory = new Path(targetDirectory, "_tmp") + + fs.mkdirs(targetDirectory) + fs.mkdirs(tempDirectory) + + var saveTimeMillis = System.currentTimeMillis + try { + while (true) { + val newDir = new Path(targetDirectory, "Sentences-" + saveTimeMillis) + val tmpNewDir = new Path(tempDirectory, "Sentences-" + saveTimeMillis) + println("Writing to file " + newDir) + sentencesRDD.saveAsTextFile(tmpNewDir.toString) + fs.rename(tmpNewDir, newDir) + saveTimeMillis += 1000 + val sleepTimeMillis = { + val currentTimeMillis = System.currentTimeMillis + if (saveTimeMillis < currentTimeMillis) { + 0 + } else { + saveTimeMillis - currentTimeMillis + } + } + println("Sleeping for " + sleepTimeMillis + " ms") + Thread.sleep(sleepTimeMillis) + } + } catch { + case e: Exception => + } + } +} + + + + diff --git a/streaming/src/main/scala/spark/stream/SentenceGenerator.scala b/streaming/src/main/scala/spark/stream/SentenceGenerator.scala new file mode 100644 index 0000000000..ef66e66047 --- /dev/null +++ b/streaming/src/main/scala/spark/stream/SentenceGenerator.scala @@ -0,0 +1,103 @@ +package spark.stream + +import scala.util.Random +import scala.io.Source +import scala.actors._ +import scala.actors.Actor._ +import scala.actors.remote._ +import scala.actors.remote.RemoteActor._ + +import java.net.InetSocketAddress + + +object SentenceGenerator { + + def printUsage { + println ("Usage: SentenceGenerator []") + System.exit(0) + } + + def generateRandomSentences(lines: Array[String], sentencesPerSecond: Int, streamReceiver: AbstractActor) { + val sleepBetweenSentences = 1000.0 / sentencesPerSecond.toDouble - 1 + val random = new Random () + + try { + var lastPrintTime = System.currentTimeMillis() + var count = 0 + while(true) { + streamReceiver ! lines(random.nextInt(lines.length)) + count += 1 + if (System.currentTimeMillis - lastPrintTime >= 1000) { + println (count + " sentences sent last second") + count = 0 + lastPrintTime = System.currentTimeMillis + } + Thread.sleep(sleepBetweenSentences.toLong) + } + } catch { + case e: Exception => + } + } + + def generateSameSentences(lines: Array[String], sentencesPerSecond: Int, streamReceiver: AbstractActor) { + try { + val numSentences = if (sentencesPerSecond <= 0) { + lines.length + } else { + sentencesPerSecond + } + var nextSendingTime = System.currentTimeMillis() + val pingInterval = if (System.getenv("INTERVAL") != null) { + System.getenv("INTERVAL").toInt + } else { + 2000 + } + while(true) { + (0 until numSentences).foreach(i => { + streamReceiver ! 
lines(i % lines.length)
+        })
+        println ("Sent " + numSentences + " sentences")
+        nextSendingTime += pingInterval
+        val sleepTime = nextSendingTime - System.currentTimeMillis
+        if (sleepTime > 0) {
+          println ("Sleeping for " + sleepTime + " ms")
+          Thread.sleep(sleepTime)
+        }
+      }
+    } catch {
+      case e: Exception =>
+    }
+  }
+
+  def main(args: Array[String]) {
+    if (args.length < 3) {
+      printUsage
+    }
+
+    val generateRandomly = false
+
+    val streamReceiverIP = args(0)
+    val streamReceiverPort = args(1).toInt
+    val sentenceFile = args(2)
+    val sentencesPerSecond = if (args.length > 3) args(3).toInt else 10
+    val sentenceInputName = if (args.length > 4) args(4) else "Sentences"
+
+    println("Sending " + sentencesPerSecond + " sentences per second to " +
+      streamReceiverIP + ":" + streamReceiverPort + "/NetworkStreamReceiver-" + sentenceInputName)
+    val source = Source.fromFile(sentenceFile)
+    val lines = source.mkString.split ("\n")
+    source.close ()
+
+    val streamReceiver = select(
+      Node(streamReceiverIP, streamReceiverPort),
+      Symbol("NetworkStreamReceiver-" + sentenceInputName))
+    if (generateRandomly) {
+      generateRandomSentences(lines, sentencesPerSecond, streamReceiver)
+    } else {
+      generateSameSentences(lines, sentencesPerSecond, streamReceiver)
+    }
+  }
+}
+
+
diff --git a/streaming/src/main/scala/spark/stream/ShuffleTest.scala b/streaming/src/main/scala/spark/stream/ShuffleTest.scala
new file mode 100644
index 0000000000..5ad56f6777
--- /dev/null
+++ b/streaming/src/main/scala/spark/stream/ShuffleTest.scala
@@ -0,0 +1,22 @@
+package spark.stream
+import spark.SparkContext
+import SparkContext._
+
+object ShuffleTest {
+  def main(args: Array[String]) {
+
+    if (args.length < 1) {
+      println ("Usage: ShuffleTest <master>")
+      System.exit(1)
+    }
+
+    val sc = new spark.SparkContext(args(0), "ShuffleTest")
+    val rdd = sc.parallelize(1 to 1000, 500).cache
+
+    def time(f: => Unit) { val start = System.nanoTime; f; println((System.nanoTime - start) * 1.0e-6) }
+
+    time { for (i <- 0 until 50) time { rdd.map(x => (x % 100, x)).reduceByKey(_ + _, 10).count } }
+    System.exit(0)
+  }
+}
+
diff --git a/streaming/src/main/scala/spark/stream/SimpleWordCount.scala b/streaming/src/main/scala/spark/stream/SimpleWordCount.scala
new file mode 100644
index 0000000000..c53fe35f44
--- /dev/null
+++ b/streaming/src/main/scala/spark/stream/SimpleWordCount.scala
@@ -0,0 +1,30 @@
+package spark.stream
+
+import SparkStreamContext._
+
+import scala.util.Sorting
+
+object SimpleWordCount {
+
+  def main (args: Array[String]) {
+
+    if (args.length < 1) {
+      println ("Usage: SimpleWordCount <host> [<temp directory>]")
+      System.exit(1)
+    }
+
+    val ssc = new SparkStreamContext(args(0), "WordCount")
+    if (args.length > 1) {
+      ssc.setTempDir(args(1))
+    }
+    val sentences = ssc.readNetworkStream[String]("Sentences", Array("localhost:55119"), 2000)
+    /*sentences.print*/
+
+    val words = sentences.flatMap(_.split(" "))
+
+    val counts = words.map(x => (x, 1)).reduceByKey(_ + _, 1)
+    counts.print
+
+    ssc.run
+  }
+}
diff --git a/streaming/src/main/scala/spark/stream/SimpleWordCount2.scala b/streaming/src/main/scala/spark/stream/SimpleWordCount2.scala
new file mode 100644
index 0000000000..1a2c67cd4d
--- /dev/null
+++ b/streaming/src/main/scala/spark/stream/SimpleWordCount2.scala
@@ -0,0 +1,51 @@
+package spark.stream
+
+import spark.SparkContext
+import SparkContext._
+import SparkStreamContext._
+
+import scala.util.Sorting
+
+object SimpleWordCount2 {
+
+  def moreWarmup(sc: SparkContext) {
+    (0 until 20).foreach {i =>
+      sc.parallelize(1 to 20000000, 500)
+
.map(_ % 100).map(_.toString) + .map(x => (x, 1)).reduceByKey(_ + _, 10) + .collect() + } + } + + def main (args: Array[String]) { + + if (args.length < 2) { + println ("Usage: SimpleWordCount2 <# sentence streams>") + System.exit(1) + } + + val ssc = new SparkStreamContext(args(0), "SimpleWordCount2") + + val numSentenceStreams = if (args.length > 1) args(1).toInt else 1 + if (args.length > 2) { + ssc.setTempDir(args(2)) + } + + GrepCount2.warmConnectionManagers(ssc.sc) + moreWarmup(ssc.sc) + + val sentences = new UnifiedRDS( + (1 to numSentenceStreams).map(i => ssc.readTestStream("Sentences-" + i, 1000)).toArray + ) + + + val words = sentences.flatMap(_.split(" ")) + + val counts = words.map(x => (x, 1)).reduceByKey(_ + _, 10) + counts.foreachRDD(_.collect()) + /*words.foreachRDD(_.countByValue())*/ + + ssc.run + } +} + diff --git a/streaming/src/main/scala/spark/stream/SimpleWordCount2_Special.scala b/streaming/src/main/scala/spark/stream/SimpleWordCount2_Special.scala new file mode 100644 index 0000000000..9003a5dbb3 --- /dev/null +++ b/streaming/src/main/scala/spark/stream/SimpleWordCount2_Special.scala @@ -0,0 +1,83 @@ +package spark.stream + +import spark.SparkContext +import SparkContext._ +import SparkStreamContext._ + +import scala.collection.JavaConversions.mapAsScalaMap +import scala.util.Sorting +import java.lang.{Long => JLong} + +object SimpleWordCount2_Special { + + def moreWarmup(sc: SparkContext) { + (0 until 20).foreach {i => + sc.parallelize(1 to 20000000, 500) + .map(_ % 100).map(_.toString) + .map(x => (x, 1)).reduceByKey(_ + _, 10) + .collect() + } + } + + def main (args: Array[String]) { + + if (args.length < 2) { + println ("Usage: SimpleWordCount2 <# sentence streams>") + System.exit(1) + } + + val ssc = new SparkStreamContext(args(0), "SimpleWordCount2") + + val numSentenceStreams = if (args.length > 1) args(1).toInt else 1 + if (args.length > 2) { + ssc.setTempDir(args(2)) + } + + GrepCount2.warmConnectionManagers(ssc.sc) + moreWarmup(ssc.sc) + + val sentences = new UnifiedRDS( + (1 to numSentenceStreams).map(i => ssc.readTestStream("Sentences-" + i, 400)).toArray + ) + + + def splitAndCountPartitions(iter: Iterator[String]): Iterator[(String, JLong)] = { + val map = new java.util.HashMap[String, JLong] + var i = 0 + var j = 0 + while (iter.hasNext) { + val s = iter.next() + i = 0 + while (i < s.length) { + j = i + while (j < s.length && s.charAt(j) != ' ') { + j += 1 + } + if (j > i) { + val w = s.substring(i, j) + val c = map.get(w) + if (c == null) { + map.put(w, 1) + } else { + map.put(w, c + 1) + } + } + i = j + while (i < s.length && s.charAt(i) == ' ') { + i += 1 + } + } + } + map.toIterator + } + + + /*val words = sentences.flatMap(_.split(" "))*/ + /*val counts = words.map(x => (x, 1)).reduceByKey(_ + _, 10)*/ + val counts = sentences.mapPartitions(splitAndCountPartitions).reduceByKey(_ + _, 10) + counts.foreachRDD(_.collect()) + + ssc.run + } +} + diff --git a/streaming/src/main/scala/spark/stream/SparkStreamContext.scala b/streaming/src/main/scala/spark/stream/SparkStreamContext.scala new file mode 100644 index 0000000000..0e65196e46 --- /dev/null +++ b/streaming/src/main/scala/spark/stream/SparkStreamContext.scala @@ -0,0 +1,105 @@ +package spark.stream + +import spark.SparkContext +import spark.SparkEnv +import spark.Utils +import spark.Logging + +import scala.collection.mutable.ArrayBuffer + +import java.net.InetSocketAddress +import java.io.IOException +import java.util.UUID + +import org.apache.hadoop.fs.Path +import 
org.apache.hadoop.conf.Configuration + +import akka.actor._ +import akka.actor.Actor +import akka.util.duration._ + +class SparkStreamContext ( + master: String, + frameworkName: String, + val sparkHome: String = null, + val jars: Seq[String] = Nil) + extends Logging { + + initLogging() + + val sc = new SparkContext(master, frameworkName, sparkHome, jars) + val env = SparkEnv.get + val actorSystem = env.actorSystem + + @transient val inputRDSs = new ArrayBuffer[InputRDS[_]]() + @transient val outputRDSs = new ArrayBuffer[RDS[_]]() + + var tempDirRoot: String = null + var tempDir: Path = null + + def readNetworkStream[T: ClassManifest]( + name: String, + addresses: Array[InetSocketAddress], + batchDuration: Time): RDS[T] = { + + val inputRDS = new NetworkInputRDS[T](name, addresses, batchDuration, this) + inputRDSs += inputRDS + inputRDS + } + + def readNetworkStream[T: ClassManifest]( + name: String, + addresses: Array[String], + batchDuration: Long): RDS[T] = { + + def stringToInetSocketAddress (str: String): InetSocketAddress = { + val parts = str.split(":") + if (parts.length != 2) { + throw new IllegalArgumentException ("Address format error") + } + new InetSocketAddress(parts(0), parts(1).toInt) + } + + readNetworkStream( + name, + addresses.map(stringToInetSocketAddress).toArray, + LongTime(batchDuration)) + } + + def readFileStream(name: String, directory: String): RDS[String] = { + val path = new Path(directory) + val fs = path.getFileSystem(new Configuration()) + val qualPath = path.makeQualified(fs) + val inputRDS = new FileInputRDS(name, qualPath.toString, this) + inputRDSs += inputRDS + inputRDS + } + + def readTestStream(name: String, batchDuration: Long): RDS[String] = { + val inputRDS = new TestInputRDS(name, LongTime(batchDuration), this) + inputRDSs += inputRDS + inputRDS + } + + def registerOutputStream (outputRDS: RDS[_]) { + outputRDSs += outputRDS + } + + def setTempDir(dir: String) { + tempDirRoot = dir + } + + def run () { + val ctxt = this + val actor = actorSystem.actorOf( + Props(new Scheduler(ctxt, inputRDSs.toArray, outputRDSs.toArray)), + name = "SparkStreamScheduler") + logInfo("Registered actor") + actorSystem.awaitTermination() + } +} + +object SparkStreamContext { + implicit def rdsToPairRdsFunctions [K: ClassManifest, V: ClassManifest] (rds: RDS[(K,V)]) = + new PairRDSFunctions (rds) +} diff --git a/streaming/src/main/scala/spark/stream/TestGenerator.scala b/streaming/src/main/scala/spark/stream/TestGenerator.scala new file mode 100644 index 0000000000..738ce17452 --- /dev/null +++ b/streaming/src/main/scala/spark/stream/TestGenerator.scala @@ -0,0 +1,107 @@ +package spark.stream + +import scala.util.Random +import scala.io.Source +import scala.actors._ +import scala.actors.Actor._ +import scala.actors.remote._ +import scala.actors.remote.RemoteActor._ + +import java.net.InetSocketAddress + + +object TestGenerator { + + def printUsage { + println ("Usage: SentenceGenerator []") + System.exit(0) + } + /* + def generateRandomSentences(lines: Array[String], sentencesPerSecond: Int, streamReceiver: AbstractActor) { + val sleepBetweenSentences = 1000.0 / sentencesPerSecond.toDouble - 1 + val random = new Random () + + try { + var lastPrintTime = System.currentTimeMillis() + var count = 0 + while(true) { + streamReceiver ! 
lines(random.nextInt(lines.length)) + count += 1 + if (System.currentTimeMillis - lastPrintTime >= 1000) { + println (count + " sentences sent last second") + count = 0 + lastPrintTime = System.currentTimeMillis + } + Thread.sleep(sleepBetweenSentences.toLong) + } + } catch { + case e: Exception => + } + }*/ + + def generateSameSentences(lines: Array[String], sentencesPerSecond: Int, streamReceiver: AbstractActor) { + try { + val numSentences = if (sentencesPerSecond <= 0) { + lines.length + } else { + sentencesPerSecond + } + val sentences = lines.take(numSentences).toArray + + var nextSendingTime = System.currentTimeMillis() + val sendAsArray = true + while(true) { + if (sendAsArray) { + println("Sending as array") + streamReceiver !? sentences + } else { + println("Sending individually") + sentences.foreach(sentence => { + streamReceiver !? sentence + }) + } + println ("Sent " + numSentences + " sentences in " + (System.currentTimeMillis - nextSendingTime) + " ms") + nextSendingTime += 1000 + val sleepTime = nextSendingTime - System.currentTimeMillis + if (sleepTime > 0) { + println ("Sleeping for " + sleepTime + " ms") + Thread.sleep(sleepTime) + } + } + } catch { + case e: Exception => + } + } + + def main(args: Array[String]) { + if (args.length < 3) { + printUsage + } + + val generateRandomly = false + + val streamReceiverIP = args(0) + val streamReceiverPort = args(1).toInt + val sentenceFile = args(2) + val sentencesPerSecond = if (args.length > 3) args(3).toInt else 10 + val sentenceInputName = if (args.length > 4) args(4) else "Sentences" + + println("Sending " + sentencesPerSecond + " sentences per second to " + + streamReceiverIP + ":" + streamReceiverPort + "/NetworkStreamReceiver-" + sentenceInputName) + val source = Source.fromFile(sentenceFile) + val lines = source.mkString.split ("\n") + source.close () + + val streamReceiver = select( + Node(streamReceiverIP, streamReceiverPort), + Symbol("NetworkStreamReceiver-" + sentenceInputName)) + if (generateRandomly) { + /*generateRandomSentences(lines, sentencesPerSecond, streamReceiver)*/ + } else { + generateSameSentences(lines, sentencesPerSecond, streamReceiver) + } + } +} + + + diff --git a/streaming/src/main/scala/spark/stream/TestGenerator2.scala b/streaming/src/main/scala/spark/stream/TestGenerator2.scala new file mode 100644 index 0000000000..ceb4730e72 --- /dev/null +++ b/streaming/src/main/scala/spark/stream/TestGenerator2.scala @@ -0,0 +1,119 @@ +package spark.stream + +import scala.util.Random +import scala.io.Source +import scala.actors._ +import scala.actors.Actor._ +import scala.actors.remote._ +import scala.actors.remote.RemoteActor._ + +import java.io.{DataOutputStream, ByteArrayOutputStream, DataInputStream} +import java.net.Socket + +object TestGenerator2 { + + def printUsage { + println ("Usage: SentenceGenerator []") + System.exit(0) + } + + def sendSentences(streamReceiverHost: String, streamReceiverPort: Int, numSentences: Int, bytes: Array[Byte], intervalTime: Long){ + try { + println("Connecting to " + streamReceiverHost + ":" + streamReceiverPort) + val socket = new Socket(streamReceiverHost, streamReceiverPort) + + println("Sending " + numSentences+ " sentences / " + (bytes.length / 1024.0 / 1024.0) + " MB per " + intervalTime + " ms to " + streamReceiverHost + ":" + streamReceiverPort ) + val currentTime = System.currentTimeMillis + var targetTime = (currentTime / intervalTime + 1).toLong * intervalTime + Thread.sleep(targetTime - currentTime) + + while(true) { + val startTime = 
System.currentTimeMillis() + println("Sending at " + startTime + " ms with delay of " + (startTime - targetTime) + " ms") + val socketOutputStream = socket.getOutputStream + val parts = 10 + (0 until parts).foreach(i => { + val partStartTime = System.currentTimeMillis + + val offset = (i * bytes.length / parts).toInt + val len = math.min(((i + 1) * bytes.length / parts).toInt - offset, bytes.length) + socketOutputStream.write(bytes, offset, len) + socketOutputStream.flush() + val partFinishTime = System.currentTimeMillis + println("Sending part " + i + " of " + len + " bytes took " + (partFinishTime - partStartTime) + " ms") + val sleepTime = math.max(0, 1000 / parts - (partFinishTime - partStartTime) - 1) + Thread.sleep(sleepTime) + }) + + socketOutputStream.flush() + /*val socketInputStream = new DataInputStream(socket.getInputStream)*/ + /*val reply = socketInputStream.readUTF()*/ + val finishTime = System.currentTimeMillis() + println ("Sent " + bytes.length + " bytes in " + (finishTime - startTime) + " ms for interval [" + targetTime + ", " + (targetTime + intervalTime) + "]") + /*println("Received = " + reply)*/ + targetTime = targetTime + intervalTime + val sleepTime = (targetTime - finishTime) + 10 + if (sleepTime > 0) { + println("Sleeping for " + sleepTime + " ms") + Thread.sleep(sleepTime) + } else { + println("############################") + println("###### Skipping sleep ######") + println("############################") + } + } + } catch { + case e: Exception => println(e) + } + println("Stopped sending") + } + + def main(args: Array[String]) { + if (args.length < 4) { + printUsage + } + + val streamReceiverHost = args(0) + val streamReceiverPort = args(1).toInt + val sentenceFile = args(2) + val intervalTime = args(3).toLong + val sentencesPerInterval = if (args.length > 4) args(4).toInt else 0 + + println("Reading the file " + sentenceFile) + val source = Source.fromFile(sentenceFile) + val lines = source.mkString.split ("\n") + source.close() + + val numSentences = if (sentencesPerInterval <= 0) { + lines.length + } else { + sentencesPerInterval + } + + println("Generating sentences") + val sentences: Array[String] = if (numSentences <= lines.length) { + lines.take(numSentences).toArray + } else { + (0 until numSentences).map(i => lines(i % lines.length)).toArray + } + + println("Converting to byte array") + val byteStream = new ByteArrayOutputStream() + val stringDataStream = new DataOutputStream(byteStream) + /*stringDataStream.writeInt(sentences.size)*/ + sentences.foreach(stringDataStream.writeUTF) + val bytes = byteStream.toByteArray() + stringDataStream.close() + println("Generated array of " + bytes.length + " bytes") + + /*while(true) { */ + sendSentences(streamReceiverHost, streamReceiverPort, numSentences, bytes, intervalTime) + /*println("Sleeping for 5 seconds")*/ + /*Thread.sleep(5000)*/ + /*System.gc()*/ + /*}*/ + } +} + + + diff --git a/streaming/src/main/scala/spark/stream/TestGenerator4.scala b/streaming/src/main/scala/spark/stream/TestGenerator4.scala new file mode 100644 index 0000000000..edeb969d7c --- /dev/null +++ b/streaming/src/main/scala/spark/stream/TestGenerator4.scala @@ -0,0 +1,244 @@ +package spark.stream + +import spark.Logging + +import scala.util.Random +import scala.io.Source +import scala.collection.mutable.{ArrayBuffer, Queue} + +import java.net._ +import java.io._ +import java.nio._ +import java.nio.charset._ +import java.nio.channels._ + +import it.unimi.dsi.fastutil.io._ + +class TestGenerator4(targetHost: String, targetPort: Int, 
sentenceFile: String, intervalDuration: Long, sentencesPerInterval: Int) +extends Logging { + + class SendingConnectionHandler(host: String, port: Int, generator: TestGenerator4) + extends ConnectionHandler(host, port, true) { + + val buffers = new ArrayBuffer[ByteBuffer] + val newBuffers = new Queue[ByteBuffer] + var activeKey: SelectionKey = null + + def send(buffer: ByteBuffer) { + logDebug("Sending: " + buffer) + newBuffers.synchronized { + newBuffers.enqueue(buffer) + } + selector.wakeup() + buffer.synchronized { + buffer.wait() + } + } + + override def ready(key: SelectionKey) { + logDebug("Ready") + activeKey = key + val channel = key.channel.asInstanceOf[SocketChannel] + channel.register(selector, SelectionKey.OP_WRITE) + generator.startSending() + } + + override def preSelect() { + newBuffers.synchronized { + while(!newBuffers.isEmpty) { + val buffer = newBuffers.dequeue + buffers += buffer + logDebug("Added: " + buffer) + changeInterest(activeKey, SelectionKey.OP_WRITE) + } + } + } + + override def write(key: SelectionKey) { + try { + /*while(true) {*/ + val channel = key.channel.asInstanceOf[SocketChannel] + if (buffers.size > 0) { + val buffer = buffers(0) + val newBuffer = buffer.slice() + newBuffer.limit(math.min(newBuffer.remaining, 32768)) + val bytesWritten = channel.write(newBuffer) + buffer.position(buffer.position + bytesWritten) + if (bytesWritten == 0) return + if (buffer.remaining == 0) { + buffers -= buffer + buffer.synchronized { + buffer.notify() + } + } + /*changeInterest(key, SelectionKey.OP_WRITE)*/ + } else { + changeInterest(key, 0) + } + /*}*/ + } catch { + case e: IOException => { + if (e.toString.contains("pipe") || e.toString.contains("reset")) { + logError("Connection broken") + } else { + logError("Connection error", e) + } + close(key) + } + } + } + + override def close(key: SelectionKey) { + buffers.clear() + super.close(key) + } + } + + initLogging() + + val connectionHandler = new SendingConnectionHandler(targetHost, targetPort, this) + var sendingThread: Thread = null + var sendCount = 0 + val sendBatches = 5 + + def run() { + logInfo("Connection handler started") + connectionHandler.start() + connectionHandler.join() + if (sendingThread != null && !sendingThread.isInterrupted) { + sendingThread.interrupt + } + logInfo("Connection handler stopped") + } + + def startSending() { + sendingThread = new Thread() { + override def run() { + logInfo("STARTING TO SEND") + sendSentences() + logInfo("SENDING STOPPED AFTER " + sendCount) + connectionHandler.interrupt() + } + } + sendingThread.start() + } + + def stopSending() { + sendingThread.interrupt() + } + + def sendSentences() { + logInfo("Reading the file " + sentenceFile) + val source = Source.fromFile(sentenceFile) + val lines = source.mkString.split ("\n") + source.close() + + val numSentences = if (sentencesPerInterval <= 0) { + lines.length + } else { + sentencesPerInterval + } + + logInfo("Generating sentence buffer") + val sentences: Array[String] = if (numSentences <= lines.length) { + lines.take(numSentences).toArray + } else { + (0 until numSentences).map(i => lines(i % lines.length)).toArray + } + + /* + val sentences: Array[String] = if (numSentences <= lines.length) { + lines.take((numSentences / sendBatches).toInt).toArray + } else { + (0 until (numSentences/sendBatches)).map(i => lines(i % lines.length)).toArray + }*/ + + + val serializer = new spark.KryoSerializer().newInstance() + val byteStream = new FastByteArrayOutputStream(100 * 1024 * 1024) + 
serializer.serializeStream(byteStream).writeAll(sentences.toIterator.asInstanceOf[Iterator[Any]]).close() + byteStream.trim() + val sentenceBuffer = ByteBuffer.wrap(byteStream.array) + + logInfo("Sending " + numSentences+ " sentences / " + sentenceBuffer.limit + " bytes per " + intervalDuration + " ms to " + targetHost + ":" + targetPort ) + val currentTime = System.currentTimeMillis + var targetTime = (currentTime / intervalDuration + 1).toLong * intervalDuration + Thread.sleep(targetTime - currentTime) + + val totalBytes = sentenceBuffer.limit + + while(true) { + val batchesInCurrentInterval = sendBatches // if (sendCount < 10) 1 else sendBatches + + val startTime = System.currentTimeMillis() + logDebug("Sending # " + sendCount + " at " + startTime + " ms with delay of " + (startTime - targetTime) + " ms") + + (0 until batchesInCurrentInterval).foreach(i => { + try { + val position = (i * totalBytes / sendBatches).toInt + val limit = if (i == sendBatches - 1) { + totalBytes + } else { + ((i + 1) * totalBytes / sendBatches).toInt - 1 + } + + val partStartTime = System.currentTimeMillis + sentenceBuffer.limit(limit) + connectionHandler.send(sentenceBuffer) + val partFinishTime = System.currentTimeMillis + val sleepTime = math.max(0, intervalDuration / sendBatches - (partFinishTime - partStartTime) - 1) + Thread.sleep(sleepTime) + + } catch { + case ie: InterruptedException => return + case e: Exception => e.printStackTrace() + } + }) + sentenceBuffer.rewind() + + val finishTime = System.currentTimeMillis() + /*logInfo ("Sent " + sentenceBuffer.limit + " bytes in " + (finishTime - startTime) + " ms")*/ + targetTime = targetTime + intervalDuration //+ (if (sendCount < 3) 1000 else 0) + + val sleepTime = (targetTime - finishTime) + 20 + if (sleepTime > 0) { + logInfo("Sleeping for " + sleepTime + " ms") + Thread.sleep(sleepTime) + } else { + logInfo("###### Skipping sleep ######") + } + if (Thread.currentThread.isInterrupted) { + return + } + sendCount += 1 + } + } +} + +object TestGenerator4 { + def printUsage { + println("Usage: TestGenerator4 []") + System.exit(0) + } + + def main(args: Array[String]) { + println("GENERATOR STARTED") + if (args.length < 4) { + printUsage + } + + + val streamReceiverHost = args(0) + val streamReceiverPort = args(1).toInt + val sentenceFile = args(2) + val intervalDuration = args(3).toLong + val sentencesPerInterval = if (args.length > 4) args(4).toInt else 0 + + while(true) { + val generator = new TestGenerator4(streamReceiverHost, streamReceiverPort, sentenceFile, intervalDuration, sentencesPerInterval) + generator.run() + Thread.sleep(2000) + } + println("GENERATOR STOPPED") + } +} diff --git a/streaming/src/main/scala/spark/stream/TestInputBlockTracker.scala b/streaming/src/main/scala/spark/stream/TestInputBlockTracker.scala new file mode 100644 index 0000000000..da3b964407 --- /dev/null +++ b/streaming/src/main/scala/spark/stream/TestInputBlockTracker.scala @@ -0,0 +1,42 @@ +package spark.stream +import spark.Logging +import scala.collection.mutable.{ArrayBuffer, HashMap} + +object TestInputBlockTracker extends Logging { + initLogging() + val allBlockIds = new HashMap[Time, ArrayBuffer[String]]() + + def addBlocks(intervalEndTime: Time, reference: AnyRef) { + allBlockIds.getOrElseUpdate(intervalEndTime, new ArrayBuffer[String]()) ++= reference.asInstanceOf[Array[String]] + } + + def setEndTime(intervalEndTime: Time) { + try { + val endTime = System.currentTimeMillis + allBlockIds.get(intervalEndTime) match { + case Some(blockIds) => { + val numBlocks = 
blockIds.size + var totalDelay = 0d + blockIds.foreach(blockId => { + val inputTime = getInputTime(blockId) + val delay = (endTime - inputTime) / 1000.0 + totalDelay += delay + logInfo("End-to-end delay for block " + blockId + " is " + delay + " s") + }) + logInfo("Average end-to-end delay for time " + intervalEndTime + " is " + (totalDelay / numBlocks) + " s") + allBlockIds -= intervalEndTime + } + case None => throw new Exception("Unexpected") + } + } catch { + case e: Exception => logError(e.toString) + } + } + + def getInputTime(blockId: String): Long = { + val parts = blockId.split("-") + /*logInfo(blockId + " -> " + parts(4)) */ + parts(4).toLong + } +} + diff --git a/streaming/src/main/scala/spark/stream/TestStreamCoordinator.scala b/streaming/src/main/scala/spark/stream/TestStreamCoordinator.scala new file mode 100644 index 0000000000..add166fbd9 --- /dev/null +++ b/streaming/src/main/scala/spark/stream/TestStreamCoordinator.scala @@ -0,0 +1,38 @@ +package spark.stream + +import spark.Logging + +import akka.actor._ +import akka.actor.Actor +import akka.actor.Actor._ + +sealed trait TestStreamCoordinatorMessage +case class GetStreamDetails extends TestStreamCoordinatorMessage +case class GotStreamDetails(name: String, duration: Long) extends TestStreamCoordinatorMessage +case class TestStarted extends TestStreamCoordinatorMessage + +class TestStreamCoordinator(streamDetails: Array[(String, Long)]) extends Actor with Logging { + + var index = 0 + + initLogging() + + logInfo("Created") + + def receive = { + case TestStarted => { + sender ! "OK" + } + + case GetStreamDetails => { + val streamDetail = if (index >= streamDetails.length) null else streamDetails(index) + sender ! GotStreamDetails(streamDetail._1, streamDetail._2) + index += 1 + if (streamDetail != null) { + logInfo("Allocated " + streamDetail._1 + " (" + index + "/" + streamDetails.length + ")" ) + } + } + } + +} + diff --git a/streaming/src/main/scala/spark/stream/TestStreamReceiver3.scala b/streaming/src/main/scala/spark/stream/TestStreamReceiver3.scala new file mode 100644 index 0000000000..9cc342040b --- /dev/null +++ b/streaming/src/main/scala/spark/stream/TestStreamReceiver3.scala @@ -0,0 +1,420 @@ +package spark.stream + +import spark._ +import spark.storage._ +import spark.util.AkkaUtils + +import scala.math._ +import scala.collection.mutable.{Queue, HashMap, ArrayBuffer, SynchronizedMap} + +import akka.actor._ +import akka.actor.Actor +import akka.dispatch._ +import akka.pattern.ask +import akka.util.duration._ + +import java.io.DataInputStream +import java.io.BufferedInputStream +import java.net.Socket +import java.net.ServerSocket +import java.util.LinkedHashMap + +import org.apache.hadoop.fs._ +import org.apache.hadoop.conf._ +import org.apache.hadoop.io._ +import org.apache.hadoop.mapred._ +import org.apache.hadoop.util._ + +import spark.Utils + + +class TestStreamReceiver3(actorSystem: ActorSystem, blockManager: BlockManager) +extends Thread with Logging { + + + class DataHandler( + inputName: String, + longIntervalDuration: LongTime, + shortIntervalDuration: LongTime, + blockManager: BlockManager + ) + extends Logging { + + class Block(var id: String, var shortInterval: Interval) { + val data = ArrayBuffer[String]() + var pushed = false + def longInterval = getLongInterval(shortInterval) + def empty() = (data.size == 0) + def += (str: String) = (data += str) + override def toString() = "Block " + id + } + + class Bucket(val longInterval: Interval) { + val blocks = new ArrayBuffer[Block]() + var filled = 
false + def += (block: Block) = blocks += block + def empty() = (blocks.size == 0) + def ready() = (filled && !blocks.exists(! _.pushed)) + def blockIds() = blocks.map(_.id).toArray + override def toString() = "Bucket [" + longInterval + ", " + blocks.size + " blocks]" + } + + initLogging() + + val shortIntervalDurationMillis = shortIntervalDuration.asInstanceOf[LongTime].milliseconds + val longIntervalDurationMillis = longIntervalDuration.asInstanceOf[LongTime].milliseconds + + var currentBlock: Block = null + var currentBucket: Bucket = null + + val blocksForPushing = new Queue[Block]() + val buckets = new HashMap[Interval, Bucket]() with SynchronizedMap[Interval, Bucket] + + val blockUpdatingThread = new Thread() { override def run() { keepUpdatingCurrentBlock() } } + val blockPushingThread = new Thread() { override def run() { keepPushingBlocks() } } + + def start() { + blockUpdatingThread.start() + blockPushingThread.start() + } + + def += (data: String) = addData(data) + + def addData(data: String) { + if (currentBlock == null) { + updateCurrentBlock() + } + currentBlock.synchronized { + currentBlock += data + } + } + + def getShortInterval(time: Time): Interval = { + val intervalBegin = time.floor(shortIntervalDuration) + Interval(intervalBegin, intervalBegin + shortIntervalDuration) + } + + def getLongInterval(shortInterval: Interval): Interval = { + val intervalBegin = shortInterval.beginTime.floor(longIntervalDuration) + Interval(intervalBegin, intervalBegin + longIntervalDuration) + } + + def updateCurrentBlock() { + /*logInfo("Updating current block")*/ + val currentTime: LongTime = LongTime(System.currentTimeMillis) + val shortInterval = getShortInterval(currentTime) + val longInterval = getLongInterval(shortInterval) + + def createBlock(reuseCurrentBlock: Boolean = false) { + val newBlockId = inputName + "-" + longInterval.toFormattedString + "-" + currentBucket.blocks.size + if (!reuseCurrentBlock) { + val newBlock = new Block(newBlockId, shortInterval) + /*logInfo("Created " + currentBlock)*/ + currentBlock = newBlock + } else { + currentBlock.shortInterval = shortInterval + currentBlock.id = newBlockId + } + } + + def createBucket() { + val newBucket = new Bucket(longInterval) + buckets += ((longInterval, newBucket)) + currentBucket = newBucket + /*logInfo("Created " + currentBucket + ", " + buckets.size + " buckets")*/ + } + + if (currentBlock == null || currentBucket == null) { + createBucket() + currentBucket.synchronized { + createBlock() + } + return + } + + currentBlock.synchronized { + var reuseCurrentBlock = false + + if (shortInterval != currentBlock.shortInterval) { + if (!currentBlock.empty) { + blocksForPushing.synchronized { + blocksForPushing += currentBlock + blocksForPushing.notifyAll() + } + } + + currentBucket.synchronized { + if (currentBlock.empty) { + reuseCurrentBlock = true + } else { + currentBucket += currentBlock + } + + if (longInterval != currentBucket.longInterval) { + currentBucket.filled = true + if (currentBucket.ready) { + currentBucket.notifyAll() + } + createBucket() + } + } + + createBlock(reuseCurrentBlock) + } + } + } + + def pushBlock(block: Block) { + try{ + if (blockManager != null) { + logInfo("Pushing block") + val startTime = System.currentTimeMillis + + val bytes = blockManager.dataSerialize(block.data.toIterator) + val finishTime = System.currentTimeMillis + logInfo(block + " serialization delay is " + (finishTime - startTime) / 1000.0 + " s") + + blockManager.putBytes(block.id.toString, bytes, StorageLevel.DISK_AND_MEMORY_2) + 
/*blockManager.putBytes(block.id.toString, bytes, StorageLevel.DISK_AND_MEMORY_DESER_2)*/ + /*blockManager.put(block.id.toString, block.data.toIterator, StorageLevel.DISK_AND_MEMORY_DESER)*/ + /*blockManager.put(block.id.toString, block.data.toIterator, StorageLevel.DISK_AND_MEMORY)*/ + val finishTime1 = System.currentTimeMillis + logInfo(block + " put delay is " + (finishTime1 - startTime) / 1000.0 + " s") + } else { + logWarning(block + " not put as block manager is null") + } + } catch { + case e: Exception => logError("Exception writing " + block + " to blockmanager" , e) + } + } + + def getBucket(longInterval: Interval): Option[Bucket] = { + buckets.get(longInterval) + } + + def clearBucket(longInterval: Interval) { + buckets.remove(longInterval) + } + + def keepUpdatingCurrentBlock() { + logInfo("Thread to update current block started") + while(true) { + updateCurrentBlock() + val currentTimeMillis = System.currentTimeMillis + val sleepTimeMillis = (currentTimeMillis / shortIntervalDurationMillis + 1) * + shortIntervalDurationMillis - currentTimeMillis + 1 + Thread.sleep(sleepTimeMillis) + } + } + + def keepPushingBlocks() { + var loop = true + logInfo("Thread to push blocks started") + while(loop) { + val block = blocksForPushing.synchronized { + if (blocksForPushing.size == 0) { + blocksForPushing.wait() + } + blocksForPushing.dequeue + } + pushBlock(block) + block.pushed = true + block.data.clear() + + val bucket = buckets(block.longInterval) + bucket.synchronized { + if (bucket.ready) { + bucket.notifyAll() + } + } + } + } + } + + + class ConnectionListener(port: Int, dataHandler: DataHandler) + extends Thread with Logging { + initLogging() + override def run { + try { + val listener = new ServerSocket(port) + logInfo("Listening on port " + port) + while (true) { + new ConnectionHandler(listener.accept(), dataHandler).start(); + } + listener.close() + } catch { + case e: Exception => logError("", e); + } + } + } + + class ConnectionHandler(socket: Socket, dataHandler: DataHandler) extends Thread with Logging { + initLogging() + override def run { + logInfo("New connection from " + socket.getInetAddress() + ":" + socket.getPort) + val bytes = new Array[Byte](100 * 1024 * 1024) + try { + + val inputStream = new DataInputStream(new BufferedInputStream(socket.getInputStream, 1024 * 1024)) + /*val inputStream = new DataInputStream(new BufferedInputStream(socket.getInputStream))*/ + var str: String = null + str = inputStream.readUTF + while(str != null) { + dataHandler += str + str = inputStream.readUTF() + } + + /* + var loop = true + while(loop) { + val numRead = inputStream.read(bytes) + if (numRead < 0) { + loop = false + } + inbox += ((LongTime(SystemTime.currentTimeMillis), "test")) + }*/ + + inputStream.close() + } catch { + case e => logError("Error receiving data", e) + } + socket.close() + } + } + + initLogging() + + val masterHost = System.getProperty("spark.master.host") + val masterPort = System.getProperty("spark.master.port").toInt + + val akkaPath = "akka://spark@%s:%s/user/".format(masterHost, masterPort) + val sparkstreamScheduler = actorSystem.actorFor(akkaPath + "/SparkStreamScheduler") + val testStreamCoordinator = actorSystem.actorFor(akkaPath + "/TestStreamCoordinator") + + logInfo("Getting stream details from master " + masterHost + ":" + masterPort) + + val timeout = 50 millis + + var started = false + while (!started) { + askActor[String](testStreamCoordinator, TestStarted) match { + case Some(str) => { + started = true + logInfo("TestStreamCoordinator 
started") + } + case None => { + logInfo("TestStreamCoordinator not started yet") + Thread.sleep(200) + } + } + } + + val streamDetails = askActor[GotStreamDetails](testStreamCoordinator, GetStreamDetails) match { + case Some(details) => details + case None => throw new Exception("Could not get stream details") + } + logInfo("Stream details received: " + streamDetails) + + val inputName = streamDetails.name + val intervalDurationMillis = streamDetails.duration + val intervalDuration = LongTime(intervalDurationMillis) + + val dataHandler = new DataHandler( + inputName, + intervalDuration, + LongTime(TestStreamReceiver3.SHORT_INTERVAL_MILLIS), + blockManager) + + val connListener = new ConnectionListener(TestStreamReceiver3.PORT, dataHandler) + + // Send a message to an actor and return an option with its reply, or None if this times out + def askActor[T](actor: ActorRef, message: Any): Option[T] = { + try { + val future = actor.ask(message)(timeout) + return Some(Await.result(future, timeout).asInstanceOf[T]) + } catch { + case e: Exception => + logInfo("Error communicating with " + actor, e) + return None + } + } + + override def run() { + connListener.start() + dataHandler.start() + + var interval = Interval.currentInterval(intervalDuration) + var dataStarted = false + + while(true) { + waitFor(interval.endTime) + logInfo("Woken up at " + System.currentTimeMillis + " for " + interval) + dataHandler.getBucket(interval) match { + case Some(bucket) => { + logInfo("Found " + bucket + " for " + interval) + bucket.synchronized { + if (!bucket.ready) { + logInfo("Waiting for " + bucket) + bucket.wait() + logInfo("Wait over for " + bucket) + } + if (dataStarted || !bucket.empty) { + logInfo("Notifying " + bucket) + notifyScheduler(interval, bucket.blockIds) + dataStarted = true + } + bucket.blocks.clear() + dataHandler.clearBucket(interval) + } + } + case None => { + logInfo("Found none for " + interval) + if (dataStarted) { + logInfo("Notifying none") + notifyScheduler(interval, Array[String]()) + } + } + } + interval = interval.next + } + } + + def waitFor(time: Time) { + val currentTimeMillis = System.currentTimeMillis + val targetTimeMillis = time.asInstanceOf[LongTime].milliseconds + if (currentTimeMillis < targetTimeMillis) { + val sleepTime = (targetTimeMillis - currentTimeMillis) + Thread.sleep(sleepTime + 1) + } + } + + def notifyScheduler(interval: Interval, blockIds: Array[String]) { + try { + sparkstreamScheduler ! 
InputGenerated(inputName, interval, blockIds.toArray) + val time = interval.endTime.asInstanceOf[LongTime] + val delay = (System.currentTimeMillis - time.milliseconds) / 1000.0 + logInfo("Pushing delay for " + time + " is " + delay + " s") + } catch { + case _ => logError("Exception notifying scheduler at interval " + interval) + } + } +} + +object TestStreamReceiver3 { + + val PORT = 9999 + val SHORT_INTERVAL_MILLIS = 100 + + def main(args: Array[String]) { + System.setProperty("spark.master.host", Utils.localHostName) + System.setProperty("spark.master.port", "7078") + val details = Array(("Sentences", 2000L)) + val (actorSystem, port) = AkkaUtils.createActorSystem("spark", Utils.localHostName, 7078) + actorSystem.actorOf(Props(new TestStreamCoordinator(details)), name = "TestStreamCoordinator") + new TestStreamReceiver3(actorSystem, null).start() + } +} + + + diff --git a/streaming/src/main/scala/spark/stream/TestStreamReceiver4.scala b/streaming/src/main/scala/spark/stream/TestStreamReceiver4.scala new file mode 100644 index 0000000000..e7bef75391 --- /dev/null +++ b/streaming/src/main/scala/spark/stream/TestStreamReceiver4.scala @@ -0,0 +1,373 @@ +package spark.stream + +import spark._ +import spark.storage._ +import spark.util.AkkaUtils + +import scala.math._ +import scala.collection.mutable.{Queue, HashMap, ArrayBuffer, SynchronizedMap} + +import java.io._ +import java.nio._ +import java.nio.charset._ +import java.nio.channels._ +import java.util.concurrent.Executors + +import akka.actor._ +import akka.actor.Actor +import akka.dispatch._ +import akka.pattern.ask +import akka.util.duration._ + +class TestStreamReceiver4(actorSystem: ActorSystem, blockManager: BlockManager) +extends Thread with Logging { + + class DataHandler( + inputName: String, + longIntervalDuration: LongTime, + shortIntervalDuration: LongTime, + blockManager: BlockManager + ) + extends Logging { + + class Block(val id: String, val shortInterval: Interval, val buffer: ByteBuffer) { + var pushed = false + def longInterval = getLongInterval(shortInterval) + override def toString() = "Block " + id + } + + class Bucket(val longInterval: Interval) { + val blocks = new ArrayBuffer[Block]() + var filled = false + def += (block: Block) = blocks += block + def empty() = (blocks.size == 0) + def ready() = (filled && !blocks.exists(! 
_.pushed)) + def blockIds() = blocks.map(_.id).toArray + override def toString() = "Bucket [" + longInterval + ", " + blocks.size + " blocks]" + } + + initLogging() + + val syncOnLastShortInterval = true + + val shortIntervalDurationMillis = shortIntervalDuration.asInstanceOf[LongTime].milliseconds + val longIntervalDurationMillis = longIntervalDuration.asInstanceOf[LongTime].milliseconds + + val buffer = ByteBuffer.allocateDirect(100 * 1024 * 1024) + var currentShortInterval = Interval.currentInterval(shortIntervalDuration) + + val blocksForPushing = new Queue[Block]() + val buckets = new HashMap[Interval, Bucket]() with SynchronizedMap[Interval, Bucket] + + val bufferProcessingThread = new Thread() { override def run() { keepProcessingBuffers() } } + val blockPushingExecutor = Executors.newFixedThreadPool(5) + + + def start() { + buffer.clear() + if (buffer.remaining == 0) { + throw new Exception("Buffer initialization error") + } + bufferProcessingThread.start() + } + + def readDataToBuffer(func: ByteBuffer => Int): Int = { + buffer.synchronized { + if (buffer.remaining == 0) { + logInfo("Received first data for interval " + currentShortInterval) + } + func(buffer) + } + } + + def getLongInterval(shortInterval: Interval): Interval = { + val intervalBegin = shortInterval.beginTime.floor(longIntervalDuration) + Interval(intervalBegin, intervalBegin + longIntervalDuration) + } + + def processBuffer() { + + def readInt(buffer: ByteBuffer): Int = { + var offset = 0 + var result = 0 + while (offset < 32) { + val b = buffer.get() + result |= ((b & 0x7F) << offset) + if ((b & 0x80) == 0) { + return result + } + offset += 7 + } + throw new Exception("Malformed zigzag-encoded integer") + } + + val currentLongInterval = getLongInterval(currentShortInterval) + val startTime = System.currentTimeMillis + val newBuffer: ByteBuffer = buffer.synchronized { + buffer.flip() + if (buffer.remaining == 0) { + buffer.clear() + null + } else { + logDebug("Processing interval " + currentShortInterval + " with delay of " + (System.currentTimeMillis - startTime) + " ms") + val startTime1 = System.currentTimeMillis + var loop = true + var count = 0 + while(loop) { + buffer.mark() + try { + val len = readInt(buffer) + buffer.position(buffer.position + len) + count += 1 + } catch { + case e: Exception => { + buffer.reset() + loop = false + } + } + } + val bytesToCopy = buffer.position + val newBuf = ByteBuffer.allocate(bytesToCopy) + buffer.position(0) + newBuf.put(buffer.slice().limit(bytesToCopy).asInstanceOf[ByteBuffer]) + newBuf.flip() + buffer.position(bytesToCopy) + buffer.compact() + newBuf + } + } + + if (newBuffer != null) { + val bucket = buckets.getOrElseUpdate(currentLongInterval, new Bucket(currentLongInterval)) + bucket.synchronized { + val newBlockId = inputName + "-" + currentLongInterval.toFormattedString + "-" + currentShortInterval.toFormattedString + val newBlock = new Block(newBlockId, currentShortInterval, newBuffer) + if (syncOnLastShortInterval) { + bucket += newBlock + } + logDebug("Created " + newBlock + " with " + newBuffer.remaining + " bytes, creation delay is " + (System.currentTimeMillis - currentShortInterval.endTime.asInstanceOf[LongTime].milliseconds) / 1000.0 + " s" ) + blockPushingExecutor.execute(new Runnable() { def run() { pushAndNotifyBlock(newBlock) } }) + } + } + + val newShortInterval = Interval.currentInterval(shortIntervalDuration) + val newLongInterval = getLongInterval(newShortInterval) + + if (newLongInterval != currentLongInterval) { + buckets.get(currentLongInterval) 
match { + case Some(bucket) => { + bucket.synchronized { + bucket.filled = true + if (bucket.ready) { + bucket.notifyAll() + } + } + } + case None => + } + buckets += ((newLongInterval, new Bucket(newLongInterval))) + } + + currentShortInterval = newShortInterval + } + + def pushBlock(block: Block) { + try{ + if (blockManager != null) { + val startTime = System.currentTimeMillis + logInfo(block + " put start delay is " + (startTime - block.shortInterval.endTime.asInstanceOf[LongTime].milliseconds) + " ms") + /*blockManager.putBytes(block.id.toString, block.buffer, StorageLevel.DISK_AND_MEMORY)*/ + /*blockManager.putBytes(block.id.toString, block.buffer, StorageLevel.DISK_AND_MEMORY_2)*/ + blockManager.putBytes(block.id.toString, block.buffer, StorageLevel.MEMORY_ONLY_2) + /*blockManager.putBytes(block.id.toString, block.buffer, StorageLevel.MEMORY_ONLY)*/ + /*blockManager.putBytes(block.id.toString, block.buffer, StorageLevel.DISK_AND_MEMORY_DESER)*/ + /*blockManager.putBytes(block.id.toString, block.buffer, StorageLevel.DISK_AND_MEMORY_DESER_2)*/ + val finishTime = System.currentTimeMillis + logInfo(block + " put delay is " + (finishTime - startTime) + " ms") + } else { + logWarning(block + " not put as block manager is null") + } + } catch { + case e: Exception => logError("Exception writing " + block + " to blockmanager" , e) + } + } + + def getBucket(longInterval: Interval): Option[Bucket] = { + buckets.get(longInterval) + } + + def clearBucket(longInterval: Interval) { + buckets.remove(longInterval) + } + + def keepProcessingBuffers() { + logInfo("Thread to process buffers started") + while(true) { + processBuffer() + val currentTimeMillis = System.currentTimeMillis + val sleepTimeMillis = (currentTimeMillis / shortIntervalDurationMillis + 1) * + shortIntervalDurationMillis - currentTimeMillis + 1 + Thread.sleep(sleepTimeMillis) + } + } + + def pushAndNotifyBlock(block: Block) { + pushBlock(block) + block.pushed = true + val bucket = if (syncOnLastShortInterval) { + buckets(block.longInterval) + } else { + var longInterval = block.longInterval + while(!buckets.contains(longInterval)) { + logWarning("Skipping bucket of " + longInterval + " for " + block) + longInterval = longInterval.next + } + val chosenBucket = buckets(longInterval) + logDebug("Choosing bucket of " + longInterval + " for " + block) + chosenBucket += block + chosenBucket + } + + bucket.synchronized { + if (bucket.ready) { + bucket.notifyAll() + } + } + + } + } + + + class ReceivingConnectionHandler(host: String, port: Int, dataHandler: DataHandler) + extends ConnectionHandler(host, port, false) { + + override def ready(key: SelectionKey) { + changeInterest(key, SelectionKey.OP_READ) + } + + override def read(key: SelectionKey) { + try { + val channel = key.channel.asInstanceOf[SocketChannel] + val bytesRead = dataHandler.readDataToBuffer(channel.read) + if (bytesRead < 0) { + close(key) + } + } catch { + case e: IOException => { + logError("Error reading", e) + close(key) + } + } + } + } + + initLogging() + + val masterHost = System.getProperty("spark.master.host", "localhost") + val masterPort = System.getProperty("spark.master.port", "7078").toInt + + val akkaPath = "akka://spark@%s:%s/user/".format(masterHost, masterPort) + val sparkstreamScheduler = actorSystem.actorFor(akkaPath + "/SparkStreamScheduler") + val testStreamCoordinator = actorSystem.actorFor(akkaPath + "/TestStreamCoordinator") + + logInfo("Getting stream details from master " + masterHost + ":" + masterPort) + + val streamDetails = 
askActor[GotStreamDetails](testStreamCoordinator, GetStreamDetails) match { + case Some(details) => details + case None => throw new Exception("Could not get stream details") + } + logInfo("Stream details received: " + streamDetails) + + val inputName = streamDetails.name + val intervalDurationMillis = streamDetails.duration + val intervalDuration = Milliseconds(intervalDurationMillis) + val shortIntervalDuration = Milliseconds(System.getProperty("spark.stream.shortinterval", "500").toInt) + + val dataHandler = new DataHandler(inputName, intervalDuration, shortIntervalDuration, blockManager) + val connectionHandler = new ReceivingConnectionHandler("localhost", 9999, dataHandler) + + val timeout = 100 millis + + // Send a message to an actor and return an option with its reply, or None if this times out + def askActor[T](actor: ActorRef, message: Any): Option[T] = { + try { + val future = actor.ask(message)(timeout) + return Some(Await.result(future, timeout).asInstanceOf[T]) + } catch { + case e: Exception => + logInfo("Error communicating with " + actor, e) + return None + } + } + + override def run() { + connectionHandler.start() + dataHandler.start() + + var interval = Interval.currentInterval(intervalDuration) + var dataStarted = false + + + while(true) { + waitFor(interval.endTime) + /*logInfo("Woken up at " + System.currentTimeMillis + " for " + interval)*/ + dataHandler.getBucket(interval) match { + case Some(bucket) => { + logDebug("Found " + bucket + " for " + interval) + bucket.synchronized { + if (!bucket.ready) { + logDebug("Waiting for " + bucket) + bucket.wait() + logDebug("Wait over for " + bucket) + } + if (dataStarted || !bucket.empty) { + logDebug("Notifying " + bucket) + notifyScheduler(interval, bucket.blockIds) + dataStarted = true + } + bucket.blocks.clear() + dataHandler.clearBucket(interval) + } + } + case None => { + logDebug("Found none for " + interval) + if (dataStarted) { + logDebug("Notifying none") + notifyScheduler(interval, Array[String]()) + } + } + } + interval = interval.next + } + } + + def waitFor(time: Time) { + val currentTimeMillis = System.currentTimeMillis + val targetTimeMillis = time.asInstanceOf[LongTime].milliseconds + if (currentTimeMillis < targetTimeMillis) { + val sleepTime = (targetTimeMillis - currentTimeMillis) + Thread.sleep(sleepTime + 1) + } + } + + def notifyScheduler(interval: Interval, blockIds: Array[String]) { + try { + sparkstreamScheduler ! 
InputGenerated(inputName, interval, blockIds.toArray) + val time = interval.endTime.asInstanceOf[LongTime] + val delay = (System.currentTimeMillis - time.milliseconds) + logInfo("Notification delay for " + time + " is " + delay + " ms") + } catch { + case e: Exception => logError("Exception notifying scheduler at interval " + interval + ": " + e) + } + } +} + + +object TestStreamReceiver4 { + def main(args: Array[String]) { + val details = Array(("Sentences", 2000L)) + val (actorSystem, port) = AkkaUtils.createActorSystem("spark", Utils.localHostName, 7078) + actorSystem.actorOf(Props(new TestStreamCoordinator(details)), name = "TestStreamCoordinator") + new TestStreamReceiver4(actorSystem, null).start() + } +} diff --git a/streaming/src/main/scala/spark/stream/Time.scala b/streaming/src/main/scala/spark/stream/Time.scala new file mode 100644 index 0000000000..25369dfee5 --- /dev/null +++ b/streaming/src/main/scala/spark/stream/Time.scala @@ -0,0 +1,85 @@ +package spark.stream + +abstract case class Time { + + // basic operations that must be overridden + def copy(): Time + def zero: Time + def < (that: Time): Boolean + def += (that: Time): Time + def -= (that: Time): Time + def floor(that: Time): Time + def isMultipleOf(that: Time): Boolean + + // derived operations composed of basic operations + def + (that: Time) = this.copy() += that + def - (that: Time) = this.copy() -= that + def * (times: Int) = { + var count = 0 + var result = this.copy() + while (count < times) { + result += this + count += 1 + } + result + } + def <= (that: Time) = (this < that || this == that) + def > (that: Time) = !(this <= that) + def >= (that: Time) = !(this < that) + def isZero = (this == zero) + def toFormattedString = toString +} + +object Time { + def Milliseconds(milliseconds: Long) = LongTime(milliseconds) + + def zero = LongTime(0) +} + +case class LongTime(var milliseconds: Long) extends Time { + + override def copy() = LongTime(this.milliseconds) + + override def zero = LongTime(0) + + override def < (that: Time): Boolean = + (this.milliseconds < that.asInstanceOf[LongTime].milliseconds) + + override def += (that: Time): Time = { + this.milliseconds += that.asInstanceOf[LongTime].milliseconds + this + } + + override def -= (that: Time): Time = { + this.milliseconds -= that.asInstanceOf[LongTime].milliseconds + this + } + + override def floor(that: Time): Time = { + val t = that.asInstanceOf[LongTime].milliseconds + val m = this.milliseconds / t + LongTime(m.toLong * t) + } + + override def isMultipleOf(that: Time): Boolean = + (this.milliseconds % that.asInstanceOf[LongTime].milliseconds == 0) + + override def isZero = (this.milliseconds == 0) + + override def toString = (milliseconds.toString + "ms") + + override def toFormattedString = milliseconds.toString +} + +object Milliseconds { + def apply(milliseconds: Long) = LongTime(milliseconds) +} + +object Seconds { + def apply(seconds: Long) = LongTime(seconds * 1000) +} + +object Minutes { + def apply(minutes: Long) = LongTime(minutes * 60000) +} + diff --git a/streaming/src/main/scala/spark/stream/TopContentCount.scala b/streaming/src/main/scala/spark/stream/TopContentCount.scala new file mode 100644 index 0000000000..a8cca4e793 --- /dev/null +++ b/streaming/src/main/scala/spark/stream/TopContentCount.scala @@ -0,0 +1,97 @@ +package spark.stream + +import SparkStreamContext._ + +import spark.storage.StorageLevel + +import scala.util.Sorting + +object TopContentCount { + + case class Event(val country: String, val content: String) + + object Event 
{ + def create(string: String): Event = { + val parts = string.split(":") + new Event(parts(0), parts(1)) + } + } + + def main(args: Array[String]) { + + if (args.length < 2) { + println ("Usage: GrepCount2 <# sentence streams>") + System.exit(1) + } + + val ssc = new SparkStreamContext(args(0), "TopContentCount") + val sc = ssc.sc + val dummy = sc.parallelize(0 to 1000, 100).persist(StorageLevel.DISK_AND_MEMORY) + sc.runJob(dummy, (_: Iterator[Int]) => {}) + + + val numEventStreams = if (args.length > 1) args(1).toInt else 1 + if (args.length > 2) { + ssc.setTempDir(args(2)) + } + + val eventStrings = new UnifiedRDS( + (1 to numEventStreams).map(i => ssc.readTestStream("Events-" + i, 1000)).toArray + ) + + def parse(string: String) = { + val parts = string.split(":") + (parts(0), parts(1)) + } + + def add(v1: Int, v2: Int) = (v1 + v2) + def subtract(v1: Int, v2: Int) = (v1 - v2) + + val events = eventStrings.map(x => parse(x)) + /*events.print*/ + + val parallelism = 8 + val counts_per_content_per_country = events + .map(x => (x, 1)) + .reduceByKey(_ + _) + /*.reduceByKeyAndWindow(add _, subtract _, Seconds(5), Seconds(1), parallelism)*/ + /*counts_per_content_per_country.print*/ + + /* + counts_per_content_per_country.persist( + StorageLevel.MEMORY_ONLY_DESER, + StorageLevel.MEMORY_ONLY_DESER_2, + Seconds(1) + )*/ + + val counts_per_country = counts_per_content_per_country + .map(x => (x._1._1, (x._1._2, x._2))) + .groupByKey() + counts_per_country.print + + + def topK(data: Seq[(String, Int)], k: Int): Array[(String, Int)] = { + implicit val countOrdering = new Ordering[(String, Int)] { + override def compare(count1: (String, Int), count2: (String, Int)): Int = { + count2._2 - count1._2 + } + } + val array = data.toArray + Sorting.quickSort(array) + val taken = array.take(k) + taken + } + + val k = 10 + val topKContents_per_country = counts_per_country + .map(x => (x._1, topK(x._2, k))) + .map(x => (x._1, x._2.map(_.toString).reduceLeft(_ + ", " + _))) + + topKContents_per_country.print + + ssc.run + } +} + + + diff --git a/streaming/src/main/scala/spark/stream/TopKWordCount2.scala b/streaming/src/main/scala/spark/stream/TopKWordCount2.scala new file mode 100644 index 0000000000..7dd06dd5ee --- /dev/null +++ b/streaming/src/main/scala/spark/stream/TopKWordCount2.scala @@ -0,0 +1,103 @@ +package spark.stream + +import spark.SparkContext +import SparkContext._ +import SparkStreamContext._ + +import spark.storage.StorageLevel + +import scala.util.Sorting + +object TopKWordCount2 { + + def moreWarmup(sc: SparkContext) { + (0 until 20).foreach {i => + sc.parallelize(1 to 20000000, 500) + .map(_ % 100).map(_.toString) + .map(x => (x, 1)).reduceByKey(_ + _, 10) + .collect() + } + } + + def main (args: Array[String]) { + + if (args.length < 2) { + println ("Usage: SparkStreamContext <# sentence streams>") + System.exit(1) + } + + val ssc = new SparkStreamContext(args(0), "WordCount2") + + val numSentenceStreams = if (args.length > 1) args(1).toInt else 1 + if (args.length > 2) { + ssc.setTempDir(args(2)) + } + + GrepCount2.warmConnectionManagers(ssc.sc) + moreWarmup(ssc.sc) + + val sentences = new UnifiedRDS( + (1 to numSentenceStreams).map(i => ssc.readTestStream("Sentences-" + i, 1000)).toArray + ) + + val words = sentences.flatMap(_.split(" ")) + + def add(v1: Int, v2: Int) = (v1 + v2) + def subtract(v1: Int, v2: Int) = (v1 - v2) + + val windowedCounts = words.map(x => (x, 1)).reduceByKeyAndWindow(add _, subtract _, Seconds(10), Seconds(1), 10) + 
windowedCounts.persist(StorageLevel.MEMORY_ONLY_DESER, StorageLevel.MEMORY_ONLY_DESER, Seconds(1)) + + def topK(data: Iterator[(String, Int)], k: Int): Iterator[(String, Int)] = { + val taken = new Array[(String, Int)](k) + + var i = 0 + var len = 0 + var done = false + var value: (String, Int) = null + var swap: (String, Int) = null + var count = 0 + + while(data.hasNext) { + value = data.next + count += 1 + println("count = " + count) + if (len == 0) { + taken(0) = value + len = 1 + } else if (len < k || value._2 > taken(len - 1)._2) { + if (len < k) { + len += 1 + } + taken(len - 1) = value + i = len - 1 + while(i > 0 && taken(i - 1)._2 < taken(i)._2) { + swap = taken(i) + taken(i) = taken(i-1) + taken(i - 1) = swap + i -= 1 + } + } + } + println("Took " + len + " out of " + count + " items") + return taken.toIterator + } + + val k = 10 + val partialTopKWindowedCounts = windowedCounts.mapPartitions(topK(_, k)) + partialTopKWindowedCounts.foreachRDD(rdd => { + val collectedCounts = rdd.collect + println("Collected " + collectedCounts.size + " items") + topK(collectedCounts.toIterator, k).foreach(println) + }) + + /* + windowedCounts.filter(_ == null).foreachRDD(rdd => { + val count = rdd.count + println("# of nulls = " + count) + })*/ + + ssc.run + } +} + diff --git a/streaming/src/main/scala/spark/stream/TopKWordCount2_Special.scala b/streaming/src/main/scala/spark/stream/TopKWordCount2_Special.scala new file mode 100644 index 0000000000..e9f3f914ae --- /dev/null +++ b/streaming/src/main/scala/spark/stream/TopKWordCount2_Special.scala @@ -0,0 +1,142 @@ +package spark.stream + +import spark.SparkContext +import SparkContext._ +import SparkStreamContext._ + +import spark.storage.StorageLevel + +import scala.util.Sorting +import scala.collection.JavaConversions.mapAsScalaMap +import scala.collection.mutable.Queue + +import java.lang.{Long => JLong} + +object TopKWordCount2_Special { + + def moreWarmup(sc: SparkContext) { + (0 until 20).foreach {i => + sc.parallelize(1 to 20000000, 500) + .map(_ % 100).map(_.toString) + .map(x => (x, 1)).reduceByKey(_ + _, 10) + .collect() + } + } + + def main (args: Array[String]) { + + if (args.length < 2) { + println ("Usage: SparkStreamContext <# sentence streams>") + System.exit(1) + } + + val ssc = new SparkStreamContext(args(0), "TopKWordCount2") + + val numSentenceStreams = if (args.length > 1) args(1).toInt else 1 + if (args.length > 2) { + ssc.setTempDir(args(2)) + } + + GrepCount2.warmConnectionManagers(ssc.sc) + /*moreWarmup(ssc.sc)*/ + + val sentences = new UnifiedRDS( + (1 to numSentenceStreams).map(i => ssc.readTestStream("Sentences-" + i, 500)).toArray + ) + + /*val words = sentences.flatMap(_.split(" "))*/ + + /*def add(v1: Int, v2: Int) = (v1 + v2) */ + /*def subtract(v1: Int, v2: Int) = (v1 - v2) */ + + def add(v1: JLong, v2: JLong) = (v1 + v2) + def subtract(v1: JLong, v2: JLong) = (v1 - v2) + + def splitAndCountPartitions(iter: Iterator[String]): Iterator[(String, JLong)] = { + val map = new java.util.HashMap[String, JLong] + var i = 0 + var j = 0 + while (iter.hasNext) { + val s = iter.next() + i = 0 + while (i < s.length) { + j = i + while (j < s.length && s.charAt(j) != ' ') { + j += 1 + } + if (j > i) { + val w = s.substring(i, j) + val c = map.get(w) + if (c == null) { + map.put(w, 1) + } else { + map.put(w, c + 1) + } + } + i = j + while (i < s.length && s.charAt(i) == ' ') { + i += 1 + } + } + } + map.toIterator + } + + + val windowedCounts = sentences.mapPartitions(splitAndCountPartitions).reduceByKeyAndWindow(add _, subtract _, 
Seconds(10), Milliseconds(500), 10) + /*windowedCounts.persist(StorageLevel.MEMORY_ONLY_DESER, StorageLevel.MEMORY_ONLY_DESER, Seconds(1))*/ + windowedCounts.persist(StorageLevel.MEMORY_ONLY, StorageLevel.MEMORY_ONLY, Milliseconds(500)) + + def topK(data: Iterator[(String, JLong)], k: Int): Iterator[(String, JLong)] = { + val taken = new Array[(String, JLong)](k) + + var i = 0 + var len = 0 + var done = false + var value: (String, JLong) = null + var swap: (String, JLong) = null + var count = 0 + + while(data.hasNext) { + value = data.next + count += 1 + println("count = " + count) + if (len == 0) { + taken(0) = value + len = 1 + } else if (len < k || value._2 > taken(len - 1)._2) { + if (len < k) { + len += 1 + } + taken(len - 1) = value + i = len - 1 + while(i > 0 && taken(i - 1)._2 < taken(i)._2) { + swap = taken(i) + taken(i) = taken(i-1) + taken(i - 1) = swap + i -= 1 + } + } + } + println("Took " + len + " out of " + count + " items") + return taken.toIterator + } + + val k = 50 + val partialTopKWindowedCounts = windowedCounts.mapPartitions(topK(_, k)) + partialTopKWindowedCounts.foreachRDD(rdd => { + val collectedCounts = rdd.collect + println("Collected " + collectedCounts.size + " items") + topK(collectedCounts.toIterator, k).foreach(println) + }) + + /* + windowedCounts.filter(_ == null).foreachRDD(rdd => { + val count = rdd.count + println("# of nulls = " + count) + })*/ + + ssc.run + } +} + diff --git a/streaming/src/main/scala/spark/stream/WindowedRDS.scala b/streaming/src/main/scala/spark/stream/WindowedRDS.scala new file mode 100644 index 0000000000..a2e7966edb --- /dev/null +++ b/streaming/src/main/scala/spark/stream/WindowedRDS.scala @@ -0,0 +1,68 @@ +package spark.stream + +import spark.stream.SparkStreamContext._ + +import spark.RDD +import spark.UnionRDD +import spark.SparkContext._ + +import scala.collection.mutable.ArrayBuffer + +class WindowedRDS[T: ClassManifest]( + parent: RDS[T], + _windowTime: Time, + _slideTime: Time) + extends RDS[T](parent.ssc) { + + if (!_windowTime.isMultipleOf(parent.slideTime)) + throw new Exception("The window duration of WindowedRDS (" + _slideTime + ") " + + "must be multiple of the slide duration of parent RDS (" + parent.slideTime + ")") + + if (!_slideTime.isMultipleOf(parent.slideTime)) + throw new Exception("The slide duration of WindowedRDS (" + _slideTime + ") " + + "must be multiple of the slide duration of parent RDS (" + parent.slideTime + ")") + + val allowPartialWindows = true + + override def dependencies = List(parent) + + def windowTime: Time = _windowTime + + override def slideTime: Time = _slideTime + + override def compute(validTime: Time): Option[RDD[T]] = { + val parentRDDs = new ArrayBuffer[RDD[T]]() + val windowEndTime = validTime.copy() + val windowStartTime = if (allowPartialWindows && windowEndTime - windowTime < parent.zeroTime) { + parent.zeroTime + } else { + windowEndTime - windowTime + } + + logInfo("Window = " + windowStartTime + " - " + windowEndTime) + logInfo("Parent.zeroTime = " + parent.zeroTime) + + if (windowStartTime >= parent.zeroTime) { + // Walk back through time, from the 'windowEndTime' to 'windowStartTime' + // and get all parent RDDs from the parent RDS + var t = windowEndTime + while (t > windowStartTime) { + parent.getOrCompute(t) match { + case Some(rdd) => parentRDDs += rdd + case None => throw new Exception("Could not generate parent RDD for time " + t) + } + t -= parent.slideTime + } + } + + // Do a union of all parent RDDs to generate the new RDD + if (parentRDDs.size > 0) { + 
Some(new UnionRDD(ssc.sc, parentRDDs)) + } else { + None + } + } +} + + + diff --git a/streaming/src/main/scala/spark/stream/WordCount.scala b/streaming/src/main/scala/spark/stream/WordCount.scala new file mode 100644 index 0000000000..af825e46a8 --- /dev/null +++ b/streaming/src/main/scala/spark/stream/WordCount.scala @@ -0,0 +1,62 @@ +package spark.stream + +import SparkStreamContext._ + +import scala.util.Sorting + +import spark.SparkContext +import spark.storage.StorageLevel + +object WordCount { + var inputFile : String = null + var HDFS : String = null + var idealPartitions : Int = 0 + + def main (args: Array[String]) { + + if (args.length != 4) { + println ("Usage: WordCount ") + System.exit(1) + } + + HDFS = args(1) + inputFile = HDFS + args(2) + idealPartitions = args(3).toInt + println ("Input file: " + inputFile) + + val ssc = new SparkStreamContext(args(0), "WordCountWindow") + + SparkContext.idealPartitions = idealPartitions + SparkContext.inputFile = inputFile + + val sentences = ssc.readNetworkStream[String]("Sentences", Array("localhost:55119"), 2000) + //sentences.print + + val words = sentences.flatMap(_.split(" ")) + + def add(v1: Int, v2: Int) = (v1 + v2) + def subtract(v1: Int, v2: Int) = (v1 - v2) + + //val windowedCounts = words.map(x => (x, 1)).reduceByKeyAndWindow(add _, subtract _, Seconds(30), Milliseconds(2000), + // System.getProperty("spark.default.parallelism", "1").toInt) + //windowedCounts.persist(StorageLevel.MEMORY_ONLY_DESER, StorageLevel.DISK_AND_MEMORY_DESER_2, Seconds(5)) + //windowedCounts.print + + val parallelism = System.getProperty("spark.default.parallelism", "1").toInt + + //val localCounts = words.map(x => (x, 1)).reduceByKey(add _, parallelism) + //localCounts.persist(StorageLevel.MEMORY_ONLY_DESER, StorageLevel.MEMORY_ONLY_DESER_2, Seconds(6)) + //val windowedCounts = localCounts.window(Seconds(30), Seconds(2)).reduceByKey(_ + _) + + val windowedCounts = words.map(x => (x, 1)).reduceByKeyAndWindow(add _, subtract _, Seconds(30), Seconds(2), + parallelism) + windowedCounts.persist(StorageLevel.MEMORY_ONLY_DESER, StorageLevel.MEMORY_ONLY_DESER_2, Seconds(10)) + + //windowedCounts.print + windowedCounts.register + //windowedCounts.foreachRDD(rdd => rdd.collect.foreach(x => print(x+ " "))) + //windowedCounts.foreachRDD(rdd => rdd.collect.foreach(x => x)) + + ssc.run + } +} diff --git a/streaming/src/main/scala/spark/stream/WordCount1.scala b/streaming/src/main/scala/spark/stream/WordCount1.scala new file mode 100644 index 0000000000..501062b18d --- /dev/null +++ b/streaming/src/main/scala/spark/stream/WordCount1.scala @@ -0,0 +1,46 @@ +package spark.stream + +import SparkStreamContext._ + +import scala.util.Sorting + +import spark.SparkContext +import spark.storage.StorageLevel + +object WordCount1 { + var inputFile : String = null + var HDFS : String = null + var idealPartitions : Int = 0 + + def main (args: Array[String]) { + + if (args.length != 4) { + println ("Usage: WordCount ") + System.exit(1) + } + + HDFS = args(1) + inputFile = HDFS + args(2) + idealPartitions = args(3).toInt + println ("Input file: " + inputFile) + + val ssc = new SparkStreamContext(args(0), "WordCountWindow") + + SparkContext.idealPartitions = idealPartitions + SparkContext.inputFile = inputFile + + val sentences = ssc.readNetworkStream[String]("Sentences", Array("localhost:55119"), 1000) + //sentences.print + + val words = sentences.flatMap(_.split(" ")) + + def add(v1: Int, v2: Int) = (v1 + v2) + def subtract(v1: Int, v2: Int) = (v1 - v2) + + val windowedCounts = 
words.map(x => (x, 1)).reduceByKeyAndWindow(add _, subtract _, Seconds(10), Seconds(1), 10) + windowedCounts.persist(StorageLevel.MEMORY_ONLY_DESER, StorageLevel.MEMORY_ONLY_DESER, Seconds(1)) + windowedCounts.foreachRDD(_.collect) + + ssc.run + } +} diff --git a/streaming/src/main/scala/spark/stream/WordCount2.scala b/streaming/src/main/scala/spark/stream/WordCount2.scala new file mode 100644 index 0000000000..24324e891a --- /dev/null +++ b/streaming/src/main/scala/spark/stream/WordCount2.scala @@ -0,0 +1,55 @@ +package spark.stream + +import spark.SparkContext +import SparkContext._ +import SparkStreamContext._ + +import spark.storage.StorageLevel + +import scala.util.Sorting + +object WordCount2 { + + def moreWarmup(sc: SparkContext) { + (0 until 20).foreach {i => + sc.parallelize(1 to 20000000, 500) + .map(_ % 100).map(_.toString) + .map(x => (x, 1)).reduceByKey(_ + _, 10) + .collect() + } + } + + def main (args: Array[String]) { + + if (args.length < 2) { + println ("Usage: SparkStreamContext <# sentence streams>") + System.exit(1) + } + + val ssc = new SparkStreamContext(args(0), "WordCount2") + + val numSentenceStreams = if (args.length > 1) args(1).toInt else 1 + if (args.length > 2) { + ssc.setTempDir(args(2)) + } + + GrepCount2.warmConnectionManagers(ssc.sc) + /*moreWarmup(ssc.sc)*/ + + val sentences = new UnifiedRDS( + (1 to numSentenceStreams).map(i => ssc.readTestStream("Sentences-" + i, 1000)).toArray + ) + + val words = sentences.flatMap(_.split(" ")) + + def add(v1: Int, v2: Int) = (v1 + v2) + def subtract(v1: Int, v2: Int) = (v1 - v2) + + val windowedCounts = words.map(x => (x, 1)).reduceByKeyAndWindow(add _, subtract _, Seconds(10), Seconds(1), 6) + windowedCounts.persist(StorageLevel.MEMORY_ONLY_DESER, StorageLevel.MEMORY_ONLY_DESER, Seconds(1)) + windowedCounts.foreachRDD(_.collect) + + ssc.run + } +} + diff --git a/streaming/src/main/scala/spark/stream/WordCount2_Special.scala b/streaming/src/main/scala/spark/stream/WordCount2_Special.scala new file mode 100644 index 0000000000..c6b1aaa57e --- /dev/null +++ b/streaming/src/main/scala/spark/stream/WordCount2_Special.scala @@ -0,0 +1,94 @@ +package spark.stream + +import spark.SparkContext +import SparkContext._ +import SparkStreamContext._ + +import spark.storage.StorageLevel + +import scala.util.Sorting +import scala.collection.mutable.ArrayBuffer +import scala.collection.mutable.HashMap +import scala.collection.mutable.Queue +import scala.collection.JavaConversions.mapAsScalaMap + +import java.lang.{Long => JLong} +import it.unimi.dsi.fastutil.objects.{Object2LongOpenHashMap => OLMap} + + +object WordCount2_ExtraFunctions { + + def add(v1: JLong, v2: JLong) = (v1 + v2) + + def subtract(v1: JLong, v2: JLong) = (v1 - v2) + + def splitAndCountPartitions(iter: Iterator[String]): Iterator[(String, JLong)] = { + val map = new java.util.HashMap[String, JLong] + var i = 0 + var j = 0 + while (iter.hasNext) { + val s = iter.next() + i = 0 + while (i < s.length) { + j = i + while (j < s.length && s.charAt(j) != ' ') { + j += 1 + } + if (j > i) { + val w = s.substring(i, j) + val c = map.get(w) + if (c == null) { + map.put(w, 1) + } else { + map.put(w, c + 1) + } + } + i = j + while (i < s.length && s.charAt(i) == ' ') { + i += 1 + } + } + } + map.toIterator + } +} + +object WordCount2_Special { + + def moreWarmup(sc: SparkContext) { + (0 until 40).foreach {i => + sc.parallelize(1 to 20000000, 1000) + .map(_ % 1331).map(_.toString) + .mapPartitions(WordCount2_ExtraFunctions.splitAndCountPartitions).reduceByKey(_ + _, 10) + 
.collect() + } + } + + def main (args: Array[String]) { + + if (args.length < 2) { + println ("Usage: SparkStreamContext <# sentence streams>") + System.exit(1) + } + + val ssc = new SparkStreamContext(args(0), "WordCount2") + + val numSentenceStreams = if (args.length > 1) args(1).toInt else 1 + + GrepCount2.warmConnectionManagers(ssc.sc) + /*moreWarmup(ssc.sc)*/ + + val sentences = new UnifiedRDS( + (1 to numSentenceStreams).map(i => ssc.readTestStream("Sentences-" + i, 500)).toArray + ) + + val windowedCounts = sentences + .mapPartitions(WordCount2_ExtraFunctions.splitAndCountPartitions) + .reduceByKeyAndWindow(WordCount2_ExtraFunctions.add _, WordCount2_ExtraFunctions.subtract _, Seconds(10), Milliseconds(500), 10) + windowedCounts.persist(StorageLevel.MEMORY_ONLY, StorageLevel.MEMORY_ONLY, Milliseconds(500)) + windowedCounts.foreachRDD(_.collect) + + ssc.run + } +} + diff --git a/streaming/src/main/scala/spark/stream/WordCount3.scala b/streaming/src/main/scala/spark/stream/WordCount3.scala new file mode 100644 index 0000000000..455a8c9dbf --- /dev/null +++ b/streaming/src/main/scala/spark/stream/WordCount3.scala @@ -0,0 +1,49 @@ +package spark.stream + +import SparkStreamContext._ + +import scala.util.Sorting + +object WordCount3 { + + def main (args: Array[String]) { + + if (args.length < 1) { + println ("Usage: SparkStreamContext []") + System.exit(1) + } + + val ssc = new SparkStreamContext(args(0), "WordCount") + if (args.length > 1) { + ssc.setTempDir(args(1)) + } + val sentences = ssc.readNetworkStream[String]("Sentences", Array("localhost:55119"), 1000) + /*sentences.print*/ + + val words = sentences.flatMap(_.split(" ")) + + def add(v1: Int, v2: Int) = (v1 + v2) + def subtract(v1: Int, v2: Int) = (v1 - v2) + + /*val windowedCounts = words.map(x => (x, 1)).window(Seconds(5), Seconds(1)).reduceByKey(add _, 1)*/ + val windowedCounts = words.map(x => (x, 1)).reduceByKeyAndWindow(add _, subtract _, Seconds(5), Seconds(1), 1) + /*windowedCounts.print */ + + def topK(data: Seq[(String, Int)], k: Int): Array[(String, Int)] = { + implicit val countOrdering = new Ordering[(String, Int)] { + override def compare(count1: (String, Int), count2: (String, Int)): Int = { + count2._2 - count1._2 + } + } + val array = data.toArray + Sorting.quickSort(array) + array.take(k) + } + + val k = 10 + val topKWindowedCounts = windowedCounts.glom.flatMap(topK(_, k)).collect.flatMap(topK(_, k)) + topKWindowedCounts.print + + ssc.run + } +} diff --git a/streaming/src/main/scala/spark/stream/WordCountEc2.scala b/streaming/src/main/scala/spark/stream/WordCountEc2.scala new file mode 100644 index 0000000000..5b10026d7a --- /dev/null +++ b/streaming/src/main/scala/spark/stream/WordCountEc2.scala @@ -0,0 +1,41 @@ +package spark.stream + +import SparkStreamContext._ +import spark.SparkContext + +object WordCountEc2 { + var inputFile : String = null + var HDFS : String = null + var idealPartitions : Int = 0 + + def main (args: Array[String]) { + + if (args.length != 4) { + println ("Usage: SparkStreamContext ") + System.exit(1) + } + + HDFS = args(1) + inputFile = HDFS + args(2) + idealPartitions = args(3).toInt + println ("Input file: " + inputFile) + + SparkContext.idealPartitions = idealPartitions + SparkContext.inputFile = inputFile + + val ssc = new SparkStreamContext(args(0), "Test") + val sentences = ssc.readNetworkStream[String]("Sentences", Array("localhost:55119"), 1000) + /*sentences.foreach(println)*/ + + val words = sentences.flatMap(_.split(" ")) + /*words.foreach(println)*/ + + val counts = 
words.map(x => (x, 1)).reduceByKey(_ + _) + /*counts.foreach(println)*/ + + counts.foreachRDD(rdd => rdd.collect.foreach(x => x)) + /*counts.register*/ + + ssc.run + } +} diff --git a/streaming/src/main/scala/spark/stream/WordCountTrivialWindow.scala b/streaming/src/main/scala/spark/stream/WordCountTrivialWindow.scala new file mode 100644 index 0000000000..5469df71e9 --- /dev/null +++ b/streaming/src/main/scala/spark/stream/WordCountTrivialWindow.scala @@ -0,0 +1,51 @@ +package spark.stream + +import SparkStreamContext._ + +import scala.util.Sorting + +object WordCountTrivialWindow { + + def main (args: Array[String]) { + + if (args.length < 1) { + println ("Usage: SparkStreamContext []") + System.exit(1) + } + + val ssc = new SparkStreamContext(args(0), "WordCountTrivialWindow") + if (args.length > 1) { + ssc.setTempDir(args(1)) + } + val sentences = ssc.readNetworkStream[String]("Sentences", Array("localhost:55119"), 1000) + /*sentences.print*/ + + val words = sentences.flatMap(_.split(" ")) + + /*val counts = words.map(x => (x, 1)).reduceByKey(_ + _, 1)*/ + /*counts.print*/ + + def add(v1: Int, v2: Int) = (v1 + v2) + def subtract(v1: Int, v2: Int) = (v1 - v2) + + val windowedCounts = words.map(x => (x, 1)).window(Seconds(5), Seconds(1)).reduceByKey(add _, 1) + /*windowedCounts.print */ + + def topK(data: Seq[(String, Int)], k: Int): Array[(String, Int)] = { + implicit val countOrdering = new Ordering[(String, Int)] { + override def compare(count1: (String, Int), count2: (String, Int)): Int = { + count2._2 - count1._2 + } + } + val array = data.toArray + Sorting.quickSort(array) + array.take(k) + } + + val k = 10 + val topKWindowedCounts = windowedCounts.glom.flatMap(topK(_, k)).collect.flatMap(topK(_, k)) + topKWindowedCounts.print + + ssc.run + } +} diff --git a/streaming/src/main/scala/spark/stream/WordMax.scala b/streaming/src/main/scala/spark/stream/WordMax.scala new file mode 100644 index 0000000000..fc075e6d9d --- /dev/null +++ b/streaming/src/main/scala/spark/stream/WordMax.scala @@ -0,0 +1,64 @@ +package spark.stream + +import SparkStreamContext._ + +import scala.util.Sorting + +import spark.SparkContext +import spark.storage.StorageLevel + +object WordMax { + var inputFile : String = null + var HDFS : String = null + var idealPartitions : Int = 0 + + def main (args: Array[String]) { + + if (args.length != 4) { + println ("Usage: WordCount ") + System.exit(1) + } + + HDFS = args(1) + inputFile = HDFS + args(2) + idealPartitions = args(3).toInt + println ("Input file: " + inputFile) + + val ssc = new SparkStreamContext(args(0), "WordCountWindow") + + SparkContext.idealPartitions = idealPartitions + SparkContext.inputFile = inputFile + + val sentences = ssc.readNetworkStream[String]("Sentences", Array("localhost:55119"), 2000) + //sentences.print + + val words = sentences.flatMap(_.split(" ")) + + def add(v1: Int, v2: Int) = (v1 + v2) + def subtract(v1: Int, v2: Int) = (v1 - v2) + def max(v1: Int, v2: Int) = (if (v1 > v2) v1 else v2) + + //val windowedCounts = words.map(x => (x, 1)).reduceByKeyAndWindow(add _, subtract _, Seconds(30), Milliseconds(2000), + // System.getProperty("spark.default.parallelism", "1").toInt) + //windowedCounts.persist(StorageLevel.MEMORY_ONLY_DESER, StorageLevel.DISK_AND_MEMORY_DESER_2, Seconds(5)) + //windowedCounts.print + + val parallelism = System.getProperty("spark.default.parallelism", "1").toInt + + val localCounts = words.map(x => (x, 1)).reduceByKey(add _, parallelism) + //localCounts.persist(StorageLevel.MEMORY_ONLY_DESER) + 
localCounts.persist(StorageLevel.MEMORY_ONLY_DESER_2) + val windowedCounts = localCounts.window(Seconds(30), Seconds(2)).reduceByKey(max _) + + //val windowedCounts = words.map(x => (x, 1)).reduceByKeyAndWindow(add _, subtract _, Seconds(30), Seconds(2), + // parallelism) + //windowedCounts.persist(StorageLevel.MEMORY_ONLY_DESER, StorageLevel.MEMORY_ONLY_DESER_2, Seconds(6)) + + //windowedCounts.print + windowedCounts.register + //windowedCounts.foreachRDD(rdd => rdd.collect.foreach(x => print(x+ " "))) + //windowedCounts.foreachRDD(rdd => rdd.collect.foreach(x => x)) + + ssc.run + } +} From fcee4153b92bdd66dd92820a2670b339f9f59c77 Mon Sep 17 00:00:00 2001 From: Matei Zaharia Date: Sun, 29 Jul 2012 13:35:22 -0700 Subject: [PATCH 002/226] Renamed stream package to streaming --- startTrigger.sh | 2 +- .../main/scala/spark/{stream => streaming}/BlockID.scala | 2 +- .../spark/{stream => streaming}/ConnectionHandler.scala | 2 +- .../{stream => streaming}/DumbTopKWordCount2_Special.scala | 2 +- .../{stream => streaming}/DumbWordCount2_Special.scala | 2 +- .../spark/{stream => streaming}/FileStreamReceiver.scala | 2 +- .../main/scala/spark/{stream => streaming}/GrepCount.scala | 2 +- .../main/scala/spark/{stream => streaming}/GrepCount2.scala | 2 +- .../scala/spark/{stream => streaming}/GrepCountApprox.scala | 2 +- .../spark/{stream => streaming}/IdealPerformance.scala | 2 +- .../main/scala/spark/{stream => streaming}/Interval.scala | 2 +- .../src/main/scala/spark/{stream => streaming}/Job.scala | 2 +- .../main/scala/spark/{stream => streaming}/JobManager.scala | 2 +- .../scala/spark/{stream => streaming}/JobManager2.scala | 2 +- .../spark/{stream => streaming}/NetworkStreamReceiver.scala | 2 +- .../src/main/scala/spark/{stream => streaming}/RDS.scala | 6 +++--- .../spark/{stream => streaming}/ReducedWindowedRDS.scala | 4 ++-- .../main/scala/spark/{stream => streaming}/Scheduler.scala | 2 +- .../SenGeneratorForPerformanceTest.scala | 2 +- .../spark/{stream => streaming}/SenderReceiverTest.scala | 2 +- .../spark/{stream => streaming}/SentenceFileGenerator.scala | 2 +- .../spark/{stream => streaming}/SentenceGenerator.scala | 2 +- .../scala/spark/{stream => streaming}/ShuffleTest.scala | 2 +- .../scala/spark/{stream => streaming}/SimpleWordCount.scala | 2 +- .../spark/{stream => streaming}/SimpleWordCount2.scala | 2 +- .../{stream => streaming}/SimpleWordCount2_Special.scala | 2 +- .../spark/{stream => streaming}/SparkStreamContext.scala | 2 +- .../scala/spark/{stream => streaming}/TestGenerator.scala | 2 +- .../scala/spark/{stream => streaming}/TestGenerator2.scala | 2 +- .../scala/spark/{stream => streaming}/TestGenerator4.scala | 2 +- .../spark/{stream => streaming}/TestInputBlockTracker.scala | 2 +- .../spark/{stream => streaming}/TestStreamCoordinator.scala | 2 +- .../spark/{stream => streaming}/TestStreamReceiver3.scala | 2 +- .../spark/{stream => streaming}/TestStreamReceiver4.scala | 2 +- .../src/main/scala/spark/{stream => streaming}/Time.scala | 2 +- .../scala/spark/{stream => streaming}/TopContentCount.scala | 2 +- .../scala/spark/{stream => streaming}/TopKWordCount2.scala | 2 +- .../{stream => streaming}/TopKWordCount2_Special.scala | 2 +- .../scala/spark/{stream => streaming}/WindowedRDS.scala | 4 ++-- .../main/scala/spark/{stream => streaming}/WordCount.scala | 2 +- .../main/scala/spark/{stream => streaming}/WordCount1.scala | 2 +- .../main/scala/spark/{stream => streaming}/WordCount2.scala | 2 +- .../spark/{stream => streaming}/WordCount2_Special.scala | 2 +- 
.../main/scala/spark/{stream => streaming}/WordCount3.scala | 2 +- .../scala/spark/{stream => streaming}/WordCountEc2.scala | 2 +- .../{stream => streaming}/WordCountTrivialWindow.scala | 2 +- .../main/scala/spark/{stream => streaming}/WordMax.scala | 2 +- 47 files changed, 51 insertions(+), 51 deletions(-) rename streaming/src/main/scala/spark/{stream => streaming}/BlockID.scala (95%) rename streaming/src/main/scala/spark/{stream => streaming}/ConnectionHandler.scala (99%) rename streaming/src/main/scala/spark/{stream => streaming}/DumbTopKWordCount2_Special.scala (99%) rename streaming/src/main/scala/spark/{stream => streaming}/DumbWordCount2_Special.scala (99%) rename streaming/src/main/scala/spark/{stream => streaming}/FileStreamReceiver.scala (98%) rename streaming/src/main/scala/spark/{stream => streaming}/GrepCount.scala (97%) rename streaming/src/main/scala/spark/{stream => streaming}/GrepCount2.scala (99%) rename streaming/src/main/scala/spark/{stream => streaming}/GrepCountApprox.scala (98%) rename streaming/src/main/scala/spark/{stream => streaming}/IdealPerformance.scala (97%) rename streaming/src/main/scala/spark/{stream => streaming}/Interval.scala (98%) rename streaming/src/main/scala/spark/{stream => streaming}/Job.scala (91%) rename streaming/src/main/scala/spark/{stream => streaming}/JobManager.scala (99%) rename streaming/src/main/scala/spark/{stream => streaming}/JobManager2.scala (97%) rename streaming/src/main/scala/spark/{stream => streaming}/NetworkStreamReceiver.scala (99%) rename streaming/src/main/scala/spark/{stream => streaming}/RDS.scala (99%) rename streaming/src/main/scala/spark/{stream => streaming}/ReducedWindowedRDS.scala (99%) rename streaming/src/main/scala/spark/{stream => streaming}/Scheduler.scala (99%) rename streaming/src/main/scala/spark/{stream => streaming}/SenGeneratorForPerformanceTest.scala (98%) rename streaming/src/main/scala/spark/{stream => streaming}/SenderReceiverTest.scala (98%) rename streaming/src/main/scala/spark/{stream => streaming}/SentenceFileGenerator.scala (99%) rename streaming/src/main/scala/spark/{stream => streaming}/SentenceGenerator.scala (99%) rename streaming/src/main/scala/spark/{stream => streaming}/ShuffleTest.scala (95%) rename streaming/src/main/scala/spark/{stream => streaming}/SimpleWordCount.scala (96%) rename streaming/src/main/scala/spark/{stream => streaming}/SimpleWordCount2.scala (97%) rename streaming/src/main/scala/spark/{stream => streaming}/SimpleWordCount2_Special.scala (98%) rename streaming/src/main/scala/spark/{stream => streaming}/SparkStreamContext.scala (99%) rename streaming/src/main/scala/spark/{stream => streaming}/TestGenerator.scala (99%) rename streaming/src/main/scala/spark/{stream => streaming}/TestGenerator2.scala (99%) rename streaming/src/main/scala/spark/{stream => streaming}/TestGenerator4.scala (99%) rename streaming/src/main/scala/spark/{stream => streaming}/TestInputBlockTracker.scala (98%) rename streaming/src/main/scala/spark/{stream => streaming}/TestStreamCoordinator.scala (97%) rename streaming/src/main/scala/spark/{stream => streaming}/TestStreamReceiver3.scala (99%) rename streaming/src/main/scala/spark/{stream => streaming}/TestStreamReceiver4.scala (99%) rename streaming/src/main/scala/spark/{stream => streaming}/Time.scala (98%) rename streaming/src/main/scala/spark/{stream => streaming}/TopContentCount.scala (99%) rename streaming/src/main/scala/spark/{stream => streaming}/TopKWordCount2.scala (99%) rename streaming/src/main/scala/spark/{stream => 
streaming}/TopKWordCount2_Special.scala (99%) rename streaming/src/main/scala/spark/{stream => streaming}/WindowedRDS.scala (96%) rename streaming/src/main/scala/spark/{stream => streaming}/WordCount.scala (98%) rename streaming/src/main/scala/spark/{stream => streaming}/WordCount1.scala (98%) rename streaming/src/main/scala/spark/{stream => streaming}/WordCount2.scala (98%) rename streaming/src/main/scala/spark/{stream => streaming}/WordCount2_Special.scala (99%) rename streaming/src/main/scala/spark/{stream => streaming}/WordCount3.scala (98%) rename streaming/src/main/scala/spark/{stream => streaming}/WordCountEc2.scala (97%) rename streaming/src/main/scala/spark/{stream => streaming}/WordCountTrivialWindow.scala (98%) rename streaming/src/main/scala/spark/{stream => streaming}/WordMax.scala (98%) diff --git a/startTrigger.sh b/startTrigger.sh index 0afce91a3e..373dbda93e 100755 --- a/startTrigger.sh +++ b/startTrigger.sh @@ -1,3 +1,3 @@ #!/bin/bash -./run spark.stream.SentenceGenerator localhost 7078 sentences.txt 1 +./run spark.streaming.SentenceGenerator localhost 7078 sentences.txt 1 diff --git a/streaming/src/main/scala/spark/stream/BlockID.scala b/streaming/src/main/scala/spark/streaming/BlockID.scala similarity index 95% rename from streaming/src/main/scala/spark/stream/BlockID.scala rename to streaming/src/main/scala/spark/streaming/BlockID.scala index a3fd046c9a..16aacfda18 100644 --- a/streaming/src/main/scala/spark/stream/BlockID.scala +++ b/streaming/src/main/scala/spark/streaming/BlockID.scala @@ -1,4 +1,4 @@ -package spark.stream +package spark.streaming case class BlockID(sRds: String, sInterval: Interval, sPartition: Int) { override def toString : String = ( diff --git a/streaming/src/main/scala/spark/stream/ConnectionHandler.scala b/streaming/src/main/scala/spark/streaming/ConnectionHandler.scala similarity index 99% rename from streaming/src/main/scala/spark/stream/ConnectionHandler.scala rename to streaming/src/main/scala/spark/streaming/ConnectionHandler.scala index 73b82b76b8..a4f454632f 100644 --- a/streaming/src/main/scala/spark/stream/ConnectionHandler.scala +++ b/streaming/src/main/scala/spark/streaming/ConnectionHandler.scala @@ -1,4 +1,4 @@ -package spark.stream +package spark.streaming import spark.Logging diff --git a/streaming/src/main/scala/spark/stream/DumbTopKWordCount2_Special.scala b/streaming/src/main/scala/spark/streaming/DumbTopKWordCount2_Special.scala similarity index 99% rename from streaming/src/main/scala/spark/stream/DumbTopKWordCount2_Special.scala rename to streaming/src/main/scala/spark/streaming/DumbTopKWordCount2_Special.scala index bd43f44b1a..2ca72da79f 100644 --- a/streaming/src/main/scala/spark/stream/DumbTopKWordCount2_Special.scala +++ b/streaming/src/main/scala/spark/streaming/DumbTopKWordCount2_Special.scala @@ -1,4 +1,4 @@ -package spark.stream +package spark.streaming import spark.SparkContext import SparkContext._ diff --git a/streaming/src/main/scala/spark/stream/DumbWordCount2_Special.scala b/streaming/src/main/scala/spark/streaming/DumbWordCount2_Special.scala similarity index 99% rename from streaming/src/main/scala/spark/stream/DumbWordCount2_Special.scala rename to streaming/src/main/scala/spark/streaming/DumbWordCount2_Special.scala index 31d682348a..34e7edfda9 100644 --- a/streaming/src/main/scala/spark/stream/DumbWordCount2_Special.scala +++ b/streaming/src/main/scala/spark/streaming/DumbWordCount2_Special.scala @@ -1,4 +1,4 @@ -package spark.stream +package spark.streaming import spark.SparkContext import 
SparkContext._ diff --git a/streaming/src/main/scala/spark/stream/FileStreamReceiver.scala b/streaming/src/main/scala/spark/streaming/FileStreamReceiver.scala similarity index 98% rename from streaming/src/main/scala/spark/stream/FileStreamReceiver.scala rename to streaming/src/main/scala/spark/streaming/FileStreamReceiver.scala index 026254d6e1..92c7cfe00c 100644 --- a/streaming/src/main/scala/spark/stream/FileStreamReceiver.scala +++ b/streaming/src/main/scala/spark/streaming/FileStreamReceiver.scala @@ -1,4 +1,4 @@ -package spark.stream +package spark.streaming import spark.Logging diff --git a/streaming/src/main/scala/spark/stream/GrepCount.scala b/streaming/src/main/scala/spark/streaming/GrepCount.scala similarity index 97% rename from streaming/src/main/scala/spark/stream/GrepCount.scala rename to streaming/src/main/scala/spark/streaming/GrepCount.scala index 45b90d4837..ec3e70f258 100644 --- a/streaming/src/main/scala/spark/stream/GrepCount.scala +++ b/streaming/src/main/scala/spark/streaming/GrepCount.scala @@ -1,4 +1,4 @@ -package spark.stream +package spark.streaming import SparkStreamContext._ diff --git a/streaming/src/main/scala/spark/stream/GrepCount2.scala b/streaming/src/main/scala/spark/streaming/GrepCount2.scala similarity index 99% rename from streaming/src/main/scala/spark/stream/GrepCount2.scala rename to streaming/src/main/scala/spark/streaming/GrepCount2.scala index 4eb65ba906..27ecced1c0 100644 --- a/streaming/src/main/scala/spark/stream/GrepCount2.scala +++ b/streaming/src/main/scala/spark/streaming/GrepCount2.scala @@ -1,4 +1,4 @@ -package spark.stream +package spark.streaming import SparkStreamContext._ diff --git a/streaming/src/main/scala/spark/stream/GrepCountApprox.scala b/streaming/src/main/scala/spark/streaming/GrepCountApprox.scala similarity index 98% rename from streaming/src/main/scala/spark/stream/GrepCountApprox.scala rename to streaming/src/main/scala/spark/streaming/GrepCountApprox.scala index a4be2cc936..f9674136fe 100644 --- a/streaming/src/main/scala/spark/stream/GrepCountApprox.scala +++ b/streaming/src/main/scala/spark/streaming/GrepCountApprox.scala @@ -1,4 +1,4 @@ -package spark.stream +package spark.streaming import SparkStreamContext._ diff --git a/streaming/src/main/scala/spark/stream/IdealPerformance.scala b/streaming/src/main/scala/spark/streaming/IdealPerformance.scala similarity index 97% rename from streaming/src/main/scala/spark/stream/IdealPerformance.scala rename to streaming/src/main/scala/spark/streaming/IdealPerformance.scala index 589fb2def0..303d4e7ae6 100644 --- a/streaming/src/main/scala/spark/stream/IdealPerformance.scala +++ b/streaming/src/main/scala/spark/streaming/IdealPerformance.scala @@ -1,4 +1,4 @@ -package spark.stream +package spark.streaming import scala.collection.mutable.Map diff --git a/streaming/src/main/scala/spark/stream/Interval.scala b/streaming/src/main/scala/spark/streaming/Interval.scala similarity index 98% rename from streaming/src/main/scala/spark/stream/Interval.scala rename to streaming/src/main/scala/spark/streaming/Interval.scala index 08d0ed95b4..a985f44ba1 100644 --- a/streaming/src/main/scala/spark/stream/Interval.scala +++ b/streaming/src/main/scala/spark/streaming/Interval.scala @@ -1,4 +1,4 @@ -package spark.stream +package spark.streaming case class Interval (val beginTime: Time, val endTime: Time) { diff --git a/streaming/src/main/scala/spark/stream/Job.scala b/streaming/src/main/scala/spark/streaming/Job.scala similarity index 91% rename from 
streaming/src/main/scala/spark/stream/Job.scala rename to streaming/src/main/scala/spark/streaming/Job.scala index bfdd5db645..f7654dff79 100644 --- a/streaming/src/main/scala/spark/stream/Job.scala +++ b/streaming/src/main/scala/spark/streaming/Job.scala @@ -1,4 +1,4 @@ -package spark.stream +package spark.streaming class Job(val time: Time, func: () => _) { val id = Job.getNewId() diff --git a/streaming/src/main/scala/spark/stream/JobManager.scala b/streaming/src/main/scala/spark/streaming/JobManager.scala similarity index 99% rename from streaming/src/main/scala/spark/stream/JobManager.scala rename to streaming/src/main/scala/spark/streaming/JobManager.scala index 5ea80b92aa..45a3971643 100644 --- a/streaming/src/main/scala/spark/stream/JobManager.scala +++ b/streaming/src/main/scala/spark/streaming/JobManager.scala @@ -1,4 +1,4 @@ -package spark.stream +package spark.streaming import spark.SparkEnv import spark.Logging diff --git a/streaming/src/main/scala/spark/stream/JobManager2.scala b/streaming/src/main/scala/spark/streaming/JobManager2.scala similarity index 97% rename from streaming/src/main/scala/spark/stream/JobManager2.scala rename to streaming/src/main/scala/spark/streaming/JobManager2.scala index b69653b9a4..ce0154e19b 100644 --- a/streaming/src/main/scala/spark/stream/JobManager2.scala +++ b/streaming/src/main/scala/spark/streaming/JobManager2.scala @@ -1,4 +1,4 @@ -package spark.stream +package spark.streaming import spark.{Logging, SparkEnv} import java.util.concurrent.Executors diff --git a/streaming/src/main/scala/spark/stream/NetworkStreamReceiver.scala b/streaming/src/main/scala/spark/streaming/NetworkStreamReceiver.scala similarity index 99% rename from streaming/src/main/scala/spark/stream/NetworkStreamReceiver.scala rename to streaming/src/main/scala/spark/streaming/NetworkStreamReceiver.scala index 8be46cc927..efd4689cf0 100644 --- a/streaming/src/main/scala/spark/stream/NetworkStreamReceiver.scala +++ b/streaming/src/main/scala/spark/streaming/NetworkStreamReceiver.scala @@ -1,4 +1,4 @@ -package spark.stream +package spark.streaming import spark.Logging import spark.storage.StorageLevel diff --git a/streaming/src/main/scala/spark/stream/RDS.scala b/streaming/src/main/scala/spark/streaming/RDS.scala similarity index 99% rename from streaming/src/main/scala/spark/stream/RDS.scala rename to streaming/src/main/scala/spark/streaming/RDS.scala index b83181b0d1..c8dd1015ed 100644 --- a/streaming/src/main/scala/spark/stream/RDS.scala +++ b/streaming/src/main/scala/spark/streaming/RDS.scala @@ -1,6 +1,6 @@ -package spark.stream +package spark.streaming -import spark.stream.SparkStreamContext._ +import spark.streaming.SparkStreamContext._ import spark.RDD import spark.BlockRDD @@ -41,7 +41,7 @@ extends Logging with Serializable { @transient private val generatedRDDs = new HashMap[Time, RDD[T]] () // Variable to be set to the first time seen by the RDS (effective time zero) - private[stream] var zeroTime: Time = null + private[streaming] var zeroTime: Time = null // Variable to specify storage level private var storageLevel: StorageLevel = StorageLevel.NONE diff --git a/streaming/src/main/scala/spark/stream/ReducedWindowedRDS.scala b/streaming/src/main/scala/spark/streaming/ReducedWindowedRDS.scala similarity index 99% rename from streaming/src/main/scala/spark/stream/ReducedWindowedRDS.scala rename to streaming/src/main/scala/spark/streaming/ReducedWindowedRDS.scala index d47654ccb9..dd1f474657 100644 --- a/streaming/src/main/scala/spark/stream/ReducedWindowedRDS.scala +++ 
b/streaming/src/main/scala/spark/streaming/ReducedWindowedRDS.scala @@ -1,6 +1,6 @@ -package spark.stream +package spark.streaming -import spark.stream.SparkStreamContext._ +import spark.streaming.SparkStreamContext._ import spark.RDD import spark.UnionRDD diff --git a/streaming/src/main/scala/spark/stream/Scheduler.scala b/streaming/src/main/scala/spark/streaming/Scheduler.scala similarity index 99% rename from streaming/src/main/scala/spark/stream/Scheduler.scala rename to streaming/src/main/scala/spark/streaming/Scheduler.scala index 38946fef11..4137d8f27d 100644 --- a/streaming/src/main/scala/spark/stream/Scheduler.scala +++ b/streaming/src/main/scala/spark/streaming/Scheduler.scala @@ -1,4 +1,4 @@ -package spark.stream +package spark.streaming import spark.SparkEnv import spark.Logging diff --git a/streaming/src/main/scala/spark/stream/SenGeneratorForPerformanceTest.scala b/streaming/src/main/scala/spark/streaming/SenGeneratorForPerformanceTest.scala similarity index 98% rename from streaming/src/main/scala/spark/stream/SenGeneratorForPerformanceTest.scala rename to streaming/src/main/scala/spark/streaming/SenGeneratorForPerformanceTest.scala index 74fd54072f..bb32089ae2 100644 --- a/streaming/src/main/scala/spark/stream/SenGeneratorForPerformanceTest.scala +++ b/streaming/src/main/scala/spark/streaming/SenGeneratorForPerformanceTest.scala @@ -1,4 +1,4 @@ -package spark.stream +package spark.streaming import scala.util.Random import scala.io.Source diff --git a/streaming/src/main/scala/spark/stream/SenderReceiverTest.scala b/streaming/src/main/scala/spark/streaming/SenderReceiverTest.scala similarity index 98% rename from streaming/src/main/scala/spark/stream/SenderReceiverTest.scala rename to streaming/src/main/scala/spark/streaming/SenderReceiverTest.scala index 69879b621c..6af270298a 100644 --- a/streaming/src/main/scala/spark/stream/SenderReceiverTest.scala +++ b/streaming/src/main/scala/spark/streaming/SenderReceiverTest.scala @@ -1,4 +1,4 @@ -package spark.stream +package spark.streaming import java.net.{Socket, ServerSocket} import java.io.{ByteArrayOutputStream, DataOutputStream, DataInputStream, BufferedInputStream} diff --git a/streaming/src/main/scala/spark/stream/SentenceFileGenerator.scala b/streaming/src/main/scala/spark/streaming/SentenceFileGenerator.scala similarity index 99% rename from streaming/src/main/scala/spark/stream/SentenceFileGenerator.scala rename to streaming/src/main/scala/spark/streaming/SentenceFileGenerator.scala index 9aa441d9bb..15858f59e3 100644 --- a/streaming/src/main/scala/spark/stream/SentenceFileGenerator.scala +++ b/streaming/src/main/scala/spark/streaming/SentenceFileGenerator.scala @@ -1,4 +1,4 @@ -package spark.stream +package spark.streaming import spark._ diff --git a/streaming/src/main/scala/spark/stream/SentenceGenerator.scala b/streaming/src/main/scala/spark/streaming/SentenceGenerator.scala similarity index 99% rename from streaming/src/main/scala/spark/stream/SentenceGenerator.scala rename to streaming/src/main/scala/spark/streaming/SentenceGenerator.scala index ef66e66047..a9f124d2d7 100644 --- a/streaming/src/main/scala/spark/stream/SentenceGenerator.scala +++ b/streaming/src/main/scala/spark/streaming/SentenceGenerator.scala @@ -1,4 +1,4 @@ -package spark.stream +package spark.streaming import scala.util.Random import scala.io.Source diff --git a/streaming/src/main/scala/spark/stream/ShuffleTest.scala b/streaming/src/main/scala/spark/streaming/ShuffleTest.scala similarity index 95% rename from 
streaming/src/main/scala/spark/stream/ShuffleTest.scala rename to streaming/src/main/scala/spark/streaming/ShuffleTest.scala index 5ad56f6777..32aa4144a0 100644 --- a/streaming/src/main/scala/spark/stream/ShuffleTest.scala +++ b/streaming/src/main/scala/spark/streaming/ShuffleTest.scala @@ -1,4 +1,4 @@ -package spark.stream +package spark.streaming import spark.SparkContext import SparkContext._ diff --git a/streaming/src/main/scala/spark/stream/SimpleWordCount.scala b/streaming/src/main/scala/spark/streaming/SimpleWordCount.scala similarity index 96% rename from streaming/src/main/scala/spark/stream/SimpleWordCount.scala rename to streaming/src/main/scala/spark/streaming/SimpleWordCount.scala index c53fe35f44..a75ccd3a56 100644 --- a/streaming/src/main/scala/spark/stream/SimpleWordCount.scala +++ b/streaming/src/main/scala/spark/streaming/SimpleWordCount.scala @@ -1,4 +1,4 @@ -package spark.stream +package spark.streaming import SparkStreamContext._ diff --git a/streaming/src/main/scala/spark/stream/SimpleWordCount2.scala b/streaming/src/main/scala/spark/streaming/SimpleWordCount2.scala similarity index 97% rename from streaming/src/main/scala/spark/stream/SimpleWordCount2.scala rename to streaming/src/main/scala/spark/streaming/SimpleWordCount2.scala index 1a2c67cd4d..9672e64b13 100644 --- a/streaming/src/main/scala/spark/stream/SimpleWordCount2.scala +++ b/streaming/src/main/scala/spark/streaming/SimpleWordCount2.scala @@ -1,4 +1,4 @@ -package spark.stream +package spark.streaming import spark.SparkContext import SparkContext._ diff --git a/streaming/src/main/scala/spark/stream/SimpleWordCount2_Special.scala b/streaming/src/main/scala/spark/streaming/SimpleWordCount2_Special.scala similarity index 98% rename from streaming/src/main/scala/spark/stream/SimpleWordCount2_Special.scala rename to streaming/src/main/scala/spark/streaming/SimpleWordCount2_Special.scala index 9003a5dbb3..503033a8e5 100644 --- a/streaming/src/main/scala/spark/stream/SimpleWordCount2_Special.scala +++ b/streaming/src/main/scala/spark/streaming/SimpleWordCount2_Special.scala @@ -1,4 +1,4 @@ -package spark.stream +package spark.streaming import spark.SparkContext import SparkContext._ diff --git a/streaming/src/main/scala/spark/stream/SparkStreamContext.scala b/streaming/src/main/scala/spark/streaming/SparkStreamContext.scala similarity index 99% rename from streaming/src/main/scala/spark/stream/SparkStreamContext.scala rename to streaming/src/main/scala/spark/streaming/SparkStreamContext.scala index 0e65196e46..51f8193740 100644 --- a/streaming/src/main/scala/spark/stream/SparkStreamContext.scala +++ b/streaming/src/main/scala/spark/streaming/SparkStreamContext.scala @@ -1,4 +1,4 @@ -package spark.stream +package spark.streaming import spark.SparkContext import spark.SparkEnv diff --git a/streaming/src/main/scala/spark/stream/TestGenerator.scala b/streaming/src/main/scala/spark/streaming/TestGenerator.scala similarity index 99% rename from streaming/src/main/scala/spark/stream/TestGenerator.scala rename to streaming/src/main/scala/spark/streaming/TestGenerator.scala index 738ce17452..0ff6af61f2 100644 --- a/streaming/src/main/scala/spark/stream/TestGenerator.scala +++ b/streaming/src/main/scala/spark/streaming/TestGenerator.scala @@ -1,4 +1,4 @@ -package spark.stream +package spark.streaming import scala.util.Random import scala.io.Source diff --git a/streaming/src/main/scala/spark/stream/TestGenerator2.scala b/streaming/src/main/scala/spark/streaming/TestGenerator2.scala similarity index 99% rename from 
streaming/src/main/scala/spark/stream/TestGenerator2.scala rename to streaming/src/main/scala/spark/streaming/TestGenerator2.scala index ceb4730e72..00d43604d0 100644 --- a/streaming/src/main/scala/spark/stream/TestGenerator2.scala +++ b/streaming/src/main/scala/spark/streaming/TestGenerator2.scala @@ -1,4 +1,4 @@ -package spark.stream +package spark.streaming import scala.util.Random import scala.io.Source diff --git a/streaming/src/main/scala/spark/stream/TestGenerator4.scala b/streaming/src/main/scala/spark/streaming/TestGenerator4.scala similarity index 99% rename from streaming/src/main/scala/spark/stream/TestGenerator4.scala rename to streaming/src/main/scala/spark/streaming/TestGenerator4.scala index edeb969d7c..93c7f2f440 100644 --- a/streaming/src/main/scala/spark/stream/TestGenerator4.scala +++ b/streaming/src/main/scala/spark/streaming/TestGenerator4.scala @@ -1,4 +1,4 @@ -package spark.stream +package spark.streaming import spark.Logging diff --git a/streaming/src/main/scala/spark/stream/TestInputBlockTracker.scala b/streaming/src/main/scala/spark/streaming/TestInputBlockTracker.scala similarity index 98% rename from streaming/src/main/scala/spark/stream/TestInputBlockTracker.scala rename to streaming/src/main/scala/spark/streaming/TestInputBlockTracker.scala index da3b964407..7e23b7bb82 100644 --- a/streaming/src/main/scala/spark/stream/TestInputBlockTracker.scala +++ b/streaming/src/main/scala/spark/streaming/TestInputBlockTracker.scala @@ -1,4 +1,4 @@ -package spark.stream +package spark.streaming import spark.Logging import scala.collection.mutable.{ArrayBuffer, HashMap} diff --git a/streaming/src/main/scala/spark/stream/TestStreamCoordinator.scala b/streaming/src/main/scala/spark/streaming/TestStreamCoordinator.scala similarity index 97% rename from streaming/src/main/scala/spark/stream/TestStreamCoordinator.scala rename to streaming/src/main/scala/spark/streaming/TestStreamCoordinator.scala index add166fbd9..c658a036f9 100644 --- a/streaming/src/main/scala/spark/stream/TestStreamCoordinator.scala +++ b/streaming/src/main/scala/spark/streaming/TestStreamCoordinator.scala @@ -1,4 +1,4 @@ -package spark.stream +package spark.streaming import spark.Logging diff --git a/streaming/src/main/scala/spark/stream/TestStreamReceiver3.scala b/streaming/src/main/scala/spark/streaming/TestStreamReceiver3.scala similarity index 99% rename from streaming/src/main/scala/spark/stream/TestStreamReceiver3.scala rename to streaming/src/main/scala/spark/streaming/TestStreamReceiver3.scala index 9cc342040b..a7a5635aa5 100644 --- a/streaming/src/main/scala/spark/stream/TestStreamReceiver3.scala +++ b/streaming/src/main/scala/spark/streaming/TestStreamReceiver3.scala @@ -1,4 +1,4 @@ -package spark.stream +package spark.streaming import spark._ import spark.storage._ diff --git a/streaming/src/main/scala/spark/stream/TestStreamReceiver4.scala b/streaming/src/main/scala/spark/streaming/TestStreamReceiver4.scala similarity index 99% rename from streaming/src/main/scala/spark/stream/TestStreamReceiver4.scala rename to streaming/src/main/scala/spark/streaming/TestStreamReceiver4.scala index e7bef75391..2c3f5d1b9d 100644 --- a/streaming/src/main/scala/spark/stream/TestStreamReceiver4.scala +++ b/streaming/src/main/scala/spark/streaming/TestStreamReceiver4.scala @@ -1,4 +1,4 @@ -package spark.stream +package spark.streaming import spark._ import spark.storage._ diff --git a/streaming/src/main/scala/spark/stream/Time.scala b/streaming/src/main/scala/spark/streaming/Time.scala similarity index 98% rename 
from streaming/src/main/scala/spark/stream/Time.scala rename to streaming/src/main/scala/spark/streaming/Time.scala index 25369dfee5..b932fe9258 100644 --- a/streaming/src/main/scala/spark/stream/Time.scala +++ b/streaming/src/main/scala/spark/streaming/Time.scala @@ -1,4 +1,4 @@ -package spark.stream +package spark.streaming abstract case class Time { diff --git a/streaming/src/main/scala/spark/stream/TopContentCount.scala b/streaming/src/main/scala/spark/streaming/TopContentCount.scala similarity index 99% rename from streaming/src/main/scala/spark/stream/TopContentCount.scala rename to streaming/src/main/scala/spark/streaming/TopContentCount.scala index a8cca4e793..031e989c87 100644 --- a/streaming/src/main/scala/spark/stream/TopContentCount.scala +++ b/streaming/src/main/scala/spark/streaming/TopContentCount.scala @@ -1,4 +1,4 @@ -package spark.stream +package spark.streaming import SparkStreamContext._ diff --git a/streaming/src/main/scala/spark/stream/TopKWordCount2.scala b/streaming/src/main/scala/spark/streaming/TopKWordCount2.scala similarity index 99% rename from streaming/src/main/scala/spark/stream/TopKWordCount2.scala rename to streaming/src/main/scala/spark/streaming/TopKWordCount2.scala index 7dd06dd5ee..679ed0a7ef 100644 --- a/streaming/src/main/scala/spark/stream/TopKWordCount2.scala +++ b/streaming/src/main/scala/spark/streaming/TopKWordCount2.scala @@ -1,4 +1,4 @@ -package spark.stream +package spark.streaming import spark.SparkContext import SparkContext._ diff --git a/streaming/src/main/scala/spark/stream/TopKWordCount2_Special.scala b/streaming/src/main/scala/spark/streaming/TopKWordCount2_Special.scala similarity index 99% rename from streaming/src/main/scala/spark/stream/TopKWordCount2_Special.scala rename to streaming/src/main/scala/spark/streaming/TopKWordCount2_Special.scala index e9f3f914ae..c873fbd0f0 100644 --- a/streaming/src/main/scala/spark/stream/TopKWordCount2_Special.scala +++ b/streaming/src/main/scala/spark/streaming/TopKWordCount2_Special.scala @@ -1,4 +1,4 @@ -package spark.stream +package spark.streaming import spark.SparkContext import SparkContext._ diff --git a/streaming/src/main/scala/spark/stream/WindowedRDS.scala b/streaming/src/main/scala/spark/streaming/WindowedRDS.scala similarity index 96% rename from streaming/src/main/scala/spark/stream/WindowedRDS.scala rename to streaming/src/main/scala/spark/streaming/WindowedRDS.scala index a2e7966edb..812a982301 100644 --- a/streaming/src/main/scala/spark/stream/WindowedRDS.scala +++ b/streaming/src/main/scala/spark/streaming/WindowedRDS.scala @@ -1,6 +1,6 @@ -package spark.stream +package spark.streaming -import spark.stream.SparkStreamContext._ +import spark.streaming.SparkStreamContext._ import spark.RDD import spark.UnionRDD diff --git a/streaming/src/main/scala/spark/stream/WordCount.scala b/streaming/src/main/scala/spark/streaming/WordCount.scala similarity index 98% rename from streaming/src/main/scala/spark/stream/WordCount.scala rename to streaming/src/main/scala/spark/streaming/WordCount.scala index af825e46a8..fb5508ffcc 100644 --- a/streaming/src/main/scala/spark/stream/WordCount.scala +++ b/streaming/src/main/scala/spark/streaming/WordCount.scala @@ -1,4 +1,4 @@ -package spark.stream +package spark.streaming import SparkStreamContext._ diff --git a/streaming/src/main/scala/spark/stream/WordCount1.scala b/streaming/src/main/scala/spark/streaming/WordCount1.scala similarity index 98% rename from streaming/src/main/scala/spark/stream/WordCount1.scala rename to 
streaming/src/main/scala/spark/streaming/WordCount1.scala index 501062b18d..42d985920a 100644 --- a/streaming/src/main/scala/spark/stream/WordCount1.scala +++ b/streaming/src/main/scala/spark/streaming/WordCount1.scala @@ -1,4 +1,4 @@ -package spark.stream +package spark.streaming import SparkStreamContext._ diff --git a/streaming/src/main/scala/spark/stream/WordCount2.scala b/streaming/src/main/scala/spark/streaming/WordCount2.scala similarity index 98% rename from streaming/src/main/scala/spark/stream/WordCount2.scala rename to streaming/src/main/scala/spark/streaming/WordCount2.scala index 24324e891a..9168a2fe2f 100644 --- a/streaming/src/main/scala/spark/stream/WordCount2.scala +++ b/streaming/src/main/scala/spark/streaming/WordCount2.scala @@ -1,4 +1,4 @@ -package spark.stream +package spark.streaming import spark.SparkContext import SparkContext._ diff --git a/streaming/src/main/scala/spark/stream/WordCount2_Special.scala b/streaming/src/main/scala/spark/streaming/WordCount2_Special.scala similarity index 99% rename from streaming/src/main/scala/spark/stream/WordCount2_Special.scala rename to streaming/src/main/scala/spark/streaming/WordCount2_Special.scala index c6b1aaa57e..1920915af7 100644 --- a/streaming/src/main/scala/spark/stream/WordCount2_Special.scala +++ b/streaming/src/main/scala/spark/streaming/WordCount2_Special.scala @@ -1,4 +1,4 @@ -package spark.stream +package spark.streaming import spark.SparkContext import SparkContext._ diff --git a/streaming/src/main/scala/spark/stream/WordCount3.scala b/streaming/src/main/scala/spark/streaming/WordCount3.scala similarity index 98% rename from streaming/src/main/scala/spark/stream/WordCount3.scala rename to streaming/src/main/scala/spark/streaming/WordCount3.scala index 455a8c9dbf..018c19a509 100644 --- a/streaming/src/main/scala/spark/stream/WordCount3.scala +++ b/streaming/src/main/scala/spark/streaming/WordCount3.scala @@ -1,4 +1,4 @@ -package spark.stream +package spark.streaming import SparkStreamContext._ diff --git a/streaming/src/main/scala/spark/stream/WordCountEc2.scala b/streaming/src/main/scala/spark/streaming/WordCountEc2.scala similarity index 97% rename from streaming/src/main/scala/spark/stream/WordCountEc2.scala rename to streaming/src/main/scala/spark/streaming/WordCountEc2.scala index 5b10026d7a..82b9fa781d 100644 --- a/streaming/src/main/scala/spark/stream/WordCountEc2.scala +++ b/streaming/src/main/scala/spark/streaming/WordCountEc2.scala @@ -1,4 +1,4 @@ -package spark.stream +package spark.streaming import SparkStreamContext._ import spark.SparkContext diff --git a/streaming/src/main/scala/spark/stream/WordCountTrivialWindow.scala b/streaming/src/main/scala/spark/streaming/WordCountTrivialWindow.scala similarity index 98% rename from streaming/src/main/scala/spark/stream/WordCountTrivialWindow.scala rename to streaming/src/main/scala/spark/streaming/WordCountTrivialWindow.scala index 5469df71e9..114dd144f1 100644 --- a/streaming/src/main/scala/spark/stream/WordCountTrivialWindow.scala +++ b/streaming/src/main/scala/spark/streaming/WordCountTrivialWindow.scala @@ -1,4 +1,4 @@ -package spark.stream +package spark.streaming import SparkStreamContext._ diff --git a/streaming/src/main/scala/spark/stream/WordMax.scala b/streaming/src/main/scala/spark/streaming/WordMax.scala similarity index 98% rename from streaming/src/main/scala/spark/stream/WordMax.scala rename to streaming/src/main/scala/spark/streaming/WordMax.scala index fc075e6d9d..fbfc48030f 100644 --- a/streaming/src/main/scala/spark/stream/WordMax.scala 
+++ b/streaming/src/main/scala/spark/streaming/WordMax.scala @@ -1,4 +1,4 @@ -package spark.stream +package spark.streaming import SparkStreamContext._ From 5a26ca4a80a428eb4d7e3407ca496e39ad38c757 Mon Sep 17 00:00:00 2001 From: Tathagata Das Date: Mon, 30 Jul 2012 13:29:13 -0700 Subject: [PATCH 003/226] Restructured file locations to separate examples and other programs from core programs. --- .../main/scala/spark/streaming/Interval.scala | 20 --- .../scala/spark/streaming/JobManager.scala | 123 ++++-------------- .../scala/spark/streaming/JobManager2.scala | 37 ------ .../scala/spark/streaming/Scheduler.scala | 2 +- .../DumbTopKWordCount2_Special.scala | 0 .../DumbWordCount2_Special.scala | 0 .../streaming/{ => examples}/GrepCount.scala | 0 .../streaming/{ => examples}/GrepCount2.scala | 0 .../{ => examples}/GrepCountApprox.scala | 0 .../{ => examples}/SimpleWordCount.scala | 0 .../{ => examples}/SimpleWordCount2.scala | 0 .../SimpleWordCount2_Special.scala | 0 .../{ => examples}/TopContentCount.scala | 0 .../{ => examples}/TopKWordCount2.scala | 0 .../TopKWordCount2_Special.scala | 0 .../streaming/{ => examples}/WordCount.scala | 0 .../streaming/{ => examples}/WordCount1.scala | 0 .../streaming/{ => examples}/WordCount2.scala | 0 .../{ => examples}/WordCount2_Special.scala | 0 .../streaming/{ => examples}/WordCount3.scala | 0 .../{ => examples}/WordCountEc2.scala | 0 .../WordCountTrivialWindow.scala | 0 .../streaming/{ => examples}/WordMax.scala | 0 .../SenGeneratorForPerformanceTest.scala | 0 .../{ => utils}/SenderReceiverTest.scala | 0 .../{ => utils}/SentenceFileGenerator.scala | 0 .../{ => utils}/SentenceGenerator.scala | 0 .../streaming/{ => utils}/ShuffleTest.scala | 0 28 files changed, 25 insertions(+), 157 deletions(-) delete mode 100644 streaming/src/main/scala/spark/streaming/JobManager2.scala rename streaming/src/main/scala/spark/streaming/{ => examples}/DumbTopKWordCount2_Special.scala (100%) rename streaming/src/main/scala/spark/streaming/{ => examples}/DumbWordCount2_Special.scala (100%) rename streaming/src/main/scala/spark/streaming/{ => examples}/GrepCount.scala (100%) rename streaming/src/main/scala/spark/streaming/{ => examples}/GrepCount2.scala (100%) rename streaming/src/main/scala/spark/streaming/{ => examples}/GrepCountApprox.scala (100%) rename streaming/src/main/scala/spark/streaming/{ => examples}/SimpleWordCount.scala (100%) rename streaming/src/main/scala/spark/streaming/{ => examples}/SimpleWordCount2.scala (100%) rename streaming/src/main/scala/spark/streaming/{ => examples}/SimpleWordCount2_Special.scala (100%) rename streaming/src/main/scala/spark/streaming/{ => examples}/TopContentCount.scala (100%) rename streaming/src/main/scala/spark/streaming/{ => examples}/TopKWordCount2.scala (100%) rename streaming/src/main/scala/spark/streaming/{ => examples}/TopKWordCount2_Special.scala (100%) rename streaming/src/main/scala/spark/streaming/{ => examples}/WordCount.scala (100%) rename streaming/src/main/scala/spark/streaming/{ => examples}/WordCount1.scala (100%) rename streaming/src/main/scala/spark/streaming/{ => examples}/WordCount2.scala (100%) rename streaming/src/main/scala/spark/streaming/{ => examples}/WordCount2_Special.scala (100%) rename streaming/src/main/scala/spark/streaming/{ => examples}/WordCount3.scala (100%) rename streaming/src/main/scala/spark/streaming/{ => examples}/WordCountEc2.scala (100%) rename streaming/src/main/scala/spark/streaming/{ => examples}/WordCountTrivialWindow.scala (100%) rename streaming/src/main/scala/spark/streaming/{ 
=> examples}/WordMax.scala (100%) rename streaming/src/main/scala/spark/streaming/{ => utils}/SenGeneratorForPerformanceTest.scala (100%) rename streaming/src/main/scala/spark/streaming/{ => utils}/SenderReceiverTest.scala (100%) rename streaming/src/main/scala/spark/streaming/{ => utils}/SentenceFileGenerator.scala (100%) rename streaming/src/main/scala/spark/streaming/{ => utils}/SentenceGenerator.scala (100%) rename streaming/src/main/scala/spark/streaming/{ => utils}/ShuffleTest.scala (100%) diff --git a/streaming/src/main/scala/spark/streaming/Interval.scala b/streaming/src/main/scala/spark/streaming/Interval.scala index a985f44ba1..9a61d85274 100644 --- a/streaming/src/main/scala/spark/streaming/Interval.scala +++ b/streaming/src/main/scala/spark/streaming/Interval.scala @@ -42,26 +42,6 @@ case class Interval (val beginTime: Time, val endTime: Time) { object Interval { - /* - implicit def longTupleToInterval (longTuple: (Long, Long)) = - Interval(longTuple._1, longTuple._2) - - implicit def intTupleToInterval (intTuple: (Int, Int)) = - Interval(intTuple._1, intTuple._2) - - implicit def string2Interval (str: String): Interval = { - val parts = str.split(",") - if (parts.length == 1) - return Interval.zero - return Interval (parts(0).toInt, parts(1).toInt) - } - - def getInterval (timeMs: Long, intervalDurationMs: Long): Interval = { - val intervalBeginMs = timeMs / intervalDurationMs * intervalDurationMs - Interval(intervalBeginMs, intervalBeginMs + intervalDurationMs) - } - */ - def zero() = new Interval (Time.zero, Time.zero) def currentInterval(intervalDuration: LongTime): Interval = { diff --git a/streaming/src/main/scala/spark/streaming/JobManager.scala b/streaming/src/main/scala/spark/streaming/JobManager.scala index 45a3971643..d7d88a7000 100644 --- a/streaming/src/main/scala/spark/streaming/JobManager.scala +++ b/streaming/src/main/scala/spark/streaming/JobManager.scala @@ -1,112 +1,37 @@ package spark.streaming -import spark.SparkEnv -import spark.Logging +import spark.{Logging, SparkEnv} +import java.util.concurrent.Executors -import scala.collection.mutable.PriorityQueue -import scala.actors._ -import scala.actors.Actor._ -import scala.actors.remote._ -import scala.actors.remote.RemoteActor._ -import scala.actors.scheduler.ResizableThreadPoolScheduler -import scala.actors.scheduler.ForkJoinScheduler -sealed trait JobManagerMessage -case class RunJob(job: Job) extends JobManagerMessage -case class JobCompleted(handlerId: Int) extends JobManagerMessage - -class JobHandler(ssc: SparkStreamContext, val id: Int) extends DaemonActor with Logging { - - var busy = false - - def act() { - loop { - receive { - case job: Job => { - SparkEnv.set(ssc.env) - try { - logInfo("Starting " + job) - job.run() - logInfo("Finished " + job) - if (job.time.isInstanceOf[LongTime]) { - val longTime = job.time.asInstanceOf[LongTime] - logInfo("Total pushing + skew + processing delay for " + longTime + " is " + - (System.currentTimeMillis - longTime.milliseconds) / 1000.0 + " s") - } - } catch { - case e: Exception => logError("SparkStream job failed", e) +class JobManager(ssc: SparkStreamContext, numThreads: Int = 1) extends Logging { + + class JobHandler(ssc: SparkStreamContext, job: Job) extends Runnable { + def run() { + SparkEnv.set(ssc.env) + try { + logInfo("Starting " + job) + job.run() + logInfo("Finished " + job) + if (job.time.isInstanceOf[LongTime]) { + val longTime = job.time.asInstanceOf[LongTime] + logInfo("Total notification + skew + processing delay for " + longTime + " is " + + 
(System.currentTimeMillis - longTime.milliseconds) / 1000.0 + " s") + if (System.getProperty("spark.stream.distributed", "false") == "true") { + TestInputBlockTracker.setEndTime(job.time) } - busy = false - reply(JobCompleted(id)) } - } - } - } -} - -class JobManager(ssc: SparkStreamContext, numThreads: Int = 2) extends DaemonActor with Logging { - - implicit private val jobOrdering = new Ordering[Job] { - override def compare(job1: Job, job2: Job): Int = { - if (job1.time < job2.time) { - return 1 - } else if (job2.time < job1.time) { - return -1 - } else { - return 0 + } catch { + case e: Exception => logError("SparkStream job failed", e) } } } - private val jobs = new PriorityQueue[Job]() - private val handlers = (0 until numThreads).map(i => new JobHandler(ssc, i)) + initLogging() - def act() { - handlers.foreach(_.start) - loop { - receive { - case RunJob(job) => { - jobs += job - logInfo("Job " + job + " submitted") - runJob() - } - case JobCompleted(handlerId) => { - runJob() - } - } - } - } - - def runJob(): Unit = { - logInfo("Attempting to allocate job ") - if (jobs.size > 0) { - handlers.find(!_.busy).foreach(handler => { - val job = jobs.dequeue - logInfo("Allocating job " + job + " to handler " + handler.id) - handler.busy = true - handler ! job - }) - } + val jobExecutor = Executors.newFixedThreadPool(numThreads) + + def runJob(job: Job) { + jobExecutor.execute(new JobHandler(ssc, job)) } } - -object JobManager { - def main(args: Array[String]) { - val ssc = new SparkStreamContext("local[4]", "JobManagerTest") - val jobManager = new JobManager(ssc) - jobManager.start() - - val t = System.currentTimeMillis - for (i <- 1 to 10) { - jobManager ! RunJob(new Job( - LongTime(i), - () => { - Thread.sleep(500) - println("Job " + i + " took " + (System.currentTimeMillis - t) + " ms") - } - )) - } - Thread.sleep(6000) - } -} - diff --git a/streaming/src/main/scala/spark/streaming/JobManager2.scala b/streaming/src/main/scala/spark/streaming/JobManager2.scala deleted file mode 100644 index ce0154e19b..0000000000 --- a/streaming/src/main/scala/spark/streaming/JobManager2.scala +++ /dev/null @@ -1,37 +0,0 @@ -package spark.streaming - -import spark.{Logging, SparkEnv} -import java.util.concurrent.Executors - - -class JobManager2(ssc: SparkStreamContext, numThreads: Int = 1) extends Logging { - - class JobHandler(ssc: SparkStreamContext, job: Job) extends Runnable { - def run() { - SparkEnv.set(ssc.env) - try { - logInfo("Starting " + job) - job.run() - logInfo("Finished " + job) - if (job.time.isInstanceOf[LongTime]) { - val longTime = job.time.asInstanceOf[LongTime] - logInfo("Total notification + skew + processing delay for " + longTime + " is " + - (System.currentTimeMillis - longTime.milliseconds) / 1000.0 + " s") - if (System.getProperty("spark.stream.distributed", "false") == "true") { - TestInputBlockTracker.setEndTime(job.time) - } - } - } catch { - case e: Exception => logError("SparkStream job failed", e) - } - } - } - - initLogging() - - val jobExecutor = Executors.newFixedThreadPool(numThreads) - - def runJob(job: Job) { - jobExecutor.execute(new JobHandler(ssc, job)) - } -} diff --git a/streaming/src/main/scala/spark/streaming/Scheduler.scala b/streaming/src/main/scala/spark/streaming/Scheduler.scala index 4137d8f27d..8df346559c 100644 --- a/streaming/src/main/scala/spark/streaming/Scheduler.scala +++ b/streaming/src/main/scala/spark/streaming/Scheduler.scala @@ -46,7 +46,7 @@ extends Actor with Logging { val inputNames = inputRDSs.map(_.inputName).toArray val inputStates = 
new HashMap[Interval, InputState]() val currentJobs = System.getProperty("spark.stream.currentJobs", "1").toInt - val jobManager = new JobManager2(ssc, currentJobs) + val jobManager = new JobManager(ssc, currentJobs) // TODO(Haoyuan): The following line is for performance test only. var cnt: Int = System.getProperty("spark.stream.fake.cnt", "60").toInt diff --git a/streaming/src/main/scala/spark/streaming/DumbTopKWordCount2_Special.scala b/streaming/src/main/scala/spark/streaming/examples/DumbTopKWordCount2_Special.scala similarity index 100% rename from streaming/src/main/scala/spark/streaming/DumbTopKWordCount2_Special.scala rename to streaming/src/main/scala/spark/streaming/examples/DumbTopKWordCount2_Special.scala diff --git a/streaming/src/main/scala/spark/streaming/DumbWordCount2_Special.scala b/streaming/src/main/scala/spark/streaming/examples/DumbWordCount2_Special.scala similarity index 100% rename from streaming/src/main/scala/spark/streaming/DumbWordCount2_Special.scala rename to streaming/src/main/scala/spark/streaming/examples/DumbWordCount2_Special.scala diff --git a/streaming/src/main/scala/spark/streaming/GrepCount.scala b/streaming/src/main/scala/spark/streaming/examples/GrepCount.scala similarity index 100% rename from streaming/src/main/scala/spark/streaming/GrepCount.scala rename to streaming/src/main/scala/spark/streaming/examples/GrepCount.scala diff --git a/streaming/src/main/scala/spark/streaming/GrepCount2.scala b/streaming/src/main/scala/spark/streaming/examples/GrepCount2.scala similarity index 100% rename from streaming/src/main/scala/spark/streaming/GrepCount2.scala rename to streaming/src/main/scala/spark/streaming/examples/GrepCount2.scala diff --git a/streaming/src/main/scala/spark/streaming/GrepCountApprox.scala b/streaming/src/main/scala/spark/streaming/examples/GrepCountApprox.scala similarity index 100% rename from streaming/src/main/scala/spark/streaming/GrepCountApprox.scala rename to streaming/src/main/scala/spark/streaming/examples/GrepCountApprox.scala diff --git a/streaming/src/main/scala/spark/streaming/SimpleWordCount.scala b/streaming/src/main/scala/spark/streaming/examples/SimpleWordCount.scala similarity index 100% rename from streaming/src/main/scala/spark/streaming/SimpleWordCount.scala rename to streaming/src/main/scala/spark/streaming/examples/SimpleWordCount.scala diff --git a/streaming/src/main/scala/spark/streaming/SimpleWordCount2.scala b/streaming/src/main/scala/spark/streaming/examples/SimpleWordCount2.scala similarity index 100% rename from streaming/src/main/scala/spark/streaming/SimpleWordCount2.scala rename to streaming/src/main/scala/spark/streaming/examples/SimpleWordCount2.scala diff --git a/streaming/src/main/scala/spark/streaming/SimpleWordCount2_Special.scala b/streaming/src/main/scala/spark/streaming/examples/SimpleWordCount2_Special.scala similarity index 100% rename from streaming/src/main/scala/spark/streaming/SimpleWordCount2_Special.scala rename to streaming/src/main/scala/spark/streaming/examples/SimpleWordCount2_Special.scala diff --git a/streaming/src/main/scala/spark/streaming/TopContentCount.scala b/streaming/src/main/scala/spark/streaming/examples/TopContentCount.scala similarity index 100% rename from streaming/src/main/scala/spark/streaming/TopContentCount.scala rename to streaming/src/main/scala/spark/streaming/examples/TopContentCount.scala diff --git a/streaming/src/main/scala/spark/streaming/TopKWordCount2.scala b/streaming/src/main/scala/spark/streaming/examples/TopKWordCount2.scala similarity index 100% 
rename from streaming/src/main/scala/spark/streaming/TopKWordCount2.scala rename to streaming/src/main/scala/spark/streaming/examples/TopKWordCount2.scala diff --git a/streaming/src/main/scala/spark/streaming/TopKWordCount2_Special.scala b/streaming/src/main/scala/spark/streaming/examples/TopKWordCount2_Special.scala similarity index 100% rename from streaming/src/main/scala/spark/streaming/TopKWordCount2_Special.scala rename to streaming/src/main/scala/spark/streaming/examples/TopKWordCount2_Special.scala diff --git a/streaming/src/main/scala/spark/streaming/WordCount.scala b/streaming/src/main/scala/spark/streaming/examples/WordCount.scala similarity index 100% rename from streaming/src/main/scala/spark/streaming/WordCount.scala rename to streaming/src/main/scala/spark/streaming/examples/WordCount.scala diff --git a/streaming/src/main/scala/spark/streaming/WordCount1.scala b/streaming/src/main/scala/spark/streaming/examples/WordCount1.scala similarity index 100% rename from streaming/src/main/scala/spark/streaming/WordCount1.scala rename to streaming/src/main/scala/spark/streaming/examples/WordCount1.scala diff --git a/streaming/src/main/scala/spark/streaming/WordCount2.scala b/streaming/src/main/scala/spark/streaming/examples/WordCount2.scala similarity index 100% rename from streaming/src/main/scala/spark/streaming/WordCount2.scala rename to streaming/src/main/scala/spark/streaming/examples/WordCount2.scala diff --git a/streaming/src/main/scala/spark/streaming/WordCount2_Special.scala b/streaming/src/main/scala/spark/streaming/examples/WordCount2_Special.scala similarity index 100% rename from streaming/src/main/scala/spark/streaming/WordCount2_Special.scala rename to streaming/src/main/scala/spark/streaming/examples/WordCount2_Special.scala diff --git a/streaming/src/main/scala/spark/streaming/WordCount3.scala b/streaming/src/main/scala/spark/streaming/examples/WordCount3.scala similarity index 100% rename from streaming/src/main/scala/spark/streaming/WordCount3.scala rename to streaming/src/main/scala/spark/streaming/examples/WordCount3.scala diff --git a/streaming/src/main/scala/spark/streaming/WordCountEc2.scala b/streaming/src/main/scala/spark/streaming/examples/WordCountEc2.scala similarity index 100% rename from streaming/src/main/scala/spark/streaming/WordCountEc2.scala rename to streaming/src/main/scala/spark/streaming/examples/WordCountEc2.scala diff --git a/streaming/src/main/scala/spark/streaming/WordCountTrivialWindow.scala b/streaming/src/main/scala/spark/streaming/examples/WordCountTrivialWindow.scala similarity index 100% rename from streaming/src/main/scala/spark/streaming/WordCountTrivialWindow.scala rename to streaming/src/main/scala/spark/streaming/examples/WordCountTrivialWindow.scala diff --git a/streaming/src/main/scala/spark/streaming/WordMax.scala b/streaming/src/main/scala/spark/streaming/examples/WordMax.scala similarity index 100% rename from streaming/src/main/scala/spark/streaming/WordMax.scala rename to streaming/src/main/scala/spark/streaming/examples/WordMax.scala diff --git a/streaming/src/main/scala/spark/streaming/SenGeneratorForPerformanceTest.scala b/streaming/src/main/scala/spark/streaming/utils/SenGeneratorForPerformanceTest.scala similarity index 100% rename from streaming/src/main/scala/spark/streaming/SenGeneratorForPerformanceTest.scala rename to streaming/src/main/scala/spark/streaming/utils/SenGeneratorForPerformanceTest.scala diff --git a/streaming/src/main/scala/spark/streaming/SenderReceiverTest.scala 
b/streaming/src/main/scala/spark/streaming/utils/SenderReceiverTest.scala similarity index 100% rename from streaming/src/main/scala/spark/streaming/SenderReceiverTest.scala rename to streaming/src/main/scala/spark/streaming/utils/SenderReceiverTest.scala diff --git a/streaming/src/main/scala/spark/streaming/SentenceFileGenerator.scala b/streaming/src/main/scala/spark/streaming/utils/SentenceFileGenerator.scala similarity index 100% rename from streaming/src/main/scala/spark/streaming/SentenceFileGenerator.scala rename to streaming/src/main/scala/spark/streaming/utils/SentenceFileGenerator.scala diff --git a/streaming/src/main/scala/spark/streaming/SentenceGenerator.scala b/streaming/src/main/scala/spark/streaming/utils/SentenceGenerator.scala similarity index 100% rename from streaming/src/main/scala/spark/streaming/SentenceGenerator.scala rename to streaming/src/main/scala/spark/streaming/utils/SentenceGenerator.scala diff --git a/streaming/src/main/scala/spark/streaming/ShuffleTest.scala b/streaming/src/main/scala/spark/streaming/utils/ShuffleTest.scala similarity index 100% rename from streaming/src/main/scala/spark/streaming/ShuffleTest.scala rename to streaming/src/main/scala/spark/streaming/utils/ShuffleTest.scala From 3be54c2a8afcb2a3abf1cf22934123fae3419278 Mon Sep 17 00:00:00 2001 From: Tathagata Das Date: Wed, 1 Aug 2012 22:09:27 -0700 Subject: [PATCH 004/226] 1. Refactored SparkStreamContext, Scheduler, InputRDS, FileInputRDS and a few other files. 2. Modified Time class to represent milliseconds (long) directly, instead of LongTime. 3. Added new files QueueInputRDS, RecurringTimer, etc. 4. Added RDDSuite as the skeleton for testcases. 5. Added two examples in spark.streaming.examples. 6. Removed all past examples and a few unnecessary files. Moved a number of files to spark.streaming.util. 
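Note on point 2: the reworked Time is used throughout the diffs below as a plain wrapper around a millisecond count, e.g. Time(System.currentTimeMillis).floor(intervalDuration) to align a timestamp to the start of its batch interval. The sketch below is illustrative only and assumes nothing beyond the operations visible in this patch (apply, +, -, <, floor, zero); the hypothetical name SketchTime is used to make clear this is not the actual spark.streaming.Time, whose real definition appears later in the patch.

    // Illustrative sketch only -- a hypothetical SketchTime, not spark.streaming.Time.
    // It models a time value as a raw millisecond count with the operations used in this patch.
    case class SketchTime(millis: Long) {
      def +(that: SketchTime) = SketchTime(millis + that.millis)
      def -(that: SketchTime) = SketchTime(millis - that.millis)
      def <(that: SketchTime) = millis < that.millis
      // Round down to the nearest multiple of `that`, e.g. the beginning of the current batch.
      def floor(that: SketchTime) = SketchTime((millis / that.millis) * that.millis)
    }

    object SketchTimeExample {
      val zero = SketchTime(0)

      def main(args: Array[String]) {
        // Compute the interval containing "now" for a 10 second batch duration.
        val batchDuration = SketchTime(10000)
        val begin = SketchTime(System.currentTimeMillis).floor(batchDuration)
        val interval = (begin, begin + batchDuration)
        println(interval)
      }
    }

This mirrors how Interval.currentInterval and the scheduler use the new Time in the hunks below; it is a sketch of the shape of the change, not the implementation.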
--- core/src/main/scala/spark/Utils.scala | 2 +- .../main/scala/spark/streaming/BlockID.scala | 20 -- .../scala/spark/streaming/FileInputRDS.scala | 163 ++++++++++ .../spark/streaming/FileStreamReceiver.scala | 70 ---- .../spark/streaming/IdealPerformance.scala | 36 -- .../main/scala/spark/streaming/Interval.scala | 6 +- .../src/main/scala/spark/streaming/Job.scala | 9 +- .../scala/spark/streaming/JobManager.scala | 23 +- .../streaming/NetworkStreamReceiver.scala | 184 ----------- .../spark/streaming/PairRDSFunctions.scala | 72 ++++ .../scala/spark/streaming/QueueInputRDS.scala | 36 ++ .../src/main/scala/spark/streaming/RDS.scala | 307 ++++++------------ .../scala/spark/streaming/Scheduler.scala | 171 ++-------- .../spark/streaming/SparkStreamContext.scala | 148 ++++++--- .../streaming/TestInputBlockTracker.scala | 42 --- .../spark/streaming/TestStreamReceiver3.scala | 18 +- .../spark/streaming/TestStreamReceiver4.scala | 16 +- .../src/main/scala/spark/streaming/Time.scala | 104 +++--- .../examples/DumbTopKWordCount2_Special.scala | 138 -------- .../examples/DumbWordCount2_Special.scala | 92 ------ .../spark/streaming/examples/ExampleOne.scala | 41 +++ .../spark/streaming/examples/ExampleTwo.scala | 47 +++ .../spark/streaming/examples/GrepCount.scala | 39 --- .../spark/streaming/examples/GrepCount2.scala | 113 ------- .../streaming/examples/GrepCountApprox.scala | 54 --- .../streaming/examples/SimpleWordCount.scala | 30 -- .../streaming/examples/SimpleWordCount2.scala | 51 --- .../examples/SimpleWordCount2_Special.scala | 83 ----- .../streaming/examples/TopContentCount.scala | 97 ------ .../streaming/examples/TopKWordCount2.scala | 103 ------ .../examples/TopKWordCount2_Special.scala | 142 -------- .../spark/streaming/examples/WordCount.scala | 62 ---- .../spark/streaming/examples/WordCount1.scala | 46 --- .../spark/streaming/examples/WordCount2.scala | 55 ---- .../examples/WordCount2_Special.scala | 94 ------ .../spark/streaming/examples/WordCount3.scala | 49 --- .../streaming/examples/WordCountEc2.scala | 41 --- .../examples/WordCountTrivialWindow.scala | 51 --- .../spark/streaming/examples/WordMax.scala | 64 ---- .../spark/streaming/util/RecurringTimer.scala | 52 +++ .../{utils => util}/SenderReceiverTest.scala | 3 +- .../SentenceFileGenerator.scala | 2 +- .../{utils => util}/ShuffleTest.scala | 3 +- .../scala/spark/streaming/util/Utils.scala | 9 + .../SenGeneratorForPerformanceTest.scala | 78 ----- .../streaming/utils/SentenceGenerator.scala | 103 ------ .../test/scala/spark/streaming/RDSSuite.scala | 65 ++++ 47 files changed, 799 insertions(+), 2435 deletions(-) delete mode 100644 streaming/src/main/scala/spark/streaming/BlockID.scala create mode 100644 streaming/src/main/scala/spark/streaming/FileInputRDS.scala delete mode 100644 streaming/src/main/scala/spark/streaming/FileStreamReceiver.scala delete mode 100644 streaming/src/main/scala/spark/streaming/IdealPerformance.scala delete mode 100644 streaming/src/main/scala/spark/streaming/NetworkStreamReceiver.scala create mode 100644 streaming/src/main/scala/spark/streaming/PairRDSFunctions.scala create mode 100644 streaming/src/main/scala/spark/streaming/QueueInputRDS.scala delete mode 100644 streaming/src/main/scala/spark/streaming/TestInputBlockTracker.scala delete mode 100644 streaming/src/main/scala/spark/streaming/examples/DumbTopKWordCount2_Special.scala delete mode 100644 streaming/src/main/scala/spark/streaming/examples/DumbWordCount2_Special.scala create mode 100644 
streaming/src/main/scala/spark/streaming/examples/ExampleOne.scala create mode 100644 streaming/src/main/scala/spark/streaming/examples/ExampleTwo.scala delete mode 100644 streaming/src/main/scala/spark/streaming/examples/GrepCount.scala delete mode 100644 streaming/src/main/scala/spark/streaming/examples/GrepCount2.scala delete mode 100644 streaming/src/main/scala/spark/streaming/examples/GrepCountApprox.scala delete mode 100644 streaming/src/main/scala/spark/streaming/examples/SimpleWordCount.scala delete mode 100644 streaming/src/main/scala/spark/streaming/examples/SimpleWordCount2.scala delete mode 100644 streaming/src/main/scala/spark/streaming/examples/SimpleWordCount2_Special.scala delete mode 100644 streaming/src/main/scala/spark/streaming/examples/TopContentCount.scala delete mode 100644 streaming/src/main/scala/spark/streaming/examples/TopKWordCount2.scala delete mode 100644 streaming/src/main/scala/spark/streaming/examples/TopKWordCount2_Special.scala delete mode 100644 streaming/src/main/scala/spark/streaming/examples/WordCount.scala delete mode 100644 streaming/src/main/scala/spark/streaming/examples/WordCount1.scala delete mode 100644 streaming/src/main/scala/spark/streaming/examples/WordCount2.scala delete mode 100644 streaming/src/main/scala/spark/streaming/examples/WordCount2_Special.scala delete mode 100644 streaming/src/main/scala/spark/streaming/examples/WordCount3.scala delete mode 100644 streaming/src/main/scala/spark/streaming/examples/WordCountEc2.scala delete mode 100644 streaming/src/main/scala/spark/streaming/examples/WordCountTrivialWindow.scala delete mode 100644 streaming/src/main/scala/spark/streaming/examples/WordMax.scala create mode 100644 streaming/src/main/scala/spark/streaming/util/RecurringTimer.scala rename streaming/src/main/scala/spark/streaming/{utils => util}/SenderReceiverTest.scala (98%) rename streaming/src/main/scala/spark/streaming/{utils => util}/SentenceFileGenerator.scala (98%) rename streaming/src/main/scala/spark/streaming/{utils => util}/ShuffleTest.scala (94%) create mode 100644 streaming/src/main/scala/spark/streaming/util/Utils.scala delete mode 100644 streaming/src/main/scala/spark/streaming/utils/SenGeneratorForPerformanceTest.scala delete mode 100644 streaming/src/main/scala/spark/streaming/utils/SentenceGenerator.scala create mode 100644 streaming/src/test/scala/spark/streaming/RDSSuite.scala diff --git a/core/src/main/scala/spark/Utils.scala b/core/src/main/scala/spark/Utils.scala index 5eda1011f9..1d33f7d6b3 100644 --- a/core/src/main/scala/spark/Utils.scala +++ b/core/src/main/scala/spark/Utils.scala @@ -185,7 +185,7 @@ object Utils { * millisecond. 
*/ def getUsedTimeMs(startTimeMs: Long): String = { - return " " + (System.currentTimeMillis - startTimeMs) + " ms " + return " " + (System.currentTimeMillis - startTimeMs) + " ms" } /** diff --git a/streaming/src/main/scala/spark/streaming/BlockID.scala b/streaming/src/main/scala/spark/streaming/BlockID.scala deleted file mode 100644 index 16aacfda18..0000000000 --- a/streaming/src/main/scala/spark/streaming/BlockID.scala +++ /dev/null @@ -1,20 +0,0 @@ -package spark.streaming - -case class BlockID(sRds: String, sInterval: Interval, sPartition: Int) { - override def toString : String = ( - sRds + BlockID.sConnector + - sInterval.beginTime + BlockID.sConnector + - sInterval.endTime + BlockID.sConnector + - sPartition - ) -} - -object BlockID { - val sConnector = '-' - - def parse(name : String) = BlockID( - name.split(BlockID.sConnector)(0), - new Interval(name.split(BlockID.sConnector)(1).toLong, - name.split(BlockID.sConnector)(2).toLong), - name.split(BlockID.sConnector)(3).toInt) -} \ No newline at end of file diff --git a/streaming/src/main/scala/spark/streaming/FileInputRDS.scala b/streaming/src/main/scala/spark/streaming/FileInputRDS.scala new file mode 100644 index 0000000000..dde80cd27a --- /dev/null +++ b/streaming/src/main/scala/spark/streaming/FileInputRDS.scala @@ -0,0 +1,163 @@ +package spark.streaming + +import spark.SparkContext +import spark.RDD +import spark.BlockRDD +import spark.UnionRDD +import spark.storage.StorageLevel +import spark.streaming._ + +import scala.collection.mutable.ArrayBuffer +import scala.collection.mutable.HashMap + +import java.net.InetSocketAddress + +import org.apache.hadoop.fs.Path +import org.apache.hadoop.fs.PathFilter +import org.apache.hadoop.conf.Configuration +import org.apache.hadoop.mapreduce.{InputFormat => NewInputFormat} + + +class FileInputRDS[K: ClassManifest, V: ClassManifest, F <: NewInputFormat[K,V] : ClassManifest]( + ssc: SparkStreamContext, + directory: Path, + filter: PathFilter = FileInputRDS.defaultPathFilter, + newFilesOnly: Boolean = true) + extends InputRDS[(K, V)](ssc) { + + val fs = directory.getFileSystem(new Configuration()) + var lastModTime: Long = 0 + + override def start() { + if (newFilesOnly) { + lastModTime = System.currentTimeMillis() + } else { + lastModTime = 0 + } + } + + override def stop() { } + + override def compute(validTime: Time): Option[RDD[(K, V)]] = { + val newFilter = new PathFilter() { + var latestModTime = 0L + + def accept(path: Path): Boolean = { + + if (!filter.accept(path)) { + return false + } else { + val modTime = fs.getFileStatus(path).getModificationTime() + if (modTime < lastModTime) { + return false + } + if (modTime > latestModTime) { + latestModTime = modTime + } + return true + } + } + } + + val newFiles = fs.listStatus(directory, newFilter) + lastModTime = newFilter.latestModTime + val newRDD = new UnionRDD(ssc.sc, newFiles.map(file => + ssc.sc.newAPIHadoopFile[K, V, F](file.getPath.toString)) + ) + Some(newRDD) + } +} + +object FileInputRDS { + val defaultPathFilter = new PathFilter { + def accept(path: Path): Boolean = { + val file = path.getName() + if (file.startsWith(".") || file.endsWith("_tmp")) { + return false + } else { + return true + } + } + } +} + +/* +class NetworkInputRDS[T: ClassManifest]( + val networkInputName: String, + val addresses: Array[InetSocketAddress], + batchDuration: Time, + ssc: SparkStreamContext) +extends InputRDS[T](networkInputName, batchDuration, ssc) { + + + // TODO(Haoyuan): This is for the performance test. 
+ @transient var rdd: RDD[T] = null + + if (System.getProperty("spark.fake", "false") == "true") { + logInfo("Running initial count to cache fake RDD") + rdd = ssc.sc.textFile(SparkContext.inputFile, + SparkContext.idealPartitions).asInstanceOf[RDD[T]] + val fakeCacheLevel = System.getProperty("spark.fake.cache", "") + if (fakeCacheLevel == "MEMORY_ONLY_2") { + rdd.persist(StorageLevel.MEMORY_ONLY_2) + } else if (fakeCacheLevel == "MEMORY_ONLY_DESER_2") { + rdd.persist(StorageLevel.MEMORY_ONLY_2) + } else if (fakeCacheLevel != "") { + logError("Invalid fake cache level: " + fakeCacheLevel) + System.exit(1) + } + rdd.count() + } + + @transient val references = new HashMap[Time,String] + + override def compute(validTime: Time): Option[RDD[T]] = { + if (System.getProperty("spark.fake", "false") == "true") { + logInfo("Returning fake RDD at " + validTime) + return Some(rdd) + } + references.get(validTime) match { + case Some(reference) => + if (reference.startsWith("file") || reference.startsWith("hdfs")) { + logInfo("Reading from file " + reference + " for time " + validTime) + Some(ssc.sc.textFile(reference).asInstanceOf[RDD[T]]) + } else { + logInfo("Getting from BlockManager " + reference + " for time " + validTime) + Some(new BlockRDD(ssc.sc, Array(reference))) + } + case None => + throw new Exception(this.toString + ": Reference missing for time " + validTime + "!!!") + None + } + } + + def setReference(time: Time, reference: AnyRef) { + references += ((time, reference.toString)) + logInfo("Reference added for time " + time + " - " + reference.toString) + } +} + + +class TestInputRDS( + val testInputName: String, + batchDuration: Time, + ssc: SparkStreamContext) +extends InputRDS[String](testInputName, batchDuration, ssc) { + + @transient val references = new HashMap[Time,Array[String]] + + override def compute(validTime: Time): Option[RDD[String]] = { + references.get(validTime) match { + case Some(reference) => + Some(new BlockRDD[String](ssc.sc, reference)) + case None => + throw new Exception(this.toString + ": Reference missing for time " + validTime + "!!!") + None + } + } + + def setReference(time: Time, reference: AnyRef) { + references += ((time, reference.asInstanceOf[Array[String]])) + } +} +*/ diff --git a/streaming/src/main/scala/spark/streaming/FileStreamReceiver.scala b/streaming/src/main/scala/spark/streaming/FileStreamReceiver.scala deleted file mode 100644 index 92c7cfe00c..0000000000 --- a/streaming/src/main/scala/spark/streaming/FileStreamReceiver.scala +++ /dev/null @@ -1,70 +0,0 @@ -package spark.streaming - -import spark.Logging - -import scala.collection.mutable.HashSet -import scala.actors._ -import scala.actors.Actor._ -import scala.actors.remote._ -import scala.actors.remote.RemoteActor._ - -import org.apache.hadoop.fs._ -import org.apache.hadoop.conf._ -import org.apache.hadoop.io._ -import org.apache.hadoop.mapred._ -import org.apache.hadoop.util._ - -class FileStreamReceiver ( - inputName: String, - rootDirectory: String, - intervalDuration: Long) - extends Logging { - - val pollInterval = 100 - val sparkstreamScheduler = { - val host = System.getProperty("spark.master.host") - val port = System.getProperty("spark.master.port").toInt + 1 - RemoteActor.select(Node(host, port), 'SparkStreamScheduler) - } - val directory = new Path(rootDirectory) - val fs = directory.getFileSystem(new Configuration()) - val files = new HashSet[String]() - var time: Long = 0 - - def start() { - fs.mkdirs(directory) - files ++= getFiles() - - actor { - logInfo("Monitoring 
directory - " + rootDirectory) - while(true) { - testFiles(getFiles()) - Thread.sleep(pollInterval) - } - } - } - - def getFiles(): Iterable[String] = { - fs.listStatus(directory).map(_.getPath.toString) - } - - def testFiles(fileList: Iterable[String]) { - fileList.foreach(file => { - if (!files.contains(file)) { - if (!file.endsWith("_tmp")) { - notifyFile(file) - } - files += file - } - }) - } - - def notifyFile(file: String) { - logInfo("Notifying file " + file) - time += intervalDuration - val interval = Interval(LongTime(time), LongTime(time + intervalDuration)) - sparkstreamScheduler ! InputGenerated(inputName, interval, file) - } -} - - diff --git a/streaming/src/main/scala/spark/streaming/IdealPerformance.scala b/streaming/src/main/scala/spark/streaming/IdealPerformance.scala deleted file mode 100644 index 303d4e7ae6..0000000000 --- a/streaming/src/main/scala/spark/streaming/IdealPerformance.scala +++ /dev/null @@ -1,36 +0,0 @@ -package spark.streaming - -import scala.collection.mutable.Map - -object IdealPerformance { - val base: String = "The medium researcher counts around the pinched troop The empire breaks " + - "Matei Matei announces HY with a theorem " - - def main (args: Array[String]) { - val sentences: String = base * 100000 - - for (i <- 1 to 30) { - val start = System.nanoTime - - val words = sentences.split(" ") - - val pairs = words.map(word => (word, 1)) - - val counts = Map[String, Int]() - - println("Job " + i + " position A at " + (System.nanoTime - start) / 1e9) - - pairs.foreach((pair) => { - var t = counts.getOrElse(pair._1, 0) - counts(pair._1) = t + pair._2 - }) - println("Job " + i + " position B at " + (System.nanoTime - start) / 1e9) - - for ((word, count) <- counts) { - print(word + " " + count + "; ") - } - println - println("Job " + i + " finished in " + (System.nanoTime - start) / 1e9) - } - } -} \ No newline at end of file diff --git a/streaming/src/main/scala/spark/streaming/Interval.scala b/streaming/src/main/scala/spark/streaming/Interval.scala index 9a61d85274..1960097216 100644 --- a/streaming/src/main/scala/spark/streaming/Interval.scala +++ b/streaming/src/main/scala/spark/streaming/Interval.scala @@ -2,7 +2,7 @@ package spark.streaming case class Interval (val beginTime: Time, val endTime: Time) { - def this(beginMs: Long, endMs: Long) = this(new LongTime(beginMs), new LongTime(endMs)) + def this(beginMs: Long, endMs: Long) = this(Time(beginMs), new Time(endMs)) def duration(): Time = endTime - beginTime @@ -44,8 +44,8 @@ object Interval { def zero() = new Interval (Time.zero, Time.zero) - def currentInterval(intervalDuration: LongTime): Interval = { - val time = LongTime(System.currentTimeMillis) + def currentInterval(intervalDuration: Time): Interval = { + val time = Time(System.currentTimeMillis) val intervalBegin = time.floor(intervalDuration) Interval(intervalBegin, intervalBegin + intervalDuration) } diff --git a/streaming/src/main/scala/spark/streaming/Job.scala b/streaming/src/main/scala/spark/streaming/Job.scala index f7654dff79..36958dafe1 100644 --- a/streaming/src/main/scala/spark/streaming/Job.scala +++ b/streaming/src/main/scala/spark/streaming/Job.scala @@ -1,13 +1,14 @@ package spark.streaming +import spark.streaming.util.Utils + class Job(val time: Time, func: () => _) { val id = Job.getNewId() - - def run() { - func() + def run(): Long = { + Utils.time { func() } } - override def toString = "SparkStream Job " + id + ":" + time + override def toString = "streaming job " + id + " @ " + time } object Job { diff --git 
a/streaming/src/main/scala/spark/streaming/JobManager.scala b/streaming/src/main/scala/spark/streaming/JobManager.scala index d7d88a7000..43d167f7db 100644 --- a/streaming/src/main/scala/spark/streaming/JobManager.scala +++ b/streaming/src/main/scala/spark/streaming/JobManager.scala @@ -1,6 +1,7 @@ package spark.streaming -import spark.{Logging, SparkEnv} +import spark.Logging +import spark.SparkEnv import java.util.concurrent.Executors @@ -10,19 +11,14 @@ class JobManager(ssc: SparkStreamContext, numThreads: Int = 1) extends Logging { def run() { SparkEnv.set(ssc.env) try { - logInfo("Starting " + job) - job.run() - logInfo("Finished " + job) - if (job.time.isInstanceOf[LongTime]) { - val longTime = job.time.asInstanceOf[LongTime] - logInfo("Total notification + skew + processing delay for " + longTime + " is " + - (System.currentTimeMillis - longTime.milliseconds) / 1000.0 + " s") - if (System.getProperty("spark.stream.distributed", "false") == "true") { - TestInputBlockTracker.setEndTime(job.time) - } - } + val timeTaken = job.run() + logInfo( + "Runnning " + job + " took " + timeTaken + " ms, " + + "total delay was " + (System.currentTimeMillis - job.time) + " ms" + ) } catch { - case e: Exception => logError("SparkStream job failed", e) + case e: Exception => + logError("Running " + job + " failed", e) } } } @@ -33,5 +29,6 @@ class JobManager(ssc: SparkStreamContext, numThreads: Int = 1) extends Logging { def runJob(job: Job) { jobExecutor.execute(new JobHandler(ssc, job)) + logInfo("Added " + job + " to queue") } } diff --git a/streaming/src/main/scala/spark/streaming/NetworkStreamReceiver.scala b/streaming/src/main/scala/spark/streaming/NetworkStreamReceiver.scala deleted file mode 100644 index efd4689cf0..0000000000 --- a/streaming/src/main/scala/spark/streaming/NetworkStreamReceiver.scala +++ /dev/null @@ -1,184 +0,0 @@ -package spark.streaming - -import spark.Logging -import spark.storage.StorageLevel - -import scala.math._ -import scala.collection.mutable.{Queue, HashMap, ArrayBuffer} -import scala.actors._ -import scala.actors.Actor._ -import scala.actors.remote._ -import scala.actors.remote.RemoteActor._ - -import java.io.BufferedWriter -import java.io.OutputStreamWriter - -import org.apache.hadoop.fs._ -import org.apache.hadoop.conf._ -import org.apache.hadoop.io._ -import org.apache.hadoop.mapred._ -import org.apache.hadoop.util._ - -/*import akka.actor.Actor._*/ - -class NetworkStreamReceiver[T: ClassManifest] ( - inputName: String, - intervalDuration: Time, - splitId: Int, - ssc: SparkStreamContext, - tempDirectory: String) - extends DaemonActor - with Logging { - - /** - * Assume all data coming in has non-decreasing timestamp. 
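
// Illustrative sketch only. The JobManager hunk above expects Job.run() to return the
// elapsed wall-clock time of the job body, via spark.streaming.util.Utils.time, which
// this patch references but does not show in this section. The object and names below
// are hypothetical, not the actual Utils implementation:
object TimingSketch {
  def time[A](body: => A): Long = {
    val start = System.currentTimeMillis
    body                                  // run the job body, discard its result
    System.currentTimeMillis - start      // elapsed milliseconds, as logged by JobManager
  }

  def main(args: Array[String]) {
    val elapsed = time { Thread.sleep(50) }
    println("Job body took " + elapsed + " ms")
  }
}
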
- */ - final class Inbox[T: ClassManifest] (intervalDuration: Time) { - var currentBucket: (Interval, ArrayBuffer[T]) = null - val filledBuckets = new Queue[(Interval, ArrayBuffer[T])]() - - def += (tuple: (Time, T)) = addTuple(tuple) - - def addTuple(tuple: (Time, T)) { - val (time, data) = tuple - val interval = getInterval (time) - - filledBuckets.synchronized { - if (currentBucket == null) { - currentBucket = (interval, new ArrayBuffer[T]()) - } - - if (interval != currentBucket._1) { - filledBuckets += currentBucket - currentBucket = (interval, new ArrayBuffer[T]()) - } - - currentBucket._2 += data - } - } - - def getInterval(time: Time): Interval = { - val intervalBegin = time.floor(intervalDuration) - Interval (intervalBegin, intervalBegin + intervalDuration) - } - - def hasFilledBuckets(): Boolean = { - filledBuckets.synchronized { - return filledBuckets.size > 0 - } - } - - def popFilledBucket(): (Interval, ArrayBuffer[T]) = { - filledBuckets.synchronized { - if (filledBuckets.size == 0) { - return null - } - return filledBuckets.dequeue() - } - } - } - - val inbox = new Inbox[T](intervalDuration) - lazy val sparkstreamScheduler = { - val host = System.getProperty("spark.master.host") - val port = System.getProperty("spark.master.port").toInt - val url = "akka://spark@%s:%s/user/SparkStreamScheduler".format(host, port) - ssc.actorSystem.actorFor(url) - } - /*sparkstreamScheduler ! Test()*/ - - val intervalDurationMillis = intervalDuration.asInstanceOf[LongTime].milliseconds - val useBlockManager = true - - initLogging() - - override def act() { - // register the InputReceiver - val port = 7078 - RemoteActor.alive(port) - RemoteActor.register(Symbol("NetworkStreamReceiver-"+inputName), self) - logInfo("Registered actor on port " + port) - - loop { - reactWithin (getSleepTime) { - case TIMEOUT => - flushInbox() - case data => - val t = data.asInstanceOf[T] - inbox += (getTimeFromData(t), t) - } - } - } - - def getSleepTime(): Long = { - (System.currentTimeMillis / intervalDurationMillis + 1) * - intervalDurationMillis - System.currentTimeMillis - } - - def getTimeFromData(data: T): Time = { - LongTime(System.currentTimeMillis) - } - - def flushInbox() { - while (inbox.hasFilledBuckets) { - inbox.synchronized { - val (interval, data) = inbox.popFilledBucket() - val dataArray = data.toArray - logInfo("Received " + dataArray.length + " items at interval " + interval) - val reference = { - if (useBlockManager) { - writeToBlockManager(dataArray, interval) - } else { - writeToDisk(dataArray, interval) - } - } - if (reference != null) { - logInfo("Notifying scheduler") - sparkstreamScheduler ! InputGenerated(inputName, interval, reference.toString) - } - } - } - } - - def writeToDisk(data: Array[T], interval: Interval): String = { - try { - // TODO(Haoyuan): For current test, the following writing to file lines could be - // commented. 
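
// A worked sketch of the interval-bucketing arithmetic used both by the removed
// receiver code above (Inbox.getInterval) and by Interval.currentInterval: a timestamp
// is floored to a multiple of the batch duration, and the enclosing interval is
// [floored, floored + duration). The millisecond values below are made up for illustration.
object IntervalSketch {
  def intervalFor(timeMs: Long, batchMs: Long): (Long, Long) = {
    val begin = (timeMs / batchMs) * batchMs   // floor to the start of the batch
    (begin, begin + batchMs)
  }

  def main(args: Array[String]) {
    println(intervalFor(12345L, 1000L))        // prints (12000,13000)
  }
}
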
- val fs = new Path(tempDirectory).getFileSystem(new Configuration()) - val inputDir = new Path( - tempDirectory, - inputName + "-" + interval.toFormattedString) - val inputFile = new Path(inputDir, "part-" + splitId) - logInfo("Writing to file " + inputFile) - if (System.getProperty("spark.fake", "false") != "true") { - val writer = new BufferedWriter(new OutputStreamWriter(fs.create(inputFile, true))) - data.foreach(x => writer.write(x.toString + "\n")) - writer.close() - } else { - logInfo("Fake file") - } - inputFile.toString - }catch { - case e: Exception => - logError("Exception writing to file at interval " + interval + ": " + e.getMessage, e) - null - } - } - - def writeToBlockManager(data: Array[T], interval: Interval): String = { - try{ - val blockId = inputName + "-" + interval.toFormattedString + "-" + splitId - if (System.getProperty("spark.fake", "false") != "true") { - logInfo("Writing as block " + blockId ) - ssc.env.blockManager.put(blockId.toString, data.toIterator, StorageLevel.DISK_AND_MEMORY) - } else { - logInfo("Fake block") - } - blockId - } catch { - case e: Exception => - logError("Exception writing to block manager at interval " + interval + ": " + e.getMessage, e) - null - } - } -} diff --git a/streaming/src/main/scala/spark/streaming/PairRDSFunctions.scala b/streaming/src/main/scala/spark/streaming/PairRDSFunctions.scala new file mode 100644 index 0000000000..403ae233a5 --- /dev/null +++ b/streaming/src/main/scala/spark/streaming/PairRDSFunctions.scala @@ -0,0 +1,72 @@ +package spark.streaming + +import scala.collection.mutable.ArrayBuffer +import spark.streaming.SparkStreamContext._ + +class PairRDSFunctions[K: ClassManifest, V: ClassManifest](rds: RDS[(K,V)]) +extends Serializable { + + def ssc = rds.ssc + + /* ---------------------------------- */ + /* RDS operations for key-value pairs */ + /* ---------------------------------- */ + + def groupByKey(numPartitions: Int = 0): ShuffledRDS[K, V, ArrayBuffer[V]] = { + def createCombiner(v: V) = ArrayBuffer[V](v) + def mergeValue(c: ArrayBuffer[V], v: V) = (c += v) + def mergeCombiner(c1: ArrayBuffer[V], c2: ArrayBuffer[V]) = (c1 ++ c2) + combineByKey[ArrayBuffer[V]](createCombiner, mergeValue, mergeCombiner, numPartitions) + } + + def reduceByKey(reduceFunc: (V, V) => V, numPartitions: Int = 0): ShuffledRDS[K, V, V] = { + val cleanedReduceFunc = ssc.sc.clean(reduceFunc) + combineByKey[V]((v: V) => v, cleanedReduceFunc, cleanedReduceFunc, numPartitions) + } + + private def combineByKey[C: ClassManifest]( + createCombiner: V => C, + mergeValue: (C, V) => C, + mergeCombiner: (C, C) => C, + numPartitions: Int) : ShuffledRDS[K, V, C] = { + new ShuffledRDS[K, V, C](rds, createCombiner, mergeValue, mergeCombiner, numPartitions) + } + + def groupByKeyAndWindow( + windowTime: Time, + slideTime: Time, + numPartitions: Int = 0): ShuffledRDS[K, V, ArrayBuffer[V]] = { + rds.window(windowTime, slideTime).groupByKey(numPartitions) + } + + def reduceByKeyAndWindow( + reduceFunc: (V, V) => V, + windowTime: Time, + slideTime: Time, + numPartitions: Int = 0): ShuffledRDS[K, V, V] = { + rds.window(windowTime, slideTime).reduceByKey(ssc.sc.clean(reduceFunc), numPartitions) + } + + // This method is the efficient sliding window reduce operation, + // which requires the specification of an inverse reduce function, + // so that new elements introduced in the window can be "added" using + // reduceFunc to the previous window's result and old elements can be + // "subtracted using invReduceFunc. 
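
// For illustration, one way the incremental window reduce described above might be
// invoked from application code. Everything here (the local master, the input
// directory, the stream names) is hypothetical; the only real requirement is that
// invReduceFunc undoes reduceFunc, so sliding the window only touches the batches
// that enter or leave it instead of re-reducing the whole window.
object WindowSketch {
  import spark.streaming._
  import spark.streaming.SparkStreamContext._

  def main(args: Array[String]) {
    val ssc = new SparkStreamContext("local", "WindowSketch")
    ssc.setBatchDuration(Seconds(10))
    val words = ssc.createTextFileStream("/tmp/sentences").flatMap(_.split(" "))
    val wordCounts = words.map(w => (w, 1))
    val windowedCounts = wordCounts.reduceByKeyAndWindow(
      (a: Int, b: Int) => a + b,   // add counts from batches entering the window
      (a: Int, b: Int) => a - b,   // subtract counts from batches leaving the window
      Seconds(30),                 // window length
      Seconds(10),                 // slide interval
      10)                          // number of partitions
    windowedCounts.print()
    ssc.start()
  }
}
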
+ def reduceByKeyAndWindow( + reduceFunc: (V, V) => V, + invReduceFunc: (V, V) => V, + windowTime: Time, + slideTime: Time, + numPartitions: Int): ReducedWindowedRDS[K, V] = { + + new ReducedWindowedRDS[K, V]( + rds, + ssc.sc.clean(reduceFunc), + ssc.sc.clean(invReduceFunc), + windowTime, + slideTime, + numPartitions) + } +} + + diff --git a/streaming/src/main/scala/spark/streaming/QueueInputRDS.scala b/streaming/src/main/scala/spark/streaming/QueueInputRDS.scala new file mode 100644 index 0000000000..31e6a64e21 --- /dev/null +++ b/streaming/src/main/scala/spark/streaming/QueueInputRDS.scala @@ -0,0 +1,36 @@ +package spark.streaming + +import spark.RDD +import spark.UnionRDD + +import scala.collection.mutable.Queue +import scala.collection.mutable.ArrayBuffer + +class QueueInputRDS[T: ClassManifest]( + ssc: SparkStreamContext, + val queue: Queue[RDD[T]], + oneAtATime: Boolean, + defaultRDD: RDD[T] + ) extends InputRDS[T](ssc) { + + override def start() { } + + override def stop() { } + + override def compute(validTime: Time): Option[RDD[T]] = { + val buffer = new ArrayBuffer[RDD[T]]() + if (oneAtATime && queue.size > 0) { + buffer += queue.dequeue() + } else { + buffer ++= queue + } + if (buffer.size > 0) { + Some(new UnionRDD(ssc.sc, buffer.toSeq)) + } else if (defaultRDD != null) { + Some(defaultRDD) + } else { + None + } + } + +} \ No newline at end of file diff --git a/streaming/src/main/scala/spark/streaming/RDS.scala b/streaming/src/main/scala/spark/streaming/RDS.scala index c8dd1015ed..fd923929e7 100644 --- a/streaming/src/main/scala/spark/streaming/RDS.scala +++ b/streaming/src/main/scala/spark/streaming/RDS.scala @@ -13,16 +13,18 @@ import spark.storage.StorageLevel import scala.collection.mutable.ArrayBuffer import scala.collection.mutable.HashMap -import java.net.InetSocketAddress +import java.util.concurrent.ArrayBlockingQueue abstract class RDS[T: ClassManifest] (@transient val ssc: SparkStreamContext) extends Logging with Serializable { initLogging() - /* ---------------------------------------------- */ - /* Methods that must be implemented by subclasses */ - /* ---------------------------------------------- */ + /** + * ---------------------------------------------- + * Methods that must be implemented by subclasses + * ---------------------------------------------- + */ // Time by which the window slides in this RDS def slideTime: Time @@ -33,9 +35,11 @@ extends Logging with Serializable { // Key method that computes RDD for a valid time def compute (validTime: Time): Option[RDD[T]] - /* --------------------------------------- */ - /* Other general fields and methods of RDS */ - /* --------------------------------------- */ + /** + * --------------------------------------- + * Other general fields and methods of RDS + * --------------------------------------- + */ // Variable to store the RDDs generated earlier in time @transient private val generatedRDDs = new HashMap[Time, RDD[T]] () @@ -66,9 +70,9 @@ extends Logging with Serializable { this } + // Set caching level for the RDDs created by this RDS def persist(newLevel: StorageLevel): RDS[T] = persist(newLevel, StorageLevel.NONE, null) - // Turn on the default caching level for this RDD def persist(): RDS[T] = persist(StorageLevel.MEMORY_ONLY_DESER) // Turn on the default caching level for this RDD @@ -76,18 +80,20 @@ extends Logging with Serializable { def isInitialized = (zeroTime != null) - // This method initializes the RDS by setting the "zero" time, based on which - // the validity of future times is calculated. 
This method also recursively initializes - // its parent RDSs. - def initialize(firstInterval: Interval) { + /** + * This method initializes the RDS by setting the "zero" time, based on which + * the validity of future times is calculated. This method also recursively initializes + * its parent RDSs. + */ + def initialize(time: Time) { if (zeroTime == null) { - zeroTime = firstInterval.beginTime + zeroTime = time } logInfo(this + " initialized") - dependencies.foreach(_.initialize(firstInterval)) + dependencies.foreach(_.initialize(zeroTime)) } - // This method checks whether the 'time' is valid wrt slideTime for generating RDD + /** This method checks whether the 'time' is valid wrt slideTime for generating RDD */ private def isTimeValid (time: Time): Boolean = { if (!isInitialized) throw new Exception (this.toString + " has not been initialized") @@ -98,11 +104,13 @@ extends Logging with Serializable { } } - // This method either retrieves a precomputed RDD of this RDS, - // or computes the RDD (if the time is valid) + /** + * This method either retrieves a precomputed RDD of this RDS, + * or computes the RDD (if the time is valid) + */ def getOrCompute(time: Time): Option[RDD[T]] = { - - // if RDD was already generated, then retrieve it from HashMap + // If this RDS was not initialized (i.e., zeroTime not set), then do it + // If RDD was already generated, then retrieve it from HashMap generatedRDDs.get(time) match { // If an RDD was already generated and is being reused, then @@ -115,15 +123,12 @@ extends Logging with Serializable { if (isTimeValid(time)) { compute(time) match { case Some(newRDD) => - if (System.getProperty("spark.fake", "false") != "true" || - newRDD.getStorageLevel == StorageLevel.NONE) { - if (checkpointInterval != null && (time - zeroTime).isMultipleOf(checkpointInterval)) { - newRDD.persist(checkpointLevel) - logInfo("Persisting " + newRDD + " to " + checkpointLevel + " at time " + time) - } else if (storageLevel != StorageLevel.NONE) { - newRDD.persist(storageLevel) - logInfo("Persisting " + newRDD + " to " + storageLevel + " at time " + time) - } + if (checkpointInterval != null && (time - zeroTime).isMultipleOf(checkpointInterval)) { + newRDD.persist(checkpointLevel) + logInfo("Persisting " + newRDD + " to " + checkpointLevel + " at time " + time) + } else if (storageLevel != StorageLevel.NONE) { + newRDD.persist(storageLevel) + logInfo("Persisting " + newRDD + " to " + storageLevel + " at time " + time) } generatedRDDs.put(time.copy(), newRDD) Some(newRDD) @@ -136,8 +141,10 @@ extends Logging with Serializable { } } - // This method generates a SparkStream job for the given time - // and may require to be overriden by subclasses + /** + * This method generates a SparkStream job for the given time + * and may require to be overriden by subclasses + */ def generateJob(time: Time): Option[Job] = { getOrCompute(time) match { case Some(rdd) => { @@ -151,9 +158,11 @@ extends Logging with Serializable { } } - /* -------------- */ - /* RDS operations */ - /* -------------- */ + /** + * -------------- + * RDS operations + * -------------- + */ def map[U: ClassManifest](mapFunc: T => U) = new MappedRDS(this, ssc.sc.clean(mapFunc)) @@ -185,6 +194,15 @@ extends Logging with Serializable { newrds } + private[streaming] def toQueue() = { + val queue = new ArrayBlockingQueue[RDD[T]](10000) + this.foreachRDD(rdd => { + println("Added RDD " + rdd.id) + queue.add(rdd) + }) + queue + } + def print() = { def foreachFunc = (rdd: RDD[T], time: Time) => { val first11 = rdd.take(11) 
@@ -229,198 +247,23 @@ extends Logging with Serializable { } -class PairRDSFunctions[K: ClassManifest, V: ClassManifest](rds: RDS[(K,V)]) -extends Serializable { - - def ssc = rds.ssc - - /* ---------------------------------- */ - /* RDS operations for key-value pairs */ - /* ---------------------------------- */ - - def groupByKey(numPartitions: Int = 0): ShuffledRDS[K, V, ArrayBuffer[V]] = { - def createCombiner(v: V) = ArrayBuffer[V](v) - def mergeValue(c: ArrayBuffer[V], v: V) = (c += v) - def mergeCombiner(c1: ArrayBuffer[V], c2: ArrayBuffer[V]) = (c1 ++ c2) - combineByKey[ArrayBuffer[V]](createCombiner, mergeValue, mergeCombiner, numPartitions) - } - - def reduceByKey(reduceFunc: (V, V) => V, numPartitions: Int = 0): ShuffledRDS[K, V, V] = { - val cleanedReduceFunc = ssc.sc.clean(reduceFunc) - combineByKey[V]((v: V) => v, cleanedReduceFunc, cleanedReduceFunc, numPartitions) - } - - private def combineByKey[C: ClassManifest]( - createCombiner: V => C, - mergeValue: (C, V) => C, - mergeCombiner: (C, C) => C, - numPartitions: Int) : ShuffledRDS[K, V, C] = { - new ShuffledRDS[K, V, C](rds, createCombiner, mergeValue, mergeCombiner, numPartitions) - } - - def groupByKeyAndWindow( - windowTime: Time, - slideTime: Time, - numPartitions: Int = 0): ShuffledRDS[K, V, ArrayBuffer[V]] = { - rds.window(windowTime, slideTime).groupByKey(numPartitions) - } - - def reduceByKeyAndWindow( - reduceFunc: (V, V) => V, - windowTime: Time, - slideTime: Time, - numPartitions: Int = 0): ShuffledRDS[K, V, V] = { - rds.window(windowTime, slideTime).reduceByKey(ssc.sc.clean(reduceFunc), numPartitions) - } - - // This method is the efficient sliding window reduce operation, - // which requires the specification of an inverse reduce function, - // so that new elements introduced in the window can be "added" using - // reduceFunc to the previous window's result and old elements can be - // "subtracted using invReduceFunc. - def reduceByKeyAndWindow( - reduceFunc: (V, V) => V, - invReduceFunc: (V, V) => V, - windowTime: Time, - slideTime: Time, - numPartitions: Int): ReducedWindowedRDS[K, V] = { - - new ReducedWindowedRDS[K, V]( - rds, - ssc.sc.clean(reduceFunc), - ssc.sc.clean(invReduceFunc), - windowTime, - slideTime, - numPartitions) - } -} - - abstract class InputRDS[T: ClassManifest] ( - val inputName: String, - val batchDuration: Time, ssc: SparkStreamContext) extends RDS[T](ssc) { override def dependencies = List() - override def slideTime = batchDuration + override def slideTime = ssc.batchDuration - def setReference(time: Time, reference: AnyRef) + def start() + + def stop() } -class FileInputRDS( - val fileInputName: String, - val directory: String, - ssc: SparkStreamContext) -extends InputRDS[String](fileInputName, LongTime(1000), ssc) { - - @transient val generatedFiles = new HashMap[Time,String] - - // TODO(Haoyuan): This is for the performance test. - @transient - val rdd = ssc.sc.textFile(SparkContext.inputFile, - SparkContext.idealPartitions).asInstanceOf[RDD[String]] - - override def compute(validTime: Time): Option[RDD[String]] = { - generatedFiles.get(validTime) match { - case Some(file) => - logInfo("Reading from file " + file + " for time " + validTime) - // Some(ssc.sc.textFile(file).asInstanceOf[RDD[String]]) - // The following line is for HDFS performance test. Sould comment out the above line. 
- Some(rdd) - case None => - throw new Exception(this.toString + ": Reference missing for time " + validTime + "!!!") - None - } - } - - def setReference(time: Time, reference: AnyRef) { - generatedFiles += ((time, reference.toString)) - logInfo("Reference added for time " + time + " - " + reference.toString) - } -} - -class NetworkInputRDS[T: ClassManifest]( - val networkInputName: String, - val addresses: Array[InetSocketAddress], - batchDuration: Time, - ssc: SparkStreamContext) -extends InputRDS[T](networkInputName, batchDuration, ssc) { - - - // TODO(Haoyuan): This is for the performance test. - @transient var rdd: RDD[T] = null - - if (System.getProperty("spark.fake", "false") == "true") { - logInfo("Running initial count to cache fake RDD") - rdd = ssc.sc.textFile(SparkContext.inputFile, - SparkContext.idealPartitions).asInstanceOf[RDD[T]] - val fakeCacheLevel = System.getProperty("spark.fake.cache", "") - if (fakeCacheLevel == "MEMORY_ONLY_2") { - rdd.persist(StorageLevel.MEMORY_ONLY_2) - } else if (fakeCacheLevel == "MEMORY_ONLY_DESER_2") { - rdd.persist(StorageLevel.MEMORY_ONLY_2) - } else if (fakeCacheLevel != "") { - logError("Invalid fake cache level: " + fakeCacheLevel) - System.exit(1) - } - rdd.count() - } - - @transient val references = new HashMap[Time,String] - - override def compute(validTime: Time): Option[RDD[T]] = { - if (System.getProperty("spark.fake", "false") == "true") { - logInfo("Returning fake RDD at " + validTime) - return Some(rdd) - } - references.get(validTime) match { - case Some(reference) => - if (reference.startsWith("file") || reference.startsWith("hdfs")) { - logInfo("Reading from file " + reference + " for time " + validTime) - Some(ssc.sc.textFile(reference).asInstanceOf[RDD[T]]) - } else { - logInfo("Getting from BlockManager " + reference + " for time " + validTime) - Some(new BlockRDD(ssc.sc, Array(reference))) - } - case None => - throw new Exception(this.toString + ": Reference missing for time " + validTime + "!!!") - None - } - } - - def setReference(time: Time, reference: AnyRef) { - references += ((time, reference.toString)) - logInfo("Reference added for time " + time + " - " + reference.toString) - } -} - - -class TestInputRDS( - val testInputName: String, - batchDuration: Time, - ssc: SparkStreamContext) -extends InputRDS[String](testInputName, batchDuration, ssc) { - - @transient val references = new HashMap[Time,Array[String]] - - override def compute(validTime: Time): Option[RDD[String]] = { - references.get(validTime) match { - case Some(reference) => - Some(new BlockRDD[String](ssc.sc, reference)) - case None => - throw new Exception(this.toString + ": Reference missing for time " + validTime + "!!!") - None - } - } - - def setReference(time: Time, reference: AnyRef) { - references += ((time, reference.asInstanceOf[Array[String]])) - } -} - +/** + * TODO + */ class MappedRDS[T: ClassManifest, U: ClassManifest] ( parent: RDS[T], @@ -437,6 +280,10 @@ extends RDS[U](parent.ssc) { } +/** + * TODO + */ + class FlatMappedRDS[T: ClassManifest, U: ClassManifest]( parent: RDS[T], flatMapFunc: T => Traversable[U]) @@ -452,6 +299,10 @@ extends RDS[U](parent.ssc) { } +/** + * TODO + */ + class FilteredRDS[T: ClassManifest](parent: RDS[T], filterFunc: T => Boolean) extends RDS[T](parent.ssc) { @@ -464,6 +315,11 @@ extends RDS[T](parent.ssc) { } } + +/** + * TODO + */ + class MapPartitionedRDS[T: ClassManifest, U: ClassManifest]( parent: RDS[T], mapPartFunc: Iterator[T] => Iterator[U]) @@ -478,6 +334,11 @@ extends RDS[U](parent.ssc) { } } + 
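
// Each of the derived streams in this file follows the same recipe: declare the parent
// as the only dependency, inherit its slide interval, and define compute() by
// transforming whatever RDD the parent produced for that batch time. A purely
// hypothetical illustration of that pattern (this class is not part of the patch):
import spark.RDD
import spark.streaming.{RDS, Time}

class UppercasedRDS(parent: RDS[String])
extends RDS[String](parent.ssc) {

  override def dependencies = List(parent)

  override def slideTime = parent.slideTime

  override def compute(validTime: Time): Option[RDD[String]] = {
    // Reuse the parent's RDD for this batch, if any, and apply an ordinary
    // Spark transformation to it.
    parent.getOrCompute(validTime).map(_.map(_.toUpperCase))
  }
}
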
+/** + * TODO + */ + class GlommedRDS[T: ClassManifest](parent: RDS[T]) extends RDS[Array[T]](parent.ssc) { override def dependencies = List(parent) @@ -490,6 +351,10 @@ class GlommedRDS[T: ClassManifest](parent: RDS[T]) extends RDS[Array[T]](parent. } +/** + * TODO + */ + class ShuffledRDS[K: ClassManifest, V: ClassManifest, C: ClassManifest]( parent: RDS[(K,V)], createCombiner: V => C, @@ -519,6 +384,10 @@ class ShuffledRDS[K: ClassManifest, V: ClassManifest, C: ClassManifest]( } +/** + * TODO + */ + class UnifiedRDS[T: ClassManifest](parents: Array[RDS[T]]) extends RDS[T](parents(0).ssc) { @@ -553,6 +422,10 @@ extends RDS[T](parents(0).ssc) { } +/** + * TODO + */ + class PerElementForEachRDS[T: ClassManifest] ( parent: RDS[T], foreachFunc: T => Unit) @@ -580,6 +453,10 @@ extends RDS[Unit](parent.ssc) { } +/** + * TODO + */ + class PerRDDForEachRDS[T: ClassManifest] ( parent: RDS[T], foreachFunc: (RDD[T], Time) => Unit) diff --git a/streaming/src/main/scala/spark/streaming/Scheduler.scala b/streaming/src/main/scala/spark/streaming/Scheduler.scala index 8df346559c..83f874e550 100644 --- a/streaming/src/main/scala/spark/streaming/Scheduler.scala +++ b/streaming/src/main/scala/spark/streaming/Scheduler.scala @@ -1,16 +1,11 @@ package spark.streaming +import spark.streaming.util.RecurringTimer import spark.SparkEnv import spark.Logging import scala.collection.mutable.HashMap -import scala.collection.mutable.HashSet -import scala.collection.mutable.ArrayBuffer -import akka.actor._ -import akka.actor.Actor -import akka.actor.Actor._ -import akka.util.duration._ sealed trait SchedulerMessage case class InputGenerated(inputName: String, interval: Interval, reference: AnyRef = null) extends SchedulerMessage @@ -20,162 +15,42 @@ class Scheduler( ssc: SparkStreamContext, inputRDSs: Array[InputRDS[_]], outputRDSs: Array[RDS[_]]) -extends Actor with Logging { - - class InputState (inputNames: Array[String]) { - val inputsLeft = new HashSet[String]() - inputsLeft ++= inputNames - - val startTime = System.currentTimeMillis - - def delay() = System.currentTimeMillis - startTime - - def addGeneratedInput(inputName: String) = inputsLeft -= inputName - - def areAllInputsGenerated() = (inputsLeft.size == 0) - - override def toString(): String = { - val left = if (inputsLeft.size == 0) "" else inputsLeft.reduceLeft(_ + ", " + _) - return "Inputs left = [ " + left + " ]" - } - } - +extends Logging { initLogging() - val inputNames = inputRDSs.map(_.inputName).toArray - val inputStates = new HashMap[Interval, InputState]() - val currentJobs = System.getProperty("spark.stream.currentJobs", "1").toInt - val jobManager = new JobManager(ssc, currentJobs) - - // TODO(Haoyuan): The following line is for performance test only. 
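
// The rewritten Scheduler below drives batch generation with a
// spark.streaming.util.RecurringTimer, which this patch references but does not include
// in this section. A hypothetical stand-in, assuming only that the timer invokes its
// callback once per period and that start() returns the first tick time (which the
// Scheduler then uses as the zero time for its output streams):
import java.util.concurrent.{Executors, TimeUnit}

class RecurringTimerSketch(period: Long, callback: Long => Unit) {
  private val executor = Executors.newSingleThreadScheduledExecutor()

  def start(): Long = {
    val firstTime = (System.currentTimeMillis / period + 1) * period
    var tickTime = firstTime
    executor.scheduleAtFixedRate(new Runnable {
      def run() { callback(tickTime); tickTime += period }
    }, firstTime - System.currentTimeMillis, period, TimeUnit.MILLISECONDS)
    firstTime
  }

  def stop() { executor.shutdown() }
}
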
- var cnt: Int = System.getProperty("spark.stream.fake.cnt", "60").toInt - var lastInterval: Interval = null - - - /*remote.register("SparkStreamScheduler", actorOf[Scheduler])*/ - logInfo("Registered actor on port ") - - /*jobManager.start()*/ - startStreamReceivers() - - def receive = { - case InputGenerated(inputName, interval, reference) => { - addGeneratedInput(inputName, interval, reference) - } - case Test() => logInfo("TEST PASSED") - } - - def addGeneratedInput(inputName: String, interval: Interval, reference: AnyRef = null) { - logInfo("Input " + inputName + " generated for interval " + interval) - inputStates.get(interval) match { - case None => inputStates.put(interval, new InputState(inputNames)) - case _ => - } - inputStates(interval).addGeneratedInput(inputName) + val concurrentJobs = System.getProperty("spark.stream.concurrentJobs", "1").toInt + val jobManager = new JobManager(ssc, concurrentJobs) + val timer = new RecurringTimer(ssc.batchDuration, generateRDDs(_)) - inputRDSs.filter(_.inputName == inputName).foreach(inputRDS => { - inputRDS.setReference(interval.endTime, reference) - if (inputRDS.isInstanceOf[TestInputRDS]) { - TestInputBlockTracker.addBlocks(interval.endTime, reference) - } - } - ) - - def getNextInterval(): Option[Interval] = { - logDebug("Last interval is " + lastInterval) - val readyIntervals = inputStates.filter(_._2.areAllInputsGenerated).keys - /*inputState.foreach(println) */ - logDebug("InputState has " + inputStates.size + " intervals, " + readyIntervals.size + " ready intervals") - return readyIntervals.find(lastInterval == null || _.beginTime == lastInterval.endTime) - } - - var nextInterval = getNextInterval() - var count = 0 - while(nextInterval.isDefined) { - val inputState = inputStates.get(nextInterval.get).get - generateRDDsForInterval(nextInterval.get) - logInfo("Skew delay for " + nextInterval.get.endTime + " is " + (inputState.delay / 1000.0) + " s") - inputStates.remove(nextInterval.get) - lastInterval = nextInterval.get - nextInterval = getNextInterval() - count += 1 - /*if (nextInterval.size == 0 && inputState.size > 0) { - logDebug("Next interval not ready, pending intervals " + inputState.size) - }*/ - } - logDebug("RDDs generated for " + count + " intervals") - - /* - if (inputState(interval).areAllInputsGenerated) { - generateRDDsForInterval(interval) - lastInterval = interval - inputState.remove(interval) - } else { - logInfo("All inputs not generated for interval " + interval) - } - */ + def start() { + + val zeroTime = Time(timer.start()) + outputRDSs.foreach(_.initialize(zeroTime)) + inputRDSs.par.foreach(_.start()) + logInfo("Scheduler started") } - - def generateRDDsForInterval (interval: Interval) { - logInfo("Generating RDDs for interval " + interval) + + def stop() { + timer.stop() + inputRDSs.par.foreach(_.stop()) + logInfo("Scheduler stopped") + } + + def generateRDDs (time: Time) { + logInfo("Generating RDDs for time " + time) outputRDSs.foreach(outputRDS => { - if (!outputRDS.isInitialized) outputRDS.initialize(interval) - outputRDS.generateJob(interval.endTime) match { + outputRDS.generateJob(time) match { case Some(job) => submitJob(job) case None => } } ) - // TODO(Haoyuan): This comment is for performance test only. - if (System.getProperty("spark.fake", "false") == "true" || System.getProperty("spark.stream.fake", "false") == "true") { - cnt -= 1 - if (cnt <= 0) { - logInfo("My time is up! 
" + cnt) - System.exit(1) - } - } + logInfo("Generated RDDs for time " + time) } - def submitJob(job: Job) { - logInfo("Submitting " + job + " to JobManager") - /*jobManager ! RunJob(job)*/ + def submitJob(job: Job) { jobManager.runJob(job) } - - def startStreamReceivers() { - val testStreamReceiverNames = new ArrayBuffer[(String, Long)]() - inputRDSs.foreach (inputRDS => { - inputRDS match { - case fileInputRDS: FileInputRDS => { - val fileStreamReceiver = new FileStreamReceiver( - fileInputRDS.inputName, - fileInputRDS.directory, - fileInputRDS.batchDuration.asInstanceOf[LongTime].milliseconds) - fileStreamReceiver.start() - } - case networkInputRDS: NetworkInputRDS[_] => { - val networkStreamReceiver = new NetworkStreamReceiver( - networkInputRDS.inputName, - networkInputRDS.batchDuration, - 0, - ssc, - if (ssc.tempDir == null) null else ssc.tempDir.toString) - networkStreamReceiver.start() - } - case testInputRDS: TestInputRDS => { - testStreamReceiverNames += - ((testInputRDS.inputName, testInputRDS.batchDuration.asInstanceOf[LongTime].milliseconds)) - } - } - }) - if (testStreamReceiverNames.size > 0) { - /*val testStreamCoordinator = new TestStreamCoordinator(testStreamReceiverNames.toArray)*/ - /*testStreamCoordinator.start()*/ - val actor = ssc.actorSystem.actorOf( - Props(new TestStreamCoordinator(testStreamReceiverNames.toArray)), - name = "TestStreamCoordinator") - } - } } diff --git a/streaming/src/main/scala/spark/streaming/SparkStreamContext.scala b/streaming/src/main/scala/spark/streaming/SparkStreamContext.scala index 51f8193740..d32f6d588c 100644 --- a/streaming/src/main/scala/spark/streaming/SparkStreamContext.scala +++ b/streaming/src/main/scala/spark/streaming/SparkStreamContext.scala @@ -1,22 +1,23 @@ package spark.streaming -import spark.SparkContext -import spark.SparkEnv -import spark.Utils +import spark.RDD import spark.Logging +import spark.SparkEnv +import spark.SparkContext import scala.collection.mutable.ArrayBuffer +import scala.collection.mutable.Queue -import java.net.InetSocketAddress import java.io.IOException -import java.util.UUID +import java.net.InetSocketAddress +import java.util.concurrent.atomic.AtomicInteger import org.apache.hadoop.fs.Path import org.apache.hadoop.conf.Configuration - -import akka.actor._ -import akka.actor.Actor -import akka.util.duration._ +import org.apache.hadoop.io.LongWritable +import org.apache.hadoop.io.Text +import org.apache.hadoop.mapreduce.{InputFormat => NewInputFormat} +import org.apache.hadoop.mapreduce.lib.input.TextInputFormat class SparkStreamContext ( master: String, @@ -24,30 +25,37 @@ class SparkStreamContext ( val sparkHome: String = null, val jars: Seq[String] = Nil) extends Logging { - + initLogging() val sc = new SparkContext(master, frameworkName, sparkHome, jars) val env = SparkEnv.get - val actorSystem = env.actorSystem - - @transient val inputRDSs = new ArrayBuffer[InputRDS[_]]() - @transient val outputRDSs = new ArrayBuffer[RDS[_]]() - var tempDirRoot: String = null - var tempDir: Path = null - - def readNetworkStream[T: ClassManifest]( + val inputRDSs = new ArrayBuffer[InputRDS[_]]() + val outputRDSs = new ArrayBuffer[RDS[_]]() + var batchDuration: Time = null + var scheduler: Scheduler = null + + def setBatchDuration(duration: Long) { + setBatchDuration(Time(duration)) + } + + def setBatchDuration(duration: Time) { + batchDuration = duration + } + + /* + def createNetworkStream[T: ClassManifest]( name: String, addresses: Array[InetSocketAddress], batchDuration: Time): RDS[T] = { - val inputRDS = 
new NetworkInputRDS[T](name, addresses, batchDuration, this) + val inputRDS = new NetworkInputRDS[T](this, addresses) inputRDSs += inputRDS inputRDS - } + } - def readNetworkStream[T: ClassManifest]( + def createNetworkStream[T: ClassManifest]( name: String, addresses: Array[String], batchDuration: Long): RDS[T] = { @@ -65,40 +73,100 @@ class SparkStreamContext ( addresses.map(stringToInetSocketAddress).toArray, LongTime(batchDuration)) } + */ - def readFileStream(name: String, directory: String): RDS[String] = { - val path = new Path(directory) - val fs = path.getFileSystem(new Configuration()) - val qualPath = path.makeQualified(fs) - val inputRDS = new FileInputRDS(name, qualPath.toString, this) + /** + * This function creates a input stream that monitors a Hadoop-compatible + * for new files and executes the necessary processing on them. + */ + def createFileStream[ + K: ClassManifest, + V: ClassManifest, + F <: NewInputFormat[K, V]: ClassManifest + ](directory: String): RDS[(K, V)] = { + val inputRDS = new FileInputRDS[K, V, F](this, new Path(directory)) inputRDSs += inputRDS inputRDS } - def readTestStream(name: String, batchDuration: Long): RDS[String] = { - val inputRDS = new TestInputRDS(name, LongTime(batchDuration), this) - inputRDSs += inputRDS + def createTextFileStream(directory: String): RDS[String] = { + createFileStream[LongWritable, Text, TextInputFormat](directory).map(_._2.toString) + } + + /** + * This function create a input stream from an queue of RDDs. In each batch, + * it will process either one or all of the RDDs returned by the queue + */ + def createQueueStream[T: ClassManifest]( + queue: Queue[RDD[T]], + oneAtATime: Boolean = true, + defaultRDD: RDD[T] = null + ): RDS[T] = { + val inputRDS = new QueueInputRDS(this, queue, oneAtATime, defaultRDD) + inputRDSs += inputRDS inputRDS } + + def createQueueStream[T: ClassManifest](iterator: Iterator[RDD[T]]): RDS[T] = { + val queue = new Queue[RDD[T]] + val inputRDS = createQueueStream(queue, true, null) + queue ++= iterator + inputRDS + } + + /** + * This function registers a RDS as an output stream that will be + * computed every interval. + */ def registerOutputStream (outputRDS: RDS[_]) { outputRDSs += outputRDS } - - def setTempDir(dir: String) { - tempDirRoot = dir + + /** + * This function verify whether the stream computation is eligible to be executed. + */ + def verify() { + if (batchDuration == null) { + throw new Exception("Batch duration has not been set") + } + if (batchDuration < Milliseconds(100)) { + logWarning("Batch duration of " + batchDuration + " is very low") + } + if (inputRDSs.size == 0) { + throw new Exception("No input RDSes created, so nothing to take input from") + } + if (outputRDSs.size == 0) { + throw new Exception("No output RDSes registered, so nothing to execute") + } + } - - def run () { - val ctxt = this - val actor = actorSystem.actorOf( - Props(new Scheduler(ctxt, inputRDSs.toArray, outputRDSs.toArray)), - name = "SparkStreamScheduler") - logInfo("Registered actor") - actorSystem.awaitTermination() + + /** + * This function starts the execution of the streams. + */ + def start() { + verify() + scheduler = new Scheduler(this, inputRDSs.toArray, outputRDSs.toArray) + scheduler.start() + } + + /** + * This function starts the execution of the streams. 
+ */ + def stop() { + try { + scheduler.stop() + sc.stop() + } catch { + case e: Exception => logWarning("Error while stopping", e) + } + + logInfo("SparkStreamContext stopped") } } + object SparkStreamContext { implicit def rdsToPairRdsFunctions [K: ClassManifest, V: ClassManifest] (rds: RDS[(K,V)]) = new PairRDSFunctions (rds) diff --git a/streaming/src/main/scala/spark/streaming/TestInputBlockTracker.scala b/streaming/src/main/scala/spark/streaming/TestInputBlockTracker.scala deleted file mode 100644 index 7e23b7bb82..0000000000 --- a/streaming/src/main/scala/spark/streaming/TestInputBlockTracker.scala +++ /dev/null @@ -1,42 +0,0 @@ -package spark.streaming -import spark.Logging -import scala.collection.mutable.{ArrayBuffer, HashMap} - -object TestInputBlockTracker extends Logging { - initLogging() - val allBlockIds = new HashMap[Time, ArrayBuffer[String]]() - - def addBlocks(intervalEndTime: Time, reference: AnyRef) { - allBlockIds.getOrElseUpdate(intervalEndTime, new ArrayBuffer[String]()) ++= reference.asInstanceOf[Array[String]] - } - - def setEndTime(intervalEndTime: Time) { - try { - val endTime = System.currentTimeMillis - allBlockIds.get(intervalEndTime) match { - case Some(blockIds) => { - val numBlocks = blockIds.size - var totalDelay = 0d - blockIds.foreach(blockId => { - val inputTime = getInputTime(blockId) - val delay = (endTime - inputTime) / 1000.0 - totalDelay += delay - logInfo("End-to-end delay for block " + blockId + " is " + delay + " s") - }) - logInfo("Average end-to-end delay for time " + intervalEndTime + " is " + (totalDelay / numBlocks) + " s") - allBlockIds -= intervalEndTime - } - case None => throw new Exception("Unexpected") - } - } catch { - case e: Exception => logError(e.toString) - } - } - - def getInputTime(blockId: String): Long = { - val parts = blockId.split("-") - /*logInfo(blockId + " -> " + parts(4)) */ - parts(4).toLong - } -} - diff --git a/streaming/src/main/scala/spark/streaming/TestStreamReceiver3.scala b/streaming/src/main/scala/spark/streaming/TestStreamReceiver3.scala index a7a5635aa5..bbf2c7bf5e 100644 --- a/streaming/src/main/scala/spark/streaming/TestStreamReceiver3.scala +++ b/streaming/src/main/scala/spark/streaming/TestStreamReceiver3.scala @@ -34,8 +34,8 @@ extends Thread with Logging { class DataHandler( inputName: String, - longIntervalDuration: LongTime, - shortIntervalDuration: LongTime, + longIntervalDuration: Time, + shortIntervalDuration: Time, blockManager: BlockManager ) extends Logging { @@ -61,8 +61,8 @@ extends Thread with Logging { initLogging() - val shortIntervalDurationMillis = shortIntervalDuration.asInstanceOf[LongTime].milliseconds - val longIntervalDurationMillis = longIntervalDuration.asInstanceOf[LongTime].milliseconds + val shortIntervalDurationMillis = shortIntervalDuration.toLong + val longIntervalDurationMillis = longIntervalDuration.toLong var currentBlock: Block = null var currentBucket: Bucket = null @@ -101,7 +101,7 @@ extends Thread with Logging { def updateCurrentBlock() { /*logInfo("Updating current block")*/ - val currentTime: LongTime = LongTime(System.currentTimeMillis) + val currentTime = Time(System.currentTimeMillis) val shortInterval = getShortInterval(currentTime) val longInterval = getLongInterval(shortInterval) @@ -318,12 +318,12 @@ extends Thread with Logging { val inputName = streamDetails.name val intervalDurationMillis = streamDetails.duration - val intervalDuration = LongTime(intervalDurationMillis) + val intervalDuration = Time(intervalDurationMillis) val dataHandler = new 
DataHandler( inputName, intervalDuration, - LongTime(TestStreamReceiver3.SHORT_INTERVAL_MILLIS), + Time(TestStreamReceiver3.SHORT_INTERVAL_MILLIS), blockManager) val connListener = new ConnectionListener(TestStreamReceiver3.PORT, dataHandler) @@ -382,7 +382,7 @@ extends Thread with Logging { def waitFor(time: Time) { val currentTimeMillis = System.currentTimeMillis - val targetTimeMillis = time.asInstanceOf[LongTime].milliseconds + val targetTimeMillis = time.milliseconds if (currentTimeMillis < targetTimeMillis) { val sleepTime = (targetTimeMillis - currentTimeMillis) Thread.sleep(sleepTime + 1) @@ -392,7 +392,7 @@ extends Thread with Logging { def notifyScheduler(interval: Interval, blockIds: Array[String]) { try { sparkstreamScheduler ! InputGenerated(inputName, interval, blockIds.toArray) - val time = interval.endTime.asInstanceOf[LongTime] + val time = interval.endTime val delay = (System.currentTimeMillis - time.milliseconds) / 1000.0 logInfo("Pushing delay for " + time + " is " + delay + " s") } catch { diff --git a/streaming/src/main/scala/spark/streaming/TestStreamReceiver4.scala b/streaming/src/main/scala/spark/streaming/TestStreamReceiver4.scala index 2c3f5d1b9d..a2babb23f4 100644 --- a/streaming/src/main/scala/spark/streaming/TestStreamReceiver4.scala +++ b/streaming/src/main/scala/spark/streaming/TestStreamReceiver4.scala @@ -24,8 +24,8 @@ extends Thread with Logging { class DataHandler( inputName: String, - longIntervalDuration: LongTime, - shortIntervalDuration: LongTime, + longIntervalDuration: Time, + shortIntervalDuration: Time, blockManager: BlockManager ) extends Logging { @@ -50,8 +50,8 @@ extends Thread with Logging { val syncOnLastShortInterval = true - val shortIntervalDurationMillis = shortIntervalDuration.asInstanceOf[LongTime].milliseconds - val longIntervalDurationMillis = longIntervalDuration.asInstanceOf[LongTime].milliseconds + val shortIntervalDurationMillis = shortIntervalDuration.milliseconds + val longIntervalDurationMillis = longIntervalDuration.milliseconds val buffer = ByteBuffer.allocateDirect(100 * 1024 * 1024) var currentShortInterval = Interval.currentInterval(shortIntervalDuration) @@ -145,7 +145,7 @@ extends Thread with Logging { if (syncOnLastShortInterval) { bucket += newBlock } - logDebug("Created " + newBlock + " with " + newBuffer.remaining + " bytes, creation delay is " + (System.currentTimeMillis - currentShortInterval.endTime.asInstanceOf[LongTime].milliseconds) / 1000.0 + " s" ) + logDebug("Created " + newBlock + " with " + newBuffer.remaining + " bytes, creation delay is " + (System.currentTimeMillis - currentShortInterval.endTime.milliseconds) / 1000.0 + " s" ) blockPushingExecutor.execute(new Runnable() { def run() { pushAndNotifyBlock(newBlock) } }) } } @@ -175,7 +175,7 @@ extends Thread with Logging { try{ if (blockManager != null) { val startTime = System.currentTimeMillis - logInfo(block + " put start delay is " + (startTime - block.shortInterval.endTime.asInstanceOf[LongTime].milliseconds) + " ms") + logInfo(block + " put start delay is " + (startTime - block.shortInterval.endTime.milliseconds) + " ms") /*blockManager.putBytes(block.id.toString, block.buffer, StorageLevel.DISK_AND_MEMORY)*/ /*blockManager.putBytes(block.id.toString, block.buffer, StorageLevel.DISK_AND_MEMORY_2)*/ blockManager.putBytes(block.id.toString, block.buffer, StorageLevel.MEMORY_ONLY_2) @@ -343,7 +343,7 @@ extends Thread with Logging { def waitFor(time: Time) { val currentTimeMillis = System.currentTimeMillis - val targetTimeMillis = 
time.asInstanceOf[LongTime].milliseconds + val targetTimeMillis = time.milliseconds if (currentTimeMillis < targetTimeMillis) { val sleepTime = (targetTimeMillis - currentTimeMillis) Thread.sleep(sleepTime + 1) @@ -353,7 +353,7 @@ extends Thread with Logging { def notifyScheduler(interval: Interval, blockIds: Array[String]) { try { sparkstreamScheduler ! InputGenerated(inputName, interval, blockIds.toArray) - val time = interval.endTime.asInstanceOf[LongTime] + val time = interval.endTime val delay = (System.currentTimeMillis - time.milliseconds) logInfo("Notification delay for " + time + " is " + delay + " ms") } catch { diff --git a/streaming/src/main/scala/spark/streaming/Time.scala b/streaming/src/main/scala/spark/streaming/Time.scala index b932fe9258..c4573137ae 100644 --- a/streaming/src/main/scala/spark/streaming/Time.scala +++ b/streaming/src/main/scala/spark/streaming/Time.scala @@ -1,19 +1,34 @@ package spark.streaming -abstract case class Time { +class Time(private var millis: Long) { - // basic operations that must be overridden - def copy(): Time - def zero: Time - def < (that: Time): Boolean - def += (that: Time): Time - def -= (that: Time): Time - def floor(that: Time): Time - def isMultipleOf(that: Time): Boolean + def copy() = new Time(this.millis) + + def zero = Time.zero + + def < (that: Time): Boolean = + (this.millis < that.millis) + + def <= (that: Time) = (this < that || this == that) + + def > (that: Time) = !(this <= that) + + def >= (that: Time) = !(this < that) + + def += (that: Time): Time = { + this.millis += that.millis + this + } + + def -= (that: Time): Time = { + this.millis -= that.millis + this + } - // derived operations composed of basic operations def + (that: Time) = this.copy() += that + def - (that: Time) = this.copy() -= that + def * (times: Int) = { var count = 0 var result = this.copy() @@ -23,63 +38,44 @@ abstract case class Time { } result } - def <= (that: Time) = (this < that || this == that) - def > (that: Time) = !(this <= that) - def >= (that: Time) = !(this < that) - def isZero = (this == zero) - def toFormattedString = toString + + def floor(that: Time): Time = { + val t = that.millis + val m = math.floor(this.millis / t).toLong + new Time(m * t) + } + + def isMultipleOf(that: Time): Boolean = + (this.millis % that.millis == 0) + + def isZero = (this.millis == 0) + + override def toString() = (millis.toString + " ms") + + def toFormattedString() = millis.toString + + def milliseconds() = millis } object Time { - def Milliseconds(milliseconds: Long) = LongTime(milliseconds) - - def zero = LongTime(0) -} - -case class LongTime(var milliseconds: Long) extends Time { + val zero = new Time(0) - override def copy() = LongTime(this.milliseconds) - - override def zero = LongTime(0) - - override def < (that: Time): Boolean = - (this.milliseconds < that.asInstanceOf[LongTime].milliseconds) - - override def += (that: Time): Time = { - this.milliseconds += that.asInstanceOf[LongTime].milliseconds - this - } + def apply(milliseconds: Long) = new Time(milliseconds) - override def -= (that: Time): Time = { - this.milliseconds -= that.asInstanceOf[LongTime].milliseconds - this - } - - override def floor(that: Time): Time = { - val t = that.asInstanceOf[LongTime].milliseconds - val m = this.milliseconds / t - LongTime(m.toLong * t) - } - - override def isMultipleOf(that: Time): Boolean = - (this.milliseconds % that.asInstanceOf[LongTime].milliseconds == 0) - - override def isZero = (this.milliseconds == 0) - - override def toString = 
(milliseconds.toString + "ms") - - override def toFormattedString = milliseconds.toString + implicit def toTime(long: Long) = Time(long) + + implicit def toLong(time: Time) = time.milliseconds } object Milliseconds { - def apply(milliseconds: Long) = LongTime(milliseconds) + def apply(milliseconds: Long) = Time(milliseconds) } object Seconds { - def apply(seconds: Long) = LongTime(seconds * 1000) + def apply(seconds: Long) = Time(seconds * 1000) } object Minutes { - def apply(minutes: Long) = LongTime(minutes * 60000) + def apply(minutes: Long) = Time(minutes * 60000) } diff --git a/streaming/src/main/scala/spark/streaming/examples/DumbTopKWordCount2_Special.scala b/streaming/src/main/scala/spark/streaming/examples/DumbTopKWordCount2_Special.scala deleted file mode 100644 index 2ca72da79f..0000000000 --- a/streaming/src/main/scala/spark/streaming/examples/DumbTopKWordCount2_Special.scala +++ /dev/null @@ -1,138 +0,0 @@ -package spark.streaming - -import spark.SparkContext -import SparkContext._ -import SparkStreamContext._ - -import spark.storage.StorageLevel - -import scala.util.Sorting -import scala.collection.JavaConversions.mapAsScalaMap -import scala.collection.mutable.Queue - -import java.lang.{Long => JLong} - -object DumbTopKWordCount2_Special { - - def moreWarmup(sc: SparkContext) { - (0 until 20).foreach {i => - sc.parallelize(1 to 20000000, 500) - .map(_ % 100).map(_.toString) - .map(x => (x, 1)).reduceByKey(_ + _, 10) - .collect() - } - } - - def main (args: Array[String]) { - - if (args.length < 2) { - println ("Usage: SparkStreamContext <# sentence streams>") - System.exit(1) - } - - val ssc = new SparkStreamContext(args(0), "WordCount2") - - val numSentenceStreams = if (args.length > 1) args(1).toInt else 1 - if (args.length > 2) { - ssc.setTempDir(args(2)) - } - - GrepCount2.warmConnectionManagers(ssc.sc) - moreWarmup(ssc.sc) - - val sentences = new UnifiedRDS( - (1 to numSentenceStreams).map(i => ssc.readTestStream("Sentences-" + i, 1000)).toArray - ) - - - def add(v1: JLong, v2: JLong) = (v1 + v2) - def subtract(v1: JLong, v2: JLong) = (v1 - v2) - - def splitAndCountPartitions(iter: Iterator[String]): Iterator[(String, JLong)] = { - val map = new java.util.HashMap[String, JLong] - var i = 0 - var j = 0 - while (iter.hasNext) { - val s = iter.next() - i = 0 - while (i < s.length) { - j = i - while (j < s.length && s.charAt(j) != ' ') { - j += 1 - } - if (j > i) { - val w = s.substring(i, j) - val c = map.get(w) - if (c == null) { - map.put(w, 1) - } else { - map.put(w, c + 1) - } - } - i = j - while (i < s.length && s.charAt(i) == ' ') { - i += 1 - } - } - } - map.toIterator - } - - - val wordCounts = sentences.mapPartitions(splitAndCountPartitions).reduceByKey(_ + _, 10) - wordCounts.persist(StorageLevel.MEMORY_ONLY) - val windowedCounts = wordCounts.window(Seconds(10), Seconds(1)).reduceByKey(_ + _, 10) - - def topK(data: Iterator[(String, JLong)], k: Int): Iterator[(String, JLong)] = { - val taken = new Array[(String, JLong)](k) - - var i = 0 - var len = 0 - var done = false - var value: (String, JLong) = null - var swap: (String, JLong) = null - var count = 0 - - while(data.hasNext) { - value = data.next - count += 1 - /*println("count = " + count)*/ - if (len == 0) { - taken(0) = value - len = 1 - } else if (len < k || value._2 > taken(len - 1)._2) { - if (len < k) { - len += 1 - } - taken(len - 1) = value - i = len - 1 - while(i > 0 && taken(i - 1)._2 < taken(i)._2) { - swap = taken(i) - taken(i) = taken(i-1) - taken(i - 1) = swap - i -= 1 - } - } - } - println("Took 
" + len + " out of " + count + " items") - return taken.toIterator - } - - val k = 10 - val partialTopKWindowedCounts = windowedCounts.mapPartitions(topK(_, k)) - partialTopKWindowedCounts.foreachRDD(rdd => { - val collectedCounts = rdd.collect - println("Collected " + collectedCounts.size + " items") - topK(collectedCounts.toIterator, k).foreach(println) - }) - - /* - windowedCounts.filter(_ == null).foreachRDD(rdd => { - val count = rdd.count - println("# of nulls = " + count) - })*/ - - ssc.run - } -} - diff --git a/streaming/src/main/scala/spark/streaming/examples/DumbWordCount2_Special.scala b/streaming/src/main/scala/spark/streaming/examples/DumbWordCount2_Special.scala deleted file mode 100644 index 34e7edfda9..0000000000 --- a/streaming/src/main/scala/spark/streaming/examples/DumbWordCount2_Special.scala +++ /dev/null @@ -1,92 +0,0 @@ -package spark.streaming - -import spark.SparkContext -import SparkContext._ -import SparkStreamContext._ - -import spark.storage.StorageLevel - -import scala.util.Sorting -import scala.collection.mutable.ArrayBuffer -import scala.collection.mutable.HashMap -import scala.collection.mutable.Queue -import scala.collection.JavaConversions.mapAsScalaMap - -import java.lang.{Long => JLong} -import it.unimi.dsi.fastutil.objects.{Object2LongOpenHashMap => OLMap} - -object DumbWordCount2_Special { - - def moreWarmup(sc: SparkContext) { - (0 until 20).foreach {i => - sc.parallelize(1 to 20000000, 500) - .map(_ % 100).map(_.toString) - .map(x => (x, 1)).reduceByKey(_ + _, 10) - .collect() - } - } - - def main (args: Array[String]) { - - if (args.length < 2) { - println ("Usage: SparkStreamContext <# sentence streams>") - System.exit(1) - } - - val ssc = new SparkStreamContext(args(0), "WordCount2") - - val numSentenceStreams = if (args.length > 1) args(1).toInt else 1 - if (args.length > 2) { - ssc.setTempDir(args(2)) - } - - GrepCount2.warmConnectionManagers(ssc.sc) - moreWarmup(ssc.sc) - - val sentences = new UnifiedRDS( - (1 to numSentenceStreams).map(i => ssc.readTestStream("Sentences-" + i, 1000)).toArray - ) - - def add(v1: JLong, v2: JLong) = (v1 + v2) - def subtract(v1: JLong, v2: JLong) = (v1 - v2) - - def splitAndCountPartitions(iter: Iterator[String]): Iterator[(String, JLong)] = { - val map = new java.util.HashMap[String, JLong] - var i = 0 - var j = 0 - while (iter.hasNext) { - val s = iter.next() - i = 0 - while (i < s.length) { - j = i - while (j < s.length && s.charAt(j) != ' ') { - j += 1 - } - if (j > i) { - val w = s.substring(i, j) - val c = map.get(w) - if (c == null) { - map.put(w, 1) - } else { - map.put(w, c + 1) - } - } - i = j - while (i < s.length && s.charAt(i) == ' ') { - i += 1 - } - } - } - - map.toIterator - } - - val wordCounts = sentences.mapPartitions(splitAndCountPartitions).reduceByKey(_ + _, 10) - wordCounts.persist(StorageLevel.MEMORY_ONLY) - val windowedCounts = wordCounts.window(Seconds(10), Seconds(1)).reduceByKey(_ + _, 10) - windowedCounts.foreachRDD(_.collect) - - ssc.run - } -} - diff --git a/streaming/src/main/scala/spark/streaming/examples/ExampleOne.scala b/streaming/src/main/scala/spark/streaming/examples/ExampleOne.scala new file mode 100644 index 0000000000..d56fdcdf29 --- /dev/null +++ b/streaming/src/main/scala/spark/streaming/examples/ExampleOne.scala @@ -0,0 +1,41 @@ +package spark.streaming.examples + +import spark.RDD +import spark.streaming.SparkStreamContext +import spark.streaming.SparkStreamContext._ +import spark.streaming.Seconds + +import scala.collection.mutable.SynchronizedQueue + +object 
ExampleOne { + + def main(args: Array[String]) { + if (args.length < 1) { + System.err.println("Usage: ExampleOne ") + System.exit(1) + } + + // Create the context and set the batch size + val ssc = new SparkStreamContext(args(0), "ExampleOne") + ssc.setBatchDuration(Seconds(1)) + + // Create the queue through which RDDs can be pushed to + // a QueueInputRDS + val rddQueue = new SynchronizedQueue[RDD[Int]]() + + // Create the QueueInputRDs and use it do some processing + val inputStream = ssc.createQueueStream(rddQueue) + val mappedStream = inputStream.map(x => (x % 10, 1)) + val reducedStream = mappedStream.reduceByKey(_ + _) + reducedStream.print() + ssc.start() + + // Create and push some RDDs into + for (i <- 1 to 30) { + rddQueue += ssc.sc.makeRDD(1 to 1000, 10) + Thread.sleep(1000) + } + ssc.stop() + System.exit(0) + } +} \ No newline at end of file diff --git a/streaming/src/main/scala/spark/streaming/examples/ExampleTwo.scala b/streaming/src/main/scala/spark/streaming/examples/ExampleTwo.scala new file mode 100644 index 0000000000..4b8f6d609d --- /dev/null +++ b/streaming/src/main/scala/spark/streaming/examples/ExampleTwo.scala @@ -0,0 +1,47 @@ +package spark.streaming.examples + +import spark.streaming.SparkStreamContext +import spark.streaming.SparkStreamContext._ +import spark.streaming.Seconds +import org.apache.hadoop.fs.Path +import org.apache.hadoop.conf.Configuration + + +object ExampleTwo { + def main(args: Array[String]) { + if (args.length < 2) { + System.err.println("Usage: ExampleOne ") + System.exit(1) + } + + // Create the context and set the batch size + val ssc = new SparkStreamContext(args(0), "ExampleTwo") + ssc.setBatchDuration(Seconds(2)) + + // Create the new directory + val directory = new Path(args(1)) + val fs = directory.getFileSystem(new Configuration()) + if (fs.exists(directory)) throw new Exception("This directory already exists") + fs.mkdirs(directory) + + // Create the FileInputRDS on the directory and use the + // stream to count words in new files created + val inputRDS = ssc.createTextFileStream(directory.toString) + val wordsRDS = inputRDS.flatMap(_.split(" ")) + val wordCountsRDS = wordsRDS.map(x => (x, 1)).reduceByKey(_ + _) + wordCountsRDS.print + ssc.start() + + // Creating new files in the directory + val text = "This is a text file" + for (i <- 1 to 30) { + ssc.sc.parallelize((1 to (i * 10)).map(_ => text), 10) + .saveAsTextFile(new Path(directory, i.toString).toString) + Thread.sleep(1000) + } + Thread.sleep(5000) // Waiting for the file to be processed + ssc.stop() + fs.delete(directory) + System.exit(0) + } +} \ No newline at end of file diff --git a/streaming/src/main/scala/spark/streaming/examples/GrepCount.scala b/streaming/src/main/scala/spark/streaming/examples/GrepCount.scala deleted file mode 100644 index ec3e70f258..0000000000 --- a/streaming/src/main/scala/spark/streaming/examples/GrepCount.scala +++ /dev/null @@ -1,39 +0,0 @@ -package spark.streaming - -import SparkStreamContext._ - -import scala.util.Sorting - -import spark.SparkContext -import spark.storage.StorageLevel - -object GrepCount { - var inputFile : String = null - var HDFS : String = null - var idealPartitions : Int = 0 - - def main (args: Array[String]) { - - if (args.length != 4) { - println ("Usage: GrepCount ") - System.exit(1) - } - - HDFS = args(1) - inputFile = HDFS + args(2) - idealPartitions = args(3).toInt - println ("Input file: " + inputFile) - - val ssc = new SparkStreamContext(args(0), "GrepCount") - - SparkContext.idealPartitions = idealPartitions - 
SparkContext.inputFile = inputFile - - val sentences = ssc.readNetworkStream[String]("Sentences", Array("localhost:55119"), 1000) - //sentences.print - val matching = sentences.filter(_.contains("light")) - matching.foreachRDD(rdd => println(rdd.count)) - - ssc.run - } -} diff --git a/streaming/src/main/scala/spark/streaming/examples/GrepCount2.scala b/streaming/src/main/scala/spark/streaming/examples/GrepCount2.scala deleted file mode 100644 index 27ecced1c0..0000000000 --- a/streaming/src/main/scala/spark/streaming/examples/GrepCount2.scala +++ /dev/null @@ -1,113 +0,0 @@ -package spark.streaming - -import SparkStreamContext._ - -import scala.util.Sorting - -import spark.SparkEnv -import spark.SparkContext -import spark.storage.StorageLevel -import spark.network.Message -import spark.network.ConnectionManagerId - -import java.nio.ByteBuffer - -object GrepCount2 { - - def startSparkEnvs(sc: SparkContext) { - - val dummy = sc.parallelize(0 to 1000, 100).persist(StorageLevel.DISK_AND_MEMORY) - sc.runJob(dummy, (_: Iterator[Int]) => {}) - - println("SparkEnvs started") - Thread.sleep(1000) - /*sc.runJob(sc.parallelize(0 to 1000, 100), (_: Iterator[Int]) => {})*/ - } - - def warmConnectionManagers(sc: SparkContext) { - val slaveConnManagerIds = sc.parallelize(0 to 100, 100).map( - i => SparkEnv.get.connectionManager.id).collect().distinct - println("\nSlave ConnectionManagerIds") - slaveConnManagerIds.foreach(println) - println - - Thread.sleep(1000) - val numSlaves = slaveConnManagerIds.size - val count = 3 - val size = 5 * 1024 * 1024 - val iterations = (500 * 1024 * 1024 / (numSlaves * size)).toInt - println("count = " + count + ", size = " + size + ", iterations = " + iterations) - - (0 until count).foreach(i => { - val resultStrs = sc.parallelize(0 until numSlaves, numSlaves).map(i => { - val connManager = SparkEnv.get.connectionManager - val thisConnManagerId = connManager.id - /*connManager.onReceiveMessage((msg: Message, id: ConnectionManagerId) => { - println("Received [" + msg + "] from [" + id + "]") - None - })*/ - - - val buffer = ByteBuffer.allocate(size).put(Array.tabulate[Byte](size)(x => x.toByte)) - buffer.flip - - val startTime = System.currentTimeMillis - val futures = (0 until iterations).map(i => { - slaveConnManagerIds.filter(_ != thisConnManagerId).map(slaveConnManagerId => { - val bufferMessage = Message.createBufferMessage(buffer.duplicate) - println("Sending [" + bufferMessage + "] to [" + slaveConnManagerId + "]") - connManager.sendMessageReliably(slaveConnManagerId, bufferMessage) - }) - }).flatMap(x => x) - val results = futures.map(f => f()) - val finishTime = System.currentTimeMillis - - - val mb = size * results.size / 1024.0 / 1024.0 - val ms = finishTime - startTime - - val resultStr = "Sent " + mb + " MB in " + ms + " ms at " + (mb / ms * 1000.0) + " MB/s" - println(resultStr) - System.gc() - resultStr - }).collect() - - println("---------------------") - println("Run " + i) - resultStrs.foreach(println) - println("---------------------") - }) - } - - - def main (args: Array[String]) { - - if (args.length < 2) { - println ("Usage: GrepCount2 <# sentence streams>") - System.exit(1) - } - - val ssc = new SparkStreamContext(args(0), "GrepCount2") - - val numSentenceStreams = if (args.length > 1) args(1).toInt else 1 - if (args.length > 2) { - ssc.setTempDir(args(2)) - } - - /*startSparkEnvs(ssc.sc)*/ - warmConnectionManagers(ssc.sc) - - val sentences = new UnifiedRDS( - (1 to numSentenceStreams).map(i => ssc.readTestStream("Sentences-"+i, 500)).toArray - ) - - 
val matching = sentences.filter(_.contains("light")) - matching.foreachRDD(rdd => println(rdd.count)) - - ssc.run - } -} - - - - diff --git a/streaming/src/main/scala/spark/streaming/examples/GrepCountApprox.scala b/streaming/src/main/scala/spark/streaming/examples/GrepCountApprox.scala deleted file mode 100644 index f9674136fe..0000000000 --- a/streaming/src/main/scala/spark/streaming/examples/GrepCountApprox.scala +++ /dev/null @@ -1,54 +0,0 @@ -package spark.streaming - -import SparkStreamContext._ - -import scala.util.Sorting - -import spark.SparkContext -import spark.storage.StorageLevel - -object GrepCountApprox { - var inputFile : String = null - var hdfs : String = null - var idealPartitions : Int = 0 - - def main (args: Array[String]) { - - if (args.length != 5) { - println ("Usage: GrepCountApprox ") - System.exit(1) - } - - hdfs = args(1) - inputFile = hdfs + args(2) - idealPartitions = args(3).toInt - val timeout = args(4).toLong - println ("Input file: " + inputFile) - - val ssc = new SparkStreamContext(args(0), "GrepCount") - - SparkContext.idealPartitions = idealPartitions - SparkContext.inputFile = inputFile - ssc.setTempDir(hdfs + "/tmp") - - val sentences = ssc.readNetworkStream[String]("Sentences", Array("localhost:55119"), 1000) - //sentences.print - val matching = sentences.filter(_.contains("light")) - var i = 0 - val startTime = System.currentTimeMillis - matching.foreachRDD { rdd => - val myNum = i - val result = rdd.countApprox(timeout) - val initialTime = (System.currentTimeMillis - startTime) / 1000.0 - printf("APPROX\t%.2f\t%d\tinitial\t%.1f\t%.1f\n", initialTime, myNum, result.initialValue.mean, - result.initialValue.high - result.initialValue.low) - result.onComplete { r => - val finalTime = (System.currentTimeMillis - startTime) / 1000.0 - printf("APPROX\t%.2f\t%d\tfinal\t%.1f\t0.0\t%.1f\n", finalTime, myNum, r.mean, finalTime - initialTime) - } - i += 1 - } - - ssc.run - } -} diff --git a/streaming/src/main/scala/spark/streaming/examples/SimpleWordCount.scala b/streaming/src/main/scala/spark/streaming/examples/SimpleWordCount.scala deleted file mode 100644 index a75ccd3a56..0000000000 --- a/streaming/src/main/scala/spark/streaming/examples/SimpleWordCount.scala +++ /dev/null @@ -1,30 +0,0 @@ -package spark.streaming - -import SparkStreamContext._ - -import scala.util.Sorting - -object SimpleWordCount { - - def main (args: Array[String]) { - - if (args.length < 1) { - println ("Usage: SparkStreamContext []") - System.exit(1) - } - - val ssc = new SparkStreamContext(args(0), "WordCount") - if (args.length > 1) { - ssc.setTempDir(args(1)) - } - val sentences = ssc.readNetworkStream[String]("Sentences", Array("localhost:55119"), 2000) - /*sentences.print*/ - - val words = sentences.flatMap(_.split(" ")) - - val counts = words.map(x => (x, 1)).reduceByKey(_ + _, 1) - counts.print - - ssc.run - } -} diff --git a/streaming/src/main/scala/spark/streaming/examples/SimpleWordCount2.scala b/streaming/src/main/scala/spark/streaming/examples/SimpleWordCount2.scala deleted file mode 100644 index 9672e64b13..0000000000 --- a/streaming/src/main/scala/spark/streaming/examples/SimpleWordCount2.scala +++ /dev/null @@ -1,51 +0,0 @@ -package spark.streaming - -import spark.SparkContext -import SparkContext._ -import SparkStreamContext._ - -import scala.util.Sorting - -object SimpleWordCount2 { - - def moreWarmup(sc: SparkContext) { - (0 until 20).foreach {i => - sc.parallelize(1 to 20000000, 500) - .map(_ % 100).map(_.toString) - .map(x => (x, 1)).reduceByKey(_ + _, 10) - 
.collect() - } - } - - def main (args: Array[String]) { - - if (args.length < 2) { - println ("Usage: SimpleWordCount2 <# sentence streams>") - System.exit(1) - } - - val ssc = new SparkStreamContext(args(0), "SimpleWordCount2") - - val numSentenceStreams = if (args.length > 1) args(1).toInt else 1 - if (args.length > 2) { - ssc.setTempDir(args(2)) - } - - GrepCount2.warmConnectionManagers(ssc.sc) - moreWarmup(ssc.sc) - - val sentences = new UnifiedRDS( - (1 to numSentenceStreams).map(i => ssc.readTestStream("Sentences-" + i, 1000)).toArray - ) - - - val words = sentences.flatMap(_.split(" ")) - - val counts = words.map(x => (x, 1)).reduceByKey(_ + _, 10) - counts.foreachRDD(_.collect()) - /*words.foreachRDD(_.countByValue())*/ - - ssc.run - } -} - diff --git a/streaming/src/main/scala/spark/streaming/examples/SimpleWordCount2_Special.scala b/streaming/src/main/scala/spark/streaming/examples/SimpleWordCount2_Special.scala deleted file mode 100644 index 503033a8e5..0000000000 --- a/streaming/src/main/scala/spark/streaming/examples/SimpleWordCount2_Special.scala +++ /dev/null @@ -1,83 +0,0 @@ -package spark.streaming - -import spark.SparkContext -import SparkContext._ -import SparkStreamContext._ - -import scala.collection.JavaConversions.mapAsScalaMap -import scala.util.Sorting -import java.lang.{Long => JLong} - -object SimpleWordCount2_Special { - - def moreWarmup(sc: SparkContext) { - (0 until 20).foreach {i => - sc.parallelize(1 to 20000000, 500) - .map(_ % 100).map(_.toString) - .map(x => (x, 1)).reduceByKey(_ + _, 10) - .collect() - } - } - - def main (args: Array[String]) { - - if (args.length < 2) { - println ("Usage: SimpleWordCount2 <# sentence streams>") - System.exit(1) - } - - val ssc = new SparkStreamContext(args(0), "SimpleWordCount2") - - val numSentenceStreams = if (args.length > 1) args(1).toInt else 1 - if (args.length > 2) { - ssc.setTempDir(args(2)) - } - - GrepCount2.warmConnectionManagers(ssc.sc) - moreWarmup(ssc.sc) - - val sentences = new UnifiedRDS( - (1 to numSentenceStreams).map(i => ssc.readTestStream("Sentences-" + i, 400)).toArray - ) - - - def splitAndCountPartitions(iter: Iterator[String]): Iterator[(String, JLong)] = { - val map = new java.util.HashMap[String, JLong] - var i = 0 - var j = 0 - while (iter.hasNext) { - val s = iter.next() - i = 0 - while (i < s.length) { - j = i - while (j < s.length && s.charAt(j) != ' ') { - j += 1 - } - if (j > i) { - val w = s.substring(i, j) - val c = map.get(w) - if (c == null) { - map.put(w, 1) - } else { - map.put(w, c + 1) - } - } - i = j - while (i < s.length && s.charAt(i) == ' ') { - i += 1 - } - } - } - map.toIterator - } - - - /*val words = sentences.flatMap(_.split(" "))*/ - /*val counts = words.map(x => (x, 1)).reduceByKey(_ + _, 10)*/ - val counts = sentences.mapPartitions(splitAndCountPartitions).reduceByKey(_ + _, 10) - counts.foreachRDD(_.collect()) - - ssc.run - } -} - diff --git a/streaming/src/main/scala/spark/streaming/examples/TopContentCount.scala b/streaming/src/main/scala/spark/streaming/examples/TopContentCount.scala deleted file mode 100644 index 031e989c87..0000000000 --- a/streaming/src/main/scala/spark/streaming/examples/TopContentCount.scala +++ /dev/null @@ -1,97 +0,0 @@ -package spark.streaming - -import SparkStreamContext._ - -import spark.storage.StorageLevel - -import scala.util.Sorting - -object TopContentCount { - - case class Event(val country: String, val content: String) - - object Event { - def create(string: String): Event = { - val parts = string.split(":") - new Event(parts(0), 
parts(1)) - } - } - - def main(args: Array[String]) { - - if (args.length < 2) { - println ("Usage: GrepCount2 <# sentence streams>") - System.exit(1) - } - - val ssc = new SparkStreamContext(args(0), "TopContentCount") - val sc = ssc.sc - val dummy = sc.parallelize(0 to 1000, 100).persist(StorageLevel.DISK_AND_MEMORY) - sc.runJob(dummy, (_: Iterator[Int]) => {}) - - - val numEventStreams = if (args.length > 1) args(1).toInt else 1 - if (args.length > 2) { - ssc.setTempDir(args(2)) - } - - val eventStrings = new UnifiedRDS( - (1 to numEventStreams).map(i => ssc.readTestStream("Events-" + i, 1000)).toArray - ) - - def parse(string: String) = { - val parts = string.split(":") - (parts(0), parts(1)) - } - - def add(v1: Int, v2: Int) = (v1 + v2) - def subtract(v1: Int, v2: Int) = (v1 - v2) - - val events = eventStrings.map(x => parse(x)) - /*events.print*/ - - val parallelism = 8 - val counts_per_content_per_country = events - .map(x => (x, 1)) - .reduceByKey(_ + _) - /*.reduceByKeyAndWindow(add _, subtract _, Seconds(5), Seconds(1), parallelism)*/ - /*counts_per_content_per_country.print*/ - - /* - counts_per_content_per_country.persist( - StorageLevel.MEMORY_ONLY_DESER, - StorageLevel.MEMORY_ONLY_DESER_2, - Seconds(1) - )*/ - - val counts_per_country = counts_per_content_per_country - .map(x => (x._1._1, (x._1._2, x._2))) - .groupByKey() - counts_per_country.print - - - def topK(data: Seq[(String, Int)], k: Int): Array[(String, Int)] = { - implicit val countOrdering = new Ordering[(String, Int)] { - override def compare(count1: (String, Int), count2: (String, Int)): Int = { - count2._2 - count1._2 - } - } - val array = data.toArray - Sorting.quickSort(array) - val taken = array.take(k) - taken - } - - val k = 10 - val topKContents_per_country = counts_per_country - .map(x => (x._1, topK(x._2, k))) - .map(x => (x._1, x._2.map(_.toString).reduceLeft(_ + ", " + _))) - - topKContents_per_country.print - - ssc.run - } -} - - - diff --git a/streaming/src/main/scala/spark/streaming/examples/TopKWordCount2.scala b/streaming/src/main/scala/spark/streaming/examples/TopKWordCount2.scala deleted file mode 100644 index 679ed0a7ef..0000000000 --- a/streaming/src/main/scala/spark/streaming/examples/TopKWordCount2.scala +++ /dev/null @@ -1,103 +0,0 @@ -package spark.streaming - -import spark.SparkContext -import SparkContext._ -import SparkStreamContext._ - -import spark.storage.StorageLevel - -import scala.util.Sorting - -object TopKWordCount2 { - - def moreWarmup(sc: SparkContext) { - (0 until 20).foreach {i => - sc.parallelize(1 to 20000000, 500) - .map(_ % 100).map(_.toString) - .map(x => (x, 1)).reduceByKey(_ + _, 10) - .collect() - } - } - - def main (args: Array[String]) { - - if (args.length < 2) { - println ("Usage: SparkStreamContext <# sentence streams>") - System.exit(1) - } - - val ssc = new SparkStreamContext(args(0), "WordCount2") - - val numSentenceStreams = if (args.length > 1) args(1).toInt else 1 - if (args.length > 2) { - ssc.setTempDir(args(2)) - } - - GrepCount2.warmConnectionManagers(ssc.sc) - moreWarmup(ssc.sc) - - val sentences = new UnifiedRDS( - (1 to numSentenceStreams).map(i => ssc.readTestStream("Sentences-" + i, 1000)).toArray - ) - - val words = sentences.flatMap(_.split(" ")) - - def add(v1: Int, v2: Int) = (v1 + v2) - def subtract(v1: Int, v2: Int) = (v1 - v2) - - val windowedCounts = words.map(x => (x, 1)).reduceByKeyAndWindow(add _, subtract _, Seconds(10), Seconds(1), 10) - windowedCounts.persist(StorageLevel.MEMORY_ONLY_DESER, StorageLevel.MEMORY_ONLY_DESER, Seconds(1)) 
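// The add/subtract pair passed to reduceByKeyAndWindow above is what makes the sliding
// window incremental: rather than re-reducing the full 10-second window every second, the
// previous window's per-key totals are patched with the data that just entered the window
// and the data that just dropped out of it. A minimal plain-Scala sketch of that idea
// (hypothetical helper and names, not part of this patch):
def slideWindow(
    previous: Map[String, Int],   // per-key totals for the previous window
    entering: Map[String, Int],   // counts from the batch entering the window
    leaving: Map[String, Int]     // counts from the batch leaving the window
): Map[String, Int] = {
  val added = entering.foldLeft(previous) { case (acc, (k, v)) =>
    acc.updated(k, acc.getOrElse(k, 0) + v)         // add(v1, v2)
  }
  leaving.foldLeft(added) { case (acc, (k, v)) =>
    acc.updated(k, acc.getOrElse(k, 0) - v)         // subtract(v1, v2)
  }.filter(_._2 != 0)                               // drop keys whose count fell to zero
}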
- - def topK(data: Iterator[(String, Int)], k: Int): Iterator[(String, Int)] = { - val taken = new Array[(String, Int)](k) - - var i = 0 - var len = 0 - var done = false - var value: (String, Int) = null - var swap: (String, Int) = null - var count = 0 - - while(data.hasNext) { - value = data.next - count += 1 - println("count = " + count) - if (len == 0) { - taken(0) = value - len = 1 - } else if (len < k || value._2 > taken(len - 1)._2) { - if (len < k) { - len += 1 - } - taken(len - 1) = value - i = len - 1 - while(i > 0 && taken(i - 1)._2 < taken(i)._2) { - swap = taken(i) - taken(i) = taken(i-1) - taken(i - 1) = swap - i -= 1 - } - } - } - println("Took " + len + " out of " + count + " items") - return taken.toIterator - } - - val k = 10 - val partialTopKWindowedCounts = windowedCounts.mapPartitions(topK(_, k)) - partialTopKWindowedCounts.foreachRDD(rdd => { - val collectedCounts = rdd.collect - println("Collected " + collectedCounts.size + " items") - topK(collectedCounts.toIterator, k).foreach(println) - }) - - /* - windowedCounts.filter(_ == null).foreachRDD(rdd => { - val count = rdd.count - println("# of nulls = " + count) - })*/ - - ssc.run - } -} - diff --git a/streaming/src/main/scala/spark/streaming/examples/TopKWordCount2_Special.scala b/streaming/src/main/scala/spark/streaming/examples/TopKWordCount2_Special.scala deleted file mode 100644 index c873fbd0f0..0000000000 --- a/streaming/src/main/scala/spark/streaming/examples/TopKWordCount2_Special.scala +++ /dev/null @@ -1,142 +0,0 @@ -package spark.streaming - -import spark.SparkContext -import SparkContext._ -import SparkStreamContext._ - -import spark.storage.StorageLevel - -import scala.util.Sorting -import scala.collection.JavaConversions.mapAsScalaMap -import scala.collection.mutable.Queue - -import java.lang.{Long => JLong} - -object TopKWordCount2_Special { - - def moreWarmup(sc: SparkContext) { - (0 until 20).foreach {i => - sc.parallelize(1 to 20000000, 500) - .map(_ % 100).map(_.toString) - .map(x => (x, 1)).reduceByKey(_ + _, 10) - .collect() - } - } - - def main (args: Array[String]) { - - if (args.length < 2) { - println ("Usage: SparkStreamContext <# sentence streams>") - System.exit(1) - } - - val ssc = new SparkStreamContext(args(0), "TopKWordCount2") - - val numSentenceStreams = if (args.length > 1) args(1).toInt else 1 - if (args.length > 2) { - ssc.setTempDir(args(2)) - } - - GrepCount2.warmConnectionManagers(ssc.sc) - /*moreWarmup(ssc.sc)*/ - - val sentences = new UnifiedRDS( - (1 to numSentenceStreams).map(i => ssc.readTestStream("Sentences-" + i, 500)).toArray - ) - - /*val words = sentences.flatMap(_.split(" "))*/ - - /*def add(v1: Int, v2: Int) = (v1 + v2) */ - /*def subtract(v1: Int, v2: Int) = (v1 - v2) */ - - def add(v1: JLong, v2: JLong) = (v1 + v2) - def subtract(v1: JLong, v2: JLong) = (v1 - v2) - - def splitAndCountPartitions(iter: Iterator[String]): Iterator[(String, JLong)] = { - val map = new java.util.HashMap[String, JLong] - var i = 0 - var j = 0 - while (iter.hasNext) { - val s = iter.next() - i = 0 - while (i < s.length) { - j = i - while (j < s.length && s.charAt(j) != ' ') { - j += 1 - } - if (j > i) { - val w = s.substring(i, j) - val c = map.get(w) - if (c == null) { - map.put(w, 1) - } else { - map.put(w, c + 1) - } - } - i = j - while (i < s.length && s.charAt(i) == ' ') { - i += 1 - } - } - } - map.toIterator - } - - - val windowedCounts = sentences.mapPartitions(splitAndCountPartitions).reduceByKeyAndWindow(add _, subtract _, Seconds(10), Milliseconds(500), 10) - 
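// Why the two-level topK below is sound: after reduceByKeyAndWindow each word's windowed
// total lives in exactly one partition, so the union of every partition's k best candidates
// must contain the global top k. mapPartitions(topK(_, k)) therefore shrinks each partition
// to at most k pairs, and the driver runs the same bounded insertion sort once more over the
// collected candidates to pick the final k.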
/*windowedCounts.persist(StorageLevel.MEMORY_ONLY_DESER, StorageLevel.MEMORY_ONLY_DESER, Seconds(1))*/ - windowedCounts.persist(StorageLevel.MEMORY_ONLY, StorageLevel.MEMORY_ONLY, Milliseconds(500)) - - def topK(data: Iterator[(String, JLong)], k: Int): Iterator[(String, JLong)] = { - val taken = new Array[(String, JLong)](k) - - var i = 0 - var len = 0 - var done = false - var value: (String, JLong) = null - var swap: (String, JLong) = null - var count = 0 - - while(data.hasNext) { - value = data.next - count += 1 - println("count = " + count) - if (len == 0) { - taken(0) = value - len = 1 - } else if (len < k || value._2 > taken(len - 1)._2) { - if (len < k) { - len += 1 - } - taken(len - 1) = value - i = len - 1 - while(i > 0 && taken(i - 1)._2 < taken(i)._2) { - swap = taken(i) - taken(i) = taken(i-1) - taken(i - 1) = swap - i -= 1 - } - } - } - println("Took " + len + " out of " + count + " items") - return taken.toIterator - } - - val k = 50 - val partialTopKWindowedCounts = windowedCounts.mapPartitions(topK(_, k)) - partialTopKWindowedCounts.foreachRDD(rdd => { - val collectedCounts = rdd.collect - println("Collected " + collectedCounts.size + " items") - topK(collectedCounts.toIterator, k).foreach(println) - }) - - /* - windowedCounts.filter(_ == null).foreachRDD(rdd => { - val count = rdd.count - println("# of nulls = " + count) - })*/ - - ssc.run - } -} - diff --git a/streaming/src/main/scala/spark/streaming/examples/WordCount.scala b/streaming/src/main/scala/spark/streaming/examples/WordCount.scala deleted file mode 100644 index fb5508ffcc..0000000000 --- a/streaming/src/main/scala/spark/streaming/examples/WordCount.scala +++ /dev/null @@ -1,62 +0,0 @@ -package spark.streaming - -import SparkStreamContext._ - -import scala.util.Sorting - -import spark.SparkContext -import spark.storage.StorageLevel - -object WordCount { - var inputFile : String = null - var HDFS : String = null - var idealPartitions : Int = 0 - - def main (args: Array[String]) { - - if (args.length != 4) { - println ("Usage: WordCount ") - System.exit(1) - } - - HDFS = args(1) - inputFile = HDFS + args(2) - idealPartitions = args(3).toInt - println ("Input file: " + inputFile) - - val ssc = new SparkStreamContext(args(0), "WordCountWindow") - - SparkContext.idealPartitions = idealPartitions - SparkContext.inputFile = inputFile - - val sentences = ssc.readNetworkStream[String]("Sentences", Array("localhost:55119"), 2000) - //sentences.print - - val words = sentences.flatMap(_.split(" ")) - - def add(v1: Int, v2: Int) = (v1 + v2) - def subtract(v1: Int, v2: Int) = (v1 - v2) - - //val windowedCounts = words.map(x => (x, 1)).reduceByKeyAndWindow(add _, subtract _, Seconds(30), Milliseconds(2000), - // System.getProperty("spark.default.parallelism", "1").toInt) - //windowedCounts.persist(StorageLevel.MEMORY_ONLY_DESER, StorageLevel.DISK_AND_MEMORY_DESER_2, Seconds(5)) - //windowedCounts.print - - val parallelism = System.getProperty("spark.default.parallelism", "1").toInt - - //val localCounts = words.map(x => (x, 1)).reduceByKey(add _, parallelism) - //localCounts.persist(StorageLevel.MEMORY_ONLY_DESER, StorageLevel.MEMORY_ONLY_DESER_2, Seconds(6)) - //val windowedCounts = localCounts.window(Seconds(30), Seconds(2)).reduceByKey(_ + _) - - val windowedCounts = words.map(x => (x, 1)).reduceByKeyAndWindow(add _, subtract _, Seconds(30), Seconds(2), - parallelism) - windowedCounts.persist(StorageLevel.MEMORY_ONLY_DESER, StorageLevel.MEMORY_ONLY_DESER_2, Seconds(10)) - - //windowedCounts.print - windowedCounts.register - 
//windowedCounts.foreachRDD(rdd => rdd.collect.foreach(x => print(x+ " "))) - //windowedCounts.foreachRDD(rdd => rdd.collect.foreach(x => x)) - - ssc.run - } -} diff --git a/streaming/src/main/scala/spark/streaming/examples/WordCount1.scala b/streaming/src/main/scala/spark/streaming/examples/WordCount1.scala deleted file mode 100644 index 42d985920a..0000000000 --- a/streaming/src/main/scala/spark/streaming/examples/WordCount1.scala +++ /dev/null @@ -1,46 +0,0 @@ -package spark.streaming - -import SparkStreamContext._ - -import scala.util.Sorting - -import spark.SparkContext -import spark.storage.StorageLevel - -object WordCount1 { - var inputFile : String = null - var HDFS : String = null - var idealPartitions : Int = 0 - - def main (args: Array[String]) { - - if (args.length != 4) { - println ("Usage: WordCount ") - System.exit(1) - } - - HDFS = args(1) - inputFile = HDFS + args(2) - idealPartitions = args(3).toInt - println ("Input file: " + inputFile) - - val ssc = new SparkStreamContext(args(0), "WordCountWindow") - - SparkContext.idealPartitions = idealPartitions - SparkContext.inputFile = inputFile - - val sentences = ssc.readNetworkStream[String]("Sentences", Array("localhost:55119"), 1000) - //sentences.print - - val words = sentences.flatMap(_.split(" ")) - - def add(v1: Int, v2: Int) = (v1 + v2) - def subtract(v1: Int, v2: Int) = (v1 - v2) - - val windowedCounts = words.map(x => (x, 1)).reduceByKeyAndWindow(add _, subtract _, Seconds(10), Seconds(1), 10) - windowedCounts.persist(StorageLevel.MEMORY_ONLY_DESER, StorageLevel.MEMORY_ONLY_DESER, Seconds(1)) - windowedCounts.foreachRDD(_.collect) - - ssc.run - } -} diff --git a/streaming/src/main/scala/spark/streaming/examples/WordCount2.scala b/streaming/src/main/scala/spark/streaming/examples/WordCount2.scala deleted file mode 100644 index 9168a2fe2f..0000000000 --- a/streaming/src/main/scala/spark/streaming/examples/WordCount2.scala +++ /dev/null @@ -1,55 +0,0 @@ -package spark.streaming - -import spark.SparkContext -import SparkContext._ -import SparkStreamContext._ - -import spark.storage.StorageLevel - -import scala.util.Sorting - -object WordCount2 { - - def moreWarmup(sc: SparkContext) { - (0 until 20).foreach {i => - sc.parallelize(1 to 20000000, 500) - .map(_ % 100).map(_.toString) - .map(x => (x, 1)).reduceByKey(_ + _, 10) - .collect() - } - } - - def main (args: Array[String]) { - - if (args.length < 2) { - println ("Usage: SparkStreamContext <# sentence streams>") - System.exit(1) - } - - val ssc = new SparkStreamContext(args(0), "WordCount2") - - val numSentenceStreams = if (args.length > 1) args(1).toInt else 1 - if (args.length > 2) { - ssc.setTempDir(args(2)) - } - - GrepCount2.warmConnectionManagers(ssc.sc) - /*moreWarmup(ssc.sc)*/ - - val sentences = new UnifiedRDS( - (1 to numSentenceStreams).map(i => ssc.readTestStream("Sentences-" + i, 1000)).toArray - ) - - val words = sentences.flatMap(_.split(" ")) - - def add(v1: Int, v2: Int) = (v1 + v2) - def subtract(v1: Int, v2: Int) = (v1 - v2) - - val windowedCounts = words.map(x => (x, 1)).reduceByKeyAndWindow(add _, subtract _, Seconds(10), Seconds(1), 6) - windowedCounts.persist(StorageLevel.MEMORY_ONLY_DESER, StorageLevel.MEMORY_ONLY_DESER, Seconds(1)) - windowedCounts.foreachRDD(_.collect) - - ssc.run - } -} - diff --git a/streaming/src/main/scala/spark/streaming/examples/WordCount2_Special.scala b/streaming/src/main/scala/spark/streaming/examples/WordCount2_Special.scala deleted file mode 100644 index 1920915af7..0000000000 --- 
a/streaming/src/main/scala/spark/streaming/examples/WordCount2_Special.scala +++ /dev/null @@ -1,94 +0,0 @@ -package spark.streaming - -import spark.SparkContext -import SparkContext._ -import SparkStreamContext._ - -import spark.storage.StorageLevel - -import scala.util.Sorting -import scala.collection.mutable.ArrayBuffer -import scala.collection.mutable.HashMap -import scala.collection.mutable.Queue -import scala.collection.JavaConversions.mapAsScalaMap - -import java.lang.{Long => JLong} -import it.unimi.dsi.fastutil.objects.{Object2LongOpenHashMap => OLMap} - - -object WordCount2_ExtraFunctions { - - def add(v1: JLong, v2: JLong) = (v1 + v2) - - def subtract(v1: JLong, v2: JLong) = (v1 - v2) - - def splitAndCountPartitions(iter: Iterator[String]): Iterator[(String, JLong)] = { - val map = new java.util.HashMap[String, JLong] - var i = 0 - var j = 0 - while (iter.hasNext) { - val s = iter.next() - i = 0 - while (i < s.length) { - j = i - while (j < s.length && s.charAt(j) != ' ') { - j += 1 - } - if (j > i) { - val w = s.substring(i, j) - val c = map.get(w) - if (c == null) { - map.put(w, 1) - } else { - map.put(w, c + 1) - } - } - i = j - while (i < s.length && s.charAt(i) == ' ') { - i += 1 - } - } - } - map.toIterator - } -} - -object WordCount2_Special { - - def moreWarmup(sc: SparkContext) { - (0 until 40).foreach {i => - sc.parallelize(1 to 20000000, 1000) - .map(_ % 1331).map(_.toString) - .mapPartitions(WordCount2_ExtraFunctions.splitAndCountPartitions).reduceByKey(_ + _, 10) - .collect() - } - } - - def main (args: Array[String]) { - - if (args.length < 2) { - println ("Usage: SparkStreamContext <# sentence streams>") - System.exit(1) - } - - val ssc = new SparkStreamContext(args(0), "WordCount2") - - val numSentenceStreams = if (args.length > 1) args(1).toInt else 1 - - GrepCount2.warmConnectionManagers(ssc.sc) - /*moreWarmup(ssc.sc)*/ - - val sentences = new UnifiedRDS( - (1 to numSentenceStreams).map(i => ssc.readTestStream("Sentences-" + i, 500)).toArray - ) - - val windowedCounts = sentences - .mapPartitions(WordCount2_ExtraFunctions.splitAndCountPartitions) - .reduceByKeyAndWindow(WordCount2_ExtraFunctions.add _, WordCount2_ExtraFunctions.subtract _, Seconds(10), Milliseconds(500), 10) - windowedCounts.persist(StorageLevel.MEMORY_ONLY, StorageLevel.MEMORY_ONLY, Milliseconds(500)) - windowedCounts.foreachRDD(_.collect) - - ssc.run - } -} - diff --git a/streaming/src/main/scala/spark/streaming/examples/WordCount3.scala b/streaming/src/main/scala/spark/streaming/examples/WordCount3.scala deleted file mode 100644 index 018c19a509..0000000000 --- a/streaming/src/main/scala/spark/streaming/examples/WordCount3.scala +++ /dev/null @@ -1,49 +0,0 @@ -package spark.streaming - -import SparkStreamContext._ - -import scala.util.Sorting - -object WordCount3 { - - def main (args: Array[String]) { - - if (args.length < 1) { - println ("Usage: SparkStreamContext []") - System.exit(1) - } - - val ssc = new SparkStreamContext(args(0), "WordCount") - if (args.length > 1) { - ssc.setTempDir(args(1)) - } - val sentences = ssc.readNetworkStream[String]("Sentences", Array("localhost:55119"), 1000) - /*sentences.print*/ - - val words = sentences.flatMap(_.split(" ")) - - def add(v1: Int, v2: Int) = (v1 + v2) - def subtract(v1: Int, v2: Int) = (v1 - v2) - - /*val windowedCounts = words.map(x => (x, 1)).window(Seconds(5), Seconds(1)).reduceByKey(add _, 1)*/ - val windowedCounts = words.map(x => (x, 1)).reduceByKeyAndWindow(add _, subtract _, Seconds(5), Seconds(1), 1) - /*windowedCounts.print */ - 
- def topK(data: Seq[(String, Int)], k: Int): Array[(String, Int)] = { - implicit val countOrdering = new Ordering[(String, Int)] { - override def compare(count1: (String, Int), count2: (String, Int)): Int = { - count2._2 - count1._2 - } - } - val array = data.toArray - Sorting.quickSort(array) - array.take(k) - } - - val k = 10 - val topKWindowedCounts = windowedCounts.glom.flatMap(topK(_, k)).collect.flatMap(topK(_, k)) - topKWindowedCounts.print - - ssc.run - } -} diff --git a/streaming/src/main/scala/spark/streaming/examples/WordCountEc2.scala b/streaming/src/main/scala/spark/streaming/examples/WordCountEc2.scala deleted file mode 100644 index 82b9fa781d..0000000000 --- a/streaming/src/main/scala/spark/streaming/examples/WordCountEc2.scala +++ /dev/null @@ -1,41 +0,0 @@ -package spark.streaming - -import SparkStreamContext._ -import spark.SparkContext - -object WordCountEc2 { - var inputFile : String = null - var HDFS : String = null - var idealPartitions : Int = 0 - - def main (args: Array[String]) { - - if (args.length != 4) { - println ("Usage: SparkStreamContext ") - System.exit(1) - } - - HDFS = args(1) - inputFile = HDFS + args(2) - idealPartitions = args(3).toInt - println ("Input file: " + inputFile) - - SparkContext.idealPartitions = idealPartitions - SparkContext.inputFile = inputFile - - val ssc = new SparkStreamContext(args(0), "Test") - val sentences = ssc.readNetworkStream[String]("Sentences", Array("localhost:55119"), 1000) - /*sentences.foreach(println)*/ - - val words = sentences.flatMap(_.split(" ")) - /*words.foreach(println)*/ - - val counts = words.map(x => (x, 1)).reduceByKey(_ + _) - /*counts.foreach(println)*/ - - counts.foreachRDD(rdd => rdd.collect.foreach(x => x)) - /*counts.register*/ - - ssc.run - } -} diff --git a/streaming/src/main/scala/spark/streaming/examples/WordCountTrivialWindow.scala b/streaming/src/main/scala/spark/streaming/examples/WordCountTrivialWindow.scala deleted file mode 100644 index 114dd144f1..0000000000 --- a/streaming/src/main/scala/spark/streaming/examples/WordCountTrivialWindow.scala +++ /dev/null @@ -1,51 +0,0 @@ -package spark.streaming - -import SparkStreamContext._ - -import scala.util.Sorting - -object WordCountTrivialWindow { - - def main (args: Array[String]) { - - if (args.length < 1) { - println ("Usage: SparkStreamContext []") - System.exit(1) - } - - val ssc = new SparkStreamContext(args(0), "WordCountTrivialWindow") - if (args.length > 1) { - ssc.setTempDir(args(1)) - } - val sentences = ssc.readNetworkStream[String]("Sentences", Array("localhost:55119"), 1000) - /*sentences.print*/ - - val words = sentences.flatMap(_.split(" ")) - - /*val counts = words.map(x => (x, 1)).reduceByKey(_ + _, 1)*/ - /*counts.print*/ - - def add(v1: Int, v2: Int) = (v1 + v2) - def subtract(v1: Int, v2: Int) = (v1 - v2) - - val windowedCounts = words.map(x => (x, 1)).window(Seconds(5), Seconds(1)).reduceByKey(add _, 1) - /*windowedCounts.print */ - - def topK(data: Seq[(String, Int)], k: Int): Array[(String, Int)] = { - implicit val countOrdering = new Ordering[(String, Int)] { - override def compare(count1: (String, Int), count2: (String, Int)): Int = { - count2._2 - count1._2 - } - } - val array = data.toArray - Sorting.quickSort(array) - array.take(k) - } - - val k = 10 - val topKWindowedCounts = windowedCounts.glom.flatMap(topK(_, k)).collect.flatMap(topK(_, k)) - topKWindowedCounts.print - - ssc.run - } -} diff --git a/streaming/src/main/scala/spark/streaming/examples/WordMax.scala 
b/streaming/src/main/scala/spark/streaming/examples/WordMax.scala deleted file mode 100644 index fbfc48030f..0000000000 --- a/streaming/src/main/scala/spark/streaming/examples/WordMax.scala +++ /dev/null @@ -1,64 +0,0 @@ -package spark.streaming - -import SparkStreamContext._ - -import scala.util.Sorting - -import spark.SparkContext -import spark.storage.StorageLevel - -object WordMax { - var inputFile : String = null - var HDFS : String = null - var idealPartitions : Int = 0 - - def main (args: Array[String]) { - - if (args.length != 4) { - println ("Usage: WordCount ") - System.exit(1) - } - - HDFS = args(1) - inputFile = HDFS + args(2) - idealPartitions = args(3).toInt - println ("Input file: " + inputFile) - - val ssc = new SparkStreamContext(args(0), "WordCountWindow") - - SparkContext.idealPartitions = idealPartitions - SparkContext.inputFile = inputFile - - val sentences = ssc.readNetworkStream[String]("Sentences", Array("localhost:55119"), 2000) - //sentences.print - - val words = sentences.flatMap(_.split(" ")) - - def add(v1: Int, v2: Int) = (v1 + v2) - def subtract(v1: Int, v2: Int) = (v1 - v2) - def max(v1: Int, v2: Int) = (if (v1 > v2) v1 else v2) - - //val windowedCounts = words.map(x => (x, 1)).reduceByKeyAndWindow(add _, subtract _, Seconds(30), Milliseconds(2000), - // System.getProperty("spark.default.parallelism", "1").toInt) - //windowedCounts.persist(StorageLevel.MEMORY_ONLY_DESER, StorageLevel.DISK_AND_MEMORY_DESER_2, Seconds(5)) - //windowedCounts.print - - val parallelism = System.getProperty("spark.default.parallelism", "1").toInt - - val localCounts = words.map(x => (x, 1)).reduceByKey(add _, parallelism) - //localCounts.persist(StorageLevel.MEMORY_ONLY_DESER) - localCounts.persist(StorageLevel.MEMORY_ONLY_DESER_2) - val windowedCounts = localCounts.window(Seconds(30), Seconds(2)).reduceByKey(max _) - - //val windowedCounts = words.map(x => (x, 1)).reduceByKeyAndWindow(add _, subtract _, Seconds(30), Seconds(2), - // parallelism) - //windowedCounts.persist(StorageLevel.MEMORY_ONLY_DESER, StorageLevel.MEMORY_ONLY_DESER_2, Seconds(6)) - - //windowedCounts.print - windowedCounts.register - //windowedCounts.foreachRDD(rdd => rdd.collect.foreach(x => print(x+ " "))) - //windowedCounts.foreachRDD(rdd => rdd.collect.foreach(x => x)) - - ssc.run - } -} diff --git a/streaming/src/main/scala/spark/streaming/util/RecurringTimer.scala b/streaming/src/main/scala/spark/streaming/util/RecurringTimer.scala new file mode 100644 index 0000000000..6125bb82eb --- /dev/null +++ b/streaming/src/main/scala/spark/streaming/util/RecurringTimer.scala @@ -0,0 +1,52 @@ +package spark.streaming.util + +class RecurringTimer(period: Long, callback: (Long) => Unit) { + + val minPollTime = 25L + + val pollTime = { + if (period / 10.0 > minPollTime) { + (period / 10.0).toLong + } else { + minPollTime + } + } + + val thread = new Thread() { + override def run() { loop } + } + + var nextTime = 0L + + def start(): Long = { + nextTime = (math.floor(System.currentTimeMillis() / period) + 1).toLong * period + thread.start() + nextTime + } + + def stop() { + thread.interrupt() + } + + def loop() { + try { + while (true) { + val beforeSleepTime = System.currentTimeMillis() + while (beforeSleepTime >= nextTime) { + callback(nextTime) + nextTime += period + } + val sleepTime = if (nextTime - beforeSleepTime < 2 * pollTime) { + nextTime - beforeSleepTime + } else { + pollTime + } + Thread.sleep(sleepTime) + val afterSleepTime = System.currentTimeMillis() + } + } catch { + case e: InterruptedException => + } 
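// A descriptive note on the timer above: start() aligns nextTime to the next multiple of
// `period`, and loop() then fires the callback for every period boundary that has already
// passed before sleeping again. If the next boundary is under 2 * pollTime away, the thread
// sleeps exactly the remaining time; otherwise it sleeps pollTime (a tenth of the period,
// but never less than 25 ms) and re-checks the clock, so a late wake-up delays ticks but
// never skips them. stop() interrupts the thread, which exits the loop through the
// InterruptedException handler just above.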
+ } +} + diff --git a/streaming/src/main/scala/spark/streaming/utils/SenderReceiverTest.scala b/streaming/src/main/scala/spark/streaming/util/SenderReceiverTest.scala similarity index 98% rename from streaming/src/main/scala/spark/streaming/utils/SenderReceiverTest.scala rename to streaming/src/main/scala/spark/streaming/util/SenderReceiverTest.scala index 6af270298a..9925b1d07c 100644 --- a/streaming/src/main/scala/spark/streaming/utils/SenderReceiverTest.scala +++ b/streaming/src/main/scala/spark/streaming/util/SenderReceiverTest.scala @@ -1,4 +1,5 @@ -package spark.streaming +package spark.streaming.util + import java.net.{Socket, ServerSocket} import java.io.{ByteArrayOutputStream, DataOutputStream, DataInputStream, BufferedInputStream} diff --git a/streaming/src/main/scala/spark/streaming/utils/SentenceFileGenerator.scala b/streaming/src/main/scala/spark/streaming/util/SentenceFileGenerator.scala similarity index 98% rename from streaming/src/main/scala/spark/streaming/utils/SentenceFileGenerator.scala rename to streaming/src/main/scala/spark/streaming/util/SentenceFileGenerator.scala index 15858f59e3..94e8f7a849 100644 --- a/streaming/src/main/scala/spark/streaming/utils/SentenceFileGenerator.scala +++ b/streaming/src/main/scala/spark/streaming/util/SentenceFileGenerator.scala @@ -1,4 +1,4 @@ -package spark.streaming +package spark.streaming.util import spark._ diff --git a/streaming/src/main/scala/spark/streaming/utils/ShuffleTest.scala b/streaming/src/main/scala/spark/streaming/util/ShuffleTest.scala similarity index 94% rename from streaming/src/main/scala/spark/streaming/utils/ShuffleTest.scala rename to streaming/src/main/scala/spark/streaming/util/ShuffleTest.scala index 32aa4144a0..60085f4f88 100644 --- a/streaming/src/main/scala/spark/streaming/utils/ShuffleTest.scala +++ b/streaming/src/main/scala/spark/streaming/util/ShuffleTest.scala @@ -1,4 +1,5 @@ -package spark.streaming +package spark.streaming.util + import spark.SparkContext import SparkContext._ diff --git a/streaming/src/main/scala/spark/streaming/util/Utils.scala b/streaming/src/main/scala/spark/streaming/util/Utils.scala new file mode 100644 index 0000000000..86a729fb49 --- /dev/null +++ b/streaming/src/main/scala/spark/streaming/util/Utils.scala @@ -0,0 +1,9 @@ +package spark.streaming.util + +object Utils { + def time(func: => Unit): Long = { + val t = System.currentTimeMillis + func + (System.currentTimeMillis - t) + } +} \ No newline at end of file diff --git a/streaming/src/main/scala/spark/streaming/utils/SenGeneratorForPerformanceTest.scala b/streaming/src/main/scala/spark/streaming/utils/SenGeneratorForPerformanceTest.scala deleted file mode 100644 index bb32089ae2..0000000000 --- a/streaming/src/main/scala/spark/streaming/utils/SenGeneratorForPerformanceTest.scala +++ /dev/null @@ -1,78 +0,0 @@ -package spark.streaming - -import scala.util.Random -import scala.io.Source -import scala.actors._ -import scala.actors.Actor._ -import scala.actors.remote._ -import scala.actors.remote.RemoteActor._ - -import java.net.InetSocketAddress - -/*import akka.actor.Actor._*/ -/*import akka.actor.ActorRef*/ - - -object SenGeneratorForPerformanceTest { - - def printUsage () { - println ("Usage: SentenceGenerator []") - System.exit(0) - } - - def main (args: Array[String]) { - if (args.length < 3) { - printUsage - } - - val inputManagerIP = args(0) - val inputManagerPort = args(1).toInt - val sentenceFile = args(2) - val sentencesPerSecond = { - if (args.length > 3) args(3).toInt - else 10 - } - - val source = 
Source.fromFile(sentenceFile) - val lines = source.mkString.split ("\n") - source.close () - - try { - /*val inputManager = remote.actorFor("InputReceiver-Sentences",*/ - /* inputManagerIP, inputManagerPort)*/ - val inputManager = select(Node(inputManagerIP, inputManagerPort), Symbol("InputReceiver-Sentences")) - val sleepBetweenSentences = 1000.0 / sentencesPerSecond.toDouble - 1 - val random = new Random () - println ("Sending " + sentencesPerSecond + " sentences per second to " + inputManagerIP + ":" + inputManagerPort) - var lastPrintTime = System.currentTimeMillis() - var count = 0 - - while (true) { - /*if (!inputManager.tryTell (lines (random.nextInt (lines.length))))*/ - /*throw new Exception ("disconnected")*/ -// inputManager ! lines (random.nextInt (lines.length)) - for (i <- 0 to sentencesPerSecond) inputManager ! lines (0) - println(System.currentTimeMillis / 1000 + " s") -/* count += 1 - - if (System.currentTimeMillis - lastPrintTime >= 1000) { - println (count + " sentences sent last second") - count = 0 - lastPrintTime = System.currentTimeMillis - } - - Thread.sleep (sleepBetweenSentences.toLong) -*/ - val currentMs = System.currentTimeMillis / 1000; - Thread.sleep ((currentMs * 1000 + 1000) - System.currentTimeMillis) - } - } catch { - case e: Exception => - /*Thread.sleep (1000)*/ - } - } -} - - - - diff --git a/streaming/src/main/scala/spark/streaming/utils/SentenceGenerator.scala b/streaming/src/main/scala/spark/streaming/utils/SentenceGenerator.scala deleted file mode 100644 index a9f124d2d7..0000000000 --- a/streaming/src/main/scala/spark/streaming/utils/SentenceGenerator.scala +++ /dev/null @@ -1,103 +0,0 @@ -package spark.streaming - -import scala.util.Random -import scala.io.Source -import scala.actors._ -import scala.actors.Actor._ -import scala.actors.remote._ -import scala.actors.remote.RemoteActor._ - -import java.net.InetSocketAddress - - -object SentenceGenerator { - - def printUsage { - println ("Usage: SentenceGenerator []") - System.exit(0) - } - - def generateRandomSentences(lines: Array[String], sentencesPerSecond: Int, streamReceiver: AbstractActor) { - val sleepBetweenSentences = 1000.0 / sentencesPerSecond.toDouble - 1 - val random = new Random () - - try { - var lastPrintTime = System.currentTimeMillis() - var count = 0 - while(true) { - streamReceiver ! lines(random.nextInt(lines.length)) - count += 1 - if (System.currentTimeMillis - lastPrintTime >= 1000) { - println (count + " sentences sent last second") - count = 0 - lastPrintTime = System.currentTimeMillis - } - Thread.sleep(sleepBetweenSentences.toLong) - } - } catch { - case e: Exception => - } - } - - def generateSameSentences(lines: Array[String], sentencesPerSecond: Int, streamReceiver: AbstractActor) { - try { - val numSentences = if (sentencesPerSecond <= 0) { - lines.length - } else { - sentencesPerSecond - } - var nextSendingTime = System.currentTimeMillis() - val pingInterval = if (System.getenv("INTERVAL") != null) { - System.getenv("INTERVAL").toInt - } else { - 2000 - } - while(true) { - (0 until numSentences).foreach(i => { - streamReceiver ! 
lines(i % lines.length) - }) - println ("Sent " + numSentences + " sentences") - nextSendingTime += pingInterval - val sleepTime = nextSendingTime - System.currentTimeMillis - if (sleepTime > 0) { - println ("Sleeping for " + sleepTime + " ms") - Thread.sleep(sleepTime) - } - } - } catch { - case e: Exception => - } - } - - def main(args: Array[String]) { - if (args.length < 3) { - printUsage - } - - val generateRandomly = false - - val streamReceiverIP = args(0) - val streamReceiverPort = args(1).toInt - val sentenceFile = args(2) - val sentencesPerSecond = if (args.length > 3) args(3).toInt else 10 - val sentenceInputName = if (args.length > 4) args(4) else "Sentences" - - println("Sending " + sentencesPerSecond + " sentences per second to " + - streamReceiverIP + ":" + streamReceiverPort + "/NetworkStreamReceiver-" + sentenceInputName) - val source = Source.fromFile(sentenceFile) - val lines = source.mkString.split ("\n") - source.close () - - val streamReceiver = select( - Node(streamReceiverIP, streamReceiverPort), - Symbol("NetworkStreamReceiver-" + sentenceInputName)) - if (generateRandomly) { - generateRandomSentences(lines, sentencesPerSecond, streamReceiver) - } else { - generateSameSentences(lines, sentencesPerSecond, streamReceiver) - } - } -} - - - diff --git a/streaming/src/test/scala/spark/streaming/RDSSuite.scala b/streaming/src/test/scala/spark/streaming/RDSSuite.scala new file mode 100644 index 0000000000..f51ea50a5d --- /dev/null +++ b/streaming/src/test/scala/spark/streaming/RDSSuite.scala @@ -0,0 +1,65 @@ +package spark.streaming + +import spark.RDD + +import org.scalatest.FunSuite +import org.scalatest.BeforeAndAfter + +import scala.collection.mutable.ArrayBuffer +import scala.collection.mutable.SynchronizedQueue + +class RDSSuite extends FunSuite with BeforeAndAfter { + + var ssc: SparkStreamContext = null + val batchDurationMillis = 1000 + + def testOp[U: ClassManifest, V: ClassManifest]( + input: Seq[Seq[U]], + operation: RDS[U] => RDS[V], + expectedOutput: Seq[Seq[V]]) = { + try { + ssc = new SparkStreamContext("local", "test") + ssc.setBatchDuration(Milliseconds(batchDurationMillis)) + + val inputStream = ssc.createQueueStream(input.map(ssc.sc.makeRDD(_, 2)).toIterator) + val outputStream = operation(inputStream) + val outputQueue = outputStream.toQueue + + ssc.start() + Thread.sleep(batchDurationMillis * input.size) + + val output = new ArrayBuffer[Seq[V]]() + while(outputQueue.size > 0) { + val rdd = outputQueue.take() + println("Collecting RDD " + rdd.id + ", " + rdd.getClass().getSimpleName() + ", " + rdd.splits.size) + output += (rdd.collect()) + } + assert(output.size === expectedOutput.size) + for (i <- 0 until output.size) { + assert(output(i).toList === expectedOutput(i).toList) + } + } finally { + ssc.stop() + } + } + + test("basic operations") { + val inputData = Array(1 to 4, 5 to 8, 9 to 12) + + // map + testOp(inputData, (r: RDS[Int]) => r.map(_.toString), inputData.map(_.map(_.toString))) + + // flatMap + testOp(inputData, (r: RDS[Int]) => r.flatMap(x => Array(x, x * 2)), + inputData.map(_.flatMap(x => Array(x, x * 2))) + ) + } +} + +object RDSSuite { + def main(args: Array[String]) { + val r = new RDSSuite() + val inputData = Array(1 to 4, 5 to 8, 9 to 12) + r.testOp(inputData, (r: RDS[Int]) => r.map(_.toString), inputData.map(_.map(_.toString))) + } +} \ No newline at end of file From ed897ac5e1d1dd2ce144b232cf7a73db2d6679f9 Mon Sep 17 00:00:00 2001 From: Tathagata Das Date: Wed, 1 Aug 2012 22:28:54 -0700 Subject: [PATCH 005/226] Moved streaming 
files not immediately necessary to spark.streaming.util. --- streaming/src/main/scala/spark/streaming/Job.scala | 7 ++++--- .../spark/streaming/{ => util}/ConnectionHandler.scala | 2 +- .../scala/spark/streaming/{ => util}/TestGenerator.scala | 2 +- .../spark/streaming/{ => util}/TestGenerator2.scala | 2 +- .../spark/streaming/{ => util}/TestGenerator4.scala | 2 +- .../streaming/{ => util}/TestStreamCoordinator.scala | 3 ++- .../spark/streaming/{ => util}/TestStreamReceiver3.scala | 3 ++- .../spark/streaming/{ => util}/TestStreamReceiver4.scala | 3 ++- .../src/main/scala/spark/streaming/util/Utils.scala | 9 --------- 9 files changed, 14 insertions(+), 19 deletions(-) rename streaming/src/main/scala/spark/streaming/{ => util}/ConnectionHandler.scala (99%) rename streaming/src/main/scala/spark/streaming/{ => util}/TestGenerator.scala (99%) rename streaming/src/main/scala/spark/streaming/{ => util}/TestGenerator2.scala (99%) rename streaming/src/main/scala/spark/streaming/{ => util}/TestGenerator4.scala (99%) rename streaming/src/main/scala/spark/streaming/{ => util}/TestStreamCoordinator.scala (94%) rename streaming/src/main/scala/spark/streaming/{ => util}/TestStreamReceiver3.scala (99%) rename streaming/src/main/scala/spark/streaming/{ => util}/TestStreamReceiver4.scala (99%) delete mode 100644 streaming/src/main/scala/spark/streaming/util/Utils.scala diff --git a/streaming/src/main/scala/spark/streaming/Job.scala b/streaming/src/main/scala/spark/streaming/Job.scala index 36958dafe1..2481a9a3ef 100644 --- a/streaming/src/main/scala/spark/streaming/Job.scala +++ b/streaming/src/main/scala/spark/streaming/Job.scala @@ -1,11 +1,12 @@ package spark.streaming -import spark.streaming.util.Utils - class Job(val time: Time, func: () => _) { val id = Job.getNewId() def run(): Long = { - Utils.time { func() } + val startTime = System.currentTimeMillis + func() + val stopTime = System.currentTimeMillis + (startTime - stopTime) } override def toString = "streaming job " + id + " @ " + time diff --git a/streaming/src/main/scala/spark/streaming/ConnectionHandler.scala b/streaming/src/main/scala/spark/streaming/util/ConnectionHandler.scala similarity index 99% rename from streaming/src/main/scala/spark/streaming/ConnectionHandler.scala rename to streaming/src/main/scala/spark/streaming/util/ConnectionHandler.scala index a4f454632f..cde868a0c9 100644 --- a/streaming/src/main/scala/spark/streaming/ConnectionHandler.scala +++ b/streaming/src/main/scala/spark/streaming/util/ConnectionHandler.scala @@ -1,4 +1,4 @@ -package spark.streaming +package spark.streaming.util import spark.Logging diff --git a/streaming/src/main/scala/spark/streaming/TestGenerator.scala b/streaming/src/main/scala/spark/streaming/util/TestGenerator.scala similarity index 99% rename from streaming/src/main/scala/spark/streaming/TestGenerator.scala rename to streaming/src/main/scala/spark/streaming/util/TestGenerator.scala index 0ff6af61f2..23e9235c60 100644 --- a/streaming/src/main/scala/spark/streaming/TestGenerator.scala +++ b/streaming/src/main/scala/spark/streaming/util/TestGenerator.scala @@ -1,4 +1,4 @@ -package spark.streaming +package spark.streaming.util import scala.util.Random import scala.io.Source diff --git a/streaming/src/main/scala/spark/streaming/TestGenerator2.scala b/streaming/src/main/scala/spark/streaming/util/TestGenerator2.scala similarity index 99% rename from streaming/src/main/scala/spark/streaming/TestGenerator2.scala rename to streaming/src/main/scala/spark/streaming/util/TestGenerator2.scala index 
00d43604d0..ff840d084f 100644 --- a/streaming/src/main/scala/spark/streaming/TestGenerator2.scala +++ b/streaming/src/main/scala/spark/streaming/util/TestGenerator2.scala @@ -1,4 +1,4 @@ -package spark.streaming +package spark.streaming.util import scala.util.Random import scala.io.Source diff --git a/streaming/src/main/scala/spark/streaming/TestGenerator4.scala b/streaming/src/main/scala/spark/streaming/util/TestGenerator4.scala similarity index 99% rename from streaming/src/main/scala/spark/streaming/TestGenerator4.scala rename to streaming/src/main/scala/spark/streaming/util/TestGenerator4.scala index 93c7f2f440..9c39ef3e12 100644 --- a/streaming/src/main/scala/spark/streaming/TestGenerator4.scala +++ b/streaming/src/main/scala/spark/streaming/util/TestGenerator4.scala @@ -1,4 +1,4 @@ -package spark.streaming +package spark.streaming.util import spark.Logging diff --git a/streaming/src/main/scala/spark/streaming/TestStreamCoordinator.scala b/streaming/src/main/scala/spark/streaming/util/TestStreamCoordinator.scala similarity index 94% rename from streaming/src/main/scala/spark/streaming/TestStreamCoordinator.scala rename to streaming/src/main/scala/spark/streaming/util/TestStreamCoordinator.scala index c658a036f9..f584f772bb 100644 --- a/streaming/src/main/scala/spark/streaming/TestStreamCoordinator.scala +++ b/streaming/src/main/scala/spark/streaming/util/TestStreamCoordinator.scala @@ -1,5 +1,6 @@ -package spark.streaming +package spark.streaming.util +import spark.streaming._ import spark.Logging import akka.actor._ diff --git a/streaming/src/main/scala/spark/streaming/TestStreamReceiver3.scala b/streaming/src/main/scala/spark/streaming/util/TestStreamReceiver3.scala similarity index 99% rename from streaming/src/main/scala/spark/streaming/TestStreamReceiver3.scala rename to streaming/src/main/scala/spark/streaming/util/TestStreamReceiver3.scala index bbf2c7bf5e..d00ae9cbca 100644 --- a/streaming/src/main/scala/spark/streaming/TestStreamReceiver3.scala +++ b/streaming/src/main/scala/spark/streaming/util/TestStreamReceiver3.scala @@ -1,8 +1,9 @@ -package spark.streaming +package spark.streaming.util import spark._ import spark.storage._ import spark.util.AkkaUtils +import spark.streaming._ import scala.math._ import scala.collection.mutable.{Queue, HashMap, ArrayBuffer, SynchronizedMap} diff --git a/streaming/src/main/scala/spark/streaming/TestStreamReceiver4.scala b/streaming/src/main/scala/spark/streaming/util/TestStreamReceiver4.scala similarity index 99% rename from streaming/src/main/scala/spark/streaming/TestStreamReceiver4.scala rename to streaming/src/main/scala/spark/streaming/util/TestStreamReceiver4.scala index a2babb23f4..31754870dd 100644 --- a/streaming/src/main/scala/spark/streaming/TestStreamReceiver4.scala +++ b/streaming/src/main/scala/spark/streaming/util/TestStreamReceiver4.scala @@ -1,5 +1,6 @@ -package spark.streaming +package spark.streaming.util +import spark.streaming._ import spark._ import spark.storage._ import spark.util.AkkaUtils diff --git a/streaming/src/main/scala/spark/streaming/util/Utils.scala b/streaming/src/main/scala/spark/streaming/util/Utils.scala deleted file mode 100644 index 86a729fb49..0000000000 --- a/streaming/src/main/scala/spark/streaming/util/Utils.scala +++ /dev/null @@ -1,9 +0,0 @@ -package spark.streaming.util - -object Utils { - def time(func: => Unit): Long = { - val t = System.currentTimeMillis - func - (System.currentTimeMillis - t) - } -} \ No newline at end of file From 650d11817eb15c1c2a8dc322b72c753df88bf8d3 Mon Sep 17 
00:00:00 2001 From: Matei Zaharia Date: Thu, 2 Aug 2012 11:09:43 -0400 Subject: [PATCH 006/226] Added a WordCount for external data and fixed bugs in file streams --- .../scala/spark/streaming/FileInputRDS.scala | 13 +++++----- .../scala/spark/streaming/JobManager.scala | 2 +- .../spark/streaming/examples/WordCount.scala | 25 +++++++++++++++++++ 3 files changed, 33 insertions(+), 7 deletions(-) create mode 100644 streaming/src/main/scala/spark/streaming/examples/WordCount.scala diff --git a/streaming/src/main/scala/spark/streaming/FileInputRDS.scala b/streaming/src/main/scala/spark/streaming/FileInputRDS.scala index dde80cd27a..ebd246823d 100644 --- a/streaming/src/main/scala/spark/streaming/FileInputRDS.scala +++ b/streaming/src/main/scala/spark/streaming/FileInputRDS.scala @@ -43,12 +43,11 @@ class FileInputRDS[K: ClassManifest, V: ClassManifest, F <: NewInputFormat[K,V] var latestModTime = 0L def accept(path: Path): Boolean = { - if (!filter.accept(path)) { return false } else { val modTime = fs.getFileStatus(path).getModificationTime() - if (modTime < lastModTime) { + if (modTime <= lastModTime) { return false } if (modTime > latestModTime) { @@ -60,10 +59,12 @@ class FileInputRDS[K: ClassManifest, V: ClassManifest, F <: NewInputFormat[K,V] } val newFiles = fs.listStatus(directory, newFilter) - lastModTime = newFilter.latestModTime - val newRDD = new UnionRDD(ssc.sc, newFiles.map(file => - ssc.sc.newAPIHadoopFile[K, V, F](file.getPath.toString)) - ) + logInfo("New files: " + newFiles.map(_.getPath).mkString(", ")) + if (newFiles.length > 0) { + lastModTime = newFilter.latestModTime + } + val newRDD = new UnionRDD(ssc.sc, newFiles.map( + file => ssc.sc.newAPIHadoopFile[K, V, F](file.getPath.toString))) Some(newRDD) } } diff --git a/streaming/src/main/scala/spark/streaming/JobManager.scala b/streaming/src/main/scala/spark/streaming/JobManager.scala index 43d167f7db..c37fe1e9ad 100644 --- a/streaming/src/main/scala/spark/streaming/JobManager.scala +++ b/streaming/src/main/scala/spark/streaming/JobManager.scala @@ -13,7 +13,7 @@ class JobManager(ssc: SparkStreamContext, numThreads: Int = 1) extends Logging { try { val timeTaken = job.run() logInfo( - "Runnning " + job + " took " + timeTaken + " ms, " + + "Running " + job + " took " + timeTaken + " ms, " + "total delay was " + (System.currentTimeMillis - job.time) + " ms" ) } catch { diff --git a/streaming/src/main/scala/spark/streaming/examples/WordCount.scala b/streaming/src/main/scala/spark/streaming/examples/WordCount.scala new file mode 100644 index 0000000000..a155630151 --- /dev/null +++ b/streaming/src/main/scala/spark/streaming/examples/WordCount.scala @@ -0,0 +1,25 @@ +package spark.streaming.examples + +import spark.streaming.{Seconds, SparkStreamContext} +import spark.streaming.SparkStreamContext._ + +object WordCount { + def main(args: Array[String]) { + if (args.length < 2) { + System.err.println("Usage: WordCount ") + System.exit(1) + } + + // Create the context and set the batch size + val ssc = new SparkStreamContext(args(0), "ExampleTwo") + ssc.setBatchDuration(Seconds(2)) + + // Create the FileInputRDS on the directory and use the + // stream to count words in new files created + val inputRDS = ssc.createTextFileStream(args(1)) + val wordsRDS = inputRDS.flatMap(_.split(" ")) + val wordCountsRDS = wordsRDS.map(x => (x, 1)).reduceByKey(_ + _) + wordCountsRDS.print() + ssc.start() + } +} From 29bf44473c9d76622628f2511588f7846e9b1f3c Mon Sep 17 00:00:00 2001 From: Matei Zaharia Date: Thu, 2 Aug 2012 11:43:04 -0400 Subject: 
[PATCH 007/226] Added an RDS that repeatedly returns the same input --- .../spark/streaming/ConstantInputRDS.scala | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) create mode 100644 streaming/src/main/scala/spark/streaming/ConstantInputRDS.scala diff --git a/streaming/src/main/scala/spark/streaming/ConstantInputRDS.scala b/streaming/src/main/scala/spark/streaming/ConstantInputRDS.scala new file mode 100644 index 0000000000..bf2e6f7e16 --- /dev/null +++ b/streaming/src/main/scala/spark/streaming/ConstantInputRDS.scala @@ -0,0 +1,18 @@ +package spark.streaming + +import spark.RDD + +/** + * An input stream that always returns the same RDD on each timestep. Useful for testing. + */ +class ConstantInputRDS[T: ClassManifest](ssc: SparkStreamContext, rdd: RDD[T]) + extends InputRDS[T](ssc) { + + override def start() {} + + override def stop() {} + + override def compute(validTime: Time): Option[RDD[T]] = { + Some(rdd) + } +} \ No newline at end of file From 43b81eb2719c4666b7869d7d0290f2ee83daeafa Mon Sep 17 00:00:00 2001 From: Matei Zaharia Date: Thu, 2 Aug 2012 14:05:51 -0400 Subject: [PATCH 008/226] Renamed RDS to DStream, plus minor style fixes --- ...utRDS.scala => ConstantInputDStream.scala} | 4 +- .../streaming/{RDS.scala => DStream.scala} | 130 +++++++++--------- ...eInputRDS.scala => FileInputDStream.scala} | 16 +-- .../main/scala/spark/streaming/Interval.scala | 7 +- ...tions.scala => PairDStreamFunctions.scala} | 28 ++-- ...InputRDS.scala => QueueInputDStream.scala} | 4 +- ...RDS.scala => ReducedWindowedDStream.scala} | 38 ++--- .../scala/spark/streaming/Scheduler.scala | 15 +- .../spark/streaming/SparkStreamContext.scala | 58 ++++---- .../src/main/scala/spark/streaming/Time.scala | 6 +- ...indowedRDS.scala => WindowedDStream.scala} | 16 +-- .../spark/streaming/examples/ExampleOne.scala | 4 +- .../spark/streaming/examples/ExampleTwo.scala | 10 +- .../spark/streaming/examples/WordCount.scala | 10 +- .../streaming/util/SenderReceiverTest.scala | 2 +- .../{RDSSuite.scala => DStreamSuite.scala} | 20 +-- 16 files changed, 183 insertions(+), 185 deletions(-) rename streaming/src/main/scala/spark/streaming/{ConstantInputRDS.scala => ConstantInputDStream.scala} (70%) rename streaming/src/main/scala/spark/streaming/{RDS.scala => DStream.scala} (74%) rename streaming/src/main/scala/spark/streaming/{FileInputRDS.scala => FileInputDStream.scala} (91%) rename streaming/src/main/scala/spark/streaming/{PairRDSFunctions.scala => PairDStreamFunctions.scala} (66%) rename streaming/src/main/scala/spark/streaming/{QueueInputRDS.scala => QueueInputDStream.scala} (89%) rename streaming/src/main/scala/spark/streaming/{ReducedWindowedRDS.scala => ReducedWindowedDStream.scala} (87%) rename streaming/src/main/scala/spark/streaming/{WindowedRDS.scala => WindowedDStream.scala} (75%) rename streaming/src/test/scala/spark/streaming/{RDSSuite.scala => DStreamSuite.scala} (69%) diff --git a/streaming/src/main/scala/spark/streaming/ConstantInputRDS.scala b/streaming/src/main/scala/spark/streaming/ConstantInputDStream.scala similarity index 70% rename from streaming/src/main/scala/spark/streaming/ConstantInputRDS.scala rename to streaming/src/main/scala/spark/streaming/ConstantInputDStream.scala index bf2e6f7e16..6a2be34633 100644 --- a/streaming/src/main/scala/spark/streaming/ConstantInputRDS.scala +++ b/streaming/src/main/scala/spark/streaming/ConstantInputDStream.scala @@ -5,8 +5,8 @@ import spark.RDD /** * An input stream that always returns the same RDD on each timestep. Useful for testing. 
*/ -class ConstantInputRDS[T: ClassManifest](ssc: SparkStreamContext, rdd: RDD[T]) - extends InputRDS[T](ssc) { +class ConstantInputDStream[T: ClassManifest](ssc: SparkStreamContext, rdd: RDD[T]) + extends InputDStream[T](ssc) { override def start() {} diff --git a/streaming/src/main/scala/spark/streaming/RDS.scala b/streaming/src/main/scala/spark/streaming/DStream.scala similarity index 74% rename from streaming/src/main/scala/spark/streaming/RDS.scala rename to streaming/src/main/scala/spark/streaming/DStream.scala index fd923929e7..e19d2ecef5 100644 --- a/streaming/src/main/scala/spark/streaming/RDS.scala +++ b/streaming/src/main/scala/spark/streaming/DStream.scala @@ -15,7 +15,7 @@ import scala.collection.mutable.HashMap import java.util.concurrent.ArrayBlockingQueue -abstract class RDS[T: ClassManifest] (@transient val ssc: SparkStreamContext) +abstract class DStream[T: ClassManifest] (@transient val ssc: SparkStreamContext) extends Logging with Serializable { initLogging() @@ -26,25 +26,25 @@ extends Logging with Serializable { * ---------------------------------------------- */ - // Time by which the window slides in this RDS + // Time by which the window slides in this DStream def slideTime: Time - // List of parent RDSs on which this RDS depends on - def dependencies: List[RDS[_]] + // List of parent DStreams on which this DStream depends on + def dependencies: List[DStream[_]] // Key method that computes RDD for a valid time def compute (validTime: Time): Option[RDD[T]] /** * --------------------------------------- - * Other general fields and methods of RDS + * Other general fields and methods of DStream * --------------------------------------- */ // Variable to store the RDDs generated earlier in time @transient private val generatedRDDs = new HashMap[Time, RDD[T]] () - // Variable to be set to the first time seen by the RDS (effective time zero) + // Variable to be set to the first time seen by the DStream (effective time zero) private[streaming] var zeroTime: Time = null // Variable to specify storage level @@ -58,11 +58,11 @@ extends Logging with Serializable { def persist( storageLevel: StorageLevel, checkpointLevel: StorageLevel, - checkpointInterval: Time): RDS[T] = { + checkpointInterval: Time): DStream[T] = { if (this.storageLevel != StorageLevel.NONE && this.storageLevel != storageLevel) { - // TODO: not sure this is necessary for RDSes + // TODO: not sure this is necessary for DStreams throw new UnsupportedOperationException( - "Cannot change storage level of an RDS after it was already assigned a level") + "Cannot change storage level of an DStream after it was already assigned a level") } this.storageLevel = storageLevel this.checkpointLevel = checkpointLevel @@ -70,20 +70,20 @@ extends Logging with Serializable { this } - // Set caching level for the RDDs created by this RDS - def persist(newLevel: StorageLevel): RDS[T] = persist(newLevel, StorageLevel.NONE, null) + // Set caching level for the RDDs created by this DStream + def persist(newLevel: StorageLevel): DStream[T] = persist(newLevel, StorageLevel.NONE, null) - def persist(): RDS[T] = persist(StorageLevel.MEMORY_ONLY_DESER) + def persist(): DStream[T] = persist(StorageLevel.MEMORY_ONLY_DESER) // Turn on the default caching level for this RDD - def cache(): RDS[T] = persist() + def cache(): DStream[T] = persist() def isInitialized = (zeroTime != null) /** - * This method initializes the RDS by setting the "zero" time, based on which + * This method initializes the DStream by setting the "zero" time, based on 
which * the validity of future times is calculated. This method also recursively initializes - * its parent RDSs. + * its parent DStreams. */ def initialize(time: Time) { if (zeroTime == null) { @@ -105,20 +105,20 @@ extends Logging with Serializable { } /** - * This method either retrieves a precomputed RDD of this RDS, + * This method either retrieves a precomputed RDD of this DStream, * or computes the RDD (if the time is valid) */ def getOrCompute(time: Time): Option[RDD[T]] = { - // If this RDS was not initialized (i.e., zeroTime not set), then do it + // If this DStream was not initialized (i.e., zeroTime not set), then do it // If RDD was already generated, then retrieve it from HashMap generatedRDDs.get(time) match { // If an RDD was already generated and is being reused, then - // probably all RDDs in this RDS will be reused and hence should be cached + // probably all RDDs in this DStream will be reused and hence should be cached case Some(oldRDD) => Some(oldRDD) // if RDD was not generated, and if the time is valid - // (based on sliding time of this RDS), then generate the RDD + // (based on sliding time of this DStream), then generate the RDD case None => if (isTimeValid(time)) { compute(time) match { @@ -160,21 +160,21 @@ extends Logging with Serializable { /** * -------------- - * RDS operations + * DStream operations * -------------- */ - def map[U: ClassManifest](mapFunc: T => U) = new MappedRDS(this, ssc.sc.clean(mapFunc)) + def map[U: ClassManifest](mapFunc: T => U) = new MappedDStream(this, ssc.sc.clean(mapFunc)) def flatMap[U: ClassManifest](flatMapFunc: T => Traversable[U]) = - new FlatMappedRDS(this, ssc.sc.clean(flatMapFunc)) + new FlatMappedDStream(this, ssc.sc.clean(flatMapFunc)) - def filter(filterFunc: T => Boolean) = new FilteredRDS(this, filterFunc) + def filter(filterFunc: T => Boolean) = new FilteredDStream(this, filterFunc) - def glom() = new GlommedRDS(this) + def glom() = new GlommedDStream(this) def mapPartitions[U: ClassManifest](mapPartFunc: Iterator[T] => Iterator[U]) = - new MapPartitionedRDS(this, ssc.sc.clean(mapPartFunc)) + new MapPartitionedDStream(this, ssc.sc.clean(mapPartFunc)) def reduce(reduceFunc: (T, T) => T) = this.map(x => (1, x)).reduceByKey(reduceFunc, 1).map(_._2) @@ -183,18 +183,18 @@ extends Logging with Serializable { def collect() = this.map(x => (1, x)).groupByKey(1).map(_._2) def foreach(foreachFunc: T => Unit) = { - val newrds = new PerElementForEachRDS(this, ssc.sc.clean(foreachFunc)) - ssc.registerOutputStream(newrds) - newrds + val newStream = new PerElementForEachDStream(this, ssc.sc.clean(foreachFunc)) + ssc.registerOutputStream(newStream) + newStream } def foreachRDD(foreachFunc: RDD[T] => Unit) = { - val newrds = new PerRDDForEachRDS(this, ssc.sc.clean(foreachFunc)) - ssc.registerOutputStream(newrds) - newrds + val newStream = new PerRDDForEachDStream(this, ssc.sc.clean(foreachFunc)) + ssc.registerOutputStream(newStream) + newStream } - private[streaming] def toQueue() = { + private[streaming] def toQueue = { val queue = new ArrayBlockingQueue[RDD[T]](10000) this.foreachRDD(rdd => { println("Added RDD " + rdd.id) @@ -213,12 +213,12 @@ extends Logging with Serializable { if (first11.size > 10) println("...") println() } - val newrds = new PerRDDForEachRDS(this, ssc.sc.clean(foreachFunc)) - ssc.registerOutputStream(newrds) - newrds + val newStream = new PerRDDForEachDStream(this, ssc.sc.clean(foreachFunc)) + ssc.registerOutputStream(newStream) + newStream } - def window(windowTime: Time, slideTime: Time) = new 
WindowedRDS(this, windowTime, slideTime) + def window(windowTime: Time, slideTime: Time) = new WindowedDStream(this, windowTime, slideTime) def batch(batchTime: Time) = window(batchTime, batchTime) @@ -241,15 +241,17 @@ extends Logging with Serializable { this.map(_ => 1).reduceByWindow(add _, subtract _, windowTime, slideTime) } - def union(that: RDS[T]) = new UnifiedRDS(Array(this, that)) + def union(that: DStream[T]) = new UnifiedDStream(Array(this, that)) - def register() = ssc.registerOutputStream(this) + def register() { + ssc.registerOutputStream(this) + } } -abstract class InputRDS[T: ClassManifest] ( +abstract class InputDStream[T: ClassManifest] ( ssc: SparkStreamContext) -extends RDS[T](ssc) { +extends DStream[T](ssc) { override def dependencies = List() @@ -265,10 +267,10 @@ extends RDS[T](ssc) { * TODO */ -class MappedRDS[T: ClassManifest, U: ClassManifest] ( - parent: RDS[T], +class MappedDStream[T: ClassManifest, U: ClassManifest] ( + parent: DStream[T], mapFunc: T => U) -extends RDS[U](parent.ssc) { +extends DStream[U](parent.ssc) { override def dependencies = List(parent) @@ -284,10 +286,10 @@ extends RDS[U](parent.ssc) { * TODO */ -class FlatMappedRDS[T: ClassManifest, U: ClassManifest]( - parent: RDS[T], +class FlatMappedDStream[T: ClassManifest, U: ClassManifest]( + parent: DStream[T], flatMapFunc: T => Traversable[U]) -extends RDS[U](parent.ssc) { +extends DStream[U](parent.ssc) { override def dependencies = List(parent) @@ -303,8 +305,8 @@ extends RDS[U](parent.ssc) { * TODO */ -class FilteredRDS[T: ClassManifest](parent: RDS[T], filterFunc: T => Boolean) -extends RDS[T](parent.ssc) { +class FilteredDStream[T: ClassManifest](parent: DStream[T], filterFunc: T => Boolean) +extends DStream[T](parent.ssc) { override def dependencies = List(parent) @@ -320,10 +322,10 @@ extends RDS[T](parent.ssc) { * TODO */ -class MapPartitionedRDS[T: ClassManifest, U: ClassManifest]( - parent: RDS[T], +class MapPartitionedDStream[T: ClassManifest, U: ClassManifest]( + parent: DStream[T], mapPartFunc: Iterator[T] => Iterator[U]) -extends RDS[U](parent.ssc) { +extends DStream[U](parent.ssc) { override def dependencies = List(parent) @@ -339,7 +341,7 @@ extends RDS[U](parent.ssc) { * TODO */ -class GlommedRDS[T: ClassManifest](parent: RDS[T]) extends RDS[Array[T]](parent.ssc) { +class GlommedDStream[T: ClassManifest](parent: DStream[T]) extends DStream[Array[T]](parent.ssc) { override def dependencies = List(parent) @@ -355,13 +357,13 @@ class GlommedRDS[T: ClassManifest](parent: RDS[T]) extends RDS[Array[T]](parent. 
* TODO */ -class ShuffledRDS[K: ClassManifest, V: ClassManifest, C: ClassManifest]( - parent: RDS[(K,V)], +class ShuffledDStream[K: ClassManifest, V: ClassManifest, C: ClassManifest]( + parent: DStream[(K,V)], createCombiner: V => C, mergeValue: (C, V) => C, mergeCombiner: (C, C) => C, numPartitions: Int) - extends RDS [(K,C)] (parent.ssc) { + extends DStream [(K,C)] (parent.ssc) { override def dependencies = List(parent) @@ -388,8 +390,8 @@ class ShuffledRDS[K: ClassManifest, V: ClassManifest, C: ClassManifest]( * TODO */ -class UnifiedRDS[T: ClassManifest](parents: Array[RDS[T]]) -extends RDS[T](parents(0).ssc) { +class UnifiedDStream[T: ClassManifest](parents: Array[DStream[T]]) +extends DStream[T](parents(0).ssc) { if (parents.length == 0) { throw new IllegalArgumentException("Empty array of parents") @@ -426,10 +428,10 @@ extends RDS[T](parents(0).ssc) { * TODO */ -class PerElementForEachRDS[T: ClassManifest] ( - parent: RDS[T], +class PerElementForEachDStream[T: ClassManifest] ( + parent: DStream[T], foreachFunc: T => Unit) -extends RDS[Unit](parent.ssc) { +extends DStream[Unit](parent.ssc) { override def dependencies = List(parent) @@ -457,12 +459,12 @@ extends RDS[Unit](parent.ssc) { * TODO */ -class PerRDDForEachRDS[T: ClassManifest] ( - parent: RDS[T], +class PerRDDForEachDStream[T: ClassManifest] ( + parent: DStream[T], foreachFunc: (RDD[T], Time) => Unit) -extends RDS[Unit](parent.ssc) { +extends DStream[Unit](parent.ssc) { - def this(parent: RDS[T], altForeachFunc: (RDD[T]) => Unit) = + def this(parent: DStream[T], altForeachFunc: (RDD[T]) => Unit) = this(parent, (rdd: RDD[T], time: Time) => altForeachFunc(rdd)) override def dependencies = List(parent) diff --git a/streaming/src/main/scala/spark/streaming/FileInputRDS.scala b/streaming/src/main/scala/spark/streaming/FileInputDStream.scala similarity index 91% rename from streaming/src/main/scala/spark/streaming/FileInputRDS.scala rename to streaming/src/main/scala/spark/streaming/FileInputDStream.scala index ebd246823d..88aa375289 100644 --- a/streaming/src/main/scala/spark/streaming/FileInputRDS.scala +++ b/streaming/src/main/scala/spark/streaming/FileInputDStream.scala @@ -18,12 +18,12 @@ import org.apache.hadoop.conf.Configuration import org.apache.hadoop.mapreduce.{InputFormat => NewInputFormat} -class FileInputRDS[K: ClassManifest, V: ClassManifest, F <: NewInputFormat[K,V] : ClassManifest]( +class FileInputDStream[K: ClassManifest, V: ClassManifest, F <: NewInputFormat[K,V] : ClassManifest]( ssc: SparkStreamContext, directory: Path, - filter: PathFilter = FileInputRDS.defaultPathFilter, + filter: PathFilter = FileInputDStream.defaultPathFilter, newFilesOnly: Boolean = true) - extends InputRDS[(K, V)](ssc) { + extends InputDStream[(K, V)](ssc) { val fs = directory.getFileSystem(new Configuration()) var lastModTime: Long = 0 @@ -69,7 +69,7 @@ class FileInputRDS[K: ClassManifest, V: ClassManifest, F <: NewInputFormat[K,V] } } -object FileInputRDS { +object FileInputDStream { val defaultPathFilter = new PathFilter { def accept(path: Path): Boolean = { val file = path.getName() @@ -83,12 +83,12 @@ object FileInputRDS { } /* -class NetworkInputRDS[T: ClassManifest]( +class NetworkInputDStream[T: ClassManifest]( val networkInputName: String, val addresses: Array[InetSocketAddress], batchDuration: Time, ssc: SparkStreamContext) -extends InputRDS[T](networkInputName, batchDuration, ssc) { +extends InputDStream[T](networkInputName, batchDuration, ssc) { // TODO(Haoyuan): This is for the performance test. 
@@ -139,11 +139,11 @@ extends InputRDS[T](networkInputName, batchDuration, ssc) { } -class TestInputRDS( +class TestInputDStream( val testInputName: String, batchDuration: Time, ssc: SparkStreamContext) -extends InputRDS[String](testInputName, batchDuration, ssc) { +extends InputDStream[String](testInputName, batchDuration, ssc) { @transient val references = new HashMap[Time,Array[String]] diff --git a/streaming/src/main/scala/spark/streaming/Interval.scala b/streaming/src/main/scala/spark/streaming/Interval.scala index 1960097216..088cbe4376 100644 --- a/streaming/src/main/scala/spark/streaming/Interval.scala +++ b/streaming/src/main/scala/spark/streaming/Interval.scala @@ -1,7 +1,6 @@ package spark.streaming -case class Interval (val beginTime: Time, val endTime: Time) { - +case class Interval (beginTime: Time, endTime: Time) { def this(beginMs: Long, endMs: Long) = this(Time(beginMs), new Time(endMs)) def duration(): Time = endTime - beginTime @@ -33,7 +32,7 @@ case class Interval (val beginTime: Time, val endTime: Time) { this + (endTime - beginTime) } - def isZero() = (beginTime.isZero && endTime.isZero) + def isZero = (beginTime.isZero && endTime.isZero) def toFormattedString = beginTime.toFormattedString + "-" + endTime.toFormattedString @@ -41,7 +40,6 @@ case class Interval (val beginTime: Time, val endTime: Time) { } object Interval { - def zero() = new Interval (Time.zero, Time.zero) def currentInterval(intervalDuration: Time): Interval = { @@ -49,7 +47,6 @@ object Interval { val intervalBegin = time.floor(intervalDuration) Interval(intervalBegin, intervalBegin + intervalDuration) } - } diff --git a/streaming/src/main/scala/spark/streaming/PairRDSFunctions.scala b/streaming/src/main/scala/spark/streaming/PairDStreamFunctions.scala similarity index 66% rename from streaming/src/main/scala/spark/streaming/PairRDSFunctions.scala rename to streaming/src/main/scala/spark/streaming/PairDStreamFunctions.scala index 403ae233a5..0cf296f21a 100644 --- a/streaming/src/main/scala/spark/streaming/PairRDSFunctions.scala +++ b/streaming/src/main/scala/spark/streaming/PairDStreamFunctions.scala @@ -3,23 +3,23 @@ package spark.streaming import scala.collection.mutable.ArrayBuffer import spark.streaming.SparkStreamContext._ -class PairRDSFunctions[K: ClassManifest, V: ClassManifest](rds: RDS[(K,V)]) +class PairDStreamFunctions[K: ClassManifest, V: ClassManifest](stream: DStream[(K,V)]) extends Serializable { - def ssc = rds.ssc + def ssc = stream.ssc /* ---------------------------------- */ - /* RDS operations for key-value pairs */ + /* DStream operations for key-value pairs */ /* ---------------------------------- */ - def groupByKey(numPartitions: Int = 0): ShuffledRDS[K, V, ArrayBuffer[V]] = { + def groupByKey(numPartitions: Int = 0): ShuffledDStream[K, V, ArrayBuffer[V]] = { def createCombiner(v: V) = ArrayBuffer[V](v) def mergeValue(c: ArrayBuffer[V], v: V) = (c += v) def mergeCombiner(c1: ArrayBuffer[V], c2: ArrayBuffer[V]) = (c1 ++ c2) combineByKey[ArrayBuffer[V]](createCombiner, mergeValue, mergeCombiner, numPartitions) } - def reduceByKey(reduceFunc: (V, V) => V, numPartitions: Int = 0): ShuffledRDS[K, V, V] = { + def reduceByKey(reduceFunc: (V, V) => V, numPartitions: Int = 0): ShuffledDStream[K, V, V] = { val cleanedReduceFunc = ssc.sc.clean(reduceFunc) combineByKey[V]((v: V) => v, cleanedReduceFunc, cleanedReduceFunc, numPartitions) } @@ -28,23 +28,23 @@ extends Serializable { createCombiner: V => C, mergeValue: (C, V) => C, mergeCombiner: (C, C) => C, - numPartitions: Int) : 
ShuffledRDS[K, V, C] = { - new ShuffledRDS[K, V, C](rds, createCombiner, mergeValue, mergeCombiner, numPartitions) + numPartitions: Int) : ShuffledDStream[K, V, C] = { + new ShuffledDStream[K, V, C](stream, createCombiner, mergeValue, mergeCombiner, numPartitions) } def groupByKeyAndWindow( windowTime: Time, slideTime: Time, - numPartitions: Int = 0): ShuffledRDS[K, V, ArrayBuffer[V]] = { - rds.window(windowTime, slideTime).groupByKey(numPartitions) + numPartitions: Int = 0): ShuffledDStream[K, V, ArrayBuffer[V]] = { + stream.window(windowTime, slideTime).groupByKey(numPartitions) } def reduceByKeyAndWindow( reduceFunc: (V, V) => V, windowTime: Time, slideTime: Time, - numPartitions: Int = 0): ShuffledRDS[K, V, V] = { - rds.window(windowTime, slideTime).reduceByKey(ssc.sc.clean(reduceFunc), numPartitions) + numPartitions: Int = 0): ShuffledDStream[K, V, V] = { + stream.window(windowTime, slideTime).reduceByKey(ssc.sc.clean(reduceFunc), numPartitions) } // This method is the efficient sliding window reduce operation, @@ -57,10 +57,10 @@ extends Serializable { invReduceFunc: (V, V) => V, windowTime: Time, slideTime: Time, - numPartitions: Int): ReducedWindowedRDS[K, V] = { + numPartitions: Int): ReducedWindowedDStream[K, V] = { - new ReducedWindowedRDS[K, V]( - rds, + new ReducedWindowedDStream[K, V]( + stream, ssc.sc.clean(reduceFunc), ssc.sc.clean(invReduceFunc), windowTime, diff --git a/streaming/src/main/scala/spark/streaming/QueueInputRDS.scala b/streaming/src/main/scala/spark/streaming/QueueInputDStream.scala similarity index 89% rename from streaming/src/main/scala/spark/streaming/QueueInputRDS.scala rename to streaming/src/main/scala/spark/streaming/QueueInputDStream.scala index 31e6a64e21..c78abd1a87 100644 --- a/streaming/src/main/scala/spark/streaming/QueueInputRDS.scala +++ b/streaming/src/main/scala/spark/streaming/QueueInputDStream.scala @@ -6,12 +6,12 @@ import spark.UnionRDD import scala.collection.mutable.Queue import scala.collection.mutable.ArrayBuffer -class QueueInputRDS[T: ClassManifest]( +class QueueInputDStream[T: ClassManifest]( ssc: SparkStreamContext, val queue: Queue[RDD[T]], oneAtATime: Boolean, defaultRDD: RDD[T] - ) extends InputRDS[T](ssc) { + ) extends InputDStream[T](ssc) { override def start() { } diff --git a/streaming/src/main/scala/spark/streaming/ReducedWindowedRDS.scala b/streaming/src/main/scala/spark/streaming/ReducedWindowedDStream.scala similarity index 87% rename from streaming/src/main/scala/spark/streaming/ReducedWindowedRDS.scala rename to streaming/src/main/scala/spark/streaming/ReducedWindowedDStream.scala index dd1f474657..11fa4e5443 100644 --- a/streaming/src/main/scala/spark/streaming/ReducedWindowedRDS.scala +++ b/streaming/src/main/scala/spark/streaming/ReducedWindowedDStream.scala @@ -11,28 +11,28 @@ import spark.storage.StorageLevel import scala.collection.mutable.ArrayBuffer -class ReducedWindowedRDS[K: ClassManifest, V: ClassManifest]( - parent: RDS[(K, V)], +class ReducedWindowedDStream[K: ClassManifest, V: ClassManifest]( + parent: DStream[(K, V)], reduceFunc: (V, V) => V, invReduceFunc: (V, V) => V, _windowTime: Time, _slideTime: Time, numPartitions: Int) -extends RDS[(K,V)](parent.ssc) { +extends DStream[(K,V)](parent.ssc) { if (!_windowTime.isMultipleOf(parent.slideTime)) - throw new Exception("The window duration of ReducedWindowedRDS (" + _slideTime + ") " + - "must be multiple of the slide duration of parent RDS (" + parent.slideTime + ")") + throw new Exception("The window duration of ReducedWindowedDStream (" + _slideTime + 
") " + + "must be multiple of the slide duration of parent DStream (" + parent.slideTime + ")") if (!_slideTime.isMultipleOf(parent.slideTime)) - throw new Exception("The slide duration of ReducedWindowedRDS (" + _slideTime + ") " + - "must be multiple of the slide duration of parent RDS (" + parent.slideTime + ")") + throw new Exception("The slide duration of ReducedWindowedDStream (" + _slideTime + ") " + + "must be multiple of the slide duration of parent DStream (" + parent.slideTime + ")") - val reducedRDS = parent.reduceByKey(reduceFunc, numPartitions) + val reducedStream = parent.reduceByKey(reduceFunc, numPartitions) val allowPartialWindows = true - //reducedRDS.persist(StorageLevel.MEMORY_ONLY_DESER_2) + //reducedStream.persist(StorageLevel.MEMORY_ONLY_DESER_2) - override def dependencies = List(reducedRDS) + override def dependencies = List(reducedStream) def windowTime: Time = _windowTime @@ -41,9 +41,9 @@ extends RDS[(K,V)](parent.ssc) { override def persist( storageLevel: StorageLevel, checkpointLevel: StorageLevel, - checkpointInterval: Time): RDS[(K,V)] = { + checkpointInterval: Time): DStream[(K,V)] = { super.persist(storageLevel, checkpointLevel, checkpointInterval) - reducedRDS.persist(storageLevel, checkpointLevel, checkpointInterval) + reducedStream.persist(storageLevel, checkpointLevel, checkpointInterval) } override def compute(validTime: Time): Option[RDD[(K, V)]] = { @@ -80,7 +80,7 @@ extends RDS[(K,V)](parent.ssc) { if (allowPartialWindows) { if (currentTime - slideTime == parent.zeroTime) { - reducedRDS.getOrCompute(currentTime) match { + reducedStream.getOrCompute(currentTime) match { case Some(rdd) => return Some(rdd) case None => throw new Exception("Could not get first reduced RDD for time " + currentTime) } @@ -94,11 +94,11 @@ extends RDS[(K,V)](parent.ssc) { val reducedRDDs = new ArrayBuffer[RDD[(K, V)]]() var t = currentWindow.endTime while (t > currentWindow.beginTime) { - reducedRDS.getOrCompute(t) match { + reducedStream.getOrCompute(t) match { case Some(rdd) => reducedRDDs += rdd case None => throw new Exception("Could not get reduced RDD for time " + t) } - t -= reducedRDS.slideTime + t -= reducedStream.slideTime } if (reducedRDDs.size == 0) { throw new Exception("Could not generate the first RDD for time " + validTime) @@ -120,21 +120,21 @@ extends RDS[(K,V)](parent.ssc) { // Get the RDDs of the reduced values in "old time steps" var t = currentWindow.beginTime while (t > previousWindow.beginTime) { - reducedRDS.getOrCompute(t) match { + reducedStream.getOrCompute(t) match { case Some(rdd) => oldRDDs += rdd.asInstanceOf[RDD[(_, _)]] case None => throw new Exception("Could not get old reduced RDD for time " + t) } - t -= reducedRDS.slideTime + t -= reducedStream.slideTime } // Get the RDDs of the reduced values in "new time steps" t = currentWindow.endTime while (t > previousWindow.endTime) { - reducedRDS.getOrCompute(t) match { + reducedStream.getOrCompute(t) match { case Some(rdd) => newRDDs += rdd.asInstanceOf[RDD[(_, _)]] case None => throw new Exception("Could not get new reduced RDD for time " + t) } - t -= reducedRDS.slideTime + t -= reducedStream.slideTime } val partitioner = new HashPartitioner(numPartitions) diff --git a/streaming/src/main/scala/spark/streaming/Scheduler.scala b/streaming/src/main/scala/spark/streaming/Scheduler.scala index 83f874e550..fff4924b4c 100644 --- a/streaming/src/main/scala/spark/streaming/Scheduler.scala +++ b/streaming/src/main/scala/spark/streaming/Scheduler.scala @@ -9,12 +9,11 @@ import 
scala.collection.mutable.HashMap sealed trait SchedulerMessage case class InputGenerated(inputName: String, interval: Interval, reference: AnyRef = null) extends SchedulerMessage -case class Test extends SchedulerMessage class Scheduler( ssc: SparkStreamContext, - inputRDSs: Array[InputRDS[_]], - outputRDSs: Array[RDS[_]]) + inputStreams: Array[InputDStream[_]], + outputStreams: Array[DStream[_]]) extends Logging { initLogging() @@ -26,21 +25,21 @@ extends Logging { def start() { val zeroTime = Time(timer.start()) - outputRDSs.foreach(_.initialize(zeroTime)) - inputRDSs.par.foreach(_.start()) + outputStreams.foreach(_.initialize(zeroTime)) + inputStreams.par.foreach(_.start()) logInfo("Scheduler started") } def stop() { timer.stop() - inputRDSs.par.foreach(_.stop()) + inputStreams.par.foreach(_.stop()) logInfo("Scheduler stopped") } def generateRDDs (time: Time) { logInfo("Generating RDDs for time " + time) - outputRDSs.foreach(outputRDS => { - outputRDS.generateJob(time) match { + outputStreams.foreach(outputStream => { + outputStream.generateJob(time) match { case Some(job) => submitJob(job) case None => } diff --git a/streaming/src/main/scala/spark/streaming/SparkStreamContext.scala b/streaming/src/main/scala/spark/streaming/SparkStreamContext.scala index d32f6d588c..2bec1091c0 100644 --- a/streaming/src/main/scala/spark/streaming/SparkStreamContext.scala +++ b/streaming/src/main/scala/spark/streaming/SparkStreamContext.scala @@ -31,8 +31,8 @@ class SparkStreamContext ( val sc = new SparkContext(master, frameworkName, sparkHome, jars) val env = SparkEnv.get - val inputRDSs = new ArrayBuffer[InputRDS[_]]() - val outputRDSs = new ArrayBuffer[RDS[_]]() + val inputStreams = new ArrayBuffer[InputDStream[_]]() + val outputStreams = new ArrayBuffer[DStream[_]]() var batchDuration: Time = null var scheduler: Scheduler = null @@ -48,17 +48,17 @@ class SparkStreamContext ( def createNetworkStream[T: ClassManifest]( name: String, addresses: Array[InetSocketAddress], - batchDuration: Time): RDS[T] = { + batchDuration: Time): DStream[T] = { - val inputRDS = new NetworkInputRDS[T](this, addresses) - inputRDSs += inputRDS - inputRDS + val inputStream = new NetworkinputStream[T](this, addresses) + inputStreams += inputStream + inputStream } def createNetworkStream[T: ClassManifest]( name: String, addresses: Array[String], - batchDuration: Long): RDS[T] = { + batchDuration: Long): DStream[T] = { def stringToInetSocketAddress (str: String): InetSocketAddress = { val parts = str.split(":") @@ -83,13 +83,13 @@ class SparkStreamContext ( K: ClassManifest, V: ClassManifest, F <: NewInputFormat[K, V]: ClassManifest - ](directory: String): RDS[(K, V)] = { - val inputRDS = new FileInputRDS[K, V, F](this, new Path(directory)) - inputRDSs += inputRDS - inputRDS + ](directory: String): DStream[(K, V)] = { + val inputStream = new FileInputDStream[K, V, F](this, new Path(directory)) + inputStreams += inputStream + inputStream } - def createTextFileStream(directory: String): RDS[String] = { + def createTextFileStream(directory: String): DStream[String] = { createFileStream[LongWritable, Text, TextInputFormat](directory).map(_._2.toString) } @@ -101,26 +101,26 @@ class SparkStreamContext ( queue: Queue[RDD[T]], oneAtATime: Boolean = true, defaultRDD: RDD[T] = null - ): RDS[T] = { - val inputRDS = new QueueInputRDS(this, queue, oneAtATime, defaultRDD) - inputRDSs += inputRDS - inputRDS + ): DStream[T] = { + val inputStream = new QueueInputDStream(this, queue, oneAtATime, defaultRDD) + inputStreams += inputStream + 
inputStream } - def createQueueStream[T: ClassManifest](iterator: Iterator[RDD[T]]): RDS[T] = { + def createQueueStream[T: ClassManifest](iterator: Iterator[RDD[T]]): DStream[T] = { val queue = new Queue[RDD[T]] - val inputRDS = createQueueStream(queue, true, null) + val inputStream = createQueueStream(queue, true, null) queue ++= iterator - inputRDS + inputStream } /** - * This function registers a RDS as an output stream that will be + * This function registers a DStream as an output stream that will be * computed every interval. */ - def registerOutputStream (outputRDS: RDS[_]) { - outputRDSs += outputRDS + def registerOutputStream (outputStream: DStream[_]) { + outputStreams += outputStream } /** @@ -133,11 +133,11 @@ class SparkStreamContext ( if (batchDuration < Milliseconds(100)) { logWarning("Batch duration of " + batchDuration + " is very low") } - if (inputRDSs.size == 0) { - throw new Exception("No input RDSes created, so nothing to take input from") + if (inputStreams.size == 0) { + throw new Exception("No input streams created, so nothing to take input from") } - if (outputRDSs.size == 0) { - throw new Exception("No output RDSes registered, so nothing to execute") + if (outputStreams.size == 0) { + throw new Exception("No output streams registered, so nothing to execute") } } @@ -147,7 +147,7 @@ class SparkStreamContext ( */ def start() { verify() - scheduler = new Scheduler(this, inputRDSs.toArray, outputRDSs.toArray) + scheduler = new Scheduler(this, inputStreams.toArray, outputStreams.toArray) scheduler.start() } @@ -168,6 +168,6 @@ class SparkStreamContext ( object SparkStreamContext { - implicit def rdsToPairRdsFunctions [K: ClassManifest, V: ClassManifest] (rds: RDS[(K,V)]) = - new PairRDSFunctions (rds) + implicit def toPairDStreamFunctions[K: ClassManifest, V: ClassManifest](stream: DStream[(K,V)]) = + new PairDStreamFunctions(stream) } diff --git a/streaming/src/main/scala/spark/streaming/Time.scala b/streaming/src/main/scala/spark/streaming/Time.scala index c4573137ae..5c476f02c3 100644 --- a/streaming/src/main/scala/spark/streaming/Time.scala +++ b/streaming/src/main/scala/spark/streaming/Time.scala @@ -50,11 +50,11 @@ class Time(private var millis: Long) { def isZero = (this.millis == 0) - override def toString() = (millis.toString + " ms") + override def toString = (millis.toString + " ms") - def toFormattedString() = millis.toString + def toFormattedString = millis.toString - def milliseconds() = millis + def milliseconds = millis } object Time { diff --git a/streaming/src/main/scala/spark/streaming/WindowedRDS.scala b/streaming/src/main/scala/spark/streaming/WindowedDStream.scala similarity index 75% rename from streaming/src/main/scala/spark/streaming/WindowedRDS.scala rename to streaming/src/main/scala/spark/streaming/WindowedDStream.scala index 812a982301..9a6617a1ee 100644 --- a/streaming/src/main/scala/spark/streaming/WindowedRDS.scala +++ b/streaming/src/main/scala/spark/streaming/WindowedDStream.scala @@ -8,19 +8,19 @@ import spark.SparkContext._ import scala.collection.mutable.ArrayBuffer -class WindowedRDS[T: ClassManifest]( - parent: RDS[T], +class WindowedDStream[T: ClassManifest]( + parent: DStream[T], _windowTime: Time, _slideTime: Time) - extends RDS[T](parent.ssc) { + extends DStream[T](parent.ssc) { if (!_windowTime.isMultipleOf(parent.slideTime)) - throw new Exception("The window duration of WindowedRDS (" + _slideTime + ") " + - "must be multiple of the slide duration of parent RDS (" + parent.slideTime + ")") + throw new Exception("The window 
duration of WindowedDStream (" + _slideTime + ") " + + "must be multiple of the slide duration of parent DStream (" + parent.slideTime + ")") if (!_slideTime.isMultipleOf(parent.slideTime)) - throw new Exception("The slide duration of WindowedRDS (" + _slideTime + ") " + - "must be multiple of the slide duration of parent RDS (" + parent.slideTime + ")") + throw new Exception("The slide duration of WindowedDStream (" + _slideTime + ") " + + "must be multiple of the slide duration of parent DStream (" + parent.slideTime + ")") val allowPartialWindows = true @@ -44,7 +44,7 @@ class WindowedRDS[T: ClassManifest]( if (windowStartTime >= parent.zeroTime) { // Walk back through time, from the 'windowEndTime' to 'windowStartTime' - // and get all parent RDDs from the parent RDS + // and get all parent RDDs from the parent DStream var t = windowEndTime while (t > windowStartTime) { parent.getOrCompute(t) match { diff --git a/streaming/src/main/scala/spark/streaming/examples/ExampleOne.scala b/streaming/src/main/scala/spark/streaming/examples/ExampleOne.scala index d56fdcdf29..669f575240 100644 --- a/streaming/src/main/scala/spark/streaming/examples/ExampleOne.scala +++ b/streaming/src/main/scala/spark/streaming/examples/ExampleOne.scala @@ -20,10 +20,10 @@ object ExampleOne { ssc.setBatchDuration(Seconds(1)) // Create the queue through which RDDs can be pushed to - // a QueueInputRDS + // a QueueInputDStream val rddQueue = new SynchronizedQueue[RDD[Int]]() - // Create the QueueInputRDs and use it do some processing + // Create the QueueInputDStream and use it do some processing val inputStream = ssc.createQueueStream(rddQueue) val mappedStream = inputStream.map(x => (x % 10, 1)) val reducedStream = mappedStream.reduceByKey(_ + _) diff --git a/streaming/src/main/scala/spark/streaming/examples/ExampleTwo.scala b/streaming/src/main/scala/spark/streaming/examples/ExampleTwo.scala index 4b8f6d609d..be47e47a5a 100644 --- a/streaming/src/main/scala/spark/streaming/examples/ExampleTwo.scala +++ b/streaming/src/main/scala/spark/streaming/examples/ExampleTwo.scala @@ -24,12 +24,12 @@ object ExampleTwo { if (fs.exists(directory)) throw new Exception("This directory already exists") fs.mkdirs(directory) - // Create the FileInputRDS on the directory and use the + // Create the FileInputDStream on the directory and use the // stream to count words in new files created - val inputRDS = ssc.createTextFileStream(directory.toString) - val wordsRDS = inputRDS.flatMap(_.split(" ")) - val wordCountsRDS = wordsRDS.map(x => (x, 1)).reduceByKey(_ + _) - wordCountsRDS.print + val inputStream = ssc.createTextFileStream(directory.toString) + val words = inputStream.flatMap(_.split(" ")) + val wordCounts = words.map(x => (x, 1)).reduceByKey(_ + _) + wordCounts.print() ssc.start() // Creating new files in the directory diff --git a/streaming/src/main/scala/spark/streaming/examples/WordCount.scala b/streaming/src/main/scala/spark/streaming/examples/WordCount.scala index a155630151..ba7bc63d6a 100644 --- a/streaming/src/main/scala/spark/streaming/examples/WordCount.scala +++ b/streaming/src/main/scala/spark/streaming/examples/WordCount.scala @@ -14,12 +14,12 @@ object WordCount { val ssc = new SparkStreamContext(args(0), "ExampleTwo") ssc.setBatchDuration(Seconds(2)) - // Create the FileInputRDS on the directory and use the + // Create the FileInputDStream on the directory and use the // stream to count words in new files created - val inputRDS = ssc.createTextFileStream(args(1)) - val wordsRDS = inputRDS.flatMap(_.split(" ")) 
- val wordCountsRDS = wordsRDS.map(x => (x, 1)).reduceByKey(_ + _) - wordCountsRDS.print() + val lines = ssc.createTextFileStream(args(1)) + val words = lines.flatMap(_.split(" ")) + val wordCounts = words.map(x => (x, 1)).reduceByKey(_ + _) + wordCounts.print() ssc.start() } } diff --git a/streaming/src/main/scala/spark/streaming/util/SenderReceiverTest.scala b/streaming/src/main/scala/spark/streaming/util/SenderReceiverTest.scala index 9925b1d07c..9fb1924798 100644 --- a/streaming/src/main/scala/spark/streaming/util/SenderReceiverTest.scala +++ b/streaming/src/main/scala/spark/streaming/util/SenderReceiverTest.scala @@ -23,7 +23,7 @@ object Receiver { count += 28 } } catch { - case e: Exception => e.printStackTrace + case e: Exception => e.printStackTrace() } val timeTaken = System.currentTimeMillis - time val tput = (count / 1024.0) / (timeTaken / 1000.0) diff --git a/streaming/src/test/scala/spark/streaming/RDSSuite.scala b/streaming/src/test/scala/spark/streaming/DStreamSuite.scala similarity index 69% rename from streaming/src/test/scala/spark/streaming/RDSSuite.scala rename to streaming/src/test/scala/spark/streaming/DStreamSuite.scala index f51ea50a5d..ce7c3d2e2b 100644 --- a/streaming/src/test/scala/spark/streaming/RDSSuite.scala +++ b/streaming/src/test/scala/spark/streaming/DStreamSuite.scala @@ -1,6 +1,6 @@ package spark.streaming -import spark.RDD +import spark.{Logging, RDD} import org.scalatest.FunSuite import org.scalatest.BeforeAndAfter @@ -8,15 +8,15 @@ import org.scalatest.BeforeAndAfter import scala.collection.mutable.ArrayBuffer import scala.collection.mutable.SynchronizedQueue -class RDSSuite extends FunSuite with BeforeAndAfter { +class DStreamSuite extends FunSuite with BeforeAndAfter with Logging { var ssc: SparkStreamContext = null val batchDurationMillis = 1000 def testOp[U: ClassManifest, V: ClassManifest]( input: Seq[Seq[U]], - operation: RDS[U] => RDS[V], - expectedOutput: Seq[Seq[V]]) = { + operation: DStream[U] => DStream[V], + expectedOutput: Seq[Seq[V]]) { try { ssc = new SparkStreamContext("local", "test") ssc.setBatchDuration(Milliseconds(batchDurationMillis)) @@ -31,7 +31,7 @@ class RDSSuite extends FunSuite with BeforeAndAfter { val output = new ArrayBuffer[Seq[V]]() while(outputQueue.size > 0) { val rdd = outputQueue.take() - println("Collecting RDD " + rdd.id + ", " + rdd.getClass().getSimpleName() + ", " + rdd.splits.size) + logInfo("Collecting RDD " + rdd.id + ", " + rdd.getClass.getSimpleName + ", " + rdd.splits.size) output += (rdd.collect()) } assert(output.size === expectedOutput.size) @@ -47,19 +47,19 @@ class RDSSuite extends FunSuite with BeforeAndAfter { val inputData = Array(1 to 4, 5 to 8, 9 to 12) // map - testOp(inputData, (r: RDS[Int]) => r.map(_.toString), inputData.map(_.map(_.toString))) + testOp(inputData, (r: DStream[Int]) => r.map(_.toString), inputData.map(_.map(_.toString))) // flatMap - testOp(inputData, (r: RDS[Int]) => r.flatMap(x => Array(x, x * 2)), + testOp(inputData, (r: DStream[Int]) => r.flatMap(x => Array(x, x * 2)), inputData.map(_.flatMap(x => Array(x, x * 2))) ) } } -object RDSSuite { +object DStreamSuite { def main(args: Array[String]) { - val r = new RDSSuite() + val r = new DStreamSuite() val inputData = Array(1 to 4, 5 to 8, 9 to 12) - r.testOp(inputData, (r: RDS[Int]) => r.map(_.toString), inputData.map(_.map(_.toString))) + r.testOp(inputData, (r: DStream[Int]) => r.map(_.toString), inputData.map(_.map(_.toString))) } } \ No newline at end of file From cae894ee7aefa4cf9b1952038a48be81e1d2a856 Mon Sep 17 
00:00:00 2001 From: Tathagata Das Date: Mon, 6 Aug 2012 14:52:46 -0700 Subject: [PATCH 009/226] Added new Clock interface that is used by RecurringTimer to scheduler events on system time or manually-configured time. --- .../main/scala/spark/streaming/DStream.scala | 1 - .../src/main/scala/spark/streaming/Job.scala | 9 +-- .../scala/spark/streaming/Scheduler.scala | 7 +- .../scala/spark/streaming/util/Clock.scala | 77 +++++++++++++++++++ .../spark/streaming/util/RecurringTimer.scala | 38 +++++---- .../scala/spark/streaming/DStreamSuite.scala | 28 +++++-- 6 files changed, 130 insertions(+), 30 deletions(-) create mode 100644 streaming/src/main/scala/spark/streaming/util/Clock.scala diff --git a/streaming/src/main/scala/spark/streaming/DStream.scala b/streaming/src/main/scala/spark/streaming/DStream.scala index e19d2ecef5..c63c043415 100644 --- a/streaming/src/main/scala/spark/streaming/DStream.scala +++ b/streaming/src/main/scala/spark/streaming/DStream.scala @@ -197,7 +197,6 @@ extends Logging with Serializable { private[streaming] def toQueue = { val queue = new ArrayBlockingQueue[RDD[T]](10000) this.foreachRDD(rdd => { - println("Added RDD " + rdd.id) queue.add(rdd) }) queue diff --git a/streaming/src/main/scala/spark/streaming/Job.scala b/streaming/src/main/scala/spark/streaming/Job.scala index 2481a9a3ef..0bd8343b9a 100644 --- a/streaming/src/main/scala/spark/streaming/Job.scala +++ b/streaming/src/main/scala/spark/streaming/Job.scala @@ -1,5 +1,7 @@ package spark.streaming +import java.util.concurrent.atomic.AtomicLong + class Job(val time: Time, func: () => _) { val id = Job.getNewId() def run(): Long = { @@ -13,11 +15,8 @@ class Job(val time: Time, func: () => _) { } object Job { - var lastId = 1 + val id = new AtomicLong(0) - def getNewId() = synchronized { - lastId += 1 - lastId - } + def getNewId() = id.getAndIncrement() } diff --git a/streaming/src/main/scala/spark/streaming/Scheduler.scala b/streaming/src/main/scala/spark/streaming/Scheduler.scala index fff4924b4c..309bd95525 100644 --- a/streaming/src/main/scala/spark/streaming/Scheduler.scala +++ b/streaming/src/main/scala/spark/streaming/Scheduler.scala @@ -1,6 +1,7 @@ package spark.streaming import spark.streaming.util.RecurringTimer +import spark.streaming.util.Clock import spark.SparkEnv import spark.Logging @@ -20,8 +21,10 @@ extends Logging { val concurrentJobs = System.getProperty("spark.stream.concurrentJobs", "1").toInt val jobManager = new JobManager(ssc, concurrentJobs) - val timer = new RecurringTimer(ssc.batchDuration, generateRDDs(_)) - + val clockClass = System.getProperty("spark.streaming.clock", "spark.streaming.util.SystemClock") + val clock = Class.forName(clockClass).newInstance().asInstanceOf[Clock] + val timer = new RecurringTimer(clock, ssc.batchDuration, generateRDDs(_)) + def start() { val zeroTime = Time(timer.start()) diff --git a/streaming/src/main/scala/spark/streaming/util/Clock.scala b/streaming/src/main/scala/spark/streaming/util/Clock.scala new file mode 100644 index 0000000000..72e786e0c3 --- /dev/null +++ b/streaming/src/main/scala/spark/streaming/util/Clock.scala @@ -0,0 +1,77 @@ +package spark.streaming.util + +import spark.streaming._ + +trait Clock { + def currentTime(): Long + def waitTillTime(targetTime: Long): Long +} + + +class SystemClock() extends Clock { + + val minPollTime = 25L + + def currentTime(): Long = { + System.currentTimeMillis() + } + + def waitTillTime(targetTime: Long): Long = { + var currentTime = 0L + currentTime = System.currentTimeMillis() + + var waitTime = 
targetTime - currentTime + if (waitTime <= 0) { + return currentTime + } + + val pollTime = { + if (waitTime / 10.0 > minPollTime) { + (waitTime / 10.0).toLong + } else { + minPollTime + } + } + + + while (true) { + currentTime = System.currentTimeMillis() + waitTime = targetTime - currentTime + + if (waitTime <= 0) { + + return currentTime + } + val sleepTime = + if (waitTime < pollTime) { + waitTime + } else { + pollTime + } + Thread.sleep(sleepTime) + } + return -1 + } +} + +class ManualClock() extends Clock { + + var time = 0L + + def currentTime() = time + + def addToTime(timeToAdd: Long) = { + this.synchronized { + time += timeToAdd + this.notifyAll() + } + } + def waitTillTime(targetTime: Long): Long = { + this.synchronized { + while (time < targetTime) { + this.wait(100) + } + } + return currentTime() + } +} diff --git a/streaming/src/main/scala/spark/streaming/util/RecurringTimer.scala b/streaming/src/main/scala/spark/streaming/util/RecurringTimer.scala index 6125bb82eb..5da9fa6ecc 100644 --- a/streaming/src/main/scala/spark/streaming/util/RecurringTimer.scala +++ b/streaming/src/main/scala/spark/streaming/util/RecurringTimer.scala @@ -1,6 +1,6 @@ package spark.streaming.util -class RecurringTimer(period: Long, callback: (Long) => Unit) { +class RecurringTimer(val clock: Clock, val period: Long, val callback: (Long) => Unit) { val minPollTime = 25L @@ -19,7 +19,7 @@ class RecurringTimer(period: Long, callback: (Long) => Unit) { var nextTime = 0L def start(): Long = { - nextTime = (math.floor(System.currentTimeMillis() / period) + 1).toLong * period + nextTime = (math.floor(clock.currentTime / period) + 1).toLong * period thread.start() nextTime } @@ -31,22 +31,32 @@ class RecurringTimer(period: Long, callback: (Long) => Unit) { def loop() { try { while (true) { - val beforeSleepTime = System.currentTimeMillis() - while (beforeSleepTime >= nextTime) { - callback(nextTime) - nextTime += period - } - val sleepTime = if (nextTime - beforeSleepTime < 2 * pollTime) { - nextTime - beforeSleepTime - } else { - pollTime - } - Thread.sleep(sleepTime) - val afterSleepTime = System.currentTimeMillis() + clock.waitTillTime(nextTime) + callback(nextTime) + nextTime += period } + } catch { case e: InterruptedException => } } } +object RecurringTimer { + + def main(args: Array[String]) { + var lastRecurTime = 0L + val period = 1000 + + def onRecur(time: Long) { + val currentTime = System.currentTimeMillis() + println("" + currentTime + ": " + (currentTime - lastRecurTime)) + lastRecurTime = currentTime + } + val timer = new RecurringTimer(new SystemClock(), period, onRecur) + timer.start() + Thread.sleep(30 * 1000) + timer.stop() + } +} + diff --git a/streaming/src/test/scala/spark/streaming/DStreamSuite.scala b/streaming/src/test/scala/spark/streaming/DStreamSuite.scala index ce7c3d2e2b..2c10a03e6d 100644 --- a/streaming/src/test/scala/spark/streaming/DStreamSuite.scala +++ b/streaming/src/test/scala/spark/streaming/DStreamSuite.scala @@ -1,6 +1,8 @@ package spark.streaming -import spark.{Logging, RDD} +import spark.Logging +import spark.RDD +import spark.streaming.util.ManualClock import org.scalatest.FunSuite import org.scalatest.BeforeAndAfter @@ -13,11 +15,13 @@ class DStreamSuite extends FunSuite with BeforeAndAfter with Logging { var ssc: SparkStreamContext = null val batchDurationMillis = 1000 + System.setProperty("spark.streaming.clock", "spark.streaming.util.ManualClock") + def testOp[U: ClassManifest, V: ClassManifest]( input: Seq[Seq[U]], operation: DStream[U] => DStream[V], 
expectedOutput: Seq[Seq[V]]) { - try { + try { ssc = new SparkStreamContext("local", "test") ssc.setBatchDuration(Milliseconds(batchDurationMillis)) @@ -26,12 +30,14 @@ class DStreamSuite extends FunSuite with BeforeAndAfter with Logging { val outputQueue = outputStream.toQueue ssc.start() - Thread.sleep(batchDurationMillis * input.size) + val clock = ssc.scheduler.clock.asInstanceOf[ManualClock] + clock.addToTime(input.size * batchDurationMillis) + + Thread.sleep(100) val output = new ArrayBuffer[Seq[V]]() while(outputQueue.size > 0) { - val rdd = outputQueue.take() - logInfo("Collecting RDD " + rdd.id + ", " + rdd.getClass.getSimpleName + ", " + rdd.splits.size) + val rdd = outputQueue.take() output += (rdd.collect()) } assert(output.size === expectedOutput.size) @@ -58,8 +64,14 @@ class DStreamSuite extends FunSuite with BeforeAndAfter with Logging { object DStreamSuite { def main(args: Array[String]) { - val r = new DStreamSuite() - val inputData = Array(1 to 4, 5 to 8, 9 to 12) - r.testOp(inputData, (r: DStream[Int]) => r.map(_.toString), inputData.map(_.map(_.toString))) + try { + val r = new DStreamSuite() + val inputData = Array(1 to 4, 5 to 8, 9 to 12) + r.testOp(inputData, (r: DStream[Int]) => r.map(_.toString), inputData.map(_.map(_.toString))) + + } catch { + case e: Exception => e.printStackTrace() + } + System.exit(0) } } \ No newline at end of file From 091b1438f53b810fa366c3e913174d380865cd0c Mon Sep 17 00:00:00 2001 From: Matei Zaharia Date: Fri, 24 Aug 2012 16:43:59 -0700 Subject: [PATCH 010/226] Fix WordCount job name --- .../src/main/scala/spark/streaming/examples/WordCount.scala | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/streaming/src/main/scala/spark/streaming/examples/WordCount.scala b/streaming/src/main/scala/spark/streaming/examples/WordCount.scala index ba7bc63d6a..6c53007145 100644 --- a/streaming/src/main/scala/spark/streaming/examples/WordCount.scala +++ b/streaming/src/main/scala/spark/streaming/examples/WordCount.scala @@ -11,7 +11,7 @@ object WordCount { } // Create the context and set the batch size - val ssc = new SparkStreamContext(args(0), "ExampleTwo") + val ssc = new SparkStreamContext(args(0), "WordCount") ssc.setBatchDuration(Seconds(2)) // Create the FileInputDStream on the directory and use the From e7a5cbb543bcf23517ba01040ff74cab0153d678 Mon Sep 17 00:00:00 2001 From: Matei Zaharia Date: Fri, 24 Aug 2012 16:45:01 -0700 Subject: [PATCH 011/226] Reduce log4j verbosity for streaming --- streaming/src/test/resources/log4j.properties | 8 ++++++++ 1 file changed, 8 insertions(+) create mode 100644 streaming/src/test/resources/log4j.properties diff --git a/streaming/src/test/resources/log4j.properties b/streaming/src/test/resources/log4j.properties new file mode 100644 index 0000000000..02fe16866e --- /dev/null +++ b/streaming/src/test/resources/log4j.properties @@ -0,0 +1,8 @@ +# Set everything to be logged to the console +log4j.rootCategory=WARN, console +log4j.appender.console=org.apache.log4j.ConsoleAppender +log4j.appender.console.layout=org.apache.log4j.PatternLayout +log4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss.SSS} %p %c{1}: %m%n + +# Ignore messages below warning level from Jetty, because it's a bit verbose +log4j.logger.org.eclipse.jetty=WARN From 741899b21e4e6439459fcf4966076661c851ed07 Mon Sep 17 00:00:00 2001 From: Matei Zaharia Date: Sun, 26 Aug 2012 16:26:06 -0700 Subject: [PATCH 012/226] Fix sendMessageReliablySync --- core/src/main/scala/spark/network/ConnectionManager.scala | 5 +++-- 1 
file changed, 3 insertions(+), 2 deletions(-) diff --git a/core/src/main/scala/spark/network/ConnectionManager.scala b/core/src/main/scala/spark/network/ConnectionManager.scala index 1a22d06cc8..66b822117f 100644 --- a/core/src/main/scala/spark/network/ConnectionManager.scala +++ b/core/src/main/scala/spark/network/ConnectionManager.scala @@ -14,7 +14,8 @@ import scala.collection.mutable.SynchronizedQueue import scala.collection.mutable.Queue import scala.collection.mutable.ArrayBuffer -import akka.dispatch.{Promise, ExecutionContext, Future} +import akka.dispatch.{Await, Promise, ExecutionContext, Future} +import akka.util.Duration case class ConnectionManagerId(host: String, port: Int) { def toSocketAddress() = new InetSocketAddress(host, port) @@ -325,7 +326,7 @@ class ConnectionManager(port: Int) extends Logging { } def sendMessageReliablySync(connectionManagerId: ConnectionManagerId, message: Message): Option[Message] = { - sendMessageReliably(connectionManagerId, message)() + Await.result(sendMessageReliably(connectionManagerId, message), Duration.Inf) } def onReceiveMessage(callback: (Message, ConnectionManagerId) => Option[Message]) { From ad6537321ebda407f28a7c32845708ad66684c55 Mon Sep 17 00:00:00 2001 From: Matei Zaharia Date: Sun, 26 Aug 2012 16:27:23 -0700 Subject: [PATCH 013/226] Make Time serializable --- streaming/src/main/scala/spark/streaming/Time.scala | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/streaming/src/main/scala/spark/streaming/Time.scala b/streaming/src/main/scala/spark/streaming/Time.scala index 5c476f02c3..3901a26286 100644 --- a/streaming/src/main/scala/spark/streaming/Time.scala +++ b/streaming/src/main/scala/spark/streaming/Time.scala @@ -1,6 +1,6 @@ package spark.streaming -class Time(private var millis: Long) { +class Time(private var millis: Long) extends Serializable { def copy() = new Time(this.millis) From 06ef7c3d1bf8446d4d6ef8f3a055dd1e6d32ca3a Mon Sep 17 00:00:00 2001 From: Matei Zaharia Date: Sun, 26 Aug 2012 16:29:20 -0700 Subject: [PATCH 014/226] Less debug info --- core/src/main/scala/spark/network/ConnectionManager.scala | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/core/src/main/scala/spark/network/ConnectionManager.scala b/core/src/main/scala/spark/network/ConnectionManager.scala index 66b822117f..bd0980029a 100644 --- a/core/src/main/scala/spark/network/ConnectionManager.scala +++ b/core/src/main/scala/spark/network/ConnectionManager.scala @@ -248,7 +248,7 @@ class ConnectionManager(port: Int) extends Logging { } private def handleMessage(connectionManagerId: ConnectionManagerId, message: Message) { - logInfo("Handling [" + message + "] from [" + connectionManagerId + "]") + logDebug("Handling [" + message + "] from [" + connectionManagerId + "]") message match { case bufferMessage: BufferMessage => { if (bufferMessage.hasAckId) { From b08ff710af9b6592e3b43308ec4598bd3e6da084 Mon Sep 17 00:00:00 2001 From: root Date: Sun, 26 Aug 2012 23:40:50 +0000 Subject: [PATCH 015/226] Added sliding word count, and some fixes to reduce window DStream --- .../streaming/ReducedWindowedDStream.scala | 3 +- .../spark/streaming/examples/WordCount2.scala | 98 +++++++++++++++++++ .../streaming/util/SenderReceiverTest.scala | 9 +- 3 files changed, 106 insertions(+), 4 deletions(-) create mode 100644 streaming/src/main/scala/spark/streaming/examples/WordCount2.scala diff --git a/streaming/src/main/scala/spark/streaming/ReducedWindowedDStream.scala b/streaming/src/main/scala/spark/streaming/ReducedWindowedDStream.scala 
index 11fa4e5443..d097896d0a 100644 --- a/streaming/src/main/scala/spark/streaming/ReducedWindowedDStream.scala +++ b/streaming/src/main/scala/spark/streaming/ReducedWindowedDStream.scala @@ -75,11 +75,12 @@ extends DStream[(K,V)](parent.ssc) { val previousWindow = getAdjustedWindow(currentTime - slideTime, windowTime) logInfo("Current window = " + currentWindow) + logInfo("Slide time = " + slideTime) logInfo("Previous window = " + previousWindow) logInfo("Parent.zeroTime = " + parent.zeroTime) if (allowPartialWindows) { - if (currentTime - slideTime == parent.zeroTime) { + if (currentTime - slideTime <= parent.zeroTime) { reducedStream.getOrCompute(currentTime) match { case Some(rdd) => return Some(rdd) case None => throw new Exception("Could not get first reduced RDD for time " + currentTime) diff --git a/streaming/src/main/scala/spark/streaming/examples/WordCount2.scala b/streaming/src/main/scala/spark/streaming/examples/WordCount2.scala new file mode 100644 index 0000000000..83cbd31283 --- /dev/null +++ b/streaming/src/main/scala/spark/streaming/examples/WordCount2.scala @@ -0,0 +1,98 @@ +package spark.streaming.examples + +import spark.SparkContext +import SparkContext._ +import spark.streaming._ +import SparkStreamContext._ + +import spark.storage.StorageLevel + +import scala.util.Sorting +import scala.collection.mutable.ArrayBuffer +import scala.collection.mutable.HashMap +import scala.collection.mutable.Queue +import scala.collection.JavaConversions.mapAsScalaMap + +import it.unimi.dsi.fastutil.objects.{Object2LongOpenHashMap => OLMap} + + +object WordCount2_ExtraFunctions { + + def add(v1: Long, v2: Long) = (v1 + v2) + + def subtract(v1: Long, v2: Long) = (v1 - v2) + + def splitAndCountPartitions(iter: Iterator[String]): Iterator[(String, Long)] = { + //val map = new java.util.HashMap[String, Long] + val map = new OLMap[String] + var i = 0 + var j = 0 + while (iter.hasNext) { + val s = iter.next() + i = 0 + while (i < s.length) { + j = i + while (j < s.length && s.charAt(j) != ' ') { + j += 1 + } + if (j > i) { + val w = s.substring(i, j) + val c = map.getLong(w) + map.put(w, c + 1) +/* + if (c == null) { + map.put(w, 1) + } else { + map.put(w, c + 1) + } +*/ + } + i = j + while (i < s.length && s.charAt(i) == ' ') { + i += 1 + } + } + } + map.toIterator.map{case (k, v) => (k, v)} + } +} + +object WordCount2 { + + def moreWarmup(sc: SparkContext) { + (0 until 40).foreach {i => + sc.parallelize(1 to 20000000, 1000) + .map(_ % 1331).map(_.toString) + .mapPartitions(WordCount2_ExtraFunctions.splitAndCountPartitions).reduceByKey(_ + _, 10) + .collect() + } + } + + def main (args: Array[String]) { + + if (args.length < 2) { + println ("Usage: SparkStreamContext ") + System.exit(1) + } + + val ssc = new SparkStreamContext(args(0), "WordCount2") + ssc.setBatchDuration(Seconds(1)) + + val sentences = new ConstantInputDStream(ssc, ssc.sc.textFile(args(1)).cache()) + ssc.inputStreams += sentences + + import WordCount2_ExtraFunctions._ + + val windowedCounts = sentences + .mapPartitions(splitAndCountPartitions) + .reduceByKeyAndWindow(add _, subtract _, Seconds(10), Seconds(1), 10) + windowedCounts.persist(StorageLevel.MEMORY_ONLY_DESER, StorageLevel.MEMORY_ONLY_DESER, Seconds(10)) + windowedCounts.print() + + ssc.start() + + while(true) { Thread.sleep(1000) } + } +} + + diff --git a/streaming/src/main/scala/spark/streaming/util/SenderReceiverTest.scala b/streaming/src/main/scala/spark/streaming/util/SenderReceiverTest.scala index 9fb1924798..3922dfbad6 100644 --- 
a/streaming/src/main/scala/spark/streaming/util/SenderReceiverTest.scala +++ b/streaming/src/main/scala/spark/streaming/util/SenderReceiverTest.scala @@ -19,9 +19,12 @@ object Receiver { val is = new DataInputStream(new BufferedInputStream(socket.getInputStream)) var loop = true var string: String = null - while((string = is.readUTF) != null) { - count += 28 - } + do { + string = is.readUTF() + if (string != null) { + count += 28 + } + } while (string != null) } catch { case e: Exception => e.printStackTrace() } From b120e24fe04b987cfb5c487251fc10e36b377d6b Mon Sep 17 00:00:00 2001 From: Matei Zaharia Date: Sun, 26 Aug 2012 16:45:14 -0700 Subject: [PATCH 016/226] Add equals and hashCode to Time --- streaming/src/main/scala/spark/streaming/Time.scala | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/streaming/src/main/scala/spark/streaming/Time.scala b/streaming/src/main/scala/spark/streaming/Time.scala index 3901a26286..7835bdd5e8 100644 --- a/streaming/src/main/scala/spark/streaming/Time.scala +++ b/streaming/src/main/scala/spark/streaming/Time.scala @@ -55,6 +55,14 @@ class Time(private var millis: Long) extends Serializable { def toFormattedString = millis.toString def milliseconds = millis + + override def hashCode = millis.toInt + + override def equals(other: Any): Boolean = other match { + case null => false + case t: Time => t.millis == millis + case _ => false + } } object Time { From 22b1a20e61e0573163b8896a604de78ebec65071 Mon Sep 17 00:00:00 2001 From: Matei Zaharia Date: Sun, 26 Aug 2012 17:04:34 -0700 Subject: [PATCH 017/226] Made Time and Interval immutable --- .../main/scala/spark/streaming/DStream.scala | 2 +- .../main/scala/spark/streaming/Interval.scala | 8 +-- .../streaming/ReducedWindowedDStream.scala | 2 +- .../src/main/scala/spark/streaming/Time.scala | 50 ++++--------------- .../spark/streaming/WindowedDStream.scala | 2 +- 5 files changed, 13 insertions(+), 51 deletions(-) diff --git a/streaming/src/main/scala/spark/streaming/DStream.scala b/streaming/src/main/scala/spark/streaming/DStream.scala index c63c043415..03a5b84210 100644 --- a/streaming/src/main/scala/spark/streaming/DStream.scala +++ b/streaming/src/main/scala/spark/streaming/DStream.scala @@ -130,7 +130,7 @@ extends Logging with Serializable { newRDD.persist(storageLevel) logInfo("Persisting " + newRDD + " to " + storageLevel + " at time " + time) } - generatedRDDs.put(time.copy(), newRDD) + generatedRDDs.put(time, newRDD) Some(newRDD) case None => None diff --git a/streaming/src/main/scala/spark/streaming/Interval.scala b/streaming/src/main/scala/spark/streaming/Interval.scala index 088cbe4376..87b8437b3d 100644 --- a/streaming/src/main/scala/spark/streaming/Interval.scala +++ b/streaming/src/main/scala/spark/streaming/Interval.scala @@ -1,15 +1,9 @@ package spark.streaming -case class Interval (beginTime: Time, endTime: Time) { +case class Interval(beginTime: Time, endTime: Time) { def this(beginMs: Long, endMs: Long) = this(Time(beginMs), new Time(endMs)) def duration(): Time = endTime - beginTime - - def += (time: Time) { - beginTime += time - endTime += time - this - } def + (time: Time): Interval = { new Interval(beginTime + time, endTime + time) diff --git a/streaming/src/main/scala/spark/streaming/ReducedWindowedDStream.scala b/streaming/src/main/scala/spark/streaming/ReducedWindowedDStream.scala index d097896d0a..7833fa6189 100644 --- a/streaming/src/main/scala/spark/streaming/ReducedWindowedDStream.scala +++ b/streaming/src/main/scala/spark/streaming/ReducedWindowedDStream.scala @@ -70,7 
+70,7 @@ extends DStream[(K,V)](parent.ssc) { Interval(beginTime, endTime) } - val currentTime = validTime.copy + val currentTime = validTime val currentWindow = getAdjustedWindow(currentTime, windowTime) val previousWindow = getAdjustedWindow(currentTime - slideTime, windowTime) diff --git a/streaming/src/main/scala/spark/streaming/Time.scala b/streaming/src/main/scala/spark/streaming/Time.scala index 7835bdd5e8..6937e8b52f 100644 --- a/streaming/src/main/scala/spark/streaming/Time.scala +++ b/streaming/src/main/scala/spark/streaming/Time.scala @@ -1,43 +1,21 @@ package spark.streaming -class Time(private var millis: Long) extends Serializable { - - def copy() = new Time(this.millis) - +case class Time(millis: Long) { def zero = Time.zero - def < (that: Time): Boolean = - (this.millis < that.millis) + def < (that: Time): Boolean = (this.millis < that.millis) - def <= (that: Time) = (this < that || this == that) + def <= (that: Time) = (this.millis <= that.millis) - def > (that: Time) = !(this <= that) + def > (that: Time) = (this.millis > that.millis) - def >= (that: Time) = !(this < that) - - def += (that: Time): Time = { - this.millis += that.millis - this - } - - def -= (that: Time): Time = { - this.millis -= that.millis - this - } + def >= (that: Time) = (this.millis >= that.millis) - def + (that: Time) = this.copy() += that + def + (that: Time) = new Time(millis + that.millis) - def - (that: Time) = this.copy() -= that + def - (that: Time) = new Time(millis - that.millis) - def * (times: Int) = { - var count = 0 - var result = this.copy() - while (count < times) { - result += this - count += 1 - } - result - } + def * (times: Int) = new Time(millis * times) def floor(that: Time): Time = { val t = that.millis @@ -55,21 +33,11 @@ class Time(private var millis: Long) extends Serializable { def toFormattedString = millis.toString def milliseconds = millis - - override def hashCode = millis.toInt - - override def equals(other: Any): Boolean = other match { - case null => false - case t: Time => t.millis == millis - case _ => false - } } object Time { val zero = new Time(0) - - def apply(milliseconds: Long) = new Time(milliseconds) - + implicit def toTime(long: Long) = Time(long) implicit def toLong(time: Time) = time.milliseconds diff --git a/streaming/src/main/scala/spark/streaming/WindowedDStream.scala b/streaming/src/main/scala/spark/streaming/WindowedDStream.scala index 9a6617a1ee..17f3f3d952 100644 --- a/streaming/src/main/scala/spark/streaming/WindowedDStream.scala +++ b/streaming/src/main/scala/spark/streaming/WindowedDStream.scala @@ -32,7 +32,7 @@ class WindowedDStream[T: ClassManifest]( override def compute(validTime: Time): Option[RDD[T]] = { val parentRDDs = new ArrayBuffer[RDD[T]]() - val windowEndTime = validTime.copy() + val windowEndTime = validTime val windowStartTime = if (allowPartialWindows && windowEndTime - windowTime < parent.zeroTime) { parent.zeroTime } else { From 57796b183e18730f671f8b970fbf8880875d9f03 Mon Sep 17 00:00:00 2001 From: Matei Zaharia Date: Sun, 26 Aug 2012 17:25:22 -0700 Subject: [PATCH 018/226] Code style --- .../src/main/scala/spark/streaming/Time.scala | 26 +++++++++---------- 1 file changed, 12 insertions(+), 14 deletions(-) diff --git a/streaming/src/main/scala/spark/streaming/Time.scala b/streaming/src/main/scala/spark/streaming/Time.scala index 6937e8b52f..7841a71cd5 100644 --- a/streaming/src/main/scala/spark/streaming/Time.scala +++ b/streaming/src/main/scala/spark/streaming/Time.scala @@ -1,42 +1,40 @@ package spark.streaming case class 
Time(millis: Long) { - def zero = Time.zero - def < (that: Time): Boolean = (this.millis < that.millis) - def <= (that: Time) = (this.millis <= that.millis) + def <= (that: Time): Boolean = (this.millis <= that.millis) - def > (that: Time) = (this.millis > that.millis) + def > (that: Time): Boolean = (this.millis > that.millis) - def >= (that: Time) = (this.millis >= that.millis) + def >= (that: Time): Boolean = (this.millis >= that.millis) - def + (that: Time) = new Time(millis + that.millis) + def + (that: Time): Time = Time(millis + that.millis) - def - (that: Time) = new Time(millis - that.millis) + def - (that: Time): Time = Time(millis - that.millis) - def * (times: Int) = new Time(millis * times) + def * (times: Int): Time = Time(millis * times) def floor(that: Time): Time = { val t = that.millis val m = math.floor(this.millis / t).toLong - new Time(m * t) + Time(m * t) } def isMultipleOf(that: Time): Boolean = (this.millis % that.millis == 0) - def isZero = (this.millis == 0) + def isZero: Boolean = (this.millis == 0) - override def toString = (millis.toString + " ms") + override def toString: String = (millis.toString + " ms") - def toFormattedString = millis.toString + def toFormattedString: String = millis.toString - def milliseconds = millis + def milliseconds: Long = millis } object Time { - val zero = new Time(0) + val zero = Time(0) implicit def toTime(long: Long) = Time(long) From 9de1c3abf90fff82901c1ee13297d436d2d7a25d Mon Sep 17 00:00:00 2001 From: root Date: Mon, 27 Aug 2012 00:57:00 +0000 Subject: [PATCH 019/226] Tweaks to WordCount2 --- .../spark/streaming/examples/WordCount2.scala | 19 +++++++++++++------ 1 file changed, 13 insertions(+), 6 deletions(-) diff --git a/streaming/src/main/scala/spark/streaming/examples/WordCount2.scala b/streaming/src/main/scala/spark/streaming/examples/WordCount2.scala index 83cbd31283..87b62817ea 100644 --- a/streaming/src/main/scala/spark/streaming/examples/WordCount2.scala +++ b/streaming/src/main/scala/spark/streaming/examples/WordCount2.scala @@ -70,23 +70,30 @@ object WordCount2 { def main (args: Array[String]) { - if (args.length < 2) { - println ("Usage: SparkStreamContext ") + if (args.length < 4) { + println ("Usage: SparkStreamContext ") System.exit(1) } + + val Array(master, file, mapTasks, reduceTasks) = args - val ssc = new SparkStreamContext(args(0), "WordCount2") + val ssc = new SparkStreamContext(master, "WordCount2") ssc.setBatchDuration(Seconds(1)) + + val data = ssc.sc.textFile(file, mapTasks.toInt).persist(StorageLevel.MEMORY_ONLY_DESER_2) + println("Data count: " + data.count()) + println("Data count: " + data.count()) + println("Data count: " + data.count()) - val sentences = new ConstantInputDStream(ssc, ssc.sc.textFile(args(1)).cache()) + val sentences = new ConstantInputDStream(ssc, data) ssc.inputStreams += sentences import WordCount2_ExtraFunctions._ val windowedCounts = sentences .mapPartitions(splitAndCountPartitions) - .reduceByKeyAndWindow(add _, subtract _, Seconds(10), Seconds(1), 10) - windowedCounts.persist(StorageLevel.MEMORY_ONLY_DESER, StorageLevel.MEMORY_ONLY_DESER, Seconds(10)) + .reduceByKeyAndWindow(add _, subtract _, Seconds(10), Seconds(1), reduceTasks.toInt) + windowedCounts.persist(StorageLevel.MEMORY_ONLY_DESER, StorageLevel.MEMORY_ONLY_DESER_2, Seconds(10)) windowedCounts.print() ssc.start() From 29e83f39e90b4d3cbeeb40d5ec0c19bd003c1840 Mon Sep 17 00:00:00 2001 From: Matei Zaharia Date: Sun, 26 Aug 2012 18:16:25 -0700 Subject: [PATCH 020/226] Fix replication with MEMORY_ONLY_DESER_2 --- 
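// The patch below fixes replication for deserialized in-memory puts: the put consumes the
// values iterator, so the block can no longer be re-serialized for replication afterwards.
// The fix keeps the iterator handed back by the memory store and serializes that instead.
// A minimal, self-contained sketch of that pattern (illustrative only, not the BlockManager code):
object IteratorReusePattern {
  // Stand-in for a store whose put consumes the input but hands back either the
  // serialized bytes or a fresh iterator over the stored values.
  def putValues[T](values: Iterator[T], serialize: Boolean): Either[Iterator[T], Array[Byte]] = {
    val stored = values.toList                      // the original iterator is now exhausted
    if (serialize) Right(stored.mkString(",").getBytes("UTF-8"))
    else Left(stored.iterator)                      // a new iterator for later passes
  }

  def main(args: Array[String]) {
    var bytes: Array[Byte] = null
    var valuesAfterPut: Iterator[String] = null
    putValues(Iterator("a", "b", "c"), serialize = false) match {
      case Right(newBytes)   => bytes = newBytes
      case Left(newIterator) => valuesAfterPut = newIterator
    }
    // Replication step: serialize from whichever of the two we ended up with.
    if (bytes == null) {
      bytes = valuesAfterPut.mkString(",").getBytes("UTF-8")
    }
    println(new String(bytes, "UTF-8"))             // prints a,b,c
  }
}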
.../main/scala/spark/storage/BlockManager.scala | 15 +++++++++++++-- 1 file changed, 13 insertions(+), 2 deletions(-) diff --git a/core/src/main/scala/spark/storage/BlockManager.scala b/core/src/main/scala/spark/storage/BlockManager.scala index ff9914ae25..45f99717bc 100644 --- a/core/src/main/scala/spark/storage/BlockManager.scala +++ b/core/src/main/scala/spark/storage/BlockManager.scala @@ -364,6 +364,12 @@ class BlockManager(val master: BlockManagerMaster, val serializer: Serializer, m val startTimeMs = System.currentTimeMillis var bytes: ByteBuffer = null + + // If we need to replicate the data, we'll want access to the values, but because our + // put will read the whole iterator, there will be no values left. For the case where + // the put serializes data, we'll remember the bytes, above; but for the case where + // it doesn't, such as MEMORY_ONLY_DESER, let's rely on the put returning an Iterator. + var valuesAfterPut: Iterator[Any] = null locker.getLock(blockId).synchronized { logDebug("Put for block " + blockId + " took " + Utils.getUsedTimeMs(startTimeMs) @@ -391,7 +397,7 @@ class BlockManager(val master: BlockManagerMaster, val serializer: Serializer, m // If only save to memory memoryStore.putValues(blockId, values, level) match { case Right(newBytes) => bytes = newBytes - case _ => + case Left(newIterator) => valuesAfterPut = newIterator } } else { // If only save to disk @@ -408,8 +414,13 @@ class BlockManager(val master: BlockManagerMaster, val serializer: Serializer, m // Replicate block if required if (level.replication > 1) { + // Serialize the block if not already done if (bytes == null) { - bytes = dataSerialize(values) // serialize the block if not already done + if (valuesAfterPut == null) { + throw new SparkException( + "Underlying put returned neither an Iterator nor bytes! 
This shouldn't happen.") + } + bytes = dataSerialize(valuesAfterPut) } replicate(blockId, bytes, level) } From 26dfd20c9a5139bd682a9902267b9d54a11ae20f Mon Sep 17 00:00:00 2001 From: Matei Zaharia Date: Sun, 26 Aug 2012 18:56:56 -0700 Subject: [PATCH 021/226] Detect disconnected slaves in StandaloneScheduler --- .../cluster/StandaloneSchedulerBackend.scala | 38 ++++++++++++++++++- 1 file changed, 36 insertions(+), 2 deletions(-) diff --git a/core/src/main/scala/spark/scheduler/cluster/StandaloneSchedulerBackend.scala b/core/src/main/scala/spark/scheduler/cluster/StandaloneSchedulerBackend.scala index 013671c1c8..83e7c6e036 100644 --- a/core/src/main/scala/spark/scheduler/cluster/StandaloneSchedulerBackend.scala +++ b/core/src/main/scala/spark/scheduler/cluster/StandaloneSchedulerBackend.scala @@ -2,13 +2,14 @@ package spark.scheduler.cluster import scala.collection.mutable.{ArrayBuffer, HashMap, HashSet} -import akka.actor.{Props, Actor, ActorRef, ActorSystem} +import akka.actor._ import akka.util.duration._ import akka.pattern.ask import spark.{SparkException, Logging, TaskState} import akka.dispatch.Await import java.util.concurrent.atomic.AtomicInteger +import akka.remote.{RemoteClientShutdown, RemoteClientDisconnected, RemoteClientLifeCycleEvent} /** * A standalone scheduler backend, which waits for standalone executors to connect to it through @@ -23,8 +24,16 @@ class StandaloneSchedulerBackend(scheduler: ClusterScheduler, actorSystem: Actor class MasterActor(sparkProperties: Seq[(String, String)]) extends Actor { val slaveActor = new HashMap[String, ActorRef] + val slaveAddress = new HashMap[String, Address] val slaveHost = new HashMap[String, String] val freeCores = new HashMap[String, Int] + val actorToSlaveId = new HashMap[ActorRef, String] + val addressToSlaveId = new HashMap[Address, String] + + override def preStart() { + // Listen for remote client disconnection events, since they don't go through Akka's watch() + context.system.eventStream.subscribe(self, classOf[RemoteClientLifeCycleEvent]) + } def receive = { case RegisterSlave(slaveId, host, cores) => @@ -33,9 +42,13 @@ class StandaloneSchedulerBackend(scheduler: ClusterScheduler, actorSystem: Actor } else { logInfo("Registered slave: " + sender + " with ID " + slaveId) sender ! RegisteredSlave(sparkProperties) + context.watch(sender) slaveActor(slaveId) = sender slaveHost(slaveId) = host freeCores(slaveId) = cores + slaveAddress(slaveId) = sender.path.address + actorToSlaveId(sender) = slaveId + addressToSlaveId(sender.path.address) = slaveId totalCoreCount.addAndGet(cores) makeOffers() } @@ -54,7 +67,14 @@ class StandaloneSchedulerBackend(scheduler: ClusterScheduler, actorSystem: Actor sender ! true context.stop(self) - // TODO: Deal with nodes disconnecting too! (Including decreasing totalCoreCount) + case Terminated(actor) => + actorToSlaveId.get(actor).foreach(removeSlave) + + case RemoteClientDisconnected(transport, address) => + addressToSlaveId.get(address).foreach(removeSlave) + + case RemoteClientShutdown(transport, address) => + addressToSlaveId.get(address).foreach(removeSlave) } // Make fake resource offers on all slaves @@ -76,6 +96,20 @@ class StandaloneSchedulerBackend(scheduler: ClusterScheduler, actorSystem: Actor slaveActor(task.slaveId) ! 
LaunchTask(task) } } + + // Remove a disconnected slave from the cluster + def removeSlave(slaveId: String) { + logInfo("Slave " + slaveId + " disconnected, so removing it") + val numCores = freeCores(slaveId) + actorToSlaveId -= slaveActor(slaveId) + addressToSlaveId -= slaveAddress(slaveId) + slaveActor -= slaveId + slaveHost -= slaveId + freeCores -= slaveId + slaveHost -= slaveId + totalCoreCount.addAndGet(-numCores) + scheduler.slaveLost(slaveId) + } } var masterActor: ActorRef = null From 3c9c44a8d36c0f2dff40a50f1f1e3bc3dac7be7e Mon Sep 17 00:00:00 2001 From: Matei Zaharia Date: Sun, 26 Aug 2012 19:37:43 -0700 Subject: [PATCH 022/226] More helpful log messages --- core/src/main/scala/spark/MapOutputTracker.scala | 3 ++- core/src/main/scala/spark/network/Connection.scala | 4 ++-- core/src/main/scala/spark/network/ConnectionManager.scala | 2 +- .../main/scala/spark/scheduler/cluster/TaskSetManager.scala | 1 + 4 files changed, 6 insertions(+), 4 deletions(-) diff --git a/core/src/main/scala/spark/MapOutputTracker.scala b/core/src/main/scala/spark/MapOutputTracker.scala index 0c97cd44a1..e249430905 100644 --- a/core/src/main/scala/spark/MapOutputTracker.scala +++ b/core/src/main/scala/spark/MapOutputTracker.scala @@ -116,7 +116,7 @@ class MapOutputTracker(actorSystem: ActorSystem, isMaster: Boolean) extends Logg def getServerAddresses(shuffleId: Int): Array[BlockManagerId] = { val locs = bmAddresses.get(shuffleId) if (locs == null) { - logInfo("Don't have map outputs for shuffe " + shuffleId + ", fetching them") + logInfo("Don't have map outputs for shuffle " + shuffleId + ", fetching them") fetching.synchronized { if (fetching.contains(shuffleId)) { // Someone else is fetching it; wait for them to be done @@ -158,6 +158,7 @@ class MapOutputTracker(actorSystem: ActorSystem, isMaster: Boolean) extends Logg def incrementGeneration() { generationLock.synchronized { generation += 1 + logInfo("Increasing generation to " + generation) } } diff --git a/core/src/main/scala/spark/network/Connection.scala b/core/src/main/scala/spark/network/Connection.scala index 451faee66e..da8aff9dd5 100644 --- a/core/src/main/scala/spark/network/Connection.scala +++ b/core/src/main/scala/spark/network/Connection.scala @@ -111,7 +111,7 @@ extends Connection(SocketChannel.open, selector_) { messages.synchronized{ /*messages += message*/ messages.enqueue(message) - logInfo("Added [" + message + "] to outbox for sending to [" + remoteConnectionManagerId + "]") + logDebug("Added [" + message + "] to outbox for sending to [" + remoteConnectionManagerId + "]") } } @@ -136,7 +136,7 @@ extends Connection(SocketChannel.open, selector_) { return chunk } /*logInfo("Finished sending [" + message + "] to [" + remoteConnectionManagerId + "]")*/ - logInfo("Finished sending [" + message + "] to [" + remoteConnectionManagerId + "] in " + message.timeTaken ) + logDebug("Finished sending [" + message + "] to [" + remoteConnectionManagerId + "] in " + message.timeTaken ) } } None diff --git a/core/src/main/scala/spark/network/ConnectionManager.scala b/core/src/main/scala/spark/network/ConnectionManager.scala index bd0980029a..0e764fff81 100644 --- a/core/src/main/scala/spark/network/ConnectionManager.scala +++ b/core/src/main/scala/spark/network/ConnectionManager.scala @@ -306,7 +306,7 @@ class ConnectionManager(port: Int) extends Logging { } val connection = connectionsById.getOrElse(connectionManagerId, startNewConnection()) message.senderAddress = id.toSocketAddress() - logInfo("Sending [" + message + "] to [" + 
connectionManagerId + "]") + logDebug("Sending [" + message + "] to [" + connectionManagerId + "]") /*connection.send(message)*/ sendMessageRequests.synchronized { sendMessageRequests += ((message, connection)) diff --git a/core/src/main/scala/spark/scheduler/cluster/TaskSetManager.scala b/core/src/main/scala/spark/scheduler/cluster/TaskSetManager.scala index be24316e80..5412e8d8c0 100644 --- a/core/src/main/scala/spark/scheduler/cluster/TaskSetManager.scala +++ b/core/src/main/scala/spark/scheduler/cluster/TaskSetManager.scala @@ -88,6 +88,7 @@ class TaskSetManager( // Figure out the current map output tracker generation and set it on all tasks val generation = sched.mapOutputTracker.getGeneration + logInfo("Generation for " + taskSet.id + ": " + generation) for (t <- tasks) { t.generation = generation } From 117e3f8c8602c1303fa0e31840d85d1a7a6e3d9d Mon Sep 17 00:00:00 2001 From: Matei Zaharia Date: Sun, 26 Aug 2012 19:52:56 -0700 Subject: [PATCH 023/226] Fix a bug that was causing FetchFailedException not to be thrown --- core/src/main/scala/spark/BlockStoreShuffleFetcher.scala | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/core/src/main/scala/spark/BlockStoreShuffleFetcher.scala b/core/src/main/scala/spark/BlockStoreShuffleFetcher.scala index 3431ad2258..45a14c8290 100644 --- a/core/src/main/scala/spark/BlockStoreShuffleFetcher.scala +++ b/core/src/main/scala/spark/BlockStoreShuffleFetcher.scala @@ -48,8 +48,9 @@ class BlockStoreShuffleFetcher extends ShuffleFetcher with Logging { } } } catch { + // TODO: this is really ugly -- let's find a better way of throwing a FetchFailedException case be: BlockException => { - val regex = "shuffledid_([0-9]*)_([0-9]*)_([0-9]]*)".r + val regex = "shuffleid_([0-9]*)_([0-9]*)_([0-9]]*)".r be.blockId match { case regex(sId, mId, rId) => { val address = addresses(mId.toInt) From 69c2ab04083972e4ecf1393ffd0cb0acb56b4f7d Mon Sep 17 00:00:00 2001 From: Matei Zaharia Date: Sun, 26 Aug 2012 20:00:58 -0700 Subject: [PATCH 024/226] logging --- core/src/main/scala/spark/executor/Executor.scala | 1 + 1 file changed, 1 insertion(+) diff --git a/core/src/main/scala/spark/executor/Executor.scala b/core/src/main/scala/spark/executor/Executor.scala index 9e335c25f7..dba209ac27 100644 --- a/core/src/main/scala/spark/executor/Executor.scala +++ b/core/src/main/scala/spark/executor/Executor.scala @@ -63,6 +63,7 @@ class Executor extends Logging { Thread.currentThread.setContextClassLoader(classLoader) Accumulators.clear() val task = ser.deserialize[Task[Any]](serializedTask, classLoader) + logInfo("Its generation is " + task.generation) env.mapOutputTracker.updateGeneration(task.generation) val value = task.run(taskId.toInt) val accumUpdates = Accumulators.values From b914cd0dfa21b615c29d2ce935f623f209afa8f4 Mon Sep 17 00:00:00 2001 From: Matei Zaharia Date: Sun, 26 Aug 2012 20:07:59 -0700 Subject: [PATCH 025/226] Serialize generation correctly in ShuffleMapTask --- core/src/main/scala/spark/scheduler/ShuffleMapTask.scala | 2 ++ 1 file changed, 2 insertions(+) diff --git a/core/src/main/scala/spark/scheduler/ShuffleMapTask.scala b/core/src/main/scala/spark/scheduler/ShuffleMapTask.scala index f78e0e5fb2..73479bff01 100644 --- a/core/src/main/scala/spark/scheduler/ShuffleMapTask.scala +++ b/core/src/main/scala/spark/scheduler/ShuffleMapTask.scala @@ -90,6 +90,7 @@ class ShuffleMapTask( out.writeInt(bytes.length) out.write(bytes) out.writeInt(partition) + out.writeLong(generation) out.writeObject(split) } @@ -102,6 +103,7 @@ class ShuffleMapTask( rdd 
= rdd_ dep = dep_ partition = in.readInt() + generation = in.readLong() split = in.readObject().asInstanceOf[Split] } From e2cf197a0a878f9c942dafe98a70bdaefb5df58d Mon Sep 17 00:00:00 2001 From: root Date: Mon, 27 Aug 2012 03:34:15 +0000 Subject: [PATCH 026/226] Made WordCount2 even more configurable --- .../scala/spark/streaming/examples/WordCount2.scala | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/streaming/src/main/scala/spark/streaming/examples/WordCount2.scala b/streaming/src/main/scala/spark/streaming/examples/WordCount2.scala index 87b62817ea..1afe87e723 100644 --- a/streaming/src/main/scala/spark/streaming/examples/WordCount2.scala +++ b/streaming/src/main/scala/spark/streaming/examples/WordCount2.scala @@ -70,15 +70,17 @@ object WordCount2 { def main (args: Array[String]) { - if (args.length < 4) { - println ("Usage: SparkStreamContext ") + if (args.length != 5) { + println ("Usage: SparkStreamContext ") System.exit(1) } - val Array(master, file, mapTasks, reduceTasks) = args + val Array(master, file, mapTasks, reduceTasks, batchMillis) = args + + val BATCH_DURATION = Milliseconds(batchMillis.toLong) val ssc = new SparkStreamContext(master, "WordCount2") - ssc.setBatchDuration(Seconds(1)) + ssc.setBatchDuration(BATCH_DURATION) val data = ssc.sc.textFile(file, mapTasks.toInt).persist(StorageLevel.MEMORY_ONLY_DESER_2) println("Data count: " + data.count()) @@ -92,7 +94,7 @@ object WordCount2 { val windowedCounts = sentences .mapPartitions(splitAndCountPartitions) - .reduceByKeyAndWindow(add _, subtract _, Seconds(10), Seconds(1), reduceTasks.toInt) + .reduceByKeyAndWindow(add _, subtract _, Seconds(10), BATCH_DURATION, reduceTasks.toInt) windowedCounts.persist(StorageLevel.MEMORY_ONLY_DESER, StorageLevel.MEMORY_ONLY_DESER_2, Seconds(10)) windowedCounts.print() From b4a2214218eeb9ebd95b59d88c2212fe717efd9e Mon Sep 17 00:00:00 2001 From: Matei Zaharia Date: Mon, 27 Aug 2012 22:49:29 -0700 Subject: [PATCH 027/226] More fault tolerance fixes to catch lost tasks --- .../scala/spark/scheduler/cluster/TaskInfo.scala | 2 ++ .../spark/scheduler/cluster/TaskSetManager.scala | 13 +++++++++++-- 2 files changed, 13 insertions(+), 2 deletions(-) diff --git a/core/src/main/scala/spark/scheduler/cluster/TaskInfo.scala b/core/src/main/scala/spark/scheduler/cluster/TaskInfo.scala index 0fc1d8ed30..65e59841a9 100644 --- a/core/src/main/scala/spark/scheduler/cluster/TaskInfo.scala +++ b/core/src/main/scala/spark/scheduler/cluster/TaskInfo.scala @@ -20,6 +20,8 @@ class TaskInfo(val taskId: Long, val index: Int, val launchTime: Long, val host: def successful: Boolean = finished && !failed + def running: Boolean = !finished + def duration: Long = { if (!finished) { throw new UnsupportedOperationException("duration() called on unfinished tasks") diff --git a/core/src/main/scala/spark/scheduler/cluster/TaskSetManager.scala b/core/src/main/scala/spark/scheduler/cluster/TaskSetManager.scala index 5412e8d8c0..17317e80df 100644 --- a/core/src/main/scala/spark/scheduler/cluster/TaskSetManager.scala +++ b/core/src/main/scala/spark/scheduler/cluster/TaskSetManager.scala @@ -265,6 +265,11 @@ class TaskSetManager( def taskLost(tid: Long, state: TaskState, serializedData: ByteBuffer) { val info = taskInfos(tid) + if (info.failed) { + // We might get two task-lost messages for the same task in coarse-grained Mesos mode, + // or even from Mesos itself when acks get delayed. 
+ return + } val index = info.index info.markFailed() if (!finished(index)) { @@ -341,7 +346,7 @@ class TaskSetManager( } def hostLost(hostname: String) { - logInfo("Re-queueing tasks for " + hostname) + logInfo("Re-queueing tasks for " + hostname + " from TaskSet " + taskSet.id) // If some task has preferred locations only on hostname, put it in the no-prefs list // to avoid the wait from delay scheduling for (index <- getPendingTasksForHost(hostname)) { @@ -350,7 +355,7 @@ class TaskSetManager( pendingTasksWithNoPrefs += index } } - // Also re-enqueue any tasks that ran on the failed host if this is a shuffle map stage + // Re-enqueue any tasks that ran on the failed host if this is a shuffle map stage if (tasks(0).isInstanceOf[ShuffleMapTask]) { for ((tid, info) <- taskInfos if info.host == hostname) { val index = taskInfos(tid).index @@ -365,6 +370,10 @@ class TaskSetManager( } } } + // Also re-enqueue any tasks that were running on the node + for ((tid, info) <- taskInfos if info.running && info.host == hostname) { + taskLost(tid, TaskState.KILLED, null) + } } /** From 17af2df0cdcdc4f02013bd7b4351e0a9d9ee9b25 Mon Sep 17 00:00:00 2001 From: Matei Zaharia Date: Mon, 27 Aug 2012 23:07:32 -0700 Subject: [PATCH 028/226] Log levels --- core/src/main/scala/spark/MapOutputTracker.scala | 2 +- .../src/main/scala/spark/scheduler/cluster/TaskSetManager.scala | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/core/src/main/scala/spark/MapOutputTracker.scala b/core/src/main/scala/spark/MapOutputTracker.scala index e249430905..de23eb6f48 100644 --- a/core/src/main/scala/spark/MapOutputTracker.scala +++ b/core/src/main/scala/spark/MapOutputTracker.scala @@ -158,7 +158,7 @@ class MapOutputTracker(actorSystem: ActorSystem, isMaster: Boolean) extends Logg def incrementGeneration() { generationLock.synchronized { generation += 1 - logInfo("Increasing generation to " + generation) + logDebug("Increasing generation to " + generation) } } diff --git a/core/src/main/scala/spark/scheduler/cluster/TaskSetManager.scala b/core/src/main/scala/spark/scheduler/cluster/TaskSetManager.scala index 17317e80df..5a7df6040c 100644 --- a/core/src/main/scala/spark/scheduler/cluster/TaskSetManager.scala +++ b/core/src/main/scala/spark/scheduler/cluster/TaskSetManager.scala @@ -88,7 +88,7 @@ class TaskSetManager( // Figure out the current map output tracker generation and set it on all tasks val generation = sched.mapOutputTracker.getGeneration - logInfo("Generation for " + taskSet.id + ": " + generation) + logDebug("Generation for " + taskSet.id + ": " + generation) for (t <- tasks) { t.generation = generation } From b5b93a621c6386929cee97e799d18b29a53bafbc Mon Sep 17 00:00:00 2001 From: Tathagata Das Date: Tue, 28 Aug 2012 12:35:19 -0700 Subject: [PATCH 029/226] Added capabllity to take streaming input from network. Renamed SparkStreamContext to StreamingContext. 
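// The patch below adds StreamingContext.createNetworkStream, which takes an
// (InputStream) => Iterator[T] converter so callers decide how raw socket bytes become
// records; createNetworkTextStream simply plugs in NetworkInputReceiver.bytesToLines.
// A minimal sketch of a custom converter (illustrative only; names not in the patch,
// such as bytesToWords, are made up for this example):
object CustomConverterSketch {
  import java.io.{BufferedReader, InputStream, InputStreamReader}

  // Turn a raw text stream into an iterator of individual words.
  def bytesToWords(inputStream: InputStream): Iterator[String] = {
    val reader = new BufferedReader(new InputStreamReader(inputStream, "UTF-8"))
    Iterator.continually(reader.readLine())
      .takeWhile(_ != null)                 // stop at end of stream
      .flatMap(_.split(" "))                // one element per word
      .filter(_.nonEmpty)
  }

  // Hypothetical usage with the API introduced below:
  //   val words = ssc.createNetworkStream[String]("localhost", 9999, bytesToWords)
}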
--- .../streaming/ConstantInputDStream.scala | 2 +- .../main/scala/spark/streaming/DStream.scala | 10 +- .../spark/streaming/FileInputDStream.scala | 84 +----- .../scala/spark/streaming/JobManager.scala | 4 +- .../spark/streaming/NetworkInputDStream.scala | 36 +++ .../streaming/NetworkInputReceiver.scala | 248 ++++++++++++++++++ .../spark/streaming/NetworkInputTracker.scala | 110 ++++++++ .../streaming/PairDStreamFunctions.scala | 2 +- .../spark/streaming/QueueInputDStream.scala | 2 +- .../streaming/ReducedWindowedDStream.scala | 2 +- .../scala/spark/streaming/Scheduler.scala | 3 +- ...amContext.scala => StreamingContext.scala} | 89 ++++--- .../src/main/scala/spark/streaming/Time.scala | 2 +- .../spark/streaming/WindowedDStream.scala | 2 +- .../spark/streaming/examples/ExampleOne.scala | 6 +- .../spark/streaming/examples/ExampleTwo.scala | 6 +- .../{WordCount.scala => WordCountHdfs.scala} | 11 +- .../streaming/examples/WordCountNetwork.scala | 25 ++ .../scala/spark/streaming/DStreamSuite.scala | 4 +- 19 files changed, 504 insertions(+), 144 deletions(-) create mode 100644 streaming/src/main/scala/spark/streaming/NetworkInputDStream.scala create mode 100644 streaming/src/main/scala/spark/streaming/NetworkInputReceiver.scala create mode 100644 streaming/src/main/scala/spark/streaming/NetworkInputTracker.scala rename streaming/src/main/scala/spark/streaming/{SparkStreamContext.scala => StreamingContext.scala} (68%) rename streaming/src/main/scala/spark/streaming/examples/{WordCount.scala => WordCountHdfs.scala} (68%) create mode 100644 streaming/src/main/scala/spark/streaming/examples/WordCountNetwork.scala diff --git a/streaming/src/main/scala/spark/streaming/ConstantInputDStream.scala b/streaming/src/main/scala/spark/streaming/ConstantInputDStream.scala index 6a2be34633..9bc204dd09 100644 --- a/streaming/src/main/scala/spark/streaming/ConstantInputDStream.scala +++ b/streaming/src/main/scala/spark/streaming/ConstantInputDStream.scala @@ -5,7 +5,7 @@ import spark.RDD /** * An input stream that always returns the same RDD on each timestep. Useful for testing. 
*/ -class ConstantInputDStream[T: ClassManifest](ssc: SparkStreamContext, rdd: RDD[T]) +class ConstantInputDStream[T: ClassManifest](ssc: StreamingContext, rdd: RDD[T]) extends InputDStream[T](ssc) { override def start() {} diff --git a/streaming/src/main/scala/spark/streaming/DStream.scala b/streaming/src/main/scala/spark/streaming/DStream.scala index c63c043415..0e45acabc3 100644 --- a/streaming/src/main/scala/spark/streaming/DStream.scala +++ b/streaming/src/main/scala/spark/streaming/DStream.scala @@ -1,6 +1,6 @@ package spark.streaming -import spark.streaming.SparkStreamContext._ +import spark.streaming.StreamingContext._ import spark.RDD import spark.BlockRDD @@ -15,7 +15,7 @@ import scala.collection.mutable.HashMap import java.util.concurrent.ArrayBlockingQueue -abstract class DStream[T: ClassManifest] (@transient val ssc: SparkStreamContext) +abstract class DStream[T: ClassManifest] (@transient val ssc: StreamingContext) extends Logging with Serializable { initLogging() @@ -142,7 +142,7 @@ extends Logging with Serializable { } /** - * This method generates a SparkStream job for the given time + * This method generates a SparkStreaming job for the given time * and may require to be overriden by subclasses */ def generateJob(time: Time): Option[Job] = { @@ -249,7 +249,7 @@ extends Logging with Serializable { abstract class InputDStream[T: ClassManifest] ( - ssc: SparkStreamContext) + @transient ssc: StreamingContext) extends DStream[T](ssc) { override def dependencies = List() @@ -397,7 +397,7 @@ extends DStream[T](parents(0).ssc) { } if (parents.map(_.ssc).distinct.size > 1) { - throw new IllegalArgumentException("Array of parents have different SparkStreamContexts") + throw new IllegalArgumentException("Array of parents have different StreamingContexts") } if (parents.map(_.slideTime).distinct.size > 1) { diff --git a/streaming/src/main/scala/spark/streaming/FileInputDStream.scala b/streaming/src/main/scala/spark/streaming/FileInputDStream.scala index 88aa375289..96a64f0018 100644 --- a/streaming/src/main/scala/spark/streaming/FileInputDStream.scala +++ b/streaming/src/main/scala/spark/streaming/FileInputDStream.scala @@ -19,7 +19,7 @@ import org.apache.hadoop.mapreduce.{InputFormat => NewInputFormat} class FileInputDStream[K: ClassManifest, V: ClassManifest, F <: NewInputFormat[K,V] : ClassManifest]( - ssc: SparkStreamContext, + ssc: StreamingContext, directory: Path, filter: PathFilter = FileInputDStream.defaultPathFilter, newFilesOnly: Boolean = true) @@ -28,7 +28,7 @@ class FileInputDStream[K: ClassManifest, V: ClassManifest, F <: NewInputFormat[K val fs = directory.getFileSystem(new Configuration()) var lastModTime: Long = 0 - override def start() { + override def start() { if (newFilesOnly) { lastModTime = System.currentTimeMillis() } else { @@ -82,83 +82,3 @@ object FileInputDStream { } } -/* -class NetworkInputDStream[T: ClassManifest]( - val networkInputName: String, - val addresses: Array[InetSocketAddress], - batchDuration: Time, - ssc: SparkStreamContext) -extends InputDStream[T](networkInputName, batchDuration, ssc) { - - - // TODO(Haoyuan): This is for the performance test. 
- @transient var rdd: RDD[T] = null - - if (System.getProperty("spark.fake", "false") == "true") { - logInfo("Running initial count to cache fake RDD") - rdd = ssc.sc.textFile(SparkContext.inputFile, - SparkContext.idealPartitions).asInstanceOf[RDD[T]] - val fakeCacheLevel = System.getProperty("spark.fake.cache", "") - if (fakeCacheLevel == "MEMORY_ONLY_2") { - rdd.persist(StorageLevel.MEMORY_ONLY_2) - } else if (fakeCacheLevel == "MEMORY_ONLY_DESER_2") { - rdd.persist(StorageLevel.MEMORY_ONLY_2) - } else if (fakeCacheLevel != "") { - logError("Invalid fake cache level: " + fakeCacheLevel) - System.exit(1) - } - rdd.count() - } - - @transient val references = new HashMap[Time,String] - - override def compute(validTime: Time): Option[RDD[T]] = { - if (System.getProperty("spark.fake", "false") == "true") { - logInfo("Returning fake RDD at " + validTime) - return Some(rdd) - } - references.get(validTime) match { - case Some(reference) => - if (reference.startsWith("file") || reference.startsWith("hdfs")) { - logInfo("Reading from file " + reference + " for time " + validTime) - Some(ssc.sc.textFile(reference).asInstanceOf[RDD[T]]) - } else { - logInfo("Getting from BlockManager " + reference + " for time " + validTime) - Some(new BlockRDD(ssc.sc, Array(reference))) - } - case None => - throw new Exception(this.toString + ": Reference missing for time " + validTime + "!!!") - None - } - } - - def setReference(time: Time, reference: AnyRef) { - references += ((time, reference.toString)) - logInfo("Reference added for time " + time + " - " + reference.toString) - } -} - - -class TestInputDStream( - val testInputName: String, - batchDuration: Time, - ssc: SparkStreamContext) -extends InputDStream[String](testInputName, batchDuration, ssc) { - - @transient val references = new HashMap[Time,Array[String]] - - override def compute(validTime: Time): Option[RDD[String]] = { - references.get(validTime) match { - case Some(reference) => - Some(new BlockRDD[String](ssc.sc, reference)) - case None => - throw new Exception(this.toString + ": Reference missing for time " + validTime + "!!!") - None - } - } - - def setReference(time: Time, reference: AnyRef) { - references += ((time, reference.asInstanceOf[Array[String]])) - } -} -*/ diff --git a/streaming/src/main/scala/spark/streaming/JobManager.scala b/streaming/src/main/scala/spark/streaming/JobManager.scala index c37fe1e9ad..2a4fe3dd11 100644 --- a/streaming/src/main/scala/spark/streaming/JobManager.scala +++ b/streaming/src/main/scala/spark/streaming/JobManager.scala @@ -5,9 +5,9 @@ import spark.SparkEnv import java.util.concurrent.Executors -class JobManager(ssc: SparkStreamContext, numThreads: Int = 1) extends Logging { +class JobManager(ssc: StreamingContext, numThreads: Int = 1) extends Logging { - class JobHandler(ssc: SparkStreamContext, job: Job) extends Runnable { + class JobHandler(ssc: StreamingContext, job: Job) extends Runnable { def run() { SparkEnv.set(ssc.env) try { diff --git a/streaming/src/main/scala/spark/streaming/NetworkInputDStream.scala b/streaming/src/main/scala/spark/streaming/NetworkInputDStream.scala new file mode 100644 index 0000000000..ee09324c8c --- /dev/null +++ b/streaming/src/main/scala/spark/streaming/NetworkInputDStream.scala @@ -0,0 +1,36 @@ +package spark.streaming + +import akka.actor._ +import akka.pattern.ask +import akka.util.duration._ +import akka.dispatch._ + +import spark.RDD +import spark.BlockRDD +import spark.Logging + +import java.io.InputStream + + +class NetworkInputDStream[T: ClassManifest]( + 
@transient ssc: StreamingContext, + val host: String, + val port: Int, + val bytesToObjects: InputStream => Iterator[T] + ) extends InputDStream[T](ssc) with Logging { + + val id = ssc.getNewNetworkStreamId() + + def start() { } + + def stop() { } + + override def compute(validTime: Time): Option[RDD[T]] = { + val blockIds = ssc.networkInputTracker.getBlockIds(id, validTime) + return Some(new BlockRDD[T](ssc.sc, blockIds)) + } + + def createReceiver(): NetworkInputReceiver[T] = { + new NetworkInputReceiver(id, host, port, bytesToObjects) + } +} \ No newline at end of file diff --git a/streaming/src/main/scala/spark/streaming/NetworkInputReceiver.scala b/streaming/src/main/scala/spark/streaming/NetworkInputReceiver.scala new file mode 100644 index 0000000000..7add6246b7 --- /dev/null +++ b/streaming/src/main/scala/spark/streaming/NetworkInputReceiver.scala @@ -0,0 +1,248 @@ +package spark.streaming + +import spark.Logging +import spark.storage.BlockManager +import spark.storage.StorageLevel +import spark.SparkEnv +import spark.streaming.util.SystemClock +import spark.streaming.util.RecurringTimer + +import scala.collection.mutable.ArrayBuffer +import scala.collection.mutable.Queue +import scala.collection.mutable.SynchronizedPriorityQueue +import scala.math.Ordering + +import java.net.InetSocketAddress +import java.net.Socket +import java.io.InputStream +import java.io.BufferedInputStream +import java.io.DataInputStream +import java.io.EOFException +import java.util.concurrent.atomic.AtomicInteger +import java.util.concurrent.ArrayBlockingQueue + +import akka.actor._ +import akka.pattern.ask +import akka.util.duration._ +import akka.dispatch._ + +trait NetworkInputReceiverMessage +case class GetBlockIds(time: Long) extends NetworkInputReceiverMessage +case class GotBlockIds(streamId: Int, blocksIds: Array[String]) extends NetworkInputReceiverMessage +case class StopReceiver() extends NetworkInputReceiverMessage + +class NetworkInputReceiver[T: ClassManifest](streamId: Int, host: String, port: Int, bytesToObjects: InputStream => Iterator[T]) +extends Logging { + + class ReceiverActor extends Actor { + override def preStart() = { + logInfo("Attempting to register") + val ip = System.getProperty("spark.master.host", "localhost") + val port = System.getProperty("spark.master.port", "7077").toInt + val actorName: String = "NetworkInputTracker" + val url = "akka://spark@%s:%s/user/%s".format(ip, port, actorName) + val trackerActor = env.actorSystem.actorFor(url) + val timeout = 100.milliseconds + val future = trackerActor.ask(RegisterReceiver(streamId, self))(timeout) + Await.result(future, timeout) + } + + def receive = { + case GetBlockIds(time) => { + logInfo("Got request for block ids for " + time) + sender ! GotBlockIds(streamId, dataHandler.getPushedBlocks()) + } + + case StopReceiver() => { + if (receivingThread != null) { + receivingThread.interrupt() + } + sender ! 
true + } + } + } + + class DataHandler { + + class Block(val time: Long, val iterator: Iterator[T]) { + val blockId = "input-" + streamId + "-" + time + var pushed = true + override def toString() = "input block " + blockId + } + + val clock = new SystemClock() + val blockInterval = 200L + val blockIntervalTimer = new RecurringTimer(clock, blockInterval, updateCurrentBuffer) + val blockOrdering = new Ordering[Block] { + def compare(b1: Block, b2: Block) = (b1.time - b2.time).toInt + } + val blockStorageLevel = StorageLevel.DISK_AND_MEMORY + val blocksForPushing = new ArrayBlockingQueue[Block](1000) + val blocksForReporting = new SynchronizedPriorityQueue[Block]()(blockOrdering) + val blockPushingThread = new Thread() { override def run() { keepPushingBlocks() } } + + var currentBuffer = new ArrayBuffer[T] + + def start() { + blockIntervalTimer.start() + blockPushingThread.start() + logInfo("Data handler started") + } + + def stop() { + blockIntervalTimer.stop() + blockPushingThread.interrupt() + } + + def += (obj: T) { + currentBuffer += obj + } + + def updateCurrentBuffer(time: Long) { + val newBlockBuffer = currentBuffer + currentBuffer = new ArrayBuffer[T] + if (newBlockBuffer.size > 0) { + val newBlock = new Block(time - blockInterval, newBlockBuffer.toIterator) + blocksForPushing.add(newBlock) + blocksForReporting.enqueue(newBlock) + } + } + + def keepPushingBlocks() { + logInfo("Block pushing thread started") + try { + while(true) { + val block = blocksForPushing.take() + if (blockManager != null) { + blockManager.put(block.blockId, block.iterator, blockStorageLevel) + block.pushed = true + } else { + logWarning(block + " not put as block manager is null") + } + } + } catch { + case ie: InterruptedException => println("Block pushing thread interrupted") + case e: Exception => e.printStackTrace() + } + } + + def getPushedBlocks(): Array[String] = { + val pushedBlocks = new ArrayBuffer[String]() + var loop = true + while(loop && !blocksForReporting.isEmpty) { + val block = blocksForReporting.dequeue() + if (block == null) { + loop = false + } else if (!block.pushed) { + blocksForReporting.enqueue(block) + } else { + pushedBlocks += block.blockId + } + } + logInfo("Got " + pushedBlocks.size + " blocks") + pushedBlocks.toArray + } + } + + val blockManager = if (SparkEnv.get != null) SparkEnv.get.blockManager else null + val dataHandler = new DataHandler() + val env = SparkEnv.get + + var receiverActor: ActorRef = null + var receivingThread: Thread = null + + def run() { + initLogging() + var socket: Socket = null + try { + if (SparkEnv.get != null) { + receiverActor = SparkEnv.get.actorSystem.actorOf(Props(new ReceiverActor), "ReceiverActor-" + streamId) + } + dataHandler.start() + socket = connect() + receivingThread = Thread.currentThread() + receive(socket) + } catch { + case ie: InterruptedException => logInfo("Receiver interrupted") + } finally { + receivingThread = null + if (socket != null) socket.close() + dataHandler.stop() + } + } + + def connect(): Socket = { + logInfo("Connecting to " + host + ":" + port) + val socket = new Socket(host, port) + logInfo("Connected to " + host + ":" + port) + socket + } + + def receive(socket: Socket) { + val iterator = bytesToObjects(socket.getInputStream()) + while(iterator.hasNext) { + val obj = iterator.next + dataHandler += obj + } + } +} + + +object NetworkInputReceiver { + + def bytesToLines(inputStream: InputStream): Iterator[String] = { + val bufferedInputStream = new BufferedInputStream(inputStream) + val dataInputStream = new 
DataInputStream(bufferedInputStream) + + val iterator = new Iterator[String] { + var gotNext = false + var finished = false + var nextValue: String = null + + private def getNext() { + try { + nextValue = dataInputStream.readLine() + println("[" + nextValue + "]") + } catch { + case eof: EOFException => + finished = true + } + gotNext = true + } + + override def hasNext: Boolean = { + if (!gotNext) { + getNext() + } + if (finished) { + dataInputStream.close() + } + !finished + } + + + override def next(): String = { + if (!gotNext) { + getNext() + } + if (finished) { + throw new NoSuchElementException("End of stream") + } + gotNext = false + nextValue + } + } + iterator + } + + def main(args: Array[String]) { + if (args.length < 2) { + println("NetworkReceiver ") + System.exit(1) + } + val host = args(0) + val port = args(1).toInt + val receiver = new NetworkInputReceiver(0, host, port, bytesToLines) + receiver.run() + } +} diff --git a/streaming/src/main/scala/spark/streaming/NetworkInputTracker.scala b/streaming/src/main/scala/spark/streaming/NetworkInputTracker.scala new file mode 100644 index 0000000000..07758665c9 --- /dev/null +++ b/streaming/src/main/scala/spark/streaming/NetworkInputTracker.scala @@ -0,0 +1,110 @@ +package spark.streaming + +import spark.Logging +import spark.SparkEnv + +import scala.collection.mutable.HashMap + +import akka.actor._ +import akka.pattern.ask +import akka.util.duration._ +import akka.dispatch._ + +trait NetworkInputTrackerMessage +case class RegisterReceiver(streamId: Int, receiverActor: ActorRef) extends NetworkInputTrackerMessage + +class NetworkInputTracker( + @transient ssc: StreamingContext, + @transient networkInputStreams: Array[NetworkInputDStream[_]]) +extends Logging { + + class TrackerActor extends Actor { + def receive = { + case RegisterReceiver(streamId, receiverActor) => { + if (!networkInputStreamIds.contains(streamId)) { + throw new Exception("Register received for unexpected id " + streamId) + } + receiverInfo += ((streamId, receiverActor)) + logInfo("Registered receiver for network stream " + streamId) + sender ! 
true + } + } + } + + class ReceiverExecutor extends Thread { + val env = ssc.env + + override def run() { + try { + SparkEnv.set(env) + startReceivers() + } catch { + case ie: InterruptedException => logInfo("ReceiverExecutor interrupted") + } finally { + stopReceivers() + } + } + + def startReceivers() { + val tempRDD = ssc.sc.makeRDD(networkInputStreams, networkInputStreams.size) + + val startReceiver = (iterator: Iterator[NetworkInputDStream[_]]) => { + if (!iterator.hasNext) { + throw new Exception("Could not start receiver as details not found.") + } + val stream = iterator.next + val receiver = stream.createReceiver() + receiver.run() + } + + ssc.sc.runJob(tempRDD, startReceiver) + } + + def stopReceivers() { + implicit val ec = env.actorSystem.dispatcher + val message = new StopReceiver() + val listOfFutures = receiverInfo.values.map(_.ask(message)(timeout)).toList + val futureOfList = Future.sequence(listOfFutures) + Await.result(futureOfList, timeout) + } + } + + val networkInputStreamIds = networkInputStreams.map(_.id).toArray + val receiverExecutor = new ReceiverExecutor() + val receiverInfo = new HashMap[Int, ActorRef] + val receivedBlockIds = new HashMap[Int, Array[String]] + val timeout = 1000.milliseconds + + + var currentTime: Time = null + + def start() { + ssc.env.actorSystem.actorOf(Props(new TrackerActor), "NetworkInputTracker") + receiverExecutor.start() + } + + def stop() { + // stop the actor + receiverExecutor.interrupt() + } + + def getBlockIds(receiverId: Int, time: Time): Array[String] = synchronized { + if (currentTime == null || time > currentTime) { + logInfo("Getting block ids from receivers for " + time) + implicit val ec = ssc.env.actorSystem.dispatcher + receivedBlockIds.clear() + val message = new GetBlockIds(time) + val listOfFutures = receiverInfo.values.map( + _.ask(message)(timeout).mapTo[GotBlockIds] + ).toList + val futureOfList = Future.sequence(listOfFutures) + val allBlockIds = Await.result(futureOfList, timeout) + receivedBlockIds ++= allBlockIds.map(x => (x.streamId, x.blocksIds)) + if (receivedBlockIds.size != receiverInfo.size) { + throw new Exception("Unexpected number of the Block IDs received") + } + currentTime = time + } + receivedBlockIds.getOrElse(receiverId, Array[String]()) + } +} \ No newline at end of file diff --git a/streaming/src/main/scala/spark/streaming/PairDStreamFunctions.scala b/streaming/src/main/scala/spark/streaming/PairDStreamFunctions.scala index 0cf296f21a..d2887c3aea 100644 --- a/streaming/src/main/scala/spark/streaming/PairDStreamFunctions.scala +++ b/streaming/src/main/scala/spark/streaming/PairDStreamFunctions.scala @@ -1,7 +1,7 @@ package spark.streaming import scala.collection.mutable.ArrayBuffer -import spark.streaming.SparkStreamContext._ +import spark.streaming.StreamingContext._ class PairDStreamFunctions[K: ClassManifest, V: ClassManifest](stream: DStream[(K,V)]) extends Serializable { diff --git a/streaming/src/main/scala/spark/streaming/QueueInputDStream.scala b/streaming/src/main/scala/spark/streaming/QueueInputDStream.scala index c78abd1a87..bab48ff954 100644 --- a/streaming/src/main/scala/spark/streaming/QueueInputDStream.scala +++ b/streaming/src/main/scala/spark/streaming/QueueInputDStream.scala @@ -7,7 +7,7 @@ import scala.collection.mutable.Queue import scala.collection.mutable.ArrayBuffer class QueueInputDStream[T: ClassManifest]( - ssc: SparkStreamContext, + ssc: StreamingContext, val queue: Queue[RDD[T]], oneAtATime: Boolean, defaultRDD: RDD[T] diff --git 
a/streaming/src/main/scala/spark/streaming/ReducedWindowedDStream.scala b/streaming/src/main/scala/spark/streaming/ReducedWindowedDStream.scala index 11fa4e5443..9d48870877 100644 --- a/streaming/src/main/scala/spark/streaming/ReducedWindowedDStream.scala +++ b/streaming/src/main/scala/spark/streaming/ReducedWindowedDStream.scala @@ -1,6 +1,6 @@ package spark.streaming -import spark.streaming.SparkStreamContext._ +import spark.streaming.StreamingContext._ import spark.RDD import spark.UnionRDD diff --git a/streaming/src/main/scala/spark/streaming/Scheduler.scala b/streaming/src/main/scala/spark/streaming/Scheduler.scala index 309bd95525..da50e22719 100644 --- a/streaming/src/main/scala/spark/streaming/Scheduler.scala +++ b/streaming/src/main/scala/spark/streaming/Scheduler.scala @@ -12,7 +12,7 @@ sealed trait SchedulerMessage case class InputGenerated(inputName: String, interval: Interval, reference: AnyRef = null) extends SchedulerMessage class Scheduler( - ssc: SparkStreamContext, + ssc: StreamingContext, inputStreams: Array[InputDStream[_]], outputStreams: Array[DStream[_]]) extends Logging { @@ -40,6 +40,7 @@ extends Logging { } def generateRDDs (time: Time) { + println("\n-----------------------------------------------------\n") logInfo("Generating RDDs for time " + time) outputStreams.foreach(outputStream => { outputStream.generateJob(time) match { diff --git a/streaming/src/main/scala/spark/streaming/SparkStreamContext.scala b/streaming/src/main/scala/spark/streaming/StreamingContext.scala similarity index 68% rename from streaming/src/main/scala/spark/streaming/SparkStreamContext.scala rename to streaming/src/main/scala/spark/streaming/StreamingContext.scala index 2bec1091c0..0ac86cbdf2 100644 --- a/streaming/src/main/scala/spark/streaming/SparkStreamContext.scala +++ b/streaming/src/main/scala/spark/streaming/StreamingContext.scala @@ -8,6 +8,7 @@ import spark.SparkContext import scala.collection.mutable.ArrayBuffer import scala.collection.mutable.Queue +import java.io.InputStream import java.io.IOException import java.net.InetSocketAddress import java.util.concurrent.atomic.AtomicInteger @@ -19,7 +20,7 @@ import org.apache.hadoop.io.Text import org.apache.hadoop.mapreduce.{InputFormat => NewInputFormat} import org.apache.hadoop.mapreduce.lib.input.TextInputFormat -class SparkStreamContext ( +class StreamingContext ( master: String, frameworkName: String, val sparkHome: String = null, @@ -33,8 +34,12 @@ class SparkStreamContext ( val inputStreams = new ArrayBuffer[InputDStream[_]]() val outputStreams = new ArrayBuffer[DStream[_]]() + val nextNetworkInputStreamId = new AtomicInteger(0) + var batchDuration: Time = null var scheduler: Scheduler = null + var networkInputTracker: NetworkInputTracker = null + var receiverJobThread: Thread = null def setBatchDuration(duration: Long) { setBatchDuration(Time(duration)) @@ -43,35 +48,32 @@ class SparkStreamContext ( def setBatchDuration(duration: Time) { batchDuration = duration } - - /* - def createNetworkStream[T: ClassManifest]( - name: String, - addresses: Array[InetSocketAddress], - batchDuration: Time): DStream[T] = { - - val inputStream = new NetworkinputStream[T](this, addresses) - inputStreams += inputStream - inputStream - } + + private[streaming] def getNewNetworkStreamId() = nextNetworkInputStreamId.getAndIncrement() + + def createNetworkTextStream(hostname: String, port: Int): DStream[String] = { + createNetworkStream[String](hostname, port, NetworkInputReceiver.bytesToLines) + } def createNetworkStream[T: ClassManifest]( - 
name: String, - addresses: Array[String], - batchDuration: Long): DStream[T] = { - - def stringToInetSocketAddress (str: String): InetSocketAddress = { - val parts = str.split(":") - if (parts.length != 2) { - throw new IllegalArgumentException ("Address format error") - } - new InetSocketAddress(parts(0), parts(1).toInt) - } - - readNetworkStream( - name, - addresses.map(stringToInetSocketAddress).toArray, - LongTime(batchDuration)) + hostname: String, + port: Int, + converter: (InputStream) => Iterator[T] + ): DStream[T] = { + val inputStream = new NetworkInputDStream[T](this, hostname, port, converter) + inputStreams += inputStream + inputStream + } + + /* + def createHttpTextStream(url: String): DStream[String] = { + createHttpStream(url, NetworkInputReceiver.bytesToLines) + } + + def createHttpStream[T: ClassManifest]( + url: String, + converter: (InputStream) => Iterator[T] + ): DStream[T] = { } */ @@ -126,7 +128,7 @@ class SparkStreamContext ( /** * This function verify whether the stream computation is eligible to be executed. */ - def verify() { + private def verify() { if (batchDuration == null) { throw new Exception("Batch duration has not been set") } @@ -147,8 +149,21 @@ class SparkStreamContext ( */ def start() { verify() + val networkInputStreams = inputStreams.filter(s => s match { + case n: NetworkInputDStream[_] => true + case _ => false + }).map(_.asInstanceOf[NetworkInputDStream[_]]).toArray + + if (networkInputStreams.length > 0) { + // Start the network input tracker (must start before receivers) + networkInputTracker = new NetworkInputTracker(this, networkInputStreams) + networkInputTracker.start() + } + + Thread.sleep(1000) + // Start the scheduler scheduler = new Scheduler(this, inputStreams.toArray, outputStreams.toArray) - scheduler.start() + scheduler.start() } /** @@ -156,18 +171,22 @@ class SparkStreamContext ( */ def stop() { try { - scheduler.stop() + if (scheduler != null) scheduler.stop() + if (networkInputTracker != null) networkInputTracker.stop() + if (receiverJobThread != null) receiverJobThread.interrupt() sc.stop() } catch { case e: Exception => logWarning("Error while stopping", e) } - logInfo("SparkStreamContext stopped") + logInfo("StreamingContext stopped") } } -object SparkStreamContext { - implicit def toPairDStreamFunctions[K: ClassManifest, V: ClassManifest](stream: DStream[(K,V)]) = - new PairDStreamFunctions(stream) +object StreamingContext { + implicit def toPairDStreamFunctions[K: ClassManifest, V: ClassManifest](stream: DStream[(K,V)]) = { + new PairDStreamFunctions[K, V](stream) + } } + diff --git a/streaming/src/main/scala/spark/streaming/Time.scala b/streaming/src/main/scala/spark/streaming/Time.scala index 5c476f02c3..1db95a45e1 100644 --- a/streaming/src/main/scala/spark/streaming/Time.scala +++ b/streaming/src/main/scala/spark/streaming/Time.scala @@ -1,6 +1,6 @@ package spark.streaming -class Time(private var millis: Long) { +class Time (private var millis: Long) extends Serializable { def copy() = new Time(this.millis) diff --git a/streaming/src/main/scala/spark/streaming/WindowedDStream.scala b/streaming/src/main/scala/spark/streaming/WindowedDStream.scala index 9a6617a1ee..bf11cbcec0 100644 --- a/streaming/src/main/scala/spark/streaming/WindowedDStream.scala +++ b/streaming/src/main/scala/spark/streaming/WindowedDStream.scala @@ -1,6 +1,6 @@ package spark.streaming -import spark.streaming.SparkStreamContext._ +import spark.streaming.StreamingContext._ import spark.RDD import spark.UnionRDD diff --git 
a/streaming/src/main/scala/spark/streaming/examples/ExampleOne.scala b/streaming/src/main/scala/spark/streaming/examples/ExampleOne.scala index 669f575240..2ff8790e77 100644 --- a/streaming/src/main/scala/spark/streaming/examples/ExampleOne.scala +++ b/streaming/src/main/scala/spark/streaming/examples/ExampleOne.scala @@ -1,8 +1,8 @@ package spark.streaming.examples import spark.RDD -import spark.streaming.SparkStreamContext -import spark.streaming.SparkStreamContext._ +import spark.streaming.StreamingContext +import spark.streaming.StreamingContext._ import spark.streaming.Seconds import scala.collection.mutable.SynchronizedQueue @@ -16,7 +16,7 @@ object ExampleOne { } // Create the context and set the batch size - val ssc = new SparkStreamContext(args(0), "ExampleOne") + val ssc = new StreamingContext(args(0), "ExampleOne") ssc.setBatchDuration(Seconds(1)) // Create the queue through which RDDs can be pushed to diff --git a/streaming/src/main/scala/spark/streaming/examples/ExampleTwo.scala b/streaming/src/main/scala/spark/streaming/examples/ExampleTwo.scala index be47e47a5a..ad563e2c75 100644 --- a/streaming/src/main/scala/spark/streaming/examples/ExampleTwo.scala +++ b/streaming/src/main/scala/spark/streaming/examples/ExampleTwo.scala @@ -1,7 +1,7 @@ package spark.streaming.examples -import spark.streaming.SparkStreamContext -import spark.streaming.SparkStreamContext._ +import spark.streaming.StreamingContext +import spark.streaming.StreamingContext._ import spark.streaming.Seconds import org.apache.hadoop.fs.Path import org.apache.hadoop.conf.Configuration @@ -15,7 +15,7 @@ object ExampleTwo { } // Create the context and set the batch size - val ssc = new SparkStreamContext(args(0), "ExampleTwo") + val ssc = new StreamingContext(args(0), "ExampleTwo") ssc.setBatchDuration(Seconds(2)) // Create the new directory diff --git a/streaming/src/main/scala/spark/streaming/examples/WordCount.scala b/streaming/src/main/scala/spark/streaming/examples/WordCountHdfs.scala similarity index 68% rename from streaming/src/main/scala/spark/streaming/examples/WordCount.scala rename to streaming/src/main/scala/spark/streaming/examples/WordCountHdfs.scala index ba7bc63d6a..3b86948822 100644 --- a/streaming/src/main/scala/spark/streaming/examples/WordCount.scala +++ b/streaming/src/main/scala/spark/streaming/examples/WordCountHdfs.scala @@ -1,17 +1,17 @@ package spark.streaming.examples -import spark.streaming.{Seconds, SparkStreamContext} -import spark.streaming.SparkStreamContext._ +import spark.streaming.{Seconds, StreamingContext} +import spark.streaming.StreamingContext._ -object WordCount { +object WordCountHdfs { def main(args: Array[String]) { if (args.length < 2) { - System.err.println("Usage: WordCount ") + System.err.println("Usage: WordCountHdfs ") System.exit(1) } // Create the context and set the batch size - val ssc = new SparkStreamContext(args(0), "ExampleTwo") + val ssc = new StreamingContext(args(0), "WordCountHdfs") ssc.setBatchDuration(Seconds(2)) // Create the FileInputDStream on the directory and use the @@ -23,3 +23,4 @@ object WordCount { ssc.start() } } + diff --git a/streaming/src/main/scala/spark/streaming/examples/WordCountNetwork.scala b/streaming/src/main/scala/spark/streaming/examples/WordCountNetwork.scala new file mode 100644 index 0000000000..0a33a05bae --- /dev/null +++ b/streaming/src/main/scala/spark/streaming/examples/WordCountNetwork.scala @@ -0,0 +1,25 @@ +package spark.streaming.examples + +import spark.streaming.{Seconds, StreamingContext} +import 
spark.streaming.StreamingContext._ + +object WordCountNetwork { + def main(args: Array[String]) { + if (args.length < 2) { + System.err.println("Usage: WordCountNetwork ") + System.exit(1) + } + + // Create the context and set the batch size + val ssc = new StreamingContext(args(0), "WordCountNetwork") + ssc.setBatchDuration(Seconds(2)) + + // Create the FileInputDStream on the directory and use the + // stream to count words in new files created + val lines = ssc.createNetworkTextStream(args(1), args(2).toInt) + val words = lines.flatMap(_.split(" ")) + val wordCounts = words.map(x => (x, 1)).reduceByKey(_ + _) + wordCounts.print() + ssc.start() + } +} diff --git a/streaming/src/test/scala/spark/streaming/DStreamSuite.scala b/streaming/src/test/scala/spark/streaming/DStreamSuite.scala index 2c10a03e6d..e9dc377263 100644 --- a/streaming/src/test/scala/spark/streaming/DStreamSuite.scala +++ b/streaming/src/test/scala/spark/streaming/DStreamSuite.scala @@ -12,7 +12,7 @@ import scala.collection.mutable.SynchronizedQueue class DStreamSuite extends FunSuite with BeforeAndAfter with Logging { - var ssc: SparkStreamContext = null + var ssc: StreamingContext = null val batchDurationMillis = 1000 System.setProperty("spark.streaming.clock", "spark.streaming.util.ManualClock") @@ -22,7 +22,7 @@ class DStreamSuite extends FunSuite with BeforeAndAfter with Logging { operation: DStream[U] => DStream[V], expectedOutput: Seq[Seq[V]]) { try { - ssc = new SparkStreamContext("local", "test") + ssc = new StreamingContext("local", "test") ssc.setBatchDuration(Milliseconds(batchDurationMillis)) val inputStream = ssc.createQueueStream(input.map(ssc.sc.makeRDD(_, 2)).toIterator) From 1f8085b8d097f060a3939eaff5be1f58111ec224 Mon Sep 17 00:00:00 2001 From: root Date: Wed, 29 Aug 2012 03:20:56 +0000 Subject: [PATCH 030/226] Compile fixes --- streaming/src/main/scala/spark/streaming/Time.scala | 2 +- .../main/scala/spark/streaming/examples/WordCount2.scala | 6 +++--- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/streaming/src/main/scala/spark/streaming/Time.scala b/streaming/src/main/scala/spark/streaming/Time.scala index e44025138f..9ddb65249a 100644 --- a/streaming/src/main/scala/spark/streaming/Time.scala +++ b/streaming/src/main/scala/spark/streaming/Time.scala @@ -1,6 +1,6 @@ package spark.streaming -case class Time(millis: Long) extends Serializable { +case class Time(millis: Long) { def < (that: Time): Boolean = (this.millis < that.millis) diff --git a/streaming/src/main/scala/spark/streaming/examples/WordCount2.scala b/streaming/src/main/scala/spark/streaming/examples/WordCount2.scala index 1afe87e723..3b2d909584 100644 --- a/streaming/src/main/scala/spark/streaming/examples/WordCount2.scala +++ b/streaming/src/main/scala/spark/streaming/examples/WordCount2.scala @@ -3,7 +3,7 @@ package spark.streaming.examples import spark.SparkContext import SparkContext._ import spark.streaming._ -import SparkStreamContext._ +import StreamingContext._ import spark.storage.StorageLevel @@ -71,7 +71,7 @@ object WordCount2 { def main (args: Array[String]) { if (args.length != 5) { - println ("Usage: SparkStreamContext ") + println ("Usage: WordCount2 ") System.exit(1) } @@ -79,7 +79,7 @@ object WordCount2 { val BATCH_DURATION = Milliseconds(batchMillis.toLong) - val ssc = new SparkStreamContext(master, "WordCount2") + val ssc = new StreamingContext(master, "WordCount2") ssc.setBatchDuration(BATCH_DURATION) val data = ssc.sc.textFile(file, mapTasks.toInt).persist(StorageLevel.MEMORY_ONLY_DESER_2) From 
4db3a967669a53de4c4b79b4c0b70daa5accb682 Mon Sep 17 00:00:00 2001 From: Tathagata Das Date: Wed, 29 Aug 2012 13:04:01 -0700 Subject: [PATCH 031/226] Made minor changes to reduce compilation errors in Eclipse. Twirl stuff still does not compile in Eclipse. --- .../scala/spark/network/ConnectionManager.scala | 16 +++++++++++++--- .../spark/network/ConnectionManagerTest.scala | 5 ++++- 2 files changed, 17 insertions(+), 4 deletions(-) diff --git a/core/src/main/scala/spark/network/ConnectionManager.scala b/core/src/main/scala/spark/network/ConnectionManager.scala index 0e764fff81..2bb5f5fc6b 100644 --- a/core/src/main/scala/spark/network/ConnectionManager.scala +++ b/core/src/main/scala/spark/network/ConnectionManager.scala @@ -16,6 +16,7 @@ import scala.collection.mutable.ArrayBuffer import akka.dispatch.{Await, Promise, ExecutionContext, Future} import akka.util.Duration +import akka.util.duration._ case class ConnectionManagerId(host: String, port: Int) { def toSocketAddress() = new InetSocketAddress(host, port) @@ -403,7 +404,10 @@ object ConnectionManager { (0 until count).map(i => { val bufferMessage = Message.createBufferMessage(buffer.duplicate) manager.sendMessageReliably(manager.id, bufferMessage) - }).foreach(f => {if (!f().isDefined) println("Failed")}) + }).foreach(f => { + val g = Await.result(f, 1 second) + if (!g.isDefined) println("Failed") + }) val finishTime = System.currentTimeMillis val mb = size * count / 1024.0 / 1024.0 @@ -430,7 +434,10 @@ object ConnectionManager { (0 until count).map(i => { val bufferMessage = Message.createBufferMessage(buffers(count - 1 - i).duplicate) manager.sendMessageReliably(manager.id, bufferMessage) - }).foreach(f => {if (!f().isDefined) println("Failed")}) + }).foreach(f => { + val g = Await.result(f, 1 second) + if (!g.isDefined) println("Failed") + }) val finishTime = System.currentTimeMillis val ms = finishTime - startTime @@ -457,7 +464,10 @@ object ConnectionManager { (0 until count).map(i => { val bufferMessage = Message.createBufferMessage(buffer.duplicate) manager.sendMessageReliably(manager.id, bufferMessage) - }).foreach(f => {if (!f().isDefined) println("Failed")}) + }).foreach(f => { + val g = Await.result(f, 1 second) + if (!g.isDefined) println("Failed") + }) val finishTime = System.currentTimeMillis Thread.sleep(1000) val mb = size * count / 1024.0 / 1024.0 diff --git a/core/src/main/scala/spark/network/ConnectionManagerTest.scala b/core/src/main/scala/spark/network/ConnectionManagerTest.scala index 5d21bb793f..555b3454ee 100644 --- a/core/src/main/scala/spark/network/ConnectionManagerTest.scala +++ b/core/src/main/scala/spark/network/ConnectionManagerTest.scala @@ -8,6 +8,9 @@ import scala.io.Source import java.nio.ByteBuffer import java.net.InetAddress +import akka.dispatch.Await +import akka.util.duration._ + object ConnectionManagerTest extends Logging{ def main(args: Array[String]) { if (args.length < 2) { @@ -53,7 +56,7 @@ object ConnectionManagerTest extends Logging{ logInfo("Sending [" + bufferMessage + "] to [" + slaveConnManagerId + "]") connManager.sendMessageReliably(slaveConnManagerId, bufferMessage) }) - val results = futures.map(f => f()) + val results = futures.map(f => Await.result(f, 1.second)) val finishTime = System.currentTimeMillis Thread.sleep(5000) From c4366eb76425d1c6aeaa7df750a2681a0da75db8 Mon Sep 17 00:00:00 2001 From: root Date: Fri, 31 Aug 2012 00:34:24 +0000 Subject: [PATCH 032/226] Fixes to ShuffleFetcher --- .../spark/BlockStoreShuffleFetcher.scala | 41 ++++++++----------- 1 file changed, 17 
insertions(+), 24 deletions(-) diff --git a/core/src/main/scala/spark/BlockStoreShuffleFetcher.scala b/core/src/main/scala/spark/BlockStoreShuffleFetcher.scala index 45a14c8290..0bbdb4e432 100644 --- a/core/src/main/scala/spark/BlockStoreShuffleFetcher.scala +++ b/core/src/main/scala/spark/BlockStoreShuffleFetcher.scala @@ -32,36 +32,29 @@ class BlockStoreShuffleFetcher extends ShuffleFetcher with Logging { (address, splits.map(i => "shuffleid_%d_%d_%d".format(shuffleId, i, reduceId))) } - try { - for ((blockId, blockOption) <- blockManager.getMultiple(blocksByAddress)) { - blockOption match { - case Some(block) => { - val values = block - for(value <- values) { - val v = value.asInstanceOf[(K, V)] - func(v._1, v._2) - } - } - case None => { - throw new BlockException(blockId, "Did not get block " + blockId) + for ((blockId, blockOption) <- blockManager.getMultiple(blocksByAddress)) { + blockOption match { + case Some(block) => { + val values = block + for(value <- values) { + val v = value.asInstanceOf[(K, V)] + func(v._1, v._2) } } - } - } catch { - // TODO: this is really ugly -- let's find a better way of throwing a FetchFailedException - case be: BlockException => { - val regex = "shuffleid_([0-9]*)_([0-9]*)_([0-9]]*)".r - be.blockId match { - case regex(sId, mId, rId) => { - val address = addresses(mId.toInt) - throw new FetchFailedException(address, sId.toInt, mId.toInt, rId.toInt, be) - } - case _ => { - throw be + case None => { + val regex = "shuffleid_([0-9]*)_([0-9]*)_([0-9]*)".r + blockId match { + case regex(shufId, mapId, reduceId) => + val addr = addresses(mapId.toInt) + throw new FetchFailedException(addr, shufId.toInt, mapId.toInt, reduceId.toInt, null) + case _ => + throw new SparkException( + "Failed to get block " + blockId + ", which is not a shuffle block") } } } } + logDebug("Fetching and merging outputs of shuffle %d, reduce %d took %d ms".format( shuffleId, reduceId, System.currentTimeMillis - startTime)) } From d4d2cb670f9a19a6ab151e51398ba135445f1bb9 Mon Sep 17 00:00:00 2001 From: root Date: Fri, 31 Aug 2012 00:34:57 +0000 Subject: [PATCH 033/226] Make checkpoint interval configurable in WordCount2 --- .../spark/streaming/examples/WordCount2.scala | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/streaming/src/main/scala/spark/streaming/examples/WordCount2.scala b/streaming/src/main/scala/spark/streaming/examples/WordCount2.scala index 3b2d909584..d4b7461099 100644 --- a/streaming/src/main/scala/spark/streaming/examples/WordCount2.scala +++ b/streaming/src/main/scala/spark/streaming/examples/WordCount2.scala @@ -70,17 +70,17 @@ object WordCount2 { def main (args: Array[String]) { - if (args.length != 5) { - println ("Usage: WordCount2 ") + if (args.length != 6) { + println ("Usage: WordCount2 ") System.exit(1) } - val Array(master, file, mapTasks, reduceTasks, batchMillis) = args + val Array(master, file, mapTasks, reduceTasks, batchMillis, chkptMillis) = args - val BATCH_DURATION = Milliseconds(batchMillis.toLong) + val batchDuration = Milliseconds(batchMillis.toLong) val ssc = new StreamingContext(master, "WordCount2") - ssc.setBatchDuration(BATCH_DURATION) + ssc.setBatchDuration(batchDuration) val data = ssc.sc.textFile(file, mapTasks.toInt).persist(StorageLevel.MEMORY_ONLY_DESER_2) println("Data count: " + data.count()) @@ -94,8 +94,9 @@ object WordCount2 { val windowedCounts = sentences .mapPartitions(splitAndCountPartitions) - .reduceByKeyAndWindow(add _, subtract _, Seconds(10), BATCH_DURATION, reduceTasks.toInt) - 
windowedCounts.persist(StorageLevel.MEMORY_ONLY_DESER, StorageLevel.MEMORY_ONLY_DESER_2, Seconds(10)) + .reduceByKeyAndWindow(add _, subtract _, Seconds(10), batchDuration, reduceTasks.toInt) + windowedCounts.persist(StorageLevel.MEMORY_ONLY_DESER, StorageLevel.MEMORY_ONLY_DESER_2, + Milliseconds(chkptMillis.toLong)) windowedCounts.print() ssc.start() From 1b3e3352ebfed40881d534cd3096d4b6428c24d4 Mon Sep 17 00:00:00 2001 From: Matei Zaharia Date: Thu, 30 Aug 2012 17:59:25 -0700 Subject: [PATCH 034/226] Deserialize multi-get results in the caller's thread. This fixes an issue with shared buffers with the KryoSerializer. --- .../main/scala/spark/storage/BlockManager.scala | 17 ++++++++++------- 1 file changed, 10 insertions(+), 7 deletions(-) diff --git a/core/src/main/scala/spark/storage/BlockManager.scala b/core/src/main/scala/spark/storage/BlockManager.scala index 45f99717bc..e9197f7169 100644 --- a/core/src/main/scala/spark/storage/BlockManager.scala +++ b/core/src/main/scala/spark/storage/BlockManager.scala @@ -272,11 +272,15 @@ class BlockManager(val master: BlockManagerMaster, val serializer: Serializer, m val totalBlocks = blocksByAddress.map(_._2.size).sum logDebug("Getting " + totalBlocks + " blocks") var startTime = System.currentTimeMillis - val results = new LinkedBlockingQueue[(String, Option[Iterator[Any]])] val localBlockIds = new ArrayBuffer[String]() val remoteBlockIds = new ArrayBuffer[String]() val remoteBlockIdsPerLocation = new HashMap[BlockManagerId, Seq[String]]() + // A queue to hold our results. Because we want all the deserializing the happen in the + // caller's thread, this will actually hold functions to produce the Iterator for each block. + // For local blocks we'll have an iterator already, while for remote ones we'll deserialize. + val results = new LinkedBlockingQueue[(String, Option[() => Iterator[Any]])] + // Split local and remote blocks for ((address, blockIds) <- blocksByAddress) { if (address == blockManagerId) { @@ -302,10 +306,8 @@ class BlockManager(val master: BlockManagerMaster, val serializer: Serializer, m throw new SparkException( "Unexpected message " + blockMessage.getType + " received from " + cmId) } - val buffer = blockMessage.getData val blockId = blockMessage.getId - val block = dataDeserialize(buffer) - results.put((blockId, Some(block))) + results.put((blockId, Some(() => dataDeserialize(blockMessage.getData)))) logDebug("Got remote block " + blockId + " after " + Utils.getUsedTimeMs(startTime)) }) } @@ -323,9 +325,9 @@ class BlockManager(val master: BlockManagerMaster, val serializer: Serializer, m // Get the local blocks while remote blocks are being fetched startTime = System.currentTimeMillis localBlockIds.foreach(id => { - get(id) match { + getLocal(id) match { case Some(block) => { - results.put((id, Some(block))) + results.put((id, Some(() => block))) logDebug("Got local block " + id) } case None => { @@ -343,7 +345,8 @@ class BlockManager(val master: BlockManagerMaster, val serializer: Serializer, m def next(): (String, Option[Iterator[Any]]) = { resultsGotten += 1 - results.take() + val (blockId, functionOption) = results.take() + (blockId, functionOption.map(_.apply())) } } } From 101ae493e26693146114ac01d50d411f5b2e0762 Mon Sep 17 00:00:00 2001 From: Matei Zaharia Date: Thu, 30 Aug 2012 22:24:14 -0700 Subject: [PATCH 035/226] Replicate serialized blocks properly, without sharing a ByteBuffer. 
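The one-line change below passes bytes.duplicate() to the replication future instead of the original buffer. As a rough standalone sketch (not part of the patch), duplicate() creates a second buffer over the same backing bytes with its own position and limit, so the asynchronous replication read no longer races with the caller's use of the buffer:

    import java.nio.ByteBuffer

    object DuplicateSketch {
      def main(args: Array[String]) {
        val bytes = ByteBuffer.wrap(Array[Byte](1, 2, 3, 4))
        val view = bytes.duplicate() // same backing data, independent position/limit
        view.get()                   // advancing the view...
        view.get()
        println(bytes.position())    // ...leaves the original buffer at position 0
        println(view.position())     // while the view sits at position 2
      }
    }
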
--- core/src/main/scala/spark/storage/BlockManager.scala | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/core/src/main/scala/spark/storage/BlockManager.scala b/core/src/main/scala/spark/storage/BlockManager.scala index e9197f7169..8a013230da 100644 --- a/core/src/main/scala/spark/storage/BlockManager.scala +++ b/core/src/main/scala/spark/storage/BlockManager.scala @@ -456,7 +456,7 @@ class BlockManager(val master: BlockManagerMaster, val serializer: Serializer, m // data is already serialized and ready for sending val replicationFuture = if (level.replication > 1) { Future { - replicate(blockId, bytes, level) + replicate(blockId, bytes.duplicate(), level) } } else { null From 113277549c5ee1bcd58c7cebc365d28d92b74b4a Mon Sep 17 00:00:00 2001 From: root Date: Fri, 31 Aug 2012 05:39:35 +0000 Subject: [PATCH 036/226] Really fixed the replication-3 issue. The problem was a few buffers not being rewound. --- core/src/main/scala/spark/storage/BlockManager.scala | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/core/src/main/scala/spark/storage/BlockManager.scala b/core/src/main/scala/spark/storage/BlockManager.scala index 8a013230da..f2d9499bad 100644 --- a/core/src/main/scala/spark/storage/BlockManager.scala +++ b/core/src/main/scala/spark/storage/BlockManager.scala @@ -455,8 +455,9 @@ class BlockManager(val master: BlockManagerMaster, val serializer: Serializer, m // Initiate the replication before storing it locally. This is faster as // data is already serialized and ready for sending val replicationFuture = if (level.replication > 1) { + val bufferView = bytes.duplicate() // Doesn't copy the bytes, just creates a wrapper Future { - replicate(blockId, bytes.duplicate(), level) + replicate(blockId, bufferView, level) } } else { null @@ -514,15 +515,16 @@ class BlockManager(val master: BlockManagerMaster, val serializer: Serializer, m var peers = master.mustGetPeers(GetPeers(blockManagerId, level.replication - 1)) for (peer: BlockManagerId <- peers) { val start = System.nanoTime + data.rewind() logDebug("Try to replicate BlockId " + blockId + " once; The size of the data is " - + data.array().length + " Bytes. To node: " + peer) + + data.limit() + " Bytes. 
To node: " + peer) if (!BlockManagerWorker.syncPutBlock(PutBlock(blockId, data, tLevel), new ConnectionManagerId(peer.ip, peer.port))) { logError("Failed to call syncPutBlock to " + peer) } logDebug("Replicated BlockId " + blockId + " once used " + (System.nanoTime - start) / 1e6 + " s; The size of the data is " + - data.array().length + " bytes.") + data.limit() + " bytes.") } } From e1da274a486b6fd5903d9b3643dc07c79973b81d Mon Sep 17 00:00:00 2001 From: root Date: Fri, 31 Aug 2012 07:16:19 +0000 Subject: [PATCH 037/226] WordCount tweaks --- .../spark/streaming/examples/WordCount2.scala | 17 ++++++++++------- 1 file changed, 10 insertions(+), 7 deletions(-) diff --git a/streaming/src/main/scala/spark/streaming/examples/WordCount2.scala b/streaming/src/main/scala/spark/streaming/examples/WordCount2.scala index d4b7461099..a090dcb85d 100644 --- a/streaming/src/main/scala/spark/streaming/examples/WordCount2.scala +++ b/streaming/src/main/scala/spark/streaming/examples/WordCount2.scala @@ -59,12 +59,12 @@ object WordCount2_ExtraFunctions { object WordCount2 { - def moreWarmup(sc: SparkContext) { - (0 until 40).foreach {i => + def warmup(sc: SparkContext) { + (0 until 10).foreach {i => sc.parallelize(1 to 20000000, 1000) - .map(_ % 1331).map(_.toString) - .mapPartitions(WordCount2_ExtraFunctions.splitAndCountPartitions).reduceByKey(_ + _, 10) - .collect() + .map(x => (x % 337, x % 1331)) + .reduceByKey(_ + _) + .count() } } @@ -82,7 +82,10 @@ object WordCount2 { val ssc = new StreamingContext(master, "WordCount2") ssc.setBatchDuration(batchDuration) - val data = ssc.sc.textFile(file, mapTasks.toInt).persist(StorageLevel.MEMORY_ONLY_DESER_2) + //warmup(ssc.sc) + + val data = ssc.sc.textFile(file, mapTasks.toInt).persist( + new StorageLevel(false, true, true, 2)) // Memory only, deserialized, 2 replicas println("Data count: " + data.count()) println("Data count: " + data.count()) println("Data count: " + data.count()) @@ -94,7 +97,7 @@ object WordCount2 { val windowedCounts = sentences .mapPartitions(splitAndCountPartitions) - .reduceByKeyAndWindow(add _, subtract _, Seconds(10), batchDuration, reduceTasks.toInt) + .reduceByKeyAndWindow(add _, subtract _, Seconds(30), batchDuration, reduceTasks.toInt) windowedCounts.persist(StorageLevel.MEMORY_ONLY_DESER, StorageLevel.MEMORY_ONLY_DESER_2, Milliseconds(chkptMillis.toLong)) windowedCounts.print() From 2d01d38a4199590145551a108903a3ac7cffcceb Mon Sep 17 00:00:00 2001 From: Tathagata Das Date: Fri, 31 Aug 2012 03:47:34 -0700 Subject: [PATCH 038/226] Added StateDStream, corresponding stateful stream operations, and testcases. Also refactored few PairDStreamFunctions methods. 
--- .../main/scala/spark/streaming/DStream.scala | 31 ++-- .../streaming/PairDStreamFunctions.scala | 162 ++++++++++++++---- .../streaming/ReducedWindowedDStream.scala | 13 +- .../scala/spark/streaming/Scheduler.scala | 2 +- .../scala/spark/streaming/StateDStream.scala | 83 +++++++++ .../streaming/examples/WordCountNetwork.scala | 4 +- .../scala/spark/streaming/DStreamSuite.scala | 92 +++++++--- 7 files changed, 304 insertions(+), 83 deletions(-) create mode 100644 streaming/src/main/scala/spark/streaming/StateDStream.scala diff --git a/streaming/src/main/scala/spark/streaming/DStream.scala b/streaming/src/main/scala/spark/streaming/DStream.scala index 3a57488f9b..8c06345933 100644 --- a/streaming/src/main/scala/spark/streaming/DStream.scala +++ b/streaming/src/main/scala/spark/streaming/DStream.scala @@ -3,13 +3,12 @@ package spark.streaming import spark.streaming.StreamingContext._ import spark.RDD -import spark.BlockRDD import spark.UnionRDD import spark.Logging -import spark.SparkContext import spark.SparkContext._ import spark.storage.StorageLevel - +import spark.Partitioner + import scala.collection.mutable.ArrayBuffer import scala.collection.mutable.HashMap @@ -95,12 +94,12 @@ extends Logging with Serializable { /** This method checks whether the 'time' is valid wrt slideTime for generating RDD */ private def isTimeValid (time: Time): Boolean = { - if (!isInitialized) + if (!isInitialized) { throw new Exception (this.toString + " has not been initialized") - if ((time - zeroTime).isMultipleOf(slideTime)) { - true - } else { + } else if (time < zeroTime || ! (time - zeroTime).isMultipleOf(slideTime)) { false + } else { + true } } @@ -119,7 +118,7 @@ extends Logging with Serializable { // if RDD was not generated, and if the time is valid // (based on sliding time of this DStream), then generate the RDD - case None => + case None => { if (isTimeValid(time)) { compute(time) match { case Some(newRDD) => @@ -138,6 +137,7 @@ extends Logging with Serializable { } else { None } + } } } @@ -361,24 +361,19 @@ class ShuffledDStream[K: ClassManifest, V: ClassManifest, C: ClassManifest]( createCombiner: V => C, mergeValue: (C, V) => C, mergeCombiner: (C, C) => C, - numPartitions: Int) + partitioner: Partitioner) extends DStream [(K,C)] (parent.ssc) { override def dependencies = List(parent) override def slideTime: Time = parent.slideTime + + override def compute(validTime: Time): Option[RDD[(K,C)]] = { parent.getOrCompute(validTime) match { - case Some(rdd) => - val newrdd = { - if (numPartitions > 0) { - rdd.combineByKey[C](createCombiner, mergeValue, mergeCombiner, numPartitions) - } else { - rdd.combineByKey[C](createCombiner, mergeValue, mergeCombiner) - } - } - Some(newrdd) + case Some(rdd) => + Some(rdd.combineByKey[C](createCombiner, mergeValue, mergeCombiner, partitioner)) case None => None } } diff --git a/streaming/src/main/scala/spark/streaming/PairDStreamFunctions.scala b/streaming/src/main/scala/spark/streaming/PairDStreamFunctions.scala index d2887c3aea..13db34ac80 100644 --- a/streaming/src/main/scala/spark/streaming/PairDStreamFunctions.scala +++ b/streaming/src/main/scala/spark/streaming/PairDStreamFunctions.scala @@ -1,71 +1,169 @@ package spark.streaming import scala.collection.mutable.ArrayBuffer +import spark.Partitioner +import spark.HashPartitioner import spark.streaming.StreamingContext._ +import javax.annotation.Nullable class PairDStreamFunctions[K: ClassManifest, V: ClassManifest](stream: DStream[(K,V)]) extends Serializable { def ssc = stream.ssc + def 
defaultPartitioner(numPartitions: Int = stream.ssc.sc.defaultParallelism) = { + new HashPartitioner(numPartitions) + } + /* ---------------------------------- */ /* DStream operations for key-value pairs */ /* ---------------------------------- */ - - def groupByKey(numPartitions: Int = 0): ShuffledDStream[K, V, ArrayBuffer[V]] = { + + def groupByKey(): ShuffledDStream[K, V, ArrayBuffer[V]] = { + groupByKey(defaultPartitioner()) + } + + def groupByKey(numPartitions: Int): ShuffledDStream[K, V, ArrayBuffer[V]] = { + groupByKey(defaultPartitioner(numPartitions)) + } + + def groupByKey(partitioner: Partitioner): ShuffledDStream[K, V, ArrayBuffer[V]] = { def createCombiner(v: V) = ArrayBuffer[V](v) def mergeValue(c: ArrayBuffer[V], v: V) = (c += v) def mergeCombiner(c1: ArrayBuffer[V], c2: ArrayBuffer[V]) = (c1 ++ c2) - combineByKey[ArrayBuffer[V]](createCombiner, mergeValue, mergeCombiner, numPartitions) + combineByKey[ArrayBuffer[V]](createCombiner _, mergeValue _, mergeCombiner _, partitioner) } - - def reduceByKey(reduceFunc: (V, V) => V, numPartitions: Int = 0): ShuffledDStream[K, V, V] = { + + def reduceByKey(reduceFunc: (V, V) => V): ShuffledDStream[K, V, V] = { + reduceByKey(reduceFunc, defaultPartitioner()) + } + + def reduceByKey(reduceFunc: (V, V) => V, numPartitions: Int): ShuffledDStream[K, V, V] = { + reduceByKey(reduceFunc, defaultPartitioner(numPartitions)) + } + + def reduceByKey(reduceFunc: (V, V) => V, partitioner: Partitioner): ShuffledDStream[K, V, V] = { val cleanedReduceFunc = ssc.sc.clean(reduceFunc) - combineByKey[V]((v: V) => v, cleanedReduceFunc, cleanedReduceFunc, numPartitions) + combineByKey[V]((v: V) => v, cleanedReduceFunc, cleanedReduceFunc, partitioner) } private def combineByKey[C: ClassManifest]( createCombiner: V => C, mergeValue: (C, V) => C, mergeCombiner: (C, C) => C, - numPartitions: Int) : ShuffledDStream[K, V, C] = { - new ShuffledDStream[K, V, C](stream, createCombiner, mergeValue, mergeCombiner, numPartitions) + partitioner: Partitioner) : ShuffledDStream[K, V, C] = { + new ShuffledDStream[K, V, C](stream, createCombiner, mergeValue, mergeCombiner, partitioner) + } + + def groupByKeyAndWindow(windowTime: Time, slideTime: Time): ShuffledDStream[K, V, ArrayBuffer[V]] = { + groupByKeyAndWindow(windowTime, slideTime, defaultPartitioner()) + } + + def groupByKeyAndWindow(windowTime: Time, slideTime: Time, numPartitions: Int): ShuffledDStream[K, V, ArrayBuffer[V]] = { + groupByKeyAndWindow(windowTime, slideTime, defaultPartitioner(numPartitions)) } def groupByKeyAndWindow( - windowTime: Time, - slideTime: Time, - numPartitions: Int = 0): ShuffledDStream[K, V, ArrayBuffer[V]] = { - stream.window(windowTime, slideTime).groupByKey(numPartitions) + windowTime: Time, + slideTime: Time, + partitioner: Partitioner + ): ShuffledDStream[K, V, ArrayBuffer[V]] = { + stream.window(windowTime, slideTime).groupByKey(partitioner) + } + + def reduceByKeyAndWindow(reduceFunc: (V, V) => V, windowTime: Time, slideTime: Time): ShuffledDStream[K, V, V] = { + reduceByKeyAndWindow(reduceFunc, windowTime, slideTime, defaultPartitioner()) + } + + def reduceByKeyAndWindow(reduceFunc: (V, V) => V, windowTime: Time, slideTime: Time, numPartitions: Int): ShuffledDStream[K, V, V] = { + reduceByKeyAndWindow(reduceFunc, windowTime, slideTime, defaultPartitioner(numPartitions)) } def reduceByKeyAndWindow( - reduceFunc: (V, V) => V, - windowTime: Time, - slideTime: Time, - numPartitions: Int = 0): ShuffledDStream[K, V, V] = { - stream.window(windowTime, 
slideTime).reduceByKey(ssc.sc.clean(reduceFunc), numPartitions) + reduceFunc: (V, V) => V, + windowTime: Time, + slideTime: Time, + partitioner: Partitioner + ): ShuffledDStream[K, V, V] = { + stream.window(windowTime, slideTime).reduceByKey(ssc.sc.clean(reduceFunc), partitioner) } - // This method is the efficient sliding window reduce operation, - // which requires the specification of an inverse reduce function, - // so that new elements introduced in the window can be "added" using - // reduceFunc to the previous window's result and old elements can be + // This method is the efficient sliding window reduce operation, + // which requires the specification of an inverse reduce function, + // so that new elements introduced in the window can be "added" using + // reduceFunc to the previous window's result and old elements can be // "subtracted using invReduceFunc. def reduceByKeyAndWindow( - reduceFunc: (V, V) => V, - invReduceFunc: (V, V) => V, - windowTime: Time, - slideTime: Time, - numPartitions: Int): ReducedWindowedDStream[K, V] = { + reduceFunc: (V, V) => V, + invReduceFunc: (V, V) => V, + windowTime: Time, + slideTime: Time + ): ReducedWindowedDStream[K, V] = { + reduceByKeyAndWindow( + reduceFunc, invReduceFunc, windowTime, slideTime, defaultPartitioner()) + } + + def reduceByKeyAndWindow( + reduceFunc: (V, V) => V, + invReduceFunc: (V, V) => V, + windowTime: Time, + slideTime: Time, + numPartitions: Int + ): ReducedWindowedDStream[K, V] = { + + reduceByKeyAndWindow( + reduceFunc, invReduceFunc, windowTime, slideTime, defaultPartitioner(numPartitions)) + } + + def reduceByKeyAndWindow( + reduceFunc: (V, V) => V, + invReduceFunc: (V, V) => V, + windowTime: Time, + slideTime: Time, + partitioner: Partitioner + ): ReducedWindowedDStream[K, V] = { + + val cleanedReduceFunc = ssc.sc.clean(reduceFunc) + val cleanedInvReduceFunc = ssc.sc.clean(invReduceFunc) new ReducedWindowedDStream[K, V]( - stream, - ssc.sc.clean(reduceFunc), - ssc.sc.clean(invReduceFunc), - windowTime, - slideTime, - numPartitions) + stream, cleanedReduceFunc, cleanedInvReduceFunc, windowTime, slideTime, partitioner) + } + + // TODO: + // + // + // + // + def updateStateByKey[S <: AnyRef : ClassManifest]( + updateFunc: (Seq[V], S) => S + ): StateDStream[K, V, S] = { + updateStateByKey(updateFunc, defaultPartitioner()) + } + + def updateStateByKey[S <: AnyRef : ClassManifest]( + updateFunc: (Seq[V], S) => S, + numPartitions: Int + ): StateDStream[K, V, S] = { + updateStateByKey(updateFunc, defaultPartitioner(numPartitions)) + } + + def updateStateByKey[S <: AnyRef : ClassManifest]( + updateFunc: (Seq[V], S) => S, + partitioner: Partitioner + ): StateDStream[K, V, S] = { + val func = (iterator: Iterator[(K, Seq[V], S)]) => { + iterator.map(tuple => (tuple._1, updateFunc(tuple._2, tuple._3))) + } + updateStateByKey(func, partitioner, true) + } + + def updateStateByKey[S <: AnyRef : ClassManifest]( + updateFunc: (Iterator[(K, Seq[V], S)]) => Iterator[(K, S)], + partitioner: Partitioner, + rememberPartitioner: Boolean + ): StateDStream[K, V, S] = { + new StateDStream(stream, ssc.sc.clean(updateFunc), partitioner, rememberPartitioner) } } diff --git a/streaming/src/main/scala/spark/streaming/ReducedWindowedDStream.scala b/streaming/src/main/scala/spark/streaming/ReducedWindowedDStream.scala index 896e7dbafb..191d264b2b 100644 --- a/streaming/src/main/scala/spark/streaming/ReducedWindowedDStream.scala +++ b/streaming/src/main/scala/spark/streaming/ReducedWindowedDStream.scala @@ -5,7 +5,7 @@ import 
spark.streaming.StreamingContext._ import spark.RDD import spark.UnionRDD import spark.CoGroupedRDD -import spark.HashPartitioner +import spark.Partitioner import spark.SparkContext._ import spark.storage.StorageLevel @@ -17,8 +17,8 @@ class ReducedWindowedDStream[K: ClassManifest, V: ClassManifest]( invReduceFunc: (V, V) => V, _windowTime: Time, _slideTime: Time, - numPartitions: Int) -extends DStream[(K,V)](parent.ssc) { + partitioner: Partitioner + ) extends DStream[(K,V)](parent.ssc) { if (!_windowTime.isMultipleOf(parent.slideTime)) throw new Exception("The window duration of ReducedWindowedDStream (" + _slideTime + ") " + @@ -28,7 +28,7 @@ extends DStream[(K,V)](parent.ssc) { throw new Exception("The slide duration of ReducedWindowedDStream (" + _slideTime + ") " + "must be multiple of the slide duration of parent DStream (" + parent.slideTime + ")") - val reducedStream = parent.reduceByKey(reduceFunc, numPartitions) + val reducedStream = parent.reduceByKey(reduceFunc, partitioner) val allowPartialWindows = true //reducedStream.persist(StorageLevel.MEMORY_ONLY_DESER_2) @@ -104,7 +104,7 @@ extends DStream[(K,V)](parent.ssc) { if (reducedRDDs.size == 0) { throw new Exception("Could not generate the first RDD for time " + validTime) } - return Some(new UnionRDD(ssc.sc, reducedRDDs).reduceByKey(reduceFunc, numPartitions)) + return Some(new UnionRDD(ssc.sc, reducedRDDs).reduceByKey(partitioner, reduceFunc)) } } } @@ -137,8 +137,7 @@ extends DStream[(K,V)](parent.ssc) { } t -= reducedStream.slideTime } - - val partitioner = new HashPartitioner(numPartitions) + val allRDDs = new ArrayBuffer[RDD[(_, _)]]() allRDDs += previousWindowRDD allRDDs ++= oldRDDs diff --git a/streaming/src/main/scala/spark/streaming/Scheduler.scala b/streaming/src/main/scala/spark/streaming/Scheduler.scala index da50e22719..12e52bf56c 100644 --- a/streaming/src/main/scala/spark/streaming/Scheduler.scala +++ b/streaming/src/main/scala/spark/streaming/Scheduler.scala @@ -41,7 +41,7 @@ extends Logging { def generateRDDs (time: Time) { println("\n-----------------------------------------------------\n") - logInfo("Generating RDDs for time " + time) + logInfo("Generating RDDs for time " + time) outputStreams.foreach(outputStream => { outputStream.generateJob(time) match { case Some(job) => submitJob(job) diff --git a/streaming/src/main/scala/spark/streaming/StateDStream.scala b/streaming/src/main/scala/spark/streaming/StateDStream.scala new file mode 100644 index 0000000000..eabb33d89e --- /dev/null +++ b/streaming/src/main/scala/spark/streaming/StateDStream.scala @@ -0,0 +1,83 @@ +package spark.streaming + +import spark.RDD +import spark.Partitioner +import spark.MapPartitionsRDD +import spark.SparkContext._ +import javax.annotation.Nullable + + +class StateDStream[K: ClassManifest, V: ClassManifest, S <: AnyRef : ClassManifest]( + parent: DStream[(K, V)], + updateFunc: (Iterator[(K, Seq[V], S)]) => Iterator[(K, S)], + partitioner: Partitioner, + rememberPartitioner: Boolean + ) extends DStream[(K, S)](parent.ssc) { + + class SpecialMapPartitionsRDD[U: ClassManifest, T: ClassManifest](prev: RDD[T], f: Iterator[T] => Iterator[U]) + extends MapPartitionsRDD(prev, f) { + override val partitioner = if (rememberPartitioner) prev.partitioner else None + } + + override def dependencies = List(parent) + + override def slideTime = parent.slideTime + + override def compute(validTime: Time): Option[RDD[(K, S)]] = { + + // Try to get the previous state RDD + getOrCompute(validTime - slideTime) match { + + case Some(prevStateRDD) => 
{ // If previous state RDD exists + + // Define the function for the mapPartition operation on cogrouped RDD; + // first map the cogrouped tuple to tuples of required type, + // and then apply the update function + val func = (iterator: Iterator[(K, (Seq[V], Seq[S]))]) => { + val i = iterator.map(t => { + (t._1, t._2._1, t._2._2.headOption.getOrElse(null.asInstanceOf[S])) + }) + updateFunc(i) + } + + // Try to get the parent RDD + parent.getOrCompute(validTime) match { + case Some(parentRDD) => { // If parent RDD exists, then compute as usual + val cogroupedRDD = parentRDD.cogroup(prevStateRDD, partitioner) + val stateRDD = new SpecialMapPartitionsRDD(cogroupedRDD, func) + logDebug("Generating state RDD for time " + validTime) + return Some(stateRDD) + } + case None => { // If parent RDD does not exist, then return old state RDD + logDebug("Generating state RDD for time " + validTime + " (no change)") + return Some(prevStateRDD) + } + } + } + + case None => { // If previous session RDD does not exist (first input data) + + // Define the function for the mapPartition operation on grouped RDD; + // first map the grouped tuple to tuples of required type, + // and then apply the update function + val func = (iterator: Iterator[(K, Seq[V])]) => { + updateFunc(iterator.map(tuple => (tuple._1, tuple._2, null.asInstanceOf[S]))) + } + + // Try to get the parent RDD + parent.getOrCompute(validTime) match { + case Some(parentRDD) => { // If parent RDD exists, then compute as usual + val groupedRDD = parentRDD.groupByKey(partitioner) + val sessionRDD = new SpecialMapPartitionsRDD(groupedRDD, func) + logDebug("Generating state RDD for time " + validTime + " (first)") + return Some(sessionRDD) + } + case None => { // If parent RDD does not exist, then nothing to do! + logDebug("Not generating state RDD (no previous state, no parent)") + return None + } + } + } + } + } +} diff --git a/streaming/src/main/scala/spark/streaming/examples/WordCountNetwork.scala b/streaming/src/main/scala/spark/streaming/examples/WordCountNetwork.scala index 0a33a05bae..0aa5294a17 100644 --- a/streaming/src/main/scala/spark/streaming/examples/WordCountNetwork.scala +++ b/streaming/src/main/scala/spark/streaming/examples/WordCountNetwork.scala @@ -14,8 +14,8 @@ object WordCountNetwork { val ssc = new StreamingContext(args(0), "WordCountNetwork") ssc.setBatchDuration(Seconds(2)) - // Create the FileInputDStream on the directory and use the - // stream to count words in new files created + // Create a NetworkInputDStream on target ip:port and count the + // words in input stream of \n delimited test (eg. 
generated by 'nc') val lines = ssc.createNetworkTextStream(args(1), args(2).toInt) val words = lines.flatMap(_.split(" ")) val wordCounts = words.map(x => (x, 1)).reduceByKey(_ + _) diff --git a/streaming/src/test/scala/spark/streaming/DStreamSuite.scala b/streaming/src/test/scala/spark/streaming/DStreamSuite.scala index e9dc377263..d5eb20b37e 100644 --- a/streaming/src/test/scala/spark/streaming/DStreamSuite.scala +++ b/streaming/src/test/scala/spark/streaming/DStreamSuite.scala @@ -1,14 +1,14 @@ package spark.streaming import spark.Logging -import spark.RDD +import spark.streaming.StreamingContext._ import spark.streaming.util.ManualClock import org.scalatest.FunSuite import org.scalatest.BeforeAndAfter import scala.collection.mutable.ArrayBuffer -import scala.collection.mutable.SynchronizedQueue +import scala.runtime.RichInt class DStreamSuite extends FunSuite with BeforeAndAfter with Logging { @@ -20,7 +20,9 @@ class DStreamSuite extends FunSuite with BeforeAndAfter with Logging { def testOp[U: ClassManifest, V: ClassManifest]( input: Seq[Seq[U]], operation: DStream[U] => DStream[V], - expectedOutput: Seq[Seq[V]]) { + expectedOutput: Seq[Seq[V]], + useSet: Boolean = false + ) { try { ssc = new StreamingContext("local", "test") ssc.setBatchDuration(Milliseconds(batchDurationMillis)) @@ -33,45 +35,89 @@ class DStreamSuite extends FunSuite with BeforeAndAfter with Logging { val clock = ssc.scheduler.clock.asInstanceOf[ManualClock] clock.addToTime(input.size * batchDurationMillis) - Thread.sleep(100) + Thread.sleep(1000) val output = new ArrayBuffer[Seq[V]]() while(outputQueue.size > 0) { val rdd = outputQueue.take() output += (rdd.collect()) } + assert(output.size === expectedOutput.size) for (i <- 0 until output.size) { - assert(output(i).toList === expectedOutput(i).toList) + if (useSet) { + assert(output(i).toSet === expectedOutput(i).toSet) + } else { + assert(output(i).toList === expectedOutput(i).toList) + } } } finally { ssc.stop() } } - - test("basic operations") { - val inputData = Array(1 to 4, 5 to 8, 9 to 12) + + test("map-like operations") { + val inputData = Seq(1 to 4, 5 to 8, 9 to 12) // map testOp(inputData, (r: DStream[Int]) => r.map(_.toString), inputData.map(_.map(_.toString))) // flatMap - testOp(inputData, (r: DStream[Int]) => r.flatMap(x => Array(x, x * 2)), - inputData.map(_.flatMap(x => Array(x, x * 2))) + testOp( + inputData, + (r: DStream[Int]) => r.flatMap(x => Seq(x, x * 2)), + inputData.map(_.flatMap(x => Array(x, x * 2))) ) } -} -object DStreamSuite { - def main(args: Array[String]) { - try { - val r = new DStreamSuite() - val inputData = Array(1 to 4, 5 to 8, 9 to 12) - r.testOp(inputData, (r: DStream[Int]) => r.map(_.toString), inputData.map(_.map(_.toString))) - - } catch { - case e: Exception => e.printStackTrace() - } - System.exit(0) + test("shuffle-based operations") { + // reduceByKey + testOp( + Seq(Seq("a", "a", "b"), Seq("", ""), Seq()), + (s: DStream[String]) => s.map(x => (x, 1)).reduceByKey(_ + _), + Seq(Seq(("a", 2), ("b", 1)), Seq(("", 2)), Seq()), + true + ) + + // reduce + testOp( + Seq(1 to 4, 5 to 8, 9 to 12), + (s: DStream[Int]) => s.reduce(_ + _), + Seq(Seq(10), Seq(26), Seq(42)) + ) } -} \ No newline at end of file + + test("window-based operations") { + + } + + + test("stateful operations") { + val inputData = + Seq( + Seq("a", "b", "c"), + Seq("a", "b", "c"), + Seq("a", "b", "c") + ) + + val outputData = + Seq( + Seq(("a", 1), ("b", 1), ("c", 1)), + Seq(("a", 2), ("b", 2), ("c", 2)), + Seq(("a", 3), ("b", 3), ("c", 3)) + 
)//.map(array => array.toSeq.map(x => (x._1, new RichInt(x._2)))) + + val updateStateOp =(s: DStream[String]) => { + val updateFunc = (values: Seq[Int], state: RichInt) => { + var newState = 0 + if (values != null) newState += values.reduce(_ + _) + if (state != null) newState += state.self + //println("values = " + values + ", state = " + state + ", " + " new state = " + newState) + new RichInt(newState) + } + s.map(x => (x, 1)).updateStateByKey[RichInt](updateFunc).map(t => (t._1, t._2.self)) + } + + testOp(inputData, updateStateOp, outputData, true) + } +} From f92d4a6ac1f349fdfc88d4ef3b122ae32385b17e Mon Sep 17 00:00:00 2001 From: Matei Zaharia Date: Fri, 31 Aug 2012 15:33:48 -0700 Subject: [PATCH 039/226] Better output messages for streaming job duration --- streaming/src/main/scala/spark/streaming/Job.scala | 2 +- streaming/src/main/scala/spark/streaming/JobManager.scala | 6 ++---- 2 files changed, 3 insertions(+), 5 deletions(-) diff --git a/streaming/src/main/scala/spark/streaming/Job.scala b/streaming/src/main/scala/spark/streaming/Job.scala index 0bd8343b9a..0bcb6fd8dc 100644 --- a/streaming/src/main/scala/spark/streaming/Job.scala +++ b/streaming/src/main/scala/spark/streaming/Job.scala @@ -8,7 +8,7 @@ class Job(val time: Time, func: () => _) { val startTime = System.currentTimeMillis func() val stopTime = System.currentTimeMillis - (startTime - stopTime) + (stopTime - startTime) } override def toString = "streaming job " + id + " @ " + time diff --git a/streaming/src/main/scala/spark/streaming/JobManager.scala b/streaming/src/main/scala/spark/streaming/JobManager.scala index 2a4fe3dd11..8a652cbfca 100644 --- a/streaming/src/main/scala/spark/streaming/JobManager.scala +++ b/streaming/src/main/scala/spark/streaming/JobManager.scala @@ -12,10 +12,8 @@ class JobManager(ssc: StreamingContext, numThreads: Int = 1) extends Logging { SparkEnv.set(ssc.env) try { val timeTaken = job.run() - logInfo( - "Running " + job + " took " + timeTaken + " ms, " + - "total delay was " + (System.currentTimeMillis - job.time) + " ms" - ) + logInfo("Total delay: %.4f s for job %s; execution was %.4f s".format( + System.currentTimeMillis() - job.time, timeTaken)) } catch { case e: Exception => logError("Running " + job + " failed", e) From ce42a463750f37d9c1c6a2c982d3b947039233cd Mon Sep 17 00:00:00 2001 From: Matei Zaharia Date: Fri, 31 Aug 2012 15:35:35 -0700 Subject: [PATCH 040/226] Bug fix --- streaming/src/main/scala/spark/streaming/JobManager.scala | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/streaming/src/main/scala/spark/streaming/JobManager.scala b/streaming/src/main/scala/spark/streaming/JobManager.scala index 8a652cbfca..953493352c 100644 --- a/streaming/src/main/scala/spark/streaming/JobManager.scala +++ b/streaming/src/main/scala/spark/streaming/JobManager.scala @@ -13,7 +13,7 @@ class JobManager(ssc: StreamingContext, numThreads: Int = 1) extends Logging { try { val timeTaken = job.run() logInfo("Total delay: %.4f s for job %s; execution was %.4f s".format( - System.currentTimeMillis() - job.time, timeTaken)) + (System.currentTimeMillis() - job.time) / 1000.0, timeTaken / 1000.0)) } catch { case e: Exception => logError("Running " + job + " failed", e) From 51fb13dd164c29e6bf97c2e3642b07a7f416ddaa Mon Sep 17 00:00:00 2001 From: Matei Zaharia Date: Fri, 31 Aug 2012 15:36:11 -0700 Subject: [PATCH 041/226] Bug fix --- streaming/src/main/scala/spark/streaming/JobManager.scala | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git 
a/streaming/src/main/scala/spark/streaming/JobManager.scala b/streaming/src/main/scala/spark/streaming/JobManager.scala index 953493352c..40e614b4ed 100644 --- a/streaming/src/main/scala/spark/streaming/JobManager.scala +++ b/streaming/src/main/scala/spark/streaming/JobManager.scala @@ -13,7 +13,7 @@ class JobManager(ssc: StreamingContext, numThreads: Int = 1) extends Logging { try { val timeTaken = job.run() logInfo("Total delay: %.4f s for job %s; execution was %.4f s".format( - (System.currentTimeMillis() - job.time) / 1000.0, timeTaken / 1000.0)) + (System.currentTimeMillis() - job.time) / 1000.0, job.id, timeTaken / 1000.0)) } catch { case e: Exception => logError("Running " + job + " failed", e) From c42e7ac2822f697a355650a70379d9e4ce2022c0 Mon Sep 17 00:00:00 2001 From: root Date: Sat, 1 Sep 2012 04:31:11 +0000 Subject: [PATCH 042/226] More block manager fixes --- .../spark/storage/BlockManagerWorker.scala | 2 +- .../main/scala/spark/storage/BlockStore.scala | 30 +++++++++---------- 2 files changed, 15 insertions(+), 17 deletions(-) diff --git a/core/src/main/scala/spark/storage/BlockManagerWorker.scala b/core/src/main/scala/spark/storage/BlockManagerWorker.scala index d74cdb38a8..0658a57187 100644 --- a/core/src/main/scala/spark/storage/BlockManagerWorker.scala +++ b/core/src/main/scala/spark/storage/BlockManagerWorker.scala @@ -73,7 +73,7 @@ class BlockManagerWorker(val blockManager: BlockManager) extends Logging { logDebug("PutBlock " + id + " started from " + startTimeMs + " with data: " + bytes) blockManager.putBytes(id, bytes, level) logDebug("PutBlock " + id + " used " + Utils.getUsedTimeMs(startTimeMs) - + " with data size: " + bytes.array().length) + + " with data size: " + bytes.limit) } private def getBlock(id: String): ByteBuffer = { diff --git a/core/src/main/scala/spark/storage/BlockStore.scala b/core/src/main/scala/spark/storage/BlockStore.scala index 17f4f51aa8..77e0ed84c5 100644 --- a/core/src/main/scala/spark/storage/BlockStore.scala +++ b/core/src/main/scala/spark/storage/BlockStore.scala @@ -76,11 +76,11 @@ class MemoryStore(blockManager: BlockManager, maxMemory: Long) currentMemory += sizeEstimate logDebug("Block " + blockId + " stored as values to memory") } else { - val entry = new Entry(bytes, bytes.array().length, false) - ensureFreeSpace(bytes.array.length) + val entry = new Entry(bytes, bytes.limit, false) + ensureFreeSpace(bytes.limit) memoryStore.synchronized { memoryStore.put(blockId, entry) } - currentMemory += bytes.array().length - logDebug("Block " + blockId + " stored as " + bytes.array().length + " bytes to memory") + currentMemory += bytes.limit + logDebug("Block " + blockId + " stored as " + bytes.limit + " bytes to memory") } } @@ -97,11 +97,11 @@ class MemoryStore(blockManager: BlockManager, maxMemory: Long) return Left(elements.iterator) } else { val bytes = dataSerialize(values) - ensureFreeSpace(bytes.array().length) - val entry = new Entry(bytes, bytes.array().length, false) + ensureFreeSpace(bytes.limit) + val entry = new Entry(bytes, bytes.limit, false) memoryStore.synchronized { memoryStore.put(blockId, entry) } - currentMemory += bytes.array().length - logDebug("Block " + blockId + " stored as " + bytes.array.length + " bytes to memory") + currentMemory += bytes.limit + logDebug("Block " + blockId + " stored as " + bytes.limit + " bytes to memory") return Right(bytes) } } @@ -118,7 +118,7 @@ class MemoryStore(blockManager: BlockManager, maxMemory: Long) if (entry.deserialized) { return 
Some(entry.value.asInstanceOf[ArrayBuffer[Any]].toIterator) } else { - return Some(dataDeserialize(entry.value.asInstanceOf[ByteBuffer])) + return Some(dataDeserialize(entry.value.asInstanceOf[ByteBuffer].duplicate())) } } @@ -199,11 +199,11 @@ class DiskStore(blockManager: BlockManager, rootDirs: String) val file = createFile(blockId) if (file != null) { val channel = new RandomAccessFile(file, "rw").getChannel() - val buffer = channel.map(MapMode.READ_WRITE, 0, bytes.array.length) - buffer.put(bytes.array) + val buffer = channel.map(MapMode.READ_WRITE, 0, bytes.limit) + buffer.put(bytes) channel.close() val finishTime = System.currentTimeMillis - logDebug("Block " + blockId + " stored to file of " + bytes.array.length + " bytes to disk in " + (finishTime - startTime) + " ms") + logDebug("Block " + blockId + " stored to file of " + bytes.limit + " bytes to disk in " + (finishTime - startTime) + " ms") } else { logError("File not created for block " + blockId) } @@ -211,7 +211,7 @@ class DiskStore(blockManager: BlockManager, rootDirs: String) def putValues(blockId: String, values: Iterator[Any], level: StorageLevel): Either[Iterator[Any], ByteBuffer] = { val bytes = dataSerialize(values) - logDebug("Converted block " + blockId + " to " + bytes.array.length + " bytes") + logDebug("Converted block " + blockId + " to " + bytes.limit + " bytes") putBytes(blockId, bytes, level) return Right(bytes) } @@ -220,9 +220,7 @@ class DiskStore(blockManager: BlockManager, rootDirs: String) val file = getFile(blockId) val length = file.length().toInt val channel = new RandomAccessFile(file, "r").getChannel() - val bytes = ByteBuffer.allocate(length) - bytes.put(channel.map(MapMode.READ_WRITE, 0, length)) - return Some(bytes) + Some(channel.map(MapMode.READ_WRITE, 0, length)) } def getValues(blockId: String): Option[Iterator[Any]] = { From 44758aa8e2337364610ee80fa9ec913301712078 Mon Sep 17 00:00:00 2001 From: Matei Zaharia Date: Sat, 1 Sep 2012 00:17:59 -0700 Subject: [PATCH 043/226] First work towards a RawInputDStream and a sender program for it. 
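A note on the wire format (a sketch, not part of the patch): the RawInputDStream added below reads a stream of blocks, each framed as a 4-byte length followed by that many bytes of pre-serialized data, and puts them straight into the block store. A sender therefore only needs to emit frames like the following, optionally through the new RateLimitedOutputStream to cap throughput:

    import java.io.{DataOutputStream, OutputStream}

    // Write one length-prefixed block; `out` would typically be a socket's
    // output stream (possibly wrapped in RateLimitedOutputStream).
    def writeBlock(out: OutputStream, payload: Array[Byte]) {
      val dos = new DataOutputStream(out)
      dos.writeInt(payload.length) // big-endian length, matched by lengthBuffer.getInt()
      dos.write(payload)           // block body, stored as-is and deserialized on the workers
      dos.flush()
    }
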
--- .../scala/spark/DaemonThreadFactory.scala | 12 +- .../spark/util/RateLimitedOutputStream.scala | 56 +++++++++ .../spark/streaming/NetworkInputDStream.scala | 33 ++--- .../NetworkInputReceiverMessage.scala | 7 ++ .../spark/streaming/NetworkInputTracker.scala | 7 +- .../spark/streaming/ObjectInputDStream.scala | 16 +++ ...ceiver.scala => ObjectInputReceiver.scala} | 32 +++-- .../spark/streaming/RawInputDStream.scala | 114 ++++++++++++++++++ .../spark/streaming/StreamingContext.scala | 8 +- .../spark/streaming/util/RawTextSender.scala | 51 ++++++++ 10 files changed, 282 insertions(+), 54 deletions(-) create mode 100644 core/src/main/scala/spark/util/RateLimitedOutputStream.scala create mode 100644 streaming/src/main/scala/spark/streaming/NetworkInputReceiverMessage.scala create mode 100644 streaming/src/main/scala/spark/streaming/ObjectInputDStream.scala rename streaming/src/main/scala/spark/streaming/{NetworkInputReceiver.scala => ObjectInputReceiver.scala} (89%) create mode 100644 streaming/src/main/scala/spark/streaming/RawInputDStream.scala create mode 100644 streaming/src/main/scala/spark/streaming/util/RawTextSender.scala diff --git a/core/src/main/scala/spark/DaemonThreadFactory.scala b/core/src/main/scala/spark/DaemonThreadFactory.scala index 003880c5e8..56e59adeb7 100644 --- a/core/src/main/scala/spark/DaemonThreadFactory.scala +++ b/core/src/main/scala/spark/DaemonThreadFactory.scala @@ -6,9 +6,13 @@ import java.util.concurrent.ThreadFactory * A ThreadFactory that creates daemon threads */ private object DaemonThreadFactory extends ThreadFactory { - override def newThread(r: Runnable): Thread = { - val t = new Thread(r) - t.setDaemon(true) - return t + override def newThread(r: Runnable): Thread = new DaemonThread(r) +} + +private class DaemonThread(r: Runnable = null) extends Thread { + override def run() { + if (r != null) { + r.run() + } } } \ No newline at end of file diff --git a/core/src/main/scala/spark/util/RateLimitedOutputStream.scala b/core/src/main/scala/spark/util/RateLimitedOutputStream.scala new file mode 100644 index 0000000000..10f2272707 --- /dev/null +++ b/core/src/main/scala/spark/util/RateLimitedOutputStream.scala @@ -0,0 +1,56 @@ +package spark.util + +import java.io.OutputStream + +class RateLimitedOutputStream(out: OutputStream, bytesPerSec: Int) extends OutputStream { + var lastSyncTime = System.nanoTime() + var bytesWrittenSinceSync: Long = 0 + + override def write(b: Int) { + waitToWrite(1) + out.write(b) + } + + override def write(bytes: Array[Byte]) { + write(bytes, 0, bytes.length) + } + + override def write(bytes: Array[Byte], offset: Int, length: Int) { + val CHUNK_SIZE = 8192 + var pos = 0 + while (pos < length) { + val writeSize = math.min(length - pos, CHUNK_SIZE) + waitToWrite(writeSize) + out.write(bytes, offset + pos, length - pos) + pos += writeSize + } + } + + def waitToWrite(numBytes: Int) { + while (true) { + val now = System.nanoTime() + val elapsed = math.max(now - lastSyncTime, 1) + val rate = bytesWrittenSinceSync.toDouble / (elapsed / 1.0e9) + if (rate < bytesPerSec) { + // It's okay to write; just update some variables and return + bytesWrittenSinceSync += numBytes + if (now > lastSyncTime + (1e10).toLong) { + // Ten seconds have passed since lastSyncTime; let's resync + lastSyncTime = now + bytesWrittenSinceSync = numBytes + } + return + } else { + Thread.sleep(5) + } + } + } + + override def flush() { + out.flush() + } + + override def close() { + out.close() + } +} \ No newline at end of file diff --git 
a/streaming/src/main/scala/spark/streaming/NetworkInputDStream.scala b/streaming/src/main/scala/spark/streaming/NetworkInputDStream.scala index ee09324c8c..bf83f98ec4 100644 --- a/streaming/src/main/scala/spark/streaming/NetworkInputDStream.scala +++ b/streaming/src/main/scala/spark/streaming/NetworkInputDStream.scala @@ -1,36 +1,23 @@ package spark.streaming -import akka.actor._ -import akka.pattern.ask -import akka.util.duration._ -import akka.dispatch._ - import spark.RDD import spark.BlockRDD -import spark.Logging -import java.io.InputStream +abstract class NetworkInputDStream[T: ClassManifest](@transient ssc: StreamingContext) + extends InputDStream[T](ssc) { - -class NetworkInputDStream[T: ClassManifest]( - @transient ssc: StreamingContext, - val host: String, - val port: Int, - val bytesToObjects: InputStream => Iterator[T] - ) extends InputDStream[T](ssc) with Logging { - val id = ssc.getNewNetworkStreamId() - def start() { } + def start() {} - def stop() { } + def stop() {} override def compute(validTime: Time): Option[RDD[T]] = { val blockIds = ssc.networkInputTracker.getBlockIds(id, validTime) - return Some(new BlockRDD[T](ssc.sc, blockIds)) + Some(new BlockRDD[T](ssc.sc, blockIds)) } - - def createReceiver(): NetworkInputReceiver[T] = { - new NetworkInputReceiver(id, host, port, bytesToObjects) - } -} \ No newline at end of file + + /** Called on workers to run a receiver for the stream. */ + def runReceiver(): Unit +} + diff --git a/streaming/src/main/scala/spark/streaming/NetworkInputReceiverMessage.scala b/streaming/src/main/scala/spark/streaming/NetworkInputReceiverMessage.scala new file mode 100644 index 0000000000..deaffe98c8 --- /dev/null +++ b/streaming/src/main/scala/spark/streaming/NetworkInputReceiverMessage.scala @@ -0,0 +1,7 @@ +package spark.streaming + +sealed trait NetworkInputReceiverMessage + +case class GetBlockIds(time: Long) extends NetworkInputReceiverMessage +case class GotBlockIds(streamId: Int, blocksIds: Array[String]) extends NetworkInputReceiverMessage +case object StopReceiver extends NetworkInputReceiverMessage diff --git a/streaming/src/main/scala/spark/streaming/NetworkInputTracker.scala b/streaming/src/main/scala/spark/streaming/NetworkInputTracker.scala index 07758665c9..acf97c1883 100644 --- a/streaming/src/main/scala/spark/streaming/NetworkInputTracker.scala +++ b/streaming/src/main/scala/spark/streaming/NetworkInputTracker.scala @@ -52,9 +52,7 @@ extends Logging { if (!iterator.hasNext) { throw new Exception("Could not start receiver as details not found.") } - val stream = iterator.next - val receiver = stream.createReceiver() - receiver.run() + iterator.next().runReceiver() } ssc.sc.runJob(tempRDD, startReceiver) @@ -62,8 +60,7 @@ extends Logging { def stopReceivers() { implicit val ec = env.actorSystem.dispatcher - val message = new StopReceiver() - val listOfFutures = receiverInfo.values.map(_.ask(message)(timeout)).toList + val listOfFutures = receiverInfo.values.map(_.ask(StopReceiver)(timeout)).toList val futureOfList = Future.sequence(listOfFutures) Await.result(futureOfList, timeout) } diff --git a/streaming/src/main/scala/spark/streaming/ObjectInputDStream.scala b/streaming/src/main/scala/spark/streaming/ObjectInputDStream.scala new file mode 100644 index 0000000000..2396b374a0 --- /dev/null +++ b/streaming/src/main/scala/spark/streaming/ObjectInputDStream.scala @@ -0,0 +1,16 @@ +package spark.streaming + +import java.io.InputStream + +class ObjectInputDStream[T: ClassManifest]( + @transient ssc: StreamingContext, + val host: 
String, + val port: Int, + val bytesToObjects: InputStream => Iterator[T]) + extends NetworkInputDStream[T](ssc) { + + override def runReceiver() { + new ObjectInputReceiver(id, host, port, bytesToObjects).run() + } +} + diff --git a/streaming/src/main/scala/spark/streaming/NetworkInputReceiver.scala b/streaming/src/main/scala/spark/streaming/ObjectInputReceiver.scala similarity index 89% rename from streaming/src/main/scala/spark/streaming/NetworkInputReceiver.scala rename to streaming/src/main/scala/spark/streaming/ObjectInputReceiver.scala index 7add6246b7..70fa2cdf07 100644 --- a/streaming/src/main/scala/spark/streaming/NetworkInputReceiver.scala +++ b/streaming/src/main/scala/spark/streaming/ObjectInputReceiver.scala @@ -26,23 +26,22 @@ import akka.pattern.ask import akka.util.duration._ import akka.dispatch._ -trait NetworkInputReceiverMessage -case class GetBlockIds(time: Long) extends NetworkInputReceiverMessage -case class GotBlockIds(streamId: Int, blocksIds: Array[String]) extends NetworkInputReceiverMessage -case class StopReceiver() extends NetworkInputReceiverMessage - -class NetworkInputReceiver[T: ClassManifest](streamId: Int, host: String, port: Int, bytesToObjects: InputStream => Iterator[T]) -extends Logging { +class ObjectInputReceiver[T: ClassManifest]( + streamId: Int, + host: String, + port: Int, + bytesToObjects: InputStream => Iterator[T]) + extends Logging { class ReceiverActor extends Actor { - override def preStart() = { + override def preStart() { logInfo("Attempting to register") val ip = System.getProperty("spark.master.host", "localhost") val port = System.getProperty("spark.master.port", "7077").toInt val actorName: String = "NetworkInputTracker" val url = "akka://spark@%s:%s/user/%s".format(ip, port, actorName) val trackerActor = env.actorSystem.actorFor(url) - val timeout = 100.milliseconds + val timeout = 1.seconds val future = trackerActor.ask(RegisterReceiver(streamId, self))(timeout) Await.result(future, timeout) } @@ -53,7 +52,7 @@ extends Logging { sender ! 
GotBlockIds(streamId, dataHandler.getPushedBlocks()) } - case StopReceiver() => { + case StopReceiver => { if (receivingThread != null) { receivingThread.interrupt() } @@ -63,11 +62,10 @@ extends Logging { } class DataHandler { - class Block(val time: Long, val iterator: Iterator[T]) { val blockId = "input-" + streamId + "-" + time var pushed = true - override def toString() = "input block " + blockId + override def toString = "input block " + blockId } val clock = new SystemClock() @@ -157,7 +155,7 @@ extends Logging { try { if (SparkEnv.get != null) { receiverActor = SparkEnv.get.actorSystem.actorOf(Props(new ReceiverActor), "ReceiverActor-" + streamId) - } + } dataHandler.start() socket = connect() receivingThread = Thread.currentThread() @@ -188,8 +186,7 @@ extends Logging { } -object NetworkInputReceiver { - +object ObjectInputReceiver { def bytesToLines(inputStream: InputStream): Iterator[String] = { val bufferedInputStream = new BufferedInputStream(inputStream) val dataInputStream = new DataInputStream(bufferedInputStream) @@ -219,7 +216,6 @@ object NetworkInputReceiver { } !finished } - override def next(): String = { if (!gotNext) { @@ -237,12 +233,12 @@ object NetworkInputReceiver { def main(args: Array[String]) { if (args.length < 2) { - println("NetworkReceiver ") + println("ObjectInputReceiver ") System.exit(1) } val host = args(0) val port = args(1).toInt - val receiver = new NetworkInputReceiver(0, host, port, bytesToLines) + val receiver = new ObjectInputReceiver(0, host, port, bytesToLines) receiver.run() } } diff --git a/streaming/src/main/scala/spark/streaming/RawInputDStream.scala b/streaming/src/main/scala/spark/streaming/RawInputDStream.scala new file mode 100644 index 0000000000..49e4781e75 --- /dev/null +++ b/streaming/src/main/scala/spark/streaming/RawInputDStream.scala @@ -0,0 +1,114 @@ +package spark.streaming + +import akka.actor._ +import akka.pattern.ask +import akka.util.duration._ +import akka.dispatch._ +import java.net.InetSocketAddress +import java.nio.ByteBuffer +import java.nio.channels.{ReadableByteChannel, SocketChannel} +import java.io.EOFException +import java.util.concurrent.ArrayBlockingQueue +import scala.collection.mutable.ArrayBuffer +import spark.{DaemonThread, Logging, SparkEnv} +import spark.storage.StorageLevel + +/** + * An input stream that reads blocks of serialized objects from a given network address. + * The blocks will be inserted directly into the block store. This is the fastest way to get + * data into Spark Streaming, though it requires the sender to batch data and serialize it + * in the format that the system is configured with. + */ +class RawInputDStream[T: ClassManifest]( + @transient ssc: StreamingContext, + host: String, + port: Int) + extends NetworkInputDStream[T](ssc) with Logging { + + val streamId = id + + /** Called on workers to run a receiver for the stream. 
*/ + def runReceiver() { + val env = SparkEnv.get + val actor = env.actorSystem.actorOf( + Props(new ReceiverActor(env, Thread.currentThread)), "ReceiverActor-" + streamId) + + // Open a socket to the target address and keep reading from it + logInfo("Connecting to " + host + ":" + port) + val channel = SocketChannel.open() + channel.configureBlocking(true) + channel.connect(new InetSocketAddress(host, port)) + logInfo("Connected to " + host + ":" + port) + + val queue = new ArrayBlockingQueue[ByteBuffer](2) + + new DaemonThread { + override def run() { + var nextBlockNumber = 0 + while (true) { + val buffer = queue.take() + val blockId = "input-" + streamId + "-" + nextBlockNumber + nextBlockNumber += 1 + env.blockManager.putBytes(blockId, buffer, StorageLevel.MEMORY_ONLY_2) + actor ! BlockPublished(blockId) + } + } + }.start() + + val lengthBuffer = ByteBuffer.allocate(4) + while (true) { + lengthBuffer.clear() + readFully(channel, lengthBuffer) + lengthBuffer.flip() + val length = lengthBuffer.getInt() + val dataBuffer = ByteBuffer.allocate(length) + readFully(channel, dataBuffer) + dataBuffer.flip() + logInfo("Read a block with " + length + " bytes") + queue.put(dataBuffer) + } + } + + /** Read a buffer fully from a given Channel */ + private def readFully(channel: ReadableByteChannel, dest: ByteBuffer) { + while (dest.position < dest.limit) { + if (channel.read(dest) == -1) { + throw new EOFException("End of channel") + } + } + } + + /** Message sent to ReceiverActor to tell it that a block was published */ + case class BlockPublished(blockId: String) {} + + /** A helper actor that communicates with the NetworkInputTracker */ + private class ReceiverActor(env: SparkEnv, receivingThread: Thread) extends Actor { + val newBlocks = new ArrayBuffer[String] + + override def preStart() { + logInfo("Attempting to register with tracker") + val ip = System.getProperty("spark.master.host", "localhost") + val port = System.getProperty("spark.master.port", "7077").toInt + val actorName: String = "NetworkInputTracker" + val url = "akka://spark@%s:%s/user/%s".format(ip, port, actorName) + val trackerActor = env.actorSystem.actorFor(url) + val timeout = 1.seconds + val future = trackerActor.ask(RegisterReceiver(streamId, self))(timeout) + Await.result(future, timeout) + } + + override def receive = { + case BlockPublished(blockId) => + newBlocks += blockId + + case GetBlockIds(time) => + logInfo("Got request for block IDs for " + time) + sender ! GotBlockIds(streamId, newBlocks.toArray) + newBlocks.clear() + + case StopReceiver => + receivingThread.interrupt() + sender ! 
true + } + } +} diff --git a/streaming/src/main/scala/spark/streaming/StreamingContext.scala b/streaming/src/main/scala/spark/streaming/StreamingContext.scala index 0ac86cbdf2..feb769e036 100644 --- a/streaming/src/main/scala/spark/streaming/StreamingContext.scala +++ b/streaming/src/main/scala/spark/streaming/StreamingContext.scala @@ -52,22 +52,22 @@ class StreamingContext ( private[streaming] def getNewNetworkStreamId() = nextNetworkInputStreamId.getAndIncrement() def createNetworkTextStream(hostname: String, port: Int): DStream[String] = { - createNetworkStream[String](hostname, port, NetworkInputReceiver.bytesToLines) + createNetworkObjectStream[String](hostname, port, ObjectInputReceiver.bytesToLines) } - def createNetworkStream[T: ClassManifest]( + def createNetworkObjectStream[T: ClassManifest]( hostname: String, port: Int, converter: (InputStream) => Iterator[T] ): DStream[T] = { - val inputStream = new NetworkInputDStream[T](this, hostname, port, converter) + val inputStream = new ObjectInputDStream[T](this, hostname, port, converter) inputStreams += inputStream inputStream } /* def createHttpTextStream(url: String): DStream[String] = { - createHttpStream(url, NetworkInputReceiver.bytesToLines) + createHttpStream(url, ObjectInputReceiver.bytesToLines) } def createHttpStream[T: ClassManifest]( diff --git a/streaming/src/main/scala/spark/streaming/util/RawTextSender.scala b/streaming/src/main/scala/spark/streaming/util/RawTextSender.scala new file mode 100644 index 0000000000..60d5849d71 --- /dev/null +++ b/streaming/src/main/scala/spark/streaming/util/RawTextSender.scala @@ -0,0 +1,51 @@ +package spark.streaming.util + +import spark.util.{RateLimitedOutputStream, IntParam} +import java.net.ServerSocket +import spark.{Logging, KryoSerializer} +import it.unimi.dsi.fastutil.io.FastByteArrayOutputStream +import io.Source +import java.io.IOException + +/** + * A helper program that sends blocks of Kryo-serialized text strings out on a socket at a + * specified rate. Used to feed data into RawInputDStream. 
+ */
+object RawTextSender extends Logging {
+  def main(args: Array[String]) {
+    if (args.length != 4) {
+      System.err.println("Usage: RawTextSender <port> <file> <blockSize> <bytesPerSec>")
+    }
+    // Parse the arguments using a pattern match
+    val Array(IntParam(port), file, IntParam(blockSize), IntParam(bytesPerSec)) = args
+
+    // Repeat the input data multiple times to fill in a buffer
+    val lines = Source.fromFile(file).getLines().toArray
+    val bufferStream = new FastByteArrayOutputStream(blockSize + 1000)
+    val ser = new KryoSerializer().newInstance()
+    val serStream = ser.serializeStream(bufferStream)
+    var i = 0
+    while (bufferStream.position < blockSize) {
+      serStream.writeObject(lines(i))
+      i = (i + 1) % lines.length
+    }
+    bufferStream.trim()
+    val array = bufferStream.array
+
+    val serverSocket = new ServerSocket(port)
+
+    while (true) {
+      val socket = serverSocket.accept()
+      val out = new RateLimitedOutputStream(socket.getOutputStream, bytesPerSec)
+      try {
+        while (true) {
+          out.write(array)
+        }
+      } catch {
+        case e: IOException =>
+          logError("Socket closed: ", e)
+          socket.close()
+      }
+    }
+  }
+}

From f84d2bbe55aaf3ef7a6631b9018a573aa5729ff7 Mon Sep 17 00:00:00 2001
From: Matei Zaharia
Date: Sat, 1 Sep 2012 00:31:15 -0700
Subject: [PATCH 044/226] Bug fixes to RateLimitedOutputStream

---
 core/src/main/scala/spark/util/RateLimitedOutputStream.scala | 2 +-
 .../src/main/scala/spark/streaming/util/RawTextSender.scala  | 4 +++-
 2 files changed, 4 insertions(+), 2 deletions(-)

diff --git a/core/src/main/scala/spark/util/RateLimitedOutputStream.scala b/core/src/main/scala/spark/util/RateLimitedOutputStream.scala
index 10f2272707..d11ed163ce 100644
--- a/core/src/main/scala/spark/util/RateLimitedOutputStream.scala
+++ b/core/src/main/scala/spark/util/RateLimitedOutputStream.scala
@@ -21,7 +21,7 @@ class RateLimitedOutputStream(out: OutputStream, bytesPerSec: Int) extends Outpu
     while (pos < length) {
       val writeSize = math.min(length - pos, CHUNK_SIZE)
       waitToWrite(writeSize)
-      out.write(bytes, offset + pos, length - pos)
+      out.write(bytes, offset + pos, writeSize)
       pos += writeSize
     }
   }
diff --git a/streaming/src/main/scala/spark/streaming/util/RawTextSender.scala b/streaming/src/main/scala/spark/streaming/util/RawTextSender.scala
index 60d5849d71..85927c02ec 100644
--- a/streaming/src/main/scala/spark/streaming/util/RawTextSender.scala
+++ b/streaming/src/main/scala/spark/streaming/util/RawTextSender.scala
@@ -15,6 +15,7 @@ object RawTextSender extends Logging {
   def main(args: Array[String]) {
     if (args.length != 4) {
       System.err.println("Usage: RawTextSender <port> <file> <blockSize> <bytesPerSec>")
+      System.exit(1)
     }
     // Parse the arguments using a pattern match
     val Array(IntParam(port), file, IntParam(blockSize), IntParam(bytesPerSec)) = args
@@ -36,6 +37,7 @@ object RawTextSender extends Logging {
 
     while (true) {
       val socket = serverSocket.accept()
+      logInfo("Got a new connection")
       val out = new RateLimitedOutputStream(socket.getOutputStream, bytesPerSec)
       try {
         while (true) {
@@ -43,7 +45,7 @@ object RawTextSender extends Logging {
         }
       } catch {
         case e: IOException =>
-          logError("Socket closed: ", e)
+          logError("Socket closed", e)
           socket.close()
       }
     }

From 83dad56334e73c477e9b62715df14b0c798220e3 Mon Sep 17 00:00:00 2001
From: root
Date: Sat, 1 Sep 2012 19:45:25 +0000
Subject: [PATCH 045/226] Further fixes to raw text sender, plus an app that uses it

---
 .../main/scala/spark/streaming/DStream.scala  |  2 +-
 .../scala/spark/streaming/JobManager.scala    |  2 +-
 .../spark/streaming/RawInputDStream.scala     |  5 +--
 .../spark/streaming/StreamingContext.scala    | 11 +++++++
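The one-line RateLimitedOutputStream change above is what makes the rate limit real: the old code reserved `writeSize` bytes from the per-second budget but then wrote the whole remaining buffer, so it both blew past the cap and re-sent overlapping bytes on every iteration. Note also that this first RawTextSender writes only the serialized block, while RawInputDStream (added just above) reads a 4-byte length header before each block; a later fix in this series adds that prefix on the sender side. A minimal sketch of the receive side of the framing, with illustrative names only:

    import java.io.EOFException
    import java.nio.ByteBuffer
    import java.nio.channels.ReadableByteChannel

    object FrameReader {
      // One length-prefixed block: a 4-byte big-endian length, then exactly that many bytes.
      def readFrame(channel: ReadableByteChannel): ByteBuffer = {
        def readFully(dest: ByteBuffer) {
          while (dest.hasRemaining) {
            if (channel.read(dest) == -1) throw new EOFException("End of channel")
          }
        }
        val header = ByteBuffer.allocate(4)
        readFully(header)
        header.flip()
        val payload = ByteBuffer.allocate(header.getInt())
        readFully(payload)
        payload.flip()
        payload
      }
    }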
.../spark/streaming/examples/CountRaw.scala | 32 +++++++++++++++++++ .../spark/streaming/examples/WordCount2.scala | 2 +- .../spark/streaming/util/RawTextSender.scala | 7 ++++ 7 files changed, 56 insertions(+), 5 deletions(-) create mode 100644 streaming/src/main/scala/spark/streaming/examples/CountRaw.scala diff --git a/streaming/src/main/scala/spark/streaming/DStream.scala b/streaming/src/main/scala/spark/streaming/DStream.scala index 3a57488f9b..74140ab2b8 100644 --- a/streaming/src/main/scala/spark/streaming/DStream.scala +++ b/streaming/src/main/scala/spark/streaming/DStream.scala @@ -176,7 +176,7 @@ extends Logging with Serializable { def mapPartitions[U: ClassManifest](mapPartFunc: Iterator[T] => Iterator[U]) = new MapPartitionedDStream(this, ssc.sc.clean(mapPartFunc)) - def reduce(reduceFunc: (T, T) => T) = this.map(x => (1, x)).reduceByKey(reduceFunc, 1).map(_._2) + def reduce(reduceFunc: (T, T) => T) = this.map(x => (null, x)).reduceByKey(reduceFunc, 1).map(_._2) def count() = this.map(_ => 1).reduce(_ + _) diff --git a/streaming/src/main/scala/spark/streaming/JobManager.scala b/streaming/src/main/scala/spark/streaming/JobManager.scala index 40e614b4ed..9bf9251519 100644 --- a/streaming/src/main/scala/spark/streaming/JobManager.scala +++ b/streaming/src/main/scala/spark/streaming/JobManager.scala @@ -12,7 +12,7 @@ class JobManager(ssc: StreamingContext, numThreads: Int = 1) extends Logging { SparkEnv.set(ssc.env) try { val timeTaken = job.run() - logInfo("Total delay: %.4f s for job %s; execution was %.4f s".format( + logInfo("Total delay: %.5f s for job %s (execution: %.5f s)".format( (System.currentTimeMillis() - job.time) / 1000.0, job.id, timeTaken / 1000.0)) } catch { case e: Exception => diff --git a/streaming/src/main/scala/spark/streaming/RawInputDStream.scala b/streaming/src/main/scala/spark/streaming/RawInputDStream.scala index 49e4781e75..d59c245a23 100644 --- a/streaming/src/main/scala/spark/streaming/RawInputDStream.scala +++ b/streaming/src/main/scala/spark/streaming/RawInputDStream.scala @@ -22,7 +22,8 @@ import spark.storage.StorageLevel class RawInputDStream[T: ClassManifest]( @transient ssc: StreamingContext, host: String, - port: Int) + port: Int, + storageLevel: StorageLevel) extends NetworkInputDStream[T](ssc) with Logging { val streamId = id @@ -49,7 +50,7 @@ class RawInputDStream[T: ClassManifest]( val buffer = queue.take() val blockId = "input-" + streamId + "-" + nextBlockNumber nextBlockNumber += 1 - env.blockManager.putBytes(blockId, buffer, StorageLevel.MEMORY_ONLY_2) + env.blockManager.putBytes(blockId, buffer, storageLevel) actor ! 
BlockPublished(blockId) } } diff --git a/streaming/src/main/scala/spark/streaming/StreamingContext.scala b/streaming/src/main/scala/spark/streaming/StreamingContext.scala index feb769e036..cb0f9ceb15 100644 --- a/streaming/src/main/scala/spark/streaming/StreamingContext.scala +++ b/streaming/src/main/scala/spark/streaming/StreamingContext.scala @@ -4,6 +4,7 @@ import spark.RDD import spark.Logging import spark.SparkEnv import spark.SparkContext +import spark.storage.StorageLevel import scala.collection.mutable.ArrayBuffer import scala.collection.mutable.Queue @@ -64,6 +65,16 @@ class StreamingContext ( inputStreams += inputStream inputStream } + + def createRawNetworkStream[T: ClassManifest]( + hostname: String, + port: Int, + storageLevel: StorageLevel = StorageLevel.MEMORY_ONLY_2 + ): DStream[T] = { + val inputStream = new RawInputDStream[T](this, hostname, port, storageLevel) + inputStreams += inputStream + inputStream + } /* def createHttpTextStream(url: String): DStream[String] = { diff --git a/streaming/src/main/scala/spark/streaming/examples/CountRaw.scala b/streaming/src/main/scala/spark/streaming/examples/CountRaw.scala new file mode 100644 index 0000000000..17d1ce3602 --- /dev/null +++ b/streaming/src/main/scala/spark/streaming/examples/CountRaw.scala @@ -0,0 +1,32 @@ +package spark.streaming.examples + +import spark.util.IntParam +import spark.storage.StorageLevel +import spark.streaming._ +import spark.streaming.StreamingContext._ + +object CountRaw { + def main(args: Array[String]) { + if (args.length < 4) { + System.err.println("Usage: WordCountNetwork ") + System.exit(1) + } + + val Array(master, IntParam(numStreams), hostname, IntParam(port)) = args + + // Create the context and set the batch size + val ssc = new StreamingContext(master, "CountRaw") + ssc.setBatchDuration(Seconds(1)) + + // Make sure some tasks have started on each node + ssc.sc.parallelize(1 to 1000, 1000).count() + ssc.sc.parallelize(1 to 1000, 1000).count() + ssc.sc.parallelize(1 to 1000, 1000).count() + + val rawStreams = (1 to numStreams).map(_ => + ssc.createRawNetworkStream[String](hostname, port, StorageLevel.MEMORY_ONLY_2)).toArray + val union = new UnifiedDStream(rawStreams) + union.map(_.length).reduce(_ + _).foreachRDD(r => println("Byte count: " + r.collect().mkString)) + ssc.start() + } +} diff --git a/streaming/src/main/scala/spark/streaming/examples/WordCount2.scala b/streaming/src/main/scala/spark/streaming/examples/WordCount2.scala index a090dcb85d..ce553758a7 100644 --- a/streaming/src/main/scala/spark/streaming/examples/WordCount2.scala +++ b/streaming/src/main/scala/spark/streaming/examples/WordCount2.scala @@ -85,7 +85,7 @@ object WordCount2 { //warmup(ssc.sc) val data = ssc.sc.textFile(file, mapTasks.toInt).persist( - new StorageLevel(false, true, true, 2)) // Memory only, deserialized, 2 replicas + new StorageLevel(false, true, false, 2)) // Memory only, serialized, 2 replicas println("Data count: " + data.count()) println("Data count: " + data.count()) println("Data count: " + data.count()) diff --git a/streaming/src/main/scala/spark/streaming/util/RawTextSender.scala b/streaming/src/main/scala/spark/streaming/util/RawTextSender.scala index 85927c02ec..8db651ba19 100644 --- a/streaming/src/main/scala/spark/streaming/util/RawTextSender.scala +++ b/streaming/src/main/scala/spark/streaming/util/RawTextSender.scala @@ -1,5 +1,6 @@ package spark.streaming.util +import java.nio.ByteBuffer import spark.util.{RateLimitedOutputStream, IntParam} import java.net.ServerSocket import 
spark.{Logging, KryoSerializer} @@ -33,7 +34,12 @@ object RawTextSender extends Logging { bufferStream.trim() val array = bufferStream.array + val countBuf = ByteBuffer.wrap(new Array[Byte](4)) + countBuf.putInt(array.length) + countBuf.flip() + val serverSocket = new ServerSocket(port) + logInfo("Listening on port " + port) while (true) { val socket = serverSocket.accept() @@ -41,6 +47,7 @@ object RawTextSender extends Logging { val out = new RateLimitedOutputStream(socket.getOutputStream, bytesPerSec) try { while (true) { + out.write(countBuf.array) out.write(array) } } catch { From bf993cda632e4a9fa41035845491ae466d1a4431 Mon Sep 17 00:00:00 2001 From: root Date: Sat, 1 Sep 2012 19:59:23 +0000 Subject: [PATCH 046/226] Make batch size configurable in RawCount --- .../scala/spark/streaming/examples/CountRaw.scala | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/streaming/src/main/scala/spark/streaming/examples/CountRaw.scala b/streaming/src/main/scala/spark/streaming/examples/CountRaw.scala index 17d1ce3602..c78c1e9660 100644 --- a/streaming/src/main/scala/spark/streaming/examples/CountRaw.scala +++ b/streaming/src/main/scala/spark/streaming/examples/CountRaw.scala @@ -7,16 +7,16 @@ import spark.streaming.StreamingContext._ object CountRaw { def main(args: Array[String]) { - if (args.length < 4) { - System.err.println("Usage: WordCountNetwork ") + if (args.length != 5) { + System.err.println("Usage: CountRaw ") System.exit(1) } - val Array(master, IntParam(numStreams), hostname, IntParam(port)) = args + val Array(master, IntParam(numStreams), host, IntParam(port), IntParam(batchMillis)) = args // Create the context and set the batch size val ssc = new StreamingContext(master, "CountRaw") - ssc.setBatchDuration(Seconds(1)) + ssc.setBatchDuration(Milliseconds(batchMillis)) // Make sure some tasks have started on each node ssc.sc.parallelize(1 to 1000, 1000).count() @@ -24,9 +24,9 @@ object CountRaw { ssc.sc.parallelize(1 to 1000, 1000).count() val rawStreams = (1 to numStreams).map(_ => - ssc.createRawNetworkStream[String](hostname, port, StorageLevel.MEMORY_ONLY_2)).toArray + ssc.createRawNetworkStream[String](host, port, StorageLevel.MEMORY_ONLY_2)).toArray val union = new UnifiedDStream(rawStreams) - union.map(_.length).reduce(_ + _).foreachRDD(r => println("Byte count: " + r.collect().mkString)) + union.map(_.length + 2).reduce(_ + _).foreachRDD(r => println("Byte count: " + r.collect().mkString)) ssc.start() } } From 6025889be0ecf1c9849c5c940a7171c6d82be0b5 Mon Sep 17 00:00:00 2001 From: root Date: Sat, 1 Sep 2012 20:51:07 +0000 Subject: [PATCH 047/226] More raw network receiver programs --- .../mesos/CoarseMesosSchedulerBackend.scala | 4 +- .../spark/streaming/examples/GrepRaw.scala | 33 +++++++++++++++ .../spark/streaming/examples/WordCount2.scala | 2 +- .../streaming/examples/WordCountRaw.scala | 42 +++++++++++++++++++ 4 files changed, 79 insertions(+), 2 deletions(-) create mode 100644 streaming/src/main/scala/spark/streaming/examples/GrepRaw.scala create mode 100644 streaming/src/main/scala/spark/streaming/examples/WordCountRaw.scala diff --git a/core/src/main/scala/spark/scheduler/mesos/CoarseMesosSchedulerBackend.scala b/core/src/main/scala/spark/scheduler/mesos/CoarseMesosSchedulerBackend.scala index 31784985dc..fdf007ffb2 100644 --- a/core/src/main/scala/spark/scheduler/mesos/CoarseMesosSchedulerBackend.scala +++ b/core/src/main/scala/spark/scheduler/mesos/CoarseMesosSchedulerBackend.scala @@ -80,6 +80,8 @@ class CoarseMesosSchedulerBackend( 
"property, the SPARK_HOME environment variable or the SparkContext constructor") } + val extraCoresPerSlave = System.getProperty("spark.mesos.extra.cores", "0").toInt + var nextMesosTaskId = 0 def newMesosTaskId(): Int = { @@ -177,7 +179,7 @@ class CoarseMesosSchedulerBackend( val task = MesosTaskInfo.newBuilder() .setTaskId(TaskID.newBuilder().setValue(taskId.toString).build()) .setSlaveId(offer.getSlaveId) - .setCommand(createCommand(offer, cpusToUse)) + .setCommand(createCommand(offer, cpusToUse + extraCoresPerSlave)) .setName("Task " + taskId) .addResources(createResource("cpus", cpusToUse)) .addResources(createResource("mem", executorMemory)) diff --git a/streaming/src/main/scala/spark/streaming/examples/GrepRaw.scala b/streaming/src/main/scala/spark/streaming/examples/GrepRaw.scala new file mode 100644 index 0000000000..cc52da7bd4 --- /dev/null +++ b/streaming/src/main/scala/spark/streaming/examples/GrepRaw.scala @@ -0,0 +1,33 @@ +package spark.streaming.examples + +import spark.util.IntParam +import spark.storage.StorageLevel +import spark.streaming._ +import spark.streaming.StreamingContext._ + +object GrepRaw { + def main(args: Array[String]) { + if (args.length != 5) { + System.err.println("Usage: GrepRaw ") + System.exit(1) + } + + val Array(master, IntParam(numStreams), host, IntParam(port), IntParam(batchMillis)) = args + + // Create the context and set the batch size + val ssc = new StreamingContext(master, "GrepRaw") + ssc.setBatchDuration(Milliseconds(batchMillis)) + + // Make sure some tasks have started on each node + ssc.sc.parallelize(1 to 1000, 1000).count() + ssc.sc.parallelize(1 to 1000, 1000).count() + ssc.sc.parallelize(1 to 1000, 1000).count() + + val rawStreams = (1 to numStreams).map(_ => + ssc.createRawNetworkStream[String](host, port, StorageLevel.MEMORY_ONLY_2)).toArray + val union = new UnifiedDStream(rawStreams) + union.filter(_.contains("Culpepper")).count().foreachRDD(r => + println("Grep count: " + r.collect().mkString)) + ssc.start() + } +} diff --git a/streaming/src/main/scala/spark/streaming/examples/WordCount2.scala b/streaming/src/main/scala/spark/streaming/examples/WordCount2.scala index ce553758a7..8c2724e97c 100644 --- a/streaming/src/main/scala/spark/streaming/examples/WordCount2.scala +++ b/streaming/src/main/scala/spark/streaming/examples/WordCount2.scala @@ -100,7 +100,7 @@ object WordCount2 { .reduceByKeyAndWindow(add _, subtract _, Seconds(30), batchDuration, reduceTasks.toInt) windowedCounts.persist(StorageLevel.MEMORY_ONLY_DESER, StorageLevel.MEMORY_ONLY_DESER_2, Milliseconds(chkptMillis.toLong)) - windowedCounts.print() + windowedCounts.foreachRDD(r => println("Element count: " + r.count())) ssc.start() diff --git a/streaming/src/main/scala/spark/streaming/examples/WordCountRaw.scala b/streaming/src/main/scala/spark/streaming/examples/WordCountRaw.scala new file mode 100644 index 0000000000..298d9ef381 --- /dev/null +++ b/streaming/src/main/scala/spark/streaming/examples/WordCountRaw.scala @@ -0,0 +1,42 @@ +package spark.streaming.examples + +import spark.util.IntParam +import spark.storage.StorageLevel +import spark.streaming._ +import spark.streaming.StreamingContext._ + +object WordCountRaw { + def main(args: Array[String]) { + if (args.length != 7) { + System.err.println("Usage: WordCountRaw ") + System.exit(1) + } + + val Array(master, IntParam(streams), host, IntParam(port), IntParam(batchMs), + IntParam(chkptMs), IntParam(reduces)) = args + + // Create the context and set the batch size + val ssc = new StreamingContext(master, 
"WordCountRaw") + ssc.setBatchDuration(Milliseconds(batchMs)) + + // Make sure some tasks have started on each node + ssc.sc.parallelize(1 to 1000, 1000).count() + ssc.sc.parallelize(1 to 1000, 1000).count() + ssc.sc.parallelize(1 to 1000, 1000).count() + + val rawStreams = (1 to streams).map(_ => + ssc.createRawNetworkStream[String](host, port, StorageLevel.MEMORY_ONLY_2)).toArray + val union = new UnifiedDStream(rawStreams) + + import WordCount2_ExtraFunctions._ + + val windowedCounts = union.mapPartitions(splitAndCountPartitions) + .reduceByKeyAndWindow(add _, subtract _, Seconds(30), Milliseconds(batchMs), reduces) + windowedCounts.persist(StorageLevel.MEMORY_ONLY_DESER, StorageLevel.MEMORY_ONLY_DESER_2, + Milliseconds(chkptMs)) + //windowedCounts.print() // TODO: something else? + windowedCounts.foreachRDD(r => println("Element count: " + r.count())) + + ssc.start() + } +} From ceabf71257631c9e46f82897e540369b99a6bb57 Mon Sep 17 00:00:00 2001 From: root Date: Sat, 1 Sep 2012 21:52:42 +0000 Subject: [PATCH 048/226] tweaks --- core/src/main/scala/spark/storage/StorageLevel.scala | 1 + .../src/main/scala/spark/streaming/util/RawTextSender.scala | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/core/src/main/scala/spark/storage/StorageLevel.scala b/core/src/main/scala/spark/storage/StorageLevel.scala index f067a2a6c5..a64393eba7 100644 --- a/core/src/main/scala/spark/storage/StorageLevel.scala +++ b/core/src/main/scala/spark/storage/StorageLevel.scala @@ -66,6 +66,7 @@ class StorageLevel( object StorageLevel { val NONE = new StorageLevel(false, false, false) val DISK_ONLY = new StorageLevel(true, false, false) + val DISK_ONLY_2 = new StorageLevel(true, false, false, 2) val MEMORY_ONLY = new StorageLevel(false, true, false) val MEMORY_ONLY_2 = new StorageLevel(false, true, false, 2) val MEMORY_ONLY_DESER = new StorageLevel(false, true, true) diff --git a/streaming/src/main/scala/spark/streaming/util/RawTextSender.scala b/streaming/src/main/scala/spark/streaming/util/RawTextSender.scala index 8db651ba19..d8b987ec86 100644 --- a/streaming/src/main/scala/spark/streaming/util/RawTextSender.scala +++ b/streaming/src/main/scala/spark/streaming/util/RawTextSender.scala @@ -52,7 +52,7 @@ object RawTextSender extends Logging { } } catch { case e: IOException => - logError("Socket closed", e) + logError("Client disconnected") socket.close() } } From 7419d2c7ea1be9dcc0079dbe6dfd7046f0c549e0 Mon Sep 17 00:00:00 2001 From: Tathagata Das Date: Sun, 2 Sep 2012 02:35:17 -0700 Subject: [PATCH 049/226] Added transformRDD DStream operation and TransformedDStream. Added sbt assembly option for streaming project. 
--- project/SparkBuild.scala | 4 +- .../main/scala/spark/streaming/DStream.scala | 62 ++++++++++++++----- .../scala/spark/streaming/StateDStream.scala | 1 - .../scala/spark/streaming/DStreamSuite.scala | 2 +- 4 files changed, 49 insertions(+), 20 deletions(-) diff --git a/project/SparkBuild.scala b/project/SparkBuild.scala index 6a60f10be4..358213fe64 100644 --- a/project/SparkBuild.scala +++ b/project/SparkBuild.scala @@ -84,7 +84,9 @@ object SparkBuild extends Build { def bagelSettings = sharedSettings ++ Seq(name := "spark-bagel") - def streamingSettings = sharedSettings ++ Seq(name := "spark-streaming") + def streamingSettings = sharedSettings ++ Seq( + name := "spark-streaming" + ) ++ assemblySettings ++ extraAssemblySettings def extraAssemblySettings() = Seq(test in assembly := {}) ++ Seq( mergeStrategy in assembly := { diff --git a/streaming/src/main/scala/spark/streaming/DStream.scala b/streaming/src/main/scala/spark/streaming/DStream.scala index 8c06345933..08eda056c9 100644 --- a/streaming/src/main/scala/spark/streaming/DStream.scala +++ b/streaming/src/main/scala/spark/streaming/DStream.scala @@ -12,7 +12,7 @@ import spark.Partitioner import scala.collection.mutable.ArrayBuffer import scala.collection.mutable.HashMap -import java.util.concurrent.ArrayBlockingQueue +import java.util.concurrent.ArrayBlockingQueue abstract class DStream[T: ClassManifest] (@transient val ssc: StreamingContext) extends Logging with Serializable { @@ -166,15 +166,17 @@ extends Logging with Serializable { def map[U: ClassManifest](mapFunc: T => U) = new MappedDStream(this, ssc.sc.clean(mapFunc)) - def flatMap[U: ClassManifest](flatMapFunc: T => Traversable[U]) = + def flatMap[U: ClassManifest](flatMapFunc: T => Traversable[U]) = { new FlatMappedDStream(this, ssc.sc.clean(flatMapFunc)) + } def filter(filterFunc: T => Boolean) = new FilteredDStream(this, filterFunc) def glom() = new GlommedDStream(this) - def mapPartitions[U: ClassManifest](mapPartFunc: Iterator[T] => Iterator[U]) = + def mapPartitions[U: ClassManifest](mapPartFunc: Iterator[T] => Iterator[U]) = { new MapPartitionedDStream(this, ssc.sc.clean(mapPartFunc)) + } def reduce(reduceFunc: (T, T) => T) = this.map(x => (1, x)).reduceByKey(reduceFunc, 1).map(_._2) @@ -182,18 +184,30 @@ extends Logging with Serializable { def collect() = this.map(x => (1, x)).groupByKey(1).map(_._2) - def foreach(foreachFunc: T => Unit) = { + def foreach(foreachFunc: T => Unit) { val newStream = new PerElementForEachDStream(this, ssc.sc.clean(foreachFunc)) ssc.registerOutputStream(newStream) newStream } - def foreachRDD(foreachFunc: RDD[T] => Unit) = { + def foreachRDD(foreachFunc: RDD[T] => Unit) { + foreachRDD((r: RDD[T], t: Time) => foreachFunc(r)) + } + + def foreachRDD(foreachFunc: (RDD[T], Time) => Unit) { val newStream = new PerRDDForEachDStream(this, ssc.sc.clean(foreachFunc)) ssc.registerOutputStream(newStream) newStream } + def transformRDD[U: ClassManifest](transformFunc: RDD[T] => RDD[U]): DStream[U] = { + transformRDD((r: RDD[T], t: Time) => transformFunc(r)) + } + + def transformRDD[U: ClassManifest](transformFunc: (RDD[T], Time) => RDD[U]): DStream[U] = { + new TransformedDStream(this, ssc.sc.clean(transformFunc)) + } + private[streaming] def toQueue = { val queue = new ArrayBlockingQueue[RDD[T]](10000) this.foreachRDD(rdd => { @@ -361,15 +375,13 @@ class ShuffledDStream[K: ClassManifest, V: ClassManifest, C: ClassManifest]( createCombiner: V => C, mergeValue: (C, V) => C, mergeCombiner: (C, C) => C, - partitioner: Partitioner) - extends DStream 
[(K,C)] (parent.ssc) { + partitioner: Partitioner + ) extends DStream [(K,C)] (parent.ssc) { override def dependencies = List(parent) override def slideTime: Time = parent.slideTime - - override def compute(validTime: Time): Option[RDD[(K,C)]] = { parent.getOrCompute(validTime) match { case Some(rdd) => @@ -385,7 +397,7 @@ class ShuffledDStream[K: ClassManifest, V: ClassManifest, C: ClassManifest]( */ class UnifiedDStream[T: ClassManifest](parents: Array[DStream[T]]) -extends DStream[T](parents(0).ssc) { + extends DStream[T](parents(0).ssc) { if (parents.length == 0) { throw new IllegalArgumentException("Empty array of parents") @@ -424,8 +436,8 @@ extends DStream[T](parents(0).ssc) { class PerElementForEachDStream[T: ClassManifest] ( parent: DStream[T], - foreachFunc: T => Unit) -extends DStream[Unit](parent.ssc) { + foreachFunc: T => Unit + ) extends DStream[Unit](parent.ssc) { override def dependencies = List(parent) @@ -455,11 +467,8 @@ extends DStream[Unit](parent.ssc) { class PerRDDForEachDStream[T: ClassManifest] ( parent: DStream[T], - foreachFunc: (RDD[T], Time) => Unit) -extends DStream[Unit](parent.ssc) { - - def this(parent: DStream[T], altForeachFunc: (RDD[T]) => Unit) = - this(parent, (rdd: RDD[T], time: Time) => altForeachFunc(rdd)) + foreachFunc: (RDD[T], Time) => Unit + ) extends DStream[Unit](parent.ssc) { override def dependencies = List(parent) @@ -478,3 +487,22 @@ extends DStream[Unit](parent.ssc) { } } } + + +/** + * TODO + */ + +class TransformedDStream[T: ClassManifest, U: ClassManifest] ( + parent: DStream[T], + transformFunc: (RDD[T], Time) => RDD[U] + ) extends DStream[U](parent.ssc) { + + override def dependencies = List(parent) + + override def slideTime: Time = parent.slideTime + + override def compute(validTime: Time): Option[RDD[U]] = { + parent.getOrCompute(validTime).map(transformFunc(_, validTime)) + } + } diff --git a/streaming/src/main/scala/spark/streaming/StateDStream.scala b/streaming/src/main/scala/spark/streaming/StateDStream.scala index eabb33d89e..f313d8c162 100644 --- a/streaming/src/main/scala/spark/streaming/StateDStream.scala +++ b/streaming/src/main/scala/spark/streaming/StateDStream.scala @@ -4,7 +4,6 @@ import spark.RDD import spark.Partitioner import spark.MapPartitionsRDD import spark.SparkContext._ -import javax.annotation.Nullable class StateDStream[K: ClassManifest, V: ClassManifest, S <: AnyRef : ClassManifest]( diff --git a/streaming/src/test/scala/spark/streaming/DStreamSuite.scala b/streaming/src/test/scala/spark/streaming/DStreamSuite.scala index d5eb20b37e..030f351080 100644 --- a/streaming/src/test/scala/spark/streaming/DStreamSuite.scala +++ b/streaming/src/test/scala/spark/streaming/DStreamSuite.scala @@ -105,7 +105,7 @@ class DStreamSuite extends FunSuite with BeforeAndAfter with Logging { Seq(("a", 1), ("b", 1), ("c", 1)), Seq(("a", 2), ("b", 2), ("c", 2)), Seq(("a", 3), ("b", 3), ("c", 3)) - )//.map(array => array.toSeq.map(x => (x._1, new RichInt(x._2)))) + ) val updateStateOp =(s: DStream[String]) => { val updateFunc = (values: Seq[Int], state: RichInt) => { From 1878731671d89a2e2230f1cae62648fcd69c12ab Mon Sep 17 00:00:00 2001 From: root Date: Tue, 4 Sep 2012 04:26:53 +0000 Subject: [PATCH 050/226] Various test programs --- .../spark/streaming/examples/Grep2.scala | 64 ++++++++++++++++ .../spark/streaming/examples/WordCount2.scala | 8 +- .../spark/streaming/examples/WordMax2.scala | 73 +++++++++++++++++++ 3 files changed, 143 insertions(+), 2 deletions(-) create mode 100644 
streaming/src/main/scala/spark/streaming/examples/Grep2.scala create mode 100644 streaming/src/main/scala/spark/streaming/examples/WordMax2.scala diff --git a/streaming/src/main/scala/spark/streaming/examples/Grep2.scala b/streaming/src/main/scala/spark/streaming/examples/Grep2.scala new file mode 100644 index 0000000000..7237142c7c --- /dev/null +++ b/streaming/src/main/scala/spark/streaming/examples/Grep2.scala @@ -0,0 +1,64 @@ +package spark.streaming.examples + +import spark.SparkContext +import SparkContext._ +import spark.streaming._ +import StreamingContext._ + +import spark.storage.StorageLevel + +import scala.util.Sorting +import scala.collection.mutable.ArrayBuffer +import scala.collection.mutable.HashMap +import scala.collection.mutable.Queue +import scala.collection.JavaConversions.mapAsScalaMap + +import it.unimi.dsi.fastutil.objects.{Object2LongOpenHashMap => OLMap} + + +object Grep2 { + + def warmup(sc: SparkContext) { + (0 until 10).foreach {i => + sc.parallelize(1 to 20000000, 1000) + .map(x => (x % 337, x % 1331)) + .reduceByKey(_ + _) + .count() + } + } + + def main (args: Array[String]) { + + if (args.length != 6) { + println ("Usage: Grep2 ") + System.exit(1) + } + + val Array(master, file, mapTasks, reduceTasks, batchMillis, chkptMillis) = args + + val batchDuration = Milliseconds(batchMillis.toLong) + + val ssc = new StreamingContext(master, "Grep2") + ssc.setBatchDuration(batchDuration) + + //warmup(ssc.sc) + + val data = ssc.sc.textFile(file, mapTasks.toInt).persist( + new StorageLevel(false, true, false, 3)) // Memory only, serialized, 3 replicas + println("Data count: " + data.count()) + println("Data count: " + data.count()) + println("Data count: " + data.count()) + + val sentences = new ConstantInputDStream(ssc, data) + ssc.inputStreams += sentences + + sentences.filter(_.contains("Culpepper")).count().foreachRDD(r => + println("Grep count: " + r.collect().mkString)) + + ssc.start() + + while(true) { Thread.sleep(1000) } + } +} + + diff --git a/streaming/src/main/scala/spark/streaming/examples/WordCount2.scala b/streaming/src/main/scala/spark/streaming/examples/WordCount2.scala index 8c2724e97c..aa542ba07d 100644 --- a/streaming/src/main/scala/spark/streaming/examples/WordCount2.scala +++ b/streaming/src/main/scala/spark/streaming/examples/WordCount2.scala @@ -22,6 +22,8 @@ object WordCount2_ExtraFunctions { def subtract(v1: Long, v2: Long) = (v1 - v2) + def max(v1: Long, v2: Long) = math.max(v1, v2) + def splitAndCountPartitions(iter: Iterator[String]): Iterator[(String, Long)] = { //val map = new java.util.HashMap[String, Long] val map = new OLMap[String] @@ -85,7 +87,7 @@ object WordCount2 { //warmup(ssc.sc) val data = ssc.sc.textFile(file, mapTasks.toInt).persist( - new StorageLevel(false, true, false, 2)) // Memory only, serialized, 2 replicas + new StorageLevel(false, true, false, 3)) // Memory only, serialized, 3 replicas println("Data count: " + data.count()) println("Data count: " + data.count()) println("Data count: " + data.count()) @@ -98,7 +100,9 @@ object WordCount2 { val windowedCounts = sentences .mapPartitions(splitAndCountPartitions) .reduceByKeyAndWindow(add _, subtract _, Seconds(30), batchDuration, reduceTasks.toInt) - windowedCounts.persist(StorageLevel.MEMORY_ONLY_DESER, StorageLevel.MEMORY_ONLY_DESER_2, + windowedCounts.persist(StorageLevel.MEMORY_ONLY_DESER, + StorageLevel.MEMORY_ONLY_DESER_2, + //new StorageLevel(false, true, true, 3), Milliseconds(chkptMillis.toLong)) windowedCounts.foreachRDD(r => println("Element count: " + 
r.count())) diff --git a/streaming/src/main/scala/spark/streaming/examples/WordMax2.scala b/streaming/src/main/scala/spark/streaming/examples/WordMax2.scala new file mode 100644 index 0000000000..3658cb302d --- /dev/null +++ b/streaming/src/main/scala/spark/streaming/examples/WordMax2.scala @@ -0,0 +1,73 @@ +package spark.streaming.examples + +import spark.SparkContext +import SparkContext._ +import spark.streaming._ +import StreamingContext._ + +import spark.storage.StorageLevel + +import scala.util.Sorting +import scala.collection.mutable.ArrayBuffer +import scala.collection.mutable.HashMap +import scala.collection.mutable.Queue +import scala.collection.JavaConversions.mapAsScalaMap + +import it.unimi.dsi.fastutil.objects.{Object2LongOpenHashMap => OLMap} + + +object WordMax2 { + + def warmup(sc: SparkContext) { + (0 until 10).foreach {i => + sc.parallelize(1 to 20000000, 1000) + .map(x => (x % 337, x % 1331)) + .reduceByKey(_ + _) + .count() + } + } + + def main (args: Array[String]) { + + if (args.length != 6) { + println ("Usage: WordMax2 ") + System.exit(1) + } + + val Array(master, file, mapTasks, reduceTasks, batchMillis, chkptMillis) = args + + val batchDuration = Milliseconds(batchMillis.toLong) + + val ssc = new StreamingContext(master, "WordMax2") + ssc.setBatchDuration(batchDuration) + + //warmup(ssc.sc) + + val data = ssc.sc.textFile(file, mapTasks.toInt).persist( + new StorageLevel(false, true, false, 3)) // Memory only, serialized, 3 replicas + println("Data count: " + data.count()) + println("Data count: " + data.count()) + println("Data count: " + data.count()) + + val sentences = new ConstantInputDStream(ssc, data) + ssc.inputStreams += sentences + + import WordCount2_ExtraFunctions._ + + val windowedCounts = sentences + .mapPartitions(splitAndCountPartitions) + .reduceByKey(add _, reduceTasks.toInt) + .persist(StorageLevel.MEMORY_ONLY_DESER, StorageLevel.MEMORY_ONLY_DESER_2, + Milliseconds(chkptMillis.toLong)) + .reduceByKeyAndWindow(max _, Seconds(10), batchDuration, reduceTasks.toInt) + //.persist(StorageLevel.MEMORY_ONLY_DESER, StorageLevel.MEMORY_ONLY_DESER_2, + // Milliseconds(chkptMillis.toLong)) + windowedCounts.foreachRDD(r => println("Element count: " + r.count())) + + ssc.start() + + while(true) { Thread.sleep(1000) } + } +} + + From 389a78722cabe9f964ac29edcc0c3d47db4ba021 Mon Sep 17 00:00:00 2001 From: Tathagata Das Date: Tue, 4 Sep 2012 15:37:46 -0700 Subject: [PATCH 051/226] Updated the return types of PairDStreamFunctions to return DStreams instead of ShuffleDStreams for cleaner abstraction. 
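Returning the abstract DStream type rather than ShuffledDStream, ReducedWindowedDStream, or StateDStream keeps those implementation classes free to change without breaking callers, who only ever program against DStream. A sketch of the caller-facing effect (`lines` is an illustrative name; the pair operations come from the implicit conversion in `StreamingContext._`):

    import spark.streaming._
    import spark.streaming.StreamingContext._

    def windowedWordCounts(lines: DStream[String]): DStream[(String, Int)] = {
      def add(a: Int, b: Int) = a + b
      def sub(a: Int, b: Int) = a - b
      val pairs: DStream[(String, Int)] = lines.flatMap(_.split(" ").toSeq).map(w => (w, 1))
      // Incremental 30-second window: new batches are added, expired batches subtracted.
      pairs.reduceByKeyAndWindow(add _, sub _, Seconds(30), Seconds(2))
    }

After this patch every intermediate value in such a chain types as a plain DStream, so no shuffle-specific class leaks into user code.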
--- .../streaming/PairDStreamFunctions.scala | 56 ++++++++++++------- 1 file changed, 35 insertions(+), 21 deletions(-) diff --git a/streaming/src/main/scala/spark/streaming/PairDStreamFunctions.scala b/streaming/src/main/scala/spark/streaming/PairDStreamFunctions.scala index 13db34ac80..3fd0a16bf0 100644 --- a/streaming/src/main/scala/spark/streaming/PairDStreamFunctions.scala +++ b/streaming/src/main/scala/spark/streaming/PairDStreamFunctions.scala @@ -19,32 +19,32 @@ extends Serializable { /* DStream operations for key-value pairs */ /* ---------------------------------- */ - def groupByKey(): ShuffledDStream[K, V, ArrayBuffer[V]] = { + def groupByKey(): DStream[(K, Seq[V])] = { groupByKey(defaultPartitioner()) } - def groupByKey(numPartitions: Int): ShuffledDStream[K, V, ArrayBuffer[V]] = { + def groupByKey(numPartitions: Int): DStream[(K, Seq[V])] = { groupByKey(defaultPartitioner(numPartitions)) } - def groupByKey(partitioner: Partitioner): ShuffledDStream[K, V, ArrayBuffer[V]] = { + def groupByKey(partitioner: Partitioner): DStream[(K, Seq[V])] = { def createCombiner(v: V) = ArrayBuffer[V](v) def mergeValue(c: ArrayBuffer[V], v: V) = (c += v) def mergeCombiner(c1: ArrayBuffer[V], c2: ArrayBuffer[V]) = (c1 ++ c2) - combineByKey[ArrayBuffer[V]](createCombiner _, mergeValue _, mergeCombiner _, partitioner) + combineByKey(createCombiner _, mergeValue _, mergeCombiner _, partitioner).asInstanceOf[DStream[(K, Seq[V])]] } - def reduceByKey(reduceFunc: (V, V) => V): ShuffledDStream[K, V, V] = { + def reduceByKey(reduceFunc: (V, V) => V): DStream[(K, V)] = { reduceByKey(reduceFunc, defaultPartitioner()) } - def reduceByKey(reduceFunc: (V, V) => V, numPartitions: Int): ShuffledDStream[K, V, V] = { + def reduceByKey(reduceFunc: (V, V) => V, numPartitions: Int): DStream[(K, V)] = { reduceByKey(reduceFunc, defaultPartitioner(numPartitions)) } - def reduceByKey(reduceFunc: (V, V) => V, partitioner: Partitioner): ShuffledDStream[K, V, V] = { + def reduceByKey(reduceFunc: (V, V) => V, partitioner: Partitioner): DStream[(K, V)] = { val cleanedReduceFunc = ssc.sc.clean(reduceFunc) - combineByKey[V]((v: V) => v, cleanedReduceFunc, cleanedReduceFunc, partitioner) + combineByKey((v: V) => v, cleanedReduceFunc, cleanedReduceFunc, partitioner) } private def combineByKey[C: ClassManifest]( @@ -55,11 +55,15 @@ extends Serializable { new ShuffledDStream[K, V, C](stream, createCombiner, mergeValue, mergeCombiner, partitioner) } - def groupByKeyAndWindow(windowTime: Time, slideTime: Time): ShuffledDStream[K, V, ArrayBuffer[V]] = { + def groupByKeyAndWindow(windowTime: Time, slideTime: Time): DStream[(K, Seq[V])] = { groupByKeyAndWindow(windowTime, slideTime, defaultPartitioner()) } - def groupByKeyAndWindow(windowTime: Time, slideTime: Time, numPartitions: Int): ShuffledDStream[K, V, ArrayBuffer[V]] = { + def groupByKeyAndWindow( + windowTime: Time, + slideTime: Time, + numPartitions: Int + ): DStream[(K, Seq[V])] = { groupByKeyAndWindow(windowTime, slideTime, defaultPartitioner(numPartitions)) } @@ -67,15 +71,24 @@ extends Serializable { windowTime: Time, slideTime: Time, partitioner: Partitioner - ): ShuffledDStream[K, V, ArrayBuffer[V]] = { + ): DStream[(K, Seq[V])] = { stream.window(windowTime, slideTime).groupByKey(partitioner) } - def reduceByKeyAndWindow(reduceFunc: (V, V) => V, windowTime: Time, slideTime: Time): ShuffledDStream[K, V, V] = { + def reduceByKeyAndWindow( + reduceFunc: (V, V) => V, + windowTime: Time, + slideTime: Time + ): DStream[(K, V)] = { reduceByKeyAndWindow(reduceFunc, windowTime, 
slideTime, defaultPartitioner()) } - def reduceByKeyAndWindow(reduceFunc: (V, V) => V, windowTime: Time, slideTime: Time, numPartitions: Int): ShuffledDStream[K, V, V] = { + def reduceByKeyAndWindow( + reduceFunc: (V, V) => V, + windowTime: Time, + slideTime: Time, + numPartitions: Int + ): DStream[(K, V)] = { reduceByKeyAndWindow(reduceFunc, windowTime, slideTime, defaultPartitioner(numPartitions)) } @@ -84,7 +97,7 @@ extends Serializable { windowTime: Time, slideTime: Time, partitioner: Partitioner - ): ShuffledDStream[K, V, V] = { + ): DStream[(K, V)] = { stream.window(windowTime, slideTime).reduceByKey(ssc.sc.clean(reduceFunc), partitioner) } @@ -93,12 +106,13 @@ extends Serializable { // so that new elements introduced in the window can be "added" using // reduceFunc to the previous window's result and old elements can be // "subtracted using invReduceFunc. + def reduceByKeyAndWindow( reduceFunc: (V, V) => V, invReduceFunc: (V, V) => V, windowTime: Time, slideTime: Time - ): ReducedWindowedDStream[K, V] = { + ): DStream[(K, V)] = { reduceByKeyAndWindow( reduceFunc, invReduceFunc, windowTime, slideTime, defaultPartitioner()) @@ -110,7 +124,7 @@ extends Serializable { windowTime: Time, slideTime: Time, numPartitions: Int - ): ReducedWindowedDStream[K, V] = { + ): DStream[(K, V)] = { reduceByKeyAndWindow( reduceFunc, invReduceFunc, windowTime, slideTime, defaultPartitioner(numPartitions)) @@ -122,7 +136,7 @@ extends Serializable { windowTime: Time, slideTime: Time, partitioner: Partitioner - ): ReducedWindowedDStream[K, V] = { + ): DStream[(K, V)] = { val cleanedReduceFunc = ssc.sc.clean(reduceFunc) val cleanedInvReduceFunc = ssc.sc.clean(invReduceFunc) @@ -137,21 +151,21 @@ extends Serializable { // def updateStateByKey[S <: AnyRef : ClassManifest]( updateFunc: (Seq[V], S) => S - ): StateDStream[K, V, S] = { + ): DStream[(K, S)] = { updateStateByKey(updateFunc, defaultPartitioner()) } def updateStateByKey[S <: AnyRef : ClassManifest]( updateFunc: (Seq[V], S) => S, numPartitions: Int - ): StateDStream[K, V, S] = { + ): DStream[(K, S)] = { updateStateByKey(updateFunc, defaultPartitioner(numPartitions)) } def updateStateByKey[S <: AnyRef : ClassManifest]( updateFunc: (Seq[V], S) => S, partitioner: Partitioner - ): StateDStream[K, V, S] = { + ): DStream[(K, S)] = { val func = (iterator: Iterator[(K, Seq[V], S)]) => { iterator.map(tuple => (tuple._1, updateFunc(tuple._2, tuple._3))) } @@ -162,7 +176,7 @@ extends Serializable { updateFunc: (Iterator[(K, Seq[V], S)]) => Iterator[(K, S)], partitioner: Partitioner, rememberPartitioner: Boolean - ): StateDStream[K, V, S] = { + ): DStream[(K, S)] = { new StateDStream(stream, ssc.sc.clean(updateFunc), partitioner, rememberPartitioner) } } From 2ff72f60ac6e42e9511fcf4effd1df89da8dc410 Mon Sep 17 00:00:00 2001 From: haoyuan Date: Tue, 4 Sep 2012 17:55:55 -0700 Subject: [PATCH 052/226] add TopKWordCountRaw.scala --- .../streaming/examples/TopKWordCountRaw.scala | 86 +++++++++++++++++++ 1 file changed, 86 insertions(+) create mode 100644 streaming/src/main/scala/spark/streaming/examples/TopKWordCountRaw.scala diff --git a/streaming/src/main/scala/spark/streaming/examples/TopKWordCountRaw.scala b/streaming/src/main/scala/spark/streaming/examples/TopKWordCountRaw.scala new file mode 100644 index 0000000000..7c01435f85 --- /dev/null +++ b/streaming/src/main/scala/spark/streaming/examples/TopKWordCountRaw.scala @@ -0,0 +1,86 @@ +package spark.streaming.examples + +import spark.util.IntParam +import spark.storage.StorageLevel +import spark.streaming._ 
+import spark.streaming.StreamingContext._ + +object WordCountRaw { + def main(args: Array[String]) { + if (args.length != 7) { + System.err.println("Usage: WordCountRaw ") + System.exit(1) + } + + val Array(master, IntParam(streams), host, IntParam(port), IntParam(batchMs), + IntParam(chkptMs), IntParam(reduces)) = args + + // Create the context and set the batch size + val ssc = new StreamingContext(master, "TopKWordCountRaw") + ssc.setBatchDuration(Milliseconds(batchMs)) + + // Make sure some tasks have started on each node + ssc.sc.parallelize(1 to 1000, 1000).count() + ssc.sc.parallelize(1 to 1000, 1000).count() + ssc.sc.parallelize(1 to 1000, 1000).count() + + val rawStreams = (1 to streams).map(_ => + ssc.createRawNetworkStream[String](host, port, StorageLevel.MEMORY_ONLY_2)).toArray + val union = new UnifiedDStream(rawStreams) + + import WordCount2_ExtraFunctions._ + + val windowedCounts = union.mapPartitions(splitAndCountPartitions) + .reduceByKeyAndWindow(add _, subtract _, Seconds(30), Milliseconds(batchMs), reduces) + windowedCounts.persist(StorageLevel.MEMORY_ONLY_DESER, StorageLevel.MEMORY_ONLY_DESER_2, + Milliseconds(chkptMs)) + //windowedCounts.print() // TODO: something else? + + def topK(data: Iterator[(String, JLong)], k: Int): Iterator[(String, JLong)] = { + val taken = new Array[(String, JLong)](k) + + var i = 0 + var len = 0 + var done = false + var value: (String, JLong) = null + var swap: (String, JLong) = null + var count = 0 + + while(data.hasNext) { + value = data.next + count += 1 + println("count = " + count) + if (len == 0) { + taken(0) = value + len = 1 + } else if (len < k || value._2 > taken(len - 1)._2) { + if (len < k) { + len += 1 + } + taken(len - 1) = value + i = len - 1 + while(i > 0 && taken(i - 1)._2 < taken(i)._2) { + swap = taken(i) + taken(i) = taken(i-1) + taken(i - 1) = swap + i -= 1 + } + } + } + println("Took " + len + " out of " + count + " items") + return taken.toIterator + } + + val k = 50 + val partialTopKWindowedCounts = windowedCounts.mapPartitions(topK(_, k)) + partialTopKWindowedCounts.foreachRDD(rdd => { + val collectedCounts = rdd.collect + println("Collected " + collectedCounts.size + " items") + topK(collectedCounts.toIterator, k).foreach(println) + }) + +// windowedCounts.foreachRDD(r => println("Element count: " + r.count())) + + ssc.start() + } +} From 96a1f2277d1c7e0ff970eafcb3ca53cd20b1b89c Mon Sep 17 00:00:00 2001 From: haoyuan Date: Tue, 4 Sep 2012 18:03:34 -0700 Subject: [PATCH 053/226] fix the compile error in TopKWordCountRaw.scala --- .../spark/streaming/examples/TopKWordCountRaw.scala | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/streaming/src/main/scala/spark/streaming/examples/TopKWordCountRaw.scala b/streaming/src/main/scala/spark/streaming/examples/TopKWordCountRaw.scala index 7c01435f85..be3188c5ed 100644 --- a/streaming/src/main/scala/spark/streaming/examples/TopKWordCountRaw.scala +++ b/streaming/src/main/scala/spark/streaming/examples/TopKWordCountRaw.scala @@ -5,10 +5,10 @@ import spark.storage.StorageLevel import spark.streaming._ import spark.streaming.StreamingContext._ -object WordCountRaw { +object TopKWordCountRaw { def main(args: Array[String]) { if (args.length != 7) { - System.err.println("Usage: WordCountRaw ") + System.err.println("Usage: TopKWordCountRaw ") System.exit(1) } @@ -36,14 +36,14 @@ object WordCountRaw { Milliseconds(chkptMs)) //windowedCounts.print() // TODO: something else? 
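// Note on the program above (an illustrative sketch, not part of the patch): it takes a top K
// per partition with mapPartitions and reruns the same selection on the driver after collect().
// That is sufficient because the windowed counts are already shuffled by key, so each word's
// total lives in exactly one partition, and any member of the global top K is necessarily in
// the top K of the partition that holds it. The hand-rolled insertion loop avoids sorting a
// whole partition; a shorter but allocation-heavier equivalent would be:
def topKByCount(data: Iterator[(String, Long)], k: Int): Iterator[(String, Long)] = {
  data.toArray.sortBy(p => -p._2).take(k).iterator
}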
- def topK(data: Iterator[(String, JLong)], k: Int): Iterator[(String, JLong)] = { - val taken = new Array[(String, JLong)](k) + def topK(data: Iterator[(String, Long)], k: Int): Iterator[(String, Long)] = { + val taken = new Array[(String, Long)](k) var i = 0 var len = 0 var done = false - var value: (String, JLong) = null - var swap: (String, JLong) = null + var value: (String, Long) = null + var swap: (String, Long) = null var count = 0 while(data.hasNext) { From 7c09ad0e04639040864236cf13a9fedff6736b5d Mon Sep 17 00:00:00 2001 From: Tathagata Das Date: Tue, 4 Sep 2012 19:11:49 -0700 Subject: [PATCH 054/226] Changed DStream member access permissions from private to protected. Updated StateDStream to checkpoint RDDs and forget lineage. --- core/src/main/scala/spark/RDD.scala | 2 +- .../main/scala/spark/streaming/DStream.scala | 16 ++-- .../spark/streaming/QueueInputDStream.scala | 2 +- .../scala/spark/streaming/StateDStream.scala | 93 ++++++++++++++----- .../scala/spark/streaming/DStreamSuite.scala | 4 +- 5 files changed, 81 insertions(+), 36 deletions(-) diff --git a/core/src/main/scala/spark/RDD.scala b/core/src/main/scala/spark/RDD.scala index 3fe8e8a4bf..d28f3593fe 100644 --- a/core/src/main/scala/spark/RDD.scala +++ b/core/src/main/scala/spark/RDD.scala @@ -94,7 +94,7 @@ abstract class RDD[T: ClassManifest](@transient sc: SparkContext) extends Serial def getStorageLevel = storageLevel - def checkpoint(level: StorageLevel = StorageLevel.DISK_AND_MEMORY_DESER): RDD[T] = { + def checkpoint(level: StorageLevel = StorageLevel.DISK_AND_MEMORY_DESER_2): RDD[T] = { if (!level.useDisk && level.replication < 2) { throw new Exception("Cannot checkpoint without using disk or replication (level requested was " + level + ")") } diff --git a/streaming/src/main/scala/spark/streaming/DStream.scala b/streaming/src/main/scala/spark/streaming/DStream.scala index 9b0115eef6..20f1c4db20 100644 --- a/streaming/src/main/scala/spark/streaming/DStream.scala +++ b/streaming/src/main/scala/spark/streaming/DStream.scala @@ -41,17 +41,17 @@ extends Logging with Serializable { */ // Variable to store the RDDs generated earlier in time - @transient private val generatedRDDs = new HashMap[Time, RDD[T]] () + @transient protected val generatedRDDs = new HashMap[Time, RDD[T]] () // Variable to be set to the first time seen by the DStream (effective time zero) - private[streaming] var zeroTime: Time = null + protected[streaming] var zeroTime: Time = null // Variable to specify storage level - private var storageLevel: StorageLevel = StorageLevel.NONE + protected var storageLevel: StorageLevel = StorageLevel.NONE // Checkpoint level and checkpoint interval - private var checkpointLevel: StorageLevel = StorageLevel.NONE // NONE means don't checkpoint - private var checkpointInterval: Time = null + protected var checkpointLevel: StorageLevel = StorageLevel.NONE // NONE means don't checkpoint + protected var checkpointInterval: Time = null // Change this RDD's storage level def persist( @@ -84,7 +84,7 @@ extends Logging with Serializable { * the validity of future times is calculated. This method also recursively initializes * its parent DStreams. 
*/ - def initialize(time: Time) { + protected[streaming] def initialize(time: Time) { if (zeroTime == null) { zeroTime = time } @@ -93,7 +93,7 @@ extends Logging with Serializable { } /** This method checks whether the 'time' is valid wrt slideTime for generating RDD */ - private def isTimeValid (time: Time): Boolean = { + protected def isTimeValid (time: Time): Boolean = { if (!isInitialized) { throw new Exception (this.toString + " has not been initialized") } else if (time < zeroTime || ! (time - zeroTime).isMultipleOf(slideTime)) { @@ -208,7 +208,7 @@ extends Logging with Serializable { new TransformedDStream(this, ssc.sc.clean(transformFunc)) } - private[streaming] def toQueue = { + def toQueue = { val queue = new ArrayBlockingQueue[RDD[T]](10000) this.foreachRDD(rdd => { queue.add(rdd) diff --git a/streaming/src/main/scala/spark/streaming/QueueInputDStream.scala b/streaming/src/main/scala/spark/streaming/QueueInputDStream.scala index bab48ff954..de30297c7d 100644 --- a/streaming/src/main/scala/spark/streaming/QueueInputDStream.scala +++ b/streaming/src/main/scala/spark/streaming/QueueInputDStream.scala @@ -7,7 +7,7 @@ import scala.collection.mutable.Queue import scala.collection.mutable.ArrayBuffer class QueueInputDStream[T: ClassManifest]( - ssc: StreamingContext, + @transient ssc: StreamingContext, val queue: Queue[RDD[T]], oneAtATime: Boolean, defaultRDD: RDD[T] diff --git a/streaming/src/main/scala/spark/streaming/StateDStream.scala b/streaming/src/main/scala/spark/streaming/StateDStream.scala index f313d8c162..4cb780c006 100644 --- a/streaming/src/main/scala/spark/streaming/StateDStream.scala +++ b/streaming/src/main/scala/spark/streaming/StateDStream.scala @@ -1,10 +1,11 @@ package spark.streaming import spark.RDD +import spark.BlockRDD import spark.Partitioner import spark.MapPartitionsRDD import spark.SparkContext._ - +import spark.storage.StorageLevel class StateDStream[K: ClassManifest, V: ClassManifest, S <: AnyRef : ClassManifest]( parent: DStream[(K, V)], @@ -22,6 +23,47 @@ class StateDStream[K: ClassManifest, V: ClassManifest, S <: AnyRef : ClassManife override def slideTime = parent.slideTime + override def getOrCompute(time: Time): Option[RDD[(K, S)]] = { + generatedRDDs.get(time) match { + case Some(oldRDD) => { + if (checkpointInterval != null && (time - zeroTime).isMultipleOf(checkpointInterval) && oldRDD.dependencies.size > 0) { + val r = oldRDD + val oldRDDBlockIds = oldRDD.splits.map(s => "rdd:" + r.id + ":" + s.index) + val checkpointedRDD = new BlockRDD[(K, S)](ssc.sc, oldRDDBlockIds) { + override val partitioner = oldRDD.partitioner + } + generatedRDDs.update(time, checkpointedRDD) + logInfo("Updated RDD of time " + time + " with its checkpointed version") + Some(checkpointedRDD) + } else { + Some(oldRDD) + } + } + case None => { + if (isTimeValid(time)) { + compute(time) match { + case Some(newRDD) => { + if (checkpointInterval != null && (time - zeroTime).isMultipleOf(checkpointInterval)) { + newRDD.persist(checkpointLevel) + logInfo("Persisting " + newRDD + " to " + checkpointLevel + " at time " + time) + } else if (storageLevel != StorageLevel.NONE) { + newRDD.persist(storageLevel) + logInfo("Persisting " + newRDD + " to " + storageLevel + " at time " + time) + } + generatedRDDs.put(time, newRDD) + Some(newRDD) + } + case None => { + None + } + } + } else { + None + } + } + } + } + override def compute(validTime: Time): Option[RDD[(K, S)]] = { // Try to get the previous state RDD @@ -29,26 +71,27 @@ class StateDStream[K: ClassManifest, V: ClassManifest, S 
<: AnyRef : ClassManife case Some(prevStateRDD) => { // If previous state RDD exists - // Define the function for the mapPartition operation on cogrouped RDD; - // first map the cogrouped tuple to tuples of required type, - // and then apply the update function - val func = (iterator: Iterator[(K, (Seq[V], Seq[S]))]) => { - val i = iterator.map(t => { - (t._1, t._2._1, t._2._2.headOption.getOrElse(null.asInstanceOf[S])) - }) - updateFunc(i) - } - // Try to get the parent RDD parent.getOrCompute(validTime) match { case Some(parentRDD) => { // If parent RDD exists, then compute as usual + + // Define the function for the mapPartition operation on cogrouped RDD; + // first map the cogrouped tuple to tuples of required type, + // and then apply the update function + val updateFuncLocal = updateFunc + val mapPartitionFunc = (iterator: Iterator[(K, (Seq[V], Seq[S]))]) => { + val i = iterator.map(t => { + (t._1, t._2._1, t._2._2.headOption.getOrElse(null.asInstanceOf[S])) + }) + updateFuncLocal(i) + } val cogroupedRDD = parentRDD.cogroup(prevStateRDD, partitioner) - val stateRDD = new SpecialMapPartitionsRDD(cogroupedRDD, func) - logDebug("Generating state RDD for time " + validTime) + val stateRDD = new SpecialMapPartitionsRDD(cogroupedRDD, mapPartitionFunc) + //logDebug("Generating state RDD for time " + validTime) return Some(stateRDD) } case None => { // If parent RDD does not exist, then return old state RDD - logDebug("Generating state RDD for time " + validTime + " (no change)") + //logDebug("Generating state RDD for time " + validTime + " (no change)") return Some(prevStateRDD) } } @@ -56,23 +99,25 @@ class StateDStream[K: ClassManifest, V: ClassManifest, S <: AnyRef : ClassManife case None => { // If previous session RDD does not exist (first input data) - // Define the function for the mapPartition operation on grouped RDD; - // first map the grouped tuple to tuples of required type, - // and then apply the update function - val func = (iterator: Iterator[(K, Seq[V])]) => { - updateFunc(iterator.map(tuple => (tuple._1, tuple._2, null.asInstanceOf[S]))) - } - // Try to get the parent RDD parent.getOrCompute(validTime) match { case Some(parentRDD) => { // If parent RDD exists, then compute as usual + + // Define the function for the mapPartition operation on grouped RDD; + // first map the grouped tuple to tuples of required type, + // and then apply the update function + val updateFuncLocal = updateFunc + val mapPartitionFunc = (iterator: Iterator[(K, Seq[V])]) => { + updateFuncLocal(iterator.map(tuple => (tuple._1, tuple._2, null.asInstanceOf[S]))) + } + val groupedRDD = parentRDD.groupByKey(partitioner) - val sessionRDD = new SpecialMapPartitionsRDD(groupedRDD, func) - logDebug("Generating state RDD for time " + validTime + " (first)") + val sessionRDD = new SpecialMapPartitionsRDD(groupedRDD, mapPartitionFunc) + //logDebug("Generating state RDD for time " + validTime + " (first)") return Some(sessionRDD) } case None => { // If parent RDD does not exist, then nothing to do! 
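Both hunks above first copy updateFunc into a local val (updateFuncLocal) and have the mapPartition closure refer to that local instead of the field, presumably so that only the function is captured and serialized with the task rather than the enclosing DStream. A minimal standalone sketch of this capture pattern, using illustrative names that are not part of the patch:

// Sketch of the closure-capture pattern: a function shipped to executors is
// serialized together with everything it references, so referring to a field
// drags the whole enclosing object along. Copying the field to a local val
// first keeps the captured state down to the function itself.
object ClosureCaptureSketch {
  class Holder(val updateFunc: Int => Int) {        // stands in for the DStream
    def makeMapper(): Iterator[Int] => Iterator[Int] = {
      val updateFuncLocal = updateFunc              // capture only the function
      (iter: Iterator[Int]) => iter.map(updateFuncLocal)
    }
  }

  def main(args: Array[String]) {
    val mapper = new Holder(_ + 1).makeMapper()
    println(mapper(Iterator(1, 2, 3)).toList)       // List(2, 3, 4)
  }
}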
- logDebug("Not generating state RDD (no previous state, no parent)") + //logDebug("Not generating state RDD (no previous state, no parent)") return None } } diff --git a/streaming/src/test/scala/spark/streaming/DStreamSuite.scala b/streaming/src/test/scala/spark/streaming/DStreamSuite.scala index 030f351080..fc00952afe 100644 --- a/streaming/src/test/scala/spark/streaming/DStreamSuite.scala +++ b/streaming/src/test/scala/spark/streaming/DStreamSuite.scala @@ -107,12 +107,12 @@ class DStreamSuite extends FunSuite with BeforeAndAfter with Logging { Seq(("a", 3), ("b", 3), ("c", 3)) ) - val updateStateOp =(s: DStream[String]) => { + val updateStateOp = (s: DStream[String]) => { val updateFunc = (values: Seq[Int], state: RichInt) => { var newState = 0 if (values != null) newState += values.reduce(_ + _) if (state != null) newState += state.self - //println("values = " + values + ", state = " + state + ", " + " new state = " + newState) + println("values = " + values + ", state = " + state + ", " + " new state = " + newState) new RichInt(newState) } s.map(x => (x, 1)).updateStateByKey[RichInt](updateFunc).map(t => (t._1, t._2.self)) From 4ea032a142ab7fb44f92b145cc8d850164419ab5 Mon Sep 17 00:00:00 2001 From: root Date: Wed, 5 Sep 2012 05:53:07 +0000 Subject: [PATCH 055/226] Some changes to make important log output visible even if we set the logging to WARNING --- .../spark/scheduler/cluster/StandaloneSchedulerBackend.scala | 2 +- streaming/src/main/scala/spark/streaming/JobManager.scala | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/core/src/main/scala/spark/scheduler/cluster/StandaloneSchedulerBackend.scala b/core/src/main/scala/spark/scheduler/cluster/StandaloneSchedulerBackend.scala index 83e7c6e036..978b4f2676 100644 --- a/core/src/main/scala/spark/scheduler/cluster/StandaloneSchedulerBackend.scala +++ b/core/src/main/scala/spark/scheduler/cluster/StandaloneSchedulerBackend.scala @@ -99,7 +99,7 @@ class StandaloneSchedulerBackend(scheduler: ClusterScheduler, actorSystem: Actor // Remove a disconnected slave from the cluster def removeSlave(slaveId: String) { - logInfo("Slave " + slaveId + " disconnected, so removing it") + logWarning("Slave " + slaveId + " disconnected, so removing it") val numCores = freeCores(slaveId) actorToSlaveId -= slaveActor(slaveId) addressToSlaveId -= slaveAddress(slaveId) diff --git a/streaming/src/main/scala/spark/streaming/JobManager.scala b/streaming/src/main/scala/spark/streaming/JobManager.scala index 9bf9251519..230d806a89 100644 --- a/streaming/src/main/scala/spark/streaming/JobManager.scala +++ b/streaming/src/main/scala/spark/streaming/JobManager.scala @@ -12,7 +12,7 @@ class JobManager(ssc: StreamingContext, numThreads: Int = 1) extends Logging { SparkEnv.set(ssc.env) try { val timeTaken = job.run() - logInfo("Total delay: %.5f s for job %s (execution: %.5f s)".format( + println("Total delay: %.5f s for job %s (execution: %.5f s)".format( (System.currentTimeMillis() - job.time) / 1000.0, job.id, timeTaken / 1000.0)) } catch { case e: Exception => From b7ad291ac52896af6cb1d882392f3d6fa0cf3b49 Mon Sep 17 00:00:00 2001 From: root Date: Wed, 5 Sep 2012 07:08:07 +0000 Subject: [PATCH 056/226] Tuning Akka for more connections --- core/src/main/scala/spark/util/AkkaUtils.scala | 1 + .../scala/spark/streaming/examples/WordCount2.scala | 10 +++++----- 2 files changed, 6 insertions(+), 5 deletions(-) diff --git a/core/src/main/scala/spark/util/AkkaUtils.scala b/core/src/main/scala/spark/util/AkkaUtils.scala index 57d212e4ca..fd64e224d7 
100644 --- a/core/src/main/scala/spark/util/AkkaUtils.scala +++ b/core/src/main/scala/spark/util/AkkaUtils.scala @@ -31,6 +31,7 @@ object AkkaUtils { akka.remote.netty.hostname = "%s" akka.remote.netty.port = %d akka.remote.netty.connection-timeout = 1s + akka.remote.netty.execution-pool-size = 10 """.format(host, port)) val actorSystem = ActorSystem("spark", akkaConf, getClass.getClassLoader) diff --git a/streaming/src/main/scala/spark/streaming/examples/WordCount2.scala b/streaming/src/main/scala/spark/streaming/examples/WordCount2.scala index aa542ba07d..8561e7f079 100644 --- a/streaming/src/main/scala/spark/streaming/examples/WordCount2.scala +++ b/streaming/src/main/scala/spark/streaming/examples/WordCount2.scala @@ -62,10 +62,10 @@ object WordCount2_ExtraFunctions { object WordCount2 { def warmup(sc: SparkContext) { - (0 until 10).foreach {i => - sc.parallelize(1 to 20000000, 1000) + (0 until 3).foreach {i => + sc.parallelize(1 to 20000000, 500) .map(x => (x % 337, x % 1331)) - .reduceByKey(_ + _) + .reduceByKey(_ + _, 100) .count() } } @@ -84,11 +84,11 @@ object WordCount2 { val ssc = new StreamingContext(master, "WordCount2") ssc.setBatchDuration(batchDuration) - //warmup(ssc.sc) + warmup(ssc.sc) val data = ssc.sc.textFile(file, mapTasks.toInt).persist( new StorageLevel(false, true, false, 3)) // Memory only, serialized, 3 replicas - println("Data count: " + data.count()) + println("Data count: " + data.map(x => if (x == "") 1 else x.split(" ").size / x.split(" ").size).count()) println("Data count: " + data.count()) println("Data count: " + data.count()) From 75487b2f5a6abedd322520f759b814ec643aea01 Mon Sep 17 00:00:00 2001 From: root Date: Wed, 5 Sep 2012 08:14:50 +0000 Subject: [PATCH 057/226] Broadcast the JobConf in HadoopRDD to reduce task sizes --- core/src/main/scala/spark/HadoopRDD.scala | 5 +++-- core/src/main/scala/spark/KryoSerializer.scala | 4 ++++ 2 files changed, 7 insertions(+), 2 deletions(-) diff --git a/core/src/main/scala/spark/HadoopRDD.scala b/core/src/main/scala/spark/HadoopRDD.scala index f282a4023b..0befca582d 100644 --- a/core/src/main/scala/spark/HadoopRDD.scala +++ b/core/src/main/scala/spark/HadoopRDD.scala @@ -42,7 +42,8 @@ class HadoopRDD[K, V]( minSplits: Int) extends RDD[(K, V)](sc) { - val serializableConf = new SerializableWritable(conf) + // A Hadoop JobConf can be about 10 KB, which is pretty big, so broadcast it + val confBroadcast = sc.broadcast(new SerializableWritable(conf)) @transient val splits_ : Array[Split] = { @@ -66,7 +67,7 @@ class HadoopRDD[K, V]( val split = theSplit.asInstanceOf[HadoopSplit] var reader: RecordReader[K, V] = null - val conf = serializableConf.value + val conf = confBroadcast.value.value val fmt = createInputFormat(conf) reader = fmt.getRecordReader(split.inputSplit.value, conf, Reporter.NULL) diff --git a/core/src/main/scala/spark/KryoSerializer.scala b/core/src/main/scala/spark/KryoSerializer.scala index 65d0532bd5..3d042b2f11 100644 --- a/core/src/main/scala/spark/KryoSerializer.scala +++ b/core/src/main/scala/spark/KryoSerializer.scala @@ -10,6 +10,7 @@ import scala.collection.mutable import com.esotericsoftware.kryo._ import com.esotericsoftware.kryo.{Serializer => KSerializer} import com.esotericsoftware.kryo.serialize.ClassSerializer +import com.esotericsoftware.kryo.serialize.SerializableSerializer import de.javakaffee.kryoserializers.KryoReflectionFactorySupport import spark.storage._ @@ -203,6 +204,9 @@ class KryoSerializer extends Serializer with Logging { kryo.register(classOf[Class[_]], new 
ClassSerializer(kryo)) kryo.setRegistrationOptional(true) + // Allow sending SerializableWritable + kryo.register(classOf[SerializableWritable[_]], new SerializableSerializer()) + // Register some commonly used Scala singleton objects. Because these // are singletons, we must return the exact same local object when we // deserialize rather than returning a clone as FieldSerializer would. From efc7668d16b2a58f8d074c1cdaeae4b37dae1c9c Mon Sep 17 00:00:00 2001 From: root Date: Wed, 5 Sep 2012 08:22:57 +0000 Subject: [PATCH 058/226] Allow serializing HttpBroadcast through Kryo --- core/src/main/scala/spark/KryoSerializer.scala | 2 ++ 1 file changed, 2 insertions(+) diff --git a/core/src/main/scala/spark/KryoSerializer.scala b/core/src/main/scala/spark/KryoSerializer.scala index 3d042b2f11..8a3f565071 100644 --- a/core/src/main/scala/spark/KryoSerializer.scala +++ b/core/src/main/scala/spark/KryoSerializer.scala @@ -13,6 +13,7 @@ import com.esotericsoftware.kryo.serialize.ClassSerializer import com.esotericsoftware.kryo.serialize.SerializableSerializer import de.javakaffee.kryoserializers.KryoReflectionFactorySupport +import spark.broadcast._ import spark.storage._ /** @@ -206,6 +207,7 @@ class KryoSerializer extends Serializer with Logging { // Allow sending SerializableWritable kryo.register(classOf[SerializableWritable[_]], new SerializableSerializer()) + kryo.register(classOf[HttpBroadcast[_]], new SerializableSerializer()) // Register some commonly used Scala singleton objects. Because these // are singletons, we must return the exact same local object when we From 3fa0d7f0c9883ab77e89b7bcf70b7b11df9a4184 Mon Sep 17 00:00:00 2001 From: root Date: Wed, 5 Sep 2012 08:28:15 +0000 Subject: [PATCH 059/226] Serialize BlockRDD more efficiently --- core/src/main/scala/spark/BlockRDD.scala | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/core/src/main/scala/spark/BlockRDD.scala b/core/src/main/scala/spark/BlockRDD.scala index ea009f0f4f..daabc0d566 100644 --- a/core/src/main/scala/spark/BlockRDD.scala +++ b/core/src/main/scala/spark/BlockRDD.scala @@ -7,7 +7,8 @@ class BlockRDDSplit(val blockId: String, idx: Int) extends Split { } -class BlockRDD[T: ClassManifest](sc: SparkContext, blockIds: Array[String]) extends RDD[T](sc) { +class BlockRDD[T: ClassManifest](sc: SparkContext, @transient blockIds: Array[String]) + extends RDD[T](sc) { @transient val splits_ = (0 until blockIds.size).map(i => { From 1d6b36d3c3698090b35d8e7c4f88cac410f9ea01 Mon Sep 17 00:00:00 2001 From: root Date: Wed, 5 Sep 2012 22:26:37 +0000 Subject: [PATCH 060/226] Further tuning for network performance --- .../main/scala/spark/storage/BlockMessage.scala | 14 +------------- core/src/main/scala/spark/util/AkkaUtils.scala | 3 ++- 2 files changed, 3 insertions(+), 14 deletions(-) diff --git a/core/src/main/scala/spark/storage/BlockMessage.scala b/core/src/main/scala/spark/storage/BlockMessage.scala index 0b2ed69e07..607633c6df 100644 --- a/core/src/main/scala/spark/storage/BlockMessage.scala +++ b/core/src/main/scala/spark/storage/BlockMessage.scala @@ -12,7 +12,7 @@ case class GetBlock(id: String) case class GotBlock(id: String, data: ByteBuffer) case class PutBlock(id: String, data: ByteBuffer, level: StorageLevel) -class BlockMessage() extends Logging{ +class BlockMessage() { // Un-initialized: typ = 0 // GetBlock: typ = 1 // GotBlock: typ = 2 @@ -22,8 +22,6 @@ class BlockMessage() extends Logging{ private var data: ByteBuffer = null private var level: StorageLevel = null - initLogging() - def set(getBlock: 
GetBlock) { typ = BlockMessage.TYPE_GET_BLOCK id = getBlock.id @@ -62,8 +60,6 @@ class BlockMessage() extends Logging{ } id = idBuilder.toString() - logDebug("Set from buffer Result: " + typ + " " + id) - logDebug("Buffer position is " + buffer.position) if (typ == BlockMessage.TYPE_PUT_BLOCK) { val booleanInt = buffer.getInt() @@ -77,23 +73,18 @@ class BlockMessage() extends Logging{ } data.put(buffer) data.flip() - logDebug("Set from buffer Result 2: " + level + " " + data) } else if (typ == BlockMessage.TYPE_GOT_BLOCK) { val dataLength = buffer.getInt() - logDebug("Data length is "+ dataLength) - logDebug("Buffer position is " + buffer.position) data = ByteBuffer.allocate(dataLength) if (dataLength != buffer.remaining) { throw new Exception("Error parsing buffer") } data.put(buffer) data.flip() - logDebug("Set from buffer Result 3: " + data) } val finishTime = System.currentTimeMillis - logDebug("Converted " + id + " from bytebuffer in " + (finishTime - startTime) / 1000.0 + " s") } def set(bufferMsg: BufferMessage) { @@ -145,8 +136,6 @@ class BlockMessage() extends Logging{ buffers += data } - logDebug("Start to log buffers.") - buffers.foreach((x: ByteBuffer) => logDebug("" + x)) /* println() println("BlockMessage: ") @@ -160,7 +149,6 @@ class BlockMessage() extends Logging{ println() */ val finishTime = System.currentTimeMillis - logDebug("Converted " + id + " to buffer message in " + (finishTime - startTime) / 1000.0 + " s") return Message.createBufferMessage(buffers) } diff --git a/core/src/main/scala/spark/util/AkkaUtils.scala b/core/src/main/scala/spark/util/AkkaUtils.scala index fd64e224d7..330bb42e59 100644 --- a/core/src/main/scala/spark/util/AkkaUtils.scala +++ b/core/src/main/scala/spark/util/AkkaUtils.scala @@ -31,7 +31,8 @@ object AkkaUtils { akka.remote.netty.hostname = "%s" akka.remote.netty.port = %d akka.remote.netty.connection-timeout = 1s - akka.remote.netty.execution-pool-size = 10 + akka.remote.netty.execution-pool-size = 4 + akka.actor.default-dispatcher.throughput = 20 """.format(host, port)) val actorSystem = ActorSystem("spark", akkaConf, getClass.getClassLoader) From dc68febdce53efea43ee1ab91c05b14b7a5eae30 Mon Sep 17 00:00:00 2001 From: root Date: Wed, 5 Sep 2012 23:06:59 +0000 Subject: [PATCH 061/226] User Spark's closure serializer for the ShuffleMapTask cache --- core/src/main/scala/spark/scheduler/ShuffleMapTask.scala | 9 ++++----- .../scala/spark/scheduler/cluster/ClusterScheduler.scala | 1 + 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/core/src/main/scala/spark/scheduler/ShuffleMapTask.scala b/core/src/main/scala/spark/scheduler/ShuffleMapTask.scala index 73479bff01..f1eae9bc88 100644 --- a/core/src/main/scala/spark/scheduler/ShuffleMapTask.scala +++ b/core/src/main/scala/spark/scheduler/ShuffleMapTask.scala @@ -26,7 +26,8 @@ object ShuffleMapTask { return old } else { val out = new ByteArrayOutputStream - val objOut = new ObjectOutputStream(new GZIPOutputStream(out)) + val ser = SparkEnv.get.closureSerializer.newInstance + val objOut = ser.serializeStream(new GZIPOutputStream(out)) objOut.writeObject(rdd) objOut.writeObject(dep) objOut.close() @@ -45,10 +46,8 @@ object ShuffleMapTask { } else { val loader = Thread.currentThread.getContextClassLoader val in = new GZIPInputStream(new ByteArrayInputStream(bytes)) - val objIn = new ObjectInputStream(in) { - override def resolveClass(desc: ObjectStreamClass) = - Class.forName(desc.getName, false, loader) - } + val ser = SparkEnv.get.closureSerializer.newInstance + val objIn = 
ser.deserializeStream(in) val rdd = objIn.readObject().asInstanceOf[RDD[_]] val dep = objIn.readObject().asInstanceOf[ShuffleDependency[_,_,_]] val tuple = (rdd, dep) diff --git a/core/src/main/scala/spark/scheduler/cluster/ClusterScheduler.scala b/core/src/main/scala/spark/scheduler/cluster/ClusterScheduler.scala index 5b59479682..20c82ad0fa 100644 --- a/core/src/main/scala/spark/scheduler/cluster/ClusterScheduler.scala +++ b/core/src/main/scala/spark/scheduler/cluster/ClusterScheduler.scala @@ -115,6 +115,7 @@ class ClusterScheduler(sc: SparkContext) */ def resourceOffers(offers: Seq[WorkerOffer]): Seq[Seq[TaskDescription]] = { synchronized { + SparkEnv.set(sc.env) // Mark each slave as alive and remember its hostname for (o <- offers) { slaveIdToHost(o.slaveId) = o.hostname From 215544820fe70274d9dce1410f61e2052b8bc406 Mon Sep 17 00:00:00 2001 From: root Date: Wed, 5 Sep 2012 23:54:04 +0000 Subject: [PATCH 062/226] Serialize map output locations more efficiently, and only once, in MapOutputTracker --- .../main/scala/spark/MapOutputTracker.scala | 88 +++++++++++++++++-- 1 file changed, 80 insertions(+), 8 deletions(-) diff --git a/core/src/main/scala/spark/MapOutputTracker.scala b/core/src/main/scala/spark/MapOutputTracker.scala index de23eb6f48..cee2391c71 100644 --- a/core/src/main/scala/spark/MapOutputTracker.scala +++ b/core/src/main/scala/spark/MapOutputTracker.scala @@ -1,5 +1,6 @@ package spark +import java.io.{DataInputStream, DataOutputStream, ByteArrayOutputStream, ByteArrayInputStream} import java.util.concurrent.ConcurrentHashMap import akka.actor._ @@ -10,6 +11,7 @@ import akka.util.Duration import akka.util.Timeout import akka.util.duration._ +import scala.collection.mutable.HashMap import scala.collection.mutable.HashSet import spark.storage.BlockManagerId @@ -18,12 +20,11 @@ sealed trait MapOutputTrackerMessage case class GetMapOutputLocations(shuffleId: Int) extends MapOutputTrackerMessage case object StopMapOutputTracker extends MapOutputTrackerMessage -class MapOutputTrackerActor(bmAddresses: ConcurrentHashMap[Int, Array[BlockManagerId]]) -extends Actor with Logging { +class MapOutputTrackerActor(tracker: MapOutputTracker) extends Actor with Logging { def receive = { case GetMapOutputLocations(shuffleId: Int) => logInfo("Asked to get map output locations for shuffle " + shuffleId) - sender ! bmAddresses.get(shuffleId) + sender ! tracker.getSerializedLocations(shuffleId) case StopMapOutputTracker => logInfo("MapOutputTrackerActor stopped!") @@ -39,15 +40,19 @@ class MapOutputTracker(actorSystem: ActorSystem, isMaster: Boolean) extends Logg val timeout = 10.seconds - private var bmAddresses = new ConcurrentHashMap[Int, Array[BlockManagerId]] + var bmAddresses = new ConcurrentHashMap[Int, Array[BlockManagerId]] // Incremented every time a fetch fails so that client nodes know to clear // their cache of map output locations if this happens. 
private var generation: Long = 0 private var generationLock = new java.lang.Object + // Cache a serialized version of the output locations for each shuffle to send them out faster + var cacheGeneration = generation + val cachedSerializedLocs = new HashMap[Int, Array[Byte]] + var trackerActor: ActorRef = if (isMaster) { - val actor = actorSystem.actorOf(Props(new MapOutputTrackerActor(bmAddresses)), name = actorName) + val actor = actorSystem.actorOf(Props(new MapOutputTrackerActor(this)), name = actorName) logInfo("Registered MapOutputTrackerActor actor") actor } else { @@ -134,15 +139,16 @@ class MapOutputTracker(actorSystem: ActorSystem, isMaster: Boolean) extends Logg } // We won the race to fetch the output locs; do so logInfo("Doing the fetch; tracker actor = " + trackerActor) - val fetched = askTracker(GetMapOutputLocations(shuffleId)).asInstanceOf[Array[BlockManagerId]] + val fetchedBytes = askTracker(GetMapOutputLocations(shuffleId)).asInstanceOf[Array[Byte]] + val fetchedLocs = deserializeLocations(fetchedBytes) logInfo("Got the output locations") - bmAddresses.put(shuffleId, fetched) + bmAddresses.put(shuffleId, fetchedLocs) fetching.synchronized { fetching -= shuffleId fetching.notifyAll() } - return fetched + return fetchedLocs } else { return locs } @@ -181,4 +187,70 @@ class MapOutputTracker(actorSystem: ActorSystem, isMaster: Boolean) extends Logg } } } + + def getSerializedLocations(shuffleId: Int): Array[Byte] = { + var locs: Array[BlockManagerId] = null + var generationGotten: Long = -1 + generationLock.synchronized { + if (generation > cacheGeneration) { + cachedSerializedLocs.clear() + cacheGeneration = generation + } + cachedSerializedLocs.get(shuffleId) match { + case Some(bytes) => + return bytes + case None => + locs = bmAddresses.get(shuffleId) + generationGotten = generation + } + } + // If we got here, we failed to find the serialized locations in the cache, so we pulled + // out a snapshot of the locations as "locs"; let's serialize and return that + val bytes = serializeLocations(locs) + // Add them into the table only if the generation hasn't changed while we were working + generationLock.synchronized { + if (generation == generationGotten) { + cachedSerializedLocs(shuffleId) = bytes + } + } + return bytes + } + + // Serialize an array of map output locations into an efficient byte format so that we can send + // it to reduce tasks. We do this by grouping together the locations by block manager ID. + def serializeLocations(locs: Array[BlockManagerId]): Array[Byte] = { + val out = new ByteArrayOutputStream + val dataOut = new DataOutputStream(out) + dataOut.writeInt(locs.length) + val grouped = locs.zipWithIndex.groupBy(_._1) + dataOut.writeInt(grouped.size) + for ((id, pairs) <- grouped) { + dataOut.writeUTF(id.ip) + dataOut.writeInt(id.port) + dataOut.writeInt(pairs.length) + for ((_, blockIndex) <- pairs) { + dataOut.writeInt(blockIndex) + } + } + dataOut.close() + out.toByteArray + } + + // Opposite of serializeLocations. 
+ def deserializeLocations(bytes: Array[Byte]): Array[BlockManagerId] = { + val dataIn = new DataInputStream(new ByteArrayInputStream(bytes)) + val length = dataIn.readInt() + val array = new Array[BlockManagerId](length) + val numGroups = dataIn.readInt() + for (i <- 0 until numGroups) { + val ip = dataIn.readUTF() + val port = dataIn.readInt() + val id = new BlockManagerId(ip, port) + val numBlocks = dataIn.readInt() + for (j <- 0 until numBlocks) { + array(dataIn.readInt()) = id + } + } + array + } } From 2fa6d999fd92cb7ce828278edcd09eecd1f458c1 Mon Sep 17 00:00:00 2001 From: root Date: Thu, 6 Sep 2012 00:16:39 +0000 Subject: [PATCH 063/226] Tuning Akka more --- core/src/main/scala/spark/util/AkkaUtils.scala | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/core/src/main/scala/spark/util/AkkaUtils.scala b/core/src/main/scala/spark/util/AkkaUtils.scala index 330bb42e59..df4e23bfd6 100644 --- a/core/src/main/scala/spark/util/AkkaUtils.scala +++ b/core/src/main/scala/spark/util/AkkaUtils.scala @@ -31,8 +31,8 @@ object AkkaUtils { akka.remote.netty.hostname = "%s" akka.remote.netty.port = %d akka.remote.netty.connection-timeout = 1s - akka.remote.netty.execution-pool-size = 4 - akka.actor.default-dispatcher.throughput = 20 + akka.remote.netty.execution-pool-size = 8 + akka.actor.default-dispatcher.throughput = 30 """.format(host, port)) val actorSystem = ActorSystem("spark", akkaConf, getClass.getClassLoader) From 9ef90c95f4947e47f7c44f952ff8d294e0932a73 Mon Sep 17 00:00:00 2001 From: root Date: Thu, 6 Sep 2012 00:43:46 +0000 Subject: [PATCH 064/226] Bug fix --- core/src/main/scala/spark/MapOutputTracker.scala | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/core/src/main/scala/spark/MapOutputTracker.scala b/core/src/main/scala/spark/MapOutputTracker.scala index cee2391c71..82c1391345 100644 --- a/core/src/main/scala/spark/MapOutputTracker.scala +++ b/core/src/main/scala/spark/MapOutputTracker.scala @@ -224,7 +224,7 @@ class MapOutputTracker(actorSystem: ActorSystem, isMaster: Boolean) extends Logg dataOut.writeInt(locs.length) val grouped = locs.zipWithIndex.groupBy(_._1) dataOut.writeInt(grouped.size) - for ((id, pairs) <- grouped) { + for ((id, pairs) <- grouped if id != null) { dataOut.writeUTF(id.ip) dataOut.writeInt(id.port) dataOut.writeInt(pairs.length) From 019de4562c3c68ac36e6ab6a5577f5369336046b Mon Sep 17 00:00:00 2001 From: root Date: Thu, 6 Sep 2012 02:50:41 +0000 Subject: [PATCH 065/226] Less warmup in word count --- .../src/main/scala/spark/streaming/examples/WordCount2.scala | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/streaming/src/main/scala/spark/streaming/examples/WordCount2.scala b/streaming/src/main/scala/spark/streaming/examples/WordCount2.scala index 8561e7f079..c22949d7b9 100644 --- a/streaming/src/main/scala/spark/streaming/examples/WordCount2.scala +++ b/streaming/src/main/scala/spark/streaming/examples/WordCount2.scala @@ -84,7 +84,7 @@ object WordCount2 { val ssc = new StreamingContext(master, "WordCount2") ssc.setBatchDuration(batchDuration) - warmup(ssc.sc) + //warmup(ssc.sc) val data = ssc.sc.textFile(file, mapTasks.toInt).persist( new StorageLevel(false, true, false, 3)) // Memory only, serialized, 3 replicas From babb7e3ce2a5eda793f87b42839cc20d14cb94cf Mon Sep 17 00:00:00 2001 From: Tathagata Das Date: Thu, 6 Sep 2012 05:28:29 -0700 Subject: [PATCH 066/226] Re-implemented ReducedWindowedDSteam to simplify and fix bugs. Added slice operator to DStream. 
Also, refactored DStream testsuites and added tests for reduceByKeyAndWindow. --- .../main/scala/spark/streaming/DStream.scala | 20 +- .../main/scala/spark/streaming/Interval.scala | 4 + .../streaming/ReducedWindowedDStream.scala | 221 ++++++------------ .../spark/streaming/DStreamBasicSuite.scala | 67 ++++++ .../scala/spark/streaming/DStreamSuite.scala | 123 ---------- .../spark/streaming/DStreamSuiteBase.scala | 68 ++++++ .../spark/streaming/DStreamWindowSuite.scala | 107 +++++++++ 7 files changed, 334 insertions(+), 276 deletions(-) create mode 100644 streaming/src/test/scala/spark/streaming/DStreamBasicSuite.scala delete mode 100644 streaming/src/test/scala/spark/streaming/DStreamSuite.scala create mode 100644 streaming/src/test/scala/spark/streaming/DStreamSuiteBase.scala create mode 100644 streaming/src/test/scala/spark/streaming/DStreamWindowSuite.scala diff --git a/streaming/src/main/scala/spark/streaming/DStream.scala b/streaming/src/main/scala/spark/streaming/DStream.scala index 20f1c4db20..50b9458fae 100644 --- a/streaming/src/main/scala/spark/streaming/DStream.scala +++ b/streaming/src/main/scala/spark/streaming/DStream.scala @@ -143,7 +143,7 @@ extends Logging with Serializable { /** * This method generates a SparkStreaming job for the given time - * and may require to be overriden by subclasses + * and may required to be overriden by subclasses */ def generateJob(time: Time): Option[Job] = { getOrCompute(time) match { @@ -208,7 +208,7 @@ extends Logging with Serializable { new TransformedDStream(this, ssc.sc.clean(transformFunc)) } - def toQueue = { + def toBlockingQueue = { val queue = new ArrayBlockingQueue[RDD[T]](10000) this.foreachRDD(rdd => { queue.add(rdd) @@ -256,6 +256,22 @@ extends Logging with Serializable { def union(that: DStream[T]) = new UnifiedDStream(Array(this, that)) + def slice(fromTime: Time, toTime: Time): Seq[RDD[T]] = { + + val rdds = new ArrayBuffer[RDD[T]]() + var time = toTime.floor(slideTime) + + while (time >= zeroTime && time >= fromTime) { + getOrCompute(time) match { + case Some(rdd) => rdds += rdd + case None => throw new Exception("Could not get old reduced RDD for time " + time) + } + time -= slideTime + } + + rdds.toSeq + } + def register() { ssc.registerOutputStream(this) } diff --git a/streaming/src/main/scala/spark/streaming/Interval.scala b/streaming/src/main/scala/spark/streaming/Interval.scala index 87b8437b3d..ffb7725ac9 100644 --- a/streaming/src/main/scala/spark/streaming/Interval.scala +++ b/streaming/src/main/scala/spark/streaming/Interval.scala @@ -9,6 +9,10 @@ case class Interval(beginTime: Time, endTime: Time) { new Interval(beginTime + time, endTime + time) } + def - (time: Time): Interval = { + new Interval(beginTime - time, endTime - time) + } + def < (that: Interval): Boolean = { if (this.duration != that.duration) { throw new Exception("Comparing two intervals with different durations [" + this + ", " + that + "]") diff --git a/streaming/src/main/scala/spark/streaming/ReducedWindowedDStream.scala b/streaming/src/main/scala/spark/streaming/ReducedWindowedDStream.scala index 191d264b2b..b0beaba94d 100644 --- a/streaming/src/main/scala/spark/streaming/ReducedWindowedDStream.scala +++ b/streaming/src/main/scala/spark/streaming/ReducedWindowedDStream.scala @@ -12,7 +12,7 @@ import spark.storage.StorageLevel import scala.collection.mutable.ArrayBuffer class ReducedWindowedDStream[K: ClassManifest, V: ClassManifest]( - parent: DStream[(K, V)], + @transient parent: DStream[(K, V)], reduceFunc: (V, V) => V, invReduceFunc: (V, V) 
=> V, _windowTime: Time, @@ -28,9 +28,7 @@ class ReducedWindowedDStream[K: ClassManifest, V: ClassManifest]( throw new Exception("The slide duration of ReducedWindowedDStream (" + _slideTime + ") " + "must be multiple of the slide duration of parent DStream (" + parent.slideTime + ")") - val reducedStream = parent.reduceByKey(reduceFunc, partitioner) - val allowPartialWindows = true - //reducedStream.persist(StorageLevel.MEMORY_ONLY_DESER_2) + @transient val reducedStream = parent.reduceByKey(reduceFunc, partitioner) override def dependencies = List(reducedStream) @@ -44,174 +42,95 @@ class ReducedWindowedDStream[K: ClassManifest, V: ClassManifest]( checkpointInterval: Time): DStream[(K,V)] = { super.persist(storageLevel, checkpointLevel, checkpointInterval) reducedStream.persist(storageLevel, checkpointLevel, checkpointInterval) + this } - - override def compute(validTime: Time): Option[RDD[(K, V)]] = { - - // Notation: + override def compute(validTime: Time): Option[RDD[(K, V)]] = { + + val currentTime = validTime + val currentWindow = Interval(currentTime - windowTime + parent.slideTime, currentTime) + val previousWindow = currentWindow - slideTime + + logDebug("Window time = " + windowTime) + logDebug("Slide time = " + slideTime) + logDebug("ZeroTime = " + zeroTime) + logDebug("Current window = " + currentWindow) + logDebug("Previous window = " + previousWindow) + // _____________________________ - // | previous window _________|___________________ - // |___________________| current window | --------------> Time + // | previous window _________|___________________ + // |___________________| current window | --------------> Time // |_____________________________| - // + // // |________ _________| |________ _________| // | | // V V - // old time steps new time steps + // old RDDs new RDDs // - def getAdjustedWindow(endTime: Time, windowTime: Time): Interval = { - val beginTime = - if (allowPartialWindows && endTime - windowTime < parent.zeroTime) { - parent.zeroTime - } else { - endTime - windowTime - } - Interval(beginTime, endTime) - } - - val currentTime = validTime - val currentWindow = getAdjustedWindow(currentTime, windowTime) - val previousWindow = getAdjustedWindow(currentTime - slideTime, windowTime) - - logInfo("Current window = " + currentWindow) - logInfo("Slide time = " + slideTime) - logInfo("Previous window = " + previousWindow) - logInfo("Parent.zeroTime = " + parent.zeroTime) - if (allowPartialWindows) { - if (currentTime - slideTime <= parent.zeroTime) { - reducedStream.getOrCompute(currentTime) match { - case Some(rdd) => return Some(rdd) - case None => throw new Exception("Could not get first reduced RDD for time " + currentTime) - } - } - } else { - if (previousWindow.beginTime < parent.zeroTime) { - if (currentWindow.beginTime < parent.zeroTime) { - return None - } else { - // If this is the first feasible window, then generate reduced value in the naive manner - val reducedRDDs = new ArrayBuffer[RDD[(K, V)]]() - var t = currentWindow.endTime - while (t > currentWindow.beginTime) { - reducedStream.getOrCompute(t) match { - case Some(rdd) => reducedRDDs += rdd - case None => throw new Exception("Could not get reduced RDD for time " + t) - } - t -= reducedStream.slideTime - } - if (reducedRDDs.size == 0) { - throw new Exception("Could not generate the first RDD for time " + validTime) - } - return Some(new UnionRDD(ssc.sc, reducedRDDs).reduceByKey(partitioner, reduceFunc)) - } - } - } - - // Get the RDD of the reduced value of the previous window - val 
previousWindowRDD = getOrCompute(previousWindow.endTime) match { - case Some(rdd) => rdd.asInstanceOf[RDD[(_, _)]] - case None => throw new Exception("Could not get previous RDD for time " + previousWindow.endTime) - } - - val oldRDDs = new ArrayBuffer[RDD[(_, _)]]() - val newRDDs = new ArrayBuffer[RDD[(_, _)]]() - // Get the RDDs of the reduced values in "old time steps" - var t = currentWindow.beginTime - while (t > previousWindow.beginTime) { - reducedStream.getOrCompute(t) match { - case Some(rdd) => oldRDDs += rdd.asInstanceOf[RDD[(_, _)]] - case None => throw new Exception("Could not get old reduced RDD for time " + t) - } - t -= reducedStream.slideTime - } + val oldRDDs = reducedStream.slice(previousWindow.beginTime, currentWindow.beginTime - parent.slideTime) + logDebug("# old RDDs = " + oldRDDs.size) // Get the RDDs of the reduced values in "new time steps" - t = currentWindow.endTime - while (t > previousWindow.endTime) { - reducedStream.getOrCompute(t) match { - case Some(rdd) => newRDDs += rdd.asInstanceOf[RDD[(_, _)]] - case None => throw new Exception("Could not get new reduced RDD for time " + t) - } - t -= reducedStream.slideTime + val newRDDs = reducedStream.slice(previousWindow.endTime + parent.slideTime, currentWindow.endTime) + logDebug("# new RDDs = " + newRDDs.size) + + // Get the RDD of the reduced value of the previous window + val previousWindowRDD = getOrCompute(previousWindow.endTime).getOrElse(ssc.sc.makeRDD(Seq[(K,V)]())) + + // Make the list of RDDs that needs to cogrouped together for reducing their reduced values + val allRDDs = new ArrayBuffer[RDD[(K, V)]]() += previousWindowRDD ++= oldRDDs ++= newRDDs + + // Cogroup the reduced RDDs and merge the reduced values + val cogroupedRDD = new CoGroupedRDD[K](allRDDs.toSeq.asInstanceOf[Seq[RDD[(_, _)]]], partitioner) + val mergeValuesFunc = mergeValues(oldRDDs.size, newRDDs.size) _ + val mergedValuesRDD = cogroupedRDD.asInstanceOf[RDD[(K,Seq[Seq[V]])]].mapValues(mergeValuesFunc) + + Some(mergedValuesRDD) + } + + def mergeValues(numOldValues: Int, numNewValues: Int)(seqOfValues: Seq[Seq[V]]): V = { + + if (seqOfValues.size != 1 + numOldValues + numNewValues) { + throw new Exception("Unexpected number of sequences of reduced values") } - val allRDDs = new ArrayBuffer[RDD[(_, _)]]() - allRDDs += previousWindowRDD - allRDDs ++= oldRDDs - allRDDs ++= newRDDs - + // Getting reduced values "old time steps" that will be removed from current window + val oldValues = (1 to numOldValues).map(i => seqOfValues(i)).filter(!_.isEmpty).map(_.head) - val numOldRDDs = oldRDDs.size - val numNewRDDs = newRDDs.size - logInfo("Generated numOldRDDs = " + numOldRDDs + ", numNewRDDs = " + numNewRDDs) - logInfo("Generating CoGroupedRDD with " + allRDDs.size + " RDDs") - val newRDD = new CoGroupedRDD[K](allRDDs.toSeq, partitioner).asInstanceOf[RDD[(K,Seq[Seq[V]])]].map(x => { - val (key, value) = x - logDebug("value.size = " + value.size + ", numOldRDDs = " + numOldRDDs + ", numNewRDDs = " + numNewRDDs) - if (value.size != 1 + numOldRDDs + numNewRDDs) { - throw new Exception("Number of groups not odd!") + // Getting reduced values "new time steps" + val newValues = (1 to numNewValues).map(i => seqOfValues(numOldValues + i)).filter(!_.isEmpty).map(_.head) + + if (seqOfValues(0).isEmpty) { + + // If previous window's reduce value does not exist, then at least new values should exist + if (newValues.isEmpty) { + throw new Exception("Neither previous window has value for key, nor new values found") } - // old values = reduced values of the "old 
time steps" that are eliminated from current window - // new values = reduced values of the "new time steps" that are introduced to the current window - // previous value = reduced value of the previous window + // Reduce the new values + // println("new values = " + newValues.map(_.toString).reduce(_ + " " + _)) + return newValues.reduce(reduceFunc) + } else { - /*val numOldValues = (value.size - 1) / 2*/ - // Getting reduced values "old time steps" - val oldValues = - (0 until numOldRDDs).map(i => value(1 + i)).filter(_.size > 0).map(x => x(0)) - // Getting reduced values "new time steps" - val newValues = - (0 until numNewRDDs).map(i => value(1 + numOldRDDs + i)).filter(_.size > 0).map(x => x(0)) - - // If reduced value for the key does not exist in previous window, it should not exist in "old time steps" - if (value(0).size == 0 && oldValues.size != 0) { - throw new Exception("Unexpected: Key exists in old reduced values but not in previous reduced values") + // Get the previous window's reduced value + var tempValue = seqOfValues(0).head + + // If old values exists, then inverse reduce then from previous value + if (!oldValues.isEmpty) { + // println("old values = " + oldValues.map(_.toString).reduce(_ + " " + _)) + tempValue = invReduceFunc(tempValue, oldValues.reduce(reduceFunc)) } - // For the key, at least one of "old time steps", "new time steps" and previous window should have reduced values - if (value(0).size == 0 && oldValues.size == 0 && newValues.size == 0) { - throw new Exception("Unexpected: Key does not exist in any of old, new, or previour reduced values") + // If new values exists, then reduce them with previous value + if (!newValues.isEmpty) { + // println("new values = " + newValues.map(_.toString).reduce(_ + " " + _)) + tempValue = reduceFunc(tempValue, newValues.reduce(reduceFunc)) } - - // Logic to generate the final reduced value for current window: - // - // If previous window did not have reduced value for the key - // Then, return reduced value of "new time steps" as the final value - // Else, reduced value exists in previous window - // If "old" time steps did not have reduced value for the key - // Then, reduce previous window's reduced value with that of "new time steps" for final value - // Else, reduced values exists in "old time steps" - // If "new values" did not have reduced value for the key - // Then, inverse-reduce "old values" from previous window's reduced value for final value - // Else, all 3 values exist, combine all of them together - // - logDebug("# old values = " + oldValues.size + ", # new values = " + newValues) - val finalValue = { - if (value(0).size == 0) { - newValues.reduce(reduceFunc) - } else { - val prevValue = value(0)(0) - logDebug("prev value = " + prevValue) - if (oldValues.size == 0) { - // assuming newValue.size > 0 (all 3 cannot be zero, as checked earlier) - val temp = newValues.reduce(reduceFunc) - reduceFunc(prevValue, temp) - } else if (newValues.size == 0) { - invReduceFunc(prevValue, oldValues.reduce(reduceFunc)) - } else { - val tempValue = invReduceFunc(prevValue, oldValues.reduce(reduceFunc)) - reduceFunc(tempValue, newValues.reduce(reduceFunc)) - } - } - } - (key, finalValue) - }) - //newRDD.persist(StorageLevel.MEMORY_ONLY_DESER_2) - Some(newRDD) + // println("final value = " + tempValue) + return tempValue + } } } diff --git a/streaming/src/test/scala/spark/streaming/DStreamBasicSuite.scala b/streaming/src/test/scala/spark/streaming/DStreamBasicSuite.scala new file mode 100644 index 0000000000..2634c9b405 --- 
/dev/null +++ b/streaming/src/test/scala/spark/streaming/DStreamBasicSuite.scala @@ -0,0 +1,67 @@ +package spark.streaming + +import spark.streaming.StreamingContext._ +import scala.runtime.RichInt + +class DStreamBasicSuite extends DStreamSuiteBase { + + test("map-like operations") { + val input = Seq(1 to 4, 5 to 8, 9 to 12) + + // map + testOperation(input, (r: DStream[Int]) => r.map(_.toString), input.map(_.map(_.toString))) + + // flatMap + testOperation( + input, + (r: DStream[Int]) => r.flatMap(x => Seq(x, x * 2)), + input.map(_.flatMap(x => Array(x, x * 2))) + ) + } + + test("shuffle-based operations") { + // reduceByKey + testOperation( + Seq(Seq("a", "a", "b"), Seq("", ""), Seq()), + (s: DStream[String]) => s.map(x => (x, 1)).reduceByKey(_ + _), + Seq(Seq(("a", 2), ("b", 1)), Seq(("", 2)), Seq()), + true + ) + + // reduce + testOperation( + Seq(1 to 4, 5 to 8, 9 to 12), + (s: DStream[Int]) => s.reduce(_ + _), + Seq(Seq(10), Seq(26), Seq(42)) + ) + } + + test("stateful operations") { + val inputData = + Seq( + Seq("a", "b", "c"), + Seq("a", "b", "c"), + Seq("a", "b", "c") + ) + + val outputData = + Seq( + Seq(("a", 1), ("b", 1), ("c", 1)), + Seq(("a", 2), ("b", 2), ("c", 2)), + Seq(("a", 3), ("b", 3), ("c", 3)) + ) + + val updateStateOp = (s: DStream[String]) => { + val updateFunc = (values: Seq[Int], state: RichInt) => { + var newState = 0 + if (values != null) newState += values.reduce(_ + _) + if (state != null) newState += state.self + println("values = " + values + ", state = " + state + ", " + " new state = " + newState) + new RichInt(newState) + } + s.map(x => (x, 1)).updateStateByKey[RichInt](updateFunc).map(t => (t._1, t._2.self)) + } + + testOperation(inputData, updateStateOp, outputData, true) + } +} diff --git a/streaming/src/test/scala/spark/streaming/DStreamSuite.scala b/streaming/src/test/scala/spark/streaming/DStreamSuite.scala deleted file mode 100644 index fc00952afe..0000000000 --- a/streaming/src/test/scala/spark/streaming/DStreamSuite.scala +++ /dev/null @@ -1,123 +0,0 @@ -package spark.streaming - -import spark.Logging -import spark.streaming.StreamingContext._ -import spark.streaming.util.ManualClock - -import org.scalatest.FunSuite -import org.scalatest.BeforeAndAfter - -import scala.collection.mutable.ArrayBuffer -import scala.runtime.RichInt - -class DStreamSuite extends FunSuite with BeforeAndAfter with Logging { - - var ssc: StreamingContext = null - val batchDurationMillis = 1000 - - System.setProperty("spark.streaming.clock", "spark.streaming.util.ManualClock") - - def testOp[U: ClassManifest, V: ClassManifest]( - input: Seq[Seq[U]], - operation: DStream[U] => DStream[V], - expectedOutput: Seq[Seq[V]], - useSet: Boolean = false - ) { - try { - ssc = new StreamingContext("local", "test") - ssc.setBatchDuration(Milliseconds(batchDurationMillis)) - - val inputStream = ssc.createQueueStream(input.map(ssc.sc.makeRDD(_, 2)).toIterator) - val outputStream = operation(inputStream) - val outputQueue = outputStream.toQueue - - ssc.start() - val clock = ssc.scheduler.clock.asInstanceOf[ManualClock] - clock.addToTime(input.size * batchDurationMillis) - - Thread.sleep(1000) - - val output = new ArrayBuffer[Seq[V]]() - while(outputQueue.size > 0) { - val rdd = outputQueue.take() - output += (rdd.collect()) - } - - assert(output.size === expectedOutput.size) - for (i <- 0 until output.size) { - if (useSet) { - assert(output(i).toSet === expectedOutput(i).toSet) - } else { - assert(output(i).toList === expectedOutput(i).toList) - } - } - } finally { - ssc.stop() - } 
- } - - test("map-like operations") { - val inputData = Seq(1 to 4, 5 to 8, 9 to 12) - - // map - testOp(inputData, (r: DStream[Int]) => r.map(_.toString), inputData.map(_.map(_.toString))) - - // flatMap - testOp( - inputData, - (r: DStream[Int]) => r.flatMap(x => Seq(x, x * 2)), - inputData.map(_.flatMap(x => Array(x, x * 2))) - ) - } - - test("shuffle-based operations") { - // reduceByKey - testOp( - Seq(Seq("a", "a", "b"), Seq("", ""), Seq()), - (s: DStream[String]) => s.map(x => (x, 1)).reduceByKey(_ + _), - Seq(Seq(("a", 2), ("b", 1)), Seq(("", 2)), Seq()), - true - ) - - // reduce - testOp( - Seq(1 to 4, 5 to 8, 9 to 12), - (s: DStream[Int]) => s.reduce(_ + _), - Seq(Seq(10), Seq(26), Seq(42)) - ) - } - - test("window-based operations") { - - } - - - test("stateful operations") { - val inputData = - Seq( - Seq("a", "b", "c"), - Seq("a", "b", "c"), - Seq("a", "b", "c") - ) - - val outputData = - Seq( - Seq(("a", 1), ("b", 1), ("c", 1)), - Seq(("a", 2), ("b", 2), ("c", 2)), - Seq(("a", 3), ("b", 3), ("c", 3)) - ) - - val updateStateOp = (s: DStream[String]) => { - val updateFunc = (values: Seq[Int], state: RichInt) => { - var newState = 0 - if (values != null) newState += values.reduce(_ + _) - if (state != null) newState += state.self - println("values = " + values + ", state = " + state + ", " + " new state = " + newState) - new RichInt(newState) - } - s.map(x => (x, 1)).updateStateByKey[RichInt](updateFunc).map(t => (t._1, t._2.self)) - } - - testOp(inputData, updateStateOp, outputData, true) - } -} diff --git a/streaming/src/test/scala/spark/streaming/DStreamSuiteBase.scala b/streaming/src/test/scala/spark/streaming/DStreamSuiteBase.scala new file mode 100644 index 0000000000..1c4ea14b1d --- /dev/null +++ b/streaming/src/test/scala/spark/streaming/DStreamSuiteBase.scala @@ -0,0 +1,68 @@ +package spark.streaming + +import spark.{RDD, Logging} +import util.ManualClock +import collection.mutable.ArrayBuffer +import org.scalatest.FunSuite +import scala.collection.mutable.Queue + + +trait DStreamSuiteBase extends FunSuite with Logging { + + def batchDuration() = Seconds(1) + + def maxWaitTimeMillis() = 10000 + + def testOperation[U: ClassManifest, V: ClassManifest]( + input: Seq[Seq[U]], + operation: DStream[U] => DStream[V], + expectedOutput: Seq[Seq[V]], + useSet: Boolean = false + ) { + + val manualClock = true + + if (manualClock) { + System.setProperty("spark.streaming.clock", "spark.streaming.util.ManualClock") + } + + val ssc = new StreamingContext("local", "test") + + try { + ssc.setBatchDuration(Milliseconds(batchDuration)) + + val inputQueue = new Queue[RDD[U]]() + inputQueue ++= input.map(ssc.sc.makeRDD(_, 2)) + val emptyRDD = ssc.sc.makeRDD(Seq[U](), 2) + + val inputStream = ssc.createQueueStream(inputQueue, true, emptyRDD) + val outputStream = operation(inputStream) + + val output = new ArrayBuffer[Seq[V]]() + outputStream.foreachRDD(rdd => output += rdd.collect()) + + ssc.start() + + val clock = ssc.scheduler.clock + if (clock.isInstanceOf[ManualClock]) { + clock.asInstanceOf[ManualClock].addToTime(input.size * batchDuration.milliseconds) + } + + val startTime = System.currentTimeMillis() + while (output.size < expectedOutput.size && System.currentTimeMillis() - startTime < maxWaitTimeMillis) { + Thread.sleep(500) + } + + assert(output.size === expectedOutput.size) + for (i <- 0 until output.size) { + if (useSet) { + assert(output(i).toSet === expectedOutput(i).toSet) + } else { + assert(output(i).toList === expectedOutput(i).toList) + } + } + } finally { + ssc.stop() + } 
+ } +} diff --git a/streaming/src/test/scala/spark/streaming/DStreamWindowSuite.scala b/streaming/src/test/scala/spark/streaming/DStreamWindowSuite.scala new file mode 100644 index 0000000000..c0e054418c --- /dev/null +++ b/streaming/src/test/scala/spark/streaming/DStreamWindowSuite.scala @@ -0,0 +1,107 @@ +package spark.streaming + +import spark.streaming.StreamingContext._ + +class DStreamWindowSuite extends DStreamSuiteBase { + + def testReduceByKeyAndWindow( + name: String, + input: Seq[Seq[(String, Int)]], + expectedOutput: Seq[Seq[(String, Int)]], + windowTime: Time = Seconds(2), + slideTime: Time = Seconds(1) + ) { + test("reduceByKeyAndWindow - " + name) { + testOperation( + input, + (s: DStream[(String, Int)]) => s.reduceByKeyAndWindow(_ + _, _ - _, windowTime, slideTime).persist(), + expectedOutput, + true + ) + } + } + + testReduceByKeyAndWindow( + "basic reduction", + Seq(Seq(("a", 1), ("a", 3)) ), + Seq(Seq(("a", 4)) ) + ) + + testReduceByKeyAndWindow( + "key already in window and new value added into window", + Seq( Seq(("a", 1)), Seq(("a", 1)) ), + Seq( Seq(("a", 1)), Seq(("a", 2)) ) + ) + + testReduceByKeyAndWindow( + "new key added into window", + Seq( Seq(("a", 1)), Seq(("a", 1), ("b", 1)) ), + Seq( Seq(("a", 1)), Seq(("a", 2), ("b", 1)) ) + ) + + testReduceByKeyAndWindow( + "key removed from window", + Seq( Seq(("a", 1)), Seq(("a", 1)), Seq(), Seq() ), + Seq( Seq(("a", 1)), Seq(("a", 2)), Seq(("a", 1)), Seq(("a", 0)) ) + ) + + val largerSlideInput = Seq( + Seq(("a", 1)), // 1st window from here + Seq(("a", 2)), + Seq(("a", 3)), // 2nd window from here + Seq(("a", 4)), + Seq(("a", 5)), // 3rd window from here + Seq(("a", 6)), + Seq(), // 4th window from here + Seq(), + Seq() // 5th window from here + ) + + val largerSlideOutput = Seq( + Seq(("a", 1)), + Seq(("a", 6)), + Seq(("a", 14)), + Seq(("a", 15)), + Seq(("a", 6)) + ) + + testReduceByKeyAndWindow( + "larger slide time", + largerSlideInput, + largerSlideOutput, + Seconds(4), + Seconds(2) + ) + + val bigInput = Seq( + Seq(("a", 1)), + Seq(("a", 1), ("b", 1)), + Seq(("a", 1), ("b", 1), ("c", 1)), + Seq(("a", 1), ("b", 1)), + Seq(("a", 1)), + Seq(), + Seq(("a", 1)), + Seq(("a", 1), ("b", 1)), + Seq(("a", 1), ("b", 1), ("c", 1)), + Seq(("a", 1), ("b", 1)), + Seq(("a", 1)), + Seq() + ) + + val bigOutput = Seq( + Seq(("a", 1)), + Seq(("a", 2), ("b", 1)), + Seq(("a", 2), ("b", 2), ("c", 1)), + Seq(("a", 2), ("b", 2), ("c", 1)), + Seq(("a", 2), ("b", 1), ("c", 0)), + Seq(("a", 1), ("b", 0), ("c", 0)), + Seq(("a", 1), ("b", 0), ("c", 0)), + Seq(("a", 2), ("b", 1), ("c", 0)), + Seq(("a", 2), ("b", 2), ("c", 1)), + Seq(("a", 2), ("b", 2), ("c", 1)), + Seq(("a", 2), ("b", 1), ("c", 0)), + Seq(("a", 1), ("b", 0), ("c", 0)) + ) + + testReduceByKeyAndWindow("big test", bigInput, bigOutput) +} From 4a7bde6865cf22af060f20a9619c516b811c80f2 Mon Sep 17 00:00:00 2001 From: Tathagata Das Date: Thu, 6 Sep 2012 19:06:59 -0700 Subject: [PATCH 067/226] Fixed bugs and added testcases for naive reduceByKeyAndWindow. 
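The ReducedWindowedDStream rewrite in the patch above computes each window incrementally: it starts from the previous window's reduced value, applies the inverse reduce function to the batches that slid out of the window, and applies the reduce function to the batches that slid in. A minimal sketch of that update rule over plain in-memory maps (the names and types are illustrative, not the DStream API):

// Sketch: one slide of reduceByKeyAndWindow with an invertible reduce function.
// next window = reduce(invReduce(previous window, values slid out), values slid in)
object IncrementalWindowSketch {
  type Counts = Map[String, Int]

  def advanceWindow(prev: Counts,
                    oldSlice: Counts,               // batch that just left the window
                    newSlice: Counts,               // batch that just entered the window
                    reduce: (Int, Int) => Int,
                    invReduce: (Int, Int) => Int): Counts = {
    // Keys sliding out were necessarily present in the previous window's value.
    val withoutOld = oldSlice.foldLeft(prev) {
      case (acc, (k, v)) => acc.updated(k, invReduce(acc(k), v))
    }
    newSlice.foldLeft(withoutOld) {
      case (acc, (k, v)) => acc.updated(k, reduce(acc.getOrElse(k, 0), v))
    }
  }

  def main(args: Array[String]) {
    val prev = Map("a" -> 2, "b" -> 1)              // window over the last two batches
    val next = advanceWindow(prev, Map("a" -> 1), Map("a" -> 1, "c" -> 1), _ + _, _ - _)
    println(next)                                   // Map(a -> 2, b -> 1, c -> 1)
  }
}

As the comment in DStreamWindowSuite notes, a key whose count drops to zero stays in the window's RDD with value 0 under this scheme rather than being eliminated; the same holds in this sketch.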
--- .../main/scala/spark/streaming/DStream.scala | 6 + .../scala/spark/streaming/Scheduler.scala | 2 +- .../spark/streaming/WindowedDStream.scala | 38 +--- .../spark/streaming/DStreamBasicSuite.scala | 2 +- .../spark/streaming/DStreamWindowSuite.scala | 179 +++++++++++++----- 5 files changed, 140 insertions(+), 87 deletions(-) diff --git a/streaming/src/main/scala/spark/streaming/DStream.scala b/streaming/src/main/scala/spark/streaming/DStream.scala index 50b9458fae..3973ca1520 100644 --- a/streaming/src/main/scala/spark/streaming/DStream.scala +++ b/streaming/src/main/scala/spark/streaming/DStream.scala @@ -256,11 +256,17 @@ extends Logging with Serializable { def union(that: DStream[T]) = new UnifiedDStream(Array(this, that)) + def slice(interval: Interval): Seq[RDD[T]] = { + slice(interval.beginTime, interval.endTime) + } + + // Get all the RDDs between fromTime to toTime (both included) def slice(fromTime: Time, toTime: Time): Seq[RDD[T]] = { val rdds = new ArrayBuffer[RDD[T]]() var time = toTime.floor(slideTime) + while (time >= zeroTime && time >= fromTime) { getOrCompute(time) match { case Some(rdd) => rdds += rdd diff --git a/streaming/src/main/scala/spark/streaming/Scheduler.scala b/streaming/src/main/scala/spark/streaming/Scheduler.scala index 12e52bf56c..00136685d5 100644 --- a/streaming/src/main/scala/spark/streaming/Scheduler.scala +++ b/streaming/src/main/scala/spark/streaming/Scheduler.scala @@ -40,7 +40,7 @@ extends Logging { } def generateRDDs (time: Time) { - println("\n-----------------------------------------------------\n") + logInfo("\n-----------------------------------------------------\n") logInfo("Generating RDDs for time " + time) outputStreams.foreach(outputStream => { outputStream.generateJob(time) match { diff --git a/streaming/src/main/scala/spark/streaming/WindowedDStream.scala b/streaming/src/main/scala/spark/streaming/WindowedDStream.scala index 6c791fcfc1..93c1291691 100644 --- a/streaming/src/main/scala/spark/streaming/WindowedDStream.scala +++ b/streaming/src/main/scala/spark/streaming/WindowedDStream.scala @@ -1,12 +1,8 @@ package spark.streaming -import spark.streaming.StreamingContext._ - import spark.RDD import spark.UnionRDD -import spark.SparkContext._ -import scala.collection.mutable.ArrayBuffer class WindowedDStream[T: ClassManifest]( parent: DStream[T], @@ -22,8 +18,6 @@ class WindowedDStream[T: ClassManifest]( throw new Exception("The slide duration of WindowedDStream (" + _slideTime + ") " + "must be multiple of the slide duration of parent DStream (" + parent.slideTime + ")") - val allowPartialWindows = true - override def dependencies = List(parent) def windowTime: Time = _windowTime @@ -31,36 +25,8 @@ class WindowedDStream[T: ClassManifest]( override def slideTime: Time = _slideTime override def compute(validTime: Time): Option[RDD[T]] = { - val parentRDDs = new ArrayBuffer[RDD[T]]() - val windowEndTime = validTime - val windowStartTime = if (allowPartialWindows && windowEndTime - windowTime < parent.zeroTime) { - parent.zeroTime - } else { - windowEndTime - windowTime - } - - logInfo("Window = " + windowStartTime + " - " + windowEndTime) - logInfo("Parent.zeroTime = " + parent.zeroTime) - - if (windowStartTime >= parent.zeroTime) { - // Walk back through time, from the 'windowEndTime' to 'windowStartTime' - // and get all parent RDDs from the parent DStream - var t = windowEndTime - while (t > windowStartTime) { - parent.getOrCompute(t) match { - case Some(rdd) => parentRDDs += rdd - case None => throw new Exception("Could not generate 
parent RDD for time " + t) - } - t -= parent.slideTime - } - } - - // Do a union of all parent RDDs to generate the new RDD - if (parentRDDs.size > 0) { - Some(new UnionRDD(ssc.sc, parentRDDs)) - } else { - None - } + val currentWindow = Interval(validTime - windowTime + parent.slideTime, validTime) + Some(new UnionRDD(ssc.sc, parent.slice(currentWindow))) } } diff --git a/streaming/src/test/scala/spark/streaming/DStreamBasicSuite.scala b/streaming/src/test/scala/spark/streaming/DStreamBasicSuite.scala index 2634c9b405..9b953d9dae 100644 --- a/streaming/src/test/scala/spark/streaming/DStreamBasicSuite.scala +++ b/streaming/src/test/scala/spark/streaming/DStreamBasicSuite.scala @@ -56,7 +56,7 @@ class DStreamBasicSuite extends DStreamSuiteBase { var newState = 0 if (values != null) newState += values.reduce(_ + _) if (state != null) newState += state.self - println("values = " + values + ", state = " + state + ", " + " new state = " + newState) + //println("values = " + values + ", state = " + state + ", " + " new state = " + newState) new RichInt(newState) } s.map(x => (x, 1)).updateStateByKey[RichInt](updateFunc).map(t => (t._1, t._2.self)) diff --git a/streaming/src/test/scala/spark/streaming/DStreamWindowSuite.scala b/streaming/src/test/scala/spark/streaming/DStreamWindowSuite.scala index c0e054418c..061cab2cbb 100644 --- a/streaming/src/test/scala/spark/streaming/DStreamWindowSuite.scala +++ b/streaming/src/test/scala/spark/streaming/DStreamWindowSuite.scala @@ -4,47 +4,6 @@ import spark.streaming.StreamingContext._ class DStreamWindowSuite extends DStreamSuiteBase { - def testReduceByKeyAndWindow( - name: String, - input: Seq[Seq[(String, Int)]], - expectedOutput: Seq[Seq[(String, Int)]], - windowTime: Time = Seconds(2), - slideTime: Time = Seconds(1) - ) { - test("reduceByKeyAndWindow - " + name) { - testOperation( - input, - (s: DStream[(String, Int)]) => s.reduceByKeyAndWindow(_ + _, _ - _, windowTime, slideTime).persist(), - expectedOutput, - true - ) - } - } - - testReduceByKeyAndWindow( - "basic reduction", - Seq(Seq(("a", 1), ("a", 3)) ), - Seq(Seq(("a", 4)) ) - ) - - testReduceByKeyAndWindow( - "key already in window and new value added into window", - Seq( Seq(("a", 1)), Seq(("a", 1)) ), - Seq( Seq(("a", 1)), Seq(("a", 2)) ) - ) - - testReduceByKeyAndWindow( - "new key added into window", - Seq( Seq(("a", 1)), Seq(("a", 1), ("b", 1)) ), - Seq( Seq(("a", 1)), Seq(("a", 2), ("b", 1)) ) - ) - - testReduceByKeyAndWindow( - "key removed from window", - Seq( Seq(("a", 1)), Seq(("a", 1)), Seq(), Seq() ), - Seq( Seq(("a", 1)), Seq(("a", 2)), Seq(("a", 1)), Seq(("a", 0)) ) - ) - val largerSlideInput = Seq( Seq(("a", 1)), // 1st window from here Seq(("a", 2)), @@ -65,14 +24,6 @@ class DStreamWindowSuite extends DStreamSuiteBase { Seq(("a", 6)) ) - testReduceByKeyAndWindow( - "larger slide time", - largerSlideInput, - largerSlideOutput, - Seconds(4), - Seconds(2) - ) - val bigInput = Seq( Seq(("a", 1)), Seq(("a", 1), ("b", 1)), @@ -89,6 +40,29 @@ class DStreamWindowSuite extends DStreamSuiteBase { ) val bigOutput = Seq( + Seq(("a", 1)), + Seq(("a", 2), ("b", 1)), + Seq(("a", 2), ("b", 2), ("c", 1)), + Seq(("a", 2), ("b", 2), ("c", 1)), + Seq(("a", 2), ("b", 1)), + Seq(("a", 1)), + Seq(("a", 1)), + Seq(("a", 2), ("b", 1)), + Seq(("a", 2), ("b", 2), ("c", 1)), + Seq(("a", 2), ("b", 2), ("c", 1)), + Seq(("a", 2), ("b", 1)), + Seq(("a", 1)) + ) + + /* + The output of the reduceByKeyAndWindow with inverse reduce function is + difference from the naive reduceByKeyAndWindow. 
Even if the count of a + particular key is 0, the key does not get eliminated from the RDDs of + ReducedWindowedDStream. This causes the number of keys in these RDDs to + increase forever. A more generalized version that allows elimination of + keys should be considered. + */ + val bigOutputInv = Seq( Seq(("a", 1)), Seq(("a", 2), ("b", 1)), Seq(("a", 2), ("b", 2), ("c", 1)), @@ -103,5 +77,112 @@ class DStreamWindowSuite extends DStreamSuiteBase { Seq(("a", 1), ("b", 0), ("c", 0)) ) + def testReduceByKeyAndWindow( + name: String, + input: Seq[Seq[(String, Int)]], + expectedOutput: Seq[Seq[(String, Int)]], + windowTime: Time = Seconds(2), + slideTime: Time = Seconds(1) + ) { + test("reduceByKeyAndWindow - " + name) { + testOperation( + input, + (s: DStream[(String, Int)]) => s.reduceByKeyAndWindow(_ + _, windowTime, slideTime).persist(), + expectedOutput, + true + ) + } + } + + def testReduceByKeyAndWindowInv( + name: String, + input: Seq[Seq[(String, Int)]], + expectedOutput: Seq[Seq[(String, Int)]], + windowTime: Time = Seconds(2), + slideTime: Time = Seconds(1) + ) { + test("reduceByKeyAndWindowInv - " + name) { + testOperation( + input, + (s: DStream[(String, Int)]) => s.reduceByKeyAndWindow(_ + _, _ - _, windowTime, slideTime).persist(), + expectedOutput, + true + ) + } + } + + + // Testing naive reduceByKeyAndWindow (without invertible function) + + testReduceByKeyAndWindow( + "basic reduction", + Seq(Seq(("a", 1), ("a", 3)) ), + Seq(Seq(("a", 4)) ) + ) + + testReduceByKeyAndWindow( + "key already in window and new value added into window", + Seq( Seq(("a", 1)), Seq(("a", 1)) ), + Seq( Seq(("a", 1)), Seq(("a", 2)) ) + ) + + + testReduceByKeyAndWindow( + "new key added into window", + Seq( Seq(("a", 1)), Seq(("a", 1), ("b", 1)) ), + Seq( Seq(("a", 1)), Seq(("a", 2), ("b", 1)) ) + ) + + testReduceByKeyAndWindow( + "key removed from window", + Seq( Seq(("a", 1)), Seq(("a", 1)), Seq(), Seq() ), + Seq( Seq(("a", 1)), Seq(("a", 2)), Seq(("a", 1)), Seq() ) + ) + + testReduceByKeyAndWindow( + "larger slide time", + largerSlideInput, + largerSlideOutput, + Seconds(4), + Seconds(2) + ) + testReduceByKeyAndWindow("big test", bigInput, bigOutput) + + + // Testing reduceByKeyAndWindow (with invertible reduce function) + + testReduceByKeyAndWindowInv( + "basic reduction", + Seq(Seq(("a", 1), ("a", 3)) ), + Seq(Seq(("a", 4)) ) + ) + + testReduceByKeyAndWindowInv( + "key already in window and new value added into window", + Seq( Seq(("a", 1)), Seq(("a", 1)) ), + Seq( Seq(("a", 1)), Seq(("a", 2)) ) + ) + + testReduceByKeyAndWindowInv( + "new key added into window", + Seq( Seq(("a", 1)), Seq(("a", 1), ("b", 1)) ), + Seq( Seq(("a", 1)), Seq(("a", 2), ("b", 1)) ) + ) + + testReduceByKeyAndWindowInv( + "key removed from window", + Seq( Seq(("a", 1)), Seq(("a", 1)), Seq(), Seq() ), + Seq( Seq(("a", 1)), Seq(("a", 2)), Seq(("a", 1)), Seq(("a", 0)) ) + ) + + testReduceByKeyAndWindowInv( + "larger slide time", + largerSlideInput, + largerSlideOutput, + Seconds(4), + Seconds(2) + ) + + testReduceByKeyAndWindowInv("big test", bigInput, bigOutputInv) } From db08a362aae68682f9105f9e5568bc9b9d9faaab Mon Sep 17 00:00:00 2001 From: haoyuan Date: Fri, 7 Sep 2012 02:17:52 +0000 Subject: [PATCH 068/226] commit opt for grep scalibility test. 
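The optimization below has two parts: BlockManager.replicate asks the master for its peer list only on the first replication and reuses the cached list afterwards, and NetworkInputTracker stops polling receivers for block IDs and instead lets receivers push them into per-stream queues that the tracker drains when each batch is generated. A minimal, self-contained sketch of that push-based bookkeeping follows; the object and method names are illustrative stand-ins, not the actual Spark classes.

import scala.collection.mutable.{HashMap, Queue}

// Illustrative sketch (not the real NetworkInputTracker): receivers report block
// IDs as soon as blocks are stored, and the scheduler drains the queue per batch.
object BlockIdTrackerSketch {
  private val receivedBlockIds = new HashMap[Int, Queue[String]]

  // Called from the receiver side whenever new blocks have been pushed to storage.
  def reportBlockIds(streamId: Int, blockIds: Seq[String]): Unit = {
    val queue = receivedBlockIds.synchronized {
      receivedBlockIds.getOrElseUpdate(streamId, new Queue[String])
    }
    queue.synchronized { queue ++= blockIds }
  }

  // Called from the scheduler side when generating the RDD for a batch;
  // an unknown stream simply yields an empty array.
  def drainBlockIds(streamId: Int): Array[String] = {
    val queue = receivedBlockIds.synchronized {
      receivedBlockIds.getOrElse(streamId, new Queue[String])
    }
    queue.synchronized { queue.dequeueAll(_ => true) }.toArray
  }
}

The same locking pattern appears in the patch itself: the map and each per-stream queue are synchronized independently, so receivers reporting blocks and the scheduler draining them do not hold one coarse lock.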
--- .../scala/spark/storage/BlockManager.scala | 7 +++- .../spark/streaming/NetworkInputTracker.scala | 40 ++++++++++--------- .../spark/streaming/RawInputDStream.scala | 17 ++++---- .../streaming/examples/WordCountRaw.scala | 19 ++++++--- 4 files changed, 51 insertions(+), 32 deletions(-) diff --git a/core/src/main/scala/spark/storage/BlockManager.scala b/core/src/main/scala/spark/storage/BlockManager.scala index f2d9499bad..4cdb9710ec 100644 --- a/core/src/main/scala/spark/storage/BlockManager.scala +++ b/core/src/main/scala/spark/storage/BlockManager.scala @@ -509,10 +509,15 @@ class BlockManager(val master: BlockManagerMaster, val serializer: Serializer, m * Replicate block to another node. */ + var firstTime = true + var peers : Seq[BlockManagerId] = null private def replicate(blockId: String, data: ByteBuffer, level: StorageLevel) { val tLevel: StorageLevel = new StorageLevel(level.useDisk, level.useMemory, level.deserialized, 1) - var peers = master.mustGetPeers(GetPeers(blockManagerId, level.replication - 1)) + if (firstTime) { + peers = master.mustGetPeers(GetPeers(blockManagerId, level.replication - 1)) + firstTime = false; + } for (peer: BlockManagerId <- peers) { val start = System.nanoTime data.rewind() diff --git a/streaming/src/main/scala/spark/streaming/NetworkInputTracker.scala b/streaming/src/main/scala/spark/streaming/NetworkInputTracker.scala index acf97c1883..9f9001e4d5 100644 --- a/streaming/src/main/scala/spark/streaming/NetworkInputTracker.scala +++ b/streaming/src/main/scala/spark/streaming/NetworkInputTracker.scala @@ -4,6 +4,7 @@ import spark.Logging import spark.SparkEnv import scala.collection.mutable.HashMap +import scala.collection.mutable.Queue import akka.actor._ import akka.pattern.ask @@ -28,6 +29,17 @@ extends Logging { logInfo("Registered receiver for network stream " + streamId) sender ! 
true } + case GotBlockIds(streamId, blockIds) => { + val tmp = receivedBlockIds.synchronized { + if (!receivedBlockIds.contains(streamId)) { + receivedBlockIds += ((streamId, new Queue[String])) + } + receivedBlockIds(streamId) + } + tmp.synchronized { + tmp ++= blockIds + } + } } } @@ -69,8 +81,8 @@ extends Logging { val networkInputStreamIds = networkInputStreams.map(_.id).toArray val receiverExecutor = new ReceiverExecutor() val receiverInfo = new HashMap[Int, ActorRef] - val receivedBlockIds = new HashMap[Int, Array[String]] - val timeout = 1000.milliseconds + val receivedBlockIds = new HashMap[Int, Queue[String]] + val timeout = 5000.milliseconds var currentTime: Time = null @@ -86,22 +98,12 @@ extends Logging { } def getBlockIds(receiverId: Int, time: Time): Array[String] = synchronized { - if (currentTime == null || time > currentTime) { - logInfo("Getting block ids from receivers for " + time) - implicit val ec = ssc.env.actorSystem.dispatcher - receivedBlockIds.clear() - val message = new GetBlockIds(time) - val listOfFutures = receiverInfo.values.map( - _.ask(message)(timeout).mapTo[GotBlockIds] - ).toList - val futureOfList = Future.sequence(listOfFutures) - val allBlockIds = Await.result(futureOfList, timeout) - receivedBlockIds ++= allBlockIds.map(x => (x.streamId, x.blocksIds)) - if (receivedBlockIds.size != receiverInfo.size) { - throw new Exception("Unexpected number of the Block IDs received") - } - currentTime = time + val queue = receivedBlockIds.synchronized { + receivedBlockIds.getOrElse(receiverId, new Queue[String]()) } - receivedBlockIds.getOrElse(receiverId, Array[String]()) + val result = queue.synchronized { + queue.dequeueAll(x => true) + } + result.toArray } -} \ No newline at end of file +} diff --git a/streaming/src/main/scala/spark/streaming/RawInputDStream.scala b/streaming/src/main/scala/spark/streaming/RawInputDStream.scala index d59c245a23..d29aea7886 100644 --- a/streaming/src/main/scala/spark/streaming/RawInputDStream.scala +++ b/streaming/src/main/scala/spark/streaming/RawInputDStream.scala @@ -86,14 +86,15 @@ class RawInputDStream[T: ClassManifest]( private class ReceiverActor(env: SparkEnv, receivingThread: Thread) extends Actor { val newBlocks = new ArrayBuffer[String] + logInfo("Attempting to register with tracker") + val ip = System.getProperty("spark.master.host", "localhost") + val port = System.getProperty("spark.master.port", "7077").toInt + val actorName: String = "NetworkInputTracker" + val url = "akka://spark@%s:%s/user/%s".format(ip, port, actorName) + val trackerActor = env.actorSystem.actorFor(url) + val timeout = 5.seconds + override def preStart() { - logInfo("Attempting to register with tracker") - val ip = System.getProperty("spark.master.host", "localhost") - val port = System.getProperty("spark.master.port", "7077").toInt - val actorName: String = "NetworkInputTracker" - val url = "akka://spark@%s:%s/user/%s".format(ip, port, actorName) - val trackerActor = env.actorSystem.actorFor(url) - val timeout = 1.seconds val future = trackerActor.ask(RegisterReceiver(streamId, self))(timeout) Await.result(future, timeout) } @@ -101,6 +102,7 @@ class RawInputDStream[T: ClassManifest]( override def receive = { case BlockPublished(blockId) => newBlocks += blockId + val future = trackerActor ! GotBlockIds(streamId, Array(blockId)) case GetBlockIds(time) => logInfo("Got request for block IDs for " + time) @@ -111,5 +113,6 @@ class RawInputDStream[T: ClassManifest]( receivingThread.interrupt() sender ! 
true } + } } diff --git a/streaming/src/main/scala/spark/streaming/examples/WordCountRaw.scala b/streaming/src/main/scala/spark/streaming/examples/WordCountRaw.scala index 298d9ef381..9702003805 100644 --- a/streaming/src/main/scala/spark/streaming/examples/WordCountRaw.scala +++ b/streaming/src/main/scala/spark/streaming/examples/WordCountRaw.scala @@ -1,11 +1,24 @@ package spark.streaming.examples import spark.util.IntParam +import spark.SparkContext +import spark.SparkContext._ import spark.storage.StorageLevel import spark.streaming._ import spark.streaming.StreamingContext._ +import WordCount2_ExtraFunctions._ + object WordCountRaw { + def moreWarmup(sc: SparkContext) { + (0 until 40).foreach {i => + sc.parallelize(1 to 20000000, 1000) + .map(_ % 1331).map(_.toString) + .mapPartitions(splitAndCountPartitions).reduceByKey(_ + _, 10) + .collect() + } + } + def main(args: Array[String]) { if (args.length != 7) { System.err.println("Usage: WordCountRaw ") @@ -20,16 +33,12 @@ object WordCountRaw { ssc.setBatchDuration(Milliseconds(batchMs)) // Make sure some tasks have started on each node - ssc.sc.parallelize(1 to 1000, 1000).count() - ssc.sc.parallelize(1 to 1000, 1000).count() - ssc.sc.parallelize(1 to 1000, 1000).count() + moreWarmup(ssc.sc) val rawStreams = (1 to streams).map(_ => ssc.createRawNetworkStream[String](host, port, StorageLevel.MEMORY_ONLY_2)).toArray val union = new UnifiedDStream(rawStreams) - import WordCount2_ExtraFunctions._ - val windowedCounts = union.mapPartitions(splitAndCountPartitions) .reduceByKeyAndWindow(add _, subtract _, Seconds(30), Milliseconds(batchMs), reduces) windowedCounts.persist(StorageLevel.MEMORY_ONLY_DESER, StorageLevel.MEMORY_ONLY_DESER_2, From 381e2c7ac4cba952de2bf8b0090ff0799829cf30 Mon Sep 17 00:00:00 2001 From: haoyuan Date: Thu, 6 Sep 2012 20:54:52 -0700 Subject: [PATCH 069/226] add warmup code for TopKWordCountRaw.scala --- .../streaming/examples/TopKWordCountRaw.scala | 19 ++++++++++++++----- 1 file changed, 14 insertions(+), 5 deletions(-) diff --git a/streaming/src/main/scala/spark/streaming/examples/TopKWordCountRaw.scala b/streaming/src/main/scala/spark/streaming/examples/TopKWordCountRaw.scala index be3188c5ed..3ba07d0448 100644 --- a/streaming/src/main/scala/spark/streaming/examples/TopKWordCountRaw.scala +++ b/streaming/src/main/scala/spark/streaming/examples/TopKWordCountRaw.scala @@ -1,11 +1,24 @@ package spark.streaming.examples import spark.util.IntParam +import spark.SparkContext +import spark.SparkContext._ import spark.storage.StorageLevel import spark.streaming._ import spark.streaming.StreamingContext._ +import WordCount2_ExtraFunctions._ + object TopKWordCountRaw { + def moreWarmup(sc: SparkContext) { + (0 until 40).foreach {i => + sc.parallelize(1 to 20000000, 1000) + .map(_ % 1331).map(_.toString) + .mapPartitions(splitAndCountPartitions).reduceByKey(_ + _, 10) + .collect() + } + } + def main(args: Array[String]) { if (args.length != 7) { System.err.println("Usage: TopKWordCountRaw ") @@ -20,16 +33,12 @@ object TopKWordCountRaw { ssc.setBatchDuration(Milliseconds(batchMs)) // Make sure some tasks have started on each node - ssc.sc.parallelize(1 to 1000, 1000).count() - ssc.sc.parallelize(1 to 1000, 1000).count() - ssc.sc.parallelize(1 to 1000, 1000).count() + moreWarmup(ssc.sc) val rawStreams = (1 to streams).map(_ => ssc.createRawNetworkStream[String](host, port, StorageLevel.MEMORY_ONLY_2)).toArray val union = new UnifiedDStream(rawStreams) - import WordCount2_ExtraFunctions._ - val windowedCounts = 
union.mapPartitions(splitAndCountPartitions) .reduceByKeyAndWindow(add _, subtract _, Seconds(30), Milliseconds(batchMs), reduces) windowedCounts.persist(StorageLevel.MEMORY_ONLY_DESER, StorageLevel.MEMORY_ONLY_DESER_2, From b5750726ff3306e0ea5741141f6cae0eb3449902 Mon Sep 17 00:00:00 2001 From: Tathagata Das Date: Fri, 7 Sep 2012 20:16:21 +0000 Subject: [PATCH 070/226] Fixed bugs in streaming Scheduler and optimized QueueInputDStream. --- core/src/main/scala/spark/RDD.scala | 2 +- .../main/scala/spark/streaming/QueueInputDStream.scala | 8 ++++++-- streaming/src/main/scala/spark/streaming/Scheduler.scala | 2 +- .../src/main/scala/spark/streaming/StateDStream.scala | 2 +- 4 files changed, 9 insertions(+), 5 deletions(-) diff --git a/core/src/main/scala/spark/RDD.scala b/core/src/main/scala/spark/RDD.scala index 3fe8e8a4bf..d28f3593fe 100644 --- a/core/src/main/scala/spark/RDD.scala +++ b/core/src/main/scala/spark/RDD.scala @@ -94,7 +94,7 @@ abstract class RDD[T: ClassManifest](@transient sc: SparkContext) extends Serial def getStorageLevel = storageLevel - def checkpoint(level: StorageLevel = StorageLevel.DISK_AND_MEMORY_DESER): RDD[T] = { + def checkpoint(level: StorageLevel = StorageLevel.DISK_AND_MEMORY_DESER_2): RDD[T] = { if (!level.useDisk && level.replication < 2) { throw new Exception("Cannot checkpoint without using disk or replication (level requested was " + level + ")") } diff --git a/streaming/src/main/scala/spark/streaming/QueueInputDStream.scala b/streaming/src/main/scala/spark/streaming/QueueInputDStream.scala index bab48ff954..f6b53fe2f2 100644 --- a/streaming/src/main/scala/spark/streaming/QueueInputDStream.scala +++ b/streaming/src/main/scala/spark/streaming/QueueInputDStream.scala @@ -25,7 +25,11 @@ class QueueInputDStream[T: ClassManifest]( buffer ++= queue } if (buffer.size > 0) { - Some(new UnionRDD(ssc.sc, buffer.toSeq)) + if (oneAtATime) { + Some(buffer.first) + } else { + Some(new UnionRDD(ssc.sc, buffer.toSeq)) + } } else if (defaultRDD != null) { Some(defaultRDD) } else { @@ -33,4 +37,4 @@ class QueueInputDStream[T: ClassManifest]( } } -} \ No newline at end of file +} diff --git a/streaming/src/main/scala/spark/streaming/Scheduler.scala b/streaming/src/main/scala/spark/streaming/Scheduler.scala index 12e52bf56c..b4b8e34ec8 100644 --- a/streaming/src/main/scala/spark/streaming/Scheduler.scala +++ b/streaming/src/main/scala/spark/streaming/Scheduler.scala @@ -26,7 +26,6 @@ extends Logging { val timer = new RecurringTimer(clock, ssc.batchDuration, generateRDDs(_)) def start() { - val zeroTime = Time(timer.start()) outputStreams.foreach(_.initialize(zeroTime)) inputStreams.par.foreach(_.start()) @@ -41,6 +40,7 @@ extends Logging { def generateRDDs (time: Time) { println("\n-----------------------------------------------------\n") + SparkEnv.set(ssc.env) logInfo("Generating RDDs for time " + time) outputStreams.foreach(outputStream => { outputStream.generateJob(time) match { diff --git a/streaming/src/main/scala/spark/streaming/StateDStream.scala b/streaming/src/main/scala/spark/streaming/StateDStream.scala index f313d8c162..9d3561b4a0 100644 --- a/streaming/src/main/scala/spark/streaming/StateDStream.scala +++ b/streaming/src/main/scala/spark/streaming/StateDStream.scala @@ -7,7 +7,7 @@ import spark.SparkContext._ class StateDStream[K: ClassManifest, V: ClassManifest, S <: AnyRef : ClassManifest]( - parent: DStream[(K, V)], + @transient parent: DStream[(K, V)], updateFunc: (Iterator[(K, Seq[V], S)]) => Iterator[(K, S)], partitioner: Partitioner, rememberPartitioner: 
Boolean From c63a6064584ea19d62e0abcbd3886d7b1e429ea1 Mon Sep 17 00:00:00 2001 From: Tathagata Das Date: Mon, 10 Sep 2012 19:51:27 +0000 Subject: [PATCH 071/226] Made NewHadoopRDD broadcast its job configuration (same as HadoopRDD). --- core/src/main/scala/spark/NewHadoopRDD.scala | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/core/src/main/scala/spark/NewHadoopRDD.scala b/core/src/main/scala/spark/NewHadoopRDD.scala index d024d38aa9..14f708a3f8 100644 --- a/core/src/main/scala/spark/NewHadoopRDD.scala +++ b/core/src/main/scala/spark/NewHadoopRDD.scala @@ -28,7 +28,9 @@ class NewHadoopRDD[K, V]( @transient conf: Configuration) extends RDD[(K, V)](sc) { - private val serializableConf = new SerializableWritable(conf) + // A Hadoop Configuration can be about 10 KB, which is pretty big, so broadcast it + val confBroadcast = sc.broadcast(new SerializableWritable(conf)) + // private val serializableConf = new SerializableWritable(conf) private val jobtrackerId: String = { val formatter = new SimpleDateFormat("yyyyMMddHHmm") @@ -41,7 +43,7 @@ class NewHadoopRDD[K, V]( @transient private val splits_ : Array[Split] = { val inputFormat = inputFormatClass.newInstance - val jobContext = new JobContext(serializableConf.value, jobId) + val jobContext = new JobContext(conf, jobId) val rawSplits = inputFormat.getSplits(jobContext).toArray val result = new Array[Split](rawSplits.size) for (i <- 0 until rawSplits.size) { @@ -54,9 +56,9 @@ class NewHadoopRDD[K, V]( override def compute(theSplit: Split) = new Iterator[(K, V)] { val split = theSplit.asInstanceOf[NewHadoopSplit] - val conf = serializableConf.value + val conf = confBroadcast.value.value val attemptId = new TaskAttemptID(jobtrackerId, id, true, split.index, 0) - val context = new TaskAttemptContext(serializableConf.value, attemptId) + val context = new TaskAttemptContext(conf, attemptId) val format = inputFormatClass.newInstance val reader = format.createRecordReader(split.serializableHadoopSplit.value, context) reader.initialize(split.serializableHadoopSplit.value, context) From 3cbc72ff1dc660a835c032356ba7b57883c5df5e Mon Sep 17 00:00:00 2001 From: Tathagata Das Date: Fri, 14 Sep 2012 07:00:30 +0000 Subject: [PATCH 072/226] Minor tweaks --- .../main/scala/spark/streaming/DStream.scala | 42 ++++++++++--------- .../scala/spark/streaming/StateDStream.scala | 4 +- 2 files changed, 24 insertions(+), 22 deletions(-) diff --git a/streaming/src/main/scala/spark/streaming/DStream.scala b/streaming/src/main/scala/spark/streaming/DStream.scala index 3973ca1520..7e8098c346 100644 --- a/streaming/src/main/scala/spark/streaming/DStream.scala +++ b/streaming/src/main/scala/spark/streaming/DStream.scala @@ -284,9 +284,8 @@ extends Logging with Serializable { } -abstract class InputDStream[T: ClassManifest] ( - @transient ssc: StreamingContext) -extends DStream[T](ssc) { +abstract class InputDStream[T: ClassManifest] (@transient ssc: StreamingContext) + extends DStream[T](ssc) { override def dependencies = List() @@ -303,9 +302,9 @@ extends DStream[T](ssc) { */ class MappedDStream[T: ClassManifest, U: ClassManifest] ( - parent: DStream[T], - mapFunc: T => U) -extends DStream[U](parent.ssc) { + @transient parent: DStream[T], + mapFunc: T => U + ) extends DStream[U](parent.ssc) { override def dependencies = List(parent) @@ -322,9 +321,9 @@ extends DStream[U](parent.ssc) { */ class FlatMappedDStream[T: ClassManifest, U: ClassManifest]( - parent: DStream[T], - flatMapFunc: T => Traversable[U]) -extends DStream[U](parent.ssc) { + 
@transient parent: DStream[T], + flatMapFunc: T => Traversable[U] + ) extends DStream[U](parent.ssc) { override def dependencies = List(parent) @@ -340,8 +339,10 @@ extends DStream[U](parent.ssc) { * TODO */ -class FilteredDStream[T: ClassManifest](parent: DStream[T], filterFunc: T => Boolean) -extends DStream[T](parent.ssc) { +class FilteredDStream[T: ClassManifest]( + @transient parent: DStream[T], + filterFunc: T => Boolean + ) extends DStream[T](parent.ssc) { override def dependencies = List(parent) @@ -358,9 +359,9 @@ extends DStream[T](parent.ssc) { */ class MapPartitionedDStream[T: ClassManifest, U: ClassManifest]( - parent: DStream[T], - mapPartFunc: Iterator[T] => Iterator[U]) -extends DStream[U](parent.ssc) { + @transient parent: DStream[T], + mapPartFunc: Iterator[T] => Iterator[U] + ) extends DStream[U](parent.ssc) { override def dependencies = List(parent) @@ -376,7 +377,8 @@ extends DStream[U](parent.ssc) { * TODO */ -class GlommedDStream[T: ClassManifest](parent: DStream[T]) extends DStream[Array[T]](parent.ssc) { +class GlommedDStream[T: ClassManifest](@transient parent: DStream[T]) + extends DStream[Array[T]](parent.ssc) { override def dependencies = List(parent) @@ -393,7 +395,7 @@ class GlommedDStream[T: ClassManifest](parent: DStream[T]) extends DStream[Array */ class ShuffledDStream[K: ClassManifest, V: ClassManifest, C: ClassManifest]( - parent: DStream[(K,V)], + @transient parent: DStream[(K,V)], createCombiner: V => C, mergeValue: (C, V) => C, mergeCombiner: (C, C) => C, @@ -418,7 +420,7 @@ class ShuffledDStream[K: ClassManifest, V: ClassManifest, C: ClassManifest]( * TODO */ -class UnifiedDStream[T: ClassManifest](parents: Array[DStream[T]]) +class UnifiedDStream[T: ClassManifest](@transient parents: Array[DStream[T]]) extends DStream[T](parents(0).ssc) { if (parents.length == 0) { @@ -457,7 +459,7 @@ class UnifiedDStream[T: ClassManifest](parents: Array[DStream[T]]) */ class PerElementForEachDStream[T: ClassManifest] ( - parent: DStream[T], + @transient parent: DStream[T], foreachFunc: T => Unit ) extends DStream[Unit](parent.ssc) { @@ -488,7 +490,7 @@ class PerElementForEachDStream[T: ClassManifest] ( */ class PerRDDForEachDStream[T: ClassManifest] ( - parent: DStream[T], + @transient parent: DStream[T], foreachFunc: (RDD[T], Time) => Unit ) extends DStream[Unit](parent.ssc) { @@ -516,7 +518,7 @@ class PerRDDForEachDStream[T: ClassManifest] ( */ class TransformedDStream[T: ClassManifest, U: ClassManifest] ( - parent: DStream[T], + @transient parent: DStream[T], transformFunc: (RDD[T], Time) => RDD[U] ) extends DStream[U](parent.ssc) { diff --git a/streaming/src/main/scala/spark/streaming/StateDStream.scala b/streaming/src/main/scala/spark/streaming/StateDStream.scala index 72b71d5fab..c40f70c91d 100644 --- a/streaming/src/main/scala/spark/streaming/StateDStream.scala +++ b/streaming/src/main/scala/spark/streaming/StateDStream.scala @@ -26,14 +26,14 @@ class StateDStream[K: ClassManifest, V: ClassManifest, S <: AnyRef : ClassManife override def getOrCompute(time: Time): Option[RDD[(K, S)]] = { generatedRDDs.get(time) match { case Some(oldRDD) => { - if (checkpointInterval != null && (time - zeroTime).isMultipleOf(checkpointInterval) && oldRDD.dependencies.size > 0) { + if (checkpointInterval != null && time > zeroTime && (time - zeroTime).isMultipleOf(checkpointInterval) && oldRDD.dependencies.size > 0) { val r = oldRDD val oldRDDBlockIds = oldRDD.splits.map(s => "rdd:" + r.id + ":" + s.index) val checkpointedRDD = new BlockRDD[(K, S)](ssc.sc, oldRDDBlockIds) { 
override val partitioner = oldRDD.partitioner } generatedRDDs.update(time, checkpointedRDD) - logInfo("Updated RDD of time " + time + " with its checkpointed version") + logInfo("Checkpointed RDD " + oldRDD.id + " of time " + time + " with its new RDD " + checkpointedRDD.id) Some(checkpointedRDD) } else { Some(oldRDD) From 86d420478f711e0f4eccc64c238efddf030a9b0f Mon Sep 17 00:00:00 2001 From: Tathagata Das Date: Mon, 17 Sep 2012 14:25:48 -0700 Subject: [PATCH 073/226] Allowed StreamingContext to be created from existing SparkContext --- .../scala/spark/streaming/StreamingContext.scala | 13 +++++-------- 1 file changed, 5 insertions(+), 8 deletions(-) diff --git a/streaming/src/main/scala/spark/streaming/StreamingContext.scala b/streaming/src/main/scala/spark/streaming/StreamingContext.scala index cb0f9ceb15..12f3626680 100644 --- a/streaming/src/main/scala/spark/streaming/StreamingContext.scala +++ b/streaming/src/main/scala/spark/streaming/StreamingContext.scala @@ -21,16 +21,13 @@ import org.apache.hadoop.io.Text import org.apache.hadoop.mapreduce.{InputFormat => NewInputFormat} import org.apache.hadoop.mapreduce.lib.input.TextInputFormat -class StreamingContext ( - master: String, - frameworkName: String, - val sparkHome: String = null, - val jars: Seq[String] = Nil) - extends Logging { - +class StreamingContext (@transient val sc: SparkContext) extends Logging { + + def this(master: String, frameworkName: String, sparkHome: String = null, jars: Seq[String] = Nil) = + this(new SparkContext(master, frameworkName, sparkHome, jars)) + initLogging() - val sc = new SparkContext(master, frameworkName, sparkHome, jars) val env = SparkEnv.get val inputStreams = new ArrayBuffer[InputDStream[_]]() From e95ff45b53bf995d89f1825b9581cc18a083a438 Mon Sep 17 00:00:00 2001 From: Tathagata Das Date: Sat, 13 Oct 2012 20:10:49 -0700 Subject: [PATCH 074/226] Implemented checkpointing of StreamingContext and DStream graph. 
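At a high level, the checkpoint introduced here is the driver-side metadata (master, framework name, batch duration, checkpoint settings, and the DStream graph) pushed through plain Java serialization to a Hadoop-compatible file, with the previous checkpoint kept as a .bk copy; restarting from a checkpoint deserializes that object and rebuilds the StreamingContext around it. A rough sketch of that save/load round trip, using a stand-in case class rather than the real Checkpoint fields:

import java.io.{ByteArrayInputStream, ByteArrayOutputStream, ObjectInputStream, ObjectOutputStream}

// Stand-in for the driver-side metadata; the real Checkpoint also carries the
// DStream graph, sparkHome, jars, and the checkpoint file/interval settings.
case class CheckpointSketch(master: String, frameworkName: String, batchDurationMs: Long)

object CheckpointSketch {
  // Serialize the metadata; the actual patch writes these bytes to a filesystem
  // path (backing up any existing checkpoint) instead of a byte array.
  def toBytes(cp: CheckpointSketch): Array[Byte] = {
    val bos = new ByteArrayOutputStream()
    val oos = new ObjectOutputStream(bos)
    oos.writeObject(cp)
    oos.close()
    bos.toByteArray
  }

  // Read the metadata back; a restarted context would be constructed from the
  // recovered object and re-wire the DStream graph to the new context.
  def fromBytes(bytes: Array[Byte]): CheckpointSketch = {
    val ois = new ObjectInputStream(new ByteArrayInputStream(bytes))
    try ois.readObject().asInstanceOf[CheckpointSketch] finally ois.close()
  }
}

Realigning batch times after a restart is handled separately by RecurringTimer.restart later in this patch, so the sketch covers only the serialization step.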
--- core/src/main/scala/spark/SparkContext.scala | 4 +- .../scala/spark/streaming/Checkpoint.scala | 92 +++++++++++++ .../main/scala/spark/streaming/DStream.scala | 123 ++++++++++++------ .../scala/spark/streaming/DStreamGraph.scala | 80 ++++++++++++ .../spark/streaming/FileInputDStream.scala | 59 ++++++--- .../streaming/ReducedWindowedDStream.scala | 84 ++++++------ .../scala/spark/streaming/Scheduler.scala | 33 +++-- .../scala/spark/streaming/StateDStream.scala | 20 +-- .../spark/streaming/StreamingContext.scala | 109 +++++++++++----- .../examples/FileStreamWithCheckpoint.scala | 76 +++++++++++ .../spark/streaming/examples/Grep2.scala | 2 +- .../spark/streaming/examples/WordCount2.scala | 2 +- .../spark/streaming/examples/WordMax2.scala | 2 +- .../spark/streaming/util/RecurringTimer.scala | 19 ++- 14 files changed, 538 insertions(+), 167 deletions(-) create mode 100644 streaming/src/main/scala/spark/streaming/Checkpoint.scala create mode 100644 streaming/src/main/scala/spark/streaming/DStreamGraph.scala create mode 100644 streaming/src/main/scala/spark/streaming/examples/FileStreamWithCheckpoint.scala diff --git a/core/src/main/scala/spark/SparkContext.scala b/core/src/main/scala/spark/SparkContext.scala index bebebe8262..1d5131ad13 100644 --- a/core/src/main/scala/spark/SparkContext.scala +++ b/core/src/main/scala/spark/SparkContext.scala @@ -46,8 +46,8 @@ import spark.scheduler.mesos.{CoarseMesosSchedulerBackend, MesosSchedulerBackend import spark.storage.BlockManagerMaster class SparkContext( - master: String, - frameworkName: String, + val master: String, + val frameworkName: String, val sparkHome: String, val jars: Seq[String]) extends Logging { diff --git a/streaming/src/main/scala/spark/streaming/Checkpoint.scala b/streaming/src/main/scala/spark/streaming/Checkpoint.scala new file mode 100644 index 0000000000..3bd8fd5a27 --- /dev/null +++ b/streaming/src/main/scala/spark/streaming/Checkpoint.scala @@ -0,0 +1,92 @@ +package spark.streaming + +import spark.Utils + +import org.apache.hadoop.fs.{FileUtil, Path} +import org.apache.hadoop.conf.Configuration + +import java.io.{ObjectInputStream, ObjectOutputStream} + +class Checkpoint(@transient ssc: StreamingContext) extends Serializable { + val master = ssc.sc.master + val frameworkName = ssc.sc.frameworkName + val sparkHome = ssc.sc.sparkHome + val jars = ssc.sc.jars + val graph = ssc.graph + val batchDuration = ssc.batchDuration + val checkpointFile = ssc.checkpointFile + val checkpointInterval = ssc.checkpointInterval + + def saveToFile(file: String) { + val path = new Path(file) + val conf = new Configuration() + val fs = path.getFileSystem(conf) + if (fs.exists(path)) { + val bkPath = new Path(path.getParent, path.getName + ".bk") + FileUtil.copy(fs, path, fs, bkPath, true, true, conf) + println("Moved existing checkpoint file to " + bkPath) + } + val fos = fs.create(path) + val oos = new ObjectOutputStream(fos) + oos.writeObject(this) + oos.close() + fs.close() + } + + def toBytes(): Array[Byte] = { + val cp = new Checkpoint(ssc) + val bytes = Utils.serialize(cp) + bytes + } +} + +object Checkpoint { + + def loadFromFile(file: String): Checkpoint = { + val path = new Path(file) + val conf = new Configuration() + val fs = path.getFileSystem(conf) + if (!fs.exists(path)) { + throw new Exception("Could not read checkpoint file " + path) + } + val fis = fs.open(path) + val ois = new ObjectInputStream(fis) + val cp = ois.readObject.asInstanceOf[Checkpoint] + ois.close() + fs.close() + cp + } + + def fromBytes(bytes: Array[Byte]): 
Checkpoint = { + Utils.deserialize[Checkpoint](bytes) + } + + /*def toBytes(ssc: StreamingContext): Array[Byte] = { + val cp = new Checkpoint(ssc) + val bytes = Utils.serialize(cp) + bytes + } + + + def saveContext(ssc: StreamingContext, file: String) { + val cp = new Checkpoint(ssc) + val path = new Path(file) + val conf = new Configuration() + val fs = path.getFileSystem(conf) + if (fs.exists(path)) { + val bkPath = new Path(path.getParent, path.getName + ".bk") + FileUtil.copy(fs, path, fs, bkPath, true, true, conf) + println("Moved existing checkpoint file to " + bkPath) + } + val fos = fs.create(path) + val oos = new ObjectOutputStream(fos) + oos.writeObject(cp) + oos.close() + fs.close() + } + + def loadContext(file: String): StreamingContext = { + loadCheckpoint(file).createNewContext() + } + */ +} diff --git a/streaming/src/main/scala/spark/streaming/DStream.scala b/streaming/src/main/scala/spark/streaming/DStream.scala index 7e8098c346..78e4c57647 100644 --- a/streaming/src/main/scala/spark/streaming/DStream.scala +++ b/streaming/src/main/scala/spark/streaming/DStream.scala @@ -2,20 +2,19 @@ package spark.streaming import spark.streaming.StreamingContext._ -import spark.RDD -import spark.UnionRDD -import spark.Logging +import spark._ import spark.SparkContext._ import spark.storage.StorageLevel -import spark.Partitioner import scala.collection.mutable.ArrayBuffer import scala.collection.mutable.HashMap import java.util.concurrent.ArrayBlockingQueue +import java.io.{ObjectInputStream, IOException, ObjectOutputStream} +import scala.Some -abstract class DStream[T: ClassManifest] (@transient val ssc: StreamingContext) -extends Logging with Serializable { +abstract class DStream[T: ClassManifest] (@transient var ssc: StreamingContext) +extends Serializable with Logging { initLogging() @@ -41,10 +40,10 @@ extends Logging with Serializable { */ // Variable to store the RDDs generated earlier in time - @transient protected val generatedRDDs = new HashMap[Time, RDD[T]] () + protected val generatedRDDs = new HashMap[Time, RDD[T]] () // Variable to be set to the first time seen by the DStream (effective time zero) - protected[streaming] var zeroTime: Time = null + protected var zeroTime: Time = null // Variable to specify storage level protected var storageLevel: StorageLevel = StorageLevel.NONE @@ -53,6 +52,9 @@ extends Logging with Serializable { protected var checkpointLevel: StorageLevel = StorageLevel.NONE // NONE means don't checkpoint protected var checkpointInterval: Time = null + // Reference to whole DStream graph, so that checkpointing process can lock it + protected var graph: DStreamGraph = null + // Change this RDD's storage level def persist( storageLevel: StorageLevel, @@ -77,7 +79,7 @@ extends Logging with Serializable { // Turn on the default caching level for this RDD def cache(): DStream[T] = persist() - def isInitialized = (zeroTime != null) + def isInitialized() = (zeroTime != null) /** * This method initializes the DStream by setting the "zero" time, based on which @@ -85,15 +87,33 @@ extends Logging with Serializable { * its parent DStreams. 
*/ protected[streaming] def initialize(time: Time) { - if (zeroTime == null) { - zeroTime = time + if (zeroTime != null) { + throw new Exception("ZeroTime is already initialized, cannot initialize it again") } + zeroTime = time logInfo(this + " initialized") dependencies.foreach(_.initialize(zeroTime)) } + protected[streaming] def setContext(s: StreamingContext) { + if (ssc != null && ssc != s) { + throw new Exception("Context is already set, cannot set it again") + } + ssc = s + logInfo("Set context for " + this.getClass.getSimpleName) + dependencies.foreach(_.setContext(ssc)) + } + + protected[streaming] def setGraph(g: DStreamGraph) { + if (graph != null && graph != g) { + throw new Exception("Graph is already set, cannot set it again") + } + graph = g + dependencies.foreach(_.setGraph(graph)) + } + /** This method checks whether the 'time' is valid wrt slideTime for generating RDD */ - protected def isTimeValid (time: Time): Boolean = { + protected def isTimeValid(time: Time): Boolean = { if (!isInitialized) { throw new Exception (this.toString + " has not been initialized") } else if (time < zeroTime || ! (time - zeroTime).isMultipleOf(slideTime)) { @@ -158,13 +178,42 @@ extends Logging with Serializable { } } + @throws(classOf[IOException]) + private def writeObject(oos: ObjectOutputStream) { + println(this.getClass().getSimpleName + ".writeObject used") + if (graph != null) { + graph.synchronized { + if (graph.checkpointInProgress) { + oos.defaultWriteObject() + } else { + val msg = "Object of " + this.getClass.getName + " is being serialized " + + " possibly as a part of closure of an RDD operation. This is because " + + " the DStream object is being referred to from within the closure. " + + " Please rewrite the RDD operation inside this DStream to avoid this. " + + " This has been enforced to avoid bloating of Spark tasks " + + " with unnecessary objects." 
+ throw new java.io.NotSerializableException(msg) + } + } + } else { + throw new java.io.NotSerializableException("Graph is unexpectedly null when DStream is being serialized.") + } + } + + @throws(classOf[IOException]) + private def readObject(ois: ObjectInputStream) { + println(this.getClass().getSimpleName + ".readObject used") + ois.defaultReadObject() + } + /** * -------------- * DStream operations * -------------- */ - - def map[U: ClassManifest](mapFunc: T => U) = new MappedDStream(this, ssc.sc.clean(mapFunc)) + def map[U: ClassManifest](mapFunc: T => U) = { + new MappedDStream(this, ssc.sc.clean(mapFunc)) + } def flatMap[U: ClassManifest](flatMapFunc: T => Traversable[U]) = { new FlatMappedDStream(this, ssc.sc.clean(flatMapFunc)) @@ -262,19 +311,15 @@ extends Logging with Serializable { // Get all the RDDs between fromTime to toTime (both included) def slice(fromTime: Time, toTime: Time): Seq[RDD[T]] = { - val rdds = new ArrayBuffer[RDD[T]]() var time = toTime.floor(slideTime) - - while (time >= zeroTime && time >= fromTime) { getOrCompute(time) match { case Some(rdd) => rdds += rdd - case None => throw new Exception("Could not get old reduced RDD for time " + time) + case None => //throw new Exception("Could not get RDD for time " + time) } time -= slideTime } - rdds.toSeq } @@ -284,12 +329,16 @@ extends Logging with Serializable { } -abstract class InputDStream[T: ClassManifest] (@transient ssc: StreamingContext) - extends DStream[T](ssc) { +abstract class InputDStream[T: ClassManifest] (@transient ssc_ : StreamingContext) + extends DStream[T](ssc_) { override def dependencies = List() - override def slideTime = ssc.batchDuration + override def slideTime = { + if (ssc == null) throw new Exception("ssc is null") + if (ssc.batchDuration == null) throw new Exception("ssc.batchDuration is null") + ssc.batchDuration + } def start() @@ -302,7 +351,7 @@ abstract class InputDStream[T: ClassManifest] (@transient ssc: StreamingContext) */ class MappedDStream[T: ClassManifest, U: ClassManifest] ( - @transient parent: DStream[T], + parent: DStream[T], mapFunc: T => U ) extends DStream[U](parent.ssc) { @@ -321,7 +370,7 @@ class MappedDStream[T: ClassManifest, U: ClassManifest] ( */ class FlatMappedDStream[T: ClassManifest, U: ClassManifest]( - @transient parent: DStream[T], + parent: DStream[T], flatMapFunc: T => Traversable[U] ) extends DStream[U](parent.ssc) { @@ -340,7 +389,7 @@ class FlatMappedDStream[T: ClassManifest, U: ClassManifest]( */ class FilteredDStream[T: ClassManifest]( - @transient parent: DStream[T], + parent: DStream[T], filterFunc: T => Boolean ) extends DStream[T](parent.ssc) { @@ -359,7 +408,7 @@ class FilteredDStream[T: ClassManifest]( */ class MapPartitionedDStream[T: ClassManifest, U: ClassManifest]( - @transient parent: DStream[T], + parent: DStream[T], mapPartFunc: Iterator[T] => Iterator[U] ) extends DStream[U](parent.ssc) { @@ -377,7 +426,7 @@ class MapPartitionedDStream[T: ClassManifest, U: ClassManifest]( * TODO */ -class GlommedDStream[T: ClassManifest](@transient parent: DStream[T]) +class GlommedDStream[T: ClassManifest](parent: DStream[T]) extends DStream[Array[T]](parent.ssc) { override def dependencies = List(parent) @@ -395,7 +444,7 @@ class GlommedDStream[T: ClassManifest](@transient parent: DStream[T]) */ class ShuffledDStream[K: ClassManifest, V: ClassManifest, C: ClassManifest]( - @transient parent: DStream[(K,V)], + parent: DStream[(K,V)], createCombiner: V => C, mergeValue: (C, V) => C, mergeCombiner: (C, C) => C, @@ -420,7 +469,7 @@ class 
ShuffledDStream[K: ClassManifest, V: ClassManifest, C: ClassManifest]( * TODO */ -class UnifiedDStream[T: ClassManifest](@transient parents: Array[DStream[T]]) +class UnifiedDStream[T: ClassManifest](parents: Array[DStream[T]]) extends DStream[T](parents(0).ssc) { if (parents.length == 0) { @@ -459,7 +508,7 @@ class UnifiedDStream[T: ClassManifest](@transient parents: Array[DStream[T]]) */ class PerElementForEachDStream[T: ClassManifest] ( - @transient parent: DStream[T], + parent: DStream[T], foreachFunc: T => Unit ) extends DStream[Unit](parent.ssc) { @@ -490,7 +539,7 @@ class PerElementForEachDStream[T: ClassManifest] ( */ class PerRDDForEachDStream[T: ClassManifest] ( - @transient parent: DStream[T], + parent: DStream[T], foreachFunc: (RDD[T], Time) => Unit ) extends DStream[Unit](parent.ssc) { @@ -518,15 +567,15 @@ class PerRDDForEachDStream[T: ClassManifest] ( */ class TransformedDStream[T: ClassManifest, U: ClassManifest] ( - @transient parent: DStream[T], + parent: DStream[T], transformFunc: (RDD[T], Time) => RDD[U] ) extends DStream[U](parent.ssc) { - override def dependencies = List(parent) + override def dependencies = List(parent) - override def slideTime: Time = parent.slideTime + override def slideTime: Time = parent.slideTime - override def compute(validTime: Time): Option[RDD[U]] = { - parent.getOrCompute(validTime).map(transformFunc(_, validTime)) - } + override def compute(validTime: Time): Option[RDD[U]] = { + parent.getOrCompute(validTime).map(transformFunc(_, validTime)) } +} diff --git a/streaming/src/main/scala/spark/streaming/DStreamGraph.scala b/streaming/src/main/scala/spark/streaming/DStreamGraph.scala new file mode 100644 index 0000000000..67859e0131 --- /dev/null +++ b/streaming/src/main/scala/spark/streaming/DStreamGraph.scala @@ -0,0 +1,80 @@ +package spark.streaming + +import java.io.{ObjectInputStream, IOException, ObjectOutputStream} +import collection.mutable.ArrayBuffer + +final class DStreamGraph extends Serializable { + + private val inputStreams = new ArrayBuffer[InputDStream[_]]() + private val outputStreams = new ArrayBuffer[DStream[_]]() + + private[streaming] var zeroTime: Time = null + private[streaming] var checkpointInProgress = false; + + def started() = (zeroTime != null) + + def start(time: Time) { + this.synchronized { + if (started) { + throw new Exception("DStream graph computation already started") + } + zeroTime = time + outputStreams.foreach(_.initialize(zeroTime)) + inputStreams.par.foreach(_.start()) + } + + } + + def stop() { + this.synchronized { + inputStreams.par.foreach(_.stop()) + } + } + + private[streaming] def setContext(ssc: StreamingContext) { + this.synchronized { + outputStreams.foreach(_.setContext(ssc)) + } + } + + def addInputStream(inputStream: InputDStream[_]) { + inputStream.setGraph(this) + inputStreams += inputStream + } + + def addOutputStream(outputStream: DStream[_]) { + outputStream.setGraph(this) + outputStreams += outputStream + } + + def getInputStreams() = inputStreams.toArray + + def getOutputStreams() = outputStreams.toArray + + def generateRDDs(time: Time): Seq[Job] = { + this.synchronized { + outputStreams.flatMap(outputStream => outputStream.generateJob(time)) + } + } + + @throws(classOf[IOException]) + private def writeObject(oos: ObjectOutputStream) { + this.synchronized { + checkpointInProgress = true + oos.defaultWriteObject() + checkpointInProgress = false + } + println("DStreamGraph.writeObject used") + } + + @throws(classOf[IOException]) + private def readObject(ois: ObjectInputStream) { + 
this.synchronized { + checkpointInProgress = true + ois.defaultReadObject() + checkpointInProgress = false + } + println("DStreamGraph.readObject used") + } +} + diff --git a/streaming/src/main/scala/spark/streaming/FileInputDStream.scala b/streaming/src/main/scala/spark/streaming/FileInputDStream.scala index 96a64f0018..29ae89616e 100644 --- a/streaming/src/main/scala/spark/streaming/FileInputDStream.scala +++ b/streaming/src/main/scala/spark/streaming/FileInputDStream.scala @@ -1,33 +1,45 @@ package spark.streaming -import spark.SparkContext import spark.RDD -import spark.BlockRDD import spark.UnionRDD -import spark.storage.StorageLevel -import spark.streaming._ -import scala.collection.mutable.ArrayBuffer -import scala.collection.mutable.HashMap - -import java.net.InetSocketAddress - -import org.apache.hadoop.fs.Path -import org.apache.hadoop.fs.PathFilter +import org.apache.hadoop.fs.{FileSystem, Path, PathFilter} import org.apache.hadoop.conf.Configuration import org.apache.hadoop.mapreduce.{InputFormat => NewInputFormat} +import java.io.{ObjectInputStream, IOException} class FileInputDStream[K: ClassManifest, V: ClassManifest, F <: NewInputFormat[K,V] : ClassManifest]( - ssc: StreamingContext, - directory: Path, + @transient ssc_ : StreamingContext, + directory: String, filter: PathFilter = FileInputDStream.defaultPathFilter, newFilesOnly: Boolean = true) - extends InputDStream[(K, V)](ssc) { - - val fs = directory.getFileSystem(new Configuration()) + extends InputDStream[(K, V)](ssc_) { + + @transient private var path_ : Path = null + @transient private var fs_ : FileSystem = null + + /* + @transient @noinline lazy val path = { + //if (directory == null) throw new Exception("directory is null") + //println(directory) + new Path(directory) + } + @transient lazy val fs = path.getFileSystem(new Configuration()) + */ + var lastModTime: Long = 0 - + + def path(): Path = { + if (path_ == null) path_ = new Path(directory) + path_ + } + + def fs(): FileSystem = { + if (fs_ == null) fs_ = path.getFileSystem(new Configuration()) + fs_ + } + override def start() { if (newFilesOnly) { lastModTime = System.currentTimeMillis() @@ -58,7 +70,7 @@ class FileInputDStream[K: ClassManifest, V: ClassManifest, F <: NewInputFormat[K } } - val newFiles = fs.listStatus(directory, newFilter) + val newFiles = fs.listStatus(path, newFilter) logInfo("New files: " + newFiles.map(_.getPath).mkString(", ")) if (newFiles.length > 0) { lastModTime = newFilter.latestModTime @@ -67,10 +79,19 @@ class FileInputDStream[K: ClassManifest, V: ClassManifest, F <: NewInputFormat[K file => ssc.sc.newAPIHadoopFile[K, V, F](file.getPath.toString))) Some(newRDD) } + /* + @throws(classOf[IOException]) + private def readObject(ois: ObjectInputStream) { + println(this.getClass().getSimpleName + ".readObject used") + ois.defaultReadObject() + println("HERE HERE" + this.directory) + } + */ + } object FileInputDStream { - val defaultPathFilter = new PathFilter { + val defaultPathFilter = new PathFilter with Serializable { def accept(path: Path): Boolean = { val file = path.getName() if (file.startsWith(".") || file.endsWith("_tmp")) { diff --git a/streaming/src/main/scala/spark/streaming/ReducedWindowedDStream.scala b/streaming/src/main/scala/spark/streaming/ReducedWindowedDStream.scala index b0beaba94d..e161b5ba92 100644 --- a/streaming/src/main/scala/spark/streaming/ReducedWindowedDStream.scala +++ b/streaming/src/main/scala/spark/streaming/ReducedWindowedDStream.scala @@ -10,9 +10,10 @@ import spark.SparkContext._ import 
spark.storage.StorageLevel import scala.collection.mutable.ArrayBuffer +import collection.SeqProxy class ReducedWindowedDStream[K: ClassManifest, V: ClassManifest]( - @transient parent: DStream[(K, V)], + parent: DStream[(K, V)], reduceFunc: (V, V) => V, invReduceFunc: (V, V) => V, _windowTime: Time, @@ -46,6 +47,8 @@ class ReducedWindowedDStream[K: ClassManifest, V: ClassManifest]( } override def compute(validTime: Time): Option[RDD[(K, V)]] = { + val reduceF = reduceFunc + val invReduceF = invReduceFunc val currentTime = validTime val currentWindow = Interval(currentTime - windowTime + parent.slideTime, currentTime) @@ -84,54 +87,47 @@ class ReducedWindowedDStream[K: ClassManifest, V: ClassManifest]( // Cogroup the reduced RDDs and merge the reduced values val cogroupedRDD = new CoGroupedRDD[K](allRDDs.toSeq.asInstanceOf[Seq[RDD[(_, _)]]], partitioner) - val mergeValuesFunc = mergeValues(oldRDDs.size, newRDDs.size) _ - val mergedValuesRDD = cogroupedRDD.asInstanceOf[RDD[(K,Seq[Seq[V]])]].mapValues(mergeValuesFunc) + //val mergeValuesFunc = mergeValues(oldRDDs.size, newRDDs.size) _ + + val numOldValues = oldRDDs.size + val numNewValues = newRDDs.size + + val mergeValues = (seqOfValues: Seq[Seq[V]]) => { + if (seqOfValues.size != 1 + numOldValues + numNewValues) { + throw new Exception("Unexpected number of sequences of reduced values") + } + // Getting reduced values "old time steps" that will be removed from current window + val oldValues = (1 to numOldValues).map(i => seqOfValues(i)).filter(!_.isEmpty).map(_.head) + // Getting reduced values "new time steps" + val newValues = (1 to numNewValues).map(i => seqOfValues(numOldValues + i)).filter(!_.isEmpty).map(_.head) + if (seqOfValues(0).isEmpty) { + // If previous window's reduce value does not exist, then at least new values should exist + if (newValues.isEmpty) { + throw new Exception("Neither previous window has value for key, nor new values found") + } + // Reduce the new values + newValues.reduce(reduceF) // return + } else { + // Get the previous window's reduced value + var tempValue = seqOfValues(0).head + // If old values exists, then inverse reduce then from previous value + if (!oldValues.isEmpty) { + tempValue = invReduceF(tempValue, oldValues.reduce(reduceF)) + } + // If new values exists, then reduce them with previous value + if (!newValues.isEmpty) { + tempValue = reduceF(tempValue, newValues.reduce(reduceF)) + } + tempValue // return + } + } + + val mergedValuesRDD = cogroupedRDD.asInstanceOf[RDD[(K,Seq[Seq[V]])]].mapValues(mergeValues) Some(mergedValuesRDD) } - def mergeValues(numOldValues: Int, numNewValues: Int)(seqOfValues: Seq[Seq[V]]): V = { - if (seqOfValues.size != 1 + numOldValues + numNewValues) { - throw new Exception("Unexpected number of sequences of reduced values") - } - - // Getting reduced values "old time steps" that will be removed from current window - val oldValues = (1 to numOldValues).map(i => seqOfValues(i)).filter(!_.isEmpty).map(_.head) - - // Getting reduced values "new time steps" - val newValues = (1 to numNewValues).map(i => seqOfValues(numOldValues + i)).filter(!_.isEmpty).map(_.head) - - if (seqOfValues(0).isEmpty) { - - // If previous window's reduce value does not exist, then at least new values should exist - if (newValues.isEmpty) { - throw new Exception("Neither previous window has value for key, nor new values found") - } - - // Reduce the new values - // println("new values = " + newValues.map(_.toString).reduce(_ + " " + _)) - return newValues.reduce(reduceFunc) - } else { - - // 
Get the previous window's reduced value - var tempValue = seqOfValues(0).head - - // If old values exists, then inverse reduce then from previous value - if (!oldValues.isEmpty) { - // println("old values = " + oldValues.map(_.toString).reduce(_ + " " + _)) - tempValue = invReduceFunc(tempValue, oldValues.reduce(reduceFunc)) - } - - // If new values exists, then reduce them with previous value - if (!newValues.isEmpty) { - // println("new values = " + newValues.map(_.toString).reduce(_ + " " + _)) - tempValue = reduceFunc(tempValue, newValues.reduce(reduceFunc)) - } - // println("final value = " + tempValue) - return tempValue - } - } } diff --git a/streaming/src/main/scala/spark/streaming/Scheduler.scala b/streaming/src/main/scala/spark/streaming/Scheduler.scala index d2e907378d..d62b7e7140 100644 --- a/streaming/src/main/scala/spark/streaming/Scheduler.scala +++ b/streaming/src/main/scala/spark/streaming/Scheduler.scala @@ -11,45 +11,44 @@ import scala.collection.mutable.HashMap sealed trait SchedulerMessage case class InputGenerated(inputName: String, interval: Interval, reference: AnyRef = null) extends SchedulerMessage -class Scheduler( - ssc: StreamingContext, - inputStreams: Array[InputDStream[_]], - outputStreams: Array[DStream[_]]) +class Scheduler(ssc: StreamingContext) extends Logging { initLogging() + val graph = ssc.graph val concurrentJobs = System.getProperty("spark.stream.concurrentJobs", "1").toInt val jobManager = new JobManager(ssc, concurrentJobs) val clockClass = System.getProperty("spark.streaming.clock", "spark.streaming.util.SystemClock") val clock = Class.forName(clockClass).newInstance().asInstanceOf[Clock] val timer = new RecurringTimer(clock, ssc.batchDuration, generateRDDs(_)) - + + def start() { - val zeroTime = Time(timer.start()) - outputStreams.foreach(_.initialize(zeroTime)) - inputStreams.par.foreach(_.start()) + if (graph.started) { + timer.restart(graph.zeroTime.milliseconds) + } else { + val zeroTime = Time(timer.start()) + graph.start(zeroTime) + } logInfo("Scheduler started") } def stop() { timer.stop() - inputStreams.par.foreach(_.stop()) + graph.stop() logInfo("Scheduler stopped") } - def generateRDDs (time: Time) { + def generateRDDs(time: Time) { SparkEnv.set(ssc.env) logInfo("\n-----------------------------------------------------\n") logInfo("Generating RDDs for time " + time) - outputStreams.foreach(outputStream => { - outputStream.generateJob(time) match { - case Some(job) => submitJob(job) - case None => - } - } - ) + graph.generateRDDs(time).foreach(submitJob) logInfo("Generated RDDs for time " + time) + if (ssc.checkpointInterval != null && (time - graph.zeroTime).isMultipleOf(ssc.checkpointInterval)) { + ssc.checkpoint() + } } def submitJob(job: Job) { diff --git a/streaming/src/main/scala/spark/streaming/StateDStream.scala b/streaming/src/main/scala/spark/streaming/StateDStream.scala index c40f70c91d..d223f25dfc 100644 --- a/streaming/src/main/scala/spark/streaming/StateDStream.scala +++ b/streaming/src/main/scala/spark/streaming/StateDStream.scala @@ -7,6 +7,12 @@ import spark.MapPartitionsRDD import spark.SparkContext._ import spark.storage.StorageLevel + +class StateRDD[U: ClassManifest, T: ClassManifest](prev: RDD[T], f: Iterator[T] => Iterator[U], rememberPartitioner: Boolean) + extends MapPartitionsRDD[U, T](prev, f) { + override val partitioner = if (rememberPartitioner) prev.partitioner else None +} + class StateDStream[K: ClassManifest, V: ClassManifest, S <: AnyRef : ClassManifest]( @transient parent: DStream[(K, V)], 
updateFunc: (Iterator[(K, Seq[V], S)]) => Iterator[(K, S)], @@ -14,11 +20,6 @@ class StateDStream[K: ClassManifest, V: ClassManifest, S <: AnyRef : ClassManife rememberPartitioner: Boolean ) extends DStream[(K, S)](parent.ssc) { - class SpecialMapPartitionsRDD[U: ClassManifest, T: ClassManifest](prev: RDD[T], f: Iterator[T] => Iterator[U]) - extends MapPartitionsRDD(prev, f) { - override val partitioner = if (rememberPartitioner) prev.partitioner else None - } - override def dependencies = List(parent) override def slideTime = parent.slideTime @@ -79,19 +80,18 @@ class StateDStream[K: ClassManifest, V: ClassManifest, S <: AnyRef : ClassManife // first map the cogrouped tuple to tuples of required type, // and then apply the update function val updateFuncLocal = updateFunc - val mapPartitionFunc = (iterator: Iterator[(K, (Seq[V], Seq[S]))]) => { + val finalFunc = (iterator: Iterator[(K, (Seq[V], Seq[S]))]) => { val i = iterator.map(t => { (t._1, t._2._1, t._2._2.headOption.getOrElse(null.asInstanceOf[S])) }) updateFuncLocal(i) } val cogroupedRDD = parentRDD.cogroup(prevStateRDD, partitioner) - val stateRDD = new SpecialMapPartitionsRDD(cogroupedRDD, mapPartitionFunc) + val stateRDD = new StateRDD(cogroupedRDD, finalFunc, rememberPartitioner) //logDebug("Generating state RDD for time " + validTime) return Some(stateRDD) } case None => { // If parent RDD does not exist, then return old state RDD - //logDebug("Generating state RDD for time " + validTime + " (no change)") return Some(prevStateRDD) } } @@ -107,12 +107,12 @@ class StateDStream[K: ClassManifest, V: ClassManifest, S <: AnyRef : ClassManife // first map the grouped tuple to tuples of required type, // and then apply the update function val updateFuncLocal = updateFunc - val mapPartitionFunc = (iterator: Iterator[(K, Seq[V])]) => { + val finalFunc = (iterator: Iterator[(K, Seq[V])]) => { updateFuncLocal(iterator.map(tuple => (tuple._1, tuple._2, null.asInstanceOf[S]))) } val groupedRDD = parentRDD.groupByKey(partitioner) - val sessionRDD = new SpecialMapPartitionsRDD(groupedRDD, mapPartitionFunc) + val sessionRDD = new StateRDD(groupedRDD, finalFunc, rememberPartitioner) //logDebug("Generating state RDD for time " + validTime + " (first)") return Some(sessionRDD) } diff --git a/streaming/src/main/scala/spark/streaming/StreamingContext.scala b/streaming/src/main/scala/spark/streaming/StreamingContext.scala index 12f3626680..1499ef4ea2 100644 --- a/streaming/src/main/scala/spark/streaming/StreamingContext.scala +++ b/streaming/src/main/scala/spark/streaming/StreamingContext.scala @@ -21,31 +21,70 @@ import org.apache.hadoop.io.Text import org.apache.hadoop.mapreduce.{InputFormat => NewInputFormat} import org.apache.hadoop.mapreduce.lib.input.TextInputFormat -class StreamingContext (@transient val sc: SparkContext) extends Logging { +class StreamingContext ( + sc_ : SparkContext, + cp_ : Checkpoint + ) extends Logging { + + def this(sparkContext: SparkContext) = this(sparkContext, null) def this(master: String, frameworkName: String, sparkHome: String = null, jars: Seq[String] = Nil) = - this(new SparkContext(master, frameworkName, sparkHome, jars)) + this(new SparkContext(master, frameworkName, sparkHome, jars), null) + + def this(file: String) = this(null, Checkpoint.loadFromFile(file)) + + def this(cp_ : Checkpoint) = this(null, cp_) initLogging() + if (sc_ == null && cp_ == null) { + throw new Exception("Streaming Context cannot be initilalized with " + + "both SparkContext and checkpoint as null") + } + + val isCheckpointPresent = 
(cp_ != null) + + val sc: SparkContext = { + if (isCheckpointPresent) { + new SparkContext(cp_.master, cp_.frameworkName, cp_.sparkHome, cp_.jars) + } else { + sc_ + } + } + val env = SparkEnv.get - - val inputStreams = new ArrayBuffer[InputDStream[_]]() - val outputStreams = new ArrayBuffer[DStream[_]]() + + val graph: DStreamGraph = { + if (isCheckpointPresent) { + + cp_.graph.setContext(this) + cp_.graph + } else { + new DStreamGraph() + } + } + val nextNetworkInputStreamId = new AtomicInteger(0) - var batchDuration: Time = null - var scheduler: Scheduler = null + var batchDuration: Time = if (isCheckpointPresent) cp_.batchDuration else null + var checkpointFile: String = if (isCheckpointPresent) cp_.checkpointFile else null + var checkpointInterval: Time = if (isCheckpointPresent) cp_.checkpointInterval else null var networkInputTracker: NetworkInputTracker = null - var receiverJobThread: Thread = null - - def setBatchDuration(duration: Long) { - setBatchDuration(Time(duration)) - } - + var receiverJobThread: Thread = null + var scheduler: Scheduler = null + def setBatchDuration(duration: Time) { + if (batchDuration != null) { + throw new Exception("Batch duration alread set as " + batchDuration + + ". cannot set it again.") + } batchDuration = duration } + + def setCheckpointDetails(file: String, interval: Time) { + checkpointFile = file + checkpointInterval = interval + } private[streaming] def getNewNetworkStreamId() = nextNetworkInputStreamId.getAndIncrement() @@ -59,7 +98,7 @@ class StreamingContext (@transient val sc: SparkContext) extends Logging { converter: (InputStream) => Iterator[T] ): DStream[T] = { val inputStream = new ObjectInputDStream[T](this, hostname, port, converter) - inputStreams += inputStream + graph.addInputStream(inputStream) inputStream } @@ -69,7 +108,7 @@ class StreamingContext (@transient val sc: SparkContext) extends Logging { storageLevel: StorageLevel = StorageLevel.MEMORY_ONLY_2 ): DStream[T] = { val inputStream = new RawInputDStream[T](this, hostname, port, storageLevel) - inputStreams += inputStream + graph.addInputStream(inputStream) inputStream } @@ -94,8 +133,8 @@ class StreamingContext (@transient val sc: SparkContext) extends Logging { V: ClassManifest, F <: NewInputFormat[K, V]: ClassManifest ](directory: String): DStream[(K, V)] = { - val inputStream = new FileInputDStream[K, V, F](this, new Path(directory)) - inputStreams += inputStream + val inputStream = new FileInputDStream[K, V, F](this, directory) + graph.addInputStream(inputStream) inputStream } @@ -113,24 +152,31 @@ class StreamingContext (@transient val sc: SparkContext) extends Logging { defaultRDD: RDD[T] = null ): DStream[T] = { val inputStream = new QueueInputDStream(this, queue, oneAtATime, defaultRDD) - inputStreams += inputStream + graph.addInputStream(inputStream) inputStream } - def createQueueStream[T: ClassManifest](iterator: Iterator[RDD[T]]): DStream[T] = { + def createQueueStream[T: ClassManifest](iterator: Array[RDD[T]]): DStream[T] = { val queue = new Queue[RDD[T]] val inputStream = createQueueStream(queue, true, null) queue ++= iterator inputStream - } + } + + /** + * This function registers a InputDStream as an input stream that will be + * started (InputDStream.start() called) to get the input data streams. + */ + def registerInputStream(inputStream: InputDStream[_]) { + graph.addInputStream(inputStream) + } - /** * This function registers a DStream as an output stream that will be * computed every interval. 
*/ - def registerOutputStream (outputStream: DStream[_]) { - outputStreams += outputStream + def registerOutputStream(outputStream: DStream[_]) { + graph.addOutputStream(outputStream) } /** @@ -143,13 +189,9 @@ class StreamingContext (@transient val sc: SparkContext) extends Logging { if (batchDuration < Milliseconds(100)) { logWarning("Batch duration of " + batchDuration + " is very low") } - if (inputStreams.size == 0) { - throw new Exception("No input streams created, so nothing to take input from") - } - if (outputStreams.size == 0) { + if (graph.getOutputStreams().size == 0) { throw new Exception("No output streams registered, so nothing to execute") } - } /** @@ -157,7 +199,7 @@ class StreamingContext (@transient val sc: SparkContext) extends Logging { */ def start() { verify() - val networkInputStreams = inputStreams.filter(s => s match { + val networkInputStreams = graph.getInputStreams().filter(s => s match { case n: NetworkInputDStream[_] => true case _ => false }).map(_.asInstanceOf[NetworkInputDStream[_]]).toArray @@ -169,8 +211,9 @@ class StreamingContext (@transient val sc: SparkContext) extends Logging { } Thread.sleep(1000) - // Start the scheduler - scheduler = new Scheduler(this, inputStreams.toArray, outputStreams.toArray) + + // Start the scheduler + scheduler = new Scheduler(this) scheduler.start() } @@ -189,6 +232,10 @@ class StreamingContext (@transient val sc: SparkContext) extends Logging { logInfo("StreamingContext stopped") } + + def checkpoint() { + new Checkpoint(this).saveToFile(checkpointFile) + } } diff --git a/streaming/src/main/scala/spark/streaming/examples/FileStreamWithCheckpoint.scala b/streaming/src/main/scala/spark/streaming/examples/FileStreamWithCheckpoint.scala new file mode 100644 index 0000000000..c725035a8a --- /dev/null +++ b/streaming/src/main/scala/spark/streaming/examples/FileStreamWithCheckpoint.scala @@ -0,0 +1,76 @@ +package spark.streaming.examples + +import spark.streaming._ +import spark.streaming.StreamingContext._ +import org.apache.hadoop.fs.Path +import org.apache.hadoop.conf.Configuration + +object FileStreamWithCheckpoint { + + def main(args: Array[String]) { + + if (args.size != 3) { + println("FileStreamWithCheckpoint ") + println("FileStreamWithCheckpoint restart ") + System.exit(-1) + } + + val directory = new Path(args(1)) + val checkpointFile = args(2) + + val ssc: StreamingContext = { + + if (args(0) == "restart") { + + // Recreated streaming context from specified checkpoint file + new StreamingContext(checkpointFile) + + } else { + + // Create directory if it does not exist + val fs = directory.getFileSystem(new Configuration()) + if (!fs.exists(directory)) fs.mkdirs(directory) + + // Create new streaming context + val ssc_ = new StreamingContext(args(0), "FileStreamWithCheckpoint") + ssc_.setBatchDuration(Seconds(1)) + ssc_.setCheckpointDetails(checkpointFile, Seconds(1)) + + // Setup the streaming computation + val inputStream = ssc_.createTextFileStream(directory.toString) + val words = inputStream.flatMap(_.split(" ")) + val wordCounts = words.map(x => (x, 1)).reduceByKey(_ + _) + wordCounts.print() + + ssc_ + } + } + + // Start the stream computation + startFileWritingThread(directory.toString) + ssc.start() + } + + def startFileWritingThread(directory: String) { + + val fs = new Path(directory).getFileSystem(new Configuration()) + + val fileWritingThread = new Thread() { + override def run() { + val r = new scala.util.Random() + val text = "This is a sample text file with a random number " + while(true) { + val 
number = r.nextInt() + val file = new Path(directory, number.toString) + val fos = fs.create(file) + fos.writeChars(text + number) + fos.close() + println("Created text file " + file) + Thread.sleep(1000) + } + } + } + fileWritingThread.start() + } + +} diff --git a/streaming/src/main/scala/spark/streaming/examples/Grep2.scala b/streaming/src/main/scala/spark/streaming/examples/Grep2.scala index 7237142c7c..b1faa65c17 100644 --- a/streaming/src/main/scala/spark/streaming/examples/Grep2.scala +++ b/streaming/src/main/scala/spark/streaming/examples/Grep2.scala @@ -50,7 +50,7 @@ object Grep2 { println("Data count: " + data.count()) val sentences = new ConstantInputDStream(ssc, data) - ssc.inputStreams += sentences + ssc.registerInputStream(sentences) sentences.filter(_.contains("Culpepper")).count().foreachRDD(r => println("Grep count: " + r.collect().mkString)) diff --git a/streaming/src/main/scala/spark/streaming/examples/WordCount2.scala b/streaming/src/main/scala/spark/streaming/examples/WordCount2.scala index c22949d7b9..8390f4af94 100644 --- a/streaming/src/main/scala/spark/streaming/examples/WordCount2.scala +++ b/streaming/src/main/scala/spark/streaming/examples/WordCount2.scala @@ -93,7 +93,7 @@ object WordCount2 { println("Data count: " + data.count()) val sentences = new ConstantInputDStream(ssc, data) - ssc.inputStreams += sentences + ssc.registerInputStream(sentences) import WordCount2_ExtraFunctions._ diff --git a/streaming/src/main/scala/spark/streaming/examples/WordMax2.scala b/streaming/src/main/scala/spark/streaming/examples/WordMax2.scala index 3658cb302d..fc7567322b 100644 --- a/streaming/src/main/scala/spark/streaming/examples/WordMax2.scala +++ b/streaming/src/main/scala/spark/streaming/examples/WordMax2.scala @@ -50,7 +50,7 @@ object WordMax2 { println("Data count: " + data.count()) val sentences = new ConstantInputDStream(ssc, data) - ssc.inputStreams += sentences + ssc.registerInputStream(sentences) import WordCount2_ExtraFunctions._ diff --git a/streaming/src/main/scala/spark/streaming/util/RecurringTimer.scala b/streaming/src/main/scala/spark/streaming/util/RecurringTimer.scala index 5da9fa6ecc..7f19b26a79 100644 --- a/streaming/src/main/scala/spark/streaming/util/RecurringTimer.scala +++ b/streaming/src/main/scala/spark/streaming/util/RecurringTimer.scala @@ -17,12 +17,23 @@ class RecurringTimer(val clock: Clock, val period: Long, val callback: (Long) => } var nextTime = 0L - - def start(): Long = { - nextTime = (math.floor(clock.currentTime / period) + 1).toLong * period - thread.start() + + def start(startTime: Long): Long = { + nextTime = startTime + thread.start() nextTime } + + def start(): Long = { + val startTime = math.ceil(clock.currentTime / period).toLong * period + start(startTime) + } + + def restart(originalStartTime: Long): Long = { + val gap = clock.currentTime - originalStartTime + val newStartTime = math.ceil(gap / period).toLong * period + originalStartTime + start(newStartTime) + } def stop() { thread.interrupt() From b08708e6fcb59a09b36c5b8e3e7a4aa98f7ad050 Mon Sep 17 00:00:00 2001 From: Tathagata Das Date: Sat, 13 Oct 2012 21:02:24 -0700 Subject: [PATCH 075/226] Fixed bugs in the streaming testsuites. 
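A rough standalone sketch of the schedule re-alignment idea behind the restart method added to RecurringTimer above: after coming back up, the next trigger is pushed forward by a whole number of periods measured from the original start time, so triggers keep landing on the original boundaries. The names below are hypothetical and the code is illustration only, not the patched class.

object TimerRealignmentSketch {
  def nextAlignedTriggerTime(currentTime: Long, originalStartTime: Long, period: Long): Long = {
    val gap = currentTime - originalStartTime
    // Skip the periods that have already elapsed and land on the next boundary
    // measured from the original start time.
    val periodsElapsed = math.floor(gap.toDouble / period).toLong + 1
    originalStartTime + periodsElapsed * period
  }

  def main(args: Array[String]) {
    // If the original schedule started at t = 1000 ms with a 1000 ms period and the
    // process comes back at t = 3500 ms, the next trigger should fall at t = 4000 ms.
    println(nextAlignedTriggerTime(3500L, 1000L, 1000L))
  }
}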
--- .../spark/streaming/DStreamBasicSuite.scala | 18 ++++++++++++------ .../spark/streaming/DStreamSuiteBase.scala | 7 ++++++- 2 files changed, 18 insertions(+), 7 deletions(-) diff --git a/streaming/src/test/scala/spark/streaming/DStreamBasicSuite.scala b/streaming/src/test/scala/spark/streaming/DStreamBasicSuite.scala index 9b953d9dae..965b58c03f 100644 --- a/streaming/src/test/scala/spark/streaming/DStreamBasicSuite.scala +++ b/streaming/src/test/scala/spark/streaming/DStreamBasicSuite.scala @@ -39,22 +39,28 @@ class DStreamBasicSuite extends DStreamSuiteBase { test("stateful operations") { val inputData = Seq( + Seq("a"), + Seq("a", "b"), Seq("a", "b", "c"), - Seq("a", "b", "c"), - Seq("a", "b", "c") + Seq("a", "b"), + Seq("a"), + Seq() ) val outputData = Seq( - Seq(("a", 1), ("b", 1), ("c", 1)), - Seq(("a", 2), ("b", 2), ("c", 2)), - Seq(("a", 3), ("b", 3), ("c", 3)) + Seq(("a", 1)), + Seq(("a", 2), ("b", 1)), + Seq(("a", 3), ("b", 2), ("c", 1)), + Seq(("a", 4), ("b", 3), ("c", 1)), + Seq(("a", 5), ("b", 3), ("c", 1)), + Seq(("a", 5), ("b", 3), ("c", 1)) ) val updateStateOp = (s: DStream[String]) => { val updateFunc = (values: Seq[Int], state: RichInt) => { var newState = 0 - if (values != null) newState += values.reduce(_ + _) + if (values != null && values.size > 0) newState += values.reduce(_ + _) if (state != null) newState += state.self //println("values = " + values + ", state = " + state + ", " + " new state = " + newState) new RichInt(newState) diff --git a/streaming/src/test/scala/spark/streaming/DStreamSuiteBase.scala b/streaming/src/test/scala/spark/streaming/DStreamSuiteBase.scala index 1c4ea14b1d..59fe36baf0 100644 --- a/streaming/src/test/scala/spark/streaming/DStreamSuiteBase.scala +++ b/streaming/src/test/scala/spark/streaming/DStreamSuiteBase.scala @@ -45,14 +45,19 @@ trait DStreamSuiteBase extends FunSuite with Logging { val clock = ssc.scheduler.clock if (clock.isInstanceOf[ManualClock]) { - clock.asInstanceOf[ManualClock].addToTime(input.size * batchDuration.milliseconds) + clock.asInstanceOf[ManualClock].addToTime((input.size - 1) * batchDuration.milliseconds) } val startTime = System.currentTimeMillis() while (output.size < expectedOutput.size && System.currentTimeMillis() - startTime < maxWaitTimeMillis) { + println("output.size = " + output.size + ", expectedOutput.size = " + expectedOutput.size) Thread.sleep(500) } + println("output.size = " + output.size) + println("output") + output.foreach(x => println("[" + x.mkString(",") + "]")) + assert(output.size === expectedOutput.size) for (i <- 0 until output.size) { if (useSet) { From 3f1aae5c71a220564adc9039dbc0e4b22aea315d Mon Sep 17 00:00:00 2001 From: Tathagata Das Date: Sun, 14 Oct 2012 21:39:30 -0700 Subject: [PATCH 076/226] Refactored DStreamSuiteBase to create CheckpointSuite- testsuite for testing checkpointing under different operations. 
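The expected output of the revised stateful-operations test above is just a running per-key count, where keys missing from a batch keep their previous counts. A minimal sketch with plain Scala collections (hypothetical names, not part of the test suite) reproduces the same sequence:

object RunningCountsSketch {
  def main(args: Array[String]) {
    val batches = Seq(Seq("a"), Seq("a", "b"), Seq("a", "b", "c"), Seq("a", "b"), Seq("a"), Seq())
    var state = Map[String, Int]()
    for (batch <- batches) {
      // Add 1 to the count of every key seen in this batch; keys absent from the
      // batch keep their previous counts, mirroring the updateStateByKey test above.
      for (key <- batch) {
        state = state.updated(key, state.getOrElse(key, 0) + 1)
      }
      println(state.toSeq.sortBy(_._1).mkString(", "))
    }
  }
}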
--- .../scala/spark/streaming/Checkpoint.scala | 94 ++++----- .../streaming/ConstantInputDStream.scala | 4 +- .../main/scala/spark/streaming/DStream.scala | 4 +- .../scala/spark/streaming/DStreamGraph.scala | 13 +- .../scala/spark/streaming/Scheduler.scala | 20 +- .../spark/streaming/StreamingContext.scala | 18 +- .../scala/spark/streaming/util/Clock.scala | 11 +- .../spark/streaming/util/RecurringTimer.scala | 4 +- .../spark/streaming/CheckpointSuite.scala | 48 +++++ .../spark/streaming/DStreamBasicSuite.scala | 5 +- .../spark/streaming/DStreamSuiteBase.scala | 185 +++++++++++++----- .../spark/streaming/DStreamWindowSuite.scala | 39 ++-- 12 files changed, 300 insertions(+), 145 deletions(-) create mode 100644 streaming/src/test/scala/spark/streaming/CheckpointSuite.scala diff --git a/streaming/src/main/scala/spark/streaming/Checkpoint.scala b/streaming/src/main/scala/spark/streaming/Checkpoint.scala index 3bd8fd5a27..b38911b646 100644 --- a/streaming/src/main/scala/spark/streaming/Checkpoint.scala +++ b/streaming/src/main/scala/spark/streaming/Checkpoint.scala @@ -5,11 +5,12 @@ import spark.Utils import org.apache.hadoop.fs.{FileUtil, Path} import org.apache.hadoop.conf.Configuration -import java.io.{ObjectInputStream, ObjectOutputStream} +import java.io.{InputStream, ObjectStreamClass, ObjectInputStream, ObjectOutputStream} -class Checkpoint(@transient ssc: StreamingContext) extends Serializable { + +class Checkpoint(@transient ssc: StreamingContext, val checkpointTime: Time) extends Serializable { val master = ssc.sc.master - val frameworkName = ssc.sc.frameworkName + val framework = ssc.sc.frameworkName val sparkHome = ssc.sc.sparkHome val jars = ssc.sc.jars val graph = ssc.graph @@ -17,7 +18,16 @@ class Checkpoint(@transient ssc: StreamingContext) extends Serializable { val checkpointFile = ssc.checkpointFile val checkpointInterval = ssc.checkpointInterval - def saveToFile(file: String) { + validate() + + def validate() { + assert(master != null, "Checkpoint.master is null") + assert(framework != null, "Checkpoint.framework is null") + assert(graph != null, "Checkpoint.graph is null") + assert(batchDuration != null, "Checkpoint.batchDuration is null") + } + + def saveToFile(file: String = checkpointFile) { val path = new Path(file) val conf = new Configuration() val fs = path.getFileSystem(conf) @@ -34,8 +44,7 @@ class Checkpoint(@transient ssc: StreamingContext) extends Serializable { } def toBytes(): Array[Byte] = { - val cp = new Checkpoint(ssc) - val bytes = Utils.serialize(cp) + val bytes = Utils.serialize(this) bytes } } @@ -43,50 +52,41 @@ class Checkpoint(@transient ssc: StreamingContext) extends Serializable { object Checkpoint { def loadFromFile(file: String): Checkpoint = { - val path = new Path(file) - val conf = new Configuration() - val fs = path.getFileSystem(conf) - if (!fs.exists(path)) { - throw new Exception("Could not read checkpoint file " + path) + try { + val path = new Path(file) + val conf = new Configuration() + val fs = path.getFileSystem(conf) + if (!fs.exists(path)) { + throw new Exception("Checkpoint file '" + file + "' does not exist") + } + val fis = fs.open(path) + val ois = new ObjectInputStreamWithLoader(fis, Thread.currentThread().getContextClassLoader) + val cp = ois.readObject.asInstanceOf[Checkpoint] + ois.close() + fs.close() + cp.validate() + cp + } catch { + case e: Exception => + e.printStackTrace() + throw new Exception("Could not load checkpoint file '" + file + "'", e) } - val fis = fs.open(path) - val ois = new ObjectInputStream(fis) - 
val cp = ois.readObject.asInstanceOf[Checkpoint] - ois.close() - fs.close() - cp } def fromBytes(bytes: Array[Byte]): Checkpoint = { - Utils.deserialize[Checkpoint](bytes) + val cp = Utils.deserialize[Checkpoint](bytes) + cp.validate() + cp + } +} + +class ObjectInputStreamWithLoader(inputStream_ : InputStream, loader: ClassLoader) extends ObjectInputStream(inputStream_) { + override def resolveClass(desc: ObjectStreamClass): Class[_] = { + try { + return loader.loadClass(desc.getName()) + } catch { + case e: Exception => + } + return super.resolveClass(desc) } - - /*def toBytes(ssc: StreamingContext): Array[Byte] = { - val cp = new Checkpoint(ssc) - val bytes = Utils.serialize(cp) - bytes - } - - - def saveContext(ssc: StreamingContext, file: String) { - val cp = new Checkpoint(ssc) - val path = new Path(file) - val conf = new Configuration() - val fs = path.getFileSystem(conf) - if (fs.exists(path)) { - val bkPath = new Path(path.getParent, path.getName + ".bk") - FileUtil.copy(fs, path, fs, bkPath, true, true, conf) - println("Moved existing checkpoint file to " + bkPath) - } - val fos = fs.create(path) - val oos = new ObjectOutputStream(fos) - oos.writeObject(cp) - oos.close() - fs.close() - } - - def loadContext(file: String): StreamingContext = { - loadCheckpoint(file).createNewContext() - } - */ } diff --git a/streaming/src/main/scala/spark/streaming/ConstantInputDStream.scala b/streaming/src/main/scala/spark/streaming/ConstantInputDStream.scala index 9bc204dd09..80150708fd 100644 --- a/streaming/src/main/scala/spark/streaming/ConstantInputDStream.scala +++ b/streaming/src/main/scala/spark/streaming/ConstantInputDStream.scala @@ -5,8 +5,8 @@ import spark.RDD /** * An input stream that always returns the same RDD on each timestep. Useful for testing. 
*/ -class ConstantInputDStream[T: ClassManifest](ssc: StreamingContext, rdd: RDD[T]) - extends InputDStream[T](ssc) { +class ConstantInputDStream[T: ClassManifest](ssc_ : StreamingContext, rdd: RDD[T]) + extends InputDStream[T](ssc_) { override def start() {} diff --git a/streaming/src/main/scala/spark/streaming/DStream.scala b/streaming/src/main/scala/spark/streaming/DStream.scala index 78e4c57647..0a43a042d0 100644 --- a/streaming/src/main/scala/spark/streaming/DStream.scala +++ b/streaming/src/main/scala/spark/streaming/DStream.scala @@ -180,7 +180,7 @@ extends Serializable with Logging { @throws(classOf[IOException]) private def writeObject(oos: ObjectOutputStream) { - println(this.getClass().getSimpleName + ".writeObject used") + logDebug(this.getClass().getSimpleName + ".writeObject used") if (graph != null) { graph.synchronized { if (graph.checkpointInProgress) { @@ -202,7 +202,7 @@ extends Serializable with Logging { @throws(classOf[IOException]) private def readObject(ois: ObjectInputStream) { - println(this.getClass().getSimpleName + ".readObject used") + logDebug(this.getClass().getSimpleName + ".readObject used") ois.defaultReadObject() } diff --git a/streaming/src/main/scala/spark/streaming/DStreamGraph.scala b/streaming/src/main/scala/spark/streaming/DStreamGraph.scala index 67859e0131..bcd365e932 100644 --- a/streaming/src/main/scala/spark/streaming/DStreamGraph.scala +++ b/streaming/src/main/scala/spark/streaming/DStreamGraph.scala @@ -2,8 +2,10 @@ package spark.streaming import java.io.{ObjectInputStream, IOException, ObjectOutputStream} import collection.mutable.ArrayBuffer +import spark.Logging -final class DStreamGraph extends Serializable { +final class DStreamGraph extends Serializable with Logging { + initLogging() private val inputStreams = new ArrayBuffer[InputDStream[_]]() private val outputStreams = new ArrayBuffer[DStream[_]]() @@ -11,18 +13,15 @@ final class DStreamGraph extends Serializable { private[streaming] var zeroTime: Time = null private[streaming] var checkpointInProgress = false; - def started() = (zeroTime != null) - def start(time: Time) { this.synchronized { - if (started) { + if (zeroTime != null) { throw new Exception("DStream graph computation already started") } zeroTime = time outputStreams.foreach(_.initialize(zeroTime)) inputStreams.par.foreach(_.start()) } - } def stop() { @@ -60,21 +59,21 @@ final class DStreamGraph extends Serializable { @throws(classOf[IOException]) private def writeObject(oos: ObjectOutputStream) { this.synchronized { + logDebug("DStreamGraph.writeObject used") checkpointInProgress = true oos.defaultWriteObject() checkpointInProgress = false } - println("DStreamGraph.writeObject used") } @throws(classOf[IOException]) private def readObject(ois: ObjectInputStream) { this.synchronized { + logDebug("DStreamGraph.readObject used") checkpointInProgress = true ois.defaultReadObject() checkpointInProgress = false } - println("DStreamGraph.readObject used") } } diff --git a/streaming/src/main/scala/spark/streaming/Scheduler.scala b/streaming/src/main/scala/spark/streaming/Scheduler.scala index d62b7e7140..1e1425a88a 100644 --- a/streaming/src/main/scala/spark/streaming/Scheduler.scala +++ b/streaming/src/main/scala/spark/streaming/Scheduler.scala @@ -1,7 +1,6 @@ package spark.streaming -import spark.streaming.util.RecurringTimer -import spark.streaming.util.Clock +import util.{ManualClock, RecurringTimer, Clock} import spark.SparkEnv import spark.Logging @@ -23,13 +22,24 @@ extends Logging { val clock = 
Class.forName(clockClass).newInstance().asInstanceOf[Clock] val timer = new RecurringTimer(clock, ssc.batchDuration, generateRDDs(_)) - def start() { - if (graph.started) { + // If context was started from checkpoint, then restart timer such that + // this timer's triggers occur at the same time as the original timer. + // Otherwise just start the timer from scratch, and initialize graph based + // on this first trigger time of the timer. + if (ssc.isCheckpointPresent) { + // If manual clock is being used for testing, then + // set manual clock to the last checkpointed time + if (clock.isInstanceOf[ManualClock]) { + val lastTime = ssc.getInitialCheckpoint.checkpointTime.milliseconds + clock.asInstanceOf[ManualClock].setTime(lastTime) + } timer.restart(graph.zeroTime.milliseconds) + logInfo("Scheduler's timer restarted") } else { val zeroTime = Time(timer.start()) graph.start(zeroTime) + logInfo("Scheduler's timer started") } logInfo("Scheduler started") } @@ -47,7 +57,7 @@ extends Logging { graph.generateRDDs(time).foreach(submitJob) logInfo("Generated RDDs for time " + time) if (ssc.checkpointInterval != null && (time - graph.zeroTime).isMultipleOf(ssc.checkpointInterval)) { - ssc.checkpoint() + ssc.doCheckpoint(time) } } diff --git a/streaming/src/main/scala/spark/streaming/StreamingContext.scala b/streaming/src/main/scala/spark/streaming/StreamingContext.scala index 1499ef4ea2..e072f15c93 100644 --- a/streaming/src/main/scala/spark/streaming/StreamingContext.scala +++ b/streaming/src/main/scala/spark/streaming/StreamingContext.scala @@ -46,7 +46,7 @@ class StreamingContext ( val sc: SparkContext = { if (isCheckpointPresent) { - new SparkContext(cp_.master, cp_.frameworkName, cp_.sparkHome, cp_.jars) + new SparkContext(cp_.master, cp_.framework, cp_.sparkHome, cp_.jars) } else { sc_ } @@ -85,9 +85,13 @@ class StreamingContext ( checkpointFile = file checkpointInterval = interval } - + + private[streaming] def getInitialCheckpoint(): Checkpoint = { + if (isCheckpointPresent) cp_ else null + } + private[streaming] def getNewNetworkStreamId() = nextNetworkInputStreamId.getAndIncrement() - + def createNetworkTextStream(hostname: String, port: Int): DStream[String] = { createNetworkObjectStream[String](hostname, port, ObjectInputReceiver.bytesToLines) } @@ -156,10 +160,10 @@ class StreamingContext ( inputStream } - def createQueueStream[T: ClassManifest](iterator: Array[RDD[T]]): DStream[T] = { + def createQueueStream[T: ClassManifest](array: Array[RDD[T]]): DStream[T] = { val queue = new Queue[RDD[T]] val inputStream = createQueueStream(queue, true, null) - queue ++= iterator + queue ++= array inputStream } @@ -233,8 +237,8 @@ class StreamingContext ( logInfo("StreamingContext stopped") } - def checkpoint() { - new Checkpoint(this).saveToFile(checkpointFile) + def doCheckpoint(currentTime: Time) { + new Checkpoint(this, currentTime).saveToFile(checkpointFile) } } diff --git a/streaming/src/main/scala/spark/streaming/util/Clock.scala b/streaming/src/main/scala/spark/streaming/util/Clock.scala index 72e786e0c3..ed087e4ea8 100644 --- a/streaming/src/main/scala/spark/streaming/util/Clock.scala +++ b/streaming/src/main/scala/spark/streaming/util/Clock.scala @@ -56,10 +56,17 @@ class SystemClock() extends Clock { class ManualClock() extends Clock { - var time = 0L - + var time = 0L + def currentTime() = time + def setTime(timeToSet: Long) = { + this.synchronized { + time = timeToSet + this.notifyAll() + } + } + def addToTime(timeToAdd: Long) = { this.synchronized { time += timeToAdd diff --git 
a/streaming/src/main/scala/spark/streaming/util/RecurringTimer.scala b/streaming/src/main/scala/spark/streaming/util/RecurringTimer.scala index 7f19b26a79..dc55fd902b 100644 --- a/streaming/src/main/scala/spark/streaming/util/RecurringTimer.scala +++ b/streaming/src/main/scala/spark/streaming/util/RecurringTimer.scala @@ -25,13 +25,13 @@ class RecurringTimer(val clock: Clock, val period: Long, val callback: (Long) => } def start(): Long = { - val startTime = math.ceil(clock.currentTime / period).toLong * period + val startTime = (math.floor(clock.currentTime.toDouble / period) + 1).toLong * period start(startTime) } def restart(originalStartTime: Long): Long = { val gap = clock.currentTime - originalStartTime - val newStartTime = math.ceil(gap / period).toLong * period + originalStartTime + val newStartTime = (math.floor(gap.toDouble / period).toLong + 1) * period + originalStartTime start(newStartTime) } diff --git a/streaming/src/test/scala/spark/streaming/CheckpointSuite.scala b/streaming/src/test/scala/spark/streaming/CheckpointSuite.scala new file mode 100644 index 0000000000..11cecf9822 --- /dev/null +++ b/streaming/src/test/scala/spark/streaming/CheckpointSuite.scala @@ -0,0 +1,48 @@ +package spark.streaming + +import spark.streaming.StreamingContext._ + +class CheckpointSuite extends DStreamSuiteBase { + + override def framework() = "CheckpointSuite" + + override def checkpointFile() = "checkpoint" + + def testCheckpointedOperation[U: ClassManifest, V: ClassManifest]( + input: Seq[Seq[U]], + operation: DStream[U] => DStream[V], + expectedOutput: Seq[Seq[V]], + useSet: Boolean = false + ) { + System.setProperty("spark.streaming.clock", "spark.streaming.util.ManualClock") + + // Current code assumes that: + // number of inputs = number of outputs = number of batches to be run + + // Do half the computation (half the number of batches), create checkpoint file and quit + val totalNumBatches = input.size + val initialNumBatches = input.size / 2 + val nextNumBatches = totalNumBatches - initialNumBatches + val initialNumExpectedOutputs = initialNumBatches + + val ssc = setupStreams[U, V](input, operation) + val output = runStreams[V](ssc, initialNumBatches, initialNumExpectedOutputs) + verifyOutput[V](output, expectedOutput.take(initialNumBatches), useSet) + Thread.sleep(1000) + + // Restart and complete the computation from checkpoint file + val sscNew = new StreamingContext(checkpointFile) + sscNew.setCheckpointDetails(null, null) + val outputNew = runStreams[V](sscNew, nextNumBatches, expectedOutput.size) + verifyOutput[V](outputNew, expectedOutput, useSet) + } + + test("simple per-batch operation") { + testCheckpointedOperation( + Seq( Seq("a", "a", "b"), Seq("", ""), Seq(), Seq("a", "a", "b"), Seq("", ""), Seq() ), + (s: DStream[String]) => s.map(x => (x, 1)).reduceByKey(_ + _), + Seq( Seq(("a", 2), ("b", 1)), Seq(("", 2)), Seq(), Seq(("a", 2), ("b", 1)), Seq(("", 2)), Seq() ), + true + ) + } +} \ No newline at end of file diff --git a/streaming/src/test/scala/spark/streaming/DStreamBasicSuite.scala b/streaming/src/test/scala/spark/streaming/DStreamBasicSuite.scala index 965b58c03f..f8ca7febe7 100644 --- a/streaming/src/test/scala/spark/streaming/DStreamBasicSuite.scala +++ b/streaming/src/test/scala/spark/streaming/DStreamBasicSuite.scala @@ -22,9 +22,9 @@ class DStreamBasicSuite extends DStreamSuiteBase { test("shuffle-based operations") { // reduceByKey testOperation( - Seq(Seq("a", "a", "b"), Seq("", ""), Seq()), + Seq( Seq("a", "a", "b"), Seq("", ""), Seq() ), (s: 
DStream[String]) => s.map(x => (x, 1)).reduceByKey(_ + _), - Seq(Seq(("a", 2), ("b", 1)), Seq(("", 2)), Seq()), + Seq( Seq(("a", 2), ("b", 1)), Seq(("", 2)), Seq() ), true ) @@ -62,7 +62,6 @@ class DStreamBasicSuite extends DStreamSuiteBase { var newState = 0 if (values != null && values.size > 0) newState += values.reduce(_ + _) if (state != null) newState += state.self - //println("values = " + values + ", state = " + state + ", " + " new state = " + newState) new RichInt(newState) } s.map(x => (x, 1)).updateStateByKey[RichInt](updateFunc).map(t => (t._1, t._2.self)) diff --git a/streaming/src/test/scala/spark/streaming/DStreamSuiteBase.scala b/streaming/src/test/scala/spark/streaming/DStreamSuiteBase.scala index 59fe36baf0..cb95c36782 100644 --- a/streaming/src/test/scala/spark/streaming/DStreamSuiteBase.scala +++ b/streaming/src/test/scala/spark/streaming/DStreamSuiteBase.scala @@ -4,70 +4,157 @@ import spark.{RDD, Logging} import util.ManualClock import collection.mutable.ArrayBuffer import org.scalatest.FunSuite -import scala.collection.mutable.Queue +import collection.mutable.SynchronizedBuffer +class TestInputStream[T: ClassManifest](ssc_ : StreamingContext, val input: Seq[Seq[T]]) + extends InputDStream[T](ssc_) { + var currentIndex = 0 + + def start() {} + + def stop() {} + + def compute(validTime: Time): Option[RDD[T]] = { + logInfo("Computing RDD for time " + validTime) + val rdd = if (currentIndex < input.size) { + ssc.sc.makeRDD(input(currentIndex), 2) + } else { + ssc.sc.makeRDD(Seq[T](), 2) + } + logInfo("Created RDD " + rdd.id) + currentIndex += 1 + Some(rdd) + } +} + +class TestOutputStream[T: ClassManifest](parent: DStream[T], val output: ArrayBuffer[Seq[T]]) + extends PerRDDForEachDStream[T](parent, (rdd: RDD[T], t: Time) => { + val collected = rdd.collect() + output += collected + }) trait DStreamSuiteBase extends FunSuite with Logging { + def framework() = "DStreamSuiteBase" + + def master() = "local[2]" + def batchDuration() = Seconds(1) + def checkpointFile() = null.asInstanceOf[String] + + def checkpointInterval() = batchDuration + def maxWaitTimeMillis() = 10000 + def setupStreams[U: ClassManifest, V: ClassManifest]( + input: Seq[Seq[U]], + operation: DStream[U] => DStream[V] + ): StreamingContext = { + + // Create StreamingContext + val ssc = new StreamingContext(master, framework) + ssc.setBatchDuration(batchDuration) + if (checkpointFile != null) { + ssc.setCheckpointDetails(checkpointFile, checkpointInterval()) + } + + // Setup the stream computation + val inputStream = new TestInputStream(ssc, input) + ssc.registerInputStream(inputStream) + val operatedStream = operation(inputStream) + val outputStream = new TestOutputStream(operatedStream, new ArrayBuffer[Seq[V]] with SynchronizedBuffer[Seq[V]]) + ssc.registerOutputStream(outputStream) + ssc + } + + def runStreams[V: ClassManifest]( + ssc: StreamingContext, + numBatches: Int, + numExpectedOutput: Int + ): Seq[Seq[V]] = { + logInfo("numBatches = " + numBatches + ", numExpectedOutput = " + numExpectedOutput) + + // Get the output buffer + val outputStream = ssc.graph.getOutputStreams.head.asInstanceOf[TestOutputStream[V]] + val output = outputStream.output + + try { + // Start computation + ssc.start() + + // Advance manual clock + val clock = ssc.scheduler.clock.asInstanceOf[ManualClock] + logInfo("Manual clock before advancing = " + clock.time) + clock.addToTime(numBatches * batchDuration.milliseconds) + logInfo("Manual clock after advancing = " + clock.time) + + // Wait until expected number of output items 
have been generated + val startTime = System.currentTimeMillis() + while (output.size < numExpectedOutput && System.currentTimeMillis() - startTime < maxWaitTimeMillis) { + logInfo("output.size = " + output.size + ", numExpectedOutput = " + numExpectedOutput) + Thread.sleep(100) + } + val timeTaken = System.currentTimeMillis() - startTime + + assert(timeTaken < maxWaitTimeMillis, "Operation timed out after " + timeTaken + " ms") + assert(output.size === numExpectedOutput, "Unexpected number of outputs generated") + } catch { + case e: Exception => e.printStackTrace(); throw e; + } finally { + ssc.stop() + } + + output + } + + def verifyOutput[V: ClassManifest]( + output: Seq[Seq[V]], + expectedOutput: Seq[Seq[V]], + useSet: Boolean + ) { + logInfo("--------------------------------") + logInfo("output.size = " + output.size) + logInfo("output") + output.foreach(x => logInfo("[" + x.mkString(",") + "]")) + logInfo("expected output.size = " + expectedOutput.size) + logInfo("expected output") + expectedOutput.foreach(x => logInfo("[" + x.mkString(",") + "]")) + logInfo("--------------------------------") + + // Match the output with the expected output + assert(output.size === expectedOutput.size, "Number of outputs do not match") + for (i <- 0 until output.size) { + if (useSet) { + assert(output(i).toSet === expectedOutput(i).toSet) + } else { + assert(output(i).toList === expectedOutput(i).toList) + } + } + logInfo("Output verified successfully") + } + def testOperation[U: ClassManifest, V: ClassManifest]( input: Seq[Seq[U]], operation: DStream[U] => DStream[V], expectedOutput: Seq[Seq[V]], useSet: Boolean = false ) { + testOperation[U, V](input, operation, expectedOutput, -1, useSet) + } - val manualClock = true + def testOperation[U: ClassManifest, V: ClassManifest]( + input: Seq[Seq[U]], + operation: DStream[U] => DStream[V], + expectedOutput: Seq[Seq[V]], + numBatches: Int, + useSet: Boolean + ) { + System.setProperty("spark.streaming.clock", "spark.streaming.util.ManualClock") - if (manualClock) { - System.setProperty("spark.streaming.clock", "spark.streaming.util.ManualClock") - } - - val ssc = new StreamingContext("local", "test") - - try { - ssc.setBatchDuration(Milliseconds(batchDuration)) - - val inputQueue = new Queue[RDD[U]]() - inputQueue ++= input.map(ssc.sc.makeRDD(_, 2)) - val emptyRDD = ssc.sc.makeRDD(Seq[U](), 2) - - val inputStream = ssc.createQueueStream(inputQueue, true, emptyRDD) - val outputStream = operation(inputStream) - - val output = new ArrayBuffer[Seq[V]]() - outputStream.foreachRDD(rdd => output += rdd.collect()) - - ssc.start() - - val clock = ssc.scheduler.clock - if (clock.isInstanceOf[ManualClock]) { - clock.asInstanceOf[ManualClock].addToTime((input.size - 1) * batchDuration.milliseconds) - } - - val startTime = System.currentTimeMillis() - while (output.size < expectedOutput.size && System.currentTimeMillis() - startTime < maxWaitTimeMillis) { - println("output.size = " + output.size + ", expectedOutput.size = " + expectedOutput.size) - Thread.sleep(500) - } - - println("output.size = " + output.size) - println("output") - output.foreach(x => println("[" + x.mkString(",") + "]")) - - assert(output.size === expectedOutput.size) - for (i <- 0 until output.size) { - if (useSet) { - assert(output(i).toSet === expectedOutput(i).toSet) - } else { - assert(output(i).toList === expectedOutput(i).toList) - } - } - } finally { - ssc.stop() - } + val numBatches_ = if (numBatches > 0) numBatches else expectedOutput.size + val ssc = setupStreams[U, V](input, 
operation) + val output = runStreams[V](ssc, numBatches_, expectedOutput.size) + verifyOutput[V](output, expectedOutput, useSet) } } diff --git a/streaming/src/test/scala/spark/streaming/DStreamWindowSuite.scala b/streaming/src/test/scala/spark/streaming/DStreamWindowSuite.scala index 061cab2cbb..8dd18f491a 100644 --- a/streaming/src/test/scala/spark/streaming/DStreamWindowSuite.scala +++ b/streaming/src/test/scala/spark/streaming/DStreamWindowSuite.scala @@ -4,6 +4,10 @@ import spark.streaming.StreamingContext._ class DStreamWindowSuite extends DStreamSuiteBase { + override def framework() = "DStreamWindowSuite" + + override def maxWaitTimeMillis() = 20000 + val largerSlideInput = Seq( Seq(("a", 1)), // 1st window from here Seq(("a", 2)), @@ -81,16 +85,15 @@ class DStreamWindowSuite extends DStreamSuiteBase { name: String, input: Seq[Seq[(String, Int)]], expectedOutput: Seq[Seq[(String, Int)]], - windowTime: Time = Seconds(2), - slideTime: Time = Seconds(1) + windowTime: Time = batchDuration * 2, + slideTime: Time = batchDuration ) { test("reduceByKeyAndWindow - " + name) { - testOperation( - input, - (s: DStream[(String, Int)]) => s.reduceByKeyAndWindow(_ + _, windowTime, slideTime).persist(), - expectedOutput, - true - ) + val numBatches = expectedOutput.size * (slideTime.millis / batchDuration.millis).toInt + val operation = (s: DStream[(String, Int)]) => { + s.reduceByKeyAndWindow(_ + _, windowTime, slideTime).persist() + } + testOperation(input, operation, expectedOutput, numBatches, true) } } @@ -98,16 +101,15 @@ class DStreamWindowSuite extends DStreamSuiteBase { name: String, input: Seq[Seq[(String, Int)]], expectedOutput: Seq[Seq[(String, Int)]], - windowTime: Time = Seconds(2), - slideTime: Time = Seconds(1) + windowTime: Time = batchDuration * 2, + slideTime: Time = batchDuration ) { test("reduceByKeyAndWindowInv - " + name) { - testOperation( - input, - (s: DStream[(String, Int)]) => s.reduceByKeyAndWindow(_ + _, _ - _, windowTime, slideTime).persist(), - expectedOutput, - true - ) + val numBatches = expectedOutput.size * (slideTime.millis / batchDuration.millis).toInt + val operation = (s: DStream[(String, Int)]) => { + s.reduceByKeyAndWindow(_ + _, _ - _, windowTime, slideTime).persist() + } + testOperation(input, operation, expectedOutput, numBatches, true) } } @@ -116,8 +118,8 @@ class DStreamWindowSuite extends DStreamSuiteBase { testReduceByKeyAndWindow( "basic reduction", - Seq(Seq(("a", 1), ("a", 3)) ), - Seq(Seq(("a", 4)) ) + Seq( Seq(("a", 1), ("a", 3)) ), + Seq( Seq(("a", 4)) ) ) testReduceByKeyAndWindow( @@ -126,7 +128,6 @@ class DStreamWindowSuite extends DStreamSuiteBase { Seq( Seq(("a", 1)), Seq(("a", 2)) ) ) - testReduceByKeyAndWindow( "new key added into window", Seq( Seq(("a", 1)), Seq(("a", 1), ("b", 1)) ), From b760d6426a7fa2a6d115cefc786aa766b9419bd6 Mon Sep 17 00:00:00 2001 From: Tathagata Das Date: Mon, 15 Oct 2012 12:26:44 -0700 Subject: [PATCH 077/226] Minor modifications. 
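The test suites refactored in PATCH 076 above rely on a manual clock: time only advances when the test moves it, so batch boundaries are deterministic and the test can simply wait for the expected number of outputs. A simplified standalone sketch of that pattern (hypothetical names, not the patched ManualClock or DStreamSuiteBase):

class SketchManualClock {
  private var time = 0L
  def currentTime: Long = synchronized { time }
  def addToTime(delta: Long) { synchronized { time += delta; notifyAll() } }
  def waitUntil(target: Long) { synchronized { while (time < target) wait() } }
}

object SketchManualClockDemo {
  def main(args: Array[String]) {
    val clock = new SketchManualClock
    val waiter = new Thread() {
      override def run() {
        clock.waitUntil(3000)
        println("three batches worth of time have passed, clock = " + clock.currentTime)
      }
    }
    waiter.start()
    clock.addToTime(3 * 1000) // the "test" advances the clock by three 1-second batches
    waiter.join()
  }
}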
--- .../scala/spark/streaming/Checkpoint.scala | 6 ++++++ .../spark/streaming/StreamingContext.scala | 19 +++++++------------ .../spark/streaming/DStreamSuiteBase.scala | 2 +- 3 files changed, 14 insertions(+), 13 deletions(-) diff --git a/streaming/src/main/scala/spark/streaming/Checkpoint.scala b/streaming/src/main/scala/spark/streaming/Checkpoint.scala index b38911b646..f7936bdc5f 100644 --- a/streaming/src/main/scala/spark/streaming/Checkpoint.scala +++ b/streaming/src/main/scala/spark/streaming/Checkpoint.scala @@ -25,6 +25,7 @@ class Checkpoint(@transient ssc: StreamingContext, val checkpointTime: Time) ext assert(framework != null, "Checkpoint.framework is null") assert(graph != null, "Checkpoint.graph is null") assert(batchDuration != null, "Checkpoint.batchDuration is null") + assert(checkpointTime != null, "Checkpoint.checkpointTime is null") } def saveToFile(file: String = checkpointFile) { @@ -60,6 +61,11 @@ object Checkpoint { throw new Exception("Checkpoint file '" + file + "' does not exist") } val fis = fs.open(path) + // ObjectInputStream uses the last defined user-defined class loader in the stack + // to find classes, which maybe the wrong class loader. Hence, a inherited version + // of ObjectInputStream is used to explicitly use the current thread's default class + // loader to find and load classes. This is a well know Java issue and has popped up + // in other places (e.g., http://jira.codehaus.org/browse/GROOVY-1627) val ois = new ObjectInputStreamWithLoader(fis, Thread.currentThread().getContextClassLoader) val cp = ois.readObject.asInstanceOf[Checkpoint] ois.close() diff --git a/streaming/src/main/scala/spark/streaming/StreamingContext.scala b/streaming/src/main/scala/spark/streaming/StreamingContext.scala index e072f15c93..62d21b83d9 100644 --- a/streaming/src/main/scala/spark/streaming/StreamingContext.scala +++ b/streaming/src/main/scala/spark/streaming/StreamingContext.scala @@ -184,25 +184,20 @@ class StreamingContext ( } /** - * This function verify whether the stream computation is eligible to be executed. + * This function validate whether the stream computation is eligible to be executed. */ - private def verify() { - if (batchDuration == null) { - throw new Exception("Batch duration has not been set") - } - if (batchDuration < Milliseconds(100)) { - logWarning("Batch duration of " + batchDuration + " is very low") - } - if (graph.getOutputStreams().size == 0) { - throw new Exception("No output streams registered, so nothing to execute") - } + private def validate() { + assert(batchDuration != null, "Batch duration has not been set") + assert(batchDuration > Milliseconds(100), "Batch duration of " + batchDuration + " is very low") + assert(graph != null, "Graph is null") + assert(graph.getOutputStreams().size > 0, "No output streams registered, so nothing to execute") } /** * This function starts the execution of the streams. 
*/ def start() { - verify() + validate() val networkInputStreams = graph.getInputStreams().filter(s => s match { case n: NetworkInputDStream[_] => true case _ => false diff --git a/streaming/src/test/scala/spark/streaming/DStreamSuiteBase.scala b/streaming/src/test/scala/spark/streaming/DStreamSuiteBase.scala index cb95c36782..91ffc0c098 100644 --- a/streaming/src/test/scala/spark/streaming/DStreamSuiteBase.scala +++ b/streaming/src/test/scala/spark/streaming/DStreamSuiteBase.scala @@ -61,9 +61,9 @@ trait DStreamSuiteBase extends FunSuite with Logging { // Setup the stream computation val inputStream = new TestInputStream(ssc, input) - ssc.registerInputStream(inputStream) val operatedStream = operation(inputStream) val outputStream = new TestOutputStream(operatedStream, new ArrayBuffer[Seq[V]] with SynchronizedBuffer[Seq[V]]) + ssc.registerInputStream(inputStream) ssc.registerOutputStream(outputStream) ssc } From 6d5eb4b40ccad150c967fee8557a4e5d5664b4bd Mon Sep 17 00:00:00 2001 From: Tathagata Das Date: Fri, 19 Oct 2012 12:11:44 -0700 Subject: [PATCH 078/226] Added functionality to forget RDDs from DStreams. --- .../scala/spark/streaming/Checkpoint.scala | 2 - .../main/scala/spark/streaming/DStream.scala | 90 +++++++++++++------ .../scala/spark/streaming/DStreamGraph.scala | 40 +++++++-- .../streaming/PairDStreamFunctions.scala | 9 +- .../streaming/ReducedWindowedDStream.scala | 14 ++- .../scala/spark/streaming/Scheduler.scala | 5 +- .../spark/streaming/StreamingContext.scala | 34 +++---- .../spark/streaming/WindowedDStream.scala | 6 +- .../spark/streaming/CheckpointSuite.scala | 9 +- .../spark/streaming/DStreamBasicSuite.scala | 87 +++++++++++++++--- .../spark/streaming/DStreamSuiteBase.scala | 7 +- 11 files changed, 224 insertions(+), 79 deletions(-) diff --git a/streaming/src/main/scala/spark/streaming/Checkpoint.scala b/streaming/src/main/scala/spark/streaming/Checkpoint.scala index f7936bdc5f..23fd0f2434 100644 --- a/streaming/src/main/scala/spark/streaming/Checkpoint.scala +++ b/streaming/src/main/scala/spark/streaming/Checkpoint.scala @@ -14,7 +14,6 @@ class Checkpoint(@transient ssc: StreamingContext, val checkpointTime: Time) ext val sparkHome = ssc.sc.sparkHome val jars = ssc.sc.jars val graph = ssc.graph - val batchDuration = ssc.batchDuration val checkpointFile = ssc.checkpointFile val checkpointInterval = ssc.checkpointInterval @@ -24,7 +23,6 @@ class Checkpoint(@transient ssc: StreamingContext, val checkpointTime: Time) ext assert(master != null, "Checkpoint.master is null") assert(framework != null, "Checkpoint.framework is null") assert(graph != null, "Checkpoint.graph is null") - assert(batchDuration != null, "Checkpoint.batchDuration is null") assert(checkpointTime != null, "Checkpoint.checkpointTime is null") } diff --git a/streaming/src/main/scala/spark/streaming/DStream.scala b/streaming/src/main/scala/spark/streaming/DStream.scala index 0a43a042d0..645636b603 100644 --- a/streaming/src/main/scala/spark/streaming/DStream.scala +++ b/streaming/src/main/scala/spark/streaming/DStream.scala @@ -39,22 +39,30 @@ extends Serializable with Logging { * --------------------------------------- */ - // Variable to store the RDDs generated earlier in time - protected val generatedRDDs = new HashMap[Time, RDD[T]] () + // RDDs generated, marked as protected[streaming] so that testsuites can access it + protected[streaming] val generatedRDDs = new HashMap[Time, RDD[T]] () - // Variable to be set to the first time seen by the DStream (effective time zero) + // Time zero for the DStream 
protected var zeroTime: Time = null - // Variable to specify storage level + // Time after which RDDs will be forgotten + protected var forgetTime: Time = null + + // Storage level of the RDDs in the stream protected var storageLevel: StorageLevel = StorageLevel.NONE // Checkpoint level and checkpoint interval protected var checkpointLevel: StorageLevel = StorageLevel.NONE // NONE means don't checkpoint protected var checkpointInterval: Time = null - // Reference to whole DStream graph, so that checkpointing process can lock it + // Reference to whole DStream graph protected var graph: DStreamGraph = null + def isInitialized = (zeroTime != null) + + // Time gap for forgetting old RDDs (i.e. removing them from generatedRDDs) + def parentForgetTime = forgetTime + // Change this RDD's storage level def persist( storageLevel: StorageLevel, @@ -79,8 +87,6 @@ extends Serializable with Logging { // Turn on the default caching level for this RDD def cache(): DStream[T] = persist() - def isInitialized() = (zeroTime != null) - /** * This method initializes the DStream by setting the "zero" time, based on which * the validity of future times is calculated. This method also recursively initializes @@ -91,31 +97,43 @@ extends Serializable with Logging { throw new Exception("ZeroTime is already initialized, cannot initialize it again") } zeroTime = time - logInfo(this + " initialized") dependencies.foreach(_.initialize(zeroTime)) + logInfo("Initialized " + this) } protected[streaming] def setContext(s: StreamingContext) { if (ssc != null && ssc != s) { - throw new Exception("Context is already set, cannot set it again") + throw new Exception("Context is already set in " + this + ", cannot set it again") } ssc = s - logInfo("Set context for " + this.getClass.getSimpleName) + logInfo("Set context for " + this) dependencies.foreach(_.setContext(ssc)) } protected[streaming] def setGraph(g: DStreamGraph) { if (graph != null && graph != g) { - throw new Exception("Graph is already set, cannot set it again") + throw new Exception("Graph is already set in " + this + ", cannot set it again") } graph = g dependencies.foreach(_.setGraph(graph)) } + protected[streaming] def setForgetTime(time: Time = slideTime) { + if (time == null) { + throw new Exception("Time gap for forgetting RDDs cannot be set to null for " + this) + } else if (forgetTime != null && time < forgetTime) { + throw new Exception("Time gap for forgetting RDDs cannot be reduced from " + forgetTime + + " to " + time + " for " + this) + } + forgetTime = time + dependencies.foreach(_.setForgetTime(parentForgetTime)) + logInfo("Time gap for forgetting RDDs set to " + forgetTime + " for " + this) + } + /** This method checks whether the 'time' is valid wrt slideTime for generating RDD */ protected def isTimeValid(time: Time): Boolean = { if (!isInitialized) { - throw new Exception (this.toString + " has not been initialized") + throw new Exception (this + " has not been initialized") } else if (time < zeroTime || ! 
(time - zeroTime).isMultipleOf(slideTime)) { false } else { @@ -178,6 +196,21 @@ extends Serializable with Logging { } } + def forgetOldRDDs(time: Time) { + val keys = generatedRDDs.keys + var numForgotten = 0 + + keys.foreach(t => { + if (t < (time - forgetTime)) { + generatedRDDs.remove(t) + numForgotten += 1 + //logInfo("Forgot RDD of time " + t + " from " + this) + } + }) + logInfo("Forgot " + numForgotten + " RDDs from " + this) + dependencies.foreach(_.forgetOldRDDs(time)) + } + @throws(classOf[IOException]) private def writeObject(oos: ObjectOutputStream) { logDebug(this.getClass().getSimpleName + ".writeObject used") @@ -257,7 +290,7 @@ extends Serializable with Logging { new TransformedDStream(this, ssc.sc.clean(transformFunc)) } - def toBlockingQueue = { + def toBlockingQueue() = { val queue = new ArrayBlockingQueue[RDD[T]](10000) this.foreachRDD(rdd => { queue.add(rdd) @@ -265,7 +298,7 @@ extends Serializable with Logging { queue } - def print() = { + def print() { def foreachFunc = (rdd: RDD[T], time: Time) => { val first11 = rdd.take(11) println ("-------------------------------------------") @@ -277,33 +310,38 @@ extends Serializable with Logging { } val newStream = new PerRDDForEachDStream(this, ssc.sc.clean(foreachFunc)) ssc.registerOutputStream(newStream) - newStream } - def window(windowTime: Time, slideTime: Time) = new WindowedDStream(this, windowTime, slideTime) + def window(windowTime: Time): DStream[T] = window(windowTime, this.slideTime) - def batch(batchTime: Time) = window(batchTime, batchTime) + def window(windowTime: Time, slideTime: Time): DStream[T] = { + new WindowedDStream(this, windowTime, slideTime) + } - def reduceByWindow(reduceFunc: (T, T) => T, windowTime: Time, slideTime: Time) = + def tumble(batchTime: Time): DStream[T] = window(batchTime, batchTime) + + def reduceByWindow(reduceFunc: (T, T) => T, windowTime: Time, slideTime: Time): DStream[T] = { this.window(windowTime, slideTime).reduce(reduceFunc) + } def reduceByWindow( - reduceFunc: (T, T) => T, - invReduceFunc: (T, T) => T, - windowTime: Time, - slideTime: Time) = { + reduceFunc: (T, T) => T, + invReduceFunc: (T, T) => T, + windowTime: Time, + slideTime: Time + ): DStream[T] = { this.map(x => (1, x)) .reduceByKeyAndWindow(reduceFunc, invReduceFunc, windowTime, slideTime, 1) .map(_._2) } - def countByWindow(windowTime: Time, slideTime: Time) = { + def countByWindow(windowTime: Time, slideTime: Time): DStream[Int] = { def add(v1: Int, v2: Int) = (v1 + v2) def subtract(v1: Int, v2: Int) = (v1 - v2) this.map(_ => 1).reduceByWindow(add _, subtract _, windowTime, slideTime) } - def union(that: DStream[T]) = new UnifiedDStream(Array(this, that)) + def union(that: DStream[T]): DStream[T] = new UnifiedDStream[T](Array(this, that)) def slice(interval: Interval): Seq[RDD[T]] = { slice(interval.beginTime, interval.endTime) @@ -336,8 +374,8 @@ abstract class InputDStream[T: ClassManifest] (@transient ssc_ : StreamingContex override def slideTime = { if (ssc == null) throw new Exception("ssc is null") - if (ssc.batchDuration == null) throw new Exception("ssc.batchDuration is null") - ssc.batchDuration + if (ssc.graph.batchDuration == null) throw new Exception("batchDuration is null") + ssc.graph.batchDuration } def start() diff --git a/streaming/src/main/scala/spark/streaming/DStreamGraph.scala b/streaming/src/main/scala/spark/streaming/DStreamGraph.scala index bcd365e932..964c8a26a0 100644 --- a/streaming/src/main/scala/spark/streaming/DStreamGraph.scala +++ 
b/streaming/src/main/scala/spark/streaming/DStreamGraph.scala @@ -11,7 +11,8 @@ final class DStreamGraph extends Serializable with Logging { private val outputStreams = new ArrayBuffer[DStream[_]]() private[streaming] var zeroTime: Time = null - private[streaming] var checkpointInProgress = false; + private[streaming] var batchDuration: Time = null + private[streaming] var checkpointInProgress = false def start(time: Time) { this.synchronized { @@ -20,6 +21,7 @@ final class DStreamGraph extends Serializable with Logging { } zeroTime = time outputStreams.foreach(_.initialize(zeroTime)) + outputStreams.foreach(_.setForgetTime()) inputStreams.par.foreach(_.start()) } } @@ -36,14 +38,28 @@ final class DStreamGraph extends Serializable with Logging { } } + def setBatchDuration(duration: Time) { + this.synchronized { + if (batchDuration != null) { + throw new Exception("Batch duration already set as " + batchDuration + + ". cannot set it again.") + } + } + batchDuration = duration + } + def addInputStream(inputStream: InputDStream[_]) { - inputStream.setGraph(this) - inputStreams += inputStream + this.synchronized { + inputStream.setGraph(this) + inputStreams += inputStream + } } def addOutputStream(outputStream: DStream[_]) { - outputStream.setGraph(this) - outputStreams += outputStream + this.synchronized { + outputStream.setGraph(this) + outputStreams += outputStream + } } def getInputStreams() = inputStreams.toArray @@ -56,6 +72,20 @@ final class DStreamGraph extends Serializable with Logging { } } + def forgetOldRDDs(time: Time) { + this.synchronized { + outputStreams.foreach(_.forgetOldRDDs(time)) + } + } + + def validate() { + this.synchronized { + assert(batchDuration != null, "Batch duration has not been set") + assert(batchDuration > Milliseconds(100), "Batch duration of " + batchDuration + " is very low") + assert(getOutputStreams().size > 0, "No output streams registered, so nothing to execute") + } + } + @throws(classOf[IOException]) private def writeObject(oos: ObjectOutputStream) { this.synchronized { diff --git a/streaming/src/main/scala/spark/streaming/PairDStreamFunctions.scala b/streaming/src/main/scala/spark/streaming/PairDStreamFunctions.scala index 3fd0a16bf0..0bd0321928 100644 --- a/streaming/src/main/scala/spark/streaming/PairDStreamFunctions.scala +++ b/streaming/src/main/scala/spark/streaming/PairDStreamFunctions.scala @@ -75,6 +75,13 @@ extends Serializable { stream.window(windowTime, slideTime).groupByKey(partitioner) } + def reduceByKeyAndWindow( + reduceFunc: (V, V) => V, + windowTime: Time + ): DStream[(K, V)] = { + reduceByKeyAndWindow(reduceFunc, windowTime, stream.slideTime, defaultPartitioner()) + } + def reduceByKeyAndWindow( reduceFunc: (V, V) => V, windowTime: Time, @@ -106,7 +113,7 @@ extends Serializable { // so that new elements introduced in the window can be "added" using // reduceFunc to the previous window's result and old elements can be // "subtracted using invReduceFunc. 
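// Illustrative sketch of the add/subtract idea described in the comment above:
// keep one running value per window, "add" the batch entering the window with
// reduceFunc and "subtract" the batch leaving it with invReduceFunc, instead of
// recomputing the whole window. Standalone, hypothetical names; this object is
// not part of the patch or of PairDStreamFunctions.
object IncrementalWindowSumSketch {
  def main(args: Array[String]) {
    val perBatchValues = Seq(1, 2, 3, 4, 5, 6) // per-batch reduced value for one key
    val windowSizeInBatches = 3
    var windowSum = 0
    for (i <- perBatchValues.indices) {
      windowSum += perBatchValues(i) // reduceFunc: element entering the window
      if (i >= windowSizeInBatches) {
        windowSum -= perBatchValues(i - windowSizeInBatches) // invReduceFunc: element leaving
      }
      println("batch " + i + ": window sum over last " + windowSizeInBatches + " batches = " + windowSum)
    }
  }
}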
- + def reduceByKeyAndWindow( reduceFunc: (V, V) => V, invReduceFunc: (V, V) => V, diff --git a/streaming/src/main/scala/spark/streaming/ReducedWindowedDStream.scala b/streaming/src/main/scala/spark/streaming/ReducedWindowedDStream.scala index e161b5ba92..f3e95c9e2b 100644 --- a/streaming/src/main/scala/spark/streaming/ReducedWindowedDStream.scala +++ b/streaming/src/main/scala/spark/streaming/ReducedWindowedDStream.scala @@ -31,12 +31,15 @@ class ReducedWindowedDStream[K: ClassManifest, V: ClassManifest]( @transient val reducedStream = parent.reduceByKey(reduceFunc, partitioner) - override def dependencies = List(reducedStream) - def windowTime: Time = _windowTime + override def dependencies = List(reducedStream) + override def slideTime: Time = _slideTime + //TODO: This is wrong. This should depend on the checkpointInterval + override def parentForgetTime: Time = forgetTime + windowTime + override def persist( storageLevel: StorageLevel, checkpointLevel: StorageLevel, @@ -46,6 +49,13 @@ class ReducedWindowedDStream[K: ClassManifest, V: ClassManifest]( this } + protected[streaming] override def setForgetTime(time: Time) { + if (forgetTime == null || forgetTime < time) { + forgetTime = time + dependencies.foreach(_.setForgetTime(forgetTime + windowTime)) + } + } + override def compute(validTime: Time): Option[RDD[(K, V)]] = { val reduceF = reduceFunc val invReduceF = invReduceFunc diff --git a/streaming/src/main/scala/spark/streaming/Scheduler.scala b/streaming/src/main/scala/spark/streaming/Scheduler.scala index 1e1425a88a..99e30b6110 100644 --- a/streaming/src/main/scala/spark/streaming/Scheduler.scala +++ b/streaming/src/main/scala/spark/streaming/Scheduler.scala @@ -20,7 +20,7 @@ extends Logging { val jobManager = new JobManager(ssc, concurrentJobs) val clockClass = System.getProperty("spark.streaming.clock", "spark.streaming.util.SystemClock") val clock = Class.forName(clockClass).newInstance().asInstanceOf[Clock] - val timer = new RecurringTimer(clock, ssc.batchDuration, generateRDDs(_)) + val timer = new RecurringTimer(clock, ssc.graph.batchDuration, generateRDDs(_)) def start() { // If context was started from checkpoint, then restart timer such that @@ -53,11 +53,12 @@ extends Logging { def generateRDDs(time: Time) { SparkEnv.set(ssc.env) logInfo("\n-----------------------------------------------------\n") - logInfo("Generating RDDs for time " + time) graph.generateRDDs(time).foreach(submitJob) logInfo("Generated RDDs for time " + time) + graph.forgetOldRDDs(time) if (ssc.checkpointInterval != null && (time - graph.zeroTime).isMultipleOf(ssc.checkpointInterval)) { ssc.doCheckpoint(time) + logInfo("Checkpointed at time " + time) } } diff --git a/streaming/src/main/scala/spark/streaming/StreamingContext.scala b/streaming/src/main/scala/spark/streaming/StreamingContext.scala index 62d21b83d9..b5f4571798 100644 --- a/streaming/src/main/scala/spark/streaming/StreamingContext.scala +++ b/streaming/src/main/scala/spark/streaming/StreamingContext.scala @@ -6,16 +6,11 @@ import spark.SparkEnv import spark.SparkContext import spark.storage.StorageLevel -import scala.collection.mutable.ArrayBuffer import scala.collection.mutable.Queue import java.io.InputStream -import java.io.IOException -import java.net.InetSocketAddress import java.util.concurrent.atomic.AtomicInteger -import org.apache.hadoop.fs.Path -import org.apache.hadoop.conf.Configuration import org.apache.hadoop.io.LongWritable import org.apache.hadoop.io.Text import org.apache.hadoop.mapreduce.{InputFormat => NewInputFormat} @@ 
-65,20 +60,15 @@ class StreamingContext ( } val nextNetworkInputStreamId = new AtomicInteger(0) - - var batchDuration: Time = if (isCheckpointPresent) cp_.batchDuration else null - var checkpointFile: String = if (isCheckpointPresent) cp_.checkpointFile else null - var checkpointInterval: Time = if (isCheckpointPresent) cp_.checkpointInterval else null var networkInputTracker: NetworkInputTracker = null - var receiverJobThread: Thread = null - var scheduler: Scheduler = null + + private[streaming] var checkpointFile: String = if (isCheckpointPresent) cp_.checkpointFile else null + private[streaming] var checkpointInterval: Time = if (isCheckpointPresent) cp_.checkpointInterval else null + private[streaming] var receiverJobThread: Thread = null + private[streaming] var scheduler: Scheduler = null def setBatchDuration(duration: Time) { - if (batchDuration != null) { - throw new Exception("Batch duration alread set as " + batchDuration + - ". cannot set it again.") - } - batchDuration = duration + graph.setBatchDuration(duration) } def setCheckpointDetails(file: String, interval: Time) { @@ -183,21 +173,17 @@ class StreamingContext ( graph.addOutputStream(outputStream) } - /** - * This function validate whether the stream computation is eligible to be executed. - */ - private def validate() { - assert(batchDuration != null, "Batch duration has not been set") - assert(batchDuration > Milliseconds(100), "Batch duration of " + batchDuration + " is very low") + def validate() { assert(graph != null, "Graph is null") - assert(graph.getOutputStreams().size > 0, "No output streams registered, so nothing to execute") + graph.validate() } - + /** * This function starts the execution of the streams. */ def start() { validate() + val networkInputStreams = graph.getInputStreams().filter(s => s match { case n: NetworkInputDStream[_] => true case _ => false diff --git a/streaming/src/main/scala/spark/streaming/WindowedDStream.scala b/streaming/src/main/scala/spark/streaming/WindowedDStream.scala index 93c1291691..2984f88284 100644 --- a/streaming/src/main/scala/spark/streaming/WindowedDStream.scala +++ b/streaming/src/main/scala/spark/streaming/WindowedDStream.scala @@ -18,12 +18,14 @@ class WindowedDStream[T: ClassManifest]( throw new Exception("The slide duration of WindowedDStream (" + _slideTime + ") " + "must be multiple of the slide duration of parent DStream (" + parent.slideTime + ")") - override def dependencies = List(parent) - def windowTime: Time = _windowTime + override def dependencies = List(parent) + override def slideTime: Time = _slideTime + override def parentForgetTime: Time = forgetTime + windowTime + override def compute(validTime: Time): Option[RDD[T]] = { val currentWindow = Interval(validTime - windowTime + parent.slideTime, validTime) Some(new UnionRDD(ssc.sc, parent.slice(currentWindow))) diff --git a/streaming/src/test/scala/spark/streaming/CheckpointSuite.scala b/streaming/src/test/scala/spark/streaming/CheckpointSuite.scala index 11cecf9822..061b331a16 100644 --- a/streaming/src/test/scala/spark/streaming/CheckpointSuite.scala +++ b/streaming/src/test/scala/spark/streaming/CheckpointSuite.scala @@ -1,6 +1,7 @@ package spark.streaming import spark.streaming.StreamingContext._ +import java.io.File class CheckpointSuite extends DStreamSuiteBase { @@ -14,17 +15,16 @@ class CheckpointSuite extends DStreamSuiteBase { expectedOutput: Seq[Seq[V]], useSet: Boolean = false ) { - System.setProperty("spark.streaming.clock", "spark.streaming.util.ManualClock") // Current code assumes that: 
// number of inputs = number of outputs = number of batches to be run - // Do half the computation (half the number of batches), create checkpoint file and quit val totalNumBatches = input.size val initialNumBatches = input.size / 2 val nextNumBatches = totalNumBatches - initialNumBatches val initialNumExpectedOutputs = initialNumBatches + // Do half the computation (half the number of batches), create checkpoint file and quit val ssc = setupStreams[U, V](input, operation) val output = runStreams[V](ssc, initialNumBatches, initialNumExpectedOutputs) verifyOutput[V](output, expectedOutput.take(initialNumBatches), useSet) @@ -35,6 +35,11 @@ class CheckpointSuite extends DStreamSuiteBase { sscNew.setCheckpointDetails(null, null) val outputNew = runStreams[V](sscNew, nextNumBatches, expectedOutput.size) verifyOutput[V](outputNew, expectedOutput, useSet) + + new File(checkpointFile).delete() + new File(checkpointFile + ".bk").delete() + new File("." + checkpointFile + ".crc").delete() + new File("." + checkpointFile + ".bk.crc").delete() } test("simple per-batch operation") { diff --git a/streaming/src/test/scala/spark/streaming/DStreamBasicSuite.scala b/streaming/src/test/scala/spark/streaming/DStreamBasicSuite.scala index f8ca7febe7..5dd8b675b1 100644 --- a/streaming/src/test/scala/spark/streaming/DStreamBasicSuite.scala +++ b/streaming/src/test/scala/spark/streaming/DStreamBasicSuite.scala @@ -2,16 +2,21 @@ package spark.streaming import spark.streaming.StreamingContext._ import scala.runtime.RichInt +import util.ManualClock class DStreamBasicSuite extends DStreamSuiteBase { - test("map-like operations") { + test("map") { + val input = Seq(1 to 4, 5 to 8, 9 to 12) + testOperation( + input, + (r: DStream[Int]) => r.map(_.toString), + input.map(_.map(_.toString)) + ) + } + + test("flatmap") { val input = Seq(1 to 4, 5 to 8, 9 to 12) - - // map - testOperation(input, (r: DStream[Int]) => r.map(_.toString), input.map(_.map(_.toString))) - - // flatMap testOperation( input, (r: DStream[Int]) => r.flatMap(x => Seq(x, x * 2)), @@ -19,16 +24,16 @@ class DStreamBasicSuite extends DStreamSuiteBase { ) } - test("shuffle-based operations") { - // reduceByKey + test("reduceByKey") { testOperation( Seq( Seq("a", "a", "b"), Seq("", ""), Seq() ), (s: DStream[String]) => s.map(x => (x, 1)).reduceByKey(_ + _), Seq( Seq(("a", 2), ("b", 1)), Seq(("", 2)), Seq() ), true ) + } - // reduce + test("reduce") { testOperation( Seq(1 to 4, 5 to 8, 9 to 12), (s: DStream[Int]) => s.reduce(_ + _), @@ -57,7 +62,7 @@ class DStreamBasicSuite extends DStreamSuiteBase { Seq(("a", 5), ("b", 3), ("c", 1)) ) - val updateStateOp = (s: DStream[String]) => { + val updateStateOperation = (s: DStream[String]) => { val updateFunc = (values: Seq[Int], state: RichInt) => { var newState = 0 if (values != null && values.size > 0) newState += values.reduce(_ + _) @@ -67,6 +72,66 @@ class DStreamBasicSuite extends DStreamSuiteBase { s.map(x => (x, 1)).updateStateByKey[RichInt](updateFunc).map(t => (t._1, t._2.self)) } - testOperation(inputData, updateStateOp, outputData, true) + testOperation(inputData, updateStateOperation, outputData, true) + } + + test("forgetting of RDDs") { + assert(batchDuration === Seconds(1), "Batch duration has changed from 1 second") + + val input = Seq(1 to 4, 5 to 8, 9 to 12, 13 to 16, 17 to 20, 21 to 24, 25 to 28, 29 to 32) + + assert(input.size % 4 === 0, "Number of inputs should be a multiple of 4") + + def operation(s: DStream[Int]): DStream[(Int, Int)] = { + s.map(x => (x % 10, 1)) + .window(Seconds(2), 
Seconds(1)) + .reduceByKeyAndWindow(_ + _, _ - _, Seconds(4), Seconds(1)) + } + + val ssc = setupStreams(input, operation _) + runStreams[(Int, Int)](ssc, input.size, input.size) + + val reducedWindowedStream = ssc.graph.getOutputStreams().head.dependencies.head + .asInstanceOf[ReducedWindowedDStream[Int, Int]] + val windowedStream = reducedWindowedStream.dependencies.head.dependencies.head + .asInstanceOf[WindowedDStream[(Int, Int)]] + val mappedStream = windowedStream.dependencies.head + + val clock = ssc.scheduler.clock.asInstanceOf[ManualClock] + val finalTime = Seconds(7) + //assert(clock.time === finalTime.milliseconds) + + // ReducedWindowedStream should remember the last RDD created + assert(reducedWindowedStream.generatedRDDs.contains(finalTime)) + + // ReducedWindowedStream should have forgotten the previous to last RDD created + assert(!reducedWindowedStream.generatedRDDs.contains(finalTime - reducedWindowedStream.slideTime)) + + // WindowedStream should remember the last RDD created + assert(windowedStream.generatedRDDs.contains(finalTime)) + + // WindowedStream should still remember the previous to last RDD created + // as the last RDD of ReducedWindowedStream requires that RDD + assert(windowedStream.generatedRDDs.contains(finalTime - windowedStream.slideTime)) + + // WindowedStream should have forgotten this RDD as the last RDD of + // ReducedWindowedStream DOES NOT require this RDD + assert(!windowedStream.generatedRDDs.contains(finalTime - windowedStream.slideTime - reducedWindowedStream.windowTime)) + + // MappedStream should remember the last RDD created + assert(mappedStream.generatedRDDs.contains(finalTime)) + + // MappedStream should still remember the previous to last RDD created + // as the last RDD of WindowedStream requires that RDD + assert(mappedStream.generatedRDDs.contains(finalTime - mappedStream.slideTime)) + + // MappedStream should still remember this RDD as the last RDD of + // ReducedWindowedStream requires that RDD (even though the last RDD of + // WindowedStream does not need it) + assert(mappedStream.generatedRDDs.contains(finalTime - windowedStream.windowTime)) + + // MappedStream should have forgotten this RDD as the last RDD of + // ReducedWindowedStream DOES NOT require this RDD + assert(!mappedStream.generatedRDDs.contains(finalTime - mappedStream.slideTime - windowedStream.windowTime - reducedWindowedStream.windowTime)) } } diff --git a/streaming/src/test/scala/spark/streaming/DStreamSuiteBase.scala b/streaming/src/test/scala/spark/streaming/DStreamSuiteBase.scala index 91ffc0c098..6e5a7a58bb 100644 --- a/streaming/src/test/scala/spark/streaming/DStreamSuiteBase.scala +++ b/streaming/src/test/scala/spark/streaming/DStreamSuiteBase.scala @@ -35,6 +35,8 @@ class TestOutputStream[T: ClassManifest](parent: DStream[T], val output: ArrayBu trait DStreamSuiteBase extends FunSuite with Logging { + System.setProperty("spark.streaming.clock", "spark.streaming.util.ManualClock") + def framework() = "DStreamSuiteBase" def master() = "local[2]" @@ -73,6 +75,9 @@ trait DStreamSuiteBase extends FunSuite with Logging { numBatches: Int, numExpectedOutput: Int ): Seq[Seq[V]] = { + + assert(numBatches > 0, "Number of batches to run stream computation is zero") + assert(numExpectedOutput > 0, "Number of expected outputs after " + numBatches + " is zero") logInfo("numBatches = " + numBatches + ", numExpectedOutput = " + numExpectedOutput) // Get the output buffer @@ -150,8 +155,6 @@ trait DStreamSuiteBase extends FunSuite with Logging { numBatches: Int, useSet: 
Boolean ) { - System.setProperty("spark.streaming.clock", "spark.streaming.util.ManualClock") - val numBatches_ = if (numBatches > 0) numBatches else expectedOutput.size val ssc = setupStreams[U, V](input, operation) val output = runStreams[V](ssc, numBatches_, expectedOutput.size) From c4a2b6f636040bacd3d4b443e65cc33dafd0aa7e Mon Sep 17 00:00:00 2001 From: Tathagata Das Date: Sun, 21 Oct 2012 10:41:25 -0700 Subject: [PATCH 079/226] Fixed some bugs in tests for forgetting RDDs, and made sure that use of manual clock leads to a zeroTime of 0 in the DStreams (more intuitive). --- .../scala/spark/streaming/Checkpoint.scala | 2 +- .../main/scala/spark/streaming/DStream.scala | 40 +++++----- .../scala/spark/streaming/DStreamGraph.scala | 17 ++++- .../streaming/ReducedWindowedDStream.scala | 10 +-- .../scala/spark/streaming/Scheduler.scala | 4 +- .../spark/streaming/StreamingContext.scala | 4 + .../spark/streaming/WindowedDStream.scala | 2 +- .../spark/streaming/DStreamBasicSuite.scala | 73 ++++++++----------- .../spark/streaming/DStreamSuiteBase.scala | 2 + .../spark/streaming/DStreamWindowSuite.scala | 25 +++---- 10 files changed, 95 insertions(+), 84 deletions(-) diff --git a/streaming/src/main/scala/spark/streaming/Checkpoint.scala b/streaming/src/main/scala/spark/streaming/Checkpoint.scala index 23fd0f2434..ebff9bdb51 100644 --- a/streaming/src/main/scala/spark/streaming/Checkpoint.scala +++ b/streaming/src/main/scala/spark/streaming/Checkpoint.scala @@ -33,7 +33,7 @@ class Checkpoint(@transient ssc: StreamingContext, val checkpointTime: Time) ext if (fs.exists(path)) { val bkPath = new Path(path.getParent, path.getName + ".bk") FileUtil.copy(fs, path, fs, bkPath, true, true, conf) - println("Moved existing checkpoint file to " + bkPath) + //logInfo("Moved existing checkpoint file to " + bkPath) } val fos = fs.create(path) val oos = new ObjectOutputStream(fos) diff --git a/streaming/src/main/scala/spark/streaming/DStream.scala b/streaming/src/main/scala/spark/streaming/DStream.scala index 645636b603..f6cd135e59 100644 --- a/streaming/src/main/scala/spark/streaming/DStream.scala +++ b/streaming/src/main/scala/spark/streaming/DStream.scala @@ -30,7 +30,7 @@ extends Serializable with Logging { // List of parent DStreams on which this DStream depends on def dependencies: List[DStream[_]] - // Key method that computes RDD for a valid time + // Key method that computes RDD for a valid time def compute (validTime: Time): Option[RDD[T]] /** @@ -45,8 +45,8 @@ extends Serializable with Logging { // Time zero for the DStream protected var zeroTime: Time = null - // Time after which RDDs will be forgotten - protected var forgetTime: Time = null + // Duration for which the DStream will remember each RDD created + protected var rememberDuration: Time = null // Storage level of the RDDs in the stream protected var storageLevel: StorageLevel = StorageLevel.NONE @@ -60,8 +60,8 @@ extends Serializable with Logging { def isInitialized = (zeroTime != null) - // Time gap for forgetting old RDDs (i.e. 
removing them from generatedRDDs) - def parentForgetTime = forgetTime + // Duration for which the DStream requires its parent DStream to remember each RDD created + def parentRememberDuration = rememberDuration // Change this RDD's storage level def persist( @@ -118,23 +118,24 @@ extends Serializable with Logging { dependencies.foreach(_.setGraph(graph)) } - protected[streaming] def setForgetTime(time: Time = slideTime) { - if (time == null) { - throw new Exception("Time gap for forgetting RDDs cannot be set to null for " + this) - } else if (forgetTime != null && time < forgetTime) { - throw new Exception("Time gap for forgetting RDDs cannot be reduced from " + forgetTime - + " to " + time + " for " + this) + protected[streaming] def setRememberDuration(duration: Time = slideTime) { + if (duration == null) { + throw new Exception("Duration for remembering RDDs cannot be set to null for " + this) + } else if (rememberDuration != null && duration < rememberDuration) { + logWarning("Duration for remembering RDDs cannot be reduced from " + rememberDuration + + " to " + duration + " for " + this) + } else { + rememberDuration = duration + dependencies.foreach(_.setRememberDuration(parentRememberDuration)) + logInfo("Duration for remembering RDDs set to " + rememberDuration + " for " + this) } - forgetTime = time - dependencies.foreach(_.setForgetTime(parentForgetTime)) - logInfo("Time gap for forgetting RDDs set to " + forgetTime + " for " + this) } /** This method checks whether the 'time' is valid wrt slideTime for generating RDD */ protected def isTimeValid(time: Time): Boolean = { if (!isInitialized) { throw new Exception (this + " has not been initialized") - } else if (time < zeroTime || ! (time - zeroTime).isMultipleOf(slideTime)) { + } else if (time <= zeroTime || ! 
(time - zeroTime).isMultipleOf(slideTime)) { false } else { true @@ -143,7 +144,7 @@ extends Serializable with Logging { /** * This method either retrieves a precomputed RDD of this DStream, - * or computes the RDD (if the time is valid) + * or computes the RDD (if the time is valid) */ def getOrCompute(time: Time): Option[RDD[T]] = { // If this DStream was not initialized (i.e., zeroTime not set), then do it @@ -154,7 +155,7 @@ extends Serializable with Logging { // probably all RDDs in this DStream will be reused and hence should be cached case Some(oldRDD) => Some(oldRDD) - // if RDD was not generated, and if the time is valid + // if RDD was not generated, and if the time is valid // (based on sliding time of this DStream), then generate the RDD case None => { if (isTimeValid(time)) { @@ -199,9 +200,8 @@ extends Serializable with Logging { def forgetOldRDDs(time: Time) { val keys = generatedRDDs.keys var numForgotten = 0 - keys.foreach(t => { - if (t < (time - forgetTime)) { + if (t <= (time - rememberDuration)) { generatedRDDs.remove(t) numForgotten += 1 //logInfo("Forgot RDD of time " + t + " from " + this) @@ -530,7 +530,7 @@ class UnifiedDStream[T: ClassManifest](parents: Array[DStream[T]]) val rdds = new ArrayBuffer[RDD[T]]() parents.map(_.getOrCompute(validTime)).foreach(_ match { case Some(rdd) => rdds += rdd - case None => throw new Exception("Could not generate RDD from a parent for unifying at time " + validTime) + case None => throw new Exception("Could not generate RDD from a parent for unifying at time " + validTime) }) if (rdds.size > 0) { Some(new UnionRDD(ssc.sc, rdds)) diff --git a/streaming/src/main/scala/spark/streaming/DStreamGraph.scala b/streaming/src/main/scala/spark/streaming/DStreamGraph.scala index 964c8a26a0..ac44d7a2a6 100644 --- a/streaming/src/main/scala/spark/streaming/DStreamGraph.scala +++ b/streaming/src/main/scala/spark/streaming/DStreamGraph.scala @@ -12,6 +12,7 @@ final class DStreamGraph extends Serializable with Logging { private[streaming] var zeroTime: Time = null private[streaming] var batchDuration: Time = null + private[streaming] var rememberDuration: Time = null private[streaming] var checkpointInProgress = false def start(time: Time) { @@ -21,7 +22,11 @@ final class DStreamGraph extends Serializable with Logging { } zeroTime = time outputStreams.foreach(_.initialize(zeroTime)) - outputStreams.foreach(_.setForgetTime()) + outputStreams.foreach(_.setRememberDuration()) // first set the rememberDuration to default values + if (rememberDuration != null) { + // if custom rememberDuration has been provided, set the rememberDuration + outputStreams.foreach(_.setRememberDuration(rememberDuration)) + } inputStreams.par.foreach(_.start()) } } @@ -48,6 +53,16 @@ final class DStreamGraph extends Serializable with Logging { batchDuration = duration } + def setRememberDuration(duration: Time) { + this.synchronized { + if (rememberDuration != null) { + throw new Exception("Batch duration already set as " + batchDuration + + ". 
cannot set it again.") + } + } + rememberDuration = duration + } + def addInputStream(inputStream: InputDStream[_]) { this.synchronized { inputStream.setGraph(this) diff --git a/streaming/src/main/scala/spark/streaming/ReducedWindowedDStream.scala b/streaming/src/main/scala/spark/streaming/ReducedWindowedDStream.scala index f3e95c9e2b..fcf57aced7 100644 --- a/streaming/src/main/scala/spark/streaming/ReducedWindowedDStream.scala +++ b/streaming/src/main/scala/spark/streaming/ReducedWindowedDStream.scala @@ -38,7 +38,7 @@ class ReducedWindowedDStream[K: ClassManifest, V: ClassManifest]( override def slideTime: Time = _slideTime //TODO: This is wrong. This should depend on the checkpointInterval - override def parentForgetTime: Time = forgetTime + windowTime + override def parentRememberDuration: Time = rememberDuration + windowTime override def persist( storageLevel: StorageLevel, @@ -49,10 +49,10 @@ class ReducedWindowedDStream[K: ClassManifest, V: ClassManifest]( this } - protected[streaming] override def setForgetTime(time: Time) { - if (forgetTime == null || forgetTime < time) { - forgetTime = time - dependencies.foreach(_.setForgetTime(forgetTime + windowTime)) + protected[streaming] override def setRememberDuration(time: Time) { + if (rememberDuration == null || rememberDuration < time) { + rememberDuration = time + dependencies.foreach(_.setRememberDuration(rememberDuration + windowTime)) } } diff --git a/streaming/src/main/scala/spark/streaming/Scheduler.scala b/streaming/src/main/scala/spark/streaming/Scheduler.scala index 99e30b6110..7d52e2eddf 100644 --- a/streaming/src/main/scala/spark/streaming/Scheduler.scala +++ b/streaming/src/main/scala/spark/streaming/Scheduler.scala @@ -37,8 +37,8 @@ extends Logging { timer.restart(graph.zeroTime.milliseconds) logInfo("Scheduler's timer restarted") } else { - val zeroTime = Time(timer.start()) - graph.start(zeroTime) + val firstTime = Time(timer.start()) + graph.start(firstTime - ssc.graph.batchDuration) logInfo("Scheduler's timer started") } logInfo("Scheduler started") diff --git a/streaming/src/main/scala/spark/streaming/StreamingContext.scala b/streaming/src/main/scala/spark/streaming/StreamingContext.scala index b5f4571798..7022056f7c 100644 --- a/streaming/src/main/scala/spark/streaming/StreamingContext.scala +++ b/streaming/src/main/scala/spark/streaming/StreamingContext.scala @@ -71,6 +71,10 @@ class StreamingContext ( graph.setBatchDuration(duration) } + def setRememberDuration(duration: Time) { + graph.setRememberDuration(duration) + } + def setCheckpointDetails(file: String, interval: Time) { checkpointFile = file checkpointInterval = interval diff --git a/streaming/src/main/scala/spark/streaming/WindowedDStream.scala b/streaming/src/main/scala/spark/streaming/WindowedDStream.scala index 2984f88284..b90e22351b 100644 --- a/streaming/src/main/scala/spark/streaming/WindowedDStream.scala +++ b/streaming/src/main/scala/spark/streaming/WindowedDStream.scala @@ -24,7 +24,7 @@ class WindowedDStream[T: ClassManifest]( override def slideTime: Time = _slideTime - override def parentForgetTime: Time = forgetTime + windowTime + override def parentRememberDuration: Time = rememberDuration + windowTime override def compute(validTime: Time): Option[RDD[T]] = { val currentWindow = Interval(validTime - windowTime + parent.slideTime, validTime) diff --git a/streaming/src/test/scala/spark/streaming/DStreamBasicSuite.scala b/streaming/src/test/scala/spark/streaming/DStreamBasicSuite.scala index 5dd8b675b1..28bbb152ca 100644 --- 
a/streaming/src/test/scala/spark/streaming/DStreamBasicSuite.scala +++ b/streaming/src/test/scala/spark/streaming/DStreamBasicSuite.scala @@ -41,7 +41,7 @@ class DStreamBasicSuite extends DStreamSuiteBase { ) } - test("stateful operations") { + test("updateStateByKey") { val inputData = Seq( Seq("a"), @@ -75,63 +75,54 @@ class DStreamBasicSuite extends DStreamSuiteBase { testOperation(inputData, updateStateOperation, outputData, true) } - test("forgetting of RDDs") { + test("forgetting of RDDs - map and window operations") { assert(batchDuration === Seconds(1), "Batch duration has changed from 1 second") - val input = Seq(1 to 4, 5 to 8, 9 to 12, 13 to 16, 17 to 20, 21 to 24, 25 to 28, 29 to 32) + val input = (0 until 10).map(x => Seq(x, x + 1)).toSeq + val rememberDuration = Seconds(3) - assert(input.size % 4 === 0, "Number of inputs should be a multiple of 4") + assert(input.size === 10, "Number of inputs have changed") def operation(s: DStream[Int]): DStream[(Int, Int)] = { s.map(x => (x % 10, 1)) .window(Seconds(2), Seconds(1)) - .reduceByKeyAndWindow(_ + _, _ - _, Seconds(4), Seconds(1)) + .window(Seconds(4), Seconds(2)) } val ssc = setupStreams(input, operation _) - runStreams[(Int, Int)](ssc, input.size, input.size) + ssc.setRememberDuration(rememberDuration) + runStreams[(Int, Int)](ssc, input.size, input.size / 2) - val reducedWindowedStream = ssc.graph.getOutputStreams().head.dependencies.head - .asInstanceOf[ReducedWindowedDStream[Int, Int]] - val windowedStream = reducedWindowedStream.dependencies.head.dependencies.head - .asInstanceOf[WindowedDStream[(Int, Int)]] - val mappedStream = windowedStream.dependencies.head + val windowedStream2 = ssc.graph.getOutputStreams().head.dependencies.head + val windowedStream1 = windowedStream2.dependencies.head + val mappedStream = windowedStream1.dependencies.head val clock = ssc.scheduler.clock.asInstanceOf[ManualClock] - val finalTime = Seconds(7) - //assert(clock.time === finalTime.milliseconds) + assert(clock.time === Seconds(10).milliseconds) - // ReducedWindowedStream should remember the last RDD created - assert(reducedWindowedStream.generatedRDDs.contains(finalTime)) + // IDEALLY + // WindowedStream2 should remember till 7 seconds: 10, 8, + // WindowedStream1 should remember till 4 seconds: 10, 9, 8, 7, 6, 5 + // MappedStream should remember till 7 seconds: 10, 9, 8, 7, 6, 5, 4, 3, - // ReducedWindowedStream should have forgotten the previous to last RDD created - assert(!reducedWindowedStream.generatedRDDs.contains(finalTime - reducedWindowedStream.slideTime)) + // IN THIS TEST + // WindowedStream2 should remember till 7 seconds: 10, 8, + // WindowedStream1 should remember till 4 seconds: 10, 9, 8, 7, 6, 5, 4 + // MappedStream should remember till 7 seconds: 10, 9, 8, 7, 6, 5, 4, 3, 2 - // WindowedStream should remember the last RDD created - assert(windowedStream.generatedRDDs.contains(finalTime)) + // WindowedStream2 + assert(windowedStream2.generatedRDDs.contains(Seconds(10))) + assert(windowedStream2.generatedRDDs.contains(Seconds(8))) + assert(!windowedStream2.generatedRDDs.contains(Seconds(6))) - // WindowedStream should still remember the previous to last RDD created - // as the last RDD of ReducedWindowedStream requires that RDD - assert(windowedStream.generatedRDDs.contains(finalTime - windowedStream.slideTime)) + // WindowedStream1 + assert(windowedStream1.generatedRDDs.contains(Seconds(10))) + assert(windowedStream1.generatedRDDs.contains(Seconds(4))) + assert(!windowedStream1.generatedRDDs.contains(Seconds(3))) - // 
WindowedStream should have forgotten this RDD as the last RDD of - // ReducedWindowedStream DOES NOT require this RDD - assert(!windowedStream.generatedRDDs.contains(finalTime - windowedStream.slideTime - reducedWindowedStream.windowTime)) - - // MappedStream should remember the last RDD created - assert(mappedStream.generatedRDDs.contains(finalTime)) - - // MappedStream should still remember the previous to last RDD created - // as the last RDD of WindowedStream requires that RDD - assert(mappedStream.generatedRDDs.contains(finalTime - mappedStream.slideTime)) - - // MappedStream should still remember this RDD as the last RDD of - // ReducedWindowedStream requires that RDD (even though the last RDD of - // WindowedStream does not need it) - assert(mappedStream.generatedRDDs.contains(finalTime - windowedStream.windowTime)) - - // MappedStream should have forgotten this RDD as the last RDD of - // ReducedWindowedStream DOES NOT require this RDD - assert(!mappedStream.generatedRDDs.contains(finalTime - mappedStream.slideTime - windowedStream.windowTime - reducedWindowedStream.windowTime)) + // MappedStream + assert(mappedStream.generatedRDDs.contains(Seconds(10))) + assert(mappedStream.generatedRDDs.contains(Seconds(2))) + assert(!mappedStream.generatedRDDs.contains(Seconds(1))) } } diff --git a/streaming/src/test/scala/spark/streaming/DStreamSuiteBase.scala b/streaming/src/test/scala/spark/streaming/DStreamSuiteBase.scala index 6e5a7a58bb..59fa5a6f22 100644 --- a/streaming/src/test/scala/spark/streaming/DStreamSuiteBase.scala +++ b/streaming/src/test/scala/spark/streaming/DStreamSuiteBase.scala @@ -104,6 +104,8 @@ trait DStreamSuiteBase extends FunSuite with Logging { assert(timeTaken < maxWaitTimeMillis, "Operation timed out after " + timeTaken + " ms") assert(output.size === numExpectedOutput, "Unexpected number of outputs generated") + + Thread.sleep(500) // Give some time for the forgetting old RDDs to complete } catch { case e: Exception => e.printStackTrace(); throw e; } finally { diff --git a/streaming/src/test/scala/spark/streaming/DStreamWindowSuite.scala b/streaming/src/test/scala/spark/streaming/DStreamWindowSuite.scala index 8dd18f491a..cfcab6298d 100644 --- a/streaming/src/test/scala/spark/streaming/DStreamWindowSuite.scala +++ b/streaming/src/test/scala/spark/streaming/DStreamWindowSuite.scala @@ -9,25 +9,24 @@ class DStreamWindowSuite extends DStreamSuiteBase { override def maxWaitTimeMillis() = 20000 val largerSlideInput = Seq( - Seq(("a", 1)), // 1st window from here - Seq(("a", 2)), - Seq(("a", 3)), // 2nd window from here - Seq(("a", 4)), - Seq(("a", 5)), // 3rd window from here - Seq(("a", 6)), - Seq(), // 4th window from here + Seq(("a", 1)), + Seq(("a", 2)), // 1st window from here + Seq(("a", 3)), + Seq(("a", 4)), // 2nd window from here + Seq(("a", 5)), + Seq(("a", 6)), // 3rd window from here Seq(), - Seq() // 5th window from here + Seq() // 4th window from here ) val largerSlideOutput = Seq( - Seq(("a", 1)), - Seq(("a", 6)), - Seq(("a", 14)), - Seq(("a", 15)), - Seq(("a", 6)) + Seq(("a", 3)), + Seq(("a", 10)), + Seq(("a", 18)), + Seq(("a", 11)) ) + val bigInput = Seq( Seq(("a", 1)), Seq(("a", 1), ("b", 1)), From d85c66636ba3b5d32f7e3b47c5b68e1064f8f588 Mon Sep 17 00:00:00 2001 From: Tathagata Das Date: Sun, 21 Oct 2012 17:40:08 -0700 Subject: [PATCH 080/226] Added MapValueDStream, FlatMappedValuesDStream and CoGroupedDStream, and therefore DStream operations mapValue, flatMapValues, cogroup, and join. 
Also, added tests for DStream operations filter, glom, mapPartitions, groupByKey, mapValues, flatMapValues, cogroup, and join. --- .../spark/streaming/CoGroupedDStream.scala | 37 ++++++++ .../main/scala/spark/streaming/DStream.scala | 63 +++++++++++--- .../streaming/PairDStreamFunctions.scala | 75 ++++++++++++---- .../spark/streaming/examples/CountRaw.scala | 2 +- .../spark/streaming/examples/GrepRaw.scala | 2 +- .../streaming/examples/TopKWordCountRaw.scala | 2 +- .../streaming/examples/WordCountRaw.scala | 2 +- .../spark/streaming/DStreamBasicSuite.scala | 86 +++++++++++++++++++ .../spark/streaming/DStreamSuiteBase.scala | 59 ++++++++++++- 9 files changed, 293 insertions(+), 35 deletions(-) create mode 100644 streaming/src/main/scala/spark/streaming/CoGroupedDStream.scala diff --git a/streaming/src/main/scala/spark/streaming/CoGroupedDStream.scala b/streaming/src/main/scala/spark/streaming/CoGroupedDStream.scala new file mode 100644 index 0000000000..5522e2ee21 --- /dev/null +++ b/streaming/src/main/scala/spark/streaming/CoGroupedDStream.scala @@ -0,0 +1,37 @@ +package spark.streaming + +import spark.{CoGroupedRDD, RDD, Partitioner} + +class CoGroupedDStream[K : ClassManifest]( + parents: Seq[DStream[(_, _)]], + partitioner: Partitioner + ) extends DStream[(K, Seq[Seq[_]])](parents.head.ssc) { + + if (parents.length == 0) { + throw new IllegalArgumentException("Empty array of parents") + } + + if (parents.map(_.ssc).distinct.size > 1) { + throw new IllegalArgumentException("Array of parents have different StreamingContexts") + } + + if (parents.map(_.slideTime).distinct.size > 1) { + throw new IllegalArgumentException("Array of parents have different slide times") + } + + override def dependencies = parents.toList + + override def slideTime = parents.head.slideTime + + override def compute(validTime: Time): Option[RDD[(K, Seq[Seq[_]])]] = { + val part = partitioner + val rdds = parents.flatMap(_.getOrCompute(validTime)) + if (rdds.size > 0) { + val q = new CoGroupedRDD[K](rdds, part) + Some(q) + } else { + None + } + } + +} diff --git a/streaming/src/main/scala/spark/streaming/DStream.scala b/streaming/src/main/scala/spark/streaming/DStream.scala index f6cd135e59..38bb7c8b94 100644 --- a/streaming/src/main/scala/spark/streaming/DStream.scala +++ b/streaming/src/main/scala/spark/streaming/DStream.scala @@ -244,27 +244,27 @@ extends Serializable with Logging { * DStream operations * -------------- */ - def map[U: ClassManifest](mapFunc: T => U) = { + def map[U: ClassManifest](mapFunc: T => U): DStream[U] = { new MappedDStream(this, ssc.sc.clean(mapFunc)) } - def flatMap[U: ClassManifest](flatMapFunc: T => Traversable[U]) = { + def flatMap[U: ClassManifest](flatMapFunc: T => Traversable[U]): DStream[U] = { new FlatMappedDStream(this, ssc.sc.clean(flatMapFunc)) } - def filter(filterFunc: T => Boolean) = new FilteredDStream(this, filterFunc) + def filter(filterFunc: T => Boolean): DStream[T] = new FilteredDStream(this, filterFunc) - def glom() = new GlommedDStream(this) + def glom(): DStream[Array[T]] = new GlommedDStream(this) - def mapPartitions[U: ClassManifest](mapPartFunc: Iterator[T] => Iterator[U]) = { + def mapPartitions[U: ClassManifest](mapPartFunc: Iterator[T] => Iterator[U]): DStream[U] = { new MapPartitionedDStream(this, ssc.sc.clean(mapPartFunc)) } - def reduce(reduceFunc: (T, T) => T) = this.map(x => (null, x)).reduceByKey(reduceFunc, 1).map(_._2) + def reduce(reduceFunc: (T, T) => T): DStream[T] = this.map(x => (null, x)).reduceByKey(reduceFunc, 1).map(_._2) - def count() = 
this.map(_ => 1).reduce(_ + _) + def count(): DStream[Int] = this.map(_ => 1).reduce(_ + _) - def collect() = this.map(x => (1, x)).groupByKey(1).map(_._2) + def collect(): DStream[Seq[T]] = this.map(x => (null, x)).groupByKey(1).map(_._2) def foreach(foreachFunc: T => Unit) { val newStream = new PerElementForEachDStream(this, ssc.sc.clean(foreachFunc)) @@ -341,7 +341,7 @@ extends Serializable with Logging { this.map(_ => 1).reduceByWindow(add _, subtract _, windowTime, slideTime) } - def union(that: DStream[T]): DStream[T] = new UnifiedDStream[T](Array(this, that)) + def union(that: DStream[T]): DStream[T] = new UnionDStream[T](Array(this, that)) def slice(interval: Interval): Seq[RDD[T]] = { slice(interval.beginTime, interval.endTime) @@ -507,8 +507,47 @@ class ShuffledDStream[K: ClassManifest, V: ClassManifest, C: ClassManifest]( * TODO */ -class UnifiedDStream[T: ClassManifest](parents: Array[DStream[T]]) - extends DStream[T](parents(0).ssc) { +class MapValuesDStream[K: ClassManifest, V: ClassManifest, U: ClassManifest]( + parent: DStream[(K, V)], + mapValueFunc: V => U + ) extends DStream[(K, U)](parent.ssc) { + + override def dependencies = List(parent) + + override def slideTime: Time = parent.slideTime + + override def compute(validTime: Time): Option[RDD[(K, U)]] = { + parent.getOrCompute(validTime).map(_.mapValues[U](mapValueFunc)) + } +} + + +/** + * TODO + */ + +class FlatMapValuesDStream[K: ClassManifest, V: ClassManifest, U: ClassManifest]( + parent: DStream[(K, V)], + flatMapValueFunc: V => TraversableOnce[U] + ) extends DStream[(K, U)](parent.ssc) { + + override def dependencies = List(parent) + + override def slideTime: Time = parent.slideTime + + override def compute(validTime: Time): Option[RDD[(K, U)]] = { + parent.getOrCompute(validTime).map(_.flatMapValues[U](flatMapValueFunc)) + } +} + + + +/** + * TODO + */ + +class UnionDStream[T: ClassManifest](parents: Array[DStream[T]]) + extends DStream[T](parents.head.ssc) { if (parents.length == 0) { throw new IllegalArgumentException("Empty array of parents") @@ -524,7 +563,7 @@ class UnifiedDStream[T: ClassManifest](parents: Array[DStream[T]]) override def dependencies = parents.toList - override def slideTime: Time = parents(0).slideTime + override def slideTime: Time = parents.head.slideTime override def compute(validTime: Time): Option[RDD[T]] = { val rdds = new ArrayBuffer[RDD[T]]() diff --git a/streaming/src/main/scala/spark/streaming/PairDStreamFunctions.scala b/streaming/src/main/scala/spark/streaming/PairDStreamFunctions.scala index 0bd0321928..5de57eb2fd 100644 --- a/streaming/src/main/scala/spark/streaming/PairDStreamFunctions.scala +++ b/streaming/src/main/scala/spark/streaming/PairDStreamFunctions.scala @@ -1,17 +1,16 @@ package spark.streaming import scala.collection.mutable.ArrayBuffer -import spark.Partitioner -import spark.HashPartitioner +import spark.{Manifests, RDD, Partitioner, HashPartitioner} import spark.streaming.StreamingContext._ import javax.annotation.Nullable -class PairDStreamFunctions[K: ClassManifest, V: ClassManifest](stream: DStream[(K,V)]) +class PairDStreamFunctions[K: ClassManifest, V: ClassManifest](self: DStream[(K,V)]) extends Serializable { - def ssc = stream.ssc + def ssc = self.ssc - def defaultPartitioner(numPartitions: Int = stream.ssc.sc.defaultParallelism) = { + def defaultPartitioner(numPartitions: Int = self.ssc.sc.defaultParallelism) = { new HashPartitioner(numPartitions) } @@ -28,10 +27,10 @@ extends Serializable { } def groupByKey(partitioner: Partitioner): DStream[(K, 
Seq[V])] = { - def createCombiner(v: V) = ArrayBuffer[V](v) - def mergeValue(c: ArrayBuffer[V], v: V) = (c += v) - def mergeCombiner(c1: ArrayBuffer[V], c2: ArrayBuffer[V]) = (c1 ++ c2) - combineByKey(createCombiner _, mergeValue _, mergeCombiner _, partitioner).asInstanceOf[DStream[(K, Seq[V])]] + val createCombiner = (v: V) => ArrayBuffer[V](v) + val mergeValue = (c: ArrayBuffer[V], v: V) => (c += v) + val mergeCombiner = (c1: ArrayBuffer[V], c2: ArrayBuffer[V]) => (c1 ++ c2) + combineByKey(createCombiner, mergeValue, mergeCombiner, partitioner).asInstanceOf[DStream[(K, Seq[V])]] } def reduceByKey(reduceFunc: (V, V) => V): DStream[(K, V)] = { @@ -52,7 +51,7 @@ extends Serializable { mergeValue: (C, V) => C, mergeCombiner: (C, C) => C, partitioner: Partitioner) : ShuffledDStream[K, V, C] = { - new ShuffledDStream[K, V, C](stream, createCombiner, mergeValue, mergeCombiner, partitioner) + new ShuffledDStream[K, V, C](self, createCombiner, mergeValue, mergeCombiner, partitioner) } def groupByKeyAndWindow(windowTime: Time, slideTime: Time): DStream[(K, Seq[V])] = { @@ -72,14 +71,14 @@ extends Serializable { slideTime: Time, partitioner: Partitioner ): DStream[(K, Seq[V])] = { - stream.window(windowTime, slideTime).groupByKey(partitioner) + self.window(windowTime, slideTime).groupByKey(partitioner) } def reduceByKeyAndWindow( reduceFunc: (V, V) => V, windowTime: Time ): DStream[(K, V)] = { - reduceByKeyAndWindow(reduceFunc, windowTime, stream.slideTime, defaultPartitioner()) + reduceByKeyAndWindow(reduceFunc, windowTime, self.slideTime, defaultPartitioner()) } def reduceByKeyAndWindow( @@ -105,7 +104,7 @@ extends Serializable { slideTime: Time, partitioner: Partitioner ): DStream[(K, V)] = { - stream.window(windowTime, slideTime).reduceByKey(ssc.sc.clean(reduceFunc), partitioner) + self.window(windowTime, slideTime).reduceByKey(ssc.sc.clean(reduceFunc), partitioner) } // This method is the efficient sliding window reduce operation, @@ -148,7 +147,7 @@ extends Serializable { val cleanedReduceFunc = ssc.sc.clean(reduceFunc) val cleanedInvReduceFunc = ssc.sc.clean(invReduceFunc) new ReducedWindowedDStream[K, V]( - stream, cleanedReduceFunc, cleanedInvReduceFunc, windowTime, slideTime, partitioner) + self, cleanedReduceFunc, cleanedInvReduceFunc, windowTime, slideTime, partitioner) } // TODO: @@ -184,7 +183,53 @@ extends Serializable { partitioner: Partitioner, rememberPartitioner: Boolean ): DStream[(K, S)] = { - new StateDStream(stream, ssc.sc.clean(updateFunc), partitioner, rememberPartitioner) + new StateDStream(self, ssc.sc.clean(updateFunc), partitioner, rememberPartitioner) + } + + + def mapValues[U: ClassManifest](mapValuesFunc: V => U): DStream[(K, U)] = { + new MapValuesDStream[K, V, U](self, mapValuesFunc) + } + + def flatMapValues[U: ClassManifest]( + flatMapValuesFunc: V => TraversableOnce[U] + ): DStream[(K, U)] = { + new FlatMapValuesDStream[K, V, U](self, flatMapValuesFunc) + } + + def cogroup[W: ClassManifest](other: DStream[(K, W)]): DStream[(K, (Seq[V], Seq[W]))] = { + cogroup(other, defaultPartitioner()) + } + + def cogroup[W: ClassManifest]( + other: DStream[(K, W)], + partitioner: Partitioner + ): DStream[(K, (Seq[V], Seq[W]))] = { + + val cgd = new CoGroupedDStream[K]( + Seq(self.asInstanceOf[DStream[(_, _)]], other.asInstanceOf[DStream[(_, _)]]), + partitioner + ) + val pdfs = new PairDStreamFunctions[K, Seq[Seq[_]]](cgd)( + classManifest[K], + Manifests.seqSeqManifest + ) + pdfs.mapValues { + case Seq(vs, ws) => + (vs.asInstanceOf[Seq[V]], ws.asInstanceOf[Seq[W]]) + } + } 
+ + def join[W: ClassManifest](other: DStream[(K, W)]): DStream[(K, (V, W))] = { + join[W](other, defaultPartitioner()) + } + + def join[W: ClassManifest](other: DStream[(K, W)], partitioner: Partitioner): DStream[(K, (V, W))] = { + this.cogroup(other, partitioner) + .flatMapValues{ + case (vs, ws) => + for (v <- vs.iterator; w <- ws.iterator) yield (v, w) + } } } diff --git a/streaming/src/main/scala/spark/streaming/examples/CountRaw.scala b/streaming/src/main/scala/spark/streaming/examples/CountRaw.scala index c78c1e9660..ed571d22e3 100644 --- a/streaming/src/main/scala/spark/streaming/examples/CountRaw.scala +++ b/streaming/src/main/scala/spark/streaming/examples/CountRaw.scala @@ -25,7 +25,7 @@ object CountRaw { val rawStreams = (1 to numStreams).map(_ => ssc.createRawNetworkStream[String](host, port, StorageLevel.MEMORY_ONLY_2)).toArray - val union = new UnifiedDStream(rawStreams) + val union = new UnionDStream(rawStreams) union.map(_.length + 2).reduce(_ + _).foreachRDD(r => println("Byte count: " + r.collect().mkString)) ssc.start() } diff --git a/streaming/src/main/scala/spark/streaming/examples/GrepRaw.scala b/streaming/src/main/scala/spark/streaming/examples/GrepRaw.scala index cc52da7bd4..6af1c36891 100644 --- a/streaming/src/main/scala/spark/streaming/examples/GrepRaw.scala +++ b/streaming/src/main/scala/spark/streaming/examples/GrepRaw.scala @@ -25,7 +25,7 @@ object GrepRaw { val rawStreams = (1 to numStreams).map(_ => ssc.createRawNetworkStream[String](host, port, StorageLevel.MEMORY_ONLY_2)).toArray - val union = new UnifiedDStream(rawStreams) + val union = new UnionDStream(rawStreams) union.filter(_.contains("Culpepper")).count().foreachRDD(r => println("Grep count: " + r.collect().mkString)) ssc.start() diff --git a/streaming/src/main/scala/spark/streaming/examples/TopKWordCountRaw.scala b/streaming/src/main/scala/spark/streaming/examples/TopKWordCountRaw.scala index 3ba07d0448..af0a3bf98a 100644 --- a/streaming/src/main/scala/spark/streaming/examples/TopKWordCountRaw.scala +++ b/streaming/src/main/scala/spark/streaming/examples/TopKWordCountRaw.scala @@ -37,7 +37,7 @@ object TopKWordCountRaw { val rawStreams = (1 to streams).map(_ => ssc.createRawNetworkStream[String](host, port, StorageLevel.MEMORY_ONLY_2)).toArray - val union = new UnifiedDStream(rawStreams) + val union = new UnionDStream(rawStreams) val windowedCounts = union.mapPartitions(splitAndCountPartitions) .reduceByKeyAndWindow(add _, subtract _, Seconds(30), Milliseconds(batchMs), reduces) diff --git a/streaming/src/main/scala/spark/streaming/examples/WordCountRaw.scala b/streaming/src/main/scala/spark/streaming/examples/WordCountRaw.scala index 9702003805..98bafec529 100644 --- a/streaming/src/main/scala/spark/streaming/examples/WordCountRaw.scala +++ b/streaming/src/main/scala/spark/streaming/examples/WordCountRaw.scala @@ -37,7 +37,7 @@ object WordCountRaw { val rawStreams = (1 to streams).map(_ => ssc.createRawNetworkStream[String](host, port, StorageLevel.MEMORY_ONLY_2)).toArray - val union = new UnifiedDStream(rawStreams) + val union = new UnionDStream(rawStreams) val windowedCounts = union.mapPartitions(splitAndCountPartitions) .reduceByKeyAndWindow(add _, subtract _, Seconds(30), Milliseconds(batchMs), reduces) diff --git a/streaming/src/test/scala/spark/streaming/DStreamBasicSuite.scala b/streaming/src/test/scala/spark/streaming/DStreamBasicSuite.scala index 28bbb152ca..db95c2cfaa 100644 --- a/streaming/src/test/scala/spark/streaming/DStreamBasicSuite.scala +++ 
b/streaming/src/test/scala/spark/streaming/DStreamBasicSuite.scala @@ -24,6 +24,44 @@ class DStreamBasicSuite extends DStreamSuiteBase { ) } + test("filter") { + val input = Seq(1 to 4, 5 to 8, 9 to 12) + testOperation( + input, + (r: DStream[Int]) => r.filter(x => (x % 2 == 0)), + input.map(_.filter(x => (x % 2 == 0))) + ) + } + + test("glom") { + assert(numInputPartitions === 2, "Number of input partitions has been changed from 2") + val input = Seq(1 to 4, 5 to 8, 9 to 12) + val output = Seq( + Seq( Seq(1, 2), Seq(3, 4) ), + Seq( Seq(5, 6), Seq(7, 8) ), + Seq( Seq(9, 10), Seq(11, 12) ) + ) + val operation = (r: DStream[Int]) => r.glom().map(_.toSeq) + testOperation(input, operation, output) + } + + test("mapPartitions") { + assert(numInputPartitions === 2, "Number of input partitions has been changed from 2") + val input = Seq(1 to 4, 5 to 8, 9 to 12) + val output = Seq(Seq(3, 7), Seq(11, 15), Seq(19, 23)) + val operation = (r: DStream[Int]) => r.mapPartitions(x => Iterator(x.reduce(_ + _))) + testOperation(input, operation, output, true) + } + + test("groupByKey") { + testOperation( + Seq( Seq("a", "a", "b"), Seq("", ""), Seq() ), + (s: DStream[String]) => s.map(x => (x, 1)).groupByKey(), + Seq( Seq(("a", Seq(1, 1)), ("b", Seq(1))), Seq(("", Seq(1, 1))), Seq() ), + true + ) + } + test("reduceByKey") { testOperation( Seq( Seq("a", "a", "b"), Seq("", ""), Seq() ), @@ -41,6 +79,54 @@ class DStreamBasicSuite extends DStreamSuiteBase { ) } + test("mapValues") { + testOperation( + Seq( Seq("a", "a", "b"), Seq("", ""), Seq() ), + (s: DStream[String]) => s.map(x => (x, 1)).reduceByKey(_ + _).mapValues(_ + 10), + Seq( Seq(("a", 12), ("b", 11)), Seq(("", 12)), Seq() ), + true + ) + } + + test("flatMapValues") { + testOperation( + Seq( Seq("a", "a", "b"), Seq("", ""), Seq() ), + (s: DStream[String]) => s.map(x => (x, 1)).reduceByKey(_ + _).flatMapValues(x => Seq(x, x + 10)), + Seq( Seq(("a", 2), ("a", 12), ("b", 1), ("b", 11)), Seq(("", 2), ("", 12)), Seq() ), + true + ) + } + + test("cogroup") { + val inputData1 = Seq( Seq("a", "a", "b"), Seq("a", ""), Seq(""), Seq() ) + val inputData2 = Seq( Seq("a", "a", "b"), Seq("b", ""), Seq(), Seq() ) + val outputData = Seq( + Seq( ("a", (Seq(1, 1), Seq("x", "x"))), ("b", (Seq(1), Seq("x"))) ), + Seq( ("a", (Seq(1), Seq())), ("b", (Seq(), Seq("x"))), ("", (Seq(1), Seq("x"))) ), + Seq( ("", (Seq(1), Seq())) ), + Seq( ) + ) + val operation = (s1: DStream[String], s2: DStream[String]) => { + s1.map(x => (x,1)).cogroup(s2.map(x => (x, "x"))) + } + testOperation(inputData1, inputData2, operation, outputData, true) + } + + test("join") { + val inputData1 = Seq( Seq("a", "b"), Seq("a", ""), Seq(""), Seq() ) + val inputData2 = Seq( Seq("a", "b"), Seq("b", ""), Seq(), Seq("") ) + val outputData = Seq( + Seq( ("a", (1, "x")), ("b", (1, "x")) ), + Seq( ("", (1, "x")) ), + Seq( ), + Seq( ) + ) + val operation = (s1: DStream[String], s2: DStream[String]) => { + s1.map(x => (x,1)).join(s2.map(x => (x,"x"))) + } + testOperation(inputData1, inputData2, operation, outputData, true) + } + test("updateStateByKey") { val inputData = Seq( diff --git a/streaming/src/test/scala/spark/streaming/DStreamSuiteBase.scala b/streaming/src/test/scala/spark/streaming/DStreamSuiteBase.scala index 59fa5a6f22..2a4b37c965 100644 --- a/streaming/src/test/scala/spark/streaming/DStreamSuiteBase.scala +++ b/streaming/src/test/scala/spark/streaming/DStreamSuiteBase.scala @@ -6,7 +6,7 @@ import collection.mutable.ArrayBuffer import org.scalatest.FunSuite import 
collection.mutable.SynchronizedBuffer -class TestInputStream[T: ClassManifest](ssc_ : StreamingContext, val input: Seq[Seq[T]]) +class TestInputStream[T: ClassManifest](ssc_ : StreamingContext, input: Seq[Seq[T]], numPartitions: Int) extends InputDStream[T](ssc_) { var currentIndex = 0 @@ -17,9 +17,9 @@ class TestInputStream[T: ClassManifest](ssc_ : StreamingContext, val input: Seq[ def compute(validTime: Time): Option[RDD[T]] = { logInfo("Computing RDD for time " + validTime) val rdd = if (currentIndex < input.size) { - ssc.sc.makeRDD(input(currentIndex), 2) + ssc.sc.makeRDD(input(currentIndex), numPartitions) } else { - ssc.sc.makeRDD(Seq[T](), 2) + ssc.sc.makeRDD(Seq[T](), numPartitions) } logInfo("Created RDD " + rdd.id) currentIndex += 1 @@ -47,6 +47,8 @@ trait DStreamSuiteBase extends FunSuite with Logging { def checkpointInterval() = batchDuration + def numInputPartitions() = 2 + def maxWaitTimeMillis() = 10000 def setupStreams[U: ClassManifest, V: ClassManifest]( @@ -62,7 +64,7 @@ trait DStreamSuiteBase extends FunSuite with Logging { } // Setup the stream computation - val inputStream = new TestInputStream(ssc, input) + val inputStream = new TestInputStream(ssc, input, numInputPartitions) val operatedStream = operation(inputStream) val outputStream = new TestOutputStream(operatedStream, new ArrayBuffer[Seq[V]] with SynchronizedBuffer[Seq[V]]) ssc.registerInputStream(inputStream) @@ -70,6 +72,31 @@ trait DStreamSuiteBase extends FunSuite with Logging { ssc } + def setupStreams[U: ClassManifest, V: ClassManifest, W: ClassManifest]( + input1: Seq[Seq[U]], + input2: Seq[Seq[V]], + operation: (DStream[U], DStream[V]) => DStream[W] + ): StreamingContext = { + + // Create StreamingContext + val ssc = new StreamingContext(master, framework) + ssc.setBatchDuration(batchDuration) + if (checkpointFile != null) { + ssc.setCheckpointDetails(checkpointFile, checkpointInterval()) + } + + // Setup the stream computation + val inputStream1 = new TestInputStream(ssc, input1, numInputPartitions) + val inputStream2 = new TestInputStream(ssc, input2, numInputPartitions) + val operatedStream = operation(inputStream1, inputStream2) + val outputStream = new TestOutputStream(operatedStream, new ArrayBuffer[Seq[W]] with SynchronizedBuffer[Seq[W]]) + ssc.registerInputStream(inputStream1) + ssc.registerInputStream(inputStream2) + ssc.registerOutputStream(outputStream) + ssc + } + + def runStreams[V: ClassManifest]( ssc: StreamingContext, numBatches: Int, @@ -162,4 +189,28 @@ trait DStreamSuiteBase extends FunSuite with Logging { val output = runStreams[V](ssc, numBatches_, expectedOutput.size) verifyOutput[V](output, expectedOutput, useSet) } + + def testOperation[U: ClassManifest, V: ClassManifest, W: ClassManifest]( + input1: Seq[Seq[U]], + input2: Seq[Seq[V]], + operation: (DStream[U], DStream[V]) => DStream[W], + expectedOutput: Seq[Seq[W]], + useSet: Boolean + ) { + testOperation[U, V, W](input1, input2, operation, expectedOutput, -1, useSet) + } + + def testOperation[U: ClassManifest, V: ClassManifest, W: ClassManifest]( + input1: Seq[Seq[U]], + input2: Seq[Seq[V]], + operation: (DStream[U], DStream[V]) => DStream[W], + expectedOutput: Seq[Seq[W]], + numBatches: Int, + useSet: Boolean + ) { + val numBatches_ = if (numBatches > 0) numBatches else expectedOutput.size + val ssc = setupStreams[U, V, W](input1, input2, operation) + val output = runStreams[W](ssc, numBatches_, expectedOutput.size) + verifyOutput[W](output, expectedOutput, useSet) + } } From 2c87c853ba24f55c142e4864b14c62d0a82a82df Mon Sep 17 
00:00:00 2001 From: Tathagata Das Date: Mon, 22 Oct 2012 15:31:19 -0700 Subject: [PATCH 081/226] Renamed examples --- .../examples/{ExampleTwo.scala => FileStream.scala} | 8 ++++---- .../examples/{ExampleOne.scala => QueueStream.scala} | 6 +++--- 2 files changed, 7 insertions(+), 7 deletions(-) rename streaming/src/main/scala/spark/streaming/examples/{ExampleTwo.scala => FileStream.scala} (88%) rename streaming/src/main/scala/spark/streaming/examples/{ExampleOne.scala => QueueStream.scala} (88%) diff --git a/streaming/src/main/scala/spark/streaming/examples/ExampleTwo.scala b/streaming/src/main/scala/spark/streaming/examples/FileStream.scala similarity index 88% rename from streaming/src/main/scala/spark/streaming/examples/ExampleTwo.scala rename to streaming/src/main/scala/spark/streaming/examples/FileStream.scala index ad563e2c75..301da56014 100644 --- a/streaming/src/main/scala/spark/streaming/examples/ExampleTwo.scala +++ b/streaming/src/main/scala/spark/streaming/examples/FileStream.scala @@ -7,15 +7,15 @@ import org.apache.hadoop.fs.Path import org.apache.hadoop.conf.Configuration -object ExampleTwo { +object FileStream { def main(args: Array[String]) { if (args.length < 2) { - System.err.println("Usage: ExampleOne ") + System.err.println("Usage: FileStream ") System.exit(1) } // Create the context and set the batch size - val ssc = new StreamingContext(args(0), "ExampleTwo") + val ssc = new StreamingContext(args(0), "FileStream") ssc.setBatchDuration(Seconds(2)) // Create the new directory @@ -23,6 +23,7 @@ object ExampleTwo { val fs = directory.getFileSystem(new Configuration()) if (fs.exists(directory)) throw new Exception("This directory already exists") fs.mkdirs(directory) + fs.deleteOnExit(directory) // Create the FileInputDStream on the directory and use the // stream to count words in new files created @@ -41,7 +42,6 @@ object ExampleTwo { } Thread.sleep(5000) // Waiting for the file to be processed ssc.stop() - fs.delete(directory) System.exit(0) } } \ No newline at end of file diff --git a/streaming/src/main/scala/spark/streaming/examples/ExampleOne.scala b/streaming/src/main/scala/spark/streaming/examples/QueueStream.scala similarity index 88% rename from streaming/src/main/scala/spark/streaming/examples/ExampleOne.scala rename to streaming/src/main/scala/spark/streaming/examples/QueueStream.scala index 2ff8790e77..ae701bba6d 100644 --- a/streaming/src/main/scala/spark/streaming/examples/ExampleOne.scala +++ b/streaming/src/main/scala/spark/streaming/examples/QueueStream.scala @@ -7,16 +7,16 @@ import spark.streaming.Seconds import scala.collection.mutable.SynchronizedQueue -object ExampleOne { +object QueueStream { def main(args: Array[String]) { if (args.length < 1) { - System.err.println("Usage: ExampleOne ") + System.err.println("Usage: QueueStream ") System.exit(1) } // Create the context and set the batch size - val ssc = new StreamingContext(args(0), "ExampleOne") + val ssc = new StreamingContext(args(0), "QueueStream") ssc.setBatchDuration(Seconds(1)) // Create the queue through which RDDs can be pushed to From a6de5758f1a48e6c25b441440d8cd84546857326 Mon Sep 17 00:00:00 2001 From: Tathagata Das Date: Tue, 23 Oct 2012 01:41:13 -0700 Subject: [PATCH 082/226] Modified API of NetworkInputDStreams and got ObjectInputDStream and RawInputDStream working. 
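Under the reworked API, an input stream no longer runs its receiver itself; it returns a NetworkReceiver object that the NetworkInputTracker ships to the workers and starts there. A minimal sketch of what a custom stream could look like under this API follows (the names CustomInputDStream and CustomReceiver are illustrative only and are not part of this patch):

class CustomInputDStream[T: ClassManifest](ssc_ : StreamingContext)
  extends NetworkInputDStream[T](ssc_) {
  // The tracker calls this to obtain the receiver it will send to a worker.
  def createReceiver(): NetworkReceiver[T] = new CustomReceiver[T](id)
}

class CustomReceiver[T: ClassManifest](streamId: Int)
  extends NetworkReceiver[T](streamId) {
  protected def onStart() {
    // Illustrative only: open connections / start helper threads here and hand
    // received data to the block manager via pushBlock(blockId, iterator, storageLevel).
  }
  protected def onStop() {
    // Illustrative only: close connections / stop helper threads here.
  }
}

Receivers are sent to the workers as tasks, so they must be serializable; per-worker state (environment, actor, receiving thread) is held in lazy vals that are only materialized when start() runs on the worker.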
--- .../spark/streaming/FileInputDStream.scala | 18 -- .../spark/streaming/NetworkInputDStream.scala | 141 +++++++++- .../NetworkInputReceiverMessage.scala | 7 - .../spark/streaming/NetworkInputTracker.scala | 84 +++--- .../spark/streaming/ObjectInputDStream.scala | 167 +++++++++++- .../spark/streaming/ObjectInputReceiver.scala | 244 ------------------ .../spark/streaming/RawInputDStream.scala | 77 ++---- .../spark/streaming/StreamingContext.scala | 4 +- 8 files changed, 360 insertions(+), 382 deletions(-) delete mode 100644 streaming/src/main/scala/spark/streaming/NetworkInputReceiverMessage.scala delete mode 100644 streaming/src/main/scala/spark/streaming/ObjectInputReceiver.scala diff --git a/streaming/src/main/scala/spark/streaming/FileInputDStream.scala b/streaming/src/main/scala/spark/streaming/FileInputDStream.scala index 29ae89616e..78537b8794 100644 --- a/streaming/src/main/scala/spark/streaming/FileInputDStream.scala +++ b/streaming/src/main/scala/spark/streaming/FileInputDStream.scala @@ -19,15 +19,6 @@ class FileInputDStream[K: ClassManifest, V: ClassManifest, F <: NewInputFormat[K @transient private var path_ : Path = null @transient private var fs_ : FileSystem = null - /* - @transient @noinline lazy val path = { - //if (directory == null) throw new Exception("directory is null") - //println(directory) - new Path(directory) - } - @transient lazy val fs = path.getFileSystem(new Configuration()) - */ - var lastModTime: Long = 0 def path(): Path = { @@ -79,15 +70,6 @@ class FileInputDStream[K: ClassManifest, V: ClassManifest, F <: NewInputFormat[K file => ssc.sc.newAPIHadoopFile[K, V, F](file.getPath.toString))) Some(newRDD) } - /* - @throws(classOf[IOException]) - private def readObject(ois: ObjectInputStream) { - println(this.getClass().getSimpleName + ".readObject used") - ois.defaultReadObject() - println("HERE HERE" + this.directory) - } - */ - } object FileInputDStream { diff --git a/streaming/src/main/scala/spark/streaming/NetworkInputDStream.scala b/streaming/src/main/scala/spark/streaming/NetworkInputDStream.scala index bf83f98ec4..6b41e4d2c8 100644 --- a/streaming/src/main/scala/spark/streaming/NetworkInputDStream.scala +++ b/streaming/src/main/scala/spark/streaming/NetworkInputDStream.scala @@ -1,13 +1,30 @@ package spark.streaming -import spark.RDD -import spark.BlockRDD +import spark.{Logging, SparkEnv, RDD, BlockRDD} +import spark.storage.StorageLevel -abstract class NetworkInputDStream[T: ClassManifest](@transient ssc: StreamingContext) - extends InputDStream[T](ssc) { +import java.nio.ByteBuffer - val id = ssc.getNewNetworkStreamId() - +import akka.actor.{Props, Actor} +import akka.pattern.ask +import akka.dispatch.Await +import akka.util.duration._ + +abstract class NetworkInputDStream[T: ClassManifest](@transient ssc_ : StreamingContext) + extends InputDStream[T](ssc_) { + + // This is an unique identifier that is used to match the network receiver with the + // corresponding network input stream. + val id = ssc.getNewNetworkStreamId() + + /** + * This method creates the receiver object that will be sent to the workers + * to receive data. This method needs to defined by any specific implementation + * of a NetworkInputDStream. + */ + def createReceiver(): NetworkReceiver[T] + + // Nothing to start or stop as both taken care of by the NetworkInputTracker. 
def start() {} def stop() {} @@ -16,8 +33,114 @@ abstract class NetworkInputDStream[T: ClassManifest](@transient ssc: StreamingCo val blockIds = ssc.networkInputTracker.getBlockIds(id, validTime) Some(new BlockRDD[T](ssc.sc, blockIds)) } - - /** Called on workers to run a receiver for the stream. */ - def runReceiver(): Unit +} + + +sealed trait NetworkReceiverMessage +case class StopReceiver(msg: String) extends NetworkReceiverMessage +case class ReportBlock(blockId: String) extends NetworkReceiverMessage +case class ReportError(msg: String) extends NetworkReceiverMessage + +abstract class NetworkReceiver[T: ClassManifest](streamId: Int) extends Serializable with Logging { + + initLogging() + + lazy protected val env = SparkEnv.get + + lazy protected val actor = env.actorSystem.actorOf( + Props(new NetworkReceiverActor()), "NetworkReceiver-" + streamId) + + lazy protected val receivingThread = Thread.currentThread() + + /** This method will be called to start receiving data. */ + protected def onStart() + + /** This method will be called to stop receiving data. */ + protected def onStop() + + /** + * This method starts the receiver. First is accesses all the lazy members to + * materialize them. Then it calls the user-defined onStart() method to start + * other threads, etc required to receiver the data. + */ + def start() { + try { + // Access the lazy vals to materialize them + env + actor + receivingThread + + // Call user-defined onStart() + onStart() + } catch { + case ie: InterruptedException => + logWarning("Receiving thread interrupted") + case e: Exception => + stopOnError(e) + } + } + + /** + * This method stops the receiver. First it interrupts the main receiving thread, + * that is, the thread that called receiver.start(). Then it calls the user-defined + * onStop() method to stop other threads and/or do cleanup. + */ + def stop() { + receivingThread.interrupt() + onStop() + //TODO: terminate the actor + } + + /** + * This method stops the receiver and reports to exception to the tracker. + * This should be called whenever an exception has happened on any thread + * of the receiver. + */ + protected def stopOnError(e: Exception) { + logError("Error receiving data", e) + stop() + actor ! ReportError(e.toString) + } + + /** + * This method pushes a block (as iterator of values) into the block manager. + */ + protected def pushBlock(blockId: String, iterator: Iterator[T], level: StorageLevel) { + env.blockManager.put(blockId, iterator, level) + actor ! ReportBlock(blockId) + } + + /** + * This method pushes a block (as bytes) into the block manager. + */ + protected def pushBlock(blockId: String, bytes: ByteBuffer, level: StorageLevel) { + env.blockManager.putBytes(blockId, bytes, level) + actor ! ReportBlock(blockId) + } + + /** A helper actor that communicates with the NetworkInputTracker */ + private class NetworkReceiverActor extends Actor { + logInfo("Attempting to register with tracker") + val ip = System.getProperty("spark.master.host", "localhost") + val port = System.getProperty("spark.master.port", "7077").toInt + val url = "akka://spark@%s:%s/user/NetworkInputTracker".format(ip, port) + val tracker = env.actorSystem.actorFor(url) + val timeout = 5.seconds + + override def preStart() { + val future = tracker.ask(RegisterReceiver(streamId, self))(timeout) + Await.result(future, timeout) + } + + override def receive() = { + case ReportBlock(blockId) => + tracker ! AddBlocks(streamId, Array(blockId)) + case ReportError(msg) => + tracker ! 
DeregisterReceiver(streamId, msg) + case StopReceiver(msg) => + stop() + tracker ! DeregisterReceiver(streamId, msg) + } + } } diff --git a/streaming/src/main/scala/spark/streaming/NetworkInputReceiverMessage.scala b/streaming/src/main/scala/spark/streaming/NetworkInputReceiverMessage.scala deleted file mode 100644 index deaffe98c8..0000000000 --- a/streaming/src/main/scala/spark/streaming/NetworkInputReceiverMessage.scala +++ /dev/null @@ -1,7 +0,0 @@ -package spark.streaming - -sealed trait NetworkInputReceiverMessage - -case class GetBlockIds(time: Long) extends NetworkInputReceiverMessage -case class GotBlockIds(streamId: Int, blocksIds: Array[String]) extends NetworkInputReceiverMessage -case object StopReceiver extends NetworkInputReceiverMessage diff --git a/streaming/src/main/scala/spark/streaming/NetworkInputTracker.scala b/streaming/src/main/scala/spark/streaming/NetworkInputTracker.scala index 9f9001e4d5..9b1b8813de 100644 --- a/streaming/src/main/scala/spark/streaming/NetworkInputTracker.scala +++ b/streaming/src/main/scala/spark/streaming/NetworkInputTracker.scala @@ -13,13 +13,44 @@ import akka.dispatch._ trait NetworkInputTrackerMessage case class RegisterReceiver(streamId: Int, receiverActor: ActorRef) extends NetworkInputTrackerMessage +case class AddBlocks(streamId: Int, blockIds: Seq[String]) extends NetworkInputTrackerMessage +case class DeregisterReceiver(streamId: Int, msg: String) extends NetworkInputTrackerMessage + class NetworkInputTracker( @transient ssc: StreamingContext, - @transient networkInputStreams: Array[NetworkInputDStream[_]]) -extends Logging { + @transient networkInputStreams: Array[NetworkInputDStream[_]]) + extends Logging { - class TrackerActor extends Actor { + val networkInputStreamIds = networkInputStreams.map(_.id).toArray + val receiverExecutor = new ReceiverExecutor() + val receiverInfo = new HashMap[Int, ActorRef] + val receivedBlockIds = new HashMap[Int, Queue[String]] + val timeout = 5000.milliseconds + + var currentTime: Time = null + + def start() { + ssc.env.actorSystem.actorOf(Props(new NetworkInputTrackerActor), "NetworkInputTracker") + receiverExecutor.start() + } + + def stop() { + receiverExecutor.interrupt() + receiverExecutor.stopReceivers() + } + + def getBlockIds(receiverId: Int, time: Time): Array[String] = synchronized { + val queue = receivedBlockIds.synchronized { + receivedBlockIds.getOrElse(receiverId, new Queue[String]()) + } + val result = queue.synchronized { + queue.dequeueAll(x => true) + } + result.toArray + } + + private class NetworkInputTrackerActor extends Actor { def receive = { case RegisterReceiver(streamId, receiverActor) => { if (!networkInputStreamIds.contains(streamId)) { @@ -29,7 +60,7 @@ extends Logging { logInfo("Registered receiver for network stream " + streamId) sender ! 
true } - case GotBlockIds(streamId, blockIds) => { + case AddBlocks(streamId, blockIds) => { val tmp = receivedBlockIds.synchronized { if (!receivedBlockIds.contains(streamId)) { receivedBlockIds += ((streamId, new Queue[String])) @@ -40,6 +71,12 @@ extends Logging { tmp ++= blockIds } } + case DeregisterReceiver(streamId, msg) => { + receiverInfo -= streamId + logInfo("De-registered receiver for network stream " + streamId + + " with message " + msg) + //TODO: Do something about the corresponding NetworkInputDStream + } } } @@ -58,15 +95,15 @@ extends Logging { } def startReceivers() { - val tempRDD = ssc.sc.makeRDD(networkInputStreams, networkInputStreams.size) - - val startReceiver = (iterator: Iterator[NetworkInputDStream[_]]) => { + val receivers = networkInputStreams.map(_.createReceiver()) + val tempRDD = ssc.sc.makeRDD(receivers, receivers.size) + + val startReceiver = (iterator: Iterator[NetworkReceiver[_]]) => { if (!iterator.hasNext) { throw new Exception("Could not start receiver as details not found.") } - iterator.next().runReceiver() + iterator.next().start() } - ssc.sc.runJob(tempRDD, startReceiver) } @@ -77,33 +114,4 @@ extends Logging { Await.result(futureOfList, timeout) } } - - val networkInputStreamIds = networkInputStreams.map(_.id).toArray - val receiverExecutor = new ReceiverExecutor() - val receiverInfo = new HashMap[Int, ActorRef] - val receivedBlockIds = new HashMap[Int, Queue[String]] - val timeout = 5000.milliseconds - - - var currentTime: Time = null - - def start() { - ssc.env.actorSystem.actorOf(Props(new TrackerActor), "NetworkInputTracker") - receiverExecutor.start() - } - - def stop() { - // stop the actor - receiverExecutor.interrupt() - } - - def getBlockIds(receiverId: Int, time: Time): Array[String] = synchronized { - val queue = receivedBlockIds.synchronized { - receivedBlockIds.getOrElse(receiverId, new Queue[String]()) - } - val result = queue.synchronized { - queue.dequeueAll(x => true) - } - result.toArray - } } diff --git a/streaming/src/main/scala/spark/streaming/ObjectInputDStream.scala b/streaming/src/main/scala/spark/streaming/ObjectInputDStream.scala index 2396b374a0..89aeeda8b3 100644 --- a/streaming/src/main/scala/spark/streaming/ObjectInputDStream.scala +++ b/streaming/src/main/scala/spark/streaming/ObjectInputDStream.scala @@ -1,16 +1,167 @@ package spark.streaming -import java.io.InputStream +import spark.streaming.util.{RecurringTimer, SystemClock} +import spark.storage.StorageLevel + +import java.io.{EOFException, DataInputStream, BufferedInputStream, InputStream} +import java.net.Socket +import java.util.concurrent.ArrayBlockingQueue + +import scala.collection.mutable.ArrayBuffer class ObjectInputDStream[T: ClassManifest]( - @transient ssc: StreamingContext, - val host: String, - val port: Int, - val bytesToObjects: InputStream => Iterator[T]) - extends NetworkInputDStream[T](ssc) { + @transient ssc_ : StreamingContext, + host: String, + port: Int, + bytesToObjects: InputStream => Iterator[T], + storageLevel: StorageLevel + ) extends NetworkInputDStream[T](ssc_) { - override def runReceiver() { - new ObjectInputReceiver(id, host, port, bytesToObjects).run() + def createReceiver(): NetworkReceiver[T] = { + new ObjectInputReceiver(id, host, port, bytesToObjects, storageLevel) } } + +class ObjectInputReceiver[T: ClassManifest]( + streamId: Int, + host: String, + port: Int, + bytesToObjects: InputStream => Iterator[T], + storageLevel: StorageLevel + ) extends NetworkReceiver[T](streamId) { + + lazy protected val dataHandler = new 
DataHandler(this) + + protected def onStart() { + logInfo("Connecting to " + host + ":" + port) + val socket = new Socket(host, port) + logInfo("Connected to " + host + ":" + port) + dataHandler.start() + val iterator = bytesToObjects(socket.getInputStream()) + while(iterator.hasNext) { + val obj = iterator.next + dataHandler += obj + } + } + + protected def onStop() { + dataHandler.stop() + } + + /** + * This is a helper object that manages the data received from the socket. It divides + * the object received into small batches of 100s of milliseconds, pushes them as + * blocks into the block manager and reports the block IDs to the network input + * tracker. It starts two threads, one to periodically start a new batch and prepare + * the previous batch of as a block, the other to push the blocks into the block + * manager. + */ + class DataHandler(receiver: NetworkReceiver[T]) extends Serializable { + case class Block(id: String, iterator: Iterator[T]) + + val clock = new SystemClock() + val blockInterval = 200L + val blockIntervalTimer = new RecurringTimer(clock, blockInterval, updateCurrentBuffer) + val blockStorageLevel = storageLevel + val blocksForPushing = new ArrayBlockingQueue[Block](1000) + val blockPushingThread = new Thread() { override def run() { keepPushingBlocks() } } + + var currentBuffer = new ArrayBuffer[T] + + def start() { + blockIntervalTimer.start() + blockPushingThread.start() + logInfo("Data handler started") + } + + def stop() { + blockIntervalTimer.stop() + blockPushingThread.interrupt() + logInfo("Data handler stopped") + } + + def += (obj: T) { + currentBuffer += obj + } + + def updateCurrentBuffer(time: Long) { + try { + val newBlockBuffer = currentBuffer + currentBuffer = new ArrayBuffer[T] + if (newBlockBuffer.size > 0) { + val blockId = "input-" + streamId + "- " + (time - blockInterval) + val newBlock = new Block(blockId, newBlockBuffer.toIterator) + blocksForPushing.add(newBlock) + } + } catch { + case ie: InterruptedException => + logInfo("Block interval timer thread interrupted") + case e: Exception => + receiver.stop() + } + } + + def keepPushingBlocks() { + logInfo("Block pushing thread started") + try { + while(true) { + val block = blocksForPushing.take() + pushBlock(block.id, block.iterator, storageLevel) + } + } catch { + case ie: InterruptedException => + logInfo("Block pushing thread interrupted") + case e: Exception => + receiver.stop() + } + } + } +} + + +object ObjectInputReceiver { + def bytesToLines(inputStream: InputStream): Iterator[String] = { + val bufferedInputStream = new BufferedInputStream(inputStream) + val dataInputStream = new DataInputStream(bufferedInputStream) + + val iterator = new Iterator[String] { + var gotNext = false + var finished = false + var nextValue: String = null + + private def getNext() { + try { + nextValue = dataInputStream.readLine() + println("[" + nextValue + "]") + } catch { + case eof: EOFException => + finished = true + } + gotNext = true + } + + override def hasNext: Boolean = { + if (!gotNext) { + getNext() + } + if (finished) { + dataInputStream.close() + } + !finished + } + + override def next(): String = { + if (!gotNext) { + getNext() + } + if (finished) { + throw new NoSuchElementException("End of stream") + } + gotNext = false + nextValue + } + } + iterator + } +} diff --git a/streaming/src/main/scala/spark/streaming/ObjectInputReceiver.scala b/streaming/src/main/scala/spark/streaming/ObjectInputReceiver.scala deleted file mode 100644 index 70fa2cdf07..0000000000 --- 
a/streaming/src/main/scala/spark/streaming/ObjectInputReceiver.scala +++ /dev/null @@ -1,244 +0,0 @@ -package spark.streaming - -import spark.Logging -import spark.storage.BlockManager -import spark.storage.StorageLevel -import spark.SparkEnv -import spark.streaming.util.SystemClock -import spark.streaming.util.RecurringTimer - -import scala.collection.mutable.ArrayBuffer -import scala.collection.mutable.Queue -import scala.collection.mutable.SynchronizedPriorityQueue -import scala.math.Ordering - -import java.net.InetSocketAddress -import java.net.Socket -import java.io.InputStream -import java.io.BufferedInputStream -import java.io.DataInputStream -import java.io.EOFException -import java.util.concurrent.atomic.AtomicInteger -import java.util.concurrent.ArrayBlockingQueue - -import akka.actor._ -import akka.pattern.ask -import akka.util.duration._ -import akka.dispatch._ - -class ObjectInputReceiver[T: ClassManifest]( - streamId: Int, - host: String, - port: Int, - bytesToObjects: InputStream => Iterator[T]) - extends Logging { - - class ReceiverActor extends Actor { - override def preStart() { - logInfo("Attempting to register") - val ip = System.getProperty("spark.master.host", "localhost") - val port = System.getProperty("spark.master.port", "7077").toInt - val actorName: String = "NetworkInputTracker" - val url = "akka://spark@%s:%s/user/%s".format(ip, port, actorName) - val trackerActor = env.actorSystem.actorFor(url) - val timeout = 1.seconds - val future = trackerActor.ask(RegisterReceiver(streamId, self))(timeout) - Await.result(future, timeout) - } - - def receive = { - case GetBlockIds(time) => { - logInfo("Got request for block ids for " + time) - sender ! GotBlockIds(streamId, dataHandler.getPushedBlocks()) - } - - case StopReceiver => { - if (receivingThread != null) { - receivingThread.interrupt() - } - sender ! 
true - } - } - } - - class DataHandler { - class Block(val time: Long, val iterator: Iterator[T]) { - val blockId = "input-" + streamId + "-" + time - var pushed = true - override def toString = "input block " + blockId - } - - val clock = new SystemClock() - val blockInterval = 200L - val blockIntervalTimer = new RecurringTimer(clock, blockInterval, updateCurrentBuffer) - val blockOrdering = new Ordering[Block] { - def compare(b1: Block, b2: Block) = (b1.time - b2.time).toInt - } - val blockStorageLevel = StorageLevel.DISK_AND_MEMORY - val blocksForPushing = new ArrayBlockingQueue[Block](1000) - val blocksForReporting = new SynchronizedPriorityQueue[Block]()(blockOrdering) - val blockPushingThread = new Thread() { override def run() { keepPushingBlocks() } } - - var currentBuffer = new ArrayBuffer[T] - - def start() { - blockIntervalTimer.start() - blockPushingThread.start() - logInfo("Data handler started") - } - - def stop() { - blockIntervalTimer.stop() - blockPushingThread.interrupt() - } - - def += (obj: T) { - currentBuffer += obj - } - - def updateCurrentBuffer(time: Long) { - val newBlockBuffer = currentBuffer - currentBuffer = new ArrayBuffer[T] - if (newBlockBuffer.size > 0) { - val newBlock = new Block(time - blockInterval, newBlockBuffer.toIterator) - blocksForPushing.add(newBlock) - blocksForReporting.enqueue(newBlock) - } - } - - def keepPushingBlocks() { - logInfo("Block pushing thread started") - try { - while(true) { - val block = blocksForPushing.take() - if (blockManager != null) { - blockManager.put(block.blockId, block.iterator, blockStorageLevel) - block.pushed = true - } else { - logWarning(block + " not put as block manager is null") - } - } - } catch { - case ie: InterruptedException => println("Block pushing thread interrupted") - case e: Exception => e.printStackTrace() - } - } - - def getPushedBlocks(): Array[String] = { - val pushedBlocks = new ArrayBuffer[String]() - var loop = true - while(loop && !blocksForReporting.isEmpty) { - val block = blocksForReporting.dequeue() - if (block == null) { - loop = false - } else if (!block.pushed) { - blocksForReporting.enqueue(block) - } else { - pushedBlocks += block.blockId - } - } - logInfo("Got " + pushedBlocks.size + " blocks") - pushedBlocks.toArray - } - } - - val blockManager = if (SparkEnv.get != null) SparkEnv.get.blockManager else null - val dataHandler = new DataHandler() - val env = SparkEnv.get - - var receiverActor: ActorRef = null - var receivingThread: Thread = null - - def run() { - initLogging() - var socket: Socket = null - try { - if (SparkEnv.get != null) { - receiverActor = SparkEnv.get.actorSystem.actorOf(Props(new ReceiverActor), "ReceiverActor-" + streamId) - } - dataHandler.start() - socket = connect() - receivingThread = Thread.currentThread() - receive(socket) - } catch { - case ie: InterruptedException => logInfo("Receiver interrupted") - } finally { - receivingThread = null - if (socket != null) socket.close() - dataHandler.stop() - } - } - - def connect(): Socket = { - logInfo("Connecting to " + host + ":" + port) - val socket = new Socket(host, port) - logInfo("Connected to " + host + ":" + port) - socket - } - - def receive(socket: Socket) { - val iterator = bytesToObjects(socket.getInputStream()) - while(iterator.hasNext) { - val obj = iterator.next - dataHandler += obj - } - } -} - - -object ObjectInputReceiver { - def bytesToLines(inputStream: InputStream): Iterator[String] = { - val bufferedInputStream = new BufferedInputStream(inputStream) - val dataInputStream = new 
DataInputStream(bufferedInputStream) - - val iterator = new Iterator[String] { - var gotNext = false - var finished = false - var nextValue: String = null - - private def getNext() { - try { - nextValue = dataInputStream.readLine() - println("[" + nextValue + "]") - } catch { - case eof: EOFException => - finished = true - } - gotNext = true - } - - override def hasNext: Boolean = { - if (!gotNext) { - getNext() - } - if (finished) { - dataInputStream.close() - } - !finished - } - - override def next(): String = { - if (!gotNext) { - getNext() - } - if (finished) { - throw new NoSuchElementException("End of stream") - } - gotNext = false - nextValue - } - } - iterator - } - - def main(args: Array[String]) { - if (args.length < 2) { - println("ObjectInputReceiver ") - System.exit(1) - } - val host = args(0) - val port = args(1).toInt - val receiver = new ObjectInputReceiver(0, host, port, bytesToLines) - receiver.run() - } -} diff --git a/streaming/src/main/scala/spark/streaming/RawInputDStream.scala b/streaming/src/main/scala/spark/streaming/RawInputDStream.scala index d29aea7886..e022b85fbe 100644 --- a/streaming/src/main/scala/spark/streaming/RawInputDStream.scala +++ b/streaming/src/main/scala/spark/streaming/RawInputDStream.scala @@ -1,16 +1,11 @@ package spark.streaming -import akka.actor._ -import akka.pattern.ask -import akka.util.duration._ -import akka.dispatch._ import java.net.InetSocketAddress import java.nio.ByteBuffer import java.nio.channels.{ReadableByteChannel, SocketChannel} import java.io.EOFException import java.util.concurrent.ArrayBlockingQueue -import scala.collection.mutable.ArrayBuffer -import spark.{DaemonThread, Logging, SparkEnv} +import spark._ import spark.storage.StorageLevel /** @@ -20,20 +15,23 @@ import spark.storage.StorageLevel * in the format that the system is configured with. */ class RawInputDStream[T: ClassManifest]( - @transient ssc: StreamingContext, + @transient ssc_ : StreamingContext, host: String, port: Int, - storageLevel: StorageLevel) - extends NetworkInputDStream[T](ssc) with Logging { + storageLevel: StorageLevel + ) extends NetworkInputDStream[T](ssc_ ) with Logging { - val streamId = id + def createReceiver(): NetworkReceiver[T] = { + new RawNetworkReceiver(id, host, port, storageLevel).asInstanceOf[NetworkReceiver[T]] + } +} - /** Called on workers to run a receiver for the stream. */ - def runReceiver() { - val env = SparkEnv.get - val actor = env.actorSystem.actorOf( - Props(new ReceiverActor(env, Thread.currentThread)), "ReceiverActor-" + streamId) +class RawNetworkReceiver(streamId: Int, host: String, port: Int, storageLevel: StorageLevel) + extends NetworkReceiver[Any](streamId) { + var blockPushingThread: Thread = null + + def onStart() { // Open a socket to the target address and keep reading from it logInfo("Connecting to " + host + ":" + port) val channel = SocketChannel.open() @@ -43,18 +41,18 @@ class RawInputDStream[T: ClassManifest]( val queue = new ArrayBlockingQueue[ByteBuffer](2) - new DaemonThread { + blockPushingThread = new DaemonThread { override def run() { var nextBlockNumber = 0 while (true) { val buffer = queue.take() val blockId = "input-" + streamId + "-" + nextBlockNumber nextBlockNumber += 1 - env.blockManager.putBytes(blockId, buffer, storageLevel) - actor ! 
BlockPublished(blockId) + pushBlock(blockId, buffer, storageLevel) } } - }.start() + } + blockPushingThread.start() val lengthBuffer = ByteBuffer.allocate(4) while (true) { @@ -70,6 +68,10 @@ class RawInputDStream[T: ClassManifest]( } } + def onStop() { + blockPushingThread.interrupt() + } + /** Read a buffer fully from a given Channel */ private def readFully(channel: ReadableByteChannel, dest: ByteBuffer) { while (dest.position < dest.limit) { @@ -78,41 +80,4 @@ class RawInputDStream[T: ClassManifest]( } } } - - /** Message sent to ReceiverActor to tell it that a block was published */ - case class BlockPublished(blockId: String) {} - - /** A helper actor that communicates with the NetworkInputTracker */ - private class ReceiverActor(env: SparkEnv, receivingThread: Thread) extends Actor { - val newBlocks = new ArrayBuffer[String] - - logInfo("Attempting to register with tracker") - val ip = System.getProperty("spark.master.host", "localhost") - val port = System.getProperty("spark.master.port", "7077").toInt - val actorName: String = "NetworkInputTracker" - val url = "akka://spark@%s:%s/user/%s".format(ip, port, actorName) - val trackerActor = env.actorSystem.actorFor(url) - val timeout = 5.seconds - - override def preStart() { - val future = trackerActor.ask(RegisterReceiver(streamId, self))(timeout) - Await.result(future, timeout) - } - - override def receive = { - case BlockPublished(blockId) => - newBlocks += blockId - val future = trackerActor ! GotBlockIds(streamId, Array(blockId)) - - case GetBlockIds(time) => - logInfo("Got request for block IDs for " + time) - sender ! GotBlockIds(streamId, newBlocks.toArray) - newBlocks.clear() - - case StopReceiver => - receivingThread.interrupt() - sender ! true - } - - } } diff --git a/streaming/src/main/scala/spark/streaming/StreamingContext.scala b/streaming/src/main/scala/spark/streaming/StreamingContext.scala index 7022056f7c..1dc5614a5c 100644 --- a/streaming/src/main/scala/spark/streaming/StreamingContext.scala +++ b/streaming/src/main/scala/spark/streaming/StreamingContext.scala @@ -95,7 +95,7 @@ class StreamingContext ( port: Int, converter: (InputStream) => Iterator[T] ): DStream[T] = { - val inputStream = new ObjectInputDStream[T](this, hostname, port, converter) + val inputStream = new ObjectInputDStream[T](this, hostname, port, converter, StorageLevel.DISK_AND_MEMORY_2) graph.addInputStream(inputStream) inputStream } @@ -207,7 +207,7 @@ class StreamingContext ( } /** - * This function starts the execution of the streams. + * This function stops the execution of the streams. */ def stop() { try { From 19191d178d194e4b57094ca868e1cc9c66b8d4a7 Mon Sep 17 00:00:00 2001 From: Tathagata Das Date: Tue, 23 Oct 2012 14:40:24 -0700 Subject: [PATCH 083/226] Renamed the network input streams. 
--- ...DStream.scala => SocketInputDStream.scala} | 2 +- .../spark/streaming/StreamingContext.scala | 63 ++++++++++--------- .../spark/streaming/examples/CountRaw.scala | 2 +- .../spark/streaming/examples/GrepRaw.scala | 2 +- .../streaming/examples/TopKWordCountRaw.scala | 2 +- .../streaming/examples/WordCountNetwork.scala | 2 +- .../streaming/examples/WordCountRaw.scala | 2 +- 7 files changed, 40 insertions(+), 35 deletions(-) rename streaming/src/main/scala/spark/streaming/{ObjectInputDStream.scala => SocketInputDStream.scala} (99%) diff --git a/streaming/src/main/scala/spark/streaming/ObjectInputDStream.scala b/streaming/src/main/scala/spark/streaming/SocketInputDStream.scala similarity index 99% rename from streaming/src/main/scala/spark/streaming/ObjectInputDStream.scala rename to streaming/src/main/scala/spark/streaming/SocketInputDStream.scala index 89aeeda8b3..4dbf421687 100644 --- a/streaming/src/main/scala/spark/streaming/ObjectInputDStream.scala +++ b/streaming/src/main/scala/spark/streaming/SocketInputDStream.scala @@ -9,7 +9,7 @@ import java.util.concurrent.ArrayBlockingQueue import scala.collection.mutable.ArrayBuffer -class ObjectInputDStream[T: ClassManifest]( +class SocketInputDStream[T: ClassManifest]( @transient ssc_ : StreamingContext, host: String, port: Int, diff --git a/streaming/src/main/scala/spark/streaming/StreamingContext.scala b/streaming/src/main/scala/spark/streaming/StreamingContext.scala index 1dc5614a5c..90654cdad9 100644 --- a/streaming/src/main/scala/spark/streaming/StreamingContext.scala +++ b/streaming/src/main/scala/spark/streaming/StreamingContext.scala @@ -86,21 +86,26 @@ class StreamingContext ( private[streaming] def getNewNetworkStreamId() = nextNetworkInputStreamId.getAndIncrement() - def createNetworkTextStream(hostname: String, port: Int): DStream[String] = { - createNetworkObjectStream[String](hostname, port, ObjectInputReceiver.bytesToLines) + def networkTextStream( + hostname: String, + port: Int, + storageLevel: StorageLevel = StorageLevel.DISK_AND_MEMORY_2 + ): DStream[String] = { + networkStream[String](hostname, port, ObjectInputReceiver.bytesToLines, storageLevel) } - - def createNetworkObjectStream[T: ClassManifest]( - hostname: String, - port: Int, - converter: (InputStream) => Iterator[T] + + def networkStream[T: ClassManifest]( + hostname: String, + port: Int, + converter: (InputStream) => Iterator[T], + storageLevel: StorageLevel ): DStream[T] = { - val inputStream = new ObjectInputDStream[T](this, hostname, port, converter, StorageLevel.DISK_AND_MEMORY_2) + val inputStream = new SocketInputDStream[T](this, hostname, port, converter, storageLevel) graph.addInputStream(inputStream) inputStream } - - def createRawNetworkStream[T: ClassManifest]( + + def rawNetworkStream[T: ClassManifest]( hostname: String, port: Int, storageLevel: StorageLevel = StorageLevel.MEMORY_ONLY_2 @@ -109,26 +114,26 @@ class StreamingContext ( graph.addInputStream(inputStream) inputStream } - + /* def createHttpTextStream(url: String): DStream[String] = { createHttpStream(url, ObjectInputReceiver.bytesToLines) } - + def createHttpStream[T: ClassManifest]( - url: String, + url: String, converter: (InputStream) => Iterator[T] ): DStream[T] = { } */ - /** + /** * This function creates a input stream that monitors a Hadoop-compatible * for new files and executes the necessary processing on them. 
- */ + */ def createFileStream[ - K: ClassManifest, - V: ClassManifest, + K: ClassManifest, + V: ClassManifest, F <: NewInputFormat[K, V]: ClassManifest ](directory: String): DStream[(K, V)] = { val inputStream = new FileInputDStream[K, V, F](this, directory) @@ -139,13 +144,13 @@ class StreamingContext ( def createTextFileStream(directory: String): DStream[String] = { createFileStream[LongWritable, Text, TextInputFormat](directory).map(_._2.toString) } - + /** * This function create a input stream from an queue of RDDs. In each batch, - * it will process either one or all of the RDDs returned by the queue + * it will process either one or all of the RDDs returned by the queue */ def createQueueStream[T: ClassManifest]( - queue: Queue[RDD[T]], + queue: Queue[RDD[T]], oneAtATime: Boolean = true, defaultRDD: RDD[T] = null ): DStream[T] = { @@ -153,7 +158,7 @@ class StreamingContext ( graph.addInputStream(inputStream) inputStream } - + def createQueueStream[T: ClassManifest](array: Array[RDD[T]]): DStream[T] = { val queue = new Queue[RDD[T]] val inputStream = createQueueStream(queue, true, null) @@ -172,27 +177,27 @@ class StreamingContext ( /** * This function registers a DStream as an output stream that will be * computed every interval. - */ + */ def registerOutputStream(outputStream: DStream[_]) { graph.addOutputStream(outputStream) } - + def validate() { assert(graph != null, "Graph is null") graph.validate() } /** - * This function starts the execution of the streams. - */ + * This function starts the execution of the streams. + */ def start() { validate() val networkInputStreams = graph.getInputStreams().filter(s => s match { - case n: NetworkInputDStream[_] => true + case n: NetworkInputDStream[_] => true case _ => false }).map(_.asInstanceOf[NetworkInputDStream[_]]).toArray - + if (networkInputStreams.length > 0) { // Start the network input tracker (must start before receivers) networkInputTracker = new NetworkInputTracker(this, networkInputStreams) @@ -203,9 +208,9 @@ class StreamingContext ( // Start the scheduler scheduler = new Scheduler(this) - scheduler.start() + scheduler.start() } - + /** * This function stops the execution of the streams. 
*/ diff --git a/streaming/src/main/scala/spark/streaming/examples/CountRaw.scala b/streaming/src/main/scala/spark/streaming/examples/CountRaw.scala index ed571d22e3..d2fdabd659 100644 --- a/streaming/src/main/scala/spark/streaming/examples/CountRaw.scala +++ b/streaming/src/main/scala/spark/streaming/examples/CountRaw.scala @@ -24,7 +24,7 @@ object CountRaw { ssc.sc.parallelize(1 to 1000, 1000).count() val rawStreams = (1 to numStreams).map(_ => - ssc.createRawNetworkStream[String](host, port, StorageLevel.MEMORY_ONLY_2)).toArray + ssc.rawNetworkStream[String](host, port, StorageLevel.MEMORY_ONLY_2)).toArray val union = new UnionDStream(rawStreams) union.map(_.length + 2).reduce(_ + _).foreachRDD(r => println("Byte count: " + r.collect().mkString)) ssc.start() diff --git a/streaming/src/main/scala/spark/streaming/examples/GrepRaw.scala b/streaming/src/main/scala/spark/streaming/examples/GrepRaw.scala index 6af1c36891..b1e1a613fe 100644 --- a/streaming/src/main/scala/spark/streaming/examples/GrepRaw.scala +++ b/streaming/src/main/scala/spark/streaming/examples/GrepRaw.scala @@ -24,7 +24,7 @@ object GrepRaw { ssc.sc.parallelize(1 to 1000, 1000).count() val rawStreams = (1 to numStreams).map(_ => - ssc.createRawNetworkStream[String](host, port, StorageLevel.MEMORY_ONLY_2)).toArray + ssc.rawNetworkStream[String](host, port, StorageLevel.MEMORY_ONLY_2)).toArray val union = new UnionDStream(rawStreams) union.filter(_.contains("Culpepper")).count().foreachRDD(r => println("Grep count: " + r.collect().mkString)) diff --git a/streaming/src/main/scala/spark/streaming/examples/TopKWordCountRaw.scala b/streaming/src/main/scala/spark/streaming/examples/TopKWordCountRaw.scala index af0a3bf98a..9d1b0b9eb4 100644 --- a/streaming/src/main/scala/spark/streaming/examples/TopKWordCountRaw.scala +++ b/streaming/src/main/scala/spark/streaming/examples/TopKWordCountRaw.scala @@ -36,7 +36,7 @@ object TopKWordCountRaw { moreWarmup(ssc.sc) val rawStreams = (1 to streams).map(_ => - ssc.createRawNetworkStream[String](host, port, StorageLevel.MEMORY_ONLY_2)).toArray + ssc.rawNetworkStream[String](host, port, StorageLevel.MEMORY_ONLY_2)).toArray val union = new UnionDStream(rawStreams) val windowedCounts = union.mapPartitions(splitAndCountPartitions) diff --git a/streaming/src/main/scala/spark/streaming/examples/WordCountNetwork.scala b/streaming/src/main/scala/spark/streaming/examples/WordCountNetwork.scala index 0aa5294a17..ba1bd1de7c 100644 --- a/streaming/src/main/scala/spark/streaming/examples/WordCountNetwork.scala +++ b/streaming/src/main/scala/spark/streaming/examples/WordCountNetwork.scala @@ -16,7 +16,7 @@ object WordCountNetwork { // Create a NetworkInputDStream on target ip:port and count the // words in input stream of \n delimited test (eg. 
generated by 'nc') - val lines = ssc.createNetworkTextStream(args(1), args(2).toInt) + val lines = ssc.networkTextStream(args(1), args(2).toInt) val words = lines.flatMap(_.split(" ")) val wordCounts = words.map(x => (x, 1)).reduceByKey(_ + _) wordCounts.print() diff --git a/streaming/src/main/scala/spark/streaming/examples/WordCountRaw.scala b/streaming/src/main/scala/spark/streaming/examples/WordCountRaw.scala index 98bafec529..d8a0664d7d 100644 --- a/streaming/src/main/scala/spark/streaming/examples/WordCountRaw.scala +++ b/streaming/src/main/scala/spark/streaming/examples/WordCountRaw.scala @@ -36,7 +36,7 @@ object WordCountRaw { moreWarmup(ssc.sc) val rawStreams = (1 to streams).map(_ => - ssc.createRawNetworkStream[String](host, port, StorageLevel.MEMORY_ONLY_2)).toArray + ssc.rawNetworkStream[String](host, port, StorageLevel.MEMORY_ONLY_2)).toArray val union = new UnionDStream(rawStreams) val windowedCounts = union.mapPartitions(splitAndCountPartitions) From c2731dd3effe780d7f37487f8cbd27179055ebee Mon Sep 17 00:00:00 2001 From: Tathagata Das Date: Tue, 23 Oct 2012 15:10:27 -0700 Subject: [PATCH 084/226] Updated StateDStream api to use Options instead of nulls. --- .../spark/streaming/PairDStreamFunctions.scala | 14 +++++++------- .../main/scala/spark/streaming/StateDStream.scala | 15 +++++++++------ .../scala/spark/streaming/DStreamBasicSuite.scala | 7 ++----- 3 files changed, 18 insertions(+), 18 deletions(-) diff --git a/streaming/src/main/scala/spark/streaming/PairDStreamFunctions.scala b/streaming/src/main/scala/spark/streaming/PairDStreamFunctions.scala index 5de57eb2fd..ce1f4ad0a0 100644 --- a/streaming/src/main/scala/spark/streaming/PairDStreamFunctions.scala +++ b/streaming/src/main/scala/spark/streaming/PairDStreamFunctions.scala @@ -156,30 +156,30 @@ extends Serializable { // // def updateStateByKey[S <: AnyRef : ClassManifest]( - updateFunc: (Seq[V], S) => S + updateFunc: (Seq[V], Option[S]) => Option[S] ): DStream[(K, S)] = { updateStateByKey(updateFunc, defaultPartitioner()) } def updateStateByKey[S <: AnyRef : ClassManifest]( - updateFunc: (Seq[V], S) => S, + updateFunc: (Seq[V], Option[S]) => Option[S], numPartitions: Int ): DStream[(K, S)] = { updateStateByKey(updateFunc, defaultPartitioner(numPartitions)) } def updateStateByKey[S <: AnyRef : ClassManifest]( - updateFunc: (Seq[V], S) => S, + updateFunc: (Seq[V], Option[S]) => Option[S], partitioner: Partitioner ): DStream[(K, S)] = { - val func = (iterator: Iterator[(K, Seq[V], S)]) => { - iterator.map(tuple => (tuple._1, updateFunc(tuple._2, tuple._3))) + val newUpdateFunc = (iterator: Iterator[(K, Seq[V], Option[S])]) => { + iterator.flatMap(t => updateFunc(t._2, t._3).map(s => (t._1, s))) } - updateStateByKey(func, partitioner, true) + updateStateByKey(newUpdateFunc, partitioner, true) } def updateStateByKey[S <: AnyRef : ClassManifest]( - updateFunc: (Iterator[(K, Seq[V], S)]) => Iterator[(K, S)], + updateFunc: (Iterator[(K, Seq[V], Option[S])]) => Iterator[(K, S)], partitioner: Partitioner, rememberPartitioner: Boolean ): DStream[(K, S)] = { diff --git a/streaming/src/main/scala/spark/streaming/StateDStream.scala b/streaming/src/main/scala/spark/streaming/StateDStream.scala index d223f25dfc..3ba8fb45fb 100644 --- a/streaming/src/main/scala/spark/streaming/StateDStream.scala +++ b/streaming/src/main/scala/spark/streaming/StateDStream.scala @@ -8,14 +8,17 @@ import spark.SparkContext._ import spark.storage.StorageLevel -class StateRDD[U: ClassManifest, T: ClassManifest](prev: RDD[T], f: Iterator[T] => 
Iterator[U], rememberPartitioner: Boolean) - extends MapPartitionsRDD[U, T](prev, f) { +class StateRDD[U: ClassManifest, T: ClassManifest]( + prev: RDD[T], + f: Iterator[T] => Iterator[U], + rememberPartitioner: Boolean + ) extends MapPartitionsRDD[U, T](prev, f) { override val partitioner = if (rememberPartitioner) prev.partitioner else None } class StateDStream[K: ClassManifest, V: ClassManifest, S <: AnyRef : ClassManifest]( - @transient parent: DStream[(K, V)], - updateFunc: (Iterator[(K, Seq[V], S)]) => Iterator[(K, S)], + parent: DStream[(K, V)], + updateFunc: (Iterator[(K, Seq[V], Option[S])]) => Iterator[(K, S)], partitioner: Partitioner, rememberPartitioner: Boolean ) extends DStream[(K, S)](parent.ssc) { @@ -82,7 +85,7 @@ class StateDStream[K: ClassManifest, V: ClassManifest, S <: AnyRef : ClassManife val updateFuncLocal = updateFunc val finalFunc = (iterator: Iterator[(K, (Seq[V], Seq[S]))]) => { val i = iterator.map(t => { - (t._1, t._2._1, t._2._2.headOption.getOrElse(null.asInstanceOf[S])) + (t._1, t._2._1, t._2._2.headOption) }) updateFuncLocal(i) } @@ -108,7 +111,7 @@ class StateDStream[K: ClassManifest, V: ClassManifest, S <: AnyRef : ClassManife // and then apply the update function val updateFuncLocal = updateFunc val finalFunc = (iterator: Iterator[(K, Seq[V])]) => { - updateFuncLocal(iterator.map(tuple => (tuple._1, tuple._2, null.asInstanceOf[S]))) + updateFuncLocal(iterator.map(tuple => (tuple._1, tuple._2, None))) } val groupedRDD = parentRDD.groupByKey(partitioner) diff --git a/streaming/src/test/scala/spark/streaming/DStreamBasicSuite.scala b/streaming/src/test/scala/spark/streaming/DStreamBasicSuite.scala index db95c2cfaa..290a216797 100644 --- a/streaming/src/test/scala/spark/streaming/DStreamBasicSuite.scala +++ b/streaming/src/test/scala/spark/streaming/DStreamBasicSuite.scala @@ -149,11 +149,8 @@ class DStreamBasicSuite extends DStreamSuiteBase { ) val updateStateOperation = (s: DStream[String]) => { - val updateFunc = (values: Seq[Int], state: RichInt) => { - var newState = 0 - if (values != null && values.size > 0) newState += values.reduce(_ + _) - if (state != null) newState += state.self - new RichInt(newState) + val updateFunc = (values: Seq[Int], state: Option[RichInt]) => { + Some(new RichInt(values.foldLeft(0)(_ + _) + state.map(_.self).getOrElse(0))) } s.map(x => (x, 1)).updateStateByKey[RichInt](updateFunc).map(t => (t._1, t._2.self)) } From 0e5d9be4dfe0d072db8410fe6d254555bba9367d Mon Sep 17 00:00:00 2001 From: Tathagata Das Date: Tue, 23 Oct 2012 15:17:05 -0700 Subject: [PATCH 085/226] Renamed APIs to create queueStream and fileStream. --- .../scala/spark/streaming/StreamingContext.scala | 12 ++++++------ .../scala/spark/streaming/examples/FileStream.scala | 2 +- .../examples/FileStreamWithCheckpoint.scala | 2 +- .../scala/spark/streaming/examples/QueueStream.scala | 2 +- .../spark/streaming/examples/WordCountHdfs.scala | 2 +- 5 files changed, 10 insertions(+), 10 deletions(-) diff --git a/streaming/src/main/scala/spark/streaming/StreamingContext.scala b/streaming/src/main/scala/spark/streaming/StreamingContext.scala index 90654cdad9..228f1a3616 100644 --- a/streaming/src/main/scala/spark/streaming/StreamingContext.scala +++ b/streaming/src/main/scala/spark/streaming/StreamingContext.scala @@ -131,7 +131,7 @@ class StreamingContext ( * This function creates a input stream that monitors a Hadoop-compatible * for new files and executes the necessary processing on them. 
*/ - def createFileStream[ + def fileStream[ K: ClassManifest, V: ClassManifest, F <: NewInputFormat[K, V]: ClassManifest @@ -141,15 +141,15 @@ class StreamingContext ( inputStream } - def createTextFileStream(directory: String): DStream[String] = { - createFileStream[LongWritable, Text, TextInputFormat](directory).map(_._2.toString) + def textFileStream(directory: String): DStream[String] = { + fileStream[LongWritable, Text, TextInputFormat](directory).map(_._2.toString) } /** * This function create a input stream from an queue of RDDs. In each batch, * it will process either one or all of the RDDs returned by the queue */ - def createQueueStream[T: ClassManifest]( + def queueStream[T: ClassManifest]( queue: Queue[RDD[T]], oneAtATime: Boolean = true, defaultRDD: RDD[T] = null @@ -159,9 +159,9 @@ class StreamingContext ( inputStream } - def createQueueStream[T: ClassManifest](array: Array[RDD[T]]): DStream[T] = { + def queueStream[T: ClassManifest](array: Array[RDD[T]]): DStream[T] = { val queue = new Queue[RDD[T]] - val inputStream = createQueueStream(queue, true, null) + val inputStream = queueStream(queue, true, null) queue ++= array inputStream } diff --git a/streaming/src/main/scala/spark/streaming/examples/FileStream.scala b/streaming/src/main/scala/spark/streaming/examples/FileStream.scala index 301da56014..d68611abd6 100644 --- a/streaming/src/main/scala/spark/streaming/examples/FileStream.scala +++ b/streaming/src/main/scala/spark/streaming/examples/FileStream.scala @@ -27,7 +27,7 @@ object FileStream { // Create the FileInputDStream on the directory and use the // stream to count words in new files created - val inputStream = ssc.createTextFileStream(directory.toString) + val inputStream = ssc.textFileStream(directory.toString) val words = inputStream.flatMap(_.split(" ")) val wordCounts = words.map(x => (x, 1)).reduceByKey(_ + _) wordCounts.print() diff --git a/streaming/src/main/scala/spark/streaming/examples/FileStreamWithCheckpoint.scala b/streaming/src/main/scala/spark/streaming/examples/FileStreamWithCheckpoint.scala index c725035a8a..df96a811da 100644 --- a/streaming/src/main/scala/spark/streaming/examples/FileStreamWithCheckpoint.scala +++ b/streaming/src/main/scala/spark/streaming/examples/FileStreamWithCheckpoint.scala @@ -37,7 +37,7 @@ object FileStreamWithCheckpoint { ssc_.setCheckpointDetails(checkpointFile, Seconds(1)) // Setup the streaming computation - val inputStream = ssc_.createTextFileStream(directory.toString) + val inputStream = ssc_.textFileStream(directory.toString) val words = inputStream.flatMap(_.split(" ")) val wordCounts = words.map(x => (x, 1)).reduceByKey(_ + _) wordCounts.print() diff --git a/streaming/src/main/scala/spark/streaming/examples/QueueStream.scala b/streaming/src/main/scala/spark/streaming/examples/QueueStream.scala index ae701bba6d..2af51bad28 100644 --- a/streaming/src/main/scala/spark/streaming/examples/QueueStream.scala +++ b/streaming/src/main/scala/spark/streaming/examples/QueueStream.scala @@ -24,7 +24,7 @@ object QueueStream { val rddQueue = new SynchronizedQueue[RDD[Int]]() // Create the QueueInputDStream and use it do some processing - val inputStream = ssc.createQueueStream(rddQueue) + val inputStream = ssc.queueStream(rddQueue) val mappedStream = inputStream.map(x => (x % 10, 1)) val reducedStream = mappedStream.reduceByKey(_ + _) reducedStream.print() diff --git a/streaming/src/main/scala/spark/streaming/examples/WordCountHdfs.scala b/streaming/src/main/scala/spark/streaming/examples/WordCountHdfs.scala index 
3b86948822..591cb141c3 100644 --- a/streaming/src/main/scala/spark/streaming/examples/WordCountHdfs.scala +++ b/streaming/src/main/scala/spark/streaming/examples/WordCountHdfs.scala @@ -16,7 +16,7 @@ object WordCountHdfs { // Create the FileInputDStream on the directory and use the // stream to count words in new files created - val lines = ssc.createTextFileStream(args(1)) + val lines = ssc.textFileStream(args(1)) val words = lines.flatMap(_.split(" ")) val wordCounts = words.map(x => (x, 1)).reduceByKey(_ + _) wordCounts.print() From 020d6434844b22c2fe611303b338eaf53397c9db Mon Sep 17 00:00:00 2001 From: Tathagata Das Date: Tue, 23 Oct 2012 16:24:05 -0700 Subject: [PATCH 086/226] Renamed the streaming testsuites. --- .../{DStreamBasicSuite.scala => BasicOperationsSuite.scala} | 4 +++- .../src/test/scala/spark/streaming/CheckpointSuite.scala | 2 +- .../streaming/{DStreamSuiteBase.scala => TestSuiteBase.scala} | 4 ++-- .../{DStreamWindowSuite.scala => WindowOperationsSuite.scala} | 4 ++-- 4 files changed, 8 insertions(+), 6 deletions(-) rename streaming/src/test/scala/spark/streaming/{DStreamBasicSuite.scala => BasicOperationsSuite.scala} (98%) rename streaming/src/test/scala/spark/streaming/{DStreamSuiteBase.scala => TestSuiteBase.scala} (98%) rename streaming/src/test/scala/spark/streaming/{DStreamWindowSuite.scala => WindowOperationsSuite.scala} (97%) diff --git a/streaming/src/test/scala/spark/streaming/DStreamBasicSuite.scala b/streaming/src/test/scala/spark/streaming/BasicOperationsSuite.scala similarity index 98% rename from streaming/src/test/scala/spark/streaming/DStreamBasicSuite.scala rename to streaming/src/test/scala/spark/streaming/BasicOperationsSuite.scala index 290a216797..d0aaac0f2e 100644 --- a/streaming/src/test/scala/spark/streaming/DStreamBasicSuite.scala +++ b/streaming/src/test/scala/spark/streaming/BasicOperationsSuite.scala @@ -4,7 +4,9 @@ import spark.streaming.StreamingContext._ import scala.runtime.RichInt import util.ManualClock -class DStreamBasicSuite extends DStreamSuiteBase { +class BasicOperationsSuite extends TestSuiteBase { + + override def framework() = "BasicOperationsSuite" test("map") { val input = Seq(1 to 4, 5 to 8, 9 to 12) diff --git a/streaming/src/test/scala/spark/streaming/CheckpointSuite.scala b/streaming/src/test/scala/spark/streaming/CheckpointSuite.scala index 061b331a16..6dcedcf463 100644 --- a/streaming/src/test/scala/spark/streaming/CheckpointSuite.scala +++ b/streaming/src/test/scala/spark/streaming/CheckpointSuite.scala @@ -3,7 +3,7 @@ package spark.streaming import spark.streaming.StreamingContext._ import java.io.File -class CheckpointSuite extends DStreamSuiteBase { +class CheckpointSuite extends TestSuiteBase { override def framework() = "CheckpointSuite" diff --git a/streaming/src/test/scala/spark/streaming/DStreamSuiteBase.scala b/streaming/src/test/scala/spark/streaming/TestSuiteBase.scala similarity index 98% rename from streaming/src/test/scala/spark/streaming/DStreamSuiteBase.scala rename to streaming/src/test/scala/spark/streaming/TestSuiteBase.scala index 2a4b37c965..c1b7772e7b 100644 --- a/streaming/src/test/scala/spark/streaming/DStreamSuiteBase.scala +++ b/streaming/src/test/scala/spark/streaming/TestSuiteBase.scala @@ -33,11 +33,11 @@ class TestOutputStream[T: ClassManifest](parent: DStream[T], val output: ArrayBu output += collected }) -trait DStreamSuiteBase extends FunSuite with Logging { +trait TestSuiteBase extends FunSuite with Logging { System.setProperty("spark.streaming.clock", 
"spark.streaming.util.ManualClock") - def framework() = "DStreamSuiteBase" + def framework() = "TestSuiteBase" def master() = "local[2]" diff --git a/streaming/src/test/scala/spark/streaming/DStreamWindowSuite.scala b/streaming/src/test/scala/spark/streaming/WindowOperationsSuite.scala similarity index 97% rename from streaming/src/test/scala/spark/streaming/DStreamWindowSuite.scala rename to streaming/src/test/scala/spark/streaming/WindowOperationsSuite.scala index cfcab6298d..90d67844bb 100644 --- a/streaming/src/test/scala/spark/streaming/DStreamWindowSuite.scala +++ b/streaming/src/test/scala/spark/streaming/WindowOperationsSuite.scala @@ -2,9 +2,9 @@ package spark.streaming import spark.streaming.StreamingContext._ -class DStreamWindowSuite extends DStreamSuiteBase { +class WindowOperationsSuite extends TestSuiteBase { - override def framework() = "DStreamWindowSuite" + override def framework() = "WindowOperationsSuite" override def maxWaitTimeMillis() = 20000 From 1ef6ea25135fd33a7913944628b67f24c87db1f5 Mon Sep 17 00:00:00 2001 From: Tathagata Das Date: Wed, 24 Oct 2012 14:44:20 -0700 Subject: [PATCH 087/226] Added tests for testing network input stream. --- .../main/scala/spark/streaming/DStream.scala | 5 +- .../spark/streaming/NetworkInputTracker.scala | 9 +- .../spark/streaming/SocketInputDStream.scala | 17 ++- .../spark/streaming/StreamingContext.scala | 14 +-- .../spark/streaming/InputStreamsSuite.scala | 112 ++++++++++++++++++ 5 files changed, 134 insertions(+), 23 deletions(-) create mode 100644 streaming/src/test/scala/spark/streaming/InputStreamsSuite.scala diff --git a/streaming/src/main/scala/spark/streaming/DStream.scala b/streaming/src/main/scala/spark/streaming/DStream.scala index 38bb7c8b94..4bc063719c 100644 --- a/streaming/src/main/scala/spark/streaming/DStream.scala +++ b/streaming/src/main/scala/spark/streaming/DStream.scala @@ -93,8 +93,9 @@ extends Serializable with Logging { * its parent DStreams. */ protected[streaming] def initialize(time: Time) { - if (zeroTime != null) { - throw new Exception("ZeroTime is already initialized, cannot initialize it again") + if (zeroTime != null && zeroTime != time) { + throw new Exception("ZeroTime is already initialized to " + zeroTime + + ", cannot initialize it again to " + time) } zeroTime = time dependencies.foreach(_.initialize(zeroTime)) diff --git a/streaming/src/main/scala/spark/streaming/NetworkInputTracker.scala b/streaming/src/main/scala/spark/streaming/NetworkInputTracker.scala index 9b1b8813de..07ef79415d 100644 --- a/streaming/src/main/scala/spark/streaming/NetworkInputTracker.scala +++ b/streaming/src/main/scala/spark/streaming/NetworkInputTracker.scala @@ -108,10 +108,11 @@ class NetworkInputTracker( } def stopReceivers() { - implicit val ec = env.actorSystem.dispatcher - val listOfFutures = receiverInfo.values.map(_.ask(StopReceiver)(timeout)).toList - val futureOfList = Future.sequence(listOfFutures) - Await.result(futureOfList, timeout) + //implicit val ec = env.actorSystem.dispatcher + receiverInfo.values.foreach(_ ! 
StopReceiver) + //val listOfFutures = receiverInfo.values.map(_.ask(StopReceiver)(timeout)).toList + //val futureOfList = Future.sequence(listOfFutures) + //Await.result(futureOfList, timeout) } } } diff --git a/streaming/src/main/scala/spark/streaming/SocketInputDStream.scala b/streaming/src/main/scala/spark/streaming/SocketInputDStream.scala index 4dbf421687..8ff7865ca4 100644 --- a/streaming/src/main/scala/spark/streaming/SocketInputDStream.scala +++ b/streaming/src/main/scala/spark/streaming/SocketInputDStream.scala @@ -18,12 +18,12 @@ class SocketInputDStream[T: ClassManifest]( ) extends NetworkInputDStream[T](ssc_) { def createReceiver(): NetworkReceiver[T] = { - new ObjectInputReceiver(id, host, port, bytesToObjects, storageLevel) + new SocketReceiver(id, host, port, bytesToObjects, storageLevel) } } -class ObjectInputReceiver[T: ClassManifest]( +class SocketReceiver[T: ClassManifest]( streamId: Int, host: String, port: Int, @@ -120,7 +120,12 @@ class ObjectInputReceiver[T: ClassManifest]( } -object ObjectInputReceiver { +object SocketReceiver { + + /** + * This methods translates the data from an inputstream (say, from a socket) + * to '\n' delimited strings and returns an iterator to access the strings. + */ def bytesToLines(inputStream: InputStream): Iterator[String] = { val bufferedInputStream = new BufferedInputStream(inputStream) val dataInputStream = new DataInputStream(bufferedInputStream) @@ -133,7 +138,11 @@ object ObjectInputReceiver { private def getNext() { try { nextValue = dataInputStream.readLine() - println("[" + nextValue + "]") + if (nextValue != null) { + println("[" + nextValue + "]") + } else { + gotNext = false + } } catch { case eof: EOFException => finished = true diff --git a/streaming/src/main/scala/spark/streaming/StreamingContext.scala b/streaming/src/main/scala/spark/streaming/StreamingContext.scala index 228f1a3616..e124b8cfa0 100644 --- a/streaming/src/main/scala/spark/streaming/StreamingContext.scala +++ b/streaming/src/main/scala/spark/streaming/StreamingContext.scala @@ -91,7 +91,7 @@ class StreamingContext ( port: Int, storageLevel: StorageLevel = StorageLevel.DISK_AND_MEMORY_2 ): DStream[String] = { - networkStream[String](hostname, port, ObjectInputReceiver.bytesToLines, storageLevel) + networkStream[String](hostname, port, SocketReceiver.bytesToLines, storageLevel) } def networkStream[T: ClassManifest]( @@ -115,18 +115,6 @@ class StreamingContext ( inputStream } - /* - def createHttpTextStream(url: String): DStream[String] = { - createHttpStream(url, ObjectInputReceiver.bytesToLines) - } - - def createHttpStream[T: ClassManifest]( - url: String, - converter: (InputStream) => Iterator[T] - ): DStream[T] = { - } - */ - /** * This function creates a input stream that monitors a Hadoop-compatible * for new files and executes the necessary processing on them. 
diff --git a/streaming/src/test/scala/spark/streaming/InputStreamsSuite.scala b/streaming/src/test/scala/spark/streaming/InputStreamsSuite.scala new file mode 100644 index 0000000000..dd872059ea --- /dev/null +++ b/streaming/src/test/scala/spark/streaming/InputStreamsSuite.scala @@ -0,0 +1,112 @@ +package spark.streaming + +import java.net.{SocketException, Socket, ServerSocket} +import java.io.{BufferedWriter, OutputStreamWriter} +import java.util.concurrent.{TimeUnit, ArrayBlockingQueue} +import collection.mutable.{SynchronizedBuffer, ArrayBuffer} +import util.ManualClock +import spark.storage.StorageLevel + + +class InputStreamsSuite extends TestSuiteBase { + + test("network input stream") { + val serverPort = 9999 + val server = new TestServer(9999) + server.start() + val ssc = new StreamingContext(master, framework) + ssc.setBatchDuration(batchDuration) + + val networkStream = ssc.networkTextStream("localhost", serverPort, StorageLevel.DISK_AND_MEMORY) + val outputBuffer = new ArrayBuffer[Seq[String]] with SynchronizedBuffer[Seq[String ]] + val outputStream = new TestOutputStream(networkStream, outputBuffer) + ssc.registerOutputStream(outputStream) + ssc.start() + + val clock = ssc.scheduler.clock.asInstanceOf[ManualClock] + val input = Seq(1, 2, 3) + val expectedOutput = input.map(_.toString) + for (i <- 0 until input.size) { + server.send(input(i).toString + "\n") + Thread.sleep(1000) + clock.addToTime(1000) + } + val startTime = System.currentTimeMillis() + while (outputBuffer.size < expectedOutput.size && System.currentTimeMillis() - startTime < maxWaitTimeMillis) { + logInfo("output.size = " + outputBuffer.size + ", expectedOutput.size = " + expectedOutput.size) + Thread.sleep(100) + } + Thread.sleep(5000) + val timeTaken = System.currentTimeMillis() - startTime + assert(timeTaken < maxWaitTimeMillis, "Operation timed out after " + timeTaken + " ms") + + ssc.stop() + server.stop() + + assert(outputBuffer.size === expectedOutput.size) + for (i <- 0 until outputBuffer.size) { + assert(outputBuffer(i).size === 1) + assert(outputBuffer(i).head === expectedOutput(i)) + } + } +} + + +class TestServer(port: Int) { + + val queue = new ArrayBlockingQueue[String](100) + + val serverSocket = new ServerSocket(port) + + val servingThread = new Thread() { + override def run() { + try { + while(true) { + println("Accepting connections on port " + port) + val clientSocket = serverSocket.accept() + println("New connection") + try { + clientSocket.setTcpNoDelay(true) + val outputStream = new BufferedWriter(new OutputStreamWriter(clientSocket.getOutputStream)) + + while(clientSocket.isConnected) { + val msg = queue.poll(100, TimeUnit.MILLISECONDS) + if (msg != null) { + outputStream.write(msg) + outputStream.flush() + println("Message '" + msg + "' sent") + } + } + } catch { + case e: SocketException => println(e) + } finally { + println("Connection closed") + if (!clientSocket.isClosed) clientSocket.close() + } + } + } catch { + case ie: InterruptedException => + + } finally { + serverSocket.close() + } + } + } + + def start() { servingThread.start() } + + def send(msg: String) { queue.add(msg) } + + def stop() { servingThread.interrupt() } +} + +object TestServer { + def main(args: Array[String]) { + val s = new TestServer(9999) + s.start() + while(true) { + Thread.sleep(1000) + s.send("hello") + } + } +} From ed71df46cddc9a4f1363b937c10bfa2a928e564c Mon Sep 17 00:00:00 2001 From: Tathagata Das Date: Wed, 24 Oct 2012 16:49:40 -0700 Subject: [PATCH 088/226] Minor fixes. 
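Among the fixes, SocketReceiver.bytesToLines is rewritten so the iterator terminates cleanly when readLine() returns null instead of relying on an EOFException. A small standalone sketch of the corrected behaviour against an in-memory stream; the object name and the sample data are made up:

import java.io.ByteArrayInputStream
import spark.streaming.SocketReceiver

object BytesToLinesSketch {
  def main(args: Array[String]) {
    val in = new ByteArrayInputStream("apple\nbanana\ncherry\n".getBytes("UTF-8"))
    // bytesToLines lazily yields '\n'-delimited strings and closes the
    // underlying reader once the end of the stream is reached.
    val lines = SocketReceiver.bytesToLines(in)
    lines.foreach(line => println("line: " + line))
  }
}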
--- .../scala/spark/streaming/JobManager.scala | 2 +- .../spark/streaming/NetworkInputDStream.scala | 3 +- .../spark/streaming/SocketInputDStream.scala | 33 +++++++++---------- .../spark/streaming/InputStreamsSuite.scala | 16 +++++---- 4 files changed, 27 insertions(+), 27 deletions(-) diff --git a/streaming/src/main/scala/spark/streaming/JobManager.scala b/streaming/src/main/scala/spark/streaming/JobManager.scala index 230d806a89..9bf9251519 100644 --- a/streaming/src/main/scala/spark/streaming/JobManager.scala +++ b/streaming/src/main/scala/spark/streaming/JobManager.scala @@ -12,7 +12,7 @@ class JobManager(ssc: StreamingContext, numThreads: Int = 1) extends Logging { SparkEnv.set(ssc.env) try { val timeTaken = job.run() - println("Total delay: %.5f s for job %s (execution: %.5f s)".format( + logInfo("Total delay: %.5f s for job %s (execution: %.5f s)".format( (System.currentTimeMillis() - job.time) / 1000.0, job.id, timeTaken / 1000.0)) } catch { case e: Exception => diff --git a/streaming/src/main/scala/spark/streaming/NetworkInputDStream.scala b/streaming/src/main/scala/spark/streaming/NetworkInputDStream.scala index 6b41e4d2c8..5669d7fedf 100644 --- a/streaming/src/main/scala/spark/streaming/NetworkInputDStream.scala +++ b/streaming/src/main/scala/spark/streaming/NetworkInputDStream.scala @@ -74,7 +74,8 @@ abstract class NetworkReceiver[T: ClassManifest](streamId: Int) extends Serializ onStart() } catch { case ie: InterruptedException => - logWarning("Receiving thread interrupted") + logInfo("Receiving thread interrupted") + //println("Receiving thread interrupted") case e: Exception => stopOnError(e) } diff --git a/streaming/src/main/scala/spark/streaming/SocketInputDStream.scala b/streaming/src/main/scala/spark/streaming/SocketInputDStream.scala index 8ff7865ca4..b566200273 100644 --- a/streaming/src/main/scala/spark/streaming/SocketInputDStream.scala +++ b/streaming/src/main/scala/spark/streaming/SocketInputDStream.scala @@ -3,11 +3,12 @@ package spark.streaming import spark.streaming.util.{RecurringTimer, SystemClock} import spark.storage.StorageLevel -import java.io.{EOFException, DataInputStream, BufferedInputStream, InputStream} +import java.io._ import java.net.Socket import java.util.concurrent.ArrayBlockingQueue import scala.collection.mutable.ArrayBuffer +import scala.Serializable class SocketInputDStream[T: ClassManifest]( @transient ssc_ : StreamingContext, @@ -127,8 +128,7 @@ object SocketReceiver { * to '\n' delimited strings and returns an iterator to access the strings. 
*/ def bytesToLines(inputStream: InputStream): Iterator[String] = { - val bufferedInputStream = new BufferedInputStream(inputStream) - val dataInputStream = new DataInputStream(bufferedInputStream) + val dataInputStream = new BufferedReader(new InputStreamReader(inputStream, "UTF-8")) val iterator = new Iterator[String] { var gotNext = false @@ -138,35 +138,32 @@ object SocketReceiver { private def getNext() { try { nextValue = dataInputStream.readLine() - if (nextValue != null) { - println("[" + nextValue + "]") - } else { - gotNext = false - } - } catch { - case eof: EOFException => + if (nextValue == null) { finished = true + } } gotNext = true } override def hasNext: Boolean = { - if (!gotNext) { - getNext() - } - if (finished) { - dataInputStream.close() + if (!finished) { + if (!gotNext) { + getNext() + if (finished) { + dataInputStream.close() + } + } } !finished } override def next(): String = { - if (!gotNext) { - getNext() - } if (finished) { throw new NoSuchElementException("End of stream") } + if (!gotNext) { + getNext() + } gotNext = false nextValue } diff --git a/streaming/src/test/scala/spark/streaming/InputStreamsSuite.scala b/streaming/src/test/scala/spark/streaming/InputStreamsSuite.scala index dd872059ea..a3f213ebd0 100644 --- a/streaming/src/test/scala/spark/streaming/InputStreamsSuite.scala +++ b/streaming/src/test/scala/spark/streaming/InputStreamsSuite.scala @@ -6,6 +6,7 @@ import java.util.concurrent.{TimeUnit, ArrayBlockingQueue} import collection.mutable.{SynchronizedBuffer, ArrayBuffer} import util.ManualClock import spark.storage.StorageLevel +import spark.Logging class InputStreamsSuite extends TestSuiteBase { @@ -39,9 +40,10 @@ class InputStreamsSuite extends TestSuiteBase { Thread.sleep(5000) val timeTaken = System.currentTimeMillis() - startTime assert(timeTaken < maxWaitTimeMillis, "Operation timed out after " + timeTaken + " ms") - - ssc.stop() + logInfo("Stopping server") server.stop() + logInfo("Stopping context") + ssc.stop() assert(outputBuffer.size === expectedOutput.size) for (i <- 0 until outputBuffer.size) { @@ -52,7 +54,7 @@ class InputStreamsSuite extends TestSuiteBase { } -class TestServer(port: Int) { +class TestServer(port: Int) extends Logging { val queue = new ArrayBlockingQueue[String](100) @@ -62,9 +64,9 @@ class TestServer(port: Int) { override def run() { try { while(true) { - println("Accepting connections on port " + port) + logInfo("Accepting connections on port " + port) val clientSocket = serverSocket.accept() - println("New connection") + logInfo("New connection") try { clientSocket.setTcpNoDelay(true) val outputStream = new BufferedWriter(new OutputStreamWriter(clientSocket.getOutputStream)) @@ -74,13 +76,13 @@ class TestServer(port: Int) { if (msg != null) { outputStream.write(msg) outputStream.flush() - println("Message '" + msg + "' sent") + logInfo("Message '" + msg + "' sent") } } } catch { case e: SocketException => println(e) } finally { - println("Connection closed") + logInfo("Connection closed") if (!clientSocket.isClosed) clientSocket.close() } } From 926e05b0300ad2850d48e5692d73c209c1c90100 Mon Sep 17 00:00:00 2001 From: Tathagata Das Date: Wed, 24 Oct 2012 23:14:37 -0700 Subject: [PATCH 089/226] Added tests for the file input stream. 
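For background on what the new test exercises, a minimal application-side sketch of the file-monitoring stream; the watched directory, master URL, and application name are placeholders:

import spark.streaming._
import spark.streaming.StreamingContext._

object TextFileStreamSketch {
  def main(args: Array[String]) {
    val ssc = new StreamingContext("local[2]", "TextFileStreamSketch")
    ssc.setBatchDuration(Seconds(1))

    // textFileStream turns the lines of each newly created file in the
    // directory into records of the next batch.
    val lines = ssc.textFileStream("/tmp/stream-input")  // placeholder directory
    lines.flatMap(_.split(" ")).map(x => (x, 1)).reduceByKey(_ + _).print()
    ssc.start()
  }
}

Only files whose modification time is newer than the last scan are picked up, per the filter in FileInputDStream.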
--- .../spark/streaming/FileInputDStream.scala | 4 +- .../spark/streaming/InputStreamsSuite.scala | 68 +++++++++++++++++-- 2 files changed, 64 insertions(+), 8 deletions(-) diff --git a/streaming/src/main/scala/spark/streaming/FileInputDStream.scala b/streaming/src/main/scala/spark/streaming/FileInputDStream.scala index 78537b8794..69d3504c72 100644 --- a/streaming/src/main/scala/spark/streaming/FileInputDStream.scala +++ b/streaming/src/main/scala/spark/streaming/FileInputDStream.scala @@ -49,7 +49,7 @@ class FileInputDStream[K: ClassManifest, V: ClassManifest, F <: NewInputFormat[K if (!filter.accept(path)) { return false } else { - val modTime = fs.getFileStatus(path).getModificationTime() + val modTime = fs.getFileStatus(path).getModificationTime() if (modTime <= lastModTime) { return false } @@ -60,7 +60,7 @@ class FileInputDStream[K: ClassManifest, V: ClassManifest, F <: NewInputFormat[K } } } - + val newFiles = fs.listStatus(path, newFilter) logInfo("New files: " + newFiles.map(_.getPath).mkString(", ")) if (newFiles.length > 0) { diff --git a/streaming/src/test/scala/spark/streaming/InputStreamsSuite.scala b/streaming/src/test/scala/spark/streaming/InputStreamsSuite.scala index a3f213ebd0..fcf5d22f5c 100644 --- a/streaming/src/test/scala/spark/streaming/InputStreamsSuite.scala +++ b/streaming/src/test/scala/spark/streaming/InputStreamsSuite.scala @@ -1,43 +1,48 @@ package spark.streaming import java.net.{SocketException, Socket, ServerSocket} -import java.io.{BufferedWriter, OutputStreamWriter} +import java.io.{File, BufferedWriter, OutputStreamWriter} import java.util.concurrent.{TimeUnit, ArrayBlockingQueue} import collection.mutable.{SynchronizedBuffer, ArrayBuffer} import util.ManualClock import spark.storage.StorageLevel import spark.Logging +import scala.util.Random +import org.apache.commons.io.FileUtils class InputStreamsSuite extends TestSuiteBase { test("network input stream") { + // Start the server val serverPort = 9999 val server = new TestServer(9999) server.start() + + // Set up the streaming context and input streams val ssc = new StreamingContext(master, framework) ssc.setBatchDuration(batchDuration) - val networkStream = ssc.networkTextStream("localhost", serverPort, StorageLevel.DISK_AND_MEMORY) val outputBuffer = new ArrayBuffer[Seq[String]] with SynchronizedBuffer[Seq[String ]] val outputStream = new TestOutputStream(networkStream, outputBuffer) ssc.registerOutputStream(outputStream) ssc.start() + // Feed data to the server to send to the Spark Streaming network receiver val clock = ssc.scheduler.clock.asInstanceOf[ManualClock] - val input = Seq(1, 2, 3) + val input = Seq(1, 2, 3, 4, 5) val expectedOutput = input.map(_.toString) for (i <- 0 until input.size) { server.send(input(i).toString + "\n") - Thread.sleep(1000) - clock.addToTime(1000) + Thread.sleep(500) + clock.addToTime(batchDuration.milliseconds) } val startTime = System.currentTimeMillis() while (outputBuffer.size < expectedOutput.size && System.currentTimeMillis() - startTime < maxWaitTimeMillis) { logInfo("output.size = " + outputBuffer.size + ", expectedOutput.size = " + expectedOutput.size) Thread.sleep(100) } - Thread.sleep(5000) + Thread.sleep(1000) val timeTaken = System.currentTimeMillis() - startTime assert(timeTaken < maxWaitTimeMillis, "Operation timed out after " + timeTaken + " ms") logInfo("Stopping server") @@ -45,6 +50,57 @@ class InputStreamsSuite extends TestSuiteBase { logInfo("Stopping context") ssc.stop() + // Verify whether data received by Spark Streaming was as expected + 
assert(outputBuffer.size === expectedOutput.size) + for (i <- 0 until outputBuffer.size) { + assert(outputBuffer(i).size === 1) + assert(outputBuffer(i).head === expectedOutput(i)) + } + } + + test("file input stream") { + // Create a temporary directory + val dir = { + var temp = File.createTempFile(".temp.", Random.nextInt().toString) + temp.delete() + temp.mkdirs() + temp.deleteOnExit() + println("Created temp dir " + temp) + temp + } + + // Set up the streaming context and input streams + val ssc = new StreamingContext(master, framework) + ssc.setBatchDuration(batchDuration) + val filestream = ssc.textFileStream(dir.toString) + val outputBuffer = new ArrayBuffer[Seq[String]] with SynchronizedBuffer[Seq[String ]] + val outputStream = new TestOutputStream(filestream, outputBuffer) + ssc.registerOutputStream(outputStream) + ssc.start() + + // Create files in the temporary directory so that Spark Streaming can read data from it + val clock = ssc.scheduler.clock.asInstanceOf[ManualClock] + val input = Seq(1, 2, 3, 4, 5) + val expectedOutput = input.map(_.toString) + Thread.sleep(1000) + for (i <- 0 until input.size) { + FileUtils.writeStringToFile(new File(dir, i.toString), input(i).toString + "\n") + Thread.sleep(500) + clock.addToTime(batchDuration.milliseconds) + Thread.sleep(500) + } + val startTime = System.currentTimeMillis() + while (outputBuffer.size < expectedOutput.size && System.currentTimeMillis() - startTime < maxWaitTimeMillis) { + println("output.size = " + outputBuffer.size + ", expectedOutput.size = " + expectedOutput.size) + Thread.sleep(100) + } + Thread.sleep(1000) + val timeTaken = System.currentTimeMillis() - startTime + assert(timeTaken < maxWaitTimeMillis, "Operation timed out after " + timeTaken + " ms") + println("Stopping context") + ssc.stop() + + // Verify whether data received by Spark Streaming was as expected assert(outputBuffer.size === expectedOutput.size) for (i <- 0 until outputBuffer.size) { assert(outputBuffer(i).size === 1) From 1b900183c8bb4063d8ae7bd5134fdadd52b3a155 Mon Sep 17 00:00:00 2001 From: Tathagata Das Date: Sat, 27 Oct 2012 18:55:50 -0700 Subject: [PATCH 090/226] Added save operations to DStreams. 
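For reference, a minimal usage sketch of the new save operations (not part of the patch; the master string, port, word-count pipeline, and output paths are made up). Each batch's RDD is written to its own directory, named by StreamingContext.rddToFileName as "<prefix>-<batch time in ms>" with an optional ".<suffix>":

    import spark.streaming.StreamingContext
    import spark.streaming.StreamingContext._
    import spark.storage.StorageLevel

    object SaveOpsSketch {
      def main(args: Array[String]) {
        val ssc = new StreamingContext("local", "save-ops-sketch")
        // the batch duration would be set here, as the tests do with ssc.setBatchDuration(...)
        val lines = ssc.networkTextStream("localhost", 9999, StorageLevel.DISK_AND_MEMORY)
        val counts = lines.flatMap(_.split(" ")).map(w => (w, 1)).reduceByKey(_ + _)

        counts.saveAsTextFiles("hdfs:///tmp/wordcounts", "txt")   // one directory per batch
        counts.saveAsObjectFiles("hdfs:///tmp/wordcounts-obj")

        ssc.start()
      }
    }
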
--- .../main/scala/spark/streaming/DStream.scala | 16 +++++ .../streaming/PairDStreamFunctions.scala | 61 ++++++++++++++++++- .../spark/streaming/StreamingContext.scala | 10 +++ 3 files changed, 84 insertions(+), 3 deletions(-) diff --git a/streaming/src/main/scala/spark/streaming/DStream.scala b/streaming/src/main/scala/spark/streaming/DStream.scala index 12d7ba97ea..175ebf104f 100644 --- a/streaming/src/main/scala/spark/streaming/DStream.scala +++ b/streaming/src/main/scala/spark/streaming/DStream.scala @@ -363,6 +363,22 @@ extends Serializable with Logging { rdds.toSeq } + def saveAsObjectFiles(prefix: String, suffix: String = "") { + val saveFunc = (rdd: RDD[T], time: Time) => { + val file = rddToFileName(prefix, suffix, time) + rdd.saveAsObjectFile(file) + } + this.foreachRDD(saveFunc) + } + + def saveAsTextFiles(prefix: String, suffix: String = "") { + val saveFunc = (rdd: RDD[T], time: Time) => { + val file = rddToFileName(prefix, suffix, time) + rdd.saveAsTextFile(file) + } + this.foreachRDD(saveFunc) + } + def register() { ssc.registerOutputStream(this) } diff --git a/streaming/src/main/scala/spark/streaming/PairDStreamFunctions.scala b/streaming/src/main/scala/spark/streaming/PairDStreamFunctions.scala index ce1f4ad0a0..f88247708b 100644 --- a/streaming/src/main/scala/spark/streaming/PairDStreamFunctions.scala +++ b/streaming/src/main/scala/spark/streaming/PairDStreamFunctions.scala @@ -1,9 +1,16 @@ package spark.streaming -import scala.collection.mutable.ArrayBuffer -import spark.{Manifests, RDD, Partitioner, HashPartitioner} import spark.streaming.StreamingContext._ -import javax.annotation.Nullable + +import spark.{Manifests, RDD, Partitioner, HashPartitioner} +import spark.SparkContext._ + +import scala.collection.mutable.ArrayBuffer + +import org.apache.hadoop.mapred.{JobConf, OutputFormat} +import org.apache.hadoop.mapreduce.{OutputFormat => NewOutputFormat} +import org.apache.hadoop.mapred.OutputFormat +import org.apache.hadoop.conf.Configuration class PairDStreamFunctions[K: ClassManifest, V: ClassManifest](self: DStream[(K,V)]) extends Serializable { @@ -231,6 +238,54 @@ extends Serializable { for (v <- vs.iterator; w <- ws.iterator) yield (v, w) } } + + def saveAsHadoopFiles[F <: OutputFormat[K, V]]( + prefix: String, + suffix: String + )(implicit fm: ClassManifest[F]) { + saveAsHadoopFiles(prefix, suffix, getKeyClass, getValueClass, fm.erasure.asInstanceOf[Class[F]]) + } + + def saveAsHadoopFiles( + prefix: String, + suffix: String, + keyClass: Class[_], + valueClass: Class[_], + outputFormatClass: Class[_ <: OutputFormat[_, _]], + conf: JobConf = new JobConf + ) { + val saveFunc = (rdd: RDD[(K, V)], time: Time) => { + val file = rddToFileName(prefix, suffix, time) + rdd.saveAsHadoopFile(file, keyClass, valueClass, outputFormatClass, conf) + } + self.foreachRDD(saveFunc) + } + + def saveAsNewAPIHadoopFiles[F <: NewOutputFormat[K, V]]( + prefix: String, + suffix: String + )(implicit fm: ClassManifest[F]) { + saveAsNewAPIHadoopFiles(prefix, suffix, getKeyClass, getValueClass, fm.erasure.asInstanceOf[Class[F]]) + } + + def saveAsNewAPIHadoopFiles( + prefix: String, + suffix: String, + keyClass: Class[_], + valueClass: Class[_], + outputFormatClass: Class[_ <: NewOutputFormat[_, _]], + conf: Configuration = new Configuration + ) { + val saveFunc = (rdd: RDD[(K, V)], time: Time) => { + val file = rddToFileName(prefix, suffix, time) + rdd.saveAsNewAPIHadoopFile(file, keyClass, valueClass, outputFormatClass, conf) + } + self.foreachRDD(saveFunc) + } + + private def 
getKeyClass() = implicitly[ClassManifest[K]].erasure + + private def getValueClass() = implicitly[ClassManifest[V]].erasure } diff --git a/streaming/src/main/scala/spark/streaming/StreamingContext.scala b/streaming/src/main/scala/spark/streaming/StreamingContext.scala index 7c7b3afe47..b3148eaa97 100644 --- a/streaming/src/main/scala/spark/streaming/StreamingContext.scala +++ b/streaming/src/main/scala/spark/streaming/StreamingContext.scala @@ -225,5 +225,15 @@ object StreamingContext { implicit def toPairDStreamFunctions[K: ClassManifest, V: ClassManifest](stream: DStream[(K,V)]) = { new PairDStreamFunctions[K, V](stream) } + + def rddToFileName[T](prefix: String, suffix: String, time: Time): String = { + if (prefix == null) { + time.millis.toString + } else if (suffix == null || suffix.length ==0) { + prefix + "-" + time.milliseconds + } else { + prefix + "-" + time.milliseconds + "." + suffix + } + } } From ac12abc17ff90ec99192f3c3de4d1d390969e635 Mon Sep 17 00:00:00 2001 From: Tathagata Das Date: Mon, 29 Oct 2012 11:55:27 -0700 Subject: [PATCH 091/226] Modified RDD API to make dependencies a var (therefore can be changed to checkpointed hadoop rdd) and othere references to parent RDDs either through dependencies or through a weak reference (to allow finalizing when dependencies do not refer to it any more). --- .../main/scala/spark/PairRDDFunctions.scala | 24 +++++++++--------- .../main/scala/spark/ParallelCollection.scala | 8 +++--- core/src/main/scala/spark/RDD.scala | 25 +++++++++++++------ core/src/main/scala/spark/SparkContext.scala | 4 +++ .../main/scala/spark/rdd/CartesianRDD.scala | 21 +++++++++++----- .../main/scala/spark/rdd/CoGroupedRDD.scala | 13 +++++++--- .../main/scala/spark/rdd/CoalescedRDD.scala | 19 +++++++++----- .../main/scala/spark/rdd/FilteredRDD.scala | 12 ++++++--- .../main/scala/spark/rdd/FlatMappedRDD.scala | 10 ++++---- .../src/main/scala/spark/rdd/GlommedRDD.scala | 9 ++++--- core/src/main/scala/spark/rdd/HadoopRDD.scala | 4 +-- .../scala/spark/rdd/MapPartitionsRDD.scala | 12 ++++----- .../spark/rdd/MapPartitionsWithSplitRDD.scala | 10 ++++---- core/src/main/scala/spark/rdd/MappedRDD.scala | 12 ++++----- .../main/scala/spark/rdd/NewHadoopRDD.scala | 9 +++---- core/src/main/scala/spark/rdd/PipedRDD.scala | 16 ++++++------ .../src/main/scala/spark/rdd/SampledRDD.scala | 15 ++++++----- .../main/scala/spark/rdd/ShuffledRDD.scala | 13 ++++++---- core/src/main/scala/spark/rdd/UnionRDD.scala | 20 +++++++++------ 19 files changed, 149 insertions(+), 107 deletions(-) diff --git a/core/src/main/scala/spark/PairRDDFunctions.scala b/core/src/main/scala/spark/PairRDDFunctions.scala index e5bb639cfd..f52af08125 100644 --- a/core/src/main/scala/spark/PairRDDFunctions.scala +++ b/core/src/main/scala/spark/PairRDDFunctions.scala @@ -23,6 +23,7 @@ import spark.partial.BoundedDouble import spark.partial.PartialResult import spark.rdd._ import spark.SparkContext._ +import java.lang.ref.WeakReference /** * Extra functions available on RDDs of (key, value) pairs through an implicit conversion. 
@@ -624,23 +625,22 @@ class OrderedRDDFunctions[K <% Ordered[K]: ClassManifest, V: ClassManifest]( } private[spark] -class MappedValuesRDD[K, V, U](prev: RDD[(K, V)], f: V => U) extends RDD[(K, U)](prev.context) { - override def splits = prev.splits - override val dependencies = List(new OneToOneDependency(prev)) - override val partitioner = prev.partitioner - override def compute(split: Split) = prev.iterator(split).map{case (k, v) => (k, f(v))} +class MappedValuesRDD[K, V, U](@transient prev: WeakReference[RDD[(K, V)]], f: V => U) + extends RDD[(K, U)](prev.get) { + + override def splits = firstParent[(K, V)].splits + override val partitioner = firstParent[(K, V)].partitioner + override def compute(split: Split) = firstParent[(K, V)].iterator(split).map{case (k, v) => (k, f(v))} } private[spark] -class FlatMappedValuesRDD[K, V, U](prev: RDD[(K, V)], f: V => TraversableOnce[U]) - extends RDD[(K, U)](prev.context) { - - override def splits = prev.splits - override val dependencies = List(new OneToOneDependency(prev)) - override val partitioner = prev.partitioner +class FlatMappedValuesRDD[K, V, U](@transient prev: WeakReference[RDD[(K, V)]], f: V => TraversableOnce[U]) + extends RDD[(K, U)](prev.get) { + override def splits = firstParent[(K, V)].splits + override val partitioner = firstParent[(K, V)].partitioner override def compute(split: Split) = { - prev.iterator(split).flatMap { case (k, v) => f(v).map(x => (k, x)) } + firstParent[(K, V)].iterator(split).flatMap { case (k, v) => f(v).map(x => (k, x)) } } } diff --git a/core/src/main/scala/spark/ParallelCollection.scala b/core/src/main/scala/spark/ParallelCollection.scala index 9b57ae3b4f..ad06ee9736 100644 --- a/core/src/main/scala/spark/ParallelCollection.scala +++ b/core/src/main/scala/spark/ParallelCollection.scala @@ -22,13 +22,13 @@ private[spark] class ParallelCollectionSplit[T: ClassManifest]( } private[spark] class ParallelCollection[T: ClassManifest]( - sc: SparkContext, + @transient sc_ : SparkContext, @transient data: Seq[T], numSlices: Int) - extends RDD[T](sc) { + extends RDD[T](sc_, Nil) { // TODO: Right now, each split sends along its full data, even if later down the RDD chain it gets // cached. It might be worthwhile to write the data to a file in the DFS and read it in the split - // instead. + // instead. UPDATE: With the new changes to enable checkpointing, this an be done. @transient val splits_ = { @@ -41,8 +41,6 @@ private[spark] class ParallelCollection[T: ClassManifest]( override def compute(s: Split) = s.asInstanceOf[ParallelCollectionSplit[T]].iterator override def preferredLocations(s: Split): Seq[String] = Nil - - override val dependencies: List[Dependency[_]] = Nil } private object ParallelCollection { diff --git a/core/src/main/scala/spark/RDD.scala b/core/src/main/scala/spark/RDD.scala index 338dff4061..c9f3763f73 100644 --- a/core/src/main/scala/spark/RDD.scala +++ b/core/src/main/scala/spark/RDD.scala @@ -72,7 +72,14 @@ import SparkContext._ * [[http://www.cs.berkeley.edu/~matei/papers/2012/nsdi_spark.pdf Spark paper]] for more details * on RDD internals. 
*/ -abstract class RDD[T: ClassManifest](@transient sc: SparkContext) extends Serializable { +abstract class RDD[T: ClassManifest]( + @transient var sc: SparkContext, + @transient var dependencies_ : List[Dependency[_]] = Nil + ) extends Serializable { + + + def this(@transient oneParent: RDD[_]) = + this(oneParent.context , List(new OneToOneDependency(oneParent))) // Methods that must be implemented by subclasses: @@ -83,10 +90,9 @@ abstract class RDD[T: ClassManifest](@transient sc: SparkContext) extends Serial def compute(split: Split): Iterator[T] /** How this RDD depends on any parent RDDs. */ - @transient val dependencies: List[Dependency[_]] + def dependencies: List[Dependency[_]] = dependencies_ + //var dependencies: List[Dependency[_]] = dependencies_ - // Methods available on all RDDs: - /** Record user function generating this RDD. */ private[spark] val origin = Utils.getSparkCallSite @@ -106,8 +112,13 @@ abstract class RDD[T: ClassManifest](@transient sc: SparkContext) extends Serial // Variables relating to persistence private var storageLevel: StorageLevel = StorageLevel.NONE - - /** + + private[spark] def firstParent[U: ClassManifest] = dependencies.head.rdd.asInstanceOf[RDD[U]] + private[spark] def parent[U: ClassManifest](id: Int) = dependencies(id).rdd.asInstanceOf[RDD[U]] + + // Methods available on all RDDs: + + /** * Set this RDD's storage level to persist its values across operations after the first time * it is computed. Can only be called once on each RDD. */ @@ -129,7 +140,7 @@ abstract class RDD[T: ClassManifest](@transient sc: SparkContext) extends Serial /** Get the RDD's current storage level, or StorageLevel.NONE if none is set. */ def getStorageLevel = storageLevel - + private[spark] def checkpoint(level: StorageLevel = StorageLevel.MEMORY_AND_DISK_2): RDD[T] = { if (!level.useDisk && level.replication < 2) { throw new Exception("Cannot checkpoint without using disk or replication (level requested was " + level + ")") diff --git a/core/src/main/scala/spark/SparkContext.scala b/core/src/main/scala/spark/SparkContext.scala index 0d37075ef3..6b957a6356 100644 --- a/core/src/main/scala/spark/SparkContext.scala +++ b/core/src/main/scala/spark/SparkContext.scala @@ -3,6 +3,7 @@ package spark import java.io._ import java.util.concurrent.atomic.AtomicInteger import java.net.{URI, URLClassLoader} +import java.lang.ref.WeakReference import scala.collection.Map import scala.collection.generic.Growable @@ -695,6 +696,9 @@ object SparkContext { /** Find the JAR that contains the class of a particular object */ def jarOfObject(obj: AnyRef): Seq[String] = jarOfClass(obj.getClass) + + implicit def rddToWeakRefRDD[T: ClassManifest](rdd: RDD[T]) = new WeakReference(rdd) + } diff --git a/core/src/main/scala/spark/rdd/CartesianRDD.scala b/core/src/main/scala/spark/rdd/CartesianRDD.scala index 7c354b6b2e..c97b835630 100644 --- a/core/src/main/scala/spark/rdd/CartesianRDD.scala +++ b/core/src/main/scala/spark/rdd/CartesianRDD.scala @@ -4,6 +4,7 @@ import spark.NarrowDependency import spark.RDD import spark.SparkContext import spark.Split +import java.lang.ref.WeakReference private[spark] class CartesianSplit(idx: Int, val s1: Split, val s2: Split) extends Split with Serializable { @@ -13,13 +14,17 @@ class CartesianSplit(idx: Int, val s1: Split, val s2: Split) extends Split with private[spark] class CartesianRDD[T: ClassManifest, U:ClassManifest]( sc: SparkContext, - rdd1: RDD[T], - rdd2: RDD[U]) + rdd1_ : WeakReference[RDD[T]], + rdd2_ : WeakReference[RDD[U]]) extends RDD[Pair[T, 
U]](sc) with Serializable { - + + def rdd1 = rdd1_.get + def rdd2 = rdd2_.get + val numSplitsInRdd2 = rdd2.splits.size - + + // TODO: make this null when finishing checkpoint @transient val splits_ = { // create the cross product split @@ -31,6 +36,7 @@ class CartesianRDD[T: ClassManifest, U:ClassManifest]( array } + // TODO: make this return checkpoint Hadoop RDDs split when checkpointed override def splits = splits_ override def preferredLocations(split: Split) = { @@ -42,8 +48,9 @@ class CartesianRDD[T: ClassManifest, U:ClassManifest]( val currSplit = split.asInstanceOf[CartesianSplit] for (x <- rdd1.iterator(currSplit.s1); y <- rdd2.iterator(currSplit.s2)) yield (x, y) } - - override val dependencies = List( + + // TODO: make this null when finishing checkpoint + var deps = List( new NarrowDependency(rdd1) { def getParents(id: Int): Seq[Int] = List(id / numSplitsInRdd2) }, @@ -51,4 +58,6 @@ class CartesianRDD[T: ClassManifest, U:ClassManifest]( def getParents(id: Int): Seq[Int] = List(id % numSplitsInRdd2) } ) + + override def dependencies = deps } diff --git a/core/src/main/scala/spark/rdd/CoGroupedRDD.scala b/core/src/main/scala/spark/rdd/CoGroupedRDD.scala index 50bec9e63b..af54ac2fa0 100644 --- a/core/src/main/scala/spark/rdd/CoGroupedRDD.scala +++ b/core/src/main/scala/spark/rdd/CoGroupedRDD.scala @@ -31,12 +31,13 @@ private[spark] class CoGroupAggregator with Serializable class CoGroupedRDD[K](@transient rdds: Seq[RDD[(_, _)]], part: Partitioner) - extends RDD[(K, Seq[Seq[_]])](rdds.head.context) with Logging { + extends RDD[(K, Seq[Seq[_]])](rdds.head.context, Nil) with Logging { val aggr = new CoGroupAggregator - + + // TODO: make this null when finishing checkpoint @transient - override val dependencies = { + var deps = { val deps = new ArrayBuffer[Dependency[_]] for ((rdd, index) <- rdds.zipWithIndex) { val mapSideCombinedRDD = rdd.mapPartitions(aggr.combineValuesByKey(_), true) @@ -50,7 +51,10 @@ class CoGroupedRDD[K](@transient rdds: Seq[RDD[(_, _)]], part: Partitioner) } deps.toList } - + + override def dependencies = deps + + // TODO: make this null when finishing checkpoint @transient val splits_ : Array[Split] = { val firstRdd = rdds.head @@ -68,6 +72,7 @@ class CoGroupedRDD[K](@transient rdds: Seq[RDD[(_, _)]], part: Partitioner) array } + // TODO: make this return checkpoint Hadoop RDDs split when checkpointed override def splits = splits_ override val partitioner = Some(part) diff --git a/core/src/main/scala/spark/rdd/CoalescedRDD.scala b/core/src/main/scala/spark/rdd/CoalescedRDD.scala index 0967f4f5df..573acf8893 100644 --- a/core/src/main/scala/spark/rdd/CoalescedRDD.scala +++ b/core/src/main/scala/spark/rdd/CoalescedRDD.scala @@ -14,11 +14,14 @@ private class CoalescedRDDSplit(val index: Int, val parents: Array[Split]) exten * This transformation is useful when an RDD with many partitions gets filtered into a smaller one, * or to avoid having a large number of small tasks when processing a directory with many files. 
*/ -class CoalescedRDD[T: ClassManifest](prev: RDD[T], maxPartitions: Int) - extends RDD[T](prev.context) { +class CoalescedRDD[T: ClassManifest]( + @transient prev: RDD[T], // TODO: Make this a weak reference + maxPartitions: Int) + extends RDD[T](prev.context, Nil) { // Nil, so the dependencies_ var does not refer to parent RDDs + // TODO: make this null when finishing checkpoint @transient val splits_ : Array[Split] = { - val prevSplits = prev.splits + val prevSplits = firstParent[T].splits if (prevSplits.length < maxPartitions) { prevSplits.zipWithIndex.map{ case (s, idx) => new CoalescedRDDSplit(idx, Array(s)) } } else { @@ -30,18 +33,22 @@ class CoalescedRDD[T: ClassManifest](prev: RDD[T], maxPartitions: Int) } } + // TODO: make this return checkpoint Hadoop RDDs split when checkpointed override def splits = splits_ override def compute(split: Split): Iterator[T] = { split.asInstanceOf[CoalescedRDDSplit].parents.iterator.flatMap { - parentSplit => prev.iterator(parentSplit) + parentSplit => firstParent[T].iterator(parentSplit) } } - val dependencies = List( - new NarrowDependency(prev) { + // TODO: make this null when finishing checkpoint + var deps = List( + new NarrowDependency(firstParent) { def getParents(id: Int): Seq[Int] = splits(id).asInstanceOf[CoalescedRDDSplit].parents.map(_.index) } ) + + override def dependencies = deps } diff --git a/core/src/main/scala/spark/rdd/FilteredRDD.scala b/core/src/main/scala/spark/rdd/FilteredRDD.scala index dfe9dc73f3..cc2a3acd3a 100644 --- a/core/src/main/scala/spark/rdd/FilteredRDD.scala +++ b/core/src/main/scala/spark/rdd/FilteredRDD.scala @@ -3,10 +3,14 @@ package spark.rdd import spark.OneToOneDependency import spark.RDD import spark.Split +import java.lang.ref.WeakReference private[spark] -class FilteredRDD[T: ClassManifest](prev: RDD[T], f: T => Boolean) extends RDD[T](prev.context) { - override def splits = prev.splits - override val dependencies = List(new OneToOneDependency(prev)) - override def compute(split: Split) = prev.iterator(split).filter(f) +class FilteredRDD[T: ClassManifest]( + @transient prev: WeakReference[RDD[T]], + f: T => Boolean) + extends RDD[T](prev.get) { + + override def splits = firstParent[T].splits + override def compute(split: Split) = firstParent[T].iterator(split).filter(f) } \ No newline at end of file diff --git a/core/src/main/scala/spark/rdd/FlatMappedRDD.scala b/core/src/main/scala/spark/rdd/FlatMappedRDD.scala index 3534dc8057..34bd784c13 100644 --- a/core/src/main/scala/spark/rdd/FlatMappedRDD.scala +++ b/core/src/main/scala/spark/rdd/FlatMappedRDD.scala @@ -3,14 +3,14 @@ package spark.rdd import spark.OneToOneDependency import spark.RDD import spark.Split +import java.lang.ref.WeakReference private[spark] class FlatMappedRDD[U: ClassManifest, T: ClassManifest]( - prev: RDD[T], + @transient prev: WeakReference[RDD[T]], f: T => TraversableOnce[U]) - extends RDD[U](prev.context) { + extends RDD[U](prev.get) { - override def splits = prev.splits - override val dependencies = List(new OneToOneDependency(prev)) - override def compute(split: Split) = prev.iterator(split).flatMap(f) + override def splits = firstParent[T].splits + override def compute(split: Split) = firstParent[T].iterator(split).flatMap(f) } diff --git a/core/src/main/scala/spark/rdd/GlommedRDD.scala b/core/src/main/scala/spark/rdd/GlommedRDD.scala index e30564f2da..9321e89dcd 100644 --- a/core/src/main/scala/spark/rdd/GlommedRDD.scala +++ b/core/src/main/scala/spark/rdd/GlommedRDD.scala @@ -3,10 +3,11 @@ package spark.rdd import 
spark.OneToOneDependency import spark.RDD import spark.Split +import java.lang.ref.WeakReference private[spark] -class GlommedRDD[T: ClassManifest](prev: RDD[T]) extends RDD[Array[T]](prev.context) { - override def splits = prev.splits - override val dependencies = List(new OneToOneDependency(prev)) - override def compute(split: Split) = Array(prev.iterator(split).toArray).iterator +class GlommedRDD[T: ClassManifest](@transient prev: WeakReference[RDD[T]]) + extends RDD[Array[T]](prev.get) { + override def splits = firstParent[T].splits + override def compute(split: Split) = Array(firstParent[T].iterator(split).toArray).iterator } \ No newline at end of file diff --git a/core/src/main/scala/spark/rdd/HadoopRDD.scala b/core/src/main/scala/spark/rdd/HadoopRDD.scala index bf29a1f075..a12531ea89 100644 --- a/core/src/main/scala/spark/rdd/HadoopRDD.scala +++ b/core/src/main/scala/spark/rdd/HadoopRDD.scala @@ -46,7 +46,7 @@ class HadoopRDD[K, V]( keyClass: Class[K], valueClass: Class[V], minSplits: Int) - extends RDD[(K, V)](sc) { + extends RDD[(K, V)](sc, Nil) { // A Hadoop JobConf can be about 10 KB, which is pretty big, so broadcast it val confBroadcast = sc.broadcast(new SerializableWritable(conf)) @@ -115,6 +115,4 @@ class HadoopRDD[K, V]( val hadoopSplit = split.asInstanceOf[HadoopSplit] hadoopSplit.inputSplit.value.getLocations.filter(_ != "localhost") } - - override val dependencies: List[Dependency[_]] = Nil } diff --git a/core/src/main/scala/spark/rdd/MapPartitionsRDD.scala b/core/src/main/scala/spark/rdd/MapPartitionsRDD.scala index a904ef62c3..bad872c430 100644 --- a/core/src/main/scala/spark/rdd/MapPartitionsRDD.scala +++ b/core/src/main/scala/spark/rdd/MapPartitionsRDD.scala @@ -3,17 +3,17 @@ package spark.rdd import spark.OneToOneDependency import spark.RDD import spark.Split +import java.lang.ref.WeakReference private[spark] class MapPartitionsRDD[U: ClassManifest, T: ClassManifest]( - prev: RDD[T], + @transient prev: WeakReference[RDD[T]], f: Iterator[T] => Iterator[U], preservesPartitioning: Boolean = false) - extends RDD[U](prev.context) { + extends RDD[U](prev.get) { - override val partitioner = if (preservesPartitioning) prev.partitioner else None + override val partitioner = if (preservesPartitioning) firstParent[T].partitioner else None - override def splits = prev.splits - override val dependencies = List(new OneToOneDependency(prev)) - override def compute(split: Split) = f(prev.iterator(split)) + override def splits = firstParent[T].splits + override def compute(split: Split) = f(firstParent[T].iterator(split)) } \ No newline at end of file diff --git a/core/src/main/scala/spark/rdd/MapPartitionsWithSplitRDD.scala b/core/src/main/scala/spark/rdd/MapPartitionsWithSplitRDD.scala index adc541694e..d7b238b05d 100644 --- a/core/src/main/scala/spark/rdd/MapPartitionsWithSplitRDD.scala +++ b/core/src/main/scala/spark/rdd/MapPartitionsWithSplitRDD.scala @@ -3,6 +3,7 @@ package spark.rdd import spark.OneToOneDependency import spark.RDD import spark.Split +import java.lang.ref.WeakReference /** * A variant of the MapPartitionsRDD that passes the split index into the @@ -11,11 +12,10 @@ import spark.Split */ private[spark] class MapPartitionsWithSplitRDD[U: ClassManifest, T: ClassManifest]( - prev: RDD[T], + @transient prev: WeakReference[RDD[T]], f: (Int, Iterator[T]) => Iterator[U]) - extends RDD[U](prev.context) { + extends RDD[U](prev.get) { - override def splits = prev.splits - override val dependencies = List(new OneToOneDependency(prev)) - override def compute(split: Split) 
= f(split.index, prev.iterator(split)) + override def splits = firstParent[T].splits + override def compute(split: Split) = f(split.index, firstParent[T].iterator(split)) } \ No newline at end of file diff --git a/core/src/main/scala/spark/rdd/MappedRDD.scala b/core/src/main/scala/spark/rdd/MappedRDD.scala index 59bedad8ef..126c6f332b 100644 --- a/core/src/main/scala/spark/rdd/MappedRDD.scala +++ b/core/src/main/scala/spark/rdd/MappedRDD.scala @@ -3,14 +3,14 @@ package spark.rdd import spark.OneToOneDependency import spark.RDD import spark.Split +import java.lang.ref.WeakReference private[spark] class MappedRDD[U: ClassManifest, T: ClassManifest]( - prev: RDD[T], + @transient prev: WeakReference[RDD[T]], f: T => U) - extends RDD[U](prev.context) { - - override def splits = prev.splits - override val dependencies = List(new OneToOneDependency(prev)) - override def compute(split: Split) = prev.iterator(split).map(f) + extends RDD[U](prev.get) { + + override def splits = firstParent[T].splits + override def compute(split: Split) = firstParent[T].iterator(split).map(f) } \ No newline at end of file diff --git a/core/src/main/scala/spark/rdd/NewHadoopRDD.scala b/core/src/main/scala/spark/rdd/NewHadoopRDD.scala index 7a1a0fb87d..c12df5839e 100644 --- a/core/src/main/scala/spark/rdd/NewHadoopRDD.scala +++ b/core/src/main/scala/spark/rdd/NewHadoopRDD.scala @@ -23,11 +23,12 @@ class NewHadoopSplit(rddId: Int, val index: Int, @transient rawSplit: InputSplit } class NewHadoopRDD[K, V]( - sc: SparkContext, + sc : SparkContext, inputFormatClass: Class[_ <: InputFormat[K, V]], - keyClass: Class[K], valueClass: Class[V], + keyClass: Class[K], + valueClass: Class[V], @transient conf: Configuration) - extends RDD[(K, V)](sc) + extends RDD[(K, V)](sc, Nil) with HadoopMapReduceUtil { // A Hadoop Configuration can be about 10 KB, which is pretty big, so broadcast it @@ -92,6 +93,4 @@ class NewHadoopRDD[K, V]( val theSplit = split.asInstanceOf[NewHadoopSplit] theSplit.serializableHadoopSplit.value.getLocations.filter(_ != "localhost") } - - override val dependencies: List[Dependency[_]] = Nil } diff --git a/core/src/main/scala/spark/rdd/PipedRDD.scala b/core/src/main/scala/spark/rdd/PipedRDD.scala index 98ea0c92d6..d54579d6d1 100644 --- a/core/src/main/scala/spark/rdd/PipedRDD.scala +++ b/core/src/main/scala/spark/rdd/PipedRDD.scala @@ -19,18 +19,18 @@ import spark.Split * (printing them one per line) and returns the output as a collection of strings. */ class PipedRDD[T: ClassManifest]( - parent: RDD[T], command: Seq[String], envVars: Map[String, String]) - extends RDD[String](parent.context) { + @transient prev: RDD[T], + command: Seq[String], + envVars: Map[String, String]) + extends RDD[String](prev) { - def this(parent: RDD[T], command: Seq[String]) = this(parent, command, Map()) + def this(@transient prev: RDD[T], command: Seq[String]) = this(prev, command, Map()) // Similar to Runtime.exec(), if we are given a single string, split it into words // using a standard StringTokenizer (i.e. 
by spaces) - def this(parent: RDD[T], command: String) = this(parent, PipedRDD.tokenize(command)) + def this(@transient prev: RDD[T], command: String) = this(prev, PipedRDD.tokenize(command)) - override def splits = parent.splits - - override val dependencies = List(new OneToOneDependency(parent)) + override def splits = firstParent[T].splits override def compute(split: Split): Iterator[String] = { val pb = new ProcessBuilder(command) @@ -55,7 +55,7 @@ class PipedRDD[T: ClassManifest]( override def run() { SparkEnv.set(env) val out = new PrintWriter(proc.getOutputStream) - for (elem <- parent.iterator(split)) { + for (elem <- firstParent[T].iterator(split)) { out.println(elem) } out.close() diff --git a/core/src/main/scala/spark/rdd/SampledRDD.scala b/core/src/main/scala/spark/rdd/SampledRDD.scala index 87a5268f27..00b521b130 100644 --- a/core/src/main/scala/spark/rdd/SampledRDD.scala +++ b/core/src/main/scala/spark/rdd/SampledRDD.scala @@ -7,6 +7,7 @@ import cern.jet.random.engine.DRand import spark.RDD import spark.OneToOneDependency import spark.Split +import java.lang.ref.WeakReference private[spark] class SampledRDDSplit(val prev: Split, val seed: Int) extends Split with Serializable { @@ -14,24 +15,22 @@ class SampledRDDSplit(val prev: Split, val seed: Int) extends Split with Seriali } class SampledRDD[T: ClassManifest]( - prev: RDD[T], + @transient prev: WeakReference[RDD[T]], withReplacement: Boolean, frac: Double, seed: Int) - extends RDD[T](prev.context) { + extends RDD[T](prev.get) { @transient val splits_ = { val rg = new Random(seed) - prev.splits.map(x => new SampledRDDSplit(x, rg.nextInt)) + firstParent[T].splits.map(x => new SampledRDDSplit(x, rg.nextInt)) } override def splits = splits_.asInstanceOf[Array[Split]] - override val dependencies = List(new OneToOneDependency(prev)) - override def preferredLocations(split: Split) = - prev.preferredLocations(split.asInstanceOf[SampledRDDSplit].prev) + firstParent[T].preferredLocations(split.asInstanceOf[SampledRDDSplit].prev) override def compute(splitIn: Split) = { val split = splitIn.asInstanceOf[SampledRDDSplit] @@ -39,7 +38,7 @@ class SampledRDD[T: ClassManifest]( // For large datasets, the expected number of occurrences of each element in a sample with // replacement is Poisson(frac). We use that to get a count for each element. val poisson = new Poisson(frac, new DRand(split.seed)) - prev.iterator(split.prev).flatMap { element => + firstParent[T].iterator(split.prev).flatMap { element => val count = poisson.nextInt() if (count == 0) { Iterator.empty // Avoid object allocation when we return 0 items, which is quite often @@ -49,7 +48,7 @@ class SampledRDD[T: ClassManifest]( } } else { // Sampling without replacement val rand = new Random(split.seed) - prev.iterator(split.prev).filter(x => (rand.nextDouble <= frac)) + firstParent[T].iterator(split.prev).filter(x => (rand.nextDouble <= frac)) } } } diff --git a/core/src/main/scala/spark/rdd/ShuffledRDD.scala b/core/src/main/scala/spark/rdd/ShuffledRDD.scala index 145e419c53..62867dab4f 100644 --- a/core/src/main/scala/spark/rdd/ShuffledRDD.scala +++ b/core/src/main/scala/spark/rdd/ShuffledRDD.scala @@ -5,6 +5,7 @@ import spark.RDD import spark.ShuffleDependency import spark.SparkEnv import spark.Split +import java.lang.ref.WeakReference private[spark] class ShuffledRDDSplit(val idx: Int) extends Split { override val index = idx @@ -19,8 +20,9 @@ private[spark] class ShuffledRDDSplit(val idx: Int) extends Split { * @tparam V the value class. 
*/ class ShuffledRDD[K, V]( - @transient parent: RDD[(K, V)], - part: Partitioner) extends RDD[(K, V)](parent.context) { + @transient prev: WeakReference[RDD[(K, V)]], + part: Partitioner) + extends RDD[(K, V)](prev.get.context, List(new ShuffleDependency(prev.get, part))) { override val partitioner = Some(part) @@ -31,10 +33,11 @@ class ShuffledRDD[K, V]( override def preferredLocations(split: Split) = Nil - val dep = new ShuffleDependency(parent, part) - override val dependencies = List(dep) + //val dep = new ShuffleDependency(parent, part) + //override val dependencies = List(dep) override def compute(split: Split): Iterator[(K, V)] = { - SparkEnv.get.shuffleFetcher.fetch[K, V](dep.shuffleId, split.index) + val shuffledId = dependencies.head.asInstanceOf[ShuffleDependency[K, V]].shuffleId + SparkEnv.get.shuffleFetcher.fetch[K, V](shuffledId, split.index) } } diff --git a/core/src/main/scala/spark/rdd/UnionRDD.scala b/core/src/main/scala/spark/rdd/UnionRDD.scala index f0b9225f7c..0a61a2d1f5 100644 --- a/core/src/main/scala/spark/rdd/UnionRDD.scala +++ b/core/src/main/scala/spark/rdd/UnionRDD.scala @@ -7,6 +7,7 @@ import spark.RangeDependency import spark.RDD import spark.SparkContext import spark.Split +import java.lang.ref.WeakReference private[spark] class UnionSplit[T: ClassManifest]( idx: Int, @@ -22,10 +23,10 @@ private[spark] class UnionSplit[T: ClassManifest]( class UnionRDD[T: ClassManifest]( sc: SparkContext, - @transient rdds: Seq[RDD[T]]) - extends RDD[T](sc) - with Serializable { - + @transient rdds: Seq[RDD[T]]) // TODO: Make this a weak reference + extends RDD[T](sc, Nil) { // Nil, so the dependencies_ var does not refer to parent RDDs + + // TODO: make this null when finishing checkpoint @transient val splits_ : Array[Split] = { val array = new Array[Split](rdds.map(_.splits.size).sum) @@ -37,19 +38,22 @@ class UnionRDD[T: ClassManifest]( array } + // TODO: make this return checkpoint Hadoop RDDs split when checkpointed override def splits = splits_ - @transient - override val dependencies = { + // TODO: make this null when finishing checkpoint + @transient var deps = { val deps = new ArrayBuffer[Dependency[_]] var pos = 0 for (rdd <- rdds) { - deps += new RangeDependency(rdd, 0, pos, rdd.splits.size) + deps += new RangeDependency(rdd, 0, pos, rdd.splits.size) pos += rdd.splits.size } deps.toList } - + + override def dependencies = deps + override def compute(s: Split): Iterator[T] = s.asInstanceOf[UnionSplit[T]].iterator() override def preferredLocations(s: Split): Seq[String] = From 0dcd770fdc4d558972b635b6770ed0120280ef22 Mon Sep 17 00:00:00 2001 From: Tathagata Das Date: Tue, 30 Oct 2012 16:09:37 -0700 Subject: [PATCH 092/226] Added checkpointing support to all RDDs, along with CheckpointSuite to test checkpointing in them. 
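For reference, a rough sketch of how the new checkpointing hooks fit together, in the spirit of the added CheckpointSuite (not part of the patch; the paths and object names are made up). checkpoint() is protected[spark] in this change, so the sketch sits in package spark; the object-file write happens on a separate thread after the first job on the RDD completes, which is why the sketch waits briefly before checking the result:

    package spark

    object CheckpointSketch {
      def main(args: Array[String]) {
        val sc = new SparkContext("local", "checkpoint-sketch")
        sc.setCheckpointDir("/tmp/spark-checkpoint-sketch")   // directory must not already exist

        val base = sc.parallelize(1 to 100, 4)
        val mapped = base.map(_ * 2).filter(_ % 3 == 0)

        mapped.checkpoint()   // mark for checkpointing before any job has run on it
        mapped.collect()      // sc.runJob(...) calls mapped.doCheckpoint() once the job finishes

        Thread.sleep(1000)    // the saveAsObjectFile write runs on a background thread

        // The lineage is now truncated: dependencies point at the saved object file
        // rather than at `base`, and the RDD reports where it was written.
        assert(mapped.isCheckpointed)
        println(mapped.checkpointFile)
      }
    }
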
--- .../main/scala/spark/PairRDDFunctions.scala | 4 +- .../main/scala/spark/ParallelCollection.scala | 4 +- core/src/main/scala/spark/RDD.scala | 129 ++++++++++++++---- core/src/main/scala/spark/SparkContext.scala | 21 +++ core/src/main/scala/spark/rdd/BlockRDD.scala | 13 +- .../main/scala/spark/rdd/CartesianRDD.scala | 38 +++--- .../main/scala/spark/rdd/CoGroupedRDD.scala | 19 +-- .../main/scala/spark/rdd/CoalescedRDD.scala | 26 ++-- .../main/scala/spark/rdd/FilteredRDD.scala | 2 +- .../main/scala/spark/rdd/FlatMappedRDD.scala | 2 +- .../src/main/scala/spark/rdd/GlommedRDD.scala | 2 +- core/src/main/scala/spark/rdd/HadoopRDD.scala | 2 + .../scala/spark/rdd/MapPartitionsRDD.scala | 2 +- .../spark/rdd/MapPartitionsWithSplitRDD.scala | 2 +- core/src/main/scala/spark/rdd/MappedRDD.scala | 2 +- .../main/scala/spark/rdd/NewHadoopRDD.scala | 2 + core/src/main/scala/spark/rdd/PipedRDD.scala | 9 +- .../src/main/scala/spark/rdd/SampledRDD.scala | 2 +- .../main/scala/spark/rdd/ShuffledRDD.scala | 5 - core/src/main/scala/spark/rdd/UnionRDD.scala | 32 +++-- .../test/scala/spark/CheckpointSuite.scala | 116 ++++++++++++++++ core/src/test/scala/spark/RDDSuite.scala | 25 +++- 22 files changed, 352 insertions(+), 107 deletions(-) create mode 100644 core/src/test/scala/spark/CheckpointSuite.scala diff --git a/core/src/main/scala/spark/PairRDDFunctions.scala b/core/src/main/scala/spark/PairRDDFunctions.scala index f52af08125..1f82bd3ab8 100644 --- a/core/src/main/scala/spark/PairRDDFunctions.scala +++ b/core/src/main/scala/spark/PairRDDFunctions.scala @@ -625,7 +625,7 @@ class OrderedRDDFunctions[K <% Ordered[K]: ClassManifest, V: ClassManifest]( } private[spark] -class MappedValuesRDD[K, V, U](@transient prev: WeakReference[RDD[(K, V)]], f: V => U) +class MappedValuesRDD[K, V, U](prev: WeakReference[RDD[(K, V)]], f: V => U) extends RDD[(K, U)](prev.get) { override def splits = firstParent[(K, V)].splits @@ -634,7 +634,7 @@ class MappedValuesRDD[K, V, U](@transient prev: WeakReference[RDD[(K, V)]], f: V } private[spark] -class FlatMappedValuesRDD[K, V, U](@transient prev: WeakReference[RDD[(K, V)]], f: V => TraversableOnce[U]) +class FlatMappedValuesRDD[K, V, U](prev: WeakReference[RDD[(K, V)]], f: V => TraversableOnce[U]) extends RDD[(K, U)](prev.get) { override def splits = firstParent[(K, V)].splits diff --git a/core/src/main/scala/spark/ParallelCollection.scala b/core/src/main/scala/spark/ParallelCollection.scala index ad06ee9736..9725017b61 100644 --- a/core/src/main/scala/spark/ParallelCollection.scala +++ b/core/src/main/scala/spark/ParallelCollection.scala @@ -22,10 +22,10 @@ private[spark] class ParallelCollectionSplit[T: ClassManifest]( } private[spark] class ParallelCollection[T: ClassManifest]( - @transient sc_ : SparkContext, + @transient sc : SparkContext, @transient data: Seq[T], numSlices: Int) - extends RDD[T](sc_, Nil) { + extends RDD[T](sc, Nil) { // TODO: Right now, each split sends along its full data, even if later down the RDD chain it gets // cached. It might be worthwhile to write the data to a file in the DFS and read it in the split // instead. UPDATE: With the new changes to enable checkpointing, this an be done. 
diff --git a/core/src/main/scala/spark/RDD.scala b/core/src/main/scala/spark/RDD.scala index c9f3763f73..e272a0ede9 100644 --- a/core/src/main/scala/spark/RDD.scala +++ b/core/src/main/scala/spark/RDD.scala @@ -13,6 +13,7 @@ import scala.collection.Map import scala.collection.mutable.HashMap import scala.collection.JavaConversions.mapAsScalaMap +import org.apache.hadoop.fs.Path import org.apache.hadoop.io.BytesWritable import org.apache.hadoop.io.NullWritable import org.apache.hadoop.io.Text @@ -74,7 +75,7 @@ import SparkContext._ */ abstract class RDD[T: ClassManifest]( @transient var sc: SparkContext, - @transient var dependencies_ : List[Dependency[_]] = Nil + var dependencies_ : List[Dependency[_]] ) extends Serializable { @@ -91,7 +92,6 @@ abstract class RDD[T: ClassManifest]( /** How this RDD depends on any parent RDDs. */ def dependencies: List[Dependency[_]] = dependencies_ - //var dependencies: List[Dependency[_]] = dependencies_ /** Record user function generating this RDD. */ private[spark] val origin = Utils.getSparkCallSite @@ -100,7 +100,13 @@ abstract class RDD[T: ClassManifest]( val partitioner: Option[Partitioner] = None /** Optionally overridden by subclasses to specify placement preferences. */ - def preferredLocations(split: Split): Seq[String] = Nil + def preferredLocations(split: Split): Seq[String] = { + if (isCheckpointed) { + checkpointRDD.preferredLocations(split) + } else { + Nil + } + } /** The [[spark.SparkContext]] that this RDD was created on. */ def context = sc @@ -113,8 +119,23 @@ abstract class RDD[T: ClassManifest]( // Variables relating to persistence private var storageLevel: StorageLevel = StorageLevel.NONE - private[spark] def firstParent[U: ClassManifest] = dependencies.head.rdd.asInstanceOf[RDD[U]] - private[spark] def parent[U: ClassManifest](id: Int) = dependencies(id).rdd.asInstanceOf[RDD[U]] + /** Returns the first parent RDD */ + private[spark] def firstParent[U: ClassManifest] = { + dependencies.head.rdd.asInstanceOf[RDD[U]] + } + + /** Returns the `i` th parent RDD */ + private[spark] def parent[U: ClassManifest](i: Int) = dependencies(i).rdd.asInstanceOf[RDD[U]] + + // Variables relating to checkpointing + val isCheckpointable = true // override to set this to false to avoid checkpointing an RDD + var shouldCheckpoint = false // set to true when an RDD is marked for checkpointing + var isCheckpointInProgress = false // set to true when checkpointing is in progress + var isCheckpointed = false // set to true after checkpointing is completed + + var checkpointFile: String = null // set to the checkpoint file after checkpointing is completed + var checkpointRDD: RDD[T] = null // set to the HadoopRDD of the checkpoint file + var checkpointRDDSplits: Seq[Split] = null // set to the splits of the Hadoop RDD // Methods available on all RDDs: @@ -141,32 +162,94 @@ abstract class RDD[T: ClassManifest]( /** Get the RDD's current storage level, or StorageLevel.NONE if none is set. */ def getStorageLevel = storageLevel - private[spark] def checkpoint(level: StorageLevel = StorageLevel.MEMORY_AND_DISK_2): RDD[T] = { - if (!level.useDisk && level.replication < 2) { - throw new Exception("Cannot checkpoint without using disk or replication (level requested was " + level + ")") - } - - // This is a hack. Ideally this should re-use the code used by the CacheTracker - // to generate the key. 
- def getSplitKey(split: Split) = "rdd_%d_%d".format(this.id, split.index) - - persist(level) - sc.runJob(this, (iter: Iterator[T]) => {} ) - - val p = this.partitioner - - new BlockRDD[T](sc, splits.map(getSplitKey).toArray) { - override val partitioner = p + /** + * Mark this RDD for checkpointing. The RDD will be saved to a file inside `checkpointDir` + * (set using setCheckpointDir()) and all references to its parent RDDs will be removed. + * This is used to truncate very long lineages. In the current implementation, Spark will save + * this RDD to a file (using saveAsObjectFile()) after the first job using this RDD is done. + * Hence, it is strongly recommended to use checkpoint() on RDDs when + * (i) Checkpoint() is called before the any job has been executed on this RDD. + * (ii) This RDD has been made to persist in memory. Otherwise saving it on a file will + * require recomputation. + */ + protected[spark] def checkpoint() { + synchronized { + if (isCheckpointed || shouldCheckpoint || isCheckpointInProgress) { + // do nothing + } else if (isCheckpointable) { + shouldCheckpoint = true + } else { + throw new Exception(this + " cannot be checkpointed") + } } } - + + /** + * Performs the checkpointing of this RDD by saving this . It is called by the DAGScheduler after a job + * using this RDD has completed (therefore the RDD has been materialized and + * potentially stored in memory). In case this RDD is not marked for checkpointing, + * doCheckpoint() is called recursively on the parent RDDs. + */ + private[spark] def doCheckpoint() { + val startCheckpoint = synchronized { + if (isCheckpointable && shouldCheckpoint && !isCheckpointInProgress) { + isCheckpointInProgress = true + true + } else { + false + } + } + + if (startCheckpoint) { + val rdd = this + val env = SparkEnv.get + + // Spawn a new thread to do the checkpoint as it takes sometime to write the RDD to file + val th = new Thread() { + override def run() { + // Save the RDD to a file, create a new HadoopRDD from it, + // and change the dependencies from the original parents to the new RDD + SparkEnv.set(env) + rdd.checkpointFile = new Path(context.checkpointDir, "rdd-" + id).toString + rdd.saveAsObjectFile(checkpointFile) + rdd.synchronized { + rdd.checkpointRDD = context.objectFile[T](checkpointFile) + rdd.checkpointRDDSplits = rdd.checkpointRDD.splits + rdd.changeDependencies(rdd.checkpointRDD) + rdd.shouldCheckpoint = false + rdd.isCheckpointInProgress = false + rdd.isCheckpointed = true + } + } + } + th.start() + } else { + // Recursively call doCheckpoint() to perform checkpointing on parent RDD if they are marked + dependencies.foreach(_.rdd.doCheckpoint()) + } + } + + /** + * Changes the dependencies of this RDD from its original parents to the new [[spark.rdd.HadoopRDD]] + * (`newRDD`) created from the checkpoint file. This method must ensure that all references + * to the original parent RDDs must be removed to enable the parent RDDs to be garbage + * collected. Subclasses of RDD may override this method for implementing their own changing + * logic. See [[spark.rdd.UnionRDD]] and [[spark.rdd.ShuffledRDD]] to get a better idea. + */ + protected def changeDependencies(newRDD: RDD[_]) { + dependencies_ = List(new OneToOneDependency(newRDD)) + } + /** * Internal method to this RDD; will read from cache if applicable, or otherwise compute it. * This should ''not'' be called by users directly, but is available for implementors of custom * subclasses of RDD. 
*/ final def iterator(split: Split): Iterator[T] = { - if (storageLevel != StorageLevel.NONE) { + if (isCheckpointed) { + // ASSUMPTION: Checkpoint Hadoop RDD will have same number of splits as original + checkpointRDD.iterator(checkpointRDDSplits(split.index)) + } else if (storageLevel != StorageLevel.NONE) { SparkEnv.get.cacheTracker.getOrCompute[T](this, split, storageLevel) } else { compute(split) diff --git a/core/src/main/scala/spark/SparkContext.scala b/core/src/main/scala/spark/SparkContext.scala index 6b957a6356..79ceab5f4f 100644 --- a/core/src/main/scala/spark/SparkContext.scala +++ b/core/src/main/scala/spark/SparkContext.scala @@ -188,6 +188,8 @@ class SparkContext( private var dagScheduler = new DAGScheduler(taskScheduler) + private[spark] var checkpointDir: String = null + // Methods for creating RDDs /** Distribute a local Scala collection to form an RDD. */ @@ -519,6 +521,7 @@ class SparkContext( val start = System.nanoTime val result = dagScheduler.runJob(rdd, func, partitions, callSite, allowLocal) logInfo("Job finished: " + callSite + ", took " + (System.nanoTime - start) / 1e9 + " s") + rdd.doCheckpoint() result } @@ -575,6 +578,24 @@ class SparkContext( return f } + /** + * Set the directory under which RDDs are going to be checkpointed. This method will + * create this directory and will throw an exception of the path already exists (to avoid + * overwriting existing files may be overwritten). The directory will be deleted on exit + * if indicated. + */ + def setCheckpointDir(dir: String, deleteOnExit: Boolean = false) { + val path = new Path(dir) + val fs = path.getFileSystem(new Configuration()) + if (fs.exists(path)) { + throw new Exception("Checkpoint directory '" + path + "' already exists.") + } else { + fs.mkdirs(path) + if (deleteOnExit) fs.deleteOnExit(path) + } + checkpointDir = dir + } + /** Default level of parallelism to use when not given by user (e.g. 
for reduce tasks) */ def defaultParallelism: Int = taskScheduler.defaultParallelism diff --git a/core/src/main/scala/spark/rdd/BlockRDD.scala b/core/src/main/scala/spark/rdd/BlockRDD.scala index cb73976aed..f4c3f99011 100644 --- a/core/src/main/scala/spark/rdd/BlockRDD.scala +++ b/core/src/main/scala/spark/rdd/BlockRDD.scala @@ -14,7 +14,7 @@ private[spark] class BlockRDDSplit(val blockId: String, idx: Int) extends Split private[spark] class BlockRDD[T: ClassManifest](sc: SparkContext, @transient blockIds: Array[String]) - extends RDD[T](sc) { + extends RDD[T](sc, Nil) { @transient val splits_ = (0 until blockIds.size).map(i => { @@ -41,9 +41,12 @@ class BlockRDD[T: ClassManifest](sc: SparkContext, @transient blockIds: Array[St } } - override def preferredLocations(split: Split) = - locations_(split.asInstanceOf[BlockRDDSplit].blockId) - - override val dependencies: List[Dependency[_]] = Nil + override def preferredLocations(split: Split) = { + if (isCheckpointed) { + checkpointRDD.preferredLocations(split) + } else { + locations_(split.asInstanceOf[BlockRDDSplit].blockId) + } + } } diff --git a/core/src/main/scala/spark/rdd/CartesianRDD.scala b/core/src/main/scala/spark/rdd/CartesianRDD.scala index c97b835630..458ad38d55 100644 --- a/core/src/main/scala/spark/rdd/CartesianRDD.scala +++ b/core/src/main/scala/spark/rdd/CartesianRDD.scala @@ -1,9 +1,6 @@ package spark.rdd -import spark.NarrowDependency -import spark.RDD -import spark.SparkContext -import spark.Split +import spark._ import java.lang.ref.WeakReference private[spark] @@ -14,19 +11,15 @@ class CartesianSplit(idx: Int, val s1: Split, val s2: Split) extends Split with private[spark] class CartesianRDD[T: ClassManifest, U:ClassManifest]( sc: SparkContext, - rdd1_ : WeakReference[RDD[T]], - rdd2_ : WeakReference[RDD[U]]) - extends RDD[Pair[T, U]](sc) + var rdd1 : RDD[T], + var rdd2 : RDD[U]) + extends RDD[Pair[T, U]](sc, Nil) with Serializable { - def rdd1 = rdd1_.get - def rdd2 = rdd2_.get - val numSplitsInRdd2 = rdd2.splits.size - // TODO: make this null when finishing checkpoint @transient - val splits_ = { + var splits_ = { // create the cross product split val array = new Array[Split](rdd1.splits.size * rdd2.splits.size) for (s1 <- rdd1.splits; s2 <- rdd2.splits) { @@ -36,12 +29,15 @@ class CartesianRDD[T: ClassManifest, U:ClassManifest]( array } - // TODO: make this return checkpoint Hadoop RDDs split when checkpointed override def splits = splits_ override def preferredLocations(split: Split) = { - val currSplit = split.asInstanceOf[CartesianSplit] - rdd1.preferredLocations(currSplit.s1) ++ rdd2.preferredLocations(currSplit.s2) + if (isCheckpointed) { + checkpointRDD.preferredLocations(split) + } else { + val currSplit = split.asInstanceOf[CartesianSplit] + rdd1.preferredLocations(currSplit.s1) ++ rdd2.preferredLocations(currSplit.s2) + } } override def compute(split: Split) = { @@ -49,8 +45,7 @@ class CartesianRDD[T: ClassManifest, U:ClassManifest]( for (x <- rdd1.iterator(currSplit.s1); y <- rdd2.iterator(currSplit.s2)) yield (x, y) } - // TODO: make this null when finishing checkpoint - var deps = List( + var deps_ = List( new NarrowDependency(rdd1) { def getParents(id: Int): Seq[Int] = List(id / numSplitsInRdd2) }, @@ -59,5 +54,12 @@ class CartesianRDD[T: ClassManifest, U:ClassManifest]( } ) - override def dependencies = deps + override def dependencies = deps_ + + override protected def changeDependencies(newRDD: RDD[_]) { + deps_ = List(new OneToOneDependency(newRDD.asInstanceOf[RDD[Any]])) + splits_ = newRDD.splits + 
rdd1 = null + rdd2 = null + } } diff --git a/core/src/main/scala/spark/rdd/CoGroupedRDD.scala b/core/src/main/scala/spark/rdd/CoGroupedRDD.scala index af54ac2fa0..a313ebcbe8 100644 --- a/core/src/main/scala/spark/rdd/CoGroupedRDD.scala +++ b/core/src/main/scala/spark/rdd/CoGroupedRDD.scala @@ -30,14 +30,13 @@ private[spark] class CoGroupAggregator { (b1, b2) => b1 ++ b2 }) with Serializable -class CoGroupedRDD[K](@transient rdds: Seq[RDD[(_, _)]], part: Partitioner) +class CoGroupedRDD[K](@transient var rdds: Seq[RDD[(_, _)]], part: Partitioner) extends RDD[(K, Seq[Seq[_]])](rdds.head.context, Nil) with Logging { val aggr = new CoGroupAggregator - // TODO: make this null when finishing checkpoint @transient - var deps = { + var deps_ = { val deps = new ArrayBuffer[Dependency[_]] for ((rdd, index) <- rdds.zipWithIndex) { val mapSideCombinedRDD = rdd.mapPartitions(aggr.combineValuesByKey(_), true) @@ -52,11 +51,10 @@ class CoGroupedRDD[K](@transient rdds: Seq[RDD[(_, _)]], part: Partitioner) deps.toList } - override def dependencies = deps + override def dependencies = deps_ - // TODO: make this null when finishing checkpoint @transient - val splits_ : Array[Split] = { + var splits_ : Array[Split] = { val firstRdd = rdds.head val array = new Array[Split](part.numPartitions) for (i <- 0 until array.size) { @@ -72,13 +70,10 @@ class CoGroupedRDD[K](@transient rdds: Seq[RDD[(_, _)]], part: Partitioner) array } - // TODO: make this return checkpoint Hadoop RDDs split when checkpointed override def splits = splits_ override val partitioner = Some(part) - override def preferredLocations(s: Split) = Nil - override def compute(s: Split): Iterator[(K, Seq[Seq[_]])] = { val split = s.asInstanceOf[CoGroupSplit] val numRdds = split.deps.size @@ -106,4 +101,10 @@ class CoGroupedRDD[K](@transient rdds: Seq[RDD[(_, _)]], part: Partitioner) } map.iterator } + + override protected def changeDependencies(newRDD: RDD[_]) { + deps_ = List(new OneToOneDependency(newRDD.asInstanceOf[RDD[Any]])) + splits_ = newRDD.splits + rdds = null + } } diff --git a/core/src/main/scala/spark/rdd/CoalescedRDD.scala b/core/src/main/scala/spark/rdd/CoalescedRDD.scala index 573acf8893..5b5f72ddeb 100644 --- a/core/src/main/scala/spark/rdd/CoalescedRDD.scala +++ b/core/src/main/scala/spark/rdd/CoalescedRDD.scala @@ -1,8 +1,7 @@ package spark.rdd -import spark.NarrowDependency -import spark.RDD -import spark.Split +import spark._ +import java.lang.ref.WeakReference private class CoalescedRDDSplit(val index: Int, val parents: Array[Split]) extends Split @@ -15,13 +14,12 @@ private class CoalescedRDDSplit(val index: Int, val parents: Array[Split]) exten * or to avoid having a large number of small tasks when processing a directory with many files. 
*/ class CoalescedRDD[T: ClassManifest]( - @transient prev: RDD[T], // TODO: Make this a weak reference + var prev: RDD[T], maxPartitions: Int) extends RDD[T](prev.context, Nil) { // Nil, so the dependencies_ var does not refer to parent RDDs - // TODO: make this null when finishing checkpoint - @transient val splits_ : Array[Split] = { - val prevSplits = firstParent[T].splits + @transient var splits_ : Array[Split] = { + val prevSplits = prev.splits if (prevSplits.length < maxPartitions) { prevSplits.zipWithIndex.map{ case (s, idx) => new CoalescedRDDSplit(idx, Array(s)) } } else { @@ -33,7 +31,6 @@ class CoalescedRDD[T: ClassManifest]( } } - // TODO: make this return checkpoint Hadoop RDDs split when checkpointed override def splits = splits_ override def compute(split: Split): Iterator[T] = { @@ -42,13 +39,18 @@ class CoalescedRDD[T: ClassManifest]( } } - // TODO: make this null when finishing checkpoint - var deps = List( - new NarrowDependency(firstParent) { + var deps_ : List[Dependency[_]] = List( + new NarrowDependency(prev) { def getParents(id: Int): Seq[Int] = splits(id).asInstanceOf[CoalescedRDDSplit].parents.map(_.index) } ) - override def dependencies = deps + override def dependencies = deps_ + + override protected def changeDependencies(newRDD: RDD[_]) { + deps_ = List(new OneToOneDependency(newRDD)) + splits_ = newRDD.splits + prev = null + } } diff --git a/core/src/main/scala/spark/rdd/FilteredRDD.scala b/core/src/main/scala/spark/rdd/FilteredRDD.scala index cc2a3acd3a..1370cf6faf 100644 --- a/core/src/main/scala/spark/rdd/FilteredRDD.scala +++ b/core/src/main/scala/spark/rdd/FilteredRDD.scala @@ -7,7 +7,7 @@ import java.lang.ref.WeakReference private[spark] class FilteredRDD[T: ClassManifest]( - @transient prev: WeakReference[RDD[T]], + prev: WeakReference[RDD[T]], f: T => Boolean) extends RDD[T](prev.get) { diff --git a/core/src/main/scala/spark/rdd/FlatMappedRDD.scala b/core/src/main/scala/spark/rdd/FlatMappedRDD.scala index 34bd784c13..6b2cc67568 100644 --- a/core/src/main/scala/spark/rdd/FlatMappedRDD.scala +++ b/core/src/main/scala/spark/rdd/FlatMappedRDD.scala @@ -7,7 +7,7 @@ import java.lang.ref.WeakReference private[spark] class FlatMappedRDD[U: ClassManifest, T: ClassManifest]( - @transient prev: WeakReference[RDD[T]], + prev: WeakReference[RDD[T]], f: T => TraversableOnce[U]) extends RDD[U](prev.get) { diff --git a/core/src/main/scala/spark/rdd/GlommedRDD.scala b/core/src/main/scala/spark/rdd/GlommedRDD.scala index 9321e89dcd..0f0b6ab0ff 100644 --- a/core/src/main/scala/spark/rdd/GlommedRDD.scala +++ b/core/src/main/scala/spark/rdd/GlommedRDD.scala @@ -6,7 +6,7 @@ import spark.Split import java.lang.ref.WeakReference private[spark] -class GlommedRDD[T: ClassManifest](@transient prev: WeakReference[RDD[T]]) +class GlommedRDD[T: ClassManifest](prev: WeakReference[RDD[T]]) extends RDD[Array[T]](prev.get) { override def splits = firstParent[T].splits override def compute(split: Split) = Array(firstParent[T].iterator(split).toArray).iterator diff --git a/core/src/main/scala/spark/rdd/HadoopRDD.scala b/core/src/main/scala/spark/rdd/HadoopRDD.scala index a12531ea89..19ed56d9c0 100644 --- a/core/src/main/scala/spark/rdd/HadoopRDD.scala +++ b/core/src/main/scala/spark/rdd/HadoopRDD.scala @@ -115,4 +115,6 @@ class HadoopRDD[K, V]( val hadoopSplit = split.asInstanceOf[HadoopSplit] hadoopSplit.inputSplit.value.getLocations.filter(_ != "localhost") } + + override val isCheckpointable = false } diff --git a/core/src/main/scala/spark/rdd/MapPartitionsRDD.scala 
b/core/src/main/scala/spark/rdd/MapPartitionsRDD.scala index bad872c430..b04f56cfcc 100644 --- a/core/src/main/scala/spark/rdd/MapPartitionsRDD.scala +++ b/core/src/main/scala/spark/rdd/MapPartitionsRDD.scala @@ -7,7 +7,7 @@ import java.lang.ref.WeakReference private[spark] class MapPartitionsRDD[U: ClassManifest, T: ClassManifest]( - @transient prev: WeakReference[RDD[T]], + prev: WeakReference[RDD[T]], f: Iterator[T] => Iterator[U], preservesPartitioning: Boolean = false) extends RDD[U](prev.get) { diff --git a/core/src/main/scala/spark/rdd/MapPartitionsWithSplitRDD.scala b/core/src/main/scala/spark/rdd/MapPartitionsWithSplitRDD.scala index d7b238b05d..7a4b6ffb03 100644 --- a/core/src/main/scala/spark/rdd/MapPartitionsWithSplitRDD.scala +++ b/core/src/main/scala/spark/rdd/MapPartitionsWithSplitRDD.scala @@ -12,7 +12,7 @@ import java.lang.ref.WeakReference */ private[spark] class MapPartitionsWithSplitRDD[U: ClassManifest, T: ClassManifest]( - @transient prev: WeakReference[RDD[T]], + prev: WeakReference[RDD[T]], f: (Int, Iterator[T]) => Iterator[U]) extends RDD[U](prev.get) { diff --git a/core/src/main/scala/spark/rdd/MappedRDD.scala b/core/src/main/scala/spark/rdd/MappedRDD.scala index 126c6f332b..8fa1872e0a 100644 --- a/core/src/main/scala/spark/rdd/MappedRDD.scala +++ b/core/src/main/scala/spark/rdd/MappedRDD.scala @@ -7,7 +7,7 @@ import java.lang.ref.WeakReference private[spark] class MappedRDD[U: ClassManifest, T: ClassManifest]( - @transient prev: WeakReference[RDD[T]], + prev: WeakReference[RDD[T]], f: T => U) extends RDD[U](prev.get) { diff --git a/core/src/main/scala/spark/rdd/NewHadoopRDD.scala b/core/src/main/scala/spark/rdd/NewHadoopRDD.scala index c12df5839e..2875abb2db 100644 --- a/core/src/main/scala/spark/rdd/NewHadoopRDD.scala +++ b/core/src/main/scala/spark/rdd/NewHadoopRDD.scala @@ -93,4 +93,6 @@ class NewHadoopRDD[K, V]( val theSplit = split.asInstanceOf[NewHadoopSplit] theSplit.serializableHadoopSplit.value.getLocations.filter(_ != "localhost") } + + override val isCheckpointable = false } diff --git a/core/src/main/scala/spark/rdd/PipedRDD.scala b/core/src/main/scala/spark/rdd/PipedRDD.scala index d54579d6d1..d9293a9d1a 100644 --- a/core/src/main/scala/spark/rdd/PipedRDD.scala +++ b/core/src/main/scala/spark/rdd/PipedRDD.scala @@ -12,6 +12,7 @@ import spark.OneToOneDependency import spark.RDD import spark.SparkEnv import spark.Split +import java.lang.ref.WeakReference /** @@ -19,16 +20,16 @@ import spark.Split * (printing them one per line) and returns the output as a collection of strings. */ class PipedRDD[T: ClassManifest]( - @transient prev: RDD[T], + prev: WeakReference[RDD[T]], command: Seq[String], envVars: Map[String, String]) - extends RDD[String](prev) { + extends RDD[String](prev.get) { - def this(@transient prev: RDD[T], command: Seq[String]) = this(prev, command, Map()) + def this(prev: WeakReference[RDD[T]], command: Seq[String]) = this(prev, command, Map()) // Similar to Runtime.exec(), if we are given a single string, split it into words // using a standard StringTokenizer (i.e. 
by spaces) - def this(@transient prev: RDD[T], command: String) = this(prev, PipedRDD.tokenize(command)) + def this(prev: WeakReference[RDD[T]], command: String) = this(prev, PipedRDD.tokenize(command)) override def splits = firstParent[T].splits diff --git a/core/src/main/scala/spark/rdd/SampledRDD.scala b/core/src/main/scala/spark/rdd/SampledRDD.scala index 00b521b130..f273f257f8 100644 --- a/core/src/main/scala/spark/rdd/SampledRDD.scala +++ b/core/src/main/scala/spark/rdd/SampledRDD.scala @@ -15,7 +15,7 @@ class SampledRDDSplit(val prev: Split, val seed: Int) extends Split with Seriali } class SampledRDD[T: ClassManifest]( - @transient prev: WeakReference[RDD[T]], + prev: WeakReference[RDD[T]], withReplacement: Boolean, frac: Double, seed: Int) diff --git a/core/src/main/scala/spark/rdd/ShuffledRDD.scala b/core/src/main/scala/spark/rdd/ShuffledRDD.scala index 62867dab4f..b7d843c26d 100644 --- a/core/src/main/scala/spark/rdd/ShuffledRDD.scala +++ b/core/src/main/scala/spark/rdd/ShuffledRDD.scala @@ -31,11 +31,6 @@ class ShuffledRDD[K, V]( override def splits = splits_ - override def preferredLocations(split: Split) = Nil - - //val dep = new ShuffleDependency(parent, part) - //override val dependencies = List(dep) - override def compute(split: Split): Iterator[(K, V)] = { val shuffledId = dependencies.head.asInstanceOf[ShuffleDependency[K, V]].shuffleId SparkEnv.get.shuffleFetcher.fetch[K, V](shuffledId, split.index) diff --git a/core/src/main/scala/spark/rdd/UnionRDD.scala b/core/src/main/scala/spark/rdd/UnionRDD.scala index 0a61a2d1f5..643a174160 100644 --- a/core/src/main/scala/spark/rdd/UnionRDD.scala +++ b/core/src/main/scala/spark/rdd/UnionRDD.scala @@ -2,11 +2,7 @@ package spark.rdd import scala.collection.mutable.ArrayBuffer -import spark.Dependency -import spark.RangeDependency -import spark.RDD -import spark.SparkContext -import spark.Split +import spark._ import java.lang.ref.WeakReference private[spark] class UnionSplit[T: ClassManifest]( @@ -23,12 +19,11 @@ private[spark] class UnionSplit[T: ClassManifest]( class UnionRDD[T: ClassManifest]( sc: SparkContext, - @transient rdds: Seq[RDD[T]]) // TODO: Make this a weak reference + @transient var rdds: Seq[RDD[T]]) extends RDD[T](sc, Nil) { // Nil, so the dependencies_ var does not refer to parent RDDs - // TODO: make this null when finishing checkpoint @transient - val splits_ : Array[Split] = { + var splits_ : Array[Split] = { val array = new Array[Split](rdds.map(_.splits.size).sum) var pos = 0 for (rdd <- rdds; split <- rdd.splits) { @@ -38,11 +33,9 @@ class UnionRDD[T: ClassManifest]( array } - // TODO: make this return checkpoint Hadoop RDDs split when checkpointed override def splits = splits_ - // TODO: make this null when finishing checkpoint - @transient var deps = { + @transient var deps_ = { val deps = new ArrayBuffer[Dependency[_]] var pos = 0 for (rdd <- rdds) { @@ -52,10 +45,21 @@ class UnionRDD[T: ClassManifest]( deps.toList } - override def dependencies = deps + override def dependencies = deps_ override def compute(s: Split): Iterator[T] = s.asInstanceOf[UnionSplit[T]].iterator() - override def preferredLocations(s: Split): Seq[String] = - s.asInstanceOf[UnionSplit[T]].preferredLocations() + override def preferredLocations(s: Split): Seq[String] = { + if (isCheckpointed) { + checkpointRDD.preferredLocations(s) + } else { + s.asInstanceOf[UnionSplit[T]].preferredLocations() + } + } + + override protected def changeDependencies(newRDD: RDD[_]) { + deps_ = List(new OneToOneDependency(newRDD)) + splits_ = 
newRDD.splits + rdds = null + } } diff --git a/core/src/test/scala/spark/CheckpointSuite.scala b/core/src/test/scala/spark/CheckpointSuite.scala new file mode 100644 index 0000000000..0e5ca7dc21 --- /dev/null +++ b/core/src/test/scala/spark/CheckpointSuite.scala @@ -0,0 +1,116 @@ +package spark + +import org.scalatest.{BeforeAndAfter, FunSuite} +import java.io.File +import rdd.{BlockRDD, CoalescedRDD, MapPartitionsWithSplitRDD} +import spark.SparkContext._ +import storage.StorageLevel + +class CheckpointSuite extends FunSuite with BeforeAndAfter { + + var sc: SparkContext = _ + var checkpointDir: File = _ + + before { + checkpointDir = File.createTempFile("temp", "") + checkpointDir.delete() + + sc = new SparkContext("local", "test") + sc.setCheckpointDir(checkpointDir.toString) + } + + after { + if (sc != null) { + sc.stop() + sc = null + } + // To avoid Akka rebinding to the same port, since it doesn't unbind immediately on shutdown + System.clearProperty("spark.master.port") + + if (checkpointDir != null) { + checkpointDir.delete() + } + } + + test("ParallelCollection") { + val parCollection = sc.makeRDD(1 to 4) + parCollection.checkpoint() + assert(parCollection.dependencies === Nil) + val result = parCollection.collect() + sleep(parCollection) // slightly extra time as loading classes for the first can take some time + assert(sc.objectFile[Int](parCollection.checkpointFile).collect() === result) + assert(parCollection.dependencies != Nil) + assert(parCollection.collect() === result) + } + + test("BlockRDD") { + val blockId = "id" + val blockManager = SparkEnv.get.blockManager + blockManager.putSingle(blockId, "test", StorageLevel.MEMORY_ONLY) + val blockRDD = new BlockRDD[String](sc, Array(blockId)) + blockRDD.checkpoint() + val result = blockRDD.collect() + sleep(blockRDD) + assert(sc.objectFile[String](blockRDD.checkpointFile).collect() === result) + assert(blockRDD.dependencies != Nil) + assert(blockRDD.collect() === result) + } + + test("RDDs with one-to-one dependencies") { + testCheckpointing(_.map(x => x.toString)) + testCheckpointing(_.flatMap(x => 1 to x)) + testCheckpointing(_.filter(_ % 2 == 0)) + testCheckpointing(_.sample(false, 0.5, 0)) + testCheckpointing(_.glom()) + testCheckpointing(_.mapPartitions(_.map(_.toString))) + testCheckpointing(r => new MapPartitionsWithSplitRDD(r, + (i: Int, iter: Iterator[Int]) => iter.map(_.toString) )) + testCheckpointing(_.map(x => (x % 2, 1)).reduceByKey(_ + _).mapValues(_.toString), 1000) + testCheckpointing(_.map(x => (x % 2, 1)).reduceByKey(_ + _).flatMapValues(x => 1 to x), 1000) + testCheckpointing(_.pipe(Seq("cat"))) + } + + test("ShuffledRDD") { + testCheckpointing(_.map(x => (x % 2, 1)).reduceByKey(_ + _)) + } + + test("UnionRDD") { + testCheckpointing(_.union(sc.makeRDD(5 to 6, 4))) + } + + test("CartesianRDD") { + testCheckpointing(_.cartesian(sc.makeRDD(5 to 6, 4)), 1000) + } + + test("CoalescedRDD") { + testCheckpointing(new CoalescedRDD(_, 2)) + } + + test("CoGroupedRDD") { + val rdd2 = sc.makeRDD(5 to 6, 4).map(x => (x % 2, 1)) + testCheckpointing(rdd1 => rdd1.map(x => (x % 2, 1)).cogroup(rdd2)) + testCheckpointing(rdd1 => rdd1.map(x => (x % 2, x)).join(rdd2)) + } + + def testCheckpointing[U: ClassManifest](op: (RDD[Int]) => RDD[U], sleepTime: Long = 500) { + val parCollection = sc.makeRDD(1 to 4, 4) + val operatedRDD = op(parCollection) + operatedRDD.checkpoint() + val parentRDD = operatedRDD.dependencies.head.rdd + val result = operatedRDD.collect() + sleep(operatedRDD) + //println(parentRDD + ", " + 
operatedRDD.dependencies.head.rdd ) + assert(sc.objectFile[U](operatedRDD.checkpointFile).collect() === result) + assert(operatedRDD.dependencies.head.rdd != parentRDD) + assert(operatedRDD.collect() === result) + } + + def sleep(rdd: RDD[_]) { + val startTime = System.currentTimeMillis() + val maxWaitTime = 5000 + while(rdd.isCheckpointed == false && System.currentTimeMillis() < startTime + maxWaitTime) { + Thread.sleep(50) + } + assert(rdd.isCheckpointed === true, "Waiting for checkpoint to complete took more than " + maxWaitTime + " ms") + } +} diff --git a/core/src/test/scala/spark/RDDSuite.scala b/core/src/test/scala/spark/RDDSuite.scala index 37a0ff0947..8ac7c8451a 100644 --- a/core/src/test/scala/spark/RDDSuite.scala +++ b/core/src/test/scala/spark/RDDSuite.scala @@ -19,7 +19,7 @@ class RDDSuite extends FunSuite with BeforeAndAfter { // To avoid Akka rebinding to the same port, since it doesn't unbind immediately on shutdown System.clearProperty("spark.master.port") } - + test("basic operations") { sc = new SparkContext("local", "test") val nums = sc.makeRDD(Array(1, 2, 3, 4), 2) @@ -70,10 +70,23 @@ class RDDSuite extends FunSuite with BeforeAndAfter { assert(result.toSet === Set(("a", 6), ("b", 2), ("c", 5))) } - test("checkpointing") { + test("basic checkpointing") { + import java.io.File + val checkpointDir = File.createTempFile("temp", "") + checkpointDir.delete() + sc = new SparkContext("local", "test") - val rdd = sc.makeRDD(Array(1, 2, 3, 4), 2).flatMap(x => 1 to x).checkpoint() - assert(rdd.collect().toList === List(1, 1, 2, 1, 2, 3, 1, 2, 3, 4)) + sc.setCheckpointDir(checkpointDir.toString) + val parCollection = sc.makeRDD(1 to 4) + val flatMappedRDD = parCollection.flatMap(x => 1 to x) + flatMappedRDD.checkpoint() + assert(flatMappedRDD.dependencies.head.rdd == parCollection) + val result = flatMappedRDD.collect() + Thread.sleep(1000) + assert(flatMappedRDD.dependencies.head.rdd != parCollection) + assert(flatMappedRDD.collect() === result) + + checkpointDir.deleteOnExit() } test("basic caching") { @@ -94,8 +107,8 @@ class RDDSuite extends FunSuite with BeforeAndAfter { List(List(1, 2, 3, 4, 5), List(6, 7, 8, 9, 10))) // Check that the narrow dependency is also specified correctly - assert(coalesced1.dependencies.head.getParents(0).toList === List(0, 1, 2, 3, 4)) - assert(coalesced1.dependencies.head.getParents(1).toList === List(5, 6, 7, 8, 9)) + assert(coalesced1.dependencies.head.asInstanceOf[NarrowDependency[_]].getParents(0).toList === List(0, 1, 2, 3, 4)) + assert(coalesced1.dependencies.head.asInstanceOf[NarrowDependency[_]].getParents(1).toList === List(5, 6, 7, 8, 9)) val coalesced2 = new CoalescedRDD(data, 3) assert(coalesced2.collect().toList === (1 to 10).toList) From 34e569f40e184a6a4f21e9d79b0e8979d8f9541f Mon Sep 17 00:00:00 2001 From: Tathagata Das Date: Wed, 31 Oct 2012 00:56:40 -0700 Subject: [PATCH 093/226] Added 'synchronized' to RDD serialization to ensure checkpoint-related changes are reflected atomically in the task closure. Added tests to ensure that jobs running on an RDD on which checkpointing is in progress do not hurt the result of the job.
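The hunks below wrap RDD's writeObject and readObject in synchronized, so that checkpoint-related mutations (such as replacing an RDD's dependencies and splits once its checkpoint file has been written) are never observed half-applied by a task closure being serialized at the same time. The following is a minimal, self-contained sketch of the same pattern, assuming illustrative names (CheckpointedState, markCheckpointed) that do not exist in Spark:

import java.io.{ByteArrayOutputStream, IOException, ObjectOutputStream}

// Stand-in for an RDD whose fields are swapped by a checkpointing thread.
class CheckpointedState extends Serializable {
  var dependencies: List[String] = List("parent-rdd")
  var checkpointFile: String = null

  // Called by the checkpointing thread; takes the same lock as writeObject below.
  def markCheckpointed(file: String) {
    synchronized {
      checkpointFile = file
      dependencies = Nil
    }
  }

  @throws(classOf[IOException])
  private def writeObject(oos: ObjectOutputStream) {
    synchronized {
      // With the lock held, the serialized snapshot sees either the pre-checkpoint
      // values of both fields or the post-checkpoint values of both, never a mix.
      oos.defaultWriteObject()
    }
  }
}

object SerializationSketch extends App {
  val state = new CheckpointedState
  val oos = new ObjectOutputStream(new ByteArrayOutputStream())
  oos.writeObject(state) // consistent snapshot even if another thread calls markCheckpointed
  oos.close()
}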
--- core/src/main/scala/spark/RDD.scala | 18 ++++- .../main/scala/spark/rdd/ShuffledRDD.scala | 7 +- .../test/scala/spark/CheckpointSuite.scala | 71 ++++++++++++++++++- 3 files changed, 92 insertions(+), 4 deletions(-) diff --git a/core/src/main/scala/spark/RDD.scala b/core/src/main/scala/spark/RDD.scala index e272a0ede9..7b59a6f09e 100644 --- a/core/src/main/scala/spark/RDD.scala +++ b/core/src/main/scala/spark/RDD.scala @@ -1,8 +1,7 @@ package spark -import java.io.EOFException +import java.io.{ObjectOutputStream, IOException, EOFException, ObjectInputStream} import java.net.URL -import java.io.ObjectInputStream import java.util.concurrent.atomic.AtomicLong import java.util.Random import java.util.Date @@ -589,4 +588,19 @@ abstract class RDD[T: ClassManifest]( private[spark] def collectPartitions(): Array[Array[T]] = { sc.runJob(this, (iter: Iterator[T]) => iter.toArray) } + + @throws(classOf[IOException]) + private def writeObject(oos: ObjectOutputStream) { + synchronized { + oos.defaultWriteObject() + } + } + + @throws(classOf[IOException]) + private def readObject(ois: ObjectInputStream) { + synchronized { + ois.defaultReadObject() + } + } + } diff --git a/core/src/main/scala/spark/rdd/ShuffledRDD.scala b/core/src/main/scala/spark/rdd/ShuffledRDD.scala index b7d843c26d..31774585f4 100644 --- a/core/src/main/scala/spark/rdd/ShuffledRDD.scala +++ b/core/src/main/scala/spark/rdd/ShuffledRDD.scala @@ -27,7 +27,7 @@ class ShuffledRDD[K, V]( override val partitioner = Some(part) @transient - val splits_ = Array.tabulate[Split](part.numPartitions)(i => new ShuffledRDDSplit(i)) + var splits_ = Array.tabulate[Split](part.numPartitions)(i => new ShuffledRDDSplit(i)) override def splits = splits_ @@ -35,4 +35,9 @@ class ShuffledRDD[K, V]( val shuffledId = dependencies.head.asInstanceOf[ShuffleDependency[K, V]].shuffleId SparkEnv.get.shuffleFetcher.fetch[K, V](shuffledId, split.index) } + + override def changeDependencies(newRDD: RDD[_]) { + dependencies_ = Nil + splits_ = newRDD.splits + } } diff --git a/core/src/test/scala/spark/CheckpointSuite.scala b/core/src/test/scala/spark/CheckpointSuite.scala index 0e5ca7dc21..57dc43ddac 100644 --- a/core/src/test/scala/spark/CheckpointSuite.scala +++ b/core/src/test/scala/spark/CheckpointSuite.scala @@ -5,8 +5,10 @@ import java.io.File import rdd.{BlockRDD, CoalescedRDD, MapPartitionsWithSplitRDD} import spark.SparkContext._ import storage.StorageLevel +import java.util.concurrent.Semaphore -class CheckpointSuite extends FunSuite with BeforeAndAfter { +class CheckpointSuite extends FunSuite with BeforeAndAfter with Logging { + initLogging() var sc: SparkContext = _ var checkpointDir: File = _ @@ -92,6 +94,35 @@ class CheckpointSuite extends FunSuite with BeforeAndAfter { testCheckpointing(rdd1 => rdd1.map(x => (x % 2, x)).join(rdd2)) } + /** + * This test forces two ResultTasks of the same job to be launched before and after + * the checkpointing of job's RDD is completed. + */ + test("Threading - ResultTasks") { + val op1 = (parCollection: RDD[Int]) => { + parCollection.map(x => { println("1st map running on " + x); Thread.sleep(500); (x % 2, x) }) + } + val op2 = (firstRDD: RDD[(Int, Int)]) => { + firstRDD.map(x => { println("2nd map running on " + x); Thread.sleep(500); x }) + } + testThreading(op1, op2) + } + + /** + * This test forces two ShuffleMapTasks of the same job to be launched before and after + * the checkpointing of job's RDD is completed. 
+ */ + test("Threading - ShuffleMapTasks") { + val op1 = (parCollection: RDD[Int]) => { + parCollection.map(x => { println("1st map running on " + x); Thread.sleep(500); (x % 2, x) }) + } + val op2 = (firstRDD: RDD[(Int, Int)]) => { + firstRDD.groupByKey(2).map(x => { println("2nd map running on " + x); Thread.sleep(500); x }) + } + testThreading(op1, op2) + } + + def testCheckpointing[U: ClassManifest](op: (RDD[Int]) => RDD[U], sleepTime: Long = 500) { val parCollection = sc.makeRDD(1 to 4, 4) val operatedRDD = op(parCollection) @@ -105,6 +136,44 @@ class CheckpointSuite extends FunSuite with BeforeAndAfter { assert(operatedRDD.collect() === result) } + def testThreading[U: ClassManifest, V: ClassManifest](op1: (RDD[Int]) => RDD[U], op2: (RDD[U]) => RDD[V]) { + + val parCollection = sc.makeRDD(1 to 2, 2) + + // This is the RDD that is to be checkpointed + val firstRDD = op1(parCollection) + val parentRDD = firstRDD.dependencies.head.rdd + firstRDD.checkpoint() + + // This the RDD that uses firstRDD. This is designed to launch a + // ShuffleMapTask that uses firstRDD. + val secondRDD = op2(firstRDD) + + // Starting first job, to initiate the checkpointing + logInfo("\nLaunching 1st job to initiate checkpointing\n") + firstRDD.collect() + + // Checkpointing has started but not completed yet + Thread.sleep(100) + assert(firstRDD.dependencies.head.rdd === parentRDD) + + // Starting second job; first task of this job will be + // launched _before_ firstRDD is marked as checkpointed + // and the second task will be launched _after_ firstRDD + // is marked as checkpointed + logInfo("\nLaunching 2nd job that is designed to launch tasks " + + "before and after checkpointing is complete\n") + val result = secondRDD.collect() + + // Check whether firstRDD has been successfully checkpointed + assert(firstRDD.dependencies.head.rdd != parentRDD) + + logInfo("\nRecomputing 2nd job to verify the results of the previous computation\n") + // Check whether the result in the previous job was correct or not + val correctResult = secondRDD.collect() + assert(result === correctResult) + } + def sleep(rdd: RDD[_]) { val startTime = System.currentTimeMillis() val maxWaitTime = 5000 From 3fb5c9ee24302edf02df130bd0dfd0463cf6c0a4 Mon Sep 17 00:00:00 2001 From: Tathagata Das Date: Fri, 2 Nov 2012 12:12:25 -0700 Subject: [PATCH 094/226] Fixed serialization bug in countByWindow, added countByKey and countByKeyAndWindow, and added testcases for them. 
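In the DStream.scala hunk below, countByWindow is expressed as map(_ => 1) followed by reduceByWindow(_ + _, _ - _, windowTime, slideTime): each window's count is maintained incrementally by adding the counts of batches entering the window and subtracting the counts of batches leaving it, rather than recomputing the window from scratch. A plain-Scala sketch of that add/subtract idea (no Spark, illustrative names only), run over the same input used by the new countByWindow testcase, looks like this:

// Batches of elements, as in the countByWindow testcase added in this patch.
object SlidingCountSketch extends App {
  val batches: Seq[Seq[Int]] = Seq(Seq(1), Seq(1), Seq(1, 2), Seq(0), Seq(), Seq())
  val windowSize = 2 // window of 2 batches, sliding by 1 batch

  val countsPerBatch = batches.map(_.size)
  var windowCount = 0

  val windowedCounts = countsPerBatch.zipWithIndex.map { case (newCount, i) =>
    windowCount += newCount // "add" the count of the batch entering the window
    if (i >= windowSize) {
      windowCount -= countsPerBatch(i - windowSize) // "subtract" the batch leaving it
    }
    windowCount
  }

  // Prints 1, 2, 3, 3, 1, 0, matching the expected output of the countByWindow testcase below.
  println(windowedCounts.mkString(", "))
}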
--- .../main/scala/spark/streaming/DStream.scala | 4 +- .../streaming/PairDStreamFunctions.scala | 23 ++- .../spark/streaming/InputStreamsSuite.scala | 18 ++ .../scala/spark/streaming/TestSuiteBase.scala | 8 +- .../streaming/WindowOperationsSuite.scala | 181 ++++++++++++++---- 5 files changed, 186 insertions(+), 48 deletions(-) diff --git a/streaming/src/main/scala/spark/streaming/DStream.scala b/streaming/src/main/scala/spark/streaming/DStream.scala index 175ebf104f..a4921bb1a2 100644 --- a/streaming/src/main/scala/spark/streaming/DStream.scala +++ b/streaming/src/main/scala/spark/streaming/DStream.scala @@ -338,9 +338,7 @@ extends Serializable with Logging { } def countByWindow(windowTime: Time, slideTime: Time): DStream[Int] = { - def add(v1: Int, v2: Int) = (v1 + v2) - def subtract(v1: Int, v2: Int) = (v1 - v2) - this.map(_ => 1).reduceByWindow(add _, subtract _, windowTime, slideTime) + this.map(_ => 1).reduceByWindow(_ + _, _ - _, windowTime, slideTime) } def union(that: DStream[T]): DStream[T] = new UnionDStream[T](Array(this, that)) diff --git a/streaming/src/main/scala/spark/streaming/PairDStreamFunctions.scala b/streaming/src/main/scala/spark/streaming/PairDStreamFunctions.scala index f88247708b..e09d27d34f 100644 --- a/streaming/src/main/scala/spark/streaming/PairDStreamFunctions.scala +++ b/streaming/src/main/scala/spark/streaming/PairDStreamFunctions.scala @@ -53,14 +53,18 @@ extends Serializable { combineByKey((v: V) => v, cleanedReduceFunc, cleanedReduceFunc, partitioner) } - private def combineByKey[C: ClassManifest]( + def combineByKey[C: ClassManifest]( createCombiner: V => C, mergeValue: (C, V) => C, mergeCombiner: (C, C) => C, - partitioner: Partitioner) : ShuffledDStream[K, V, C] = { + partitioner: Partitioner) : DStream[(K, C)] = { new ShuffledDStream[K, V, C](self, createCombiner, mergeValue, mergeCombiner, partitioner) } + def countByKey(numPartitions: Int = self.ssc.sc.defaultParallelism): DStream[(K, Long)] = { + self.map(x => (x._1, 1L)).reduceByKey((x: Long, y: Long) => x + y, numPartitions) + } + def groupByKeyAndWindow(windowTime: Time, slideTime: Time): DStream[(K, Seq[V])] = { groupByKeyAndWindow(windowTime, slideTime, defaultPartitioner()) } @@ -157,6 +161,21 @@ extends Serializable { self, cleanedReduceFunc, cleanedInvReduceFunc, windowTime, slideTime, partitioner) } + def countByKeyAndWindow( + windowTime: Time, + slideTime: Time, + numPartitions: Int = self.ssc.sc.defaultParallelism + ): DStream[(K, Long)] = { + + self.map(x => (x._1, 1L)).reduceByKeyAndWindow( + (x: Long, y: Long) => x + y, + (x: Long, y: Long) => x - y, + windowTime, + slideTime, + numPartitions + ) + } + // TODO: // // diff --git a/streaming/src/test/scala/spark/streaming/InputStreamsSuite.scala b/streaming/src/test/scala/spark/streaming/InputStreamsSuite.scala index 6f6b18a790..c17254b809 100644 --- a/streaming/src/test/scala/spark/streaming/InputStreamsSuite.scala +++ b/streaming/src/test/scala/spark/streaming/InputStreamsSuite.scala @@ -51,6 +51,15 @@ class InputStreamsSuite extends TestSuiteBase { ssc.stop() // Verify whether data received by Spark Streaming was as expected + logInfo("--------------------------------") + logInfo("output.size = " + outputBuffer.size) + logInfo("output") + outputBuffer.foreach(x => logInfo("[" + x.mkString(",") + "]")) + logInfo("expected output.size = " + expectedOutput.size) + logInfo("expected output") + expectedOutput.foreach(x => logInfo("[" + x.mkString(",") + "]")) + logInfo("--------------------------------") + assert(outputBuffer.size === 
expectedOutput.size) for (i <- 0 until outputBuffer.size) { assert(outputBuffer(i).size === 1) @@ -101,6 +110,15 @@ class InputStreamsSuite extends TestSuiteBase { ssc.stop() // Verify whether data received by Spark Streaming was as expected + logInfo("--------------------------------") + logInfo("output.size = " + outputBuffer.size) + logInfo("output") + outputBuffer.foreach(x => logInfo("[" + x.mkString(",") + "]")) + logInfo("expected output.size = " + expectedOutput.size) + logInfo("expected output") + expectedOutput.foreach(x => logInfo("[" + x.mkString(",") + "]")) + logInfo("--------------------------------") + assert(outputBuffer.size === expectedOutput.size) for (i <- 0 until outputBuffer.size) { assert(outputBuffer(i).size === 1) diff --git a/streaming/src/test/scala/spark/streaming/TestSuiteBase.scala b/streaming/src/test/scala/spark/streaming/TestSuiteBase.scala index c1b7772e7b..c9bc454f91 100644 --- a/streaming/src/test/scala/spark/streaming/TestSuiteBase.scala +++ b/streaming/src/test/scala/spark/streaming/TestSuiteBase.scala @@ -16,13 +16,14 @@ class TestInputStream[T: ClassManifest](ssc_ : StreamingContext, input: Seq[Seq[ def compute(validTime: Time): Option[RDD[T]] = { logInfo("Computing RDD for time " + validTime) - val rdd = if (currentIndex < input.size) { - ssc.sc.makeRDD(input(currentIndex), numPartitions) + val index = ((validTime - zeroTime) / slideTime - 1).toInt + val rdd = if (index < input.size) { + ssc.sc.makeRDD(input(index), numPartitions) } else { ssc.sc.makeRDD(Seq[T](), numPartitions) } logInfo("Created RDD " + rdd.id) - currentIndex += 1 + //currentIndex += 1 Some(rdd) } } @@ -96,7 +97,6 @@ trait TestSuiteBase extends FunSuite with Logging { ssc } - def runStreams[V: ClassManifest]( ssc: StreamingContext, numBatches: Int, diff --git a/streaming/src/test/scala/spark/streaming/WindowOperationsSuite.scala b/streaming/src/test/scala/spark/streaming/WindowOperationsSuite.scala index 90d67844bb..d7d8d5bd36 100644 --- a/streaming/src/test/scala/spark/streaming/WindowOperationsSuite.scala +++ b/streaming/src/test/scala/spark/streaming/WindowOperationsSuite.scala @@ -1,6 +1,7 @@ package spark.streaming import spark.streaming.StreamingContext._ +import collection.mutable.ArrayBuffer class WindowOperationsSuite extends TestSuiteBase { @@ -8,6 +9,8 @@ class WindowOperationsSuite extends TestSuiteBase { override def maxWaitTimeMillis() = 20000 + override def batchDuration() = Seconds(1) + val largerSlideInput = Seq( Seq(("a", 1)), Seq(("a", 2)), // 1st window from here @@ -19,7 +22,7 @@ class WindowOperationsSuite extends TestSuiteBase { Seq() // 4th window from here ) - val largerSlideOutput = Seq( + val largerSlideReduceOutput = Seq( Seq(("a", 3)), Seq(("a", 10)), Seq(("a", 18)), @@ -42,7 +45,23 @@ class WindowOperationsSuite extends TestSuiteBase { Seq() ) - val bigOutput = Seq( + val bigGroupByOutput = Seq( + Seq(("a", Seq(1))), + Seq(("a", Seq(1, 1)), ("b", Seq(1))), + Seq(("a", Seq(1, 1)), ("b", Seq(1, 1)), ("c", Seq(1))), + Seq(("a", Seq(1, 1)), ("b", Seq(1, 1)), ("c", Seq(1))), + Seq(("a", Seq(1, 1)), ("b", Seq(1))), + Seq(("a", Seq(1))), + Seq(("a", Seq(1))), + Seq(("a", Seq(1, 1)), ("b", Seq(1))), + Seq(("a", Seq(1, 1)), ("b", Seq(1, 1)), ("c", Seq(1))), + Seq(("a", Seq(1, 1)), ("b", Seq(1, 1)), ("c", Seq(1))), + Seq(("a", Seq(1, 1)), ("b", Seq(1))), + Seq(("a", Seq(1))) + ) + + + val bigReduceOutput = Seq( Seq(("a", 1)), Seq(("a", 2), ("b", 1)), Seq(("a", 2), ("b", 2), ("c", 1)), @@ -59,13 +78,14 @@ class WindowOperationsSuite extends TestSuiteBase { /* 
The output of the reduceByKeyAndWindow with inverse reduce function is - difference from the naive reduceByKeyAndWindow. Even if the count of a + different from the naive reduceByKeyAndWindow. Even if the count of a particular key is 0, the key does not get eliminated from the RDDs of ReducedWindowedDStream. This causes the number of keys in these RDDs to increase forever. A more generalized version that allows elimination of keys should be considered. */ - val bigOutputInv = Seq( + + val bigReduceInvOutput = Seq( Seq(("a", 1)), Seq(("a", 2), ("b", 1)), Seq(("a", 2), ("b", 2), ("c", 1)), @@ -80,38 +100,37 @@ class WindowOperationsSuite extends TestSuiteBase { Seq(("a", 1), ("b", 0), ("c", 0)) ) - def testReduceByKeyAndWindow( - name: String, - input: Seq[Seq[(String, Int)]], - expectedOutput: Seq[Seq[(String, Int)]], - windowTime: Time = batchDuration * 2, - slideTime: Time = batchDuration - ) { - test("reduceByKeyAndWindow - " + name) { - val numBatches = expectedOutput.size * (slideTime.millis / batchDuration.millis).toInt - val operation = (s: DStream[(String, Int)]) => { - s.reduceByKeyAndWindow(_ + _, windowTime, slideTime).persist() - } - testOperation(input, operation, expectedOutput, numBatches, true) - } - } + // Testing window operation - def testReduceByKeyAndWindowInv( - name: String, - input: Seq[Seq[(String, Int)]], - expectedOutput: Seq[Seq[(String, Int)]], - windowTime: Time = batchDuration * 2, - slideTime: Time = batchDuration - ) { - test("reduceByKeyAndWindowInv - " + name) { - val numBatches = expectedOutput.size * (slideTime.millis / batchDuration.millis).toInt - val operation = (s: DStream[(String, Int)]) => { - s.reduceByKeyAndWindow(_ + _, _ - _, windowTime, slideTime).persist() - } - testOperation(input, operation, expectedOutput, numBatches, true) - } - } + testWindow( + "basic window", + Seq( Seq(0), Seq(1), Seq(2), Seq(3), Seq(4), Seq(5)), + Seq( Seq(0), Seq(0, 1), Seq(1, 2), Seq(2, 3), Seq(3, 4), Seq(4, 5)) + ) + testWindow( + "tumbling window", + Seq( Seq(0), Seq(1), Seq(2), Seq(3), Seq(4), Seq(5)), + Seq( Seq(0, 1), Seq(2, 3), Seq(4, 5)), + Seconds(2), + Seconds(2) + ) + + testWindow( + "larger window", + Seq( Seq(0), Seq(1), Seq(2), Seq(3), Seq(4), Seq(5)), + Seq( Seq(0, 1), Seq(0, 1, 2, 3), Seq(2, 3, 4, 5), Seq(4, 5)), + Seconds(4), + Seconds(2) + ) + + testWindow( + "non-overlapping window", + Seq( Seq(0), Seq(1), Seq(2), Seq(3), Seq(4), Seq(5)), + Seq( Seq(1, 2), Seq(4, 5)), + Seconds(2), + Seconds(3) + ) // Testing naive reduceByKeyAndWindow (without invertible function) @@ -142,13 +161,12 @@ class WindowOperationsSuite extends TestSuiteBase { testReduceByKeyAndWindow( "larger slide time", largerSlideInput, - largerSlideOutput, + largerSlideReduceOutput, Seconds(4), Seconds(2) ) - testReduceByKeyAndWindow("big test", bigInput, bigOutput) - + testReduceByKeyAndWindow("big test", bigInput, bigReduceOutput) // Testing reduceByKeyAndWindow (with invertible reduce function) @@ -179,10 +197,95 @@ class WindowOperationsSuite extends TestSuiteBase { testReduceByKeyAndWindowInv( "larger slide time", largerSlideInput, - largerSlideOutput, + largerSlideReduceOutput, Seconds(4), Seconds(2) ) - testReduceByKeyAndWindowInv("big test", bigInput, bigOutputInv) + testReduceByKeyAndWindowInv("big test", bigInput, bigReduceInvOutput) + + test("groupByKeyAndWindow") { + val input = bigInput + val expectedOutput = bigGroupByOutput.map(_.map(x => (x._1, x._2.toSet))) + val windowTime = Seconds(2) + val slideTime = Seconds(1) + val numBatches = expectedOutput.size * 
(slideTime.millis / batchDuration.millis).toInt + val operation = (s: DStream[(String, Int)]) => { + s.groupByKeyAndWindow(windowTime, slideTime) + .map(x => (x._1, x._2.toSet)) + .persist() + } + testOperation(input, operation, expectedOutput, numBatches, true) + } + + test("countByWindow") { + val input = Seq(Seq(1), Seq(1), Seq(1, 2), Seq(0), Seq(), Seq() ) + val expectedOutput = Seq( Seq(1), Seq(2), Seq(3), Seq(3), Seq(1), Seq(0)) + val windowTime = Seconds(2) + val slideTime = Seconds(1) + val numBatches = expectedOutput.size * (slideTime.millis / batchDuration.millis).toInt + val operation = (s: DStream[Int]) => s.countByWindow(windowTime, slideTime) + testOperation(input, operation, expectedOutput, numBatches, true) + } + + test("countByKeyAndWindow") { + val input = Seq(Seq(("a", 1)), Seq(("b", 1), ("b", 2)), Seq(("a", 10), ("b", 20))) + val expectedOutput = Seq( Seq(("a", 1)), Seq(("a", 1), ("b", 2)), Seq(("a", 1), ("b", 3))) + val windowTime = Seconds(2) + val slideTime = Seconds(1) + val numBatches = expectedOutput.size * (slideTime.millis / batchDuration.millis).toInt + val operation = (s: DStream[(String, Int)]) => { + s.countByKeyAndWindow(windowTime, slideTime).map(x => (x._1, x._2.toInt)) + } + testOperation(input, operation, expectedOutput, numBatches, true) + } + + + // Helper functions + + def testWindow( + name: String, + input: Seq[Seq[Int]], + expectedOutput: Seq[Seq[Int]], + windowTime: Time = Seconds(2), + slideTime: Time = Seconds(1) + ) { + test("window - " + name) { + val numBatches = expectedOutput.size * (slideTime.millis / batchDuration.millis).toInt + val operation = (s: DStream[Int]) => s.window(windowTime, slideTime) + testOperation(input, operation, expectedOutput, numBatches, true) + } + } + + def testReduceByKeyAndWindow( + name: String, + input: Seq[Seq[(String, Int)]], + expectedOutput: Seq[Seq[(String, Int)]], + windowTime: Time = Seconds(2), + slideTime: Time = Seconds(1) + ) { + test("reduceByKeyAndWindow - " + name) { + val numBatches = expectedOutput.size * (slideTime.millis / batchDuration.millis).toInt + val operation = (s: DStream[(String, Int)]) => { + s.reduceByKeyAndWindow(_ + _, windowTime, slideTime).persist() + } + testOperation(input, operation, expectedOutput, numBatches, true) + } + } + + def testReduceByKeyAndWindowInv( + name: String, + input: Seq[Seq[(String, Int)]], + expectedOutput: Seq[Seq[(String, Int)]], + windowTime: Time = Seconds(2), + slideTime: Time = Seconds(1) + ) { + test("reduceByKeyAndWindowInv - " + name) { + val numBatches = expectedOutput.size * (slideTime.millis / batchDuration.millis).toInt + val operation = (s: DStream[(String, Int)]) => { + s.reduceByKeyAndWindow(_ + _, _ - _, windowTime, slideTime).persist() + } + testOperation(input, operation, expectedOutput, numBatches, true) + } + } } From d1542387891018914fdd6b647f17f0b05acdd40e Mon Sep 17 00:00:00 2001 From: Tathagata Das Date: Sun, 4 Nov 2012 12:12:06 -0800 Subject: [PATCH 095/226] Made checkpointing of dstream graph to work with checkpointing of RDDs. For streams requiring checkpointing of its RDD, the default checkpoint interval is set to 10 seconds. 
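Two things in the hunks below are worth calling out: a DStream that must checkpoint gets a default checkpoint interval of slideTime.max(Seconds(10)) if none was set, and validate() then requires any interval to be at least the slide duration and a multiple of it. A small standalone sketch of those rules, with durations kept as plain milliseconds and all names purely illustrative, looks like this:

object CheckpointIntervalSketch extends App {
  def seconds(n: Long): Long = n * 1000

  // Default: the slide duration or 10 seconds, whichever is larger.
  def defaultCheckpointInterval(slideMs: Long): Long = math.max(slideMs, seconds(10))

  // The same constraints that DStream.validate() asserts on a configured interval.
  def validate(slideMs: Long, checkpointMs: Long) {
    require(checkpointMs >= slideMs,
      "Checkpoint interval " + checkpointMs + " ms is lower than the slide duration")
    require(checkpointMs % slideMs == 0,
      "Checkpoint interval " + checkpointMs + " ms is not a multiple of the slide duration")
  }

  val slideMs = seconds(2)
  val intervalMs = defaultCheckpointInterval(slideMs) // 10000 ms, since 10 s > 2 s
  validate(slideMs, intervalMs)
  println("slide = " + slideMs + " ms, checkpoint interval = " + intervalMs + " ms")
}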
--- core/src/main/scala/spark/RDD.scala | 32 +++- core/src/main/scala/spark/SparkContext.scala | 13 +- .../scala/spark/streaming/Checkpoint.scala | 83 ++++++---- .../main/scala/spark/streaming/DStream.scala | 156 ++++++++++++++---- .../scala/spark/streaming/DStreamGraph.scala | 7 +- .../streaming/ReducedWindowedDStream.scala | 36 ++-- .../scala/spark/streaming/StateDStream.scala | 45 +---- .../spark/streaming/StreamingContext.scala | 38 +++-- .../src/main/scala/spark/streaming/Time.scala | 4 + .../examples/FileStreamWithCheckpoint.scala | 10 +- .../streaming/examples/TopKWordCountRaw.scala | 5 +- .../spark/streaming/examples/WordCount2.scala | 7 +- .../streaming/examples/WordCountRaw.scala | 6 +- .../spark/streaming/examples/WordMax2.scala | 10 +- .../spark/streaming/CheckpointSuite.scala | 77 +++++++-- .../scala/spark/streaming/TestSuiteBase.scala | 37 ++++- .../streaming/WindowOperationsSuite.scala | 4 +- 17 files changed, 367 insertions(+), 203 deletions(-) diff --git a/core/src/main/scala/spark/RDD.scala b/core/src/main/scala/spark/RDD.scala index 7b59a6f09e..63048d5df0 100644 --- a/core/src/main/scala/spark/RDD.scala +++ b/core/src/main/scala/spark/RDD.scala @@ -119,22 +119,23 @@ abstract class RDD[T: ClassManifest]( private var storageLevel: StorageLevel = StorageLevel.NONE /** Returns the first parent RDD */ - private[spark] def firstParent[U: ClassManifest] = { + protected[spark] def firstParent[U: ClassManifest] = { dependencies.head.rdd.asInstanceOf[RDD[U]] } /** Returns the `i` th parent RDD */ - private[spark] def parent[U: ClassManifest](i: Int) = dependencies(i).rdd.asInstanceOf[RDD[U]] + protected[spark] def parent[U: ClassManifest](i: Int) = dependencies(i).rdd.asInstanceOf[RDD[U]] // Variables relating to checkpointing - val isCheckpointable = true // override to set this to false to avoid checkpointing an RDD - var shouldCheckpoint = false // set to true when an RDD is marked for checkpointing - var isCheckpointInProgress = false // set to true when checkpointing is in progress - var isCheckpointed = false // set to true after checkpointing is completed + protected val isCheckpointable = true // override to set this to false to avoid checkpointing an RDD - var checkpointFile: String = null // set to the checkpoint file after checkpointing is completed - var checkpointRDD: RDD[T] = null // set to the HadoopRDD of the checkpoint file - var checkpointRDDSplits: Seq[Split] = null // set to the splits of the Hadoop RDD + protected var shouldCheckpoint = false // set to true when an RDD is marked for checkpointing + protected var isCheckpointInProgress = false // set to true when checkpointing is in progress + protected[spark] var isCheckpointed = false // set to true after checkpointing is completed + + protected[spark] var checkpointFile: String = null // set to the checkpoint file after checkpointing is completed + protected var checkpointRDD: RDD[T] = null // set to the HadoopRDD of the checkpoint file + protected var checkpointRDDSplits: Seq[Split] = null // set to the splits of the Hadoop RDD // Methods available on all RDDs: @@ -176,6 +177,9 @@ abstract class RDD[T: ClassManifest]( if (isCheckpointed || shouldCheckpoint || isCheckpointInProgress) { // do nothing } else if (isCheckpointable) { + if (sc.checkpointDir == null) { + throw new Exception("Checkpoint directory has not been set in the SparkContext.") + } shouldCheckpoint = true } else { throw new Exception(this + " cannot be checkpointed") @@ -183,6 +187,16 @@ abstract class RDD[T: ClassManifest]( } } + def 
getCheckpointData(): Any = { + synchronized { + if (isCheckpointed) { + checkpointFile + } else { + null + } + } + } + /** * Performs the checkpointing of this RDD by saving this . It is called by the DAGScheduler after a job * using this RDD has completed (therefore the RDD has been materialized and diff --git a/core/src/main/scala/spark/SparkContext.scala b/core/src/main/scala/spark/SparkContext.scala index 79ceab5f4f..d7326971a9 100644 --- a/core/src/main/scala/spark/SparkContext.scala +++ b/core/src/main/scala/spark/SparkContext.scala @@ -584,14 +584,15 @@ class SparkContext( * overwriting existing files may be overwritten). The directory will be deleted on exit * if indicated. */ - def setCheckpointDir(dir: String, deleteOnExit: Boolean = false) { + def setCheckpointDir(dir: String, useExisting: Boolean = false) { val path = new Path(dir) val fs = path.getFileSystem(new Configuration()) - if (fs.exists(path)) { - throw new Exception("Checkpoint directory '" + path + "' already exists.") - } else { - fs.mkdirs(path) - if (deleteOnExit) fs.deleteOnExit(path) + if (!useExisting) { + if (fs.exists(path)) { + throw new Exception("Checkpoint directory '" + path + "' already exists.") + } else { + fs.mkdirs(path) + } } checkpointDir = dir } diff --git a/streaming/src/main/scala/spark/streaming/Checkpoint.scala b/streaming/src/main/scala/spark/streaming/Checkpoint.scala index 83a43d15cb..cf04c7031e 100644 --- a/streaming/src/main/scala/spark/streaming/Checkpoint.scala +++ b/streaming/src/main/scala/spark/streaming/Checkpoint.scala @@ -1,6 +1,6 @@ package spark.streaming -import spark.Utils +import spark.{Logging, Utils} import org.apache.hadoop.fs.{FileUtil, Path} import org.apache.hadoop.conf.Configuration @@ -8,13 +8,14 @@ import org.apache.hadoop.conf.Configuration import java.io.{InputStream, ObjectStreamClass, ObjectInputStream, ObjectOutputStream} -class Checkpoint(@transient ssc: StreamingContext, val checkpointTime: Time) extends Serializable { +class Checkpoint(@transient ssc: StreamingContext, val checkpointTime: Time) + extends Logging with Serializable { val master = ssc.sc.master val framework = ssc.sc.jobName val sparkHome = ssc.sc.sparkHome val jars = ssc.sc.jars val graph = ssc.graph - val checkpointFile = ssc.checkpointFile + val checkpointDir = ssc.checkpointDir val checkpointInterval = ssc.checkpointInterval validate() @@ -24,22 +25,25 @@ class Checkpoint(@transient ssc: StreamingContext, val checkpointTime: Time) ext assert(framework != null, "Checkpoint.framework is null") assert(graph != null, "Checkpoint.graph is null") assert(checkpointTime != null, "Checkpoint.checkpointTime is null") + logInfo("Checkpoint for time " + checkpointTime + " validated") } - def saveToFile(file: String = checkpointFile) { - val path = new Path(file) + def save(path: String) { + val file = new Path(path, "graph") val conf = new Configuration() - val fs = path.getFileSystem(conf) - if (fs.exists(path)) { - val bkPath = new Path(path.getParent, path.getName + ".bk") - FileUtil.copy(fs, path, fs, bkPath, true, true, conf) - //logInfo("Moved existing checkpoint file to " + bkPath) + val fs = file.getFileSystem(conf) + logDebug("Saved checkpoint for time " + checkpointTime + " to file '" + file + "'") + if (fs.exists(file)) { + val bkFile = new Path(file.getParent, file.getName + ".bk") + FileUtil.copy(fs, file, fs, bkFile, true, true, conf) + logDebug("Moved existing checkpoint file to " + bkFile) } - val fos = fs.create(path) + val fos = fs.create(file) val oos = new ObjectOutputStream(fos) 
oos.writeObject(this) oos.close() fs.close() + logInfo("Saved checkpoint for time " + checkpointTime + " to file '" + file + "'") } def toBytes(): Array[Byte] = { @@ -50,30 +54,41 @@ class Checkpoint(@transient ssc: StreamingContext, val checkpointTime: Time) ext object Checkpoint { - def loadFromFile(file: String): Checkpoint = { - try { - val path = new Path(file) - val conf = new Configuration() - val fs = path.getFileSystem(conf) - if (!fs.exists(path)) { - throw new Exception("Checkpoint file '" + file + "' does not exist") + def load(path: String): Checkpoint = { + + val fs = new Path(path).getFileSystem(new Configuration()) + val attempts = Seq(new Path(path), new Path(path, "graph"), new Path(path, "graph.bk")) + var lastException: Exception = null + var lastExceptionFile: String = null + + attempts.foreach(file => { + if (fs.exists(file)) { + try { + val fis = fs.open(file) + // ObjectInputStream uses the last defined user-defined class loader in the stack + // to find classes, which maybe the wrong class loader. Hence, a inherited version + // of ObjectInputStream is used to explicitly use the current thread's default class + // loader to find and load classes. This is a well know Java issue and has popped up + // in other places (e.g., http://jira.codehaus.org/browse/GROOVY-1627) + val ois = new ObjectInputStreamWithLoader(fis, Thread.currentThread().getContextClassLoader) + val cp = ois.readObject.asInstanceOf[Checkpoint] + ois.close() + fs.close() + cp.validate() + println("Checkpoint successfully loaded from file " + file) + return cp + } catch { + case e: Exception => + lastException = e + lastExceptionFile = file.toString + } } - val fis = fs.open(path) - // ObjectInputStream uses the last defined user-defined class loader in the stack - // to find classes, which maybe the wrong class loader. Hence, a inherited version - // of ObjectInputStream is used to explicitly use the current thread's default class - // loader to find and load classes. 
This is a well know Java issue and has popped up - // in other places (e.g., http://jira.codehaus.org/browse/GROOVY-1627) - val ois = new ObjectInputStreamWithLoader(fis, Thread.currentThread().getContextClassLoader) - val cp = ois.readObject.asInstanceOf[Checkpoint] - ois.close() - fs.close() - cp.validate() - cp - } catch { - case e: Exception => - e.printStackTrace() - throw new Exception("Could not load checkpoint file '" + file + "'", e) + }) + + if (lastException == null) { + throw new Exception("Could not load checkpoint from path '" + path + "'") + } else { + throw new Exception("Error loading checkpoint from path '" + lastExceptionFile + "'", lastException) } } diff --git a/streaming/src/main/scala/spark/streaming/DStream.scala b/streaming/src/main/scala/spark/streaming/DStream.scala index a4921bb1a2..de51c5d34a 100644 --- a/streaming/src/main/scala/spark/streaming/DStream.scala +++ b/streaming/src/main/scala/spark/streaming/DStream.scala @@ -13,6 +13,7 @@ import scala.collection.mutable.HashMap import java.util.concurrent.ArrayBlockingQueue import java.io.{ObjectInputStream, IOException, ObjectOutputStream} import scala.Some +import collection.mutable abstract class DStream[T: ClassManifest] (@transient var ssc: StreamingContext) extends Serializable with Logging { @@ -41,53 +42,55 @@ extends Serializable with Logging { */ // RDDs generated, marked as protected[streaming] so that testsuites can access it - protected[streaming] val generatedRDDs = new HashMap[Time, RDD[T]] () + protected[streaming] var generatedRDDs = new HashMap[Time, RDD[T]] () // Time zero for the DStream - protected var zeroTime: Time = null + protected[streaming] var zeroTime: Time = null // Duration for which the DStream will remember each RDD created - protected var rememberDuration: Time = null + protected[streaming] var rememberDuration: Time = null // Storage level of the RDDs in the stream - protected var storageLevel: StorageLevel = StorageLevel.NONE + protected[streaming] var storageLevel: StorageLevel = StorageLevel.NONE - // Checkpoint level and checkpoint interval - protected var checkpointLevel: StorageLevel = StorageLevel.NONE // NONE means don't checkpoint - protected var checkpointInterval: Time = null + // Checkpoint details + protected[streaming] val mustCheckpoint = false + protected[streaming] var checkpointInterval: Time = null + protected[streaming] val checkpointData = new HashMap[Time, Any]() // Reference to whole DStream graph - protected var graph: DStreamGraph = null + protected[streaming] var graph: DStreamGraph = null def isInitialized = (zeroTime != null) // Duration for which the DStream requires its parent DStream to remember each RDD created def parentRememberDuration = rememberDuration - // Change this RDD's storage level - def persist( - storageLevel: StorageLevel, - checkpointLevel: StorageLevel, - checkpointInterval: Time): DStream[T] = { - if (this.storageLevel != StorageLevel.NONE && this.storageLevel != storageLevel) { - // TODO: not sure this is necessary for DStreams + // Set caching level for the RDDs created by this DStream + def persist(level: StorageLevel): DStream[T] = { + if (this.isInitialized) { throw new UnsupportedOperationException( - "Cannot change storage level of an DStream after it was already assigned a level") + "Cannot change storage level of an DStream after streaming context has started") } - this.storageLevel = storageLevel - this.checkpointLevel = checkpointLevel - this.checkpointInterval = checkpointInterval + this.storageLevel = level this } - 
// Set caching level for the RDDs created by this DStream - def persist(newLevel: StorageLevel): DStream[T] = persist(newLevel, StorageLevel.NONE, null) - def persist(): DStream[T] = persist(StorageLevel.MEMORY_ONLY) // Turn on the default caching level for this RDD def cache(): DStream[T] = persist() + def checkpoint(interval: Time): DStream[T] = { + if (isInitialized) { + throw new UnsupportedOperationException( + "Cannot change checkpoint interval of an DStream after streaming context has started") + } + persist() + checkpointInterval = interval + this + } + /** * This method initializes the DStream by setting the "zero" time, based on which * the validity of future times is calculated. This method also recursively initializes @@ -99,7 +102,67 @@ extends Serializable with Logging { + ", cannot initialize it again to " + time) } zeroTime = time + + // Set the checkpoint interval to be slideTime or 10 seconds, which ever is larger + if (mustCheckpoint && checkpointInterval == null) { + checkpointInterval = slideTime.max(Seconds(10)) + logInfo("Checkpoint interval automatically set to " + checkpointInterval) + } + + // Set the minimum value of the rememberDuration if not already set + var minRememberDuration = slideTime + if (checkpointInterval != null && minRememberDuration <= checkpointInterval) { + minRememberDuration = checkpointInterval + slideTime + } + if (rememberDuration == null || rememberDuration < minRememberDuration) { + rememberDuration = minRememberDuration + } + + // Initialize the dependencies dependencies.foreach(_.initialize(zeroTime)) + } + + protected[streaming] def validate() { + assert( + !mustCheckpoint || checkpointInterval != null, + "The checkpoint interval for " + this.getClass.getSimpleName + " has not been set. " + + " Please use DStream.checkpoint() to set the interval." + ) + + assert( + checkpointInterval == null || checkpointInterval >= slideTime, + "The checkpoint interval for " + this.getClass.getSimpleName + " has been set to " + + checkpointInterval + " which is lower than its slide time (" + slideTime + "). " + + "Please set it to at least " + slideTime + "." + ) + + assert( + checkpointInterval == null || checkpointInterval.isMultipleOf(slideTime), + "The checkpoint interval for " + this.getClass.getSimpleName + " has been set to " + + checkpointInterval + " which not a multiple of its slide time (" + slideTime + "). " + + "Please set it to a multiple " + slideTime + "." + ) + + assert( + checkpointInterval == null || storageLevel != StorageLevel.NONE, + "" + this.getClass.getSimpleName + " has been marked for checkpointing but the storage " + + "level has not been set to enable persisting. Please use DStream.persist() to set the " + + "storage level to use memory for better checkpointing performance." + ) + + assert( + checkpointInterval == null || rememberDuration > checkpointInterval, + "The remember duration for " + this.getClass.getSimpleName + " has been set to " + + rememberDuration + " which is not more than the checkpoint interval (" + + checkpointInterval + "). Please set it to higher than " + checkpointInterval + "." 
+ ) + + dependencies.foreach(_.validate()) + + logInfo("Slide time = " + slideTime) + logInfo("Storage level = " + storageLevel) + logInfo("Checkpoint interval = " + checkpointInterval) + logInfo("Remember duration = " + rememberDuration) logInfo("Initialized " + this) } @@ -120,17 +183,12 @@ extends Serializable with Logging { dependencies.foreach(_.setGraph(graph)) } - protected[streaming] def setRememberDuration(duration: Time = slideTime) { - if (duration == null) { - throw new Exception("Duration for remembering RDDs cannot be set to null for " + this) - } else if (rememberDuration != null && duration < rememberDuration) { - logWarning("Duration for remembering RDDs cannot be reduced from " + rememberDuration - + " to " + duration + " for " + this) - } else { + protected[streaming] def setRememberDuration(duration: Time) { + if (duration != null && duration > rememberDuration) { rememberDuration = duration - dependencies.foreach(_.setRememberDuration(parentRememberDuration)) logInfo("Duration for remembering RDDs set to " + rememberDuration + " for " + this) } + dependencies.foreach(_.setRememberDuration(parentRememberDuration)) } /** This method checks whether the 'time' is valid wrt slideTime for generating RDD */ @@ -163,12 +221,13 @@ extends Serializable with Logging { if (isTimeValid(time)) { compute(time) match { case Some(newRDD) => - if (checkpointInterval != null && (time - zeroTime).isMultipleOf(checkpointInterval)) { - newRDD.persist(checkpointLevel) - logInfo("Persisting " + newRDD + " to " + checkpointLevel + " at time " + time) - } else if (storageLevel != StorageLevel.NONE) { + if (storageLevel != StorageLevel.NONE) { newRDD.persist(storageLevel) - logInfo("Persisting " + newRDD + " to " + storageLevel + " at time " + time) + logInfo("Persisting RDD for time " + time + " to " + storageLevel + " at time " + time) + } + if (checkpointInterval != null && (time - zeroTime).isMultipleOf(checkpointInterval)) { + newRDD.checkpoint() + logInfo("Marking RDD for time " + time + " for checkpointing at time " + time) } generatedRDDs.put(time, newRDD) Some(newRDD) @@ -199,7 +258,7 @@ extends Serializable with Logging { } } - def forgetOldRDDs(time: Time) { + protected[streaming] def forgetOldRDDs(time: Time) { val keys = generatedRDDs.keys var numForgotten = 0 keys.foreach(t => { @@ -213,12 +272,35 @@ extends Serializable with Logging { dependencies.foreach(_.forgetOldRDDs(time)) } + protected[streaming] def updateCheckpointData() { + checkpointData.clear() + generatedRDDs.foreach { + case(time, rdd) => { + logDebug("Adding checkpointed RDD for time " + time) + val data = rdd.getCheckpointData() + if (data != null) { + checkpointData += ((time, data)) + } + } + } + } + + protected[streaming] def restoreCheckpointData() { + checkpointData.foreach { + case(time, data) => { + logInfo("Restoring checkpointed RDD for time " + time) + generatedRDDs += ((time, ssc.sc.objectFile[T](data.toString))) + } + } + } + @throws(classOf[IOException]) private def writeObject(oos: ObjectOutputStream) { logDebug(this.getClass().getSimpleName + ".writeObject used") if (graph != null) { graph.synchronized { if (graph.checkpointInProgress) { + updateCheckpointData() oos.defaultWriteObject() } else { val msg = "Object of " + this.getClass.getName + " is being serialized " + @@ -239,6 +321,8 @@ extends Serializable with Logging { private def readObject(ois: ObjectInputStream) { logDebug(this.getClass().getSimpleName + ".readObject used") ois.defaultReadObject() + generatedRDDs = new HashMap[Time, RDD[T]] () 
+ restoreCheckpointData() } /** diff --git a/streaming/src/main/scala/spark/streaming/DStreamGraph.scala b/streaming/src/main/scala/spark/streaming/DStreamGraph.scala index ac44d7a2a6..f8922ec790 100644 --- a/streaming/src/main/scala/spark/streaming/DStreamGraph.scala +++ b/streaming/src/main/scala/spark/streaming/DStreamGraph.scala @@ -22,11 +22,8 @@ final class DStreamGraph extends Serializable with Logging { } zeroTime = time outputStreams.foreach(_.initialize(zeroTime)) - outputStreams.foreach(_.setRememberDuration()) // first set the rememberDuration to default values - if (rememberDuration != null) { - // if custom rememberDuration has been provided, set the rememberDuration - outputStreams.foreach(_.setRememberDuration(rememberDuration)) - } + outputStreams.foreach(_.setRememberDuration(rememberDuration)) + outputStreams.foreach(_.validate) inputStreams.par.foreach(_.start()) } } diff --git a/streaming/src/main/scala/spark/streaming/ReducedWindowedDStream.scala b/streaming/src/main/scala/spark/streaming/ReducedWindowedDStream.scala index 1c57d5f855..6df82c0df3 100644 --- a/streaming/src/main/scala/spark/streaming/ReducedWindowedDStream.scala +++ b/streaming/src/main/scala/spark/streaming/ReducedWindowedDStream.scala @@ -21,15 +21,19 @@ class ReducedWindowedDStream[K: ClassManifest, V: ClassManifest]( partitioner: Partitioner ) extends DStream[(K,V)](parent.ssc) { - if (!_windowTime.isMultipleOf(parent.slideTime)) - throw new Exception("The window duration of ReducedWindowedDStream (" + _slideTime + ") " + - "must be multiple of the slide duration of parent DStream (" + parent.slideTime + ")") + assert(_windowTime.isMultipleOf(parent.slideTime), + "The window duration of ReducedWindowedDStream (" + _slideTime + ") " + + "must be multiple of the slide duration of parent DStream (" + parent.slideTime + ")" + ) - if (!_slideTime.isMultipleOf(parent.slideTime)) - throw new Exception("The slide duration of ReducedWindowedDStream (" + _slideTime + ") " + - "must be multiple of the slide duration of parent DStream (" + parent.slideTime + ")") + assert(_slideTime.isMultipleOf(parent.slideTime), + "The slide duration of ReducedWindowedDStream (" + _slideTime + ") " + + "must be multiple of the slide duration of parent DStream (" + parent.slideTime + ")" + ) - @transient val reducedStream = parent.reduceByKey(reduceFunc, partitioner) + super.persist(StorageLevel.MEMORY_ONLY) + + val reducedStream = parent.reduceByKey(reduceFunc, partitioner) def windowTime: Time = _windowTime @@ -37,15 +41,19 @@ class ReducedWindowedDStream[K: ClassManifest, V: ClassManifest]( override def slideTime: Time = _slideTime - //TODO: This is wrong. 
This should depend on the checkpointInterval + override val mustCheckpoint = true + override def parentRememberDuration: Time = rememberDuration + windowTime - override def persist( - storageLevel: StorageLevel, - checkpointLevel: StorageLevel, - checkpointInterval: Time): DStream[(K,V)] = { - super.persist(storageLevel, checkpointLevel, checkpointInterval) - reducedStream.persist(storageLevel, checkpointLevel, checkpointInterval) + override def persist(storageLevel: StorageLevel): DStream[(K,V)] = { + super.persist(storageLevel) + reducedStream.persist(storageLevel) + this + } + + override def checkpoint(interval: Time): DStream[(K, V)] = { + super.checkpoint(interval) + reducedStream.checkpoint(interval) this } diff --git a/streaming/src/main/scala/spark/streaming/StateDStream.scala b/streaming/src/main/scala/spark/streaming/StateDStream.scala index 086752ac55..0211df1343 100644 --- a/streaming/src/main/scala/spark/streaming/StateDStream.scala +++ b/streaming/src/main/scala/spark/streaming/StateDStream.scala @@ -23,51 +23,14 @@ class StateDStream[K: ClassManifest, V: ClassManifest, S <: AnyRef : ClassManife rememberPartitioner: Boolean ) extends DStream[(K, S)](parent.ssc) { + super.persist(StorageLevel.MEMORY_ONLY) + override def dependencies = List(parent) override def slideTime = parent.slideTime - override def getOrCompute(time: Time): Option[RDD[(K, S)]] = { - generatedRDDs.get(time) match { - case Some(oldRDD) => { - if (checkpointInterval != null && time > zeroTime && (time - zeroTime).isMultipleOf(checkpointInterval) && oldRDD.dependencies.size > 0) { - val r = oldRDD - val oldRDDBlockIds = oldRDD.splits.map(s => "rdd:" + r.id + ":" + s.index) - val checkpointedRDD = new BlockRDD[(K, S)](ssc.sc, oldRDDBlockIds) { - override val partitioner = oldRDD.partitioner - } - generatedRDDs.update(time, checkpointedRDD) - logInfo("Checkpointed RDD " + oldRDD.id + " of time " + time + " with its new RDD " + checkpointedRDD.id) - Some(checkpointedRDD) - } else { - Some(oldRDD) - } - } - case None => { - if (isTimeValid(time)) { - compute(time) match { - case Some(newRDD) => { - if (checkpointInterval != null && (time - zeroTime).isMultipleOf(checkpointInterval)) { - newRDD.persist(checkpointLevel) - logInfo("Persisting " + newRDD + " to " + checkpointLevel + " at time " + time) - } else if (storageLevel != StorageLevel.NONE) { - newRDD.persist(storageLevel) - logInfo("Persisting " + newRDD + " to " + storageLevel + " at time " + time) - } - generatedRDDs.put(time, newRDD) - Some(newRDD) - } - case None => { - None - } - } - } else { - None - } - } - } - } - + override val mustCheckpoint = true + override def compute(validTime: Time): Option[RDD[(K, S)]] = { // Try to get the previous state RDD diff --git a/streaming/src/main/scala/spark/streaming/StreamingContext.scala b/streaming/src/main/scala/spark/streaming/StreamingContext.scala index b3148eaa97..3838e84113 100644 --- a/streaming/src/main/scala/spark/streaming/StreamingContext.scala +++ b/streaming/src/main/scala/spark/streaming/StreamingContext.scala @@ -15,6 +15,8 @@ import org.apache.hadoop.io.LongWritable import org.apache.hadoop.io.Text import org.apache.hadoop.mapreduce.{InputFormat => NewInputFormat} import org.apache.hadoop.mapreduce.lib.input.TextInputFormat +import org.apache.hadoop.fs.Path +import java.util.UUID class StreamingContext ( sc_ : SparkContext, @@ -26,7 +28,7 @@ class StreamingContext ( def this(master: String, frameworkName: String, sparkHome: String = null, jars: Seq[String] = Nil) = this(new SparkContext(master, 
frameworkName, sparkHome, jars), null) - def this(file: String) = this(null, Checkpoint.loadFromFile(file)) + def this(path: String) = this(null, Checkpoint.load(path)) def this(cp_ : Checkpoint) = this(null, cp_) @@ -51,7 +53,6 @@ class StreamingContext ( val graph: DStreamGraph = { if (isCheckpointPresent) { - cp_.graph.setContext(this) cp_.graph } else { @@ -62,7 +63,15 @@ class StreamingContext ( val nextNetworkInputStreamId = new AtomicInteger(0) var networkInputTracker: NetworkInputTracker = null - private[streaming] var checkpointFile: String = if (isCheckpointPresent) cp_.checkpointFile else null + private[streaming] var checkpointDir: String = { + if (isCheckpointPresent) { + sc.setCheckpointDir(cp_.checkpointDir, true) + cp_.checkpointDir + } else { + null + } + } + private[streaming] var checkpointInterval: Time = if (isCheckpointPresent) cp_.checkpointInterval else null private[streaming] var receiverJobThread: Thread = null private[streaming] var scheduler: Scheduler = null @@ -75,9 +84,15 @@ class StreamingContext ( graph.setRememberDuration(duration) } - def setCheckpointDetails(file: String, interval: Time) { - checkpointFile = file - checkpointInterval = interval + def checkpoint(dir: String, interval: Time) { + if (dir != null) { + sc.setCheckpointDir(new Path(dir, "rdds-" + UUID.randomUUID.toString).toString) + checkpointDir = dir + checkpointInterval = interval + } else { + checkpointDir = null + checkpointInterval = null + } } private[streaming] def getInitialCheckpoint(): Checkpoint = { @@ -170,16 +185,12 @@ class StreamingContext ( graph.addOutputStream(outputStream) } - def validate() { - assert(graph != null, "Graph is null") - graph.validate() - } - /** * This function starts the execution of the streams. */ def start() { - validate() + assert(graph != null, "Graph is null") + graph.validate() val networkInputStreams = graph.getInputStreams().filter(s => s match { case n: NetworkInputDStream[_] => true @@ -216,7 +227,8 @@ class StreamingContext ( } def doCheckpoint(currentTime: Time) { - new Checkpoint(this, currentTime).saveToFile(checkpointFile) + new Checkpoint(this, currentTime).save(checkpointDir) + } } diff --git a/streaming/src/main/scala/spark/streaming/Time.scala b/streaming/src/main/scala/spark/streaming/Time.scala index 9ddb65249a..2ba6502971 100644 --- a/streaming/src/main/scala/spark/streaming/Time.scala +++ b/streaming/src/main/scala/spark/streaming/Time.scala @@ -25,6 +25,10 @@ case class Time(millis: Long) { def isMultipleOf(that: Time): Boolean = (this.millis % that.millis == 0) + def min(that: Time): Time = if (this < that) this else that + + def max(that: Time): Time = if (this > that) this else that + def isZero: Boolean = (this.millis == 0) override def toString: String = (millis.toString + " ms") diff --git a/streaming/src/main/scala/spark/streaming/examples/FileStreamWithCheckpoint.scala b/streaming/src/main/scala/spark/streaming/examples/FileStreamWithCheckpoint.scala index df96a811da..21a83c0fde 100644 --- a/streaming/src/main/scala/spark/streaming/examples/FileStreamWithCheckpoint.scala +++ b/streaming/src/main/scala/spark/streaming/examples/FileStreamWithCheckpoint.scala @@ -10,20 +10,20 @@ object FileStreamWithCheckpoint { def main(args: Array[String]) { if (args.size != 3) { - println("FileStreamWithCheckpoint ") - println("FileStreamWithCheckpoint restart ") + println("FileStreamWithCheckpoint ") + println("FileStreamWithCheckpoint restart ") System.exit(-1) } val directory = new Path(args(1)) - val checkpointFile = args(2) + val 
checkpointDir = args(2) val ssc: StreamingContext = { if (args(0) == "restart") { // Recreated streaming context from specified checkpoint file - new StreamingContext(checkpointFile) + new StreamingContext(checkpointDir) } else { @@ -34,7 +34,7 @@ object FileStreamWithCheckpoint { // Create new streaming context val ssc_ = new StreamingContext(args(0), "FileStreamWithCheckpoint") ssc_.setBatchDuration(Seconds(1)) - ssc_.setCheckpointDetails(checkpointFile, Seconds(1)) + ssc_.checkpoint(checkpointDir, Seconds(1)) // Setup the streaming computation val inputStream = ssc_.textFileStream(directory.toString) diff --git a/streaming/src/main/scala/spark/streaming/examples/TopKWordCountRaw.scala b/streaming/src/main/scala/spark/streaming/examples/TopKWordCountRaw.scala index 57fd10f0a5..750cb7445f 100644 --- a/streaming/src/main/scala/spark/streaming/examples/TopKWordCountRaw.scala +++ b/streaming/src/main/scala/spark/streaming/examples/TopKWordCountRaw.scala @@ -41,9 +41,8 @@ object TopKWordCountRaw { val windowedCounts = union.mapPartitions(splitAndCountPartitions) .reduceByKeyAndWindow(add _, subtract _, Seconds(30), Milliseconds(batchMs), reduces) - windowedCounts.persist(StorageLevel.MEMORY_ONLY, StorageLevel.MEMORY_ONLY_2, - Milliseconds(chkptMs)) - //windowedCounts.print() // TODO: something else? + windowedCounts.persist().checkpoint(Milliseconds(chkptMs)) + //.persist(StorageLevel.MEMORY_ONLY, StorageLevel.MEMORY_ONLY_2, Milliseconds(chkptMs)) def topK(data: Iterator[(String, Long)], k: Int): Iterator[(String, Long)] = { val taken = new Array[(String, Long)](k) diff --git a/streaming/src/main/scala/spark/streaming/examples/WordCount2.scala b/streaming/src/main/scala/spark/streaming/examples/WordCount2.scala index 0d2e62b955..865026033e 100644 --- a/streaming/src/main/scala/spark/streaming/examples/WordCount2.scala +++ b/streaming/src/main/scala/spark/streaming/examples/WordCount2.scala @@ -100,10 +100,9 @@ object WordCount2 { val windowedCounts = sentences .mapPartitions(splitAndCountPartitions) .reduceByKeyAndWindow(add _, subtract _, Seconds(30), batchDuration, reduceTasks.toInt) - windowedCounts.persist(StorageLevel.MEMORY_ONLY, - StorageLevel.MEMORY_ONLY_2, - //new StorageLevel(false, true, true, 3), - Milliseconds(chkptMillis.toLong)) + + windowedCounts.persist().checkpoint(Milliseconds(chkptMillis.toLong)) + //.persist(StorageLevel.MEMORY_ONLY, StorageLevel.MEMORY_ONLY_2, Milliseconds(chkptMillis.toLong)) windowedCounts.foreachRDD(r => println("Element count: " + r.count())) ssc.start() diff --git a/streaming/src/main/scala/spark/streaming/examples/WordCountRaw.scala b/streaming/src/main/scala/spark/streaming/examples/WordCountRaw.scala index abfd12890f..d1ea9a9cd5 100644 --- a/streaming/src/main/scala/spark/streaming/examples/WordCountRaw.scala +++ b/streaming/src/main/scala/spark/streaming/examples/WordCountRaw.scala @@ -41,9 +41,9 @@ object WordCountRaw { val windowedCounts = union.mapPartitions(splitAndCountPartitions) .reduceByKeyAndWindow(add _, subtract _, Seconds(30), Milliseconds(batchMs), reduces) - windowedCounts.persist(StorageLevel.MEMORY_ONLY, StorageLevel.MEMORY_ONLY_2, - Milliseconds(chkptMs)) - //windowedCounts.print() // TODO: something else? 
+ windowedCounts.persist().checkpoint(chkptMs) + //.persist(StorageLevel.MEMORY_ONLY, StorageLevel.MEMORY_ONLY_2, Milliseconds(chkptMs)) + windowedCounts.foreachRDD(r => println("Element count: " + r.count())) ssc.start() diff --git a/streaming/src/main/scala/spark/streaming/examples/WordMax2.scala b/streaming/src/main/scala/spark/streaming/examples/WordMax2.scala index 9d44da2b11..6a9c8a9a69 100644 --- a/streaming/src/main/scala/spark/streaming/examples/WordMax2.scala +++ b/streaming/src/main/scala/spark/streaming/examples/WordMax2.scala @@ -57,11 +57,13 @@ object WordMax2 { val windowedCounts = sentences .mapPartitions(splitAndCountPartitions) .reduceByKey(add _, reduceTasks.toInt) - .persist(StorageLevel.MEMORY_ONLY, StorageLevel.MEMORY_ONLY_2, - Milliseconds(chkptMillis.toLong)) + .persist() + .checkpoint(Milliseconds(chkptMillis.toLong)) + //.persist(StorageLevel.MEMORY_ONLY, StorageLevel.MEMORY_ONLY_2, Milliseconds(chkptMillis.toLong)) .reduceByKeyAndWindow(max _, Seconds(10), batchDuration, reduceTasks.toInt) - //.persist(StorageLevel.MEMORY_ONLY, StorageLevel.MEMORY_ONLY_2, - // Milliseconds(chkptMillis.toLong)) + .persist() + .checkpoint(Milliseconds(chkptMillis.toLong)) + //.persist(StorageLevel.MEMORY_ONLY, StorageLevel.MEMORY_ONLY_2, Milliseconds(chkptMillis.toLong)) windowedCounts.foreachRDD(r => println("Element count: " + r.count())) ssc.start() diff --git a/streaming/src/test/scala/spark/streaming/CheckpointSuite.scala b/streaming/src/test/scala/spark/streaming/CheckpointSuite.scala index 6dcedcf463..dfe31b5771 100644 --- a/streaming/src/test/scala/spark/streaming/CheckpointSuite.scala +++ b/streaming/src/test/scala/spark/streaming/CheckpointSuite.scala @@ -2,52 +2,95 @@ package spark.streaming import spark.streaming.StreamingContext._ import java.io.File +import collection.mutable.ArrayBuffer +import runtime.RichInt +import org.scalatest.BeforeAndAfter +import org.apache.hadoop.fs.Path +import org.apache.commons.io.FileUtils -class CheckpointSuite extends TestSuiteBase { +class CheckpointSuite extends TestSuiteBase with BeforeAndAfter { + + before { + FileUtils.deleteDirectory(new File(checkpointDir)) + } + + after { + FileUtils.deleteDirectory(new File(checkpointDir)) + } override def framework() = "CheckpointSuite" - override def checkpointFile() = "checkpoint" + override def batchDuration() = Seconds(1) + + override def checkpointDir() = "checkpoint" + + override def checkpointInterval() = batchDuration def testCheckpointedOperation[U: ClassManifest, V: ClassManifest]( input: Seq[Seq[U]], operation: DStream[U] => DStream[V], expectedOutput: Seq[Seq[V]], - useSet: Boolean = false + initialNumBatches: Int ) { // Current code assumes that: // number of inputs = number of outputs = number of batches to be run - val totalNumBatches = input.size - val initialNumBatches = input.size / 2 val nextNumBatches = totalNumBatches - initialNumBatches val initialNumExpectedOutputs = initialNumBatches + val nextNumExpectedOutputs = expectedOutput.size - initialNumExpectedOutputs // Do half the computation (half the number of batches), create checkpoint file and quit val ssc = setupStreams[U, V](input, operation) val output = runStreams[V](ssc, initialNumBatches, initialNumExpectedOutputs) - verifyOutput[V](output, expectedOutput.take(initialNumBatches), useSet) + verifyOutput[V](output, expectedOutput.take(initialNumBatches), true) Thread.sleep(1000) // Restart and complete the computation from checkpoint file - val sscNew = new StreamingContext(checkpointFile) - 
sscNew.setCheckpointDetails(null, null) - val outputNew = runStreams[V](sscNew, nextNumBatches, expectedOutput.size) - verifyOutput[V](outputNew, expectedOutput, useSet) - - new File(checkpointFile).delete() - new File(checkpointFile + ".bk").delete() - new File("." + checkpointFile + ".crc").delete() - new File("." + checkpointFile + ".bk.crc").delete() + val sscNew = new StreamingContext(checkpointDir) + //sscNew.checkpoint(null, null) + val outputNew = runStreams[V](sscNew, nextNumBatches, nextNumExpectedOutputs) + verifyOutput[V](outputNew, expectedOutput.takeRight(nextNumExpectedOutputs), true) } - test("simple per-batch operation") { + + test("map and reduceByKey") { testCheckpointedOperation( Seq( Seq("a", "a", "b"), Seq("", ""), Seq(), Seq("a", "a", "b"), Seq("", ""), Seq() ), (s: DStream[String]) => s.map(x => (x, 1)).reduceByKey(_ + _), Seq( Seq(("a", 2), ("b", 1)), Seq(("", 2)), Seq(), Seq(("a", 2), ("b", 1)), Seq(("", 2)), Seq() ), - true + 3 ) } + + test("reduceByKeyAndWindowInv") { + val n = 10 + val w = 4 + val input = (1 to n).map(x => Seq("a")).toSeq + val output = Seq(Seq(("a", 1)), Seq(("a", 2)), Seq(("a", 3))) ++ (1 to (n - w + 1)).map(x => Seq(("a", 4))) + val operation = (st: DStream[String]) => { + st.map(x => (x, 1)).reduceByKeyAndWindow(_ + _, _ - _, Seconds(w), Seconds(1)) + } + for (i <- Seq(3, 5, 7)) { + testCheckpointedOperation(input, operation, output, i) + } + } + + test("updateStateByKey") { + val input = (1 to 10).map(_ => Seq("a")).toSeq + val output = (1 to 10).map(x => Seq(("a", x))).toSeq + val operation = (st: DStream[String]) => { + val updateFunc = (values: Seq[Int], state: Option[RichInt]) => { + Some(new RichInt(values.foldLeft(0)(_ + _) + state.map(_.self).getOrElse(0))) + } + st.map(x => (x, 1)) + .updateStateByKey[RichInt](updateFunc) + .checkpoint(Seconds(5)) + .map(t => (t._1, t._2.self)) + } + for (i <- Seq(3, 5, 7)) { + testCheckpointedOperation(input, operation, output, i) + } + } + } \ No newline at end of file diff --git a/streaming/src/test/scala/spark/streaming/TestSuiteBase.scala b/streaming/src/test/scala/spark/streaming/TestSuiteBase.scala index c9bc454f91..e441feea19 100644 --- a/streaming/src/test/scala/spark/streaming/TestSuiteBase.scala +++ b/streaming/src/test/scala/spark/streaming/TestSuiteBase.scala @@ -5,10 +5,16 @@ import util.ManualClock import collection.mutable.ArrayBuffer import org.scalatest.FunSuite import collection.mutable.SynchronizedBuffer +import java.io.{ObjectInputStream, IOException} + +/** + * This is a input stream just for the testsuites. This is equivalent to a checkpointable, + * replayable, reliable message queue like Kafka. It requires a sequence as input, and + * returns the i_th element at the i_th batch unde manual clock. + */ class TestInputStream[T: ClassManifest](ssc_ : StreamingContext, input: Seq[Seq[T]], numPartitions: Int) extends InputDStream[T](ssc_) { - var currentIndex = 0 def start() {} @@ -23,17 +29,32 @@ class TestInputStream[T: ClassManifest](ssc_ : StreamingContext, input: Seq[Seq[ ssc.sc.makeRDD(Seq[T](), numPartitions) } logInfo("Created RDD " + rdd.id) - //currentIndex += 1 Some(rdd) } } +/** + * This is a output stream just for the testsuites. All the output is collected into a + * ArrayBuffer. This buffer is wiped clean on being restored from checkpoint. 
+ */ class TestOutputStream[T: ClassManifest](parent: DStream[T], val output: ArrayBuffer[Seq[T]]) extends PerRDDForEachDStream[T](parent, (rdd: RDD[T], t: Time) => { val collected = rdd.collect() output += collected - }) + }) { + // This is to clear the output buffer every it is read from a checkpoint + @throws(classOf[IOException]) + private def readObject(ois: ObjectInputStream) { + ois.defaultReadObject() + output.clear() + } +} + +/** + * This is the base trait for Spark Streaming testsuites. This provides basic functionality + * to run user-defined set of input on user-defined stream operations, and verify the output. + */ trait TestSuiteBase extends FunSuite with Logging { System.setProperty("spark.streaming.clock", "spark.streaming.util.ManualClock") @@ -44,7 +65,7 @@ trait TestSuiteBase extends FunSuite with Logging { def batchDuration() = Seconds(1) - def checkpointFile() = null.asInstanceOf[String] + def checkpointDir() = null.asInstanceOf[String] def checkpointInterval() = batchDuration @@ -60,8 +81,8 @@ trait TestSuiteBase extends FunSuite with Logging { // Create StreamingContext val ssc = new StreamingContext(master, framework) ssc.setBatchDuration(batchDuration) - if (checkpointFile != null) { - ssc.setCheckpointDetails(checkpointFile, checkpointInterval()) + if (checkpointDir != null) { + ssc.checkpoint(checkpointDir, checkpointInterval()) } // Setup the stream computation @@ -82,8 +103,8 @@ trait TestSuiteBase extends FunSuite with Logging { // Create StreamingContext val ssc = new StreamingContext(master, framework) ssc.setBatchDuration(batchDuration) - if (checkpointFile != null) { - ssc.setCheckpointDetails(checkpointFile, checkpointInterval()) + if (checkpointDir != null) { + ssc.checkpoint(checkpointDir, checkpointInterval()) } // Setup the stream computation diff --git a/streaming/src/test/scala/spark/streaming/WindowOperationsSuite.scala b/streaming/src/test/scala/spark/streaming/WindowOperationsSuite.scala index d7d8d5bd36..e282f0fdd5 100644 --- a/streaming/src/test/scala/spark/streaming/WindowOperationsSuite.scala +++ b/streaming/src/test/scala/spark/streaming/WindowOperationsSuite.scala @@ -283,7 +283,9 @@ class WindowOperationsSuite extends TestSuiteBase { test("reduceByKeyAndWindowInv - " + name) { val numBatches = expectedOutput.size * (slideTime.millis / batchDuration.millis).toInt val operation = (s: DStream[(String, Int)]) => { - s.reduceByKeyAndWindow(_ + _, _ - _, windowTime, slideTime).persist() + s.reduceByKeyAndWindow(_ + _, _ - _, windowTime, slideTime) + .persist() + .checkpoint(Seconds(100)) // Large value to avoid effect of RDD checkpointing } testOperation(input, operation, expectedOutput, numBatches, true) } From 72b2303f99bd652fc4bdaa929f37731a7ba8f640 Mon Sep 17 00:00:00 2001 From: Tathagata Das Date: Mon, 5 Nov 2012 11:41:36 -0800 Subject: [PATCH 096/226] Fixed major bugs in checkpointing. 
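The central change in this patch is that Checkpoint.load now walks a list of candidate files ("graph", "graph.bk", the checkpoint path itself and its ".bk" backup) and returns the first one that deserializes cleanly, logging and skipping any file that is missing or corrupt instead of tracking a single lastException. A minimal, self-contained sketch of that fallback-loading pattern is below; it is an illustration only, not the patch itself, and the CheckpointLoadSketch / loadFirstValid names are invented for the example.

import java.io.ObjectInputStream
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.Path

object CheckpointLoadSketch {
  // Tries each candidate path in order and returns the first object that can be
  // read back successfully; missing or unreadable files are skipped.
  def loadFirstValid[T](candidates: Seq[Path]): Option[T] = {
    val conf = new Configuration()
    candidates.view.flatMap { file =>
      val fs = file.getFileSystem(conf)
      if (!fs.exists(file)) {
        None
      } else {
        try {
          val ois = new ObjectInputStream(fs.open(file))
          try {
            Some(ois.readObject().asInstanceOf[T])
          } finally {
            ois.close()
          }
        } catch {
          case e: Exception =>
            // A half-written or corrupt file just means we move on to the next candidate
            println("Skipping unreadable checkpoint file '" + file + "': " + e)
            None
        }
      }
    }.headOption
  }
}
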
--- core/src/main/scala/spark/SparkContext.scala | 6 +- .../scala/spark/streaming/Checkpoint.scala | 24 ++- .../main/scala/spark/streaming/DStream.scala | 47 ++++-- .../scala/spark/streaming/DStreamGraph.scala | 36 +++-- .../scala/spark/streaming/Scheduler.scala | 1 - .../spark/streaming/StreamingContext.scala | 8 +- .../spark/streaming/CheckpointSuite.scala | 139 ++++++++++++++---- .../scala/spark/streaming/TestSuiteBase.scala | 37 +++-- .../streaming/WindowOperationsSuite.scala | 6 +- 9 files changed, 217 insertions(+), 87 deletions(-) diff --git a/core/src/main/scala/spark/SparkContext.scala b/core/src/main/scala/spark/SparkContext.scala index d7326971a9..d7b46bee38 100644 --- a/core/src/main/scala/spark/SparkContext.scala +++ b/core/src/main/scala/spark/SparkContext.scala @@ -474,8 +474,10 @@ class SparkContext( /** Shut down the SparkContext. */ def stop() { - dagScheduler.stop() - dagScheduler = null + if (dagScheduler != null) { + dagScheduler.stop() + dagScheduler = null + } taskScheduler = null // TODO: Cache.stop()? env.stop() diff --git a/streaming/src/main/scala/spark/streaming/Checkpoint.scala b/streaming/src/main/scala/spark/streaming/Checkpoint.scala index cf04c7031e..6b4b05103f 100644 --- a/streaming/src/main/scala/spark/streaming/Checkpoint.scala +++ b/streaming/src/main/scala/spark/streaming/Checkpoint.scala @@ -6,6 +6,7 @@ import org.apache.hadoop.fs.{FileUtil, Path} import org.apache.hadoop.conf.Configuration import java.io.{InputStream, ObjectStreamClass, ObjectInputStream, ObjectOutputStream} +import sys.process.processInternal class Checkpoint(@transient ssc: StreamingContext, val checkpointTime: Time) @@ -52,17 +53,17 @@ class Checkpoint(@transient ssc: StreamingContext, val checkpointTime: Time) } } -object Checkpoint { +object Checkpoint extends Logging { def load(path: String): Checkpoint = { val fs = new Path(path).getFileSystem(new Configuration()) - val attempts = Seq(new Path(path), new Path(path, "graph"), new Path(path, "graph.bk")) - var lastException: Exception = null - var lastExceptionFile: String = null + val attempts = Seq(new Path(path, "graph"), new Path(path, "graph.bk"), new Path(path), new Path(path + ".bk")) + var detailedLog: String = "" attempts.foreach(file => { if (fs.exists(file)) { + logInfo("Attempting to load checkpoint from file '" + file + "'") try { val fis = fs.open(file) // ObjectInputStream uses the last defined user-defined class loader in the stack @@ -75,21 +76,18 @@ object Checkpoint { ois.close() fs.close() cp.validate() - println("Checkpoint successfully loaded from file " + file) + logInfo("Checkpoint successfully loaded from file '" + file + "'") return cp } catch { case e: Exception => - lastException = e - lastExceptionFile = file.toString + logError("Error loading checkpoint from file '" + file + "'", e) } + } else { + logWarning("Could not load checkpoint from file '" + file + "' as it does not exist") } - }) - if (lastException == null) { - throw new Exception("Could not load checkpoint from path '" + path + "'") - } else { - throw new Exception("Error loading checkpoint from path '" + lastExceptionFile + "'", lastException) - } + }) + throw new Exception("Could not load checkpoint from path '" + path + "'") } def fromBytes(bytes: Array[Byte]): Checkpoint = { diff --git a/streaming/src/main/scala/spark/streaming/DStream.scala b/streaming/src/main/scala/spark/streaming/DStream.scala index de51c5d34a..2fecbe0acf 100644 --- a/streaming/src/main/scala/spark/streaming/DStream.scala +++ 
b/streaming/src/main/scala/spark/streaming/DStream.scala @@ -14,6 +14,8 @@ import java.util.concurrent.ArrayBlockingQueue import java.io.{ObjectInputStream, IOException, ObjectOutputStream} import scala.Some import collection.mutable +import org.apache.hadoop.fs.Path +import org.apache.hadoop.conf.Configuration abstract class DStream[T: ClassManifest] (@transient var ssc: StreamingContext) extends Serializable with Logging { @@ -42,6 +44,7 @@ extends Serializable with Logging { */ // RDDs generated, marked as protected[streaming] so that testsuites can access it + @transient protected[streaming] var generatedRDDs = new HashMap[Time, RDD[T]] () // Time zero for the DStream @@ -112,7 +115,7 @@ extends Serializable with Logging { // Set the minimum value of the rememberDuration if not already set var minRememberDuration = slideTime if (checkpointInterval != null && minRememberDuration <= checkpointInterval) { - minRememberDuration = checkpointInterval + slideTime + minRememberDuration = checkpointInterval * 2 // times 2 just to be sure that the latest checkpoint is not forgetten } if (rememberDuration == null || rememberDuration < minRememberDuration) { rememberDuration = minRememberDuration @@ -265,33 +268,59 @@ extends Serializable with Logging { if (t <= (time - rememberDuration)) { generatedRDDs.remove(t) numForgotten += 1 - //logInfo("Forgot RDD of time " + t + " from " + this) + logInfo("Forgot RDD of time " + t + " from " + this) } }) logInfo("Forgot " + numForgotten + " RDDs from " + this) dependencies.foreach(_.forgetOldRDDs(time)) } + /** + * Refreshes the list of checkpointed RDDs that will be saved along with checkpoint of this stream. + * Along with that it forget old checkpoint files. + */ protected[streaming] def updateCheckpointData() { + + // TODO (tdas): This code can be simplified. Its kept verbose to aid debugging. 
+ val checkpointedRDDs = generatedRDDs.filter(_._2.getCheckpointData() != null) + val removedCheckpointData = checkpointData.filter(x => !generatedRDDs.contains(x._1)) + checkpointData.clear() - generatedRDDs.foreach { - case(time, rdd) => { - logDebug("Adding checkpointed RDD for time " + time) + checkpointedRDDs.foreach { + case (time, rdd) => { val data = rdd.getCheckpointData() - if (data != null) { - checkpointData += ((time, data)) + assert(data != null) + checkpointData += ((time, data)) + logInfo("Added checkpointed RDD " + rdd + " for time " + time + " to stream checkpoint") + } + } + + dependencies.foreach(_.updateCheckpointData()) + // If at least one checkpoint is present, then delete old checkpoints + if (checkpointData.size > 0) { + // Delete the checkpoint RDD files that are not needed any more + removedCheckpointData.foreach { + case (time: Time, file: String) => { + val path = new Path(file) + val fs = path.getFileSystem(new Configuration()) + fs.delete(path, true) + logInfo("Deleted checkpoint file '" + file + "' for time " + time) } } } + + logInfo("Updated checkpoint data") } protected[streaming] def restoreCheckpointData() { + logInfo("Restoring checkpoint data from " + checkpointData.size + " checkpointed RDDs") checkpointData.foreach { case(time, data) => { - logInfo("Restoring checkpointed RDD for time " + time) + logInfo("Restoring checkpointed RDD for time " + time + " from file") generatedRDDs += ((time, ssc.sc.objectFile[T](data.toString))) } } + dependencies.foreach(_.restoreCheckpointData()) } @throws(classOf[IOException]) @@ -300,7 +329,6 @@ extends Serializable with Logging { if (graph != null) { graph.synchronized { if (graph.checkpointInProgress) { - updateCheckpointData() oos.defaultWriteObject() } else { val msg = "Object of " + this.getClass.getName + " is being serialized " + @@ -322,7 +350,6 @@ extends Serializable with Logging { logDebug(this.getClass().getSimpleName + ".readObject used") ois.defaultReadObject() generatedRDDs = new HashMap[Time, RDD[T]] () - restoreCheckpointData() } /** diff --git a/streaming/src/main/scala/spark/streaming/DStreamGraph.scala b/streaming/src/main/scala/spark/streaming/DStreamGraph.scala index f8922ec790..7437f4402d 100644 --- a/streaming/src/main/scala/spark/streaming/DStreamGraph.scala +++ b/streaming/src/main/scala/spark/streaming/DStreamGraph.scala @@ -4,7 +4,7 @@ import java.io.{ObjectInputStream, IOException, ObjectOutputStream} import collection.mutable.ArrayBuffer import spark.Logging -final class DStreamGraph extends Serializable with Logging { +final private[streaming] class DStreamGraph extends Serializable with Logging { initLogging() private val inputStreams = new ArrayBuffer[InputDStream[_]]() @@ -15,7 +15,7 @@ final class DStreamGraph extends Serializable with Logging { private[streaming] var rememberDuration: Time = null private[streaming] var checkpointInProgress = false - def start(time: Time) { + private[streaming] def start(time: Time) { this.synchronized { if (zeroTime != null) { throw new Exception("DStream graph computation already started") @@ -28,7 +28,7 @@ final class DStreamGraph extends Serializable with Logging { } } - def stop() { + private[streaming] def stop() { this.synchronized { inputStreams.par.foreach(_.stop()) } @@ -40,7 +40,7 @@ final class DStreamGraph extends Serializable with Logging { } } - def setBatchDuration(duration: Time) { + private[streaming] def setBatchDuration(duration: Time) { this.synchronized { if (batchDuration != null) { throw new Exception("Batch duration 
already set as " + batchDuration + @@ -50,7 +50,7 @@ final class DStreamGraph extends Serializable with Logging { batchDuration = duration } - def setRememberDuration(duration: Time) { + private[streaming] def setRememberDuration(duration: Time) { this.synchronized { if (rememberDuration != null) { throw new Exception("Batch duration already set as " + batchDuration + @@ -60,37 +60,49 @@ final class DStreamGraph extends Serializable with Logging { rememberDuration = duration } - def addInputStream(inputStream: InputDStream[_]) { + private[streaming] def addInputStream(inputStream: InputDStream[_]) { this.synchronized { inputStream.setGraph(this) inputStreams += inputStream } } - def addOutputStream(outputStream: DStream[_]) { + private[streaming] def addOutputStream(outputStream: DStream[_]) { this.synchronized { outputStream.setGraph(this) outputStreams += outputStream } } - def getInputStreams() = inputStreams.toArray + private[streaming] def getInputStreams() = this.synchronized { inputStreams.toArray } - def getOutputStreams() = outputStreams.toArray + private[streaming] def getOutputStreams() = this.synchronized { outputStreams.toArray } - def generateRDDs(time: Time): Seq[Job] = { + private[streaming] def generateRDDs(time: Time): Seq[Job] = { this.synchronized { outputStreams.flatMap(outputStream => outputStream.generateJob(time)) } } - def forgetOldRDDs(time: Time) { + private[streaming] def forgetOldRDDs(time: Time) { this.synchronized { outputStreams.foreach(_.forgetOldRDDs(time)) } } - def validate() { + private[streaming] def updateCheckpointData() { + this.synchronized { + outputStreams.foreach(_.updateCheckpointData()) + } + } + + private[streaming] def restoreCheckpointData() { + this.synchronized { + outputStreams.foreach(_.restoreCheckpointData()) + } + } + + private[streaming] def validate() { this.synchronized { assert(batchDuration != null, "Batch duration has not been set") assert(batchDuration > Milliseconds(100), "Batch duration of " + batchDuration + " is very low") diff --git a/streaming/src/main/scala/spark/streaming/Scheduler.scala b/streaming/src/main/scala/spark/streaming/Scheduler.scala index 7d52e2eddf..2b3f5a4829 100644 --- a/streaming/src/main/scala/spark/streaming/Scheduler.scala +++ b/streaming/src/main/scala/spark/streaming/Scheduler.scala @@ -58,7 +58,6 @@ extends Logging { graph.forgetOldRDDs(time) if (ssc.checkpointInterval != null && (time - graph.zeroTime).isMultipleOf(ssc.checkpointInterval)) { ssc.doCheckpoint(time) - logInfo("Checkpointed at time " + time) } } diff --git a/streaming/src/main/scala/spark/streaming/StreamingContext.scala b/streaming/src/main/scala/spark/streaming/StreamingContext.scala index 3838e84113..fb36ab9dc9 100644 --- a/streaming/src/main/scala/spark/streaming/StreamingContext.scala +++ b/streaming/src/main/scala/spark/streaming/StreamingContext.scala @@ -54,6 +54,7 @@ class StreamingContext ( val graph: DStreamGraph = { if (isCheckpointPresent) { cp_.graph.setContext(this) + cp_.graph.restoreCheckpointData() cp_.graph } else { new DStreamGraph() @@ -218,17 +219,16 @@ class StreamingContext ( if (scheduler != null) scheduler.stop() if (networkInputTracker != null) networkInputTracker.stop() if (receiverJobThread != null) receiverJobThread.interrupt() - sc.stop() + sc.stop() + logInfo("StreamingContext stopped successfully") } catch { case e: Exception => logWarning("Error while stopping", e) } - - logInfo("StreamingContext stopped") } def doCheckpoint(currentTime: Time) { + graph.updateCheckpointData() new Checkpoint(this, 
currentTime).save(checkpointDir) - } } diff --git a/streaming/src/test/scala/spark/streaming/CheckpointSuite.scala b/streaming/src/test/scala/spark/streaming/CheckpointSuite.scala index dfe31b5771..aa8ded513c 100644 --- a/streaming/src/test/scala/spark/streaming/CheckpointSuite.scala +++ b/streaming/src/test/scala/spark/streaming/CheckpointSuite.scala @@ -2,11 +2,11 @@ package spark.streaming import spark.streaming.StreamingContext._ import java.io.File -import collection.mutable.ArrayBuffer import runtime.RichInt import org.scalatest.BeforeAndAfter -import org.apache.hadoop.fs.Path import org.apache.commons.io.FileUtils +import collection.mutable.{SynchronizedBuffer, ArrayBuffer} +import util.ManualClock class CheckpointSuite extends TestSuiteBase with BeforeAndAfter { @@ -18,39 +18,83 @@ class CheckpointSuite extends TestSuiteBase with BeforeAndAfter { FileUtils.deleteDirectory(new File(checkpointDir)) } - override def framework() = "CheckpointSuite" + override def framework = "CheckpointSuite" - override def batchDuration() = Seconds(1) + override def batchDuration = Milliseconds(500) - override def checkpointDir() = "checkpoint" + override def checkpointDir = "checkpoint" - override def checkpointInterval() = batchDuration + override def checkpointInterval = batchDuration - def testCheckpointedOperation[U: ClassManifest, V: ClassManifest]( - input: Seq[Seq[U]], - operation: DStream[U] => DStream[V], - expectedOutput: Seq[Seq[V]], - initialNumBatches: Int - ) { + override def actuallyWait = true - // Current code assumes that: - // number of inputs = number of outputs = number of batches to be run - val totalNumBatches = input.size - val nextNumBatches = totalNumBatches - initialNumBatches - val initialNumExpectedOutputs = initialNumBatches - val nextNumExpectedOutputs = expectedOutput.size - initialNumExpectedOutputs + test("basic stream+rdd recovery") { - // Do half the computation (half the number of batches), create checkpoint file and quit - val ssc = setupStreams[U, V](input, operation) - val output = runStreams[V](ssc, initialNumBatches, initialNumExpectedOutputs) - verifyOutput[V](output, expectedOutput.take(initialNumBatches), true) - Thread.sleep(1000) + assert(batchDuration === Milliseconds(500), "batchDuration for this test must be 1 second") + System.setProperty("spark.streaming.clock", "spark.streaming.util.ManualClock") - // Restart and complete the computation from checkpoint file + val checkpointingInterval = Seconds(2) + + // this ensure checkpointing occurs at least once + val firstNumBatches = (checkpointingInterval.millis / batchDuration.millis) * 2 + val secondNumBatches = firstNumBatches + + // Setup the streams + val input = (1 to 10).map(_ => Seq("a")).toSeq + val operation = (st: DStream[String]) => { + val updateFunc = (values: Seq[Int], state: Option[RichInt]) => { + Some(new RichInt(values.foldLeft(0)(_ + _) + state.map(_.self).getOrElse(0))) + } + st.map(x => (x, 1)) + .updateStateByKey[RichInt](updateFunc) + .checkpoint(checkpointingInterval) + .map(t => (t._1, t._2.self)) + } + val ssc = setupStreams(input, operation) + val stateStream = ssc.graph.getOutputStreams().head.dependencies.head.dependencies.head + + // Run till a time such that at least one RDD in the stream should have been checkpointed + ssc.start() + val clock = ssc.scheduler.clock.asInstanceOf[ManualClock] + logInfo("Manual clock before advancing = " + clock.time) + for (i <- 1 to firstNumBatches.toInt) { + clock.addToTime(batchDuration.milliseconds) + 
Thread.sleep(batchDuration.milliseconds) + } + logInfo("Manual clock after advancing = " + clock.time) + Thread.sleep(batchDuration.milliseconds) + + // Check whether some RDD has been checkpointed or not + logInfo("Checkpoint data of state stream = \n[" + stateStream.checkpointData.mkString(",\n") + "]") + assert(!stateStream.checkpointData.isEmpty, "No checkpointed RDDs in state stream") + stateStream.checkpointData.foreach { + case (time, data) => { + val file = new File(data.toString) + assert(file.exists(), "Checkpoint file '" + file +"' for time " + time + " does not exist") + } + } + val checkpointFiles = stateStream.checkpointData.map(x => new File(x._2.toString)) + + // Run till a further time such that previous checkpoint files in the stream would be deleted + logInfo("Manual clock before advancing = " + clock.time) + for (i <- 1 to secondNumBatches.toInt) { + clock.addToTime(batchDuration.milliseconds) + Thread.sleep(batchDuration.milliseconds) + } + logInfo("Manual clock after advancing = " + clock.time) + Thread.sleep(batchDuration.milliseconds) + + // Check whether the earlier checkpoint files are deleted + checkpointFiles.foreach(file => assert(!file.exists, "Checkpoint file '" + file + "' was not deleted")) + + // Restart stream computation using the checkpoint file and check whether + // checkpointed RDDs have been restored or not + ssc.stop() val sscNew = new StreamingContext(checkpointDir) - //sscNew.checkpoint(null, null) - val outputNew = runStreams[V](sscNew, nextNumBatches, nextNumExpectedOutputs) - verifyOutput[V](outputNew, expectedOutput.takeRight(nextNumExpectedOutputs), true) + val stateStreamNew = sscNew.graph.getOutputStreams().head.dependencies.head.dependencies.head + logInfo("Restored data of state stream = \n[" + stateStreamNew.generatedRDDs.mkString("\n") + "]") + assert(!stateStreamNew.generatedRDDs.isEmpty, "No restored RDDs in state stream") + sscNew.stop() } @@ -69,9 +113,9 @@ class CheckpointSuite extends TestSuiteBase with BeforeAndAfter { val input = (1 to n).map(x => Seq("a")).toSeq val output = Seq(Seq(("a", 1)), Seq(("a", 2)), Seq(("a", 3))) ++ (1 to (n - w + 1)).map(x => Seq(("a", 4))) val operation = (st: DStream[String]) => { - st.map(x => (x, 1)).reduceByKeyAndWindow(_ + _, _ - _, Seconds(w), Seconds(1)) + st.map(x => (x, 1)).reduceByKeyAndWindow(_ + _, _ - _, batchDuration * 4, batchDuration) } - for (i <- Seq(3, 5, 7)) { + for (i <- Seq(2, 3, 4)) { testCheckpointedOperation(input, operation, output, i) } } @@ -85,12 +129,45 @@ class CheckpointSuite extends TestSuiteBase with BeforeAndAfter { } st.map(x => (x, 1)) .updateStateByKey[RichInt](updateFunc) - .checkpoint(Seconds(5)) + .checkpoint(Seconds(2)) .map(t => (t._1, t._2.self)) } - for (i <- Seq(3, 5, 7)) { + for (i <- Seq(2, 3, 4)) { testCheckpointedOperation(input, operation, output, i) } } + + + def testCheckpointedOperation[U: ClassManifest, V: ClassManifest]( + input: Seq[Seq[U]], + operation: DStream[U] => DStream[V], + expectedOutput: Seq[Seq[V]], + initialNumBatches: Int + ) { + + // Current code assumes that: + // number of inputs = number of outputs = number of batches to be run + val totalNumBatches = input.size + val nextNumBatches = totalNumBatches - initialNumBatches + val initialNumExpectedOutputs = initialNumBatches + val nextNumExpectedOutputs = expectedOutput.size - initialNumExpectedOutputs + + // Do half the computation (half the number of batches), create checkpoint file and quit + + val ssc = setupStreams[U, V](input, operation) + val output = runStreams[V](ssc, 
initialNumBatches, initialNumExpectedOutputs) + verifyOutput[V](output, expectedOutput.take(initialNumBatches), true) + Thread.sleep(1000) + + // Restart and complete the computation from checkpoint file + logInfo( + "\n-------------------------------------------\n" + + " Restarting stream computation " + + "\n-------------------------------------------\n" + ) + val sscNew = new StreamingContext(checkpointDir) + val outputNew = runStreams[V](sscNew, nextNumBatches, nextNumExpectedOutputs) + verifyOutput[V](outputNew, expectedOutput.takeRight(nextNumExpectedOutputs), true) + } } \ No newline at end of file diff --git a/streaming/src/test/scala/spark/streaming/TestSuiteBase.scala b/streaming/src/test/scala/spark/streaming/TestSuiteBase.scala index e441feea19..b8c7f99603 100644 --- a/streaming/src/test/scala/spark/streaming/TestSuiteBase.scala +++ b/streaming/src/test/scala/spark/streaming/TestSuiteBase.scala @@ -57,21 +57,21 @@ class TestOutputStream[T: ClassManifest](parent: DStream[T], val output: ArrayBu */ trait TestSuiteBase extends FunSuite with Logging { - System.setProperty("spark.streaming.clock", "spark.streaming.util.ManualClock") + def framework = "TestSuiteBase" - def framework() = "TestSuiteBase" + def master = "local[2]" - def master() = "local[2]" + def batchDuration = Seconds(1) - def batchDuration() = Seconds(1) + def checkpointDir = null.asInstanceOf[String] - def checkpointDir() = null.asInstanceOf[String] + def checkpointInterval = batchDuration - def checkpointInterval() = batchDuration + def numInputPartitions = 2 - def numInputPartitions() = 2 + def maxWaitTimeMillis = 10000 - def maxWaitTimeMillis() = 10000 + def actuallyWait = false def setupStreams[U: ClassManifest, V: ClassManifest]( input: Seq[Seq[U]], @@ -82,7 +82,7 @@ trait TestSuiteBase extends FunSuite with Logging { val ssc = new StreamingContext(master, framework) ssc.setBatchDuration(batchDuration) if (checkpointDir != null) { - ssc.checkpoint(checkpointDir, checkpointInterval()) + ssc.checkpoint(checkpointDir, checkpointInterval) } // Setup the stream computation @@ -104,7 +104,7 @@ trait TestSuiteBase extends FunSuite with Logging { val ssc = new StreamingContext(master, framework) ssc.setBatchDuration(batchDuration) if (checkpointDir != null) { - ssc.checkpoint(checkpointDir, checkpointInterval()) + ssc.checkpoint(checkpointDir, checkpointInterval) } // Setup the stream computation @@ -118,12 +118,19 @@ trait TestSuiteBase extends FunSuite with Logging { ssc } + /** + * Runs the streams set up in `ssc` on manual clock for `numBatches` batches and + * returns the collected output. It will wait until `numExpectedOutput` number of + * output data has been collected or timeout (set by `maxWaitTimeMillis`) is reached. 
+ */ def runStreams[V: ClassManifest]( ssc: StreamingContext, numBatches: Int, numExpectedOutput: Int ): Seq[Seq[V]] = { + System.setProperty("spark.streaming.clock", "spark.streaming.util.ManualClock") + assert(numBatches > 0, "Number of batches to run stream computation is zero") assert(numExpectedOutput > 0, "Number of expected outputs after " + numBatches + " is zero") logInfo("numBatches = " + numBatches + ", numExpectedOutput = " + numExpectedOutput) @@ -139,7 +146,15 @@ trait TestSuiteBase extends FunSuite with Logging { // Advance manual clock val clock = ssc.scheduler.clock.asInstanceOf[ManualClock] logInfo("Manual clock before advancing = " + clock.time) - clock.addToTime(numBatches * batchDuration.milliseconds) + if (actuallyWait) { + for (i <- 1 to numBatches) { + logInfo("Actually waiting for " + batchDuration) + clock.addToTime(batchDuration.milliseconds) + Thread.sleep(batchDuration.milliseconds) + } + } else { + clock.addToTime(numBatches * batchDuration.milliseconds) + } logInfo("Manual clock after advancing = " + clock.time) // Wait until expected number of output items have been generated diff --git a/streaming/src/test/scala/spark/streaming/WindowOperationsSuite.scala b/streaming/src/test/scala/spark/streaming/WindowOperationsSuite.scala index e282f0fdd5..3e20e16708 100644 --- a/streaming/src/test/scala/spark/streaming/WindowOperationsSuite.scala +++ b/streaming/src/test/scala/spark/streaming/WindowOperationsSuite.scala @@ -5,11 +5,11 @@ import collection.mutable.ArrayBuffer class WindowOperationsSuite extends TestSuiteBase { - override def framework() = "WindowOperationsSuite" + override def framework = "WindowOperationsSuite" - override def maxWaitTimeMillis() = 20000 + override def maxWaitTimeMillis = 20000 - override def batchDuration() = Seconds(1) + override def batchDuration = Seconds(1) val largerSlideInput = Seq( Seq(("a", 1)), From 395167f2b2a1906cde23b1f3ddc2808514bce47b Mon Sep 17 00:00:00 2001 From: Tathagata Das Date: Mon, 5 Nov 2012 16:11:50 -0800 Subject: [PATCH 097/226] Made more bug fixes for checkpointing. 
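Most of this patch reworks DStream.updateCheckpointData: the new checkpoint data is computed first, the old map is kept aside, the map is swapped only when there is something new to record, and only entries that dropped out of the map have their files deleted. A rough sketch of that swap-then-clean-up pattern follows; it is illustrative only, with times modeled as Long, checkpoint files as plain java.io.File, and all names invented for the example.

import java.io.File
import scala.collection.mutable.HashMap

object CheckpointDataSketch {
  // time -> checkpoint file name for RDDs that are currently checkpointed
  val checkpointData = new HashMap[Long, String]()

  def refresh(newData: Map[Long, String]) {
    val oldData = checkpointData.clone()
    // Swap the map only if there is at least one new checkpoint, so a batch with
    // nothing checkpointed does not wipe out the last recorded data
    if (newData.nonEmpty) {
      checkpointData.clear()
      checkpointData ++= newData
      // Files that were referenced before but no longer are can be deleted safely
      oldData.foreach { case (time, file) =>
        if (!newData.contains(time) && new File(file).delete()) {
          println("Deleted old checkpoint file '" + file + "' for time " + time)
        }
      }
    }
  }
}
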
--- .../scala/spark/streaming/Checkpoint.scala | 1 - .../main/scala/spark/streaming/DStream.scala | 77 ++++++++++-------- .../scala/spark/streaming/DStreamGraph.scala | 4 +- .../spark/streaming/StreamingContext.scala | 2 +- .../src/main/scala/spark/streaming/Time.scala | 2 +- .../spark/streaming/CheckpointSuite.scala | 79 ++++++++++++------- 6 files changed, 98 insertions(+), 67 deletions(-) diff --git a/streaming/src/main/scala/spark/streaming/Checkpoint.scala b/streaming/src/main/scala/spark/streaming/Checkpoint.scala index 6b4b05103f..1643f45ffb 100644 --- a/streaming/src/main/scala/spark/streaming/Checkpoint.scala +++ b/streaming/src/main/scala/spark/streaming/Checkpoint.scala @@ -6,7 +6,6 @@ import org.apache.hadoop.fs.{FileUtil, Path} import org.apache.hadoop.conf.Configuration import java.io.{InputStream, ObjectStreamClass, ObjectInputStream, ObjectOutputStream} -import sys.process.processInternal class Checkpoint(@transient ssc: StreamingContext, val checkpointTime: Time) diff --git a/streaming/src/main/scala/spark/streaming/DStream.scala b/streaming/src/main/scala/spark/streaming/DStream.scala index 2fecbe0acf..922ff5088d 100644 --- a/streaming/src/main/scala/spark/streaming/DStream.scala +++ b/streaming/src/main/scala/spark/streaming/DStream.scala @@ -1,6 +1,7 @@ package spark.streaming -import spark.streaming.StreamingContext._ +import StreamingContext._ +import Time._ import spark._ import spark.SparkContext._ @@ -12,8 +13,7 @@ import scala.collection.mutable.HashMap import java.util.concurrent.ArrayBlockingQueue import java.io.{ObjectInputStream, IOException, ObjectOutputStream} -import scala.Some -import collection.mutable + import org.apache.hadoop.fs.Path import org.apache.hadoop.conf.Configuration @@ -206,10 +206,10 @@ extends Serializable with Logging { } /** - * This method either retrieves a precomputed RDD of this DStream, - * or computes the RDD (if the time is valid) + * Retrieves a precomputed RDD of this DStream, or computes the RDD. This is an internal + * method that should not be called directly. */ - def getOrCompute(time: Time): Option[RDD[T]] = { + protected[streaming] def getOrCompute(time: Time): Option[RDD[T]] = { // If this DStream was not initialized (i.e., zeroTime not set), then do it // If RDD was already generated, then retrieve it from HashMap generatedRDDs.get(time) match { @@ -245,10 +245,12 @@ extends Serializable with Logging { } /** - * This method generates a SparkStreaming job for the given time - * and may required to be overriden by subclasses + * Generates a SparkStreaming job for the given time. This is an internal method that + * should not be called directly. This default implementation creates a job + * that materializes the corresponding RDD. Subclasses of DStream may override this + * (eg. PerRDDForEachDStream). */ - def generateJob(time: Time): Option[Job] = { + protected[streaming] def generateJob(time: Time): Option[Job] = { getOrCompute(time) match { case Some(rdd) => { val jobFunc = () => { @@ -261,6 +263,9 @@ extends Serializable with Logging { } } + /** + * Dereferences RDDs that are older than rememberDuration. + */ protected[streaming] def forgetOldRDDs(time: Time) { val keys = generatedRDDs.keys var numForgotten = 0 @@ -276,42 +281,46 @@ extends Serializable with Logging { } /** - * Refreshes the list of checkpointed RDDs that will be saved along with checkpoint of this stream. - * Along with that it forget old checkpoint files. 
+ * Refreshes the list of checkpointed RDDs that will be saved along with checkpoint of + * this stream. This is an internal method that should not be called directly. This is + * a default implementation that saves only the file names of the checkpointed RDDs to + * checkpointData. Subclasses of DStream (especially those of InputDStream) may override + * this method to save custom checkpoint data. */ - protected[streaming] def updateCheckpointData() { - - // TODO (tdas): This code can be simplified. Its kept verbose to aid debugging. - val checkpointedRDDs = generatedRDDs.filter(_._2.getCheckpointData() != null) - val removedCheckpointData = checkpointData.filter(x => !generatedRDDs.contains(x._1)) - - checkpointData.clear() - checkpointedRDDs.foreach { - case (time, rdd) => { - val data = rdd.getCheckpointData() - assert(data != null) - checkpointData += ((time, data)) - logInfo("Added checkpointed RDD " + rdd + " for time " + time + " to stream checkpoint") - } + protected[streaming] def updateCheckpointData(currentTime: Time) { + val newCheckpointData = generatedRDDs.filter(_._2.getCheckpointData() != null) + .map(x => (x._1, x._2.getCheckpointData())) + val oldCheckpointData = checkpointData.clone() + if (newCheckpointData.size > 0) { + checkpointData.clear() + checkpointData ++= newCheckpointData } - dependencies.foreach(_.updateCheckpointData()) - // If at least one checkpoint is present, then delete old checkpoints - if (checkpointData.size > 0) { - // Delete the checkpoint RDD files that are not needed any more - removedCheckpointData.foreach { - case (time: Time, file: String) => { - val path = new Path(file) + dependencies.foreach(_.updateCheckpointData(currentTime)) + + newCheckpointData.foreach { + case (time, data) => { logInfo("Added checkpointed RDD for time " + time + " to stream checkpoint") } + } + + if (newCheckpointData.size > 0) { + (oldCheckpointData -- newCheckpointData.keySet).foreach { + case (time, data) => { + val path = new Path(data.toString) val fs = path.getFileSystem(new Configuration()) fs.delete(path, true) - logInfo("Deleted checkpoint file '" + file + "' for time " + time) + logInfo("Deleted checkpoint file '" + path + "' for time " + time) } } } - logInfo("Updated checkpoint data") } + /** + * Restores the RDDs in generatedRDDs from the checkpointData. This is an internal method + * that should not be called directly. This is a default implementation that recreates RDDs + * from the checkpoint file names stored in checkpointData. Subclasses of DStream that + * override the updateCheckpointData() method would also need to override this method. 
+ */ protected[streaming] def restoreCheckpointData() { logInfo("Restoring checkpoint data from " + checkpointData.size + " checkpointed RDDs") checkpointData.foreach { diff --git a/streaming/src/main/scala/spark/streaming/DStreamGraph.scala b/streaming/src/main/scala/spark/streaming/DStreamGraph.scala index 7437f4402d..246522838a 100644 --- a/streaming/src/main/scala/spark/streaming/DStreamGraph.scala +++ b/streaming/src/main/scala/spark/streaming/DStreamGraph.scala @@ -90,9 +90,9 @@ final private[streaming] class DStreamGraph extends Serializable with Logging { } } - private[streaming] def updateCheckpointData() { + private[streaming] def updateCheckpointData(time: Time) { this.synchronized { - outputStreams.foreach(_.updateCheckpointData()) + outputStreams.foreach(_.updateCheckpointData(time)) } } diff --git a/streaming/src/main/scala/spark/streaming/StreamingContext.scala b/streaming/src/main/scala/spark/streaming/StreamingContext.scala index fb36ab9dc9..25caaf7d39 100644 --- a/streaming/src/main/scala/spark/streaming/StreamingContext.scala +++ b/streaming/src/main/scala/spark/streaming/StreamingContext.scala @@ -227,7 +227,7 @@ class StreamingContext ( } def doCheckpoint(currentTime: Time) { - graph.updateCheckpointData() + graph.updateCheckpointData(currentTime) new Checkpoint(this, currentTime).save(checkpointDir) } } diff --git a/streaming/src/main/scala/spark/streaming/Time.scala b/streaming/src/main/scala/spark/streaming/Time.scala index 2ba6502971..480d292d7c 100644 --- a/streaming/src/main/scala/spark/streaming/Time.scala +++ b/streaming/src/main/scala/spark/streaming/Time.scala @@ -43,7 +43,7 @@ object Time { implicit def toTime(long: Long) = Time(long) - implicit def toLong(time: Time) = time.milliseconds + implicit def toLong(time: Time) = time.milliseconds } object Milliseconds { diff --git a/streaming/src/test/scala/spark/streaming/CheckpointSuite.scala b/streaming/src/test/scala/spark/streaming/CheckpointSuite.scala index aa8ded513c..9fdfd50be2 100644 --- a/streaming/src/test/scala/spark/streaming/CheckpointSuite.scala +++ b/streaming/src/test/scala/spark/streaming/CheckpointSuite.scala @@ -6,7 +6,7 @@ import runtime.RichInt import org.scalatest.BeforeAndAfter import org.apache.commons.io.FileUtils import collection.mutable.{SynchronizedBuffer, ArrayBuffer} -import util.ManualClock +import util.{Clock, ManualClock} class CheckpointSuite extends TestSuiteBase with BeforeAndAfter { @@ -31,12 +31,14 @@ class CheckpointSuite extends TestSuiteBase with BeforeAndAfter { test("basic stream+rdd recovery") { assert(batchDuration === Milliseconds(500), "batchDuration for this test must be 1 second") + assert(checkpointInterval === batchDuration, "checkpointInterval for this test much be same as batchDuration") + System.setProperty("spark.streaming.clock", "spark.streaming.util.ManualClock") - val checkpointingInterval = Seconds(2) + val stateStreamCheckpointInterval = Seconds(2) // this ensure checkpointing occurs at least once - val firstNumBatches = (checkpointingInterval.millis / batchDuration.millis) * 2 + val firstNumBatches = (stateStreamCheckpointInterval.millis / batchDuration.millis) * 2 val secondNumBatches = firstNumBatches // Setup the streams @@ -47,7 +49,7 @@ class CheckpointSuite extends TestSuiteBase with BeforeAndAfter { } st.map(x => (x, 1)) .updateStateByKey[RichInt](updateFunc) - .checkpoint(checkpointingInterval) + .checkpoint(stateStreamCheckpointInterval) .map(t => (t._1, t._2.self)) } val ssc = setupStreams(input, operation) @@ -56,35 +58,22 @@ class 
CheckpointSuite extends TestSuiteBase with BeforeAndAfter { // Run till a time such that at least one RDD in the stream should have been checkpointed ssc.start() val clock = ssc.scheduler.clock.asInstanceOf[ManualClock] - logInfo("Manual clock before advancing = " + clock.time) - for (i <- 1 to firstNumBatches.toInt) { - clock.addToTime(batchDuration.milliseconds) - Thread.sleep(batchDuration.milliseconds) - } - logInfo("Manual clock after advancing = " + clock.time) - Thread.sleep(batchDuration.milliseconds) + advanceClock(clock, firstNumBatches) // Check whether some RDD has been checkpointed or not logInfo("Checkpoint data of state stream = \n[" + stateStream.checkpointData.mkString(",\n") + "]") - assert(!stateStream.checkpointData.isEmpty, "No checkpointed RDDs in state stream") + assert(!stateStream.checkpointData.isEmpty, "No checkpointed RDDs in state stream before first failure") stateStream.checkpointData.foreach { case (time, data) => { val file = new File(data.toString) - assert(file.exists(), "Checkpoint file '" + file +"' for time " + time + " does not exist") + assert(file.exists(), "Checkpoint file '" + file +"' for time " + time + " for state stream before first failure does not exist") } } - val checkpointFiles = stateStream.checkpointData.map(x => new File(x._2.toString)) // Run till a further time such that previous checkpoint files in the stream would be deleted - logInfo("Manual clock before advancing = " + clock.time) - for (i <- 1 to secondNumBatches.toInt) { - clock.addToTime(batchDuration.milliseconds) - Thread.sleep(batchDuration.milliseconds) - } - logInfo("Manual clock after advancing = " + clock.time) - Thread.sleep(batchDuration.milliseconds) - - // Check whether the earlier checkpoint files are deleted + // and check whether the earlier checkpoint files are deleted + val checkpointFiles = stateStream.checkpointData.map(x => new File(x._2.toString)) + advanceClock(clock, secondNumBatches) checkpointFiles.foreach(file => assert(!file.exists, "Checkpoint file '" + file + "' was not deleted")) // Restart stream computation using the checkpoint file and check whether @@ -93,11 +82,35 @@ class CheckpointSuite extends TestSuiteBase with BeforeAndAfter { val sscNew = new StreamingContext(checkpointDir) val stateStreamNew = sscNew.graph.getOutputStreams().head.dependencies.head.dependencies.head logInfo("Restored data of state stream = \n[" + stateStreamNew.generatedRDDs.mkString("\n") + "]") - assert(!stateStreamNew.generatedRDDs.isEmpty, "No restored RDDs in state stream") - sscNew.stop() - } + assert(!stateStreamNew.generatedRDDs.isEmpty, "No restored RDDs in state stream after recovery from first failure") + // Run one batch to generate a new checkpoint file + sscNew.start() + val clockNew = sscNew.scheduler.clock.asInstanceOf[ManualClock] + advanceClock(clockNew, 1) + + // Check whether some RDD is present in the checkpoint data or not + assert(!stateStreamNew.checkpointData.isEmpty, "No checkpointed RDDs in state stream before second failure") + stateStream.checkpointData.foreach { + case (time, data) => { + val file = new File(data.toString) + assert(file.exists(), "Checkpoint file '" + file +"' for time " + time + " for state stream before seconds failure does not exist") + } + } + + // Restart stream computation from the new checkpoint file to see whether that file has + // correct checkpoint data + sscNew.stop() + val sscNewNew = new StreamingContext(checkpointDir) + val stateStreamNewNew = 
sscNew.graph.getOutputStreams().head.dependencies.head.dependencies.head + logInfo("Restored data of state stream = \n[" + stateStreamNew.generatedRDDs.mkString("\n") + "]") + assert(!stateStreamNewNew.generatedRDDs.isEmpty, "No restored RDDs in state stream after recovery from second failure") + sscNewNew.start() + advanceClock(sscNewNew.scheduler.clock.asInstanceOf[ManualClock], 1) + sscNewNew.stop() + } + test("map and reduceByKey") { testCheckpointedOperation( Seq( Seq("a", "a", "b"), Seq("", ""), Seq(), Seq("a", "a", "b"), Seq("", ""), Seq() ), @@ -163,11 +176,21 @@ class CheckpointSuite extends TestSuiteBase with BeforeAndAfter { // Restart and complete the computation from checkpoint file logInfo( "\n-------------------------------------------\n" + - " Restarting stream computation " + - "\n-------------------------------------------\n" + " Restarting stream computation " + + "\n-------------------------------------------\n" ) val sscNew = new StreamingContext(checkpointDir) val outputNew = runStreams[V](sscNew, nextNumBatches, nextNumExpectedOutputs) verifyOutput[V](outputNew, expectedOutput.takeRight(nextNumExpectedOutputs), true) } + + def advanceClock(clock: ManualClock, numBatches: Long) { + logInfo("Manual clock before advancing = " + clock.time) + for (i <- 1 to numBatches.toInt) { + clock.addToTime(batchDuration.milliseconds) + Thread.sleep(batchDuration.milliseconds) + } + logInfo("Manual clock after advancing = " + clock.time) + Thread.sleep(batchDuration.milliseconds) + } } \ No newline at end of file From f8bb719cd212f7e7f821c3f69b897985f47a2f83 Mon Sep 17 00:00:00 2001 From: Tathagata Das Date: Mon, 5 Nov 2012 17:53:56 -0800 Subject: [PATCH 098/226] Added a few more comments to the checkpoint-related functions. --- streaming/src/main/scala/spark/streaming/DStream.scala | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/streaming/src/main/scala/spark/streaming/DStream.scala b/streaming/src/main/scala/spark/streaming/DStream.scala index 922ff5088d..40744eac19 100644 --- a/streaming/src/main/scala/spark/streaming/DStream.scala +++ b/streaming/src/main/scala/spark/streaming/DStream.scala @@ -288,20 +288,27 @@ extends Serializable with Logging { * this method to save custom checkpoint data. */ protected[streaming] def updateCheckpointData(currentTime: Time) { + // Get the checkpointed RDDs from the generated RDDs val newCheckpointData = generatedRDDs.filter(_._2.getCheckpointData() != null) .map(x => (x._1, x._2.getCheckpointData())) + // Make a copy of the existing checkpoint data val oldCheckpointData = checkpointData.clone() + + // If the new checkpoint has checkpoints then replace existing with the new one if (newCheckpointData.size > 0) { checkpointData.clear() checkpointData ++= newCheckpointData } + // Make dependencies update their checkpoint data dependencies.foreach(_.updateCheckpointData(currentTime)) + // TODO: remove this, this is just for debugging newCheckpointData.foreach { case (time, data) => { logInfo("Added checkpointed RDD for time " + time + " to stream checkpoint") } } + // If old checkpoint files have been removed from checkpoint data, then remove the files if (newCheckpointData.size > 0) { (oldCheckpointData -- newCheckpointData.keySet).foreach { case (time, data) => { @@ -322,6 +329,7 @@ extends Serializable with Logging { * override the updateCheckpointData() method would also need to override this method. 
*/ protected[streaming] def restoreCheckpointData() { + // Create RDDs from the checkpoint data logInfo("Restoring checkpoint data from " + checkpointData.size + " checkpointed RDDs") checkpointData.foreach { case(time, data) => { From 0c1de43fc7a9fea8629907d5b331e466f18be418 Mon Sep 17 00:00:00 2001 From: Denny Date: Tue, 6 Nov 2012 09:41:42 -0800 Subject: [PATCH 099/226] Working on kafka. --- project/SparkBuild.scala | 4 +- .../spark/streaming/StreamingContext.scala | 11 ++ .../streaming/examples/KafkaWordCount.scala | 27 ++++ .../streaming/input/KafkaInputDStream.scala | 121 ++++++++++++++++++ 4 files changed, 162 insertions(+), 1 deletion(-) create mode 100644 streaming/src/main/scala/spark/streaming/examples/KafkaWordCount.scala create mode 100644 streaming/src/main/scala/spark/streaming/input/KafkaInputDStream.scala diff --git a/project/SparkBuild.scala b/project/SparkBuild.scala index 688bb16a03..f34736b1c4 100644 --- a/project/SparkBuild.scala +++ b/project/SparkBuild.scala @@ -156,7 +156,9 @@ object SparkBuild extends Build { def bagelSettings = sharedSettings ++ Seq(name := "spark-bagel") def streamingSettings = sharedSettings ++ Seq( - name := "spark-streaming" + name := "spark-streaming", + libraryDependencies ++= Seq( + "kafka" % "core-kafka_2.9.1" % "0.7.2") ) ++ assemblySettings ++ extraAssemblySettings def extraAssemblySettings() = Seq(test in assembly := {}) ++ Seq( diff --git a/streaming/src/main/scala/spark/streaming/StreamingContext.scala b/streaming/src/main/scala/spark/streaming/StreamingContext.scala index b3148eaa97..4a78090597 100644 --- a/streaming/src/main/scala/spark/streaming/StreamingContext.scala +++ b/streaming/src/main/scala/spark/streaming/StreamingContext.scala @@ -86,6 +86,17 @@ class StreamingContext ( private[streaming] def getNewNetworkStreamId() = nextNetworkInputStreamId.getAndIncrement() + def kafkaStream[T: ClassManifest]( + hostname: String, + port: Int, + groupId: String, + storageLevel: StorageLevel = StorageLevel.MEMORY_ONLY_SER_2 + ): DStream[T] = { + val inputStream = new KafkaInputDStream[T](this, hostname, port, groupId, storageLevel) + graph.addInputStream(inputStream) + inputStream + } + def networkTextStream( hostname: String, port: Int, diff --git a/streaming/src/main/scala/spark/streaming/examples/KafkaWordCount.scala b/streaming/src/main/scala/spark/streaming/examples/KafkaWordCount.scala new file mode 100644 index 0000000000..3f637150d1 --- /dev/null +++ b/streaming/src/main/scala/spark/streaming/examples/KafkaWordCount.scala @@ -0,0 +1,27 @@ +package spark.streaming.examples + +import spark.streaming.{Seconds, StreamingContext, KafkaInputDStream} +import spark.streaming.StreamingContext._ +import spark.storage.StorageLevel + +object KafkaWordCount { + def main(args: Array[String]) { + if (args.length < 2) { + System.err.println("Usage: WordCountNetwork ") + System.exit(1) + } + + // Create the context and set the batch size + val ssc = new StreamingContext(args(0), "WordCountNetwork") + ssc.setBatchDuration(Seconds(2)) + + // Create a NetworkInputDStream on target ip:port and count the + // words in input stream of \n delimited test (eg. 
generated by 'nc') + val lines = ssc.kafkaStream[String](args(1), args(2).toInt, "test_group") + val words = lines.flatMap(_.split(" ")) + val wordCounts = words.map(x => (x, 1)).reduceByKey(_ + _) + wordCounts.print() + ssc.start() + + } +} diff --git a/streaming/src/main/scala/spark/streaming/input/KafkaInputDStream.scala b/streaming/src/main/scala/spark/streaming/input/KafkaInputDStream.scala new file mode 100644 index 0000000000..427f398237 --- /dev/null +++ b/streaming/src/main/scala/spark/streaming/input/KafkaInputDStream.scala @@ -0,0 +1,121 @@ +package spark.streaming + +import java.nio.ByteBuffer +import java.util.Properties +import java.util.concurrent.{ArrayBlockingQueue, Executors} +import kafka.api.{FetchRequest} +import kafka.consumer.{Consumer, ConsumerConfig, KafkaStream} +import kafka.javaapi.consumer.SimpleConsumer +import kafka.javaapi.message.ByteBufferMessageSet +import kafka.message.{Message, MessageSet, MessageAndMetadata} +import kafka.utils.Utils +import scala.collection.JavaConversions._ +import spark._ +import spark.RDD +import spark.storage.StorageLevel + + +/** + * An input stream that pulls messages form a Kafka Broker. + */ +class KafkaInputDStream[T: ClassManifest]( + @transient ssc_ : StreamingContext, + host: String, + port: Int, + groupId: String, + storageLevel: StorageLevel, + timeout: Int = 10000, + bufferSize: Int = 1024000 + ) extends NetworkInputDStream[T](ssc_ ) with Logging { + + def createReceiver(): NetworkReceiver[T] = { + new KafkaReceiver(id, host, port, storageLevel, groupId, timeout).asInstanceOf[NetworkReceiver[T]] + } +} + +class KafkaReceiver(streamId: Int, host: String, port: Int, storageLevel: StorageLevel, groupId: String, timeout: Int) + extends NetworkReceiver[Any](streamId) { + + //var executorPool : = null + var blockPushingThread : Thread = null + + def onStop() { + blockPushingThread.interrupt() + } + + def onStart() { + + val executorPool = Executors.newFixedThreadPool(2) + + logInfo("Starting Kafka Consumer with groupId " + groupId) + + val zooKeeperEndPoint = host + ":" + port + logInfo("Connecting to " + zooKeeperEndPoint) + + // Specify some consumer properties + val props = new Properties() + props.put("zk.connect", zooKeeperEndPoint) + props.put("zk.connectiontimeout.ms", timeout.toString) + props.put("groupid", groupId) + + // Create the connection to the cluster + val consumerConfig = new ConsumerConfig(props) + val consumerConnector = Consumer.create(consumerConfig) + logInfo("Connected to " + zooKeeperEndPoint) + logInfo("") + logInfo("") + + // Specify which topics we are listening to + val topicCountMap = Map("test" -> 2) + val topicMessageStreams = consumerConnector.createMessageStreams(topicCountMap) + val streams = topicMessageStreams.get("test") + + // Queue that holds the blocks + val queue = new ArrayBlockingQueue[ByteBuffer](2) + + streams.getOrElse(Nil).foreach { stream => + executorPool.submit(new MessageHandler(stream, queue)) + } + + blockPushingThread = new DaemonThread { + override def run() { + logInfo("Starting BlockPushingThread.") + var nextBlockNumber = 0 + while (true) { + val buffer = queue.take() + val blockId = "input-" + streamId + "-" + nextBlockNumber + nextBlockNumber += 1 + pushBlock(blockId, buffer, storageLevel) + } + } + } + blockPushingThread.start() + + // while (true) { + // // Create a fetch request for topic “test”, partition 0, current offset, and fetch size of 1MB + // val fetchRequest = new FetchRequest("test", 0, offset, 1000000) + + // // get the message set from the consumer 
and print them out + // val messages = consumer.fetch(fetchRequest) + // for(msg <- messages.iterator) { + // logInfo("consumed: " + Utils.toString(msg.message.payload, "UTF-8")) + // // advance the offset after consuming each message + // offset = msg.offset + // queue.put(msg.message.payload) + // } + // } + } + + class MessageHandler(stream: KafkaStream[Message], queue: ArrayBlockingQueue[ByteBuffer]) extends Runnable { + def run() { + logInfo("Starting MessageHandler.") + while(true) { + stream.foreach { msgAndMetadata => + logInfo("Consumed: " + Utils.toString(msgAndMetadata.message.payload, "UTF-8")) + queue.put(msgAndMetadata.message.payload) + } + } + } + } + +} From fc3d0b602a08fdd182c2138506d1cd9952631f95 Mon Sep 17 00:00:00 2001 From: Tathagata Das Date: Tue, 6 Nov 2012 17:23:31 -0800 Subject: [PATCH 100/226] Added FailureTestsuite for testing multiple, repeated master failures. --- .../scala/spark/streaming/Checkpoint.scala | 6 +- .../main/scala/spark/streaming/DStream.scala | 4 +- .../scala/spark/streaming/Scheduler.scala | 6 +- .../spark/streaming/StreamingContext.scala | 17 +- .../spark/streaming/CheckpointSuite.scala | 75 ++++--- .../scala/spark/streaming/FailureSuite.scala | 188 ++++++++++++++++++ .../scala/spark/streaming/TestSuiteBase.scala | 9 +- 7 files changed, 256 insertions(+), 49 deletions(-) create mode 100644 streaming/src/test/scala/spark/streaming/FailureSuite.scala diff --git a/streaming/src/main/scala/spark/streaming/Checkpoint.scala b/streaming/src/main/scala/spark/streaming/Checkpoint.scala index 1643f45ffb..a70fb8f73a 100644 --- a/streaming/src/main/scala/spark/streaming/Checkpoint.scala +++ b/streaming/src/main/scala/spark/streaming/Checkpoint.scala @@ -32,7 +32,7 @@ class Checkpoint(@transient ssc: StreamingContext, val checkpointTime: Time) val file = new Path(path, "graph") val conf = new Configuration() val fs = file.getFileSystem(conf) - logDebug("Saved checkpoint for time " + checkpointTime + " to file '" + file + "'") + logDebug("Saving checkpoint for time " + checkpointTime + " to file '" + file + "'") if (fs.exists(file)) { val bkFile = new Path(file.getParent, file.getName + ".bk") FileUtil.copy(fs, file, fs, bkFile, true, true, conf) @@ -43,7 +43,7 @@ class Checkpoint(@transient ssc: StreamingContext, val checkpointTime: Time) oos.writeObject(this) oos.close() fs.close() - logInfo("Saved checkpoint for time " + checkpointTime + " to file '" + file + "'") + logInfo("Checkpoint of streaming context for time " + checkpointTime + " saved successfully to file '" + file + "'") } def toBytes(): Array[Byte] = { @@ -58,7 +58,6 @@ object Checkpoint extends Logging { val fs = new Path(path).getFileSystem(new Configuration()) val attempts = Seq(new Path(path, "graph"), new Path(path, "graph.bk"), new Path(path), new Path(path + ".bk")) - var detailedLog: String = "" attempts.foreach(file => { if (fs.exists(file)) { @@ -76,6 +75,7 @@ object Checkpoint extends Logging { fs.close() cp.validate() logInfo("Checkpoint successfully loaded from file '" + file + "'") + logInfo("Checkpoint was generated at time " + cp.checkpointTime) return cp } catch { case e: Exception => diff --git a/streaming/src/main/scala/spark/streaming/DStream.scala b/streaming/src/main/scala/spark/streaming/DStream.scala index 40744eac19..73096edec5 100644 --- a/streaming/src/main/scala/spark/streaming/DStream.scala +++ b/streaming/src/main/scala/spark/streaming/DStream.scala @@ -288,6 +288,7 @@ extends Serializable with Logging { * this method to save custom checkpoint data. 
*/ protected[streaming] def updateCheckpointData(currentTime: Time) { + logInfo("Updating checkpoint data for time " + currentTime) // Get the checkpointed RDDs from the generated RDDs val newCheckpointData = generatedRDDs.filter(_._2.getCheckpointData() != null) .map(x => (x._1, x._2.getCheckpointData())) @@ -319,7 +320,7 @@ extends Serializable with Logging { } } } - logInfo("Updated checkpoint data") + logInfo("Updated checkpoint data for time " + currentTime) } /** @@ -338,6 +339,7 @@ extends Serializable with Logging { } } dependencies.foreach(_.restoreCheckpointData()) + logInfo("Restored checkpoint data") } @throws(classOf[IOException]) diff --git a/streaming/src/main/scala/spark/streaming/Scheduler.scala b/streaming/src/main/scala/spark/streaming/Scheduler.scala index 2b3f5a4829..de0fb1f3ad 100644 --- a/streaming/src/main/scala/spark/streaming/Scheduler.scala +++ b/streaming/src/main/scala/spark/streaming/Scheduler.scala @@ -29,10 +29,12 @@ extends Logging { // on this first trigger time of the timer. if (ssc.isCheckpointPresent) { // If manual clock is being used for testing, then - // set manual clock to the last checkpointed time + // either set the manual clock to the last checkpointed time, + // or if the property is defined set it to that time if (clock.isInstanceOf[ManualClock]) { val lastTime = ssc.getInitialCheckpoint.checkpointTime.milliseconds - clock.asInstanceOf[ManualClock].setTime(lastTime) + val jumpTime = System.getProperty("spark.streaming.manualClock.jump", "0").toLong + clock.asInstanceOf[ManualClock].setTime(lastTime + jumpTime) } timer.restart(graph.zeroTime.milliseconds) logInfo("Scheduler's timer restarted") diff --git a/streaming/src/main/scala/spark/streaming/StreamingContext.scala b/streaming/src/main/scala/spark/streaming/StreamingContext.scala index 25caaf7d39..eb83aaee7a 100644 --- a/streaming/src/main/scala/spark/streaming/StreamingContext.scala +++ b/streaming/src/main/scala/spark/streaming/StreamingContext.scala @@ -18,7 +18,7 @@ import org.apache.hadoop.mapreduce.lib.input.TextInputFormat import org.apache.hadoop.fs.Path import java.util.UUID -class StreamingContext ( +final class StreamingContext ( sc_ : SparkContext, cp_ : Checkpoint ) extends Logging { @@ -61,12 +61,12 @@ class StreamingContext ( } } - val nextNetworkInputStreamId = new AtomicInteger(0) - var networkInputTracker: NetworkInputTracker = null + private[streaming] val nextNetworkInputStreamId = new AtomicInteger(0) + private[streaming] var networkInputTracker: NetworkInputTracker = null private[streaming] var checkpointDir: String = { if (isCheckpointPresent) { - sc.setCheckpointDir(cp_.checkpointDir, true) + sc.setCheckpointDir(StreamingContext.getSparkCheckpointDir(cp_.checkpointDir), true) cp_.checkpointDir } else { null @@ -87,7 +87,7 @@ class StreamingContext ( def checkpoint(dir: String, interval: Time) { if (dir != null) { - sc.setCheckpointDir(new Path(dir, "rdds-" + UUID.randomUUID.toString).toString) + sc.setCheckpointDir(StreamingContext.getSparkCheckpointDir(dir)) checkpointDir = dir checkpointInterval = interval } else { @@ -227,8 +227,11 @@ class StreamingContext ( } def doCheckpoint(currentTime: Time) { + val startTime = System.currentTimeMillis() graph.updateCheckpointData(currentTime) new Checkpoint(this, currentTime).save(checkpointDir) + val stopTime = System.currentTimeMillis() + logInfo("Checkpointing the graph took " + (stopTime - startTime) + " ms") } } @@ -247,5 +250,9 @@ object StreamingContext { prefix + "-" + time.milliseconds + "." 
+ suffix } } + + def getSparkCheckpointDir(sscCheckpointDir: String): String = { + new Path(sscCheckpointDir, UUID.randomUUID.toString).toString + } } diff --git a/streaming/src/test/scala/spark/streaming/CheckpointSuite.scala b/streaming/src/test/scala/spark/streaming/CheckpointSuite.scala index 9fdfd50be2..038827ddb0 100644 --- a/streaming/src/test/scala/spark/streaming/CheckpointSuite.scala +++ b/streaming/src/test/scala/spark/streaming/CheckpointSuite.scala @@ -52,15 +52,13 @@ class CheckpointSuite extends TestSuiteBase with BeforeAndAfter { .checkpoint(stateStreamCheckpointInterval) .map(t => (t._1, t._2.self)) } - val ssc = setupStreams(input, operation) - val stateStream = ssc.graph.getOutputStreams().head.dependencies.head.dependencies.head + var ssc = setupStreams(input, operation) + var stateStream = ssc.graph.getOutputStreams().head.dependencies.head.dependencies.head - // Run till a time such that at least one RDD in the stream should have been checkpointed + // Run till a time such that at least one RDD in the stream should have been checkpointed, + // then check whether some RDD has been checkpointed or not ssc.start() - val clock = ssc.scheduler.clock.asInstanceOf[ManualClock] - advanceClock(clock, firstNumBatches) - - // Check whether some RDD has been checkpointed or not + runStreamsWithRealDelay(ssc, firstNumBatches) logInfo("Checkpoint data of state stream = \n[" + stateStream.checkpointData.mkString(",\n") + "]") assert(!stateStream.checkpointData.isEmpty, "No checkpointed RDDs in state stream before first failure") stateStream.checkpointData.foreach { @@ -73,42 +71,45 @@ class CheckpointSuite extends TestSuiteBase with BeforeAndAfter { // Run till a further time such that previous checkpoint files in the stream would be deleted // and check whether the earlier checkpoint files are deleted val checkpointFiles = stateStream.checkpointData.map(x => new File(x._2.toString)) - advanceClock(clock, secondNumBatches) + runStreamsWithRealDelay(ssc, secondNumBatches) checkpointFiles.foreach(file => assert(!file.exists, "Checkpoint file '" + file + "' was not deleted")) + ssc.stop() // Restart stream computation using the checkpoint file and check whether // checkpointed RDDs have been restored or not - ssc.stop() - val sscNew = new StreamingContext(checkpointDir) - val stateStreamNew = sscNew.graph.getOutputStreams().head.dependencies.head.dependencies.head - logInfo("Restored data of state stream = \n[" + stateStreamNew.generatedRDDs.mkString("\n") + "]") - assert(!stateStreamNew.generatedRDDs.isEmpty, "No restored RDDs in state stream after recovery from first failure") + ssc = new StreamingContext(checkpointDir) + stateStream = ssc.graph.getOutputStreams().head.dependencies.head.dependencies.head + logInfo("Restored data of state stream = \n[" + stateStream.generatedRDDs.mkString("\n") + "]") + assert(!stateStream.generatedRDDs.isEmpty, "No restored RDDs in state stream after recovery from first failure") - // Run one batch to generate a new checkpoint file - sscNew.start() - val clockNew = sscNew.scheduler.clock.asInstanceOf[ManualClock] - advanceClock(clockNew, 1) - - // Check whether some RDD is present in the checkpoint data or not - assert(!stateStreamNew.checkpointData.isEmpty, "No checkpointed RDDs in state stream before second failure") + // Run one batch to generate a new checkpoint file and check whether some RDD + // is present in the checkpoint data or not + ssc.start() + runStreamsWithRealDelay(ssc, 1) + assert(!stateStream.checkpointData.isEmpty, "No 
checkpointed RDDs in state stream before second failure") stateStream.checkpointData.foreach { case (time, data) => { val file = new File(data.toString) - assert(file.exists(), "Checkpoint file '" + file +"' for time " + time + " for state stream before seconds failure does not exist") + assert(file.exists(), + "Checkpoint file '" + file +"' for time " + time + " for state stream before seconds failure does not exist") } } + ssc.stop() // Restart stream computation from the new checkpoint file to see whether that file has // correct checkpoint data - sscNew.stop() - val sscNewNew = new StreamingContext(checkpointDir) - val stateStreamNewNew = sscNew.graph.getOutputStreams().head.dependencies.head.dependencies.head - logInfo("Restored data of state stream = \n[" + stateStreamNew.generatedRDDs.mkString("\n") + "]") - assert(!stateStreamNewNew.generatedRDDs.isEmpty, "No restored RDDs in state stream after recovery from second failure") - sscNewNew.start() - advanceClock(sscNewNew.scheduler.clock.asInstanceOf[ManualClock], 1) - sscNewNew.stop() + ssc = new StreamingContext(checkpointDir) + stateStream = ssc.graph.getOutputStreams().head.dependencies.head.dependencies.head + logInfo("Restored data of state stream = \n[" + stateStream.generatedRDDs.mkString("\n") + "]") + assert(!stateStream.generatedRDDs.isEmpty, "No restored RDDs in state stream after recovery from second failure") + + // Adjust manual clock time as if it is being restarted after a delay + System.setProperty("spark.streaming.manualClock.jump", (batchDuration.milliseconds * 7).toString) + ssc.start() + runStreamsWithRealDelay(ssc, 4) + ssc.stop() + System.clearProperty("spark.streaming.manualClock.jump") } test("map and reduceByKey") { @@ -123,10 +124,12 @@ class CheckpointSuite extends TestSuiteBase with BeforeAndAfter { test("reduceByKeyAndWindowInv") { val n = 10 val w = 4 - val input = (1 to n).map(x => Seq("a")).toSeq + val input = (1 to n).map(_ => Seq("a")).toSeq val output = Seq(Seq(("a", 1)), Seq(("a", 2)), Seq(("a", 3))) ++ (1 to (n - w + 1)).map(x => Seq(("a", 4))) val operation = (st: DStream[String]) => { - st.map(x => (x, 1)).reduceByKeyAndWindow(_ + _, _ - _, batchDuration * 4, batchDuration) + st.map(x => (x, 1)) + .reduceByKeyAndWindow(_ + _, _ - _, batchDuration * w, batchDuration) + .checkpoint(Seconds(2)) } for (i <- Seq(2, 3, 4)) { testCheckpointedOperation(input, operation, output, i) @@ -184,7 +187,14 @@ class CheckpointSuite extends TestSuiteBase with BeforeAndAfter { verifyOutput[V](outputNew, expectedOutput.takeRight(nextNumExpectedOutputs), true) } - def advanceClock(clock: ManualClock, numBatches: Long) { + /** + * Advances the manual clock on the streaming scheduler by given number of batches. + * It also wait for the expected amount of time for each batch. 
+ */ + + + def runStreamsWithRealDelay(ssc: StreamingContext, numBatches: Long) { + val clock = ssc.scheduler.clock.asInstanceOf[ManualClock] logInfo("Manual clock before advancing = " + clock.time) for (i <- 1 to numBatches.toInt) { clock.addToTime(batchDuration.milliseconds) @@ -193,4 +203,5 @@ class CheckpointSuite extends TestSuiteBase with BeforeAndAfter { logInfo("Manual clock after advancing = " + clock.time) Thread.sleep(batchDuration.milliseconds) } + } \ No newline at end of file diff --git a/streaming/src/test/scala/spark/streaming/FailureSuite.scala b/streaming/src/test/scala/spark/streaming/FailureSuite.scala new file mode 100644 index 0000000000..5b414117fc --- /dev/null +++ b/streaming/src/test/scala/spark/streaming/FailureSuite.scala @@ -0,0 +1,188 @@ +package spark.streaming + +import org.scalatest.BeforeAndAfter +import org.apache.commons.io.FileUtils +import java.io.File +import scala.runtime.RichInt +import scala.util.Random +import spark.streaming.StreamingContext._ +import collection.mutable.ArrayBuffer +import spark.Logging + +/** + * This testsuite tests master failures at random times while the stream is running using + * the real clock. + */ +class FailureSuite extends TestSuiteBase with BeforeAndAfter { + + before { + FileUtils.deleteDirectory(new File(checkpointDir)) + } + + after { + FailureSuite.reset() + FileUtils.deleteDirectory(new File(checkpointDir)) + } + + override def framework = "CheckpointSuite" + + override def batchDuration = Milliseconds(500) + + override def checkpointDir = "checkpoint" + + override def checkpointInterval = batchDuration + + test("multiple failures with updateStateByKey") { + val n = 30 + // Input: time=1 ==> [ a ] , time=2 ==> [ a, a ] , time=3 ==> [ a, a, a ] , ... + val input = (1 to n).map(i => (1 to i).map(_ =>"a").toSeq).toSeq + // Last output: [ (a, 465) ] for n=30 + val lastOutput = Seq( ("a", (1 to n).reduce(_ + _)) ) + + val operation = (st: DStream[String]) => { + val updateFunc = (values: Seq[Int], state: Option[RichInt]) => { + Some(new RichInt(values.foldLeft(0)(_ + _) + state.map(_.self).getOrElse(0))) + } + st.map(x => (x, 1)) + .updateStateByKey[RichInt](updateFunc) + .checkpoint(Seconds(2)) + .map(t => (t._1, t._2.self)) + } + + testOperationWithMultipleFailures(input, operation, lastOutput, n, n) + } + + test("multiple failures with reduceByKeyAndWindow") { + val n = 30 + val w = 100 + assert(w > n, "Window should be much larger than the number of input sets in this test") + // Input: time=1 ==> [ a ] , time=2 ==> [ a, a ] , time=3 ==> [ a, a, a ] , ... + val input = (1 to n).map(i => (1 to i).map(_ =>"a").toSeq).toSeq + // Last output: [ (a, 465) ] + val lastOutput = Seq( ("a", (1 to n).reduce(_ + _)) ) + + val operation = (st: DStream[String]) => { + st.map(x => (x, 1)) + .reduceByKeyAndWindow(_ + _, _ - _, batchDuration * w, batchDuration) + .checkpoint(Seconds(2)) + } + + testOperationWithMultipleFailures(input, operation, lastOutput, n, n) + } + + + /** + * Tests stream operation with multiple master failures, and verifies whether the + * final set of output values is as expected or not. Checking the final value is + * proof that no intermediate data was lost due to master failures. 
+ */ + def testOperationWithMultipleFailures[U: ClassManifest, V: ClassManifest]( + input: Seq[Seq[U]], + operation: DStream[U] => DStream[V], + lastExpectedOutput: Seq[V], + numBatches: Int, + numExpectedOutput: Int + ) { + var ssc = setupStreams[U, V](input, operation) + val mergedOutput = new ArrayBuffer[Seq[V]]() + + var totalTimeRan = 0L + while(totalTimeRan <= numBatches * batchDuration.milliseconds * 2) { + new KillingThread(ssc, numBatches * batchDuration.milliseconds.toInt / 4).start() + val (output, timeRan) = runStreamsWithRealClock[V](ssc, numBatches, numExpectedOutput) + + mergedOutput ++= output + totalTimeRan += timeRan + logInfo("New output = " + output) + logInfo("Merged output = " + mergedOutput) + logInfo("Total time spent = " + totalTimeRan) + val sleepTime = Random.nextInt(numBatches * batchDuration.milliseconds.toInt / 8) + logInfo( + "\n-------------------------------------------\n" + + " Restarting stream computation in " + sleepTime + " ms " + + "\n-------------------------------------------\n" + ) + Thread.sleep(sleepTime) + FailureSuite.failed = false + ssc = new StreamingContext(checkpointDir) + } + ssc.stop() + ssc = null + + // Verify whether the last output is the expected one + val lastOutput = mergedOutput(mergedOutput.lastIndexWhere(!_.isEmpty)) + assert(lastOutput.toSet === lastExpectedOutput.toSet) + logInfo("Finished computation after " + FailureSuite.failureCount + " failures") + } + + /** + * Runs the streams set up in `ssc` on real clock until the expected max number of + */ + def runStreamsWithRealClock[V: ClassManifest]( + ssc: StreamingContext, + numBatches: Int, + maxExpectedOutput: Int + ): (Seq[Seq[V]], Long) = { + + System.clearProperty("spark.streaming.clock") + + assert(numBatches > 0, "Number of batches to run stream computation is zero") + assert(maxExpectedOutput > 0, "Max expected outputs after " + numBatches + " is zero") + logInfo("numBatches = " + numBatches + ", maxExpectedOutput = " + maxExpectedOutput) + + // Get the output buffer + val outputStream = ssc.graph.getOutputStreams.head.asInstanceOf[TestOutputStream[V]] + val output = outputStream.output + val waitTime = (batchDuration.millis * (numBatches.toDouble + 0.5)).toLong + val startTime = System.currentTimeMillis() + + try { + // Start computation + ssc.start() + + // Wait until expected number of output items have been generated + while (output.size < maxExpectedOutput && System.currentTimeMillis() - startTime < waitTime && !FailureSuite.failed) { + logInfo("output.size = " + output.size + ", maxExpectedOutput = " + maxExpectedOutput) + Thread.sleep(100) + } + } catch { + case e: Exception => logInfo("Exception while running streams: " + e) + } finally { + ssc.stop() + } + val timeTaken = System.currentTimeMillis() - startTime + logInfo("" + output.size + " sets of output generated in " + timeTaken + " ms") + (output, timeTaken) + } + + +} + +object FailureSuite { + var failed = false + var failureCount = 0 + + def reset() { + failed = false + failureCount = 0 + } +} + +class KillingThread(ssc: StreamingContext, maxKillWaitTime: Int) extends Thread with Logging { + initLogging() + + override def run() { + var minKillWaitTime = if (FailureSuite.failureCount == 0) 3000 else 1000 // to allow the first checkpoint + val killWaitTime = minKillWaitTime + Random.nextInt(maxKillWaitTime) + logInfo("Kill wait time = " + killWaitTime) + Thread.sleep(killWaitTime.toLong) + logInfo( + "\n---------------------------------------\n" + + "Killing streaming context after " + killWaitTime + " 
ms" + + "\n---------------------------------------\n" + ) + if (ssc != null) ssc.stop() + FailureSuite.failed = true + FailureSuite.failureCount += 1 + } +} diff --git a/streaming/src/test/scala/spark/streaming/TestSuiteBase.scala b/streaming/src/test/scala/spark/streaming/TestSuiteBase.scala index b8c7f99603..5fb5cc504c 100644 --- a/streaming/src/test/scala/spark/streaming/TestSuiteBase.scala +++ b/streaming/src/test/scala/spark/streaming/TestSuiteBase.scala @@ -23,12 +23,9 @@ class TestInputStream[T: ClassManifest](ssc_ : StreamingContext, input: Seq[Seq[ def compute(validTime: Time): Option[RDD[T]] = { logInfo("Computing RDD for time " + validTime) val index = ((validTime - zeroTime) / slideTime - 1).toInt - val rdd = if (index < input.size) { - ssc.sc.makeRDD(input(index), numPartitions) - } else { - ssc.sc.makeRDD(Seq[T](), numPartitions) - } - logInfo("Created RDD " + rdd.id) + val selectedInput = if (index < input.size) input(index) else Seq[T]() + val rdd = ssc.sc.makeRDD(selectedInput, numPartitions) + logInfo("Created RDD " + rdd.id + " with " + selectedInput) Some(rdd) } } From cc2a65f54715ff0990d5873d50eec0dedf64d409 Mon Sep 17 00:00:00 2001 From: tdas Date: Thu, 8 Nov 2012 11:17:57 +0000 Subject: [PATCH 101/226] Fixed bug in InputStreamsSuite --- .../src/test/scala/spark/streaming/InputStreamsSuite.scala | 2 ++ 1 file changed, 2 insertions(+) diff --git a/streaming/src/test/scala/spark/streaming/InputStreamsSuite.scala b/streaming/src/test/scala/spark/streaming/InputStreamsSuite.scala index c17254b809..8f892baab1 100644 --- a/streaming/src/test/scala/spark/streaming/InputStreamsSuite.scala +++ b/streaming/src/test/scala/spark/streaming/InputStreamsSuite.scala @@ -12,6 +12,8 @@ import org.apache.commons.io.FileUtils class InputStreamsSuite extends TestSuiteBase { + + System.setProperty("spark.streaming.clock", "spark.streaming.util.ManualClock") test("network input stream") { // Start the server From 52d21cb682d1c4ca05e6823f8049ccedc3c5530c Mon Sep 17 00:00:00 2001 From: tdas Date: Thu, 8 Nov 2012 11:35:40 +0000 Subject: [PATCH 102/226] Removed unnecessary files. 
--- .../streaming/util/ConnectionHandler.scala | 157 ------- .../streaming/util/SenderReceiverTest.scala | 67 --- .../util/SentenceFileGenerator.scala | 92 ---- .../spark/streaming/util/ShuffleTest.scala | 23 - .../spark/streaming/util/TestGenerator.scala | 107 ----- .../spark/streaming/util/TestGenerator2.scala | 119 ----- .../spark/streaming/util/TestGenerator4.scala | 244 ---------- .../util/TestStreamCoordinator.scala | 39 -- .../streaming/util/TestStreamReceiver3.scala | 421 ------------------ .../streaming/util/TestStreamReceiver4.scala | 374 ---------------- 10 files changed, 1643 deletions(-) delete mode 100644 streaming/src/main/scala/spark/streaming/util/ConnectionHandler.scala delete mode 100644 streaming/src/main/scala/spark/streaming/util/SenderReceiverTest.scala delete mode 100644 streaming/src/main/scala/spark/streaming/util/SentenceFileGenerator.scala delete mode 100644 streaming/src/main/scala/spark/streaming/util/ShuffleTest.scala delete mode 100644 streaming/src/main/scala/spark/streaming/util/TestGenerator.scala delete mode 100644 streaming/src/main/scala/spark/streaming/util/TestGenerator2.scala delete mode 100644 streaming/src/main/scala/spark/streaming/util/TestGenerator4.scala delete mode 100644 streaming/src/main/scala/spark/streaming/util/TestStreamCoordinator.scala delete mode 100644 streaming/src/main/scala/spark/streaming/util/TestStreamReceiver3.scala delete mode 100644 streaming/src/main/scala/spark/streaming/util/TestStreamReceiver4.scala diff --git a/streaming/src/main/scala/spark/streaming/util/ConnectionHandler.scala b/streaming/src/main/scala/spark/streaming/util/ConnectionHandler.scala deleted file mode 100644 index cde868a0c9..0000000000 --- a/streaming/src/main/scala/spark/streaming/util/ConnectionHandler.scala +++ /dev/null @@ -1,157 +0,0 @@ -package spark.streaming.util - -import spark.Logging - -import scala.collection.mutable.{ArrayBuffer, SynchronizedQueue} - -import java.net._ -import java.io._ -import java.nio._ -import java.nio.charset._ -import java.nio.channels._ -import java.nio.channels.spi._ - -abstract class ConnectionHandler(host: String, port: Int, connect: Boolean) -extends Thread with Logging { - - val selector = SelectorProvider.provider.openSelector() - val interestChangeRequests = new SynchronizedQueue[(SelectionKey, Int)] - - initLogging() - - override def run() { - try { - if (connect) { - connect() - } else { - listen() - } - - var interrupted = false - while(!interrupted) { - - preSelect() - - while(!interestChangeRequests.isEmpty) { - val (key, ops) = interestChangeRequests.dequeue - val lastOps = key.interestOps() - key.interestOps(ops) - - def intToOpStr(op: Int): String = { - val opStrs = new ArrayBuffer[String]() - if ((op & SelectionKey.OP_READ) != 0) opStrs += "READ" - if ((op & SelectionKey.OP_WRITE) != 0) opStrs += "WRITE" - if ((op & SelectionKey.OP_CONNECT) != 0) opStrs += "CONNECT" - if ((op & SelectionKey.OP_ACCEPT) != 0) opStrs += "ACCEPT" - if (opStrs.size > 0) opStrs.reduceLeft(_ + " | " + _) else " " - } - - logTrace("Changed ops from [" + intToOpStr(lastOps) + "] to [" + intToOpStr(ops) + "]") - } - - selector.select() - interrupted = Thread.currentThread.isInterrupted - - val selectedKeys = selector.selectedKeys().iterator() - while (selectedKeys.hasNext) { - val key = selectedKeys.next.asInstanceOf[SelectionKey] - selectedKeys.remove() - if (key.isValid) { - if (key.isAcceptable) { - accept(key) - } else if (key.isConnectable) { - finishConnect(key) - } else if (key.isReadable) { - read(key) - } else if 
(key.isWritable) { - write(key) - } - } - } - } - } catch { - case e: Exception => { - logError("Error in select loop", e) - } - } - } - - def connect() { - val socketAddress = new InetSocketAddress(host, port) - val channel = SocketChannel.open() - channel.configureBlocking(false) - channel.socket.setReuseAddress(true) - channel.socket.setTcpNoDelay(true) - channel.connect(socketAddress) - channel.register(selector, SelectionKey.OP_CONNECT) - logInfo("Initiating connection to [" + socketAddress + "]") - } - - def listen() { - val channel = ServerSocketChannel.open() - channel.configureBlocking(false) - channel.socket.setReuseAddress(true) - channel.socket.setReceiveBufferSize(256 * 1024) - channel.socket.bind(new InetSocketAddress(port)) - channel.register(selector, SelectionKey.OP_ACCEPT) - logInfo("Listening on port " + port) - } - - def finishConnect(key: SelectionKey) { - try { - val channel = key.channel.asInstanceOf[SocketChannel] - val address = channel.socket.getRemoteSocketAddress - channel.finishConnect() - logInfo("Connected to [" + host + ":" + port + "]") - ready(key) - } catch { - case e: IOException => { - logError("Error finishing connect to " + host + ":" + port) - close(key) - } - } - } - - def accept(key: SelectionKey) { - try { - val serverChannel = key.channel.asInstanceOf[ServerSocketChannel] - val channel = serverChannel.accept() - val address = channel.socket.getRemoteSocketAddress - channel.configureBlocking(false) - logInfo("Accepted connection from [" + address + "]") - ready(channel.register(selector, 0)) - } catch { - case e: IOException => { - logError("Error accepting connection", e) - } - } - } - - def changeInterest(key: SelectionKey, ops: Int) { - logTrace("Added request to change ops to " + ops) - interestChangeRequests += ((key, ops)) - } - - def ready(key: SelectionKey) - - def preSelect() { - } - - def read(key: SelectionKey) { - throw new UnsupportedOperationException("Cannot read on connection of type " + this.getClass.toString) - } - - def write(key: SelectionKey) { - throw new UnsupportedOperationException("Cannot write on connection of type " + this.getClass.toString) - } - - def close(key: SelectionKey) { - try { - key.channel.close() - key.cancel() - Thread.currentThread.interrupt - } catch { - case e: Exception => logError("Error closing connection", e) - } - } -} diff --git a/streaming/src/main/scala/spark/streaming/util/SenderReceiverTest.scala b/streaming/src/main/scala/spark/streaming/util/SenderReceiverTest.scala deleted file mode 100644 index 3922dfbad6..0000000000 --- a/streaming/src/main/scala/spark/streaming/util/SenderReceiverTest.scala +++ /dev/null @@ -1,67 +0,0 @@ -package spark.streaming.util - -import java.net.{Socket, ServerSocket} -import java.io.{ByteArrayOutputStream, DataOutputStream, DataInputStream, BufferedInputStream} - -object Receiver { - def main(args: Array[String]) { - val port = args(0).toInt - val lsocket = new ServerSocket(port) - println("Listening on port " + port ) - while(true) { - val socket = lsocket.accept() - (new Thread() { - override def run() { - val buffer = new Array[Byte](100000) - var count = 0 - val time = System.currentTimeMillis - try { - val is = new DataInputStream(new BufferedInputStream(socket.getInputStream)) - var loop = true - var string: String = null - do { - string = is.readUTF() - if (string != null) { - count += 28 - } - } while (string != null) - } catch { - case e: Exception => e.printStackTrace() - } - val timeTaken = System.currentTimeMillis - time - val tput = (count / 1024.0) 
/ (timeTaken / 1000.0) - println("Data = " + count + " bytes\nTime = " + timeTaken + " ms\nTput = " + tput + " KB/s") - } - }).start() - } - } - -} - -object Sender { - - def main(args: Array[String]) { - try { - val host = args(0) - val port = args(1).toInt - val size = args(2).toInt - - val byteStream = new ByteArrayOutputStream() - val stringDataStream = new DataOutputStream(byteStream) - (0 until size).foreach(_ => stringDataStream.writeUTF("abcdedfghijklmnopqrstuvwxy")) - val bytes = byteStream.toByteArray() - println("Generated array of " + bytes.length + " bytes") - - /*val bytes = new Array[Byte](size)*/ - val socket = new Socket(host, port) - val os = socket.getOutputStream - os.write(bytes) - os.flush - socket.close() - - } catch { - case e: Exception => e.printStackTrace - } - } -} - diff --git a/streaming/src/main/scala/spark/streaming/util/SentenceFileGenerator.scala b/streaming/src/main/scala/spark/streaming/util/SentenceFileGenerator.scala deleted file mode 100644 index 94e8f7a849..0000000000 --- a/streaming/src/main/scala/spark/streaming/util/SentenceFileGenerator.scala +++ /dev/null @@ -1,92 +0,0 @@ -package spark.streaming.util - -import spark._ - -import scala.collection.mutable.ArrayBuffer -import scala.util.Random -import scala.io.Source - -import java.net.InetSocketAddress - -import org.apache.hadoop.fs._ -import org.apache.hadoop.conf._ -import org.apache.hadoop.io._ -import org.apache.hadoop.mapred._ -import org.apache.hadoop.util._ - -object SentenceFileGenerator { - - def printUsage () { - println ("Usage: SentenceFileGenerator <# partitions> []") - System.exit(0) - } - - def main (args: Array[String]) { - if (args.length < 4) { - printUsage - } - - val master = args(0) - val fs = new Path(args(1)).getFileSystem(new Configuration()) - val targetDirectory = new Path(args(1)).makeQualified(fs) - val numPartitions = args(2).toInt - val sentenceFile = args(3) - val sentencesPerSecond = { - if (args.length > 4) args(4).toInt - else 10 - } - - val source = Source.fromFile(sentenceFile) - val lines = source.mkString.split ("\n").toArray - source.close () - println("Read " + lines.length + " lines from file " + sentenceFile) - - val sentences = { - val buffer = ArrayBuffer[String]() - val random = new Random() - var i = 0 - while (i < sentencesPerSecond) { - buffer += lines(random.nextInt(lines.length)) - i += 1 - } - buffer.toArray - } - println("Generated " + sentences.length + " sentences") - - val sc = new SparkContext(master, "SentenceFileGenerator") - val sentencesRDD = sc.parallelize(sentences, numPartitions) - - val tempDirectory = new Path(targetDirectory, "_tmp") - - fs.mkdirs(targetDirectory) - fs.mkdirs(tempDirectory) - - var saveTimeMillis = System.currentTimeMillis - try { - while (true) { - val newDir = new Path(targetDirectory, "Sentences-" + saveTimeMillis) - val tmpNewDir = new Path(tempDirectory, "Sentences-" + saveTimeMillis) - println("Writing to file " + newDir) - sentencesRDD.saveAsTextFile(tmpNewDir.toString) - fs.rename(tmpNewDir, newDir) - saveTimeMillis += 1000 - val sleepTimeMillis = { - val currentTimeMillis = System.currentTimeMillis - if (saveTimeMillis < currentTimeMillis) { - 0 - } else { - saveTimeMillis - currentTimeMillis - } - } - println("Sleeping for " + sleepTimeMillis + " ms") - Thread.sleep(sleepTimeMillis) - } - } catch { - case e: Exception => - } - } -} - - - - diff --git a/streaming/src/main/scala/spark/streaming/util/ShuffleTest.scala b/streaming/src/main/scala/spark/streaming/util/ShuffleTest.scala deleted file mode 100644 
index 60085f4f88..0000000000 --- a/streaming/src/main/scala/spark/streaming/util/ShuffleTest.scala +++ /dev/null @@ -1,23 +0,0 @@ -package spark.streaming.util - -import spark.SparkContext -import SparkContext._ - -object ShuffleTest { - def main(args: Array[String]) { - - if (args.length < 1) { - println ("Usage: ShuffleTest ") - System.exit(1) - } - - val sc = new spark.SparkContext(args(0), "ShuffleTest") - val rdd = sc.parallelize(1 to 1000, 500).cache - - def time(f: => Unit) { val start = System.nanoTime; f; println((System.nanoTime - start) * 1.0e-6) } - - time { for (i <- 0 until 50) time { rdd.map(x => (x % 100, x)).reduceByKey(_ + _, 10).count } } - System.exit(0) - } -} - diff --git a/streaming/src/main/scala/spark/streaming/util/TestGenerator.scala b/streaming/src/main/scala/spark/streaming/util/TestGenerator.scala deleted file mode 100644 index 23e9235c60..0000000000 --- a/streaming/src/main/scala/spark/streaming/util/TestGenerator.scala +++ /dev/null @@ -1,107 +0,0 @@ -package spark.streaming.util - -import scala.util.Random -import scala.io.Source -import scala.actors._ -import scala.actors.Actor._ -import scala.actors.remote._ -import scala.actors.remote.RemoteActor._ - -import java.net.InetSocketAddress - - -object TestGenerator { - - def printUsage { - println ("Usage: SentenceGenerator []") - System.exit(0) - } - /* - def generateRandomSentences(lines: Array[String], sentencesPerSecond: Int, streamReceiver: AbstractActor) { - val sleepBetweenSentences = 1000.0 / sentencesPerSecond.toDouble - 1 - val random = new Random () - - try { - var lastPrintTime = System.currentTimeMillis() - var count = 0 - while(true) { - streamReceiver ! lines(random.nextInt(lines.length)) - count += 1 - if (System.currentTimeMillis - lastPrintTime >= 1000) { - println (count + " sentences sent last second") - count = 0 - lastPrintTime = System.currentTimeMillis - } - Thread.sleep(sleepBetweenSentences.toLong) - } - } catch { - case e: Exception => - } - }*/ - - def generateSameSentences(lines: Array[String], sentencesPerSecond: Int, streamReceiver: AbstractActor) { - try { - val numSentences = if (sentencesPerSecond <= 0) { - lines.length - } else { - sentencesPerSecond - } - val sentences = lines.take(numSentences).toArray - - var nextSendingTime = System.currentTimeMillis() - val sendAsArray = true - while(true) { - if (sendAsArray) { - println("Sending as array") - streamReceiver !? sentences - } else { - println("Sending individually") - sentences.foreach(sentence => { - streamReceiver !? 
sentence - }) - } - println ("Sent " + numSentences + " sentences in " + (System.currentTimeMillis - nextSendingTime) + " ms") - nextSendingTime += 1000 - val sleepTime = nextSendingTime - System.currentTimeMillis - if (sleepTime > 0) { - println ("Sleeping for " + sleepTime + " ms") - Thread.sleep(sleepTime) - } - } - } catch { - case e: Exception => - } - } - - def main(args: Array[String]) { - if (args.length < 3) { - printUsage - } - - val generateRandomly = false - - val streamReceiverIP = args(0) - val streamReceiverPort = args(1).toInt - val sentenceFile = args(2) - val sentencesPerSecond = if (args.length > 3) args(3).toInt else 10 - val sentenceInputName = if (args.length > 4) args(4) else "Sentences" - - println("Sending " + sentencesPerSecond + " sentences per second to " + - streamReceiverIP + ":" + streamReceiverPort + "/NetworkStreamReceiver-" + sentenceInputName) - val source = Source.fromFile(sentenceFile) - val lines = source.mkString.split ("\n") - source.close () - - val streamReceiver = select( - Node(streamReceiverIP, streamReceiverPort), - Symbol("NetworkStreamReceiver-" + sentenceInputName)) - if (generateRandomly) { - /*generateRandomSentences(lines, sentencesPerSecond, streamReceiver)*/ - } else { - generateSameSentences(lines, sentencesPerSecond, streamReceiver) - } - } -} - - - diff --git a/streaming/src/main/scala/spark/streaming/util/TestGenerator2.scala b/streaming/src/main/scala/spark/streaming/util/TestGenerator2.scala deleted file mode 100644 index ff840d084f..0000000000 --- a/streaming/src/main/scala/spark/streaming/util/TestGenerator2.scala +++ /dev/null @@ -1,119 +0,0 @@ -package spark.streaming.util - -import scala.util.Random -import scala.io.Source -import scala.actors._ -import scala.actors.Actor._ -import scala.actors.remote._ -import scala.actors.remote.RemoteActor._ - -import java.io.{DataOutputStream, ByteArrayOutputStream, DataInputStream} -import java.net.Socket - -object TestGenerator2 { - - def printUsage { - println ("Usage: SentenceGenerator []") - System.exit(0) - } - - def sendSentences(streamReceiverHost: String, streamReceiverPort: Int, numSentences: Int, bytes: Array[Byte], intervalTime: Long){ - try { - println("Connecting to " + streamReceiverHost + ":" + streamReceiverPort) - val socket = new Socket(streamReceiverHost, streamReceiverPort) - - println("Sending " + numSentences+ " sentences / " + (bytes.length / 1024.0 / 1024.0) + " MB per " + intervalTime + " ms to " + streamReceiverHost + ":" + streamReceiverPort ) - val currentTime = System.currentTimeMillis - var targetTime = (currentTime / intervalTime + 1).toLong * intervalTime - Thread.sleep(targetTime - currentTime) - - while(true) { - val startTime = System.currentTimeMillis() - println("Sending at " + startTime + " ms with delay of " + (startTime - targetTime) + " ms") - val socketOutputStream = socket.getOutputStream - val parts = 10 - (0 until parts).foreach(i => { - val partStartTime = System.currentTimeMillis - - val offset = (i * bytes.length / parts).toInt - val len = math.min(((i + 1) * bytes.length / parts).toInt - offset, bytes.length) - socketOutputStream.write(bytes, offset, len) - socketOutputStream.flush() - val partFinishTime = System.currentTimeMillis - println("Sending part " + i + " of " + len + " bytes took " + (partFinishTime - partStartTime) + " ms") - val sleepTime = math.max(0, 1000 / parts - (partFinishTime - partStartTime) - 1) - Thread.sleep(sleepTime) - }) - - socketOutputStream.flush() - /*val socketInputStream = new 
DataInputStream(socket.getInputStream)*/ - /*val reply = socketInputStream.readUTF()*/ - val finishTime = System.currentTimeMillis() - println ("Sent " + bytes.length + " bytes in " + (finishTime - startTime) + " ms for interval [" + targetTime + ", " + (targetTime + intervalTime) + "]") - /*println("Received = " + reply)*/ - targetTime = targetTime + intervalTime - val sleepTime = (targetTime - finishTime) + 10 - if (sleepTime > 0) { - println("Sleeping for " + sleepTime + " ms") - Thread.sleep(sleepTime) - } else { - println("############################") - println("###### Skipping sleep ######") - println("############################") - } - } - } catch { - case e: Exception => println(e) - } - println("Stopped sending") - } - - def main(args: Array[String]) { - if (args.length < 4) { - printUsage - } - - val streamReceiverHost = args(0) - val streamReceiverPort = args(1).toInt - val sentenceFile = args(2) - val intervalTime = args(3).toLong - val sentencesPerInterval = if (args.length > 4) args(4).toInt else 0 - - println("Reading the file " + sentenceFile) - val source = Source.fromFile(sentenceFile) - val lines = source.mkString.split ("\n") - source.close() - - val numSentences = if (sentencesPerInterval <= 0) { - lines.length - } else { - sentencesPerInterval - } - - println("Generating sentences") - val sentences: Array[String] = if (numSentences <= lines.length) { - lines.take(numSentences).toArray - } else { - (0 until numSentences).map(i => lines(i % lines.length)).toArray - } - - println("Converting to byte array") - val byteStream = new ByteArrayOutputStream() - val stringDataStream = new DataOutputStream(byteStream) - /*stringDataStream.writeInt(sentences.size)*/ - sentences.foreach(stringDataStream.writeUTF) - val bytes = byteStream.toByteArray() - stringDataStream.close() - println("Generated array of " + bytes.length + " bytes") - - /*while(true) { */ - sendSentences(streamReceiverHost, streamReceiverPort, numSentences, bytes, intervalTime) - /*println("Sleeping for 5 seconds")*/ - /*Thread.sleep(5000)*/ - /*System.gc()*/ - /*}*/ - } -} - - - diff --git a/streaming/src/main/scala/spark/streaming/util/TestGenerator4.scala b/streaming/src/main/scala/spark/streaming/util/TestGenerator4.scala deleted file mode 100644 index 9c39ef3e12..0000000000 --- a/streaming/src/main/scala/spark/streaming/util/TestGenerator4.scala +++ /dev/null @@ -1,244 +0,0 @@ -package spark.streaming.util - -import spark.Logging - -import scala.util.Random -import scala.io.Source -import scala.collection.mutable.{ArrayBuffer, Queue} - -import java.net._ -import java.io._ -import java.nio._ -import java.nio.charset._ -import java.nio.channels._ - -import it.unimi.dsi.fastutil.io._ - -class TestGenerator4(targetHost: String, targetPort: Int, sentenceFile: String, intervalDuration: Long, sentencesPerInterval: Int) -extends Logging { - - class SendingConnectionHandler(host: String, port: Int, generator: TestGenerator4) - extends ConnectionHandler(host, port, true) { - - val buffers = new ArrayBuffer[ByteBuffer] - val newBuffers = new Queue[ByteBuffer] - var activeKey: SelectionKey = null - - def send(buffer: ByteBuffer) { - logDebug("Sending: " + buffer) - newBuffers.synchronized { - newBuffers.enqueue(buffer) - } - selector.wakeup() - buffer.synchronized { - buffer.wait() - } - } - - override def ready(key: SelectionKey) { - logDebug("Ready") - activeKey = key - val channel = key.channel.asInstanceOf[SocketChannel] - channel.register(selector, SelectionKey.OP_WRITE) - generator.startSending() - } - - 
override def preSelect() { - newBuffers.synchronized { - while(!newBuffers.isEmpty) { - val buffer = newBuffers.dequeue - buffers += buffer - logDebug("Added: " + buffer) - changeInterest(activeKey, SelectionKey.OP_WRITE) - } - } - } - - override def write(key: SelectionKey) { - try { - /*while(true) {*/ - val channel = key.channel.asInstanceOf[SocketChannel] - if (buffers.size > 0) { - val buffer = buffers(0) - val newBuffer = buffer.slice() - newBuffer.limit(math.min(newBuffer.remaining, 32768)) - val bytesWritten = channel.write(newBuffer) - buffer.position(buffer.position + bytesWritten) - if (bytesWritten == 0) return - if (buffer.remaining == 0) { - buffers -= buffer - buffer.synchronized { - buffer.notify() - } - } - /*changeInterest(key, SelectionKey.OP_WRITE)*/ - } else { - changeInterest(key, 0) - } - /*}*/ - } catch { - case e: IOException => { - if (e.toString.contains("pipe") || e.toString.contains("reset")) { - logError("Connection broken") - } else { - logError("Connection error", e) - } - close(key) - } - } - } - - override def close(key: SelectionKey) { - buffers.clear() - super.close(key) - } - } - - initLogging() - - val connectionHandler = new SendingConnectionHandler(targetHost, targetPort, this) - var sendingThread: Thread = null - var sendCount = 0 - val sendBatches = 5 - - def run() { - logInfo("Connection handler started") - connectionHandler.start() - connectionHandler.join() - if (sendingThread != null && !sendingThread.isInterrupted) { - sendingThread.interrupt - } - logInfo("Connection handler stopped") - } - - def startSending() { - sendingThread = new Thread() { - override def run() { - logInfo("STARTING TO SEND") - sendSentences() - logInfo("SENDING STOPPED AFTER " + sendCount) - connectionHandler.interrupt() - } - } - sendingThread.start() - } - - def stopSending() { - sendingThread.interrupt() - } - - def sendSentences() { - logInfo("Reading the file " + sentenceFile) - val source = Source.fromFile(sentenceFile) - val lines = source.mkString.split ("\n") - source.close() - - val numSentences = if (sentencesPerInterval <= 0) { - lines.length - } else { - sentencesPerInterval - } - - logInfo("Generating sentence buffer") - val sentences: Array[String] = if (numSentences <= lines.length) { - lines.take(numSentences).toArray - } else { - (0 until numSentences).map(i => lines(i % lines.length)).toArray - } - - /* - val sentences: Array[String] = if (numSentences <= lines.length) { - lines.take((numSentences / sendBatches).toInt).toArray - } else { - (0 until (numSentences/sendBatches)).map(i => lines(i % lines.length)).toArray - }*/ - - - val serializer = new spark.KryoSerializer().newInstance() - val byteStream = new FastByteArrayOutputStream(100 * 1024 * 1024) - serializer.serializeStream(byteStream).writeAll(sentences.toIterator.asInstanceOf[Iterator[Any]]).close() - byteStream.trim() - val sentenceBuffer = ByteBuffer.wrap(byteStream.array) - - logInfo("Sending " + numSentences+ " sentences / " + sentenceBuffer.limit + " bytes per " + intervalDuration + " ms to " + targetHost + ":" + targetPort ) - val currentTime = System.currentTimeMillis - var targetTime = (currentTime / intervalDuration + 1).toLong * intervalDuration - Thread.sleep(targetTime - currentTime) - - val totalBytes = sentenceBuffer.limit - - while(true) { - val batchesInCurrentInterval = sendBatches // if (sendCount < 10) 1 else sendBatches - - val startTime = System.currentTimeMillis() - logDebug("Sending # " + sendCount + " at " + startTime + " ms with delay of " + (startTime - targetTime) + 
" ms") - - (0 until batchesInCurrentInterval).foreach(i => { - try { - val position = (i * totalBytes / sendBatches).toInt - val limit = if (i == sendBatches - 1) { - totalBytes - } else { - ((i + 1) * totalBytes / sendBatches).toInt - 1 - } - - val partStartTime = System.currentTimeMillis - sentenceBuffer.limit(limit) - connectionHandler.send(sentenceBuffer) - val partFinishTime = System.currentTimeMillis - val sleepTime = math.max(0, intervalDuration / sendBatches - (partFinishTime - partStartTime) - 1) - Thread.sleep(sleepTime) - - } catch { - case ie: InterruptedException => return - case e: Exception => e.printStackTrace() - } - }) - sentenceBuffer.rewind() - - val finishTime = System.currentTimeMillis() - /*logInfo ("Sent " + sentenceBuffer.limit + " bytes in " + (finishTime - startTime) + " ms")*/ - targetTime = targetTime + intervalDuration //+ (if (sendCount < 3) 1000 else 0) - - val sleepTime = (targetTime - finishTime) + 20 - if (sleepTime > 0) { - logInfo("Sleeping for " + sleepTime + " ms") - Thread.sleep(sleepTime) - } else { - logInfo("###### Skipping sleep ######") - } - if (Thread.currentThread.isInterrupted) { - return - } - sendCount += 1 - } - } -} - -object TestGenerator4 { - def printUsage { - println("Usage: TestGenerator4 []") - System.exit(0) - } - - def main(args: Array[String]) { - println("GENERATOR STARTED") - if (args.length < 4) { - printUsage - } - - - val streamReceiverHost = args(0) - val streamReceiverPort = args(1).toInt - val sentenceFile = args(2) - val intervalDuration = args(3).toLong - val sentencesPerInterval = if (args.length > 4) args(4).toInt else 0 - - while(true) { - val generator = new TestGenerator4(streamReceiverHost, streamReceiverPort, sentenceFile, intervalDuration, sentencesPerInterval) - generator.run() - Thread.sleep(2000) - } - println("GENERATOR STOPPED") - } -} diff --git a/streaming/src/main/scala/spark/streaming/util/TestStreamCoordinator.scala b/streaming/src/main/scala/spark/streaming/util/TestStreamCoordinator.scala deleted file mode 100644 index f584f772bb..0000000000 --- a/streaming/src/main/scala/spark/streaming/util/TestStreamCoordinator.scala +++ /dev/null @@ -1,39 +0,0 @@ -package spark.streaming.util - -import spark.streaming._ -import spark.Logging - -import akka.actor._ -import akka.actor.Actor -import akka.actor.Actor._ - -sealed trait TestStreamCoordinatorMessage -case class GetStreamDetails extends TestStreamCoordinatorMessage -case class GotStreamDetails(name: String, duration: Long) extends TestStreamCoordinatorMessage -case class TestStarted extends TestStreamCoordinatorMessage - -class TestStreamCoordinator(streamDetails: Array[(String, Long)]) extends Actor with Logging { - - var index = 0 - - initLogging() - - logInfo("Created") - - def receive = { - case TestStarted => { - sender ! "OK" - } - - case GetStreamDetails => { - val streamDetail = if (index >= streamDetails.length) null else streamDetails(index) - sender ! 
GotStreamDetails(streamDetail._1, streamDetail._2) - index += 1 - if (streamDetail != null) { - logInfo("Allocated " + streamDetail._1 + " (" + index + "/" + streamDetails.length + ")" ) - } - } - } - -} - diff --git a/streaming/src/main/scala/spark/streaming/util/TestStreamReceiver3.scala b/streaming/src/main/scala/spark/streaming/util/TestStreamReceiver3.scala deleted file mode 100644 index 80ad924dd8..0000000000 --- a/streaming/src/main/scala/spark/streaming/util/TestStreamReceiver3.scala +++ /dev/null @@ -1,421 +0,0 @@ -package spark.streaming.util - -import spark._ -import spark.storage._ -import spark.util.AkkaUtils -import spark.streaming._ - -import scala.math._ -import scala.collection.mutable.{Queue, HashMap, ArrayBuffer, SynchronizedMap} - -import akka.actor._ -import akka.actor.Actor -import akka.dispatch._ -import akka.pattern.ask -import akka.util.duration._ - -import java.io.DataInputStream -import java.io.BufferedInputStream -import java.net.Socket -import java.net.ServerSocket -import java.util.LinkedHashMap - -import org.apache.hadoop.fs._ -import org.apache.hadoop.conf._ -import org.apache.hadoop.io._ -import org.apache.hadoop.mapred._ -import org.apache.hadoop.util._ - -import spark.Utils - - -class TestStreamReceiver3(actorSystem: ActorSystem, blockManager: BlockManager) -extends Thread with Logging { - - - class DataHandler( - inputName: String, - longIntervalDuration: Time, - shortIntervalDuration: Time, - blockManager: BlockManager - ) - extends Logging { - - class Block(var id: String, var shortInterval: Interval) { - val data = ArrayBuffer[String]() - var pushed = false - def longInterval = getLongInterval(shortInterval) - def empty() = (data.size == 0) - def += (str: String) = (data += str) - override def toString() = "Block " + id - } - - class Bucket(val longInterval: Interval) { - val blocks = new ArrayBuffer[Block]() - var filled = false - def += (block: Block) = blocks += block - def empty() = (blocks.size == 0) - def ready() = (filled && !blocks.exists(! 
_.pushed)) - def blockIds() = blocks.map(_.id).toArray - override def toString() = "Bucket [" + longInterval + ", " + blocks.size + " blocks]" - } - - initLogging() - - val shortIntervalDurationMillis = shortIntervalDuration.toLong - val longIntervalDurationMillis = longIntervalDuration.toLong - - var currentBlock: Block = null - var currentBucket: Bucket = null - - val blocksForPushing = new Queue[Block]() - val buckets = new HashMap[Interval, Bucket]() with SynchronizedMap[Interval, Bucket] - - val blockUpdatingThread = new Thread() { override def run() { keepUpdatingCurrentBlock() } } - val blockPushingThread = new Thread() { override def run() { keepPushingBlocks() } } - - def start() { - blockUpdatingThread.start() - blockPushingThread.start() - } - - def += (data: String) = addData(data) - - def addData(data: String) { - if (currentBlock == null) { - updateCurrentBlock() - } - currentBlock.synchronized { - currentBlock += data - } - } - - def getShortInterval(time: Time): Interval = { - val intervalBegin = time.floor(shortIntervalDuration) - Interval(intervalBegin, intervalBegin + shortIntervalDuration) - } - - def getLongInterval(shortInterval: Interval): Interval = { - val intervalBegin = shortInterval.beginTime.floor(longIntervalDuration) - Interval(intervalBegin, intervalBegin + longIntervalDuration) - } - - def updateCurrentBlock() { - /*logInfo("Updating current block")*/ - val currentTime = Time(System.currentTimeMillis) - val shortInterval = getShortInterval(currentTime) - val longInterval = getLongInterval(shortInterval) - - def createBlock(reuseCurrentBlock: Boolean = false) { - val newBlockId = inputName + "-" + longInterval.toFormattedString + "-" + currentBucket.blocks.size - if (!reuseCurrentBlock) { - val newBlock = new Block(newBlockId, shortInterval) - /*logInfo("Created " + currentBlock)*/ - currentBlock = newBlock - } else { - currentBlock.shortInterval = shortInterval - currentBlock.id = newBlockId - } - } - - def createBucket() { - val newBucket = new Bucket(longInterval) - buckets += ((longInterval, newBucket)) - currentBucket = newBucket - /*logInfo("Created " + currentBucket + ", " + buckets.size + " buckets")*/ - } - - if (currentBlock == null || currentBucket == null) { - createBucket() - currentBucket.synchronized { - createBlock() - } - return - } - - currentBlock.synchronized { - var reuseCurrentBlock = false - - if (shortInterval != currentBlock.shortInterval) { - if (!currentBlock.empty) { - blocksForPushing.synchronized { - blocksForPushing += currentBlock - blocksForPushing.notifyAll() - } - } - - currentBucket.synchronized { - if (currentBlock.empty) { - reuseCurrentBlock = true - } else { - currentBucket += currentBlock - } - - if (longInterval != currentBucket.longInterval) { - currentBucket.filled = true - if (currentBucket.ready) { - currentBucket.notifyAll() - } - createBucket() - } - } - - createBlock(reuseCurrentBlock) - } - } - } - - def pushBlock(block: Block) { - try{ - if (blockManager != null) { - logInfo("Pushing block") - val startTime = System.currentTimeMillis - - val bytes = blockManager.dataSerialize("rdd_", block.data.toIterator) // TODO: Will this be an RDD block? 
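// Aside: getShortInterval/getLongInterval above both reduce to flooring a timestamp
// to the start of its enclosing interval. A minimal sketch of that arithmetic with
// plain millisecond Longs (the Time/Interval wrappers are left out, and the
// 100 ms / 2000 ms durations are only example values, not the configured ones):
object IntervalFloorSketch {
  // Floor a millisecond timestamp to the beginning of its enclosing interval.
  def floorToInterval(timeMillis: Long, intervalMillis: Long): Long =
    (timeMillis / intervalMillis) * intervalMillis

  def main(args: Array[String]) {
    val t = 12345L
    println(floorToInterval(t, 100L))   // 12300: start of the short interval [12300, 12400)
    println(floorToInterval(t, 2000L))  // 12000: start of the long interval (bucket) [12000, 14000)
  }
}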
- val finishTime = System.currentTimeMillis - logInfo(block + " serialization delay is " + (finishTime - startTime) / 1000.0 + " s") - - blockManager.putBytes(block.id.toString, bytes, StorageLevel.MEMORY_AND_DISK_SER_2) - /*blockManager.putBytes(block.id.toString, bytes, StorageLevel.DISK_AND_MEMORY_DESER_2)*/ - /*blockManager.put(block.id.toString, block.data.toIterator, StorageLevel.DISK_AND_MEMORY_DESER)*/ - /*blockManager.put(block.id.toString, block.data.toIterator, StorageLevel.DISK_AND_MEMORY)*/ - val finishTime1 = System.currentTimeMillis - logInfo(block + " put delay is " + (finishTime1 - startTime) / 1000.0 + " s") - } else { - logWarning(block + " not put as block manager is null") - } - } catch { - case e: Exception => logError("Exception writing " + block + " to blockmanager" , e) - } - } - - def getBucket(longInterval: Interval): Option[Bucket] = { - buckets.get(longInterval) - } - - def clearBucket(longInterval: Interval) { - buckets.remove(longInterval) - } - - def keepUpdatingCurrentBlock() { - logInfo("Thread to update current block started") - while(true) { - updateCurrentBlock() - val currentTimeMillis = System.currentTimeMillis - val sleepTimeMillis = (currentTimeMillis / shortIntervalDurationMillis + 1) * - shortIntervalDurationMillis - currentTimeMillis + 1 - Thread.sleep(sleepTimeMillis) - } - } - - def keepPushingBlocks() { - var loop = true - logInfo("Thread to push blocks started") - while(loop) { - val block = blocksForPushing.synchronized { - if (blocksForPushing.size == 0) { - blocksForPushing.wait() - } - blocksForPushing.dequeue - } - pushBlock(block) - block.pushed = true - block.data.clear() - - val bucket = buckets(block.longInterval) - bucket.synchronized { - if (bucket.ready) { - bucket.notifyAll() - } - } - } - } - } - - - class ConnectionListener(port: Int, dataHandler: DataHandler) - extends Thread with Logging { - initLogging() - override def run { - try { - val listener = new ServerSocket(port) - logInfo("Listening on port " + port) - while (true) { - new ConnectionHandler(listener.accept(), dataHandler).start(); - } - listener.close() - } catch { - case e: Exception => logError("", e); - } - } - } - - class ConnectionHandler(socket: Socket, dataHandler: DataHandler) extends Thread with Logging { - initLogging() - override def run { - logInfo("New connection from " + socket.getInetAddress() + ":" + socket.getPort) - val bytes = new Array[Byte](100 * 1024 * 1024) - try { - - val inputStream = new DataInputStream(new BufferedInputStream(socket.getInputStream, 1024 * 1024)) - /*val inputStream = new DataInputStream(new BufferedInputStream(socket.getInputStream))*/ - var str: String = null - str = inputStream.readUTF - while(str != null) { - dataHandler += str - str = inputStream.readUTF() - } - - /* - var loop = true - while(loop) { - val numRead = inputStream.read(bytes) - if (numRead < 0) { - loop = false - } - inbox += ((LongTime(SystemTime.currentTimeMillis), "test")) - }*/ - - inputStream.close() - } catch { - case e => logError("Error receiving data", e) - } - socket.close() - } - } - - initLogging() - - val masterHost = System.getProperty("spark.master.host") - val masterPort = System.getProperty("spark.master.port").toInt - - val akkaPath = "akka://spark@%s:%s/user/".format(masterHost, masterPort) - val sparkstreamScheduler = actorSystem.actorFor(akkaPath + "/SparkStreamScheduler") - val testStreamCoordinator = actorSystem.actorFor(akkaPath + "/TestStreamCoordinator") - - logInfo("Getting stream details from master " + masterHost + ":" + 
masterPort) - - val timeout = 50 millis - - var started = false - while (!started) { - askActor[String](testStreamCoordinator, TestStarted) match { - case Some(str) => { - started = true - logInfo("TestStreamCoordinator started") - } - case None => { - logInfo("TestStreamCoordinator not started yet") - Thread.sleep(200) - } - } - } - - val streamDetails = askActor[GotStreamDetails](testStreamCoordinator, GetStreamDetails) match { - case Some(details) => details - case None => throw new Exception("Could not get stream details") - } - logInfo("Stream details received: " + streamDetails) - - val inputName = streamDetails.name - val intervalDurationMillis = streamDetails.duration - val intervalDuration = Time(intervalDurationMillis) - - val dataHandler = new DataHandler( - inputName, - intervalDuration, - Time(TestStreamReceiver3.SHORT_INTERVAL_MILLIS), - blockManager) - - val connListener = new ConnectionListener(TestStreamReceiver3.PORT, dataHandler) - - // Send a message to an actor and return an option with its reply, or None if this times out - def askActor[T](actor: ActorRef, message: Any): Option[T] = { - try { - val future = actor.ask(message)(timeout) - return Some(Await.result(future, timeout).asInstanceOf[T]) - } catch { - case e: Exception => - logInfo("Error communicating with " + actor, e) - return None - } - } - - override def run() { - connListener.start() - dataHandler.start() - - var interval = Interval.currentInterval(intervalDuration) - var dataStarted = false - - while(true) { - waitFor(interval.endTime) - logInfo("Woken up at " + System.currentTimeMillis + " for " + interval) - dataHandler.getBucket(interval) match { - case Some(bucket) => { - logInfo("Found " + bucket + " for " + interval) - bucket.synchronized { - if (!bucket.ready) { - logInfo("Waiting for " + bucket) - bucket.wait() - logInfo("Wait over for " + bucket) - } - if (dataStarted || !bucket.empty) { - logInfo("Notifying " + bucket) - notifyScheduler(interval, bucket.blockIds) - dataStarted = true - } - bucket.blocks.clear() - dataHandler.clearBucket(interval) - } - } - case None => { - logInfo("Found none for " + interval) - if (dataStarted) { - logInfo("Notifying none") - notifyScheduler(interval, Array[String]()) - } - } - } - interval = interval.next - } - } - - def waitFor(time: Time) { - val currentTimeMillis = System.currentTimeMillis - val targetTimeMillis = time.milliseconds - if (currentTimeMillis < targetTimeMillis) { - val sleepTime = (targetTimeMillis - currentTimeMillis) - Thread.sleep(sleepTime + 1) - } - } - - def notifyScheduler(interval: Interval, blockIds: Array[String]) { - try { - sparkstreamScheduler ! 
InputGenerated(inputName, interval, blockIds.toArray) - val time = interval.endTime - val delay = (System.currentTimeMillis - time.milliseconds) / 1000.0 - logInfo("Pushing delay for " + time + " is " + delay + " s") - } catch { - case _ => logError("Exception notifying scheduler at interval " + interval) - } - } -} - -object TestStreamReceiver3 { - - val PORT = 9999 - val SHORT_INTERVAL_MILLIS = 100 - - def main(args: Array[String]) { - System.setProperty("spark.master.host", Utils.localHostName) - System.setProperty("spark.master.port", "7078") - val details = Array(("Sentences", 2000L)) - val (actorSystem, port) = AkkaUtils.createActorSystem("spark", Utils.localHostName, 7078) - actorSystem.actorOf(Props(new TestStreamCoordinator(details)), name = "TestStreamCoordinator") - new TestStreamReceiver3(actorSystem, null).start() - } -} - - - diff --git a/streaming/src/main/scala/spark/streaming/util/TestStreamReceiver4.scala b/streaming/src/main/scala/spark/streaming/util/TestStreamReceiver4.scala deleted file mode 100644 index 31754870dd..0000000000 --- a/streaming/src/main/scala/spark/streaming/util/TestStreamReceiver4.scala +++ /dev/null @@ -1,374 +0,0 @@ -package spark.streaming.util - -import spark.streaming._ -import spark._ -import spark.storage._ -import spark.util.AkkaUtils - -import scala.math._ -import scala.collection.mutable.{Queue, HashMap, ArrayBuffer, SynchronizedMap} - -import java.io._ -import java.nio._ -import java.nio.charset._ -import java.nio.channels._ -import java.util.concurrent.Executors - -import akka.actor._ -import akka.actor.Actor -import akka.dispatch._ -import akka.pattern.ask -import akka.util.duration._ - -class TestStreamReceiver4(actorSystem: ActorSystem, blockManager: BlockManager) -extends Thread with Logging { - - class DataHandler( - inputName: String, - longIntervalDuration: Time, - shortIntervalDuration: Time, - blockManager: BlockManager - ) - extends Logging { - - class Block(val id: String, val shortInterval: Interval, val buffer: ByteBuffer) { - var pushed = false - def longInterval = getLongInterval(shortInterval) - override def toString() = "Block " + id - } - - class Bucket(val longInterval: Interval) { - val blocks = new ArrayBuffer[Block]() - var filled = false - def += (block: Block) = blocks += block - def empty() = (blocks.size == 0) - def ready() = (filled && !blocks.exists(! 
_.pushed)) - def blockIds() = blocks.map(_.id).toArray - override def toString() = "Bucket [" + longInterval + ", " + blocks.size + " blocks]" - } - - initLogging() - - val syncOnLastShortInterval = true - - val shortIntervalDurationMillis = shortIntervalDuration.milliseconds - val longIntervalDurationMillis = longIntervalDuration.milliseconds - - val buffer = ByteBuffer.allocateDirect(100 * 1024 * 1024) - var currentShortInterval = Interval.currentInterval(shortIntervalDuration) - - val blocksForPushing = new Queue[Block]() - val buckets = new HashMap[Interval, Bucket]() with SynchronizedMap[Interval, Bucket] - - val bufferProcessingThread = new Thread() { override def run() { keepProcessingBuffers() } } - val blockPushingExecutor = Executors.newFixedThreadPool(5) - - - def start() { - buffer.clear() - if (buffer.remaining == 0) { - throw new Exception("Buffer initialization error") - } - bufferProcessingThread.start() - } - - def readDataToBuffer(func: ByteBuffer => Int): Int = { - buffer.synchronized { - if (buffer.remaining == 0) { - logInfo("Received first data for interval " + currentShortInterval) - } - func(buffer) - } - } - - def getLongInterval(shortInterval: Interval): Interval = { - val intervalBegin = shortInterval.beginTime.floor(longIntervalDuration) - Interval(intervalBegin, intervalBegin + longIntervalDuration) - } - - def processBuffer() { - - def readInt(buffer: ByteBuffer): Int = { - var offset = 0 - var result = 0 - while (offset < 32) { - val b = buffer.get() - result |= ((b & 0x7F) << offset) - if ((b & 0x80) == 0) { - return result - } - offset += 7 - } - throw new Exception("Malformed zigzag-encoded integer") - } - - val currentLongInterval = getLongInterval(currentShortInterval) - val startTime = System.currentTimeMillis - val newBuffer: ByteBuffer = buffer.synchronized { - buffer.flip() - if (buffer.remaining == 0) { - buffer.clear() - null - } else { - logDebug("Processing interval " + currentShortInterval + " with delay of " + (System.currentTimeMillis - startTime) + " ms") - val startTime1 = System.currentTimeMillis - var loop = true - var count = 0 - while(loop) { - buffer.mark() - try { - val len = readInt(buffer) - buffer.position(buffer.position + len) - count += 1 - } catch { - case e: Exception => { - buffer.reset() - loop = false - } - } - } - val bytesToCopy = buffer.position - val newBuf = ByteBuffer.allocate(bytesToCopy) - buffer.position(0) - newBuf.put(buffer.slice().limit(bytesToCopy).asInstanceOf[ByteBuffer]) - newBuf.flip() - buffer.position(bytesToCopy) - buffer.compact() - newBuf - } - } - - if (newBuffer != null) { - val bucket = buckets.getOrElseUpdate(currentLongInterval, new Bucket(currentLongInterval)) - bucket.synchronized { - val newBlockId = inputName + "-" + currentLongInterval.toFormattedString + "-" + currentShortInterval.toFormattedString - val newBlock = new Block(newBlockId, currentShortInterval, newBuffer) - if (syncOnLastShortInterval) { - bucket += newBlock - } - logDebug("Created " + newBlock + " with " + newBuffer.remaining + " bytes, creation delay is " + (System.currentTimeMillis - currentShortInterval.endTime.milliseconds) / 1000.0 + " s" ) - blockPushingExecutor.execute(new Runnable() { def run() { pushAndNotifyBlock(newBlock) } }) - } - } - - val newShortInterval = Interval.currentInterval(shortIntervalDuration) - val newLongInterval = getLongInterval(newShortInterval) - - if (newLongInterval != currentLongInterval) { - buckets.get(currentLongInterval) match { - case Some(bucket) => { - bucket.synchronized { - 
bucket.filled = true - if (bucket.ready) { - bucket.notifyAll() - } - } - } - case None => - } - buckets += ((newLongInterval, new Bucket(newLongInterval))) - } - - currentShortInterval = newShortInterval - } - - def pushBlock(block: Block) { - try{ - if (blockManager != null) { - val startTime = System.currentTimeMillis - logInfo(block + " put start delay is " + (startTime - block.shortInterval.endTime.milliseconds) + " ms") - /*blockManager.putBytes(block.id.toString, block.buffer, StorageLevel.DISK_AND_MEMORY)*/ - /*blockManager.putBytes(block.id.toString, block.buffer, StorageLevel.DISK_AND_MEMORY_2)*/ - blockManager.putBytes(block.id.toString, block.buffer, StorageLevel.MEMORY_ONLY_2) - /*blockManager.putBytes(block.id.toString, block.buffer, StorageLevel.MEMORY_ONLY)*/ - /*blockManager.putBytes(block.id.toString, block.buffer, StorageLevel.DISK_AND_MEMORY_DESER)*/ - /*blockManager.putBytes(block.id.toString, block.buffer, StorageLevel.DISK_AND_MEMORY_DESER_2)*/ - val finishTime = System.currentTimeMillis - logInfo(block + " put delay is " + (finishTime - startTime) + " ms") - } else { - logWarning(block + " not put as block manager is null") - } - } catch { - case e: Exception => logError("Exception writing " + block + " to blockmanager" , e) - } - } - - def getBucket(longInterval: Interval): Option[Bucket] = { - buckets.get(longInterval) - } - - def clearBucket(longInterval: Interval) { - buckets.remove(longInterval) - } - - def keepProcessingBuffers() { - logInfo("Thread to process buffers started") - while(true) { - processBuffer() - val currentTimeMillis = System.currentTimeMillis - val sleepTimeMillis = (currentTimeMillis / shortIntervalDurationMillis + 1) * - shortIntervalDurationMillis - currentTimeMillis + 1 - Thread.sleep(sleepTimeMillis) - } - } - - def pushAndNotifyBlock(block: Block) { - pushBlock(block) - block.pushed = true - val bucket = if (syncOnLastShortInterval) { - buckets(block.longInterval) - } else { - var longInterval = block.longInterval - while(!buckets.contains(longInterval)) { - logWarning("Skipping bucket of " + longInterval + " for " + block) - longInterval = longInterval.next - } - val chosenBucket = buckets(longInterval) - logDebug("Choosing bucket of " + longInterval + " for " + block) - chosenBucket += block - chosenBucket - } - - bucket.synchronized { - if (bucket.ready) { - bucket.notifyAll() - } - } - - } - } - - - class ReceivingConnectionHandler(host: String, port: Int, dataHandler: DataHandler) - extends ConnectionHandler(host, port, false) { - - override def ready(key: SelectionKey) { - changeInterest(key, SelectionKey.OP_READ) - } - - override def read(key: SelectionKey) { - try { - val channel = key.channel.asInstanceOf[SocketChannel] - val bytesRead = dataHandler.readDataToBuffer(channel.read) - if (bytesRead < 0) { - close(key) - } - } catch { - case e: IOException => { - logError("Error reading", e) - close(key) - } - } - } - } - - initLogging() - - val masterHost = System.getProperty("spark.master.host", "localhost") - val masterPort = System.getProperty("spark.master.port", "7078").toInt - - val akkaPath = "akka://spark@%s:%s/user/".format(masterHost, masterPort) - val sparkstreamScheduler = actorSystem.actorFor(akkaPath + "/SparkStreamScheduler") - val testStreamCoordinator = actorSystem.actorFor(akkaPath + "/TestStreamCoordinator") - - logInfo("Getting stream details from master " + masterHost + ":" + masterPort) - - val streamDetails = askActor[GotStreamDetails](testStreamCoordinator, GetStreamDetails) match { - case Some(details) => 
details - case None => throw new Exception("Could not get stream details") - } - logInfo("Stream details received: " + streamDetails) - - val inputName = streamDetails.name - val intervalDurationMillis = streamDetails.duration - val intervalDuration = Milliseconds(intervalDurationMillis) - val shortIntervalDuration = Milliseconds(System.getProperty("spark.stream.shortinterval", "500").toInt) - - val dataHandler = new DataHandler(inputName, intervalDuration, shortIntervalDuration, blockManager) - val connectionHandler = new ReceivingConnectionHandler("localhost", 9999, dataHandler) - - val timeout = 100 millis - - // Send a message to an actor and return an option with its reply, or None if this times out - def askActor[T](actor: ActorRef, message: Any): Option[T] = { - try { - val future = actor.ask(message)(timeout) - return Some(Await.result(future, timeout).asInstanceOf[T]) - } catch { - case e: Exception => - logInfo("Error communicating with " + actor, e) - return None - } - } - - override def run() { - connectionHandler.start() - dataHandler.start() - - var interval = Interval.currentInterval(intervalDuration) - var dataStarted = false - - - while(true) { - waitFor(interval.endTime) - /*logInfo("Woken up at " + System.currentTimeMillis + " for " + interval)*/ - dataHandler.getBucket(interval) match { - case Some(bucket) => { - logDebug("Found " + bucket + " for " + interval) - bucket.synchronized { - if (!bucket.ready) { - logDebug("Waiting for " + bucket) - bucket.wait() - logDebug("Wait over for " + bucket) - } - if (dataStarted || !bucket.empty) { - logDebug("Notifying " + bucket) - notifyScheduler(interval, bucket.blockIds) - dataStarted = true - } - bucket.blocks.clear() - dataHandler.clearBucket(interval) - } - } - case None => { - logDebug("Found none for " + interval) - if (dataStarted) { - logDebug("Notifying none") - notifyScheduler(interval, Array[String]()) - } - } - } - interval = interval.next - } - } - - def waitFor(time: Time) { - val currentTimeMillis = System.currentTimeMillis - val targetTimeMillis = time.milliseconds - if (currentTimeMillis < targetTimeMillis) { - val sleepTime = (targetTimeMillis - currentTimeMillis) - Thread.sleep(sleepTime + 1) - } - } - - def notifyScheduler(interval: Interval, blockIds: Array[String]) { - try { - sparkstreamScheduler ! InputGenerated(inputName, interval, blockIds.toArray) - val time = interval.endTime - val delay = (System.currentTimeMillis - time.milliseconds) - logInfo("Notification delay for " + time + " is " + delay + " ms") - } catch { - case e: Exception => logError("Exception notifying scheduler at interval " + interval + ": " + e) - } - } -} - - -object TestStreamReceiver4 { - def main(args: Array[String]) { - val details = Array(("Sentences", 2000L)) - val (actorSystem, port) = AkkaUtils.createActorSystem("spark", Utils.localHostName, 7078) - actorSystem.actorOf(Props(new TestStreamCoordinator(details)), name = "TestStreamCoordinator") - new TestStreamReceiver4(actorSystem, null).start() - } -} From e5a09367870be757a0abb3e2ad7a53e74110b033 Mon Sep 17 00:00:00 2001 From: Denny Date: Fri, 9 Nov 2012 12:23:46 -0800 Subject: [PATCH 103/226] Kafka Stream. 
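In outline, this change threads per-partition Kafka offsets through the receiver as block metadata, accumulates them in the input DStream, and writes them into the checkpoint so that a restarted StreamingContext can resume from the recorded offsets. A rough, self-contained sketch of that bookkeeping (the names mirror the patch, but there is no Kafka or Spark dependency; purely illustrative):

    import scala.collection.mutable.HashMap

    case class PartitionKey(brokerId: Int, topic: String, groupId: String, partId: Int)
    case class BatchMetadata(timestamp: Long, offsets: Map[PartitionKey, Long])

    object OffsetTrackingSketch {
      // Offsets accumulated per batch time, as the receiver reports block metadata.
      val savedOffsets = HashMap[Long, Map[PartitionKey, Long]]()

      // Receiver side: remember the latest consumed offset for each partition.
      def recordOffset(live: HashMap[PartitionKey, Long], key: PartitionKey, offset: Long) {
        live.put(key, offset)
      }

      // DStream side (compare addMetadata): fold one batch's metadata into the stream state.
      def addMetadata(meta: BatchMetadata) {
        savedOffsets(meta.timestamp) = meta.offsets
      }

      def main(args: Array[String]) {
        val live = HashMap[PartitionKey, Long]()
        recordOffset(live, PartitionKey(0, "test", "test_group", 0), 2382L)
        addMetadata(BatchMetadata(System.currentTimeMillis, live.toMap))
        // savedOffsets would be written out with the checkpoint; a restarted context
        // could feed it back as initialOffsets to resume where the old run stopped.
        println(savedOffsets)
      }
    }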
--- .../main/scala/spark/streaming/DStream.scala | 31 ++-- .../scala/spark/streaming/DataHandler.scala | 83 +++++++++ .../spark/streaming/NetworkInputDStream.scala | 21 ++- .../spark/streaming/NetworkInputTracker.scala | 9 +- .../spark/streaming/RawInputDStream.scala | 2 +- .../spark/streaming/SocketInputDStream.scala | 70 +------ .../spark/streaming/StreamingContext.scala | 4 +- .../streaming/examples/KafkaWordCount.scala | 13 +- .../streaming/input/KafkaInputDStream.scala | 175 +++++++++++------- 9 files changed, 246 insertions(+), 162 deletions(-) create mode 100644 streaming/src/main/scala/spark/streaming/DataHandler.scala diff --git a/streaming/src/main/scala/spark/streaming/DStream.scala b/streaming/src/main/scala/spark/streaming/DStream.scala index 922ff5088d..f891730317 100644 --- a/streaming/src/main/scala/spark/streaming/DStream.scala +++ b/streaming/src/main/scala/spark/streaming/DStream.scala @@ -17,6 +17,8 @@ import java.io.{ObjectInputStream, IOException, ObjectOutputStream} import org.apache.hadoop.fs.Path import org.apache.hadoop.conf.Configuration +case class DStreamCheckpointData(rdds: HashMap[Time, Any]) + abstract class DStream[T: ClassManifest] (@transient var ssc: StreamingContext) extends Serializable with Logging { @@ -59,7 +61,7 @@ extends Serializable with Logging { // Checkpoint details protected[streaming] val mustCheckpoint = false protected[streaming] var checkpointInterval: Time = null - protected[streaming] val checkpointData = new HashMap[Time, Any]() + protected[streaming] var checkpointData = DStreamCheckpointData(HashMap[Time, Any]()) // Reference to whole DStream graph protected[streaming] var graph: DStreamGraph = null @@ -280,6 +282,13 @@ extends Serializable with Logging { dependencies.foreach(_.forgetOldRDDs(time)) } + /* Adds metadata to the Stream while it is running. + * This methd should be overwritten by sublcasses of InputDStream. + */ + protected[streaming] def addMetadata(metadata: Any) { + logInfo("Dropping Metadata: " + metadata.toString) + } + /** * Refreshes the list of checkpointed RDDs that will be saved along with checkpoint of * this stream. This is an internal method that should not be called directly. This is @@ -288,22 +297,22 @@ extends Serializable with Logging { * this method to save custom checkpoint data. */ protected[streaming] def updateCheckpointData(currentTime: Time) { - val newCheckpointData = generatedRDDs.filter(_._2.getCheckpointData() != null) + val newRdds = generatedRDDs.filter(_._2.getCheckpointData() != null) .map(x => (x._1, x._2.getCheckpointData())) - val oldCheckpointData = checkpointData.clone() - if (newCheckpointData.size > 0) { - checkpointData.clear() - checkpointData ++= newCheckpointData + val oldRdds = checkpointData.rdds.clone() + if (newRdds.size > 0) { + checkpointData.rdds.clear() + checkpointData.rdds ++= newRdds } dependencies.foreach(_.updateCheckpointData(currentTime)) - newCheckpointData.foreach { + newRdds.foreach { case (time, data) => { logInfo("Added checkpointed RDD for time " + time + " to stream checkpoint") } } - if (newCheckpointData.size > 0) { - (oldCheckpointData -- newCheckpointData.keySet).foreach { + if (newRdds.size > 0) { + (oldRdds -- newRdds.keySet).foreach { case (time, data) => { val path = new Path(data.toString) val fs = path.getFileSystem(new Configuration()) @@ -322,8 +331,8 @@ extends Serializable with Logging { * override the updateCheckpointData() method would also need to override this method. 
*/ protected[streaming] def restoreCheckpointData() { - logInfo("Restoring checkpoint data from " + checkpointData.size + " checkpointed RDDs") - checkpointData.foreach { + logInfo("Restoring checkpoint data from " + checkpointData.rdds.size + " checkpointed RDDs") + checkpointData.rdds.foreach { case(time, data) => { logInfo("Restoring checkpointed RDD for time " + time + " from file") generatedRDDs += ((time, ssc.sc.objectFile[T](data.toString))) diff --git a/streaming/src/main/scala/spark/streaming/DataHandler.scala b/streaming/src/main/scala/spark/streaming/DataHandler.scala new file mode 100644 index 0000000000..05f307a8d1 --- /dev/null +++ b/streaming/src/main/scala/spark/streaming/DataHandler.scala @@ -0,0 +1,83 @@ +package spark.streaming + +import java.util.concurrent.ArrayBlockingQueue +import scala.collection.mutable.ArrayBuffer +import spark.Logging +import spark.streaming.util.{RecurringTimer, SystemClock} +import spark.storage.StorageLevel + + +/** + * This is a helper object that manages the data received from the socket. It divides + * the object received into small batches of 100s of milliseconds, pushes them as + * blocks into the block manager and reports the block IDs to the network input + * tracker. It starts two threads, one to periodically start a new batch and prepare + * the previous batch of as a block, the other to push the blocks into the block + * manager. + */ + class DataHandler[T](receiver: NetworkReceiver[T], storageLevel: StorageLevel) + extends Serializable with Logging { + + case class Block(id: String, iterator: Iterator[T], metadata: Any = null) + + val clock = new SystemClock() + val blockInterval = 200L + val blockIntervalTimer = new RecurringTimer(clock, blockInterval, updateCurrentBuffer) + val blockStorageLevel = storageLevel + val blocksForPushing = new ArrayBlockingQueue[Block](1000) + val blockPushingThread = new Thread() { override def run() { keepPushingBlocks() } } + + var currentBuffer = new ArrayBuffer[T] + + def createBlock(blockId: String, iterator: Iterator[T]) : Block = { + new Block(blockId, iterator) + } + + def start() { + blockIntervalTimer.start() + blockPushingThread.start() + logInfo("Data handler started") + } + + def stop() { + blockIntervalTimer.stop() + blockPushingThread.interrupt() + logInfo("Data handler stopped") + } + + def += (obj: T) { + currentBuffer += obj + } + + def updateCurrentBuffer(time: Long) { + try { + val newBlockBuffer = currentBuffer + currentBuffer = new ArrayBuffer[T] + if (newBlockBuffer.size > 0) { + val blockId = "input-" + receiver.streamId + "- " + (time - blockInterval) + val newBlock = createBlock(blockId, newBlockBuffer.toIterator) + blocksForPushing.add(newBlock) + } + } catch { + case ie: InterruptedException => + logInfo("Block interval timer thread interrupted") + case e: Exception => + receiver.stop() + } + } + + def keepPushingBlocks() { + logInfo("Block pushing thread started") + try { + while(true) { + val block = blocksForPushing.take() + receiver.pushBlock(block.id, block.iterator, block.metadata, storageLevel) + } + } catch { + case ie: InterruptedException => + logInfo("Block pushing thread interrupted") + case e: Exception => + receiver.stop() + } + } + } \ No newline at end of file diff --git a/streaming/src/main/scala/spark/streaming/NetworkInputDStream.scala b/streaming/src/main/scala/spark/streaming/NetworkInputDStream.scala index f3f4c3ab13..d3f37b8b0e 100644 --- a/streaming/src/main/scala/spark/streaming/NetworkInputDStream.scala +++ 
b/streaming/src/main/scala/spark/streaming/NetworkInputDStream.scala @@ -4,9 +4,11 @@ import scala.collection.mutable.ArrayBuffer import spark.{Logging, SparkEnv, RDD} import spark.rdd.BlockRDD +import spark.streaming.util.{RecurringTimer, SystemClock} import spark.storage.StorageLevel import java.nio.ByteBuffer +import java.util.concurrent.ArrayBlockingQueue import akka.actor.{Props, Actor} import akka.pattern.ask @@ -41,10 +43,10 @@ abstract class NetworkInputDStream[T: ClassManifest](@transient ssc_ : Streaming sealed trait NetworkReceiverMessage case class StopReceiver(msg: String) extends NetworkReceiverMessage -case class ReportBlock(blockId: String) extends NetworkReceiverMessage +case class ReportBlock(blockId: String, metadata: Any) extends NetworkReceiverMessage case class ReportError(msg: String) extends NetworkReceiverMessage -abstract class NetworkReceiver[T: ClassManifest](streamId: Int) extends Serializable with Logging { +abstract class NetworkReceiver[T: ClassManifest](val streamId: Int) extends Serializable with Logging { initLogging() @@ -106,21 +108,23 @@ abstract class NetworkReceiver[T: ClassManifest](streamId: Int) extends Serializ actor ! ReportError(e.toString) } + /** * This method pushes a block (as iterator of values) into the block manager. */ - protected def pushBlock(blockId: String, iterator: Iterator[T], level: StorageLevel) { + def pushBlock(blockId: String, iterator: Iterator[T], metadata: Any, level: StorageLevel) { val buffer = new ArrayBuffer[T] ++ iterator env.blockManager.put(blockId, buffer.asInstanceOf[ArrayBuffer[Any]], level) - actor ! ReportBlock(blockId) + + actor ! ReportBlock(blockId, metadata) } /** * This method pushes a block (as bytes) into the block manager. */ - protected def pushBlock(blockId: String, bytes: ByteBuffer, level: StorageLevel) { + def pushBlock(blockId: String, bytes: ByteBuffer, metadata: Any, level: StorageLevel) { env.blockManager.putBytes(blockId, bytes, level) - actor ! ReportBlock(blockId) + actor ! ReportBlock(blockId, metadata) } /** A helper actor that communicates with the NetworkInputTracker */ @@ -138,8 +142,8 @@ abstract class NetworkReceiver[T: ClassManifest](streamId: Int) extends Serializ } override def receive() = { - case ReportBlock(blockId) => - tracker ! AddBlocks(streamId, Array(blockId)) + case ReportBlock(blockId, metadata) => + tracker ! AddBlocks(streamId, Array(blockId), metadata) case ReportError(msg) => tracker ! DeregisterReceiver(streamId, msg) case StopReceiver(msg) => @@ -147,5 +151,6 @@ abstract class NetworkReceiver[T: ClassManifest](streamId: Int) extends Serializ tracker ! 
DeregisterReceiver(streamId, msg) } } + } diff --git a/streaming/src/main/scala/spark/streaming/NetworkInputTracker.scala b/streaming/src/main/scala/spark/streaming/NetworkInputTracker.scala index 07ef79415d..4d9346edd8 100644 --- a/streaming/src/main/scala/spark/streaming/NetworkInputTracker.scala +++ b/streaming/src/main/scala/spark/streaming/NetworkInputTracker.scala @@ -13,7 +13,7 @@ import akka.dispatch._ trait NetworkInputTrackerMessage case class RegisterReceiver(streamId: Int, receiverActor: ActorRef) extends NetworkInputTrackerMessage -case class AddBlocks(streamId: Int, blockIds: Seq[String]) extends NetworkInputTrackerMessage +case class AddBlocks(streamId: Int, blockIds: Seq[String], metadata: Any) extends NetworkInputTrackerMessage case class DeregisterReceiver(streamId: Int, msg: String) extends NetworkInputTrackerMessage @@ -22,7 +22,7 @@ class NetworkInputTracker( @transient networkInputStreams: Array[NetworkInputDStream[_]]) extends Logging { - val networkInputStreamIds = networkInputStreams.map(_.id).toArray + val networkInputStreamMap = Map(networkInputStreams.map(x => (x.id, x)): _*) val receiverExecutor = new ReceiverExecutor() val receiverInfo = new HashMap[Int, ActorRef] val receivedBlockIds = new HashMap[Int, Queue[String]] @@ -53,14 +53,14 @@ class NetworkInputTracker( private class NetworkInputTrackerActor extends Actor { def receive = { case RegisterReceiver(streamId, receiverActor) => { - if (!networkInputStreamIds.contains(streamId)) { + if (!networkInputStreamMap.contains(streamId)) { throw new Exception("Register received for unexpected id " + streamId) } receiverInfo += ((streamId, receiverActor)) logInfo("Registered receiver for network stream " + streamId) sender ! true } - case AddBlocks(streamId, blockIds) => { + case AddBlocks(streamId, blockIds, metadata) => { val tmp = receivedBlockIds.synchronized { if (!receivedBlockIds.contains(streamId)) { receivedBlockIds += ((streamId, new Queue[String])) @@ -70,6 +70,7 @@ class NetworkInputTracker( tmp.synchronized { tmp ++= blockIds } + networkInputStreamMap(streamId).addMetadata(metadata) } case DeregisterReceiver(streamId, msg) => { receiverInfo -= streamId diff --git a/streaming/src/main/scala/spark/streaming/RawInputDStream.scala b/streaming/src/main/scala/spark/streaming/RawInputDStream.scala index e022b85fbe..90d8528d5b 100644 --- a/streaming/src/main/scala/spark/streaming/RawInputDStream.scala +++ b/streaming/src/main/scala/spark/streaming/RawInputDStream.scala @@ -48,7 +48,7 @@ class RawNetworkReceiver(streamId: Int, host: String, port: Int, storageLevel: S val buffer = queue.take() val blockId = "input-" + streamId + "-" + nextBlockNumber nextBlockNumber += 1 - pushBlock(blockId, buffer, storageLevel) + pushBlock(blockId, buffer, null, storageLevel) } } } diff --git a/streaming/src/main/scala/spark/streaming/SocketInputDStream.scala b/streaming/src/main/scala/spark/streaming/SocketInputDStream.scala index b566200273..ff99d50b76 100644 --- a/streaming/src/main/scala/spark/streaming/SocketInputDStream.scala +++ b/streaming/src/main/scala/spark/streaming/SocketInputDStream.scala @@ -32,7 +32,7 @@ class SocketReceiver[T: ClassManifest]( storageLevel: StorageLevel ) extends NetworkReceiver[T](streamId) { - lazy protected val dataHandler = new DataHandler(this) + lazy protected val dataHandler = new DataHandler(this, storageLevel) protected def onStart() { logInfo("Connecting to " + host + ":" + port) @@ -50,74 +50,6 @@ class SocketReceiver[T: ClassManifest]( dataHandler.stop() } - /** - * This is a helper 
object that manages the data received from the socket. It divides - * the object received into small batches of 100s of milliseconds, pushes them as - * blocks into the block manager and reports the block IDs to the network input - * tracker. It starts two threads, one to periodically start a new batch and prepare - * the previous batch of as a block, the other to push the blocks into the block - * manager. - */ - class DataHandler(receiver: NetworkReceiver[T]) extends Serializable { - case class Block(id: String, iterator: Iterator[T]) - - val clock = new SystemClock() - val blockInterval = 200L - val blockIntervalTimer = new RecurringTimer(clock, blockInterval, updateCurrentBuffer) - val blockStorageLevel = storageLevel - val blocksForPushing = new ArrayBlockingQueue[Block](1000) - val blockPushingThread = new Thread() { override def run() { keepPushingBlocks() } } - - var currentBuffer = new ArrayBuffer[T] - - def start() { - blockIntervalTimer.start() - blockPushingThread.start() - logInfo("Data handler started") - } - - def stop() { - blockIntervalTimer.stop() - blockPushingThread.interrupt() - logInfo("Data handler stopped") - } - - def += (obj: T) { - currentBuffer += obj - } - - def updateCurrentBuffer(time: Long) { - try { - val newBlockBuffer = currentBuffer - currentBuffer = new ArrayBuffer[T] - if (newBlockBuffer.size > 0) { - val blockId = "input-" + streamId + "- " + (time - blockInterval) - val newBlock = new Block(blockId, newBlockBuffer.toIterator) - blocksForPushing.add(newBlock) - } - } catch { - case ie: InterruptedException => - logInfo("Block interval timer thread interrupted") - case e: Exception => - receiver.stop() - } - } - - def keepPushingBlocks() { - logInfo("Block pushing thread started") - try { - while(true) { - val block = blocksForPushing.take() - pushBlock(block.id, block.iterator, storageLevel) - } - } catch { - case ie: InterruptedException => - logInfo("Block pushing thread interrupted") - case e: Exception => - receiver.stop() - } - } - } } diff --git a/streaming/src/main/scala/spark/streaming/StreamingContext.scala b/streaming/src/main/scala/spark/streaming/StreamingContext.scala index 05c83d6c08..770fd61498 100644 --- a/streaming/src/main/scala/spark/streaming/StreamingContext.scala +++ b/streaming/src/main/scala/spark/streaming/StreamingContext.scala @@ -106,9 +106,11 @@ class StreamingContext ( hostname: String, port: Int, groupId: String, + topics: Map[String, Int], + initialOffsets: Map[KafkaPartitionKey, Long] = Map[KafkaPartitionKey, Long](), storageLevel: StorageLevel = StorageLevel.MEMORY_ONLY_SER_2 ): DStream[T] = { - val inputStream = new KafkaInputDStream[T](this, hostname, port, groupId, storageLevel) + val inputStream = new KafkaInputDStream[T](this, hostname, port, groupId, topics, initialOffsets, storageLevel) graph.addInputStream(inputStream) inputStream } diff --git a/streaming/src/main/scala/spark/streaming/examples/KafkaWordCount.scala b/streaming/src/main/scala/spark/streaming/examples/KafkaWordCount.scala index 3f637150d1..655f9627b3 100644 --- a/streaming/src/main/scala/spark/streaming/examples/KafkaWordCount.scala +++ b/streaming/src/main/scala/spark/streaming/examples/KafkaWordCount.scala @@ -1,6 +1,6 @@ package spark.streaming.examples -import spark.streaming.{Seconds, StreamingContext, KafkaInputDStream} +import spark.streaming._ import spark.streaming.StreamingContext._ import spark.storage.StorageLevel @@ -17,11 +17,20 @@ object KafkaWordCount { // Create a NetworkInputDStream on target ip:port and count the // words in 
input stream of \n delimited test (eg. generated by 'nc') - val lines = ssc.kafkaStream[String](args(1), args(2).toInt, "test_group") + ssc.checkpoint("checkpoint", Time(1000 * 5)) + val lines = ssc.kafkaStream[String](args(1), args(2).toInt, "test_group", Map("test" -> 1), + Map(KafkaPartitionKey(0, "test", "test_group", 0) -> 2382)) val words = lines.flatMap(_.split(" ")) val wordCounts = words.map(x => (x, 1)).reduceByKey(_ + _) wordCounts.print() ssc.start() + // Wait for 12 seconds + Thread.sleep(12000) + ssc.stop() + + val newSsc = new StreamingContext("checkpoint") + newSsc.start() + } } diff --git a/streaming/src/main/scala/spark/streaming/input/KafkaInputDStream.scala b/streaming/src/main/scala/spark/streaming/input/KafkaInputDStream.scala index 427f398237..814f2706d6 100644 --- a/streaming/src/main/scala/spark/streaming/input/KafkaInputDStream.scala +++ b/streaming/src/main/scala/spark/streaming/input/KafkaInputDStream.scala @@ -1,121 +1,164 @@ package spark.streaming +import java.lang.reflect.Method import java.nio.ByteBuffer import java.util.Properties -import java.util.concurrent.{ArrayBlockingQueue, Executors} +import java.util.concurrent.{ArrayBlockingQueue, ConcurrentHashMap, Executors} import kafka.api.{FetchRequest} -import kafka.consumer.{Consumer, ConsumerConfig, KafkaStream} -import kafka.javaapi.consumer.SimpleConsumer -import kafka.javaapi.message.ByteBufferMessageSet +import kafka.consumer._ +import kafka.cluster.Partition import kafka.message.{Message, MessageSet, MessageAndMetadata} -import kafka.utils.Utils +import kafka.serializer.StringDecoder +import kafka.utils.{Pool, Utils, ZKGroupTopicDirs} +import kafka.utils.ZkUtils._ +import scala.collection.mutable.HashMap import scala.collection.JavaConversions._ import spark._ import spark.RDD import spark.storage.StorageLevel +case class KafkaPartitionKey(brokerId: Int, topic: String, groupId: String, partId: Int) +case class KafkaInputDStreamMetadata(timestamp: Long, data: Map[KafkaPartitionKey, Long]) +case class KafkaDStreamCheckpointData(kafkaRdds: HashMap[Time, Any], + savedOffsets: HashMap[Long, Map[KafkaPartitionKey, Long]]) extends DStreamCheckpointData(kafkaRdds) + /** - * An input stream that pulls messages form a Kafka Broker. + * Input stream that pulls messages form a Kafka Broker. 
*/ class KafkaInputDStream[T: ClassManifest]( @transient ssc_ : StreamingContext, host: String, port: Int, groupId: String, - storageLevel: StorageLevel, - timeout: Int = 10000, - bufferSize: Int = 1024000 + topics: Map[String, Int], + initialOffsets: Map[KafkaPartitionKey, Long], + storageLevel: StorageLevel ) extends NetworkInputDStream[T](ssc_ ) with Logging { + var savedOffsets = HashMap[Long, Map[KafkaPartitionKey, Long]]() + + override protected[streaming] def addMetadata(metadata: Any) { + metadata match { + case x : KafkaInputDStreamMetadata => + savedOffsets(x.timestamp) = x.data + logInfo("Saved Offsets: " + savedOffsets) + case _ => logInfo("Received unknown metadata: " + metadata.toString) + } + } + + override protected[streaming] def updateCheckpointData(currentTime: Time) { + super.updateCheckpointData(currentTime) + logInfo("Updating KafkaDStream checkpoint data: " + savedOffsets.toString) + checkpointData = KafkaDStreamCheckpointData(checkpointData.rdds, savedOffsets) + } + + override protected[streaming] def restoreCheckpointData() { + super.restoreCheckpointData() + logInfo("Restoring KafkaDStream checkpoint data.") + checkpointData match { + case x : KafkaDStreamCheckpointData => + savedOffsets = x.savedOffsets + logInfo("Restored KafkaDStream offsets: " + savedOffsets.toString) + } + } + def createReceiver(): NetworkReceiver[T] = { - new KafkaReceiver(id, host, port, storageLevel, groupId, timeout).asInstanceOf[NetworkReceiver[T]] + new KafkaReceiver(id, host, port, groupId, topics, initialOffsets, storageLevel) + .asInstanceOf[NetworkReceiver[T]] } } -class KafkaReceiver(streamId: Int, host: String, port: Int, storageLevel: StorageLevel, groupId: String, timeout: Int) - extends NetworkReceiver[Any](streamId) { +class KafkaReceiver(streamId: Int, host: String, port: Int, groupId: String, + topics: Map[String, Int], initialOffsets: Map[KafkaPartitionKey, Long], + storageLevel: StorageLevel) extends NetworkReceiver[Any](streamId) { - //var executorPool : = null - var blockPushingThread : Thread = null + // Timeout for establishing a connection to Zookeper in ms. + val ZK_TIMEOUT = 10000 + + // Handles pushing data into the BlockManager + lazy protected val dataHandler = new KafkaDataHandler(this, storageLevel) + // Keeps track of the current offsets. 
Maps from (topic, partitionID) -> Offset + lazy val offsets = HashMap[KafkaPartitionKey, Long]() + // Connection to Kafka + var consumerConnector : ZookeeperConsumerConnector = null def onStop() { - blockPushingThread.interrupt() + dataHandler.stop() } def onStart() { - val executorPool = Executors.newFixedThreadPool(2) + // Starting the DataHandler that buffers blocks and pushes them into them BlockManager + dataHandler.start() - logInfo("Starting Kafka Consumer with groupId " + groupId) + // In case we are using multiple Threads to handle Kafka Messages + val executorPool = Executors.newFixedThreadPool(topics.values.reduce(_ + _)) val zooKeeperEndPoint = host + ":" + port + logInfo("Starting Kafka Consumer Stream in group " + groupId) + logInfo("Initial offsets: " + initialOffsets.toString) logInfo("Connecting to " + zooKeeperEndPoint) - - // Specify some consumer properties + // Specify some Consumer properties val props = new Properties() props.put("zk.connect", zooKeeperEndPoint) - props.put("zk.connectiontimeout.ms", timeout.toString) + props.put("zk.connectiontimeout.ms", ZK_TIMEOUT.toString) props.put("groupid", groupId) // Create the connection to the cluster val consumerConfig = new ConsumerConfig(props) - val consumerConnector = Consumer.create(consumerConfig) - logInfo("Connected to " + zooKeeperEndPoint) - logInfo("") - logInfo("") + consumerConnector = Consumer.create(consumerConfig).asInstanceOf[ZookeeperConsumerConnector] - // Specify which topics we are listening to - val topicCountMap = Map("test" -> 2) - val topicMessageStreams = consumerConnector.createMessageStreams(topicCountMap) - val streams = topicMessageStreams.get("test") + // Reset the Kafka offsets in case we are recovering from a failure + resetOffsets(initialOffsets) - // Queue that holds the blocks - val queue = new ArrayBlockingQueue[ByteBuffer](2) + logInfo("Connected to " + zooKeeperEndPoint) - streams.getOrElse(Nil).foreach { stream => - executorPool.submit(new MessageHandler(stream, queue)) + // Create Threads for each Topic/Message Stream we are listening + val topicMessageStreams = consumerConnector.createMessageStreams(topics, new StringDecoder()) + + topicMessageStreams.values.foreach { streams => + streams.foreach { stream => executorPool.submit(new MessageHandler(stream)) } } - blockPushingThread = new DaemonThread { - override def run() { - logInfo("Starting BlockPushingThread.") - var nextBlockNumber = 0 - while (true) { - val buffer = queue.take() - val blockId = "input-" + streamId + "-" + nextBlockNumber - nextBlockNumber += 1 - pushBlock(blockId, buffer, storageLevel) - } - } - } - blockPushingThread.start() - - // while (true) { - // // Create a fetch request for topic “test”, partition 0, current offset, and fetch size of 1MB - // val fetchRequest = new FetchRequest("test", 0, offset, 1000000) - - // // get the message set from the consumer and print them out - // val messages = consumer.fetch(fetchRequest) - // for(msg <- messages.iterator) { - // logInfo("consumed: " + Utils.toString(msg.message.payload, "UTF-8")) - // // advance the offset after consuming each message - // offset = msg.offset - // queue.put(msg.message.payload) - // } - // } } - class MessageHandler(stream: KafkaStream[Message], queue: ArrayBlockingQueue[ByteBuffer]) extends Runnable { + // Overwrites the offets in Zookeper. 
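// For context: each per-partition consumer offset that resetOffsets (below) rewrites
// lives under the consumer group's subtree in Zookeeper. A hypothetical sketch of the
// path construction (assumed layout; ZKGroupTopicDirs plus the brokerId-partId node
// name used in the patch produce an equivalent path):
object ConsumerOffsetPathSketch {
  def consumerOffsetPath(groupId: String, topic: String, brokerId: Int, partId: Int): String =
    "/consumers/" + groupId + "/offsets/" + topic + "/" + brokerId + "-" + partId

  def main(args: Array[String]) {
    // Writing "2382" to this node would make the high-level consumer resume from offset 2382.
    println(consumerOffsetPath("test_group", "test", 0, 0))  // /consumers/test_group/offsets/test/0-0
  }
}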
+ private def resetOffsets(offsets: Map[KafkaPartitionKey, Long]) { + offsets.foreach { case(key, offset) => + val topicDirs = new ZKGroupTopicDirs(key.groupId, key.topic) + val partitionName = key.brokerId + "-" + key.partId + updatePersistentPath(consumerConnector.zkClient, + topicDirs.consumerOffsetDir + "/" + partitionName, offset.toString) + } + } + + // Responsible for handling Kafka Messages + class MessageHandler(stream: KafkaStream[String]) extends Runnable { def run() { logInfo("Starting MessageHandler.") - while(true) { - stream.foreach { msgAndMetadata => - logInfo("Consumed: " + Utils.toString(msgAndMetadata.message.payload, "UTF-8")) - queue.put(msgAndMetadata.message.payload) - } - } + stream.takeWhile { msgAndMetadata => + dataHandler += msgAndMetadata.message + + // Updating the offet. The key is (topic, partitionID). + val key = KafkaPartitionKey(msgAndMetadata.topicInfo.brokerId, msgAndMetadata.topic, + groupId, msgAndMetadata.topicInfo.partition.partId) + val offset = msgAndMetadata.topicInfo.getConsumeOffset + offsets.put(key, offset) + logInfo((key, offset).toString) + + // Keep on handling messages + true + } } } + class KafkaDataHandler(receiver: KafkaReceiver, storageLevel: StorageLevel) + extends DataHandler[Any](receiver, storageLevel) { + + override def createBlock(blockId: String, iterator: Iterator[Any]) : Block = { + new Block(blockId, iterator, KafkaInputDStreamMetadata(System.currentTimeMillis, offsets.toMap)) + } + + } } From 355c8e4b17cc3e67b1e18cc24e74d88416b5779b Mon Sep 17 00:00:00 2001 From: Tathagata Das Date: Fri, 9 Nov 2012 16:28:45 -0800 Subject: [PATCH 104/226] Fixed deadlock in BlockManager. --- .../scala/spark/storage/BlockManager.scala | 111 ++++++++---------- .../scala/spark/storage/MemoryStore.scala | 79 ++++++++----- 2 files changed, 101 insertions(+), 89 deletions(-) diff --git a/core/src/main/scala/spark/storage/BlockManager.scala b/core/src/main/scala/spark/storage/BlockManager.scala index bd9155ef29..8c7b1417be 100644 --- a/core/src/main/scala/spark/storage/BlockManager.scala +++ b/core/src/main/scala/spark/storage/BlockManager.scala @@ -50,16 +50,6 @@ private[spark] case class BlockException(blockId: String, message: String, ex: Exception = null) extends Exception(message) - -private[spark] class BlockLocker(numLockers: Int) { - private val hashLocker = Array.fill(numLockers)(new Object()) - - def getLock(blockId: String): Object = { - return hashLocker(math.abs(blockId.hashCode % numLockers)) - } -} - - private[spark] class BlockManager(val master: BlockManagerMaster, val serializer: Serializer, maxMemory: Long) extends Logging { @@ -87,10 +77,7 @@ class BlockManager(val master: BlockManagerMaster, val serializer: Serializer, m } } - private val NUM_LOCKS = 337 - private val locker = new BlockLocker(NUM_LOCKS) - - private val blockInfo = new ConcurrentHashMap[String, BlockInfo]() + private val blockInfo = new ConcurrentHashMap[String, BlockInfo](1000) private[storage] val memoryStore: BlockStore = new MemoryStore(this, maxMemory) private[storage] val diskStore: BlockStore = @@ -110,7 +97,9 @@ class BlockManager(val master: BlockManagerMaster, val serializer: Serializer, m val maxBytesInFlight = System.getProperty("spark.reducer.maxMbInFlight", "48").toLong * 1024 * 1024 + // Whether to compress broadcast variables that are stored val compressBroadcast = System.getProperty("spark.broadcast.compress", "true").toBoolean + // Whether to compress shuffle output that are stored val compressShuffle = 
System.getProperty("spark.shuffle.compress", "true").toBoolean // Whether to compress RDD partitions that are stored serialized val compressRdds = System.getProperty("spark.rdd.compress", "false").toBoolean @@ -150,28 +139,27 @@ class BlockManager(val master: BlockManagerMaster, val serializer: Serializer, m * For example, a block with MEMORY_AND_DISK set might have fallen out to be only on disk. */ def reportBlockStatus(blockId: String) { - locker.getLock(blockId).synchronized { - val curLevel = blockInfo.get(blockId) match { - case null => - StorageLevel.NONE - case info => + val (curLevel, inMemSize, onDiskSize) = blockInfo.get(blockId) match { + case null => + (StorageLevel.NONE, 0L, 0L) + case info => + info.synchronized { info.level match { case null => - StorageLevel.NONE + (StorageLevel.NONE, 0L, 0L) case level => val inMem = level.useMemory && memoryStore.contains(blockId) val onDisk = level.useDisk && diskStore.contains(blockId) - new StorageLevel(onDisk, inMem, level.deserialized, level.replication) + ( + new StorageLevel(onDisk, inMem, level.deserialized, level.replication), + if (inMem) memoryStore.getSize(blockId) else 0L, + if (onDisk) diskStore.getSize(blockId) else 0L + ) } - } - master.mustHeartBeat(HeartBeat( - blockManagerId, - blockId, - curLevel, - if (curLevel.useMemory) memoryStore.getSize(blockId) else 0L, - if (curLevel.useDisk) diskStore.getSize(blockId) else 0L)) - logDebug("Told master about block " + blockId) + } } + master.mustHeartBeat(HeartBeat(blockManagerId, blockId, curLevel, inMemSize, onDiskSize)) + logDebug("Told master about block " + blockId) } /** @@ -213,9 +201,9 @@ class BlockManager(val master: BlockManagerMaster, val serializer: Serializer, m } } - locker.getLock(blockId).synchronized { - val info = blockInfo.get(blockId) - if (info != null) { + val info = blockInfo.get(blockId) + if (info != null) { + info.synchronized { info.waitForReady() // In case the block is still being put() by another thread val level = info.level logDebug("Level for block " + blockId + " is " + level) @@ -273,9 +261,9 @@ class BlockManager(val master: BlockManagerMaster, val serializer: Serializer, m } } } - } else { - logDebug("Block " + blockId + " not registered locally") } + } else { + logDebug("Block " + blockId + " not registered locally") } return None } @@ -298,9 +286,9 @@ class BlockManager(val master: BlockManagerMaster, val serializer: Serializer, m } } - locker.getLock(blockId).synchronized { - val info = blockInfo.get(blockId) - if (info != null) { + val info = blockInfo.get(blockId) + if (info != null) { + info.synchronized { info.waitForReady() // In case the block is still being put() by another thread val level = info.level logDebug("Level for block " + blockId + " is " + level) @@ -338,10 +326,11 @@ class BlockManager(val master: BlockManagerMaster, val serializer: Serializer, m throw new Exception("Block " + blockId + " not found on disk, though it should be") } } - } else { - logDebug("Block " + blockId + " not registered locally") } + } else { + logDebug("Block " + blockId + " not registered locally") } + return None } @@ -583,7 +572,7 @@ class BlockManager(val master: BlockManagerMaster, val serializer: Serializer, m // Size of the block in bytes (to return to caller) var size = 0L - locker.getLock(blockId).synchronized { + myInfo.synchronized { logDebug("Put for block " + blockId + " took " + Utils.getUsedTimeMs(startTimeMs) + " to get into synchronized block") @@ -681,7 +670,7 @@ class BlockManager(val master: BlockManagerMaster, val 
serializer: Serializer, m null } - locker.getLock(blockId).synchronized { + myInfo.synchronized { logDebug("PutBytes for block " + blockId + " took " + Utils.getUsedTimeMs(startTimeMs) + " to get into synchronized block") @@ -779,26 +768,30 @@ class BlockManager(val master: BlockManagerMaster, val serializer: Serializer, m */ def dropFromMemory(blockId: String, data: Either[ArrayBuffer[Any], ByteBuffer]) { logInfo("Dropping block " + blockId + " from memory") - locker.getLock(blockId).synchronized { - val info = blockInfo.get(blockId) - val level = info.level - if (level.useDisk && !diskStore.contains(blockId)) { - logInfo("Writing block " + blockId + " to disk") - data match { - case Left(elements) => - diskStore.putValues(blockId, elements, level, false) - case Right(bytes) => - diskStore.putBytes(blockId, bytes, level) + val info = blockInfo.get(blockId) + if (info != null) { + info.synchronized { + val level = info.level + if (level.useDisk && !diskStore.contains(blockId)) { + logInfo("Writing block " + blockId + " to disk") + data match { + case Left(elements) => + diskStore.putValues(blockId, elements, level, false) + case Right(bytes) => + diskStore.putBytes(blockId, bytes, level) + } + } + memoryStore.remove(blockId) + if (info.tellMaster) { + reportBlockStatus(blockId) + } + if (!level.useDisk) { + // The block is completely gone from this node; forget it so we can put() it again later. + blockInfo.remove(blockId) } } - memoryStore.remove(blockId) - if (info.tellMaster) { - reportBlockStatus(blockId) - } - if (!level.useDisk) { - // The block is completely gone from this node; forget it so we can put() it again later. - blockInfo.remove(blockId) - } + } else { + // The block has already been dropped } } diff --git a/core/src/main/scala/spark/storage/MemoryStore.scala b/core/src/main/scala/spark/storage/MemoryStore.scala index 773970446a..09769d1f7d 100644 --- a/core/src/main/scala/spark/storage/MemoryStore.scala +++ b/core/src/main/scala/spark/storage/MemoryStore.scala @@ -17,13 +17,16 @@ private class MemoryStore(blockManager: BlockManager, maxMemory: Long) private val entries = new LinkedHashMap[String, Entry](32, 0.75f, true) private var currentMemory = 0L + // Object used to ensure that only one thread is putting blocks and if necessary, dropping + // blocks from the memory store. 
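// The comment above describes the key change in MemoryStore: rather than holding one
// lock on the whole store for the duration of a put, a dedicated putLock serializes
// the put/evict decision while the entries map itself is locked only for short reads
// and writes. A condensed, hypothetical illustration of that pattern (TinyStore is not
// Spark code; eviction is omitted):
class TinyStore(maxMemory: Long) {
  private case class Entry(value: Any, size: Long)
  private val entries = new java.util.LinkedHashMap[String, Entry]()
  private var currentMemory = 0L
  private val putLock = new Object()  // only one thread decides on space/eviction at a time

  def put(id: String, value: Any, size: Long): Boolean = putLock.synchronized {
    if (currentMemory + size > maxMemory) {
      false  // a real store would try to drop old entries here before giving up
    } else {
      entries.synchronized { entries.put(id, Entry(value, size)) }
      currentMemory += size
      true
    }
  }

  def get(id: String): Option[Any] =
    entries.synchronized { Option(entries.get(id)).map(_.value) }
}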
+ private val putLock = new Object() logInfo("MemoryStore started with capacity %s.".format(Utils.memoryBytesToString(maxMemory))) def freeMemory: Long = maxMemory - currentMemory override def getSize(blockId: String): Long = { - synchronized { + entries.synchronized { entries.get(blockId).size } } @@ -38,8 +41,6 @@ private class MemoryStore(blockManager: BlockManager, maxMemory: Long) tryToPut(blockId, elements, sizeEstimate, true) } else { val entry = new Entry(bytes, bytes.limit, false) - ensureFreeSpace(blockId, bytes.limit) - synchronized { entries.put(blockId, entry) } tryToPut(blockId, bytes, bytes.limit, false) } } @@ -63,7 +64,7 @@ private class MemoryStore(blockManager: BlockManager, maxMemory: Long) } override def getBytes(blockId: String): Option[ByteBuffer] = { - val entry = synchronized { + val entry = entries.synchronized { entries.get(blockId) } if (entry == null) { @@ -76,7 +77,7 @@ private class MemoryStore(blockManager: BlockManager, maxMemory: Long) } override def getValues(blockId: String): Option[Iterator[Any]] = { - val entry = synchronized { + val entry = entries.synchronized { entries.get(blockId) } if (entry == null) { @@ -90,7 +91,7 @@ private class MemoryStore(blockManager: BlockManager, maxMemory: Long) } override def remove(blockId: String) { - synchronized { + entries.synchronized { val entry = entries.get(blockId) if (entry != null) { entries.remove(blockId) @@ -104,7 +105,7 @@ private class MemoryStore(blockManager: BlockManager, maxMemory: Long) } override def clear() { - synchronized { + entries.synchronized { entries.clear() } logInfo("MemoryStore cleared") @@ -125,12 +126,22 @@ private class MemoryStore(blockManager: BlockManager, maxMemory: Long) * Try to put in a set of values, if we can free up enough space. The value should either be * an ArrayBuffer if deserialized is true or a ByteBuffer otherwise. Its (possibly estimated) * size must also be passed by the caller. + * + * Locks on the object putLock to ensure that all the put requests and its associated block + * dropping is done by only on thread at a time. Otherwise while one thread is dropping + * blocks to free memory for one block, another thread may use up the freed space for + * another block. */ private def tryToPut(blockId: String, value: Any, size: Long, deserialized: Boolean): Boolean = { - synchronized { + // TODO: Its possible to optimize the locking by locking entries only when selecting blocks + // to be dropped. Once the to-be-dropped blocks have been selected, and lock on entries has been + // released, it must be ensured that those to-be-dropped blocks are not double counted for + // freeing up more space for another block that needs to be put. Only then the actually dropping + // of blocks (and writing to disk if necessary) can proceed in parallel. + putLock.synchronized { if (ensureFreeSpace(blockId, size)) { val entry = new Entry(value, size, deserialized) - entries.put(blockId, entry) + entries.synchronized { entries.put(blockId, entry) } currentMemory += size if (deserialized) { logInfo("Block %s stored as values to memory (estimated size %s, free %s)".format( @@ -160,8 +171,8 @@ private class MemoryStore(blockManager: BlockManager, maxMemory: Long) * block from the same RDD (which leads to a wasteful cyclic replacement pattern for RDDs that * don't fit into memory that we want to avoid). * - * Assumes that a lock on the MemoryStore is held by the caller. (Otherwise, the freed space - * might fill up before the caller puts in their new value.) 
+ * Assumes that a lock is held by the caller to ensure only one thread is dropping blocks. + * Otherwise, the freed space may fill up before the caller puts in their new value. */ private def ensureFreeSpace(blockIdToAdd: String, space: Long): Boolean = { logInfo("ensureFreeSpace(%d) called with curMem=%d, maxMem=%d".format( @@ -172,36 +183,44 @@ private class MemoryStore(blockManager: BlockManager, maxMemory: Long) return false } - // TODO: This should relinquish the lock on the MemoryStore while flushing out old blocks - // in order to allow parallelism in writing to disk if (maxMemory - currentMemory < space) { val rddToAdd = getRddId(blockIdToAdd) val selectedBlocks = new ArrayBuffer[String]() var selectedMemory = 0L - val iterator = entries.entrySet().iterator() - while (maxMemory - (currentMemory - selectedMemory) < space && iterator.hasNext) { - val pair = iterator.next() - val blockId = pair.getKey - if (rddToAdd != null && rddToAdd == getRddId(blockId)) { - logInfo("Will not store " + blockIdToAdd + " as it would require dropping another " + - "block from the same RDD") - return false + // This is synchronized to ensure that the set of entries is not changed + // (because of getValue or getBytes) while traversing the iterator, as that + // can lead to exceptions. + entries.synchronized { + val iterator = entries.entrySet().iterator() + while (maxMemory - (currentMemory - selectedMemory) < space && iterator.hasNext) { + val pair = iterator.next() + val blockId = pair.getKey + if (rddToAdd != null && rddToAdd == getRddId(blockId)) { + logInfo("Will not store " + blockIdToAdd + " as it would require dropping another " + + "block from the same RDD") + return false + } + selectedBlocks += blockId + selectedMemory += pair.getValue.size } - selectedBlocks += blockId - selectedMemory += pair.getValue.size } if (maxMemory - (currentMemory - selectedMemory) >= space) { logInfo(selectedBlocks.size + " blocks selected for dropping") for (blockId <- selectedBlocks) { - val entry = entries.get(blockId) - val data = if (entry.deserialized) { - Left(entry.value.asInstanceOf[ArrayBuffer[Any]]) - } else { - Right(entry.value.asInstanceOf[ByteBuffer].duplicate()) + val entry = entries.synchronized { entries.get(blockId) } + // This should never be null as only one thread should be dropping + // blocks and removing entries. However the check is still here for + // future safety. + if (entry != null) { + val data = if (entry.deserialized) { + Left(entry.value.asInstanceOf[ArrayBuffer[Any]]) + } else { + Right(entry.value.asInstanceOf[ByteBuffer].duplicate()) + } + blockManager.dropFromMemory(blockId, data) } - blockManager.dropFromMemory(blockId, data) } return true } else { @@ -212,7 +231,7 @@ private class MemoryStore(blockManager: BlockManager, maxMemory: Long) } override def contains(blockId: String): Boolean = { - synchronized { entries.containsKey(blockId) } + entries.synchronized { entries.containsKey(blockId) } } } From 04e9e9d93c512f856116bc2c99c35dfb48b4adee Mon Sep 17 00:00:00 2001 From: Tathagata Das Date: Sun, 11 Nov 2012 08:54:21 -0800 Subject: [PATCH 105/226] Refactored BlockManagerMaster (not BlockManagerMasterActor) to simplify the code and fix live lock problem in unlimited attempts to contact the master. Also added testcases in the BlockManagerSuite to test BlockManagerMaster methods getPeers and getLocations. 
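[Editorial sketch] The core of this refactoring is a bounded-retry request helper: instead of retrying a master request forever (the live-lock mentioned above), the new BlockManagerMaster gives up after a fixed number of attempts and rethrows the last failure. Below is a minimal, self-contained Scala sketch of that pattern; the name retryAsk, the constants, and the main method are illustrative only and are not part of the actual BlockManagerMaster API. The real ask[T] helper appears in the diff that follows.

object RetryAskSketch {
  // Retry a request a bounded number of times, sleeping between attempts,
  // and surface the last failure instead of looping forever.
  def retryAsk[T](maxAttempts: Int, retryIntervalMs: Long)(request: => T): T = {
    var attempts = 0
    var lastException: Exception = null
    while (attempts < maxAttempts) {
      attempts += 1
      try {
        val result = request
        if (result == null) {
          throw new Exception("request returned null")
        }
        return result
      } catch {
        case ie: InterruptedException =>
          throw ie                        // never swallow interrupts
        case e: Exception =>
          lastException = e
          Thread.sleep(retryIntervalMs)
      }
    }
    throw new RuntimeException(
      "Request failed after " + maxAttempts + " attempts", lastException)
  }

  def main(args: Array[String]) {
    // A request that fails twice and then succeeds.
    var calls = 0
    val answer = retryAsk(5, 10) {
      calls += 1
      if (calls < 3) throw new RuntimeException("transient failure") else "ok"
    }
    println(answer + " after " + calls + " calls")   // prints: ok after 3 calls
  }
}
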
--- .../scala/spark/storage/BlockManager.scala | 14 +- .../spark/storage/BlockManagerMaster.scala | 319 +++++++----------- .../spark/storage/BlockManagerSuite.scala | 30 +- 3 files changed, 146 insertions(+), 217 deletions(-) diff --git a/core/src/main/scala/spark/storage/BlockManager.scala b/core/src/main/scala/spark/storage/BlockManager.scala index 8c7b1417be..70d6d8369d 100644 --- a/core/src/main/scala/spark/storage/BlockManager.scala +++ b/core/src/main/scala/spark/storage/BlockManager.scala @@ -120,8 +120,7 @@ class BlockManager(val master: BlockManagerMaster, val serializer: Serializer, m * BlockManagerWorker actor. */ private def initialize() { - master.mustRegisterBlockManager( - RegisterBlockManager(blockManagerId, maxMemory)) + master.registerBlockManager(blockManagerId, maxMemory) BlockManagerWorker.startBlockManagerWorker(this) } @@ -158,7 +157,7 @@ class BlockManager(val master: BlockManagerMaster, val serializer: Serializer, m } } } - master.mustHeartBeat(HeartBeat(blockManagerId, blockId, curLevel, inMemSize, onDiskSize)) + master.updateBlockInfo(blockManagerId, blockId, curLevel, inMemSize, onDiskSize) logDebug("Told master about block " + blockId) } @@ -167,7 +166,7 @@ class BlockManager(val master: BlockManagerMaster, val serializer: Serializer, m */ def getLocations(blockId: String): Seq[String] = { val startTimeMs = System.currentTimeMillis - var managers = master.mustGetLocations(GetLocations(blockId)) + var managers = master.getLocations(blockId) val locations = managers.map(_.ip) logDebug("Get block locations in " + Utils.getUsedTimeMs(startTimeMs)) return locations @@ -178,8 +177,7 @@ class BlockManager(val master: BlockManagerMaster, val serializer: Serializer, m */ def getLocations(blockIds: Array[String]): Array[Seq[String]] = { val startTimeMs = System.currentTimeMillis - val locations = master.mustGetLocationsMultipleBlockIds( - GetLocationsMultipleBlockIds(blockIds)).map(_.map(_.ip).toSeq).toArray + val locations = master.getLocations(blockIds).map(_.map(_.ip).toSeq).toArray logDebug("Get multiple block location in " + Utils.getUsedTimeMs(startTimeMs)) return locations } @@ -343,7 +341,7 @@ class BlockManager(val master: BlockManagerMaster, val serializer: Serializer, m } logDebug("Getting remote block " + blockId) // Get locations of block - val locations = master.mustGetLocations(GetLocations(blockId)) + val locations = master.getLocations(blockId) // Get block from remote locations for (loc <- locations) { @@ -721,7 +719,7 @@ class BlockManager(val master: BlockManagerMaster, val serializer: Serializer, m val tLevel: StorageLevel = new StorageLevel(level.useDisk, level.useMemory, level.deserialized, 1) if (cachedPeers == null) { - cachedPeers = master.mustGetPeers(GetPeers(blockManagerId, level.replication - 1)) + cachedPeers = master.getPeers(blockManagerId, level.replication - 1) } for (peer: BlockManagerId <- cachedPeers) { val start = System.nanoTime diff --git a/core/src/main/scala/spark/storage/BlockManagerMaster.scala b/core/src/main/scala/spark/storage/BlockManagerMaster.scala index b3345623b3..4d5ee8318c 100644 --- a/core/src/main/scala/spark/storage/BlockManagerMaster.scala +++ b/core/src/main/scala/spark/storage/BlockManagerMaster.scala @@ -26,7 +26,7 @@ case class RegisterBlockManager( extends ToBlockManagerMaster private[spark] -class HeartBeat( +class UpdateBlockInfo( var blockManagerId: BlockManagerId, var blockId: String, var storageLevel: StorageLevel, @@ -57,17 +57,17 @@ class HeartBeat( } private[spark] -object HeartBeat { +object 
UpdateBlockInfo { def apply(blockManagerId: BlockManagerId, blockId: String, storageLevel: StorageLevel, memSize: Long, - diskSize: Long): HeartBeat = { - new HeartBeat(blockManagerId, blockId, storageLevel, memSize, diskSize) + diskSize: Long): UpdateBlockInfo = { + new UpdateBlockInfo(blockManagerId, blockId, storageLevel, memSize, diskSize) } // For pattern-matching - def unapply(h: HeartBeat): Option[(BlockManagerId, String, StorageLevel, Long, Long)] = { + def unapply(h: UpdateBlockInfo): Option[(BlockManagerId, String, StorageLevel, Long, Long)] = { Some((h.blockManagerId, h.blockId, h.storageLevel, h.memSize, h.diskSize)) } } @@ -182,8 +182,8 @@ private[spark] class BlockManagerMasterActor(val isLocal: Boolean) extends Actor case RegisterBlockManager(blockManagerId, maxMemSize) => register(blockManagerId, maxMemSize) - case HeartBeat(blockManagerId, blockId, storageLevel, deserializedSize, size) => - heartBeat(blockManagerId, blockId, storageLevel, deserializedSize, size) + case UpdateBlockInfo(blockManagerId, blockId, storageLevel, deserializedSize, size) => + updateBlockInfo(blockManagerId, blockId, storageLevel, deserializedSize, size) case GetLocations(blockId) => getLocations(blockId) @@ -233,7 +233,7 @@ private[spark] class BlockManagerMasterActor(val isLocal: Boolean) extends Actor sender ! true } - private def heartBeat( + private def updateBlockInfo( blockManagerId: BlockManagerId, blockId: String, storageLevel: StorageLevel, @@ -245,7 +245,7 @@ private[spark] class BlockManagerMasterActor(val isLocal: Boolean) extends Actor if (blockId == null) { blockManagerInfo(blockManagerId).updateLastSeenMs() - logDebug("Got in heartBeat 1" + tmp + " used " + Utils.getUsedTimeMs(startTimeMs)) + logDebug("Got in updateBlockInfo 1" + tmp + " used " + Utils.getUsedTimeMs(startTimeMs)) sender ! true } @@ -350,211 +350,124 @@ private[spark] class BlockManagerMasterActor(val isLocal: Boolean) extends Actor private[spark] class BlockManagerMaster(actorSystem: ActorSystem, isMaster: Boolean, isLocal: Boolean) extends Logging { - val AKKA_ACTOR_NAME: String = "BlockMasterManager" - val REQUEST_RETRY_INTERVAL_MS = 100 - val DEFAULT_MASTER_IP: String = System.getProperty("spark.master.host", "localhost") - val DEFAULT_MASTER_PORT: Int = System.getProperty("spark.master.port", "7077").toInt - val DEFAULT_MANAGER_IP: String = Utils.localHostName() - val DEFAULT_MANAGER_PORT: String = "10902" - + val actorName = "BlockMasterManager" val timeout = 10.seconds - var masterActor: ActorRef = null + val maxAttempts = 5 - if (isMaster) { - masterActor = actorSystem.actorOf( - Props(new BlockManagerMasterActor(isLocal)), name = AKKA_ACTOR_NAME) + var masterActor = if (isMaster) { + val actor = actorSystem.actorOf(Props(new BlockManagerMasterActor(isLocal)), name = actorName) logInfo("Registered BlockManagerMaster Actor") + actor } else { - val url = "akka://spark@%s:%s/user/%s".format( - DEFAULT_MASTER_IP, DEFAULT_MASTER_PORT, AKKA_ACTOR_NAME) + val host = System.getProperty("spark.master.host", "localhost") + val port = System.getProperty("spark.master.port", "7077").toInt + val url = "akka://spark@%s:%s/user/%s".format(host, port, actorName) + val actor = actorSystem.actorFor(url) logInfo("Connecting to BlockManagerMaster: " + url) - masterActor = actorSystem.actorFor(url) + actor } + /** + * Send a message to the master actor and get its result within a default timeout, or + * throw a SparkException if this fails. 
+ */ + private def ask[T](message: Any): T = { + // TODO: Consider removing multiple attempts + if (masterActor == null) { + throw new SparkException("Error sending message to BlockManager as masterActor is null " + + "[message = " + message + "]") + } + var attempts = 0 + var lastException: Exception = null + while (attempts < maxAttempts) { + attempts += 1 + try { + val future = masterActor.ask(message)(timeout) + val result = Await.result(future, timeout) + if (result == null) { + throw new Exception("BlockManagerMaster returned null") + } + return result.asInstanceOf[T] + } catch { + case ie: InterruptedException => + throw ie + case e: Exception => + lastException = e + logWarning( + "Error sending message to BlockManagerMaster in " + attempts + " attempts", e) + } + Thread.sleep(100) + } + throw new SparkException( + "Error sending message to BlockManagerMaster [message = " + message + "]", lastException) + } + + /** + * Send a one-way message to the master actor, to which we expect it to reply with true + */ + private def tell(message: Any) { + if (!ask[Boolean](message)) { + throw new SparkException("Telling master a message returned false") + } + } + + /** + * Register the BlockManager's id with the master + */ + def registerBlockManager(blockManagerId: BlockManagerId, maxMemSize: Long) { + logInfo("Trying to register BlockManager") + tell(RegisterBlockManager(blockManagerId, maxMemSize)) + logInfo("Registered BlockManager") + } + + def updateBlockInfo( + blockManagerId: BlockManagerId, + blockId: String, + storageLevel: StorageLevel, + memSize: Long, + diskSize: Long + ) { + tell(UpdateBlockInfo(blockManagerId, blockId, storageLevel, memSize, diskSize)) + logInfo("Updated info of block " + blockId) + } + + /** Get locations of the blockId from the master */ + def getLocations(blockId: String): Seq[BlockManagerId] = { + ask[Seq[BlockManagerId]](GetLocations(blockId)) + } + + /** Get locations of multiple blockIds from the master */ + def getLocations(blockIds: Array[String]): Seq[Seq[BlockManagerId]] = { + ask[Seq[Seq[BlockManagerId]]](GetLocationsMultipleBlockIds(blockIds)) + } + + /** Get ids of other nodes in the cluster from the master */ + def getPeers(blockManagerId: BlockManagerId, numPeers: Int): Seq[BlockManagerId] = { + val result = ask[Seq[BlockManagerId]](GetPeers(blockManagerId, numPeers)) + if (result.length != numPeers) { + throw new SparkException( + "Error getting peers, only got " + result.size + " instead of " + numPeers) + } + result + } + + /** Notify the master of a dead node */ + def notifyADeadHost(host: String) { + tell(RemoveHost(host + ":10902")) + logInfo("Told BlockManagerMaster to remove dead host " + host) + } + + /** Get the memory status form the master */ + def getMemoryStatus(): Map[BlockManagerId, (Long, Long)] = { + ask[Map[BlockManagerId, (Long, Long)]](GetMemoryStatus) + } + + /** Stop the master actor, called only on the Spark master node */ def stop() { if (masterActor != null) { - communicate(StopBlockManagerMaster) + tell(StopBlockManagerMaster) masterActor = null logInfo("BlockManagerMaster stopped") } } - - // Send a message to the master actor and get its result within a default timeout, or - // throw a SparkException if this fails. 
- def askMaster(message: Any): Any = { - try { - val future = masterActor.ask(message)(timeout) - return Await.result(future, timeout) - } catch { - case e: Exception => - throw new SparkException("Error communicating with BlockManagerMaster", e) - } - } - - // Send a one-way message to the master actor, to which we expect it to reply with true. - def communicate(message: Any) { - if (askMaster(message) != true) { - throw new SparkException("Error reply received from BlockManagerMaster") - } - } - - def notifyADeadHost(host: String) { - communicate(RemoveHost(host + ":" + DEFAULT_MANAGER_PORT)) - logInfo("Removed " + host + " successfully in notifyADeadHost") - } - - def mustRegisterBlockManager(msg: RegisterBlockManager) { - logInfo("Trying to register BlockManager") - while (! syncRegisterBlockManager(msg)) { - logWarning("Failed to register " + msg) - Thread.sleep(REQUEST_RETRY_INTERVAL_MS) - } - logInfo("Done registering BlockManager") - } - - def syncRegisterBlockManager(msg: RegisterBlockManager): Boolean = { - //val masterActor = RemoteActor.select(node, name) - val startTimeMs = System.currentTimeMillis() - val tmp = " msg " + msg + " " - logDebug("Got in syncRegisterBlockManager 0 " + tmp + Utils.getUsedTimeMs(startTimeMs)) - - try { - communicate(msg) - logInfo("BlockManager registered successfully @ syncRegisterBlockManager") - logDebug("Got in syncRegisterBlockManager 1 " + tmp + Utils.getUsedTimeMs(startTimeMs)) - return true - } catch { - case e: Exception => - logError("Failed in syncRegisterBlockManager", e) - return false - } - } - - def mustHeartBeat(msg: HeartBeat) { - while (! syncHeartBeat(msg)) { - logWarning("Failed to send heartbeat" + msg) - Thread.sleep(REQUEST_RETRY_INTERVAL_MS) - } - } - - def syncHeartBeat(msg: HeartBeat): Boolean = { - val startTimeMs = System.currentTimeMillis() - val tmp = " msg " + msg + " " - logDebug("Got in syncHeartBeat " + tmp + " 0 " + Utils.getUsedTimeMs(startTimeMs)) - - try { - communicate(msg) - logDebug("Heartbeat sent successfully") - logDebug("Got in syncHeartBeat 1 " + tmp + " 1 " + Utils.getUsedTimeMs(startTimeMs)) - return true - } catch { - case e: Exception => - logError("Failed in syncHeartBeat", e) - return false - } - } - - def mustGetLocations(msg: GetLocations): Seq[BlockManagerId] = { - var res = syncGetLocations(msg) - while (res == null) { - logInfo("Failed to get locations " + msg) - Thread.sleep(REQUEST_RETRY_INTERVAL_MS) - res = syncGetLocations(msg) - } - return res - } - - def syncGetLocations(msg: GetLocations): Seq[BlockManagerId] = { - val startTimeMs = System.currentTimeMillis() - val tmp = " msg " + msg + " " - logDebug("Got in syncGetLocations 0 " + tmp + Utils.getUsedTimeMs(startTimeMs)) - - try { - val answer = askMaster(msg).asInstanceOf[ArrayBuffer[BlockManagerId]] - if (answer != null) { - logDebug("GetLocations successful") - logDebug("Got in syncGetLocations 1 " + tmp + Utils.getUsedTimeMs(startTimeMs)) - return answer - } else { - logError("Master replied null in response to GetLocations") - return null - } - } catch { - case e: Exception => - logError("GetLocations failed", e) - return null - } - } - - def mustGetLocationsMultipleBlockIds(msg: GetLocationsMultipleBlockIds): - Seq[Seq[BlockManagerId]] = { - var res: Seq[Seq[BlockManagerId]] = syncGetLocationsMultipleBlockIds(msg) - while (res == null) { - logWarning("Failed to GetLocationsMultipleBlockIds " + msg) - Thread.sleep(REQUEST_RETRY_INTERVAL_MS) - res = syncGetLocationsMultipleBlockIds(msg) - } - return res - } - - def 
syncGetLocationsMultipleBlockIds(msg: GetLocationsMultipleBlockIds): - Seq[Seq[BlockManagerId]] = { - val startTimeMs = System.currentTimeMillis - val tmp = " msg " + msg + " " - logDebug("Got in syncGetLocationsMultipleBlockIds 0 " + tmp + Utils.getUsedTimeMs(startTimeMs)) - - try { - val answer = askMaster(msg).asInstanceOf[Seq[Seq[BlockManagerId]]] - if (answer != null) { - logDebug("GetLocationsMultipleBlockIds successful") - logDebug("Got in syncGetLocationsMultipleBlockIds 1 " + tmp + - Utils.getUsedTimeMs(startTimeMs)) - return answer - } else { - logError("Master replied null in response to GetLocationsMultipleBlockIds") - return null - } - } catch { - case e: Exception => - logError("GetLocationsMultipleBlockIds failed", e) - return null - } - } - - def mustGetPeers(msg: GetPeers): Seq[BlockManagerId] = { - var res = syncGetPeers(msg) - while ((res == null) || (res.length != msg.size)) { - logInfo("Failed to get peers " + msg) - Thread.sleep(REQUEST_RETRY_INTERVAL_MS) - res = syncGetPeers(msg) - } - - return res - } - - def syncGetPeers(msg: GetPeers): Seq[BlockManagerId] = { - val startTimeMs = System.currentTimeMillis - val tmp = " msg " + msg + " " - logDebug("Got in syncGetPeers 0 " + tmp + Utils.getUsedTimeMs(startTimeMs)) - - try { - val answer = askMaster(msg).asInstanceOf[Seq[BlockManagerId]] - if (answer != null) { - logDebug("GetPeers successful") - logDebug("Got in syncGetPeers 1 " + tmp + Utils.getUsedTimeMs(startTimeMs)) - return answer - } else { - logError("Master replied null in response to GetPeers") - return null - } - } catch { - case e: Exception => - logError("GetPeers failed", e) - return null - } - } - - def getMemoryStatus: Map[BlockManagerId, (Long, Long)] = { - askMaster(GetMemoryStatus).asInstanceOf[Map[BlockManagerId, (Long, Long)]] - } } diff --git a/core/src/test/scala/spark/storage/BlockManagerSuite.scala b/core/src/test/scala/spark/storage/BlockManagerSuite.scala index b9c19e61cd..0e78228134 100644 --- a/core/src/test/scala/spark/storage/BlockManagerSuite.scala +++ b/core/src/test/scala/spark/storage/BlockManagerSuite.scala @@ -20,9 +20,11 @@ class BlockManagerSuite extends FunSuite with BeforeAndAfter with PrivateMethodT var oldOops: String = null // Reuse a serializer across tests to avoid creating a new thread-local buffer on each test + System.setProperty("spark.kryoserializer.buffer.mb", "1") val serializer = new KryoSerializer before { + actorSystem = ActorSystem("test") master = new BlockManagerMaster(actorSystem, true, true) @@ -55,7 +57,7 @@ class BlockManagerSuite extends FunSuite with BeforeAndAfter with PrivateMethodT } } - test("manager-master interaction") { + test("master + 1 manager interaction") { store = new BlockManager(master, serializer, 2000) val a1 = new Array[Byte](400) val a2 = new Array[Byte](400) @@ -72,17 +74,33 @@ class BlockManagerSuite extends FunSuite with BeforeAndAfter with PrivateMethodT assert(store.getSingle("a3") != None, "a3 was not in store") // Checking whether master knows about the blocks or not - assert(master.mustGetLocations(GetLocations("a1")).size > 0, "master was not told about a1") - assert(master.mustGetLocations(GetLocations("a2")).size > 0, "master was not told about a2") - assert(master.mustGetLocations(GetLocations("a3")).size === 0, "master was told about a3") + assert(master.getLocations("a1").size === 1, "master was not told about a1") + assert(master.getLocations("a2").size === 1, "master was not told about a2") + assert(master.getLocations("a3").size === 0, "master was told about a3") // 
Drop a1 and a2 from memory; this should be reported back to the master store.dropFromMemory("a1", null) store.dropFromMemory("a2", null) assert(store.getSingle("a1") === None, "a1 not removed from store") assert(store.getSingle("a2") === None, "a2 not removed from store") - assert(master.mustGetLocations(GetLocations("a1")).size === 0, "master did not remove a1") - assert(master.mustGetLocations(GetLocations("a2")).size === 0, "master did not remove a2") + assert(master.getLocations("a1").size === 0, "master did not remove a1") + assert(master.getLocations("a2").size === 0, "master did not remove a2") + } + + test("master + 2 managers interaction") { + store = new BlockManager(master, serializer, 2000) + val otherStore = new BlockManager(master, new KryoSerializer, 2000) + + val peers = master.getPeers(store.blockManagerId, 1) + assert(peers.size === 1, "master did not return the other manager as a peer") + assert(peers.head === otherStore.blockManagerId, "peer returned by master is not the other manager") + + val a1 = new Array[Byte](400) + val a2 = new Array[Byte](400) + store.putSingle("a1", a1, StorageLevel.MEMORY_ONLY_2) + otherStore.putSingle("a2", a2, StorageLevel.MEMORY_ONLY_2) + assert(master.getLocations("a1").size === 2, "master did not report 2 locations for a1") + assert(master.getLocations("a2").size === 2, "master did not report 2 locations for a2") } test("in-memory LRU storage") { From d006109e9504b3221de3a15f9bfee96dafa8b593 Mon Sep 17 00:00:00 2001 From: Denny Date: Sun, 11 Nov 2012 11:06:49 -0800 Subject: [PATCH 106/226] Kafka Stream comments. --- .../main/scala/spark/streaming/DStream.scala | 7 +- .../spark/streaming/StreamingContext.scala | 12 +++ .../streaming/examples/KafkaWordCount.scala | 44 ++++++----- .../streaming/input/KafkaInputDStream.scala | 77 +++++++++++++------ .../spark/streaming/CheckpointSuite.scala | 12 +-- 5 files changed, 99 insertions(+), 53 deletions(-) diff --git a/streaming/src/main/scala/spark/streaming/DStream.scala b/streaming/src/main/scala/spark/streaming/DStream.scala index 3219919a24..b8324d11a3 100644 --- a/streaming/src/main/scala/spark/streaming/DStream.scala +++ b/streaming/src/main/scala/spark/streaming/DStream.scala @@ -17,6 +17,7 @@ import java.io.{ObjectInputStream, IOException, ObjectOutputStream} import org.apache.hadoop.fs.Path import org.apache.hadoop.conf.Configuration + case class DStreamCheckpointData(rdds: HashMap[Time, Any]) abstract class DStream[T: ClassManifest] (@transient var ssc: StreamingContext) @@ -61,7 +62,7 @@ extends Serializable with Logging { // Checkpoint details protected[streaming] val mustCheckpoint = false protected[streaming] var checkpointInterval: Time = null - protected[streaming] var checkpointData = DStreamCheckpointData(HashMap[Time, Any]()) + protected[streaming] var checkpointData = new DStreamCheckpointData(HashMap[Time, Any]()) // Reference to whole DStream graph protected[streaming] var graph: DStreamGraph = null @@ -286,7 +287,9 @@ extends Serializable with Logging { * This methd should be overwritten by sublcasses of InputDStream. 
*/ protected[streaming] def addMetadata(metadata: Any) { - logInfo("Dropping Metadata: " + metadata.toString) + if (metadata != null) { + logInfo("Dropping Metadata: " + metadata.toString) + } } /** diff --git a/streaming/src/main/scala/spark/streaming/StreamingContext.scala b/streaming/src/main/scala/spark/streaming/StreamingContext.scala index d68d2632e7..e87d0cb7c8 100644 --- a/streaming/src/main/scala/spark/streaming/StreamingContext.scala +++ b/streaming/src/main/scala/spark/streaming/StreamingContext.scala @@ -102,6 +102,18 @@ final class StreamingContext ( private[streaming] def getNewNetworkStreamId() = nextNetworkInputStreamId.getAndIncrement() + /** + * Create an input stream that pulls messages form a Kafka Broker. + * + * @param host Zookeper hostname. + * @param port Zookeper port. + * @param groupId The group id for this consumer. + * @param topics Map of (topic_name -> numPartitions) to consume. Each partition is consumed + * in its own thread. + * @param initialOffsets Optional initial offsets for each of the partitions to consume. + * By default the value is pulled from zookeper. + * @param storageLevel RDD storage level. Defaults to memory-only. + */ def kafkaStream[T: ClassManifest]( hostname: String, port: Int, diff --git a/streaming/src/main/scala/spark/streaming/examples/KafkaWordCount.scala b/streaming/src/main/scala/spark/streaming/examples/KafkaWordCount.scala index 655f9627b3..1e92cbb210 100644 --- a/streaming/src/main/scala/spark/streaming/examples/KafkaWordCount.scala +++ b/streaming/src/main/scala/spark/streaming/examples/KafkaWordCount.scala @@ -3,34 +3,38 @@ package spark.streaming.examples import spark.streaming._ import spark.streaming.StreamingContext._ import spark.storage.StorageLevel +import WordCount2_ExtraFunctions._ object KafkaWordCount { def main(args: Array[String]) { - if (args.length < 2) { - System.err.println("Usage: WordCountNetwork ") + + if (args.length < 4) { + System.err.println("Usage: KafkaWordCount ") System.exit(1) } - // Create the context and set the batch size - val ssc = new StreamingContext(args(0), "WordCountNetwork") - ssc.setBatchDuration(Seconds(2)) + val ssc = args(3) match { + // Restore the stream from a checkpoint + case "true" => + new StreamingContext("work/checkpoint") + case _ => + val tmp = new StreamingContext(args(0), "KafkaWordCount") - // Create a NetworkInputDStream on target ip:port and count the - // words in input stream of \n delimited test (eg. 
generated by 'nc') - ssc.checkpoint("checkpoint", Time(1000 * 5)) - val lines = ssc.kafkaStream[String](args(1), args(2).toInt, "test_group", Map("test" -> 1), - Map(KafkaPartitionKey(0, "test", "test_group", 0) -> 2382)) - val words = lines.flatMap(_.split(" ")) - val wordCounts = words.map(x => (x, 1)).reduceByKey(_ + _) - wordCounts.print() + tmp.setBatchDuration(Seconds(2)) + tmp.checkpoint("work/checkpoint", Seconds(10)) + + val lines = tmp.kafkaStream[String](args(1), args(2).toInt, "test_group", Map("test" -> 1), + Map(KafkaPartitionKey(0,"test","test_group",0) -> 0l)) + val words = lines.flatMap(_.split(" ")) + val wordCounts = words.map(x => (x, 1l)).reduceByKeyAndWindow(add _, subtract _, Minutes(10), Seconds(2), 2) + + wordCounts.persist().checkpoint(Seconds(10)) + wordCounts.print() + + tmp + } ssc.start() - // Wait for 12 seconds - Thread.sleep(12000) - ssc.stop() - - val newSsc = new StreamingContext("checkpoint") - newSsc.start() - } } + diff --git a/streaming/src/main/scala/spark/streaming/input/KafkaInputDStream.scala b/streaming/src/main/scala/spark/streaming/input/KafkaInputDStream.scala index 814f2706d6..ad8e86a094 100644 --- a/streaming/src/main/scala/spark/streaming/input/KafkaInputDStream.scala +++ b/streaming/src/main/scala/spark/streaming/input/KafkaInputDStream.scala @@ -1,15 +1,11 @@ package spark.streaming -import java.lang.reflect.Method -import java.nio.ByteBuffer import java.util.Properties -import java.util.concurrent.{ArrayBlockingQueue, ConcurrentHashMap, Executors} -import kafka.api.{FetchRequest} +import java.util.concurrent.Executors import kafka.consumer._ -import kafka.cluster.Partition import kafka.message.{Message, MessageSet, MessageAndMetadata} import kafka.serializer.StringDecoder -import kafka.utils.{Pool, Utils, ZKGroupTopicDirs} +import kafka.utils.{Utils, ZKGroupTopicDirs} import kafka.utils.ZkUtils._ import scala.collection.mutable.HashMap import scala.collection.JavaConversions._ @@ -17,14 +13,25 @@ import spark._ import spark.RDD import spark.storage.StorageLevel - +// Key for a specific Kafka Partition: (broker, topic, group, part) case class KafkaPartitionKey(brokerId: Int, topic: String, groupId: String, partId: Int) +// Metadata for a Kafka Stream that it sent to the Master case class KafkaInputDStreamMetadata(timestamp: Long, data: Map[KafkaPartitionKey, Long]) +// Checkpoint data specific to a KafkaInputDstream case class KafkaDStreamCheckpointData(kafkaRdds: HashMap[Time, Any], - savedOffsets: HashMap[Long, Map[KafkaPartitionKey, Long]]) extends DStreamCheckpointData(kafkaRdds) + savedOffsets: Map[KafkaPartitionKey, Long]) extends DStreamCheckpointData(kafkaRdds) /** * Input stream that pulls messages form a Kafka Broker. + * + * @param host Zookeper hostname. + * @param port Zookeper port. + * @param groupId The group id for this consumer. + * @param topics Map of (topic_name -> numPartitions) to consume. Each partition is consumed + * in its own thread. + * @param initialOffsets Optional initial offsets for each of the partitions to consume. + * By default the value is pulled from zookeper. + * @param storageLevel RDD storage level. */ class KafkaInputDStream[T: ClassManifest]( @transient ssc_ : StreamingContext, @@ -36,21 +43,31 @@ class KafkaInputDStream[T: ClassManifest]( storageLevel: StorageLevel ) extends NetworkInputDStream[T](ssc_ ) with Logging { + // Metadata that keeps track of which messages have already been consumed. 
var savedOffsets = HashMap[Long, Map[KafkaPartitionKey, Long]]() + // In case of a failure, the offets for a particular timestamp will be restored. + @transient var restoredOffsets : Map[KafkaPartitionKey, Long] = null override protected[streaming] def addMetadata(metadata: Any) { metadata match { - case x : KafkaInputDStreamMetadata => + case x : KafkaInputDStreamMetadata => savedOffsets(x.timestamp) = x.data - logInfo("Saved Offsets: " + savedOffsets) + // TOOD: Remove logging + logInfo("New saved Offsets: " + savedOffsets) case _ => logInfo("Received unknown metadata: " + metadata.toString) } } override protected[streaming] def updateCheckpointData(currentTime: Time) { super.updateCheckpointData(currentTime) - logInfo("Updating KafkaDStream checkpoint data: " + savedOffsets.toString) - checkpointData = KafkaDStreamCheckpointData(checkpointData.rdds, savedOffsets) + if(savedOffsets.size > 0) { + // Find the offets that were stored before the checkpoint was initiated + val key = savedOffsets.keys.toList.sortWith(_ < _).filter(_ < currentTime.millis).last + val latestOffsets = savedOffsets(key) + logInfo("Updating KafkaDStream checkpoint data: " + latestOffsets.toString) + checkpointData = KafkaDStreamCheckpointData(checkpointData.rdds, latestOffsets) + savedOffsets.clear() + } } override protected[streaming] def restoreCheckpointData() { @@ -58,14 +75,21 @@ class KafkaInputDStream[T: ClassManifest]( logInfo("Restoring KafkaDStream checkpoint data.") checkpointData match { case x : KafkaDStreamCheckpointData => - savedOffsets = x.savedOffsets - logInfo("Restored KafkaDStream offsets: " + savedOffsets.toString) + restoredOffsets = x.savedOffsets + logInfo("Restored KafkaDStream offsets: " + savedOffsets) } } def createReceiver(): NetworkReceiver[T] = { - new KafkaReceiver(id, host, port, groupId, topics, initialOffsets, storageLevel) - .asInstanceOf[NetworkReceiver[T]] + // We have restored from a checkpoint, use the restored offsets + if (restoredOffsets != null) { + new KafkaReceiver(id, host, port, groupId, topics, restoredOffsets, storageLevel) + .asInstanceOf[NetworkReceiver[T]] + } else { + new KafkaReceiver(id, host, port, groupId, topics, initialOffsets, storageLevel) + .asInstanceOf[NetworkReceiver[T]] + } + } } @@ -96,27 +120,28 @@ class KafkaReceiver(streamId: Int, host: String, port: Int, groupId: String, val executorPool = Executors.newFixedThreadPool(topics.values.reduce(_ + _)) val zooKeeperEndPoint = host + ":" + port - logInfo("Starting Kafka Consumer Stream in group " + groupId) + logInfo("Starting Kafka Consumer Stream with group: " + groupId) logInfo("Initial offsets: " + initialOffsets.toString) - logInfo("Connecting to " + zooKeeperEndPoint) - // Specify some Consumer properties + + // Zookeper connection properties val props = new Properties() props.put("zk.connect", zooKeeperEndPoint) props.put("zk.connectiontimeout.ms", ZK_TIMEOUT.toString) props.put("groupid", groupId) // Create the connection to the cluster + logInfo("Connecting to Zookeper: " + zooKeeperEndPoint) val consumerConfig = new ConsumerConfig(props) consumerConnector = Consumer.create(consumerConfig).asInstanceOf[ZookeeperConsumerConnector] + logInfo("Connected to " + zooKeeperEndPoint) // Reset the Kafka offsets in case we are recovering from a failure resetOffsets(initialOffsets) - - logInfo("Connected to " + zooKeeperEndPoint) // Create Threads for each Topic/Message Stream we are listening val topicMessageStreams = consumerConnector.createMessageStreams(topics, new StringDecoder()) + // Start the 
messages handler for each partition topicMessageStreams.values.foreach { streams => streams.foreach { stream => executorPool.submit(new MessageHandler(stream)) } } @@ -133,19 +158,20 @@ class KafkaReceiver(streamId: Int, host: String, port: Int, groupId: String, } } - // Responsible for handling Kafka Messages - class MessageHandler(stream: KafkaStream[String]) extends Runnable { + // Handles Kafka Messages + private class MessageHandler(stream: KafkaStream[String]) extends Runnable { def run() { logInfo("Starting MessageHandler.") stream.takeWhile { msgAndMetadata => dataHandler += msgAndMetadata.message - // Updating the offet. The key is (topic, partitionID). + // Updating the offet. The key is (broker, topic, group, partition). val key = KafkaPartitionKey(msgAndMetadata.topicInfo.brokerId, msgAndMetadata.topic, groupId, msgAndMetadata.topicInfo.partition.partId) val offset = msgAndMetadata.topicInfo.getConsumeOffset offsets.put(key, offset) - logInfo((key, offset).toString) + // TODO: Remove Logging + logInfo("Handled message: " + (key, offset).toString) // Keep on handling messages true @@ -157,6 +183,7 @@ class KafkaReceiver(streamId: Int, host: String, port: Int, groupId: String, extends DataHandler[Any](receiver, storageLevel) { override def createBlock(blockId: String, iterator: Iterator[Any]) : Block = { + // Creates a new Block with Kafka-specific Metadata new Block(blockId, iterator, KafkaInputDStreamMetadata(System.currentTimeMillis, offsets.toMap)) } diff --git a/streaming/src/test/scala/spark/streaming/CheckpointSuite.scala b/streaming/src/test/scala/spark/streaming/CheckpointSuite.scala index 038827ddb0..0450120061 100644 --- a/streaming/src/test/scala/spark/streaming/CheckpointSuite.scala +++ b/streaming/src/test/scala/spark/streaming/CheckpointSuite.scala @@ -59,9 +59,9 @@ class CheckpointSuite extends TestSuiteBase with BeforeAndAfter { // then check whether some RDD has been checkpointed or not ssc.start() runStreamsWithRealDelay(ssc, firstNumBatches) - logInfo("Checkpoint data of state stream = \n[" + stateStream.checkpointData.mkString(",\n") + "]") - assert(!stateStream.checkpointData.isEmpty, "No checkpointed RDDs in state stream before first failure") - stateStream.checkpointData.foreach { + logInfo("Checkpoint data of state stream = \n[" + stateStream.checkpointData.rdds.mkString(",\n") + "]") + assert(!stateStream.checkpointData.rdds.isEmpty, "No checkpointed RDDs in state stream before first failure") + stateStream.checkpointData.rdds.foreach { case (time, data) => { val file = new File(data.toString) assert(file.exists(), "Checkpoint file '" + file +"' for time " + time + " for state stream before first failure does not exist") @@ -70,7 +70,7 @@ class CheckpointSuite extends TestSuiteBase with BeforeAndAfter { // Run till a further time such that previous checkpoint files in the stream would be deleted // and check whether the earlier checkpoint files are deleted - val checkpointFiles = stateStream.checkpointData.map(x => new File(x._2.toString)) + val checkpointFiles = stateStream.checkpointData.rdds.map(x => new File(x._2.toString)) runStreamsWithRealDelay(ssc, secondNumBatches) checkpointFiles.foreach(file => assert(!file.exists, "Checkpoint file '" + file + "' was not deleted")) ssc.stop() @@ -87,8 +87,8 @@ class CheckpointSuite extends TestSuiteBase with BeforeAndAfter { // is present in the checkpoint data or not ssc.start() runStreamsWithRealDelay(ssc, 1) - assert(!stateStream.checkpointData.isEmpty, "No checkpointed RDDs in state stream before second 
failure") - stateStream.checkpointData.foreach { + assert(!stateStream.checkpointData.rdds.isEmpty, "No checkpointed RDDs in state stream before second failure") + stateStream.checkpointData.rdds.foreach { case (time, data) => { val file = new File(data.toString) assert(file.exists(), From deb2c4df72f65f2bd90cc97a9abcd59a12eecabc Mon Sep 17 00:00:00 2001 From: Denny Date: Sun, 11 Nov 2012 11:11:49 -0800 Subject: [PATCH 107/226] Add comment. --- .../main/scala/spark/streaming/input/KafkaInputDStream.scala | 2 ++ 1 file changed, 2 insertions(+) diff --git a/streaming/src/main/scala/spark/streaming/input/KafkaInputDStream.scala b/streaming/src/main/scala/spark/streaming/input/KafkaInputDStream.scala index ad8e86a094..cc74855983 100644 --- a/streaming/src/main/scala/spark/streaming/input/KafkaInputDStream.scala +++ b/streaming/src/main/scala/spark/streaming/input/KafkaInputDStream.scala @@ -66,6 +66,8 @@ class KafkaInputDStream[T: ClassManifest]( val latestOffsets = savedOffsets(key) logInfo("Updating KafkaDStream checkpoint data: " + latestOffsets.toString) checkpointData = KafkaDStreamCheckpointData(checkpointData.rdds, latestOffsets) + // TODO: This may throw out offsets that are created after the checkpoint, + // but it's unlikely we'll need them. savedOffsets.clear() } } From 0fd4c93f1c349f052f633fea64f975d53976bd9c Mon Sep 17 00:00:00 2001 From: Denny Date: Sun, 11 Nov 2012 11:15:31 -0800 Subject: [PATCH 108/226] Updated comment. --- .../main/scala/spark/streaming/input/KafkaInputDStream.scala | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/streaming/src/main/scala/spark/streaming/input/KafkaInputDStream.scala b/streaming/src/main/scala/spark/streaming/input/KafkaInputDStream.scala index cc74855983..318537532c 100644 --- a/streaming/src/main/scala/spark/streaming/input/KafkaInputDStream.scala +++ b/streaming/src/main/scala/spark/streaming/input/KafkaInputDStream.scala @@ -104,7 +104,7 @@ class KafkaReceiver(streamId: Int, host: String, port: Int, groupId: String, // Handles pushing data into the BlockManager lazy protected val dataHandler = new KafkaDataHandler(this, storageLevel) - // Keeps track of the current offsets. Maps from (topic, partitionID) -> Offset + // Keeps track of the current offsets. Maps from (broker, topic, group, part) -> Offset lazy val offsets = HashMap[KafkaPartitionKey, Long]() // Connection to Kafka var consumerConnector : ZookeeperConsumerConnector = null From 46222dc56db4a521bd613bd3fac5b91868bb339e Mon Sep 17 00:00:00 2001 From: Tathagata Das Date: Sun, 11 Nov 2012 13:20:09 -0800 Subject: [PATCH 109/226] Fixed bug in FileInputDStream that allowed it to miss new files. Added tests in the InputStreamsSuite to test checkpointing of file and network streams. 
--- .../spark/streaming/FileInputDStream.scala | 34 ++++- .../spark/streaming/InputStreamsSuite.scala | 136 +++++++++++++++--- 2 files changed, 148 insertions(+), 22 deletions(-) diff --git a/streaming/src/main/scala/spark/streaming/FileInputDStream.scala b/streaming/src/main/scala/spark/streaming/FileInputDStream.scala index 9d7361097b..88856364d2 100644 --- a/streaming/src/main/scala/spark/streaming/FileInputDStream.scala +++ b/streaming/src/main/scala/spark/streaming/FileInputDStream.scala @@ -6,7 +6,8 @@ import spark.rdd.UnionRDD import org.apache.hadoop.fs.{FileSystem, Path, PathFilter} import org.apache.hadoop.conf.Configuration import org.apache.hadoop.mapreduce.{InputFormat => NewInputFormat} -import java.io.{ObjectInputStream, IOException} + +import scala.collection.mutable.HashSet class FileInputDStream[K: ClassManifest, V: ClassManifest, F <: NewInputFormat[K,V] : ClassManifest]( @@ -19,7 +20,8 @@ class FileInputDStream[K: ClassManifest, V: ClassManifest, F <: NewInputFormat[K @transient private var path_ : Path = null @transient private var fs_ : FileSystem = null - var lastModTime: Long = 0 + var lastModTime = 0L + val lastModTimeFiles = new HashSet[String]() def path(): Path = { if (path_ == null) path_ = new Path(directory) @@ -40,22 +42,37 @@ class FileInputDStream[K: ClassManifest, V: ClassManifest, F <: NewInputFormat[K } override def stop() { } - + + /** + * Finds the files that were modified since the last time this method was called and makes + * a union RDD out of them. Note that this maintains the list of files that were processed + * in the latest modification time in the previous call to this method. This is because the + * modification time returned by the FileStatus API seems to return times only at the + * granularity of seconds. Hence, new files may have the same modification time as the + * latest modification time in the previous call to this method and the list of files + * maintained is used to filter the one that have been processed. 
+ */ override def compute(validTime: Time): Option[RDD[(K, V)]] = { + // Create the filter for selecting new files val newFilter = new PathFilter() { var latestModTime = 0L - + val latestModTimeFiles = new HashSet[String]() + def accept(path: Path): Boolean = { if (!filter.accept(path)) { return false } else { val modTime = fs.getFileStatus(path).getModificationTime() - if (modTime <= lastModTime) { + if (modTime < lastModTime){ + return false + } else if (modTime == lastModTime && lastModTimeFiles.contains(path.toString)) { return false } if (modTime > latestModTime) { latestModTime = modTime + latestModTimeFiles.clear() } + latestModTimeFiles += path.toString return true } } @@ -64,7 +81,12 @@ class FileInputDStream[K: ClassManifest, V: ClassManifest, F <: NewInputFormat[K val newFiles = fs.listStatus(path, newFilter) logInfo("New files: " + newFiles.map(_.getPath).mkString(", ")) if (newFiles.length > 0) { - lastModTime = newFilter.latestModTime + // Update the modification time and the files processed for that modification time + if (lastModTime != newFilter.latestModTime) { + lastModTime = newFilter.latestModTime + lastModTimeFiles.clear() + } + lastModTimeFiles ++= newFilter.latestModTimeFiles } val newRDD = new UnionRDD(ssc.sc, newFiles.map( file => ssc.sc.newAPIHadoopFile[K, V, F](file.getPath.toString))) diff --git a/streaming/src/test/scala/spark/streaming/InputStreamsSuite.scala b/streaming/src/test/scala/spark/streaming/InputStreamsSuite.scala index 8f892baab1..0957748603 100644 --- a/streaming/src/test/scala/spark/streaming/InputStreamsSuite.scala +++ b/streaming/src/test/scala/spark/streaming/InputStreamsSuite.scala @@ -9,12 +9,19 @@ import spark.storage.StorageLevel import spark.Logging import scala.util.Random import org.apache.commons.io.FileUtils +import org.scalatest.BeforeAndAfter -class InputStreamsSuite extends TestSuiteBase { +class InputStreamsSuite extends TestSuiteBase with BeforeAndAfter { System.setProperty("spark.streaming.clock", "spark.streaming.util.ManualClock") + override def checkpointDir = "checkpoint" + + after { + FileUtils.deleteDirectory(new File(checkpointDir)) + } + test("network input stream") { // Start the server val serverPort = 9999 @@ -30,7 +37,7 @@ class InputStreamsSuite extends TestSuiteBase { ssc.registerOutputStream(outputStream) ssc.start() - // Feed data to the server to send to the Spark Streaming network receiver + // Feed data to the server to send to the network receiver val clock = ssc.scheduler.clock.asInstanceOf[ManualClock] val input = Seq(1, 2, 3, 4, 5) val expectedOutput = input.map(_.toString) @@ -52,7 +59,7 @@ class InputStreamsSuite extends TestSuiteBase { logInfo("Stopping context") ssc.stop() - // Verify whether data received by Spark Streaming was as expected + // Verify whether data received was as expected logInfo("--------------------------------") logInfo("output.size = " + outputBuffer.size) logInfo("output") @@ -69,6 +76,49 @@ class InputStreamsSuite extends TestSuiteBase { } } + test("network input stream with checkpoint") { + // Start the server + val serverPort = 9999 + val server = new TestServer(9999) + server.start() + + // Set up the streaming context and input streams + var ssc = new StreamingContext(master, framework) + ssc.setBatchDuration(batchDuration) + ssc.checkpoint(checkpointDir, checkpointInterval) + val networkStream = ssc.networkTextStream("localhost", serverPort, StorageLevel.MEMORY_AND_DISK) + var outputStream = new TestOutputStream(networkStream, new ArrayBuffer[Seq[String]]) + 
ssc.registerOutputStream(outputStream) + ssc.start() + + // Feed data to the server to send to the network receiver + var clock = ssc.scheduler.clock.asInstanceOf[ManualClock] + for (i <- Seq(1, 2, 3)) { + server.send(i.toString + "\n") + Thread.sleep(100) + clock.addToTime(batchDuration.milliseconds) + } + Thread.sleep(500) + assert(outputStream.output.size > 0) + ssc.stop() + + // Restart stream computation from checkpoint and feed more data to see whether + // they are being received and processed + logInfo("*********** RESTARTING ************") + ssc = new StreamingContext(checkpointDir) + ssc.start() + clock = ssc.scheduler.clock.asInstanceOf[ManualClock] + for (i <- Seq(4, 5, 6)) { + server.send(i.toString + "\n") + Thread.sleep(100) + clock.addToTime(batchDuration.milliseconds) + } + Thread.sleep(500) + outputStream = ssc.graph.getOutputStreams().head.asInstanceOf[TestOutputStream[String]] + assert(outputStream.output.size > 0) + ssc.stop() + } + test("file input stream") { // Create a temporary directory val dir = { @@ -76,7 +126,7 @@ class InputStreamsSuite extends TestSuiteBase { temp.delete() temp.mkdirs() temp.deleteOnExit() - println("Created temp dir " + temp) + logInfo("Created temp dir " + temp) temp } @@ -84,7 +134,9 @@ class InputStreamsSuite extends TestSuiteBase { val ssc = new StreamingContext(master, framework) ssc.setBatchDuration(batchDuration) val filestream = ssc.textFileStream(dir.toString) - val outputBuffer = new ArrayBuffer[Seq[String]] with SynchronizedBuffer[Seq[String ]] + val outputBuffer = new ArrayBuffer[Seq[String]] with SynchronizedBuffer[Seq[String]] + def output = outputBuffer.flatMap(x => x) + val outputStream = new TestOutputStream(filestream, outputBuffer) ssc.registerOutputStream(outputStream) ssc.start() @@ -96,37 +148,89 @@ class InputStreamsSuite extends TestSuiteBase { Thread.sleep(1000) for (i <- 0 until input.size) { FileUtils.writeStringToFile(new File(dir, i.toString), input(i).toString + "\n") - Thread.sleep(500) + Thread.sleep(100) clock.addToTime(batchDuration.milliseconds) - Thread.sleep(500) + Thread.sleep(100) } val startTime = System.currentTimeMillis() - while (outputBuffer.size < expectedOutput.size && System.currentTimeMillis() - startTime < maxWaitTimeMillis) { - println("output.size = " + outputBuffer.size + ", expectedOutput.size = " + expectedOutput.size) + while (output.size < expectedOutput.size && System.currentTimeMillis() - startTime < maxWaitTimeMillis) { + //println("output.size = " + output.size + ", expectedOutput.size = " + expectedOutput.size) Thread.sleep(100) } Thread.sleep(1000) val timeTaken = System.currentTimeMillis() - startTime assert(timeTaken < maxWaitTimeMillis, "Operation timed out after " + timeTaken + " ms") - println("Stopping context") + logInfo("Stopping context") ssc.stop() // Verify whether data received by Spark Streaming was as expected logInfo("--------------------------------") - logInfo("output.size = " + outputBuffer.size) + logInfo("output.size = " + output.size) logInfo("output") - outputBuffer.foreach(x => logInfo("[" + x.mkString(",") + "]")) + output.foreach(x => logInfo("[" + x.mkString(",") + "]")) logInfo("expected output.size = " + expectedOutput.size) logInfo("expected output") expectedOutput.foreach(x => logInfo("[" + x.mkString(",") + "]")) logInfo("--------------------------------") - assert(outputBuffer.size === expectedOutput.size) - for (i <- 0 until outputBuffer.size) { - assert(outputBuffer(i).size === 1) - assert(outputBuffer(i).head === expectedOutput(i)) + 
assert(output.size === expectedOutput.size) + for (i <- 0 until output.size) { + assert(output(i).size === 1) + assert(output(i).head.toString === expectedOutput(i)) } } + + test("file input stream with checkpoint") { + // Create a temporary directory + val dir = { + var temp = File.createTempFile(".temp.", Random.nextInt().toString) + temp.delete() + temp.mkdirs() + temp.deleteOnExit() + println("Created temp dir " + temp) + temp + } + + // Set up the streaming context and input streams + var ssc = new StreamingContext(master, framework) + ssc.setBatchDuration(batchDuration) + ssc.checkpoint(checkpointDir, checkpointInterval) + val filestream = ssc.textFileStream(dir.toString) + var outputStream = new TestOutputStream(filestream, new ArrayBuffer[Seq[String]]) + ssc.registerOutputStream(outputStream) + ssc.start() + + // Create files and advance manual clock to process them + var clock = ssc.scheduler.clock.asInstanceOf[ManualClock] + Thread.sleep(1000) + for (i <- Seq(1, 2, 3)) { + FileUtils.writeStringToFile(new File(dir, i.toString), i.toString + "\n") + Thread.sleep(100) + clock.addToTime(batchDuration.milliseconds) + } + Thread.sleep(500) + logInfo("Output = " + outputStream.output.mkString(",")) + assert(outputStream.output.size > 0) + ssc.stop() + + // Restart stream computation from checkpoint and create more files to see whether + // they are being processed + logInfo("*********** RESTARTING ************") + ssc = new StreamingContext(checkpointDir) + ssc.start() + clock = ssc.scheduler.clock.asInstanceOf[ManualClock] + Thread.sleep(500) + for (i <- Seq(4, 5, 6)) { + FileUtils.writeStringToFile(new File(dir, i.toString), i.toString + "\n") + Thread.sleep(100) + clock.addToTime(batchDuration.milliseconds) + } + Thread.sleep(500) + outputStream = ssc.graph.getOutputStreams().head.asInstanceOf[TestOutputStream[String]] + logInfo("Output = " + outputStream.output.mkString(",")) + assert(outputStream.output.size > 0) + ssc.stop() + } } From ae61ebaee64fad117155d65bcdfc8520bda0e6b4 Mon Sep 17 00:00:00 2001 From: Tathagata Das Date: Mon, 12 Nov 2012 21:45:16 +0000 Subject: [PATCH 110/226] Fixed bugs in RawNetworkInputDStream and in its examples. Made the ReducedWindowedDStream persist RDDs to MEMOERY_SER_ONLY by default. Removed unncessary examples. Added streaming-env.sh.template to add recommended setting for streaming. 
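[Editorial sketch] For context on the ReducedWindowedDStream change in this commit, here is a small usage sketch of the incremental windowed reduction whose reused RDDs are now persisted serialized in memory by default. It is written against the API as it appears in these patches (setBatchDuration, networkTextStream, reduceByKeyAndWindow with an inverse reduce function); the master URL, port, checkpoint directory, and object name are placeholders, and the signatures may differ in later Spark Streaming releases.

import spark.storage.StorageLevel
import spark.streaming._
import spark.streaming.StreamingContext._

object WindowedCountSketch {
  def main(args: Array[String]) {
    val ssc = new StreamingContext("local[2]", "WindowedCountSketch")
    ssc.setBatchDuration(Seconds(2))
    // Checkpointing is needed because the inverse-reduce path carries state
    // from one window to the next.
    ssc.checkpoint("work/checkpoint", Seconds(10))

    val lines = ssc.networkTextStream("localhost", 9999, StorageLevel.MEMORY_AND_DISK)
    val counts = lines.flatMap(_.split(" "))
      .map(word => (word, 1L))
      // _ + _ is applied to data entering the window and _ - _ to data leaving
      // it, so each batch only touches the delta rather than the whole window.
      .reduceByKeyAndWindow(_ + _, _ - _, Minutes(10), Seconds(2), 2)

    counts.persist().checkpoint(Seconds(10))
    counts.print()
    ssc.start()
  }
}
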
--- conf/streaming-env.sh.template | 22 ++++ run | 4 + startTrigger.sh | 3 - .../spark/streaming/NetworkInputTracker.scala | 1 + .../spark/streaming/RawInputDStream.scala | 2 +- .../streaming/ReducedWindowedDStream.scala | 15 +-- .../spark/streaming/StreamingContext.scala | 4 +- .../spark/streaming/examples/Grep2.scala | 64 ---------- .../spark/streaming/examples/GrepRaw.scala | 11 +- .../streaming/examples/TopKWordCountRaw.scala | 102 +++++----------- .../spark/streaming/examples/WordCount2.scala | 114 ------------------ .../streaming/examples/WordCountRaw.scala | 55 ++++----- .../spark/streaming/examples/WordMax2.scala | 75 ------------ .../spark/streaming/util/RawTextHelper.scala | 98 +++++++++++++++ 14 files changed, 192 insertions(+), 378 deletions(-) create mode 100755 conf/streaming-env.sh.template delete mode 100755 startTrigger.sh delete mode 100644 streaming/src/main/scala/spark/streaming/examples/Grep2.scala delete mode 100644 streaming/src/main/scala/spark/streaming/examples/WordCount2.scala delete mode 100644 streaming/src/main/scala/spark/streaming/examples/WordMax2.scala create mode 100644 streaming/src/main/scala/spark/streaming/util/RawTextHelper.scala diff --git a/conf/streaming-env.sh.template b/conf/streaming-env.sh.template new file mode 100755 index 0000000000..6b4094c515 --- /dev/null +++ b/conf/streaming-env.sh.template @@ -0,0 +1,22 @@ +#!/usr/bin/env bash + +# This file contains a few additional setting that are useful for +# running streaming jobs in Spark. Copy this file as streaming-env.sh . +# Note that this shell script will be read after spark-env.sh, so settings +# in this file may override similar settings (if present) in spark-env.sh . + + +# Using concurrent GC is strongly recommended as it can significantly +# reduce GC related pauses. + +SPARK_JAVA_OPTS+=" -XX:+UseConcMarkSweepGC" + +# Using of Kryo serialization can improve serialization performance +# and therefore the throughput of the Spark Streaming programs. However, +# using Kryo serialization with custom classes may required you to +# register the classes with Kryo. Refer to the Spark documentation +# for more details. + +# SPARK_JAVA_OPTS+=" -Dspark.serializer=spark.KryoSerializer" + +export SPARK_JAVA_OPTS diff --git a/run b/run index a363599cf0..d91430ad2e 100755 --- a/run +++ b/run @@ -13,6 +13,10 @@ if [ -e $FWDIR/conf/spark-env.sh ] ; then . $FWDIR/conf/spark-env.sh fi +if [ -e $FWDIR/conf/streaming-env.sh ] ; then + . 
$FWDIR/conf/streaming-env.sh +fi + if [ "$SPARK_LAUNCH_WITH_SCALA" == "1" ]; then if [ `command -v scala` ]; then RUNNER="scala" diff --git a/startTrigger.sh b/startTrigger.sh deleted file mode 100755 index 373dbda93e..0000000000 --- a/startTrigger.sh +++ /dev/null @@ -1,3 +0,0 @@ -#!/bin/bash - -./run spark.streaming.SentenceGenerator localhost 7078 sentences.txt 1 diff --git a/streaming/src/main/scala/spark/streaming/NetworkInputTracker.scala b/streaming/src/main/scala/spark/streaming/NetworkInputTracker.scala index 07ef79415d..d0fef70f7e 100644 --- a/streaming/src/main/scala/spark/streaming/NetworkInputTracker.scala +++ b/streaming/src/main/scala/spark/streaming/NetworkInputTracker.scala @@ -47,6 +47,7 @@ class NetworkInputTracker( val result = queue.synchronized { queue.dequeueAll(x => true) } + logInfo("Stream " + receiverId + " received " + result.size + " blocks") result.toArray } diff --git a/streaming/src/main/scala/spark/streaming/RawInputDStream.scala b/streaming/src/main/scala/spark/streaming/RawInputDStream.scala index e022b85fbe..03726bfba6 100644 --- a/streaming/src/main/scala/spark/streaming/RawInputDStream.scala +++ b/streaming/src/main/scala/spark/streaming/RawInputDStream.scala @@ -69,7 +69,7 @@ class RawNetworkReceiver(streamId: Int, host: String, port: Int, storageLevel: S } def onStop() { - blockPushingThread.interrupt() + if (blockPushingThread != null) blockPushingThread.interrupt() } /** Read a buffer fully from a given Channel */ diff --git a/streaming/src/main/scala/spark/streaming/ReducedWindowedDStream.scala b/streaming/src/main/scala/spark/streaming/ReducedWindowedDStream.scala index 6df82c0df3..b07d51fa6b 100644 --- a/streaming/src/main/scala/spark/streaming/ReducedWindowedDStream.scala +++ b/streaming/src/main/scala/spark/streaming/ReducedWindowedDStream.scala @@ -31,10 +31,14 @@ class ReducedWindowedDStream[K: ClassManifest, V: ClassManifest]( "must be multiple of the slide duration of parent DStream (" + parent.slideTime + ")" ) - super.persist(StorageLevel.MEMORY_ONLY) - + // Reduce each batch of data using reduceByKey which will be further reduced by window + // by ReducedWindowedDStream val reducedStream = parent.reduceByKey(reduceFunc, partitioner) + // Persist RDDs to memory by default as these RDDs are going to be reused. 
+ super.persist(StorageLevel.MEMORY_ONLY_SER) + reducedStream.persist(StorageLevel.MEMORY_ONLY_SER) + def windowTime: Time = _windowTime override def dependencies = List(reducedStream) @@ -57,13 +61,6 @@ class ReducedWindowedDStream[K: ClassManifest, V: ClassManifest]( this } - protected[streaming] override def setRememberDuration(time: Time) { - if (rememberDuration == null || rememberDuration < time) { - rememberDuration = time - dependencies.foreach(_.setRememberDuration(rememberDuration + windowTime)) - } - } - override def compute(validTime: Time): Option[RDD[(K, V)]] = { val reduceF = reduceFunc val invReduceF = invReduceFunc diff --git a/streaming/src/main/scala/spark/streaming/StreamingContext.scala b/streaming/src/main/scala/spark/streaming/StreamingContext.scala index eb83aaee7a..ab6d6e8dea 100644 --- a/streaming/src/main/scala/spark/streaming/StreamingContext.scala +++ b/streaming/src/main/scala/spark/streaming/StreamingContext.scala @@ -124,7 +124,7 @@ final class StreamingContext ( def rawNetworkStream[T: ClassManifest]( hostname: String, port: Int, - storageLevel: StorageLevel = StorageLevel.MEMORY_ONLY_SER_2 + storageLevel: StorageLevel = StorageLevel.MEMORY_AND_DISK_SER_2 ): DStream[T] = { val inputStream = new RawInputDStream[T](this, hostname, port, storageLevel) graph.addInputStream(inputStream) @@ -132,7 +132,7 @@ final class StreamingContext ( } /** - * This function creates a input stream that monitors a Hadoop-compatible + * This function creates a input stream that monitors a Hadoop-compatible filesystem * for new files and executes the necessary processing on them. */ def fileStream[ diff --git a/streaming/src/main/scala/spark/streaming/examples/Grep2.scala b/streaming/src/main/scala/spark/streaming/examples/Grep2.scala deleted file mode 100644 index b1faa65c17..0000000000 --- a/streaming/src/main/scala/spark/streaming/examples/Grep2.scala +++ /dev/null @@ -1,64 +0,0 @@ -package spark.streaming.examples - -import spark.SparkContext -import SparkContext._ -import spark.streaming._ -import StreamingContext._ - -import spark.storage.StorageLevel - -import scala.util.Sorting -import scala.collection.mutable.ArrayBuffer -import scala.collection.mutable.HashMap -import scala.collection.mutable.Queue -import scala.collection.JavaConversions.mapAsScalaMap - -import it.unimi.dsi.fastutil.objects.{Object2LongOpenHashMap => OLMap} - - -object Grep2 { - - def warmup(sc: SparkContext) { - (0 until 10).foreach {i => - sc.parallelize(1 to 20000000, 1000) - .map(x => (x % 337, x % 1331)) - .reduceByKey(_ + _) - .count() - } - } - - def main (args: Array[String]) { - - if (args.length != 6) { - println ("Usage: Grep2 ") - System.exit(1) - } - - val Array(master, file, mapTasks, reduceTasks, batchMillis, chkptMillis) = args - - val batchDuration = Milliseconds(batchMillis.toLong) - - val ssc = new StreamingContext(master, "Grep2") - ssc.setBatchDuration(batchDuration) - - //warmup(ssc.sc) - - val data = ssc.sc.textFile(file, mapTasks.toInt).persist( - new StorageLevel(false, true, false, 3)) // Memory only, serialized, 3 replicas - println("Data count: " + data.count()) - println("Data count: " + data.count()) - println("Data count: " + data.count()) - - val sentences = new ConstantInputDStream(ssc, data) - ssc.registerInputStream(sentences) - - sentences.filter(_.contains("Culpepper")).count().foreachRDD(r => - println("Grep count: " + r.collect().mkString)) - - ssc.start() - - while(true) { Thread.sleep(1000) } - } -} - - diff --git 
a/streaming/src/main/scala/spark/streaming/examples/GrepRaw.scala b/streaming/src/main/scala/spark/streaming/examples/GrepRaw.scala index b1e1a613fe..ffbea6e55d 100644 --- a/streaming/src/main/scala/spark/streaming/examples/GrepRaw.scala +++ b/streaming/src/main/scala/spark/streaming/examples/GrepRaw.scala @@ -2,8 +2,10 @@ package spark.streaming.examples import spark.util.IntParam import spark.storage.StorageLevel + import spark.streaming._ import spark.streaming.StreamingContext._ +import spark.streaming.util.RawTextHelper._ object GrepRaw { def main(args: Array[String]) { @@ -17,16 +19,13 @@ object GrepRaw { // Create the context and set the batch size val ssc = new StreamingContext(master, "GrepRaw") ssc.setBatchDuration(Milliseconds(batchMillis)) + warmUp(ssc.sc) - // Make sure some tasks have started on each node - ssc.sc.parallelize(1 to 1000, 1000).count() - ssc.sc.parallelize(1 to 1000, 1000).count() - ssc.sc.parallelize(1 to 1000, 1000).count() val rawStreams = (1 to numStreams).map(_ => - ssc.rawNetworkStream[String](host, port, StorageLevel.MEMORY_ONLY_2)).toArray + ssc.rawNetworkStream[String](host, port, StorageLevel.MEMORY_ONLY_SER_2)).toArray val union = new UnionDStream(rawStreams) - union.filter(_.contains("Culpepper")).count().foreachRDD(r => + union.filter(_.contains("Alice")).count().foreachRDD(r => println("Grep count: " + r.collect().mkString)) ssc.start() } diff --git a/streaming/src/main/scala/spark/streaming/examples/TopKWordCountRaw.scala b/streaming/src/main/scala/spark/streaming/examples/TopKWordCountRaw.scala index 750cb7445f..0411bde1a7 100644 --- a/streaming/src/main/scala/spark/streaming/examples/TopKWordCountRaw.scala +++ b/streaming/src/main/scala/spark/streaming/examples/TopKWordCountRaw.scala @@ -1,94 +1,50 @@ package spark.streaming.examples -import spark.util.IntParam -import spark.SparkContext -import spark.SparkContext._ import spark.storage.StorageLevel +import spark.util.IntParam + import spark.streaming._ import spark.streaming.StreamingContext._ +import spark.streaming.util.RawTextHelper._ -import WordCount2_ExtraFunctions._ +import java.util.UUID object TopKWordCountRaw { - def moreWarmup(sc: SparkContext) { - (0 until 40).foreach {i => - sc.parallelize(1 to 20000000, 1000) - .map(_ % 1331).map(_.toString) - .mapPartitions(splitAndCountPartitions).reduceByKey(_ + _, 10) - .collect() - } - } - + def main(args: Array[String]) { - if (args.length != 7) { - System.err.println("Usage: TopKWordCountRaw ") + if (args.length != 4) { + System.err.println("Usage: WordCountRaw <# streams> ") System.exit(1) } - val Array(master, IntParam(streams), host, IntParam(port), IntParam(batchMs), - IntParam(chkptMs), IntParam(reduces)) = args - - // Create the context and set the batch size - val ssc = new StreamingContext(master, "TopKWordCountRaw") - ssc.setBatchDuration(Milliseconds(batchMs)) - - // Make sure some tasks have started on each node - moreWarmup(ssc.sc) - - val rawStreams = (1 to streams).map(_ => - ssc.rawNetworkStream[String](host, port, StorageLevel.MEMORY_ONLY_2)).toArray - val union = new UnionDStream(rawStreams) - - val windowedCounts = union.mapPartitions(splitAndCountPartitions) - .reduceByKeyAndWindow(add _, subtract _, Seconds(30), Milliseconds(batchMs), reduces) - windowedCounts.persist().checkpoint(Milliseconds(chkptMs)) - //.persist(StorageLevel.MEMORY_ONLY, StorageLevel.MEMORY_ONLY_2, Milliseconds(chkptMs)) - - def topK(data: Iterator[(String, Long)], k: Int): Iterator[(String, Long)] = { - val taken = new Array[(String, Long)](k) - - 
var i = 0 - var len = 0 - var done = false - var value: (String, Long) = null - var swap: (String, Long) = null - var count = 0 - - while(data.hasNext) { - value = data.next - count += 1 - println("count = " + count) - if (len == 0) { - taken(0) = value - len = 1 - } else if (len < k || value._2 > taken(len - 1)._2) { - if (len < k) { - len += 1 - } - taken(len - 1) = value - i = len - 1 - while(i > 0 && taken(i - 1)._2 < taken(i)._2) { - swap = taken(i) - taken(i) = taken(i-1) - taken(i - 1) = swap - i -= 1 - } - } - } - println("Took " + len + " out of " + count + " items") - return taken.toIterator - } + val Array(master, IntParam(numStreams), IntParam(port), checkpointDir) = args + val k = 10 - val k = 50 + // Create the context, set the batch size and checkpoint directory. + // Checkpoint directory is necessary for achieving fault-tolerance, by saving counts + // periodically to HDFS + val ssc = new StreamingContext(master, "TopKWordCountRaw") + ssc.setBatchDuration(Seconds(1)) + ssc.checkpoint(checkpointDir + "/" + UUID.randomUUID.toString, Seconds(1)) + + // Warm up the JVMs on master and slave for JIT compilation to kick in + /*warmUp(ssc.sc)*/ + + // Set up the raw network streams that will connect to localhost:port to raw test + // senders on the slaves and generate top K words of last 30 seconds + val lines = (1 to numStreams).map(_ => { + ssc.rawNetworkStream[String]("localhost", port, StorageLevel.MEMORY_ONLY_SER_2) + }) + val union = new UnionDStream(lines.toArray) + val counts = union.mapPartitions(splitAndCountPartitions) + val windowedCounts = counts.reduceByKeyAndWindow(add _, subtract _, Seconds(30), Seconds(1), 10) val partialTopKWindowedCounts = windowedCounts.mapPartitions(topK(_, k)) partialTopKWindowedCounts.foreachRDD(rdd => { val collectedCounts = rdd.collect - println("Collected " + collectedCounts.size + " items") - topK(collectedCounts.toIterator, k).foreach(println) + println("Collected " + collectedCounts.size + " words from partial top words") + println("Top " + k + " words are " + topK(collectedCounts.toIterator, k).mkString(",")) }) -// windowedCounts.foreachRDD(r => println("Element count: " + r.count())) - ssc.start() } } diff --git a/streaming/src/main/scala/spark/streaming/examples/WordCount2.scala b/streaming/src/main/scala/spark/streaming/examples/WordCount2.scala deleted file mode 100644 index 865026033e..0000000000 --- a/streaming/src/main/scala/spark/streaming/examples/WordCount2.scala +++ /dev/null @@ -1,114 +0,0 @@ -package spark.streaming.examples - -import spark.SparkContext -import SparkContext._ -import spark.streaming._ -import StreamingContext._ - -import spark.storage.StorageLevel - -import scala.util.Sorting -import scala.collection.mutable.ArrayBuffer -import scala.collection.mutable.HashMap -import scala.collection.mutable.Queue -import scala.collection.JavaConversions.mapAsScalaMap - -import it.unimi.dsi.fastutil.objects.{Object2LongOpenHashMap => OLMap} - - -object WordCount2_ExtraFunctions { - - def add(v1: Long, v2: Long) = (v1 + v2) - - def subtract(v1: Long, v2: Long) = (v1 - v2) - - def max(v1: Long, v2: Long) = math.max(v1, v2) - - def splitAndCountPartitions(iter: Iterator[String]): Iterator[(String, Long)] = { - //val map = new java.util.HashMap[String, Long] - val map = new OLMap[String] - var i = 0 - var j = 0 - while (iter.hasNext) { - val s = iter.next() - i = 0 - while (i < s.length) { - j = i - while (j < s.length && s.charAt(j) != ' ') { - j += 1 - } - if (j > i) { - val w = s.substring(i, j) - val c = map.getLong(w) - 
map.put(w, c + 1) -/* - if (c == null) { - map.put(w, 1) - } else { - map.put(w, c + 1) - } -*/ - } - i = j - while (i < s.length && s.charAt(i) == ' ') { - i += 1 - } - } - } - map.toIterator.map{case (k, v) => (k, v)} - } -} - -object WordCount2 { - - def warmup(sc: SparkContext) { - (0 until 3).foreach {i => - sc.parallelize(1 to 20000000, 500) - .map(x => (x % 337, x % 1331)) - .reduceByKey(_ + _, 100) - .count() - } - } - - def main (args: Array[String]) { - - if (args.length != 6) { - println ("Usage: WordCount2 ") - System.exit(1) - } - - val Array(master, file, mapTasks, reduceTasks, batchMillis, chkptMillis) = args - - val batchDuration = Milliseconds(batchMillis.toLong) - - val ssc = new StreamingContext(master, "WordCount2") - ssc.setBatchDuration(batchDuration) - - //warmup(ssc.sc) - - val data = ssc.sc.textFile(file, mapTasks.toInt).persist( - new StorageLevel(false, true, false, 3)) // Memory only, serialized, 3 replicas - println("Data count: " + data.map(x => if (x == "") 1 else x.split(" ").size / x.split(" ").size).count()) - println("Data count: " + data.count()) - println("Data count: " + data.count()) - - val sentences = new ConstantInputDStream(ssc, data) - ssc.registerInputStream(sentences) - - import WordCount2_ExtraFunctions._ - - val windowedCounts = sentences - .mapPartitions(splitAndCountPartitions) - .reduceByKeyAndWindow(add _, subtract _, Seconds(30), batchDuration, reduceTasks.toInt) - - windowedCounts.persist().checkpoint(Milliseconds(chkptMillis.toLong)) - //.persist(StorageLevel.MEMORY_ONLY, StorageLevel.MEMORY_ONLY_2, Milliseconds(chkptMillis.toLong)) - windowedCounts.foreachRDD(r => println("Element count: " + r.count())) - - ssc.start() - - while(true) { Thread.sleep(1000) } - } -} - - diff --git a/streaming/src/main/scala/spark/streaming/examples/WordCountRaw.scala b/streaming/src/main/scala/spark/streaming/examples/WordCountRaw.scala index d1ea9a9cd5..571428c0fe 100644 --- a/streaming/src/main/scala/spark/streaming/examples/WordCountRaw.scala +++ b/streaming/src/main/scala/spark/streaming/examples/WordCountRaw.scala @@ -1,50 +1,43 @@ package spark.streaming.examples -import spark.util.IntParam -import spark.SparkContext -import spark.SparkContext._ import spark.storage.StorageLevel +import spark.util.IntParam + import spark.streaming._ import spark.streaming.StreamingContext._ +import spark.streaming.util.RawTextHelper._ -import WordCount2_ExtraFunctions._ +import java.util.UUID object WordCountRaw { - def moreWarmup(sc: SparkContext) { - (0 until 40).foreach {i => - sc.parallelize(1 to 20000000, 1000) - .map(_ % 1331).map(_.toString) - .mapPartitions(splitAndCountPartitions).reduceByKey(_ + _, 10) - .collect() - } - } def main(args: Array[String]) { - if (args.length != 7) { - System.err.println("Usage: WordCountRaw ") + if (args.length != 4) { + System.err.println("Usage: WordCountRaw <# streams> ") System.exit(1) } - val Array(master, IntParam(streams), host, IntParam(port), IntParam(batchMs), - IntParam(chkptMs), IntParam(reduces)) = args + val Array(master, IntParam(numStreams), IntParam(port), checkpointDir) = args - // Create the context and set the batch size + // Create the context, set the batch size and checkpoint directory. 
+ // Checkpoint directory is necessary for achieving fault-tolerance, by saving counts + // periodically to HDFS val ssc = new StreamingContext(master, "WordCountRaw") - ssc.setBatchDuration(Milliseconds(batchMs)) + ssc.setBatchDuration(Seconds(1)) + ssc.checkpoint(checkpointDir + "/" + UUID.randomUUID.toString, Seconds(1)) + + // Warm up the JVMs on master and slave for JIT compilation to kick in + warmUp(ssc.sc) - // Make sure some tasks have started on each node - moreWarmup(ssc.sc) - - val rawStreams = (1 to streams).map(_ => - ssc.rawNetworkStream[String](host, port, StorageLevel.MEMORY_ONLY_2)).toArray - val union = new UnionDStream(rawStreams) - - val windowedCounts = union.mapPartitions(splitAndCountPartitions) - .reduceByKeyAndWindow(add _, subtract _, Seconds(30), Milliseconds(batchMs), reduces) - windowedCounts.persist().checkpoint(chkptMs) - //.persist(StorageLevel.MEMORY_ONLY, StorageLevel.MEMORY_ONLY_2, Milliseconds(chkptMs)) - - windowedCounts.foreachRDD(r => println("Element count: " + r.count())) + // Set up the raw network streams that will connect to localhost:port to raw test + // senders on the slaves and generate count of words of last 30 seconds + val lines = (1 to numStreams).map(_ => { + ssc.rawNetworkStream[String]("localhost", port, StorageLevel.MEMORY_ONLY_SER_2) + }) + val union = new UnionDStream(lines.toArray) + val counts = union.mapPartitions(splitAndCountPartitions) + val windowedCounts = counts.reduceByKeyAndWindow(add _, subtract _, Seconds(30), Seconds(1), 10) + windowedCounts.foreachRDD(r => println("# unique words = " + r.count())) ssc.start() } diff --git a/streaming/src/main/scala/spark/streaming/examples/WordMax2.scala b/streaming/src/main/scala/spark/streaming/examples/WordMax2.scala deleted file mode 100644 index 6a9c8a9a69..0000000000 --- a/streaming/src/main/scala/spark/streaming/examples/WordMax2.scala +++ /dev/null @@ -1,75 +0,0 @@ -package spark.streaming.examples - -import spark.SparkContext -import SparkContext._ -import spark.streaming._ -import StreamingContext._ - -import spark.storage.StorageLevel - -import scala.util.Sorting -import scala.collection.mutable.ArrayBuffer -import scala.collection.mutable.HashMap -import scala.collection.mutable.Queue -import scala.collection.JavaConversions.mapAsScalaMap - -import it.unimi.dsi.fastutil.objects.{Object2LongOpenHashMap => OLMap} - - -object WordMax2 { - - def warmup(sc: SparkContext) { - (0 until 10).foreach {i => - sc.parallelize(1 to 20000000, 1000) - .map(x => (x % 337, x % 1331)) - .reduceByKey(_ + _) - .count() - } - } - - def main (args: Array[String]) { - - if (args.length != 6) { - println ("Usage: WordMax2 ") - System.exit(1) - } - - val Array(master, file, mapTasks, reduceTasks, batchMillis, chkptMillis) = args - - val batchDuration = Milliseconds(batchMillis.toLong) - - val ssc = new StreamingContext(master, "WordMax2") - ssc.setBatchDuration(batchDuration) - - //warmup(ssc.sc) - - val data = ssc.sc.textFile(file, mapTasks.toInt).persist( - new StorageLevel(false, true, false, 3)) // Memory only, serialized, 3 replicas - println("Data count: " + data.count()) - println("Data count: " + data.count()) - println("Data count: " + data.count()) - - val sentences = new ConstantInputDStream(ssc, data) - ssc.registerInputStream(sentences) - - import WordCount2_ExtraFunctions._ - - val windowedCounts = sentences - .mapPartitions(splitAndCountPartitions) - .reduceByKey(add _, reduceTasks.toInt) - .persist() - .checkpoint(Milliseconds(chkptMillis.toLong)) - 
//.persist(StorageLevel.MEMORY_ONLY, StorageLevel.MEMORY_ONLY_2, Milliseconds(chkptMillis.toLong)) - .reduceByKeyAndWindow(max _, Seconds(10), batchDuration, reduceTasks.toInt) - .persist() - .checkpoint(Milliseconds(chkptMillis.toLong)) - //.persist(StorageLevel.MEMORY_ONLY, StorageLevel.MEMORY_ONLY_2, Milliseconds(chkptMillis.toLong)) - windowedCounts.foreachRDD(r => println("Element count: " + r.count())) - - ssc.start() - - while(true) { Thread.sleep(1000) } - } -} - - diff --git a/streaming/src/main/scala/spark/streaming/util/RawTextHelper.scala b/streaming/src/main/scala/spark/streaming/util/RawTextHelper.scala new file mode 100644 index 0000000000..f31ae39a16 --- /dev/null +++ b/streaming/src/main/scala/spark/streaming/util/RawTextHelper.scala @@ -0,0 +1,98 @@ +package spark.streaming.util + +import spark.SparkContext +import spark.SparkContext._ +import it.unimi.dsi.fastutil.objects.{Object2LongOpenHashMap => OLMap} +import scala.collection.JavaConversions.mapAsScalaMap + +object RawTextHelper { + + /** + * Splits lines and counts the words in them using specialized object-to-long hashmap + * (to avoid boxing-unboxing overhead of Long in java/scala HashMap) + */ + def splitAndCountPartitions(iter: Iterator[String]): Iterator[(String, Long)] = { + val map = new OLMap[String] + var i = 0 + var j = 0 + while (iter.hasNext) { + val s = iter.next() + i = 0 + while (i < s.length) { + j = i + while (j < s.length && s.charAt(j) != ' ') { + j += 1 + } + if (j > i) { + val w = s.substring(i, j) + val c = map.getLong(w) + map.put(w, c + 1) + } + i = j + while (i < s.length && s.charAt(i) == ' ') { + i += 1 + } + } + } + map.toIterator.map{case (k, v) => (k, v)} + } + + /** + * Gets the top k words in terms of word counts. Assumes that each word exists only once + * in the `data` iterator (that is, the counts have been reduced). + */ + def topK(data: Iterator[(String, Long)], k: Int): Iterator[(String, Long)] = { + val taken = new Array[(String, Long)](k) + + var i = 0 + var len = 0 + var done = false + var value: (String, Long) = null + var swap: (String, Long) = null + var count = 0 + + while(data.hasNext) { + value = data.next + if (value != null) { + count += 1 + if (len == 0) { + taken(0) = value + len = 1 + } else if (len < k || value._2 > taken(len - 1)._2) { + if (len < k) { + len += 1 + } + taken(len - 1) = value + i = len - 1 + while(i > 0 && taken(i - 1)._2 < taken(i)._2) { + swap = taken(i) + taken(i) = taken(i-1) + taken(i - 1) = swap + i -= 1 + } + } + } + } + return taken.toIterator + } + + /** + * Warms up the SparkContext in master and slave by running tasks to force JIT kick in + * before real workload starts. + */ + def warmUp(sc: SparkContext) { + for(i <- 0 to 4) { + sc.parallelize(1 to 200000, 1000) + .map(_ % 1331).map(_.toString) + .mapPartitions(splitAndCountPartitions).reduceByKey(_ + _, 10) + .count() + } + } + + def add(v1: Long, v2: Long) = (v1 + v2) + + def subtract(v1: Long, v2: Long) = (v1 - v2) + + def max(v1: Long, v2: Long) = math.max(v1, v2) +} + From b9bfd1456f09f4db281fb9d108a339c59a2e2dda Mon Sep 17 00:00:00 2001 From: Tathagata Das Date: Mon, 12 Nov 2012 21:51:42 +0000 Subject: [PATCH 111/226] Changed default level on calling DStream.persist() to be MEMORY_ONLY_SER. Also changed the persist level of StateDStream to be MEMORY_ONLY_SER. 
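For a user of the API, this change means a bare persist() or cache() on a DStream now keeps serialized data in memory, and a deserialized level has to be requested explicitly. A minimal sketch of the difference, using only constructors and operators that appear elsewhere in this patch set (the "local" master and the socket source on localhost:9999 are illustrative assumptions, not part of the change):

    import spark.storage.StorageLevel
    import spark.streaming._
    import spark.streaming.StreamingContext._

    // Sketch only: any DStream can be persisted; the source here is arbitrary.
    val ssc = new StreamingContext("local", "PersistDefaultExample")
    ssc.setBatchDuration(Seconds(1))

    val lines = ssc.networkTextStream("localhost", 9999, StorageLevel.MEMORY_AND_DISK)
    val counts = lines.flatMap(_.split(" ")).map(w => (w, 1)).reduceByKey(_ + _)

    counts.persist()                              // now equivalent to persist(StorageLevel.MEMORY_ONLY_SER)
    // counts.persist(StorageLevel.MEMORY_ONLY)   // deserialized caching must now be asked for explicitly
    counts.foreachRDD(r => println("Words in this batch: " + r.count()))

    ssc.start()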
--- streaming/src/main/scala/spark/streaming/DStream.scala | 2 +- streaming/src/main/scala/spark/streaming/StateDStream.scala | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/streaming/src/main/scala/spark/streaming/DStream.scala b/streaming/src/main/scala/spark/streaming/DStream.scala index 73096edec5..abf132e45e 100644 --- a/streaming/src/main/scala/spark/streaming/DStream.scala +++ b/streaming/src/main/scala/spark/streaming/DStream.scala @@ -79,7 +79,7 @@ extends Serializable with Logging { this } - def persist(): DStream[T] = persist(StorageLevel.MEMORY_ONLY) + def persist(): DStream[T] = persist(StorageLevel.MEMORY_ONLY_SER) // Turn on the default caching level for this RDD def cache(): DStream[T] = persist() diff --git a/streaming/src/main/scala/spark/streaming/StateDStream.scala b/streaming/src/main/scala/spark/streaming/StateDStream.scala index 0211df1343..cb261808f5 100644 --- a/streaming/src/main/scala/spark/streaming/StateDStream.scala +++ b/streaming/src/main/scala/spark/streaming/StateDStream.scala @@ -23,7 +23,7 @@ class StateDStream[K: ClassManifest, V: ClassManifest, S <: AnyRef : ClassManife rememberPartitioner: Boolean ) extends DStream[(K, S)](parent.ssc) { - super.persist(StorageLevel.MEMORY_ONLY) + super.persist(StorageLevel.MEMORY_ONLY_SER) override def dependencies = List(parent) From 564dd8c3f415746a68f05bde6ea2a0e7a7760b4c Mon Sep 17 00:00:00 2001 From: Tathagata Das Date: Mon, 12 Nov 2012 14:22:05 -0800 Subject: [PATCH 112/226] Speeded up CheckpointSuite --- .../spark/streaming/CheckpointSuite.scala | 26 ++++++++++--------- 1 file changed, 14 insertions(+), 12 deletions(-) diff --git a/streaming/src/test/scala/spark/streaming/CheckpointSuite.scala b/streaming/src/test/scala/spark/streaming/CheckpointSuite.scala index 038827ddb0..0ad57e38b9 100644 --- a/streaming/src/test/scala/spark/streaming/CheckpointSuite.scala +++ b/streaming/src/test/scala/spark/streaming/CheckpointSuite.scala @@ -15,12 +15,16 @@ class CheckpointSuite extends TestSuiteBase with BeforeAndAfter { } after { + + if (ssc != null) ssc.stop() FileUtils.deleteDirectory(new File(checkpointDir)) } + var ssc: StreamingContext = null + override def framework = "CheckpointSuite" - override def batchDuration = Milliseconds(500) + override def batchDuration = Milliseconds(200) override def checkpointDir = "checkpoint" @@ -30,12 +34,12 @@ class CheckpointSuite extends TestSuiteBase with BeforeAndAfter { test("basic stream+rdd recovery") { - assert(batchDuration === Milliseconds(500), "batchDuration for this test must be 1 second") + assert(batchDuration === Milliseconds(200), "batchDuration for this test must be 1 second") assert(checkpointInterval === batchDuration, "checkpointInterval for this test much be same as batchDuration") System.setProperty("spark.streaming.clock", "spark.streaming.util.ManualClock") - val stateStreamCheckpointInterval = Seconds(2) + val stateStreamCheckpointInterval = Seconds(1) // this ensure checkpointing occurs at least once val firstNumBatches = (stateStreamCheckpointInterval.millis / batchDuration.millis) * 2 @@ -110,6 +114,7 @@ class CheckpointSuite extends TestSuiteBase with BeforeAndAfter { runStreamsWithRealDelay(ssc, 4) ssc.stop() System.clearProperty("spark.streaming.manualClock.jump") + ssc = null } test("map and reduceByKey") { @@ -131,9 +136,7 @@ class CheckpointSuite extends TestSuiteBase with BeforeAndAfter { .reduceByKeyAndWindow(_ + _, _ - _, batchDuration * w, batchDuration) .checkpoint(Seconds(2)) } - for (i <- Seq(2, 3, 4)) { - 
testCheckpointedOperation(input, operation, output, i) - } + testCheckpointedOperation(input, operation, output, 3) } test("updateStateByKey") { @@ -148,9 +151,7 @@ class CheckpointSuite extends TestSuiteBase with BeforeAndAfter { .checkpoint(Seconds(2)) .map(t => (t._1, t._2.self)) } - for (i <- Seq(2, 3, 4)) { - testCheckpointedOperation(input, operation, output, i) - } + testCheckpointedOperation(input, operation, output, 3) } @@ -171,7 +172,7 @@ class CheckpointSuite extends TestSuiteBase with BeforeAndAfter { // Do half the computation (half the number of batches), create checkpoint file and quit - val ssc = setupStreams[U, V](input, operation) + ssc = setupStreams[U, V](input, operation) val output = runStreams[V](ssc, initialNumBatches, initialNumExpectedOutputs) verifyOutput[V](output, expectedOutput.take(initialNumBatches), true) Thread.sleep(1000) @@ -182,9 +183,10 @@ class CheckpointSuite extends TestSuiteBase with BeforeAndAfter { " Restarting stream computation " + "\n-------------------------------------------\n" ) - val sscNew = new StreamingContext(checkpointDir) - val outputNew = runStreams[V](sscNew, nextNumBatches, nextNumExpectedOutputs) + ssc = new StreamingContext(checkpointDir) + val outputNew = runStreams[V](ssc, nextNumBatches, nextNumExpectedOutputs) verifyOutput[V](outputNew, expectedOutput.takeRight(nextNumExpectedOutputs), true) + ssc = null } /** From 8a25d530edfa3abcdbe2effcd6bfbe484ac40acb Mon Sep 17 00:00:00 2001 From: Tathagata Das Date: Tue, 13 Nov 2012 02:16:28 -0800 Subject: [PATCH 113/226] Optimized checkpoint writing by reusing FileSystem object. Fixed bug in updating of checkpoint data in DStream where the checkpointed RDDs, upon recovery, were not recognized as checkpointed RDDs and therefore deleted from HDFS. Made InputStreamsSuite more robust to timing delays. 
--- core/src/main/scala/spark/RDD.scala | 6 +- .../scala/spark/streaming/Checkpoint.scala | 73 ++++++++------- .../main/scala/spark/streaming/DStream.scala | 8 +- .../scala/spark/streaming/Scheduler.scala | 28 ++++-- .../spark/streaming/StreamingContext.scala | 10 +-- streaming/src/test/resources/log4j.properties | 2 +- .../spark/streaming/CheckpointSuite.scala | 25 +++--- .../spark/streaming/InputStreamsSuite.scala | 88 ++++++++++--------- 8 files changed, 129 insertions(+), 111 deletions(-) diff --git a/core/src/main/scala/spark/RDD.scala b/core/src/main/scala/spark/RDD.scala index 63048d5df0..6af8c377b5 100644 --- a/core/src/main/scala/spark/RDD.scala +++ b/core/src/main/scala/spark/RDD.scala @@ -189,11 +189,7 @@ abstract class RDD[T: ClassManifest]( def getCheckpointData(): Any = { synchronized { - if (isCheckpointed) { - checkpointFile - } else { - null - } + checkpointFile } } diff --git a/streaming/src/main/scala/spark/streaming/Checkpoint.scala b/streaming/src/main/scala/spark/streaming/Checkpoint.scala index a70fb8f73a..770f7b0cc0 100644 --- a/streaming/src/main/scala/spark/streaming/Checkpoint.scala +++ b/streaming/src/main/scala/spark/streaming/Checkpoint.scala @@ -5,7 +5,7 @@ import spark.{Logging, Utils} import org.apache.hadoop.fs.{FileUtil, Path} import org.apache.hadoop.conf.Configuration -import java.io.{InputStream, ObjectStreamClass, ObjectInputStream, ObjectOutputStream} +import java.io._ class Checkpoint(@transient ssc: StreamingContext, val checkpointTime: Time) @@ -18,8 +18,6 @@ class Checkpoint(@transient ssc: StreamingContext, val checkpointTime: Time) val checkpointDir = ssc.checkpointDir val checkpointInterval = ssc.checkpointInterval - validate() - def validate() { assert(master != null, "Checkpoint.master is null") assert(framework != null, "Checkpoint.framework is null") @@ -27,35 +25,50 @@ class Checkpoint(@transient ssc: StreamingContext, val checkpointTime: Time) assert(checkpointTime != null, "Checkpoint.checkpointTime is null") logInfo("Checkpoint for time " + checkpointTime + " validated") } +} - def save(path: String) { - val file = new Path(path, "graph") - val conf = new Configuration() - val fs = file.getFileSystem(conf) - logDebug("Saving checkpoint for time " + checkpointTime + " to file '" + file + "'") - if (fs.exists(file)) { - val bkFile = new Path(file.getParent, file.getName + ".bk") - FileUtil.copy(fs, file, fs, bkFile, true, true, conf) - logDebug("Moved existing checkpoint file to " + bkFile) +/** + * Convenience class to speed up the writing of graph checkpoint to file + */ +class CheckpointWriter(checkpointDir: String) extends Logging { + val file = new Path(checkpointDir, "graph") + val conf = new Configuration() + var fs = file.getFileSystem(conf) + val maxAttempts = 3 + + def write(checkpoint: Checkpoint) { + // TODO: maybe do this in a different thread from the main stream execution thread + var attempts = 0 + while (attempts < maxAttempts) { + attempts += 1 + try { + logDebug("Saving checkpoint for time " + checkpoint.checkpointTime + " to file '" + file + "'") + if (fs.exists(file)) { + val bkFile = new Path(file.getParent, file.getName + ".bk") + FileUtil.copy(fs, file, fs, bkFile, true, true, conf) + logDebug("Moved existing checkpoint file to " + bkFile) + } + val fos = fs.create(file) + val oos = new ObjectOutputStream(fos) + oos.writeObject(checkpoint) + oos.close() + logInfo("Checkpoint for time " + checkpoint.checkpointTime + " saved to file '" + file + "'") + fos.close() + return + } catch { + case ioe: IOException => + 
logWarning("Error writing checkpoint to file in " + attempts + " attempts", ioe) + } } - val fos = fs.create(file) - val oos = new ObjectOutputStream(fos) - oos.writeObject(this) - oos.close() - fs.close() - logInfo("Checkpoint of streaming context for time " + checkpointTime + " saved successfully to file '" + file + "'") - } - - def toBytes(): Array[Byte] = { - val bytes = Utils.serialize(this) - bytes + logError("Could not write checkpoint for time " + checkpoint.checkpointTime + " to file '" + file + "'") } } -object Checkpoint extends Logging { - def load(path: String): Checkpoint = { +object CheckpointReader extends Logging { + + def read(path: String): Checkpoint = { val fs = new Path(path).getFileSystem(new Configuration()) val attempts = Seq(new Path(path, "graph"), new Path(path, "graph.bk"), new Path(path), new Path(path + ".bk")) @@ -82,17 +95,11 @@ object Checkpoint extends Logging { logError("Error loading checkpoint from file '" + file + "'", e) } } else { - logWarning("Could not load checkpoint from file '" + file + "' as it does not exist") + logWarning("Could not read checkpoint from file '" + file + "' as it does not exist") } }) - throw new Exception("Could not load checkpoint from path '" + path + "'") - } - - def fromBytes(bytes: Array[Byte]): Checkpoint = { - val cp = Utils.deserialize[Checkpoint](bytes) - cp.validate() - cp + throw new Exception("Could not read checkpoint from path '" + path + "'") } } diff --git a/streaming/src/main/scala/spark/streaming/DStream.scala b/streaming/src/main/scala/spark/streaming/DStream.scala index abf132e45e..7e6f73dd7d 100644 --- a/streaming/src/main/scala/spark/streaming/DStream.scala +++ b/streaming/src/main/scala/spark/streaming/DStream.scala @@ -289,6 +289,7 @@ extends Serializable with Logging { */ protected[streaming] def updateCheckpointData(currentTime: Time) { logInfo("Updating checkpoint data for time " + currentTime) + // Get the checkpointed RDDs from the generated RDDs val newCheckpointData = generatedRDDs.filter(_._2.getCheckpointData() != null) .map(x => (x._1, x._2.getCheckpointData())) @@ -334,8 +335,11 @@ extends Serializable with Logging { logInfo("Restoring checkpoint data from " + checkpointData.size + " checkpointed RDDs") checkpointData.foreach { case(time, data) => { - logInfo("Restoring checkpointed RDD for time " + time + " from file") - generatedRDDs += ((time, ssc.sc.objectFile[T](data.toString))) + logInfo("Restoring checkpointed RDD for time " + time + " from file '" + data.toString + "'") + val rdd = ssc.sc.objectFile[T](data.toString) + // Set the checkpoint file name to identify this RDD as a checkpointed RDD by updateCheckpointData() + rdd.checkpointFile = data.toString + generatedRDDs += ((time, rdd)) } } dependencies.foreach(_.restoreCheckpointData()) diff --git a/streaming/src/main/scala/spark/streaming/Scheduler.scala b/streaming/src/main/scala/spark/streaming/Scheduler.scala index de0fb1f3ad..e2dca91179 100644 --- a/streaming/src/main/scala/spark/streaming/Scheduler.scala +++ b/streaming/src/main/scala/spark/streaming/Scheduler.scala @@ -16,8 +16,16 @@ extends Logging { initLogging() val graph = ssc.graph + val concurrentJobs = System.getProperty("spark.stream.concurrentJobs", "1").toInt val jobManager = new JobManager(ssc, concurrentJobs) + + val checkpointWriter = if (ssc.checkpointInterval != null && ssc.checkpointDir != null) { + new CheckpointWriter(ssc.checkpointDir) + } else { + null + } + val clockClass = System.getProperty("spark.streaming.clock", "spark.streaming.util.SystemClock") 
val clock = Class.forName(clockClass).newInstance().asInstanceOf[Clock] val timer = new RecurringTimer(clock, ssc.graph.batchDuration, generateRDDs(_)) @@ -52,19 +60,23 @@ extends Logging { logInfo("Scheduler stopped") } - def generateRDDs(time: Time) { + private def generateRDDs(time: Time) { SparkEnv.set(ssc.env) logInfo("\n-----------------------------------------------------\n") - graph.generateRDDs(time).foreach(submitJob) - logInfo("Generated RDDs for time " + time) + graph.generateRDDs(time).foreach(jobManager.runJob) graph.forgetOldRDDs(time) - if (ssc.checkpointInterval != null && (time - graph.zeroTime).isMultipleOf(ssc.checkpointInterval)) { - ssc.doCheckpoint(time) - } + doCheckpoint(time) + logInfo("Generated RDDs for time " + time) } - def submitJob(job: Job) { - jobManager.runJob(job) + private def doCheckpoint(time: Time) { + if (ssc.checkpointInterval != null && (time - graph.zeroTime).isMultipleOf(ssc.checkpointInterval)) { + val startTime = System.currentTimeMillis() + ssc.graph.updateCheckpointData(time) + checkpointWriter.write(new Checkpoint(ssc, time)) + val stopTime = System.currentTimeMillis() + logInfo("Checkpointing the graph took " + (stopTime - startTime) + " ms") + } } } diff --git a/streaming/src/main/scala/spark/streaming/StreamingContext.scala b/streaming/src/main/scala/spark/streaming/StreamingContext.scala index ab6d6e8dea..ef6a05a392 100644 --- a/streaming/src/main/scala/spark/streaming/StreamingContext.scala +++ b/streaming/src/main/scala/spark/streaming/StreamingContext.scala @@ -28,7 +28,7 @@ final class StreamingContext ( def this(master: String, frameworkName: String, sparkHome: String = null, jars: Seq[String] = Nil) = this(new SparkContext(master, frameworkName, sparkHome, jars), null) - def this(path: String) = this(null, Checkpoint.load(path)) + def this(path: String) = this(null, CheckpointReader.read(path)) def this(cp_ : Checkpoint) = this(null, cp_) @@ -225,14 +225,6 @@ final class StreamingContext ( case e: Exception => logWarning("Error while stopping", e) } } - - def doCheckpoint(currentTime: Time) { - val startTime = System.currentTimeMillis() - graph.updateCheckpointData(currentTime) - new Checkpoint(this, currentTime).save(checkpointDir) - val stopTime = System.currentTimeMillis() - logInfo("Checkpointing the graph took " + (stopTime - startTime) + " ms") - } } diff --git a/streaming/src/test/resources/log4j.properties b/streaming/src/test/resources/log4j.properties index 02fe16866e..33774b463d 100644 --- a/streaming/src/test/resources/log4j.properties +++ b/streaming/src/test/resources/log4j.properties @@ -1,5 +1,5 @@ # Set everything to be logged to the console -log4j.rootCategory=WARN, console +log4j.rootCategory=INFO, console log4j.appender.console=org.apache.log4j.ConsoleAppender log4j.appender.console.layout=org.apache.log4j.PatternLayout log4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss.SSS} %p %c{1}: %m%n diff --git a/streaming/src/test/scala/spark/streaming/CheckpointSuite.scala b/streaming/src/test/scala/spark/streaming/CheckpointSuite.scala index 0ad57e38b9..b3afedf39f 100644 --- a/streaming/src/test/scala/spark/streaming/CheckpointSuite.scala +++ b/streaming/src/test/scala/spark/streaming/CheckpointSuite.scala @@ -24,7 +24,7 @@ class CheckpointSuite extends TestSuiteBase with BeforeAndAfter { override def framework = "CheckpointSuite" - override def batchDuration = Milliseconds(200) + override def batchDuration = Milliseconds(500) override def checkpointDir = "checkpoint" @@ -34,7 +34,7 @@ class 
CheckpointSuite extends TestSuiteBase with BeforeAndAfter { test("basic stream+rdd recovery") { - assert(batchDuration === Milliseconds(200), "batchDuration for this test must be 1 second") + assert(batchDuration === Milliseconds(500), "batchDuration for this test must be 1 second") assert(checkpointInterval === batchDuration, "checkpointInterval for this test much be same as batchDuration") System.setProperty("spark.streaming.clock", "spark.streaming.util.ManualClock") @@ -134,9 +134,9 @@ class CheckpointSuite extends TestSuiteBase with BeforeAndAfter { val operation = (st: DStream[String]) => { st.map(x => (x, 1)) .reduceByKeyAndWindow(_ + _, _ - _, batchDuration * w, batchDuration) - .checkpoint(Seconds(2)) + .checkpoint(batchDuration * 2) } - testCheckpointedOperation(input, operation, output, 3) + testCheckpointedOperation(input, operation, output, 7) } test("updateStateByKey") { @@ -148,14 +148,18 @@ class CheckpointSuite extends TestSuiteBase with BeforeAndAfter { } st.map(x => (x, 1)) .updateStateByKey[RichInt](updateFunc) - .checkpoint(Seconds(2)) + .checkpoint(batchDuration * 2) .map(t => (t._1, t._2.self)) } - testCheckpointedOperation(input, operation, output, 3) + testCheckpointedOperation(input, operation, output, 7) } - - + /** + * Tests a streaming operation under checkpointing, by restart the operation + * from checkpoint file and verifying whether the final output is correct. + * The output is assumed to have come from a reliable queue which an replay + * data as required. + */ def testCheckpointedOperation[U: ClassManifest, V: ClassManifest]( input: Seq[Seq[U]], operation: DStream[U] => DStream[V], @@ -170,8 +174,7 @@ class CheckpointSuite extends TestSuiteBase with BeforeAndAfter { val initialNumExpectedOutputs = initialNumBatches val nextNumExpectedOutputs = expectedOutput.size - initialNumExpectedOutputs - // Do half the computation (half the number of batches), create checkpoint file and quit - + // Do the computation for initial number of batches, create checkpoint file and quit ssc = setupStreams[U, V](input, operation) val output = runStreams[V](ssc, initialNumBatches, initialNumExpectedOutputs) verifyOutput[V](output, expectedOutput.take(initialNumBatches), true) @@ -193,8 +196,6 @@ class CheckpointSuite extends TestSuiteBase with BeforeAndAfter { * Advances the manual clock on the streaming scheduler by given number of batches. * It also wait for the expected amount of time for each batch. 
*/ - - def runStreamsWithRealDelay(ssc: StreamingContext, numBatches: Long) { val clock = ssc.scheduler.clock.asInstanceOf[ManualClock] logInfo("Manual clock before advancing = " + clock.time) diff --git a/streaming/src/test/scala/spark/streaming/InputStreamsSuite.scala b/streaming/src/test/scala/spark/streaming/InputStreamsSuite.scala index 0957748603..3e99440226 100644 --- a/streaming/src/test/scala/spark/streaming/InputStreamsSuite.scala +++ b/streaming/src/test/scala/spark/streaming/InputStreamsSuite.scala @@ -16,24 +16,36 @@ class InputStreamsSuite extends TestSuiteBase with BeforeAndAfter { System.setProperty("spark.streaming.clock", "spark.streaming.util.ManualClock") + val testPort = 9999 + var testServer: TestServer = null + var testDir: File = null + override def checkpointDir = "checkpoint" after { FileUtils.deleteDirectory(new File(checkpointDir)) + if (testServer != null) { + testServer.stop() + testServer = null + } + if (testDir != null && testDir.exists()) { + FileUtils.deleteDirectory(testDir) + testDir = null + } } test("network input stream") { // Start the server - val serverPort = 9999 - val server = new TestServer(9999) - server.start() + testServer = new TestServer(testPort) + testServer.start() // Set up the streaming context and input streams val ssc = new StreamingContext(master, framework) ssc.setBatchDuration(batchDuration) - val networkStream = ssc.networkTextStream("localhost", serverPort, StorageLevel.MEMORY_AND_DISK) + val networkStream = ssc.networkTextStream("localhost", testPort, StorageLevel.MEMORY_AND_DISK) val outputBuffer = new ArrayBuffer[Seq[String]] with SynchronizedBuffer[Seq[String ]] val outputStream = new TestOutputStream(networkStream, outputBuffer) + def output = outputBuffer.flatMap(x => x) ssc.registerOutputStream(outputStream) ssc.start() @@ -41,21 +53,15 @@ class InputStreamsSuite extends TestSuiteBase with BeforeAndAfter { val clock = ssc.scheduler.clock.asInstanceOf[ManualClock] val input = Seq(1, 2, 3, 4, 5) val expectedOutput = input.map(_.toString) + Thread.sleep(1000) for (i <- 0 until input.size) { - server.send(input(i).toString + "\n") + testServer.send(input(i).toString + "\n") Thread.sleep(500) clock.addToTime(batchDuration.milliseconds) } - val startTime = System.currentTimeMillis() - while (outputBuffer.size < expectedOutput.size && System.currentTimeMillis() - startTime < maxWaitTimeMillis) { - logInfo("output.size = " + outputBuffer.size + ", expectedOutput.size = " + expectedOutput.size) - Thread.sleep(100) - } Thread.sleep(1000) - val timeTaken = System.currentTimeMillis() - startTime - assert(timeTaken < maxWaitTimeMillis, "Operation timed out after " + timeTaken + " ms") logInfo("Stopping server") - server.stop() + testServer.stop() logInfo("Stopping context") ssc.stop() @@ -69,24 +75,24 @@ class InputStreamsSuite extends TestSuiteBase with BeforeAndAfter { expectedOutput.foreach(x => logInfo("[" + x.mkString(",") + "]")) logInfo("--------------------------------") - assert(outputBuffer.size === expectedOutput.size) - for (i <- 0 until outputBuffer.size) { - assert(outputBuffer(i).size === 1) - assert(outputBuffer(i).head === expectedOutput(i)) + // Verify whether all the elements received are as expected + // (whether the elements were received one in each interval is not verified) + assert(output.size === expectedOutput.size) + for (i <- 0 until output.size) { + assert(output(i) === expectedOutput(i)) } } test("network input stream with checkpoint") { // Start the server - val serverPort = 9999 - val server = new 
TestServer(9999) - server.start() + testServer = new TestServer(testPort) + testServer.start() // Set up the streaming context and input streams var ssc = new StreamingContext(master, framework) ssc.setBatchDuration(batchDuration) ssc.checkpoint(checkpointDir, checkpointInterval) - val networkStream = ssc.networkTextStream("localhost", serverPort, StorageLevel.MEMORY_AND_DISK) + val networkStream = ssc.networkTextStream("localhost", testPort, StorageLevel.MEMORY_AND_DISK) var outputStream = new TestOutputStream(networkStream, new ArrayBuffer[Seq[String]]) ssc.registerOutputStream(outputStream) ssc.start() @@ -94,7 +100,7 @@ class InputStreamsSuite extends TestSuiteBase with BeforeAndAfter { // Feed data to the server to send to the network receiver var clock = ssc.scheduler.clock.asInstanceOf[ManualClock] for (i <- Seq(1, 2, 3)) { - server.send(i.toString + "\n") + testServer.send(i.toString + "\n") Thread.sleep(100) clock.addToTime(batchDuration.milliseconds) } @@ -109,7 +115,7 @@ class InputStreamsSuite extends TestSuiteBase with BeforeAndAfter { ssc.start() clock = ssc.scheduler.clock.asInstanceOf[ManualClock] for (i <- Seq(4, 5, 6)) { - server.send(i.toString + "\n") + testServer.send(i.toString + "\n") Thread.sleep(100) clock.addToTime(batchDuration.milliseconds) } @@ -120,12 +126,12 @@ class InputStreamsSuite extends TestSuiteBase with BeforeAndAfter { } test("file input stream") { + // Create a temporary directory - val dir = { + testDir = { var temp = File.createTempFile(".temp.", Random.nextInt().toString) temp.delete() temp.mkdirs() - temp.deleteOnExit() logInfo("Created temp dir " + temp) temp } @@ -133,10 +139,9 @@ class InputStreamsSuite extends TestSuiteBase with BeforeAndAfter { // Set up the streaming context and input streams val ssc = new StreamingContext(master, framework) ssc.setBatchDuration(batchDuration) - val filestream = ssc.textFileStream(dir.toString) + val filestream = ssc.textFileStream(testDir.toString) val outputBuffer = new ArrayBuffer[Seq[String]] with SynchronizedBuffer[Seq[String]] def output = outputBuffer.flatMap(x => x) - val outputStream = new TestOutputStream(filestream, outputBuffer) ssc.registerOutputStream(outputStream) ssc.start() @@ -147,16 +152,16 @@ class InputStreamsSuite extends TestSuiteBase with BeforeAndAfter { val expectedOutput = input.map(_.toString) Thread.sleep(1000) for (i <- 0 until input.size) { - FileUtils.writeStringToFile(new File(dir, i.toString), input(i).toString + "\n") - Thread.sleep(100) + FileUtils.writeStringToFile(new File(testDir, i.toString), input(i).toString + "\n") + Thread.sleep(500) clock.addToTime(batchDuration.milliseconds) - Thread.sleep(100) + //Thread.sleep(100) } val startTime = System.currentTimeMillis() - while (output.size < expectedOutput.size && System.currentTimeMillis() - startTime < maxWaitTimeMillis) { - //println("output.size = " + output.size + ", expectedOutput.size = " + expectedOutput.size) + /*while (output.size < expectedOutput.size && System.currentTimeMillis() - startTime < maxWaitTimeMillis) { + logInfo("output.size = " + output.size + ", expectedOutput.size = " + expectedOutput.size) Thread.sleep(100) - } + }*/ Thread.sleep(1000) val timeTaken = System.currentTimeMillis() - startTime assert(timeTaken < maxWaitTimeMillis, "Operation timed out after " + timeTaken + " ms") @@ -165,14 +170,16 @@ class InputStreamsSuite extends TestSuiteBase with BeforeAndAfter { // Verify whether data received by Spark Streaming was as expected logInfo("--------------------------------") - 
logInfo("output.size = " + output.size) + logInfo("output.size = " + outputBuffer.size) logInfo("output") - output.foreach(x => logInfo("[" + x.mkString(",") + "]")) + outputBuffer.foreach(x => logInfo("[" + x.mkString(",") + "]")) logInfo("expected output.size = " + expectedOutput.size) logInfo("expected output") expectedOutput.foreach(x => logInfo("[" + x.mkString(",") + "]")) logInfo("--------------------------------") + // Verify whether all the elements received are as expected + // (whether the elements were received one in each interval is not verified) assert(output.size === expectedOutput.size) for (i <- 0 until output.size) { assert(output(i).size === 1) @@ -182,12 +189,11 @@ class InputStreamsSuite extends TestSuiteBase with BeforeAndAfter { test("file input stream with checkpoint") { // Create a temporary directory - val dir = { + testDir = { var temp = File.createTempFile(".temp.", Random.nextInt().toString) temp.delete() temp.mkdirs() - temp.deleteOnExit() - println("Created temp dir " + temp) + logInfo("Created temp dir " + temp) temp } @@ -195,7 +201,7 @@ class InputStreamsSuite extends TestSuiteBase with BeforeAndAfter { var ssc = new StreamingContext(master, framework) ssc.setBatchDuration(batchDuration) ssc.checkpoint(checkpointDir, checkpointInterval) - val filestream = ssc.textFileStream(dir.toString) + val filestream = ssc.textFileStream(testDir.toString) var outputStream = new TestOutputStream(filestream, new ArrayBuffer[Seq[String]]) ssc.registerOutputStream(outputStream) ssc.start() @@ -204,7 +210,7 @@ class InputStreamsSuite extends TestSuiteBase with BeforeAndAfter { var clock = ssc.scheduler.clock.asInstanceOf[ManualClock] Thread.sleep(1000) for (i <- Seq(1, 2, 3)) { - FileUtils.writeStringToFile(new File(dir, i.toString), i.toString + "\n") + FileUtils.writeStringToFile(new File(testDir, i.toString), i.toString + "\n") Thread.sleep(100) clock.addToTime(batchDuration.milliseconds) } @@ -221,7 +227,7 @@ class InputStreamsSuite extends TestSuiteBase with BeforeAndAfter { clock = ssc.scheduler.clock.asInstanceOf[ManualClock] Thread.sleep(500) for (i <- Seq(4, 5, 6)) { - FileUtils.writeStringToFile(new File(dir, i.toString), i.toString + "\n") + FileUtils.writeStringToFile(new File(testDir, i.toString), i.toString + "\n") Thread.sleep(100) clock.addToTime(batchDuration.milliseconds) } From c3ccd14cf8d7c5a867992758b74922890408541e Mon Sep 17 00:00:00 2001 From: Tathagata Das Date: Tue, 13 Nov 2012 02:43:03 -0800 Subject: [PATCH 114/226] Replaced StateRDD in StateDStream with MapPartitionsRDD. 
--- .../main/scala/spark/streaming/StateDStream.scala | 15 +++------------ 1 file changed, 3 insertions(+), 12 deletions(-) diff --git a/streaming/src/main/scala/spark/streaming/StateDStream.scala b/streaming/src/main/scala/spark/streaming/StateDStream.scala index cb261808f5..b7e4c1c30c 100644 --- a/streaming/src/main/scala/spark/streaming/StateDStream.scala +++ b/streaming/src/main/scala/spark/streaming/StateDStream.scala @@ -7,20 +7,11 @@ import spark.rdd.MapPartitionsRDD import spark.SparkContext._ import spark.storage.StorageLevel - -class StateRDD[U: ClassManifest, T: ClassManifest]( - prev: RDD[T], - f: Iterator[T] => Iterator[U], - rememberPartitioner: Boolean - ) extends MapPartitionsRDD[U, T](prev, f) { - override val partitioner = if (rememberPartitioner) prev.partitioner else None -} - class StateDStream[K: ClassManifest, V: ClassManifest, S <: AnyRef : ClassManifest]( parent: DStream[(K, V)], updateFunc: (Iterator[(K, Seq[V], Option[S])]) => Iterator[(K, S)], partitioner: Partitioner, - rememberPartitioner: Boolean + preservePartitioning: Boolean ) extends DStream[(K, S)](parent.ssc) { super.persist(StorageLevel.MEMORY_ONLY_SER) @@ -53,7 +44,7 @@ class StateDStream[K: ClassManifest, V: ClassManifest, S <: AnyRef : ClassManife updateFuncLocal(i) } val cogroupedRDD = parentRDD.cogroup(prevStateRDD, partitioner) - val stateRDD = new StateRDD(cogroupedRDD, finalFunc, rememberPartitioner) + val stateRDD = cogroupedRDD.mapPartitions(finalFunc, preservePartitioning) //logDebug("Generating state RDD for time " + validTime) return Some(stateRDD) } @@ -78,7 +69,7 @@ class StateDStream[K: ClassManifest, V: ClassManifest, S <: AnyRef : ClassManife } val groupedRDD = parentRDD.groupByKey(partitioner) - val sessionRDD = new StateRDD(groupedRDD, finalFunc, rememberPartitioner) + val sessionRDD = groupedRDD.mapPartitions(finalFunc, preservePartitioning) //logDebug("Generating state RDD for time " + validTime + " (first)") return Some(sessionRDD) } From 26fec8f0b850e7eb0b6cfe63770f2e68cd50441b Mon Sep 17 00:00:00 2001 From: Tathagata Das Date: Tue, 13 Nov 2012 11:05:57 -0800 Subject: [PATCH 115/226] Fixed bug in MappedValuesRDD, and set default graph checkpoint interval to be batch duration. 
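In practice, the second part of this change means the interval argument to StreamingContext.checkpoint can be omitted: when only a directory is given, the graph checkpoint interval falls back to the batch duration once the context is started. A rough sketch, with a made-up HDFS path:

    val ssc = new StreamingContext("local", "CheckpointDefaultExample")
    ssc.setBatchDuration(Seconds(1))

    // Interval omitted: the graph checkpoint interval now defaults to the
    // batch duration (1 second here) when ssc.start() is called.
    ssc.checkpoint("hdfs:///tmp/streaming-checkpoint")

    // Previously the interval had to be given explicitly:
    // ssc.checkpoint("hdfs:///tmp/streaming-checkpoint", Seconds(1))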
--- .../main/scala/spark/streaming/DStream.scala | 2 +- .../streaming/ReducedWindowedDStream.scala | 2 +- .../spark/streaming/StreamingContext.scala | 23 ++++++++++++++++--- 3 files changed, 22 insertions(+), 5 deletions(-) diff --git a/streaming/src/main/scala/spark/streaming/DStream.scala b/streaming/src/main/scala/spark/streaming/DStream.scala index 7e6f73dd7d..76cdf8c464 100644 --- a/streaming/src/main/scala/spark/streaming/DStream.scala +++ b/streaming/src/main/scala/spark/streaming/DStream.scala @@ -230,7 +230,7 @@ extends Serializable with Logging { } if (checkpointInterval != null && (time - zeroTime).isMultipleOf(checkpointInterval)) { newRDD.checkpoint() - logInfo("Marking RDD for time " + time + " for checkpointing at time " + time) + logInfo("Marking RDD " + newRDD + " for time " + time + " for checkpointing at time " + time) } generatedRDDs.put(time, newRDD) Some(newRDD) diff --git a/streaming/src/main/scala/spark/streaming/ReducedWindowedDStream.scala b/streaming/src/main/scala/spark/streaming/ReducedWindowedDStream.scala index b07d51fa6b..8b484e6acf 100644 --- a/streaming/src/main/scala/spark/streaming/ReducedWindowedDStream.scala +++ b/streaming/src/main/scala/spark/streaming/ReducedWindowedDStream.scala @@ -57,7 +57,7 @@ class ReducedWindowedDStream[K: ClassManifest, V: ClassManifest]( override def checkpoint(interval: Time): DStream[(K, V)] = { super.checkpoint(interval) - reducedStream.checkpoint(interval) + //reducedStream.checkpoint(interval) this } diff --git a/streaming/src/main/scala/spark/streaming/StreamingContext.scala b/streaming/src/main/scala/spark/streaming/StreamingContext.scala index ef6a05a392..7a9a71f303 100644 --- a/streaming/src/main/scala/spark/streaming/StreamingContext.scala +++ b/streaming/src/main/scala/spark/streaming/StreamingContext.scala @@ -85,7 +85,7 @@ final class StreamingContext ( graph.setRememberDuration(duration) } - def checkpoint(dir: String, interval: Time) { + def checkpoint(dir: String, interval: Time = null) { if (dir != null) { sc.setCheckpointDir(StreamingContext.getSparkCheckpointDir(dir)) checkpointDir = dir @@ -186,12 +186,29 @@ final class StreamingContext ( graph.addOutputStream(outputStream) } + def validate() { + assert(graph != null, "Graph is null") + graph.validate() + + assert( + checkpointDir == null || checkpointInterval != null, + "Checkpoint directory has been set, but the graph checkpointing interval has " + + "not been set. Please use StreamingContext.checkpoint() to set the interval." + ) + + + } + + /** * This function starts the execution of the streams. 
*/ def start() { - assert(graph != null, "Graph is null") - graph.validate() + if (checkpointDir != null && checkpointInterval == null && graph != null) { + checkpointInterval = graph.batchDuration + } + + validate() val networkInputStreams = graph.getInputStreams().filter(s => s match { case n: NetworkInputDStream[_] => true From b6f7ba813e93916dad9dbb0f06819362a5fb7cf7 Mon Sep 17 00:00:00 2001 From: Denny Date: Tue, 13 Nov 2012 13:15:32 -0800 Subject: [PATCH 116/226] change import for example function --- .../main/scala/spark/streaming/examples/KafkaWordCount.scala | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/streaming/src/main/scala/spark/streaming/examples/KafkaWordCount.scala b/streaming/src/main/scala/spark/streaming/examples/KafkaWordCount.scala index 1e92cbb210..c85ac8e984 100644 --- a/streaming/src/main/scala/spark/streaming/examples/KafkaWordCount.scala +++ b/streaming/src/main/scala/spark/streaming/examples/KafkaWordCount.scala @@ -3,7 +3,7 @@ package spark.streaming.examples import spark.streaming._ import spark.streaming.StreamingContext._ import spark.storage.StorageLevel -import WordCount2_ExtraFunctions._ +import spark.streaming.util.RawTextHelper._ object KafkaWordCount { def main(args: Array[String]) { From d39ac5fbc1b374b420f0a72125b35ea047b8cabb Mon Sep 17 00:00:00 2001 From: Patrick Wendell Date: Tue, 13 Nov 2012 21:11:55 -0800 Subject: [PATCH 117/226] Streaming programming guide. STREAMING-2 #resolve --- docs/_layouts/global.html | 1 + docs/index.md | 1 + docs/streaming-programming-guide.md | 163 ++++++++++++++++++++++++++++ 3 files changed, 165 insertions(+) create mode 100644 docs/streaming-programming-guide.md diff --git a/docs/_layouts/global.html b/docs/_layouts/global.html index 7244ab6fc9..d656b3e3de 100755 --- a/docs/_layouts/global.html +++ b/docs/_layouts/global.html @@ -47,6 +47,7 @@
                                <li><a href="quick-start.html">Quick Start</a></li>
                                <li><a href="scala-programming-guide.html">Scala</a></li>
                                <li><a href="java-programming-guide.html">Java</a></li>
+                               <li><a href="streaming-programming-guide.html">Spark Streaming (Alpha)</a></li>
diff --git a/docs/index.md b/docs/index.md index ed9953a590..560811ade8 100644 --- a/docs/index.md +++ b/docs/index.md @@ -59,6 +59,7 @@ of `project/SparkBuild.scala`, then rebuilding Spark (`sbt/sbt clean compile`). * [Quick Start](quick-start.html): a quick introduction to the Spark API; start here! * [Spark Programming Guide](scala-programming-guide.html): an overview of Spark concepts, and details on the Scala API * [Java Programming Guide](java-programming-guide.html): using Spark from Java +* [Streaming Guide](streaming-programming-guide.html): an API preview of Spark Streaming **Deployment guides:** diff --git a/docs/streaming-programming-guide.md b/docs/streaming-programming-guide.md new file mode 100644 index 0000000000..90916545bc --- /dev/null +++ b/docs/streaming-programming-guide.md @@ -0,0 +1,163 @@ +--- +layout: global +title: Streaming (Alpha) Programming Guide +--- +# Initializing Spark Streaming +The first thing a Spark Streaming program must do is create a `StreamingContext` object, which tells Spark how to access a cluster. A `StreamingContext` can be created from an existing `SparkContext`, or directly: + +{% highlight scala %} +new StreamingContext(master, jobName, [sparkHome], [jars]) +new StreamingContext(sparkContext) +{% endhighlight %} + +Once a context is instantiated, the batch interval must be set: + +{% highlight scala %} +context.setBatchDuration(Milliseconds(2000)) +{% endhighlight %} + + +# DStreams - Discretized Streams +The primary abstraction in Spark Streaming is a DStream. A DStream represents a distributed collection which is computed periodically according to a specified batch interval. DStreams can be chained together to create complex chains of transformations on streaming data. DStreams can be created by operating on existing DStreams or from an input source. To create DStreams from an input source, use the StreamingContext: + +{% highlight scala %} +context.networkStream(host, port) // A stream that reads from a socket +context.flumeStream(hosts, ports) // A stream populated by a Flume flow +{% endhighlight %} + +# DStream Operators +Once an input stream has been created, you can transform it using _stream operators_. Most of these operators return new DStreams which you can further transform. Eventually, you'll need to call an _output operator_, which forces evaluation of the stream by writing data out to an external source. + +## Transformations + +DStreams support many of the transformations available on normal Spark RDDs:
| Transformation | Meaning |
| --- | --- |
| `map(func)` | Return a new stream formed by passing each element of the source through a function `func`. |
| `filter(func)` | Return a new stream formed by selecting those elements of the source on which `func` returns true. |
| `flatMap(func)` | Similar to map, but each input item can be mapped to 0 or more output items (so `func` should return a Seq rather than a single item). |
| `mapPartitions(func)` | Similar to map, but runs separately on each partition (block) of the DStream, so `func` must be of type `Iterator[T] => Iterator[U]` when running on a DStream of type T. |
| `union(otherStream)` | Return a new stream that contains the union of the elements in the source stream and the argument. |
| `groupByKey([numTasks])` | When called on a stream of (K, V) pairs, returns a stream of (K, Seq[V]) pairs. Note: by default, this uses only 8 parallel tasks to do the grouping; you can pass an optional `numTasks` argument to set a different number of tasks. |
| `reduceByKey(func, [numTasks])` | When called on a stream of (K, V) pairs, returns a stream of (K, V) pairs where the values for each key are aggregated using the given reduce function. Like in groupByKey, the number of reduce tasks is configurable through an optional second argument. |
| `join(otherStream, [numTasks])` | When called on streams of type (K, V) and (K, W), returns a stream of (K, (V, W)) pairs with all pairs of elements for each key. |
| `cogroup(otherStream, [numTasks])` | When called on streams of type (K, V) and (K, W), returns a stream of (K, Seq[V], Seq[W]) tuples. This operation is also called `groupWith`. |

DStreams also support the following additional transformations:

| Transformation | Meaning |
| --- | --- |
| `reduce(func)` | Create a new single-element stream by aggregating the elements of the stream using a function `func` (which takes two arguments and returns one). The function should be associative so that it can be computed correctly in parallel. |
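As a concrete illustration, a per-batch word count can be expressed by chaining a few of these operators. The sketch below assumes `lines` is a DStream of text lines obtained from one of the input sources shown earlier:

{% highlight scala %}
// Split each line into words, pair each word with a count of 1,
// and sum the counts within every batch interval.
val words = lines.flatMap(_.split(" "))
val wordCounts = words.map(word => (word, 1)).reduceByKey(_ + _)
{% endhighlight %}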
    + + +## Windowed Transformations +Spark streaming features windowed computations, which allow you to report statistics over a sliding window of data. All window functions take a windowTime, which represents the width of the window and a slideTime, which represents the frequency during which the window is calculated. + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Transformation Meaning
window(windowTime, slideTime) Return a new stream which is computed based on windowed batches of the source stream. windowTime is the width of the window and slideTime is the interval at which the window is computed. Both times must be multiples of the batch interval. +
    countByWindow(windowTime, slideTime) Return a sliding count of elements in the stream. windowTime and slideTime are exactly as defined in window(). +
    reduceByWindow(func, windowTime, slideTime) Return a new single-element stream, created by aggregating elements in the stream over a sliding interval using func. The function should be associative so that it can be computed correctly in parallel. windowTime and slideTime are exactly as defined in window(). +
    groupByKeyAndWindow(windowTime, slideTime, [numTasks]) + When called on a stream of (K, V) pairs, returns a stream of (K, Seq[V]) pairs over a sliding window.
    +Note: By default, this uses only 8 parallel tasks to do the grouping. You can pass an optional numTasks argument to set a different number of tasks. windowTime and slideTime are exactly as defined in window(). +
reduceByKeyAndWindow(func, windowTime, slideTime, [numTasks]) When called on a stream of (K, V) pairs, returns a stream of (K, V) pairs where the values for each key are aggregated using the given reduce function over batches within a sliding window. Like in groupByKeyAndWindow, the number of reduce tasks is configurable through an optional numTasks argument. + windowTime and slideTime are exactly as defined in window(). +
countByKeyAndWindow(windowTime, slideTime, [numTasks]) When called on a stream of (K, V) pairs, returns a stream of (K, Int) pairs where the value for each key is its count within a sliding window. Like in groupByKeyAndWindow, the number of reduce tasks is configurable through an optional numTasks argument. + windowTime and slideTime are exactly as defined in window(). +
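To make the windowed operators concrete, the word-count sketch above can be extended to keep counts over a sliding window. This is only a sketch: the 30-second window and 10-second slide are arbitrary example values (both must be multiples of the batch interval), and it assumes the `words` DStream from the earlier snippet.

{% highlight scala %}
// Counts of each word over the last 30 seconds, recomputed every 10 seconds.
val windowedCounts = words.map(word => (word, 1))
                          .window(Seconds(30), Seconds(10))
                          .reduceByKey(_ + _)

// Total number of elements (words) seen in the same sliding window.
val wordsPerWindow = words.countByWindow(Seconds(30), Seconds(10))
{% endhighlight %}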
    + + +## Output Operators +When an output operator is called, it triggers the computation of a stream. Currently the following output operators are defined: + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Operator Meaning
    foreachRDD(func) The fundamental output operator. Applies a function, func, to each RDD generated from the stream. This function should have side effects, such as printing output, saving the RDD to external files, or writing it over the network to an external system.
    print() Prints the contents of this DStream on the driver. At each interval, this will take at most ten elements from the DStream's RDD and print them.
    saveAsObjectFile(prefix, [suffix]) Save this DStream's contents as a SequenceFile of serialized objects. The file name at each batch interval is calculated based on prefix and suffix: "prefix-TIME_IN_MS[.suffix]". +
saveAsTextFile(prefix, suffix) Save this DStream's contents as text files. The file name at each batch interval is calculated based on prefix and suffix: "prefix-TIME_IN_MS[.suffix]".
    saveAsHadoopFiles(prefix, suffix) Save this DStream's contents as a Hadoop file. The file name at each batch interval is calculated based on prefix and suffix: "prefix-TIME_IN_MS[.suffix]".
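As a brief illustration of the output operators, the windowed counts from the earlier sketch can be printed on the driver or handed to an arbitrary function for each generated RDD. The logging message and the use of rdd.count() below are purely illustrative.

{% highlight scala %}
// Print a few of the windowed counts at each interval.
windowedCounts.print()

// Apply an arbitrary action to every RDD generated by the stream,
// e.g. report how many distinct words were seen in the window.
windowedCounts.foreachRDD((rdd, time) =>
  println("Distinct words at " + time + ": " + rdd.count()))
{% endhighlight %}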
    + From 720cb0f46736105d200128f13081489281dbe118 Mon Sep 17 00:00:00 2001 From: Patrick Wendell Date: Wed, 14 Nov 2012 18:08:02 -0800 Subject: [PATCH 118/226] A "streaming page view" example. --- .../clickstream/PageViewGenerator.scala | 85 +++++++++++++++++++ .../examples/clickstream/PageViewStream.scala | 85 +++++++++++++++++++ 2 files changed, 170 insertions(+) create mode 100644 streaming/src/main/scala/spark/streaming/examples/clickstream/PageViewGenerator.scala create mode 100644 streaming/src/main/scala/spark/streaming/examples/clickstream/PageViewStream.scala diff --git a/streaming/src/main/scala/spark/streaming/examples/clickstream/PageViewGenerator.scala b/streaming/src/main/scala/spark/streaming/examples/clickstream/PageViewGenerator.scala new file mode 100644 index 0000000000..4c6e08bc74 --- /dev/null +++ b/streaming/src/main/scala/spark/streaming/examples/clickstream/PageViewGenerator.scala @@ -0,0 +1,85 @@ +package spark.streaming.examples.clickstream + +import java.net.{InetAddress,ServerSocket,Socket,SocketException} +import java.io.{InputStreamReader, BufferedReader, PrintWriter} +import util.Random + +/** Represents a page view on a website with associated dimension data.*/ +class PageView(val url : String, val status : Int, val zipCode : Int, val userID : Int) { + override def toString() : String = { + "%s\t%s\t%s\t%s\n".format(url, status, zipCode, userID) + } +} +object PageView { + def fromString(in : String) : PageView = { + val parts = in.split("\t") + new PageView(parts(0), parts(1).toInt, parts(2).toInt, parts(3).toInt) + } +} + +/** Generates streaming events to simulate page views on a website. + * + * This should be used in tandem with PageViewStream.scala. Example: + * $ ./run spark.streaming.examples.clickstream.PageViewGenerator 44444 10 + * $ ./run spark.streaming.examples.clickstream.PageViewStream errorRatePerZipCode localhost 44444 + * */ +object PageViewGenerator { + val pages = Map("http://foo.com/" -> .7, + "http://foo.com/news" -> 0.2, + "http://foo.com/contact" -> .1) + val httpStatus = Map(200 -> .95, + 404 -> .05) + val userZipCode = Map(94709 -> .5, + 94117 -> .5) + val userID = Map((1 to 100).map(_ -> .01):_*) + + + def pickFromDistribution[T](inputMap : Map[T, Double]) : T = { + val rand = new Random().nextDouble() + var total = 0.0 + for ((item, prob) <- inputMap) { + total = total + prob + if (total > rand) { + return item + } + } + return inputMap.take(1).head._1 // Shouldn't get here if probabilities add up to 1.0 + } + + def getNextClickEvent() : String = { + val id = pickFromDistribution(userID) + val page = pickFromDistribution(pages) + val status = pickFromDistribution(httpStatus) + val zipCode = pickFromDistribution(userZipCode) + new PageView(page, status, zipCode, id).toString() + } + + def main(args : Array[String]) { + if (args.length != 2) { + System.err.println("Usage: PageViewGenerator ") + System.exit(1) + } + val port = args(0).toInt + val viewsPerSecond = args(1).toFloat + val sleepDelayMs = (1000.0 / viewsPerSecond).toInt + val listener = new ServerSocket(port) + println("Listening on port: " + port) + + while (true) { + val socket = listener.accept() + new Thread() { + override def run = { + println("Got client connected from: " + socket.getInetAddress) + val out = new PrintWriter(socket.getOutputStream(), true) + + while (true) { + Thread.sleep(sleepDelayMs) + out.write(getNextClickEvent()) + out.flush() + } + socket.close() + } + }.start() + } + } +} diff --git 
a/streaming/src/main/scala/spark/streaming/examples/clickstream/PageViewStream.scala b/streaming/src/main/scala/spark/streaming/examples/clickstream/PageViewStream.scala new file mode 100644 index 0000000000..1a51fb66cd --- /dev/null +++ b/streaming/src/main/scala/spark/streaming/examples/clickstream/PageViewStream.scala @@ -0,0 +1,85 @@ +package spark.streaming.examples.clickstream + +import spark.streaming.{Seconds, StreamingContext} +import spark.streaming.StreamingContext._ +import spark.SparkContext._ + +/** Analyses a streaming dataset of web page views. This class demonstrates several types of + * operators available in Spark streaming. + * + * This should be used in tandem with PageViewStream.scala. Example: + * $ ./run spark.streaming.examples.clickstream.PageViewGenerator 44444 10 + * $ ./run spark.streaming.examples.clickstream.PageViewStream errorRatePerZipCode localhost 44444 + * */ +object PageViewStream { + def main(args: Array[String]) { + if (args.length != 3) { + System.err.println("Usage: PageViewStream ") + System.err.println(" must be one of pageCounts, slidingPageCounts," + + " errorRatePerZipCode, activeUserCount, popularUsersSeen") + System.exit(1) + } + val metric = args(0) + val host = args(1) + val port = args(2).toInt + + // Create the context and set the batch size + val ssc = new StreamingContext("local[2]", "PageViewStream") + ssc.setBatchDuration(Seconds(1)) + + // Create a NetworkInputDStream on target host:port and convert each line to a PageView + val pageViews = ssc.networkTextStream(host, port) + .flatMap(_.split("\n")) + .map(PageView.fromString(_)) + + // Return a count of views per URL seen in each batch + val pageCounts = pageViews.map(view => ((view.url, 1))).countByKey() + + // Return a sliding window of page views per URL in the last ten seconds + val slidingPageCounts = pageViews.map(view => ((view.url, 1))) + .window(Seconds(10), Seconds(2)) + .countByKey() + + + // Return the rate of error pages (a non 200 status) in each zip code over the last 30 seconds + val statusesPerZipCode = pageViews.window(Seconds(30), Seconds(2)) + .map(view => ((view.zipCode, view.status))) + .groupByKey() + val errorRatePerZipCode = statusesPerZipCode.map{ + case(zip, statuses) => + val normalCount = statuses.filter(_ == 200).size + val errorCount = statuses.size - normalCount + val errorRatio = errorCount.toFloat / statuses.size + if (errorRatio > 0.05) {"%s: **%s**".format(zip, errorRatio)} + else {"%s: %s".format(zip, errorRatio)} + } + + // Return the number unique users in last 15 seconds + val activeUserCount = pageViews.window(Seconds(15), Seconds(2)) + .map(view => (view.userID, 1)) + .groupByKey() + .count() + .map("Unique active users: " + _) + + // An external dataset we want to join to this stream + val userList = ssc.sc.parallelize( + Map(1 -> "Patrick Wendell", 2->"Reynold Xin", 3->"Matei Zaharia").toSeq) + + metric match { + case "pageCounts" => pageCounts.print() + case "slidingPageCounts" => slidingPageCounts.print() + case "errorRatePerZipCode" => errorRatePerZipCode.print() + case "activeUserCount" => activeUserCount.print() + case "popularUsersSeen" => + // Look for users in our existing dataset and print it out if we have a match + pageViews.map(view => (view.userID, 1)) + .foreachRDD((rdd, time) => rdd.join(userList) + .map(_._2._2) + .take(10) + .foreach(u => println("Saw user %s at time %s".format(u, time)))) + case _ => println("Invalid metric entered: " + metric) + } + + ssc.start() + } +} From 10c1abcb6ac42b248818fa585a9ad49c2fa4851a Mon 
Sep 17 00:00:00 2001 From: Tathagata Das Date: Sat, 17 Nov 2012 17:27:00 -0800 Subject: [PATCH 119/226] Fixed checkpointing bug in CoGroupedRDD. CoGroupSplits kept around the RDD splits of its parent RDDs, thus checkpointing its parents did not release the references to the parent splits. --- .../main/scala/spark/rdd/CoGroupedRDD.scala | 18 +++++++++--- .../test/scala/spark/CheckpointSuite.scala | 28 +++++++++++++++++++ .../main/scala/spark/streaming/DStream.scala | 4 +-- .../scala/spark/streaming/DStreamGraph.scala | 2 +- 4 files changed, 45 insertions(+), 7 deletions(-) diff --git a/core/src/main/scala/spark/rdd/CoGroupedRDD.scala b/core/src/main/scala/spark/rdd/CoGroupedRDD.scala index a313ebcbe8..94ef1b56e8 100644 --- a/core/src/main/scala/spark/rdd/CoGroupedRDD.scala +++ b/core/src/main/scala/spark/rdd/CoGroupedRDD.scala @@ -12,9 +12,20 @@ import spark.RDD import spark.ShuffleDependency import spark.SparkEnv import spark.Split +import java.io.{ObjectOutputStream, IOException} private[spark] sealed trait CoGroupSplitDep extends Serializable -private[spark] case class NarrowCoGroupSplitDep(rdd: RDD[_], split: Split) extends CoGroupSplitDep +private[spark] case class NarrowCoGroupSplitDep(rdd: RDD[_], splitIndex: Int, var split: Split = null) + extends CoGroupSplitDep { + @throws(classOf[IOException]) + private def writeObject(oos: ObjectOutputStream) { + rdd.synchronized { + // Update the reference to parent split at the time of task serialization + split = rdd.splits(splitIndex) + oos.defaultWriteObject() + } + } +} private[spark] case class ShuffleCoGroupSplitDep(shuffleId: Int) extends CoGroupSplitDep private[spark] @@ -55,7 +66,6 @@ class CoGroupedRDD[K](@transient var rdds: Seq[RDD[(_, _)]], part: Partitioner) @transient var splits_ : Array[Split] = { - val firstRdd = rdds.head val array = new Array[Split](part.numPartitions) for (i <- 0 until array.size) { array(i) = new CoGroupSplit(i, rdds.zipWithIndex.map { case (r, j) => @@ -63,7 +73,7 @@ class CoGroupedRDD[K](@transient var rdds: Seq[RDD[(_, _)]], part: Partitioner) case s: ShuffleDependency[_, _] => new ShuffleCoGroupSplitDep(s.shuffleId): CoGroupSplitDep case _ => - new NarrowCoGroupSplitDep(r, r.splits(i)): CoGroupSplitDep + new NarrowCoGroupSplitDep(r, i): CoGroupSplitDep } }.toList) } @@ -82,7 +92,7 @@ class CoGroupedRDD[K](@transient var rdds: Seq[RDD[(_, _)]], part: Partitioner) map.getOrElseUpdate(k, Array.fill(numRdds)(new ArrayBuffer[Any])) } for ((dep, depNum) <- split.deps.zipWithIndex) dep match { - case NarrowCoGroupSplitDep(rdd, itsSplit) => { + case NarrowCoGroupSplitDep(rdd, itsSplitIndex, itsSplit) => { // Read them from the parent for ((k, v) <- rdd.iterator(itsSplit)) { getSeq(k.asInstanceOf[K])(depNum) += v diff --git a/core/src/test/scala/spark/CheckpointSuite.scala b/core/src/test/scala/spark/CheckpointSuite.scala index 57dc43ddac..8622ce92aa 100644 --- a/core/src/test/scala/spark/CheckpointSuite.scala +++ b/core/src/test/scala/spark/CheckpointSuite.scala @@ -6,6 +6,7 @@ import rdd.{BlockRDD, CoalescedRDD, MapPartitionsWithSplitRDD} import spark.SparkContext._ import storage.StorageLevel import java.util.concurrent.Semaphore +import collection.mutable.ArrayBuffer class CheckpointSuite extends FunSuite with BeforeAndAfter with Logging { initLogging() @@ -92,6 +93,33 @@ class CheckpointSuite extends FunSuite with BeforeAndAfter with Logging { val rdd2 = sc.makeRDD(5 to 6, 4).map(x => (x % 2, 1)) testCheckpointing(rdd1 => rdd1.map(x => (x % 2, 1)).cogroup(rdd2)) testCheckpointing(rdd1 => rdd1.map(x => (x % 
2, x)).join(rdd2)) + + // Special test to make sure that the CoGroupSplit of CoGroupedRDD do not + // hold on to the splits of its parent RDDs, as the splits of parent RDDs + // may change while checkpointing. Rather the splits of parent RDDs must + // be fetched at the time of serialization to ensure the latest splits to + // be sent along with the task. + + val add = (x: (Seq[Int], Seq[Int])) => (x._1 ++ x._2).reduce(_ + _) + + val ones = sc.parallelize(1 to 100, 1).map(x => (x,1)) + val reduced = ones.reduceByKey(_ + _) + val seqOfCogrouped = new ArrayBuffer[RDD[(Int, Int)]]() + seqOfCogrouped += reduced.cogroup(ones).mapValues[Int](add) + for(i <- 1 to 10) { + seqOfCogrouped += seqOfCogrouped.last.cogroup(ones).mapValues(add) + } + val finalCogrouped = seqOfCogrouped.last + val intermediateCogrouped = seqOfCogrouped(5) + + val bytesBeforeCheckpoint = Utils.serialize(finalCogrouped.splits) + intermediateCogrouped.checkpoint() + finalCogrouped.count() + sleep(intermediateCogrouped) + val bytesAfterCheckpoint = Utils.serialize(finalCogrouped.splits) + println("Before = " + bytesBeforeCheckpoint.size + ", after = " + bytesAfterCheckpoint.size) + assert(bytesAfterCheckpoint.size < bytesBeforeCheckpoint.size, + "CoGroupedSplits still holds on to the splits of its parent RDDs") } /** diff --git a/streaming/src/main/scala/spark/streaming/DStream.scala b/streaming/src/main/scala/spark/streaming/DStream.scala index 76cdf8c464..13770aa8fd 100644 --- a/streaming/src/main/scala/spark/streaming/DStream.scala +++ b/streaming/src/main/scala/spark/streaming/DStream.scala @@ -226,11 +226,11 @@ extends Serializable with Logging { case Some(newRDD) => if (storageLevel != StorageLevel.NONE) { newRDD.persist(storageLevel) - logInfo("Persisting RDD for time " + time + " to " + storageLevel + " at time " + time) + logInfo("Persisting RDD " + newRDD.id + " for time " + time + " to " + storageLevel + " at time " + time) } if (checkpointInterval != null && (time - zeroTime).isMultipleOf(checkpointInterval)) { newRDD.checkpoint() - logInfo("Marking RDD " + newRDD + " for time " + time + " for checkpointing at time " + time) + logInfo("Marking RDD " + newRDD.id + " for time " + time + " for checkpointing at time " + time) } generatedRDDs.put(time, newRDD) Some(newRDD) diff --git a/streaming/src/main/scala/spark/streaming/DStreamGraph.scala b/streaming/src/main/scala/spark/streaming/DStreamGraph.scala index 246522838a..bd8c033eab 100644 --- a/streaming/src/main/scala/spark/streaming/DStreamGraph.scala +++ b/streaming/src/main/scala/spark/streaming/DStreamGraph.scala @@ -105,7 +105,7 @@ final private[streaming] class DStreamGraph extends Serializable with Logging { private[streaming] def validate() { this.synchronized { assert(batchDuration != null, "Batch duration has not been set") - assert(batchDuration > Milliseconds(100), "Batch duration of " + batchDuration + " is very low") + //assert(batchDuration >= Milliseconds(100), "Batch duration of " + batchDuration + " is very low") assert(getOutputStreams().size > 0, "No output streams registered, so nothing to execute") } } From 6757ed6a40121ee97a15506af8717bb8d97cf1ec Mon Sep 17 00:00:00 2001 From: Denny Date: Mon, 19 Nov 2012 09:42:35 -0800 Subject: [PATCH 120/226] Comment out code for fault-tolerance. 
--- .../streaming/input/KafkaInputDStream.scala | 35 ++++++++++--------- 1 file changed, 18 insertions(+), 17 deletions(-) diff --git a/streaming/src/main/scala/spark/streaming/input/KafkaInputDStream.scala b/streaming/src/main/scala/spark/streaming/input/KafkaInputDStream.scala index 318537532c..3685d6c666 100644 --- a/streaming/src/main/scala/spark/streaming/input/KafkaInputDStream.scala +++ b/streaming/src/main/scala/spark/streaming/input/KafkaInputDStream.scala @@ -15,8 +15,10 @@ import spark.storage.StorageLevel // Key for a specific Kafka Partition: (broker, topic, group, part) case class KafkaPartitionKey(brokerId: Int, topic: String, groupId: String, partId: Int) +// NOT USED - Originally intended for fault-tolerance // Metadata for a Kafka Stream that it sent to the Master case class KafkaInputDStreamMetadata(timestamp: Long, data: Map[KafkaPartitionKey, Long]) +// NOT USED - Originally intended for fault-tolerance // Checkpoint data specific to a KafkaInputDstream case class KafkaDStreamCheckpointData(kafkaRdds: HashMap[Time, Any], savedOffsets: Map[KafkaPartitionKey, Long]) extends DStreamCheckpointData(kafkaRdds) @@ -45,9 +47,13 @@ class KafkaInputDStream[T: ClassManifest]( // Metadata that keeps track of which messages have already been consumed. var savedOffsets = HashMap[Long, Map[KafkaPartitionKey, Long]]() + + /* NOT USED - Originally intended for fault-tolerance + // In case of a failure, the offets for a particular timestamp will be restored. @transient var restoredOffsets : Map[KafkaPartitionKey, Long] = null + override protected[streaming] def addMetadata(metadata: Any) { metadata match { case x : KafkaInputDStreamMetadata => @@ -80,18 +86,11 @@ class KafkaInputDStream[T: ClassManifest]( restoredOffsets = x.savedOffsets logInfo("Restored KafkaDStream offsets: " + savedOffsets) } - } + } */ def createReceiver(): NetworkReceiver[T] = { - // We have restored from a checkpoint, use the restored offsets - if (restoredOffsets != null) { - new KafkaReceiver(id, host, port, groupId, topics, restoredOffsets, storageLevel) + new KafkaReceiver(id, host, port, groupId, topics, initialOffsets, storageLevel) .asInstanceOf[NetworkReceiver[T]] - } else { - new KafkaReceiver(id, host, port, groupId, topics, initialOffsets, storageLevel) - .asInstanceOf[NetworkReceiver[T]] - } - } } @@ -103,7 +102,7 @@ class KafkaReceiver(streamId: Int, host: String, port: Int, groupId: String, val ZK_TIMEOUT = 10000 // Handles pushing data into the BlockManager - lazy protected val dataHandler = new KafkaDataHandler(this, storageLevel) + lazy protected val dataHandler = new DataHandler(this, storageLevel) // Keeps track of the current offsets. 
Maps from (broker, topic, group, part) -> Offset lazy val offsets = HashMap[KafkaPartitionKey, Long]() // Connection to Kafka @@ -181,13 +180,15 @@ class KafkaReceiver(streamId: Int, host: String, port: Int, groupId: String, } } - class KafkaDataHandler(receiver: KafkaReceiver, storageLevel: StorageLevel) - extends DataHandler[Any](receiver, storageLevel) { + // NOT USED - Originally intended for fault-tolerance + // class KafkaDataHandler(receiver: KafkaReceiver, storageLevel: StorageLevel) + // extends DataHandler[Any](receiver, storageLevel) { - override def createBlock(blockId: String, iterator: Iterator[Any]) : Block = { - // Creates a new Block with Kafka-specific Metadata - new Block(blockId, iterator, KafkaInputDStreamMetadata(System.currentTimeMillis, offsets.toMap)) - } + // override def createBlock(blockId: String, iterator: Iterator[Any]) : Block = { + // // Creates a new Block with Kafka-specific Metadata + // new Block(blockId, iterator, KafkaInputDStreamMetadata(System.currentTimeMillis, offsets.toMap)) + // } - } + // } + } From 5e2b0a3bf60dead1ac7946c9984b067c926c2904 Mon Sep 17 00:00:00 2001 From: Denny Date: Mon, 19 Nov 2012 10:17:58 -0800 Subject: [PATCH 121/226] Added Kafka Wordcount producer --- .../streaming/examples/KafkaWordCount.scala | 72 +++++++++++++------ .../streaming/input/KafkaInputDStream.scala | 5 +- 2 files changed, 52 insertions(+), 25 deletions(-) diff --git a/streaming/src/main/scala/spark/streaming/examples/KafkaWordCount.scala b/streaming/src/main/scala/spark/streaming/examples/KafkaWordCount.scala index c85ac8e984..12e3f49fe9 100644 --- a/streaming/src/main/scala/spark/streaming/examples/KafkaWordCount.scala +++ b/streaming/src/main/scala/spark/streaming/examples/KafkaWordCount.scala @@ -1,5 +1,9 @@ package spark.streaming.examples +import java.util.Properties +import kafka.message.Message +import kafka.producer.SyncProducerConfig +import kafka.producer._ import spark.streaming._ import spark.streaming.StreamingContext._ import spark.storage.StorageLevel @@ -8,33 +12,57 @@ import spark.streaming.util.RawTextHelper._ object KafkaWordCount { def main(args: Array[String]) { - if (args.length < 4) { - System.err.println("Usage: KafkaWordCount ") + if (args.length < 6) { + System.err.println("Usage: KafkaWordCount ") System.exit(1) } - val ssc = args(3) match { - // Restore the stream from a checkpoint - case "true" => - new StreamingContext("work/checkpoint") - case _ => - val tmp = new StreamingContext(args(0), "KafkaWordCount") + val Array(master, hostname, port, group, topics, numThreads) = args - tmp.setBatchDuration(Seconds(2)) - tmp.checkpoint("work/checkpoint", Seconds(10)) - - val lines = tmp.kafkaStream[String](args(1), args(2).toInt, "test_group", Map("test" -> 1), - Map(KafkaPartitionKey(0,"test","test_group",0) -> 0l)) - val words = lines.flatMap(_.split(" ")) - val wordCounts = words.map(x => (x, 1l)).reduceByKeyAndWindow(add _, subtract _, Minutes(10), Seconds(2), 2) - - wordCounts.persist().checkpoint(Seconds(10)) - wordCounts.print() - - tmp - } + val ssc = new StreamingContext(master, "KafkaWordCount") + ssc.checkpoint("checkpoint") + ssc.setBatchDuration(Seconds(2)) + + val topicpMap = topics.split(",").map((_,numThreads.toInt)).toMap + val lines = ssc.kafkaStream[String](hostname, port.toInt, group, topicpMap) + val words = lines.flatMap(_.split(" ")) + val wordCounts = words.map(x => (x, 1l)).reduceByKeyAndWindow(add _, subtract _, Minutes(10), Seconds(2), 2) + wordCounts.print() + ssc.start() - } } +// Produces some random words 
between 1 and 100. +object KafkaWordCountProducer { + + def main(args: Array[String]) { + if (args.length < 3) { + System.err.println("Usage: KafkaWordCountProducer ") + System.exit(1) + } + + val Array(hostname, port, topic, messagesPerSec, wordsPerMessage) = args + + // Zookeper connection properties + val props = new Properties() + props.put("zk.connect", hostname + ":" + port) + props.put("serializer.class", "kafka.serializer.StringEncoder") + + val config = new ProducerConfig(props) + val producer = new Producer[String, String](config) + + // Send some messages + while(true) { + val messages = (1 to messagesPerSec.toInt).map { messageNum => + (1 to wordsPerMessage.toInt).map(x => scala.util.Random.nextInt(10).toString).mkString(" ") + }.toArray + println(messages.mkString(",")) + val data = new ProducerData[String, String](topic, messages) + producer.send(data) + Thread.sleep(100) + } + } + +} + diff --git a/streaming/src/main/scala/spark/streaming/input/KafkaInputDStream.scala b/streaming/src/main/scala/spark/streaming/input/KafkaInputDStream.scala index 3685d6c666..7c642d4802 100644 --- a/streaming/src/main/scala/spark/streaming/input/KafkaInputDStream.scala +++ b/streaming/src/main/scala/spark/streaming/input/KafkaInputDStream.scala @@ -171,8 +171,7 @@ class KafkaReceiver(streamId: Int, host: String, port: Int, groupId: String, groupId, msgAndMetadata.topicInfo.partition.partId) val offset = msgAndMetadata.topicInfo.getConsumeOffset offsets.put(key, offset) - // TODO: Remove Logging - logInfo("Handled message: " + (key, offset).toString) + // logInfo("Handled message: " + (key, offset).toString) // Keep on handling messages true @@ -190,5 +189,5 @@ class KafkaReceiver(streamId: Int, host: String, port: Int, groupId: String, // } // } - + } From c97ebf64377e853ab7c616a103869a4417f25954 Mon Sep 17 00:00:00 2001 From: Tathagata Das Date: Mon, 19 Nov 2012 23:22:07 +0000 Subject: [PATCH 122/226] Fixed bug in the number of splits in RDD after checkpointing. Modified reduceByKeyAndWindow (naive) computation from window+reduceByKey to reduceByKey+window+reduceByKey. --- conf/streaming-env.sh.template | 2 +- core/src/main/scala/spark/RDD.scala | 3 ++- streaming/src/main/scala/spark/streaming/DStream.scala | 3 ++- .../main/scala/spark/streaming/PairDStreamFunctions.scala | 6 +++++- streaming/src/main/scala/spark/streaming/Scheduler.scala | 2 +- .../src/main/scala/spark/streaming/WindowedDStream.scala | 3 +++ 6 files changed, 14 insertions(+), 5 deletions(-) diff --git a/conf/streaming-env.sh.template b/conf/streaming-env.sh.template index 6b4094c515..1ea9ba5541 100755 --- a/conf/streaming-env.sh.template +++ b/conf/streaming-env.sh.template @@ -11,7 +11,7 @@ SPARK_JAVA_OPTS+=" -XX:+UseConcMarkSweepGC" -# Using of Kryo serialization can improve serialization performance +# Using Kryo serialization can improve serialization performance # and therefore the throughput of the Spark Streaming programs. However, # using Kryo serialization with custom classes may required you to # register the classes with Kryo. 
Refer to the Spark documentation diff --git a/core/src/main/scala/spark/RDD.scala b/core/src/main/scala/spark/RDD.scala index 6af8c377b5..8af6c9bd6a 100644 --- a/core/src/main/scala/spark/RDD.scala +++ b/core/src/main/scala/spark/RDD.scala @@ -222,12 +222,13 @@ abstract class RDD[T: ClassManifest]( rdd.checkpointFile = new Path(context.checkpointDir, "rdd-" + id).toString rdd.saveAsObjectFile(checkpointFile) rdd.synchronized { - rdd.checkpointRDD = context.objectFile[T](checkpointFile) + rdd.checkpointRDD = context.objectFile[T](checkpointFile, rdd.splits.size) rdd.checkpointRDDSplits = rdd.checkpointRDD.splits rdd.changeDependencies(rdd.checkpointRDD) rdd.shouldCheckpoint = false rdd.isCheckpointInProgress = false rdd.isCheckpointed = true + println("Done checkpointing RDD " + rdd.id + ", " + rdd) } } } diff --git a/streaming/src/main/scala/spark/streaming/DStream.scala b/streaming/src/main/scala/spark/streaming/DStream.scala index 13770aa8fd..26d5ce9198 100644 --- a/streaming/src/main/scala/spark/streaming/DStream.scala +++ b/streaming/src/main/scala/spark/streaming/DStream.scala @@ -321,7 +321,8 @@ extends Serializable with Logging { } } } - logInfo("Updated checkpoint data for time " + currentTime) + logInfo("Updated checkpoint data for time " + currentTime + ", " + checkpointData.size + " checkpoints, " + + "[" + checkpointData.mkString(",") + "]") } /** diff --git a/streaming/src/main/scala/spark/streaming/PairDStreamFunctions.scala b/streaming/src/main/scala/spark/streaming/PairDStreamFunctions.scala index e09d27d34f..720e63bba0 100644 --- a/streaming/src/main/scala/spark/streaming/PairDStreamFunctions.scala +++ b/streaming/src/main/scala/spark/streaming/PairDStreamFunctions.scala @@ -4,6 +4,7 @@ import spark.streaming.StreamingContext._ import spark.{Manifests, RDD, Partitioner, HashPartitioner} import spark.SparkContext._ +import spark.storage.StorageLevel import scala.collection.mutable.ArrayBuffer @@ -115,7 +116,10 @@ extends Serializable { slideTime: Time, partitioner: Partitioner ): DStream[(K, V)] = { - self.window(windowTime, slideTime).reduceByKey(ssc.sc.clean(reduceFunc), partitioner) + val cleanedReduceFunc = ssc.sc.clean(reduceFunc) + self.reduceByKey(cleanedReduceFunc, partitioner) + .window(windowTime, slideTime) + .reduceByKey(cleanedReduceFunc, partitioner) } // This method is the efficient sliding window reduce operation, diff --git a/streaming/src/main/scala/spark/streaming/Scheduler.scala b/streaming/src/main/scala/spark/streaming/Scheduler.scala index e2dca91179..014021be61 100644 --- a/streaming/src/main/scala/spark/streaming/Scheduler.scala +++ b/streaming/src/main/scala/spark/streaming/Scheduler.scala @@ -17,7 +17,7 @@ extends Logging { val graph = ssc.graph - val concurrentJobs = System.getProperty("spark.stream.concurrentJobs", "1").toInt + val concurrentJobs = System.getProperty("spark.streaming.concurrentJobs", "1").toInt val jobManager = new JobManager(ssc, concurrentJobs) val checkpointWriter = if (ssc.checkpointInterval != null && ssc.checkpointDir != null) { diff --git a/streaming/src/main/scala/spark/streaming/WindowedDStream.scala b/streaming/src/main/scala/spark/streaming/WindowedDStream.scala index ce89a3f99b..e4d2a634f5 100644 --- a/streaming/src/main/scala/spark/streaming/WindowedDStream.scala +++ b/streaming/src/main/scala/spark/streaming/WindowedDStream.scala @@ -2,6 +2,7 @@ package spark.streaming import spark.RDD import spark.rdd.UnionRDD +import spark.storage.StorageLevel class WindowedDStream[T: ClassManifest]( @@ -18,6 +19,8 @@ class 
WindowedDStream[T: ClassManifest]( throw new Exception("The slide duration of WindowedDStream (" + _slideTime + ") " + "must be multiple of the slide duration of parent DStream (" + parent.slideTime + ")") + parent.persist(StorageLevel.MEMORY_ONLY_SER) + def windowTime: Time = _windowTime override def dependencies = List(parent) From fd11d23bb3a817dabd414bceddebc35ad731f626 Mon Sep 17 00:00:00 2001 From: Tathagata Das Date: Mon, 19 Nov 2012 19:04:39 -0800 Subject: [PATCH 123/226] Modified StreamingContext API to make constructor accept the batch size (since it is always needed, Patrick's suggestion). Added description to DStream and StreamingContext. --- .../main/scala/spark/streaming/DStream.scala | 26 ++++++++-- .../scala/spark/streaming/DStreamGraph.scala | 4 +- .../spark/streaming/StreamingContext.scala | 49 +++++++++++++------ .../spark/streaming/examples/CountRaw.scala | 32 ------------ .../spark/streaming/examples/FileStream.scala | 7 ++- .../examples/FileStreamWithCheckpoint.scala | 5 +- .../spark/streaming/examples/GrepRaw.scala | 7 +-- .../streaming/examples/QueueStream.scala | 10 ++-- .../streaming/examples/TopKWordCountRaw.scala | 7 ++- .../streaming/examples/WordCountHdfs.scala | 5 +- .../streaming/examples/WordCountNetwork.scala | 6 +-- .../streaming/examples/WordCountRaw.scala | 7 ++- .../examples/clickstream/PageViewStream.scala | 5 +- .../streaming/BasicOperationsSuite.scala | 2 +- .../spark/streaming/InputStreamsSuite.scala | 12 ++--- .../scala/spark/streaming/TestSuiteBase.scala | 6 +-- 16 files changed, 92 insertions(+), 98 deletions(-) delete mode 100644 streaming/src/main/scala/spark/streaming/examples/CountRaw.scala diff --git a/streaming/src/main/scala/spark/streaming/DStream.scala b/streaming/src/main/scala/spark/streaming/DStream.scala index 26d5ce9198..8efda2074d 100644 --- a/streaming/src/main/scala/spark/streaming/DStream.scala +++ b/streaming/src/main/scala/spark/streaming/DStream.scala @@ -17,6 +17,26 @@ import java.io.{ObjectInputStream, IOException, ObjectOutputStream} import org.apache.hadoop.fs.Path import org.apache.hadoop.conf.Configuration +/** + * A Discretized Stream (DStream), the basic abstraction in Spark Streaming, is a continuous + * sequence of RDDs (of the same type) representing a continuous stream of data (see [[spark.RDD]] + * for more details on RDDs). DStreams can either be created from live data (such as, data from + * HDFS. Kafka or Flume) or it can be generated by transformation existing DStreams using operations + * such as `map`, `window` and `reduceByKeyAndWindow`. While a Spark Streaming program is running, each + * DStream periodically generates a RDD, either from live data or by transforming the RDD generated + * by a parent DStream. + * + * This class contains the basic operations available on all DStreams, such as `map`, `filter` and + * `window`. In addition, [[spark.streaming.PairDStreamFunctions]] contains operations available + * only on DStreams of key-value pairs, such as `groupByKeyAndWindow` and `join`. These operations + * are automatically available on any DStream of the right type (e.g., DStream[(Int, Int)] through + * implicit conversions when `spark.streaming.StreamingContext._` is imported. 
+ * + * DStreams internally is characterized by a few basic properties: + * - A list of other DStreams that the DStream depends on + * - A time interval at which the DStream generates an RDD + * - A function that is used to generate an RDD after each time interval + */ abstract class DStream[T: ClassManifest] (@transient var ssc: StreamingContext) extends Serializable with Logging { @@ -28,7 +48,7 @@ extends Serializable with Logging { * ---------------------------------------------- */ - // Time by which the window slides in this DStream + // Time interval at which the DStream generates an RDD def slideTime: Time // List of parent DStreams on which this DStream depends on @@ -186,12 +206,12 @@ extends Serializable with Logging { dependencies.foreach(_.setGraph(graph)) } - protected[streaming] def setRememberDuration(duration: Time) { + protected[streaming] def remember(duration: Time) { if (duration != null && duration > rememberDuration) { rememberDuration = duration logInfo("Duration for remembering RDDs set to " + rememberDuration + " for " + this) } - dependencies.foreach(_.setRememberDuration(parentRememberDuration)) + dependencies.foreach(_.remember(parentRememberDuration)) } /** This method checks whether the 'time' is valid wrt slideTime for generating RDD */ diff --git a/streaming/src/main/scala/spark/streaming/DStreamGraph.scala b/streaming/src/main/scala/spark/streaming/DStreamGraph.scala index bd8c033eab..d0a9ade61d 100644 --- a/streaming/src/main/scala/spark/streaming/DStreamGraph.scala +++ b/streaming/src/main/scala/spark/streaming/DStreamGraph.scala @@ -22,7 +22,7 @@ final private[streaming] class DStreamGraph extends Serializable with Logging { } zeroTime = time outputStreams.foreach(_.initialize(zeroTime)) - outputStreams.foreach(_.setRememberDuration(rememberDuration)) + outputStreams.foreach(_.remember(rememberDuration)) outputStreams.foreach(_.validate) inputStreams.par.foreach(_.start()) } @@ -50,7 +50,7 @@ final private[streaming] class DStreamGraph extends Serializable with Logging { batchDuration = duration } - private[streaming] def setRememberDuration(duration: Time) { + private[streaming] def remember(duration: Time) { this.synchronized { if (rememberDuration != null) { throw new Exception("Batch duration already set as " + batchDuration + diff --git a/streaming/src/main/scala/spark/streaming/StreamingContext.scala b/streaming/src/main/scala/spark/streaming/StreamingContext.scala index 7a9a71f303..4a41f2f516 100644 --- a/streaming/src/main/scala/spark/streaming/StreamingContext.scala +++ b/streaming/src/main/scala/spark/streaming/StreamingContext.scala @@ -18,19 +18,39 @@ import org.apache.hadoop.mapreduce.lib.input.TextInputFormat import org.apache.hadoop.fs.Path import java.util.UUID -final class StreamingContext ( +/** + * A StreamingContext is the main entry point for Spark Streaming functionality. Besides the basic + * information (such as, cluster URL and job name) to internally create a SparkContext, it provides + * methods used to create DStream from various input sources. + */ +class StreamingContext private ( sc_ : SparkContext, - cp_ : Checkpoint + cp_ : Checkpoint, + batchDur_ : Time ) extends Logging { - def this(sparkContext: SparkContext) = this(sparkContext, null) + /** + * Creates a StreamingContext using an existing SparkContext. 
+ * @param sparkContext Existing SparkContext + * @param batchDuration The time interval at which streaming data will be divided into batches + */ + def this(sparkContext: SparkContext, batchDuration: Time) = this(sparkContext, null, batchDuration) - def this(master: String, frameworkName: String, sparkHome: String = null, jars: Seq[String] = Nil) = - this(new SparkContext(master, frameworkName, sparkHome, jars), null) + /** + * Creates a StreamingContext by providing the details necessary for creating a new SparkContext. + * @param master Cluster URL to connect to (e.g. mesos://host:port, spark://host:port, local[4]). + * @param frameworkName A name for your job, to display on the cluster web UI + * @param batchDuration The time interval at which streaming data will be divided into batches + */ + def this(master: String, frameworkName: String, batchDuration: Time) = + this(new SparkContext(master, frameworkName), null, batchDuration) - def this(path: String) = this(null, CheckpointReader.read(path)) - - def this(cp_ : Checkpoint) = this(null, cp_) + /** + * Recreates the StreamingContext from a checkpoint file. + * @param path Path either to the directory that was specified as the checkpoint directory, or + * to the checkpoint file 'graph' or 'graph.bk'. + */ + def this(path: String) = this(null, CheckpointReader.read(path), null) initLogging() @@ -57,7 +77,10 @@ final class StreamingContext ( cp_.graph.restoreCheckpointData() cp_.graph } else { - new DStreamGraph() + assert(batchDur_ != null, "Batch duration for streaming context cannot be null") + val newGraph = new DStreamGraph() + newGraph.setBatchDuration(batchDur_) + newGraph } } @@ -77,12 +100,8 @@ final class StreamingContext ( private[streaming] var receiverJobThread: Thread = null private[streaming] var scheduler: Scheduler = null - def setBatchDuration(duration: Time) { - graph.setBatchDuration(duration) - } - - def setRememberDuration(duration: Time) { - graph.setRememberDuration(duration) + def remember(duration: Time) { + graph.remember(duration) } def checkpoint(dir: String, interval: Time = null) { diff --git a/streaming/src/main/scala/spark/streaming/examples/CountRaw.scala b/streaming/src/main/scala/spark/streaming/examples/CountRaw.scala deleted file mode 100644 index d2fdabd659..0000000000 --- a/streaming/src/main/scala/spark/streaming/examples/CountRaw.scala +++ /dev/null @@ -1,32 +0,0 @@ -package spark.streaming.examples - -import spark.util.IntParam -import spark.storage.StorageLevel -import spark.streaming._ -import spark.streaming.StreamingContext._ - -object CountRaw { - def main(args: Array[String]) { - if (args.length != 5) { - System.err.println("Usage: CountRaw ") - System.exit(1) - } - - val Array(master, IntParam(numStreams), host, IntParam(port), IntParam(batchMillis)) = args - - // Create the context and set the batch size - val ssc = new StreamingContext(master, "CountRaw") - ssc.setBatchDuration(Milliseconds(batchMillis)) - - // Make sure some tasks have started on each node - ssc.sc.parallelize(1 to 1000, 1000).count() - ssc.sc.parallelize(1 to 1000, 1000).count() - ssc.sc.parallelize(1 to 1000, 1000).count() - - val rawStreams = (1 to numStreams).map(_ => - ssc.rawNetworkStream[String](host, port, StorageLevel.MEMORY_ONLY_2)).toArray - val union = new UnionDStream(rawStreams) - union.map(_.length + 2).reduce(_ + _).foreachRDD(r => println("Byte count: " + r.collect().mkString)) - ssc.start() - } -} diff --git a/streaming/src/main/scala/spark/streaming/examples/FileStream.scala 
b/streaming/src/main/scala/spark/streaming/examples/FileStream.scala index d68611abd6..81938d30d4 100644 --- a/streaming/src/main/scala/spark/streaming/examples/FileStream.scala +++ b/streaming/src/main/scala/spark/streaming/examples/FileStream.scala @@ -14,10 +14,9 @@ object FileStream { System.exit(1) } - // Create the context and set the batch size - val ssc = new StreamingContext(args(0), "FileStream") - ssc.setBatchDuration(Seconds(2)) - + // Create the context + val ssc = new StreamingContext(args(0), "FileStream", Seconds(1)) + // Create the new directory val directory = new Path(args(1)) val fs = directory.getFileSystem(new Configuration()) diff --git a/streaming/src/main/scala/spark/streaming/examples/FileStreamWithCheckpoint.scala b/streaming/src/main/scala/spark/streaming/examples/FileStreamWithCheckpoint.scala index 21a83c0fde..b7bc15a1d5 100644 --- a/streaming/src/main/scala/spark/streaming/examples/FileStreamWithCheckpoint.scala +++ b/streaming/src/main/scala/spark/streaming/examples/FileStreamWithCheckpoint.scala @@ -32,9 +32,8 @@ object FileStreamWithCheckpoint { if (!fs.exists(directory)) fs.mkdirs(directory) // Create new streaming context - val ssc_ = new StreamingContext(args(0), "FileStreamWithCheckpoint") - ssc_.setBatchDuration(Seconds(1)) - ssc_.checkpoint(checkpointDir, Seconds(1)) + val ssc_ = new StreamingContext(args(0), "FileStreamWithCheckpoint", Seconds(1)) + ssc_.checkpoint(checkpointDir) // Setup the streaming computation val inputStream = ssc_.textFileStream(directory.toString) diff --git a/streaming/src/main/scala/spark/streaming/examples/GrepRaw.scala b/streaming/src/main/scala/spark/streaming/examples/GrepRaw.scala index ffbea6e55d..6cb2b4c042 100644 --- a/streaming/src/main/scala/spark/streaming/examples/GrepRaw.scala +++ b/streaming/src/main/scala/spark/streaming/examples/GrepRaw.scala @@ -16,9 +16,10 @@ object GrepRaw { val Array(master, IntParam(numStreams), host, IntParam(port), IntParam(batchMillis)) = args - // Create the context and set the batch size - val ssc = new StreamingContext(master, "GrepRaw") - ssc.setBatchDuration(Milliseconds(batchMillis)) + // Create the context + val ssc = new StreamingContext(master, "GrepRaw", Milliseconds(batchMillis)) + + // Warm up the JVMs on master and slave for JIT compilation to kick in warmUp(ssc.sc) diff --git a/streaming/src/main/scala/spark/streaming/examples/QueueStream.scala b/streaming/src/main/scala/spark/streaming/examples/QueueStream.scala index 2af51bad28..2a265d021d 100644 --- a/streaming/src/main/scala/spark/streaming/examples/QueueStream.scala +++ b/streaming/src/main/scala/spark/streaming/examples/QueueStream.scala @@ -1,9 +1,8 @@ package spark.streaming.examples import spark.RDD -import spark.streaming.StreamingContext +import spark.streaming.{Seconds, StreamingContext} import spark.streaming.StreamingContext._ -import spark.streaming.Seconds import scala.collection.mutable.SynchronizedQueue @@ -15,10 +14,9 @@ object QueueStream { System.exit(1) } - // Create the context and set the batch size - val ssc = new StreamingContext(args(0), "QueueStream") - ssc.setBatchDuration(Seconds(1)) - + // Create the context + val ssc = new StreamingContext(args(0), "QueueStream", Seconds(1)) + // Create the queue through which RDDs can be pushed to // a QueueInputDStream val rddQueue = new SynchronizedQueue[RDD[Int]]() diff --git a/streaming/src/main/scala/spark/streaming/examples/TopKWordCountRaw.scala b/streaming/src/main/scala/spark/streaming/examples/TopKWordCountRaw.scala index 0411bde1a7..fe4c2bf155 
100644 --- a/streaming/src/main/scala/spark/streaming/examples/TopKWordCountRaw.scala +++ b/streaming/src/main/scala/spark/streaming/examples/TopKWordCountRaw.scala @@ -20,12 +20,11 @@ object TopKWordCountRaw { val Array(master, IntParam(numStreams), IntParam(port), checkpointDir) = args val k = 10 - // Create the context, set the batch size and checkpoint directory. + // Create the context, and set the checkpoint directory. // Checkpoint directory is necessary for achieving fault-tolerance, by saving counts // periodically to HDFS - val ssc = new StreamingContext(master, "TopKWordCountRaw") - ssc.setBatchDuration(Seconds(1)) - ssc.checkpoint(checkpointDir + "/" + UUID.randomUUID.toString, Seconds(1)) + val ssc = new StreamingContext(master, "TopKWordCountRaw", Seconds(1)) + ssc.checkpoint(checkpointDir + "/" + UUID.randomUUID.toString, Seconds(1)) // Warm up the JVMs on master and slave for JIT compilation to kick in /*warmUp(ssc.sc)*/ diff --git a/streaming/src/main/scala/spark/streaming/examples/WordCountHdfs.scala b/streaming/src/main/scala/spark/streaming/examples/WordCountHdfs.scala index 591cb141c3..867a8f42c4 100644 --- a/streaming/src/main/scala/spark/streaming/examples/WordCountHdfs.scala +++ b/streaming/src/main/scala/spark/streaming/examples/WordCountHdfs.scala @@ -10,9 +10,8 @@ object WordCountHdfs { System.exit(1) } - // Create the context and set the batch size - val ssc = new StreamingContext(args(0), "WordCountHdfs") - ssc.setBatchDuration(Seconds(2)) + // Create the context + val ssc = new StreamingContext(args(0), "WordCountHdfs", Seconds(2)) // Create the FileInputDStream on the directory and use the // stream to count words in new files created diff --git a/streaming/src/main/scala/spark/streaming/examples/WordCountNetwork.scala b/streaming/src/main/scala/spark/streaming/examples/WordCountNetwork.scala index ba1bd1de7c..eadda60563 100644 --- a/streaming/src/main/scala/spark/streaming/examples/WordCountNetwork.scala +++ b/streaming/src/main/scala/spark/streaming/examples/WordCountNetwork.scala @@ -6,13 +6,13 @@ import spark.streaming.StreamingContext._ object WordCountNetwork { def main(args: Array[String]) { if (args.length < 2) { - System.err.println("Usage: WordCountNetwork ") + System.err.println("Usage: WordCountNetwork \n" + + "In local mode, should be 'local[n]' with n > 1") System.exit(1) } // Create the context and set the batch size - val ssc = new StreamingContext(args(0), "WordCountNetwork") - ssc.setBatchDuration(Seconds(2)) + val ssc = new StreamingContext(args(0), "WordCountNetwork", Seconds(1)) // Create a NetworkInputDStream on target ip:port and count the // words in input stream of \n delimited test (eg. generated by 'nc') diff --git a/streaming/src/main/scala/spark/streaming/examples/WordCountRaw.scala b/streaming/src/main/scala/spark/streaming/examples/WordCountRaw.scala index 571428c0fe..a29c81d437 100644 --- a/streaming/src/main/scala/spark/streaming/examples/WordCountRaw.scala +++ b/streaming/src/main/scala/spark/streaming/examples/WordCountRaw.scala @@ -19,12 +19,11 @@ object WordCountRaw { val Array(master, IntParam(numStreams), IntParam(port), checkpointDir) = args - // Create the context, set the batch size and checkpoint directory. + // Create the context, and set the checkpoint directory. 
// Checkpoint directory is necessary for achieving fault-tolerance, by saving counts // periodically to HDFS - val ssc = new StreamingContext(master, "WordCountRaw") - ssc.setBatchDuration(Seconds(1)) - ssc.checkpoint(checkpointDir + "/" + UUID.randomUUID.toString, Seconds(1)) + val ssc = new StreamingContext(master, "WordCountRaw", Seconds(1)) + ssc.checkpoint(checkpointDir + "/" + UUID.randomUUID.toString, Seconds(1)) // Warm up the JVMs on master and slave for JIT compilation to kick in warmUp(ssc.sc) diff --git a/streaming/src/main/scala/spark/streaming/examples/clickstream/PageViewStream.scala b/streaming/src/main/scala/spark/streaming/examples/clickstream/PageViewStream.scala index 1a51fb66cd..68be6b7893 100644 --- a/streaming/src/main/scala/spark/streaming/examples/clickstream/PageViewStream.scala +++ b/streaming/src/main/scala/spark/streaming/examples/clickstream/PageViewStream.scala @@ -23,9 +23,8 @@ object PageViewStream { val host = args(1) val port = args(2).toInt - // Create the context and set the batch size - val ssc = new StreamingContext("local[2]", "PageViewStream") - ssc.setBatchDuration(Seconds(1)) + // Create the context + val ssc = new StreamingContext("local[2]", "PageViewStream", Seconds(1)) // Create a NetworkInputDStream on target host:port and convert each line to a PageView val pageViews = ssc.networkTextStream(host, port) diff --git a/streaming/src/test/scala/spark/streaming/BasicOperationsSuite.scala b/streaming/src/test/scala/spark/streaming/BasicOperationsSuite.scala index d0aaac0f2e..dc38ef4912 100644 --- a/streaming/src/test/scala/spark/streaming/BasicOperationsSuite.scala +++ b/streaming/src/test/scala/spark/streaming/BasicOperationsSuite.scala @@ -175,7 +175,7 @@ class BasicOperationsSuite extends TestSuiteBase { } val ssc = setupStreams(input, operation _) - ssc.setRememberDuration(rememberDuration) + ssc.remember(rememberDuration) runStreams[(Int, Int)](ssc, input.size, input.size / 2) val windowedStream2 = ssc.graph.getOutputStreams().head.dependencies.head diff --git a/streaming/src/test/scala/spark/streaming/InputStreamsSuite.scala b/streaming/src/test/scala/spark/streaming/InputStreamsSuite.scala index 3e99440226..e98c096725 100644 --- a/streaming/src/test/scala/spark/streaming/InputStreamsSuite.scala +++ b/streaming/src/test/scala/spark/streaming/InputStreamsSuite.scala @@ -40,8 +40,7 @@ class InputStreamsSuite extends TestSuiteBase with BeforeAndAfter { testServer.start() // Set up the streaming context and input streams - val ssc = new StreamingContext(master, framework) - ssc.setBatchDuration(batchDuration) + val ssc = new StreamingContext(master, framework, batchDuration) val networkStream = ssc.networkTextStream("localhost", testPort, StorageLevel.MEMORY_AND_DISK) val outputBuffer = new ArrayBuffer[Seq[String]] with SynchronizedBuffer[Seq[String ]] val outputStream = new TestOutputStream(networkStream, outputBuffer) @@ -89,8 +88,7 @@ class InputStreamsSuite extends TestSuiteBase with BeforeAndAfter { testServer.start() // Set up the streaming context and input streams - var ssc = new StreamingContext(master, framework) - ssc.setBatchDuration(batchDuration) + var ssc = new StreamingContext(master, framework, batchDuration) ssc.checkpoint(checkpointDir, checkpointInterval) val networkStream = ssc.networkTextStream("localhost", testPort, StorageLevel.MEMORY_AND_DISK) var outputStream = new TestOutputStream(networkStream, new ArrayBuffer[Seq[String]]) @@ -137,8 +135,7 @@ class InputStreamsSuite extends TestSuiteBase with BeforeAndAfter { } // Set 
up the streaming context and input streams - val ssc = new StreamingContext(master, framework) - ssc.setBatchDuration(batchDuration) + val ssc = new StreamingContext(master, framework, batchDuration) val filestream = ssc.textFileStream(testDir.toString) val outputBuffer = new ArrayBuffer[Seq[String]] with SynchronizedBuffer[Seq[String]] def output = outputBuffer.flatMap(x => x) @@ -198,8 +195,7 @@ class InputStreamsSuite extends TestSuiteBase with BeforeAndAfter { } // Set up the streaming context and input streams - var ssc = new StreamingContext(master, framework) - ssc.setBatchDuration(batchDuration) + var ssc = new StreamingContext(master, framework, batchDuration) ssc.checkpoint(checkpointDir, checkpointInterval) val filestream = ssc.textFileStream(testDir.toString) var outputStream = new TestOutputStream(filestream, new ArrayBuffer[Seq[String]]) diff --git a/streaming/src/test/scala/spark/streaming/TestSuiteBase.scala b/streaming/src/test/scala/spark/streaming/TestSuiteBase.scala index 5fb5cc504c..8cc2f8ccfc 100644 --- a/streaming/src/test/scala/spark/streaming/TestSuiteBase.scala +++ b/streaming/src/test/scala/spark/streaming/TestSuiteBase.scala @@ -76,8 +76,7 @@ trait TestSuiteBase extends FunSuite with Logging { ): StreamingContext = { // Create StreamingContext - val ssc = new StreamingContext(master, framework) - ssc.setBatchDuration(batchDuration) + val ssc = new StreamingContext(master, framework, batchDuration) if (checkpointDir != null) { ssc.checkpoint(checkpointDir, checkpointInterval) } @@ -98,8 +97,7 @@ trait TestSuiteBase extends FunSuite with Logging { ): StreamingContext = { // Create StreamingContext - val ssc = new StreamingContext(master, framework) - ssc.setBatchDuration(batchDuration) + val ssc = new StreamingContext(master, framework, batchDuration) if (checkpointDir != null) { ssc.checkpoint(checkpointDir, checkpointInterval) } From b18d70870a33a4783c6b3b787bef9b0eec30bce0 Mon Sep 17 00:00:00 2001 From: Tathagata Das Date: Tue, 27 Nov 2012 15:08:49 -0800 Subject: [PATCH 124/226] Modified bunch HashMaps in Spark to use TimeStampedHashMap and made various modules use CleanupTask to periodically clean up metadata. 
--- core/src/main/scala/spark/CacheTracker.scala | 6 +- .../main/scala/spark/MapOutputTracker.scala | 27 ++++-- .../scala/spark/scheduler/DAGScheduler.scala | 13 ++- .../spark/scheduler/ShuffleMapTask.scala | 6 +- .../main/scala/spark/util/CleanupTask.scala | 31 +++++++ .../scala/spark/util/TimeStampedHashMap.scala | 87 +++++++++++++++++++ .../spark/streaming/StreamingContext.scala | 13 ++- 7 files changed, 165 insertions(+), 18 deletions(-) create mode 100644 core/src/main/scala/spark/util/CleanupTask.scala create mode 100644 core/src/main/scala/spark/util/TimeStampedHashMap.scala diff --git a/core/src/main/scala/spark/CacheTracker.scala b/core/src/main/scala/spark/CacheTracker.scala index c5db6ce63a..0ee59bee0f 100644 --- a/core/src/main/scala/spark/CacheTracker.scala +++ b/core/src/main/scala/spark/CacheTracker.scala @@ -14,6 +14,7 @@ import scala.collection.mutable.HashSet import spark.storage.BlockManager import spark.storage.StorageLevel +import util.{CleanupTask, TimeStampedHashMap} private[spark] sealed trait CacheTrackerMessage @@ -30,7 +31,7 @@ private[spark] case object StopCacheTracker extends CacheTrackerMessage private[spark] class CacheTrackerActor extends Actor with Logging { // TODO: Should probably store (String, CacheType) tuples - private val locs = new HashMap[Int, Array[List[String]]] + private val locs = new TimeStampedHashMap[Int, Array[List[String]]] /** * A map from the slave's host name to its cache size. @@ -38,6 +39,8 @@ private[spark] class CacheTrackerActor extends Actor with Logging { private val slaveCapacity = new HashMap[String, Long] private val slaveUsage = new HashMap[String, Long] + private val cleanupTask = new CleanupTask("CacheTracker", locs.cleanup) + private def getCacheUsage(host: String): Long = slaveUsage.getOrElse(host, 0L) private def getCacheCapacity(host: String): Long = slaveCapacity.getOrElse(host, 0L) private def getCacheAvailable(host: String): Long = getCacheCapacity(host) - getCacheUsage(host) @@ -86,6 +89,7 @@ private[spark] class CacheTrackerActor extends Actor with Logging { case StopCacheTracker => logInfo("Stopping CacheTrackerActor") sender ! true + cleanupTask.cancel() context.stop(self) } } diff --git a/core/src/main/scala/spark/MapOutputTracker.scala b/core/src/main/scala/spark/MapOutputTracker.scala index 45441aa5e5..d0be1bb913 100644 --- a/core/src/main/scala/spark/MapOutputTracker.scala +++ b/core/src/main/scala/spark/MapOutputTracker.scala @@ -17,6 +17,7 @@ import scala.collection.mutable.HashSet import scheduler.MapStatus import spark.storage.BlockManagerId import java.util.zip.{GZIPInputStream, GZIPOutputStream} +import util.{CleanupTask, TimeStampedHashMap} private[spark] sealed trait MapOutputTrackerMessage private[spark] case class GetMapOutputStatuses(shuffleId: Int, requester: String) @@ -43,7 +44,7 @@ private[spark] class MapOutputTracker(actorSystem: ActorSystem, isMaster: Boolea val timeout = 10.seconds - var mapStatuses = new ConcurrentHashMap[Int, Array[MapStatus]] + var mapStatuses = new TimeStampedHashMap[Int, Array[MapStatus]] // Incremented every time a fetch fails so that client nodes know to clear // their cache of map output locations if this happens. 
@@ -52,7 +53,7 @@ private[spark] class MapOutputTracker(actorSystem: ActorSystem, isMaster: Boolea // Cache a serialized version of the output statuses for each shuffle to send them out faster var cacheGeneration = generation - val cachedSerializedStatuses = new HashMap[Int, Array[Byte]] + val cachedSerializedStatuses = new TimeStampedHashMap[Int, Array[Byte]] var trackerActor: ActorRef = if (isMaster) { val actor = actorSystem.actorOf(Props(new MapOutputTrackerActor(this)), name = actorName) @@ -63,6 +64,8 @@ private[spark] class MapOutputTracker(actorSystem: ActorSystem, isMaster: Boolea actorSystem.actorFor(url) } + val cleanupTask = new CleanupTask("MapOutputTracker", this.cleanup) + // Send a message to the trackerActor and get its result within a default timeout, or // throw a SparkException if this fails. def askTracker(message: Any): Any = { @@ -83,14 +86,14 @@ private[spark] class MapOutputTracker(actorSystem: ActorSystem, isMaster: Boolea } def registerShuffle(shuffleId: Int, numMaps: Int) { - if (mapStatuses.get(shuffleId) != null) { + if (mapStatuses.get(shuffleId) != None) { throw new IllegalArgumentException("Shuffle ID " + shuffleId + " registered twice") } mapStatuses.put(shuffleId, new Array[MapStatus](numMaps)) } def registerMapOutput(shuffleId: Int, mapId: Int, status: MapStatus) { - var array = mapStatuses.get(shuffleId) + var array = mapStatuses(shuffleId) array.synchronized { array(mapId) = status } @@ -107,7 +110,7 @@ private[spark] class MapOutputTracker(actorSystem: ActorSystem, isMaster: Boolea } def unregisterMapOutput(shuffleId: Int, mapId: Int, bmAddress: BlockManagerId) { - var array = mapStatuses.get(shuffleId) + var array = mapStatuses(shuffleId) if (array != null) { array.synchronized { if (array(mapId).address == bmAddress) { @@ -125,7 +128,7 @@ private[spark] class MapOutputTracker(actorSystem: ActorSystem, isMaster: Boolea // Called on possibly remote nodes to get the server URIs and output sizes for a given shuffle def getServerStatuses(shuffleId: Int, reduceId: Int): Array[(BlockManagerId, Long)] = { - val statuses = mapStatuses.get(shuffleId) + val statuses = mapStatuses.get(shuffleId).orNull if (statuses == null) { logInfo("Don't have map outputs for shuffle " + shuffleId + ", fetching them") fetching.synchronized { @@ -138,7 +141,7 @@ private[spark] class MapOutputTracker(actorSystem: ActorSystem, isMaster: Boolea case e: InterruptedException => } } - return mapStatuses.get(shuffleId).map(status => + return mapStatuses(shuffleId).map(status => (status.address, MapOutputTracker.decompressSize(status.compressedSizes(reduceId)))) } else { fetching += shuffleId @@ -164,9 +167,15 @@ private[spark] class MapOutputTracker(actorSystem: ActorSystem, isMaster: Boolea } } + def cleanup(cleanupTime: Long) { + mapStatuses.cleanup(cleanupTime) + cachedSerializedStatuses.cleanup(cleanupTime) + } + def stop() { communicate(StopMapOutputTracker) mapStatuses.clear() + cleanupTask.cancel() trackerActor = null } @@ -192,7 +201,7 @@ private[spark] class MapOutputTracker(actorSystem: ActorSystem, isMaster: Boolea generationLock.synchronized { if (newGen > generation) { logInfo("Updating generation to " + newGen + " and clearing cache") - mapStatuses = new ConcurrentHashMap[Int, Array[MapStatus]] + mapStatuses = new TimeStampedHashMap[Int, Array[MapStatus]] generation = newGen } } @@ -210,7 +219,7 @@ private[spark] class MapOutputTracker(actorSystem: ActorSystem, isMaster: Boolea case Some(bytes) => return bytes case None => - statuses = mapStatuses.get(shuffleId) + 
statuses = mapStatuses(shuffleId) generationGotten = generation } } diff --git a/core/src/main/scala/spark/scheduler/DAGScheduler.scala b/core/src/main/scala/spark/scheduler/DAGScheduler.scala index aaaed59c4a..3af877b817 100644 --- a/core/src/main/scala/spark/scheduler/DAGScheduler.scala +++ b/core/src/main/scala/spark/scheduler/DAGScheduler.scala @@ -14,6 +14,7 @@ import spark.partial.ApproximateEvaluator import spark.partial.PartialResult import spark.storage.BlockManagerMaster import spark.storage.BlockManagerId +import util.{CleanupTask, TimeStampedHashMap} /** * A Scheduler subclass that implements stage-oriented scheduling. It computes a DAG of stages for @@ -61,9 +62,9 @@ class DAGScheduler(taskSched: TaskScheduler) extends TaskSchedulerListener with val nextStageId = new AtomicInteger(0) - val idToStage = new HashMap[Int, Stage] + val idToStage = new TimeStampedHashMap[Int, Stage] - val shuffleToMapStage = new HashMap[Int, Stage] + val shuffleToMapStage = new TimeStampedHashMap[Int, Stage] var cacheLocs = new HashMap[Int, Array[List[String]]] @@ -83,6 +84,8 @@ class DAGScheduler(taskSched: TaskScheduler) extends TaskSchedulerListener with val activeJobs = new HashSet[ActiveJob] val resultStageToJob = new HashMap[Stage, ActiveJob] + val cleanupTask = new CleanupTask("DAGScheduler", this.cleanup) + // Start a thread to run the DAGScheduler event loop new Thread("DAGScheduler") { setDaemon(true) @@ -591,8 +594,14 @@ class DAGScheduler(taskSched: TaskScheduler) extends TaskSchedulerListener with return Nil } + def cleanup(cleanupTime: Long) { + idToStage.cleanup(cleanupTime) + shuffleToMapStage.cleanup(cleanupTime) + } + def stop() { eventQueue.put(StopDAGScheduler) + cleanupTask.cancel() taskSched.stop() } } diff --git a/core/src/main/scala/spark/scheduler/ShuffleMapTask.scala b/core/src/main/scala/spark/scheduler/ShuffleMapTask.scala index 60105c42b6..fbf618c906 100644 --- a/core/src/main/scala/spark/scheduler/ShuffleMapTask.scala +++ b/core/src/main/scala/spark/scheduler/ShuffleMapTask.scala @@ -14,17 +14,19 @@ import com.ning.compress.lzf.LZFOutputStream import spark._ import spark.storage._ +import util.{TimeStampedHashMap, CleanupTask} private[spark] object ShuffleMapTask { // A simple map between the stage id to the serialized byte array of a task. // Served as a cache for task serialization because serialization can be // expensive on the master node if it needs to launch thousands of tasks. 
- val serializedInfoCache = new JHashMap[Int, Array[Byte]] + val serializedInfoCache = new TimeStampedHashMap[Int, Array[Byte]] + val cleanupTask = new CleanupTask("ShuffleMapTask", serializedInfoCache.cleanup) def serializeInfo(stageId: Int, rdd: RDD[_], dep: ShuffleDependency[_,_]): Array[Byte] = { synchronized { - val old = serializedInfoCache.get(stageId) + val old = serializedInfoCache.get(stageId).orNull if (old != null) { return old } else { diff --git a/core/src/main/scala/spark/util/CleanupTask.scala b/core/src/main/scala/spark/util/CleanupTask.scala new file mode 100644 index 0000000000..ccc28803e0 --- /dev/null +++ b/core/src/main/scala/spark/util/CleanupTask.scala @@ -0,0 +1,31 @@ +package spark.util + +import java.util.concurrent.{TimeUnit, ScheduledFuture, Executors} +import java.util.{TimerTask, Timer} +import spark.Logging + +class CleanupTask(name: String, cleanupFunc: (Long) => Unit) extends Logging { + val delayMins = System.getProperty("spark.cleanup.delay", "-100").toInt + val periodMins = System.getProperty("spark.cleanup.period", (delayMins / 10).toString).toInt + val timer = new Timer(name + " cleanup timer", true) + val task = new TimerTask { + def run() { + try { + if (delayMins > 0) { + + cleanupFunc(System.currentTimeMillis() - (delayMins * 60 * 1000)) + logInfo("Ran cleanup task for " + name) + } + } catch { + case e: Exception => logError("Error running cleanup task for " + name, e) + } + } + } + if (periodMins > 0) { + timer.schedule(task, periodMins * 60 * 1000, periodMins * 60 * 1000) + } + + def cancel() { + timer.cancel() + } +} diff --git a/core/src/main/scala/spark/util/TimeStampedHashMap.scala b/core/src/main/scala/spark/util/TimeStampedHashMap.scala new file mode 100644 index 0000000000..7a22b80a20 --- /dev/null +++ b/core/src/main/scala/spark/util/TimeStampedHashMap.scala @@ -0,0 +1,87 @@ +package spark.util + +import scala.collection.JavaConversions._ +import scala.collection.mutable.{HashMap, Map} +import java.util.concurrent.ConcurrentHashMap + +/** + * This is a custom implementation of scala.collection.mutable.Map which stores the insertion + * time stamp along with each key-value pair. Key-value pairs that are older than a particular + * threshold time can them be removed using the cleanup method. This is intended to be a drop-in + * replacement of scala.collection.mutable.HashMap. 
+ */ +class TimeStampedHashMap[A, B] extends Map[A, B]() { + val internalMap = new ConcurrentHashMap[A, (B, Long)]() + + def get(key: A): Option[B] = { + val value = internalMap.get(key) + if (value != null) Some(value._1) else None + } + + def iterator: Iterator[(A, B)] = { + val jIterator = internalMap.entrySet().iterator() + jIterator.map(kv => (kv.getKey, kv.getValue._1)) + } + + override def + [B1 >: B](kv: (A, B1)): Map[A, B1] = { + val newMap = new TimeStampedHashMap[A, B1] + newMap.internalMap.putAll(this.internalMap) + newMap.internalMap.put(kv._1, (kv._2, currentTime)) + newMap + } + + override def - (key: A): Map[A, B] = { + internalMap.remove(key) + this + } + + override def += (kv: (A, B)): this.type = { + internalMap.put(kv._1, (kv._2, currentTime)) + this + } + + override def -= (key: A): this.type = { + internalMap.remove(key) + this + } + + override def update(key: A, value: B) { + this += ((key, value)) + } + + override def apply(key: A): B = { + val value = internalMap.get(key) + if (value == null) throw new NoSuchElementException() + value._1 + } + + override def filter(p: ((A, B)) => Boolean): Map[A, B] = { + internalMap.map(kv => (kv._1, kv._2._1)).filter(p) + } + + override def empty: Map[A, B] = new TimeStampedHashMap[A, B]() + + override def size(): Int = internalMap.size() + + override def foreach[U](f: ((A, B)) => U): Unit = { + val iterator = internalMap.entrySet().iterator() + while(iterator.hasNext) { + val entry = iterator.next() + val kv = (entry.getKey, entry.getValue._1) + f(kv) + } + } + + def cleanup(threshTime: Long) { + val iterator = internalMap.entrySet().iterator() + while(iterator.hasNext) { + val entry = iterator.next() + if (entry.getValue._2 < threshTime) { + iterator.remove() + } + } + } + + private def currentTime: Long = System.currentTimeMillis() + +} diff --git a/streaming/src/main/scala/spark/streaming/StreamingContext.scala b/streaming/src/main/scala/spark/streaming/StreamingContext.scala index 4a41f2f516..58123dc82c 100644 --- a/streaming/src/main/scala/spark/streaming/StreamingContext.scala +++ b/streaming/src/main/scala/spark/streaming/StreamingContext.scala @@ -43,7 +43,7 @@ class StreamingContext private ( * @param batchDuration The time interval at which streaming data will be divided into batches */ def this(master: String, frameworkName: String, batchDuration: Time) = - this(new SparkContext(master, frameworkName), null, batchDuration) + this(StreamingContext.createNewSparkContext(master, frameworkName), null, batchDuration) /** * Recreates the StreamingContext from a checkpoint file. @@ -214,11 +214,8 @@ class StreamingContext private ( "Checkpoint directory has been set, but the graph checkpointing interval has " + "not been set. Please use StreamingContext.checkpoint() to set the interval." ) - - } - /** * This function starts the execution of the streams. 
*/ @@ -265,6 +262,14 @@ class StreamingContext private ( object StreamingContext { + + def createNewSparkContext(master: String, frameworkName: String): SparkContext = { + if (System.getProperty("spark.cleanup.delay", "-1").toInt < 0) { + System.setProperty("spark.cleanup.delay", "60") + } + new SparkContext(master, frameworkName) + } + implicit def toPairDStreamFunctions[K: ClassManifest, V: ClassManifest](stream: DStream[(K,V)]) = { new PairDStreamFunctions[K, V](stream) } From d5e7aad039603a8a02d11f9ebda001422ca4c341 Mon Sep 17 00:00:00 2001 From: Tathagata Das Date: Wed, 28 Nov 2012 08:36:55 +0000 Subject: [PATCH 125/226] Bug fixes --- .../scala/spark/scheduler/DAGScheduler.scala | 11 ++++++++++- .../src/main/scala/spark/util/CleanupTask.scala | 17 +++++++++-------- .../spark/streaming/StreamingContext.scala | 2 +- 3 files changed, 20 insertions(+), 10 deletions(-) diff --git a/core/src/main/scala/spark/scheduler/DAGScheduler.scala b/core/src/main/scala/spark/scheduler/DAGScheduler.scala index 3af877b817..affacb43ca 100644 --- a/core/src/main/scala/spark/scheduler/DAGScheduler.scala +++ b/core/src/main/scala/spark/scheduler/DAGScheduler.scala @@ -78,7 +78,7 @@ class DAGScheduler(taskSched: TaskScheduler) extends TaskSchedulerListener with val waiting = new HashSet[Stage] // Stages we need to run whose parents aren't done val running = new HashSet[Stage] // Stages we are running right now val failed = new HashSet[Stage] // Stages that must be resubmitted due to fetch failures - val pendingTasks = new HashMap[Stage, HashSet[Task[_]]] // Missing tasks from each stage + val pendingTasks = new TimeStampedHashMap[Stage, HashSet[Task[_]]] // Missing tasks from each stage var lastFetchFailureTime: Long = 0 // Used to wait a bit to avoid repeated resubmits val activeJobs = new HashSet[ActiveJob] @@ -595,8 +595,17 @@ class DAGScheduler(taskSched: TaskScheduler) extends TaskSchedulerListener with } def cleanup(cleanupTime: Long) { + var sizeBefore = idToStage.size idToStage.cleanup(cleanupTime) + logInfo("idToStage " + sizeBefore + " --> " + idToStage.size) + + sizeBefore = shuffleToMapStage.size shuffleToMapStage.cleanup(cleanupTime) + logInfo("shuffleToMapStage " + sizeBefore + " --> " + shuffleToMapStage.size) + + sizeBefore = pendingTasks.size + pendingTasks.cleanup(cleanupTime) + logInfo("pendingTasks " + sizeBefore + " --> " + pendingTasks.size) } def stop() { diff --git a/core/src/main/scala/spark/util/CleanupTask.scala b/core/src/main/scala/spark/util/CleanupTask.scala index ccc28803e0..a4357c62c6 100644 --- a/core/src/main/scala/spark/util/CleanupTask.scala +++ b/core/src/main/scala/spark/util/CleanupTask.scala @@ -5,24 +5,25 @@ import java.util.{TimerTask, Timer} import spark.Logging class CleanupTask(name: String, cleanupFunc: (Long) => Unit) extends Logging { - val delayMins = System.getProperty("spark.cleanup.delay", "-100").toInt - val periodMins = System.getProperty("spark.cleanup.period", (delayMins / 10).toString).toInt + val delaySeconds = (System.getProperty("spark.cleanup.delay", "-100").toDouble * 60).toInt + val periodSeconds = math.max(10, delaySeconds / 10) val timer = new Timer(name + " cleanup timer", true) val task = new TimerTask { def run() { try { - if (delayMins > 0) { - - cleanupFunc(System.currentTimeMillis() - (delayMins * 60 * 1000)) + if (delaySeconds > 0) { + cleanupFunc(System.currentTimeMillis() - (delaySeconds * 1000)) logInfo("Ran cleanup task for " + name) - } + } } catch { case e: Exception => logError("Error running cleanup task for " + name, e) } } } - 
if (periodMins > 0) { - timer.schedule(task, periodMins * 60 * 1000, periodMins * 60 * 1000) + if (periodSeconds > 0) { + logInfo("Starting cleanup task for " + name + " with delay of " + delaySeconds + " seconds and " + + "period of " + periodSeconds + " secs") + timer.schedule(task, periodSeconds * 1000, periodSeconds * 1000) } def cancel() { diff --git a/streaming/src/main/scala/spark/streaming/StreamingContext.scala b/streaming/src/main/scala/spark/streaming/StreamingContext.scala index 58123dc82c..90dd560752 100644 --- a/streaming/src/main/scala/spark/streaming/StreamingContext.scala +++ b/streaming/src/main/scala/spark/streaming/StreamingContext.scala @@ -264,7 +264,7 @@ class StreamingContext private ( object StreamingContext { def createNewSparkContext(master: String, frameworkName: String): SparkContext = { - if (System.getProperty("spark.cleanup.delay", "-1").toInt < 0) { + if (System.getProperty("spark.cleanup.delay", "-1").toDouble < 0) { System.setProperty("spark.cleanup.delay", "60") } new SparkContext(master, frameworkName) From e463ae492068d2922e1d50c051a87f8010953dff Mon Sep 17 00:00:00 2001 From: Tathagata Das Date: Wed, 28 Nov 2012 14:05:01 -0800 Subject: [PATCH 126/226] Modified StorageLevel and BlockManagerId to cache common objects and use cached object while deserializing. --- .../scala/spark/storage/BlockManager.scala | 28 +---------- .../scala/spark/storage/BlockManagerId.scala | 48 +++++++++++++++++++ .../scala/spark/storage/StorageLevel.scala | 28 ++++++++++- .../spark/storage/BlockManagerSuite.scala | 26 ++++++++++ 4 files changed, 101 insertions(+), 29 deletions(-) create mode 100644 core/src/main/scala/spark/storage/BlockManagerId.scala diff --git a/core/src/main/scala/spark/storage/BlockManager.scala b/core/src/main/scala/spark/storage/BlockManager.scala index 70d6d8369d..e4aa9247a3 100644 --- a/core/src/main/scala/spark/storage/BlockManager.scala +++ b/core/src/main/scala/spark/storage/BlockManager.scala @@ -20,33 +20,7 @@ import com.ning.compress.lzf.{LZFInputStream, LZFOutputStream} import sun.nio.ch.DirectBuffer -private[spark] class BlockManagerId(var ip: String, var port: Int) extends Externalizable { - def this() = this(null, 0) // For deserialization only - - def this(in: ObjectInput) = this(in.readUTF(), in.readInt()) - - override def writeExternal(out: ObjectOutput) { - out.writeUTF(ip) - out.writeInt(port) - } - - override def readExternal(in: ObjectInput) { - ip = in.readUTF() - port = in.readInt() - } - - override def toString = "BlockManagerId(" + ip + ", " + port + ")" - - override def hashCode = ip.hashCode * 41 + port - - override def equals(that: Any) = that match { - case id: BlockManagerId => port == id.port && ip == id.ip - case _ => false - } -} - - -private[spark] +private[spark] case class BlockException(blockId: String, message: String, ex: Exception = null) extends Exception(message) diff --git a/core/src/main/scala/spark/storage/BlockManagerId.scala b/core/src/main/scala/spark/storage/BlockManagerId.scala new file mode 100644 index 0000000000..4933cc6606 --- /dev/null +++ b/core/src/main/scala/spark/storage/BlockManagerId.scala @@ -0,0 +1,48 @@ +package spark.storage + +import java.io.{IOException, ObjectOutput, ObjectInput, Externalizable} +import java.util.concurrent.ConcurrentHashMap + +private[spark] class BlockManagerId(var ip: String, var port: Int) extends Externalizable { + def this() = this(null, 0) // For deserialization only + + def this(in: ObjectInput) = this(in.readUTF(), in.readInt()) + + override def 
writeExternal(out: ObjectOutput) { + out.writeUTF(ip) + out.writeInt(port) + } + + override def readExternal(in: ObjectInput) { + ip = in.readUTF() + port = in.readInt() + } + + @throws(classOf[IOException]) + private def readResolve(): Object = { + BlockManagerId.getCachedBlockManagerId(this) + } + + + override def toString = "BlockManagerId(" + ip + ", " + port + ")" + + override def hashCode = ip.hashCode * 41 + port + + override def equals(that: Any) = that match { + case id: BlockManagerId => port == id.port && ip == id.ip + case _ => false + } +} + +object BlockManagerId { + val blockManagerIdCache = new ConcurrentHashMap[BlockManagerId, BlockManagerId]() + + def getCachedBlockManagerId(id: BlockManagerId): BlockManagerId = { + if (blockManagerIdCache.containsKey(id)) { + blockManagerIdCache.get(id) + } else { + blockManagerIdCache.put(id, id) + id + } + } +} \ No newline at end of file diff --git a/core/src/main/scala/spark/storage/StorageLevel.scala b/core/src/main/scala/spark/storage/StorageLevel.scala index c497f03e0c..eb88eb2759 100644 --- a/core/src/main/scala/spark/storage/StorageLevel.scala +++ b/core/src/main/scala/spark/storage/StorageLevel.scala @@ -1,6 +1,9 @@ package spark.storage -import java.io.{Externalizable, ObjectInput, ObjectOutput} +import java.io.{IOException, Externalizable, ObjectInput, ObjectOutput} +import collection.mutable +import util.Random +import collection.mutable.ArrayBuffer /** * Flags for controlling the storage of an RDD. Each StorageLevel records whether to use memory, @@ -17,7 +20,8 @@ class StorageLevel( extends Externalizable { // TODO: Also add fields for caching priority, dataset ID, and flushing. - + assert(replication < 40, "Replication restricted to be less than 40 for calculating hashcodes") + def this(flags: Int, replication: Int) { this((flags & 4) != 0, (flags & 2) != 0, (flags & 1) != 0, replication) } @@ -27,6 +31,10 @@ class StorageLevel( override def clone(): StorageLevel = new StorageLevel( this.useDisk, this.useMemory, this.deserialized, this.replication) + override def hashCode(): Int = { + toInt * 41 + replication + } + override def equals(other: Any): Boolean = other match { case s: StorageLevel => s.useDisk == useDisk && @@ -66,6 +74,11 @@ class StorageLevel( replication = in.readByte() } + @throws(classOf[IOException]) + private def readResolve(): Object = { + StorageLevel.getCachedStorageLevel(this) + } + override def toString: String = "StorageLevel(%b, %b, %b, %d)".format(useDisk, useMemory, deserialized, replication) } @@ -82,4 +95,15 @@ object StorageLevel { val MEMORY_AND_DISK_2 = new StorageLevel(true, true, true, 2) val MEMORY_AND_DISK_SER = new StorageLevel(true, true, false) val MEMORY_AND_DISK_SER_2 = new StorageLevel(true, true, false, 2) + + val storageLevelCache = new java.util.concurrent.ConcurrentHashMap[StorageLevel, StorageLevel]() + + def getCachedStorageLevel(level: StorageLevel): StorageLevel = { + if (storageLevelCache.containsKey(level)) { + storageLevelCache.get(level) + } else { + storageLevelCache.put(level, level) + level + } + } } diff --git a/core/src/test/scala/spark/storage/BlockManagerSuite.scala b/core/src/test/scala/spark/storage/BlockManagerSuite.scala index 0e78228134..a2d5e39859 100644 --- a/core/src/test/scala/spark/storage/BlockManagerSuite.scala +++ b/core/src/test/scala/spark/storage/BlockManagerSuite.scala @@ -57,6 +57,32 @@ class BlockManagerSuite extends FunSuite with BeforeAndAfter with PrivateMethodT } } + test("StorageLevel object caching") { + val level1 = new 
StorageLevel(false, false, false, 3) + val level2 = new StorageLevel(false, false, false, 3) + val bytes1 = spark.Utils.serialize(level1) + val level1_ = spark.Utils.deserialize[StorageLevel](bytes1) + val bytes2 = spark.Utils.serialize(level2) + val level2_ = spark.Utils.deserialize[StorageLevel](bytes2) + assert(level1_ === level1, "Deserialized level1 not same as original level1") + assert(level2_ === level2, "Deserialized level2 not same as original level1") + assert(level1_ === level2_, "Deserialized level1 not same as deserialized level2") + assert(level2_.eq(level1_), "Deserialized level2 not the same object as deserialized level1") + } + + test("BlockManagerId object caching") { + val id1 = new StorageLevel(false, false, false, 3) + val id2 = new StorageLevel(false, false, false, 3) + val bytes1 = spark.Utils.serialize(id1) + val id1_ = spark.Utils.deserialize[StorageLevel](bytes1) + val bytes2 = spark.Utils.serialize(id2) + val id2_ = spark.Utils.deserialize[StorageLevel](bytes2) + assert(id1_ === id1, "Deserialized id1 not same as original id1") + assert(id2_ === id2, "Deserialized id2 not same as original id1") + assert(id1_ === id2_, "Deserialized id1 not same as deserialized id2") + assert(id2_.eq(id1_), "Deserialized id2 not the same object as deserialized level1") + } + test("master + 1 manager interaction") { store = new BlockManager(master, serializer, 2000) val a1 = new Array[Byte](400) From 9e9e9e1d898387a1996e4c57128bafadb5938a9b Mon Sep 17 00:00:00 2001 From: Tathagata Das Date: Wed, 28 Nov 2012 18:48:14 -0800 Subject: [PATCH 127/226] Renamed CleanupTask to MetadataCleaner. --- core/src/main/scala/spark/CacheTracker.scala | 6 +++--- core/src/main/scala/spark/MapOutputTracker.scala | 6 +++--- core/src/main/scala/spark/scheduler/DAGScheduler.scala | 6 +++--- core/src/main/scala/spark/scheduler/ShuffleMapTask.scala | 5 +++-- .../spark/util/{CleanupTask.scala => MetadataCleaner.scala} | 6 +++--- 5 files changed, 15 insertions(+), 14 deletions(-) rename core/src/main/scala/spark/util/{CleanupTask.scala => MetadataCleaner.scala} (77%) diff --git a/core/src/main/scala/spark/CacheTracker.scala b/core/src/main/scala/spark/CacheTracker.scala index 0ee59bee0f..9888f061d9 100644 --- a/core/src/main/scala/spark/CacheTracker.scala +++ b/core/src/main/scala/spark/CacheTracker.scala @@ -14,7 +14,7 @@ import scala.collection.mutable.HashSet import spark.storage.BlockManager import spark.storage.StorageLevel -import util.{CleanupTask, TimeStampedHashMap} +import util.{MetadataCleaner, TimeStampedHashMap} private[spark] sealed trait CacheTrackerMessage @@ -39,7 +39,7 @@ private[spark] class CacheTrackerActor extends Actor with Logging { private val slaveCapacity = new HashMap[String, Long] private val slaveUsage = new HashMap[String, Long] - private val cleanupTask = new CleanupTask("CacheTracker", locs.cleanup) + private val metadataCleaner = new MetadataCleaner("CacheTracker", locs.cleanup) private def getCacheUsage(host: String): Long = slaveUsage.getOrElse(host, 0L) private def getCacheCapacity(host: String): Long = slaveCapacity.getOrElse(host, 0L) @@ -89,7 +89,7 @@ private[spark] class CacheTrackerActor extends Actor with Logging { case StopCacheTracker => logInfo("Stopping CacheTrackerActor") sender ! 
true - cleanupTask.cancel() + metadataCleaner.cancel() context.stop(self) } } diff --git a/core/src/main/scala/spark/MapOutputTracker.scala b/core/src/main/scala/spark/MapOutputTracker.scala index d0be1bb913..20ff5431af 100644 --- a/core/src/main/scala/spark/MapOutputTracker.scala +++ b/core/src/main/scala/spark/MapOutputTracker.scala @@ -17,7 +17,7 @@ import scala.collection.mutable.HashSet import scheduler.MapStatus import spark.storage.BlockManagerId import java.util.zip.{GZIPInputStream, GZIPOutputStream} -import util.{CleanupTask, TimeStampedHashMap} +import util.{MetadataCleaner, TimeStampedHashMap} private[spark] sealed trait MapOutputTrackerMessage private[spark] case class GetMapOutputStatuses(shuffleId: Int, requester: String) @@ -64,7 +64,7 @@ private[spark] class MapOutputTracker(actorSystem: ActorSystem, isMaster: Boolea actorSystem.actorFor(url) } - val cleanupTask = new CleanupTask("MapOutputTracker", this.cleanup) + val metadataCleaner = new MetadataCleaner("MapOutputTracker", this.cleanup) // Send a message to the trackerActor and get its result within a default timeout, or // throw a SparkException if this fails. @@ -175,7 +175,7 @@ private[spark] class MapOutputTracker(actorSystem: ActorSystem, isMaster: Boolea def stop() { communicate(StopMapOutputTracker) mapStatuses.clear() - cleanupTask.cancel() + metadataCleaner.cancel() trackerActor = null } diff --git a/core/src/main/scala/spark/scheduler/DAGScheduler.scala b/core/src/main/scala/spark/scheduler/DAGScheduler.scala index affacb43ca..4b2570fa2b 100644 --- a/core/src/main/scala/spark/scheduler/DAGScheduler.scala +++ b/core/src/main/scala/spark/scheduler/DAGScheduler.scala @@ -14,7 +14,7 @@ import spark.partial.ApproximateEvaluator import spark.partial.PartialResult import spark.storage.BlockManagerMaster import spark.storage.BlockManagerId -import util.{CleanupTask, TimeStampedHashMap} +import util.{MetadataCleaner, TimeStampedHashMap} /** * A Scheduler subclass that implements stage-oriented scheduling. It computes a DAG of stages for @@ -84,7 +84,7 @@ class DAGScheduler(taskSched: TaskScheduler) extends TaskSchedulerListener with val activeJobs = new HashSet[ActiveJob] val resultStageToJob = new HashMap[Stage, ActiveJob] - val cleanupTask = new CleanupTask("DAGScheduler", this.cleanup) + val metadataCleaner = new MetadataCleaner("DAGScheduler", this.cleanup) // Start a thread to run the DAGScheduler event loop new Thread("DAGScheduler") { @@ -610,7 +610,7 @@ class DAGScheduler(taskSched: TaskScheduler) extends TaskSchedulerListener with def stop() { eventQueue.put(StopDAGScheduler) - cleanupTask.cancel() + metadataCleaner.cancel() taskSched.stop() } } diff --git a/core/src/main/scala/spark/scheduler/ShuffleMapTask.scala b/core/src/main/scala/spark/scheduler/ShuffleMapTask.scala index fbf618c906..683f5ebec3 100644 --- a/core/src/main/scala/spark/scheduler/ShuffleMapTask.scala +++ b/core/src/main/scala/spark/scheduler/ShuffleMapTask.scala @@ -14,7 +14,7 @@ import com.ning.compress.lzf.LZFOutputStream import spark._ import spark.storage._ -import util.{TimeStampedHashMap, CleanupTask} +import util.{TimeStampedHashMap, MetadataCleaner} private[spark] object ShuffleMapTask { @@ -22,7 +22,8 @@ private[spark] object ShuffleMapTask { // Served as a cache for task serialization because serialization can be // expensive on the master node if it needs to launch thousands of tasks. 
val serializedInfoCache = new TimeStampedHashMap[Int, Array[Byte]] - val cleanupTask = new CleanupTask("ShuffleMapTask", serializedInfoCache.cleanup) + + val metadataCleaner = new MetadataCleaner("ShuffleMapTask", serializedInfoCache.cleanup) def serializeInfo(stageId: Int, rdd: RDD[_], dep: ShuffleDependency[_,_]): Array[Byte] = { synchronized { diff --git a/core/src/main/scala/spark/util/CleanupTask.scala b/core/src/main/scala/spark/util/MetadataCleaner.scala similarity index 77% rename from core/src/main/scala/spark/util/CleanupTask.scala rename to core/src/main/scala/spark/util/MetadataCleaner.scala index a4357c62c6..71ac39864e 100644 --- a/core/src/main/scala/spark/util/CleanupTask.scala +++ b/core/src/main/scala/spark/util/MetadataCleaner.scala @@ -4,7 +4,7 @@ import java.util.concurrent.{TimeUnit, ScheduledFuture, Executors} import java.util.{TimerTask, Timer} import spark.Logging -class CleanupTask(name: String, cleanupFunc: (Long) => Unit) extends Logging { +class MetadataCleaner(name: String, cleanupFunc: (Long) => Unit) extends Logging { val delaySeconds = (System.getProperty("spark.cleanup.delay", "-100").toDouble * 60).toInt val periodSeconds = math.max(10, delaySeconds / 10) val timer = new Timer(name + " cleanup timer", true) @@ -13,7 +13,7 @@ class CleanupTask(name: String, cleanupFunc: (Long) => Unit) extends Logging { try { if (delaySeconds > 0) { cleanupFunc(System.currentTimeMillis() - (delaySeconds * 1000)) - logInfo("Ran cleanup task for " + name) + logInfo("Ran metadata cleaner for " + name) } } catch { case e: Exception => logError("Error running cleanup task for " + name, e) @@ -21,7 +21,7 @@ class CleanupTask(name: String, cleanupFunc: (Long) => Unit) extends Logging { } } if (periodSeconds > 0) { - logInfo("Starting cleanup task for " + name + " with delay of " + delaySeconds + " seconds and " + logInfo("Starting metadata cleaner for " + name + " with delay of " + delaySeconds + " seconds and " + "period of " + periodSeconds + " secs") timer.schedule(task, periodSeconds * 1000, periodSeconds * 1000) } From c9789751bfc496d24e8369a0035d57f0ed8dcb58 Mon Sep 17 00:00:00 2001 From: Tathagata Das Date: Wed, 28 Nov 2012 23:18:24 -0800 Subject: [PATCH 128/226] Added metadata cleaner to BlockManager to remove old blocks completely. 
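A note on the object-caching approach from patch 126 above: caching canonical instances and returning them from readResolve() makes every deserialized copy of an equal id or storage level collapse to one shared object. A standalone sketch under assumed names (Endpoint is purely illustrative, not a class in this patch):

import java.io.{Externalizable, ObjectInput, ObjectOutput}
import java.util.concurrent.ConcurrentHashMap

class Endpoint(var host: String, var port: Int) extends Externalizable {
  def this() = this(null, 0)  // no-arg constructor required for deserialization

  def writeExternal(out: ObjectOutput) { out.writeUTF(host); out.writeInt(port) }
  def readExternal(in: ObjectInput) { host = in.readUTF(); port = in.readInt() }

  // Invoked by Java serialization after readExternal: swap in the canonical instance.
  private def readResolve(): Object = Endpoint.canonical(this)

  override def hashCode = host.hashCode * 41 + port
  override def equals(other: Any) = other match {
    case e: Endpoint => e.host == host && e.port == port
    case _ => false
  }
}

object Endpoint {
  private val cache = new ConcurrentHashMap[Endpoint, Endpoint]()
  // putIfAbsent keeps the first instance seen and returns it for all later equal copies.
  def canonical(e: Endpoint): Endpoint = {
    val prev = cache.putIfAbsent(e, e)
    if (prev == null) e else prev
  }
}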
--- .../scala/spark/storage/BlockManager.scala | 47 ++++++++++++++----- .../spark/storage/BlockManagerMaster.scala | 1 + 2 files changed, 36 insertions(+), 12 deletions(-) diff --git a/core/src/main/scala/spark/storage/BlockManager.scala b/core/src/main/scala/spark/storage/BlockManager.scala index e4aa9247a3..1e36578e1a 100644 --- a/core/src/main/scala/spark/storage/BlockManager.scala +++ b/core/src/main/scala/spark/storage/BlockManager.scala @@ -10,12 +10,12 @@ import java.nio.{MappedByteBuffer, ByteBuffer} import java.util.concurrent.{ConcurrentHashMap, LinkedBlockingQueue} import scala.collection.mutable.{ArrayBuffer, HashMap, HashSet, Queue} -import scala.collection.JavaConversions._ import spark.{CacheTracker, Logging, SizeEstimator, SparkException, Utils} import spark.network._ import spark.serializer.Serializer -import spark.util.ByteBufferInputStream +import spark.util.{MetadataCleaner, TimeStampedHashMap, ByteBufferInputStream} + import com.ning.compress.lzf.{LZFInputStream, LZFOutputStream} import sun.nio.ch.DirectBuffer @@ -51,7 +51,7 @@ class BlockManager(val master: BlockManagerMaster, val serializer: Serializer, m } } - private val blockInfo = new ConcurrentHashMap[String, BlockInfo](1000) + private val blockInfo = new TimeStampedHashMap[String, BlockInfo]() private[storage] val memoryStore: BlockStore = new MemoryStore(this, maxMemory) private[storage] val diskStore: BlockStore = @@ -80,6 +80,7 @@ class BlockManager(val master: BlockManagerMaster, val serializer: Serializer, m val host = System.getProperty("spark.hostname", Utils.localHostName()) + val metadataCleaner = new MetadataCleaner("BlockManager", this.dropOldBlocks) initialize() /** @@ -102,8 +103,7 @@ class BlockManager(val master: BlockManagerMaster, val serializer: Serializer, m * Get storage level of local block. If no info exists for the block, then returns null. 
*/ def getLevel(blockId: String): StorageLevel = { - val info = blockInfo.get(blockId) - if (info != null) info.level else null + blockInfo.get(blockId).map(_.level).orNull } /** @@ -113,9 +113,9 @@ class BlockManager(val master: BlockManagerMaster, val serializer: Serializer, m */ def reportBlockStatus(blockId: String) { val (curLevel, inMemSize, onDiskSize) = blockInfo.get(blockId) match { - case null => + case None => (StorageLevel.NONE, 0L, 0L) - case info => + case Some(info) => info.synchronized { info.level match { case null => @@ -173,7 +173,7 @@ class BlockManager(val master: BlockManagerMaster, val serializer: Serializer, m } } - val info = blockInfo.get(blockId) + val info = blockInfo.get(blockId).orNull if (info != null) { info.synchronized { info.waitForReady() // In case the block is still being put() by another thread @@ -258,7 +258,7 @@ class BlockManager(val master: BlockManagerMaster, val serializer: Serializer, m } } - val info = blockInfo.get(blockId) + val info = blockInfo.get(blockId).orNull if (info != null) { info.synchronized { info.waitForReady() // In case the block is still being put() by another thread @@ -517,7 +517,7 @@ class BlockManager(val master: BlockManagerMaster, val serializer: Serializer, m throw new IllegalArgumentException("Storage level is null or invalid") } - val oldBlock = blockInfo.get(blockId) + val oldBlock = blockInfo.get(blockId).orNull if (oldBlock != null) { logWarning("Block " + blockId + " already exists on this machine; not re-adding it") oldBlock.waitForReady() @@ -618,7 +618,7 @@ class BlockManager(val master: BlockManagerMaster, val serializer: Serializer, m throw new IllegalArgumentException("Storage level is null or invalid") } - if (blockInfo.containsKey(blockId)) { + if (blockInfo.contains(blockId)) { logWarning("Block " + blockId + " already exists on this machine; not re-adding it") return } @@ -740,7 +740,7 @@ class BlockManager(val master: BlockManagerMaster, val serializer: Serializer, m */ def dropFromMemory(blockId: String, data: Either[ArrayBuffer[Any], ByteBuffer]) { logInfo("Dropping block " + blockId + " from memory") - val info = blockInfo.get(blockId) + val info = blockInfo.get(blockId).orNull if (info != null) { info.synchronized { val level = info.level @@ -767,6 +767,29 @@ class BlockManager(val master: BlockManagerMaster, val serializer: Serializer, m } } + def dropOldBlocks(cleanupTime: Long) { + logInfo("Dropping blocks older than " + cleanupTime) + val iterator = blockInfo.internalMap.entrySet().iterator() + while(iterator.hasNext) { + val entry = iterator.next() + val (id, info, time) = (entry.getKey, entry.getValue._1, entry.getValue._2) + if (time < cleanupTime) { + info.synchronized { + val level = info.level + if (level.useMemory) { + memoryStore.remove(id) + } + if (level.useDisk) { + diskStore.remove(id) + } + iterator.remove() + logInfo("Dropped block " + id) + } + reportBlockStatus(id) + } + } + } + def shouldCompress(blockId: String): Boolean = { if (blockId.startsWith("shuffle_")) { compressShuffle diff --git a/core/src/main/scala/spark/storage/BlockManagerMaster.scala b/core/src/main/scala/spark/storage/BlockManagerMaster.scala index 397395a65b..af15663621 100644 --- a/core/src/main/scala/spark/storage/BlockManagerMaster.scala +++ b/core/src/main/scala/spark/storage/BlockManagerMaster.scala @@ -341,6 +341,7 @@ private[spark] class BlockManagerMasterActor(val isLocal: Boolean) extends Actor throw new Exception("Self index for " + blockManagerId + " not found") } + // Note that this logic will 
select the same node multiple times if there aren't enough peers var index = selfIndex while (res.size < size) { index += 1 From 6fcd09f499dca66d255aa7196839156433aae442 Mon Sep 17 00:00:00 2001 From: Tathagata Das Date: Thu, 29 Nov 2012 02:06:33 -0800 Subject: [PATCH 129/226] Added TimeStampedHashSet and used that to cleanup the list of registered RDD IDs in CacheTracker. --- core/src/main/scala/spark/CacheTracker.scala | 10 ++- .../scala/spark/util/TimeStampedHashMap.scala | 14 ++-- .../scala/spark/util/TimeStampedHashSet.scala | 66 +++++++++++++++++++ 3 files changed, 81 insertions(+), 9 deletions(-) create mode 100644 core/src/main/scala/spark/util/TimeStampedHashSet.scala diff --git a/core/src/main/scala/spark/CacheTracker.scala b/core/src/main/scala/spark/CacheTracker.scala index 9888f061d9..cb54e12257 100644 --- a/core/src/main/scala/spark/CacheTracker.scala +++ b/core/src/main/scala/spark/CacheTracker.scala @@ -14,7 +14,7 @@ import scala.collection.mutable.HashSet import spark.storage.BlockManager import spark.storage.StorageLevel -import util.{MetadataCleaner, TimeStampedHashMap} +import util.{TimeStampedHashSet, MetadataCleaner, TimeStampedHashMap} private[spark] sealed trait CacheTrackerMessage @@ -39,7 +39,7 @@ private[spark] class CacheTrackerActor extends Actor with Logging { private val slaveCapacity = new HashMap[String, Long] private val slaveUsage = new HashMap[String, Long] - private val metadataCleaner = new MetadataCleaner("CacheTracker", locs.cleanup) + private val metadataCleaner = new MetadataCleaner("CacheTrackerActor", locs.cleanup) private def getCacheUsage(host: String): Long = slaveUsage.getOrElse(host, 0L) private def getCacheCapacity(host: String): Long = slaveCapacity.getOrElse(host, 0L) @@ -113,11 +113,15 @@ private[spark] class CacheTracker(actorSystem: ActorSystem, isMaster: Boolean, b actorSystem.actorFor(url) } - val registeredRddIds = new HashSet[Int] + // TODO: Consider removing this HashSet completely as locs CacheTrackerActor already + // keeps track of registered RDDs + val registeredRddIds = new TimeStampedHashSet[Int] // Remembers which splits are currently being loaded (on worker nodes) val loading = new HashSet[String] + val metadataCleaner = new MetadataCleaner("CacheTracker", registeredRddIds.cleanup) + // Send a message to the trackerActor and get its result within a default timeout, or // throw a SparkException if this fails. 
def askTracker(message: Any): Any = { diff --git a/core/src/main/scala/spark/util/TimeStampedHashMap.scala b/core/src/main/scala/spark/util/TimeStampedHashMap.scala index 7a22b80a20..9bcc9245c0 100644 --- a/core/src/main/scala/spark/util/TimeStampedHashMap.scala +++ b/core/src/main/scala/spark/util/TimeStampedHashMap.scala @@ -1,7 +1,7 @@ package spark.util -import scala.collection.JavaConversions._ -import scala.collection.mutable.{HashMap, Map} +import scala.collection.JavaConversions +import scala.collection.mutable.Map import java.util.concurrent.ConcurrentHashMap /** @@ -20,7 +20,7 @@ class TimeStampedHashMap[A, B] extends Map[A, B]() { def iterator: Iterator[(A, B)] = { val jIterator = internalMap.entrySet().iterator() - jIterator.map(kv => (kv.getKey, kv.getValue._1)) + JavaConversions.asScalaIterator(jIterator).map(kv => (kv.getKey, kv.getValue._1)) } override def + [B1 >: B](kv: (A, B1)): Map[A, B1] = { @@ -31,8 +31,10 @@ class TimeStampedHashMap[A, B] extends Map[A, B]() { } override def - (key: A): Map[A, B] = { - internalMap.remove(key) - this + val newMap = new TimeStampedHashMap[A, B] + newMap.internalMap.putAll(this.internalMap) + newMap.internalMap.remove(key) + newMap } override def += (kv: (A, B)): this.type = { @@ -56,7 +58,7 @@ class TimeStampedHashMap[A, B] extends Map[A, B]() { } override def filter(p: ((A, B)) => Boolean): Map[A, B] = { - internalMap.map(kv => (kv._1, kv._2._1)).filter(p) + JavaConversions.asScalaConcurrentMap(internalMap).map(kv => (kv._1, kv._2._1)).filter(p) } override def empty: Map[A, B] = new TimeStampedHashMap[A, B]() diff --git a/core/src/main/scala/spark/util/TimeStampedHashSet.scala b/core/src/main/scala/spark/util/TimeStampedHashSet.scala new file mode 100644 index 0000000000..539dd75844 --- /dev/null +++ b/core/src/main/scala/spark/util/TimeStampedHashSet.scala @@ -0,0 +1,66 @@ +package spark.util + +import scala.collection.mutable.Set +import scala.collection.JavaConversions +import java.util.concurrent.ConcurrentHashMap + + +class TimeStampedHashSet[A] extends Set[A] { + val internalMap = new ConcurrentHashMap[A, Long]() + + def contains(key: A): Boolean = { + internalMap.contains(key) + } + + def iterator: Iterator[A] = { + val jIterator = internalMap.entrySet().iterator() + JavaConversions.asScalaIterator(jIterator).map(_.getKey) + } + + override def + (elem: A): Set[A] = { + val newSet = new TimeStampedHashSet[A] + newSet ++= this + newSet += elem + newSet + } + + override def - (elem: A): Set[A] = { + val newSet = new TimeStampedHashSet[A] + newSet ++= this + newSet -= elem + newSet + } + + override def += (key: A): this.type = { + internalMap.put(key, currentTime) + this + } + + override def -= (key: A): this.type = { + internalMap.remove(key) + this + } + + override def empty: Set[A] = new TimeStampedHashSet[A]() + + override def size(): Int = internalMap.size() + + override def foreach[U](f: (A) => U): Unit = { + val iterator = internalMap.entrySet().iterator() + while(iterator.hasNext) { + f(iterator.next.getKey) + } + } + + def cleanup(threshTime: Long) { + val iterator = internalMap.entrySet().iterator() + while(iterator.hasNext) { + val entry = iterator.next() + if (entry.getValue < threshTime) { + iterator.remove() + } + } + } + + private def currentTime: Long = System.currentTimeMillis() +} From 62965c5d8e3f4f0246ac2c8814ac75ea82b3f238 Mon Sep 17 00:00:00 2001 From: Tathagata Das Date: Sat, 1 Dec 2012 08:26:10 -0800 Subject: [PATCH 130/226] Added ssc.union --- .../main/scala/spark/streaming/ReducedWindowedDStream.scala | 3 
++- .../src/main/scala/spark/streaming/StreamingContext.scala | 4 ++++ 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/streaming/src/main/scala/spark/streaming/ReducedWindowedDStream.scala b/streaming/src/main/scala/spark/streaming/ReducedWindowedDStream.scala index 8b484e6acf..bb852cbcca 100644 --- a/streaming/src/main/scala/spark/streaming/ReducedWindowedDStream.scala +++ b/streaming/src/main/scala/spark/streaming/ReducedWindowedDStream.scala @@ -118,7 +118,8 @@ class ReducedWindowedDStream[K: ClassManifest, V: ClassManifest]( if (seqOfValues(0).isEmpty) { // If previous window's reduce value does not exist, then at least new values should exist if (newValues.isEmpty) { - throw new Exception("Neither previous window has value for key, nor new values found") + val info = "seqOfValues =\n" + seqOfValues.map(x => "[" + x.mkString(",") + "]").mkString("\n") + throw new Exception("Neither previous window has value for key, nor new values found\n" + info) } // Reduce the new values newValues.reduce(reduceF) // return diff --git a/streaming/src/main/scala/spark/streaming/StreamingContext.scala b/streaming/src/main/scala/spark/streaming/StreamingContext.scala index 90dd560752..63d8766749 100644 --- a/streaming/src/main/scala/spark/streaming/StreamingContext.scala +++ b/streaming/src/main/scala/spark/streaming/StreamingContext.scala @@ -189,6 +189,10 @@ class StreamingContext private ( inputStream } + def union[T: ClassManifest](streams: Seq[DStream[T]]): DStream[T] = { + new UnionDStream[T](streams.toArray) + } + /** * This function registers a InputDStream as an input stream that will be * started (InputDStream.start() called) to get the input data streams. From 477de94894b7d8eeed281d33c12bcb2269d117c7 Mon Sep 17 00:00:00 2001 From: Tathagata Das Date: Sat, 1 Dec 2012 13:15:06 -0800 Subject: [PATCH 131/226] Minor modifications. 
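The ssc.union helper added in patch 130 above simply wraps a sequence of DStreams in a UnionDStream. A small usage sketch (the wrapper function and its name are assumptions for illustration, not part of this patch):

import spark.streaming.{StreamingContext, DStream}

// Merge any number of homogeneous streams into one, e.g. one receiver per host.
def mergeStreams[T: ClassManifest](ssc: StreamingContext, streams: Seq[DStream[T]]): DStream[T] = {
  require(!streams.isEmpty, "At least one input stream is required")
  ssc.union(streams)
}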
--- .../main/scala/spark/util/MetadataCleaner.scala | 7 ++++++- .../src/main/scala/spark/streaming/DStream.scala | 15 ++++++++++++++- .../spark/streaming/ReducedWindowedDStream.scala | 4 ++-- .../scala/spark/streaming/StreamingContext.scala | 8 ++++++-- 4 files changed, 28 insertions(+), 6 deletions(-) diff --git a/core/src/main/scala/spark/util/MetadataCleaner.scala b/core/src/main/scala/spark/util/MetadataCleaner.scala index 71ac39864e..2541b26255 100644 --- a/core/src/main/scala/spark/util/MetadataCleaner.scala +++ b/core/src/main/scala/spark/util/MetadataCleaner.scala @@ -5,7 +5,7 @@ import java.util.{TimerTask, Timer} import spark.Logging class MetadataCleaner(name: String, cleanupFunc: (Long) => Unit) extends Logging { - val delaySeconds = (System.getProperty("spark.cleanup.delay", "-100").toDouble * 60).toInt + val delaySeconds = MetadataCleaner.getDelaySeconds val periodSeconds = math.max(10, delaySeconds / 10) val timer = new Timer(name + " cleanup timer", true) val task = new TimerTask { @@ -30,3 +30,8 @@ class MetadataCleaner(name: String, cleanupFunc: (Long) => Unit) extends Logging timer.cancel() } } + +object MetadataCleaner { + def getDelaySeconds = (System.getProperty("spark.cleaner.delay", "-100").toDouble * 60).toInt + def setDelaySeconds(delay: Long) { System.setProperty("spark.cleaner.delay", delay.toString) } +} diff --git a/streaming/src/main/scala/spark/streaming/DStream.scala b/streaming/src/main/scala/spark/streaming/DStream.scala index 8efda2074d..28a3e2dfc7 100644 --- a/streaming/src/main/scala/spark/streaming/DStream.scala +++ b/streaming/src/main/scala/spark/streaming/DStream.scala @@ -146,6 +146,8 @@ extends Serializable with Logging { } protected[streaming] def validate() { + assert(rememberDuration != null, "Remember duration is set to null") + assert( !mustCheckpoint || checkpointInterval != null, "The checkpoint interval for " + this.getClass.getSimpleName + " has not been set. " + @@ -180,13 +182,24 @@ extends Serializable with Logging { checkpointInterval + "). Please set it to higher than " + checkpointInterval + "." ) + val metadataCleanupDelay = System.getProperty("spark.cleanup.delay", "-1").toDouble + assert( + metadataCleanupDelay < 0 || rememberDuration < metadataCleanupDelay * 60 * 1000, + "It seems you are doing some DStream window operation or setting a checkpoint interval " + + "which requires " + this.getClass.getSimpleName + " to remember generated RDDs for more " + + "than " + rememberDuration.milliseconds + " milliseconds. But the Spark's metadata cleanup" + + "delay is set to " + metadataCleanupDelay + " minutes, which is not sufficient. Please set " + + "the Java property 'spark.cleanup.delay' to more than " + + math.ceil(rememberDuration.millis.toDouble / 60000.0).toInt + " minutes." 
+ ) + dependencies.foreach(_.validate()) logInfo("Slide time = " + slideTime) logInfo("Storage level = " + storageLevel) logInfo("Checkpoint interval = " + checkpointInterval) logInfo("Remember duration = " + rememberDuration) - logInfo("Initialized " + this) + logInfo("Initialized and validated " + this) } protected[streaming] def setContext(s: StreamingContext) { diff --git a/streaming/src/main/scala/spark/streaming/ReducedWindowedDStream.scala b/streaming/src/main/scala/spark/streaming/ReducedWindowedDStream.scala index bb852cbcca..f63a9e0011 100644 --- a/streaming/src/main/scala/spark/streaming/ReducedWindowedDStream.scala +++ b/streaming/src/main/scala/spark/streaming/ReducedWindowedDStream.scala @@ -118,8 +118,8 @@ class ReducedWindowedDStream[K: ClassManifest, V: ClassManifest]( if (seqOfValues(0).isEmpty) { // If previous window's reduce value does not exist, then at least new values should exist if (newValues.isEmpty) { - val info = "seqOfValues =\n" + seqOfValues.map(x => "[" + x.mkString(",") + "]").mkString("\n") - throw new Exception("Neither previous window has value for key, nor new values found\n" + info) + throw new Exception("Neither previous window has value for key, nor new values found. " + + "Are you sure your key class hashes consistently?") } // Reduce the new values newValues.reduce(reduceF) // return diff --git a/streaming/src/main/scala/spark/streaming/StreamingContext.scala b/streaming/src/main/scala/spark/streaming/StreamingContext.scala index 63d8766749..9c19f6588d 100644 --- a/streaming/src/main/scala/spark/streaming/StreamingContext.scala +++ b/streaming/src/main/scala/spark/streaming/StreamingContext.scala @@ -17,6 +17,7 @@ import org.apache.hadoop.mapreduce.{InputFormat => NewInputFormat} import org.apache.hadoop.mapreduce.lib.input.TextInputFormat import org.apache.hadoop.fs.Path import java.util.UUID +import spark.util.MetadataCleaner /** * A StreamingContext is the main entry point for Spark Streaming functionality. Besides the basic @@ -268,8 +269,11 @@ class StreamingContext private ( object StreamingContext { def createNewSparkContext(master: String, frameworkName: String): SparkContext = { - if (System.getProperty("spark.cleanup.delay", "-1").toDouble < 0) { - System.setProperty("spark.cleanup.delay", "60") + + // Set the default cleaner delay to an hour if not already set. + // This should be sufficient for even 1 second interval. + if (MetadataCleaner.getDelaySeconds < 0) { + MetadataCleaner.setDelaySeconds(60) } new SparkContext(master, frameworkName) } From b4dba55f78b0dfda728cf69c9c17e4863010d28d Mon Sep 17 00:00:00 2001 From: Tathagata Das Date: Sun, 2 Dec 2012 02:03:05 +0000 Subject: [PATCH 132/226] Made RDD checkpoint not create a new thread. Fixed bug in detecting when spark.cleaner.delay is insufficient. 
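The validation above ties a DStream's remember duration to the metadata cleaner delay, and createNewSparkContext defaults the delay to 60 minutes. An application with long window operations would raise it before the context is created; a sketch (the Seconds helper is assumed to be available as a Time factory):

// Raise the cleaner delay to two hours before the SparkContext is created, so the
// DStream.validate() check above passes for long windows. getDelaySeconds reads the
// property as minutes and multiplies by 60.
System.setProperty("spark.cleaner.delay", "120")

val ssc = new StreamingContext("local[2]", "CleanerDelayExample", Seconds(10))
// ... register input streams and window operations, then ssc.start()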
--- core/src/main/scala/spark/RDD.scala | 31 ++++++------------- .../scala/spark/util/TimeStampedHashMap.scala | 3 +- .../main/scala/spark/streaming/DStream.scala | 9 +++--- 3 files changed, 17 insertions(+), 26 deletions(-) diff --git a/core/src/main/scala/spark/RDD.scala b/core/src/main/scala/spark/RDD.scala index 8af6c9bd6a..fbfcfbd704 100644 --- a/core/src/main/scala/spark/RDD.scala +++ b/core/src/main/scala/spark/RDD.scala @@ -211,28 +211,17 @@ abstract class RDD[T: ClassManifest]( if (startCheckpoint) { val rdd = this - val env = SparkEnv.get - - // Spawn a new thread to do the checkpoint as it takes sometime to write the RDD to file - val th = new Thread() { - override def run() { - // Save the RDD to a file, create a new HadoopRDD from it, - // and change the dependencies from the original parents to the new RDD - SparkEnv.set(env) - rdd.checkpointFile = new Path(context.checkpointDir, "rdd-" + id).toString - rdd.saveAsObjectFile(checkpointFile) - rdd.synchronized { - rdd.checkpointRDD = context.objectFile[T](checkpointFile, rdd.splits.size) - rdd.checkpointRDDSplits = rdd.checkpointRDD.splits - rdd.changeDependencies(rdd.checkpointRDD) - rdd.shouldCheckpoint = false - rdd.isCheckpointInProgress = false - rdd.isCheckpointed = true - println("Done checkpointing RDD " + rdd.id + ", " + rdd) - } - } + rdd.checkpointFile = new Path(context.checkpointDir, "rdd-" + id).toString + rdd.saveAsObjectFile(checkpointFile) + rdd.synchronized { + rdd.checkpointRDD = context.objectFile[T](checkpointFile, rdd.splits.size) + rdd.checkpointRDDSplits = rdd.checkpointRDD.splits + rdd.changeDependencies(rdd.checkpointRDD) + rdd.shouldCheckpoint = false + rdd.isCheckpointInProgress = false + rdd.isCheckpointed = true + println("Done checkpointing RDD " + rdd.id + ", " + rdd + ", created RDD " + rdd.checkpointRDD.id + ", " + rdd.checkpointRDD) } - th.start() } else { // Recursively call doCheckpoint() to perform checkpointing on parent RDD if they are marked dependencies.foreach(_.rdd.doCheckpoint()) diff --git a/core/src/main/scala/spark/util/TimeStampedHashMap.scala b/core/src/main/scala/spark/util/TimeStampedHashMap.scala index 9bcc9245c0..52f03784db 100644 --- a/core/src/main/scala/spark/util/TimeStampedHashMap.scala +++ b/core/src/main/scala/spark/util/TimeStampedHashMap.scala @@ -10,7 +10,7 @@ import java.util.concurrent.ConcurrentHashMap * threshold time can them be removed using the cleanup method. This is intended to be a drop-in * replacement of scala.collection.mutable.HashMap. */ -class TimeStampedHashMap[A, B] extends Map[A, B]() { +class TimeStampedHashMap[A, B] extends Map[A, B]() with spark.Logging { val internalMap = new ConcurrentHashMap[A, (B, Long)]() def get(key: A): Option[B] = { @@ -79,6 +79,7 @@ class TimeStampedHashMap[A, B] extends Map[A, B]() { while(iterator.hasNext) { val entry = iterator.next() if (entry.getValue._2 < threshTime) { + logDebug("Removing key " + entry.getKey) iterator.remove() } } diff --git a/streaming/src/main/scala/spark/streaming/DStream.scala b/streaming/src/main/scala/spark/streaming/DStream.scala index 28a3e2dfc7..d2e9de110e 100644 --- a/streaming/src/main/scala/spark/streaming/DStream.scala +++ b/streaming/src/main/scala/spark/streaming/DStream.scala @@ -182,14 +182,15 @@ extends Serializable with Logging { checkpointInterval + "). Please set it to higher than " + checkpointInterval + "." 
) - val metadataCleanupDelay = System.getProperty("spark.cleanup.delay", "-1").toDouble + val metadataCleanerDelay = spark.util.MetadataCleaner.getDelaySeconds + logInfo("metadataCleanupDelay = " + metadataCleanerDelay) assert( - metadataCleanupDelay < 0 || rememberDuration < metadataCleanupDelay * 60 * 1000, + metadataCleanerDelay < 0 || rememberDuration < metadataCleanerDelay * 1000, "It seems you are doing some DStream window operation or setting a checkpoint interval " + "which requires " + this.getClass.getSimpleName + " to remember generated RDDs for more " + "than " + rememberDuration.milliseconds + " milliseconds. But the Spark's metadata cleanup" + - "delay is set to " + metadataCleanupDelay + " minutes, which is not sufficient. Please set " + - "the Java property 'spark.cleanup.delay' to more than " + + "delay is set to " + (metadataCleanerDelay / 60.0) + " minutes, which is not sufficient. Please set " + + "the Java property 'spark.cleaner.delay' to more than " + math.ceil(rememberDuration.millis.toDouble / 60000.0).toInt + " minutes." ) From 609e00d599d3f429a838f598b3f32c5fdbd7ec5e Mon Sep 17 00:00:00 2001 From: Tathagata Das Date: Sun, 2 Dec 2012 02:39:08 +0000 Subject: [PATCH 133/226] Minor mods --- .../src/main/scala/spark/streaming/NetworkInputTracker.scala | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/streaming/src/main/scala/spark/streaming/NetworkInputTracker.scala b/streaming/src/main/scala/spark/streaming/NetworkInputTracker.scala index d0fef70f7e..ae6692290e 100644 --- a/streaming/src/main/scala/spark/streaming/NetworkInputTracker.scala +++ b/streaming/src/main/scala/spark/streaming/NetworkInputTracker.scala @@ -58,7 +58,7 @@ class NetworkInputTracker( throw new Exception("Register received for unexpected id " + streamId) } receiverInfo += ((streamId, receiverActor)) - logInfo("Registered receiver for network stream " + streamId) + logInfo("Registered receiver for network stream " + streamId + " from " + sender.path.address) sender ! true } case AddBlocks(streamId, blockIds) => { From a69a82be2682148f5d1ebbdede15a47c90eea73d Mon Sep 17 00:00:00 2001 From: Tathagata Das Date: Mon, 3 Dec 2012 22:37:31 -0800 Subject: [PATCH 134/226] Added metadata cleaner to HttpBroadcast to clean up old broacast files. 
--- .../scala/spark/broadcast/HttpBroadcast.scala | 24 +++++++++++++++++++ 1 file changed, 24 insertions(+) diff --git a/core/src/main/scala/spark/broadcast/HttpBroadcast.scala b/core/src/main/scala/spark/broadcast/HttpBroadcast.scala index 7eb4ddb74f..fef264aab1 100644 --- a/core/src/main/scala/spark/broadcast/HttpBroadcast.scala +++ b/core/src/main/scala/spark/broadcast/HttpBroadcast.scala @@ -11,6 +11,7 @@ import it.unimi.dsi.fastutil.io.FastBufferedOutputStream import spark._ import spark.storage.StorageLevel +import util.{MetadataCleaner, TimeStampedHashSet} private[spark] class HttpBroadcast[T](@transient var value_ : T, isLocal: Boolean, id: Long) extends Broadcast[T](id) with Logging with Serializable { @@ -64,6 +65,10 @@ private object HttpBroadcast extends Logging { private var serverUri: String = null private var server: HttpServer = null + private val files = new TimeStampedHashSet[String] + private val cleaner = new MetadataCleaner("HttpBroadcast", cleanup) + + def initialize(isMaster: Boolean) { synchronized { if (!initialized) { @@ -85,6 +90,7 @@ private object HttpBroadcast extends Logging { server = null } initialized = false + cleaner.cancel() } } @@ -108,6 +114,7 @@ private object HttpBroadcast extends Logging { val serOut = ser.serializeStream(out) serOut.writeObject(value) serOut.close() + files += file.getAbsolutePath } def read[T](id: Long): T = { @@ -123,4 +130,21 @@ private object HttpBroadcast extends Logging { serIn.close() obj } + + def cleanup(cleanupTime: Long) { + val iterator = files.internalMap.entrySet().iterator() + while(iterator.hasNext) { + val entry = iterator.next() + val (file, time) = (entry.getKey, entry.getValue) + if (time < cleanupTime) { + try { + iterator.remove() + new File(file.toString).delete() + logInfo("Deleted broadcast file '" + file + "'") + } catch { + case e: Exception => logWarning("Could not delete broadcast file '" + file + "'", e) + } + } + } + } } From 21a08529768a5073bc5c15b6c2642ceef2acd0d5 Mon Sep 17 00:00:00 2001 From: Tathagata Das Date: Tue, 4 Dec 2012 22:10:25 -0800 Subject: [PATCH 135/226] Refactored RDD checkpointing to minimize extra fields in RDD class. --- core/src/main/scala/spark/RDD.scala | 149 +++++++----------- .../main/scala/spark/RDDCheckpointData.scala | 68 ++++++++ core/src/main/scala/spark/rdd/BlockRDD.scala | 9 +- .../main/scala/spark/rdd/CartesianRDD.scala | 10 +- .../main/scala/spark/rdd/CoGroupedRDD.scala | 2 +- .../main/scala/spark/rdd/CoalescedRDD.scala | 2 +- core/src/main/scala/spark/rdd/HadoopRDD.scala | 2 - .../main/scala/spark/rdd/NewHadoopRDD.scala | 2 - core/src/main/scala/spark/rdd/UnionRDD.scala | 12 +- .../scala/spark/scheduler/DAGScheduler.scala | 2 +- .../test/scala/spark/CheckpointSuite.scala | 73 +-------- .../main/scala/spark/streaming/DStream.scala | 7 +- 12 files changed, 144 insertions(+), 194 deletions(-) create mode 100644 core/src/main/scala/spark/RDDCheckpointData.scala diff --git a/core/src/main/scala/spark/RDD.scala b/core/src/main/scala/spark/RDD.scala index fbfcfbd704..e9bd131e61 100644 --- a/core/src/main/scala/spark/RDD.scala +++ b/core/src/main/scala/spark/RDD.scala @@ -99,13 +99,7 @@ abstract class RDD[T: ClassManifest]( val partitioner: Option[Partitioner] = None /** Optionally overridden by subclasses to specify placement preferences. 
*/ - def preferredLocations(split: Split): Seq[String] = { - if (isCheckpointed) { - checkpointRDD.preferredLocations(split) - } else { - Nil - } - } + def preferredLocations(split: Split): Seq[String] = Nil /** The [[spark.SparkContext]] that this RDD was created on. */ def context = sc @@ -118,6 +112,8 @@ abstract class RDD[T: ClassManifest]( // Variables relating to persistence private var storageLevel: StorageLevel = StorageLevel.NONE + protected[spark] val checkpointData = new RDDCheckpointData(this) + /** Returns the first parent RDD */ protected[spark] def firstParent[U: ClassManifest] = { dependencies.head.rdd.asInstanceOf[RDD[U]] @@ -126,17 +122,6 @@ abstract class RDD[T: ClassManifest]( /** Returns the `i` th parent RDD */ protected[spark] def parent[U: ClassManifest](i: Int) = dependencies(i).rdd.asInstanceOf[RDD[U]] - // Variables relating to checkpointing - protected val isCheckpointable = true // override to set this to false to avoid checkpointing an RDD - - protected var shouldCheckpoint = false // set to true when an RDD is marked for checkpointing - protected var isCheckpointInProgress = false // set to true when checkpointing is in progress - protected[spark] var isCheckpointed = false // set to true after checkpointing is completed - - protected[spark] var checkpointFile: String = null // set to the checkpoint file after checkpointing is completed - protected var checkpointRDD: RDD[T] = null // set to the HadoopRDD of the checkpoint file - protected var checkpointRDDSplits: Seq[Split] = null // set to the splits of the Hadoop RDD - // Methods available on all RDDs: /** @@ -162,83 +147,14 @@ abstract class RDD[T: ClassManifest]( /** Get the RDD's current storage level, or StorageLevel.NONE if none is set. */ def getStorageLevel = storageLevel - /** - * Mark this RDD for checkpointing. The RDD will be saved to a file inside `checkpointDir` - * (set using setCheckpointDir()) and all references to its parent RDDs will be removed. - * This is used to truncate very long lineages. In the current implementation, Spark will save - * this RDD to a file (using saveAsObjectFile()) after the first job using this RDD is done. - * Hence, it is strongly recommended to use checkpoint() on RDDs when - * (i) Checkpoint() is called before the any job has been executed on this RDD. - * (ii) This RDD has been made to persist in memory. Otherwise saving it on a file will - * require recomputation. - */ - protected[spark] def checkpoint() { - synchronized { - if (isCheckpointed || shouldCheckpoint || isCheckpointInProgress) { - // do nothing - } else if (isCheckpointable) { - if (sc.checkpointDir == null) { - throw new Exception("Checkpoint directory has not been set in the SparkContext.") - } - shouldCheckpoint = true - } else { - throw new Exception(this + " cannot be checkpointed") - } - } - } - - def getCheckpointData(): Any = { - synchronized { - checkpointFile - } - } - - /** - * Performs the checkpointing of this RDD by saving this . It is called by the DAGScheduler after a job - * using this RDD has completed (therefore the RDD has been materialized and - * potentially stored in memory). In case this RDD is not marked for checkpointing, - * doCheckpoint() is called recursively on the parent RDDs. 
- */ - private[spark] def doCheckpoint() { - val startCheckpoint = synchronized { - if (isCheckpointable && shouldCheckpoint && !isCheckpointInProgress) { - isCheckpointInProgress = true - true - } else { - false - } - } - - if (startCheckpoint) { - val rdd = this - rdd.checkpointFile = new Path(context.checkpointDir, "rdd-" + id).toString - rdd.saveAsObjectFile(checkpointFile) - rdd.synchronized { - rdd.checkpointRDD = context.objectFile[T](checkpointFile, rdd.splits.size) - rdd.checkpointRDDSplits = rdd.checkpointRDD.splits - rdd.changeDependencies(rdd.checkpointRDD) - rdd.shouldCheckpoint = false - rdd.isCheckpointInProgress = false - rdd.isCheckpointed = true - println("Done checkpointing RDD " + rdd.id + ", " + rdd + ", created RDD " + rdd.checkpointRDD.id + ", " + rdd.checkpointRDD) - } + def getPreferredLocations(split: Split) = { + if (isCheckpointed) { + checkpointData.preferredLocations(split) } else { - // Recursively call doCheckpoint() to perform checkpointing on parent RDD if they are marked - dependencies.foreach(_.rdd.doCheckpoint()) + preferredLocations(split) } } - /** - * Changes the dependencies of this RDD from its original parents to the new [[spark.rdd.HadoopRDD]] - * (`newRDD`) created from the checkpoint file. This method must ensure that all references - * to the original parent RDDs must be removed to enable the parent RDDs to be garbage - * collected. Subclasses of RDD may override this method for implementing their own changing - * logic. See [[spark.rdd.UnionRDD]] and [[spark.rdd.ShuffledRDD]] to get a better idea. - */ - protected def changeDependencies(newRDD: RDD[_]) { - dependencies_ = List(new OneToOneDependency(newRDD)) - } - /** * Internal method to this RDD; will read from cache if applicable, or otherwise compute it. * This should ''not'' be called by users directly, but is available for implementors of custom @@ -247,7 +163,7 @@ abstract class RDD[T: ClassManifest]( final def iterator(split: Split): Iterator[T] = { if (isCheckpointed) { // ASSUMPTION: Checkpoint Hadoop RDD will have same number of splits as original - checkpointRDD.iterator(checkpointRDDSplits(split.index)) + checkpointData.iterator(split.index) } else if (storageLevel != StorageLevel.NONE) { SparkEnv.get.cacheTracker.getOrCompute[T](this, split, storageLevel) } else { @@ -589,6 +505,55 @@ abstract class RDD[T: ClassManifest]( sc.runJob(this, (iter: Iterator[T]) => iter.toArray) } + /** + * Mark this RDD for checkpointing. The RDD will be saved to a file inside `checkpointDir` + * (set using setCheckpointDir()) and all references to its parent RDDs will be removed. + * This is used to truncate very long lineages. In the current implementation, Spark will save + * this RDD to a file (using saveAsObjectFile()) after the first job using this RDD is done. + * Hence, it is strongly recommended to use checkpoint() on RDDs when + * (i) checkpoint() is called before the any job has been executed on this RDD. + * (ii) This RDD has been made to persist in memory. Otherwise saving it on a file will + * require recomputation. + */ + def checkpoint() { + checkpointData.markForCheckpoint() + } + + /** + * Return whether this RDD has been checkpointed or not + */ + def isCheckpointed(): Boolean = { + checkpointData.isCheckpointed() + } + + /** + * Gets the name of the file to which this RDD was checkpointed + */ + def getCheckpointFile(): Option[String] = { + checkpointData.getCheckpointFile() + } + + /** + * Performs the checkpointing of this RDD by saving this . 
It is called by the DAGScheduler + * after a job using this RDD has completed (therefore the RDD has been materialized and + * potentially stored in memory). doCheckpoint() is called recursively on the parent RDDs. + */ + protected[spark] def doCheckpoint() { + checkpointData.doCheckpoint() + dependencies.foreach(_.rdd.doCheckpoint()) + } + + /** + * Changes the dependencies of this RDD from its original parents to the new [[spark.rdd.HadoopRDD]] + * (`newRDD`) created from the checkpoint file. This method must ensure that all references + * to the original parent RDDs must be removed to enable the parent RDDs to be garbage + * collected. Subclasses of RDD may override this method for implementing their own changing + * logic. See [[spark.rdd.UnionRDD]] and [[spark.rdd.ShuffledRDD]] to get a better idea. + */ + protected[spark] def changeDependencies(newRDD: RDD[_]) { + dependencies_ = List(new OneToOneDependency(newRDD)) + } + @throws(classOf[IOException]) private def writeObject(oos: ObjectOutputStream) { synchronized { diff --git a/core/src/main/scala/spark/RDDCheckpointData.scala b/core/src/main/scala/spark/RDDCheckpointData.scala new file mode 100644 index 0000000000..eb4482acee --- /dev/null +++ b/core/src/main/scala/spark/RDDCheckpointData.scala @@ -0,0 +1,68 @@ +package spark + +import org.apache.hadoop.fs.Path + + + +private[spark] class RDDCheckpointData[T: ClassManifest](rdd: RDD[T]) +extends Serializable { + + class CheckpointState extends Serializable { + var state = 0 + + def mark() { if (state == 0) state = 1 } + def start() { assert(state == 1); state = 2 } + def finish() { assert(state == 2); state = 3 } + + def isMarked() = { state == 1 } + def isInProgress = { state == 2 } + def isCheckpointed = { state == 3 } + } + + val cpState = new CheckpointState() + var cpFile: Option[String] = None + var cpRDD: Option[RDD[T]] = None + var cpRDDSplits: Seq[Split] = Nil + + def markForCheckpoint() = { + rdd.synchronized { cpState.mark() } + } + + def isCheckpointed() = { + rdd.synchronized { cpState.isCheckpointed } + } + + def getCheckpointFile() = { + rdd.synchronized { cpFile } + } + + def doCheckpoint() { + rdd.synchronized { + if (cpState.isMarked && !cpState.isInProgress) { + cpState.start() + } else { + return + } + } + + val file = new Path(rdd.context.checkpointDir, "rdd-" + rdd.id).toString + rdd.saveAsObjectFile(file) + val newRDD = rdd.context.objectFile[T](file, rdd.splits.size) + + rdd.synchronized { + rdd.changeDependencies(newRDD) + cpFile = Some(file) + cpRDD = Some(newRDD) + cpRDDSplits = newRDD.splits + cpState.finish() + } + } + + def preferredLocations(split: Split) = { + cpRDD.get.preferredLocations(split) + } + + def iterator(splitIndex: Int): Iterator[T] = { + cpRDD.get.iterator(cpRDDSplits(splitIndex)) + } +} diff --git a/core/src/main/scala/spark/rdd/BlockRDD.scala b/core/src/main/scala/spark/rdd/BlockRDD.scala index f4c3f99011..590f9eb738 100644 --- a/core/src/main/scala/spark/rdd/BlockRDD.scala +++ b/core/src/main/scala/spark/rdd/BlockRDD.scala @@ -41,12 +41,7 @@ class BlockRDD[T: ClassManifest](sc: SparkContext, @transient blockIds: Array[St } } - override def preferredLocations(split: Split) = { - if (isCheckpointed) { - checkpointRDD.preferredLocations(split) - } else { - locations_(split.asInstanceOf[BlockRDDSplit].blockId) - } - } + override def preferredLocations(split: Split) = + locations_(split.asInstanceOf[BlockRDDSplit].blockId) } diff --git a/core/src/main/scala/spark/rdd/CartesianRDD.scala b/core/src/main/scala/spark/rdd/CartesianRDD.scala 
index 458ad38d55..9bfc3f8ca3 100644 --- a/core/src/main/scala/spark/rdd/CartesianRDD.scala +++ b/core/src/main/scala/spark/rdd/CartesianRDD.scala @@ -32,12 +32,8 @@ class CartesianRDD[T: ClassManifest, U:ClassManifest]( override def splits = splits_ override def preferredLocations(split: Split) = { - if (isCheckpointed) { - checkpointRDD.preferredLocations(split) - } else { - val currSplit = split.asInstanceOf[CartesianSplit] - rdd1.preferredLocations(currSplit.s1) ++ rdd2.preferredLocations(currSplit.s2) - } + val currSplit = split.asInstanceOf[CartesianSplit] + rdd1.preferredLocations(currSplit.s1) ++ rdd2.preferredLocations(currSplit.s2) } override def compute(split: Split) = { @@ -56,7 +52,7 @@ class CartesianRDD[T: ClassManifest, U:ClassManifest]( override def dependencies = deps_ - override protected def changeDependencies(newRDD: RDD[_]) { + override def changeDependencies(newRDD: RDD[_]) { deps_ = List(new OneToOneDependency(newRDD.asInstanceOf[RDD[Any]])) splits_ = newRDD.splits rdd1 = null diff --git a/core/src/main/scala/spark/rdd/CoGroupedRDD.scala b/core/src/main/scala/spark/rdd/CoGroupedRDD.scala index 94ef1b56e8..adfecea966 100644 --- a/core/src/main/scala/spark/rdd/CoGroupedRDD.scala +++ b/core/src/main/scala/spark/rdd/CoGroupedRDD.scala @@ -112,7 +112,7 @@ class CoGroupedRDD[K](@transient var rdds: Seq[RDD[(_, _)]], part: Partitioner) map.iterator } - override protected def changeDependencies(newRDD: RDD[_]) { + override def changeDependencies(newRDD: RDD[_]) { deps_ = List(new OneToOneDependency(newRDD.asInstanceOf[RDD[Any]])) splits_ = newRDD.splits rdds = null diff --git a/core/src/main/scala/spark/rdd/CoalescedRDD.scala b/core/src/main/scala/spark/rdd/CoalescedRDD.scala index 5b5f72ddeb..90c3b8bfd8 100644 --- a/core/src/main/scala/spark/rdd/CoalescedRDD.scala +++ b/core/src/main/scala/spark/rdd/CoalescedRDD.scala @@ -48,7 +48,7 @@ class CoalescedRDD[T: ClassManifest]( override def dependencies = deps_ - override protected def changeDependencies(newRDD: RDD[_]) { + override def changeDependencies(newRDD: RDD[_]) { deps_ = List(new OneToOneDependency(newRDD)) splits_ = newRDD.splits prev = null diff --git a/core/src/main/scala/spark/rdd/HadoopRDD.scala b/core/src/main/scala/spark/rdd/HadoopRDD.scala index 19ed56d9c0..a12531ea89 100644 --- a/core/src/main/scala/spark/rdd/HadoopRDD.scala +++ b/core/src/main/scala/spark/rdd/HadoopRDD.scala @@ -115,6 +115,4 @@ class HadoopRDD[K, V]( val hadoopSplit = split.asInstanceOf[HadoopSplit] hadoopSplit.inputSplit.value.getLocations.filter(_ != "localhost") } - - override val isCheckpointable = false } diff --git a/core/src/main/scala/spark/rdd/NewHadoopRDD.scala b/core/src/main/scala/spark/rdd/NewHadoopRDD.scala index 2875abb2db..c12df5839e 100644 --- a/core/src/main/scala/spark/rdd/NewHadoopRDD.scala +++ b/core/src/main/scala/spark/rdd/NewHadoopRDD.scala @@ -93,6 +93,4 @@ class NewHadoopRDD[K, V]( val theSplit = split.asInstanceOf[NewHadoopSplit] theSplit.serializableHadoopSplit.value.getLocations.filter(_ != "localhost") } - - override val isCheckpointable = false } diff --git a/core/src/main/scala/spark/rdd/UnionRDD.scala b/core/src/main/scala/spark/rdd/UnionRDD.scala index 643a174160..30eb8483b6 100644 --- a/core/src/main/scala/spark/rdd/UnionRDD.scala +++ b/core/src/main/scala/spark/rdd/UnionRDD.scala @@ -49,15 +49,11 @@ class UnionRDD[T: ClassManifest]( override def compute(s: Split): Iterator[T] = s.asInstanceOf[UnionSplit[T]].iterator() - override def preferredLocations(s: Split): Seq[String] = { - if (isCheckpointed) { - 
checkpointRDD.preferredLocations(s) - } else { - s.asInstanceOf[UnionSplit[T]].preferredLocations() - } - } + override def preferredLocations(s: Split): Seq[String] = + s.asInstanceOf[UnionSplit[T]].preferredLocations() - override protected def changeDependencies(newRDD: RDD[_]) { + + override def changeDependencies(newRDD: RDD[_]) { deps_ = List(new OneToOneDependency(newRDD)) splits_ = newRDD.splits rdds = null diff --git a/core/src/main/scala/spark/scheduler/DAGScheduler.scala b/core/src/main/scala/spark/scheduler/DAGScheduler.scala index 4b2570fa2b..33d35b35d1 100644 --- a/core/src/main/scala/spark/scheduler/DAGScheduler.scala +++ b/core/src/main/scala/spark/scheduler/DAGScheduler.scala @@ -575,7 +575,7 @@ class DAGScheduler(taskSched: TaskScheduler) extends TaskSchedulerListener with return cached } // If the RDD has some placement preferences (as is the case for input RDDs), get those - val rddPrefs = rdd.preferredLocations(rdd.splits(partition)).toList + val rddPrefs = rdd.getPreferredLocations(rdd.splits(partition)).toList if (rddPrefs != Nil) { return rddPrefs } diff --git a/core/src/test/scala/spark/CheckpointSuite.scala b/core/src/test/scala/spark/CheckpointSuite.scala index 8622ce92aa..2cafef444c 100644 --- a/core/src/test/scala/spark/CheckpointSuite.scala +++ b/core/src/test/scala/spark/CheckpointSuite.scala @@ -41,7 +41,7 @@ class CheckpointSuite extends FunSuite with BeforeAndAfter with Logging { assert(parCollection.dependencies === Nil) val result = parCollection.collect() sleep(parCollection) // slightly extra time as loading classes for the first can take some time - assert(sc.objectFile[Int](parCollection.checkpointFile).collect() === result) + assert(sc.objectFile[Int](parCollection.getCheckpointFile.get).collect() === result) assert(parCollection.dependencies != Nil) assert(parCollection.collect() === result) } @@ -54,7 +54,7 @@ class CheckpointSuite extends FunSuite with BeforeAndAfter with Logging { blockRDD.checkpoint() val result = blockRDD.collect() sleep(blockRDD) - assert(sc.objectFile[String](blockRDD.checkpointFile).collect() === result) + assert(sc.objectFile[String](blockRDD.getCheckpointFile.get).collect() === result) assert(blockRDD.dependencies != Nil) assert(blockRDD.collect() === result) } @@ -122,35 +122,6 @@ class CheckpointSuite extends FunSuite with BeforeAndAfter with Logging { "CoGroupedSplits still holds on to the splits of its parent RDDs") } - /** - * This test forces two ResultTasks of the same job to be launched before and after - * the checkpointing of job's RDD is completed. - */ - test("Threading - ResultTasks") { - val op1 = (parCollection: RDD[Int]) => { - parCollection.map(x => { println("1st map running on " + x); Thread.sleep(500); (x % 2, x) }) - } - val op2 = (firstRDD: RDD[(Int, Int)]) => { - firstRDD.map(x => { println("2nd map running on " + x); Thread.sleep(500); x }) - } - testThreading(op1, op2) - } - - /** - * This test forces two ShuffleMapTasks of the same job to be launched before and after - * the checkpointing of job's RDD is completed. 
- */ - test("Threading - ShuffleMapTasks") { - val op1 = (parCollection: RDD[Int]) => { - parCollection.map(x => { println("1st map running on " + x); Thread.sleep(500); (x % 2, x) }) - } - val op2 = (firstRDD: RDD[(Int, Int)]) => { - firstRDD.groupByKey(2).map(x => { println("2nd map running on " + x); Thread.sleep(500); x }) - } - testThreading(op1, op2) - } - - def testCheckpointing[U: ClassManifest](op: (RDD[Int]) => RDD[U], sleepTime: Long = 500) { val parCollection = sc.makeRDD(1 to 4, 4) val operatedRDD = op(parCollection) @@ -159,49 +130,11 @@ class CheckpointSuite extends FunSuite with BeforeAndAfter with Logging { val result = operatedRDD.collect() sleep(operatedRDD) //println(parentRDD + ", " + operatedRDD.dependencies.head.rdd ) - assert(sc.objectFile[U](operatedRDD.checkpointFile).collect() === result) + assert(sc.objectFile[U](operatedRDD.getCheckpointFile.get).collect() === result) assert(operatedRDD.dependencies.head.rdd != parentRDD) assert(operatedRDD.collect() === result) } - def testThreading[U: ClassManifest, V: ClassManifest](op1: (RDD[Int]) => RDD[U], op2: (RDD[U]) => RDD[V]) { - - val parCollection = sc.makeRDD(1 to 2, 2) - - // This is the RDD that is to be checkpointed - val firstRDD = op1(parCollection) - val parentRDD = firstRDD.dependencies.head.rdd - firstRDD.checkpoint() - - // This the RDD that uses firstRDD. This is designed to launch a - // ShuffleMapTask that uses firstRDD. - val secondRDD = op2(firstRDD) - - // Starting first job, to initiate the checkpointing - logInfo("\nLaunching 1st job to initiate checkpointing\n") - firstRDD.collect() - - // Checkpointing has started but not completed yet - Thread.sleep(100) - assert(firstRDD.dependencies.head.rdd === parentRDD) - - // Starting second job; first task of this job will be - // launched _before_ firstRDD is marked as checkpointed - // and the second task will be launched _after_ firstRDD - // is marked as checkpointed - logInfo("\nLaunching 2nd job that is designed to launch tasks " + - "before and after checkpointing is complete\n") - val result = secondRDD.collect() - - // Check whether firstRDD has been successfully checkpointed - assert(firstRDD.dependencies.head.rdd != parentRDD) - - logInfo("\nRecomputing 2nd job to verify the results of the previous computation\n") - // Check whether the result in the previous job was correct or not - val correctResult = secondRDD.collect() - assert(result === correctResult) - } - def sleep(rdd: RDD[_]) { val startTime = System.currentTimeMillis() val maxWaitTime = 5000 diff --git a/streaming/src/main/scala/spark/streaming/DStream.scala b/streaming/src/main/scala/spark/streaming/DStream.scala index d2e9de110e..d290c5927e 100644 --- a/streaming/src/main/scala/spark/streaming/DStream.scala +++ b/streaming/src/main/scala/spark/streaming/DStream.scala @@ -325,8 +325,9 @@ extends Serializable with Logging { logInfo("Updating checkpoint data for time " + currentTime) // Get the checkpointed RDDs from the generated RDDs - val newCheckpointData = generatedRDDs.filter(_._2.getCheckpointData() != null) - .map(x => (x._1, x._2.getCheckpointData())) + + val newCheckpointData = generatedRDDs.filter(_._2.getCheckpointFile.isDefined) + .map(x => (x._1, x._2.getCheckpointFile.get)) // Make a copy of the existing checkpoint data val oldCheckpointData = checkpointData.clone() @@ -373,7 +374,7 @@ extends Serializable with Logging { logInfo("Restoring checkpointed RDD for time " + time + " from file '" + data.toString + "'") val rdd = ssc.sc.objectFile[T](data.toString) // Set the 
checkpoint file name to identify this RDD as a checkpointed RDD by updateCheckpointData() - rdd.checkpointFile = data.toString + rdd.checkpointData.cpFile = Some(data.toString) generatedRDDs += ((time, rdd)) } } From a23462191f9ad492d14f9efc3e915b1f522f543a Mon Sep 17 00:00:00 2001 From: Denny Date: Wed, 5 Dec 2012 10:30:40 -0800 Subject: [PATCH 136/226] Adjust Kafka code to work with new streaming changes. --- streaming/src/main/scala/spark/streaming/DStream.scala | 4 ++-- .../main/scala/spark/streaming/examples/KafkaWordCount.scala | 5 +++-- 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/streaming/src/main/scala/spark/streaming/DStream.scala b/streaming/src/main/scala/spark/streaming/DStream.scala index 85106b3ad8..792c129be8 100644 --- a/streaming/src/main/scala/spark/streaming/DStream.scala +++ b/streaming/src/main/scala/spark/streaming/DStream.scala @@ -365,8 +365,8 @@ extends Serializable with Logging { } } } - logInfo("Updated checkpoint data for time " + currentTime + ", " + checkpointData.size + " checkpoints, " - + "[" + checkpointData.mkString(",") + "]") + logInfo("Updated checkpoint data for time " + currentTime + ", " + checkpointData.rdds.size + " checkpoints, " + + "[" + checkpointData.rdds.mkString(",") + "]") } /** diff --git a/streaming/src/main/scala/spark/streaming/examples/KafkaWordCount.scala b/streaming/src/main/scala/spark/streaming/examples/KafkaWordCount.scala index 12e3f49fe9..fe55db6e2c 100644 --- a/streaming/src/main/scala/spark/streaming/examples/KafkaWordCount.scala +++ b/streaming/src/main/scala/spark/streaming/examples/KafkaWordCount.scala @@ -4,6 +4,7 @@ import java.util.Properties import kafka.message.Message import kafka.producer.SyncProducerConfig import kafka.producer._ +import spark.SparkContext import spark.streaming._ import spark.streaming.StreamingContext._ import spark.storage.StorageLevel @@ -19,9 +20,9 @@ object KafkaWordCount { val Array(master, hostname, port, group, topics, numThreads) = args - val ssc = new StreamingContext(master, "KafkaWordCount") + val sc = new SparkContext(master, "KafkaWordCount") + val ssc = new StreamingContext(sc, Seconds(2)) ssc.checkpoint("checkpoint") - ssc.setBatchDuration(Seconds(2)) val topicpMap = topics.split(",").map((_,numThreads.toInt)).toMap val lines = ssc.kafkaStream[String](hostname, port.toInt, group, topicpMap) From 556c38ed91a405e0665897873e025e94971226af Mon Sep 17 00:00:00 2001 From: Denny Date: Wed, 5 Dec 2012 11:54:42 -0800 Subject: [PATCH 137/226] Added kafka JAR --- project/SparkBuild.scala | 2 +- streaming/lib/kafka-0.7.2.jar | Bin 0 -> 1358063 bytes 2 files changed, 1 insertion(+), 1 deletion(-) create mode 100644 streaming/lib/kafka-0.7.2.jar diff --git a/project/SparkBuild.scala b/project/SparkBuild.scala index f34736b1c4..6ef2ac477a 100644 --- a/project/SparkBuild.scala +++ b/project/SparkBuild.scala @@ -158,7 +158,7 @@ object SparkBuild extends Build { def streamingSettings = sharedSettings ++ Seq( name := "spark-streaming", libraryDependencies ++= Seq( - "kafka" % "core-kafka_2.9.1" % "0.7.2") + "com.github.sgroschupf" % "zkclient" % "0.1") ) ++ assemblySettings ++ extraAssemblySettings def extraAssemblySettings() = Seq(test in assembly := {}) ++ Seq( diff --git a/streaming/lib/kafka-0.7.2.jar b/streaming/lib/kafka-0.7.2.jar new file mode 100644 index 0000000000000000000000000000000000000000..65f79925a4d06a41b7b98d7e6f92fedb408c9b3a GIT binary patch literal 1358063 zcma%j1yo#H(k|}q?(XjH?k<7G-QC@Ty9Rd+0fM``yA#|U5_sIXGr2SKzyH41tX{nq zUHhC9s<5t@rJuTM@M~nT<67C|{ul1&LIJ*3yPa 
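For readers tracing the checkpointing refactor in PATCH 135, the following sketch shows how the reworked RDD API (checkpoint(), isCheckpointed(), getCheckpointFile()) is intended to be driven, modeled loosely on the CheckpointSuite changes above. It is a minimal sketch only: the local master URL, the checkpoint directory path, and the CheckpointSketch object name are illustrative assumptions, not part of the patch series.

import spark.SparkContext

// Hypothetical driver; only the RDD methods added in PATCH 135 are assumed here.
object CheckpointSketch {
  def main(args: Array[String]) {
    val sc = new SparkContext("local", "CheckpointSketch")
    sc.setCheckpointDir("/tmp/spark-checkpoints") // assumed path; must be set before checkpoint()

    val rdd = sc.makeRDD(1 to 4, 4).map(_ * 2).cache() // persisting first is recommended by the doc comment
    rdd.checkpoint()  // only marks the RDD via RDDCheckpointData; nothing is written yet

    rdd.collect()     // first job materializes the RDD; DAGScheduler then calls doCheckpoint()

    // After the first job, checkpoint state lives in RDDCheckpointData, not in RDD fields.
    assert(rdd.isCheckpointed())
    val file = rdd.getCheckpointFile().get

    // The saved data can be read back as an object file, as CheckpointSuite does.
    val restored = sc.objectFile[Int](file, 4).collect()
    assert(restored.toSet == rdd.collect().toSet)
  }
}

The effect of the refactor is visible in this flow: the mark/start/finish state transitions and the checkpoint file now live in RDDCheckpointData, so the RDD itself carries a single checkpointData field instead of a handful of checkpoint-related members.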
zUw~!~W2k}e$u1S2`)KFEgUOK5_YFlqk&NBB#jx#$;}5V9o=i|Z>vNpb{@;Qq|Fd4{ z^t69g>Wa@yk3Zn@Myhy@QW0cekFO1S>d|r@O#!5zX6baRp%7lHTBxjU8x@ay(_8ys zAy;14V4tdT`?Wz4(!u=AptOyQ6l0^zj1^p-E>E!NAx=M4B_H#Uphf~TV&?c#s2QZo z*jTOUX*tL-<;!qm+aQKJWU9}=fv#>iN#jLyJ@HbZN)pbU)pb?WN@I#=kAts3>V~8X z&qf(X<;)tF$6rcN8|CYo)hS*px1q1#lbD2?OOTSptU&3RSTKdPzjXv-((2FyfG02> z1uJq>8xIO@0DVLmL;?0#-wFim41bk&Pxqxs9x?UFwAS}IyH4)dQ9S0i@(TrwgYqU= zkS+QODK;Z(ISPD(z=G-n(1$zpQm#0{4~g#jiQTqPbpQv_PDjb386}@19bU_R8=}*Z ziiCsvOOO_vaAD<}@u^B_*9yGLSiRZdF4M!QMD`ojy}H zy#@KqJ3X_`LP+2n;rmecDd5qxgU4E|!Q*79*JrS-6pehK1S;thhAe073j!pxYHu@@`< z)BAlfGd3JsX9_5&IWdEPNT&tMuxfLU_FGojF#Va9=GaL>QOh$vJu4|m7z#SKWd;Y) zmfU94Pn#Qe%8c;A6rn^)5Iig~!lU)kT!a+mjrwJe5w83W{9H~qm~l8e;G)7%hPX)T z{Pb||W!4M~t{e#xL@10#NKM0tNIJFG)e#jb;W;|P8CRsQK;VK*4MM9aZHNr{&y_hQ z8rYNlg$w)5^b>Eg`X=x|BoI1HKHy65p*g_1JR>WBB4{VS{$sHbjI;SA9U+u>1u<`W z|2(F%Uv$2c6)s2o0m!2{u@e>&q2V&Qg3%l)Aw_v~3m4Lyg_G%#!gJrS2+xaxfcC~| z(%|5y_+_Yk5nr}0nEUhwtgAK!}w+@2_REPcI&( zI={QwODu+H9M+Xnjn(>icb3jJZjJ@vMQh`{l?Qi9^QaCjNCYF@^zt;v+JXzB^F4jU zE1XcH@t7sQo-R|Kl<}bj-0)z(DDxbEvEL|lTQ@Y$#DiJaFNRV!_|rC(j(ILv3UNk} z2)(ICAn}+o%~as%(^9qd$IZL}eN27N0ObNwnyCWh=LbGFRdIII7kZv@;qx(tTZ=+j zO+8b2_bhe;AeeE=27O9678%SWJj`ny-82+YS_Tm=rS7uEjt2?sTWxRc`4 zQMGhdD!Ex?%s82x)FqcnTW9|apj4U-{ZqLkw}?TJ9OP{Z8hLWV9lkqTw++lF8B0;K zntuLXvEfAJ(KSyUh-)0Nds9c|7=@wvC%}L*tSe}aBjSP?3-u7aDT?)~Ii`jm$?Xn= z3#4upuMqgPWVspZ%^xn9r5I^Q=wCb0b7)gBjD_{A(`$;D1ItPZWL}k?m?=DE8*lLFtS2hv^BL3$phgpKx%ciPIY0Ulp+Jp7oxDPSab^3RNw ze%XrnPP>M{=NGq;XZx!4)amhr^YlH2o?A?9Bj(v}6hGyo@0#=WL%7&=R1myZ70Ai_ z(Gp(%5%G!ci52nbJsyJ$^1J`uTAU%)`Z!LOH!9i?E+JUS_>CGlx|*Mw|9tAkek4_j zJYq{IT#keN{Cp97Sa7OX2}4IKj5JzX{urwRA39T>9A$qk*w8QRLkFj(#RCgCsS#U2 z$--n^=>Y;H3!X`j?B_IkA|1ah@1&d01Q~k1IX`2_f?*1!<{sa6Gx?vDz-{v0%48mX zMQ&2C_v|OZDFqJ(loRz2^Hv!3dAWxljzABLd+}9H6!=MS~;U=iur@M8sJE zZAyAK@{e?~%(y&5tC#_k?>P}I&5|L3l0!#3#y2~TPv7`bHup;F!o%9qm{xg-a55C- zP3h;RYM};{2vb!%NqeVyrjFd44e3%TJn~NE0*`^81jTGCB5<{}6dJg_6lo$1shy!g zV$!vJX;eigY2uD%A++gBWEQUHEm}t4-l}@5YFJ+#zy0(wk&oa2d7`ISf}(8s8D{4= zHcH)nCDS^{P$Qerio;~+x8;{qe%*Hc>JYi<9s%Azmv>tJ<)_GwHsWyR)K7zertK*B zRb0;N6|zGMq-|;VR@`iA06JNLau`ot@7nbd$5NrFcbC95PcX9(w60Q0S8XrH^-{Uv znXclK^@!Q2FGgL@RvpEj`ZcUW!}@MH!Zt41wH_fYdtt_70+Tvh#>kuT6Dq|U;@jCI z^pBvKE&|T`?aO6RXh@5L(U0ohw(G2NV{4cY z^7eI&HZ|C;Bjp3dvq=5%LmmzT;Z%k4=L=ebKhYHRAsZ+iChp1?`yI`U&S)EoU0`;e zE6i<^emim2HD@zpXfQDwxmQM?6mR@Ks6U+=pDZ2GaWa&U+^SlyaclI8ohQxqp8ohQ zTWwhNMj5I!&R-^r*)%97_0{q`Y&ntfe0h)$9?B*O8A^qTd@WR~n~Q^}I4-1W+MJ4I@C99<7&Qvk%;OHIR+&9Vp*mROyBzeft6>E<;+* zQC&C%n>0vX0lcnS9ir_OAHTivuo`y3BWpC6f4fz|XEgG$#pd=@K^-oXnQWpl+@rm6 z&FE|HX#rVVXteUwQhro$=b7o*XBjPN=PAXH{_YL_+!&dBL`v;4;O*qZe~sY}l2&^1 ztK$`a|FJXRrwrCH$(fX=35|-n@Z?FHSr=w`Ir@O!rcyRMOj9zHff!;VKRW3wQrqRo z?$XaZbFqgVbV2&+{n z;R>I%usMxCN^eYlC`g3Vyc{2_bTEaS*f>$@Wg3DAjCAoeZpzP)lcp(GdY@fDJ|Ue1 z*=`I^6#d7TMoz^C_^%7C#6*w1ixe5h7qhR{rN=yk&u+I8Mw1-zOA&&V?;b_)NtIJnj~ zs>np^nczy!l2~_agxoUhQcvX`#bOUB=JN9Hr)!vE9bEF8hp{eXG=nk-1!}msI~<%f zwGS;dkPODVqcKyc-(suQ4QNQsD6nn`zeU^!ZFnw|p|WCty|`GZ#wQl?XR0ilnTK8Q zFmkmR8{w-=hAlaGp>v-@1gZmVXc>RR$W>9U#0-(&*~1i=Jn@Mistw}_7x=}(2?U%S zMsf%E5<1vpGmg4$Xz+-6l%lkV$1P0ySaC8EhRtwMPcphl#Pj>rYu8c~3BtnIJC%#Z zJLVhOj%Y3U5jepcLtG`8+)ISuY7%%*Db2mAIy#u(D^04KN!Gd?10eClTf4UT;xyya zDv{!Up*+$mdn1wpGRv|k+K&@}nYwsPm@{~G5~GT_~co^d1Z?frC|c7=&v$}YCLeK?O9--1SL@~>|59~_B+UREv<5` zhvc|c+9c|1SBeN)Tj+`7!Ylu=>9%?(Pn?F}Ba1Grb%!ka5c!~qqjBgl6SAD| z{Gwp1s-;=-c=s8YRN>9S<2BJMo{7^FZ!4O)5v8~J%rNQ6E4)T|Fw~An#ULNSKsiY* z=^A8k&}}PaGJ@)KpGv{Xdme2Vrnr3}2Ae9a{UDEp6wKMAtnqN4Ox_&pEu1)!kPcL~ zQ6O2@W2PG0>Z!E3bIifC63cpECxSO;B|038t++ufe^R?2^Vn!?AR4YjriVAB9arSZ 
zUE+NOj?H!?>8w1Tl>FA&u?{h^gw39aUcWd2HpNu%fST0M);CcX6qeztSeVF|3e;P_ zr5rwsM>Zd{*s`1JDU4;AL_aVon|^b#hrnGP3nf7v$|22jIs1--!Fkdy$GSpK!13)? z*pRV74rYPFLXS6$syhZZ-;T~=&??hCs#(Al1xh{L7>Y8zB&Eyf=JiZctMzEPb)pa_ zpEd3=`3B0Ua*!k~lr1H%mNT_t$T+uz6j?%>GgE{@4X=pe$9J3}`HTb<_U9j+S2+~h zc?xeVSXJMo`~U-Jz%a-zG(tbTxz3fTzA)bCd@cRP$YOeiuhMYuQEh4t@?wv7o;x$- zg(|6!JRzV~@Jh**Fc-{tQ>m=G<(MjC&iA2KKLCk80Tv|1F6}GH*ob)IbsM)1C36M| z>xuVh;M}6+2gH&yT0q6+iTJSPof^{0a$Ixz*%DSEX7RB?Qn8T@0Q-p6baTr!_%1%o zXcISd%kU<}_&Ul|#VJL*N$`f1T@p8It^B~m{{;mGBzolX9?N$8NNjZK)iv@EkB_NUmy#J9$`G&wjL* zW2u}dG=AJIvbz|`>?~K8W5mJfK8^rxUBLUBRF1U8H_w}$9BszH-=7bD{^RbDKSpKo z;l928!^72{BcKw4Kk5>xwH4xS3>FG|rAq13%GKjDAmgD6*nw&>mb!wZV!nmsgbc(q z!#%7~DE>?_acTLBJvo8Fb|~=yIcK5xJCY+pIq>UQyU`Mr%Uf7=7__7FwkhKDEVc_z zFV$0IR5wo0Gc_lo(?;OYWp|Lowp|O>;o9{~;Mz6NebY{mk|QY*)t7XGKz@b!GSMiE zQ#P{_kwtSiCS#(*yPH1PzOd9yuP68?SxIvqk5~tNFE*I0!X~4QFFZm#pB-)O>YkvQ z;n|Yt)|tU0>W;jl^A4!|4T*JkhWhoK^ENaLH*8kR*RJZL3H56V=WP~=ySKDH|C=W3 z9^7PHOClBPfD7OQ>2}T~vuDD2SCA!57mjng>!ux=%U%vfM6157b)T-i?w&44?HsM4 zT{x|&No{}Pwu3yI{vPwG!<4k^gqJ||YwP~~4cdYDjd8U1CG00$#AMO#rA?23X>Kr@ zKATA+Pc>tw`11j^5aQ4$Bgg_2G=Jt?H(+Yn{=#8$<>TjfgO(%GH*OK#N4(nOFh;Lb zr*?_g6i+Zx!VzM&SFNCrfGV?x4jApZ^EL3L<6mP28~V`#ryL`lK(s-``@<}Lw0=eg z9R)`A_0y42Jwx}w&tGI|KN{vY2HLC3els6#b#FAujHY5d!mzA=MreEqyV872#Xejx zpqjrqA-qq_%a`(y)T!S`vhG39{DrbEyrYAE`hJ|v6#0-`9dG2Ov+d>fs_UYqd_zV5 z6{>exlM;4o5QtZ zsGU=nlXn|?yy18WA=e98gBd6`UoG@yg3}$jcdbDujm5>uP zQ#ZtU!RHT9(#cBvYMcZ9YU>>yAAt3xKTahF7Zir7@4ipxV&xb2%cCmRJQaf|L7fN0 zne<-eOIo!+aFAxz4(q!_d+j7IGGT?B`GhiTPi*=o(W)eH19}+hJ6wO=oj(Q%VxZ}M zvUXk7r0nds4Klc?xv2mosa+(h@Ncg)Fy~SJt}iA_L*$+oW0BX5+?{z*SEqXn>>DBH zPyg`RfcFa@&i?dob)ouqj^XdvkK{i&hCc~Vc~eUp3&YQKUVBH&Pb*0iLko*PnS;M; z%l@0j<*!||1|?JZH6As@nE5?Nn-4exavZwfmk$?Y;}Xz6>x)* zgnfzGpRrZ%AgU^b6B29I87EPvcbz0D0P$~z#m4&-TpI`N?o%DoKR<5QuD*co(DmC} zO;B8ujU56u*vjM@o}15}8XzC-DApURoQP&c)GOnm@#m<_88B*CCb~OBa8&M%QF?h$vkQ%z|c3ZhEimk)i=*pkdB{dazOJyW6C6 z>02kpj8|Zps^F&t-k`T)Sc%gXqX8t&Yx(lknpuHEjIc`U_|VTA#;eP>-cbu;M3;hh z!-9Rq*vOz%XRPa(O_Ezj0xOYlyi%F3i&MVTkfnEQIdU3-S<}H*wgz(~LP4yc1^J+O zD+25h!7)m48WFs2sfzc|YT6^Oak<$uuiD35KKpiN*ddbXs=`}Pgj##E>V!#hCscs9 z&?(ICo0WS;Ziob!Xd87q&MLGE&c zzm7go#YYRGL){fp2>1rM5ZY1`1iAK(R0mt*w;$~@T*&FTh_XnuS|x@Nr!?-_=DS6$ zLjKVLDLSX;YgquVD`=#~wB?zK_URoB`#x$SdUy{03y2gff|Kd3RA~zG1Ue%Edk-T_- z1F;t|$-iyNDXXK{gG?Y*%Td>yQGaLYqr^kRQ&UaUJU3Ex>RRrQ&=e3m zd3fS{--LZfe8#6hk`EoK1h-wreZEskz9%1nm*G4s?bw8)xgfCYwX+P>1S zCDuP6OWinGPsWbo7&Xtt%{ni>LADPEZTo=X^LI*1tJU_%VA(R1>kh{uK-|;zVf@_B zW+Vhb<{GY9jOWNI%^&ci;sQHJ7971L2qk7A0dqIV<3HVjcdd&6rZgZ`7=$s?aFVZ4{UelrYyMWxN`;R-@RDvd$a8 z>WTPuZDc&cq18RuUUsn}j3s|f`3w&w#KQV6Zqfpp}huL9>sl)zAx-5KefBTI1j5SO+_} z`>HqZMcR_*cUW0mBPW-A$hUMT_ZdnYq-|oGwn5m#QZVB89>DK{YB%ziMyAAFQc!nA z#VA1b2g+dLm#n;mEK#llgXKNC_im31Po>d-^6G^Kvdl;LU;#;=G#jMLK>x_hxgT|NVz+U$k1~2-Q0U`(?Wek4OhVgu`Ju==#uZ16Ds^p+P48xPm`dVJ zpwth&9Bk%`{m@k7W1WK^B%{NVjEi7bq?b&&z4~t1bd@p+FV*0!eX@9p=K<)#11oqV z7iC$&_p;72#u+*Mo|NzUU`G9<-8p2B6L56?7?5r%JSmkOedlwAOoo&-nB@aSiq5a< zoUI2;A-prz0}CE)p%1A@W`QZ}7+;oe^B5@0c&G!A-0*=vm(ragI^Q@LelRq6Z8`T+ zC$^lx05Y?TT_CHeETKm0~fwy@(7+OwzJZ) zGDgP96PdRFQULOHJ0R{-z{nG@G?m*QavYC6la}X_CVbcPh-3$YGHs1K1Ih>&ASqx7 z{?&gbcw;`5fK~MN9RU4uYtqA1ze^MQCfT~)3JsFZ>=7JgAy>7h@4=T3ddT8uq>PUO|6DUsm+*o--APfy7CJy5 zji{7H8(Kkw5JUKSuI1xeopYyYnOeXo>AS^G<@5WpyDV}3GvhZt>#uDx8Fm(8JdZ_L zh9^g4N6$2WR5+7O?8T^0jcXb7|E|K({jO4h*k3LAA8SqapJwm>WvpGH z0^xzQfd0{?5tl*vwYLRSH-KsXOXBxjVkDxks|%9f_N!+aooYGS(CInq#>a)eDY~hc zH54?k)Hk3~JqSfY(^cr0gA^1vm#Mg~sf7(}HeEa#8O2G~s&?a!T(NAOyKlX2I^*8< zi^FmOvNNC2`klB81@@ss$r%(Z81GET@i8$SH8qWd2o?26HL+!pC2JOGk(8Dk=#ZGs 
zWcf0UMg#FnG;4LqTNlyQhPtf`iqGQoJmqLlUdhFejd%jV|Cz?m|YBWi0Rn1)r>Uft$m$+yfW0PKl()VdEGTFOTur1Zk(X+RW3 z!`i?jv?#r|{GOYAbqo3NbIZsrW0-s|Zj`=;p($ws&*gO~gEz;H*Q_$Rd)^C}i1I=%Ua{{a(3|Im80r2UR zKOK6b)adV_&|uW42>@yJq!d5yx*auQYE|M@B24#JzwO>#75~f#HYztxP8ehP_DyT# zdwqSy8K4R_8aB{gFQ${PdB#t9hj`XrU^Z1Q6k|i@ct4hDfe8_Iz%U|~k8n9XKukbw znj9g9c~*{3jNYFF3_i~A0~i?$RCEZO%CCh{8xujL#u(^1=%Tc7DsbR6&&oOugiQgM)jMpd$>QsG%{3; zYoSOVO8j9U%)*NmBF8W))!(-QbH7+(1vF9k22Jqgfd^!nl`TX$;vP^2Z$&^7pSZ^3 zqeOx3ER9bjwS{-ac2O|J>V6SlPArfpfwO9C67xP1C-fN5IXLxtRFCo=hcT=_6o{d6 zs5BhNAP!sse4p5HR*2&(8HLllVePLArt6%iT7^m6fmI5iS?A#4t>!2dI+Hou>)Q}R z)a9|;9EP$%t46iaPfDd)NmqJPDd8Y3?v*hmpxXenj#fe2{7E`&Nx|6-@*6KPN}-7< z=qvhA;B8LS_h!cjrb}%ccXL&xn%5h;QWS^v6={xuwWXHu2jHJ$h>bWuV51I_a^&o$ zp-=N%c6$i79C{s6@LFjE5Py6l5s=iiz?k`~eR2~$aeRs2lfmeT;2|#D2q23umG2T2 zs}ho^k8@u2KvOytJVa~7$4AcI-CZllPIHm%C2Mx$6{Z-TiIwOJm3822g3sJ0xv~Cu z_~s@nz$#5a+sZM}Jf{uK3ae(XNQo93j`k;C`yh@lX!3Seck4s1b~UQETmW4lRPu(z zGg;F9ufF_7n3yQ;(Y$Pc8>o%Wf^VyLgJ*e)B45mX1<%jN|OEhl-pjNSAHo9kA|+zw?BZ@L|7 znXV}yuX9PZQ523Q82R_HiRBbzYcp`R z!M3z7+*S!Y5?!D8mO$A=6|qlVvjGGn(W~DuasyXgC)owPP#%rY;CzCLV8 zd?St)63T2RKb@x3r74kxRR%mJ`VEWA&KYO~foeq6G1n-*(unFDe0Ih<4Js~RPlX>@ezFs+)PqWXB+NqSIl(Z>mX&C}pYg8TL2?gS z@4V^Qv+Fj$zrW9VTNz{3K6!sn-&Vrl$!!YDsKtfxbHLF^l5BKPO^VJ|yrSk2_A7g! zKfoR6*@cA3Eop7AN2nK6)A@_mQB54KT;~7^eY{w2R<_(*FuSs{tHv01aIwN~$sTNr zg&i~{57}e6@7jA*1GUZ_u!z>hL7|K%>gRi&wfE=O@7S#{c^k`W>BsJ|Lf5ZEAqTsOB5K}fD*Z?a5kto zu|wf;oqDm8JF#QtalNX3<4q^IfzJ%dtDn}!H$qdHyG9mdSJ1jZ2D6h)@YX}X5%9I- z@9GWDY9cN_l22}R#jfCLNrCpn-GJhNvoT@M)V_7YHLK1wxTsUND57o3hBmE8sv08S ze?oGoJOz%5gme@H!2QY}y;~E$((&_v+K`n#{vo!5h%rZ#g=*p10yaq4uJZBu4321JZ5Z}4N~~bRg^q6ZBm<%=_exKgz#B&7 zmJhjJ*P!AKaU7Oe6D2N)XC8#44q)>^Ji32Z2I66Wq3AK*Em4LD56P7{?Wh?I-KtQz zrY^`EA5}gpl#z`CAlcjmRc-RUTaQ)Fi;VTZy_3qL)2Oi*#=~O__4X-cRGeS4_4Vf;a@(9ivFX&=cBJ1?_KHzvA(Jvbr1rSw zzzN+kMyK8x=!KP775Pgm$5&s?RE*BY1|+9r4BD;tv8??HOsL_9N#v5u8)R}GtIh!J z2Hb>DXz|(#OFru9&G?0Dxa`IYwP5jNhL~j;7>GdZ?P&8EB%Q1M zKIE9)1F{IaHLzR3NK;~~c5G}^eHb5uQ{Ln^lJ02NOLMv5OwcRy5=ipEK{bjg)Zda- z;|oQA$0#(~1sRn~Q1ldjk0j&|L|cVjm^HrNJRSqTm_0!+!UsSc^;GEyyx;)cWsk;U zE3r9l4iqimUKSO}Jw-MP?|8EY)1F0-jW|o}gG2F;1Tq1D!hy#-zAJpr41NzPU3$mh z=-MN6A+v3`!h5WwfoQN@cm# z=a6eXS}Q6PsDJaziS|=v&!O-3OiwR&@Ybbn&{i1V{TdW+Vt*}z8x4}y# z2Deb~J{8Cf*37aah-$sp5*_*c)1I&nOI{A)5!)%uwr|l~UEjW*&#Y!(voL+eQ-2)h zc?D;cUU1$FN_8FgYU1!5n1*U6ZhKOu87TYZa&y zM;^ufl;s1lHnMn7+zJ=7Zj*TspeWR0Ko11^x$uf)qzXx{fC)n$23&9Z6*&dg_OEH3NIXQ}<6Mv}xZB!BQ~4p2C6{bG8cGELW} z{zDVOL{j7;DoCkeu(~!vgqYRKjTCWQH_kMMCn@{~#xXz?#4%13>e5UBpePZ-02A0@og4g>WYx zTz{YDh_Dx}(zaa=fE{%5W0cuXz?^vWRT%*0fZU#9@!HrpbK_Y}bi@WGhw(|?VX{lt zg-W%!R1HNtOl8}{lc>~H_XvUVjj1P&<4RR5Ui6ow!wjfj>JD4r7Lt?8uoHtlyLrQG zDD9%A6to6pFBCs+s76utf#MOC_xAf^^XSE-_6D6uy&zP#;rtA@mbqO!b9Y%(>u$QZFu(zlMyaB7{Ut8E;DsFI&ghMlV(HN=|3Qz#^(-O$r|!} zW7p;ekt%Yn^Gr-T^#%ckAHGYtO&75nYUj!0JDMmTXX^j+&3Ml~nJ#l9Bz$h-?+R9Y zN?QNi8j60*N(rACgw2ew1MZ+L6+zKpp`SYrp91u6Tmp@2WER%8$%|r@g7m4fvY5Be zlXkz&;#G`$nH?O&c1G|)>_tWZfkGDp1qRWCXhEp0?-i5OgJ@AV21$nMkUyd8ANjDO zOTK@Ih6mhO(9>U{VIInVp8)^eFPQwV4*CDzFZh3vWBbp(Jg;hDi>!?DVJ+S&jpdv0 zC+bglR6*cW4ytC^Z_Qs(yr$VpIX$dv1(J=eUz0Ex>gHu*u&n1O{-;=cgW?y{KA&lV z{o|ms(`jyc+HT-_EfpdzuU^UtJjHC{Amfndu=OzG{o%5b_Ul1T9bC5cSWUh|dX}Si zgm(69mwTezeO8S9JqC;0JawT(NNHXf&?7Cc&BWXqp|6rv=HIPsgGV^u3}tOrve8{0 zV1nh*3kzdO)?3V5UnmPJtau<0%XQ^)Zoa#|*Bfk4l|y1_Z996Y3Ojpb2h&-%E4zBF z`&F^p4=NSpj)cFXu&rIuqdZ9u9-#Fe42}7f8>=NPNY~k0L}D)HNSm+e4p(s#-ikIj zrBllR1>Pgdolm)ikn)NENF;q)w$89wtq7#KG(QLNlrSsIQyeLK!43{+c;Oian zNg=G-L;<^MXl#~d72$*Aa;HslBdWW0@9cthzw+A$Lr&XDR8SmcgqU1wXE&wwgTG<6 zsg97V^+mb-4I6`x6NW1lokz$gNVw5F4TP8ERtrmoLQZTxZ^*}F02^&f3POs0My)#( 
zCseHh+V(qbOv=TK_S0Oo$^$irg=>r=%8x_jO3hAwF35?qi3hvOgC|0K2ZbuL8#GzM z7-CIqk8#jMRilzB&-K$fuC-Wn9(CMd?1fE*WZ!q^O^8Cr#TkGVz6@e>Oyva1?)6;s8idG=*iRKQY~e<`$i_{f=odT3w$ zB5znw&U6CRhl?*JtTTQqZc4^>xpv4BdnL3A+XJIvysk1Ybx?zHqRv8E{G^LlzTN|4 zLMMpkrbd35{4M?z)sf*1AxS^?&9)F<&>@$m1;R_N9r z&|-tZDL7eT`XC+x2b%FRG9EmdIos_1wJH`+xsoI#eLj&hvnM$%HY+sVk*YJ} zT+dHm<=8tpA|j=9X+>jFTCEPaPevoso*Nslz1Oh*3Oiq%=b(GQF`V+GzM2YulUlfI zn8(=)dYD5=zkN3bp2rOe_+|%h!OJs7u}>AB`t^sB2ml^#q3c zO?h$fk4IEArljzbnBfH9%=O)-Kola}<*;+yW=w?fn0aWnm=UYyIu}TjyUzMI_VuM~ zW_a$V%$f>0G4cO3-*K*V3nqBW#5DzwB-fq7RhP?o#VjN^x!HkVP9va`8mhEckRqk6 zC@j~N=4}a@2Q(9A#U@G#wtAY)$pmwfKLv;=uvuknE4uUDhPW`hOVc3k*5xsmN^q*q z7(^?JABv~Qw$yGBo_kX4`#n+Fwss0lXL-`19SLV_F(Koi7B7SCZJIsA>Mls}d{I7h2+tva?_FYuNM`{v zBu5p9h8iU@EtdBL)-^kzcpR&th~=oFA816Vw#G(q4pwzC9XUMz^c;b_!0*rImdmQP zGc`PlP4n#5lxynZ0Wm~T!2=)Eb5RNFY8Fc!?G<>bKsz?%%Sn0<`1Xp)a)!=at&3>b zxlR0RWzI$R1vw zpi&uzyIl-Y<~EBh1}y<|lp>(tPjb{*!al#Pd>A5o%u^IpO}EVg zNvOQk6ibXNw&!2FFr2*Xysx=9vNLeG3fbvq9`@F+UYi`LnVlV97U5n+fYC6tmu4AP zg&u|u>MFP6{7OgE30R`C>JyXixc0-*)*P^5J58yD|fPm&3AjwR}FEDokv)M$$Q@z-*k5T$P z)D2b*m_Rv{xS~TQFu+F+QGaQb4x)FmDj{p`A62uXKlJ7T?b{vpD87-&s4y^u@qOQD zkwxowL1Avszo(6p$_gL=uW!4U@_=||ZT;e+AR7WU8H4WCS`&2rbN4Zdv08z=@zN>% z+8vWLiK8j^Ej3h@@snxJnDn*`;+&fX7$Ibq&)G^c+G>;SKM>nsjg#@1!=gn{FSE{2 z^N?7!nP8Fw`LG+_=B`;Uh#4ZF0Da!H22LkRfQ|kIIO)$I9c<~_>4xxkSo>xq^2}9p zo;YXbmNY<^ug8%LVJHPB*?AzPK{vIp;S?;Q%nGu8SJ^Be zopqLeG;2BI)LxiLi#}AkAzHNWtt`e>)`5G!B8XB0-NK*E6a=0@c9&9&;te$s!Z&MO zn#Drmn#PG&Xc-G{c7IWv0zh_$8$pZPj?gl^DBRf>lA&NEoT;Pz83rv{&4W=6i)6=L zA~oeRImb}pNlfG`DbLybU0B*T+XSQ{CUB_*aEj82_$N{?c4gfX_c4rHz9BmO#n;y` z%v<+mrR;}+9Q=+et%@lew5yOxOpd!^ z?B+iXzl}POGT}>Fk^LVA;C~++1^;#U|KALb#Q%xZ{P$4tc!klguqYIs*v4=I<~RX2 z78-D*DDxH}ieD&2N@D!v%%K;g6LogGV?w(Okh}n!Zv?)5e(+nl2#d(IML!j)_L~dC|smOXX*VDKaTs z=5;Cyj|+k+L%?YP7&FIr9$`kyoiCF*_xZ>6<)_#RDdZ8)w|nY`WDW*le1Gg2RUwg$%kcPdWUk)wdEM! 
zCnY1>%-8wy!h|_MNx=9RfLV$t=&`+~To&v6QFpXWDgm=WE9}H}AWmp{GLTlvw}*_GVQ|JayXNjVvc)xGq%lZwsv^RB_Y= zzb{kPZB~3)w|3FoXob_CA^!<4eze{8e*B^>6@5WX{;SCT`#Jl+i)dhA|4&4Ny5bk2 z;e+(o=zub_0ScX*s&p{cZaM!Rf<$q!C}BdIG%HaV43izakWVbUMn=B!NviS(`RInY zQeQ4L1AOvw@U6!8D(6EjHJ7KJ&dGzcmCi4$btHwyTY}U+rpge zSngW(fC~cImyt*SCa3)~CSbyD{nw&&Hl!!*A0dPnjrA6RDNA5>P1hyuRn8)S>Vx;M z$7&*MBt*D53S6e^>JCCHH3DR<aFQqg{qF-zBEv^`{<1Jg2DQd&4CrKz=r?}!8IK_0C#4< z-V{@AaqkhQ!|{2Gcx=d0jP`*2*aLnpVR3*Wav(U8pDQ;5|zJrBfn`Qk*6pgR!%OL2+8C52ld+moG+q zltj?VE@QN2t5mG->MhTWBlW)1IhFZbUkzCbVt7WNAzVZblk}}uTK1=?j_9dAiY;AA zSbU<|7jRi>SIK~*{^1Q^UVl?1K2!1H02HQeNNnKSQFi=q6h6^}ZCEbaEiNzHX2N zpp;K+IT&RadTf(8^A*?_5M1Y=;=)-+4Q4b94zWe{K)|&1bu13?WM9s~QHJ#!%p7w|lPW)x_vwz;%mZStr>+*9UwP z&oVS|`NJB^9+J+U7|Z^+y=I40X=nq}`anA2#W9@79^$}ZcQM+Gd}xm&tK-B1b7y)h zeehaz;eNlUTe;zJuTy-OoB4Vr`mIa$?_ZV-I&|l`)8rrVf4M939^!3z6G!se`*HVw z6X&RJc=GjZxC|-TUU&Xi)bv(GmE-0*TD4i}I97s}4q%HCw+{ zdC-pYJZTk2W{qu10;PB{RbAvP1;?cG#_XC=coz8ZQFArvHYeHluM63)p?WgT&RV`tnCks?Zw-W|gG%w=eES}k>we=i^O>XLh&iErjn6iLhE zNE%8@s)5EgwS7H$>nFA{@sVB)F1If-CU;RRM&P>CwLOKyR;&7uVO2b%x!qc=pSQtB zo}lpNz*{lhEK*TaO%JURz08DRg4wD*!0Rh7cV_OU#}T52nj4CL)QEtr=ldQ_S1zYn zeE@-Y)}{2E+ssBS9<%Xxpb9a?F$H~4wYhJrYgcsSHY7DK%YJMO(C!7KO$^=Zn7Yy= z2F>+%)?+ccL5UBqv0E^y(et8aU)q_p>G2|n?$8f6vIJjJud1$^+vO*8L(pBs`wUnM zj+Inb#M>aSjR932c=pz4u*>qBZC}o*=r3~kIXPa+_UKbkBpwgpFUQFVca86X zbwmCc4A$}e_T2D94O7uXg{7BGKb?R4j#pz}0DQXtD85WOmMB+qtPov`TfH)C6`c~^{LhJ~kSl&0en@rP z+V9vBFbx}G*LdqUud7^B44PFBSZI9YF0EHA_n+fs`|pFs&4sNq9UH_h-jKXfj}PFb z2OA&}9xi6SbzPB$+`Kw(Z$q+Ki?WM6R2@ifD7!CaD*Oxs_ek!X=YJTy(utViKDk$} z!|nyD6SmMX@9DOWTiFwncM}6eU-<60?Wy<7NZnrcm_=)!E4wRaS$F}orFkTp$7~Eq znQ}4DXS-5^nGz-VUjAsw ze1G}!Bz+XvBAtubHJ3nqJR9;uiG*#?SYBowoI7oof46>etj_8p9L>O?w8k7U_s?34 zHuDf}>LQx@AbEbjVXM2*5AfKsnKalr`{vj~8b@`u|Crs}L zMj^Aa0gVbEiFdKea*B6G&oZJ*Uhx%K`>qCvCBaea1Oz>?&dZs>ud}7f6E`^yrZ&Z* z54me;4Qv-L8b9VeV=+N!+9E9+QtkG>Mwt3_edv3Mv?L{TWRk|o)M70ARX_Lq# zk3Yr<)LmsFnnTpaWq04sef!+!{Ui~EIyhz)ELU$f(=Lv&tF@8(bO=Lh$jI-Y@oMUL z+~eSeMc-hV4j+eZ=3o#X4567q^>_C6O&#_cHYwx$FMeH8Y(U*R&2Qkzfbk;9@F!Cj^N z@&wYpb7fvd96~DJSgR_Vpxx$dC0z%A_BYDxMGlN{`6Z0PpA#qiVaThkQ!K~P_ zDO#Rv_)cw6nw<$FWA_nvHZYs1jSm+htl**ii)u>5PzL+&uzN3JJEqqi6GY|DtRVXp zrHu=gZO7mQ%C#dn)ix^9;TIO}tQB`Qk%co1gB^`Nfp?+v}+8!MgLs>I$APtI?wB1*KK55;I~TAT_ljf$HO9!weVlIR~Bz#lH= zxKwLSzt1fy=AUBczg3hBV0ScJw{v|jBtbbL;_tXEidDOLU6gtddwtS5IPrI^zcDVrDG*u8`@TQ_f#z>R~z71CT$f*{{nlA);ph-ud;B3G$cCt zJkbrw<4>0z6G{ARWNEQzP0<>sh2PBVRIz%{CKFFQYf02LE!$4DZuToDTr~2aSFB}8 z%5!&fqh+#j67s+?%o#V#c@zI=hJFj(Mi?Mrv&TwoxE~Iw@H!Mz$v8WmFif*@pq^NH zgTD|?5UoAEcrBG&os;RyIdu}cKS`6CnClb0ED42>W=$(JzXfMs?00){-z_|ypS^e2 z98_Gb4)h3}nCN}ysAFQwUR_RUcPO^&clMx{ee8WYemh)xbDB}o|43=r4C~l5ZrRLG z0-uB!;MZAQbXFDE77*QIgmQotxKk(8Q6m#QZzZI30#SXuDiKTw84CS)J=QAK+b)IL z_xfVW@6GS1qm+66nBMtogZ9o{AKGq6vop6N+USzGy3eS8liK9+&PX~yg@vz5)Fgnx zcKxiT+j8)5wf(T;xmb&xl9A+EDsw&$e8oj<1V>(_DoEfsbd^|-GHGiB+`d+KM6XnZ zSj^w{(68`NMdtPR@`hIG*KzZ)%^9qotgPxBtBo48u|gW>Jj=SOs}z9W>RZ^e)Nlfy z3n#;#H)$b38jAfaVLp$IJOCr*(eyiX1x{plWc^N`-mab%Q(nrR%GT)he?M z=)P&ZiE2KRsZ3=hJI%)IeY$BUZqJVA7~gI??hrHR6z@55h?7+&OQCl>(GRcO`qcDi zm~znw#JbhF=uu_K-JVT+6uKMh^p)a^IT<+CPBP4Tm$C(GShRa?MEJp%u@Jeb{6pV` zPiV3$5SR3$L#Oq6tmUgs?MPT@-KkfNRIq1Grw%j?_!qkHI4kh?*%a^a!dHtSsT2)+ zI9Y*yPSIVm@WbR6I+3O3V<0d6D=S3KHb+^l2KSUhpgs}xt-H6qcDLurw5zF?IIU;{ zU_|~giCVHOgJL$R(eROCQsGHsQ%d&q$a=u4XLADX{cuvEAgOy)h(2*N5#?Plb?gFZ z1=r6jp1$|+s${Pl{}L2$dJ^WY73Slhs4!Evu2B6_^RBRnul%J}!$;y}wt&y#?oauI zV&S9lTW=~~hr`t0%P&`jbrjp_4VwSj_wtms@n0?{PtHwwJekw4iLmU6wEu`U1zf{P zUkG8sr!VRB>u^oUR(%tggvfquUy-e$wJc>Oz(vWb1etsF{&C0HZ~!cu#8R)+vnYnd 
zr5C6)pi&8d!_3BeA!#POOF@)iQ_@D{G!J;Gil-F#A#(Jsp#eArMdXOFngW~>Cvrqt zT>;X%ZXEg9S)NkX0K&>*mqri~>5cSRnaIGq0ouP27LRO+o()QcRY zAhtxDGoh-pg-y{|(y7k5jTU{u^*LK9Yc^SH$f}%F13LO4m8;toh$6l}x?mB_J8!tr7^)H|!gHxj~k3*CkzORE@#_x%;ThI~P z>I-fb@91Vr-xuDI+PV5#Y$`uz*tT#_3ts4aU^`AgJ0379s(oGNeRy!U@ZrO1d~G~& zJKjM%X6dQAz&kY$7GQ{wE%!X6!Jmm^eLkVywivi4Ir+3cb%5@uytW|to-nY}^02lF zpPbfrpx$Q~1xNiGNG-U-Hr@q3M4#m90o1Aj&91MhlZ-0%*Dz=g!{tYd-@$S(a* zZ+@1=vgw6-;#@?@^80y$D^*}QH;-bBeTGo2s6+M^Zf}08YxD{F{HBo^Qh*Kd87QmM zb>MSViPfLk-T6t|^XC)&AD86f-rDPBxNqMCaR0kF_3xKt?th7(|6iT({~<#u`#8*s zAb9=ZLUdLV0D+ZW`z-jAXqadi=1%cD&HdC^ z-Fqd|yBzKValcfN8F#X|$VzP8>&5zNdwb{S^SR46b#6Vexbq6d!&AU8X2GaPtKm(X zRr5Nl%Sh`QGhMUO`W&VV^`dkyV;dc2PH{gjKyAZXZj2(43t)X=JIG>Ls-R}9=Ymmj zemu^@v%B1IYy#dz3AA2Lh=H}NwV;4oSBD}q3KIYlMS%7e8MyFV&{LZW?(7E_uUodP zDb~RJVafCYRPp4FGo@J7U)+##gde3xJO^wWPzqT)EY53k*WUE)QmvcBa`kUXCVOyH zZ#3s(h$R_C0rTg94F&XL_~IO7yD%H!yUYa`11!Be*z3;WT&wQVM!OH!#Gdabbn?xF z`YCZ*aIr3YEM;*vTL%DS2@#}AGJ1b?z%0MeB9kfUxOF{DDakf>@#a>&b?CMfrPP%B zVu^%e&Z$14hesZpcZP9 zr~y%!T3^|9R_O~mF2yWLboIqB9$ErHr@1=ZibWR))42QTnkGiTXp@A5Yb#*X_Pr^r z7%G6nsXs#=FwE3H_u~u@oQ&0G4iFzNz~|zuht$wkTjIBo^UN7p4S6u5gwXz#j@1D_ z6m=b8N~ClPO>v(6AvT%RGCvY$4ljPUw$sTsox$UNjAT8`9vA}3e^}g$qw@rOPhz92 zn(Pg?=Vxq3W^+<8THHtWgVOl3kHH&ho_HJ2Dv%v9 zYXLxB(0|Myw3;Wzw+vN{R713fEF9J*^47@^Y{888iD@|bb{#Ne{?sWrNv?j+F>zs7 zP*UK*{)i+2`l?nW;-N-e@EBcDBFUrW9ui4}fPvU1IvMM|=hfr>TU$;vV&edW+0)PB zLyMBP-zmic(BuV}q=?bS)D5Vej4}4BeSx;-&i{VvEBYS!SS2iE1E^r5U=~nOne{Q) zS1scmOfU4Jgh2G`QQBH8{IzRg`XMBaB3Os?44E^0r;D*FTD;P`fN%Z+W?dvnC#T}E zDWe>Jp|R&@5m84F;=T~hr=2?@wdKw19M zwoCsmnLcBDl)$=|5t>stic7kg($@;>Fht^FaDQ}DY=4qA?N zcqprVT=A&9edO!>gv!F_V;EAOtyki$I8Mv9UYK@#1(s8`^vH2#oup)Q_`@hqt3?p7 z8_-5Cs;6qE4a^WBUF_oyKJe3eo4f7;CRZEHP97br+R^D&WzA2`KBnC{> zp_>b^IHi|@5X;-*8p|JsK=tcn-yW@M%ui6Dz&vNRszSACP@vsQ1E8ms&Za)*b}02{ zyahFwp7J!+_7=jr;BpbA3Exa4-b840eo15m#ZQTJIm<8N04k2A4GmERpmv4Zh;wo2 zkYPoa<7}h(N2J0h3I+*b4rfKRA#;!`18kx8Y~LNh+!~Iqe^ELnXPaaxy>y!fI#6z} zl|{)@ql=(0U}^^=zX$ownDAaA&I^X;V~&yrv1T(^DFtAHZ07 z`%ArOTL&TwlZw5T;T(?nLbQ#G6)NmtI6&Ra#GJ@xYnGlPvE2{`)iye1F9k$m>{H0g z!mPj@*3_~C)jRa5V%|k_${@a?Sc_%|X@x2<7Szn~+480sy>+(u5mnl=%_uOjS4}!?Uqn(|hmW~8fnBF=wG6!U{ zc!ePw=NV802-{9;QKgLAo=k})vk+KP3u8HE<;|`+I~{5?C*TdKr}sklm&uXpT1x|| z3Zrt6$1^&j<5bH$z^Ip59yG6-y*ECAFU`RaIj_Mr4(xQZZ!tD{=@T8g`rjza(w2}1 zA!RZC1ZY?W$X|By7#xvT_DtoRs{v2WpyA9m$#NF>F*A3VN}Lute_{{7D6?4!OxXVE z*D`>_jq}yNadg9hy)iZS4)qDCG;LA@beAJ)S6f+!Qe92$NjxPC@ll&fe>g9+rqql2 z;Y0CF;o|2=VR#F%X!O@hK6>Zs4bAe5YzP;YicH|TG`BvxV1Z{JtLgGR{g`Ib} z>VdH-=ngT<^<)ZjM;|x(R6l6>&3N`DH ze_;b%F1t5;U-!0-u#VtA ze7aVD+%PxXV4}%sW6`>-Zh2P2DqWMW`RLu*QnAeBVKjL$Z?)dxa49@sdp+6B`f;50 zMf2zw{Pak>l+b-%IP8S{#yj>aNfc>WNsJRWL;|v*I}QUV;eUH;M!^&#p4V69NRNw@ zONaB*9d!t)AIYT}myW`h;`qQUE09>q2W4~b2#J1C?%y3hb`LvyH7 zLxg5&MSfdq(I#iqjpQLslkYY($`)8e9b@E%uhI_Kv&Mvkh%XX=o{5m#dDPX-LLfWj zx#{Z?)501V)+(4Qr5P9DDlH-N9$QLDu;ig`896e4L(N(O0l|=#1IdR(CAQ+Sppp9) z7-kKxUo#B3ydWm6{DgxjZ(OcP6+**378MuT3(_{2aI-U*sto23Ly{*zIoBxu5vQiZj4 z96WHtt9Vv?W>Xqn5Yl7(K~T?Q0>siX7al&@BJ!o7`Mr<}G;iuZNlmS^q=eNlN~_N{ za-3FGY|^7>7{Y~m>=ozw2F&Nbefn#3-qZWwpJG8LKpWjGKEl- z=7#_VQa;?}!W-?jC<^Q#dXgb}?|r2Wd|AoHO?uIe6ms%X$Bq8FFH51V$wniGOp;O4 z7a^qUGbNiQjT;e;n7-E}0X6ls2vYREKE;e1PuQ6rH)cl%0=*?H5RH z)yyrpqqhz=VhX`*|5>YZe76aeYn7sYt z>RhPu3R|p?B=OrM65P9g;lK;07UZ)R_JdeIe6LH&mf#gmPsO+t*pqdOZGL_khO`Zl z(+J4s57n_N#3yq3zP@<%VlJ8BKLvS82Dn`<5D-yMUNF29^d1?sf#Ieb#-6r(o!M z;0aJH=D@%6_9Im>!0Q0;3HDG0U;Cv@v;B>uzVS7{ZZV$9rf#D?+0AgtoC8KhFUE;u zq7hQ}hk4aRJGM6d=CP6zT+&ON5rAh4QKJo48O^c=q(wyB$;G)(T`1w{DHhiM@cRtr6^{J`N~U z)@agv5V~J*9#6z<5nsZ-3w?DtLF!cfx{{vrWiiJHhSwNR74R2nrnc`k@-tz>zs5K) 
zw%~}~K={c>V--AQI*b}xiZ8nZpDcdJ)iVbhliYDP#vov+7= zrT)9;A847GCR6W-^B+xX3pX{yj6B)k{!tSE0fiLc51DCa@)WV!4ll#5$YGk;Z$2XX z^){yft-l{VpgwRuA-48~%hb@$EYMerkkXf2fvBpV;g}nNonEFSMOkm{m>Y&^0e3YR z;iL&HSRo?kI$QYLnPZ^+d_BJ~P2-$a|mS7V%Y`@ZafPZr3_xsuz7b6v}@LkdzgRePQ zPL75R1AOu!XNQcwe}v!gCdb4WNMH@if%NaW)Cg*53p!1}H|A6?2WE615MsC#RqPON3iWrg?6{pjV5m*k( z+w0ImX`va)^*x7%d}Spj%Xd=?8%c50`YApV9!Pq!|B4)-Wx^lEb=2cTEkPAgrXgvf ziZiT4Oj@9_R5KgwMh(|)!feR9uf>T>M0NJq8)zV944}_#k+Vw%AYZz#+UfdO=Naw` zN9*+wmatJ03aV4o_t39x)$3EFhks!-ZqO3+D0+*+%)1x7zCTt03AIPms&G1@f>_Jw zdlrY#TGm*w(pt;0F8aN~*7z&$AA2P95P(vv9*E@z@2J{wuWOI~(Za)AO_>+iW%{bw z3GzR~Q>R_y8e&-0CX0lnMvNX7 zJao+y>y=_g)=0b@CB1+WpS5f};$|OnKJRqxnppNCM=A{Wg7BIQbmi8!$#-R8$}&jP zxO5g6hHnXOxQc?FF&i!tHF1geXQbb4t?r*nF8vwMF|uhYz9Xut6YgR%!EpyoP_T^r z1;ZPVzoL9bg2S{+9#Lx%RRM2ui+5hrtMMAdoTJ=D!Ia?vYAe?zn`&p`V98qZC~?vO zOZ(t{5Mwp{1`<26Kn)m^^L0c#h)w&=8Vgxc$?DGLk=^A6qX^*(J$DvSb;5}RRa@yb zk8Wd@p`8JH$~EGmqD1ku93K`Z;uZA$xh(j64jrfJMS$dEd1ByYKjWr1c|-s8IAEQR zOO5peMSc+KJ)z<|?w_H<$d{jgG-kWo@9WRMw~HUv(ZHuY^4V3C^=YGSHMjjpusutY zjeN%w^2#`O?c$uKLIwK|3N8l#&0%VOYTfcKB|>%cVqOUq9U4?zQdOEbcPnI1MEI zfxgGn(U(VuR#yGuiu|XTrpgg7O=*^ifmx8#F8=0#p&1?+yX zF~STpnJ}0?GGIR%dSzmjFS!FbEsDvEuCiA(j;A3!zi)7x!EHS+G>6zJ{+&IJB*bbd z4*PP*E@}RhY9k1hiDeHKDIWV@jJ;!YW!=^-T$QB49ox3;q+;8)ZCe#v72CFL+jdg1 zee)bV=f3B*?`_|Y-PUext-XJ&Hs|bP^f5;7-i=+gR-5s(#t}k>I*mi<%SH>So-(l= z?ca}DJdfaR=14UWpSg~%l2covYRwHKuYu(IUv%n)yzpSKhPLYb>8&#f2x*@ zEUI8u(VfUBPbStI)WtVdTm_BWC}rZ0knCp91;ArC-Hi`=VnB&bnFX*Zs;)3$lDARa z%RC2(~4wUY~;U&r!u^jYn|uYi)did?WWjJz(K@WL7wN%I1J1Y7%u2LN6L!6 zZH1jjQd=AP%@7`0x!8JRQ0OU#THOf7Cc~2?$Clz4M1si7ou29<-`QxG@$9Fs!&$YY zu^kOT#ehl+`5#h%`a=d430>J;Dqw`zx#5~D zgMAaI{W&yUUaA>Y0mkpn-$!za#4%!pH(xOlw2C0^pYF_JP|pR;{WmC1fFSlwEs5pR zKF~MDwy&ihI#N9-?%i$rHq*ARMkR3DAsSV3jB_uj-5oWnJkFeU6~2XJvy&D?D4NIv zszVeT>u@`cqs1oX6sIc+vccLZPPFBt$xW{oH$WaEsp|y-1v5a`fYmKw&FiSwMp?c= z8zzFDf2UT*WkUFocfVE80X@NLm~54QKL$R6WDBDHZC{NNtKgj_^(AHevKggosFD}2 zs~gn&oS`dwv?}1OB2VsWabYWXZWWRPDGp13Pbp>s%|hB}Lrg|%weZXG?vBQ4?$z1c zeTk(ImaF|FV6_nTeEyElYEH!2{AHV^Fq*slWN)=FYop|^i_F)(Yb#0K&#OWii!^V& zqQ5RQ7xQ-~R+6w^Pdcm?PF>D_nHSBWe*Ju%rBKIHbuRY(TZ!FBf1@@7mX?nl^z6Rz zY{y*-2zlZirej52_vt zY(BS#`2K(f%G)c2EA(Dkp$%V#w8G5ox+`{bM>uZ`Z(*Tr>|NsN=3>s?vfrF4J0|z1 z3glgoWt*I~U^^)7-pJ|+&lv6WEs(rAcMsH0;saJs%ot(b0X(NG`%TAvZkXQDU#iU^ zUyd9$PtY!@D}bu8sD|EWsbJRGQ$2Hw-)2=mD9Dh^N7fx2BQuLkjGy#cM%2d9JHd67 zpmhRyW1&szs$yfQU{JSZcFr7uMuu6H95DAyd^f6kbt-z=;y0C^+L&FVBR_4SK8zfa z?fg}}0X&sB9~AUP!_kY#UVAW}@xWc1g-dIfbPks8AKlpVb)kA*LaI6~0m6ZVp5$M; zuaJw=Txtr{HDB7TWKG1iqQ-|HA`HPU-Oe24YXo^OBxMd}4bVGQSVc(I!fDL)t z8YZWiIRS(T&MtZSOkk76>+`u>26T2e5k2_^Ts3emGu}{&xrxqht5dy*;sgo`<;j>^ zluiSx<-fxakB?&G1p4+ln=yN))}A;`MVcX{SUS-tFSFwVPzDW|4y>+Z(4B@tMG@W74| zvC>uSZMEfg#sl&pkBq8I>$IwWIjPEY*D@O^1E5@e<7V)x$mU1${1l2sLxX z+C+qSH$PnxUmLpo6~P|1XxofQy+4V-_!4Dffe*wK*^~*%GGFHnlAc4VX;#kL_;jd= z;d<-7eQivi6@JJXKg^dH4(o5daLu?IUG>3S84t`{#Fvi6B@7wal@OAG+@ z%F7`F_edn>x9%2n2k=NW0aIC;3|~;e`y?iVAmT<$eUR;?S{e9)0^1^VkfWh_HHXBM$+3YMlm5&RDI!m$tT{J{$_%tl3bA4RkP ztdyG1hVoP8`i58#gaH$)SWKZULj|8$b#_i@RVbphX=N86B#T#)*3R$xQB9~FJ4{23 z1xIoTL|y&~^Z+;*T^go5(w2!2ib)7krp0bgxW%h>p^Da|*JBtSK=+v+I+CHhkcX(= zigKd;WQhg>4Xxe;=z~7;vSa74Oih<5s0Odh&FZ1uA)Ry$iW%h2?l5XUG(OR|2+**u zFGW)CoIQtH#{+|z+!CD_QCO-Avx?`#sP}r6@`N2yB+m6J={L4&YUq)<4}rkXk_}o$ zY-0!*H|fuwwnn#D-4J8UC zK(xthVJ;Y!CskdwS)^*11PAbVLiwAfFgcM}1NJ=>>m1a~NVv+%r!STE#JwA-XwOH~ zA^328o@Wx%1@uUgE!9t?S@1K*`5Vo)zB`vllHN@haf&|JA^kuKnTh==D&%YP4e}B% zE}sUM@8O$(gDo)Y$dsfKhbiGnruFZ)s9LcwZ2ytMx)xi4pcF~9Ex+)U1iN4)x;)Y7 zBbr0qe@ZKh3P9DUe6@-@{;w)4M1QZ_znR*vKJkBu!T+;g{9iIFUP@>7bHYd;32C%c 
z4Hes(fBNZl5k!8Z{mjvv{njXkX-*0kB0me2nCMjgl&hGOJ!f(nJ*!MNH+lE{rHQsj zfsT82bQ)65<508fz1?GSDZOlGd&l#LZgwV)yQ$LIWv2pChcL^=*Lp5RRkS z29uDpTevyn^b<{!WQGFM4$Y=nQ^@-20nx8Y?|dGmF%V8FRtRRc{Fi)Tu*c|qYod*5 zsX4mG2m>ulp3`HL%M$@gxT-tTBLW`MUBW`Ewyq`m7Wbcuw>gTBQhRJM9s;EHP?vc~ zX6D4mJaS85>GIU89jm8y!SPRc!jT+fF>s6Q!THt7X!5=4MlnJ6IbT}Obdd^`I{KpF z<3hpcdRqD^$crLOu>%V(;lp$xZm3uE0>VG57!8FYYN2{x0`=q!zjT=d|G@OcRhI(X z^fn4Y-Cr>|D+nvq;5{*hOxSwpq99NmZLkoA#egm#WZL#xq%z_z}!?U<#_T7Ul9mB85RT*{6+aH_hqoL?n- z*b-|L?ugvASPja;((0J)zR<{Rk@KX~t0k{=8*RHVn!;4#D^MX24vLx1#hGkd$W9h; ze!VyetcvfHepY{AzF(O$+seAWFRv^L5B=M;Bph8zh_)Zi1b=@3EMF#O4)U;J4WNRC^aLo)_Y$Cp3l z>5UP;nhl8BRLppAZ6%o6RGcPwkTpookGSw0%x3KMZuYnq*+uafr=Wd#vU-$26tP2r zoHV{;iox|RG29o{_D6`Rgydy;?|~!n(az)S>|rOcv9NVpS;aWJgCEWuFi6DAAEP~P zM!kG$ViiQ#<#LNXM#Q_@VK-0@+S{=K7NaQvm|<_rJ*}jy>&Q*W&y--uLLY;CQn6WV z%bp*`4v(h6e0A^zg`~%Y`6YR3FEk3UG}ECpatoCT?A9~v7K*1<>I?Ze+=aWmIi)zV zEm6fVLz~|>2qR}6IUgxL|Do4(gI68!^i>RR`d^FTw11x@e^1o@b&}Xv8vVZz$nei{ zPo(_6(2ts`B0Pv-VfP2^4-Yw(g&_n&MKSZ!$i?5=bB>$|K}LnC8&=b}`LsNulJifx zo)04EQ`So$fr8F;bqXSnnF zu@b~hz-sL#x}*I>5lpkgbdpy)(_UjSv>j&WH}FlGG+@eWOHo+!V;X>!B*K9@-7Zqc zkVpQ}u>wNYtVidd^QAFTEr3JOPmC0LJ$(*=69)j4TuTy6{^cJi3oUGL8^#!H27sn6 zoyTc*-XiisuhFpmnQ7s&4r}bt0N~Ir)bYjzD>`SDqmH1TiN2J6U%Cu`Nxr{&Ow5sj zM_JdN*?MBv?!|(8YH9s}70B#$KJ&NV83{`4=r*Wz-151dLtS*iIYsoHE5#f30%`VV zHFLG3J50-i;BvH7&nth`$#*69u6c~Q>cq1!vZ{roA$NZhGjb7)Jc>nk4Jr>+_!x+P`7$pJhk^QzHY5|0hcO*E$VSwD@b@ zn;VKc#|o+B7aj$2K>ZKsdDa0i6u5K(4rHuBnYHSQ8Z-G@TVZeD4^*A)_44TFo9ulIR&!kw(-{+1bp7I!C`Q<~ffn`_0?nBx%V(5OfC!2yCfAA{r!6}wnM+mG zx#sibhVI4R>-7!({@TdgvIwGhBQdMivcIVTI%#^M$vRgmnoZ4GOO%}!tM>!}@-7+K z1;}K}F6xDHEZpdxl~l}U!3JsxGPRC%D*#XJhCm!pIqUQ$^+k(dKg&vm6GFKlk;-KR zW~m-KeGM%QRa#nefY(8nb2x>xE}h@^bikve1{dJB(hd*AGa89E=E(YJgvUU{{At6d zD3tw1xI+I}fV;x7H&LJ-i-)Y%h0jB7N<)%Jsq~&?oMJ|S#42sAvKOBiBm;#a76hz^ zq!4aY5akVvW45zUJ5?HV&NdLU-=!c3MXhKNonLxbo?NO}ZV6OcYme*c&0_nRJ;&B= z7AV1mG^qCoETe|L1W9~Acry~mH2NUyJyvM`*<}g4b^I&f`41m~y}}>cJqnsVamXK< z?@h$3zm@I?Gl_Ud%iND-6-4crE7iu_#jM#`0<93_3}m zJaX;KAPEyt+V4)BPy6{BxhE2dBKJY+qrr%zXw#llw-Mo35#FQeMd-JkBvTC+(FbN_ z3hn~SU+%Z^a-BSQCzjIp_IdycL>}nGmv51eSlVuJ?K*4G3DG_yxCD$CrO!%6aSD0& z)KNF(Q3#p4)k;i4N;`Y-cHnl(xh^cmXD&p$XVx~fTfc{E}_KiEg z1HLv}=Kq*7|L+Ya{P%|Y8xZ~}vG8v^Q25d^S2VRZ(lh*z!ot_121X7J|FEET_;;4n zpsc2V^ri7f6T!q7!3VD>=?_ZEHeO8l+thIyFU=RYm{HeO?9WLH09uOm&R-X$?lN=* zQR*f}vD7niRKCBozex4qb67|8fIQKm4NO)|;Z#y->%wD-`*Ho&%cSM@(xwZzxF;Im zm%oz&X`{?MS$&;iW{4G(CRS-8??sZ#$Vn}MhxYXxuuw!ien!W=ybA6ZPP>_04n4Km!A_Q2*A%!OVA%G~ZCQXcML zbYV=`BArjYI0`!tMZV+sO~Pc=DpR~k$3%0MIZxqcDvD{ajG?_ey$9psS~dZ;*Bgq- zg+8RrOqqmLXUZc)A%L*RD0-d|sTEonLuUcs;tGc;cQFKNsZS=H~`hdmI?AzCY=7Uw@V$g@<)EYskz zMrEZQ2^lHeW0MOs5T!YQz664B@|8FyAaLD;gAGo)dP9t|@_e66-{#YYkSUP{*`Q72~O+p zc_1^^IGdc`fQOWhm@qC2uSUfweq69?D?VK?6-_lS62s>#fu?Kf4IY{|F$M_uZaPi= z-OzP)1eAVX=;eyAI>Q_%XX_5m@=?+SsG7c4)-cV0yf6IQC@565#Fg?N^nsLb*Z#}r zh<5jodNz+GhRW{&8bTuic9QX(#sOjpbJSx$u;3#tZej*DIB2eC@E7*cW5WQgqD_+T zVDv01H@#y6HdYkKy!$9RKJpW%wdX1I73MuyOg3RT`ex?{o3Ob^U$d4Cu;@3!!@IG_ zgcv&cB5L)NYVC;dsO2ejT>$KrTI(zD6A)8#;OVCYb_flaQ)gpC z%y`~Q33?FBkoOl}TjKPO6YVz1@#TJ;v3VkcnN{jxQ8!JaWC#u^S)w93#J5n-nC0Jm z8(bP;f45q&Zm zntk(QIL$gpSsf5MydsR%O%l*~uT^~&T57_osaiuN_Wh?mehHEeYpjvVqOZ{Nu_e8d zfaj5Wx>b{u_tT|Hh$zcIkeRawcZhCjUx7lr2;+Mo=o|78Yc==H+{seidqX z>JHL9{R)e25fXvVHJC;lS~pAw()jZWZ#iOYWE8gz_X4(=*L< zRw&P%*JD7#9$aHGw(+o?1=O{DMvJ|6@P8uyFm)xlZ6Nr$$_*+jzo?PnuB;ip8kBLL z=3ZY*rpMPpld&R&D!V^+&=Ip=$(vn6e# z5?sC4wa)ibk+#Fpi?gkKABP#U4ak#?v#aF?kyWnK-TTLuCu{1sxb&3;YwVa)rYQ}E z^)P7#<8$dEVlbJ>AWDpzKd{`w>}Wp?bYRwaY((!@XcJ#!-ALs+9L_QE!axm6NFaTo 
zu?qO*{9g`V&Nb~12!x%iZQt4$of_M1{DKWg2|X^g6@jy-fZ>d%=yEB*{<}As8^Eo?1wWrVqR}C3^aLkL^>&l#fnD-ox581bteUTAWfqPj1 z-r%Q@R1)J=`;sj&phOIjEj`-{BfH{5bZ4D&kWxnQ4TvAHtS_5J=Tb%r>oT(PT{%q1 znF>eCsAM+HEWqJ_aiZsGnrlu*F{Yh2Hf~6RW?(~~m$*l!JKy^aaWm!xab&E!P-FeG z+TIBP_ru7a_nZ`thk*|Qo})jI!>hx`w)#&*rQ`WT-R`YmrZ9SNir=T-v&w5gf3J{k z{5b%Y<;>vhy0eD(#KFaare=0R);i>u{NindS_nvV!hzes%~8SAuBcyK*1De_k0KkGQUXj zVd+tnQTwf{Wm8%T*t+{_z3*ka?ma(1_vyM^*NVtPWsEDeaFX9Cl!YS3yt4{24PYc) zCc*MEp&64gzd_k9v(bfV4hXbUZ`uwVHO%YSb!T(DiN1~?h;om9qhD3y-WnB@2mr)y zH?s_Rb&#L>4&9DlJrPnJ^gAp+uCM@*$qGqFS){j_-HTLDE!wxQdE&deY{jmf%4@M= zUqmgZ$*9(s)A~4>ag>ZB79Ps^LUS?H8@j>#;FIR^eXPVvP-;i1ZoR+`um{nP+)0!% z#*y9P3NG}mCLT9fymw^sap7weiD5yR1-lRy2?xqMv7nac<+!;kz_|OBxw3-7CcWXt z=Uj?-jt%%^bVh%Z2_`b6_AT^A)Kb3+q=PKgrw||6Liz@~Yin#m(fCUK1n#Kv`z8*o z|2U=mnfgM^UGV+)&v@|Z`nUMpGAzA(0wO}b-x0n*WXB?5{N4cGazX^KVwsCs!-$s_ zZ>DXLU{E7#)kP$x{VD^WioFl_{i=J!mPL;%cUbMq9#Z$KP#n)(u&h5lq#jozIB%w~ z-t}O-s{u0i+X~!I^IO;Uz1uqErjo*v`^l_>)soR;Q*)SxZ~qu2Jmi%VkasT&!qpVA%Tk9R01K(A?2TjS@EI3*^u{0z$=tsC=k_rHm-dKZDgF#7QHKB zq9FSH^sUp6)_|+dYVQXV^D2o0%h5;1<+$g^%MPNi+q$u+iqbK=r&4n|zJ`K%@-51v zxcG}6V_Y1k-;J@?kh4|re%u};xxZ2Hho*M`zOpT21N_HI3px|8~$MyKUXL5+jd(Cm4(G2Kad!(9g(~!{zS}4Aq8ux zAL)J)yUgflRELR1C3RDIW8*NLZj<_jL~yL;2idJ66EM(Tt74X}`UG9|8O>$$I&9F| zwlZZPxb2{Kz&}1D3I;kt!WV`6=x93i_m%vGB*w~oV@3zY%dkzPz1~6ZO>YB_}nD2%- zkMSR|WmP#zUEkfYJKadN6u9C?|3%VtvGCqqm!Zp?0H56gQkfq+R~P+V+XKl9&Z5L}4Xr$= zu>Bihunv|kk%K$#5#%Ku9|5D-5D!f!31=U92Jy|ize&Q5JZT?@SOqrOpS>GBm0sVH zs51uSGtyL=@%ACXoo{X%vRg7Ab&gSSQSGwbj`Ow{jLqT?s{3;yK9sXlLbj;3*sKJ! z!qbGlTmPWx1|Q;8^1o=hod1iXxo3+8eIfMV~p0BO;*M=fu`d1*Qc&UOY zg46-*C$(5%T3s(1n=L1hP1U%lp52JRm$wrFQq!14k_J(3X>9Lo;xXM-%B^I1pZ;Jl zb%J|vJz=boW1st~XL2}^_IS~DIr(<`a**|{a+?&it#Wr7kQRP;EclV~j>ssu~`n*S<(N3B)HwNM&au)1Y1^cMaJmO5O7( zxMtp4qgknhRVch2%(V!SOOH+AcPaNFuLJA0{ZPf(bTVhXSBb~C0^d*k5{xEni%oFB z(^{lgK7~M(n?PH7{JtJ^LrcJ(L2%A*+5z=->tD8G=>&L*G#rph+v?%YBB=EghS%gj z@TmA%&`~5ps>@k%;y-&8&-=2e*x179<3Aa}V0akrG7Qkp_F0(x z5ZmQZ9oh8^5`_WcFP)lfL+5MC2+p$u=U8YA!UXBF2AEbjDds~1OJR71%2%AwCvnb~ z<5p2;jH#MT_59RNndU!|a)raN3BzOM?`^>pICtIziMY{$fu~4qD@oDSxsH|s$rc2kLz>pOV=&av$ z)XZ41(-~mc*NvN%PfL#X5-lp%TAK8hcJY>Fd62PbEfW>O4Y-Ua;Uc?={f1_9~nWZQP;R*xE8Fv{gC9R75oX@Yn z`lVfkh1_>V_(E^44d*^{b4Wo%=#)ALc)pvgj6|%mjhlVGaascV@EucICAgI6^?ql^CO3Q!m)E?f;h*{-C4#| z8yU8p?hOp89^zuY_q%5^50@kO2H94zP=mBmOG3#E**aR#^Yg>x}IYlm7mtqp0)s`OjPYZz~kGzu)5D@rAIFqk*ZB zy}XmPwVuAE(SMG`{}$=|O6a~iOyImxAkVRjro+fgbHoQa$-sV9!66EX;UVjk5_zv{ zqq^H}5W0Beeo#cJBZ%GQh4PNFd8p*(e!uKHH#9j+JxWb||Jb`l{Z`l9r@Q!0X4_wT z1`#|r#?MX-&lR4(4=p1Z0`tfwU_5iJc~aU2=qR}15HKIHk$;mV14{NdBLLUZ=~Q*= z?p8)WYb;y-xXP<;dhh)%(|UNP+~()s;hO7oAe>p5Zvre46Oid0=_&4kvsE zcPYn`u~o?*ip-r-d_M2N*WZCJ4LHl%*6*j_f$|(IaaWSRB+oNWwlxn3Z?H9q$^s-V zhb5vF4Ekf0xC-7%ti3a(z1CDtvhQ5T$bYtAwu%?HA7hnx_jU7>Dqf5(iQgmMByunR zR-eLcao}JX=>~`iTbjjEC550}s(bR{LS=PBuM>?CRmFx0m_yad9$?S=E_n9!a38?_M6a&mOcOO==Cm5OPPtS*tayy~{biN8(>pS0*Pxpfh*dA8D zyT=(uXz4C_L$(Hnd)h(*k3r`FF{ zmTypqVF6u!)w>M5N>-9E`j+Dj<>RQqk2z}SeOn#46B@(Ob*HCspkjg>T#n!|h~!Jb{!Md)c;=E9>K!i%N?<6tqMiw98# zR>eDVd3#gn2}Cx}>6G?JJtShC;*%Gv!04c0@f>i`IL0?sSeQuf0)aWC)Q&TCQdYaf zpG{=KY(&^;Jmb400liFuW`3~EX5Ku+<5$D;4rEFg2g~T`FHw6Lm?#uP(?HM?_)~+X z-3KuGG+QCa;wl&syD0WTh8SG{_i0&4aOnjrb!tf8T<<*QdZy%h z30T)VpG^sEMI34|n6f-#sCo4!>CKndmj3DID*IHEE`8=qMHPRLlL0wTl@(Q1HgK2Y zv-oCRp~3i>uI4;5PW#<-Tfy0UC&WH8qtIbFbnYr*kTE3l+@1K?Cg^KQcVKV5_e?^E z)nW^IfFP1wV!;0q*j&WtoUZLnYNfpm$N7>e75g(NbbbqN{xq85*y`7bG6FaI(=#K-u>qEx`FP`DObsobjy}>$z%d318*Oc7rMO?p9|WoJMX=%%haL^FkgB=!SLBlCcj0gt_>1BZP>{7xS{ z220rem2ni8ZD1FIzQ+s6?YO)L^>0&0+&a-(}g=A2kw$U zTA!I|bijvXF&ghCh6i 
zLeeA&j<>`M4)EeIndT20-v>QE1x#*VN8Z{xps@DuboXF%&7I%oE)51tq_7B{ zX>0C&Ty;f3K&c_6;1i)^M>kR#zcp|`c_HW{SKge1aZy!uFd`Z30?OC z;))j-Bo0Tq_1bGXKnC*#Xrt^mS%=}jz<5!^@SQT zUo?!KqnV905yOAtq%T0RbR+tgpnr$TvkKM#%10Q$vCvH5=Kz22mJgRB{eb9HKGx^M13#+VcMLJ1%f zbEq!%Jues>3sLMAj6nxnY760Q^ozwKk6-VJ#Gr{iJd=q|oLW3L%v&?z)G%o>Z>7{d zryka{NR!VeBw}2hlwG_O&>o%BK5QSqr-A)mAV=TJ-}y4A3H(_ba!D9D7CHSKRlju6 z+~H=U90<9ghA-(~z(AKTt8rY^T}oiI+D@5_zHME_qiYAP)T}5-I~dcFB1)a!%-6#U zUR{A7-@ZB^Sgw12s6R7Z4QioOKFC4f#kbA*OO$}0+#!LoIn49Gpzz28kh~BqXEU`T z-&oRO9UW`|M?198oP_`3(wMu=v8!a_Vxdz?Jz4!>bKa#UfQ=wGpX(iZy;`0vHvkG> zCNWjuGK!8tpa98&@Z5aHVIBL;Dq&_b&?iZu^Enw5IvIR&qr2J*{xN%7Sbc4x?0z2* zaLXhU9w3kCzJF-BHGZ|vq8@YhAk8C+zu|vKX5FhCtaKDF|C1=vHr#})41T$3w~@@c zhXtt8mpwI|_7@_8mBwWK?7Gr4#S@C$Ot3RFr5rhdEm+?v6fx9~1U{8R*e*#VCTapr zm0?q8a>vr;C>jFFoWXOZzC{N_zQ=Cx=VeWm6yvZLNfeiDnw@LuRVRCPck93@W zA21uZh1BC&0C%~N#s-O^#x(n;KSyZRNtJ^IW!Il)rz;_zXX{ceMs{gO_d*h~J@Z_> zpdqq79vG!|LEz`>L#%188>7kM9YJt81VIx1ibMXG!`utCSBp8hg>)&O!%kJ1HzVODx8=3Q zHr~3|S+<`mt)CpE%KhUhNT#K|G-Yh(%1rB^$F7IZq%UAO6=5iEGqh}IEUu+)>J<&b z_|B?B6R49e+|pqrl~n8ZBd4)Y#}(-WQW?c5>E3?2G^R8+&HShrcMM=shEzH5IgQ*G zWkqC{r_QfQa7@PMUoEg_0lFp+7w!^x7JJqtFGgrL(TTZdz_36ll4T5Ecn^Is(_|00 zCw4_VBblZPhqk9-4>*(fDW4{@b7&NAnkuC6$z)xW8b$6XlT%b0?R|Q}8Pvu}YO8}+ z#wzLO!kpQ&FuNSX#L_;!Y=@QOL#RPeGyJKGEkJ?74s8LsGG~ivQr!hIR4tByMhF}^ z=c)_(N!?@jCpJFBe?$aw!aB7(zGgUS_JeWSc}U!+Cv2~oMwuh%C@N@;b3H3%Py&is zW^&imhl(Lm#i#IBqAv2g^9Y+m*bUyI!s|>RyrV&aI(0Zl4&WY$1t-UN8yO9W3l~=p zYl|>M$L?F#+#9ZIj8HL&a;hq6VAMg<>2y4WfIi>LUc%q%N^#UG|0h|9qTcT_y|1nxhN7IvEPu|j&U zW!a*F_I7mN{sFY@HXj2kUo6e+|1iY)Z;s(Vg8)`v{a1e}wFuZ)+1h`7{{l_{8$%<5 zfAXwGaaJ;`vPeUhCk3c9KQyn+7C01Q@CRqJ70`@H3KTriFbr2fhsTj2%YIVUZ|Em^ zm3FL!d--}58ySskjypI=U$B2V#CbS)cX@w4g~Cx}gJhfbT^DKHE@PznCIL)5d6~7A z>@E9FQa9Syj0i!0T~0FmbY*mbB%4i9!d(0|UB~LfSzoe(o2n)ATC_ZVTXe`r=k~gt zO4IEE)L^gmJt~eFa>ISZ*knzpsLo|e%(J4j0^2W|lAsLfH6VN$)hWmpD2sBx0F{vN zyY4q!tplM#Taa*m1;4y`ueRDNy>;@fjRYg!nbmWny&?_~GHvb#L2kjTR%xeZ8n$E& zMM+>BjEZG@q3Fr$S%JdE&2u&6ny4~8>4_OJ^&#!VyCkLYisE`TG!`g>JzJhMoAak# zea4XjU1$l*on7~-Pvs&+x~rG=VHuXYja#Va66$2_EuyAyyI%4_8vd|g zf^ekVAb^w!IWrru2_Uq}#Bn|CzGNsy$<+FHkEV1%oGS^gCB$2z9acPuwp3U|{ws5> zSKQOnQ&;%BI3-^CAMT;oMJGFbni1j(E&JXO%*!E!9@Yek!L@Df*Bq$|r3Z`c>Y-kT43+46Md>L)wE1r_jt6U*%q&}Kl}?-tPHWCwhd&&fdrm{jc@0+>Jh>%W-*{J%rLzxy_r%)cT(M|&GfOC$S#F2}^L z$PIG|`C}6>Yg+G1{IHNTPJr*%tDgqsOf}*gN&HJyQqcAo9F!iJJv*++vVn~*QNF4_2uW=Iqo;mtM36$ z+O%jEr};_B=%;z^=B|{vCbz+v(q+Ov)>2}WL<#b@^$GHkSOR%Vs2lRu0L+=Fbv<+$EPj7<6SyqGhz! 
z@Zlb2X#?u2lNH^Ug&qGSZ53z;7|PNX|B>nYHz6qLSUDpTk%>yi2oDLt zIdK-`bSdlD0(zahcr~25z94n$_9&LYNR8?4>DCHM%Vd^hD4qjq2r0nAsnsgY3i)$* z&ZQj!F*rdEW=VE!Ub$6i4^tBYRVbo4lzN4!Fedv82tEWh00jr(KIyly>9un0(9-Za zdUFLEG^~Jt5jsSMA$3)9G5tJt@S|O?vt!=SmxgPLNIMd(Q3jT#&PdOg^Ar5G-5)|9 zrBwmR5sCpX5cKr=Xwe?J`9FC83vYrxlG>mMb22W)euHlT%3LA%KviS}J6Oc4g>m60 z!5p#5jD~C~2 zsN@dBTrw*)Yz|g_svD6wy!sQySB?cMemp{@`aQZ1V2)$H&B}H0z_|kvK zelm&+HyC6Vb(>8qm(>P2uKHu<$II81=2TFlk4o1pqYv_EE!a(9J9QLGI%*60(7 zzjR^#GG8kVL@s@T-WVoB8rHtF%XV{eDc2Y8icrF|-mO#Q8zM|jqclW47T}NT$(*;L zJ=W8bDHBhk7-@TJ?z!Po(+n{mz+f2`c$SKst>k`*@aDr-kUY9+KUcmel7A^a%9Mqz zzjthzV#-=pAaj{5ke+dr&(gIOa=Ph(9+++p7&~9lX<3NIeLN&)z@M@xDUYK?%O^Kk z=;(tnRd#_A=P((f-b6X97D1O3V;14Qm(6d`hvKvhb-7~KzI7S-Gs9;%RMsxX6EZb- zt$6c&O1?AJV4v;BW@2b>vbeZTA&e}8(nZb&vP7|e!Ef|;$;#?Rx5B^`o1Ud7q6OeA zb_GW!sm>mQG6kzu`{=$qMcN#O(u!+{d89T6WS+>c6gcSZMUG_3 z9*KT<7j}hY-Bw;hg^eSwD30543MyWEef%Z`#okMD(+*7#1z?A~D{gETU~y@Z7obg@ z?q{_BHt6**E{q)5kc^o)zp1RIt1Wq6g=DlcXj$mFFHmh$OGvk0-=J?yOi{CK?Hg}5 z#Oa*Qf=khWVdqv!IW8u>uk+tjhyX9)Ipo4QvrKX{Ud<@m)Y#a|-F*Y#0b`EWwDMrn8k+CV*cgT4asBW7ZK}99@cw zjd9Z(ZQo2O!Cddu-Y~lkEUTz3ZszYjsYu(%NJ|nkwrjj>?FGhE9y`xlE zpcG*53JrxfzF|xEOsnKrNx-5@HJUj|cW$fjKgMisJ1XY!F>O)Kb_w$*h>yOe96JsGz52Sqf6K$Xgz74WJgltg$YND@DoAY-ue_P1H*x=OJ_L zJ;!Nb(LO>EVYy?7l98qwCfUh#)Q-|Y4m#3deCA(C+;Ir+kUk7&Bu8k}v2ABEL^N9S zon>iNqijR1sx4hMA7UGrGIS>T5yClW87i^o;YFQS7km4CgwPRwvf*VqF^BV(F$1fM z$_DY#GuG9#l95ONRvu z=Lr}(D-D_`*EBqQvDYngR{$tm!|@&stLIVhZ^->jOq}SaFf6`xNRo6A;2Vr(m|}QO zPaDStwWf|!um_3^Qk9-WO=tG$<`jz*ZE&k{s#}|KL}FQxr_bN^gUM9Bk1@%FsHIGx zLSD|Ylqx~@W3h8*e8R9PPvCrggY?OLZT~tj8rmuS#iGCl%kI)ZEcCTT^c_DxO~+ho z0%Y#773(tp{fh}8JZ%!>4ll>&7%0fVH=2IL7GB&vH~g8;&b{c8a1Y+j2!Hl1lBU*6 z%&UM)WB(Ijw(1B-CujQ3WsF29?MH^J5{oKNKz~C$yrcsv{}}#OVRQ}zK?1lK&H3k} z_9wWI%+J6q|C-rZ*-Iez2f`5Vh$$wiOil^5PSiNp%#ALAT}|fBNC$c`gXVgEOM(1p z0SnC~QnIhdT#+#9@bMnMriN*#h&%kX?PWVd()01})#46Vc;cQ>=_P|{OAI^M!F5=ZX$I|GcL^{e^4zhz9)DM;UE(|NAqWZi7cZ#tCcdhtn zv@d24v`M)V_CO9Nl^`%d6^9O{KUuEI(ONE!RZtil0kZupA}WSg@RfYPxHZFB zHxzXaV+0zE2K2S`>+6afOjgun5P+UqJI;jK*LGncts zE;1Z0Qs3`iUuD0%px`pSN^KORUB>#1O-I-={bw44$tgl)zpyz#5`J*E!)EdG_2Bts1Idj zfaD^rRSGfp_%#U`Fhs_wC|wLaA&gea#uYQ-Dq9ySj$s5U>sy|zhUT@gB(2qibdDXO zj&)S)wx|hR&>&Wwa&`O=Rm%73H^l58jb+r5O4@lb>-SnZwL;%WE>jm;Yy3kP>SbS{ zFpl^M{H*U7g~E$a=sMP+^k&;IcHD?akbO$!LxpfXdgix~3AKtOzDf)^1s%^Z$W>FW z1`sCGX2B~?{Qd&Q5<>+Bqcx?VUAPkZSkxwbNDQMTq$FC+1T$x`lI1wA9Xnp`wcUQb zN7?Z;){vgk+(+5$oh@*DDwz@|xg4<=S=5%v4_i$BP!g7JLAX-GKZ5)622QwIXabi|qZ!}nr&-wNhx})dAp>}I|7~wqMVVWhyhDfxuGJ{c&cw3P z4b}y#!r`$iEPWa#&v$Mn$Di9{hfr0RJO1Iu$Xvu*_W5pAqtrH<^9sFe>-cy z&_uJs`Npuv%n_F2_?47N#NYR+|H`s6aml??y%$Cx*!rg6qcqP2a7LEWod4KrTWN`Q znCNhLob1Bm1F=Obhj|W?sHaQvlcPuJR5Vu&iJF3z5D{P1pmk5q%+ zh^lf}h6#M&rcGRvDQGhIVOb)xwRi!A>2Zhy(}v*}F9_!rL?}yAI|ZY9`mR#R2y-Jt z);?)lqE@!K?czqV_e+_u>3idmYxaEpuPR|hOAk0WdiHXQaJEyHXubN60kipj4Bnw_ z0=U$=?_t>D9+F3}okmLdO&NIt6;KpAJ%v3=%n{KgFtQp}M5~pf^opM7QWTX3<@Ce~ z#6BdZP4P>HvR(_Eh`#K8`EI~=5v})>!z&B8Q1-TKf#F^vBginSFQ`-9WH6_!;g5{r zk9=u9bY7c@A=|dK;HyDTu}fZ1bkX8ch;2zR%#j~7<6O5I4k!BePV21+9>)&~EXd1< z(%4d^vbm-KR18w@<#YGCs_ZF6c55oF;|4+YwQPHM4*NR}8F<|$CG9+}6vpVU3~9xr zUi_fOv*Bt~4}4b#l(OxVF5e(~olwPEWgD_Rrx|i^wQTL+mbqznsDhBS6*yzp-}nK+cri zWixO!%TN`t*~S%UHs~cgT-C+)b+gsKILu)MaWdAUng-4s>W;1X{8Hz{&hgPV-89zo z7HnC&#CV?P#b*{w$(od7{>Jr;&5Og;9vnSvoUOm}@ckJc<-zt>ZSc*vjNvWx8=?Zz zhUHu?ZI@LPDaeGhk$^2ZNhXyF5Y+GZ?d-64}zZW?cDEQ0&fJz zutHMV^y#oSt-sRhO(fd)r4r4)wj#a3{?SOC z;(Trt0c0g1@PBWl|6Sth|5qjccPy@fjg^&|<6kmjfMoGsMgAAW^bdYwhl+S?pfAcRX2s_%>9Be7h9Zc@_HhoAJclk>k8dBBvV0&%{cJEd&Bq$*-U_oEf~F(P z&>6|iEV@{Yns+7Lbx~$ctLnblG}nvtdyod 
zQG^f}1qGKJajh9(Nb%RyGIFOGBsHAn>dtKG1cR&RoS4_6q%(PqQmrYJ6A7+i@hF2j zV!6hnOOx_BP+(=b^`z465NnDGjIR)zQnpnj#zSf@+b@M7(<%DgE}pWKuG}dwRIa`@ z4w?3=1TC!oMw{^qBB^H(GZ6uw9nPiiP1;)z*nwK9nqw%UDxOh7tKx{0sHxU&a3;dDtNfTlhV?eL8aPH{wTdMY90RkeS7~4JQ-V_uXSt4&L96+mSA4)FMAQg-gvk); zJUN>^j8`nGUw*2(0QRL+6)cY#cTn@sHPH^-RR}DgZiNOV%oQ6WaThBSBiz~fU3NnA z`fR;Zo&e&hF$fIvSSogk9QbXN!afEnaunjPb=b_xjmij`RokY*c&BWKEnX8W3U`2A zg09inb3!U}BzNz@(ACHh1Zg*BcS!^jMt)T*69Ll1Fb2A@#=3%x(^E={S-`yQfRag& z9{DeKqq&2;q*;z!f=I~JmvfS!HBPBvF3eyODaMTIR0a6nq(d1Py3v##s_22tZ%8@@ zv3mEN!dBilyS60|pc##GXIE2qG1=(Amb48FexuY<$jfD7jvI?r zLtCdsWBDRizhAICeaH6eKBRo<%S?UO%WqvjDUDDv1 zvo~|6ByJZ65{o8QfUz3(fQT(>x^hNHwhPR?$OA7Qzyk5XNR6)UL<+?%-c343R9zgD z8qZPZW6s_$94YzyKA?O@f7ZO7jf}ZkJ6EAt>8}HcKm4R7W{puzog*zMqQYin-o&() z%5W@iQKcd~LUa7~Qk&=ail11bN3|Xdd>FWQu|za| ziOP({8j&hS^w^3Niy+ngg%|^m?aTEd0*rU`fW^k6ajf{~8~hC6QDGHh7PlQP?le9E zrGX9RYtj@?ONfp>4)yLbW0r-RFRz$&P6Ya~CT6i9-;O0_DdU7s`1&C5$2y6Wv;UHfdHZ z2q?B>WiOM$)_&=LUheR4nlM9dJT^qeMbvC;++$-U+a~(mZPQIxVM7Q%uK(u0d5gCw zC%5^bvTtbzWujr`f=iXHu#U}!ZLzzzYv#cD;nRxjnlEQ`OYIn&Hb zV9wRYr=-_AhNqD5X&C~@Do~-$?VBshMM77jtWzHyTrieoVHB}XPJ+^APL37uX%WIp zi;Sw=jVpTA-R7MSv(naRTgy)SW{_4;>-ob7fKzj6jNNFPFyo$!ZcQ+VjAYqVnsKp> zLJqwIP|Pq4$;L{hCzVIRWKp#>*{GU8en43$mn-jb^e*X)`KhwFax$%XhE23v->BQv zjh%yT|IWdYA=yR_nlkX4N5-G1F4r_4ksFsVuWFYGWrA|IlM#XECCP-Z;75e?f||%v z6q}u-pTi-AL>C*V=!%D;C87qbfa^`{xjG4;qnjxP`_Ud7pA{ z?aT=lq1vP3U^f)xfW;A+?F-!3y~|Eih4WB)pK}1LNw$o+o3VN;w|UU=G7A^S)mZwn z@7zP=oFP$0i+VH%G@~)}!px0Iy}NKg;cRm3qQ%l; zQYaUIPz@}{Ms&W3WnTK1Lu!EU_#{2D&4+1rN+<0=MFj9c$Od@J{yi-1Hr{ck`Lfph zf{_10VakovrrX@4N}m9@_kdSg){XA_Z4GX?KGe$v==}Twsg-8^5R#xxzTjTIJAnaa z*9?NwqT|fR#o&&V;3(?(I0DZGE;Gb>$A$^UP_`p0Bay#wfdplwi$W=Hr?-y}nlZNH z{xs|8MJy9{FtIG;R4W3DL&AM$e{d|5YUP;pOP2nINSJ%SlJ+ENi6E?~nm^%cpmnOq zmvawOi#xCV6JD4?#d@P6_4XYsTy}ESQ_O+(!gS)9J*G%i;er5Z%%YSjbpL`zlhzkf z*>y^;s!H<Cg5g!~PCEslhjVZD!qZcMbROEyOYdiF5EYfVW7ZQw47MN?ZaG{Uy zgH)d_nh;SjHa?;ctQy(G(BD3PM$cU>)AD9_FyhIy4?N?-P+RRP+>K(aZ2&A zXq9fC_Tv^L4;^d3?y|g1p>GVFU#m#aw}c;K89U3VNmv?u$R-FN4Ci&p&5hzx(?weD zZfjCv0Wy(2{7h@$9GO7PoWW-AxJM^;5Kn~zvs8ynV#pE2xO)t;ze9HMlN`+&QV4#d zEcfP(@(`+0AoKCx2r!K#j%zzA<@ag%Q6H%#C+kgXTSkU1d%8rxWyKNUUdC~Qi0~4B zIaLc1Z&yM6LQ5MHI)L<9TF)8)?*8Ldl$hb)rd5EJ=HGw( zlOJVephD~b$S?Z0$Nrr+RjIgYJ^LMjC%#!67bZ6s{bFBV0*O!x&s9PhGEjw5f}fnw zhTC$ja(7`l(i5m#)wfxk`wGYtMZY~XAaAN^Db9r3!`XQvV|mlZ`vZ7NAYShHnEGL# zA49xnzX~oY0LeE3pR6xv`%8oRj=pw6t zF4|@qiHqt9U{L3mfC4p}k*J!KMnJww=1P}1u^{6>WDylv2IyY9&z6>7Q1Q)(m-u~4 zjk;sVtqV{bZI)dsrGxT}I)I0m$W>sx5F!#MZfashEi^IadrLvr7Rj`U8>d9TBCpvO z-(OW8Ee@;fw-Ekd`IDWw0uX?)Nh-43tJ&`ek$6xMTWUb%T-0o z*{rRFAi;~O#B_`Xq%T%7<9i8KK7+8`#6vqgalNhYVdUG!$M?E6)sCMgb)!6WSnNE) z(4lj=nzExVWi!?mKhXVg7}0(u_kjX-DEj}w?Z&h=<_Da=zrP3o-~B0ir2C7^_fJum zqN$Okq9y9OJ#f_{5^;QrGnc~Gf+ckX<1w^Z8fvq`UGkYBwsL? 
zsW%g9!KCPZr#{P`X4FvuI)UpICE)e8NS14<))T@)qYK^-)$!{b<)vIlDAwL@9n z)N~PnA6zxbDL+rw>BiJpg0HGrszX7-NPD#;;iX`oMNwtmDZ+jprF6Q?LM>6S^&CR8 z9~bIk2m{rwMkNd3p_x|?3YMy1ys^wmIg?(6a*2oaZD>2Tc01S6&$X<_8FrgEOiExU zU|WWCO+2=Q)pvwn(ZBA`GRg`0EUU5 zO;M>nu0j0MKwmAuBev(Kw!&1r*4EEo8tO`GK=8!xldHMquuCO+JQsS~hrLj{I-rjo zJlK@1R7d0xWH#8yLcgi+zzw%q37ochVKbNVbo(QXl^pB1Dt4Zv60PQ3I6}I8;60)NgQqIT^Z8EMYB01Kp$2YIlObkF;JW z#auNiP}-yx4M_$j$kQ%rEoYQQY&Wlgw}p~AP_bZH^=CnBgx32Rbv_WX`jO8Fbe+9{ zGRk_{e7)>xYiX$t=H+A%hWDZbLlKf)a(9wK0k_o5j@^fmJJW%&f?LYh(oXCcns;o5 z7sSGaV=_xfXKp5IakW#yf&SG(uchGukGFtv9^!XVC|;mgt(K@?IYS|y?+_gJNa0G* z(oD%KdNXzgA`RNXr*em8 zd3za0un{C$Hv@{_8{o4&x7(_YoG-kr+igggem0OMbnU&z%pC?#zO-dx5>>Y$F~v(d zu*Lh5Ig4b zLx_ouO`&u_ON8+5X6>#vkePY6Ij_PmwrS|rRGCzl;#=jm>sMMCa zvepctU*$0=DRzOgxC6<p7_p{J?k_I{tM#U5%ZM0p7PgLaeDzy_G?(B&3VL@ zZVZ?$olw#o44&wlvsbvD*%%UeM3&#^o@>P`p=D}TjTi8DL>b%hsg7EW1Q-XH?LGsF z$(~Cq+BPP%{Q^PKj!mB2z`etM#H5v>v=p7a6I@l?&|OnOp0-=Iiwhp8G)Ydt9!k$L zT5M$&IFrH8KgGd}?~SvR4{~KF#dx-v@MOAzw=_}eyCFw8Hgl#tm+q~@ymQ%7L)_n- z09VP|PSOmCfvqGAeNUDv(nu#%_iK-ZB&9A3fJ$0>gfinbbVtca3fw&+fv@b(!<)7e zMC)=}`GSHK~>)_cE69}G_EgP<-6p&v7}u)@3f{ZO4L{y zWmr)hbq?-f@HmKPDsh<|UnJO}dAgKn-4@ar?pkur$mvkI`m05bJE0@2oAY7udPCjN zzT>(POoVRR7JA2O^@QCWmFM_Doja*5B-GUdw;wE`9?K>?I4yo#UFvYh93;`v5N&>q z+VUjJaw{K}J9a_C#i`J6OPnrT6oSeT zbMipLQ`4r&}3Q>vsHaOOU@Ep4J- z3WC^(fh7cxe|P~~A?|r=jPCB`&TWVXYO9uros*6<$hmxRk_~#OcvKS2>`Qh}alYJR z*d_?}1Oi2Q)*sL&9Akdy%e@lFZQYQO6|ENH0_+Hzj~;s$16m~R;X@R@$=%r5PbImN z!E)Hza#r#Pr||wd759*RDZ0=_DkK6af^3=3<-V6NAbx%cWfL>fu5tI^lqY46+30Y# ztiKu;$@A<5raQEo=%)T9oH5Al(tYQbGKK54_c@i$GSVWiUqDLz4k{_og2<4aw1ue` zq@sKwkF*RcOG#*++a`;_pld8^~f!*}g6@;R57HSJXGXx-n! zszT0?E-cW27sio9Yv(pqoQ3nbBHd3EO~hCBeHAFti=dGu<*T>n6t=9(7`@7VUNuEK zPNqWH*q!4#l`Y(59{Zs$X*prHJL^g&Kj1)b(hR{0jI8sBi zVC^N;@_>mx6F(TWRej}?WDwCKObd6aJpg=IhsHEuF+EVNp+jq(qc7iwLlckx$I&ez z-!C8n45LQ>F^vBA(fyau@Gp9yvZI;hKaT1@w?2`If8pVRl8e^p5|UmhUs2Fs6q16& z)bQ~Ukm7}rd`Ues8YSCm##mpcp2FWErV)s*!JqOYTpC%B&>^`}2RGI{CU_h!-`?)u z!GDreFqCjvA2n1+uhwEPdib6{L3@XDW#~N-pu8XnPDH>A7?W%Go`>~w3id#QR_;sq z6SiD5Qm|XB^thwrvC-Hhvyo_Bhv4JbeM`s@`C56Z)hzHsC1xS!J0mPACe(ZI_ihOP z0S}(Tc<(jyBX3zjv{D!mhJl#qUdkaRa?MqnhcVd9T-^cf6D+9a)B$#DhJ?>_b)V!B zo>HA|+@LvV&82RtpyL~o-~PYd3v=~=<0-?MJ-onL!_0yb%YTjwe)wX;q2n4fownfb z@53!!l%{lh1zm?v7p)#I<>hYy%}?EMFB+s!4TfPbi4Ad#ndIaPDZ{olPSU#x>nlOc zlEi>LTBgqBoCUHcTgKORF*c?Lt5C^ZiJ6O)s{Q0Iy|`PYZ6nl(I*s?E0d!oeAo*Yv z_4CJE1h1eO5*T#ro$Cpb*6^D7j_lX5a!0X<(LgTA)%TkryV z$s{FXbX$x8WSm2y?n!d|iOwR)H@4q7BxBKyMg9ZE?G-cE@*u6}2iW z9Ud1ghZ`L)r++Li<%IkRHDm3zZsHCdg7P05&0`OPnQ~8`;ChGJPd$3Vs~BMp$WCBV zg_nuj3<%-52di%h?g~nlvl(hMVO*LrB54N0Py^-z8Kp(VY7wCw;zi445Zn;0ASD?~ zMj8);jIUDzXRTj1)05$3P*@@`FoKE%0Y>>t33h!TLFxOwcI=|DtiUb(Aaz6ZD&9xj z_SBn*=_({p+_mDTf}s?UF-?hZ`9`u=`_1!P{PSkSGlgn2P1fmptG`d#vq)OWOeODr zLt`$$hgxQH_R-E3RVILF)+Q&kwF*8H>OgXUfJITB+rt$066S78Q{wJ7oCU@zoJ(<( zVp4?&?=>_x%(xD%n?~AApXae~PbW?}mUqL}1GMx{ zXtlAtFPtAB(IsF5KOkn=gcFg&JFYNfI!%+Uwn!~emuWx-XX=zL%JB;|>O_`jYozWK zH5<#{6PK8Xddi#g4L~hm9&dx^xnB}haChG7miLz)RSQKappzQ4H&kM<%+==@dTs6a zRWtJ`-u=cnimzAJyZKzR~VGU+is8IY`rk!23NV=3DUs}&SK?(H;OY>1+H|b zU3y{^`LHD4RpMkW-g9Qkwz*!&kM;t)y9p*riX`#F80Npn#lnF_3K}wnz^KEL)M1uv z)>M7^ed8mU98{iYJyoUuj$VmwkDH-%3Q`}uVTSvJZF1~Q*^qPX7#7Gj#D$ACFWSb= z`z)_g7l@m&+Re4D2mZ__=NeXeKUia0wbZ>|8-1MQ;U{Z$v?T&A**1iK_crN}W#W-# zA}_%W7_oUT-8(lTf0Ik`cBA92ePAZy|3C_!7G#33;tn;ynx|cXT85cPNq<8Y&B~K$ zzx>ciS?h1K?7O#+$!v#xNI5bYy};Ar!yhi@?{29|^1C`>7)I-!avu|V|eC{aYn zwe1Hhh^B6H+l1;uiz2KMiL>z0nfhwgI8Ti7WMr65fe?Yf>VYqe%Fb^%VPYk5JCZYb z5E{}ny@FVgpxou8S|mcjT-U8gC>ZH>jr$*rj)y^ElTQv8Puyxw5|np-${M`st}cLr z?U`@Lh*Z1&MRyW&ymU)N#5~0xHW2S9)p6n=( 
zaZev%=7n8QEr^Nv*4JKpfu^S8hb~W#V^wFGH$$QD_7UCv5B&%P!@-_mm|yJuQ?KB0 z2*Nhn7&WDti4`PzJJc(hc3}RTLEW1kxp*a^to0`3nR-E?Yd&`wyc}fCBXs?pKC}@#<%iBissVL8P@oke|1FT z;`&wP5u=V~$Z+&)+3Xc5oU52-!)pByN!ZPkvxr+^s1syk2tx7az?XZ+%odF$h<)w+*DGy!F&uWPB%EcR&CWV_rh*Jo!r8VHPCLg7+w=_cBWyv|*7Oqk$4&1gL(FCm zNoCzO`h{|2C4()dDiS_Vs6mpeu)?JVqYsZbVK_E9F(mWUtF#mXrS|P#sRt0ucmWT9 z7LNWO1~>l=G)DYgc>gD~u)jGY|5w&;X4ZiE1<0D+jsB^7{+}!p#(#ADen7`ZRt5l- zY0l}63|4)V8ow9(pnOTTcdWTalDwNAS8& z(ovTjZMY?+SGB`Z8{-_8`)=8fzhpn$-q?N$uNjg*Dq*$guit_So@*952SNzXsW8fb zd@Hm?My2Tk6mIX{uTzg1$twDY`NJm6Fe?^<*<&>!yvHVC6c34wlM2Qf3mCq$n@sHu zM+cYjPxvi%0I4%xVlajMm2@IpiYT{MPV3Tlsmqy`$DzM>>T^R~qn_T(09XZ*BmuKR zC@|~P^S9jj#{Q%0?4kYA8A&WzFins`hL9h4ZQYP<5kR2`LWp+pL8T%J!Le#+W%y zPbz1#Q_1MdDIDm1U|8pq96_DYsJEMP@DLHH3gF6L(zi40w2^h5y0`W8;M_|~u`o%` z)n3z{D6$8;1;wYzCyew#3*DBr((R;|uI2HeZ5Y(No_r*o3<<#q_t9%!a!%lxF% z&5gJR;p84z83MacVXW3!)PC1p?~Wl5*_bKjHNr_3c<|41YM=?5pL zyH5tM+EpU&J)=wfy%JV@I2v!09cVwJif!TwY0GXu3?H6&RDO8ifJcwKS)RxB#`INH zataY#69Xn$ z1rOv`5ACQ+TIZ$8O>`(WKr&+jv6)>-%z=Zd8G?}AGiN0$%2cCc>s`gr#7!c+L%%p^ z1bj#(NOb9)px<)m2a|UFAft=e>#26_aJPrIiO2bbZdf&l+_YVyzhl`IJMF3q}0rpT78$ zT>}tqfL{IiA2hT7e)0TQ5XIlU`v2eI&d|&OaH;&e$(`39abfZZA2JlwnvE)9XoxK( z%I^FNXiD`VL;}DneJEm{-0OHNfksWs(C9v4T>|aj=Y+hNu0=6Tnj*hbuLtuSj(IZO zj-NkfS8oFC(GMa}sApA%3a=x;T+jHHR<5Y@&j#>po33ratTUzMrnhp9PQ^;t&<7k= zLiy5h_AIp25?YEjjolX>!dRvfHjT42wP@J5%geS&k+ztnre~#A97P$Y7EP_~B?MT5 zYR4E3h0r^a&`hGT_R7^pmIDitRWC+@1WA=O_q(*1+t!tJQrg7M_D~FKj5j{GE+c?u zdsdXB>-VoxKzod%Q_3OSe|u!SW7%=5vSv4*E>m%;qs)ya$Gp3ufGwI49<3^CpJ+&v z6X$L~B?yi28x_0TLVCe`nFJEY$ay)pTUDYDQTwWD7pd3k1uQN8;)5t}9a|iL2#W4` z$ykM)oD*Gko?6xUZO&w&v!p_ik7SLQ_LNG}0tTet;d{b7&011yABOLG+6BE-GM&0fCu8*V{B&O09GHG)OZaJR%mihr8+8B z5NiT4ciuWDg|8oG0A>P-`|>TA3l4u`1e;odpYY5kYuaM~nqtusj4Wc$&o0dR3OgT} zW0?3V{|_PRn4oPR?O$(*Ji*-^iH|C!u$_EkHmin85AXw z7ZOE<5$gTaYAeO~`@#1KQRwGi^2G8si_YcH8kr37!QPay5F<#v@BH~jTuoLnbQql3 zxaznZ4mKPv41C@{9zcE*X8{seMqDWM`+tE7ewHoOKI_QO>u>j&7Y+@};qZOe)x@t0ZqiH`5B zwhOGd-Q~l?$r9F2jK@<)_Y&zk_J{?4Fz6QMZTHETN9jSfV%tO&VlJ6Gj+84}%v>SEjvP^Np_%}Voj(R zOo*XdP7GDEJZx>*Y+;_w(bFXN%pI=Um8ugak4taM8s^b)re~vQpK+||=v7Eq;aVte zAPHNcdbonKaVANPr~2hbBI=5yWQc+mrV2vO&kOmN=su^?8JwaH zRxELi8mypBqUj@(j8~NQ$0K{w3)y&3k=r4cyLKKSoA%Ey$xN?eXS*+|1;^jK!-GH~ ze27tcg{`RAuaT>VWg5-M`0I46CUJCcm=hAVWRN*UX;)37j0QRhVmbN}icmR&vWse? z7a#&(T@j|Dwh8g3z1e>U98G|pGWi~Ha1RI&unS$fQS&!h5j_6Mek<;u)tU!Xk=cJ} z75C`<09bpYhK00MrKWUbQR5N}zZ7<4qqWcaX_a!hQ>oAV zW{34=hYc8*D>dPVdh_GD3t(@Q;W6Pd(GgAeaT+g+B;U{+H9UZ(YL zBl~b%n{;u}oV7Gp{`z=Yn`qaCHj`4prNMe>UPfm`X|+=Abh+3bDn^X*m?lG6Y4mIU zS+LudzBY=Y7{E^=9W4_3GD@B5-k4}9<>qAG7><*0NWcZKUin~thNBs&Zx#5 zT+fCO>DlNs6gY)C)@cv^x>E4fN&2-OB#o^g(4+l4RE>U3F}=tpRAQyIJ}s4{g6qKsp`y?t2M;Y z_SZ&Dl2~PwoWta=4J)Hnx_PqUNmcqgU`2U1P)x_h zY@ogF4!^1;qn*U4+DbwWmTR4of1{-^#tzZ8rl3i*lE_&`5KHL$h=mVUe%;hm=2cM= zm*c{`)Ymx3Vbq}YKp$47Ord3DB~5}M^YdzTG1!a2TLQJIQy_VBRCN#xX1@L^wAJFL zvQiq>PMc<9sy4nilJY*F@NQh>w!bS)oS*YGC}nPi)_xXv_!g>2qVBy7F%~$urPi@e zJA&kttS~Ez3d55Y*JN)RNrJ^5nPI$IQdKP7Op>uez%tQYTdz}=w4S!CuzU{;h>SXP zLl?((8vRl)jVnqGO!%w8qG@f;9o@d6)c&pzJ>jMy3O_WXRPINZ) z{W8)0a+sR0qf@-Z;xsnkA8#%OG)~MAyEL~oo9OC?YZ`}iscW8@MN?cEcbD*UBk{~m zzPxsv%SaZpMEhZ`r>*9`+t#H|xD7VX!bleJJeEf|z|n}Kq`!x@wVR+-{sASH)FW&t z`uK|DSp+*rO7M&)Ov z57f@5OpiKqdziY0-Lw{E%ngpceC{+?PyI}YSE9Y-xT3Wg{L3+$)afW{4r_(E*pc^T z?EBE(7uL(H_jIM=g|90jrNA8Fzt-(0KeVA-DjcB|?c$fILb3#_>t-#E?}rM&>K0P) zrrH=zVHsN@uoU-f>Srq*u{Ye>lxQP8{C%67sIBwae1ntDj{R$beo_jMCU9Ex`gx_& zFqvxP*;U*dyN82VbR?QhED zLqy=NY||Rv$c32Q!f16H#=ai&z@43g%+SD(*k5!E)gZsrCT;}VlVb*n3k0tk$!IW! 
zU~m`HLRzl`AsoK8X1y^pxR&7{wHD_f*DSNI+Jv;9`jyl&|87-iQ z2c}uDaHh$@uJD!c2<>3p0fR$|YJ;%AJ`XBSB9AK1Jdan@Gx7oJoIQ;xO=TsLlbb5= zd#|`TrEd>ErH9OoBv|*V-VfY_SlLsWeZI18pt+cqRKZAyTzCF75gE{25}$+|BSNkN zF$j4jzoRAsyl*lPxhOsf5T3x(RQz0lD>xytFVj30x`bT2V!;Y8APTF|QQ0QI)13TV zVQFyR4&RP2>#sY*1%0>8Zr0@%Sn1Stk75(8K4$YXlG>7xMxRCH?9et@%_Dl5D@-AG zal^=^)k6*93zah)kW|$9Cpz#95(XCte2nay9wa#Kpcb-0d|eL^dQ~$xWS~?~EFKDp zA1y`tx{Yh=x=zFSgHPvXhfB+{`EsgDA%FH}%j~fe*Bf1ChgeOmp!%>2Ganb{q5IE6 z_sPu8x5r6eupgMOQ3B_sI?T)`P953EX6oL##!0Ym*$E1fW&II~@{o4l`<0cnHcezH)bT-`=_^o+)s(SDt-yMniJzUn#f^q4M)oAQb`C2H zY9TAk*ANeg6CWxHfj&{~0#+F3$OYOt#$0LP;bP#3m0~81T6uP}aiMimMq|1V`Qb5) zu$+XfQGJb^_FRj!(rS#-1uW{L=IP%2id29OrF?>izf^T+TOF<4S7&sT8gOQXS`>ElOvpOu)^g2ZV0kg`9;( zK*7)zHeM^a6_%-Iya|}NT$PXYv7^k|Kk67(u!mcqx(D~uWGHGtOk|Uy;4Ps}vv!ze zYZl3_9;?pHN|?)Sw!pFAXW1)DbI|h2Ud?Pm;KozhIeRR+No$ekT2YH^b>iP8?V#mm z@DlxDy|gXp=bb?!JbpCLJ%eT2H2ZvbguMb-*XT2b0@b#5+Xf;xolV%CrJisW{UI!G zNrZq9JLwfOXw|CA_uK6Ti)|DbJchFkMUe}i^)xKDh$`vUYO>_u`)og1z5vDw5KsfZ z7lfnmShFmUG^L^%8Spq{48i`yO}>I^6*sken}rz9^^n@8JIp0rmGJCH*}QOd9`Z9M z_npjI(UPCiKBKjJKk4cDhGgv*-5-tSTii`jma zEp3r4#V1L|IV&J*5gOhHIsnS(IX}}pu|C|VBwWom%$>uh?e2}UoBI! z8jU!Q_RewQOX>aXfShY_&9y=B-^Tksw2j-xgwTV?3P~yXV^6Ei#^#C-bJe*saU1ye z)rM`fo6)?o51|zCw7ch}?XC8x(jjXJO*1kn8uWCMH+Nl@-1n^B9)m-9sRxDjn>N1^ zS~r@|p`yaq(jqvz2QRWWCa&HKDaa&r{H!G!8|SPClM2BBHgnoLJnSKT@oFKD5|zt} zzQ+zf=wfX`N!mW(h`|a-DI5h&>`k@2)%l%_?M@H!o`Pb^YbU~S2k*wBudAt&j`y3d zYJ6SKC>JTHsVC=jLt%8<9nUyyqd{~jm`Dk$*Uy-b8D6xb&q!;U6QhpJ81+XK)o)@* zm$&jSjux(}SFUI)nd#?b6w6c8I_%@>8te=>JnMP8U-mfJC?Qe@mUEJhE<#eH8+FA; zI$}ln++s+rJQ(=4&$Fy3cyKHgO+;&-h6mJ68<8Py9Rt{Wa!h#JSz*pxM5OaGiZ`t&fR*GrmM=J(mixE)SL4Yv2nPTX=u<+TZDpr;Ezg8Mfn z&9CObc{>_8`*<8}D3}@$E#mPt|9C=@TMDToLnqL2Jy^7_6oUtF5<5uD*cHVl4-|$J z)$y>;DK?^Q%CXDQj>5rS9^Zwg)@weG)$*KO$Gc^v_6GGx0A0rzn)cfv1n`uI>R_eE zS6cBKND{E=rh~shW5e2=4gWd!-M(BpNYj%aLT+joDWI z+{bP((KO?^iUc(6T4`_z^C1%S{y!H_%n14b{i zo8bfKH#oRl&IJ{3OLsIs_zB%nKXV;aCuk=I=UE8 z_DO7Rn2NmmWPJLl_)n;P*FT@h`7aU(oa3Xp_pcEk&no@ftVsgy7$bF)CC`FQW#UEymHq~B-YO9I`mTMasaE^>9K1B?mBc=>1v18n+@9TfYUqb zZGb)MV+gVX@ePDMC$9=F7_@kW90iddozmneqs^T6Lm*moh7Kx1UmU#h?Se`gf0*k< zGE2fn;KJ3U21gxLPNoSEE0NuvCcy3a=)1XQae~A^w??zNvy{1Nxrq3Uk!0KS$wQ;t z*;Mzz->A4a3h#8y{spKTHUOZLRGbKHq;%4ON`XtsMXE3K|)I>Q?{jxBoN-y_LQz&B~*` z)6)emnB@>cjuaxT_yj#Q{9NY zSDf=`S~=)}WE!x?BW%8-&f%jbsPghmVP#MeTiCsQ&Ee|%;FU07yVS~Bu!s~T#UyT- z9+OsRwZ>?R_9UTDW_1zL5471GvWqORd4ryGE3G}+kNebm2)wrz`uDgEbKdtrWfsFGWY*V4RIo2DoT)?U1Bjgq1>KJfW*i z!osTDbCSIA0OQs|s|u*gNbQf!8Ll%0yPs|^$y~6bsUz1J^-;7&a!cW!oz18s$|6;R z!EWDZsHLbf;`+7K|XnjY0-_$%}XS$(;CH9DxWtf=y zs7kr(29UfwkVEB~st8Wrx{rxJFeT89_M_~cK&S%o0)q!0?{I&kqzS8;$sEEndi|)r zOd+&Bfn=9EY$pymrnF`DKOzem!x;1B0TCG#Lgf{vWQr?UJ0mJ|a16$r@E+KCFUfbK zY(4U1&p5$<+j08=oFpz4$Bwij=JT&U_y$r6bL1y*xBs7l^q*z|EPn<4U+p1mr$O2)QiXzMe6qprJ<8EgC`SBr@SY;2nic8^^$7MIeoH9&rPg~;n?ueC5sWAiy? 
zl3RPnNY@A~3xvj-!01VmT^o2~t+hhDgp7LbMQFavM-U@1bqi7yF?3K4x4AogBNlSv zZSNjtOI@$;< zgn=WGXfKC2h9*tjv*GZ$9HlO_ljeir#XCJ&|JuQWa1uz@uTB)H< zBJ=?%xjDuk7B+CgVSz4WlB!05qwwcUV_k#}%iDqXFjlx|U~f{p#7xgJ@2G+m%xiGTzZG82WW=XZPbeqgF0(2)Xlf{IC6oJYodH8qsV6}!e32zs zFsu{PVSQlEVTcy)U}fp#H?)CWRbgSb?XehWqXDm?AJFQFKCVXbRH7~tQfQ|QX^~jM z312i<{ljv;Gi=~R?`zh~c@(lR!1SoAs!i8v*U+KG3@)}jJ}Y60A5QSSC2v~p=;`S4 zVv9AE}im&s^NdWp7$hLX?d#uhv-@$jZKJ^KB)BZ}SjD#F%LEPg4M zx7XM=S1Sw+{t%LYMc0^OzP`wIHhZSwHB)kU1dHT+(4#uW5mMfzKTi+OU#I7?W zL623tu;~BNSt^hY%yN#fCC1cF)2t*kQ^ISgZGLI^*>${rSWoZ#LTeD5GZlfnA3FZo zbu4mK-lNEpmmzvnm4muq=)*_g?466l0B4>Wh27mBp%=EHyBXehqgC$e)y~-=j6*m% zg*}7sz31In-o4PnoZP=wI&Kc`O_tOWt%)Cs%l$xuQDP(4-;WjZ(^n1UE7CM}-K~z{ zqNiidu*wh7Vt^LjHgB1;4%4-OMe$;U{K=d_sfHc>+wcOQ<+5I=x+i;Ln7SH;+;NmZ zi)&id(6C7Gl}!?sB@$#MEs~gT!VsMzf<9))X2up~e#csPHu9bj-|qaLvIgZwo!k0$ z{~Mm^gzGox`RUj^PvRE`53T1Mc_j`J0k@g+S$#mnw(1k(+Q>F(fDixl!F$g510i{e zsa=BrBY%O=D~?g?TSx|NMy>vC%JvP<`~&3*c6V0Q6_^SOd{$^(oimPZ&euo=ra?P4 z$J~{pY=*V@qtK8`iW(6D^aHFjm%Dtl`2uw9oMN{9=a)5vNhlMrdEk*IamH35#X&cI z8a{P}Fr{vbOOzOW{;(guGwfL)+eH(o&z7{9;97v@k9}IH2rEb z^%{GaX5bEx_0-vR!a_E@`vU|(48XVsm2Bmc>B#H>Fd!jLY0Y>5QsB!-?2o3 z0}q^AY$iii`O*Hiipm;g4zij_o19a&M!x-yJL80)$+{{MuS&P*P=bqu#w$7z)f1>p zo#f5LqCNJ&d}H8Bxb%a`cncnTNwT~|=ljH;b|w0XU<3-E69TpWaW4L6C-!gd;lG`W zrSwfK_5Yj^e736`oc{Oj`)gS6Z%$6+-}5L#SaV?pdHRBKKT@F-RUw}I(m?1$XrMsF zn-2SiNKIGm^2PG4GJ!Qx{NG>rpqVxTgG&XwlU!75P3aHEUZ3A~DSkMAg|eX-cA|96 zRYy+J340@%j(t`>i9%Wl(V-tImMePG!y3K+YKXW6$+U^>qUbO1wZ(h`5h34D6U;Xo6l zofE8LpBny-zfLS28Xy0gSHF7WH$?7ARDxF~-edm8s5U;2)`a6E^1*~i`!U|gR2`3& z;Jk4cZcZI%?rLcxMVxu$IbpK-@(IR~bn1~hxSVInH1SvNRA@MuhvEoJeDtlRrYJJi zdDz^9DND9Zh*5Y>gt-z?Xj{+6-)~Ae0UuR7V+o6 z#8A#r^jwdq#jKsXqB_lX@V>#5U=K{t?3|ou=WIz&>(wd1A2bV{7Vb|?!7u0{ zW7Et}|I*4D>tca?{p2y`{?91tpZ5d(|9L#Fo_1O5f4(-@r<)Te4RG zH3P%Y(6W9$+zlxRj7CV&6GA5pn@?v`zR!P-!jE!`(shxa#-Pr~mwa8#-#ZI|H2 zTN9U!!^IbfANUWDk$%J0zbc$_A%l|_#uD3;7tD)fJmO`g*~5*Ap$3)qRi_Moku%F> z93jzjO>4;>Fnd*-WDJe~Xh7d;&B!sBrH#1YASOj$?JC8U|d;y|{N|vzzyj^unv7z#C}JIe+@+HS+fa z=n9nv>U?)^OBV_Im!;g3xn?-}=Rf}b`S^SI_s=4Z-@MJoOW)Sl1x&M1l~@AZXQD^zq1vXO_y&7p2mlRjH}YOmz2@;Wq% z5qI_z5GfE%_YrJGGf(Kebc^%5HJ@4ifol&^pYC5?KDDxz42Bv4IPUr?7{Gehg1b8& zjO&oxwHyG_z40A4_lAIDhy5Q!^(anI>;$1xhc^O&u>{3$2Q1(Z{`!_(2oP(LGSIFW zrOG?h^1;>A&OxXeZ3itMs%}goSs^nf*jPBV;+j=4#wonyuIA}EQ(QX~Yl&L-YYp(; z^JOe!SG$H!u{r7$(hikmzqA=*6y4H7ntRo6qfiZpW|xw+kqVjQg0LlY*`URZV+VnS zEB0xwnekn|{hs)?T*YMSrcJ@2f?hg0Mfsufq;ePnJx^2qL8H}Z4d8f<}YLK?!?-?EcTknH+o||BMe`{k9Xky z6E)hbT~+=d! 
zy8>#Zek&WmRb*WKB3=Jw7@)35>R!I*eT?u2j7;!rv6B0Qk&FM6CjJ?;T7L!YUocWb z>i?D~{`^ka*3R6}@&9Bxo9WvaSs4qPnOhkNf4(<0{!g`5ploT2B8JL~f@^El;GV)G zPGv+TWzj0Zmcw)URR9)9sS!@{w1JQ>VsY1P*kmy+vmCTDtn^bMqJ%%Yv-nBT-gH56 zl{A;~Ow#etp5QRc)AarIrIG*3uPr2Ddm7mS1~z|r#thG!{gw#Vz_mR^FzS#z(YeTE zlHH&wFKe`*#a-ZDVG7bhicc}Yv;>d+&9!c=G_Vr3 zJu2`3GisG8tz$t^fX|!qIQXbaiJ>K*P|;{m#4uCF&|8dgm*QedxMiUO^PVvZMNRNSS?KY`=Da^nmx9pdJQ}x7t*>qYk_b-)N#VAajbnzRgd}5Ik zj-sX^?+jz_X+($cCrzdySh-K&_%W(PdHZmG9AFD#-!%5m!yErUVDg{${Qsp6{7(h= zhdk!rEfmMkdXSXq@6RlM-ujuUr%E^~s2}S$OBcvs)CJ{QE+iCIvtP|(0sDYqYk z7N9lNaf=K~!>!uvoS7$*&4{Nhb%#%GfO@aZY=QD;__=@^UycsmuhZOyo8UA{pucbuoUJYJ+w2pKu8ix(~c)8NQlymX5%F=`t%jNU!w zl_5OTU_&il;okWK5)KM$00i#mmJ)anvp^wyuZjUI4XZv!5*aMk>^D^_lqZsip)I9% zKKIsK9(8~U83>!`Ftdg^#P9&C3$qP#19G>aSD`*G3T*P}aV3p-Y^EcO(rBuA=1pTo zB3CMBBo>;ITMr^r6E9YZs?C5iG;Fm;#V}0zb5g809x++WR})rX_mL^j2;<>>?zw#< zW79FZ9K9&$g067bR5Ot-X}#b&;b9g@9<6N{%k*!OlC*l&aJO!vblF+e&^55pxkdXD zZ%&l0Uf%=YPqs;>ck#$GIop?L7HRyP=qhmRXk$y_NGpz?uOhRKb&o1s>0JMx&Rp59X^?xY5imvY*b(aOEZvBEnoA z4hJug#k~2}rF{wgymImjF1X`hH`-W)I2~4F$;{=bXERa~Wb0TuJHRr^9tBgU$8j0% zch)(rm4rQB9vpDb18b*CSwwOmIak&4CRlCNSW#2%SCxT=DO7k{?}juE(R1hT8I^kR7aKEVKP7TEc5fw)A9lzvp87cu1; z@vs@}n#1T7X)B~cenruOL!w?TH!Ar^mv#r@Hj50YCrWNEAkwR9* zh3FelfxLiFR}x1cu4Xx-vR92H=`M|G96Y9)-Lm_UZB3wA=1KX=i}-=}X5@aY0wXT9cAp#&LoSfTwx}6 zh`)ul-?~leDjHWM*9qgy{y)mzDLB)1TiZ^@w%M_5+qOEkosMnWwrzE6+fK(eI{EW* z&iSsjX8qqrDpgP2c`6(CxUPY79A}SQTV3w=W(2w-TwfPb*2{g3irko2t3u9w|wnHAG$5JTK6arOhn?_E<*qNrO?*d)+q1DOjAW;UT~vkU3;gX z%!u-Q5((BX8ul{g@)#bc6*zL1K-~%&i`MU+t&!arZpT=kd zgggN_RthIj~@qA<$GA^Nq-w-r>T zTHl$Oq~JjyE#X2J2A5ETvowzp|gMx$btIzx?l2>RtPBehyO zN{^m3=0f}YQZ`ExRi23iu&IHS`30a)_X}zy6k)fbntz;ZoSoyiY?*5BvNmR%=?IrAi^+HfBm2-Qf2$HFh(v- zLqmFBe*an?s&hlR(BgA&Yw*Jmt5~^;N@u|0@T@DOt9j;Pwqng0gL70kDZYt4DvBFf zD0SO; z0%J);Uu{(C5C&;Y%1s7n;BsD|KDMTBt>day;!#dbNJ@~a4p#8WX%6}zaet!vWr~Un zx%-e%axM~VPpXb+4KsL!41x&j6-be@4l`uOt@#mGAtqP`G&ztxpzS z2SYA8R5Z;h#y^~94jk2a`q`*8h)0{9bl7fx=JzgEu8t=S5c=_|9e7`r8s2`ov7S@i zb3* zDNEU==eebxoYe*uCOsUQVy^9rkGr7?PkQ^g3=AAf7DMcDbUpZ6p{>9JPTA=&&PUl7 zJ8s3)1r`|nC=Jx^G0mos;Brk(eYxtjOPlVk|Y9612n$D?nUdq ze(tRr%F}&`_a8Ng0#*1?z?n72{Zs8La6Fqdd_0FpNUW&s2TL7+;x=PMs%XkDI{g@a zI(;r1mJdUI+;9{>Nyxc*r(vH#DK>2s6L*v#aAE1UjSmH(;q&HS_NjF9j4N z;G5K-s9-Hh;V9Y|13a`Ml>BmHCm6*lqIn`Ebz_78?U{%w zQZ`tIp2sAdA=y1wA40Rhbv~4QFhMl2fHcb|4;VIs8qy#%2}c~jD$Z;Q4G zTkGT6@oUy0QEgx}Itdm7vh1vvW(o5qOCRn;ZMlS_&{C~!MvsOWO%-brHqh2#WwI?G zsa3)?Jq!zH)Ev`|_gbxO(5mzBX_a1f4~>3xk3n1g>K`j!BpzDLbOUs` zLXTYvZ^Dc+=+>=*w(Uw!^6=BFGa9GGt5OsJG;Q0c+mUKzCbk0MNFKFHEp|T8|4S2KNW?Ddr0;jON%FL zT6*}%iQ5D=)zT+`i82p{Xl!ja(3h;>EO5J3M;wBh;WmsbdbJa~<1dxwH6*X=#i7?} z6Jt})4z^9v-FO&#?{T6BQC{R8m!vc5QrPGe;vWayEy^_Pc4^D>S@xKU(qc`DhP#A_ zHekynnCGGw!RZ8*SRas1?<*XE$qRH;f_GxXGHmo2*Iotee3L!Gu0@;tJKI z+3p^ui-i8iV5JTOkDmbvIm(wL+u)1h8>W`Xl?pKwr}5Yi>!7S5MG7~Y;!;jSdQ&Jf&EhJxw#z3x$2!xj<3=?T4{i|dz?4X#^`h6g!R>i`^?we z8h0v0)tPhmw7*5~FV{o|)z5lq;yZO0xMt`{2|J4y8`qu-0eI%%F z@mK5aQ@d|7M~BRP@m)e)9Rn+{2N2jaSZD>S#0U`+8U+%%@oqzUW6|QLP^drf56A#u zM7R!u>wI`tYYHam#IFix7F&Dnm+W2dpM5(x0QZ(5xrfIIJvRmM&Z!tHreNq+AaAK|s912~#~^};g6d7DY=#;%=&3^whQrFQk+T{>4%k&5 z%%O<}0F#K@Zy;37up$m7)gcNXAjs5eWQ_Z+@Gf}hESM9U^xjjNxT8FkYtV47 z4j90OYi=Z#6zf=a->pRzT6^T!GM#8br}^y-X#_yuCk8UiyYVcr?>pxA^P=>qi&rwbtl&bg zy$0niBr6U$Vb8{Tc+W5`n2g?g#Cg9+9HsIaFKZx_g=i)m>nzJfh&-Mhd{6I#wBCai zEG$RH=l+!N3sq1TT&A-m;vvZhGnF1t&!*`mzq`%q-9g(oBp1elPnU_P+@MgMLQQ(I zJh<6EmVQu7MEta@dIF>?IJHRbTvi`=QK>98h*{Jv=Si7OXJa>JL-DJ&7L z#W(LQL=K+*37R)rXonrtMOWA1tTaYBC2_(Rga=||YoZ&plZrQBt713DX6?I!@3yR! 
zIo5pG<^6ttdI^A9Go`DvCk5(37{S&zo)1RJVC~SlH}>g-q}Gy$F-7W&_Y8fQ>_>*I z*EUY4KNV|j$7~(57#J`%9sSTMC6@oQioT{Ay|-!0@>NE4Fp_F})|I@;=$Qa@LQ1W* z;r`pITcBfjaq&@TW6n-TsJtnMBRaIfy#a(igOqzKP}H~(rgKZZmiM^7V=^lx^x>N| z>r0m2>;=@h(MAZZN`kwAXadd%R)?8cOLvZyb@f7rQ@CC)*0ze2)Q%HQd`pQ-?zX0S z@#I04j;b%SK&%(_F%1ny)#}AMdAxJqb~g%n?zbK+&|E3T6jGR3y1pFmc2g(7C5bjy z&~HdKyVOASErYhlg+7>@BzP#)`Yq49n_r^{!oCDfL2zbLDXVa_cbVET?BJ+@T5pr| z?W7i7frw?nC(eDx5J`TyfABxgOeIg8OVrhe{jAhSp_e|Wy)ZXT&}|0eUVD5OR^V2gHN%Yin>VE320#eDLT?mEo@y=N?fJtpO1aV_Jb0d%-<0e0eh1|= z9C1J6z0M5rxL{Q>N~z0ujMawG?3HB{Cu=^_D{a+_3$>F%pPw|GNbDKa4*jtgiJ2G} zG&LN!n$*s^zB?okaL0aetTyNw)Ef_Sus&@CUH?F75-=JcO3$pxf?fp^woy%5dgp(~ zR*BiAbfCbr?Pt8*dNOmAp@znfOqy$Ib#wRCT8{C(Sjb{ozvcBu{W$!ZHkD?S`5G8pjIwrS1;OP9xK)p~r>x6c38Pb2+Hm0#r9NYj0et+O--8#&M9rT7@P6hn zK7r#QeOS}{g20_6&VGn&5k)4pN*eXnZ3-?dR(rliK6!&sxLTOr12V$L>ZbVF9(QY>F~LQ*wk*sHC*kW?&}D`x<0|$J`r8GT{KRD7`FSd= zg8loL>7UbM+P_bRzhzSzHZ~SUMz%)wzqjXwosFy=RrP*XQvFc?7pj@@VX50!Emf+_ zadg9g!-JkB5%cZFfGq3krkFuQ3>c01U_{h56KhP1fQeQ1JWNWF)URZubFPfkinAR4 zoQ$X+WIQ$eUKejj+AdD=*xFj(dilCupe>0VG-!MU=TfcXU9Ih@d|y^}e>~-Vwg&)D zfC|`jY>A&NH)TPd*gaCda#mJWRoa?*mX?I>Dl>895SFDT?~wT%5T^jHhO{E3ph;q@ zudgg#MZxbei8gCagHA3@0IKWbBUt!KOO{%gkVd$@(yn(Hfe8AT;Ynp`yIXXVfha|u zda&J)HODXpB-kRKvY5merag=gBT132Ml9C~;yGfesO+V8u+HVH2PRidEq13V$vuLZFws>aV3w3t++7LHw6A}t6_yjWG7w7BcAwgRmH_qG|`c3eka z^I%Ir(FT}v*&mKNNQtBpK-+1h?HOC&=fcV(Q?dX1MHo-FZ-frR|jIQ||Ob@0aS8 zLM}e;F(H_e07~MDaL&Z|DeFyHRe3AcX69E)T=pBWJ3YV>GSO9&{b1$-CN}P3A|*^d zV#Z8bL&UbEi3}F)*NI<-eolO}8Tqg$qGP9&8o270o%dj(7=sk)lLqeykD*0qwBZ)2 z2^k{(&B#5KugEOY_z3}10qN?9$GlW4ztYKH!Hsw$9uXR`JPMo{DO=A#3;9u)=K6&U z6d$ow_JH0{3NfP*S{&IlanleZ%ZZt_)ttKx>B)+Y#_6VsIT&HPB7F0V;?%*$k@@Q# zjeUL|jRQ;&nhK5>Vb{n3YvnTnIc82mPrF}nF(;4x#D=HMLv%FGQQ}~fL)ZGIFq4=7 z)Ie}Tl8HfPFu`YZ)OpQY1j*q80@bmU&YN(+QA108~IHeuv3&QZ7ua6ws;!6Wb9O;iVR?33n4Vr*Q(GbSI`9Pa$zB} z5rVq9977|e3x4b}Cn(h;gwN!(U5th4WeZHl&_8+Sm+Ab5>e0!`XASK7gb4VyKd`eB zia_g|2EhogaXYp>2J)ov1Zlp+$A}N%8yH3qN4$ zcZ113L1`Ry`ytkqZImZtrs@2r+p9{oxZ6M%p4hqFlZL7zb3>5I+@F#9Mop|?LgW{@ zw_Pw@8=!c8m)&YK#*M(#VIta4QMsYldAy=H0U30tUGd$ep+mb7woleyx}dsDn&VXupY(m;tS9w4t4E$$r(E zHXDUSWzFrt3&5+(ec_D=HN>H30IM@-remPdBz^a@xC(xF(!I4vVRCbww zH`En@=c6X@=TM=T`{4QdK4mf6 zz@3sD?uR520`8Og6Sred@*=9#yAzm4g)aK9FK_r(nC~HcdxXlnJF-^>sNtq8@jda_ zSL_<_@5t}adzX_cxLG|@@s|AAR4T(IK<|L>aenWR^&=098*E^2(g@ehPdo2&zL>N@ z?+szx4ifGDxZ69IW@J2|Mg=wA$lG|^+2BG_7Ya<~X%#n`1bz)=mZ6kJ58k=BZ>nG( z{4?zzGTSSp58J*IUa7I5#m6=n(waJ6=5g8`+l%SDaF8yiEl*ZB4h)a%lsn%fiKNtK z$t3z5JS`lU&pm3oydOkd@|yGQ=~`V}YX)6AC9&|*W089q@@}6G#&`KZvxUi}DjXKG zHYZ(!8bSi-G6eTMPU+!lS6g9)iKJ{iY9Z|U0@52F=ACNB>676#@Eo$GqqLh`h$gis zMGWG4Ya=`tHC4STw+1J|a0AWM+n1SYGfL5xbBaO(ni)1ScoQ<=3JE9lhkF^LlI({} z4?QgOn~L-r{0`DWw(Y}k?fsnES&J~;thGA`Dm2+d!ZHwuLXBkXdfmhy@=Q;qAm9V|q5|b0kDMH#P%LA^q4V zQXa93%0v;0?VN4q(b}v6J$;4DQ^s@BKAn{Gi{rJ?J3;lx)5jt1U|Qr^mESVhGfY>7 z1%ouN@1h@J=!qc;c46{JxsNq|-f^G+o@|pK;?E*@v&z=i+;p5BpbaV?i-C)MqnB(I z$Vz&H3^tJBK%*eE(>F*oZ+KiaHE}1_V}Az`I3wtzf0(}QJl@Nb^QCc?M2Kn}on7@K z<&HeW&kiAK@$}VW*hPFR%eKADq|_-p7viDW!AAR9&Lf^VhkFSJHF=5SAx#__8v@9-kZ1*Jv>KAFWWZc*YWWDQtPhz9v3JARrL4{)1 zwu8L~mf#u9V4srNl3SpK3Ed`D1?jp>07Yw1!~+_E!KS9wg>NDH>O||<=2&u|lV*U{ z*SzS4F<33Ex|BUGuN8g*BEv*M6eys;qv`EY90X`ctLRJaF$Er*C>%tI_IK#-&3y5^0ss=nw<55lj|Xk69OVP^$>P;W7q8p zzL-Q7xslM!x*Xw`&iqVO?p4R+W>qzCBicom_Josqvl-?eJJ7Mti_#|~Xzlfwqa*W_ zh6wi#G#TQeL#xSSqAARcw?$%@m6myuF@;6G>9^>QD3Q>jQ`P7uDk^)Z(fT+IcHw=v zsTkAqxjkK$3G1;LwQcC+^_FVv#8O#aw}pEdiqJx;|PkaqZid^fYDy+Xo|N~EXrO9^j0YB2_ZRqq3fws&nO+%`#HzBcyhE^#H-6kh_sK@t=5>hCCKoz6Vch9gn 
zX)^k5lUFocwc=a`1)oyl4^o_~1W}DzlOjhO<6$S-PWbKJj@hx%=m&u?pet3WHm&*Wqg9;ym!8SFwssIXXqZdKSDBKM8Tu|bLT}8quU{RVYc~ch@(&g4| z@ABFs5!1jgxzi|00X?;FxujIIb$FoOx?a2SYqC{*+fG-T{y%+c?`FB#?9f&ZS2ufy zeyXl*z5tJs2hZ6Wzb0T72+7v&#={JmW1mJS3M+zeP9Q_)4HGm#EaXFK09ife+%|eH zqiYZt5;UzzSw^=m2mR7FGIU&oUb6~159AS&v;2U4a6)&PuwAh}V+;y}28Qlc1Q@gg z&)S3qT!}$riD$hz!9U2{BDR^2`JPK;1N>`ZcNlDSRVUca0YnxRe+}?C&d*Wg)7@+a ziOg*_<7FUUA)Bcfmy_at1gJ!om;D~Y;Y2k`!gDa=rCzj<#{uBJTvo=jC*x&TzLYja z@rRw#2^5%|YF38ZddADFeCg0cDQUC>w->;DB583|s-g}zh0{ca+e^uOBT$J+b^?c2 zP+yvO?8rGS!xJ^ca~x`WZeZv1OWfVzmADTK(sA>n_2m`*@W8{ZKiVF zsmn^;J8G}k|Bp!sGE&$czV5=8>&6d5v zm%Wb{9X4wLc*_BJu0UQH-G-WWFkjBoZM~6j&)D*iQUP3I+u$k}{VX+dF+_QRYs8xM zMj&%ESLp-*;rZ{P`iv!Ek#w=W0KE_wqH?r&V6*AOx>}|84M%v4lv<@-ENHD(ncwbM zUVxUP<&mVnzL=PS-2rSC-FaWYJb{3ggvjz!({u@{X-4K1~3m z5?bh>&)sfZj z!(iwFm+)-;Xx}ZLyIcqdlO7n*7La;2piQjZlDox|9W*j35uI_h-dZc_gQ`+t?E@`} z>inH2+S~ctyUp8cHrE^NTee*t$R>+n=#`9_2gV6R;{+vJUi16!8xrtE4UFTk006B7 z|4}G^|4$f;%YP}9KL<7rPF6BE$@4cTtdZg0PyD-%M$gvP(v|2_ z87ofYU`xdGN5$Q!e5tr50`Cb1swYa_Bd72cqDa1n1$f9RtsrnZNGCGPmj!+*z~kuC zwj1&$^N`5stG6hnAP@#H@Cz9PB=gjW<3^WI?vAWiI@~AgW0R;N(q&dLUqq z+TP(cqnFz36Gh&-%4 zMsx78KR27WdY1A;BFm+%2I%^Qxr-55_P;Jo(k=PRj)2YJ1NT|JgJ<5aB8BlNmSjXt zM1<-^m_jB?r#y*;yC?3f5(Cky% z&fy#TLqcWo*uwYgC4#LFc{d?#>ht!F((r|DOVIC5ni06k``XmtiF-j~u_5IzLB2MO zPcnpxJ5^|O0f(Z$`e%eTvMa?1G@MM;6yk|-)*ELqt2!ln?~VADYDW9C#JSV+%FE41 zeiUmqhDLvyE_~fi;Ma}uxmw<2RDF%)+7a*4AyEFXEIhdNh9*I7K;m1acqOAD zN}{vVE4fY#=-`b;nr+bF^2}ZoPHQX6c)|8lH}=8bQN%c9NFZZItl~>}{o)fW)xhCc zEv`WeT)J}~vLzs83A{O#Ji&&>2gq-IF?ulo2l3Con8$zg#r`>#rTjbG|6(tRTRZDn zntevCos*G+qp*vCk?n63-ajHVQqe+oRTr5jcvinPBC(uoH%y938&jQywnXxrPTp5~ z*%3bZ00gRBfYWT|yzhNZsww5H&#T*OzOH$=&b^N5^4L{&ihas~=GV>roC$zh5GVL% z8v9Xm15Rf-zSKD;T0wcXQsb?!@yY-z^?74u`e57wftKI)v00KZ$2_AF2Qiw^1!6wo z(pi_aBP4h%`>~C2vw7BZjIKCq!)VUZ{EMvkoNv3k$f}7D3)AcDS@y^Wz+-m`9=h#q zw?CTi+BD3@ra;K#F9;GG0;#te6|kVn$A)Z$jT?VFKyC0|D~WNhLQU|MS?_ByHs$mN zgy4c|PLPN)-JjS>=>93m3CmL!~8YVFI_hFQ0?w^j<&S}Jkadk9Ys2Tz;b>tPitO8Y6M z+}hS$rfzhxqJ|)`Tg1nrN8vY4#F0|6T3f?KDAD1`Z#b=!40CZlC-r`%Q_j(-g6X8M zYpHgGtrRuFwP_;N(d4&a`@&u7hZ1?iuBk4r47hCCUJB#X6~GEqwE16&g;0`L8vP3g z+bWAtql^=Lx=-93GwW#;PzB9U?8jcuJx!3iADT$ zFVW<{3Did~lFpEy$%SN%#AA80yG>x(0wRY$4)F7#la?J;r+C4lQ!?=>jLUB(e<_*+ zW_kSLaak(RoF!px8X+gXWU~JP|LyL})xl3*j$OzXrdS})9p`3$%wm5WdcJ*JvI&rb82A#X?9PHy!?>aAljW8} zHm6(pJZ8B) z9$?5ebA1F}rOWy}8`YY^tqP}dZK-uNaQkN&14&oICUZ6Auf-Q5sZT97%EMS7ZqPSy z03>V~ynuo-IMH~aIH-|6dA}q2ZkyMl>6j9otI*5c*4~9psw8kdb|AZK`+1b+rU~)e z0*j(s!Nqmz^ishl$+#BK*u!+9>=8a>(S&)%Ye{k`Qy#*-vu5$9Cdx&R^lmhxgYKg| z9HWQ9UXu)xUW5dJS@w#?#pWr}(mgL5gXzXOFzHI9JTE^aJZJqHLftrzORqzo6;V7w z{4STf=awt=9iIFp)&sZdcu3SL3ZJ<1)Poa})1#$L(J;aoYcV zYyqt z5hHR$grrcD`)uR;4V@pp5cZ8TfaQgN2~9j`7;&YD(2=50jUgAVXJidrfBvM><7`Bq2pwsFrMC zHfWolTrDj=v3-7_7S=&Vo+%kq(*x8Vnd@Eo6>@oU`b0C?q;d?-Kr2i7u-0~=kF-I) z7xoPK(-yQZHkgGl$1nGoSBM!)>gkI={(5_@s1Wf3!!Vq^K(_&OBpk53Gnb_Gsq4~ThJ=HH!G;{VYZQ-1N zS%xidKOO^J`Ej&@i%S`?Cmw4>Qk+@_|}9wWZQu(dQH_Al;Kr8Fy|qLI^F-Z!71ZJylGflQiG__ ze}WCAU%Y|NQYj+*nq-WDmT9G_lwrQ2H><`P&}*N5!q&=l zc{dDi&z$PqnUNzp{+gvBTrAI^6`H1npcOi+a7l9VXg?sDvA-`oveIjW3u?a_YNOEQ z3pcbXF21jWUmxHLx|bL7KDEaRWIXHlty?8L_?c+_AS?Eks{CVTDl=HPIlC{kGkvK) z$-gh3QcHd+#+z+ns}pvOI!5Kvkebs$NlA)B)<-}%dEDptORkFI3Xu`{Z~;Bml^xwj zu(-!+`5`bT!_j+6SQnp?){sSU%z7?i41ginscs-&-sWJaB`NaH6TD42j}6R%0Jg8auecl;`aokn|* zuhIf0mI-Ec9bK!O)T(w&4Y-_VGxm>hPc{KJ3%)%zkhWn6jG!chAlU3lz}xKL1m%Uc z4Vb0&*;=1lD;V^qzdej|T77rwoSTO}aior?c*N8Cg69TJfnbNSL}b}dVngwkqIbF5 zlBUpPKwieU^a|oZvCH@^pK?ACYnHVUz07(wVTk1-BE@V}o^1!gk+DlZ1`(9sdJ$#q zXTQ9eo+j!Qb>|b*tLKXSwF2a=qA%~gMRJE5)RV853@`*_Uo@G>Fjn6&>7a^DZ=q8- 
z8d4>3fuht0L^Un7?;{#GsJHMj^@n@7>PtojgrO1l`9{cFP#u-CQ7vt_|BRUuT5jcw z?7X&U*$b}unnhj6R-Ymaac*3YQ?3(Z30{a)t{x_7*!DRXD#mA}JVT`+=u5>>Jfj)V z)!_pA3nuy~|?y#)jXFV2bWwMx8tRtb<6{~K~1JI8zU^F1w{wh-2 zfW$eWpaJ49klR$m2MRo9q{FHRl?p^|aMdQ)gH|1{W7vKGX1*QnxT6kk4!A@e!H?pT zA?A4dl}<4-^pz+xuVk4$3%=wYj;x5C{&ivpI35D(Z*PkfgOoobRG$)!y{vWw-+H$` zW~<%Uy9QbBP)M=FJ%TSo@e`Svv`NozJU0BF8+m>_94>nQ&IPGMc<^ofq#9Iy*6#nh zS^k;GOZ|Pb{2zOipBk3GmslM{Z2p*ff67??d2Rf;;Zpe`FcG!c4ahA@H z9__E^S-0myhqM5LwY~5osa49zX3}WTneAX}QAKM8&atN%xt?7m7C}F$=o*OPIH7Zm z>V>@MHPtYez6Tgt6xDXhu${3BzW7|?*e=|Eod=MRZ_&eaugXmbtRJ8jbubOhWU1^> zp9^bZcwh5hGJl~oQV?-Rwu88*vy+3nK{1qH9BVv`oWi*)Zoutg4AZ z4Sm#@oV1WTZRGjDc3(u~Of{rqwBj3Z3u_`VDv^P#STp_x= zrAW)+-5YMf6GqNNFn9CQ%aMsWz>HEsIf-*6I!msKEyly>gP!qoO<+xyQbb$VJ(4`j zf<-zCy^-e%(FHbup51v`dBJQI3r2#ry6wqP45k$_8(qWmH<{C>T8_TZ*W* zYp?6EAg}h7EoD6o!mlvOTx+Vqs?WNOhCXj>78pd#MW&t z36I~`vf82*o!qQ)^b)!$h*yn?6y_rt{+46 zG;6hfeXi9a85e*S;6cl`Nn|s{%ffYoBA9=d@%0(uWbKFU5au#HSHVdNt#k}gELW)L zmj{Cvi90Ktcb}ELA|FquU{GC3>NG<4;m6n`ndcYR>^cyxzC77vp}Oz)NRd41yuU}- z^y5_3(P!Rv{2%O)|2x9|&fWfChX@%N>p59E{;nVq`+cR;{$D|5U}I^i=lHp@{I8E# z{?wQJNv3c5CSZ^d?&AYVhO*`-)UjRYV~=CY2d3e=ZIw;~51t-piGKANVEK7}(WlP8 z3IuZOn}P33m!J8?fK>#I|nK`d_%?Ffq6## z&L-Gba@Kv zvFGlS8wwXT$;D|yYaJPAcRbgTEiVUj&NEyYm*l?bIy z*Fd6Jtd8Cirh-Fl%BZ_-FZPC6>S&)GBopLTi)1@wVk(IXiSQlS%0#1fLA6eM*G2+FC z)s+*)d_>o8CtWSZP8_Azu)aF*E>O)JPo0xSn3SB_N|oC8Jg}3T0G(A}V(-Jm#bkq6Ma1n}4s4+tSIU-BYRrWs!~bZrJSH&Bh)vpiNm$I2!?N~m%hm9J$!F>_ zQVWXR7Z8u1CL}-^uUO7aVmsJ`Ra$oaNkFSWKXmO!!Ao0sRfK)<>PJH8@FkH+N_v=&r<`%S3`-bwZtf=xoMT#ab~YeM-rm`3VA*JNo^U4q z{xjExtk8K(!X~6x1$wE(`_SaG-?0Fb81z!R$|<%b z!)9SQ_BQ{~AkNG5YaMUF5w6rs(#&CG{DP}~imRRR@%;7kmWl}?26iu;yIgE6c9Rse zJx9NvEM3$(UoZm(&T<&za&S04=o$U%G6EtL`CyCM>V`Ko9m1k`@{N*ft ziaVXAJY?Lp=nj5!Whi@k(opzgp>4EjG3Qqj`+N@@@VNw|R6zGKh2j9LqPrLd7$KgD z*g&|+T^I$mIa4B9BV(oYyWr8d&KHPp;7}j2WWGZXT+3&45ZaRCV(P&3+uhRELMP8INn)6x$XTlr@v{*xD-tzb~P4 znEPp_#L9Y9siL+X7=79;{thEg>Q)E%>me@1kVPc$rx34ZK+QU< zRA(j533nz;jyXC`e45*kt^p5cl>y=~_-5=+F?gWVNJRj)kY0jUMOQwH!4#BbYk$`$ zSDO3Hx~~kBb?AKP*yArYaxuDqx-|s4CM~0E8G=!yROUvph9{IQyuI_;O1U~E!_+8x z+0i?uk*6`{UNvCwaoJy=e5tQkG1s~^7X%iiE07%g`Oq)uE_G`L;E#KHe@73@eL=7u z{0xkg|KPv<)1IN|@4)zVE2p8!%2^i>rc!g%DNueccn7HnK1pH?Q=4xul-_2 zMRDRBjw)f(WS*=LuHzWSDt&BmvFZjXWee@l&qb?+KeA*3qFmNdI6@OVTHjNAxs6=D zcfW#hf!EviY(czbMjqygVlyDO;Sh_6<$7jI62F$-X<>h;Vt@SD3#gS4l1#DX!)Qv_ zlo6&t^ezlZg@hzak`qYPON$n{#tM4Y=!X3&w zc|_7fjDC!q)<%c2_RYcoujc)Eqs^X&k)fYrC8}Uq*q1d;>oUdiMmJHv$&)x;lH(U| zG+I0=r#6wcQPD_hVbmAaxUU55NFmLH+>@`Z&jcq;nV5^ea)DAS*TZ#Vl+oupy9k{Z zu=6WmeyEy2T=7a-f`E=Iqd^{ep(&5YNnDCjv1q~3!~@VcTPz*)xPyG0@0_7RT12g1 zIq2fLAxLlUal4;f&uJQaO~joGDmTnvXC98_`6y(=32+T|8bH|=heyyTQ(?p(Isu1kzsLJJ>KxNvQdvSWpjiT;G|La7>2FeGvnkNMB%s{i$BlTbcbnL zwT6UN!Rb#o?uaifd=TBP$=HIMcKb)|`w!fz)6DDL(yCHXZc1v%|5{@CcY@TPgN{EFw|_&1{z2!E{><#m=jf&ni& z@Zs$J{_=*xhnf#9(er>KD3zL|d*n8%%JP)watyWP-fJ|17{$4d`N;s7$PY4-n8PwK z6@#H+6e2xC#K^P@G_bcw%_*YJmgUvgWrU7q1=Qgnr|g|MWnW~Xv1;9ww4zQJe@QZ- zVRIh&Oy>K{SJ2axneb6Xu^|QHjbF2D(O+(cK!@p>ZtPdwCZ;pXdR$lL;rfSR6xu3X z6~oxQ>&Y^RwHpE;QHuRite|o387xFN1utva`)!l-hyu)%Q&fJSE#P8<_tJSN?LIqc z@3ajZlxbgKD^@9xQ%vr3_z`hnyePWE-wrtG$<=;@qjSJTBJ>HP6wj2&QMQHR<6}4u zC15eGq z^^T^WoID7d;0U@3w-Y6Fn`0W2SPSSE`JNo6@D=mG-sMak&czyF)BK%hK;-ml+`Equ z$xN+(RkLFODgi6_NUh!3(Dy4GO9wsqDdLwi z;(nLjSlzD@Ma6o?26|1Cl7 z0pnEKFrzd65!aOg;+1BG7V zpyN%xhiIk8+boGf_fan47naS~Yt#^>Ml>jhJ!quP)<#vG8Y$~n=;UF%p-QvoQ`c;) zRqYLMkbai}ey@U+NXE zr+1>g9R3Q<&PyP|9V9RIM2Wl}QtsKqTm2>sELZG;TYV#wp|0Cw-%F9QerknwkE9eFb3nN`%VYE$iDTh_9V{_9OD^f@tD2CVI|c_7yssitNzkNx72%%YX8G$lhy0%7*7Yy6$IQ9ZuuQ{ zD12>%fXgPd$}nS4yQ6ij=Y`cQf`$aiE7(Po+zZB|Hy5ZhJD*}rAg!28GeDPRKFa5R 
zaccS?3(YeGbn|w)#kf+8KK9KstaO|)0S^a=#|?l7Gj2rS1-{KR;59L3nX*KWlc+J; zx*}v$sNwy0o*x$;A`#qYro0XD-@2mzwD0+yE%(QnQt1YvMyT$!MCdff>N2kpVM7A4mqlEGD4(osqG#z3cL|>(bq%b1#2yIqICP7O6V( zN#d+uMxWGqw_he>IW*HS)AGifI5bx{wD9rP<8#5M=;YDsWA9fr;(e&>ue&MyfHyYZ zSUYA_8lriyV*1G*d z_Q8C!_7z0YC_1Tf&rAZW!O-Y-;bU{BP>j|Q8fY|vBoieN*Ic_XGZCi!=sTkkGkubE z#i&_ivM5`janjE!l7Wsa%&s4CJZStDqJis-2irm**z)l@(#3TWrQhV7q;IvC+dQs> zq!UO&1z1A_07Vp4Del55#)Nd+qPk&vwFR@*qe8a&VK^yu9DG$xOViqy zAUq3FbS*-zGoIJ{vO%PZIV@bo1fKyOKS|nvF38o z{fK9zP$XN(enkQQyS;#%IWD2ycA}bw3(DSN24`qd$hWn6Bhdg8nQj0u&13B7q`mfwgUnGhn;)BZQre-lLg$jVF8kjURq)=2p^UgF$a703v|PschQvw+D{= z+zF86wj0>}d>0K$QYJF8N?SMND^GId$UP9rc-?wZ+NkwqKO1XS_}lw~xFcXcRK$uM zYl_YFPab}md)6dEn#3Y0k0i-9JG2@oa}yt~RwFIE*j42K<uR_vjPaqh~607X3AOgq!E_|@yUgJ*2>Q`)3}a}^v!o75&t0vp%OIBH8W^c^)- zuL|RVsK9A+2oSVqIo6q*kC^~*D*1n)PRq_g`IRLshWTJ{my3Bs$!nfp`^Vc~lcfbR zxs+u9?4LvKmFM>azyHV%AqATlj91F_Mm1b;d#>A!4UNwC z&YoDD?au$s?uv5rj53VICy$S-y`km<(9&ceo|QU-mP<>5t>YJa@ytA0FouJgK$euy ze~oEog6<0DxYqIi5%x}Df_2H(Zl!J8sI+a{wr$(CZKKk*D{b3WrJea_>+8LHKc~<2 zx}9srjEFJbVZ;34aE~GsTGP1`c9+;Y;vB4FX)Jd1L(Yr&6;t4*DH+m<`F+h^AJF>9 zVQ6kZE8j?DTMpqSnl7EcMvWpjYk@@8xzy&?i}4k;{za4iEPZW8I;RO{Bn2jJ3|Z+A zzL2$IVby6Wl}({u9fe~Tl8ewMX)B&W2X^Mh*yZ}O~1_Hwd`k?+Kq^QoFpX5$5h?O35&Aab^-n_cy({yJ}) z1o{impx{}Goqd#h-aQcVqzgM^_Bwx?%<~IHB-h(KR0jMEb-f)xZHikJZn@z!;R;I6 zkSaY|iq62O5(TV~)48?ClA?>jhI#Rj{6y!04-vfpsy!^2|A0H@ccv*jGgCi%~?f<}}Ds?u$MDvnIKY zZ|YE{Q471hIm;Tc)7Sqqq4xbrf?j>ZSaJ3O(l5^^q-K)o%OaOF_@HJ+7isG&d{#4; zU~+Nyi(fj2EV`!P^<0ftw(*_#6|3Oz%ine9Jh!3wTG43xD^LNvAphP6R3>tXo!a6z zx*}iX>9W1XJ);9A6N8}e-SUlk61jOVwtW3*lUK*#ak49_0rzTja)?mB!yu0|(3&19f^(I{Zbw20Ys9Lm9S%B9` zmNRdvOnWewIPUCPvPzyni>hePM2;h!)&_+slctPrZ~8;Z%swBoE7B}(^mI5B=EU(O zv*8D`npcSHtXoo4h+?S1WJ^1(RbHBT-#FOZsV&fU_b?l8xy(jgMOaHa>%G>oPDdR^ z++H042P`7>W7fg*bVnI5>ngb}mi=AGJMaxH5~fP?u>Ks(;tb;XqMFB|+j$E3S}=|K zMz0CK;JQw7Lk&7<@NcS{4h$QZQMiG5hwIcbvrc`3POI;ep8X(K481H=P9whlZ@puZ z8Ue+SzU1K5i|Y8Mmm$DO1(j*^nh_>vK_g-FK?IKa1}~_RD_kRJsDgf$*%Z&_&-hC| z^@HMP>I@6h7l_l*sgqd?w}?o+$cD)1cC8hObkyw26r7&;Nl|TygGYU0Js+0q*32yJ z^cYD;D#@-Lly^I-mUe}VZW8*=@a$B4PjUhdr^d#8S*xZtZOCnkTEhbA6syT!ocUSHi?3!Y2#t>C4wP`8 zqi#X(RD$-4Z=K@S`q-_Zdt?)tMA=*)7r*cgs+UJnjx~wLG^3iJi$l!Mq^<7Xq`7y= zI=C$ho@W3b`+3TX31uAsdMvYJXY^C{21~%Xe^9arov>&2?yVzcMqakTNOtrEo(gTKb{a=4<`m>%l4k3UPM5iBVNL*3>!KDvU_p%$%xZvKvRwQ1+6+ zN?6gG*gbiAk|ma-%<{IL%7&wg$5_|i>nVgq3bdk&Gth8x8W|<0K`B){y(UTiREga-*)n8%efvHucR<%cYa{pT_9-cV_ooYROkXrVfTf8~3T2 zT5)7b_-Rctiw(v5Xx9YnXpoYR0}2hz@!n6>e|&8hdlU*@rd+O{ez?1q?XkM@X>?~I zT-XOL#<~J~W&}DXpmvpCsECo=9482hp!!=lVfwt#wR=M&Y`8f#wV z97vxM9>`af5XyDc_a$0PP*HU&`l1vjx~c@E}>R$eGd_@weWfiYD7a zePS`Dafftbn< z^EW-FK{tfh^vUuIth(X8-C$S`1F;_qiQ5Tjg8bU4XCrtf89{o?Ns0qfg6be2G>6G0 zbqk18MA70Qv`@2dq*S77Gegg4c>H0*1!+&hJa34UnZhkgv#EN47vvRFCG;^Mfplm< zO>M8TAE}_sd&xJZQ~jJm+AKuX?`q5}t63bH481rnXz<(DxgmjhK?gZ z86oUBvG700kzJ@+ZdJVWjW15kJ6wI?&tCJTh!E(4_%@8nnJjDGO(wgUS7fU?GRxp1 z_9ScC8zJA0KW?;RHD@BMo!5&jFUk0w=+dzY|)3EE>J8uYow^0P8!N9*<`XU8MX$O zlf8Ktz;DMb^+LIv%fR1fnMbyew~o^x;GL!~yaGI*(%&fhAh8xdPBbiK&p6h2kCSpG z)CY{%!u%YVK^V7&#O2jaA*9xE!JC7L)MfC} z+#OV$DY4B0?3Z*379$l)e7h+IBd1Lq$MR$@0+-~S{SX3AbW|%)gm_7iO&?{1ZT}0_ z-JVQs5`&br1|hU{5L=3#o?anY1B4hHWxQ=F2DQx3bP{jjJn0G2o-l%%JuBJ;n4xjj zyAHqw#r8z$$nMKovhw77AKgky42VGOs`_Ma!7)h#q|FfaQIgqJj09aKBonYY;Q%*N z)BI|^RgvvGT>dXk4Q&Trjg8CLL1+@`Sb$mD34aS>9g-nnd+H+FUD7f~!O@GWm06q2 z4(}~?^U!CfLzo?~!camVv!hVWFr7ks-UWoV$Vu^t1=s=L2IC)|?ds$jW2h{@LN+r~ zbY}K(q+>JhfqfP_S<@sRq>Dv30`EE~uTcJEjyN!^ZVNstHhM*Ri=2^8@M|8e#&d^h zcK;g9jFo-HBWLjl?-3ne0Ot;6@45rX5MAq*GD^vX333U`OFg)E4R`6-9@;%S|h!Lg!Y082b83>Njs5|PldYJ%mm 
zN}5=akMhbf-oU<&x}eWtwS)4VD5ICV+DSiTeemqC-Z(gg8%pU8>rMfwDVOUm$yw4r zDh4$eDJWChHLs=uc%Mmk4YA&8zGN&zeF@B1&>QH@Pc$9dbZo6uSvtwZc>!)@@Ovf8 zwnN2L9J_~3Y0s@LdvGyCUtpAjEH$>zmx&~|cHgGfF>5PPSiW{t{f{OSX$(S7on3e7 z1@&*h>7O2!n0r_ZE^K$Q zs_1m-?%~Vm#=Y+PLS-sY%E@+sMk1WgSOGrG;5mpJe3rb^oFWY%%0Ohro_0y%dB8n|BvJK!-%?XfxX481&yN?QreL5!5#FprRtZ z%kj1+Pg)J|Q180_7UYt5valZPwXouNPqmopDTW3Y4dHh6bx^ugO*-^(6HsfMfNF6H3|(;aEd z`G&!W*Cj%ZX7>KtZB>%(Hi-UR<8V9>at2M^yFOq#!%BZeowMH-+?66>G^)3 z_EX}|ph?}MU|~xuUMeU<5%jT*K4N?weOSAPd6|Fx?gMhcadj>L$?IkU=Pt+Y&87WRq4`Zo#5;Yc&Cok8so;}gyz{AbD4Tfj&%wwuUmG}wAC(7&fz<~V=F@#;Ol{zn|>H3y- zY_1^&(8@Cxv0J9`2b+%iYh~wMxmw2xd~$9eYr$z|g>#iLO6r*Km%N2quLryq)Er0~6+Oy^XXP4qX2&6d z9^wjm;@_obsoFL@a_BHrC!t$_b@!flXqEu0LelWYate~HdolOIk6~n(u>Hi@Xr#65 zQA`r*i=PVnvt{8FgV=-N`t#{c;agqnRV z)dfhl1-c67cb6W0{jE(8+}T10wF|`F=YSU@sdwBETAymORYs2LEx7}R zs$8&kZrDVofy1NqB6Rl5fNZ4nXI+5uBk&oX=xxK`_R6c3F@3$_ZT5XMtw=*XSeWGud|57D{r5uUf?gtBdM-zp; zeW5Rahx#%NwS-jp9F9AE+>tbL*9Br8;DuCYK8dDiL{WjZMDp(zqQ>j-YcwBG@itl) zo^s)HVM5+kr|m9`cQfsJjuF}OJR5pqL8e`bytXF(ROx!&64~Q^*n7i_z$-)BUKV@5 z^tkbk9)w$r#IqsaQl<52jd%0ydfpY;3w+oU#fs?Br1fcxf0pZdo)y^}c-SMGMdn)O z->SjKIoLP#4$=}7nsGwWpU)v01lS|-_;W*{hf40W7_LHY@MrqR(=qcspvFS6X7wz0 zP}U;QucaTBFz756e$w}1y7`xpIbP5sIE|XZ&3*i{wU|x&7#`t|w6C|+y%Z;T6wROH zaF*2y^7Q}*WhI)J+_O(lqoS2z&|_zoz8>;#T=-CBWwBJcLOdV`4I(2A9tZ2uao0;k zx=aE-t++4oA|un)qCPT22gi#oNaoaQCFVYVqwj$dFvG2{1Beq}%u6fs1@H*StP&wE9+v|B-USGn^ct1|y z3h?Gq--z(K{NBm&w*~Pk$Gk}-+p}nAhqo&sec-xkt~>B*2z&rP4%nffxR_HiX=k_H zzp(EVk5N^K&Ci~?g?P0C-=SaCxyhWmgm|B}*bj~$oL}CU2WeRQe&XC=U`&@oF3IWl7HHJJth=gtbBJhFwZ~+qjbf>291^ci88tM6U`s#V|gf%$S z)2(*=3B&jG<3k?^?5iWAmsh~|7aDw#)n_Oio$nl9`Il1Q`_I<~(73N#fP-l)@mE{s z3EjM&g&{t-&)AF)-{8-73tzB%!uky632Cr9#E+UMn${g(px49`+j{|Lc!7tnC4rN~ z6UEmtL#5!jnt}>}^-89a2!jDydPjm$Iw(D$BTO~_mkN{v$s$mON#-yRO=lp;?Xvbr zFWQCP;?^cir-P1s(10~dCY+VoJo24t%-`W1SgQ+~0~><{K1O?yf@Ch1a%GOX}{q9ODzLVf)SVfW-)Zw5wUu2q`Am6z?D zvn#5fD$g?)m8+}g7a#bYvn#9b7M=6kX_S{k7oCgStCg3(34LYlHmb|nXYH!$Uw^lw zJw(*ktYLm9=zfRf{@XP0pLR5||GU{&{PyhsOYZWm1N#Ov|6^U2tZb_~Z-U&zYObL$ zXP`-;qRFI|M?DimsPJ=dju5;NA6f3cjEvemoDWd^Y<4oX5zW!`B!>18;4$CDWj^T? 
zpK~md>*?ig{QA@T>TY>E_Y2rh+%00K=G3|{pQ<7hvcGAKv+8DL=4lOPi2Phb(b{-2 z#EYEBZaBopicqVnGi7_Et+^QY2;BV_dP+r_Fz|G@jRv^>@w?oTMq7q)nZ`H|=MoIZ zfbpEFEo)|kyrM6mw<>0P$xj$dNp6MvW@VC39z|Pha^347=wc|%C8{AeB0NH&ugwO3 z-N!)D+w}^A3{Vp|kL*=u0AN!WZ9hN8NTNY47_Vd{nURbI2M0Vi@iolFBSBb;1NsYmRoG@&Lpb4+Ls7x2ln()Zhdc52nd zb63igKcH|qA$|sZGR`a)Q~m(c!J;Gen;u;T4sexORI<#e&vH1HzLvB0CD5$tA)WT3 zTP+ur@P;jUj|=4lN7|<9tqzP9TxIgMOKl{xz7!9-Q99)OqFIxGvYUouq?2@Ac$sWo zo3LItoC9+N?>>9&cR3`Ge^R9pAV zr`|l~)gj!11+4H!uB-yLgdUpw`ae6y>l8ckM_*mKyU)FOwY+(nN@2wlajBBi!O2KS zQZDY19U|GtZ^?FdK{`$rk0u$+x-^MZCYwJ=5RpSw!x)!iMEcAkx)Z}J-lQQE|{ zeC659>5E}TW3dzej6b_+)L3gk8Aq9n*nE4iy->Wda_UX7-M1aU!O(TQc;}z$Q{8ET z590aYI9+^&2mN}1i$>;q8_HFJ;5O9-rc>G#%*~g(esxlg8o&#a7KjL$ytX!->Yo?0 zIppZ$F2^Sxzv0DyG!W+EO4Lnk0OLVb4Muw9P3kEbnXZy|YfB;v7e#iiE>WH7b5@iq zfQV(3Xx4xGhZkp0Ty+@mtY!@ll_X0grm8<+!xmU?dwW^9@>__lTyZ>n2V5La#Vn|C zMqP`0C_OtAPQ-ru=yIIviRu7^e;c09m>VVHgc7ulT*E#EkQhQlY)u`%ml&!ieprpm zD433ckkvC-HZy*dOuL{sL7Cbo_-&Ibrs-IvanCyeFzs^@HjRQ;jqsKoPtX6Y4 zSdIg*9fr28`l1_kd%{#*RCH&EA@?;jaK$lO4|bM25-5d{KJD%Jq%D72yg=?y;L;VHliqUI zQ6=l4SEL_NEO-8VPw(B2V1!oUB58uYrn^U#JckY!p06$U|3jjB-VawaJFSY#P)i|r z#k}n`scoEbW&2CO-?EUENIOl^pqHOF;b&2t$q8*5IWo_H-DPb*gCO4amP!XSSw!&~ zWw#)P7VDO|riwvNtbm0hmt3elHxa25?Wzc?C*OMFfDWT zyW*|p3I`-kI`y=4i<+}=eItJx z>nSe^5_46kbTWdg8c^J}H-c+$x_GcnRLrI0c)@6)8p`3dIJUMa1$1o1kwlLfs#ha5 z=yd4R4Tl-og}s_n%_u03Ine!^#PIPn!*AtVVmR<$Rqb?tJw*PcMyLHRYV?1+NQP2l z9g&2QhkGQ=YhY&Q6!@_z@#Qnaf&}F2if2lyiliap1V}zNv{+7=Q#+HnBmQd6XX|v`ueZdTu|tY;SH|UHfu!3FodW3f0l?xN}8?C(f>)LhWmReUC? zFy60n&_tepr?^Vp zi7n;#j~L9tL_pI|l8|6lu&)C_J?jWZi%PmA>15q^{>q`US-C!sV;SMoRkXWqBA`pN z;U|`NoM9Rd7+)9Ww_fPk^`BY4i+c5n6}z3{K^e(g8K9NP>E=InOpA^^qesTIn(b)P zj~j#UM!9O#++=qw64Fwsyy9k(ZzoBeiuqsV^l1Dku`>H!#dbUZgS%S##11@@@_eM8 z$oZQ(<_ZU+f-3>s8Q(s&XJ{^_ z{2b5fjRwYKnCnj8$F>$a@I_n^}Yw(Ph9FInzIpla~n17j$l; zu1(-8EpL1&NWz;RQEI%nzns!Hm;bsVS=w9jC`}}-i5{3M=rmJy@DrZAbC+;`fSgvw z`V2VVidHJxlYbN7Ba@5Le&hkU7yAm7%*!|;*s`VYSY$?OTgx3`iqQRli=V`tiLbsZ zT(e~H{w(2*FQ49iAYUSf>8Ap9WihBx9E z15Fe%WJr4h+25>XOtVH8-*T>5j}Awo!X(oh~C$JO*e$bIC`7-X%ge^?;h_S?w-y*@SgValV^Zx zsB>zWH4SP4&@uf&W#kIAPE2e6?R!L1k-@sefF)ff^dBB( z&L`&gvTybZ9o+vcnE&apWcN&&faL9iol_BKn-CA zsyt#1UPB4*IMk2^%sd?a9y0`O9;0g*XLW76d}fHsB&>}s(?Ct_-iRtjnc=J{To*!f z0s^gv)=t3_CDcQ&%%W=OjoYnCdCJId*^{V4Wj!ZUQ-u#q$5)vzSJxjG!fT`&eafV> z7a{qMqx7ini~$H)4Kn;VQXA%g*fbeOIz%=_r$vrU(dHIXr=t(LQ-?N4(?1ZZh>2V% z*ni2Vqc7lS4U!MxDqh*GX}z>>Gv=+j1}7t2Y`8=n#t2C$Uv+AYUq^l<@|9&`G4tRc zHFLnSq&$ZnNdAfwJEb6nQEL@xP?IXb)do-%8?8$9hfqZ0Wq6x#$s6c7Q7AW7$s^Al`1jI=#X$Z ziU;4`cO3NSM$-(5ni*46G{$BHl=BG7vS{94 zz6oZFm2(%zj$H5e^U&bIxyh^boBY zn((2N_7>=N@^eov-Hn+B{b#%F^w;@z`VN_Nq*eL0Hk!Eq%Wd^#;IgVG? 
z?^F9XiQAvRwL#P&4xAGPx>h56bQ4yQYL8bnArzypgr5pdZoSF0G{ihO=%gthd=%+E z6Lcz>>n7yYA=2?A>Eh>{BnH;3Hh^?lWs}jjY$j|q+hM}0_gYZ2jB1)y zP3x1+srnT!8`cAT;#u|ffHfza<%18uwh?Zqw`PrmoRiy{SUKdx3UZG#+&rb9ADOb(1H7x>Cfu7rxiX_Y~~{n1MW#p#(+KndT$_kn1+KbS}bMmBL+uxZ1^sCX%=dTejFB3Anfoe=gAEffR(Sr)N#K3 zMmU#GO~u(7Wq_f|S2hhT7S5PF)?HTPl)!Z4(;oj*5WzY?c3f>XW#~Re3cY}6(!mXd z`5qG-;((=eNDO6)%)XObtw`~F46Z+D&nEm{X2qC|-D0lUd9s>;Ei4axk1Dw3&HS*~}lk0DBsz$ilXUZ+w9no9; zAdt2%ppBw3yaRbtF^kJqSHIX0Y4f+d_{=SVKE7X5U(uu6_QuBuJTK#!+r#R%`gl36 zK6p@DL|!HNXgQJfGTVwpM7qBWCW};9FnX2|(+1URpW)J7DKi}{hC+76+W&X?Z^3U;_il!>2I_g#_tg@V(2$a&Gn>c_7 z4Pc+-u;!d%UOs(6G$xX5HnQBT0uA7hlPSX;+ELf`D>d!i3}fZG(Zerh%OmHUeb40KcQauA9{4De;DzCo|-EbRes9&26U})w);8GCio5Ry&1y6MiAvtYwU6D@!UvX#}OV^P^1CL_@05H$E2!29) z@W}4%J@#UCEcMvZg1jmWJr_~mHcN8wuofMs&kfDU`7M#*j=25{C(!kQ$rbP;0H zkdA3z#)VdCvYApYByP}mF3*6dX{$duF$LN9IeC}I)d+X_I9fly^bK#g{Y#f4(sc!n%)uW4H*Nf#CkBPL(pzp4~=2GXiLNBAXGdbKEur%HdQ z)gYi7!D`u?O%pw0aggCXC{G}XjJOmm%Bl@W*1~XF-TXiUI(4#3<=cKRQ{Tnn>o*c zz7=%gR2jBoJZV*H7t_RS@v5l;UMIO<<*an%crDc>)A|KY+?u5fP6k<6FwqsZ93A?Z ze>>6{cr9;4O|Q+d{-mHIQ1m@@PR|y@TUUkA zG10tCwB|^0!CXtccMdKUTcyTV!KarsMRNXzl92ppQJRnyWt&Sg%V1&_qcM4cDw8m8=gZjG!huLja~l3u>2$~x2>DU?O455@6vxT1 zdi|$SiIkQ3M+$a>nWLXk9L%vXFW`<2VG)MESqocZ84;6uR3b%Ui(gQWXvVOY~UPl|4)%LaZQ+L@f`!hhhJlzu|Y zMvoK>8S*REbcfoT%o>_A*wd3r$$b1!83(JKh11Jdl?s_9>3ux)2QMKFvHk;&gdIwf zWf+C4cb-=TTWGvuNva@n6oqA3xe40>f}IZLg`Tcm6V*1Yb-?N8g%#K9L$(RPH0E;u z5dAbdkGWml zhK%`wQ6h-9D}925{JDnGh(f9gwy?+*D5hvo3cnn*y#7bBY*I{35OrLXtsC}bhcRaf zwg@Hi9e9~q8t16OpUBwFbTVAAZ0Dr+knmH&nf&YTIECV9WG|WTqTC7d-&#(hzY6m| zXSDx*Lpywvdx-xDPy8GA7D*nSI~BEAGewmj8G*cAKr;?R$*YmajWi9?B6%=&{ZIkb z<~qUq`BfRSm^k6@EvEfdb2u{6zRwCLr|Z;2I%5N`ulEP29>6*DX$0B}?6w_<>CCu} zHI1E>K4XzYj9oDnkCE8HEhqG-vTdrtE6`O(^UAKzj>qld5hjplKb?!&HO^Yf9}83x zdTC7$2ik;^YY2TB3o(q7fD%E`jQsKYg19$m??V=~cyh5%m3VqIbdPbcN33-RZb|o( z`gtA>#A}QYhnJ(|TI0!zpl(VO7m5-acBKSPg1Uw?c{5cI+u&b9)hmbHA<**VLz5Wj z0u0goT5?8Oqb&s<$#1kM5&6d;#e{k(&&H*StwL-Cy@xb{KV3umT^{@>Q11@E4CR>Y zFbXR!P=G)^lBo8Ts->Y})DgAUoNaa}KIO=rJJb%bfRJ~ay&L!;6ZM}*!M-W5*FaP(-NQ8 z_C5YXRO}zJZcgd}}FT;%+wq!Z3mHn`QGEEH94#{p^ zn{X6~A7QSL2fj02enaZg&H9O}m{h!^Tke3W!%X-V>4ij66a&^l0NDO!h~xUdJQZr;9_dHMDCY&0GgBJqarw$$-|wWa?#cG3FlANc?A zcmH=N=|6Z0f@bDcM*l>T{uAq7tQ0Bp_w`0`v2k)}SY81L8aax(S7a4!1*sW|wEuG$ z{eX*vXv(IE>yl89M4`aKAlx6pcj*1>&EH|v7PZnYq^{q=y;+{u6U*1R-9Ol(5d};3 zg1f10Bt_}4-5p%}o>+pu{N5xePTL}FCNp8IuIxgt3NFP|D=MM0+fJ23ghneBlyvdE zMzfW7!I10f-4gvb_0=|anqX|0y=pMm`diivGV82YEyXy=$N3;spG!uyr>lai)sJl! z5sKTLDoEC9pzgFFH$fA`isMB>@>#8v+7K7f`?x0QRAQbrnvdD~m%@Kd%H1%FszK>j zk-h^8qS-QKsX_;~o!&xyCTPO?YoZ)Vx@O0axeAAAqY4lbzazh*Ck&O5f`Z14c`Lwo)^gAI3J~U=enXV?Mc`Xr0E>#*UJNdA$M-vE;CiGV^AUpQNX=!-!XF<+) zRBrUu>FJ>pWvu7}fj*wr;~0-L3(>y4lQ_!}QMEOT?V=}(yQAcz|8}&V8=cIB2rgh! 
zk9yc)`;8Hz=`~tN#R|SqL{B&qmhUfq|ELG3n6b5Ry=ex?T#MI*Vc?yx>t5Y?oKNxY zOm#1Y7n}V)qwJul#<~P}=%~2~yGd?D6TAvdwRsR!G48iJyM^&#KVc4;Sb|2e%ZU6b z$UMNeN+eyGbwwU-4Qr%I(mmhRqP2blc=nq+{|pyq{XNIA*e*Q3M;K~AsQMvcC`C?u zcC9v6VkoH{*4r)(*aMb`VCfvS!DvW}3M(2?5)pQ+|;?NdLcRSO573lK=Gw{sZ19_dOo^cOfD+w=s7j{;L!zeV3x|WuYSEc(rn) z0E$vYs{!5#WcSa45y8Fy^je7AiQ1^z+9{Sdy0epSeLi<4#6lxpnkRCzGcw z&f}c6r;U%#{Qsa)Yi9DbX2qaX& zP#oeqaE$uJo@o{IHTSNo9^wrFWh94`#CBQ{ilH-7-zl8jap&XRvl2T)$g2tu1uEic z72(ziRquFyP%z~TqQNIyJ0MXEhc%1umq(svX>z{O%4D8a43-!xgdl;T>Gzn3Czj>N zy(@tq&H=UEK8oOuQyi!Zx}0nPdNUZ+i;>Mg5%twhLO9we&bBkh1pU2d0Ti^1@`2c- zRo+Zy#>#xO|2I37DSeqo#p?9k%4Qnu06vpk9%%ICV^67Yi~)5Z;uvZKx&hmbY3lD3 zJf#%NBE7A<+Dd;4U+;}Pp7WoNQ z%=j_PGPfkrX89)_%;mD>UA}mF1UDZ;y@|w2r)bq>!$qDztcxG>s3QC`FTXDbgnn-^ z?n?ao)wvRu)hc0&im0LpkAAi;N2A;{{ zd)n&sx0jutJ_IQK&5poHIF>T~88!L~S zfHOKTQ$0Ltk^RwHH__R;#%%5jltQ&j9p%E)w;<8H#$XlEM^|&nw|U{m(0_@sH;u9$ zpofa?ZNP4LPXHT(8Fz}(JGpU~rMJ%Br1KQAXC?An9-M#ooo9F|+7xt(fVYWi-+e$x zc-YQH;c_u=u#tP-$LNV#&dXl+dkLdP(5-<{B8DvI#xX-*q)j+&bnHCT0MoU+X>k6s zKX}RN#)nKG#H$>2sQGvllBXnZ9-K4jmZl>jLX;qk#H!ao7!eAn(BP?ia1YUdOhC)- zR#T0RBOi1rWqg@$%4Z-#q7I%HaOpsiG^vhFXp8d1CBdN1cdo2!o#9epmM3&z{2=!c z2{uri{bNK_OA&#YFwlQQeGC&>B-J3?qJ42qO&K`DBXP!G-OQmIA;<9`RLO-^f4CD# z6IX1_Tq*Ar9NNgOmQ8#2$;12*d;an3y+rUSC>c{i0#ooK@<2vXx5xk}vfd>O>B!2X z$5mwxPS&x>E%{)|ReUV&h|sApkwNV2)qr~VD>y=wBD@?CB|5%3e<|X*cMWKdnAYKnq|Mog5mV=u|1ft{{>Qtid z6a*jegMbg7osf?`1s}S%2|8XL_$itTxPVCL<-t!fDU+;q&@1c<@NehzN1AMl()aC8 z`F&RZzg2ku9uEHF_E*riF*PPOw99Rj0TPNc*T?K1wxRINO;&k_?QkqVd+a@uTc6rUNKxr{Rqt0M`y9Xxe;G1nl z)DQO*L5&x!<+NRu*_H}x)f+ltbP}^YrVlFRer}`HkY_MNpungf+>YQ+$1Me-^_!kp z0u9KOwF{XgdI~(s{9tfihG?}bv#7=Ch$u>Z1iAvjgrrx%bAN2bQDCo>i|o~gqGJFrDAIaW>~^x%SztSVc+pS<0CanC4A5bdMkgn-lLKI2y5zUQk&ei?oB z0~*Y>*6!lt$H&SW>_9jMnEMomTWq7lN`N-LM=vobU9jg&oxpxF7bv-m8U&nqHW?nk z3|lCo$aGk&k>;y^Jee+?20Cv5E|AX<)oSp|N`5vQsyTcAT&dz(B=wpk*_vc)2L9wM znc>Ajrfu8_YnmHZwaOHG5bEUD*vlx_|Hui%U@UPUoFGm)>yVy z#{bfe5=)ue7)yVL6nrb56pZ!%;&mn~oqsn2=-i5VP~(Kf$iNif8cmyoP-SjEb@C(x z6lehgcT?5Z0MJr4+dCdm@ab>VnPv@$Zvec=_gUwK^)#8@SJGG0uU)2IOng2*Z;*PS zQ_vTbTaJ=n5{w-~{0q*Xt0#5cZHiTnwvu#NtP=^Y>>K*IL-OaEU;3D+meqb)+c?Ck zsvL;>=NDRa@C=!aA1$G;MShQ(JtR{GbQfG$Tc6f8;D4I9PE6v=mxW+NiT$2%ym~uA zwGEm~vYvlJaiX6vl03elI#^VhY(Q0O<{z!xfTFsR?CD#gN6|7uw?Q~N zaZ78=L}SN`cd1H*ESPI8G5{`w{umY^3zOQfk06M$JQ0;3Sx^O+_%n4YKK(U1H;7BW zWsQTzr@dTHb4DJaUW^=A<*qN5qIEXSJl`N>8w|5Vy#S8HP!to?B5gXJH7cz(LKFh)I#8Sck@!*ifNL>^^#~|Rx6hX+a z8>k3l6FWBwBAfIy6`p|+x36Cvkko>`vfFPNH=iJxeYXKz3@xC8 zGY*@PJLLM2crc{bkelIwdd>%qlC!jMmCx|!5yNH(Bb=7Pl7QHI-7`{SLi~KZ9o$*f znN=(o$Z87lmBI&}V!R6^Vir3MXkdKxh7r%0ltO!x`I;~Rc>31aEfaL>9UmnyzioTEV&qg7WgG}4{C-8jun|D3Dt`Ci^FWx z$lt9DrnkN4qXutnX3eVY$1KMzV5`*CcFYlU=$X9?zSW^gE9zz(=33&01ege|&rbD& zxcO(=MyzSNqx!7=ds@+adOl`_x3X9-S*Ehvz}m51g$e}|H^z8LXLp%`Vxvurn?}?n zWn1mXLdlJw4im2}u9}-A^AlfBa-woD+B?qSOS@czTlE3SX*2W{18_A^uiR7cbF?M@ z4o;)gk?)z=2se0`FKw<;gGS*^iuBb7Y37)HGRxZqd*+8qwn<>lWLKaP_}iy&4=K<7 zwz_Dj2~e*VxFxid5_0~EazZuB95aT(d-Erhp>HO#dLuQ5(z z5uV(8Z_H0qH;W)LNm0-`0*if!{*_iKLikQero&L!N`Lh?@l{7)1_Xaqxfb4%!po`5 zC?64hbPJb`!mMG*8XtT^S0|2A-B9NGdU|cY#uR1E0|}gwEyL*{6UMm|WC+-AJ2Rv0x|AEl-!Cg0~{)C`l~?0l_&gl603*n zXZB=z?V7Efst2_y`jHvW<}4EomvpoVD1k)6o?WrN3e`ukhSNW&Y5?y=j`t@0aKjm< zZ6SkUmiaC!k$a)$V~PlgLPIUF4Z?dZpel`bg!Ei|ounhDSUJY=B2WAGQLyGRHZ`u*XsVm9L zm96bZz=YM<8kr}#=C6<4 zUe9h^ROXST>qy! 
zPEsx6ca5gbB&Kh+s#b-s$7lj;hlFv{rUw^*0o*?-D`<^hZf*6j-eaf==emNfm4v=g zIz6gq^UB52@Q53@R;E{wa4wN9-Zk&*q16}Zf2uxbpn8qIftnV>SOTproIvj*%rdnXN8m`RLF26M` zlSO+3?g8uu%>$LTTnTPfVciR2l@^wHV7Zx6?K-7e%99xcIV9&yer66B4P?CYT?nk9 z?wGD;?<2NeVQP4QJxJ2&uq*`XhDCGO>aVkwWvUVm9JuCr^(jR0l4Cn|OQ#lszBMJU zp5-^6YND(-r`$m5)`CtWZkyj(L6mm+M%uh`CUXlmUy|KOP1jX-br*W-V2KCw`_#^>+)lXV%Ps7^rZorLSC@YfPB<(RCxv~7jV79 z)_VqKr2R^B2Z1DK+A13WOt^-aD^oUB29Ch5EU?WJYLQl&v~XHK7rZ;Ke$HGl__{t^VNoY_huSN_e- zLSx=unq<&NI*Zr%t-T$rQn>!W zEW`7^4&iMoTrlQWmoV{vPJ5z%AHu&Fr~kDT@TV>J*VC`W_J8jT82^z&$7gBz^_#>0 z9LPrHQ|CW~U7akBBaU$Dc(Tg9K33lgK#+(-68MRUrIHJLQIJSSB{(h&izej1)3oeH zwcv$4ZB{Gi692|ZmdV=3J-q2kT?qu`#xb*>>PU6IYQNgr*7|(9E6xUJ*w+AMH)>Ok zayH{IQH`ey=Pt`dX>FXIDs3C9xfPXOhMHs_lU?74(*t)*bL};EkWhGlYPOtY0^KJ? zQ)TVHp>L!s68>3_mYIwUQyyf7q)B)#*DnZ7TY%!YjB`5kbFIGrmAFDt7K}Uu&oHR1 z=9FVYADp;sG>X1Lc6j(<94c}$oL!%PlR5tk_@YFZKCR7?vx7(vT0mf`k;|n_u28>*uT|fE`2!sX_nJO{VhA`LSMBFu7Y8K%_13`b%QBs&$zzcA5Uh$wv zu}CEON+_w}`HvL6{r(oBa}waYj-0l(YN}Iy&9bfibR3nL2o!V~0)AB7(iW~6ESpiX z75`3#CVgB1iJ!1@kNUZ1Y~%v!ufLBawT{_?0g1(t=31aH`1jwTz5}GN3^!`ZQ^ODZ zI+*~BW=}E(hlJfD_dS^>YDgOhU>PFj5||YaVJ|a|k@}U63Vt>jh2UIaq}r=DaCSo# zMR#5`RO=|wYrsYk#Wh`HR?8cm9>0NO3%%Mc$#^;+Medha5@8ug5OiQcq6{^hj5T5? zdc!0jQrs8^h9EmH*M#D+XN3M|k^DE0gCY^CyErl*WJK*GJAegrW!rIvQkpCEv$nFX z34B@)F|n>C#H!R;m#fiHEL>DBgs^e{F#w3P2)W!F`w(6-zGS)EZ{#IWWt4QD0;PZ~ z6ZvdA#lgV*l0|7M&nQazdXuG=3bS@QV>R@AO8SC}*dXzF;OjZUsJ6m_{Tw%BSW@PY zQBJd0C}32?g|ftj=ii!)13_rV@(OIY} zOP=}6c@U~%bNxrz4!soi@Fjug-CcM?oNoXGCIe=y%tj0E78Ux^VVfrJNVT-C7@74J z{A%6=2fhGY%%=psSA8Llx+a5C_T#G`#kV)D5wyU^VF|pvl{{t#j`Kxs%l(<~jMFuD z+wXZhl)xpdL(u*jnQi9l8E=~>I$5`%v=?DN^yY`Obi2g;aWtDa`kY!AOInOPnM1J~@5k;Vm((Yelsm!Y|LdH}x>irJ z)}duudY?KD{zjK_AGzaW3$1B22v;P;JT(O?YqMA2W> zD{PU@UOmCv(`L$~jz)G@K{3;;&i;PU*YvaKfm;t@4VPgJvcIlMvIiq`(LH`k6g=QG zi5S~DoZ%h>9UTO9)aYve=!t(UZ1x_9OvHXl%YZm1Meh~2C8GFcYt$*LCPHa3s`Dqb zmj^WD)8__s-Uy_EX2*pKp-gVn8L}Tr8Zb9gNOXse$}@_+H;jt(xUGDL4AyA~qf(ru_HV zuobS@g;D*E^q(=d_aE&sHCoIXpvPk=U4b;1^Brs)zc(47t3;V;wAyb6c^EaTM<+hXbZF2kDc+{9(#`ssWILwR|a@2Pf^6cBE(*wc0s_KHB-LuyF13Rf+g zAAJzIG6_TR^(;;l1!igoZv+-cN`)7+yL>2WaNPOJ^|Xsc@zE(dydx@s=lQF02=_+{ zeLNn_lHK^+gEcoSKmgfEBjXR!Gj3$cue-!(dy(PHCIkz_>FXG-PZCQbBeyA~bc8hE zAH=H48^q{)BEXh^7l;;RJwH+Vt;~twGPb`z%b^q5i;ue&WDv79Lz`dR(E=%#sg^D} zr)U}^v5gH*i!oS6O2Xi)r>@}I0hW4H)15L0*I9OB)|oZSd?b)(-}N#TA$H29<&#cU zNhNJmJ*lH_kitLV#D8$jBX}&?%03~hFSoncije2btPS5EfEu4L02dT2dO(?M5rr1x z=ooTh5UC>MRjy$Q_t`Z9URK`Qfeg^(%wUgIsJ=tDI+r5fmBC!k;eA38S>xSX4z@s=FVC%KNWyn9cKqRn^L3*a% zR2f(te(SmfV_%q=w?~c~<)lpwk-}l$L7e5afoZMR*>)>75 zz1-2`cN%f~@d$Slnr2OpKyGrnxcuTA31LyCi(*no`h}8EY%>L7mAz1l`chm?REc?c z4c1wLwO{-o3GEids8lfBwO+=inmtx$i=tjPzprpf1Bu_>GgPH}r=FsK%!8ZoOx$Nl z{dFCDMSxIE4=%4m>_so6wQpz+l0B=}%LSaT0|SUc6{pwvjcA7l32wtq7%sNjuC5r| z-uv73+$|~GQ>6^XDPk~0A{5BC*9ExPAASNf4~XC&TPAKV3}GGkehm^Cc;g}i`-Xw- zc>ERLVgR60&Md%njeY8Ykighjdj<&q+$WV^HX#E20$ff1PnP)K+xmaySNw+_qahA)}SqG8f4^y?4P3`Mqh557L?u#PmJ4Ysjl_ZhzbK%2`pH{hmkd!j8aRNKmgh%#I?EjAQiguBS@Y`?TodQtSy(RX(a0@?bsS`zoJ*)NW}EEHseK{9YcnPTF*G!g2ID4kmjvf zsCaOxb~11Vf)xqy%W!bQ%(ItOKGN>5=DiZCy{JxEL*0kZ{oeSF!lVoP0bFWq{^gqe zp}Q1+)jiyEG>M0y4L1LTjo(xm%iQpx@L{J(iA_P<|Ve}nvgqx*la zs4)I}X8vpGJYo1cB zxQ;%ydcwR7r|w!5?MV~2_PjN^no%#@AyI^}BYNue2&Z>O8k(sQu)|g~bW=)9VxKq? 
z#a0l#q)68Fi~DS86iixFsTOf>wp(!RRv$16$l8grF3LeD!USVvBfMy(TsRScM#M3M zv2&?4bh7W6gZg>f1 zQ53$ca;acx_8IM4&-wbpO1)&ubJ$2M_aT>cga}5DX*%rerA3yf=%sK$=*YC4_Q>o{^sG!WfXu(&}6-*gSSr3OE@zuqSRiLs`Q61eRn%U8;LK_5Wm|Q=CnuO{SW{D#zC|pqi zFe06W5m?x{u?z$Pl)fp{SF*5DPAVIjeWbw0XK#YH$B~w@Bv{_#p02b(5g;p+HVf93 zhomrG!H)JlLTMDg=@hPF@{|5y_U`Hfakh&|+Bx71`k%ab=y28LdVr{^A^>eC+q`@7D}72KUST-dIG0p~?n-LW;zkxFuKunRnUP=m!Pt*vQ|uyUovc^P`NQHCqdp7{-)^Z-h&Do`uT)pH z6<3e&D^cyqnM5j~dd$8|$*u%qZ&XUv!cC&*7EV!TY~CFa{`L1e%0KQSUls*4`4?(H z_J7uY+`sSRpFFXGwVthmsg2{m!s-9Kb&V>&90*o_@_r>9rxmMjfL%I3jP|bs|j+IjnE0uakKhxS$ zgpPmhL&bZ9UeskO7TQ|_rJ-L$I6n%MejY-NgkjSCs-rXT}&z$1V0=X=3fTQ85puD2X|Y_5_L zum^J`xyGCdrLjW)oPutKv0T4=FvW+h_So)scY?+2SWeTp>7=l-ipBDis8X|-Z^cSX z&jCFnoT|2kc__28!RoZbXNNDL736^t_R1>e7=VYG{tpHLL8_Z9?o$}NjZ0G!Pp(aBiqI;pyrvunz5fZZaQErhn zVou+Z)?JNddNP8v(a=LA+1&E}W-CP(-gLm<3)-yP2dI~lZxkNKW>TBJ1H%Bv z9k<}8NXE3MQP=CvSJ&f?Ck`+xH8tO$lV8~myjB(02OO?Sqo`K70LRoQTi=U-RuSW4 zRA4U^U!DdX=g1q3FNAKKlXMABw^gJB;)#al>g7d2#P znc#T|{<8_1MK?aLUC6BD*aj27LJl)?x`t0>GmyKiHrhTcOtxEc7TjObgoKPg0bxxJ z;!*AD3gR1m6^o0Y1S&|trR0qjd1RfvTrNk^TC0a!nQqVs3$+C@)flFIqoA{+gGM`M zfEH(&^4Vbi{0p&()JUAGnm{onbWuj$JP1}#UR3Gu+DsWrf)6MLiV>M~beXucZdq~eoVAbvx7zWfc9sp+H zfxkYbpOe-}X{D)uG*bcZT8Mt6c*8-do4i-yq7~(MwWi#Mp}_OcjE*!H#J5L?W0xl?k1UM&)_b9~xz;-)uCTg$+d;%{ z&OKpXGoa;TyAW>hUr5>%sCeL;fK(}bygAWE1Akrs@jE}qHu}_f>9fPIR}RkjeWW09 zkOAj=c<+{4W&9naz9UI?M$II`s6HNno<73y`Qe zT7r?}GL=QTk&NID%0NW2YK3K-%(#2}bB3J|QY1s`E_ZLG@{NBZYK32Y5x(6lF}DhE{7D=a6bT!^#SRJA$cBdq}Y(|`pg z=mcF9_;oqZcslFYYpslwiTMta*jdIQ5mr-_lRI-`VhchGx8q1&S4{Fu@*)S&mUFlS zTlypzg9byhMs~>_W4Q6MY^~Bz_ox_^4*2~%c`J;uEeOOO7&3p02r;9Ul3P=EjWN{Y zNK0})7LU_tpM^*qnVwFGC7(fHk!)gXjBbt!#L8@ArIHTNjDQko8B~s9qHSvimqv{C zKTUyLPIx3wK)k=*>x=H8q1#1c3NmKK9 zB~yiQ(F3a*pZdr^c3hg|X03IOat=E~rNHjdZ;6w>7HLa6uAvyr*{UBrbB%3d9t_^! zUOHkgV}B2c8@SgHI7qXQzjiU6_+UC_3R9FpdU(2)h(s;A^&`Y~fa+wZ1k{ zjxvLVhUoh~jHcGN=!e%l?Yc?h9@ix@0l)N@rG~;CXxcS{{y0+;91eGWM0ORK(d+INLE8IlqawtJ z7Q{{)%_xQFD&FQFl1Q!VT;uK?acb<^{O_5zMk|}nS!d~U!meA>RCE`Xbnpm5@!@wN z523!Jw{?$6Fc!BADO7_j})LA)#&z0GNU> zo*3k)Bw_l{K@sJH90_bp-e2os?S;3KI!SF^YUo*hZDb2@7nUAR6Y3mCn+&0la>e!! z%$cg#dW%yQLQ(D6L;p&&Z4%``!f^gvnCH5YePdG|zQVd;%-vQN=J)WllwP4Y?rRhz zVbNTT#45};pynBdEzmQDcoY(i>lld9mGxHDF$rZ;+3d>iL3|yaKD`6rzzvI)&cHRh z5IBBOtW1TCFo`Bs4drZ^TXm5!Iw0R3&oVbSO zx>-2n`C}`_CU!lGVuK&r~#5X0QE$T^&p?n$4pfsQ~Ty_DC zKT!HgZ!A#ZdOyOSqO9}`dh=Lm@IHW4`7R(;m5iaSfusw0Kbcbs+V*(^Y+*2ep~j@s zJ7IJKcCM`RttMY*cPn=J^f^DAl!zee4j%FPCqAQ^8hjvgO-%QUzhO!;@CL6>(Bcig zaT=z6`d}aN?tU4OB*+~XT|PpP*_IqX0*8s>cSQI@iOBMENcLfE4ATP@z=atkjQBkc4VI%1yu*0pueoJw*5c&0)&HMa%CW+PrK8*Poo3Z3uUKXwArakc;C z;(fTDOs`skH13zkeB#xHsV>D^$tKg8pWuFRVd;JHV6w)NZ}nki>HRe4RBQG+gxtMq z<2QPvveWZ>*z`01a2c2Ops1_@8`a(?{3kzZIA%&CckM~!xKKMS>iZL8uICNQB1ehPsqw5B@{sO()d(6&Eq2Y~)gFkPxe?UmHi z9OjPuuNgX+PSW9S2pv}-K$(VNocLNW)SRDD!+=XciKn1RY3LQ$a#Y`=?NmF7cFMFf zL!$s1dwW!FCgpXytAK=dES^5+G2YNU1Y}5ht4DdBNMb(!MR=~GVu71~;omaI{|O`f zbGbq6@3F<|PdOkhzpJAW|4(D%FAesezbF`e?KkLIf0D5*0Q6U_3vh)11;AYv5`qzDNB0ZWacuq)1e`CQ(Lh=3lfhxIcP81eM87nw4sY z{OSKWXmL|FBeHxe(z;<{tO=(9avPI*UQfII@%wPP+v^#+2f+z_3_H2?2mucqHax8`4lG;$K^^MTTr}^OY0K0VBhs=`bcVDD|0~xDD#vYNNB!5o z)r7tT>+NPx2b-DBy+XG^n>Bt)IL33x&i=vWFr!fT2I2JR)n? 
z{MV{KE85VrEgF|Dn@Xn5=CRJLThC$6R%jzF*u4h@b&Z!sc-BcfiZr7t=S3uEWzjg@ zCWfokO398Tgo*7`$Fux7iJr;{_ZBlS&CxS9uRX$PscnfEzFYVd5A0jkJaJA`F#rk05^s>y!`-fJ~e#c8Oic>h?G%}Q_<}iTq&%L>3TkvQu zrYWRj${?gHdLj6J&X{lo=OuF}oUqoKqE#?Xa3|g+dH)DWHf4Af59T6KE+0nxIe;38 z2s%Bq-og{ud_C1o$~bJf4WN|r|&NMmnk`@>}rplZ&_>?6xX~hGU^{MH&)+a6@cjPJE z8~{G`Xx0(GnRQxp4~PtJ#Yxar?AE|c2#;nQ+AD|W`l7UI@%S1`=i%i?4eBm2H&6kV zff9eMQs>8c3V+wl;7Z+a!j@Db8H9~wT6zLH+gp+HfUj22O#edSQQ8E4mr9n{HN4P# zdc|1u<(K>YnIX<$dv|bbT*M{P@CKe{3%mUN+c9{uci-X^eARn|Gvrec6uPlw%fhLJ zhj{MRxPTU(S;KxKVTbodf;AXM>nY1FO-zQ`6Ocl;{^6y7$CJC&t_WLWuqq=NBw?47 zLAt9hHl`Ueqgf|aC6Zakq~fRDSTEIKOX8AF&bHcOq2vejA7F(2GMJ>!7Z{=UUtq*P znJwnOZ}s2cpq|5*cPy>g*KUQ0p5?!?!2dudjD%bajBNjKtoaKVjJNvQwLlu4HZzl; z3<(5q;+YVa1TL2HOu=j+_F)^9m8Xy47Pa?ET(Z({QvKtwVL6 zb$7dDf8X?;W`7)>w%G>g2><~uoWq4pzaP1j4@lH+mf$R~h&=rguUrqIPCdrZiLuP*Z>al!D7i)`XDan!S zM0OGlw$y-vcw;4O=;(UN)ymUTUG=y+pN4Xa6hc~y)QXnR!k-QMP6PhxvO6d`-SSc&symEHl5}a-s0Quluk2bg z4f(f&+T9uQ)Y)VqAPYwFf@XC`Lng^8d46q^$Mx`PPnbVTqc6(2h%}yfvW)0#o$kB;l2ScXJcRDaZ<# z&N&{LLqgLgc0OzYD;Vw5KIq`zB)9$O#`kbcE`9464gsGORQowYT!`NMnWpXS#E*9% z>O1H*fkBN}gSl(r2mW*>o0SJ!2>*h?ga1pk^Ursf`2Xv)3mO^geID6T0=EYEW8LZ*I~{d$-5t1YKfh+{e14;gis75&%ryxS znn!@@yvv&qpj34sIo|7Iq;Dt)T1MYD=D*S3H=+W!sX2r#N4G5DOUu zQ4T}HsWcjRv>t_X)X97Rv#sWQN~1(Ld0;1tO#-VXb2F8@toe7tL zU3=UpSeBW}$rJyk$08!J+!C@QBbfx{okc51W-g=ifunlAQAcVI=1Htc5p`0(xtt4# zc7mnJT_26y!htNg8gkb(WuJ_f1*nEte`y=e1zc_e4TL}%urz~THr8!?!<9WP9grEl zqPYky{=QzJ_;Q(mQA@n}5bwq}Q??{xsMAdP@nQP-*U)-@Jjy{U zdwE->ej&#(-a`z3rxae0D~16U(x`cr$AT7tmMZ>XcPJZH{L%1Yb_WqMG!nb#BS?61 z>T*5cd~DHu&mhX5q_hn{^%M+`nOeKN-!A|?$wuH(^6E8g>zwc?4(b!>AW?{H)+xMonu;5B<_%5 zscuRnIctq|yPPEY77&G-_drY@I`r&}6gb=QK)7|*CEx#Ta_mafx}m^(NEfp))LqG5&~gD z5za70d##qO9}6<|f(vO?dUrh?F!iGQi1uMZMU+gD8B0B^QqW zta9$S`)}4?1T*4)r6vDUA4~jqmHYo87>NFZW{6aN{UY~}KB@rp`^tQ{2Ef6!=+y#r z2YD9E;Txc#qsRK^*7o8o_3&2*HzNyWQkQr>!#)5m8rX;>e@SMR@;VkVen?nNx26Ld zLr~Lgw7WlUTzMR4TxIN5cYD8L_qcjc!eur=N)NS|9YOiNs7+6_0$B@Bp8_^k6?10! 
zpdo^zD=Xt=Y0qB$8U<7%W^76%XN%Hv72B-8GbUlDYHqNi)*>M*kLfnrphT-WMi3Ow z8Lj8$R^h;8h0w>q`Bth2VS8BW12R2jda=1tz0^AcH7d9&Si)=oIgxz3yy(aX=xI z9RxmGuldKXEv97Z;T486jVyRmn4_!dJ72;?@h9-f!yRW-eqTRAk6o<5Qj>bi zDHBgQ4haTLAl^)s9KOS1NI)xjJ+We4SK}>F?dIjk^9mz3Jx!vldc;z6-{1E9E~ycnM)CKHo!!6-b#Uf&^72w<>9)s?!-fC}if{yb8OI8e*GeE-Y?Cs}?6q+p zC4R>qFRUO@uM@Xz%^8~WjO8zJxj{D^gStietN+lT2!G~eTnZyG{}p31 zK_xlt0U9Phv=}Y%5-ys!bzU{ry}%KN`r^UfX0NNLGZvC)8>HNSN!aZd9w(oBe?NPKXCM@A;RenJ%@#B=v^xmdCIF3Lm1} z9bha?fm3)?>%wKAONdYv@yZxL$SZK~43^6%ZhkeEn3dnxF#|64yvks{zKzyj%4$OK zsDi7Nf~A1>p7a`wxBx?`M5Nftvt-1a>F4B65ET$$ari5*RnMuFQ^`Rk+Xa$Bad-0fOzf)jz|8*Q_r$h@cfGk5&;Bg{MH*pcQ<&jZvwGa`0o+o(LG6VCm<25uJJlPs#>P!X1fQ zTJKjPU`l1ln#8m>hpXmDv36Mu<-IH586BHVeq3=IOm{*y(CD>_D=GUav5WnpuqU3s zB4>ipExVFg;0k`KTS>6 z)5~PRq11S+1*a=$@_1Xxfq8z+^6ug~EV?(RA@n^mi4c!hI6ljn-=Bs-e z)B6ldWx8d&m(?8Z;{8O(QpGw`!0YXX!_-}d!}Lb?$Hz6;w}KrAzw$Ng`?EEN&PsgX z@9Oc;Ye$$%j6W5wFw|`dbxf>+cFyVZXD{$16QWm_CDBvdG)h-gQS>OXo{-K*oH3D{ zDk?D7%K3_m9-P!B3F2f_?*hn+JHOIGmfgm*wBd!hdi(_dAvD3~P9Na=I-ihL($T38 ze_p7Tr?1NA8Ky$gFgy}OcvA@n*a6SZbkR!$iS_L@^#e<`^?)gW$2+Bo2d9dRk(X!J zB%vVg;yhb+({xbthT}htXV;9n;eF$SDAGGhdq|>!?*k_ksql7G0_U-=-3kVy3F)D_ zP^-N1Eh*H;I1){AEqH`K8vE41p`h{3%iPu+zEH=D!0?}nqz4&@Gg={jcCRSbP@(Gp zqwi|BSd99vxu^lk2*%By$-X%=S=_Y64O`)(f9h$Ncm{u{&^Y$v2pp_+bF%$S=87Me z)R8hCi`*p*dua*AGk)kDe{DX^ia)_(k22E`Z(&NRGWy9Mv0LJ5WF!~qe$>OLF>%`h zX?UyX<%OS0H-I%weJ;6=I$>ozeD7ApY)yi*Zmju^5o~^3+S<@nL-?RDg(^|C!NVVY&Hot5{3_?a?74-=W0@py{G=93XHnxpIie!mM?WmQUZ*6#R=O{;=JiC)a z^$|QN-1YG@!00P;oJtq64l~l7B6=l7FVWv%j%#*X&espbjU=US% zt2XF_YSaj8Hp95qp@!!5jT}ta&Odv&$BD?jf>yJkW4d)>)hE18e}MR;KqB+%Y&HPn zkwFr?cVVoZH13t_6kI<5(kQ9x{=*%XgaDWlsDFWTEo*DJ6hf*0B+YWAgza3&Y6O6}?ZI;9ZU~W|77$;0O~_O@?JGKa=OYwl;f;yIYDOj>On9`H z!D=n)+wLH5Z@LGp!LG!$H>^ST57Qfj1BsA88SeszsVF+Vco0*lKD2-nR39OJ<7)jL zE~;-U<-u^xt9>y(M6R^!^hx0Pw3Z{Cg4#)wY&S)5-TpS8+y&l1kJGGST80pGLLMJ_ zz=2Dq)oiMb2&kql94}BTrcU!W81J}`dhh=-5?z21lYeWn{@J2Y{oSHj z8#y}L*jvzkNwN&SP*j`$h?V%m8=LXZdh%1@;!p1-jU?*wA3|`zf@-zYBYscN^l|~f z^6(0#d()>3J#$7ytD6Kbie5>Ai0@{5{ZHUSoa;*dYb5Kd6WoszsScN3uQxZ0KCV>s zhT~=MZu*O`zyehj9cdxs1Y8KamIpUJjE=OfzO?U800JgmvU+q$i!i>g1n@6hB_VXT zi5uGP6Fp1lMdlvOLt5l}s1y~g>AAOpNToA&Ut+9g{LGjv5X4QEHeL0a7^4$=MM4z( ze%n&1PXd9Us9H zc3GEbp+8(|2>|w=`A>at;pbGUnx_kqzWC$$m%K=P%@KXLekj5VI}(~WZCcsb8v3Hc z2>lGb9aPtz!>rc^w;#HBTqqyaw$yE^NS-z`pLscoX6-RriM()M9 z--J6ROS7Wgbm7uBEM70G%8s?hT)CVz`GF&{O3B^}7L|DvX@+}46>5rehy5D}p+ECdd_31I^{EMpBvbcxd(?Z%CpzXJ5}Rd`0BH`~?2_ zh9vm=SpPYBC^?#0I{X_*q5tbJ%S(OvV8D5KgG%!2CV|yLA2yqa3HQ62%dEA@>yJS&AUwqwdM z8-=9`;jSm+L8?T79*MqRp7SlfW&&}aS<|^TfJESDJCokK&DpR&QKW0ar7?&2Xb4!L zCMG3Ll3r_zZ+gSx*1+ToBQ|-wD7OdxoPop0ObW}y5cVx=oLvGrPH<_6tGVb=zmqcq z7lU}HzNW0=Xl3P;xA}q?=8avl6Sg_^9`<0oAQ6-6+zjQET-UgoWy2{yTS02BfDR)s zbsG`#*koUxWS}y1*lqC{8!o-G_5ft-s}c8=vFq{>DOE?Y&#pbpbUY*PiB7jqllVyl zbL2YVm~f~DwHo|G-Ve0s=*quTn z_)=L{@VpAuFxl(MoKOOyz%WzFP>*yP0rZtGCoQ}an_GWj;QUZ8rW+CTD@)ezVVj1| z#>ZK&Y>!#bH+Od`-|AgPVkla3O~Q#!!GpGtyBfz?HNULh+OCT{(>XJ84Yr&L;sAOz z>Jh(^cR-xucT7BY#V#u#r<1GP)Li;{lF)~oe8ZFvg*)w))py5LTd|`4_&NLhSyG2VS-?+(_n7beg28aeIhLv z)Bu5~#i2smFB=a9$^c~m-(&u*qMLkW;D<6gs7$FNe%)m1$?y+^WBQSqPu>JyNGL6e zib^}$7=VWvR{FeO*h4~T!cKWD{f{nNyJczK%B%2BKYXW|z_p6rW<74TO90gnpNB8I zu5|*+BzZxp&IAWHY1=`NL&~fzNi^7S=y-}l_FK~WkPXei?`|?g>ZOWvgy?Qb7tBxC zEUF58w)VQH&(gwtElBJ|3+MvP5C%upah2YB_MK{wC^Yg-_FU0Z#A4}2@!>5cvUAO_ zhZh93BD>!(LYst!y1|&FJO2){NE;jflbZDz z-m{fZb2+g&M_hTc!qUjMrF+VSYBlf?V^^1%8H=<7C> zg@|%^Y&@{_4+`Ngu~pl%lf2=nFPBee%8r!#8o0Cchw2SGc(P*YnNArVqazsX!Ix%1 zwK!VYWR3SAe`uc4d@g+S)mV3)In1i7*MDvk_5yT46)TQ79H5nq3Fo9_499{hSvXGUQQ+FvH zX;>!#g63-v$s9X-(op&cS5ey4A6U?7OvchEExvLuJ!)dOrCU1BUpyGpb@uF+wB5b55& 
zax~P#0o735oRUaITt~#*mwSFew{)j8;Gj*h#Z8NRJK+d+Ib9A|XR_ITysP`DC9%ij zTl-DyC;jYN1iIld>1iT@Hj4vf7UU8IlBm>x!mP!O`=|I;tL1NJSt+`?g5f+UGv!1o ztkK~9??ViFHo7aC#Fe;{U~P(yCCk|{uIN#n$-=R6nr@q~Hc5M#>@%wKxRHkKLpHYe zj%Me-OLTpBQjED(BcbwWzj=&duS-~Y4MSf2@yvW+ewMy6(BsAE$evV~fHR(#-=m-W zg~^z(322|ZLsvE~-Z3sTDp5Z^Qf7CwI{dARLd7#UJl478c8HY>e3(fKfo51x8 z#u9FkBKtN9b^PadzAQ#Za5e`j6JjyfK>=PIKd|5R#69V~kUt11GyhRf{5@_^{e_f& zax;}|L1)?H-x!)gcE%<~B!8alDCJd2>zu-eNy8UQoS2lzrMAcL`AHSgd&F%9Q@-=V?wBZn5GA2W1 zAgcl?^gZa9lM*VUE~Hc|BEWdIm5Q>fTm-G7mP>XvGIk5G?MA*FbqJX~!YSTSJ#n0e!v+I6wO=G&e%Ew6Y$%G=US8+m zy-*E}yNL>W@(<-kc)T8E;Th5lW@41Nk3)ld+6luW)3FLqR;dZ+*(C;aY%!bSr*RgS zDFI+YeoV2KKPpymz7Hlpz#T1-fEU)f*d9&obI60>63AY6+Uv5`Pm}mDuZ?)aS%G9f zc1DX-n9VTv`Ag+{&#|2Ps0Xs|YF!Ls&<2QYl0AfA;^JU$T+T z$WsIy!+_erUl)$kQz8S8AF8757@mmsmSXz4^ZMR1r*qH3iti$UMTeBpc9Lh z^qw9W!ORpi(Ai=n5lh^7?PFR^?d0nWF1u&%4wK5^Y}FY|;u8)PMJ{QQ^5A5-q6tCS zn)!c}2Y;XH5dMX{|21R#$7DtWRvTy8;}jgnku=PLA$4CTO%s@xna`Woi|>;PZPJ@9T29Zgy>^)`+&UyOHf&})R-eT$ zy}Sw~y4jd;f$veC4|9HQbUt=p`rO}t-M-O#+$(wiDN^ktw~;3;p4xL`Pq6fk1d^(KCzGI~mLZ_o_Be%f-AR`4*;AprH^pkRP{zq?y_w8Rm7 zpQNIO{2Yu4di;W*(CKLqvXRAWd3y9@mQsBL%z%(uq7{TvJbYfAwWbuKWZeE7k?x5pQ{?F{%$;#~69W02=SVBpPYI0;t?uz4)*yhH>G0M6i zZ0uP^z}g-C`0@Ff^9-+nw3ayKvUWOZYRdGk8iDbP^%S6H*8 zOq)nqzE|m~9g@XsE(7>HOHz${cf!)GxfyAJrDsEyRUL_Qo0(}ErSS1AUvyi;B!P3`HT)$`Yg9fUt{KTFu6S+* zlC+8ex~a3#04Jh3Z)~Z$DQ=L~o$H=*<8DHFgO|*XZvfZ9%~S1S5M%BM4keJmd?*ZI zs2OURYKTkSJk)_Z(%#mteXT)!JmKKGx~)KvQ!!3teOg{F11r;q>`#l+@i*--31mkW ztW5kE%j55K$fm>NWy9%>0!%^weYQ6Jvg4ecE_-T$q>REcWeQ~(HvvFJ6ZF*`p@c<3 zCbJBi03=R=-qwIf!Ft7Fs&u2_BZAp7*s@BbTnJzA>^yHzvN(pB@l&5cCL|Dc?Pr2H zM;yA8@8LF0V%E4}o-~z4%vY7FRhwBq0?D4=9-k~z6 z8xr3ad-nzs3}_kf$g)H-loc7p40v9_aD3iX zCWh~)X0J0sSD@{`K?KNK1y_AANEwnCQ7i4vrcn^b=7E_-cLYCrBCZ^es{0TSEzXA2 z`}da4P++##BW*28?>!}@D#n+;lbrJT&Vti1FeRHh)=6_$4ScVwj8`F!=r2iUuuQ~@ zQ80|!UVy<``wD2ma;u3InLPpXO1NOnnA;OMxstsYA#b@fzVH|VuZWbn&(9vIH>B<5 znR}~IaK=oRDep&r9R`U%l3WDuKZj74Y80$w<=O0MTNyM$N+myGJv2h|Lgoli6!HLU zzoKj(E(zoWEFzsW4_s>ArlX%vT7+>F&*zvGd*(oI1WhSwe=bW*&txiJYW~zL!Qadr zHlX)K&lr_YjhuZ+|AFN} zrrpi@a)t5GNmRXc3m(;s;q|GNw~)Lnn8E^cDbst2h~Z}SiVOdN=!G_oq#`3T{OVa+ z$*z|mRzg~92!+FREY?CA;|1xoxav6^#?KnKks~i7d29i9%*TZBmf5D`?A5s>9E*kO zckJJ?RkI-1M09-;G&9(?Wye}AhuFnPbZZCsfc%0-rG!Qu{q~+Phg+W}TjYVf`3?Pr zNyMAGDt_v|7=50UrHW)JDq<&Wd{9wYFd`x1CvTzjxd~&s@1ocdbNmg%T;C6j}2`$$4?i;tM8>bE|1DW%XzoZaa&sd^P<{5QK1tiluwsnle zvu;i#>|7Oo7K`iXvb_*+*haWzrmNdgRt*i$+fARSmD$Y3J85R3W)il>|J;~%(p4$Q zvbfB5l=+#Sa)4V*bM8V+6j>S;X*tapYK-o*%;6L4Ae=$n zoV17g!%A6*sM_S~4V+Qd468tfH!G$Py`nJcY*xGDQ7opF{`&&Y9>r}-ygm_>+jjGSjJe2!=C zQ<`>r?P?&Bsty&VnCyZvn8*`>dGUwR@qK!eD!72FHB5`@@JcAbR>2?8i&3AF%c?Mmh>MVY)9Fn`ZeyqeAN) z14ko2c%C|(e|UHiq~$zOa-WRj+oq%ITsz(-FT@^D$l=uDX^iEjuqTvBnBI48y!Nl< zMbi!cF0sv>B2-C-`m9xqy@O(s^eTd@7X?O__|)2`Eiy1FzW;;DtfOeZvvlm+qNE}n zXFoVp4olf_pb!-#?q$nWlQzGGgl=Vw440hT2Q}EW+{`Rfu zD{Gg0=Lbi32m4$zGN45$HWUQleAoTnLQlv^GP%gynDCL4JkwO)6^Q_4DXg4@zvMk2|30}3g-X}xRom+Fhs)a{2`PGi!K}u$5|_AA18{v>_a?DJ zok2@7R_r~ZQ9GKgvnHnF*mbRtFd72Op32e5&FJB_cq%nFK5# zlPcVvjX6ZhBED_S`d;2kAjk`fl-FCCb{L?F*n~0BXbJ|E6S)zqDLyqf+4csr(4|C~ zG+PDf>gZHgBB$SV=Kdz{zE5&dV9EfXQma3Y(c&?-!U7f&d(I{4386Skh(iK4De;Cmx z%Qp2Bo}8E`8h-6l+ZaX8P!w#DqnkU2;u6#HxI;AhTbLoiVp6(|MLKn=?L;Cv%nWe> z_Y{r%*06`1*rZy!<_FgU&;rCCjs#U^L7%(Ff6^qGJWtKO?Y;=t6=>Z#YCEvAE^yW) zo?p^Ge0U-`j@0JSBwV|C496349Jod*O1)Ja`>C&M>%6rP&Z{%-5Q00D2-}ud$p7(V z6fgTlecxDMzKp6Nizj?a?T)&E>)NL7c&BY-`^UoRPm?6PJSjZRnW|DFXCH&l)bX!g z0}4g?JWcY+VhBYp1g*Iy%#u5fjAdqM1#J1O)alaY$1u+o5YMslme214MhnwsqNKZ2 zhcLd0ovTCOUV&MD50*RwM|y|aAUlDnvsFCvjTZykh&Pm9&CN)c|1v^9$3Fp}&G1|S 
z{9)VuQyb2NdGySVl6hfPIsh);J~q4DTp0UttHS=lB>fX^zFtY4y3n-YmFBdF;kbYi zw1Cxh_C7C||JL{E(^I*kZ(BU2-9iz{wX+{50T$)70k8xiXw%L zOW4C05Tl+gsM%!K?Z)(%qQ4nUTMAT!@eJlNExAMYwzL?CNuUhb3}@F8uiDDpUAfjX zI`>F$oxyThlV=h^^tsyVUkWVR^ciQ=AqfdVfn^7kyo*c&te*~EG93L}nc=#*#y=Ke zo57(!LeRYR9NEB)s@6A}Hed6$%nc^!yIy@60@Kc_d zI{)+(JcQvht9x4}hW7i9R1D?Q-iLfSzOEytyZr851(BRO44%8v{He1~Piwmo)hiKQ zSEczCcllN^lK=xsWvZQ0w+Q;lI=4_HdZ8?Ck>ynZf~hO$5yW2J!)x<)SP=nXNPaC zv#xc!oihBqeRjFxg)fiwT`I@zM4Xa?yzFJ`gxWlN=jrJX?&@cb)-{IPHHFLj0_S~3 z`*KA2;%<-TyTjYrfA{j8^5quFEEwp3ZDAci&pdZhVKuZ3VEC3E>E+x*x$3#dJ(>GD zGUe}%=dfD}%7=FP=4D<-3FoQLH z4--tjSqBboGZX2>SmNKDj_TIxBW^#Cm=U`0zPD6-DlJL#M+rJ6s&MY{D#h~P6WN4k z_D#*-H)8$;kqKO7?vk_`&zg!w5+_gQg5Cm~Sa(KVwVQm}A~~3g)M)QZpZJ0(LjB36 zfyD`^-nv0iR3%;x%UYd}syz^2w!Ik0oIHDzr&4Knu0MyBkPVXQF?*(T|P3?i1s2&Q|=$VnF&z3$ZdGQA@1U)zCZ84fqI7WGIdHQpBfRA-` zTRfQn+lB3c3C=b7RlH#+2jtJNkb2)VB+>h2jl*Hq{O@h&g%B8a$uBEN!%lN^;(J9k z@w68EFuVHV?Z;bR*(=FM#Y?A$kv=P1#LHQR&HQqQI<()o!es*@U?WGT5_8){(apSC zzE@98o3hhtrL{U^8v4_mNkA$fmk0UXyE}@1pC10b7sdKlFY2#($^Sy-S8y?LF;TMc zH2J+=!0FEhRhEj4+^*=y?63yd=!7qXgmyhyeOBU%DWBlP9F=9G3;28Ak3hD9QhPOVv?@=_1uU9y8v&mVh)|RkifpxZ0Cub0puUjHj;3%4U@w zbpm;e=c`YFtGxQrAYhiIOz2}i{h$B@8Im`tic%F# z&_Iy^EA^Z(rqj7+{Q{a^W3`1nAIUmyx z0i(KUn4-ruv9p^SzNN}28ep!+XGJ*+Hw#MB+cn`(Y+Xj2ic*fvkVeX;Gm0g|R7WAO z(T4TXyWFFL$J_U~WAKhwz}S4pD_Ml`YC<*aVrTzTDjx5r#!1%OEK_3% zY1<_QR-;+(%kRMz>KftW@hrYtTy(c58Aa*~Kj_E{mtRNTjYPr=k_zqa8~Tr_L|s=s zcZ#=?;}a4NU>$@?8PQ1G7!#XFla1U~;bm)dbfx|%p2>lWP4TkOKz3%LS%0dFG%>|L zoUvMyK9Cin{*gveI}3liIi*#1HNH!6+|p^YcO^VLyrFK-XmbYV2WgFW1ud@^{uW-!&1+pyjYS;V3s*c75&_N zMc3+A1^#QSz!qPr1fQ`$E}J+UcP)S;H|j{+T>1GZrUyL8*CW#kIEuu{B%>BAkoy{|9eJ&etm z^|Gm=)#^9i-|X}7U3prNiv{X2JJt>XC{)##kh+d32F!Zy0`RUD1gNqX7IZ(W?_gEarxWo%!>2->rG zh}x4$g>;Z^keCtUG(mADo;Eo91r`6Wom)Z}m0ETS&VSpYf*6XFVQnM_sC{JQD71-@ zNC}{>nUg>X%d=~kkAgj688?MJ;kwlw5@^25iBAA#z6U=f-M++1J+mnG|EwLm@(fU$ z^p!^N09xHGLxq>*M}yfiQd4j_>6>Mrg=hWL{A#(F`IK<(_*M7SGhzE=D{Z#_rb@iA z-)+G)h8XVoJ$eJ`!qa{2{kh}*_DC(%&ko0z*kSXqFDr}dVeT!sBAJ>6&hem$UY|LB zEHzI@eqJW*+|YyYwReQU8=v0b-HKqJ4bZ_d?1LO;_~Tp{?hhFY@doVHz*!D&V3MD! z6G4N2MC#=1^LhaU)$7Ov;&I*_xtC+-p{FGcuF+v<}5CTw2XfShojfh{9Mo%{=n9=G02#7jyi|udX7Wg~!rt z#P%2Ytw5T)+a9!>p}J5DrQ2}F@1xh*BSX`*0tk6ZXn>62t$J}XF#v+?j_MobOh;%PhO>(iWbft zCZmyh{p953+UoZlOEd6_z#fEcmH)u@-?WwG*a^wGS z_Lf0)U|Y0qfk_Eu}&~F?_m? 
zq59A!b0W5|OQL5ed)k)ClAVPqBnq(DqN1O+{nY7j6k6Mb*;%dU#!|Urp#*RW*BejO z(cB;2ap1%!?gH_UDf02?mif!(Xn0J<=mT^vsS65fTJ47y+7V&B{bV zSzST_!Y-W`9J;KjOavW545#clbxAAiidn7uqG>uwOqOKl!1jjAl(upM5svT9B(-ce z2pjvrUc&_yb8bYXs-&J1)?$KidBC!tT!HZ%y%)D!UP6y)TlPI_kojr<7ek!~|LR>X z)VFpSX49Cy)V`cEE~^$MjG&53^!?iUwQ-1(-80LyxhvFhy3shuZSB#BX-v&u^>Qwd z7vw)Kioe9mxo{hK`D}TNj(>QhEXd4+W_DJPc^o}w*6|}$fya3GO*feR`~i#;HdN*~ zAm%oK;y+*>e{Rh}9RVNlV*R_L$|@BX#^M3JruJnhWU2z9y@5@9%jz1`0Sz;%cdI!3 z)T!S%XHjBmM(GorlM#m$J<5=VNRTga3ae5pYb=@Krnam5_ z+ujcp@k?ZF17?O#*H<6yWr3-_B$) zs&^j3!<#Dpc@r0S5w&liRUjTanF`_wazr)+;Gs76>NLPdOgMM^1+v)bvKzT$z?1wU zCC^RWsp;Yid3N)og#M?(ve?IjWgj6kdXGRb)LE9mjzCisB+Non>yQiw@078r)aNmi zWg6CdMmOnFjqF%6?yOp9OzSotH~!Zz6SVJ$rfzT>P z!V@-U&T)l2soV%czHQMY*MQY-Nw@LA#ij_c-6sgHLhg@r#diP7AW_Gz+aKkm=2Y52 zZ523R#jtoxt38&>#&P=f-TCBCcE!{7!=!YUOuu%K7}OdO`^TB+Vqv3Km#k9 z7eiN75!gH_g?nj7(bC-3&`o=`yD zM`#;QTOi-EX&`p2|ybwS5Yhh9Zfl}6d+|@p8;Iu<01PlY(NHg;SHglWNN1dI;&Ky^h%Jha@c~Yx!rkFYlJJ0ZJ%g*)MHB zFlrj4^UG|G!RL~YhiQ`fS}msO49f zR&&V{4o1WQocCON#H(5SOR#!UX4{^Q&=gA)V(W{R6Qw6Fw zNnSnlna!(ce-+9O>$#-4#nHQ@5aPfkP_mP9BBR%)lUoBED^bYu8sG9r_>*0SJ zs{Uof{m1r_n5B!tNAAhLoPr-m-p-ZR!NJZ{&+*^RdA^dVGLka77q!d)Wytrr?GUE9 zuVn<&{ivli^OMk7cy$V`s{j^V1A0FOBp9damyhY0MNREWX5#3|-djO>!<__z7O%^c z%b?-GbNyiLg8TjLj^!uU@1Cz?Ert`w`UzC%_mPPSg_;W;o2eg(mM;}a4SOoxiokt? z2>|~{6}zwQ0)9940$E&oUwh4^5`23bPP{)?a;JDsqkxY<=1yqnJq*7y_M?oIZ(`ri+nTW|EIL{0 zy#SW6n}#5Qh}W1sJYa3>PQjL|o-(q}bBYrTb#E73LZhuZIeecj7)_rIx+b8IpG0z@ zfnWcbl`Q5gpBC4Pua^#jih(VLK?n(89EQy_)CHXuw#h0bi{Za5pOJ(F4#G~wU$d8v z$c@w;`u#C?l+{o1~?h$7@5^OtuVDjCtDf2*&L@McN%1 ztxxO>98Tb5*>5HSRvYk-{Sc*u3EYn=hPUp)`@#|xA;)O)-EpBv!d+v&ZM0pN3JMk? zya7?$fHorqmx5ia!CEH~)!mGOIs#f^AgX(oA^zJyH2DK+Z~4GYR)ktQf-DtQuiTcT zo?1jORz3DZZOpq&v&`3w5`J(MI7)TpY1n6c1^hPWE3LSKp40d95HHw9jDq`B0K0_w z5*e<5EhH{|T9GXYHk38N0`80qWmCrHJ_ol7FZ&UUe5>^TeatbGZPl(gT z;N%(0F0o%Dq96&PBr{=Ez^%n;P${5D1tgB(vbt}wZkEJ#A)BG*d-u*5Sq)MdKs`2%$B(krmbA{ib7yF)I@EyS~*M}{52B> z$=OZHG{?w>xo@rAuTKE&ydd zD=VFpM5$#_MpqRGFlYB@>h-Ih@*_khX1(0-DsZnDqz_}KZ{UO8dC@Rsl8Eb67*K#E zilZpX8m9dW_Kz268IzJRx1R@o;{>pU5jbx)OYl}pz%E|ctedX=mVvmy&f&bA?5Uf{Pm*n z_c{EFsJFU*gC;-hU}>DTUv^y@Qrq+?R&s}vS}M45T^Hf-uqMLl$>99uWCaPiMEFG< zoq8`IL@}*fl{_E&SCa1Qq1HPoreeMIwrQH=`HIW8bVSn>(bbB^&3w> zbVRXSYEQ)NPu-)Tj^U=h?{LCyt+BS>~N1&Goy>%=qV z@~@!>xmsQwmSdk@IYfo*a%A6meD^uQ+2`On2sRzy+M+;7(71PIH)KMv3hfUOfBE`( zb!!~gRKdGN&o2Rf+9Q8jJ#V{_aYzDb{~~qK>`&c0sF*0>bH6Y}s6ZsX_a{U?z2VJ3 z^`j{`|Bt5NzxDNhVdwsW$h$C_Nm%O|{5u}UQrA__@Q+{qb0^bLcFJmo7MU~ANfHUf z(2#Z*ouUl>n+Vuf>-4BeooN|Jz5ve4{k-%2amNBy-ndFVK9FEYuAh;78ty1iihUs$ zcXv~FYzJ?@?_ME)3NI20^tO@HHy73sYOp;%8qg2*i=e_=5Z8TYbfzy4BhN%MQ(p=6 zf41VXlcD%Rbem0CHayn)dI07o+T@KtK!RT^mT!68s9s4TVKoqcB-u|HU$N+pm4%V( z^Iae)sESZZ`Yx=hrk*n?2p)f`2aBk;MrOZAS|#1Cv7)?2LXExRwyae1L>4I#y4;8U zT9HZ8@JcC9YPeHVs}LzU{uP~$TJX0<>f~bb$nvFFYz}>X%J8nL*$AJ(o|a)vOg<;T zhH3!j7@n9l+#syd4HAd%?M#W}66iaV5Cc!8LK9C#wafkIvOeEvoT)>iYd_hA9O zs6O1yNcxPW7#+U4AF1gK?)M%C&trcEFCK0}^LJG0?}U0{Kmo6P7==SZeaXF*@9cW;9c5|8?u97HVa+=5#C=xcw;2 zpML6qGmL$Cwy=2dZmO1{C{N<}undN#@O8kW)y)tuMm&dD)0j-FY~EPrH?v=YtaQs$9;hS$ky2ZA0$*(FzQNC0CI<>1-10J=0|GUF||XVH?ffHo@z=-b{eM4YB7dKSdA^}j`d z{z1kxb@(T3;V=C?UT);$5=G{WZS)g2Aq}`L`3wpzLZ_LdAQB~4Oe&MjgL+DQo|it5 z9L%9l_^kMyFiPd}^KUr!zB_%SY_aYH&r0Kni;q3Qr@Mne=}+v3w2Z<(g88dy2)Z{t zC*6qq`1(N0CFmz|5VPlOC;U0K0pV-3F=ETcfITVaQ5lRwg6F#!gzDWXl|CV}yEmOk zpLRVLx}6ye_?{dM8!ym}1F=%moZm7N2h%uGg+eMj7qe3~vixg4e#HFoG^kS~z;G#N zmtpSf@7u?$=u0XkJ6!w+7W)YN*<~c*g)yYZuAmih4O&_YGp<_@)bw>gxnP!G9{AhN z9x-B78uvDKV!O}O8f8qy-l^FQ>R}rv0A-wtO!E44QS`px8ary=10;QYoaI-6fCNT& zxxs=^ScR0P5OcyZ&S;>n^&Rzrm`Ka4N4g3E1IHWuJm4&wcv@e|So|6pQ&G4XO^rqo 
z-ITwl6ZKvIm(D@CLURxsSWlEeeV*O3iSv~ZN8ls>^QO^$PAIv3S9Lfm0@6S$&_K^c zWJ1Xdp57r~e)yIccso}>*=`nbgM!v!@Z{ze)&B+?5~kO@_AKE;ndV!Yzu_X@bqC^7 z`bi1bvvee`pa6j+oYOD$YHKt9KvUHzfP=e%do zb2z2$WS8JK_#aa+!Q^5G_{Y)f_@775^#9Y*Qxlgn)YG-lwff-2{lA$7c7_&)y7q>D zV=SeeKLYRVO-w(y$p2{Y>i=RP7oa7~D`tVfXz`RUg4BFWk#%?`@nBTGb=Q-PNAz7E z_<~dLDem3SU_>M)F7!-~OJR%&D|30UFlgJGCLk*jUoh zV0Y81&A6!t0d2(9T)U;}$aP8G-aWG+0lR+IiU3*|vYZG5p=<6|o8I9_Ji(|ElL@O{K~yE(ZCpO$gl~F_O=%lW z0v1*sc|zkObt6Fiz2-=4RdqQn~nXG}Z0qLvQf|>ra<#I(sdGRr@4sEuD zdUTpbAf{vpw*R$@2%z5_Q6_#zbu5hMtZX)+l5s*@87sc)0~ItZoKx`Z z6_%1~3YL}F5W#n_ihNrO^1^-|-Ej6XApWNjqt3@6L5vQJIQ!AdbN;ot7=zeKF9fQV zr+O`>ms9#H?DczC|0md*cTyh4VWU)vDMLK%z+6U~zz@gCUHlexJO5Ur#Dcu9f$ei4 z#1t8S>*bMHKG)`~SKY;zqezuJ`1YN;6r|AD4PC@se-0WA~D_ zqo(^TjeRNuTtb@+ZXQ04fdAkJh739V~?C5V~0$`&x<|}g=Gti(~H36!~pSJ*Mmo#6o z;+IS1uH`Y8G3xsFlIwdcLUa!ax39v|e88v=4?ptl%XLJnk$(_vm6xg~#z~bE&w2KOL=)#1G2e%cfj)-9FzAQf@u(U?mD95%NPfv&sJHMa zu(vyAX@_u=?NAMrW|wB!Z?NC@^kdWv56egV@nmg^?|&4U3*F&EV`#lj%-%TKlN1+d z)se?QW((&7&Gi;Fl+0{91v$+04vYOt@XR+kmbV!nQUS2;`XP&C-zA@x6AUNXzLAv02HYitM+AbT*yrKUQiS8?HHfPAH=c(HU9O5y@a? z{@CfzfNLb=H-6rW?g;e}{x$3CgLMrU;Hck|{E(F9m?RZe$c3!^?}T7^diBi&3c1iT zvOcdJV`q_&X3zVMqd#GO>~sswqt8}~E^ue3R?b0=}nZiS^+ zhm-dPDHj;5zH2Pt&<4Sz;um6ElQ|jx6y_cRc=z zpZ(4*i1hOKM}0K>M}7RaZ1AtH+Dz9;_m8KB4|J4;slMqyp;3ek|F|&9mCTh_gub?$ ze}@wSlaL32wsa{f{)vocGE+cI*q_v=0EUP}l0Q-32H_JGM0ilQ@#43fkr3x~2tO1# zd{ma#G{9WVS_oO|b~?Js?kv>$a(uV({wZk?kH6$__5*O7oS=hFiiM+Ep!>X@_6O6L zROo5*b0bB9%^Wz8?e(WJyo*t0=jQLBQLJXEd?P59-u6sOg4mq|#r=VAk(B(6rm0-L zMFrxC5@RgoqX{Qi&$y9el8;T&6|u6Y;F`K2?^Q7GGlD-3NqsCrt4-2Z z3F^M;B%UYZv|G5WMvl9_IM9;aE}tz?+@hy68=JQlrE|z}34J6Kbl@_q+HP21aZnFI z;cqB#b9%xoO@nJ~AUzdpZ)&Vlf~zsY7I*V;mqUj*8-=3sHLfNEZz150Rq|0Ewce>H zn`qC%N#C{p(uluhX9sRB9=(-#zq=#$?u~M(CT@s%!kkmz4gI9u&~#8~v$3cSjlWAd zn2TcAmOgm&3~OqMOtDE({DI$zcF(SY!2w#f|FY@iUwnjqK*yV+maMRdl#Q!enN<|G zL(?=$S{QG9AKtU19U9!ew=$}2lfI2{Dai8IgVFX&7*1|DO~EgQ5IrWkqKcL!!niwS zYKZsuy}06s+1L^uu13e9JU<+3tEuj3Uvh!CZcqrt)aQS6hbw;}Fg{xojVdtRfR3`i zy8-YYwb>1bt8`P}n{ZPsHrUw9vGN{(GeuY}TR06qqV!RD7`q^0Pi7up;*vgP=H|%HNjjyKNpj}Nl#-bge6^<^N*kXck`2%xQpquwo{WkyHY*p4?}f6tAXr&U--JhBe?HRE^#WZP$LF@llza`UQ5D? z)BQs)dIJ3W^!DGk{lDNn;56)(3f{gQJDvIQU^o? 
zOaL5B$%jZ!!a>C?v3m(4t(W4#6WzWjSk_)8-upYI=CBIN6WNE(Gb|SGElpl&j4BA% zA5)TU<)-C*@x1PS^Wfe2{y7V00e#nNras1_vA`aP(%Kx0wWpM6j(%J_PWK_i6O(;(m$}vX+JuzCelaR!g&lR1iS_3rZFv#@k z&e4X)nfp^cGl`MPef^0n@RgqbyG09JBl`RR4*ZNn4XUzm&mDk5KO zHYiUhR}>L>QizctWQPxW-ZcI#B>*i{Kc(A!ZIreV0u0}eUQ?)t9@s_z*2z)B#$1pg zliOmJmwUBEm*J3tQpQspwx{x|YMh0s*HT^b#^l8`u(7omxzX)+tk<~+6m%T~_}*Ln z93Y;7W3^H(Vqb<7a@3Y;$uLg>8pS{{g*`pNP|a(W3|tdtK1WUnW1cQfGTVT@0O1bI zeaL)FB#_7-BQN8zLZB?xsZ{?aG=}?IHCYgBP*m@g!tlP-s^fMs*LCujF_;qnao8fI ztWCD)N-99Pbo^=HyPGStd{l1SU;d;97|`IqMsEGK3fci_M7yH3y=zbpV$@X)JOQ*Uxh(MKd%GH zZ{snH3baEUS1oC6U|5rBxuJIoVYdXJL_TQoLH06B6LSzDWuV}N${+u*t9%t$2GmC{ z?T?$!>|24SFH-5F-OD7q2W8;kM&zIzq3Ry-HCB1x^8a!u?}+ood3UVz6;z>otjTm6 zO^YYbj^t?`XMlkapg9!w)s=Mvs^xI0-owDbEaK%bvqZG6=H~=S&>h$^Ev`~kbcXb9 zKbz-xyEfbw<@HcNKnoA`44vvdh~^EmhUfw%rNw+C2{8@`GP(ni!+wU!6lBz^HHe|d zXG7y~f16~69z?(AaIEJH8|f-C%(z{y!c7SSVE5hfQEv}b={jMLsjBR-Qp1E=kv3~M zJ%7p`N?YbwX0s85kd|R{r%%B+3vtd6ySDvIxrJ=|3u7+mZmRG$%1ASU$APWwtC(zd z=s0gmFD&2$Z8B=831Q~@cnv?G|AX*;)kEY;?@bZ*Sq54T z%EgW&8=eq(knF@nQJy|3MWE^VNCRM^ga zaL1Kg#^}s_7dZCw|8>c5jew}lcWbGHPh8e0caT^A)8P6@G^Bk7B&d^z_VHUtrTa@h zKE)8y#gc1}%;Bk*qx4uNt9opIiD=y2^z(1Rc}5fqQ*rsf%b64gkQD|>}?EDUmak%Oi80{H%q0=YgKva@s54DlfcfKoZ4fK?*M(5HJ>+FV- zeN;n3kPU5bfXfr;rCp@QXzn)-BpUYvWe)uF9{9&-rxOmj=h@Pcay6^D*E{b&v>C_5 z_u|Ko7TNTFUM$3akC7JE#xxSv#*(^LAEA8zrU26YCHe)(i_6c*eB^+X!-n&U_2XyB z)$6Ipsd=aitTQ;MVOtt@pEtByf^KOMi(=e5H#waQ1cdoUd>O3o9E#r&otD+gRoz`p z8+4v{8c)RS?C=7O8pQI`lErSNPLLF*?Gr0e+>dGW=RBHi?#FN|JV_vvbOz$ZjhE5; zqbc-?JNjGNnJX?v6z`723qmLvh*(|jLrkca#J_~bi`f<%)Dp-)wE?w25lmzSvKa)h z6IZO3ME5X&MP}WI8cq!*mg=mQhD^gKGJQIGP;n*w!M>Z z%YMPoSSj{a+u;zor-{ha8dJTz;*FTA1RdeDIppV7FFve6N9={D0*tkWBU%lzS8En< zV1{K5yH^|kL~&8IqLey`lV=D9{*0z&Sb?+v4RUCP(E!{8qq4n5>cZMyL|JPd4x->X z51cNxe3N7m*_>^FWN$h^%(n>a_*6T_QJk&7B0gY6DLl9IT*<^mMCXo2U7TILfC^I+ z#0FC`!PeVydH(yDx> z^^3>1g0!#g#f}@_fDZyWQ<1FgCz+5Ouns%&$QO9*F|YoQeO%)gJ26@I*JJ7?Th35{ z>+*31C%RGKUfXPQ+UN5Va`npA1|%K&Hn_^2-*0?6+~E`@rr8m8@j{_HzkssX#BPVp z+m?nlh@=Y-4b=SN?5hpF#EDs&XsUgI?_0#}LqFff>F@Q5SXl7fc<(aaL%`7oO8pSb z{S5z)rx+S^!>5*yp)uosKFx9bT@C*PApK!K<8w4JGPINYfPZ~dhP>gwfxaEAuE)p-GSX^Mz*cUEXY@qLhahx$S3@o_bWh)>liS&v9Usi1PghM@}$bR#a=y5PvHm zqQpb))Zz}i+~boo%B%%0{TgNKe41z;eZYm({v0QxD`PITnW8sU1KxZ2yMkul&q%(B zqUYwZr3l)4n4x(LqlKctQgkHK@NgJy9)0?YlwX<~=a$;47E*;V+x6YFJVe7NZF_&J z`l?AkRxY;cLbHPbLsZMftj9vki5pp{6)c7U3aJc6oSV)P?A9?6(1S@%FDb(E%S>T! zn9}A=3su;nJ68i0L)zBL$~|?jUB`1{*zP6Ul8PlW=T_pEbbj}e1Y{atCC#QvK2TW9 z@qiyM=mYGz22&GJc(#1Z=;Tkw#2Z_{vAvdRwGG0F@jH{IpS&obO+!&BSZ4#12s##T z)?FibmoWMB8&w>@$n}9A=F zVU*ax^J^f?w?VTwclA9cK+`To<@_|ia(S)FQ4lDY?M@pP-6OL&db_yWW0dWka^GS? 
zI5lO4xs@mz_rThUHulnEK=d*Z9F^nkeFbWV^wM-ooVE*mu)V{kDx4J0#Z30VyZk(0 z!i?U!i7AAy!!2Z+{*wmUgWxl_XqTAn_BX5Zoq1~wpv2D!@2oifIA0E!x(QNMBxth) zixxhN{$G(pDBRtP>^}K8H43ZQBGsg*LA$Hr^B01$Za*T!$&De|BEx>_jyJ%?2gR_npTFs3EgFJt?P#e>g0} z#3acElb@dtd89c_PyNpUb4AOjL`5`6G--ZEXxgb3Y9f3gIjovaMjEl z+vVV8BVHR}dPBpq-_*QZ34PKH znatyu;ba61lZ){as>wb^{a#0=?5WOW4$OwkP6js#>1-CeSPX@_9XejE@pSMg)9pqT zD~FQFx|A{;2fJrw@qiqm$;sx2p{a)PcK4Lqb6$C8?L#G8w3f<}3});>bXx3lCa~%x z2)Y>2SUzy~ZH9RAT%#8Nc597MO}DJs&c1Ctmmg-gCJ1^1$92AK+5 zt+lfiPZbxJ;HL)|Q-;`$Gjh=<O-FR3FMEKmQ*-r2d7L6 z3(m=57bii(XQfbGrpTW za)!MFfiOr0*60#O>t>!1VoN%mBSGaiw{{Zmxlf_+Qr76BJmcrRJI4iI8wvRHLBI&}PI0lEN3JEfuQ#?BzV`LU{4c zEtb5(9>j{tek@@u}xF&`W+&BtVY85H7NSX@X4&%-D=>)$~Gu4XBvoc zN)Aazr4WJwKv?i^>ukrx(573W8@@1ddLHzT2&}@Y7srhkQP|ioR$W&l!o0pFT*n=V z?WIDnSdL?oO+SIqi}}^@Lpse!blewC13vyo8-ozy3 z-D)-pcDM8y9x3wFiQk{WO561M>Gd$kwAc=IX0#&Z__^zofs&XXoTASR-a%$isQo8K zIR;>bbjZK}9UDrpgU4R)6Jgs_L&Pz>(9Ih(uJa%i1p0cnHIiWE`q|CN?vP> zsaKvkNk1-m2dk>fG(Je9LJ&FvZe5~fJc=NfmblR_gXS|#CUwOHKb%mhhcX~XH}G~h z6Bl~BZ1ZU?E(VK{KH+1$E@^?Sj43UQ;n*mA*+r#cz>o?YjZW8!dV*51D=sVu;nNxx z$v@QOgy;hMCii z^brh#lE(zWWDH7iK;(^sk|PX zLAoZgMW|>N6XUrcs#x(%<4g$l0?{I5g0Xm^jVZCwagA;cqk}!H@Ditc8RNOwh0#ty z3tN=I`i1c~R&0z<7CkRfzIU-_K$yEh?#RvSBM;MhU?vGKTQeq?2(cBV({1S?>Ba=p z!kB{u*u-`-ZASNvMIaULQ(4g6O1c`;`L4FW771AGIYX6Z%x=xMP209!p4>UOkRD6 zjo%r$v(T$>h}7r7vKP0*0j3-5pi#k)RJRwbx=SzE*^9KuThSRTU6Q>;v^PMHjLC&E zP^kEh%f&sr$zsThe3WeA(^TIsAxNV>Y5d~}gqnOl(>9f}(mrPxwnU|>|HYDdMa%8+ za;9mp_h6o)&P+sXx-b)^Z>2JF(eZTes~2RZK67i$YH}%+Z{n4^?zwly*8Wm-xNpgD zIMqGd%n_wQ%Tv}?s6e9Di$>#FBQL`5j$_{re;Vndv37H_N6-Q3_4`d)Uv7o?)6z$R z0Rgg;Z`Y?QevkU#)LPCjcXAhtn~sj&s6wGXY8(pGJcU2#w~r8?pANH`?-8o+RfAlv zI{UN(E@9qMPS=3>ys}_djk>p__g^X$&LKL}Ui|_=wQM``e}getE(UEX{?09#fA2F( zrwd&9UE%F!n+fIT-EAK7Tiv3%@^j!!$$qC~dVo+`Tk`znh40T4Pc-)$%G5J*g81hY zp_J`6q<54a?@ji)B+OLko~xR;y&J*L8{RwabC1j&(t#54O?kz@cZ9OEJ;PFUE2iH; z;~^8&L}Bl+JA;qV;@Te1X#T}z9@h)Kk9b}`3_l&-`tl%Uk1QEHO)!olD?hG`Arwaq zSAiLf3!H1VGgcD@4?MXmfpg9gA}3_|FTG-TdTROwcw};>v2``ekkN}qp7YM>7BL`3 zH;YBPhT+v8xO3ruxx}=j`2tR1gwQ-V^etr;%Bua@7>G#U3_}e!g0Gr0KnkZ{p*^hZ zY}O#5$5=;6QjLl7sNZy`E?@8>YniT|V$M=BnpSSr3Kd-+v#P-3lwJ$3d-z+X6msZX zR5ClTRn76vnuO5|+D7v_=nJ!^gvWzz20dRROGMMRFP<`Y-&~!odEM;jTh!;%OW@H34 z0)i7SE&d3OOPV2e4*0cuZWqhrJT60(9J62ri!cONqf!Vvu6f3gI(-tSQUN6zh*gom zZ?_9E+9VoVQRP6v0DLY-9~`Ju2y}``PJ^5 zzA#cFK~TKCqo8uGtfw?iK433QQ|t5k`QX>Jeot`FZLlhpsP2V^?!H~@$+hnOi-2YV zZ?#I|9;5g~S$P#CLOy(J42-y%X>p_BVz#>H`sBIcLEh(PQTPx^!8wt_Nl)SAggf%r z2jpyp+pt|yWhgMdfdF%VOGtHGNg(vq5ndz`jZ3&t$?Y1y(5^>)S}f!&=Tg>OTy+{T zR(5^avWQ{9+)Ob18-l9<4(v$pwNV1aP1>;=`HrR8JJt0(ZTVZnyYUb$8V>vj(j&BH zIdKn)a^qnVbq+c0s(1X7;ovC6S&C(bF13>?CXqAr#)0 z2z%kkOJ}mkx6;tSw*rQ%HM53P^PVqb=^%TQ!3-VSHuRm*W-=#Eizyu9jR~&2&>5T> z@vo|UT3tdBX4(Nmfjj^iWza_!f3~9Sh=D@Pw-WAURzCfpwq34EkfZ|y1ufzyyYH7n ztf;qPcy6=^n#h`Reb@N6hdKE3f;)nQa6~zZ4W{~%O>}Hm1{Wz^eE644xylid>(x%vE2|>41~_jxcCM(=w#bV`C|REaJ0eP2SjsH5!uh^=Wze}YyVsrG;L_)$SImO4H-fqpw=|NS7HZ}Y}H7fbOvgm(y0A89=_F|Up-jJ8||Cr5z zYs~T~9rIWZQMV=8HrY8xC=+I3w(*tRA2b;oetq%hgH=>XLpl=1*=WsP6=v6dZnuKKpI^Y}_ox9xcMPO(UW zc|StzFeHVQz>F0w4)?T=)1puA5QNmG#3qNGPhhp&IuE;|t1AUdvuULg zg&|7q4|G-%mQX443Ygnrv*lV4bx`LKwF+`uW1Ys?VJiQihAw8+K}D3bE{*1RYo@mE zm11e7-?b9NfzYWv)(nxlK{2+WmVuZ?fo_u1g$R~FwYP2 ziWEm~++eZAEHUlZoR}*~WdjQ2o~8!42&Z;N4jcI{XGm>Zs{0mnn)aA>z-{)O{`LA@ zX3RtT?sglZ_FjidH|ba9{GV+bR3NyU-08PwlPKsrP@At#FP$Xq+T#n-Bao$hF2ml` zmroc8lLwD49)>~X*~0Js4=)UVjQNZPu!V8(pFUyX{rhX;e~Bhyaw4b`qw_cG&1!@}#mh^y(SvLNH-KkG3tgyj9LRa+B zC*m7i{nV)w)#ud=)fdI`^wj}N_idO!*fhnxlct_16J*64<^y?qs)?|JH~oZRrQSfw z(&!Vedn{2c6&J%$HxY;#TB|Mr`f%u7bRj)wHLnjVkFv@G1@ECfyGB@E`J^E16C(`F 
zlB(5nlQHjd1sbcJxu8E&{S2Oy5tyIL77g-R*0Z^K1a<;*6_>5wug0w zm)F5W>reYteNp5kGOZr`E@`^Edwz=LB^umL4&OntcMHmjFg6@H0<+!w@BERwErH{w zv&{S!lJR7v69;On>&y#Wh8!zxgI(tg$R$$w$GazL@%)J=9_m&rPI)o$GRs8yAl#Eu zugy?S3mZ_}U+oH`w^nYT9SRYI@q-EF0PPV6O!}#Y8yWiOlmg^871tc+n9m;3ooYsh zcE^s^nY!QE6&LNb*j*>0*`M^Xf-`zzsZ8r>ZNzY_`IPv#;+v(iBJny9<1>XIs*#^< zL;R8bchz;v`|jjp`>RyGA0?Ei5KfUZF{)V=F(vvmTAOz8+g7+PAx{60K|XM-C|6fv zfjEMa8!=4QC3;C+7S88HY~+|z(^sz^F$L%@l5(U9VUkG92w?!19gDt~aQN6WuJJI5 zOjk{=_}NOJ1A{x+7iMDyL~jd%9`SqAPpggESG@!VM0di2;0(W;ad^l(o^bt!8Vt9Zof*$SZ4 zOxcY`RZ?p5%}3JcBiMMxV2;0iCT8rT?x6xkk|MwgZNM3jBL$P-o+pG}>}8v8nj*d- zgvcJr`f&fbnPOReN!R^RKtlf^2miP0gyio6`rCE#haBu^`B&8NALy^YvLNz5qCt?9 zk>4Qvd9YYkZHmnmL~^=YWv3gwTNVa%xip)~JYBLIh5I*%l#;fg7;Q6q z3cc#l#oV}AM-?jcQ;OwCkU#l8p_Z&*0ZeMRj6khEYPDj$jmvSVFO|J|)=%J=j7TG9 zomcv=f~xPfw*J0MUNbOHb)Tswb6-sCtScvIxNJWiwNn=f50#6%I6_-eLquUDif;!0 zv`pTjRbR1@2K;{TLUo@uTzp;rSt{WpSEwT>S{$~9*oiRpP488cQ2YeuggilC5bvxT z0{p~8^v5vyydL%rb8Hoc1lSIbN~jR_Vo6q+0kI;4!l;ouQ3Kmnj}uP09t}}kIX;W0 z5~bd^*OtlwtXaSEIA*vevJJIw$7-y7raT5Sq3HpaI*B14FNMhHj`XL6n$H;E6ykFH zeM@b@Aav$+ipcu^4`uHdTO7*uvsjn(kL%firorv>3GMrZ$agUhPAaaSXskS<- z8g-=ggC0(jz>)^dpxR;OzMy2eqcI<*O~97kmVrLfsfr5ee_-dhlmI&bi}W|*vVjeg zXeiF1L*N=B`OMWv^4UF2V4syHCx->W-B%pshBOkaDD>sexpQS;tSQHZcNJPbk7x!E?^r) zH!swlejlOrq}PQ0AQaNjMaR|8VgUi?Tg3?^=@~QW1+}gb$gi2*n6g5Z)mVg9N#vVC z@2r3(w-zM3EqoMQe)!MrU;4gZ+zwdteGcxN?+pVeG~l^S1k;TEC}mW)%_Nva4B4uf zJuCi1pwPAe0dXUtuu6>bHTjXOf?&(Y3z z@nE|P-b5?}i1eqT!Nq(Qs;?KZsQgAmc6}gx$Q_EE+N-bMerco?e9KxnbA`LS{IX|L zIJTJJ!V(jy{Mi;cch60_McZ)Mhp9y4k`QWm-u0GZ`*Ar%9e?&l|MF`?!FwC*G z@qKQL%zztm-01En!l7gZulfQ&bs}RY%i`^249tLpYBuQ#Shb>b&b)eOxvif*{W1`N zoPw@|DPFk?;Yq>kdQj&y=(x{NU4vz^$gyjJ&Sb*ynAasafqp}ap{phheU^aSO5lMq z2`>@}z1PA^vdEj}%PZG^FEx6WnCvBB>ta;0WQo`5$9&?AU5|(EuW$Xx>Vl`zV<;b6 z#YQXy;u9!hdvRb;&6JEE=-<}E>=6lPVt#HO)5c|RuHLOncj`*NAF;B7FW#26S-@#L zoJArSC2q|{VbJW;AMS8CP!qpSnYf--e$&r7t+mh4CQJWXWu4zJl;2=R(?%D56Y(g0 z9w*J8UVbCjBHVR#zV|sChl=^pYg-nQF&T&@g ziQ8AhQ(r7ZPnfK8x1nkkrGK6?iI+N1ENSdhzS3Eg=XU!^pTcWi$eV^>IiRe-eF0kc zX%;mHnqSeV=lLYf(nJ$#YPd}#s;TQk-)ac$pQ@+7H!uE8nfY7J!_?Z@ z(fmJTfN-Th*EJzvM&@^?ecixS%N0U2FX@%hqNts_U?py z%(wqo)?|m_!MZmvz0>ZvPIvTvdwPQJBu&7YrlCgQ(Y*sVW5ieKKyj}CU^h!;e0Y3~ zG1l%6EMO^Kmx!@@5g~f2m0c3v#CtY9MER&-0XLeD?bdP>Zi&H%c0cS=-Dw?7G2GA;67*GDXevDxl2&YC=t-}o4$VKI6j7l}u1SdSy-b1b^6PR`q6xhaTKv&@Wc^FZWx`r_el z@lYL+lVwFu_o=RhO#q)}EZ|TlDno%trKvX;UUX(A+D+3f49%~Ok_>UL_!m*3^%8k& zym9i&Jy)C$MQtp(k0D?ZsN`;7E??vBejpcJkuC?5CZRN`S7_ngUG>q8$X&8J)QhFd z^{^yJR%^i;%vkJ7?lyxv-Q;PjZsPBF_x;|8G!3*es5#E1Q!m_89hW< zMtGz*UnzjxyJV!YXC(3?V=k44(w z+wis~POf$iR)0!DDVf+B|Bb7UEmVNDiId4+&{DQ?MCCx^g^^2RgC(MehexWerFTy2 zj^X=hh?3e>O&w0q|%>UxY*{36&Y6%8t6c=%XK_N8!MJMf>OcRV@&c*QMzRmkkwo{q@d2C z1;!T>tn5%nr5N~D^@tDJGG5s@a(;HD4*Ogx>AHj4R9#JXqnh7cH|6T6BPsLimp^)Z zJ~6qlOcoZQK(IlMp#zN~aA`h>&>3jIeqIU*CteC$?(xDGiN3I6zfz!RH*R4PaqS=c)yl~Mh6!(qN0Ed84rY;YI<62iO4}ZU}Qe^kQKJ`RI zQ5@Soi}2gM54wi&St#qUOf`))FN%kk6lFcVcx+PkqDX8fT=)?#tDW;Xa#8!_(+}EX zBEIgjTo)dZAmIu*3hapRB01#3BKlv!Wn2;X*Gkmo7v)yKAV9PKch6CPXD8uUT|6)H zuN2+AY~#lXXbyeHYH>h38SJodudt2OQ7mCfP}gr>lr#4Oa`!gKhoPzZBlFqCj}lp z=@LIxw2Ep|Zsk~AysR{aXKAF6o9gF zErPFj2vdjAC%EY@UQ5zY%MMzOsfGRoc8D%^mO2!}#>!>5-Dg-hg+mKNp3*;{s~|L6 zD~szS7SJY`1=B`Aqis5}uCjL&df7A|e&#WGMly{yx8+6(ERCV3gEw;vyvMC*!2&vh z2_N7g37+>Y7Q(k)*+M!G!ZHn>_9-|u`RXH4>o%AgBIPp-eRPLI-_^(AX`j{IMd;xO znmgBZHW9#I4Y8ae9i>x`6p3iXrWKiDG{& z-CuL20M5`l`}0pW6M~W5yM4yhcL}ci-&Ub(OQV&h!2XsER=Pd(NJN zuwzNGTn%8fR9#P$-bG6s4|0fr!!HtFg(u70rom2CtTUdiHi}hjW6JRjq7jn{&$&>Q z?}2xBE-OOL`#ZXk#2g4lo{SeRBhH|?7%+?>l-6&a~Y{ zT^EWVoq+t9{9941E1Rvdf{m|pI_I}S$ZR(@{aT8_b&VZlzL1{MHH_b{=t0e}!f1y9 
zRf@drGCkd;oK`vhj&S=CvP=M$R5+zz^@wAS48U#4OeBdHejz@%qRqs>^l?aMrwOoIhPW`GA&SFb#|DKDw9dm@ zU;Is=`HpfpPD(jO42~s!-l)9?KeE)u%!Aj6dWEGZ; z6%ej06VU}oc;KZ?2SMF-<|JS5^MEs|hqFm`U={(PkHzJQjZheJr)6M9CpS49se?z*YPf;#kTZFv zZV8phBb7?r1+bj+qp!iotatM0xEhr|{!}(^BMgqhs8a3)Nx*jCcJ?YGnZx>`DmS9G z<1b!Du(!L9$lic1_SrReSDj2Z_w6DeIKjm(`(z**3d=VAiS~Dc%ko)&V%+f~(W8-y zNTlc0+nACsPNV!-p2Id=#5~!7sG=YIYRxs@}Qq`kO>-C&W5QThhOa zq!q04$aSX~LfXHFgilIfN~&H@b_i8{O#AO*9Oh2>2T~6`YzDlJ!BxOy>e8OM?l8sKEc)|q9Pf85WG5N_C zukunqYpC9_?;g;WvcKX=36FY3PIPb!&?;aI+69`=e@+SDHdjoW^3xfbG^H{V$;@*( zrI1;;t7>)yN%;QL#1Q@%ZGz(KlZb2Vtp{|tjdfP6LQ?K`DQ%|LH|ReQf6!Noh2`537v=O zfIQ3f-s62{X2&NUvBYd9*5dHK3T0n|COfB?aesWd07b;uB${!Bjo|p%Xyb;WN8##U}_|yafSkT+H$%5_T>mtY5;XwQNb8S|8j;{=MfNJEwbd2;H&+dh0Ie^6ogaL!->YuMrVE$!Mp zX?&X4$<4cf)D-BRzL+Q;h$O;SiZdF0jD$>q`ngVjz>M(fXA;iDn0n_f_(NS zjnXaghZ4VrbJdgOECb$zuBv|>J#PTEtj-urK+x_v2AQjOD<1>kcuU~Y0{h*>JM%%@ zYCfr+c@12>D@^04!5k=6=i@zrmX{eH!a1dn&wc&0Q2hD#x~6W|2osqm?)TPo)MX*a zh9RrTa{j`Le+d{7cY25%eF)YF|D&D*-Cv>O|3k3G^yB{67+Bbnvi`{y4Oi6pFtJDD zRf3T|CbpSNcnTmu*DTTbwk{PG5)+3OMs=}0=H)l(L#F}_2L(9R(z%Op7q_$4n5PnT)y#R_(8x-H$ZiKHXOdac zj+u6fYJ(lulT6bfak3c1S75%wbx@`Q!{*VX#|$4zd$AgQvvy;WG-|jNT!1<&!wC(E zN4H1uM(FsL;QcIt?$c9N7wnKRXu=TX-di*bfL~_;4GA&Jw}>A%Wh|s}YS<2#u(fis zRQF%m!bowBSZnyKkGI`;8J44=o9x)1hdE}KRUq$mR%@!`E`Dmx;lTC3SnWb`#T)^& zk}A#^X=O_01Ou>^gs`u6T}jC5Wh6cq#nP9!eP|!Yp=^nD=1L&FcKZh*v|d`^v&JPM zG}dk}#SzdcLCTxYDk<~yC_RO^oS47F{!Y!|XUb8EaB&&SQ``U?O1Dldk0YGhMaxE| zS7hvjw3#+qZyEc@yGc)$3LiLvXco07z6Okbz0$M4W=TgiVmO=T=kNS>v0^WT8QLg( zKRV1Cv)G=b`xucQ8XQ|y2c5Wsw41a9^CcUjMhfYpS&s1IU!7{x9%x+7k1FEz ze_YA`?o|IDo)!OhHe>UL-t51Mi~meDW1s&R=k#~@OsCQ$=1_8I)Ea8Vi?(Lf#{`A= z%;NTu_jeSmWYb!k9i5edAZ}ipD>J4%eBz6{JgLV~m|2uHt!LTDcuYS}dwkm&;rOIh zIT%$@)LGM4{V~^+F;QEV=T$+p-Wq#icl|0{ZTvCNWDWLke54jW$ta0+o5alMSu45l z6d&5BxyN3l?pbBuSlm-BMeXClV@};w910gOP4y{F4~e=x@dS|swI@}SAR3Fh3aH@p zAm*N!9MGb3P23Wp*IVEI)wJudR7t(;Xq-F9h>EOP@1SE~9=mP>)H5M|nAHaR}I8+%82jEss$| zWEL)el_-azjt+!!Z{!_PnH2z2C>-()RE+!D9$0m^pi5XcgEk+tjQplaxNs;B68l?5 z_Oz_DItCuNwR&VA-7Olz1Z`YDt~!T$HXEM-Rq z?!xN?qt}@7U^SnMV$c!@+ieWLD&qW$v!>O1@ZD4kT;*Wfuo_GhGKC!6Bw{VBvnRJ*?B)X^G!cmT+4P8B$)IRrgcR3hMdeZy*-fV@i5V58I!5lB2!C(D@9v zS;nWe$fbMu=vzsk=Xl`IhBmgb!H1lEWv1~e?$GbPJ!lOd^oHUe8umiu!R}GWpdm2g zGT-7&>E+w|&r5*AIwCu!IAj#Qs^=YM?`zh2^A5XxZul0aZ0ycGkoiZ{w^eIBs{cSn z+5f1o{~H;9OVIw6=l?gQ|F5|JuXpypv-x;s8&_0OH17iFwKJDGGBn1l`gwJBXcww> z1sSkqAsfqOW#QbPcBx~vYt_j*_W__^egt<23HpjpTs|xj>|TzOVpMLa-}j84rJH4V z?DO5P^S?jcx_*LPG+;~-#A))JyZ+iU6fe;0GtrQf72C{{5~(xbiq&6IaSqMU)E!k7 zLI*sNnk0HL9wnK$)Zz(g6R=*WV5xKlKDLaH>kJ@3(%yBb!-9*08B(AJRO|$I`vRFz zb3Ysw7zZ^DM0&N<^#>R3{TPGWY!4h;kjZ}=OWUYo-3Yu6ptv3}Rt%wn;5&9t{^(Sw z&k&^*ZLvrUH6C*Q>eF|&DKWSf(z+(HhU&?3TL0Z?#^QOkcO)rlDqTMbXHT+6a(FZY zl_jkT*qak%m&zHg2A3w3!Q61Z>S(=z+~RUAn08SCtP$zT4N@*Dm1U@GAeJW`Bi)v3 zFx+Nqt=LhKVHu^;vF5n&I+H?l;gB`XWA{1a8!6s*b<5j=2TqkJj`yYx;v|XsY@Glx z2f$K;9~z1=FUP}J1_wt^u=Wn%^20+Bgy%~5;cKVXDeU46U(MI43}HnNt3^x%d-o+* zNDquCqg5}l&)VZ7Mtc&(=^4|oV!+`sv1=7@JOz`h5tPuh{z&#A*1Mj`Wm-uCw2V1l zeHEuKKKC@{XzzG05-ztoX9n52)Hn zg!(yhPg!`amox%5Z#Q7w9^Q!}27f%`B}69iQz}*@)a~x4G>{i|ZX;H&ZO1+&3iG*qqF> zj?qv+#UZ|b{bn(^Zwy-$Z;Dl}(QSMnYfoc^afLm1D~|Op=|zxur7v>&Lw*ri6H}Vs zsARP#nLwYvchoMa?H0^#cl=g?-0JH+dz-uZ!4}GA2GpFu(j~o0Vr=n2So0&vcDArw zAn|-LH-A6blb({tFl5CJFhn1@^dX2?TdmM4@0>LI8Cj1cR3tV3ieT{|`x0q||BD+o z8QQ^#lIr!(IT6^*DZX6EhsU3%cm~Y1a&g0v_DxQ2Oa)T%0CC`02FYlNO7ojgv{$j> zBil0Ia3o7dXX*L}=pXNki(np_@sB8~{f||`#9ztXUo+o-(24tJv95oq8j}9!e@Op- z|Bu6eguwofo&(e0<;uUi5G?=e(k>qjR(hxBQxD42hUvxP24HQd{pf7RN|EF<~5i_6{2Lc{qd zH2YwpHnjd}^qLF?tm^w7fNW566pH~;9~%kiR*6DJkoK7)u)<>D9z;IcO5D~nfaoi# 
z>QEgA%Jj}#NK`Z?gzJ6x$f@=^#9$H$eLvX|)<3J{}#dO^FwYww>SnBnADmD=6S;Ib>cJmbN zVBQXDV;Jll>t)u6@HL5osYqTpUK0i+So?C~-_>{321bRZZu~uBsaGl8pSK-oRK`_i1g7F4i_n@Vr@#1mf7OgLKc?s5Ggr&(my^JxtQo))!Q{Jip z8VmcnbkDDzH9DMzMu~Er-IO{;=Gfl)_#Tddo~@M9|ZQsDK?V;(gzpwCC57q=r6|Bh#u@fz8lxDUatPk zSmYi)_Mc4n0(9>?Uw2-N{rcD-?kFg9&UD2>Z`u!UDeY;nn597l=Oq zKmOZp`sO1+c={iJ|L+6=_b@u>336%{|r>;V1u5?*}kl&6ugXkP5kzw#DnI&AeyBB2{>vt=RFRO#U(KJi&$h7c%E55G0KtsoY)SLL&XoPm%6#TTvcmo;=an^p zgSnqlZ$VpOhn@*@g8J>4*by`8&Z=-|6(gdr{ADe+f29t#t*@%)G^!5-HIyD+cL}oz zg=|UGMe#6Gxxy+{Nj@{Z0;sJVM6--lXuIsZ5bqaI3&28mR~u9dBgd&*d2OQJA^KRN zV60FV9j$=QXd`L->APoQ`fknpa{SY=4{J?qOsDo_8o$8bXsN>>pvfcYgbu%rO4eo; z7)Zk~!re%V6+U&3xxfY1Fe8`aI2v<@znBaZP*LpU;!@t)uhbc928v(9~>qv{_E%fBg5|3H=f6De)n zkm_0sbNkvvU))iUe7XkN+4ZsgI{Rsj{K82L{Hm}`v2g8(cm>!c)KPSk%UALw#ha(u zW!0AR&ZJTm#253+R|+Ik>LtyZ&6de+X4+ie9nU>dco>O(T<`YWAFVr1@Oo%dWxh`g z@Iil?lM#%128k7noIb+MMI3Qy36%dJKF7vNQMS&U-bYY%O+G~p@a~ZhR&`1guAG1Q z<_q%!~l;-R3E>%IBn+qeZ?%FD~ zW~s=KuaW4HHaK#~dB!K|+WbnQ;SC3sCkqN6z3&;zK&_O zuY7r6&uhZkX?@i0Di|v6)lq~4ASlL3gqR70w1u#HF#+JrM4>%&WWo89bPv~IO~U?@ zVqBEcCSjngV}UDRBPQw)G!L}t8`UTV#R{G(fN6rea2l10PVByxg>zVG1reF^Q_J)H z=2XxiKtiV_Dw^LKNcFG*l`mOIUare7sgUlHwNC;fAzC!y-ds5NsQwlw4HETDtru&a ze~;8f5wESlQ9SovfNs!Mkz0rv^w(seaUc_(lLwkDXG)SL#cM_-!{!EI{mAWY9;lTM zyh0DUvUCh$| zrk^8Wc6V7mvwfUfqGc2Aty4YrfIvv>*m`w?h_NAsE1_Viz&EoA2)5*u$#Wm8br*tm zp{lfnh+n2FN=yxHmy+}Dmu8(o>&5TkwZzvDwSfKZQZ&22Ys2S05s?q%!`QySb^Y$RE)~DNPhzL^b+f!YGDD=sJ`JVn z8&&3Iwi=nWwfJyGjX?g28>d;h%_6K1lxC z_aO(|D@fcJF8tWfft%jm=YhrDFp#sdusolZJTC?1RGZ-LyTy^2>lCZrPyDCl?ukWd zB(-uuOd|8XRdPtJE?1W-WEqmfNu#pJ zY%D3LWAA7C`OXe}_hB2T2$IcY?V{L(!Mrom@{)@ChB! zj>>78ERwM7APHAs^*RaqOTYwjO*5_J#rNv)?4d0KT@{9K)gf=atYK&Nvc_JBtw&-W z10X|ZY~Eeg_FX*$DB7ykuv7NPK6x;s7j`=_s;J(s2C-fN?WJ?GYs8VNuN|TBqtI(B z#>&<{#34FyPUj=tLp|Jv|G7diT3en33q$q8c^|b6J~ios4;;Tgq*BOY5lhg`yHUi8 zMY9ffDgM~cOk!(Ifn3%=i@qZd-mL?)S!39V&KhkQesKWrxqh4_avZI9vi`>PZjg>; z++#f4MAt852WLwvXzvl&1WT3|1n)?F)%0AN&3%LmahT8e&@|mx@7BKjU3wyfNPFTn zQGj4e7^Ie>&=helC@svrL`OHC#~A9kUO1G6(UONT_fNf3KZ+a=8e!1(V_>Ww7>LOt zOP>S3pq!DKM?s6@s0e}>8YTo7aj|t>??f{Si=lE{%c$^p_=c!q*Grr4(b-L>WO2{< zV~i4GE>w`nsV2k|q)nt>;?z!)B?)6V0O}beCCaP57Qu1*3EnTAL3cM6%Rt&@fT8h9 zy6)~MCR5T1n0u^eWR;d%LMIM2YR{rmf8J(~Q6T#6ExsLtG)awG(VOGuQ4Z&5Sy}{k z2M>$e731H|<*b%fnF?U5$nI_<*(CVZvTF<|d_Vc=LF*pWs2gg+oK8i7!)x0y*lYP< zLnr-pxjPftkiyxRE9W6TYeH!*=4YU7QKwWgmX`SMQ7)nEL=n90l7owM1=6zZ^kNT? 
zY?3;r5?js9aCcr}Rikih4npT4QMGGC|Ka`YABtCE*tm@`*Wz)EOurGszk%5 z32PhR_G%6@o4T+)N0-XXjB2r3#y;3jva6y)JUt0W+hd zy(liEy>H*6FBad4PGYK1Tae@SiB9_p#3~BDfb9z@&hUJeB&9ovt-^f%jd9i0gqw7uv=Uu*|&2T2sz3wZ(Cq% zd{&Y0+H*<7cLR}=%On6)@y}tR8kx^Qn2wUW1FaH6vp{c|UH2$M7chp1L8t?{ZVu<>n% z7{@E7;6hGo3t{deReY@B(qoqwnIU|4 z3P<7HDD|hjpH_^ZM!ifxElo)5Uy@nPQokv1+u^oS6UlB!x)^iA6q=hW-RbSgp>EeI zsIklKrFKCn*>WXq!z9m$z0hG^?+xb^9Yp zto5`Jqk4JJ@iXHtVI~&umatdORmLgT`F2UqX$FRdT2Z{0!!oz19v_vnlrcS3`^6vV zX!eR1CAA{$ZN?ypYi}E&`2#Z}p;Z>PNnePYIvxz8J350C1Kl3bB~lf&B7eVi`O0r6 z8aE$~s2##KzkYEj`bC@<2#U{V8(TW!l!Cb7R-3;g;Qw~~5Qp6X+#tLe3InixOs?p@ z@#w)9@-}qK+Rt+ohfquq6!VdPJwz5UPTH0Zm-@$cxlIbNzMN>~cVOaoU# zeo&xX5d7w@!O4oN-`B&t*gzBC_l>#eUIqd#)3%_%4|P|IPW4p;^Olce zRO%Rhy;S2#_xjSYh=Mv|@_kn794iwlv#v5f@2e(NP@9X@?ij*bok`)-9E5=`#*fo^ zP^2QYq(ui+iI5p8HnRE<9Ls7usHtI>t*UF&328`Mgp(h%lp8&&#Bn!cw3x&U|j4s+Cz0Z%*NUrh=Wrd{X3v%e` z%ZTpV?mb=y22N!q-#kMPb3MlsPn~Y;sr#)s8@4rpC>Ps&Xr_MojLrN0%N#PUF{JSW zIk&{!%0R4frk%0NDazWnAJ#%}xPfk+eYGnyQ=EtNF4>>U=lT{kxn`TztLwKr_mo;% zPOuSH$W0i33>DadO0!^>mX;>4WvsWAZb)4mK2IOx%p(qq(ekVqF*46wxyR0#=0T8z-N^8TA{~nrceJ;lka{zaArSz(?M=L;qN-WY!lN8nl#~PYqdtD;qbX-G4Z~ z=PKj1(V?tsq=iq0v>F5UIMjKF?wD5;udM?u(#6$an)7CYOlfMWy6=YbRm3fO+Eb3@ zA_{|3YR(TD3Qf1wfygN8$ zcH>`R!!_0i`rew7_6ADz_LG{M4(u3M*6NydWiE|1v&%DWF_KmuiD*vvB`mny4V5VM z=(0eUZlzxt=jG|vG#boV%du^}$s5+pb~eM622Der%wrbLwni$Fo-2V~8Y{WpEj8dY zcGY|7X-((GCu+6!FC_NC}EM=0k*7o5i$21-v)Bx{ZBh8>86bhjP-`T^ld^bs-TI{1-Xu1Tc@SEtWD zc3PvazKiS6WgtCtOckB@7!Kxlj&8b+!lL}XAQ1XIY3}dDBo##@7W**zU`O3&#-eWS zbfp(KZC;oRwj_|9$%j52#EOHDNp$`>Wpek%u34RySON97RaUMOQLa6z4@Taaxf^C2 zHv~5y>T}#aK-;%FP(NOncI^cTO{Ocw3mtK0Km}2m$1~uau{ffR8hYSLMe8`L}>Cv1Hn>DHv6R&k(9I^S~Zl6iXNmy#QO znp=Gi0X9LvVn62zS&@fA5iPPyO;prR!ekpTq#B4tk!M+u=5L!TIe~U7ps>BJHXwY3 zo%_=lr-WX(=5cuF@rn9@d_JPo5^&jh$b)8hY%>ZCC`asG^DCGm?x}poEgz2+yfW8z zz61$vBEw!bdx1WNPp|zaLtk#Wo@_OE{A&z#o`LSHAcUido7>6Xe zh~5Pg5(X$beCnOZ`Uv-n&g*52mgJimRgo5b>|4lII zDl1^|k)o^k_b)48bOPF>$8pJI#%_wz$L_sbLE*f!M7UlLpe6 z{fWB$iP$KUA{VNsJef<=zgVT5&e*xs%wr2KRT3}Rtm!X+wJ){L0Xh9!e4gc414h29 zqTpOn4mJARrzbz>jIr2WeoXvKuOwJ5K3bQX^caM6-*iFQ)XG9!hkpeSICX*_kV>EV z4rJlB%K)?=oQ0U=`~2V0+9O6G`c7_1^~EgR;5G#Au@BlOwsD7Ar$1eeCo#22v5+sI zwX=P=>1B>2trz3Wv>5fX`rRXNneTedJoq#={WgclfQB5@MnG{6M3L!3&z&dpOh}c& z_eSE6)29x)AE5M1XiePV<_}Q%215{{lr&`4#EH*xDla1Y48svEuhSp+!R=RKC)i4j zglTyfqUS^I`XwT(7tk2CA#$+&GPc(!d&{gF zoz?6+@XZ^$OOFG`rhlN5%TFFoZpV(Rn4sm;!j(KbiQr4d+Z$R}hwKhNx5|zeFf7sd zDif*)N_xhcvmFp@;-vGFFC>aZX0L(y8g7I%Q8Lw31Dd{PN+gNx2$XflRj%pYz{=IjfRi(jhJDHI{UX zP?F|TA=DmTn$PhTEm@%B#g|5evqJgE>E)f;d(lRwuCEzkLvNC*l48KT=z%4!`V3pS zKyeN96@hAo0B_YX%LB0L<)lNM9Q=q}*ASa&nMk&=Ej;T|0D_ARk3p1+xq-(5oD~_Y z;c2+VYKDYoa6+xj!F|3l^8QeauGnmQTFx;Y8j`K-W)4TcsNJYSI@ zJfE&lJl-60)n8twRmyc|UZ?!oPHg~hrfk7J7@pH%=}r1%Qg|~GQ@HvkO%Asd3U!$; z7CiVi8%Q4kf6t-;4!j7Z<4muN5RcFgap4mz+?;>~VFdP|SXECE{9s|{R$<)p+5VQg z%P%&ba@?+>xQSLfGe?8QHlwri9(DE#xDysTvdeC%TyOuHHaL^Hsbgb(`gHRlG5fbk z+kYz-(EqPloPvX$(TDKt|FC4UwsZVz6DHu_q8zX`!cu6IP&vee$SWaWFY~@EA$15lQ1~ep^=T-Py0c4`RDxyf8)yz652rsF$!9C(J_lJd@qR!j zVgui%{k3hs|H9$!=HM|~c_POaiwW^)Cfm_hbCaLtuBwuYZ(tNv(bLn>Fv2Q#`Gh>9 zBC`Up56@%B!`Gr!<-W;vund>|RT!CZ*NEJg@_gwa{lw(#MPFVWaEu_;|FJ9jK+(sVr6THnlm-#0nSRHuWrb+D!D4X$(sZlXyJG1Fv+Og} ztphCHmPAL&mPl(uf>i!A)Beo*jV5kRnkbT3upQ$aUB;OgW4(6x_YAWCw*AT zPyx3pqp-MDg2URN0(+_m3u9)Q%@48jf#3H(vHe;3#b{Vsh65>+Sd&>UNFPGiU*EHo zsA?Ntz=Z=F0YR5Lxa&ywBFzTGYm>sOYJAxbTVPKp!&Fbm-wzAe^fDepvadKsaqcQe z9<*-06kug<8qnLv7Fw?6d6IWIO*wK0Z)Dp+++Tb`L`7SBu7BqLW0I!wP4(sH$0RN8 zABkvxYs>y=d-?}IQ%Xr*?r+w{zYTc;l&t=xGXonC6*Vj@Ac#EW3mzA=8A=?&V>*mh zg7MG}FAbfSD)$BMA@ya`2!enBkN-Ir@4NsTC?&pZv47UavA^ngw|AdO!2fx4lg#hi 
z74Zg!RRj$dSA*~Xd;w@`OIo&3mka)(e<)n}QZPPnwz@%DXYCJZnKLMU08dl8o`L=` z97&+(d6ZVPZ>1E)v@QcW2MabmZxiOwXc-Uz_4E&(8P>8}`CbO&jTtQDOVEeP3wkNq zQ$*Qs@+-JQH<;sa2A4ivu^w~?o7~1~9rZCrCv=>Wuc&JWQ&R!DDvo)JVI?M8c9+#L z@q6F$sg2de&64cIk_neHj$yb-4HA^f#j=GY+(YVq20bz9BTcaZtE4k2IRk}y2!r9< zTH%ZcuE}jTO(}qkG7-RMxlTsS#@-e@LC$InWRdf}C6(!14HwQe*eZ(Zfr$~?1VnR* znD0cQm`@BjA2d1gX$YM;USM{!0)I@l68gN=xjNW6ag!({JSv>&68E|yh^XiT9O%-^ z_Y_k3dD*RYA4wj4^RFou$`0((&qCD>B;k3|yM#AQ_!XkeckW_6B(bWTCi)nT#`4AO zwK}EKE^R+Wtb+g>EBWA7N98~>RySR#Eru2zC@>viy&5_A;f*K8e7DQ=`(ZtJh9F%E z7og}9e92lt1GQ+A>QiC#!K`%jQ|<8+rjvKb&KDCKMLi){FP|GT?R2ib+w;c03m+7^ z1Et7sxSEcQ(w72l$Rv&UJ$Xza+{!PZZ%)T0jJ-`tp@z# zt(N}ljr|wB^8ePD`BOzk*$!Y~pnOJ|wN0j(bNXE)r+{AcbcO~cq!GIVs@u9FZW`czTfnS?UPSN7g{(84E_IP=H z%<)NsmWfn4!R**`i1BOBMXA|!y~_SwiD?VaN4L&wC@$kz2wgE|Ax)1C~3LR z2FzA~TS2Xu+B|Bl>q$?{P|wAL-hp#iTGqH0-DI<6fy;x1oDp$};qW__l-Yz3w&wO| zjPatkVVSxZFOX-@9?yJ^?kU?Jj!FdmkiM<|81mydH8{i|^D0S?^h(ywe z)|m&uRvUcx-Hd}evj?M)H*PnwMm(HI^85jk(i%xF+$Vd*LVcv_&n_y@PtA7;eeWXw zGx)rx*PZ}z@O&Zi=cuxNCPG|t7fH@RDN61w&lOrc6x-v!JVQNMs75wEa0TB_)j@^-r0iGS~mGsE9-!k@f<77fFvjYTc08Y4?a=X+1 z+@pPDvKlKWe$n1>Yz7?V5J&nYL})wr$(CZF|}_r)}G|ZQGnScHa-~7hil& z?ET}4s#^7{B34%BTAAm0n6GKwET(3BLk^*s1im4MqcdjVYz}}SRWBH|D1w2Vt*6}v zCsL?Kx@(#)Q_U|LRcOFCy7drsl}qk1?U1)3tI!fItFF*!Z1|E5@=8$y7&7#!5KK?} z_qM}tqV+wi}D#*3eY4VpeVVn7S$4bB!)6! zzntCtDYA_ImDv4ddltO%gc>|-UUFs4d1WGWU#j2NgLamXI$#`9|I3z58z#TUCGU;i zwcx2Bq+DDpA=_lK?qv?JyunJk^v+{ebKeCCbiqNe_Rc}$+I|=nKkQ~EN+%RKgq1mA z5FLq!a>E z_|Nao<#;z}*OUF} z?M`Pizom-PcH1-@g@{q*PNaQcG3UM1JV+R-g@9+N=Oj%0Wj*7x)J}lCXBfEUOc!Nrs za)3nU_ysE}VwK-xC3xokUTkjrRx-_({{7i*JRB#5Il;&PF1wcYU~r&sRre zA&D8;X-3nlPn)LO$L=|s-JhSAu0M>udi7ZTGW`qr7RMx9&e#W*|7G-NXE69zHC7Hq z_#*HNzxBvkamfwO>sLGWE@(9AdZROlOUmvmV%L383IVYE9A7I|O7=#Wk&S8)b@lNI zrVY}?q7m7o z^47J2!pRPZiWWJv!Hc-c)Dce^dxzm?7+dOk&m8CvJ(i?-^9JQ05+Vm<(P3OQu zUHE<6wiz?OIF0lo^v_Nn>5%UG1yBrFnvUB9Q2CNi;gdk!#8CO9C3dS@zw5)FQjd-q zuPzQ<%99K;B-;h0Dj_f*b!TiX7^;vGYDVS8ut^(2pkt5Xd?9*Y!8F-2aOy~nKTf`52 z^Omb)0Z>#70h-N7`16%BJ%eunH$42Ka=z9{h1ElEi(lR8yFA{YauhR5vxm$WUop|B zaq_Ab^Z@C^v6V1cm z&~7mO7&1gky_Fe0N!X4kV0Gq@$Mi(Y!CTg2RxsqzE4NRtq68b@DTv^vMHim`guJ!$ zGu7XkPHN%VCW=!*h*nw7FCL z;e+I~Hy?OMVg=Sr3-dZu<^rFbkIskq$Cy4;KgB1z3Rz~nCu z$u1?&n}RSLa$kv2mWA~}XT{0vRZxmqi7p;)N3uAYqcOFn$~4QOz6|2Fi4cLT;!ggbDKJc~&=eR#Ct*+~AUq zEIr&6`lP(Sj_3Ee_WM?OrNka#14EWF7r)3a@tlz14BL~{dr*#SzV>k3S=WGEM6D0r zU3U8zVsJxo0aANi-w2sx5qv3^bR{6D*yz$Ew%3)hN+o7#WA1*4RSE&gh% zrseBV0~6ERwr_spy*yuSJig$p_U-wU_jIx0tFoy{d&3X=Kbjh zMe?(2<;p!?g?-0QRI|uL z3A`Ymy)4PBM1))Z!ewS&iRG!awa6shLR+#xwq2u5wcI?*%eSz^S#b`haqP~_Ik9Kq zIDOTmJy-Ri3f-m88er;u>B#2FK9vveiMclsbVx4~@Fz(uLK`oT=OTrI&M!zHm%Q!6 z3%+L!eAs8bcu~+IFraGjP(%Qr_~jc{LR*~FBYZH%)TpbtXk_G3Z$W}uxD}i7O4XW^ zPu$EO%PO4Oo#o9?07Q~K%r}6SwBE4l3to^fTZ)_<)b;$8X@9UYzmx(~1ey$!k=eKXmuUl6J zLJ8H+@iomews|N@UYsQ5SMZ--<8$rFAIk!D=pxw%1Tgb0W2i9-N+zk~=PHxIf_JW7 zIGgt&jL2yV=A*A1;6ulNTX_k_1^1WDGFWgEdi83keK%17qCwkoU2)+{bgVmRRnMh` zaDBL!RT7ML*Kjl-C6}rvMb3cQiRJ1NYCNo+ z`=eZ3>YLgm#l#Vjb`(P8h3WpdCX8_x5A5k{L}5(z?c^EZPvXd(U7c>oy5VqCs_?!j zw51Cuj&*iIE$zE--?d~;9~pRw&UNv`j$)7-3hXCp;>9|+E)N&8GRjmueBKaI_WJxh zy_)SE1iB@nAT*s#|z z2zF-l`6oTYeS~~0_ubNj9}eyNkTl%AX8WoLkdfjlFmP$&(i-r3WYc;u5?0nfDRmT> zKv;cvIb2)#Cr&+ClNf+h48k);`EbDaYc2rG=cr*b1xQ`I9?CHfNeNZoI!JZ&u~Wsq zxIIlisAdE_vPB%NH-mcJ7y+rHm<53V5@A?*1qyKzfm9vu@ofEItpI)}_0Nc-y#2NT z!3(Nrh%Uo+G_^GqXne+p_mVO}Unm>_Wh4=H6i!8Yp+=PrR*UtM(8~ZnM9?xjJ5zgZ zr<+dJJAR9OL1_FA5qe&2UM858S^Q4*lI^tN`<{>nxQ9A_StXES=3CU{6)7W&>ux8b zy_>3DqXTp0O{slV1H0@6@FeQiG;Ynrw#=~7Vg^IRC}@Pa@e-vQ`1yboJv>8FjyU!? 
z`^7oY9nieGF-Tm7IqTiOZ@a!+GPY+;%_d;}zUF2s=9#GSfz+9N405NHv(#{PxLOc-b5W2k) zBxU`1=ia!RH`9*E*^89_GtpqvIZ0ER5obGV=XU1qMMPmFZ6%evIxXs|2jQ3UK^>%I zM$n*J4q15lZ*GX|W{Gl>f#Vl)``8O`olSpt&vCa8*{J2w^nuq)H{Xkg=}Yd-h1*yK zsQg9cQ}7_wT2f226&&bArqfBP2hBc<;QJ-+pYWQOsCYBIrjef=yVb>r$It!2`GDOf zH_|tgL!aPRzB@o)b#_i_5kIkRKQTj-<-#!F3{34Pc|Lx2jGON82Ci<~&ANzNqoCMF z{t4EHx%#2T2rsbI_2V8+1eznF2f#y6KMyeV+q>iYcSrdC5QpH>)>cdsm$tMfVk~ zm2o9Em*DdX&Uax>Q|N=tV30;%l(r_n=9j)45xkGU78y%+Ki~x?;F$KpGkV9%CB!); z+cz>JRwkdNFg(UP$bssU)(+Ntw-Otd0iovo<_|IL`%Z3v=Rln*_KKq2pvmkTCy?ZA zjNBWTs463y^XP`LgfrWacRR7*rpGVakip#G7|ayRXC~=_X6s(W4!6pUFWA5Gq&*_a z`s{b<_w+w_o&UQ-MEG}}{QsB_I@nrS{reObrFi)-lj`-Vh?#IO3**6XYhjdk0qS@% zi2(I)z3ASwIcH+nycbl1)HLjZHkHJH$S2Mrvl7m~@ay55z!}ja$aWk=h3w(VmNjM3 zRv5o+lDD~cvY|L}h{;o0Ar@`&#^lM2-3JAx_)%1F+*g!D!(59D+=F);x~I1tXu-H0 z6-?3gfBm`()h|=H{6(aSU7efH=Sx7KhS*j$YpoCI$USn})D=U9`## zcYvd+-5Ni*=)&gr<|baaGmPp89rj}cM~ejwYR!!)B4`74CW{|e8I;W+u0s`%)h~LV zDq04KM3}Zd&xJ_m0Zk7^`O*Vp$Osmj)FYDz9k_Ymubh595|eTLJYk~?w5ln&2SJMF ze)Ex%7{HhzjE~$*w=xZeNbv*5+$VuLW3Q90;P!#OM0-0}bc@?$1%kC$u4rI-%Ev;c zId~YJ)B}{W2ZTE!KdvD#lgZ4vb25y4vq?Ft#4aHM?#Z6_x|es;%-6&#$fB&kX9{Dz zhAD5+-AVeQ@MDh_xwilX*!u>`Cu^XT7=BKAY8grf|k>4IR96JJj5?gWZ&`UIDl z{DaCH#y=IvWC&By@!zE&(|;@liT>W=`gZ33bOZeN)!+Y6CPt}fs;;jfZ;94fNwT2o z)A|44kEUlKVFQ`78zpLlW&=SCm^7B6VXX6(Lc$y!VZdiJH!Zr9y7oR5{NlrXi#3tX zwO&epY~?MxOtwM*0J_nrD{y>V-EMw*d0cv#_WgPR{2{ap3lvnLvJC0OUn&RCIsPX+ zmusQXpp)+p)Mn*;I=u?z2y&XAVzGlg|FR3%224@TxTk}Va)a1=A^-j#g|M^wTqkv! zq{C1xWdc+8bYeY8Ghv`Mm#*CHpDqB=he@q}9y)UTocs;HDljv9?stClLQ|~(c{vas zTJ%?m{Jx&0$1Qe;1`<5RY$!xd;wia4DD5ISo-~5aoF4s&ZDSV9g2Ocv*4r_I99U%z zROLnFj#=d1?sudL_Dq!!UST7ukOZ-rOiAzE-X*AP=+n(t9+FoIb3zPP!$PVsGgG1$ zE#h((GD;iI>?FJyy__TwcLsApsMnoe5^kUJ1Cyy2WGDT{2c4K+1N`!D6^sSU1Ixg0 zj%R+QKqPzfT9y+a+_v~~048 zt@YwYDQ)e3R8S!KVs~p_>2aQUr$9baI!ulb%c#6&crZw~0C@nT32?)fuaG4^rWCK5 zK8!(PR(uNtG}0MlF79oy`GOy+yC^@W(btds){7?2L=sk$Tqk6#sxgc7Mf}M5s2iCK zyi4W@sB@Qi=@0+(rWPK1a%}|T5?OEqnQ?hJ*(!3G`lEQnWy@Sv*L!sm(un zO{P1O2bf%&DuKb0-^b{vnemfmLD^Dz!J!QRR{Q3MO6@M%G?LWR&S2$eTC>=TGI4+an_qcIheDaTTD=8ja08E$qXaWU!+{_50iqOho)1+Ip_{DMDNqsZ)T=sIx_JulGuGWms0!JmQ@n#9JXuVIgxU^CIw{9@U50# z-Kz2sE#``RT)I4d+^VF-DUuT-jr0%XGLo>{rwk7N!k1d(XLg9}<#$0$w9B{CqZ~CSnf~IFJnqlP4FwM z0AA3%{i$_F(MHG@7J#Ddf|4eaLeVBlt0m`1g!^~NROl9@t*gpmbRC^JG1|HroALln zeoL&iDGx3n{hIThlCbZ+ZjrkyC|mHY7N%NSWE@M3>f#Lu@pzebNr=W_we@+Fq;bK! 
zomwQ9Y90*2} zPY3%0k9Iyr4=&?BrbCQ{BUdk_fNmSKoJir}2H83HacrDPhaGzR+L~!sll9)X2vF}4 z*GX5?oE!=_2OGrkSEorvxM@g)oAMfrZx>CvS z!ojKLS-8^|vSk)o013dsA{|H0Kbh*76;TV7r#li!0W$94T-qMhhffxwD$!{%jfV<@ zuQG6anqxz2?b}YSY>v@%a1aKG^w$apRYx&phb5ipH?Z9~^vSx^oKj;F*f^0O2U@f} zmb*(2Ng`8`!e3v2$g9JRx{~Uvs~M;VCRbhAacL{ziknhpL_GQz;kw(74$!V6@abK) zr|k42QTMxx;fHNvS0E0zMkfz_vWY8==I#hnvfAbj~E;J@b;$v0+Bj1eZ-D*D<-^XSzx8BqCgfj?!*9y(em7Omke}U79#DPdn~* zZjDemCtnYy;#h35-1ckbSl0ys;l^3B_FUD6eHp!I>J@N>aEli3#oP60hT+=^xS%C&Z4ya36yBuJivBlhMUNOMl zajIeNH-bU6I3H4f@nh}YR3f5z0oZl@Tpsz^&;fEUIw7b!pND&01k8HJ?bO*sB0L_& zPPW^P+Llq|+vf{%opDln40>Ds^?@E>yK8Yp{csG!f7asjqo3{7wZ$J68~&ZsFHG=x zw!3dBfETAHYmya?QTG!%3{b2qSjmRS3+&XFFT}q)&vFRhn(y<+a&NF{VFn#gJ)EC3 z)-B5pKBT1zRz?jBCd!vA{xXqx29cocu6#6SQ1K~ZY>s9uFeJk}x7F7nwiP7_3?39$ zB*D!J{fHc5{4dKJC?rS(S$XLZ14M)b`5_@C8G|_f(7Qzbc)td#g+TeER-Xff{uR07`#*1?WKk zr_LO~-|6|^jBF8eE93v7WB>DtrjP^N4O89yi`JY8L!1Xm-NA@6OeD@wvaWK3-HRAM zKHk}jq-mdR>PXKj#+aDH3cK|&X!DQ@1r=z14>4H5rK=|;o zL*_Ij_w2@z@;!!N$%$n$P-Xk4^%fxba(rpigNVJiP8M7_qa~$Ua-)R_x3_w!uyIZt zxfJ9Avvgqqfw^FeaAP#g)07A@kNOyTPZN^!QI(_4Vkpao9NokcUj*g zLfH?@^alpfjbJ|V1u%FZ*pmBO=3N;hSoCXB+XFVgT>^oPISQqcU*PVB42PH!$!xwe#@*X2F*Bt~gZ;l{INbES-K2_v7GxKJWY}E1I>4HkvC8bRCZw3c zL@ge{VT*{+rh|>%+W9lK{UYzmx+<=Tpza9pZE<)pBG=9M`F_Q;2X+z!!zMIRVKy3z ztVV~9@Kz7>VQ;WsOGV4f5d#|*0@&lj6?P>%h=5i@$V>w!nKyJV;#|yWO?_fNPslRt ze;Zm2`9~wpuoEZL^^F3|khARbL3}UU=M|eUoJJxq04xw?*q3}S%N2uLD2$SWQxZYQ ziGP1I|9-fvyqd|?Rb~SHgINykygRGcApp=;@7XCsgI9sO1Orm}45pt5n5Oo%6*ubw z(5WsC0Kq<)vksERAymlphjna%=lU-6=k!ICNyzZd+uOaz102Np7;5WGIBqFOX4F|c zp#wVg9t+|#AQTLE1bcMgbwr&TuXnn3w?1^n;yQ)(z&0^=q7XW8N{q?UG&CeDZ;k+7 zc}>{yvI)lNro>`w{dwuP?ixZqSmpwCyEMJ;fGY4JX>SVPZ0Nt#4qiHk+GF zT%T%D^-=)tTJv>?s#hKp@1?6ZX@PJAqisdr)E+d%%bb%E7mpmA9Z!Ab4XH7hq3UNT9eVdLcpO18h z=ZUG|mg44f;5eQt+`L^_eL!EFTr+CAF6zE5SYm2t9^D|4N&xQ~VVbx4);p(=Dwj3-5xi%icR{37-srgDW_)lb>CF8wXW*jm4Tux{!@83p zh0Qv`$b_|ktyC$a9g&+k#?Ws>kdu|CVryQA*%nL8jaB3ltuX7V9@9KVBP;Ml)cdsa zX5??g6&GrHiV~Op`}q~0aqej+2R({u)3oA-FqtXa54YMZth8=xa7B+~6k#-{%E~4i zw(s;)D}!<5p&j@W@-w4@VSMAs7hMte*O#Zi6m!IlC*y|EE~&Aj z1j(=zU#qS(*}S@Q049;p8jfs1eZl3m-IqWh@*k^5AdJzgZjECqakrDAmvQXNOzgc`781+M`~KSmx;*3^(X zNX_05z@Bu4YfBD2DWhhcKxoEj6F7?O5Iqe5<=P+-;k-RHv~0bAfmrGBsA?4YAFa$n zsJFQ#GyZ-bC^~3XIjkjA;kC>2x<1O9FnUxZ8w+1B+QRTEB-judfL`mCEc`Y99jbK9 z4uYoG$_P>z9LE(BeafbU`9Fh-pd~EjX?=@1bJXxF!R?a`-J*VdxPZjLto4RcS$47q>W7Y&M=f|6cD$#xWUK@(4AoRu+*A~`+kRknRxsZ= zxP~C2*4*?21ancADX<}qla_u`qmXd<)3p@P6_$3iAiKsX9Ppq{ zOV2K?ws=fsO|?K?dt+=I7wzpUUb!u0NgQubl}wi+N|SII4<+0ft4Lq#({P2zbscE* zC3J)yB(YQy)dS5BD|R50l6%icM^ahrStAX>pDm$SBN>~*v24>cqr_8^Os4iMq{p#F zuvrhK4PvdP7_-kZ^36K929wN*fy(`A0_gf3jivwO;G=6M z8UxFitjmAu6zUu$c#B4PyOu%d9Pb?4`;yfc0gNDsW|+l+am|<~S8f3Gr=_<@2*j1h z(*;Chj#NOA`@?@K&?AcI&GXfAi8RKVbA!?BW(1^EX`2O)5`=^GLR;6PW0UG4Ck|PA^PxAYvsNjopc*r3 zea3777)Y0l8$&{*dBx;GB&)G%VQrO3gO}%`;*=@^LYa(R!U|{M8g5O1*!9;OX(l3J zMt>oT{t)v4c#3)uBYoczvrMxeR$Aktt|C>lI{Q((w zm-V(EJ*s7yMVu8)mQ@1iOlG3pM5Ez_kf)$&DN!ou%#q`$yh4n+yhYQnfz0*nz9^zXO|9pO}&&?bo>!Mv94Hd&2Q>a> zqa1Hiz!(~&o=eJD;?Ygn=cg`k!$p{KPkk4A~UDR9IOF z9uo7C&0{z$%iyWC&q1gX+9N#}FncrV=q|RgOp>O^m>n;W#n&4+(o+^)o*D8!2B?Y_ zcP=iZ(WVY0pls>#@eE{PDvc| zS#X76W(_4e21-ddKNTqEQD9}QL$OUPK)R2ePy)-p;IClTHVXF)_$$3QM%A1iPxT+vU%VEV%XLFBR!=YeN{)q97OwJkEA;X%agS7(D!aCe%-N>p&#LS_w z$TJ;!(hbk5Aw|r7wZJhEy|bvO>~WryMTKolmS#v19N5hr4DfZR=~d5p_@JollG+G# z)L~aA0WwP=%`O0|uc#cMuW2q}y>7PG^E^N#tBM4x4*CL8^~{h_U!DH+-MVhFiC0UW zTD;0ksd#-a9*N2}hle?}{3eLnMBXgY3I@|8uW*=>d^?p`|A`}`B;g_u+L>Q7i{?q5MClwW)mL?JS%qY&NO;Ql-(=M z9Zze~f#nFlo%}p)bIqdvHEfofSyVC)WD{sCo+{;htPMjWp1GYw8FsZA2)JHOGZ)Pz 
zD$3?oB{_40B_{>jCC9x%rfSwWNMDdhAumIS-UyFtKA@lPaaR46tlCP?Bda7Vp?6F;PdI^n%!Cv1`Ol_5Ve%cjbQkGZp?eM2~2JBECBmR^BGS7eC-vRDdp{ z4a{6oMBD9&*XSDfEa_LU2ldu}XW^k)y%Ah6uY5qUt}3j6EyMltGQ9N z5fo+b`WU>ycJqjOZcE-dKYI3H@_HNH^1u-G{k@CvfNDCU{X?CP%mS(oU9fUq4!7VX zcP|fitMueoc!bZhj4*FQ_=%`a#|hz}Fm9*ftmyaZQ&u=nUHA#<_bO>b#OE9*@`sWA z?C_P+Lqd5d&+VSDYD5H1yW;HPUf%Gvl9My?>~x3QEaITB>}Kgn*j}FO_Vy4?V<8|E;XK_A02&m?^fyLE5i81%07Bqyn*6Bdya%> znmI#$hMc@Nn)>1Agk2T=dc3pd08GqO{Yu8&>D&8hGx#Um|*zMu6o0$grKxTeU-Eg;j!@9N_(a?u9 z%V+wf0?{t6uj~0>Z`TGWm5bn+8C*@dJI?Am!R9*M*BnhF^6i}Il}XnT$mU7fSaO{Q z=l+Di$wEce^0N#2i%%))p#N(GIr73YWN7%!Jb3ra*h|YFccQQR!Waw9WS2B}SlZ$M z;$0v+Sof{E-&XYF4(okbbl2CZ$4QCq>1%#UngIe}TkT+8Pys-Tgol?8=8gYMDd+d& zhaghEtitd!(nSa??QW~;{wjbceCyEam%|*gSB$s&tlL)6U|8pG*IC{OfPHtA!9kL` zojp}@2?_gb zv2*Z%xh2{66P)~?k^XGv6txeVj%ei&?|Vnn{LCpXdVx%2>lRymmZSsH1qs$Vg<@Ak z>FPsn$-=ztDB`2c?A55^!<5=XsUXx=>S0^DFDVO3$%`>LfVdcsElN%E}W&+6FWL4#{>v5BtjEl!lo{zDONRF&n>v*eGX{iXo$pS9HL@@ix_8 z)hF<7(tNgwSTs%>^p5eT-Kk@%#L)Th{GBKqiG6K=zu&+iBw*0#b$<@kW`DqegXXM$ zR8!o!pv)2R5#v8g&OK;HTXId*UErM{)XG2M8&PGmO`_i=IJ(zf&~_bn)&+}w14t3h z8zl1#|IiF1nn8T0YBO~z!%mvgPq}pFiCgUqD|A#ZY#8yt|Bc$9VT^Em* zdSIT|=g{cW6&%(A{2W2G*?&5|+K?cSDZb6C zz28y=kZwpIn^1Jbg|D@liF^E%I`}oTi|E+frs}A9R!F(s>@}TE@Y_4hEd`g@70X3; zNIp^sE^b!8q3Uk#;3*-@z9F0|2E~nygW)K3T(Cs-zWCgmAX zh@@gX@Te4pY`^Xz`6C&RW3oe=$_%b(%MliQ6uus6g`x*?ZPeq%p0^?GkqnqR%- z1i1PG2>b7W+HYu%!sj)H;K(>rOPNv>f3TINF3qH;P#DKjD02dhKtLCCi=Y)E2h%hp zB#I*>BE};Dp_+B%Ea4XUxf5bYPaknaoyXxmb) z`~mSz`@PR7Uw;n$DbPD&dXW?$=Co{>tRf}kjeTThVv^k7kz5FJqCQ=(#7D5XOhrC0 zzFzbp8UNecAO8(N&_zaTcy>CaOlQsTrtp)}P4>453T{bSe+u>RMm><@DZr6}7dy|u zlVpmXk6OQ%8l|*K)a?)f7A#fi;SzAi5^(hQ4q`Dpn{$MSF+&mo3(um`JjhbGq)b6X zR~IO-@)uhLxHpCr8e<{=-dniaEB}T+$P6ss1_UH-!|5grF7KV2=cbWq9j;iV&J3mX zfpsqqWvqE9>%afQRyu4b;m;BBj~^=E;?@7sbNp}kjK|-%#{ag1ru`3g(0@D9|KB@E z6FC^0+n76<>sy(782{yP{}-kB-vE+I!Ha?QT| zGUYnW{xsd~`-0p<1O?J~22?eQ8Z?L=uNghR(=oUiZ|KR-sAX;Q8qIl?79#u@=su6o zbOQau#6bpDW7eq7Xjtd-?B{SR1aAE<>{w1R)hVfqgO1D zvd|@Y#)g_ANrR+%ar<$(;0zpy0rGnQ{?`~?tOGz!J5=jf*y;pbCW8j`xh4X!Hn`*R za$xe{_iba++fY?5Xjjcc_>C6>b_A5vaNS78-ExM%?E+Pms<-M2=OCZj`i8Py^2&kW z3sYpC?jV?s0a_JUMm5kRhU;=6YJY2JN#yRSW)yU55fTwv;^AUz)R^*hO~!W){9?{`>i(_M8N!?hH*Mk^$$^-;qem7&o{z{~i%TA3 zk=7h4MFFWj=(Md2CD2#A(CWvEaEKu^16R@*gtLL!SP_vyWuwwPFVemxJLx*C-gOM7 z%(&umMk5)oA_%gL9i4n5aGSR~Jx1i=&Dn88d4gP`XuMQUIba|zUESk3~JJVxNvK8g4yjI;Ch4OcK?c@W? 
z0p=X)iIMI6*GDi#ypY?nm0D80~oVnH;OQt0H86c`|?xc8`Ydl#x`AgNpJ$HKD!;1>_X?fu(1eBH|SS0ZWM}G%|__iq;I7+ z2HKT7km=~1E+>fuGK)8TNS^T#0qsuM!{SsS)$skm?%uP=+!UbLl^+euqOuAM?`yCx zjBqhuq4ThPU!q3TvBev(BTX?&r=n=B_i)EPJ(kv5Hv3~Inhzt5{JyGdXu4CML18Rg zohX&KO}Dd82PpmS9%T&#k3gd`9bUii9{UtYIx@kCvV`3LvSdZ zO13koef(S*Z`i8qC0x*Sd?caVFujOzvp?Oi#?^AY5g^O4O9o+-2%bP?c>AcwE*@=H zH>OiZcm{?74eby?Tf*dIQ7FRTY#HA_%}Z{Pnr_@nY@u}L)>Vy=V-`L@|LTCf0Po)r zeGjKFg8%pbvj07T|6em$-&{&-TN_$s8%rBoR~uoszXtKmZEgP90@IFLx0&aI3o*5- z)*dbzbs(d*ASC9S2Pie>E8LSCg@}y^Q3=aXk@vT-W|?KV#X!jDGqeu|LV)cN#9!LQ z3sPasXTSKKx9s|QzJvbZ)eR|UV%h$>J!$Wn(FI6MZ>h-ioR^z;3ogjT(&1qdTFsh}yVU4Ew z!MFg(_V+E3*NQo=64s>-pQVVNIS{;x4sLJ>OUNJ&m*)T-S?2b*-flE;^A zZvX_q2e3KJF&>BFZ>knB=HKC@eZ-BjN}@kivP-P2vjJl@b8;Wdw^))`j|a;wYC_XF zqQgDx^D8Fgja65J(WyRx{@My`e6Ut+-zY57?|G&F|FmZC|CH8L&8>_K^&N~9oxXW% z){bKOHbz$eUC(ItkA;7TvxHrYZJhos;wV%CwVmff`lJy*Cuwo2F$HXi1u`28DMFF} z6(k2Pq&5IMwa`^mJjR-0L3pPyffu_0e3lP&O~F%v6stArPS}3#YQ7p@j>-9%*#{k> zMJ<&`Y4X8GcYqtTn+_+PTO@!^3{6;Qw=WkZL<7qvINgHzJQqWXHW$2yAZSAI9lN{; zW5>Pma&fq0gBkNs-{xHT%w2Uu<2IaH?v6i>vvJ*MRCTCT0$B4fwLJXgqWg`>R1r=q z(s3d6=Uq*+!O=O3sUKEOLXw10*h6%#-1`_o;vB>EO&9G*Zt(EH@|3H>ijhNTwyZ#= z(VI}i7fIjIX+gD?#)HnsIZ8b>ZCn#?2Eq+7QU z(GFe~)KV<~=q>?so2?uKA8{tLzTeg2r*>!JMtjsq&dZ!Z&o$qm4?+0GQ+%d{Z<`U- zikPoYuo4o~gL*m&ncy-Xh{nDgk{%fC`axrMwrL0!R6~kWJ6@A-Wh#X$a#QdUHfOR3 zd`6dZXY?^n0eyD_nIGUoc75Y3UoHFk=mei6N-(%g$dFmCmO!xU?4g3G*~0g(sjVEF zYLUI!CGLTKMm87!(a&8wrBY$Z71N|!m@5$)@)1lVP03fYXU&!_sKsogvoHio!?NBq z*GP9;j6uV3l{&RO=g;4`3=*e8>TjG%Fh#2fNjyX-7A{t45`9-WcE;9KE0IcNFtPN} zzGIU|YAIi8vwt-yzzl+S_K_JV&mFdFROEw9G5_gPjnOYjC@0e zu+Y8di&#r1j5;@DRZ>H}klrfrf!bDsVq&gT_rRbf8jg}d!_n3N`UfImB9=y4w=Cf$ z!{4W^ewB6QZY&e@0j1#&@|_9MFCD|-oh#=H(zKVLz^D1b=8@&4T`xZJ;1nYPrqc{U zv4XeR1e_ji2&WG0lON*PCFKp#3A_{1P7KCO`^FB7MTkW697?AHoClKE_9FKZ9|pd0 zQ=H65KeGqf`#FEzxA48v8+wl(l*p2g3GnWdyn##YrH^t6ADd6aDmNuPvHlgVmK2T= zxZmQb?f)pA!v8y5|4FVBcC)i}aMG}pH8F8Cc5?iek9)GU>YfPF@HG(-RKOTT)pLuy z5+88{?4O^F!a~5eKb?(xjjjFs;Ki7N+G4g#FCldwgn9XRm1Io=vfJWTHnX$GM$^tG zY_GnyMkfwFZ;x%e0r|kH;8$XjLNwr-3(7)!usvN1OTGzRaukcgl{gisL1lh^@)XE!+nH9|=QF05+s63G6`NQD~5&{dr*H3T)6*j8NUnYo zPY^o42PX;Z=%?1!kN;j|1ElQIBZla~Plb&^oD8$;Y8ZDAzvmZ5!X!?G zeTOLdtEfgteu8xjCe>Vl-{5pX^82AGVp1jxg-NwlfE<`B75-jf2uSTh?SPgpTj~@q z)EY8(pT3ou5&E;SZZV^&Ja(TVFTA8ws6m_<%F24&zOKwMLE3H9@wykHpHxzXpX#B2 z44EkGtXmQ$y=}9-pG5lE^$Rz&8!Vt20;v}==j)UV8!lLSk1?djzj!zzuObc)t7a2v zoqxuKm(L~c%iat|tcONF0jy$D3l2(8L4Lf$ju?qXNC!6RWS^f)9~WjMNYG3CJ*xvS zI^8E49w;6BOmxYTt8S~VB#TT{AvZhmjIGQthX&afeR%-jD_5aJ;u`|28FnVpGH9`N z`3FE9)Vuj!p>kI? 
zv1`8pqb0k|d3PyvU(WZ4S0(QDYLp&-P5E@qeWeYf6U-3J{0H-Ej1|MJ_|kGE^YIRo zbm#~2X&4xOeFH==6eJ^BKTQ!Tyb~e=)?qpnk90X^4p9sW8_|8qm*0VBD55K0RDlOp z*gx{}-*N;5f90i>p_}1vv?wW{@}H%#CD0r1kNhmw^z=a;#`Zmy`S#1e_%{R$zmRm7TCF|FVP!f-t-y(GlWw{XGuDa^rOvm=2FUD^IFbnt6Pgbb z*4(q!_Sb>-cxDUr@ovRUZ~~rY?rWWoYgzVgX7`uxpI)E>PT1geuvky%TMG!bhA2%rXhh-9&sI%V<;xC%aFc1RpiK<7c65#l z9yM52f>!(VM26hldV9G^1I?~i5CD5ZQJDs+Y=Po+)AXN2QO zFD^2t2?bpp)Q${GOHIyF9D2BShq@{BW8iYy^}VPf0|k^MSa2wsas~tfNsfr<;#0dB zju%OvjkT>O=&)$pgz%Q=iY!gJVA#j<&%!Eo6V&bPeXS(~<7AYo#;O^TWh^yASK2f4 zB%9htTNw4x6jdfGO=LjKG_oUEe$gy{Rl+p#IARJ&N0j4&;a5WIZ#mYte4`N1l>=~L zG0|p2Nmi{oJ$hgHgl9LwGf?f-4zQKlbaRTMCZG;tvU}Ov>zwp^VdkA*P^^Y3l zi)G^-kAxT4jadkiI){=s@=mOKXtT@BVIk5LMX{UMD>bI6)_K&>GCht$X^A&kQX!|P z^fi|=7K)9x*0_f6aqN3K^^X3EHNW`yDJh$?bjwm(Y{W&jKN6IQ5$5X|zFIWbewri; zbl(@Gc9`mQ(*6qkh+($Dajc)VNg_MkuLyG4ilh`VZDW*X zDxrRnDHiZqwFACVJFms(dr!NQ4k=nn{#Dq|U*QkA`Em21-k}49on^R6J!f{4vFS%|};ArKtgY{vLy zZ#Kvj0#Jv;IIrv1zQNh|>5$vpAVnbB@6*?}l5MW?zO+J~7Zpm;SPy0vtdh}jx?-M3 zcFd}OX_*ajf9&pBk83ztZU=5&^)v75ek!mwP z1+t?Ul1~EiH=tZHFqVf{sk8oDiTI0^NMEUdz z3yoMAEJMM^8I&;hySUEew4wJ~9vyOW-}HyjW7qeG;m)SqJCiV~OGT+R8_mN$DL~^R zo|2tsq)+1bHV*u}qly>^M#*4E*nO-^bXz|F0B*1}PY(JYr=FR^7^me?MqoB&*W0-m zpee_s@Az^G9?!yy^=70!hdnP2ddw5Z_Y6A_KTmqzU+lbVytGEw>K_X1zM9pREA`(G zK6IsX7jQDF2*gXJU-HjI%i^ogQtQdgA{X)c55Wp8g6L&hLNbVL0$Q9uo9cyoTtCGU zDr|H#D&Q_6aVEzq3P+`=i;<_sYb5)-GQ$x1k>#5Sm~Y*M25Xj4d;e`}1+ zy~IAJe#t&N+_I6B*sm~HcI3f-L+RNn`Ft&bIK2J&{St)s-fwK}#cuOC%DV3M5qb|m z`09Z(W};dUDS+{ku?2sDS^Pn3|0*GP=!N!esFu~gG%W}Dm^5vr*0SOU;IpRcOkKZM z7HZ73$Sdwww*J|&Jm(dE#UOP8G(j-`b!1cgqw=baTH>vCkQmjo-E$sQyMnd3pK+Z9 zz}ovU*B+s%A>MpoP`|*c8N|BGMAG#Dq%o%y=Ii{SeN1axWS(p5P^i2T8M^A4==7YL zyZTyG^YpI`=cMx2Wt@%~Vx+ng|)fvAC2F@J&o*Sx5ItfTO6A8-_NzY`Q z#_5mMB;fQnxe|JtSOy&`0Xhyr@OmB0zEy31n+CY)83ooZvd$L(cBNg6|sXX9a|H z{=Nn4>)RP04X3llIbXq!GBL@~hR{lr`L-?n=kCODfQ*wFi^2d2{}fB8OC7eIpuwk<@V}sh&j9APq_~ zvgZj|w#_6y>MJYZE*c6yl7H%o_N&btw8qp-i$YouERieA83n&9q4P%l)+1lnLttOG zh`6-uA6HQUq* zjNJax2FnZV3SW#|+M;a3QOGN5_jX17-XN_}tqQ#Xk&t9(*+)r!(_Gly!Uv<&D;ZU9 zxHK4QM%LYhD{6b!{1+aADP_MPtM!2vyu=&tN0(CF#2YD}&$1(k7b00^0;^3)H_-Ak zxijaOH_sDvez{X#VKPqq5dz@`FN@d56wm2yt`->nFspGr)5zC^A;T5YN>|RCxnGVx z-zWanw>aJGB7UhYIX1#r@TY{ws8cYlaRLn(S5bRD(8R4-mCpp0{x|o2^&B4Vup4kd&-!)V~v-A_SoL*@PyP!^2RU{%H$AH1h?aie5~E*fUN-$Kd2Te z6A>D_Zj&hVV|llx>bHNStEkL1R=NS#=NQ+oPNqm^ZXWRo&>SM`k~hYQ`Su)k1h5Ot z>B0mmJM1e7$?IN+{&p$YGYu7`aEE{yrESU|6yXU7M^?PKM@@s}4dq3r2dL|eBPu$&}JnJ>~ePY{#Mu*O_lA<;jF6abNlezcmVb&)xgs(rgcL}lwU5n|+i z%{$K3)q$>FsB+Tjn}|Q zJ6Rq@^(FWeAqHnI%Ri(TQy}kfwe)cC+(UH#6SFv^O7wl&HcqPI5YB|Mixq|1Gu&?( zJzB@&rvPkf!vE_Oj_0q8{%hs)SAzbYuK!GBQ#(gjQ&-dfp>PYt2B3Y);!-++0u5Cr z08bXDuAem$rnXH&%cLA|b;EUO@~naN&wmrHwyVg(MiXwvHI%A1RK|&mq3LVwYt_Op#79HE&4d z!`*%gHLR_lP}Ee%hv7~N=i2$v9F7X(-PzsWdj#ix=TSFo-JK}4gz9)|VYINTJVKOG z6R)6>0CV3Q!i-y37fDqxz;slh8kKHX;b_$C8R|&gZ3`hp(#Si;)c^c?fDQ1ho$=`J z*=!0Qv#T!NR&*PAR28pm#*T`G(uFn0u8P?k-5#nKWBS3>$ZNliH;JhII|dXofw*RqAOh#=1({W?~-a;M1` zU96uI&)Tpy%}Ry#-trFhiwm}f^v6SdtG;8hi91x7dWVV77!`4e;i1F8f^!yKf}L2E zazIWQeYtOfPs%9$(>K9^DBGrtVHbM(8tMS$@;jmUSa-Q&dzt{5LaiGnlAYVs+a;fbZMOJIp5DWF)bkq zwm#9q(9U^Moqny9QKzEAD+|`;qC4?_{sZ$iW}0Dcz#^r^9lsOPJ}64)y%gM877Zfw4(^JO})%ob8Wg}A)>9Az+!SX zDb<$C4~W<&u2QItc$Z|z1C@M81u3hPo9+-fA&dv9G$_8I5TTAED+n&mDHAT{$A>39 zg8YCR3Z9In57*HcQ38f9VJFX3mX_T;lFZTbqj9Z@-7W!@jj^2rw_i7prqXRr(JV5a zz*FWy_#e;-5jMS@%siHh#zD`wWu)CH72H$Qjq%F~HqMkZ#Jk?e+-=RM0%=+O4PV^H zPdj`2bLTSl)2n!+G7e;vsGcd2UN+Dy#N)DAed`1ACQQL+7P ziR*{)G)b+9EF`3;U_tAodKtqZ_mQ~oXG>$^gQK_JXx}%9yE93qH>wzz2#U84!2PY; zGFFq%f$y)j6F$~!KGp`H*U>XbUNnqI4f4P!x=s!wR*8OP9)5=~d$0}Xee%ZX?({Cd 
z`FB;l^(1{&V=M+VcPtcKp)BDc;e5Wcb0~DLK%b7+=)h0rWds+iU}t!@SKNGj2{p61 zYt3zNJ9c!o0tYq(u-GAP+%eH1Xfs+S3C5D*oyKX{SGT@?ju3;%|RlFAEnl55-sD1Y%rC?l8s5z!kbGM5bLD z%9NGF8et)a?!4IL?Xjj8QIQBbNjhyZg4Sw3tJW%4A*%Nn8N$rAAE?1%Fj(xe{v2t7 zt5G>G%!_Jfic^YdzwFu2{{)N4P6V5kuU>}86)f$)e-pglzM@7)dalT&pkr9nifcPK zL!~8U%pCT`PK9LYTDXGCqR+WhepY%>2oHKt5ih-6KPtfE%t2e^$l6|ZH*ioRYeIVB zV1lDFc1Nj-ihuE-=JlY&<&~%?wG-)wzM!!_9+N%JJ@qc3FC1;i8^?X^FvsBnN_avD zq{nvyTN&z~p;JK8N~RBIkzvl)dwt_s58*g&nFa$*08+{Ac0OdzLZv8&P`4qERBD@? zpnti|9jiEh#=Bz|=cjiXQv(z8ZsV6#Y>F!ldB^G-@CK7BXP4})J&7w;-FYpzJFqpy zP@OKg29sOcdY8!MSH+nCaGHXA_IiMcb8&HPI{nxDGHCeMpwA`FiDytDC}&vx+;p1^ zj~Z}G%v(3>NbJJ?_t7V6eEr5jB}y(KZ@=3YVG5bG#R4bgFaLEShxb?X|2ZkQaI$xC z0ZOy{XC9|!tAQ(q)(u8ReW*Rk8!lE66*`TgSRsjA+-boS+)VtFCNjx1$>!I|%mjq? zy7u~esDH1s;BIo^{sH%0mcZzCR|9dUN&g7+V|*Ly^?28m^ZCh&|NHYc;|HvuT&$0% zZa;q9fCGMFH&&V8FT2XEVpChJ?7)G_j5&||>Kx6ZY#y1-;!}2kZ$P33o6CY@khLr^ z#6DvS8k(M_2`bU@23vIpryAQtzC0w`0rOI2+ZJn^xl{cfeD-U^HlN?L)w8=emgEOjQ0#Pkw4Tg( z-Hiux?#x&892Qq_?F}bZ82=n0LV|K}tw4qYiWPJ;x5x;Cs*FxWsNN_zagow;l#M;D zZkr^gj<0Yc_wEMhM9Px5Vgj;)&%E(_>Q$6d$twK(8ltX#%g&0~6i=8H%; z1DL6*BvAZmDI0xrCc4TOFQEY~jW~u(3AZ;{7y7jC*Un7^e3aPApguJK_OA>|)Tppx zGNciOK=Qf1A+&lRbb8T@(OTw8RUoz1I3^36Hf`?>_hM`FD1MM#Nox+AFgjWZO%w;c z1v_PQ^x$ei&@cq~c-&VV`CXTZ!Stji>2f~Vr9H`IK0ik+rz6>to3oR6G7?&25<@yU zzv}2pRpSIK|K1HZThD4Ol+{f5Pio^-NcYm+vZ<2O`1PV2AN9ci(#tSppQdTFKyQeF z7xEXkR%7bAy+D3qnSl8b6e1fK{#8dC36Kd9+jw$C%leNrQDtoIZRLWKBDRU-ik|f! zM={`yl>+xSP<*o~m{K;2Ft>$^9xJvqvE$p)icHR;g zjiRrjQjl8A+i>G>^k5kMeK~oQepwomo_&h2_y*wWO7~%%$s#QnzSsT1*V7d>0Rj9a zpt>2Govru&g7jlvS`kq(#|*vr3AoXK2G%fy7xy6EIvt*Q8RW(TsMlp6VIBQ;6~4q1 z_*?K^Y2Hc12weLSHAu~dB+};x_k(3Iuw2EL`=~*N;N}7e#7r@9c&^`yqQm2%Ttg}! zfjb4yd29s2-mzyUEDRZiyZmsIpu)>@f;`eN0-m?% zPdDnmW0o(ZNJ82lPlpqro&bPn2j5yyUiEqj$IhTW{iI;5w2;<7VolTV?olMx3y4l? 
zKV6RU<4GI+u>4vE_{XhRG0@K=1Nf>| zgu9=XlO)4=oC17pIZDZ?B=J4&oWZ_3gWRQ}Kz$#)K@qf1V z|MvId{@1hgUmJKJvgY@8K-tsatlVP8TXA z1&9k?q71+W8?%9sW6Jo}QHX#Q8Y^T+6~BGyF-)n|ujrkjxjp2|j^lB4bKg7Y+kjO` z0SaNQR^DrSyS~3p?bm*waEKGCZOb>m1Yh2S>%a{>eK7YKbCC_zV@)-NSNzmFBTD4uCXF+U* zvY3-wnwyff_+?52LLWiq5~|;@>ZZ>EXF^*MD*VZY*QcI}^Stuc7sgNpot4Tgdadfq zcRYAsxNj1uaL)~GqsG^5S`M<`)HSU>?WrrWF$D~^Dk)otQ_TTW<0 z&kvg>sL3P zbO~}nO;@{beAFZ@FXo{V=deGiJvl{Pu@;WPxg*> zkq#cdUN+In)MAE_{v5mgc8hI6gS)U8M398B4LK=p;aI-ye9ct z!O{opxyRTdVd#buFC#X$dnGGUfs%hil;+1Zg!T*l)~C-7M}GXPS<*H}Heez5pP3x} zeNgwikrm;*5B(Wa0)Bx-4-phd8Yk?QDcEQW?vPQPR?!f9F&HcZ-1KPiG`mgRl z#naCCFB=;dLlc*cxJjn1 zgphtn&rpoDO($K;pFyS+y&An166O(B99l73p4yoKH`lV@Ef+<((g+w8_`uM_jc2_%ivTWps)?1*UC^!0Og0=w@l?(3n9>T)^U20*^_~;CnI)aqMjA%Bz%rz5w|Q8(*G9JCu+Le% zxo#3|jaNd1s(_|Dp@r}-1HMTedwF;@xr;N~Z8s1GblWDe`y4H%HoBH6Qy(mD$y`8> zGjp`M*&E9@x$Fc%$6V9d+Mu!J!u=Y4A-!u60Rp33^tXc)@=p6}-1|E#Ss9PF z&%+8IxB^Ion>lqD#JA|6z`yw6#F(WSDaA?f?Le_*bfG$q4<|0yHrGC2+J*7qEPvfh z!6KKd{IvNNygZxoy&IhqnP$%KU4ZQrd^?}D9sxA)8{-C{vpKRMCpvXW`5ad>VZ?O2~9kyW#fl*))qHWVV}wGZa(wRnjTPneD3!3M41p z9?@7ZGKE|xMOsyqD15dwnX&yu#K+`87qw-|-%p$Fp(#7-$`>v<>-0lao$ykr5$urm9&;^K`a*S{^-dj#eZJ(TcX4yzQg!N z(l=Ztktv(?X1t2F8qD;-)3)*MoA)lvP3ZoW=}z{s*h7R-cdQ-|Mb zoicd84iwWOg5gJ=BXn!hkICD#3^4L7aa;(Kx z+7)^PBhW9IYWq}>`3GkA!KeEb=PTa#Z0D!fVIbi-4?r5M3OEP1eI`F%g4(q`&vaYN zIP~5hUwr!2TuZX&eQlWKM*XF6k`JRxkZSxV;~{&sTlu~!u2L_bdG(C2WcnNSu*vaw z!@XCHDWsg$;6}pp4Il@TdEpISL6`;pBMNoA&}R6-mk_9=TZB*N(^kvwdh3;MItw_$ zGfjYao=(v-d#TI9sp1x(aV);O(RbM@*B1d8G9fu3hE#ISEey8wASUhemO`9g`+6Pt zYW`w;IJcl#nCR6C3HT%!WImk+7E}i6Kb(1YU5`Di3Wi3<^Cy8JRgs*62*~3L6+l@BL&(5c4AdMw?_N3Zd4ZnjG z=*zO`6a#J z!E3}GPdtlakAl5ev{8=C%8*?0KvNl3(gCiNwm=$5+MRaVhB1FzeR5< zJ+i(zFnTrr5xxIz_2~Y4S@`$j_CFjSl|TJMlS4ykilJb_#j1WeMFn8id4QRQ$_o|$ z!pv+1*$cf;bp@zoV)QltPeXIB=&(qJ*s)f;Pta)|1 z1Ph^VfVIBcq*uc(!nkCamI0TRTII>YTAN23h?UH>rHIYr*Cg2o>M?P7`jjfLm6sQGWe+vH>AIM%sTSa`Ynyw0SfKX_NjbqrkMFDkL4?Z+JjKy zsFE9r(w_*8!*lD8$1vn2?n8ct!r48mZ?ieOFXy$_558wH?BXwGkqhgoDw)_%E@-N1 zWT1vvgm01=%Mkw%UxpZcrjyM*9lMU)Uea~f_*C98Fw70^j+!o{N}i+AsX26|Ysebi zcq=PQ*JQDNt$b9ni&fvvuwq*|%Cs`1*aynqxS3I-gi?Qm`l!cCl7{N>F)~#%N%A3Z;$28~oF`x^TZ(r2LWvRQFMh97cYbS* ze>5z4PWtoahPejMINZMo6=1p^s=fiXhUCXJV2{)uG`};j18pboD{+O@BJZ$|IznPz zRdOh>)K3zni^LrPEer8PR$_sf{|eVQeNR%2tj$Z=ZSZy=90o?;OBnXGHSD=44k8_@ z3la1YkcS+l*>Bv#H8Z4B{Qf&*KaeBY_BR9P?f*J57X3eC|Np))`JdjN|5=&*$J?{m zQU+5$;3*opbKb~sq|RlpF8A9nL>2X>y?%r!0pF9DO$KistkUUupkk! 
z=V=&1(sIJtVL5!Y$$$Rcr5PT#7%nvDVENGkH;tmKLjvXfo=4 zZ#~J%fZHs&%QoO5nm>pv@g*@zkJy(n#TctR7#zFAc)9O0B@Cwe?$9#Cr982Du zCd(%cmm^KC@TeozX6Di*Y}sw&x}AP0UfEy~e$xueBpgGygeNhaj49X|4h&r;GOf=| zua$jw5DC$me5xEv{Cn68Ds8CnljM`et=wqplnaKh(OqY(QgnIpHjhiT(hXw*9i>u@ zrj9Vc6s5lbL9|R-YZ$HO#u!Bddq_v#j>y+q2aDWX!NVR*s}K>M8lvm$WG=h@Mogvk z=Yq?l#ro2->NJ}fN^2}z5XW31aVVYC7H{>8>$s6%iqG}?TFb~WSLFxCan|IS6b49A z|N2R*D~dQ)oht9{F*$$scw#$!!0E>`syULk&|PLam#mv)%c9GOlI&g$-%%I20Ael2 zvKe-H%-|xsDY=?s3^c1IA7&H0T5Y+J*Ix4XZmVNqgeu8dH$LT!kPA+{%+3skZ7Yxd zY5!nifaX)9+p(r-L`gASu|t~8ndcXtOq=}5&D9nJG*)=p8;J;&ZEW=)LCmiHhm1+8 zL9!ge=3R{_K@Oatx1!sZQwQ&8-H7EvmbsWzprlz(A^G>~Oxc|d;Zci>=?{U{aO(;A zQ#We?65b>@3k#vV_<(rWiBxb+EOk2{G`#7_T+|alP8Vi@(kS-uJ8xMI;%avl zm`)M@*gyW8tMK=P;g1c>|2Z`T4jUZ)niIyTo;v-87r^CJq7wz92tF586fqeVBl9UH z2T9y$gn$HZQR_g!J0WIxp>gp5uMi z=6){l{&q{yOXp`aoh5|V?lX7I4ak=}eEhNd-PI?|(53J8)8@!B9Czi4(f8Vq%R8=Q zTlxDB^aSGiL>$NWSigKjqZqR%VTWVUgTH=Ak&cBDrNk&^NIN#1j+1V{5r>Z!q|N*A zweMioZS2xmo+H#)fHQ}e5r5fTbir7wJ_u*^sC>x|DeN4 zMcRAVb=hlnN8*VwyuwA8W9eii^^JhKbRx5z&;zw z&h66#n$GD0T#qLFWQ#MVzLU~6fayTk8R-wh`^dmEk zf|6`a6dwXQkr%-nY;kZf)xn0VY4Q8m@0q4c2q_fSz8ju#swark1-4*UK%2(Q?QxH1mUR zu-=Hy{kErYC1>J-O!MeSZbC9fBpst;+v(W0ZQHil(dyV{$F^;Etd4El zowv5n-tXR1uinqBN~Kb@<~K3sm}9_8tloj0IuKbP({LBCWtImUzt8=q(}MRhk5glMouKx>cYo|-f*`5p}@Ufpr|<@vQC_V z8Jy4o%b^uFF71Q{u5P8gOQjQ*n9)%!MT6bIJZrr;Kx@u26}+#WWmxznCxIyO=q^;5 zvG^uCDT0{FV~yH%!prE$Jeh7-3)^jz@rO@A(77a@$jGHA0I#qO-CKfF**<&W2T>rV z=Ix3!Yp80QcmoKJ1oxX2@IZ1j=?)jYv*1b7apUO;O+>V2s@Si9|0VOw>vONvN z`mlci1fRVJres<0XO3Iafkc#NrRe>48t^XE?hO;6a|_b z&>twurb`kf9)F{oqEKbY-~+46@IMBXe^-~kO^E(+>HQ~d#YIuZ4j5-XC*I8r@D@UG zjowLdGu{FywiGHK!}zHf_dX@OkZEW)MWHWY`$CcT!wJx8H=~4tZhwy!{LV>8G@yPx zaeA80W@q|vP-VCA!P!2#=XlJ59Z8NykA4^DXgykV^#FEXTvqocCig7OX;3fos*!Gu zmGHSYGlbtIKJShg;4BwKS+nWx&z>aDpacKXG#n)){8}p zn832VmEeJMKeRxFp?qOENa)Hz!D~K4?^9yP zjrtjSXgeBOAd_I7AJZr=8cgW5`7w(#-w?fUz5)SbfetCX87Ns$T@!qQR=EZ4puDFX zDvw~B7%(`Sy)l~i9-2ipY4@=Gc}EfeF#^g#QmVb-1X@}6YzZS55KBi)XP*t@LNo;X zl*o!&fmgR2-b6c^IieoK&R6ww+g_tBHJ)t_=fv982%;J~!0!6+>kvHc z#aMw1jIGvx#MZx4oZ4T5z@OJ#e-R-6n*ynUEs*c*VoPG=XaW>c`qPEh8TgaUU-_>= z#pYMF0bg6Jx^W~eP0i}C08+!a!hSHlnEZidbSM%U(gLj4%~fl=^n&y?ceiBBSMn%7 z5K(JU&b+r>2)~4#mNqL15|@M?W7CWCjsxz4jDz;ojd%BZm@h<~rc>BabZ|AMngLte zK0{0Bxu17yjoN(4)ol#iquRHzR<~jEQ2Q8dX0?nlMvR*qzXkzKZ0z2%s>pO2f5;sDbq zNK`F?k(4A(Q!Rf0uiylqVMeQpMcnbQdnwx$GV;7pa$`khzi_(k4 z(^qL0;VxaXNRHu6DA)&rs7EVVONhad3!sjdl|Sa`1@5yzsU=46DN*3eFpv_zWT6xy zE;0ny!Oo^mWel8kKo(}KsUq1Oru4%Km2Pms*)jBYQZh)J)Y4fNUb3!FJK&*X&<1+~ zj5M5u`_YwAv|0O`uywANUu93t-IH_fMsF@(xi{H$=IldzM&@)|3LMXZgoQ~ui-;q~ zV3#X}utc@4&`QdmT=}IKK8+K2LDvlz#Ppt&`d)_}7(%NF3c@Il_afl}MhxoF6(ki~ zD;cHR<*l%Ia{-6Xczq3oOL%eb257uJEV%ePN`RW4=Q(**8nk}pgRE)Y#F{hbp8Hc^@z+zQ=R9rS4C(hMLPSQ7@TrGQ3gD87S+s>~KeuLVuEk!9s zfe8EA+kM2+xSP+2RM7}EKfXWM@2G)Xw{sMf}ZcEA=^%9S` zjBw@1{_Qug%`&*_cn6qcqW>X^{O=mZ^;e$xpCkf*OC{!Zc1|XW2DZj_Hh*!i|EzJr zN*BLUGksua#G;k*fgB|fjvN|E0TCGjvalE&$`BNZ*Jhq1NBcFK^REK$WPIkr^l3o< z;&+Oj(>tP1Y3?Iy=i}`jTz01S57~BnzkJ_@3e-??*bFn7M2Af-Ev*?PwF}YbbZ(i% z^hDPN(lT!oa@y3F4Tq^qZQ$TUHXZz=z;cRqtqwkdY8j-g9B{p~Z6fvG|2T64zbvev-h?~<7F05U2Mg}^bvxfHM|=p)GS(z)*~;23!&IO^?H zoOFBW8BnF>d447}H(M*~E<6Nbk83sm;JxMS6IUFQ{4_Rf6*lVRLeLwjZaWk0kDXwn zgKJGCCYl>tOv_mWrNqS$wUrc_-YkQvDelFneX32gR)S5AV^woe^*K5vpa4{@z8)MD z8O_>QS~_=_-hp+*><%atj!iQ(Gk=&`p8`HgR_M%6wtOcD7{WzPi4eJaf*Zekxt zWT83JZd}M}`$>BAOm%B)q$1pAnQl-_jJy$)tRQ`b5C zkWi-{tCV6q;*)C zpF-cKCrIonbpTzz7E}0Fuvl+kF;)8qs`l?7{~HGQQ_Dlx#MHpW+W9{uJ%oU(U!y-Z zA}TgY%jzh+~PVtu! 
zGT){UUJU&B{pKdtzkILsS)%-&xy60pF~RXb?{R$uL;^s*h&&?pXj$KJqI$KhS0j>@ zFFc71ox7e?6wa3}CmJ^iVxt(`=FXPhQ%$J$@iwK)q8o79p5DioNR~}ke}<3Gzm8hV zku9r>+H|;stZBNvrIE?3ORG06Vm(t@wqnn23Jn2n{k&e%C*7KeAg|Ug;w#4JLE^xO z+)Xxnc1*gAgMf(b+0{AbTVf$ry@u3T@}#<}PvH1x)RP7>7f)Q)dbe3qAF8>UvUgpd z;$#6_W-K9$lBs>wHZg3-p2uXDNFz*;PtGIAkfUd~t#R#F1>yj&Z!K{WU{9ikuE!HB zAoy(8j6wR#GLr<4b~AEZ_ZDx^grDB+I&x=A*sRLeL8o*CQpQi}!HZ#IKVX<_VlVsb zoGSrZ1diB2N}6Gk41B+E8`tNyDKwG>rKX?NHlDjG^?ld(vrf(TKKr4wYyAPg0{?JW zqBml0KikA|A>6XkrFuC(xL>lY-djO&eGe^amUnwmB~6E;;*Cg;RvJ1Z8Y5i7ro@0U z1P$o|MWJK@s{NR2x`f`Vm7FAtqN*32e5rfvW@dTmq(NSeV?z0$zJ0`IjH$2$&$vn0 zc3FO96)ldqr)9OyK0*!Q{JcZ+Hm;-5)^sQF06AxsVk`fvLZaqy$+oD_DxFD&=BZDG zN44bzMHc-&N^lc`GSgQe zLi!A|!d@k1%0B268j)WTJk`3ZN2@P?aNr@+=6qpwEXp;gRhN*G!mU{rjvXt+H(MgW z(j7dMn(N7Ft@(=Pe&*lSDck_~x*(Rqg=_O+QVlU-W2@*ehxlc>haNLmFh z_U6*6jd;)ckvz-n{Sw}D*+^H8o$Dm=(fLz_a|`p9SL%#vfN`rh_3gn>fw%qat$Gz( z=~tNLQs(1qxKhr64d=d$0~!lS_8Fz2a93v{=Yk--Y^)%nfcx@A=i`I>aL*=P+aIQN zOOC}ab4+<*zR8IpF`~^kPkj`P=ZeFUNA3CEq-=`2nm$vx* zx!%N`Dgp-dW`^u$Sr*AKdPu=BwiU%N9DWk7zP%NqI0aYHlkxS=eZ>Z60$>lGnLd0a zO+oZdq~|LC^aM`GO7h(&FPOoa;T=xUkbm=qV5k$uR`}VD@E82R_|yE>%Ratelalsz zdkZ%(A!Yto5aVw~<6oUUa6jwhVq@Yc;ry%W>3>=^+Zb4x$eOr2|A}D)t5~QZt6})k zf&pM_fXWAoEXz3n{Ydi$8UgCs`Y9*@q-y7`)D|e>c&;OAo*#PiB1Xxx3wBwv`17+5 zvcR*tc<<(m@2hTJS#Bd;?{8OZUnn8LJGMnSNt1Arbw&4iFok8d=)U$Iyidd$icoeWe=K*jJwFf6+J@Ie!Ci;*-#M4H3( zN4~h6S(ISMvzqE&7X|V_-gV0sIgE)tvhPE$5W4m))+kgt7sKE>*~jXYPrYa^LVfgs zs#J8=DdFIKF&>AFDLowRse>93#av=x^Tis-zKM${1t${gt_x+0pop?&Luch@q5Qfq zbuxIGD@(2N2cH*RCMTmHM0naD5>c{^b?826q<@0fPf*-d$dicS;$V_KhyqO&XKdhJ zBt2Bl37EAO%^9VhrfdQMp$9a%fY`@Edv50UB?HEQ&dQRBd(eG*v;$L@@Ej7O3U``O z4b&%iee8%n{mzjuIeBr*YI)LwSC#Ofi^bb}aDI8g$sEMYG^bVgl4^XhI_9DSR)y={P(zCwq!Fp@Hts~SM02j0 zTM##YNgv3l$hn}9GlS#Gq6QXMOt9XzlAet0+al76X;Ck4{7p4=;OWOP_oPli%g z_O}+;vI0aYEX+8{nWKXExQMR8Z@zCww^bOg9KO7HrCC63LytJ^&w!gg^yOyZ+IKqR zCeIr=hlWUPY&PG%m&6K`jt!r%RP+=ZBif&H$;Hac>aV!jcbnvn{g-H&oSuiw3}2A& z6xo@e++y>g7nALb?)W?SeyZozMX)fcl$ykKkPMOO8D7ej+wq9qWxwt+X^-pq0otAp z-19eYaU3r{W)eG`GI&Q-V4SNy*{oy&lqq#FH3h<}f2`bpCCAOuqwoNw zzrU47)G!P}qnLXT5a3PCL50GR!6m|>9JZymrZ;`B#4%HRQXwFTao>cv%=^S9i-#dD zldzh862Id$aaXm$=L_N<#w)}$R}$Kv74cDsE2Hc%b2^?GY2N|qDus!;F;AV!LXSSd z>kU!JL`@FTn*~wy+MPt2sNuQ!J}^)dWkxViTdHW57~DjFr_SypkA~z??YcJy=c0utWbfhpt6zJ zyu~K8kQGHDyZGc_ulMGT^r}c<-&JVHY4_LYF_~rHMdu2wv8!j_MewoiI+HwKNmWfTig9QUd}jCcsi(W~bjzV5%;alw{#tV)o&R5LXZRmC&#p685h zUVJ9~Zu>o6IOf#VR`qZq{58Qvc}R%U53i+fD$k4tnACjRrML(4DJ_D@Yg^uhuJwL4Fsyj|ysE;*L z>wH-+e2|3ima+VWwr$U~m|Z&bTt@AJUBJx&AA!PZrZidV@i&udRTx6U55V5r|DQnC zzvM8f{))9fxJV+7j&_bRK!tgry!YRc_UGREk5j)!l+k&~90E<0%e^49gt|vtD`XLR z7LmLPnOEV)Aw~E~ukj9oXQ5YIEM)pL48E7Vh`PnWdif9;&o-u;(>kWfiPxVuZ{%Na zpkc4VkedtD>#TcXG+lItbPg(#NU^1|MrK1DZTt8m_o90hLBr~wF zkvM(Ah1mB%J74K+d2|PfHLh`bW8|7;vlQNZZ zz^?D6Vw{r?$;oFo5VM^*x$s*GFAkL}2G~ApJ5cVD-!|1WF@usz@pkDQXoIJ+;M2{;;*%Ox8zg+2qRbX&gCl>XuvezI4P6+3(}-c9abLhVv= zP4+*j@x^lmD7d5y{{nh*n`zzW0Ou*r8g-JA=gul@c9}Me&Y8?d7AyuZf%%U~^UrWkz;nZ+s|d+8{P4LRm4hFpMm(1S z(MexAu|Im{Ugb~Pv=$`F6+@-;&IfAiJ!|*G#MXyqY5^aRIm|hoF<{?h*eGZiVV59Y zL7TKU6u(>UieJL!?MWVAH(P7mQP`V=uQEaDj9u}_u_H3nE<5yNXB`S=4OA|o_yZ6D ztC~c^!XGq1J+w_R8g(dLw_Sz6?B;0z0xJhHt?UwtWM&Zbq7NYfF*1OLYf9)UY>0#! 
z)tdKcf$@Hph_d=cnG_7=Ry)R&qCLyzfo~1M^?fV7{UiW56b2DgGNPsu^4RP_W(TpS z>l6;X6xz?oLeSsxdaIQiy_kz!8xKoY29#BRP>c=Ci3M>4$hmNOjWjc@c?y%gTp~U8 zSsS6rQQ~ednEZnyDPNRuH~oA(kxjGBgGOkX*eo~0$w zkZzsi?Ng6*w95>0x99u@Owtnf9`g;Ae!)cawFBA1U3`P)Tb2-Pr5p6qJk&)1ynQd)wqgvgW_OU&jc4K_3$~+-Al$N!*(bd=LzV{o+}nK>$aVi%=AKbJKE*p##Dw7L(!!4DrbS8sdLD z>HKdY{_DQg*5y}w9j8CLgh(I>2}KNrhX$sP8o4J?pBeG1aqd_ywc+|F#ekSo(+?(y zM!G6>b>p1ND;+G+C?i#pQ~{!8+e6AvQm?40whz*%;SD9D-iPNcz{^&iH99@M7wm4A z*YEPyrOeQeq7>|X!a^>rK1Xiu%=i$Umzbi-popXN8U`3tljjU53(8vQgWS+QHA8vq z%Gb7MXrN_G-+8Vd@Sjw2IOliNvks(4AC!(h2D~ty#Zukds$;m&gNy+piTXl{W`e|E zl_CWMk}%4JNjopm8bbZmA{A4EbaX35jK5yQhvJg^CRh(HoBz1D>#u^1-&-0KdPj#o z9FuKG70x-0IohKZ{uZ<;ASj~TX0hD_HYuYzD_`$nyhUxG#mZxU$qFqZZ5U?Ijwsog z=&HrNJg*CT09l@{b_ia&g3P^j=HZ5NKndc%RXbj-eGXc;<|*EiY>NGkFcuf+c9jKg zKiy*yaWbKk3j)E?*9!|54zpe`GKb^+%jDq8W6V~s#Epte_fDqr`Al>$(9=x{_mnEE zm&+7qPt)(+kwYL=q#%TGA4Bq-_ln$gWi{ax5Es_D-`%5yWtd0+k5v+U5;SPeJ>&is zF+Ei~ZIWey6xLkIptz6ySR5kaEzRt$WVNMK`VAWzagbZS78iSa0oOZqw76NB@?*=zL&;_j;F0*OUT!TW$0HUgPrBWDzBaE3 zgjv0Rq#Y*OXrKgd%A$+URbe!!ZGqsbDxnOKCLg4`9MfR0D&k67YO?HL^HNy+FhX3U zGL|Kcdb3i0R3kMBg5dFFgTMOEorRfkN){B8A2&xI2*UPH@oh~J1|M5tgThJ()44sq zgt9XZY-^kh3TJ3|5Mva=I9iAvF zXXGq+gVWTZHktfWl2?y3zujDRTP0$bNFTi-|_7|KZVJR%B*bBmQ@7*GLyVz7SxehVx%B ziPXO;*QXzmTQqnhj zt|Ucv|Cyc6KAwpwjy?BE=Rga9ac6aJ3{{B=^x1tdP(g{P{(|bs*&xcF$Jo7QV6n0J ztbX%UkG$G4I!PqcQC#e>_5?x@+D1d0ss&B*^I*-rLwVQW^b4m|FMm-eJO-w5jEVS- zXrI^MR6N}f%kq5nR_KUf&ft`aYk@nC?P74{wdY)JL7%SDo=j(-ji7GRXMfi{&l(fA zgoA1SVoX;NFtJCOS+>#GlID{*h#a!pE${RVMW6Uz!h*9^U`<~LVRxld*i#DvJTbx3 zSaCRNqPiG0Xn@actoW1m>uBXG%^N$alo-|=ItB1 z_8TT+A+BNSiQy`hX#rXYl-py;7*>fG3@KZEk$N&hQ;wkR-rRECsCx0=m!-!aP4U5M zDkGyMP7DsVN?21gAYktM?hrqFsG62sd*biRy*H~&6Q(az!6=Q7u+mM@*UM)VYI^-` zURT`ICOIw6_Dd)>Mp2120lm(3^94*=LoYtM{+AyYJjY=De(~v&udFST#6RIy3zO-D zjOkZ%^!#p7rmRwkbmD%Nl+}+-Q$0HyGuk;A(&;`3yTOM3z!M>tNXrX1($mk^B~0{| z@d79*ph#5P9TvKP=(r2U^3d#J&XD8|$4*bOzrmaFZoa1fsN!)9S@4nJ)9xN%?+TT| zAmPfhXq^#k6JxxVjBX#gT({GofuOh07Rx$SC<#GO3b`Y8KpxcJF7#|wZx_u%b}qKgf47guhDy$kzfMk!zju(C z%35y7Di~|zp)CwpSW-2R_lD&bSXf&8F+m_QT9Nr8;#lQuDI`T_-NuDhjD3oJA|0<_ z8l9R2N1C-s&%lQ-`b=3+5_vh?@1J#V~1K0?1I= zebO|=a@L+~6LxC}j4%uw&`8`(mLMgo94aWxRY_oFhQYaxEVI!-tE}MThpWo&7KrC( zP@cu=ipIq_*~;zX%IU{&*2Ec_70rioIo8d04YdnCF6`XB>1tSmKj zy?tNSFeVUWF&KgXM1Oh~$udfL@#KpWgu9FqEo>fb-|OO%DSE&ayXaME_sHAv!I_+i zQ{sHYCJT6Vt70iMZ1VdD;ljn(C<-}US`D?f95_|Itm~P<7klE)<8F#f0CG%)$>lB>raoI+7tQJFQZ3mEFPUT7lhyk$gfjGatG{1zjB^UH)7TQ_ptEa#I)S76QL%vzmLW zNLO#Q3N>Y!TB2v)8Hla?lo_pr*DFkkaB;;oH;@xy!uJ77)js|pfY_~UNR>pifGT$6 zeA{{dl-B5nkjsG{B(fh8S#=oJq!6#@NbAl;*Tv!vsCQ6wOV|zbLPaJ!1{NDeD@D)0L@Q zbK-t-{+>futzSXtUx zvwduIX9+Zb5E?d)6^SCui}Z8I%8}PQ&{~CQvG#(){aCAGf)PdoEmFw)cY+Qbm*#1p?_Ua}4?wvc6z zo>N)T$y3(u>s{M6AX;TXZAq%*ia-Vv&Ir;rQKvZDzAbN%MJe5Tepe{vWDivW3|T27v%j2&S6jD?M3Jqhgp!AL4877G1T0`4ObRb@Z3$q>(2NDI&D%z!$k~NrzhZxi= zujrI@W^QNpJ>g6M_1n%n&%u0$`r{`pQTm(tPEP`Z%96bz}L%3a6b(no&LXT_9<6pgQ71L~5D%dq_ZR zW~PKCbJ2*|7g0mo3X2poiuAV-Q2bNRe&^P%cMM9+59pmc!)0_BG!ZsN?blH8%%~H$ z3$uC(tl8-1KL)>hIhwJ{?hrx;CG;wnhTkyrk?U6>aLd+xz$>>lvOO4%VTk)2`YHLP za;(3n`nwvf@1U{CD^uZTJ^T3>nKKUO@P#~MVl^PCJ6#%&%|8BqmQ^!sy|jVuha&Yh z?fDX8zPU{n164FluidYY%tQF-;|;LZ8vWxS;NP`Y=dVZh7xMl4ea#<)fZv42#DQFw zUsam^vWGRYv$h7(>HhC0$M*N{u>Jn}DwO|lk3|zSLMad1hR9W=5edBmog*TGG|GHl z3L~~#sZQW;Eg1oPcw_2(+765ei~9W=LDJQM(SKSKnQi7#i+7q0;*taz2_JAnFv6_MkQOvlOpB8|*ATBAdR`0E) z)l@W~+{m@W5i4lh*rjvqB#mR;QA?daDR=zK=NU$}o=MXd=RIwLx}+0!?NIA2vk?5kHq>Y73=qD)z%yqUBJji+MJlWKn5KI zxg(i4YuU$Q)Tx5XS&dE1@k8I99uaPTDm)lc*1ueBq^_1ejqW6+LFc2k=6u^~_y3UZ z4BMM;SZn(&Xvp5r%*r5TowuzxGH}Ho4zp23lf1)8Q8VgfrPf!j*uyR9wRSBC3xHA2 
zj?gDxxvEGL5h6}rN0b$B!5W0^58Yw0z9VD#prTD|mynOL9@OWfH)XJ_fXfmQ6RxSE zoD1{a7jb`)Uh=^(&2!iu8R|C34Q?$l_0oPeCAWp7L!yErP4Kb4X!fGoisgA7yEWSt za1+l=*88IytM%r>;SjZc&MH67jv|F$tvT`Amt=O)i}M9TNl$hFXFwjaAGhXOxER

      1DHL{~!6F0^BXJ+)c!)CoQdW!XQbc=g&h3oKe62;CY(rTNmog z{?=yQtpafj;Y6LN{VL%&xE;>0SZ%1)kkkUL9fV(Bn?kG#^bYW~h5w_o{dY;j{p+>; zUebh3tPMO&jDhQIn?LYR6)iU;HI#KPfB^;p2{MSdt?4ja2qFW}54;4lMI1zeVOOxd z9#q;Hi53yu=9%mH@VJbwRrRE_rVK&H=RErOm={#kD)-)C|I}$>{pIC;FXQHc&+irL z7l%i@o)#6Wh4AMvg$2Z2(vI@X!-9kowMy17O%0)HOlE;YS$xaMdUcI5-Y6S1_`BSe zFWm=@x5k-Y+Q_fjYl?`)MXiuIZ zRE*^ASXcY}fwV#e+kW0{Qo*m-?$r{T{<2_%I5pyzTf=GC!uHiJRoMASyF&Kd3o>JP zgebNO`m#m?nhih77w<|j$1qWS+ALFG5Q*0A{Pz|l`UGJWEtgxRS&S0KMEl@;rp|(N z56#R;JoG>HSQXHyPJbt_F{*IS=UEUF#>o9{%$nRdJ4OuIdHazQ#ul=t8Vow3nx5Lt z2t`%o#s6bV<6tk0P`?D0LXbb*AYm^mNPeW5=bA7Ee3cK2Et__53NqOi)_lLWq5MF= z(PYAx#0m*h$mFi=r{}29&{o~ z%W|aN&E)y@haA~b!9C*^$vk(#NLz`{%O_FesF$u?r64Zbl)&_IS}3p=n!s%>;grtq zCAGSan`~MujnHaCm#Zl3q|HiXReuVO@0w*x6gviG9~TJ6OA~UsB`XT?U}ir&^2PiN zFRq1iBTJMmYD|hx$ClEvZHAad60l|<8lzsf8O{3Gn}DlT?r}6sylc0&KiQ_u-F3TX z*$^GL_i(C3Gpn!IqG|GWh^l2Szgo~xi8>nM|k`IY#?L0(Cz z9CT^eC?C_HWL>&M{Y|GgK^>pcCkCL0zzVT}Bpw>+y4y+&%lZUF5h*Huzg4<|1aWZQ zZYNg^{|F73oG8;nk))`gYS?EYHoVyREmY?U+O)d(z?;asr&xQCH9Xy@)yTVX&W|)0 zGtFCp?K~Ze)1R+qWv=O#ThawdbjOB1k1%L_4$bgd-O=qxLqKnOdn?Oq|MqS7Bt{5h zdvDIt+HYY}N?Sa>9hX9^76gr+SmHRSJGeiQ&fMYxmt*)abGQ z@Y3T`ec%pVPB#o~>dC%#9UkG;xqw(Qs z4Tb}`hgOu1!J)-&j^<_kZx%#Nyou};jmz7)ja(7GW!SlaB3h$EX!F?sNA?NaT{$at#w7@%hKTIJ{8ODyT$KPs$FG`KiFXvYW!|@Ngi!uX*4Y{0wOnv+RT(0j6JophK&EFm{iAmCToWNiRSo z4vcCC$9vYaGzLCgISfhV4kfN?A`x=QXSs48@#7%DOD?iq&gW|cQE3ih?irEE`cH#Q+V&}e905ikKwru5h0S8Fdajo6_uSbkD1_3bYug^O zcpgqW-2C2t{sir&9)%U1j|LShZ_d&mt_-8!bx}Z_mOiwPX1b+i&-%P}$Rd=iv1%wz z_q+m)71q7Co&+hln4Er(1nM;;zYJtHuUayDl>2!HgCuvZhMMF!7?ib==hBzuum$4% zwwhae-R$Nv#b7;?)T(uyHa#qB26CAr(;!*hWh^G$BFb~31+;YZmdmNpLz;Oi1e2H! z#bm|M*f5<5^ETBjj4>FwEJyTz2?V!k8K<{Xy?-QZ;-)=W@U)R*S9mHdaGw`OjPuqK zpeAl2EN}W53(42-7>I@iD3+}$e-gv)q#*|QvpOWiwP7&OM|c7CW7PW8!7;VjDCT!y zQ@bK^X+Z!2vp@AT$7DGuH~n*c#0R2X*J2DRouIjrQYTV4GfajXxO|SQ^SOEV^uv|( z6hyR4&NR$jHV*f|I!b~Sx--p5lVpW))o1ig=mYEh$-mLB&6~d8(?|{72&pZoiPWFi zF|9V2M{H$jSU7TZ9)t#s*AgRAe2~!0-r^t~O9GOc@qpxJ%3+o!<#?^lEQ)eKvU8Dh z*yLqcXZx{btC>Zu%XGHzG?f_B-RjO}Yjfci^W6JY_;Xyv z{S>71CePLl&8sF%c4h|W(BZAzDqmke2c8d^*6~c`&@H}|p+Rnn@Pr|fQYc@zMOuM5 zuTzdokm%k?)+=D<2o>|lSbF+}nJ`@nK79b@dl_hqn%wErZ{)$UV%HT%U}xpcu~`B0wGn1wqFHKtfDzfdrm1wME*f zz%P0q1wld)fRZmL+uOTR!4#p}Z9k$f&OOe3-+o>p|Kd4CIu)T@YUJAek+>T1>PfKL zKTd+v>{%B{NdSuZ-l&I^O%5`FYmLx!E=gkNCUW24P+AR7*TKW_lzk4k3j~fiQt`}ypm%>7=h|gLXOaK6Ym$(N z{p)or|=dM zkGWCapG;E%PTDqmHIc*3k&DmJ+ugXfBvs#}lW@z_zA0HeW_*+r3!P?Wg(!)DgLdwM zPp7r!FMvYdLMk;c%lqz zC#LXU#?*gNzbGA-uSr4bqhNP&$j=~w(L)>>p`hVK zvA<+z5WD-MQitfosIva(T;01`^huJMA3$|dlThL1@8s{4q2g)dl%}tQrZO!wdwx1O zPkmeT`}tGum*JjlsK-&&y3Q&(43}CA%L8STC9~#8bM94S~U}E|LUX zwT|T-bG3yACO65AS$3*ro<6J%j=3sw_2sUn#&(t|q*JQZ@bV51Yp@b@?vUV zh=AagY1$mJB1xIwD7R5yS4}rei67G*b%z-oTaC~1baa-SIoK1qz3d0uLU9sye}l*c z>yK4d2%wGOz;!z$g$l5bp3Ro{=pI{fL-Q93EQy!8xRQvcj`1z+g;O4d><03zlt zr$_H9=E5m(2kd+;X9@;^0n|*U&3u`A)9_ff4oSL)7+<>g0H=<((jGikumv^TYlW%y zma>QAF#v{l{}Wf%D|p!?FC!Mk-O=;yDaSQfuUqL}fr$aOwUtYtnm`%KD;#xZ?g*xW z&!UZS`z095qVvrZZ=8N;n^KU;Dap$7$9uw}mf9E9io%NqmWDNyyuAebEfv6pODi2~!gj&NrJvuDAya4^h)-t|#wi*bHj` zv(-7ZlmKZyJ96ds4F2yb@^)#B4y9%{3&HCYEaNgH?UyVcEGAoj)YH#C6MkiN%(NMb zT;MHz$31=4htMTpe6w}R{mFBxEw(=X*l`lU)$5H+iokymr?Puy=fW6D&AJWa;Aw8ZRZhI62S}QPKJpmJx-=q1rQrona5k}uTNxI+NBJtWOE;jF z*f)A;DjH`SY!knbQOm=XsDBtFp5=_olg@(xuI5 zkGOW$P%wd-z1p9OP#EN{^x00ey=n*zNRgKBYN9A}Af%`Cm~155pe9$2Gd)aa7Z-(O zf$_Q;UOo>Z`ubTQjbN;VKK0TDzG`eM(yJ2(Grq6o)5NUfW5y)aj6pIP;x1ii$1E|1 z8p|Dfs(h2ENh@mP?ua>CqS8FXgIQ|VH9&MeVycHxtZb4T3tFj&@n+W}#l{_6xrd5R zS7BJq$S{S5Z)6aOPR%HFi(g=sX84vn&-3)!q0oym%iTtJTMk^D?nbGEpw2;H-Th^F 
zgUzXZHW?Nv)N@cb4xT0e{}<8+*(Fvia9#0I3(dyCDvEYflP0c=O8Y8Cx>rP$_9Hs~ z?(A=Hda&MKulmS_H}uQ~-4;`Y;KvC@`S4=WiBLg4sQq$w(Vg8H5QXpmj`-8Dk=@f_ zABXvkE#&PGN7Vk7Jsp&NJKYV!>H?ujJ)V8r%5|JdyYBvd5AB2?Du=6q%+?CG*P(lQ zTmvpbs+;2uUNH}2!rcB8+R>ZE;p6P&qv1WY?-{jn=Z%^%8jgzqiip7B7v`C~OH5<^ z4{k)2g2O^R{$w9-E(><1w;R5esM0g;u~zBt1wV+WaltorjBhu4Mwa_~Y_<*fb$jlS zQ$RFTe%rPz2J<_qB=-f6jDMl!cwxeEoUxZ^aPbW2L(T9VmL)F*#~4EB$N4$v@IaYD zQ@62`3UToB_RuS&VCN!-t)g9wQF=p&=nWoA?eZAtQvD&FcdpM#2GJbfl>AHe*Vw2{ zoT1d6+o|a_zeuGnB|yK%l^{w6DxJu*#+6|{%`afDK7n<^Dz6{j-RA~{^Ec}3HT`~Uje;%yUP{ZFOPEyS`5Lmg>KDv+`~tSqIuv- zqjs%hv3UJ4@7oqP?QQH1BA{isX5F#;eX^!=3u*)X98ejNFgygHy)VvQ3Q$;}2ZZO$ zOU;Rm)z}z371Ck%LV4G1kvaq$lo*weO{Z-Gzg+>i#e^Alp;4!D+-X|pOoJLD!hxIQ z3Ah5Mcq&JcGBS(VU>{vMqOt1LnkhtuoRPeIvcx$WK#fsJbt!R2apMs?uaEa6y=MZ7 zrfpH0f^r>fAl%PEYskqRmg$UiE(h556f^b1AM6el6r~H*7!67ES|7)ld2i91_wFT|#)WusDC}LAI&Rf;J^$p9ROJaW92qoo#{a zSOcufY=lpJW$-4WW4M*a3Eu{*D2}o)u7(7yVhH9*8JCeF7dlq5Q-nAaq#G{q{pOu#lNVFwC_^umUUx01zRxC^#YU2qlhBbv703Ie+QA zXk4&$eX7t5$?%k2+3=KoFV0$jAR~w|mzo&)-u`mFdQrKN-u3=;=Jw0lV`>kZZBjWb z{+#o~ly|){1=;sk_DS^StsgqBqVp<%!u5zs%rvKz9NxKDL-Rwny8N!C{iMQUO53G0 z3z8i%%paeGE)vqx74xDjj$NG-NA%@cq>0rS13s!S1-w*M&KoLZ!?}7*<$puOqieqe zil>an_0a$(`mxII>8-1A=N(0zIMTU#Y{PoAJy)}097HPOw#+}`$)t$>1e=_eI{81G zy;F20T-GjJQN^}x+o{-gQn78bV%yG+ZQHhO+p45;((n6q_vs$z|1SQ0vB%cfS8L5R z*E1g!Aj5bHp*8?FTeD8*_g{5RkF=qA3Zt=+vIN8=LYdphl$gi$XfOMY2Ux>okI@NP zmgKWZ$M~aJJp(Ygg$d?X86!;H=}vlOnHYIISkf>cpwLw><|5el$mnf)=z?8bsSVG# zra6>z%>V}wKl1mZ3o&2SCFbil?g;qJuaX{BYQSOn8uYIf`{2OR?U*U$OCO()nbHJh_lQ;lnrD!3g!cg?dGshY-OsYBYG96+xD0BEc0bIIHgc1!bgc zH^V<*xYf>vCNOPsC@_&5Zg$wd+pcA%F$6&dR{{Xh#x~G|uMXf>54$cn1(XnGPYq?3 z8i<#Kcljn7JST-4mM)TJV+yItb{NkucI?p%6i5uBAEPP(08*A#oO&Yi6^u798rbn6 zeub<>8CFY6)0SU&M(VnVQ3>J(1E@GcT6!?F1_Q7m$Aj_>fqUbOR1SUOdg^+S@|YlC ztkuSWkBk;}t+6hKP_lYjD_QE&s#Z(8L>jfI(Fw6MMRwgaFW+Rm2@KJ~(GE$3Xb5QK z;++)47tt^H2|UZ*+I^GYR=pwMq4Dtu<#6XO(Sof3hj=ZDbKWpzX4xz%0C6I*Vixx0 zv*AR4nwFR&f@1#O@3T6^7(|x22=KB>j4Yl5ILPw;>@H-g&6zZ%xo}`yP&(3nJZtu} zwD94=u?cx$wDfo(GVayNzhj#rNxyfW>y6Hi^cJ+t_52Wc6<4+$(a3o;_--o9r^PL7 z6H&^{qO?S(I(BFz#}ia6Qj`Or6PJZ?!L#afgy99GuPv^2iwfx6#DUNw{b;cwemcxg0GM#^R=Ri{jfW;7^= zifCRgXrYSpr@i@IQoltWXu&i_7N)@a5~~=C6E+{&sM;te5=P@4DcA7=k;Akg;|kqe zHiHozwB!T|r@fzuOuk%Iek3N@#%H1!;%KQy)*s>D$@?-F;<$&f(m5xpyXHc}x(|~K zBXRGfne)7B323$1_)uM8(KB+IFZzRVG)2kWZXt-?PDxt#%!{bG!O*f;UEVZVD)Uaa z)$(37GZpILc0&vsnIezjBM727xD4do^)je8{(ERFeG0LSJ=TU-dz6&Y`qvAqM`yRe zhsbM(2+Gu^>7!9)wh=m!aRZ!g-xakPw>zeQuP5g24bIbSw1Oo-(kvYX;i@1mR)K3PEPx{&mQ0+hl&fSE9 zeSvKEXfgk|cSO04?Ac3>cqRqn0-SL-lk5)USW<3#Nxh!Nk!*;~fe6i;2N!?Vl_TtF ztdZ{xqoZ0%^r1aBjp(ZTf*n(=RsVqyI|2lO+$U}<1-m;1(eGzOEc==L^fyajHLYul zjrO5F+#YR{Q!x86lD<4!|I#?dqA>V(`j-qwHE8%()xZB=!B>X=s{CTcPI4wDj$bIN znX#4CzYG0eh5vu*e3X)<(!4wZFFF*VH39`aG!Y+BqW?nKOD@D69v9dGU)KwzwXGCE z=P3zgz#pnXOr!UmFF^$7l??Pe6cbs(!Pe&FYQ5+5pSO>F@?W5A82*^;S17N^=Jvq= z>#NI@ak^(F@Qu2o%{+Zf7+xunag?Iwi5bkSQD)GkOQfLZ_6S^5N1`(T(9rbWR z7o)_O$QIM}WNcfp1!tj^JS}vOMTVg4uLKbQb~qHOKrmgtYCRvK4-4u1L!nIX^VuS> zYn`Y7(#0dlgT+@^(Pnxu-^2IQWuLb}ZU*h()`w4WIVPv|t_;G=rqZPjfk*kg2rHkD zTUR*x+qx>fK!MfG?U6!uXNjuGj0HPvMAUo?Lk!E7%(j~es+j6k@{JF9bd5ENmi}C{ zi-d}-CXpchSPL0^tYOg&CSg*#s6c1*1Oe$CJmU(^q8plC1{~|Kb$Xn;yp+sQOSfMk z0%KB{T+zrdO3Ojotc-zYc|pUCCzP-X6XIlyu$uX+jxhOZ=hTPd1W-q(Bo3b38o}ak z_yWqC$Rx@Gx5`f+SO%L?GL16LLKSFQ(~RL;s@f>LiduVu5Kua1>ih(uDQ+T6V=PYV z&6MHEm+5(RaVNuQ2ADC?xqHtZnA9+9(FhwF%`nS{1+8`g z#zXku@9P(${aLZ-lTH{a=8@xwisR0Sev$(lo#tt$s@|QcF4{tPKQ4pRP0D-{4Z2AB zTHV2b%x(MqbddAWgYJL8Bg|+BWL)X#Lom)0NUWb7&@kT%lx5^|kN$1y_Q1a402@_2zjYb;Y$Kxj-FT zy64knrt?II

      rTHcq`zCAOo?Hs<{=Kd0e(ACG^KJm>!du>Ei1$*+Iyg@3j7no|F3 zEbZU?>3{y3U%@y3^)Fv;mjB*b|ItHrs#yLNhk3q&=m6^8fZ}g1`4cEQfk>Lt zT4Ict&>UrQCr|DZhS7$qtYydLfP*X?wX8RXI zZ0c&$_s_>Y=`SW9y%Dt!j+)nweTU$_4{m0^?Y-VQRAJeJm+aHZ-{^wt4z$6<^G)Y& zvQANeY7QOKk*PNsO;(hr5sUPKmM)w` zhX@|eDjyR(5fx8H6%HZWh*5iaOZJnuY#vU;lN7{H|;C$4|~P%ma27^qkCp$l%osszvFc zOiI-Ub^5m}UW?Efakk-;Tqbdj?puPYSz$XvsrIRz^#UJV6XC&n}I4zqT6klPh|pPg1q4*Yfz-x#PYn6>$d_io0L&wv{_>rRD|D;ykHpl?sr7PlF0 zA}-*eW#(VYM`luvq=Kw*(C;xdg|1oPF*rm%uSwAsvc+rc;yaWckyu#=AxGtMTHthd z9yCumhnfKBF7jHrd>?d0la5-i(7Mup{46o$O`+9Xw2uliB2ct6+ISPbxojreYq#O% zrD3;7ENv_LlELnXriZGgzu<0L?LSqk|9Pc7HPhCLvZEh&nnE)0ZxUwf^VHw7h3VjT zwgxY+mPGzZkwn(apt{!E*=(8zAEHJt}-UC>m@3|TRQuf zz#Emxz_sTKlGpuTU7gtfRc-#hbN$m;;_u^E$jsRAD-QeLRpPG$_@8d-|3hGtq9$b3 zqyR=kKp{>NhpfGqW(b<>A}Ru$^KA?cAbY~o&kOW zwAVjhwVF=zvVS4;Z?FVWt}t=V#Yb3PqYWJW{7bc+nnCTBh4I557j*1n8(9x9n{$|4 zCNSd-GQP=#i5S}k5T;qyZHuS&%L|pXR~|)%wnylgD@;Od6)9tFN8Hhe=Cpn)D$Tg< z%JqxkHft85f+7Ac;!H-u7zMb?r7DZ$FtvTzUDcM!HrHn7U6yOy^8@d>nT^smzb%3Z zbs)qoY$l&3p_Z4zJvVNC+#r!|&6KO0Lkr2}VT$q?9(d#-84Q>jTie&iDq$}4J*pL< zZV1Fg+B`4`Ll;e|Stb#=vOnYvqHc&P^Ds}%xI#KYf{-vayB&6Xdj$3G4k5%0z%apZ z+Q6m#k@VaOz)g(eN4+!@!h?iIp=co;<29NAKC|H0?ku2HS!QfGw&qZo*Bc>)Qrned zFJF^11&d+8BIF!8QTbDK%4qaQXb2h&@$%uk!2m2IaD8snv%rjiy5B5MqxANOnCVg^ z9El9oDUVu^Nru(gxc7CUnSJyfX(VQMa`9cX{&Xt7gj4QTTJ-xg=GxOd%q4TwXquIN zpHwIH2B#Mvhwg)y8vA=;y7dc-jX9+9C2Aky#13p}y$=48*%TYiF!2lO)iu|;uqMUG zOvcMX8}Gfkp8Q?YB^5mh|7fqNF<_eSl2=C zG4L(}!Gk4RJ)F^R)##K4JvZ5Q`0RGa6kjwDHwSi#P+)E zGeZ>Q!j>#)T*2v?9HPE=m2m3a~Y3=I8_!I5}cI|B|gw9AX>) z!#O(QKTwPR4k5t*uN@@sU~A-TXzcJ;>-sv2G-;kR7qSHx7C(wHfqL8cwZ zU@rrgGp<6}t99ybNvXlBEPo$JHj^H8~@SL&Q%RiVzj>4M6d{ z2SN1lO6P6qza#5ftG3EK+{6hp=1UC+ZIZpp&;Nbd@09p~BJ$}>jH-{NLo}vF%Xgb^ zH0x=Kt@3zrHW*+?G;)@dP!7g4OR~6v`5<|VC8!(=m0XZ7uU^zP1lm#H(uISvtulNR zab!-UHXaIA1Pw}$+C<3=nyDOCmXb(*%mknebp1YWr)Eq-MDjF;UC6$eCdrz)EvAun zQKX#DXQ8^UAtYxcp|DCx)!4gvaz0-=ms@N8Gi8QJ*6T!CvKd>BwJu`Go=Gz3i?n1B z;KSP;y+#>^GvDB+s6k|I7@f~}3QX;Rr4-+qUNt)-G${TR^Ce}Z$-z_ZrE1>33bmWG z@2_m$7+d1f1Q%W&lW(-%UW}c=eD8BpO1@J`B$(Tg&gsX>Do3-8wNiA&Vbk*!SLFqI z+q9pmGjGy0{R5*RV>;C|`aIH9)@LfqOC%5`$BS4i5EXRP_kasW>t;OMF2a!o8fpUn zI_KW+XJ0;y-W8B=mw?qhH%S*A13(1MQDaXv6+}tPkf(Ek^)vv~JUjG`U&o2!Z`n|n z9j>g(*9wLCkI^fJtBJtZqWst6-(d0IHD1x!?u%Xdg{=K^ss2}e{~y6nlCrfUw8EGB zr>1t;Ofprat9DY6KIs-|c$Y$aa?xevZSRJSO@eVfwyP_w$ve3*LuxNM{yW9~Mg|3{ zri#%L2?gZLfk7lNKB5dW)9}j9vh}iw-Gfw2kHlJ_L{ZGbh2kJ zThhA;E>|9AQ$0&0ZHB#2Vu3CzMJ^TQaR#l8m6<@%Bg}Yx87Lh}E`gFMSQO`tYKn?8JFEW1+D8be-9wIWkvUu@GUd8+deMO8_0L~t z2qjc;tE9V~sgjX6l>!;Ya4BBXFmL0??qN`bnO?5fc>&XfE8JJPA7neq^x%b3;iOU7 z7NizAizg_9Hs4hI$wlM#5mH6x|4S`M#bWF|)kh_0-?;efmLXnwefNjR@8Y_rTy}v@ zcYiV?Z{4Wha>B-OE$n);(lk?3G!5+ASfMn=aS{6{vdA12oInAOIbivJJf&f~>UpIf z=gEhqGRfjcELd+|OlW7*0o}5ZWx7b~i*|~@+%uJ`DVT7TKkTeIAxNA18-p3`L{Ph! z{mNTJ4b^P6NH&$SFW-Pv+DQ7+b@e$~?7hAhh0-SR3HhGsc=nfktKR=Uc@}vL$+>$+ zfFNA?O7D){GH9nKRN#jcrH(91!ah%T?m6vVB+h1+0fk-6gL#z;0&>Qb58b48TSQtb zT9?3p`#{QM)|{Yvb{amctG`~R^IWDXBz;aUu`Mu%Lu=fnXL|(uT_}C;OrYrSmFgobPP$0!NB%XeY}Y^< zI#w}_6QZO>i}>~*GB{Iv+2ND{cP#>51<}x=#{EDAzV`Y2Y6Wpq9m7aq99?bb(~w?S z+N5(xYb!-1rYOvff<*#MN&_@U`H#YEpteL7%I&}Ih17^f~-FcAqv|#NrX*`K{$2|0RdP+C?PjE zU!sz0RXW~pHLZm^LV#K&ZVAt#QqAzEFpnAUY#$v4^_q~TWpz%Yg(bY@_1c=nwZ;0i zCo}YDo!<`pOReou#&g&HXI6~q=kJ$H+0h2FVvrLnr6!Lkt)aq49rvTAy6LnUw`UH4Fsp)%8vNinETPMQR9d^2W>U{B4Q zfcw#A%$X8&J{nKww@<`s$d96a^iVUSI?uMIbZbLgD|u}~E)qjCHe976&>AcVDyiUS z4s43k2u=@&odSh}KU!`$iRITPJ_!XYLSceP4#Cp(bJ!yuI1Y8`>|T7BXG+cP zdubZOUYmJ_OOA5ANWZ6DQ8<_z{i%Td6{dU~ti4q8?=w}GYU0MQPeB%@Pm#cvU1tS5 zG~+M!ThofsqN1E&La+u$945_ju0C+Tds9!{?QwAPQ_;l@g%gMQ6sdcZQA)ddE*(?! 
z!qi>jl(ye{JQugIl20qc^3hZ^Ht77C}BN!|{~{sn4eP3h0<6jra;LaopUP@?R_ zROA>(w{`3m$w=||qux};9y`hw;hhB`B%*AGR^=t<7D&MkN@Go4oZ3w$$wFgZDgy#= z<#fFJ*lKeMi3@B&(%ewGCGhVs?I9ML2Qx2|O3rtZh5YQb(S?=8PiI*Y1y;+E$R1O< zaUBH4)p_V(_CS^Em|BPOso36`MM-hVacTSWVfg_-sB9(j%E376YZ#IkjQtyW4}op1 ziFJP#C_o4|3yV<>OApr1?6Q-Uf^d5i!v=1dV3>xZfbWR_K;&sGqw(_6a->m46pFgY zbC8(rF=iUMG`s?1fg3)XWQ1z+&`!UjemCXqAsA3akvUY=pj) zo68J{2`zZA<|R)S3#Ks=WYbDF?x;Uo>p&gnSC8xLg21QPP^IirdW^;ZexTmO#O~Rl zhtz+@9i9+GUH9>t4t4wz#ICJf9b7|DF9Pyx>0m{Kne_O zY$)AAkt|F1iaVyVoaa#Yaf*_QeC_t0JhnG(kRNbb5?A^gm){9}2f4Y$K(9+Nj@# zE<3-x-mSmDNxj%0cKiUsTQ!3B$}SUK0>{$nIhB3~^i0eWL|$nSzb#4u*VmCQNC&?^ zDPd#96+cM#I3j)`9xnPtKZN%TAWTmXim^!-eI%N1h>FelRP7m8MF2H#lXNfQ$!r&Q z7~0=C%n9~=2%$hV*=ziKI&5Ji@{@I(yF2X@_>;AEhA?9J5I7p8prgyO;F4xZr}O!GFNS2iJ* zE}t4BiW4OjwSY=K61aQEA0)**o^N9t5Jtw8QDBSfoof1V3S>19oryLe_j zOm7)jWLCRmLjpb#A)Xf**1!QL`nfq<*cR-amdlf!3ZK{L8U?QxzJ`{ghwVjvYDwo! zotD(o3i_13KC276SF5RWjt(uW+?qmdop1Xg;S|47=yQakQmFN!-n0*?%4;nrz1;hU zcV?3AZzadodmSo*KU*HUzstD!hr(AAK~bV4VNOh4?~+ufamLp zvP8X_vrw!nh{im76BE|t#zz7YZ7V2@ zFMr47Vcrn^T5^ByXRtgH%|`T+{z+DUsI$oocjFIO>K1TF&7!u4ef&pCY#zyFJguo} z{Jk8RhlC+kOsiFP&GGX!ivV*k2ik7;MC^c+0{i(j{f!NcjNZyi$GMjYge@ZPD~!Gk z3P)A{6suY0mrT=9Rkll6^AUz*r>R|BOsj1_^3xM0 zHI>96Lm`gk{hA2Qi>(M=&}emPCO4j#RC2fZl>1PPbMNip?VP(*p5W?)3tKi6HPqtU z4dB8uxAgd-=T!TpNml}jAmyFk!oU6Kz+U_2-RzGUbohQf$3XHn1;xFOLR|Q=VV$bI zE;?q2#f5$vu#GYPBA_b-l~9y#RgZ>M7ili@f$8fL&4b(1)Vv-% zY{^*)Ir&h~^6P9tk~2hpnol|=)e2IxXZ4AobqlCsww>2D0j@$`bW+iXJRg+YAXwTF zMa*vxi^v!-*`TojP>&F|sKD&A-Qw7wPaBGNKuC22S`JXJ(CFkp!dlU#MqA!+nYX8& ztMSOPX$Ezb*MdBKGmR4udUE>2gaxhi~k0a4)ya_Ned z!LmpWuZYj+&{Kqb`EkilPwQ**koi(H5#B99{~cWgyvdlIwvuimSS0)T4<$d)247zi zYDkM>usGJ|ZGPa!tE}lG9w8Hv-3&go5p7pO!Er^T$|l&{`Uz@$VLgY~;$VDDFH=Mn*1f+9G*0CHP!1EoqUAfYflyh6Q@R z?9wEP_;q)9JtCE&iRq z{4b-Ke`1s*f1@jfe-S<~I{V{OIjHvh4F+kKVsrhs-g>(B_5A~u3(JlI z3SOC+wIGLKt}P&xVr=4wxmmQdkPvNrJxl#48tf=7;N0RaEFhD@Dlm}IG4xU^=vS>Bsel+rANoy|YJ~$H*NKIEeYw=q zq&u0g=t0_sA`KQuWa}T4!uIaA#h8@$IRf&q$q|l7V|hn&T!PW{NXd zI#fwOVv_dD`kn~uH36QMoMUw9FnJG*+@@TjqA|{zY7b%hWeNE0rB(eOyh51<@Cf!- z0vx}FXDjlyoH0~#r;5E<3Urlg2(>bavRT(D5lQPS4RDv3$w|4X%D?oHt~i@`u(VS! z>_)BB;)0&`XEBf(egeeC7P3=C`t77+&_h{RIYrw3 z26%MKlDB$KOTAlM=9}^(Pw|o=MONobKMiV`NRbBT4pPcj^0B6C_l{9B>FBGCf9`28 z17eO9sXR90hg->!7N&U-Usz;7`ZF7pVOnwpt{`U^olD6qT5qyKHKdc=&*mp>C6`xDUXz%doug8uvk3PDW}W2q%d0NmNV1uu%WQC-1o&y| zSAztsNd!ETUSD4k3L9JN_;cE7Ft_nh^TP*xDgCIT^{e|%QqqwMtTjH=_Lx&y9h5pb1dnH4w{lViT@>WU1Mf&N6T zeGFVs3Dv#B{!*JpI!9Way#M~}J@rZno^Zzy8{z%$b=*Kt8&hTC)iP%0=1UlRh|)V%k2gk=#! zy7fiNW)miQu5p9|gt|Ff!~L`+c3^w^g(t+G5C4ZOd(o_L(v@#aRB=qfYegiYHNop5 z?-{32hpI+|%#Qx)5FkSm1NKHV%h|9&)t3!$8X#$~W?CaZXownYz+H&}VY14A@kEeA z@~;g+uYJCGpjDU`yikZE^dqo}qR4_5n~~klK|C?^zt+IiU7lomjYhw5hVDXk@3JX# zWu49vZ#cyBH~?;Lj|CI-)ggFJZKIO&jxX^%T>4GbxJBLUQ>J*{iMKROv;FpK<4wMB z3H)G5jH;1uNB?8vu9EyBZ(BT|8NfZCRhOgD@ zoU+w9U-9etF-@|vn5V)t*|IbIIx${oNYi7)Y4($b?g)T>M}>7ooDsS;SlG4hR!HiA zUcK3yVfquzx(cm^BkRuFY3~`m#8O>;`B(D|8zQ*)IJaAhD3u%h^|4UYp%ClVuru5v z0KyqO$3v7`Od8UIPZy!mo=|gNuz@T5DUCwTZudJ9^6V*@;DxTJ&+xL7#0#I|=RQuw z;65Y8*&<|8-CkeX0lCTNUmhvFWnOo%{l_%x z|9Y)#_7!+!^l$vr;VTj8e;hZHl>T-uwwW)Y^_yd28yZ$62yjWjigo+OMF{9E0JuP) zzFieC z>}jwI22`k$yb;bhMlR4sj1Qhe$b1>TApVv)*sGpZujZ)_QJ-E+PznzwFxseO3%5g0 zuNnhS?qxR6(;nk{oGV?HC!G)Xl7GxUlHhPj#}--eRw+0s`snf_p@jUVO?ol=CF>V? 
zcduK~?VK+j4QaoOg7polErb~h)7X$mWlj5H1}Qi!LzN^1+5@Fv7ORx^7}DTMiSUcw z(GoiGvS&rv+tJSgkyHNaLk9+3=;@SHr)f)7rpEnwR*ExrMaP!f0m8bsb`YRFO{dsAG$?d;LpPWaU(SjuZn)`4+^Z71~b)` ziwX+lhM?Ue7!hhqr%bOY*dW@|Z7=$XlaVd|6290ZB2%2RM9pbow2+CKmQ9^{vt!>2 zYlhqdjVrzr1W031BDK5cjnk|jSQkO*meA`#qb&EJOs7dPm!PE4E$e-H=0kITcT`F|)@94}EW?VYhfQRjL>BeB#s38Q? z;Pr#HVRMOUvo}IT4V8m8B$u6IzV8qQYNVCp8I_%0mgys%Kos_y+Ffcip!p#p_AyBQ zifr!VI4aSwCeWn1He0DLalke}^MeG=0z{lg$;*VzfVe`q(XVph6(O2b+pwi=dW~Of zptbK1w@~-N=rOfockwZr`Jpim{%E>8&*J1x@FnQ+egU;-ryEKQ;Q%TYhzQp&2^sfH z-Czbt9aODEAj=GRd~+l+m_)|tF6;6y(^-sg=3SXM){6`g1HB5YqIq~W@%n}r5gP~w z+zb5$zWU+K2&P}r8}_L+(3|;%kd`Nxuejc=F9ow{XH=gz$xg7f!N_IaP*&)9GB&fq zNNqnm0hTf+Da;Cc3Y8(<(8@y_im=jATTDK_h@VuYKmA$TE8<5^H%VmAOv2IJ-GYq- zM}xv03$C$Jz&C6ulyjn*?k86&GdcJtenW}JLiGmI7&O4L|g7RWqv-93NvUb z4B{E*R!A)lT|9Es8sfgHMajHnhtmQQT3;&FB^_dSx?gO(W#PH~*WMD>>30i5BXsrsSz%5V zycAen@@?fJsw+Vy*uCCrA+~$uX0jR@Va+RzvOFef0V3smdF0e>7E7?dKB-d83m1&_G3y!BM-M3T6pk`BWTDM z_<;vHyXL(S?C%{lEH-Ve;<~EY4Q(P*9xaO&uQv^#*-D!rL0w<&enCBRRJHMEKRx&k zjgCP@Rzo|*VWet>q5HRyn9or^D+Vg5N`dwq>N5Jm+c@LKxzLQ$Tx+`u@p68u^V~Vgm`eR^BxPo3*Aqx21VM=oFHxQ1mEwJRn;&C7Z=H2VfUh3rK>Y_*hqhC+PxfN+9ld%HSXZHy({mH9Dq`goIQu?xsHQvTCCtX= zDhw#EWJu4JhgO2InEJsH87~xa6|I}7lp#$5WJImH5FPP&7G1AFEc{YK zdhmzb>|@;M0kvRFC%d+@IXaeMAYY*TdjZV*gc;q7mJ z+xz>qHp*@i&cC+XHg;h`ba8uju=&p|?L?z{KA2+|0aD>4+Ky7Am24Gka zH4z!WN@FvAdnDTfNvthcBPR9@m>RKs*@N(Lg89Jbc_ z{NIY>|3B2GfA)S~e$?~)D7?@?sR#wX{6j-y%D}=Ke@znsgQ3W*+e_Nd$VsUa#M>}P zVB&uY9iqRe*nKy361|O>ddbJJoeR$`F*IGXx$5+sTJ^j-I&XHx|5gxdI0GfS$~u^6 z3>`cbN!;lm(egSdBKMMJtX~E3++pgF!Hk2!_LVd62SUiY!!(&N-X6_NZ2`Gz@KR&f zBbhj29vS9>O~@jMzUrpnnxiRY1DykHe=aV<7?avwYA6^5XGQ^DBnT=B5M5ohCWn!} zFIqy7ru!I1fGf|AnT*ovV6M}`?8P@zrD6dkD?wpK1Q~+~H|QA|;uS@1O_vh?`Wn*r zFwd1fn@1R~8eA1FT3<$|Q6pL2^g^iGtFNfP7ESKdK%57F6~n-ylI9c(Pxa4-k!1U! zz(QsS6E#$Y!-y-C5-y8VO!iSG)C(Exaa%HB9kXlq?h4?2cFycGhb2dZZo?rmZMCJ7 zYpW7En?mZbTa5DtGI1W5&ELiG7^U@HB`Olw@Aj6MPxEaE<j(7E6mt?V zmqo)|JX){3Sk})I18OaLHRCtv+d=JIx|u)l3}#&KDKO>$`3m`zZ38npqpilLKS|?o zT=61;h#sYTQO_h8*?IeC^xpxYG#$YW;pG@6B^pv-l2?yk#5uBLgSO26B%>p$F77H;U0(Z50(Ju8AKfP2dbJSu$=$a=H zM6y8+c&2{EDY)XOQ4*nUr86cJZKurAer+fTd>c`Cp{gxgHbadmOe08_W6!4WIf08D zScP)SoCUc`QmoXMdlJKu9!fGSwn#9dj~hmZVQPtohDmb*-gkpSg>?f8cf~Mt^A%); zkY||lTl4aw=INLgY`ezurUdb_%_08la{>e(!O$w0eip{7D)Rk;(?^L9p#0w(TB8~_ zH<@-tLw$mQEM8A0nE~tfQz2qL88ugrhaK%^e=l+fZg&# zZK!T>nGia((!u)tFm9(PDykNvv`TZZ63hWC_%eg`fXak?L_9Ah^FrsR&3D41=w!^X zA?;2_u)>Cu$G*PVk+w2bwS*4Hu>@c+R7!Eo#6OuPx_A^#qSA4g)!gMRjd@fA$1Yxk{q!e-?jty0OGd!xrHdAnC; zA}%}40WNRS#8uB|m-;Fz6Ntj7U4kPgs3nKDAy|YbS1GQ+g)HaKUuHnUZYBx_U$;24 z|KQ;N_tAm%ujTunAqIZ~&Xt~!Uf61BFTc0USn%A9Oufd{n4;-N)eecf;!#HJakd}h zI9&D$3_Y!=n@HE=_X=of<)N%7Yxwmk!IBLDV47x9xa=)b(3^!c>?mjy>@-e(+Ip^> z?a=St&n7hMb(w)rIotFewkBQAo!5L@kw1PP$N2gq?4gGHaRdv_9!l(+>1}J>)FewQJX2(}AB_fDuUs=}lXrb$uhBA~ZYKFG>OQNXy3vr$lLoWoO3( zo3ev~7kHsR2BLu^WK7uSDWwIbz-DcpLdr}rK3?Vm54Kc*7Yjny7E3p?2E6N>R{`(2 z4hsS}`_+v^RbqSIZtU58Bh3mjj=U1$5Cl!sk>*#8TY|SmzLDYxoaIOGdKfvNiI1Xb z&h(MzkB^HoD9)pWeGr}gW zBpH$+)frk;U~QsYMro9w#K>n_?d}(pZm$|mAr8k;)jALih`{r{?*3mnxde2MJ3<|! 
zfnA#@w3;$8+4@jUkAcai77rr0&SQ2R!0%rmftF#!XE#D2iEY$PifGvq6xC?&>*uk)|Yj zKZ(Fm_edL=IECRrMujrlZ}Vfx;)iy;Mx1fdGP6eAd2y1(v18B2_FwhBRjT!Ai>=`v zKyZ$kJX)m~^4H0H{uGt#d(qy9*Jdb+1_wb5)2nI1y*|e=x!VSVuz!lxNpniz3t~yd z+4{BB4oIjU972knxXWU4%Hc;B7SK{ah2rGL!kpUYr$VB9wpt0zMb4((Y9MtSf1(!D zjmS!MYrqxUk|i`0iljMDkbXW)PK_1~;Ca#=#&1VHrv$1H92zk%%C%4QGGgnQGY}LL zghoN@OFdk~RZpc3y4iyC=IoK4s~f+=Z6FPW3*qb}gVznBfTyY8qBj9oHNkw==| zWC%S+`I>fr082DU@*R(>iL{*lcIIa31{D< zQk-=8R9m9ALQ!6!AD~$6JW&*fZH=zOp#iQ`G|pNjnG6{cbGiSgPN z=#ujJ6*M1-SPQjsb~kX<5Rr0OA|wrf!J0>UWoH@0jX=voMEhYGeY&e5Oc0mjkaKkK z-X6ds=7Xl@s;9sK6J^Z#MA#v9jcawNx z

      rO6X}&6Wy492uP=NOw{ zycU7RQ6d0S#N7$xrMaqNlr~p?&?QWijP)~KuVX}p$S?XB54CA!=3p!4M_Xeu5@btW zC{f;i=ZUdYM@J(W6W)+@D*-`2sEfL1sM1+gr74ui%0a_+}Lp0B7bWMB(Wz0xuja-8WnLQV2}XNE&W!I226yh)KPn zHIvw;A?lMkdLS;{t@<601?`v~4(nagKR>kA4VYKR&+q_!bf#B!P}s+(k7Xn}1_p)X zX${h&ZK?%kpgc8XwaQ!kd5MOs7q$Y!6W@~!qQGhS#>U`D4CG!(x}hJAV~VIF zRx4}z&u7G2jh0y;Q~qHD49K*0Dh7%4RU0K54O_Xhus6fVUWUtGK>X(G=to7s$0u@bg;v}_71`>nwK81W2tHE(9FoR9Q%BH|B3G^9ch$*$CJ1~y9c0s}w_ zM|5MVI7k(_Njt$Ge|km#V5xj!!x+ zDwis4XBmgI~7o2*+##Nbj(g3|IQ?U1;KC{yuH{mq zcCF5{Cy`ALph?5o)i+1Z4yZY{mZ(sxOoL_!TT)QFa!l%74d7i4i8TDX#UglZb%^G# z35DR?*gH}lskQJ|INhr>z0@1(O2V1iV@it{{n)Bb3%=3yfu||>oK&x3B&>q)Q@NIV zeraPYPl(~%5ctO4jIzb?hI)*MQekLmYFIE1;={7an{`uJXxcw9hTfBm8!o*WFR!Hh zRaC_Qp!h+oODtYN5;shOL)23K&KuN#2(s7_qW63dybw9P(LNC7^0Iit^}6yZ%lbG= zbHSN7GE*Tyjs8;>yVaUapHipnQVQSL%eyH7teUF2V8?`6wo8v`>g+|>wFl`74HGU4 zop4LUI>TF@m@i(}&adfSbQ!VJoNPF3L>(fHiIWIFtoy6nS`^kkiX|oO6}yW&W&dib znMuJ%<{MV9t-kj^m_0V*Hkzrx+krYmyUjBy&Gzavz0Gl5TYrd$J=Y>uf45%7D7bD1 zU60=2zVM;NgjRa%Gdj!?sr%vgso}g_h_%=yz<2kK*@FFv;_X28@zdK*l%sQC5lb9T$QKxutplN zaA;akw0&5pR|PA_;%1RFYE%t@gTX+WP{MFWJ6EMKEm88p@P3cTN-X<2(3ok08E6kRGP9aR)G4$ zf6PpLt0P%U5O!vA)?0!Z0tAtD5CV(TM0gwPfuBEXE+!2UWx!Go{2KMkE%h(k zv+8AxK*lzDo=aFnajid0JD(uLRQ_6IDRI!B)P|nudyX)X8g*?Ir$#D+A^w>K$csm4 zUiit5{yu4{gJqlBU=2#MmAYJR%qXckLL_(02~X+=V~AIhSCsu@~tBAIa^|AP6tSatlzS5O^WHEvh<6{ z$OcAhapIGT3rJ9&bFbu#oAf2oExl1n?V7!4hHcyUhEndPA1mO?3SnPe0o@rvtYZ}5 ze>(qctpbVp=RdQbe13G6q;`dnnbe#j z_&nGN*=W>81h$mbf0Jmx{ z-x^ng+-XYdVRd*#Xyc)9HYNjdr4AmS*O8B$@>NO39Qlgg+B;q;Nhn9JrkBHI-PcWo zk9SP#P4OpJ9>{A)I?T`?OUy4(Y8?e8O~fhW$x3y)cm~m14a^&~x5VI#@yDv8fspZt ziJ_(@&^b^0R?A5P44YtGgak}rj2`o-3^6UmL&s6}wjgHKVn9GJPth!H>a%xf90Fwz z*aD%#v*DfdpXcpW%laC|X7BKJCa--EDw-&Bs_(C?CZUb1+wtpThEMdNWU|l2=_ld* z?=U86%!Tel<4=Qc0^1;jUTyqVt4bjjA7P)%-mb6nassR5lW(>Y+f=X!m*B*mM0>xb z3CW8G>)+O{68Sxn#XY7q2GuIwTBLKl94Gfs_sV9ylg~adZ=`pCaW_7^J@8_Gmj?K> zf)7+Ee;tweQ5@_WVLKD9BE`O9!t{9`b{-}L5{2M3wC{u85oxAJMz`Ke*AubPtKSWl zft4NH8sQ7C29#M&z*C3YisU>~&j_iDZE*F&&rfD=vckhmi4qP&yBn#%Gq_gK8Uf5b z0r$WA=Z*4s;d&7EDz(F!|7C5`FIo~8XciPRgT#;LIS7GpLKo#Pzro~GUeTN!3r zFqs-gyx*6NY}gwd3WX0&v|Z=tVXAoL|Fu-{8dyK{1vtjIRPeYEEc)n=YgG^en~eMl zl&)1YNtJnLGCJF-WRwEjgr95SuRU5B?`5}+dGX?-v z)OrSok60Zdl_-;^XzbKj7*UgQnWc&H2#a!-U(qS!l5b{;r7aJ+L=gFy2T~;(M6Q7d zstNZ>S@f!!>MD*%6inzAlE4~Gx;l66T95bEUTzA`Hh*8|DxnHVLG7&T!h@IvM%x0WJ0iy|Wk&k7Jv48tOfGO3wyt((`kY(Z8kLPy+m2VU^ z@vA+&29%tKOfSngw>qAa9$7>n#UVy53eqmi4+bw8EGIRckgmix2pJccdWo%Ej; ztCux1bUZtq&kuJC=o9yc)h7^CZjg!$nGCPQ%~2T_x7W6HajK$=PuV^`n@+J75`32f zr`W7Kp4D*_5K;OR!9GL2D|cCoA=S^6N_P7d3peu=NuK~JFLLW~GA+(8%=M6}Z+S3% z^4*1n$6+*f8082FPhB<`pV)OBfpA8T*eBXC*-7=&hZk5y^H>6bMxP2#l>;E|z=70z zHrm(mR%NUWm`g9usN(oBrrpi|M*3)LD56k0l$%rMVBSNrO8E68;{_Bi_l14ci6Tus zON2W<5?2QGBHIGO>@i%I-nr~(s(B}S72>XpWi8@kJ_?_eB_feWhF#+Oq$CTOPAOZA z^&AXXG{oNbQ5-63`BX&bJ`cFYgE^nD@mHSvh#hq*ea0!8biux-c~NziBY{+)ZF?n? 
zuvMFq{55I;JAj+gt3+6TIA?b$xJxqqLZ-b>qybf8sc2R{wy}CVx_r|;#0)Cs{Uf{7 z?`xHE7LqxAr3DlcOSGY0NWLbrCukp<)dg29Sx#1*wuqwP+B~msL*A>H7_NafF#}@N zt;i#8`Youfn{W;DPlo^7&}ey{S47E0Id9^&XZ1jiRbrXo zqqL+Td2{8K|FWasyrzl&zA+-lV})=!w2-EFN%6*s&NVe9q{W02=00PORb4?lgLH{O zyc|eu8MwHA%!W(r#`NK;2g_k2QF9m*DBCBK>VpMyHOc;k-#CGEijsO{`PLtCxN6)R zi}Xsca?Ho2lh~ojMq&Lt|H z%nmnV*K!+;nQ^wYqx&`{(n1eTp$t0G>t}58tdlC0@grH*M<>%YIq!TyFhIIjFv@xs zw{cEJ16&?lE-$_I4PAAVaq*mNti?q?!)}79Q)x|(U{|0AA0J^evzt(r!lF46`9$s3 zn4Je~O8flI1#+WyU=2#0$P&`9xee1fkAT%m>g@SqfDra98|ftNB8@-cWH{TryZ6e= zp*qfeOYLS4k1tHz8QHOaEqt|6oy#R5Ud-IX%AfoZs#GvCP1E~VO5ExdF9EozIIO=| zw!QO~@C`-s0@7u*9sG}EQw|m2Y;o$2odfUYWF-#d!!}y5$N`(O89#hrmu3LwOWn z3ybE8ZGiIR32C1NB&P8LUsgGf8PnQ09O+lW3&*{ih7C;Yr8G3xlRqkHw^ALBhB{U{ z=XIQq#up>kJ!+8GvZ-xPUvza)e)o!G^ZE2nQrUH&)F&`6*bt^Emz;~NTX=Akr)J8O zp7EtyEer~#kk6rMd4 zo4KfZMc+RJpxu)(g=u(7a^y`#E|i8Um;6_u7vdy%rI) zIJ;+#pi;u<^N68hUT9_KGfE=T%Q!1LEgrDYf8(%^e6);~l&?|oj9x+5<#I8~u+7kY z{$&VotE!o@MGRl}ERp*14Dd(&WL;@s)8O3h-47sfq0r(D zhRv`K>mICrjlC8NqMDGID~3?8pPnasldSjG-{^8}US|1y*&U|i5yk{=v#xli0P0YN z=Vq&84?%!!WWRd75F~}d+v?Ts_JMqGWRXM7a7;FRfoaSCiiVJ!`QTKGxi733ol2ha z5aVGhz;UP~QxXSlO?|vU?-K~ISg1tL2>ygRr|wb0ihe01M&3QEiWkuz%b?*1lvT$E zQz+n(VaFnvH7fL(@fDXBu8i7P&>bN#s&dYG)I&lc_QRF8{ zMGqe9RtmF7jr0IJ+pMN4k>tbJiAP!~<%t+lfOi?3W5@aE>vtsp$i=Jp53RMpLvA_F zwhvzg8#DIv5B~xU5ToXRAT=oGdM~ZHOiuFwYLHzS>`DaG_Lm=v*c|KAb$MplIA0*kW~?=Izs325_K6;Ar>Bu|9%B&jl@&wRYq zQ~5R`=_Mz)4Npm)zu*q-XEY%8%^tIjxGfYN9gj%t$}HhZ;vx1Ou>K{Q1B_HPO>2Pz zxnn$Du+o>*(|w(eI=DhT(g+DcVQ)%j+oZ0CN$GRJW@*TxivUgPfh_V;E-&lD=O!U< zYB*FmFwk36OPS@D7`!g!+Bfe{;cH*SIm!5XW|USoIsRYb18h(lnyo@7exn}kAKy9L`d*tJ){ua4kN@t3c^ zGU%^KB?T;kH}g8|vo?Rs8pz1+A%vD3@NZ zzyZ(i(-o!hTNX5rr1?MG%x%UXmE^w(d=9pEE({?PT=|9m{;4gF#FP_om#V!@@Y1}YNDJVC>n<|uKIX&YlM}93m z2ODnzs%5ly!vlZxk`xbJ0Zzg`mRT|77>cj=bMk@h2VH+Oz{S?PS?kB4IF*8ZMS9Fl zS^#LOFTmK>P+#VcoRFSN<8RrjNP!d1$mU zxBT6OUrjbrwWq+QlC_hrc-J_m?_}-cWNTCtV2wwYG_nNaJ&L^HI%;i4cV8ht@q=Cm ztm|iTChp?rv;CdbeQ-B=;x2JZLBJmeu2gG3mQyWC`lBknmoQR!w96)^+mrmn8yTPV zHDzX-%u_aHV+728S%Nje9a5?u=6b3=6?iJ%%Wa##vUeFe^JxVfsUL>%vH4Cl(uPho z*?dwL&!aSDn87l>VWCW;~8 z8t?5G)-zLhDy@wX_Q()>yp-p1$k~t zKHa`!knin#M`Rl)^pDK5aT(jM9NMp3wOYcKg=(wrIU|=1x8-Q`MV0m58PTE}zrll|u7Nc}h;9bmy!p+^2wSD1d{xToZ>~MZ6?r+kK7VmRI0Slp?!-VmgE(+a2Jx5U&inPP;yI z)TOQ<6r0UtCw0KZT1;$*j{pF4EEZTJ^nf1VGCN$E;pPTr^>^Z+9Y7-~LLRnw8B;*Oi)d zKM1PtZiPHQ*5x{<3{&rU`MD}S`8GwH_?V=v=5rch`nfz<*>e$8?`>(Yta~e<-s6nA ztoypB-a|-#UgMotxgD4K*yHxI)ZRC#62F^vEq=OH96PcmJ&GL659mYRi6~ z$m0G)_sLn}!3J+0mH@LYLjfrIVFYm=2)w7;vaV@j5ytjZ1m!TGLz@L(1Ld`}MolE<6D9C2M8&JNxRD1lgu5gzX8EW093xQvV8I7TCx9y7^COGtSzDt;MzS}&(czwlq zo!JU^zM~9&T1gqZCQDzzT^_qH$R20DE1CGUI>iOiOgiS6(Z>~J$Q~1C1pIkiOilm7 zM+JiWD~?sHtPtq}&w5Z#jG%XmMzgbfKL5V(7=cI^;1Ne5u;D>6HbC%;1BpPr_L%`m z-albaa_6a15V%9#4ZV7w)p8{-`>z?DJk#GgdL91(!VHX0fG5`fv+s;i4fjhg8mYR{OM%?Vwhjkk+gIN9#Ioqh5#c$yImKYTKGtDD6Zp zEYf~ff^gQn)U8%xT9aX+RPeufvg@ zi$%gZu?vp7PL8kfJ`b`|6Ssx02Hzvk2ejy2Mq0Z!_R^=e{JH6el1O)}j9@WrdCmQM z1k>um^We6EWfSB$o_jZ_0et{+=-MzwdpU2uJLGV#Cs4Np@9yIco>eOcydqoPX$L}p z{WrTp7W28e+0M#Fbpt|Z1_&cRMMbIgiwBC>_<5dy@?Y5qy!OOhJr{7pU*d*AXKk+s zggkjg!eI^a-{I=t`H9X4R(*I{heRXMDnPi*>nwUn*(-`;JR!Ue$X`lQh1c5jk?;<4 znpa<-4EJ?eR$GB|-F>ZSBXRy3$OG3wLsG2)c%pY3xW10EZbr=pJW`g4hvE&$xlgoe zG3ou%7$DG_<{%i~*LaEPE-wiKBD|4HB7Y@KwI6z&BAv_U3CcSxqsBZQVwsLmm&-Qh zM-UJB7D@tZy`?qmUJR}`2=@rETit9QI;!WErf7{`*|l%<)7AF>=2vF?c1sU!9?)Ez z@nt2qs_VFrO$fl6vAy-@by$173!oj=yEn?x?|w4K+*ENM}e8m>M;=)F?VK?bGLr^?E2>)&{#mM@#~BUn+#wxlI=(h zc5$?QU9={M{5C~fSxebVeh0nk=b?^6r zlczC6tl=4!tewx+b|tQ{VQ^nL=bkbZ>H6@i9nrecAWyAMCPS3_y?&6{{RY8H9P$Z= 
z#ysi`mdKdI8o`z%aXUZPJ4EE$WAg*HFykW*D&d%&aK!ZjGmM_0Ul!O+ksO&o?tHSa_-gu4YWsPD+%4FM)jnNgJO6IV_vQf+jwRsRZP z9hTJKQ+#Hi>4MH(tkrvw-K^9*y9_zC%jnM7bo!-uWhkRMq61))eaj@qQi||}Dc7;9 z+jR?Vd!}_mi*dmxpun#dFSL-Be$rPB)26Lgex7zY6?CywVrU2g_TTw;kLYzL$)k5@ z6cKD~~`GjX4( z=eefN9#_6&kzJx_TS+k1B)+~gT2meC;8qu}IOxij8lm|7HGUP4J#rF&qk=^sjjLS% zTNVFo=v{)VN&MV!J5TXShe=x#A5(jKt9%DXvG*mf)h@vkOvxLv;G;#H!j&3cXU-fV zTZ{2~iqaTcLe$#w0r6R1#1uBWADOzK+3AhwZBMb{Am{e&ZvnF9!$QRYqg8;9I&qHi zzNzZiF}KxS5c6A+@|`fxH${%mn&IK5@*NbVIqh7{wxE%f5>ko-V0x z-@F}j%M^mmL$^QQKCd*X9bbdhUzz;h@y4cq1IIj}=hN6VcGVFBnfVX8iN&&=`9<{l z>PW~YpJ^Z|^!!^IXfAi1@x2O)4+_E6E2U5!D(e;OxZxX;M8$|(K;gX>{mNL8Guow? zZn>b4FrJ2uUo-=vCiJBeMd`_(@jt+!%1!CGfN#VDwM1=(x4x0ltgeS_J_peJmuum? z05ls)>hxSeG}o{9Wq3tFJ+rX{BDf-V-|-3)QgUbB^O76;_#J>`ViD~Kk1Fw&w_0{T zj1Q1H(=&rR=r^||r+dAO;CX_daR|Go<^i(*)*5Dl7YFQ1F+r-is*DPg`E488ep`7a zPoBH67~1|~N5O}c1PnpOqx%5Akk0m%A-Wh4XZZaaTa^q7(gPUUmMbFcKzY+ciJwI@ zRAc}0NDNlZu!Y%s0K>;H%BOzV#F9%tYDUu)qN=VR<9sUaV;qBZ=c-{CwCd5c%DxX{ zMsvo;R-3c(d`4}hrfGO3WB;G1$07s{s~PfNzl_!Xr=#FM5nS~D9t-_vO0l#nlZBM6 zzR~|Cq`Dax|KsAne5B6ua~3~y&D<-6YZvY5EGm|jO_ou!6s-_mmheOxX5vWV)O#Bj zCK#lDCzI3E>%5bML;v3Uj!`Irq1Yj@H%7q?@IS{7IFJ8wOLiW%=&UFBvUF7Cf!_Z1pEy?TQEJb+tZ^W&Zu)c1h?;F8UY(1h!)R|V zqLl%UisL!d0Xlj_(Q?oLNV7;7aiK+>+%V4{Q>fifp@Q6gO|?V zzkX%?Ob!3H@AE&mhQxnt4Gl|WCvz*u|8^Gt;J5$9t#Fhxc2KglGdCn+{O|k!kTwmxolS1WiU2Q zsaQ#BY&ixOE4e4+vDY+{)0{$#>^&9RX-SSn8Sm-WprOFLO#f#6^zDxjpaU`gpXYCl$U zV0GDw-^wTH-fq`pDBqQl z*_`PfUYORMI}BOQW1D~a${7<43QjZ&UqUTQ54Ct`?|G`OtUp~rxZ8be9H!$GNk=q( zR$6$JR1ci;Lx!wcFel&y=Lrt6k2J{S=gef=&@Di@q#Ky!z=URJb_VBn>>ESl(0tHZ zd&JVYP4+~e77`gT0#Iifxdl>8oR{k+#R?viG%i$Eufd(G7FMrIlBB2xvz4`GG@|7% z-A5nLqrfTX-%`vyy(09a_+Y&D*hv8sf?OWE=e3ha~g`z!VO`!Mitp0$v^CHIy6tY&2)b ztR`x&hfN2zT`g)53Mb$M>H^4{=Ml)%NpkcKe@`I{dOH?3hl_?&@D6lJOUL^Wz`3>~ zvj^`yB9Mq*2tU1NKlquLjlr!c!ejXMBGBLM&KFP{gfbP|!%nnxk^*_zkY_Y=i{O~2 zg|WMHMv5+2_fJ>#&tQM!(a*^m`QLUDoBvHP{UOBsmz)2e!YjfrE+s7V&uM&SIceE` zKDey4`l=ub72p6}aY2ebzu*X=s1W~9vO%$>!F>UgdbJ<_9->dg_h0~TFRrl`)AWP~ z?fnr0qwY3t&M!GoNPNt}d!s>Gp;7p7?J-1>{e$S4Y(Ww?YV5jLd@5i_*g+uun&%8R zaNH12qONc8!2_<{5-_J161VW1YC_I#=h)P@>ah<$M`eoSzs0hpDy2H^8_?>x${Xf5 z(C^F@er6WUlvcQm?tKpxQiQ|x-hX+tC~eVbJn$1!H>#)7#U93c&s;T&j~;?9<+55E zHDYkj8THH60vUnS8X@%20(tOy|c-izK^q(bmG0+;#4R)Z*EZu?_Eul(}_UW4Y zN`(10pOhj9(wFQ-5=irZ|6d+uSF3if%#Y%E{`~%1@&3~z@!x@-|L{osS4fkTxues6 zVR@|OG$jG}k-mmE+Ru|$q$oW!N?F7_W&CRa`0xA9ycXqER@CiRPDCFOkOla8b8(Tg zvLxK?K#uIDUujIIk5@K8eKDTUBdc3dpNEq6A%d+`BZ~z0N9Wnr|U%d@uB9sr@;0D%jo zgy13hg@aw6j~PuACFpHiH=lvou_|$A_U01?f*N0}NDg|-%~$gdOM{e>=5rQssO3o+ zgCPjWLZTa2wb*cm$R78^RHW4*#l2KdX(gp_8oxk*c|sk)ghWk)qQN8pGOA zOy9=H>VGyk^#A3NtWvR5#Z*T6%UUGGh+VXZ0DLNGm;q!~ST$2Pi@+BQqLH46;<+V7 zLpNndI$e;z{XzZ(*iku1YMDm>3@y!07>1gZy`96~K zE7Bl|zos?MG|YSh9_lz*y9HlJ(Un;BZP)!Fu^&R~I^Nbkgb|BUBbxeBFLGCmq+@he z+2OqLF_4xkgEON4-dc5wnG}7MO0ca@6@$yRY4>i1RBXE;t9_`;o%jYw_t~_ylCxj3 zq|ND2f^4Bcl^Kh17MePY$SL@$5fh}}FqMr-(r=f{mL-c8-JQ{-8H+fhCRkcji$wG2 zx6kOkz8+`z00g(}D4uS!vEU*p){4cdgEKx--mIN}UI>%{k5&zM&A4SWo!$;G^1UR~ z3M^(>Fi&tA7;#rr{1-t#zhBC2cr?-*dOhe?@vd??3VZeHqW$qR!__K3 z&7cE@sT0&5)xfEjHghIv*UGC%p?6m!%6KrYbFul3bD?u*Fl%VXs?8b=#Z|sv#OTS1 zaI(H0>WB0rz~KCm4X?im+9vjLv<+TQf^iCy+5r)=G9HF?aE%Kt>JXqJs>_0OM6U+oqd3k@ zX#%8SkQV`kO2q-4h#xWKQ~3e^i98o7hZ1pzOV}GV%sQkoUsYC)9c!QuX=3>dbKty1~6He{)gd4;V444%c?xVublPfvS!rC3qQ z+n_Z@V(O8{vJ*!FHOF(PJ>A(0bKmlfM(Xw|^4-s3RvgZJRw;oy37BUu{)Ch>EEZ%c zm9qI@g{dn3S)V{6^MP7_ptq&JlHZsz%fnvG9*0%x1w_%H5QQT zWf$ACHi!B)c`oG7XeBK4wPB7Kt+N6)Hjo371DvY&XCsDMYi&<5kk>P|d96%b^ZhR^ zK*@L-vwdPfVcT;lFMyw6yz;rnu6wkTvDByudmJ`G_-UzpVFFhJ$WjL3aKq$L=AL|I 
zlOmxcC}bBImyOkP68_m^o3pTLt3rq5)~s!Kx0*r;;aSG1-N`>5(A#dV^JOrDvgzu*E+6tw8;TG|^&-{yH zl7DfP;{Hi5@4bWo;ra35eSrVZI`f|ZxT61G>ZKv6U~HgorEl{?;QvQ4GPe1j+V#(* zoPVhx(f>mQ|398%WF)8W;AHM(Zfo;@J9u#$6Wf2mmYbB%9Tr58zLxFtN0FI{v^2<~ zez5e~{&Za{<`R2EOkn~77p=tKMsCG21Y==1krQX@J@4 z-PF=KhF%wJ<*MzX+w%u4J_RG!sG~!~x<`(FePWwBN=FpZ5xTIb-*1b^C!UHAmIox9 zbvhZ9bE;p0a5G9qn_d+kRDp{awRD_FHn7ZU8HZYf`F+`|*J^eR;drfbz3r?@#joFFJ3!8jp;t~R`7#Txxjk~Z*L29l&svc+6y~?t8|Z+o*k=m`e!9rxls_Ux zBw>9nFFXOZ{K5sM^ygPyU%v~M1Z@)3vs-c`F={~ZzwR3IVyvISYf&~}5&fpOP zwiux)$VZ}34C2Uqln1mx?#sg$=CEVxKS$tSyYIXC%h2eJv2J~L-{gmk(He-!m0U`L z6o8**uI-vu92)NXAC1Mtb$92m#euoK8ljmtjaFSr$LU;aEU2TocpH}sA0_}9GbUI8 z&|MHx*lEEgJY;Y6O(2{BEnA25s1Rmy_c--O%bXxWmPG`krl-C8c_X*I!xwz^J#Xnr+FRM4%u#b(^yf@Z>ASsy$=^=)MtH~4(~J9+vdcnRgW3y|1BD?WIojVyr61nS);0nr$Xg?U*~bl zvdP`Hrp+?P(Zu(+fJ4u_S2M2T)0#(B*NRSzPgR#}_iGF-H`p(k4{rZ4RO#W5`8+u_ zfzfB-&jf{j(jk)~HNj2VVBx?`dwK*g$vPW*cMc?Q0=13<_>nxL!5lDQ!YFDm+v%%~ zZD#!G@aS4Ja1M77QX-%HB1;9V3?yuLa(dJ#X^uCd9;Eanje54McUtKc#;MZ!Fi!=d zSqHf<+`bb?U_t`|^aAQrggo+Q#w4CJu%P{D?=wOUnrt{kdmwy|dSZ?k23b%*q_9}tG-&~A*#$C#Q_I1$#BR0AbaS|UJ~IQbaR zNgIHJa$cO=;x1`~5Yc)FL8+!&QcE_XqR~Y-NPq%VULz$2B*&on94`%f|J+VBcnE_~ zBQSHbGF6$QC}3k3mxjh2Nr_02S`XWJW`R*#e07xYrvevF)UBM+Z|ajsIVfVVmYU4R zoKwgkqG^Z)OCC~6W7Pu?Gj81BI?USCf6xS%`)ip!J*Me-stN6{ALgojHqTYzSc>A3ynS9*-&>T!ER;YO7N+F(H#Os#8WT zVrOAQE>fHU{@Afa!E%Wcp zHQ>mSqCnV}Vi53Wo&WO`LXbDuM@3nT?^|h)*|`SP(ys~ETyt6m;bzHz1VgvtO1V#9 z=YiRoB|rlin@R@a3R*SU$tkeB>0_*4XGPDj-Ldo6s_a>APKlo4b;wAoJTzKatWJ%| z#P^v14PlOHn)=x{vM0yCV-@L|RCCnIp|x_dB!c^MzwQJpK}2Ql?+yRa$unGZbwD?= zU*Vue-Y8XrZIFgI(?aZT9;vu4E=hMJh9(jXgR_`;MtBP61nwccw&{^okgK2=#oqaN z!#unt<1J(71Yn-LgACbtfxUXg1}~d3b!3|*dxfczUB<}s2CUKVT}iNvWs{vT~IikLS6(rX;j=stHJ!9Wbg<-G6&LYc0-%P6> z{lJkO`|wca@uqSLdI1wLk@8Z2()EFX7PxVX_T^lafeUd&uHW0;o8m5aH`s_=Q)n+Q znP@Pg;P&N~Do7dX*s5a@Ua;Bpb1>bq>J&WLQb75RaYO9~py4@OqVFllQh#i&*&7F6 zy$312ZsjtSyDhE57ANU~Cgj5iyk+&krI0oQ2NA#Dv$u zMTj;IiFBwbc7pZj%e`w#yyUrJ-p4(G72qL#aSQJ5-_#_&E$}oE)sGm= zk^49D#wsdEJ;}R^iAlW1mZ7m!RdS+O2J`myG^FqH!dWaccV&uI{JFg`MeH1m^l77# z!TAce)3YYkYB^Kre6S^PMgQ`CJ-<5I?UBA2PYPWh9pII+i>t~dsf~QPi zC$Ja|FYWp`e7YG;lk*`)xH_1_JFcRXH~Ew1h?y>BCd3#7@#>zG5RRIZ+7#q`+1I#l z${!}zWTD;E@6KKuMSyuv-lhE&7dj#kEXAjx?oomQsvv$^8YtEI-za6HZE`GEk=^KJuN^7wiPVFj$C3 zNCeg9e%T%$xvP%(Tgg$)j&pvj0{e8fShd;!j$@fxWrdAxBRMN`Oh&k3{#H%2B8p^W zcojt=c~9xwQ@|n#RICxiDYXvgR8FlH;&+S_6BFFk%WwgG5#E!!9qJX!uFU3uOu>a% zm^4HA0kgY;K@)oi)=pg(MC#nh*jFjVAN?p>b>m@o^J{F^)xeNd z=Gg3HlmUB?la$KIPl3ROzo%c?PcwTrdOTpw$}rKqYc8Dp@F`B>!H5VC_3W(42so)6 z1k058NOP52iN~=_{7+e3=laiSS`=vrBzIxe&dyT-yd{&#p!a8_RI$4gqmdJ-FDBMZ zRQ{XrJK^HY91$^2A|HAURgo*+>Im$&d!~#;9G_Nn8X-z2ysq^y<_)uzTNFB?D!}bL_EmhBgUlM zf*H3Z6>gol{w5y;*-g3F>e?~K2a@qD=2S@f;yYZ#zdO-;gnHY*+5!r51rU0y`XMW| z7!=k|!2pxoHDhcwPn)3Fub$aX9J^{G zZ_PP2CDIr-f55u8m`EgdhY~)CYkEnqoP&d{M3%f0)!T!%X}9*C{w6vGs(-u$i^70= zs#2fGF;t|PN}%fEhkRuxV3Lxn?3#Augb7gs2P!f*RJ2NKJOZxtpC^+h%CrjXlUJ15 zyu?VmAHc^OB|sR7&v7tZ&a9 z&!0p@)Axjj6;gKQg*o4!avq{D=q)26yY}o&?c{WqLus;v>DnuktV^lso*-zLTADp7 zEU*Nl#;}Ke`c>&Ez%J1<2?rMRrXo-Q75Y@Vu(qUdX zt>w6qLx@?mM8m2taoqsOva5mArvT{&Qy)HUs4Z1v@BO>ua-5y*eQ(4Fv;atrjuY z*bNG#vXR|tqLw7PKXmkZd? 
zsJ=I;x;heTw){P*!DaO`EZ!7{q5;cXzb#_U8mo;jqAP$o^mfqav9ZN>m{R$f@c?Ih z`z(#b-eFwRycpgwy3ew2p+wSIf|$zwth1Lxg>EXCBq`9aBNUYy`LS<|(ExY((9Qp> z{WJPrIuZZM*c&kQV3WSD_h1vIYCRQ%NMKM0uisWiquhXg+@kSh?KjF1nJ5_t7FkY2W%%@K$yK4e%nevb|+WM*-4$tXd5oIQ`& zxPr2w!VpXA13``Fk&#>Szn0#@FZJWE4uxv%Re4l>>7(Y#Y%q1p%q(K)!xUtY7QP6eor5d9Px*9 z2Hl6+ju_Qdb#|(gaN3>@q-mm6o61@s%DHmoI*1*ql z+-5v*`tf8&fMNPMg!`Y2Mr{h5bBgp}<{Kc)5=_tJL?t1;Qkc}fIFK(M#?D3UCT6lt zNN1H$dL;4f8a>9|1i^9-ycFnSR@2_WeosKsPZ1K!mUx+C=`>D~3A~HDIcvyC-q-5& zDvowUn-AGvI}~h$4xX`&mNv8ehK8Ce9gQ!v_IaB;TAOEf=PA$W+;UT5_Wnw+6Tg7w z)L&a+8t4Idu`ZR}g=z6S_8M_;?#S(oCZZ#rM#EPMUftlHrk(EiaZfmdSVO8Kv(A)d zpf(^TM=*m3rDIIK(Q_}59{~cJVVgL+FUOVy4to_lKQT&UIKD5&rw;|-Nrui)(9>+X_8BxBt3}Pw4|2&LLH_%zZIJ5`ZJYI8>+W zW|AD3rB-Q_9MQ#pwM!X~wHE**S*94w6UYDy>*BD3#E;2286gPtltXM{EPVGZiH?W> znM7sp%aN}n_S)by`qvY$X0C{_l|3MS;0B*UQ8C@G?J338hh^u4jvPSCdmF$0HPJPA zIaWFR&_pYIWCZ*tWBs2X0MB37rvL6T{ecEJyZ$Bu{)e&QV(I2;>fmAjCkzmyJpPe> z`=Kst2_|Aq5*{z0XCpIHN(2+8R3lRjSA}H2s?Xt^AHH69?JS_6GSW$tLAfrRDZGpYK84BSrfX#q}vTYGkYGz&? z-k9c)d05`YDlW6Cdm)q7OVohY#r>ZnrbHTb6%lgs=Q4}m#;-~o+6N?n`m70tV5mU?Q4!9XF$?hfX&i=SK zc9I-i_(94d%u@MFA{h2kNcbll*Cq)VG~jWU^IJ@kx!G{`&&% zF%7+M-Mg?q-_Sc;eY9=c=Bq3KH9gp_G^zzum-2H9wBf@$hwcl8v~=$x@atFGj{sZ~ zE(R@bkS@9NIoQu=)}NL-tY}nei$$-?V|?6md2XCiJIOD(8<5^V))oY$X@I{X$j22u z^>5_7e_qkk{#tAQ-!bz)0-FE)AJYG8?A_VS*vQt%-o)(B=&&Z$fPXRjbfjg@sT7ew zMa#2S)DHx2EufStNG+8brHOyp#eu=cc9}a&i}nC+eJoWaS#N)nS{s&yMTacQy9ygt zy;#1RKVJI3yxy{WrrIzZT?&$HGoJbmCgRTp$sTG6sRm_=X*Z+}tpQ<8?AkVs)o!l4 zotT|%c0!4oikXmX7%J=|oX2~`0EOlq=*t}t7Px8R*)*oX(YD__#|42=n|dvP?p98u zmshS|8jAX@xefNfn)H%0t}A2Y{hZg@V>(;DU|bmb6qb3E8J4wmE}ML5H(ya92yje) z1K=BStjFe%yYS0#M(JOTWUYDYClXD|OXR?QhXHg8*R}WMxP(zxa{!cJ|Oa=$KK^ z3XhMcfn^E3hUBYsQ2gn9yW{?NOQpjZDCfC;5HMY#TjA0kN>moyE+k! z1T10jJZz_;odWRTHbKyr9ZQrv;<7Hf-Hq}vZ6}RsG3H@~Vkh<}CYHHE{U>3-vYqoQ zLp^C<;RWHGFw=&uhb3&AwV;aDo-2)&IvtSPQhH14Tea&?!omSvL~lOAzKLcHMz}De z_Z+!kBTEGie^L8G)%0dv-R6_S_EE!06=t>cep}4fC zIa#_G8&yUTVB^+UQ1Hohp=pb$4Y+!#JGU7DIgTy4{vXO|6Fj)yI5j?#ewHtI-+D*k zeTAE+4f@oC zKh%4FWSdQ;9+k9x)Suel*gOBMKi0qM?+=Jf&dS-@!CCIZHT~b5y#H!frQ&)%vKzPa?sRib@fRLrN*7q@vP+ujelnQ|KE^-BU|@q*?n0A@&;t zzIj?`!-Wt!CBCfiSWNLfnfSlIKH>?YDPY}IQL1%o!U*>%+JZ}qm5=!d(+&P6Mm)SXWb!GayHH%K$IVDLtphft ziYhE=wJ@C+VaEY6ydtp#xFH0X!vY6$IjSj*6n}alq2B=f`&p9I_kuWimm2yLN>}CrjWben=yoG@a%FRaKa5?ldRT z*@cm=6ROxgKjM9eQuVIod=#Fg>>ew}0)xlBBE2#TbKIYqy&1T5k{n^oq#u{W{Sxyw zI3Yx3TNsV@$;qKIFTj}wjQ4+8E1Hbrs$)r8xLKQsX18!HrX%x>KHl8rl5Bhv>y`x+ z%U1-<#-a@UG^I=S7oJpUV%+atT=e=3J=>v_4mrf=N5q5HvUoi|yR4kb6~9?5>0SWu z)wNR}3U+3Nd}xTI1ztj5q*y@|XCh=p`ykdLn*l-Kc7TpQUr{GTHh_X{w%LB4P;ZKp zpSCb#5aIrc(|pO$>T3EZrsrrn@j|-G+<0^p=cGo!qC4xT9f)5&pV`OOSwLd~^6l7k zRwJ|Rg@6)wf7oK<-PDo7VXMHaGHw9%3UdU$*eP|YOubdW(C1O2cSGYNdmxznDV02o zYhvDO$*}Qj*zo(P=(Uo1itGW_+IfvE7L(Jjf63nqvhSF5KUS~p-vHi!o>Zj&TD^bq z3;u+5|1N+3BZ&GxXO=&Vx9m*RrAS>IN!k9bTvq#cP=>Cs2q@L4m}sXcEgfRiURC%; zlO|Hh3>HDEYoMScp_G#ET)=@@h>`_v&QGOSi}@hMq&k)(BZuRQmSf-J^~ty#gKi+n zU8vx6m#Azib2=TyL3G|0Mb^-ko%Y9g2j>hH*68%+L(M)+5xGu*X+@qx^wF2f-}G&- z-ItVeTC-xT72JV)aD&yIVRfuAPUy1p4X3Q+{DQZBs*|#9m+mIB>$41V{(?81*YF^4 zxCh1jaN#0CgqeHP;Oq|HL7TYQ9*sl!s z^e_%z7-Y=B%li~(`g1EI1?4fh_kXU&%M7^M>3jv6H9Ht|*A}5!Qyp}SH98HLQMbZW zpXpN+(5BxWpD%k}R+(EwF3*EGR&&kI$U6o_NfY!;IH+tQLcK?Hp{b^5Z5y6JXF%c5 zaTPvR4PT0qiD?ZVCmk}Jyu&j8rJzrWtzat?1E-csyqQ@k6L`e62`ODfn|@#B8%KXM zZz7Vcxob_?Z#kvy=BaoOS;mm9cg(X<+ty&stD{eGHusYvm6bJ5tB5Dj^n{Y>7%i)J z4RyHiO&T>t5&{}LqNV6%f>=c3kwHGhbVnRjgpiq-hbnC+VjdH{0(k*y^T+Alw^wUU z5{7p>z}Eh)9PL}+$mEV9JoJ^c(_(pR*ju6NCS5{0CU`|;l+ghJwOX6ZQYpQLH%W#C z?!3F-xRxJv5L-)cd?oQGFcj4UPw}Iz5GruGea`-j`nGGB1dk^wO=$B|s%llF#fm{) 
zdf$SoW#xEi<`3tZ2x~V-5|n%a&T-$G`f>DgtJCZHa>C}9&%4;Clb~P7GZ4sW2a(Q{$ZC*D zlQrZhr>?3}?#1c{rLv!4#;mxcbK53s9$Vqf_GnHeiT$5Ov*AaWfUy;b!y{>JoGsRprUbf`sx~AP)-_`C%~2 ziJ+_e1n7DtDX`rSN1$6p;m;|wrODFNqe5h8jRY@2-<-*51triIrO43SIFeOWIr8VE zyu^@lIik)A5+bp4KirborT?alX`ymHoiGmD9N3>78_`mHSSIj zRY;5(pv;>sInL^@3@Y8MI``~yp_lZ@|7$+&JZPg910fzU%2=(8eyWB#T53C`H5dF| z2nnMM#e@-)*Sa0>gvA5ykoAt5urqo3VO*bdDmwc>z9+Pc`iSXxr#~@$~$ge@t z)CsCVF3#Fq`J^D*=+8YxD5@mBe=Gr26LCdOQ$y3&mCoTL?2cCk;HFe98*uK;j zX18XuLt&K^(Neh*o2hoQ3+!wR!Zf)HH8Zn;OCl*4ks`>SA5eX*{d66Mpm+*oZJZ#T z3YB!sAP7M~Vi-PtD=MueH5g0<#1Bsadqe0OwB{2u^@jQoNzE4`erVHsJ!k=?dgUbd zIB>&=kP|$;?_tUv%9H>@lJhC97vq8Rj&it_J7|`qH?<)vPN{~1a(my^V&TCTE>LB? z*TFPUfzW}y$=KO@G4!^ZJgiJ;dvNj*15-)PJ3XvqM@REIAwrh-s~Z*gBg+jQA3U#2 zZo^I3RIGet7_-wG{q}l?>y6$-_pBsu0>5N#ch!xD^I*l*6WD~JY3E7_u$tay$?DB+b3Z(W3#NQC&|LlSaf1O+M6gP+qzLQ+?hpp~vt zkn)W!d2(zS+jwh%@J!WqwyXi{8T6v$X?{eupikV`806%lZvRrF7lr)QBv<;U!t2a zetMwI7K6N4zpF=%%!jM4`f@=9_H%^JK$3l zk#;1B{pq$o!|a=wd=ht6aD{nn4WaBWXW4TMv+58w{8HOX-qkE5FRSJVvvMwfN$!r7 z4s1;PMX&I>?%40EwuMPdvWprx7#;T;fN64{Od*AqS}vnYgqUeBi=qMC5#2pA=RyXg zuQEOvq;Lkqa^dQ;BEzlpEd|XUURCS-qd8|#_zqGr9oAtSe4DfU%S6T!`W`IfjrTYC zuFd|Ai>7$xrntx)Q*dZm8pAf6EDExDOv6Tc38VFW6V_~#ytj$aIYr&(-enS2OKS?4 zagT-CbPppLh1Mt(4yx@%>Dp7&=G;$iBjk1oQWlOV9P7O6!uQ_}8haMi)=_RIn$91W z;inZ+tTl&K&UawBb0Sn3W5lm-SA>5J&guG`Pn=e3ZZ@W_NKeuZSo_t-eI2&D1gTS* zj0i8XUNpn|3eRF+ubbXr1HdF{F8^NiCT^UFhQVQh4kru!G|e+mml0_tl8&cqPz$-M zoWjF7?$W7c8n((`XX{D^RkUPgs(U@{+Ba?xRt8JG$c!~i_?p{WL#%W2wn0X~F1~{LBLE)2v5Lh5~jBctJ zGPNFI+!DATVEAr{j;9?f;e(lFTeDD?KHnh96Y)-$NTvs%}SnOVBw>s0a zkK(^huLB0fc2jA!#1Q$0*=&EZ<#^iNy->Kszf=DRfJ{*`;H@U*Q{qbQ*9I!Kl~b}A zQ@49V2-o!0hX8ii+pP%7*0_<^6jGqEjw(TI8X5>++e-t{0c3M7uov#>^hrBdK2Pmt zV3~pIyn|zAzBvTma?kWIy9t*;(@hIi)RL{FD4x)jwl;%kl@V7!lhEYYU?1`Zy8w zqSGflV*9>48?H}Vuh1F$mBU(^fvgj!Rk+juok4i+v^}Q#_qbm%4(>`H#rOS*TW-2Y73;R|G%rcCj2w0aJ z)wa$hXZTYMpS6xdf01v~<@1jn^puTWmJ%qRZRah(8+x&Uld(_0`8T^~Nbp&S>s|^d zSW8;$i=U0mxlNXJPb?feFfBVW2%`A@xY_xRk9Q<-1174<65Y;Tllgv(OsYXEHA^$c z%O8fNUf8S`esMvuVnXvZ9Ond~Ca#KE_XmfA@qPJ|WwCIW4!td9!`LGI#=eZcFx9Y0 zTHibi8;y9oK51ZNvXQ0Kuitd>I7R_kR~B8sZy5Di&29iNGK*S6aBlAAKXV)KZ{40f z#A+Dyv&}oKXIRH7$4wZmeY!Vt5b!qobY_G|;s8oIvH1!2GaM)6G2_tHNOo8+oPwb^ ze=wI9*zgW~GebYoGuZQu3>T2YPG>+twL#vn*iM zv#M6V%{YR*d*f_*uBX^$wySn`sKy+w`~H4W!&IDYMfx(FnR}cqp)PG!oi&%DS=ZRd zL>k@vx?ZW*t&YX6eIs~&vw(gN<$*h)0M=McEB=Fm{bRJbjp<`_!H)4?M;HIhJ{J4y z=J6*C%;54NFEz3-V{mb@Wl(T7GO;yNFgJHGb5;4{_y1r5e^}(P|67cpdMW=ed<$A; zT)of|NlCCuS=%&^FkcJ_5xF)<+j4LEl-Uj=WwV_ioIhlm*k5o#^ryw12-dYNPO_rx z3IPwx)AtRZ>4|BBoE~7z5nho-!?#%aln*V!eKnUZ$B=g01$52MDtt}2lEyK)g=NkM z@tV2w4VJEKmsrHJZ;Ldiq1qAF9zAp)ySb_EM|@xvcH1cM1DfxyfiAFBo$4$GOob{hblu}@ zi8^R@RgGk>3{P>UU1uu{USp_ST6z{+wS(n7D#%mqsO5@CiH7Z%?dOQT(~Np_4jR+| zHKiFjN;z)HB)_uV&P!ER{e5pWOVaEOXxh+36({@%_)HN5k!dJ#Xm6+%N8EL#*{~@} z`aR@<`(HGcl7P4|qr7f)`QRxz>8jRJ#BAk)5G?55QR-l)qucXWE%7HAuh z#?so&=w}>dF0%;_NLKhTHP#Ny(b@1tK^>@cCrENteNa|!vaQ{xXrNBHrN-b-mR&a^cq3=n|@c^QpX2}CS z?#E!iR8l1o0nui0f>AM1U{w&Li=ClDToaiuD(K#FGS=$MJxnJ z3IuzP{`0UA?ISs`5xEGoC!c~m=L3Jp&t%|UnWVTY&556FKEwSU%Zyj-<}ZE#YX<+T zltJXLI{KfOliD&OAIeTQyN@VM%a2qS)Bn`bZJS!2%!r3UPp@8PoR!y@;pDe+K{s5OXx(5X_B|pB zs-!=FJv!zna8<_CP_oDlf`&dtsni&4SyGw>$^=jXlfq4cO{H|FY8+y|qTgr(@K#Wo z2azw4!Zs5|d;30L(-kjNXgv4a#E8OXY{{VB+ee3udp19Whl;u3M-_S{&KnD+wEgVB z4)J6;GgVxea?Jd62?{WnPr+-ntfVQV89=XUa*eiJfI%Ifmrpt=lh^l7rcSUK|Kfd#gGHSkWOo934b_?i#KCOnbPiFS88%nAYYpxv%5IS-P?wT2krlL6 z==Whc$<6lZe6tn}8a=VVXMPwbWya34)n@Eqv?q>yZhJ`i1>*POL(;O#g8f*0JAa!A z{u9;xk1zLMsP6y4uH(;r`j;}X1Y&l!n_ojq)UYRYDfCHdA#WL7EEesRt;U;5OnY<;sQtG&@t=qvZ^W-KmHa>U9q1!j|$jvtC>y&*R 
zkgv%~fx*WS^$cB2JD+tpJ+tUFe;^Ee1AeyH2*F_CwTY0;dqc73SV=w&bPce zAmh&MK9sy8rHswB9JPsG7|9i%B~>`YZRH=o zwL;?H<}Qu|iL7B`sCIjklbBga?o%Yje)?q{{JJCvOlzDCtGf85_~SYdN#h)x0>gFa zyzF@Gu$|-RDNRV7x#V$LMT5>I7aa3FlL4K<9h=t^DoOR$}-g(Tu}mt;q8$?R-fn}$^tLSz~l5uvuwD}s)|3KA*etGg$f zHX$R0w?y}(A{e)OZpI^3)mKq*pv7jtj>3(lPN-ZX1W7GqS-ByjaH$B3-VkUm zf?pDKLzAJY%OoamkNf^@+0Ve*<|d0$^Q(9y@4FZ}#4PO!Lq%UvKF6#x3^1SStrT}M zRUOW5-ANK~g%7JlSj=vSWO8n4p?lsK-do!u zJ?a>akHv1uHCiu^nT?{wj#bI+wieTEr&5yojMdhTx45@;FUOX+JSk#( zTRf{q%IMg?FdxojMdccZV1A*@4`&+0n>|QUV_$j!Z*z2&K`#JaIsr`4 z6$dETth&l*ZSr{SlD?}OXh<8m*{w%kAhA{F;s+;zWOvaAM8_kjFVf`lFfKuM?VszV z36Kgg8`J$1n`Gut;C2;5=ZogiY->>RtR@B>A*|BDr|s(&9otp?zX}&H? zFl{HFilBU^5|7j8;7fKL*qF&A;3_89G{`G_|NwwbH7O{M-N?YYhl zwU9Ko$vewag$qfU{3>?~JM1tjG8c!k)!f z0>B@~^cFz~tD&0vDSLMbq}Q2Oh_+$+NG;-NZHhMNEA zwCaB=p8xr}^zTG5IV1azyV8HL<1sa}HFGulD_ZQoWfyV&*@pkf8bbk=5k_nq6+>0T zfP+G=jszE~Rx=c&qgUgH&oI)`G%wqP%QYa=&t0H?n56zSm{WU^CW-{5{v|9+YWnnQ z-lF~ZZ8|Oo2-Yx#sQUuVS7w5%+OReA`-K>N8{LoY-aUGYQv3#83GQ7^5wUuU7$WY= zLo`;z?tx9VU5Ho;jbjVU72ttE_U)A;tmOt4iaOau83lXP!ZeI($f5dLj}TZ8mz{%< z>#KH#NhOd)DAm4jA^@jI@+q3|F3bHfJv!&OVwnhYVL}^Sw?wt>T3X4uPCs-*fR;MD zpF>Vf2XRGt#WmMXit7L6scmx=mN{VUUX2PIKckWBq2^~YUGEWflYmhZRjd! zbuL#dc_Sk$I#TG26G&(so)wNIN!sX`y3ra{^zk}Y_jr|Do-v$*RblKxtPNXhOiktTU!i_oCCDB?`dw)V{pdCoyDerGSb}cnl!=8NgGjd%* zvRi1|hFMg;4R*ez0#X$GC$}HnEWyHY6K1|%$RkOs26U(3FOo*~lAgRH6mbLG{oPd6 zS3V}*;evsBtn?`S0A^Mjs;}rk+*_^93YK2!j1g3Hav_gmLdHW6|C)RrfbZ*XeH2v5 z|5{Mi|GgV&|G)5>`)Fk*W-c!BW*&bWe5LHgJgr<@KeU$r(qaBd!YNX<`^``DZ%)Fe zD+vvHrZ35!L4QC+3thq}&0ma6r;flD(5=zorWnqKxAV_AuF_c033**@8u2Aiuf$K&2-kJ=v^N{p~k-QD&%$J}PacnGby68m< zEa#?@N0;?G+dAV#5h;8!fDlDOvf8o@_z~-XCLR5Rv+wkNtgz|g3k&mk%etLIuo0`l z37VO4)Veq78eE)*96Is0-XZ|5NI5P`u?gxJtaxn{k&X+PLF5vSYV)0O>w7B(j6$?D zDH%pyi;-k^eq#a$TpZ|t16Irm8KZVL>t6YBG|)v}G-!4dxd_)UlxLopl8`3V6>VpQ z3o^X7S_2^dT#*rYv1=mY;aaj1RzPglDD@!{fUirlvdu;}SV{!tHEWccoRK%&1#2WPos-8y!Jhl3e8uVbp}MI2Fj9SVa4X+3Mskr_nh zfgY>69f)Y1zc%2j9d=XFslhzK6?z$_v0XeA-|8O{qbmOr#?m- ziVOSl2M`*=URl1L@CfC^(XAsOdu zHi~0TsV|-?;a1D&pM3t7KO0WcadlVkDlF|+*k}mxC4ODnrnU-g@PlO3$!icG`Yp;g zrjSSN3{(C9=8Xt*YoFmEC0ur}U+*vwkeGDIq^1DAB}yIz1dE(fe)FumQTp9b%9`oK zir1WDiyY5+x=RgE_d{TV;*$i7Pr>H$TWKH*W&*t&2)KUYhY#fLI*I2IHZ9;~09!Ut z%gRq-<`j=!``a!fy}1*R7vOeabxyI6E5UlYl03|82YG8WKV%#hW0IAk(MTL;L$`qC zkzl;$$o+CF7n=%abl1%*Uv(4&VxbP;Vd?Lb%HhtT+z|`=K@Z2f-s1!0X|Tov5HP(V z%!S5SUE#)NkB$s8Nlr4bYp(7Zb@NXf3pAC(a~&_6%Wj9 z&+_AoKmv;OwWR0aCvL~3QJ$lT*sit}Hw@ohrJwSChd0HIMZ5Co4bG0Rm(9)%_~L<= zqVshnnRB^48AF*#6QVf8rq8>SM-JCuY}7CdB`Ya*?PgR~mvt*zGWLhvRMSpBdwhhcXi$@O?(xyW*Fa`u;RxYQ2(cc66lDvIBM204&3 z0TB0Lc`D6V`1vTocR@uxv6;9{k;)#F(sH?!Y_zkmr?>?iU?r_sqa`xS0pimN)|(7C zxm`W6@Y+BvIYvA0Dw*H1?00Q%p{80CEq9^)4ywsBzoB&g0_GsKpxuoge;66_4w8gX z5lEl{w47|p5ca+Y4Zn*KL+=U~X_a2j+rhTiTS(#fJiu;`Sc<{0Y_FV~GP5mzZ7$I%)F3JO-4f~S4^l5{O7 zcblsENN0oYQ z=K5R6{yXI7zi%LaqOX5)bfw3smGgij^Jz7|NdWcGeWesuRz{>tc;?gt)YoQNJr3jw zj6LTn^;zb}k^jEjUQ!eLeU@{#c${iG-eP&$`iP411z`;K0=SBSRg9RIEXN z@clQaruX12PQEta>a4wk0Gc;ez(st8?5s;EbTfEx`X6k`a#HbGYRv$9^xJEPU=oYa zVY!ea;WXVq#EV>I?oY{5EsJC z(=oXw+fk_pd#kmccb1D`Pdd6)n0mv3Y@SG~^hN`LSh$|x1AdHwkbg15gt{f)omBVy z9+%L)p(#Tr2I6<3un!E5>8(wa&^pMt!5s~IX0!2Nmjk_v)mjM#w+GFn)Qb0>(Arvw zmaXa$a;Q?;AUCv(3EoePu_5^Cu7~H8UD#x*OC~~ye>$H?4_v30Wn+*H^q3oT)E77)X;fihBp0QoA zZQHh!ifvYGCl%Yajf!ovl0R$jeb%{q-+Ryf{=Cd2PxE2)(fVku_1-i!kWs*I!%7&# zsX}H8%J?fiyT)n#sNzyY<-{Ij47p^YW#JwC)*mqs<>HrW;fpTElF-0@Z!f$r$S6pW zQBNA2XeOl;@MtZm26b1lebNgIx}5+3y*h212}ylA_AN(HV}N;OWvHVu->RR9l!~`h zo>+&7Lf!ZX=^Ig(Dw-(HOCcOtm+TJJXn6%K?0`H;|punuZOg!+yDU zG~{}?x!1h>A5fhyAQPt(Mq=J}7S{71?DovcDR5Umf=`idH{mbbj-%&EH~arw%NjP= z{T427{b$704 
z5IvyK^mqy_2`mROECodH*EE5snD&Mdn`*GxSXVj5e$6S2M@^M(W!{K!{+1axiU`QE za82~OY-Kl_p8U1-<>SR1WCpK45ANzW3m(UMHEnfVYVF39wD{HXF1fu@Jvqa6*&+z- z61yW#-8q4najOcog=Vj}a>?`rvNL)VYyqxnEZ$v5^XoiwZ zLW$=7FCl!&U5R&IWHAgOGYN4%4BJY3KZ#TmOuhzH+~G3LB0DTkrIR_Sx9BV z8oiOmj$l4g>HA&KK!r_meU7X(+j)on?-wQ|nTZ@lPO{9I&k&R`^`v>^fJ#val^TXH zA`}oz(_{5LYR}#mNwRHfU{u;D!ZM3#tr~l2f_{+@|G>dm5*ZgtO(d(r5!>bU=dvMF z>HzsG4Q;0QUXze|2*JIPcv3W-Y5-fS2zt@uuU+g#96QV!{zko7!P+?8CVLQe8RtFp z7p1DmA4FbZ(T^PQnTROP-moN}I}{<=31aoN)`_3tr8_Y&US1h`9;0~4a#w!Fc_3Nj z7Bau)Z%vgGFZ$&P-l8^nKs}KmT$zsqmvyd#NR=i!T-+Nk!>OvICI(ncF~JM`r(W`u z4FiwJ)qoGyW76S@3T9biW2JePMhXsIkKLxkhaZ3yU#Qg1vf0p0v?z&&^wA0n>zytL zw@KaPvN~~nPzOvmY3`iEz#W(Xxxw1f^Ripn_%`gCMyT9^YSfs#(E^{EaC;(eAOXELhu~KEFN#MRf zXXD=A0-n<3j=El&o5r+pTH8VxNeKAfU09AiDmk2e9*CM zF(Tiw7SEg6Vjd!iJbHmoUjAerD8_k=!ziTJoL^keMw+xX{AM+l>5}=o1WkD2L~C{D zlJ492mY?rC=rt%Bc;`;kM)e9aZ7-q?LVC6ZTyuzytpoVO_O(a7?l|-QDaeOwPjq|- z+ZsJ~w$<975PKQhpJsiNcFaMpZ7CKRzv=oF^HvNYd7L~4+HP{>zapo%V+gSQV{+l) z48UzT`WXtLACx})anPIJh769BojWgf9g+=-Y}a-Vj&;OZ#5zc11`d0|lOizt9q-KT z?kpgo#_H)iPATMIf6dCyB(Mc!s4pUkMM%3-kQ0IMUMh$ne-;xV;FYUIDm5kbvP4Jh zJx2c->!N5ITKz#yVm*yW(7xMg#?>^7k8AJd-bYKV(S6t!Q(>5uUBD(3XWBwhowVJe zsZE+w90|szS(n0jvWvfZB@IhcXZqzmrN%#X53&c9f;3nDLoC8V&MU9EzsE#-%jhS8 z7K^p(dxhzHALSG2DMJDTmch^ym&~=`55%P-8Yio~MrM(c;|XLFHPYIhS3Cd*o%BN| z9WDLgmVN+b^h(^XC@TOBn+$4&+IE(URe)$Oazq8)%Gqx?)wo6G6Z}$ELvQ})fZ5nN z+8b>25Pg^~;YV?%c#MNzb$pyR04BMLXlyCI_(7ZAsVr3hAEEk~s;;Vp&51>-K+_^h z@B~NlsB5w{9JKc*eR(e63_C7|Gh`}ztVXBmn2?M z1EVjyhR1*PHCl4m0;pfHE-f3*U<*YRCz@Jd!Em*-kX3MmyI@lOD`6pQAl1fbu2k*o z3x)jUCqnc0dHlIwzW|EE=^vR!_a`P^x}3(@`F(xg;J;yokvwaqa9^IZ4}BL)qtWW0 zRd2TGXNtu$(l5Ed$3g_njuH*D3QBBTLu+jzUW*G5*ITwl8e3}pO|w8Z>Xna`(qtE- zU2+iR!UQ0_G`&c`=Uj$?%p4Yz{|s`jym^Q~00rIp8XRv?)evyM9$rBaC4y8UjMxV8 zLsZ{>zeg25{_tWhvJ#_@8QyQZl|HmRUFfYDw#S=%3SjG zM%qJ3(DYp6m4v7dBc|bHDI|6jlm?JPjthIQgXneEpWGApoOfHBPDNiF_m%cNAAr+U zWb#kgvMTq5N2NhMPPX*$Y^$)+@}G$cw`vJqs*ttD!_kJHbLBaJ`OiKJ4_TUp9Ec?} zkTCQv+SjDp{(6WxN;-A^sX~mbr!8>lL8Di(WClFZkWNTrjiookB^0S8)J|1EH$%BNNUi-U~4eW^M~St>K{{*5`vIc6XOD3UkBB}vD@QH zR9-CC+f=)|2pqmmo>ap(TDriqO2CzZ?o%bHiK`V~aaJ6*AlT}Xs|;DMhpP_H#=NYG z7KSB3f>f}Znv98CfL>Qw^Kx9RBPs?6a{uUH7^b^8?eD~AWhPBQvcZza2m6xkYPJH3 zGFNl~+qPmO*oWm18)pd5Sr*z*4rOe%zCYa(OAt5*R%htg#Ohb!34)4gN_M(jT|6J^%f68SSsw7 z>3hf|WATU7(fT|0@9%sK?}p znRXmF8{`CU8enL&N^z6vccmV3pwnRwCK>aeX>OVH(cMp`#^@?JijkY>LNo>QjW^M(##he z?k9o~9j&#|y)9EHzW;st4@zd{z%U?Zcu;4>OV^GoeN= z!v)!?v@&*{yi{x4VgMt-b64oo#dw@x-8Iw38oUM&PH(W}6D!!4o`y(Z{0vf7Ggo44 zd%n2KH|EB#ZJzjJ6Y9L0`l?ot(DgOcq5ye0wN`xO5Rew;NgcuueHJMFOF!APRFMMK z%3L_7c%z#v)}2^Mz$DV>@UGbAP_FJZJWmYQt?Td~rIbz~A+ULr^t)=&{WG zB17EoVR^V_OYwOzLqMJJqrAm&E-4Yu4~T z*}i{e@>Z#=JEDjo@*2U=)1ZGTD@H*!bTA_cMk3n4y+hkHVUQZRQVGL1mJBdw8nqa} z9^(XHz{S+cy(B8$s3Ozi;70zCD!)TdHh*R_Z<@(Ib$NYmS$*?(pNgw|cGwO>TSkEm zJm|3-h=CmIx{?Wnf8yx>tugFWgfq!_UrccZ8!plW9j_^-TrWgFk8>2O9JVC*(~( z?umoK(hP2L*oGL$5#q0Tgq&3WSm#f1A6OKP9<-;_>6j+C$s9iLv~{`Yvd_mFRP~!p z&PTPY49^`~<}v8)^GzN?fR$=T>hKqGj@H7YhVU|j=kX@a+UUS;j!CO!2j1P z^F0EENN120#VO6~*h3ibFxiwboO7~Sws=J&FEa96;w@Esl843N!<(!+exa8Va}-+G zr$yX$ln8sfIr=8Jb}Bgzt12Lg1n}g61q)|g1RMMw-1aI5s2G6#}p4AGWHeIJ|M5 z;Tb1fA}41QiaJ^^PZXsLj*6xSMATkHz%@JF9LL65m=nh@gJF9h{`pt>wKrq|gl$Le zC>oZ6j!9`(l=|tKYdH+@JCee15jY7Oobih=wb91}8Ivu;EgJ35Ju+xJrd15gG3m~p z*$FAs_Q8x-$n3cRn?WxrC2%}5iC@1gnL?>zx$j+r;PJ%JSZ}dy=gphW|KUQrTSV(J z`|CCIZ$pg#*^esy@AdxwHkN$BvWTmRt+UhrPRgd~p zZOZrU;|qF4c#Q0^9a2CId80u4S1L$t&HTl&1)DOw!nZG8ShkVOFiXJ|D1}svj%g+@ z8TQq7*@CsDM38UI#YinM$Cq&`V9&w&4}rFLw?7oGxYCoC2FQ^+<1FJYV 
zfE2)s+9ztdELj?4Vpr_iGSb-^jr7uPI5?;z8_U7lkv!({e(rWZP(w)jz^Qi{z&HKWobaV3`ru^2zO71-Ys4+X)s7(@h-U0QZHU?4i^k588OhdL;nLG z;dv`@IJzWe7ho)VHzU{E#IJwdIUD-m{dmp=L&B;45w=S;B+Gac!)LMP635s%$Oabm z2u$t?^yffHI?JsnYe=VDvVBSXG2VI1DMudQH{|2vcgg#+){Ec!^XL`U0I?Vm4y*5i;9tFOM2=zqjE_W%8?EB&tv`u|R6 zGPZE~lFn>RjQ%^LNy+LTiy%O>QfMo_iy+2_)){KW746~}=0idi5*N=5#Li!QgBiE3 zM*#Ui^$V60XPNyM$xMt~%eFdox#2X;>vs8*fS=#*i!J97YT6z4CrQzkW7wyLnA3oH z_dE~N=W=ct#^O4{n@+a956k1#D`hf=@y+Vt{R^;E{sOEGb>FcbRZPitZ>QAt=%m4q z#JReb`vX%zRT~ahS5jYiWkQTB3f>GH<_MFsx-V|{4}+GC0LvJnew;6!GOztyvKISl zQMhKxsvLniT-{4{5gLcwGuI@%eK{!HseBA9BoBRqi;hVhs)q|)z2wcliL>kpbLcMk z7T#~r=H%724QCB)YZYx)@Xl#!;2WLEY^*@Y?B@>`SOv4MOSf;J2H6*k1lDKRgom#A z{HPI+i*uKE>-?O$|2q>c_ZJ{sLcN7|)1Tt7u;T5p)J88JxP*l~U{364jRsi-}wdZ{j)rM=|NM~bI%mQJUnj8BeMQ5fKh-})7GiXYVspl1W97*<}Ry#0OWD+@zk}k41y%Z^%T1nQL7H+s$J|WI5 z%xHm#)^3p2>kSz0mye6kR`MZp=zP<>IdKDZDkpM((z)8T)BJw?0R=d&|pL-%=C|#g$dI zmj>G|AVYQ{EN$&aGx~JyPd9d8YYjJRi!S&LkH?LhvGU0lXNt4!Z7!{0mg|$fb-qq| zHZ$PzF3Ts|O_}+VBE`g^Y+shJs`#3HGl8as{&-E34Uw7g8%YcYGHl2%P1j9L196qd zXRU~S6K1^!9W6E(*g)FL*Cx4_1}GZl`#aTfk70(C(MQKLIEnqUF$ZCtw*v&=oMqfu zMhzJ25+ZF7st|ue@%aJFe5j~h9-%rzsT*RBQYbemFUF{bTJ||%hR%YGdu>>Z(NulN zReefR+1{1P3xy@YrdvNsXmWA~iIOhsz5!01L>k!m)?myz@{qWNbSv1z3wK*WpSSP( z%L{qex?qlxzOd@BrZRZ8Wp(#pi`fP|N_;w*G1+yKhF_s#Bl3wpiy?-@p-6B$LtCsD zag2od^P4qlR@qm#ZQb}IHI!1|DJbf*F0rreO~W+{%326dN_`OoDvVQ#P;EzdvMItW%5~B$TNEF?;Kq8WY0WdC`)We?q>owiieye^A?2_k zxeAQu#q@}gEDL6)zw>-R1ri8(2sc+ceL^B7{8+Wa^l=KfSr}j}cFbXv5j(Zi%6z1*hT6cfsT?0gdX6By@%6nhg?2)a46?(GNi*GoryCA_lz zt2nm)y*S)V|I!=({`qen_-~tp(7$Fq{wuHVU%iY5lsnF{Lw2tt6dhy&FjWGvh$JXf z0wE+cF;#d;8K6gKo9u>;;?4dV_`Mme|~q%RK32e3Jv09gsY7O&n$TmRMJ_-O*%*pXqPh@n3$|2g~1b(Q*W4 zWP?yGk#*oaUW5jFijpY_-!AL=?yfmPL_rC>L+y^-;)(skc{Fuap zZ{3Rz!;M%y_pk99B)F9sIB3}jWM}}Z`#p;`+D=^iG?v#Szf!uv;2+7U1;M8oSd zAdz3ptId)qYHc3nBWU6ZW$pqXixW!$5 zoCyg6w2F5ZD%6yOOBD=uV6J>S+EhD08Nw6V`MVUC1QK?jX&-2QN2Plm>ox@GbWX)b zIAY*E>V+K1rh!Vh7&V@16GysCZS+o@zJFi9zh;_3Y^aD(qm%=Pl`z=5sZ2dcTE^Q6 z1r-vlU?*&vyh_s86z2;;3~*F*kreQGxp^aeWuQ3Iy7I|IzgtU_2WJaMh@_c@tyhy$ z2FEbAJvN5dI=_4^5L#@&FlcX;kg3Ck&>+Y&Um!>r-V?y^K(FU-l}zuNQ^^a)=1mg( z`AxJ`WJTh)NpWl%MOQi!7K3RBjHxZrda_WvLrHj{^}HLBb1PR_bzqQyBly6~CdskYPl&qa)>=Uq4TZG5!z+tXY!odnxPq;nEQDq7-Hpr5T1D2 zQEuKll6pt<>I0WcP9%pwr+PAj=WruBOv9!-jh#in$K@c&5p=^ucCOL8lur zA8dm;BJhhC@Z!oK;u$dqAKS7igOP9X>im3Z3h$dz(4mO2j=FMv#*I)f$0eY0uWKH{~W;2A6 z990bg6X2Cmk2=` zu%dfMD>2o<;4P(fsk|**X|X1_nFj%a%3Q@KdBfJ}ERk!&VCRp(9S`8C3`Jg?~Rx`8KnzWKr>B4y( z#V$PyQJge+Sf28FTzhcIM!W?W_X_7K=R2C_#hqw<=riCO5iv_RxwA_4c?0Lt#B&E36P_T{Cs$2kQC5I0wkV zE9~1(ixjFbBF9e=-ls8N6cKw#IyO0}JW^(aKUCPqLv_nOZ5P{%{h=YfF`-WG+!2>2 z{#BpGjF6tN?5RAa&g+d;z26W1j(u_Upai#E61to#1d$W6yn5jo^fLZSDXQayY_2au z*-+TH&gQn9H1j(`H_-I)w$(LyMKoq3zAsAo>X=>8YsfiR^eAa?ACVo8u_61|LV56( zC`X(_3G*poBXT0q8y;QFjRJdBsKf@}n5^dWa>FHw3nSX{Gdo@f#Fk0;YyZSax0uWjEh#VU z`wz#k*q1lIn$mZ^cM2Ct4U|0ekBm4vDFyO6{11rB=}>~c&r^X6^Jm|I@_Z%f7=pw# zTU}%eB#qSXTr(sP(?D@yBa27s9v^vFlr{cOz#OH$s#55gOp!4AFw;8*CzXt|I`y#^M)zv4uz>!DwhN z1#61+qH(PC3( z=OLK2S##EaWd8e>oH{tmn;-c2p%g1`vSf|LMJVT{A`{DTi;cR)Q46m+4YKQRwo-65 zn(v5xI$GiYjQ}Y{epkGRWBtyA{2ZVq=8fqZ*jmKosMheMWew4%uhvG5gsEmZ4 zR@@G0{`sygkB9e~xJn(A7cj<+8b!A&poPQz?!uva;&9g4{H^7~Wdpk`IwT_P^#H!E zSSNtALTBsQkV{93&%*>91+7z*#vvu$$&Q68H0BM5RrgdK*4y_?W5FFJtquyJ&0_?DUw(K%&LuTdkx z#ZZ3xiXvR~IZafWY1rjYOp$QZIma&hf!mwfEn#GsmQ5Peax4#6DxdPX8oz8v0DTG+ zzS|7N*5JPDd<>5g1SMC6)h5fqtdhx~l0;U5Ok@}?rQaq>@ojK#W}PVdECY#Oz1Fdh zEo1Qc@Up);7LC-ruv)8)ts2wc5o$^H4Rni#{cux|4=3(Ui}c6AgIKV!4OB@1KP~uVyWW=c4k~XhK_}ZE#VFr#@i8?a+n;l7K z?~{CmIhJbt->j!FD$v?u4>d@4kuA5iSqEUlnz69zm3Uz7?gk<7l?+_ZoLv=-+vZm8 
zWfRP5p$Fg>ADVJT{A-fI%-SS}o-uA}lX%sak90D!IMHCq*akxl1vS)adwbDCyl9=( z$a$m8l$LlfJ}P)#uC8>;lbT1?&bI1Y28bfrkRW7Cab*z_^&J%ciuHP9z zSw#aKfL*G%oK?=kWA+6ZX-=Bas3amkrG_hZ^4V8IQw&qBk8F zncK-{-@bcZ1Tl0G65;C_^?Z z*|lpYWvP{hF=i(2Z|~|Va#(KJR{gD&nMf zjIwU-9`^k*$rlFQ@@(>Gw^vMPn}>Y!@9PpCE2lIYiyhRg%E<_77g{#x zLC~bUjxSTHZHTy619i2h*(e_&PF;PgQ;lVP3Lf6IN|ZZV483OEKoHC?xPYc43^+?! zr*@IRGY(FcQIjNjhHIlR85EupG}z1F>{4xX9O|~QW%E*uHWp>vV@P${!&)yEJMJaA ztLlwqgNk#iy%)l^LBYxDSLl9CStN>v?4q}^vlb%Z!|9DMeP?>oUDe+FRdzO8Jm@RR zn>GrXCF=xPN~qIdt^!KSu-GA@6r#fzOtLCdw_Z*?^D+t+MQ>Srt@FoM)<@QYLJIzQ zDDwS261w?3E=D=ZT{9N-ajP|V2gB(;AUhY0&pcwWm|~ph00ru~1vK|VP3pF{#MjH5 zv}hMp_+1OjIcjp0R^KIphjw7pSs?>UHOed{jZ!y7BSeQ9ztdXk**K)Z#%I*VAkk+~ zbIL6~nL!iAH&6H^EFkmf60=i63*8Amd%NnR`FeclBo87~n`*lPJ&;bLJLHw>bDCf# z6F?~5n5W2XE3)exWu2eIoO!VZl`=|+&i#OBVSIB3vH`1Mg?FA(SkEAp>UkE zCS*vK*k+_P_6?Rr16{Pc?M2zLRy*V{a>h_01-}In4kOp6`bEam+N1SlayeX%=sfsO zMoAKqwW}oE0@6NqfPz?ly%LQw*v2pNZJ=ZEgiGR^v>&bL35K6ESPdwJ;j(B=o0isI z3#&^eb!d4Qmx=`!ss$H;fx^A!?Z4)9!QW^iMyreP6j=)8e}KFKC9QsgwdLw13eHiA z-Z~!F+Glr#>vjm<+9B-*s$&-`Semo$giSmJULqA|!?H{SuER2_lBmljUYb9hF|)&X zZQ#0pSr6vR0(;%f65=n1_c?LRBTxzTegFEH1lDPDI;L|u&R!3`R1CfB)H$vO%%oo7 zw&~%$j$H^oRR|&crgQc~Ki=UE?J9@ob!?weTg)4_#dx(uKc=`sZ42@Fkg4lp8_gV* zW(uc$n)~(5Or2$)J4R2Y9ujI~-V|SHl0QDU+453pu|@TEq{U>{`{Y`aN+(U@@JYu-#mZ@TqLuG}tYmb{(=-dM0OQ*K8Tp{ma7uwe2jh$Oq*2-{9U# zAQ;(C#03jf`6F>w4{AHYmO`_6b4ndp~%}A*mi~ zcf<>e7fuA=Ox*#@jZVAobUCpCqsA2vhYtaL9F2t@G4v45diI-9a^XskD%`~El8tH%q0~yZf8aobc{MU+N{{Qs7b_~hCKNF{Li?g2&W#I!IK*mq6_|D+Q5X0i`mdp*dbAjz?GjJA1ooOK zIec(pdSdl~UF<>c43MSt!7B{V&kS@KyT}X%3UBUeVbmO=VRb^h7y=Y#5M4;TA&mA( zGi-l20OWp0(-mEODfgy~$zMJ3HQNSuy+;g)IOp9NL(7MREEW9klaljepinhs_isZc^Q`mw( zT^gGWnO0KFQ!+0lJB&p=7;!=^?hc#NQ&U)8BVlvu0uAZ%M#wR}#a$8SmakM8Wdb|ViWw&JUuRq194&FtRqrJ z5Q1l-KWH5jr1GX~-lw4-rLs??D3GdD1NdjM&F#~deC=KzZ#i3^-{gCth4QG34kEy& zF<{Mn;~k~Kn&e%uf*fcxKz2kGWrFFRIQ#R`&%FFGCYkP&#wuW6+O#R={pBfR-zo%y zvtcjkrWk9uVN;u)|IpgD<8i!or!gq#A0J#aeTMC5k4og% zsO>h{i8$NgkU5`G{NR zLtr~UXoLV&qBTSm0)yRVmOQI!z@nmGVX)U;pF(wDQo%OGoPzGZX$tb6#{3ut~L z^{_Hs>#+#w(hT)3*~f)!KJCa+Xzb7&z`&%@M>WHYF(Wg=;uzOwNjbXYxf+!K>Q%=@Jh$q|eqF z{X?u4c@)&`E>$I;I0n`H*>0wixAMFszo4Jt0g6O^QEgE@-9I95#0u)|P!p5tQq)Vv zL|-el0h#$#rD0pmWF2b&+gA9U2yVI!YP{=*z+QDFRP6n7KrB5JGdHPzfe|nL( zQ1es!`L~gc{Bn;`wJOAMh~vDJc?RDJE4a89KLyP|NiNOKe=?Qj^Xc6 zybqo>`HU|Z#pKNn))*8iQ+ng`uErQ8x<>cEpT&J4^!SF^*VEj=+9BFYd3>T_p2*#< zFLI2t`V~C0XT)3aa3?zl2&?*A)zXR^(?=ZFVCeQFk$W+#=d|#bQO>Y^^V3YZ8TCp~ zs%&E9&JFc(ws<$4X2qyiAS_%7A1=vl<1-cb7rEutEEY98_qbdBoP#iQdNFi9<42M% z!xD$@%Q}7t_1S>%D^{j2<8n%`;>s-h0sU3-+ogwe5ntAWy??VF{HGBc@I}S{W4@67 zZ>!P&S&sdecgW|y9KXtg zj-XvOlqE)nn@{ zn_8w~ijevB6bu}l36xq!;V##60NtVhwEon+cls%h4WeCZWbVc$wVio#8hB@h$!~hFO@SZn4Nk`ky5i3;e;uQPYdGHvpl{#w|J_mjAIL%R z_c8h#IsPk9{BM}}*9nSN(NV)uLEXfNBNCz{Yijr*X-PuP5+_)tH)H9a+e^9vuBgOs z&(zw}V^2S|aU%FBk>hjS_(0-3s@c?atocEidNd`hNu=PB2J@8JKD}y|vPw11}U~_d@O5L4$B)^C*W)5bIF;ab&!1} zUaPBas80i%>RL6aLt`ej1L3tX-Z-O3B_&>q6}C|6-^Jl+Fn}01idetdqqfi3Z=bi~ ziKlabUf>k717_v7^(WncRA(g9Uh{h2_Ly1S8I&Hv%jL34!zFbXY4}GB(|~;!ZH+$$ zl|OAYF>DAC90Brl)g{d?E5@(~Yz9XtjB?3{V!YQz59AH6K1b>ls$1oo>Qv)^s%>Le zzl})j&~-w%LWeZ`MI9@{)C@sya?v&wNGfL3-07SrV-7J=#PllUC6gj!N5pdJIYl6r zdsbemADVi#9M617{pdy{NuUIZg|-2vpDBF{-B#U3L>=lh!3vMDwSI&FF|?i3ZOwy? 
zExmkOacjgaZfzqXhpo{3h{E|x4e#!P6fb$TVM>@VAkoJ?@SGG611{vnwH57CkF>lu z!(FPsdy)aW^9DFClQ&x{4q~GlC5%<s3Mv+?Ir@P;$v*PA#Rxy#V2AUY`=H$h{6KU#m~ACx6QQ zBnUw3Ra`FBWNnB|aO?ohnGAr=c;b<0q;-yC_I`>FM?}`IzQwQF%%>TeajwG@zKoeBKzqXe07cVv=vepmT*5qA5nE?da{O(K$&J zCaZ2DQ?YLOg98Frm0=q$LM6P=RXF%?e^N{*@2qn$<-EHB+Njv^UHo^x)e9d*rLfgO zQ2Wx0s1W7~(H|q>C<6=~%-QiIRS&^m++1EGOEP+fsJ|b-f8Gxn^5=dkJio=I6q zaud`_l@0#TghSaRT(^pFl_U0*z}*y38KF*mH#6){Yx0;hSJ78c(|H8XPUr|JCIDF(NG5l5M>0D zYD@ehLSs}Ad4m)*bW>x+cr}<;FhSI+(Q?sJ%Ze4-s;a74B@tV^khN+F|FP@BXJzfJ zE|mA9@$+dCw#FY$U?InQ`R(`9Nfd%IBgUoz=L|-FhCv8T*_yOM2`nSupd|DO zDQ?H@)V3s8(7k+j}WpAG|`H(L{K172^)sfqp)}AzI!BBp-{@ezP-1YIyVI-RAQl!B7l#% zmWD$*p~EohSgL z95TsXH;MqIGDsV|IlU!Yq_{68mXrYHhq#s!V+I2F3=4rR&H`o+SYxGHpR7h`jq(kO z84FTOB6`+jJV~9O8wcjsYjvU3%WIg9{4??!(iFHkY94;6TDfBjQZGwUqHjA-M4T3* zT1c`NqG}xBp-M2+x%`kl8;Np}qOG5F%*XWp^Q$_ec6}hZG02y}>IW*v=n+W4W$eGRhWjolfs)o{ zJJ!TbkYd6MOA}Un2U=445uz>!#3laSNRh=lAP$;k!V_9P=mC|m&bbpeS~QK~!8~W#V8uSC(*Z-;wdBILEq+gR^#P!HW0;~@=m#IB!`awA@&)c)- zwv;!rD42f|BBU@qyTu#OlWvyL+5BoDH7(vSp{Bhe@6OKpROV@mfG?)V?^py?9Wk4^ z5*o?K!2a6jkM$mmWwQ1PlN(3Wq9o9=3@Rc&G32JikR_R+Wd6v+A_YG_tG{g?i~Gn< z0SuI~oI=L9v>gP72`^LA}K9mWp zW(L`8Qd6aGFxM;x4+5OOnNh1PD!`%O-AJhZ&;gd@JnMj*?kx!mECsx*=f@i) zyoogUKzRpUdbsy~KjsV6SKXVPDm_mH5uP^FFmdY5bbLJ$dB*kjhc0XM{hXJ4iOedY>aq=wj-Ep>JthBT=BCH~bLE1VlqY20ug z*%MElFK{*{PQ8+QkAK(p_L>FPBI$9{7-jmERZjlG3(^6*RU*zDS`$;FS3FlHhQG-u zn5=xAc8;+f>RbdVEMDK0hbj^zCGcwW9FF7*%CE!n(g^&%@$DBw9eTJy)wgc#+=3}d z^KxZhlOJk3%YdKmKMOum7FRU^xyXz~jETXbDpsp5&cc`1N)|IheRGH6IH^po(5a{$ ze&KX=jDkp zK@@?gtymiLbHB2`K3~Utm@0s9{im+UT$(?nB2fgD2Kl04#0sdkfVxk(d6FXc`XU-^ zZ~!Ap3WLOIO_Is|6O-cjasDa7Py>jxdNp)ez=XZ$6cBq|D>cdnY;%onBh+sMx0>uk z;&-0R$AKb_18irV-M(LaRG!snwcyK&x%%2-f3St0lBABQo0xQ}hq*v{7ZYumAoL{x zT5<4Y*I#m)xpw>9yo_+Jh08+PF8*?!0bYs`qlLUI(<_}zu~-ZZnfOqRv$18wZZ;IX z+TxxlWh)Cbke0ZCrF7f3anm|l>p?cAcT5@QpjBJNee~iH)kkE-#$5XnNr@gC3Ae5V zqG};1vlWvb*$qA)DY^I^(2db=^vt;s?MSy~+_TDYiZ=>qf4DB$dabV2OgTEwb+NFZ z6r<^5;`jo}ONy-oiIR*^l<(t=)1bas>}|n^R6hm2QQtQtX!`6^mHuSm4rGYZ zK6$+jS6N0AL4FZv&4RRNY>CLU5}GQGiCvFa-6eqd@I2+ud;~0pfjWba zjgn|}H^H)XFw*cyka&`I;;eWEq-2??WKsdPRpUM+TvmNhs`p>oh**ulLvU&Op(PN+ zM1Cc>VbcpvqeYXIin-(mgIL4Ykc;5%N!o-+viWT6V4^8-ze9$xI@LNoY8N4&AEjQ0 zRTs0j40^1tHJ7->Mew^40mgi^}kV}f2K|D z`&0ng1zy#2Gq#g%dm+^&IGxxYkcs!KQ&qZ7>Bvt!!6R+O5~vT{Sb=qn|DZ@~+q&1S zoA-isPq;ucG*YBQ#ge*dQb@e@0Ok*S!U{V?pxxbY0rp+_fdGgwWlD&}zU~#d(4pV% z2lqBs@HzwaKA&7D$%uX4M0y2y;WaFHA?`_pk93n4pk& z=LE?%Q}lAe8D-|P={dq$wzDN!Wziv6))QPR+b@E8s|WW+;(s!YC98iA0MawOS45*7 zCn0}Frg7$YXF8GLwN;Nm;1na>A{x`#pVKpOi$tY~kn__S__f2n(w?YT?i<-z0#0>! zM#IIW&iID+UhPk)YdXzHfe9+%{z8Ty3tIS=NykL2WcbDiM*EKR5u{=O4J$>Y#16_? zZBvN&!Tj|Ny@MlU9;oxs*}IkDc2y+P>mMPXt;$2y2Qs^I%d~vQrEl1uxmABtn>Z6w zv)9Km38WxXQ$H4at0k_2C2uYkZ!KLTJd26WOk{f6wrI z-C7K`DI{{EbWa=c%mU`5eNVi-V4=V>XiCjG+$#;lyr9 zQU}eBR2B@B(&!S1MrIIGdDerB~} zKM4^hU1%w2spLc3s%#29Gf>T`?Q48}|A2`TrzlVrD`~$fjcy zHISkHL5J_J&P(dpr1~=ie;WPYb}aV6+DIP3S-hXC$tB}STXm0VEe9LWAT1m?2-oX0 zCGzAxt!Nx;c97#@yZ;|!?-*p;wj^xtUA4=$ZQHhO+qP}nw(Y82wr%XPZP!=lp4<0! 
z-;RFc`>|q1tRHj5iWqZb&K#M~Qx*41tu{u4;}MM)D?$qnfS9z?ThW)Gh0;@bwz0mt~JB>O?qp( z!DU`eD$~pERMmc)On_UBdq!Gs<{!P^YNRZiUf|AgEjd{qTGvWKiQW4XjB!e139#yI z3M!BCw{(Dj%{Ld7Ck(ch^KLmaSHoTFW=gtkC1y&{@J@ytJ4R9bej* zXHRKt2A$gUjVdN5aiFX3m)YE^xk;H@VJ>wz7Uo;|w$;Nm-3w;)diAF)xw%`tuG;g+ zJ5`qbU!zv_XD)^Nt5ps1YO89rxfAQkId{uq9IFi9lw0qAZ<(4=AJR8@S!)zYm{{|5 zt?NUm3vTUNKd#VQSzEWdEhHNeb&VH_x9E9{^Xln)=y9D8khrXLco$OeG)}DsYgp`| ztzOVXur!N&N`Jy=JD5bSTqCe8f{wVDu|`~%kPKLVT84&Ur?lQPUU;bD>}o_)aXMR8 zg9+(81MH1VF47W=X({|E1>P0SS!M+OC%xXv>Ji?hWMCi(!2DQF_N;z8Ns@_LEyMlggO=#_n$jIsZB!&TE!nv(F19F-#8o zD(^~!E%%FqQubedt~OXaq!*M< zSTuk)tOc5mlktO`0QYe^50SB2CbamUfu;_Ly2({NDl_*t4$RQc$HrH8`xiCyqAi}6iS=Ruc>7x3E}COuy8c-1Tu(8zLk(vo?_Eu2dQO@Gj`+%DEbP-@ zukD*-_Vh%Xk8963!{W-3xow#6|YLl>q`BkV-9Ry!~+rU$tX#&QyjL zxiIk(I)uP3ED2W;JdM57J*>Glr?*yxHX^+!>6}x^V8Is(QxnhDmPZBRe31>J{^zj` z^uGLf9n7$Jha`puo{k!gFTZ)l>avQ_Z%k#;CvTOz={=b@{JRlbb2-ET;|dx z-jHM6=rwl$r_<`gs}S*!mb;e|x$1s9k6)goV7kB#va|5&;*r0ZSc7 zrZN6CBuHIcGBmI{Ht<~(h@RR|e~jl?rY(o%nocRtWu&&jkqpuEa^XhYA(=|@E(sPu z>%F(}E8qUGpV^x)fuM}HXQn@F>#L>E^u6Y_Ou3UXo$U~3snfRlcEG(n8=XA6{$51j809C&r-bmvB?u%Ddu>v2ATZxp|+s&NW?Maoy$ic0% zr1+5hwt%nF0-CoWH`p<(lH~V)yP#9CL_I#182c&D@4oFsn>Cgb&tV>zY(K7=qFMg* z#06?{nMC$a;EmW81fTIRW;n#3NwBpcAG%DF`Wf(nyHV2Xonx%#F=Gdf&5IfmaurO0 zm2h3UEXVW{hXX{$PL>IJ+LDqJ>S$;O;Yr}m?3r=$_ev=T6pfnG-gT2qFU!ylN5Xae zGTl!eRuG=OZ9UQzX;p1KhVIlrmCoo>u;)p(XDpbKV?z+P0FyOA6@O8mjMB>u-uhKh zJasMTbMLcmNetoB^UCwB|7sGsb}1G>`z`)6<(09Cxovt(|XtFS~(K>KPt1qp466Xss)Jti_f z=Z^z(A2xt5y;81-#|$vtgYEnCD3`22{`7J~RAs4cbCBs++8!{?f^3Bno~X0iduBIG z3{}0h(;$V4#C&<*_0jNR!#PnQ;#e1m+7wMYoU~nUdOtu#dEn7tXg@(XonXCJ7D%~J zSwhqxi?S!~H3otbj*@=}vn{cE*9!0ol3EjhC&_IjhCC99ydBik9Vr`EDgf!(rBLlT9P-XiN1pA(E$$5&#Y0}S$JtEjyjx%417>sv9D<-M;Xkav z%29hW=cz7I@)Ha)IL6uB6P1{J-cfv7am((vPMVi*6gPbs zO#LHXx>5HSE=#WIjF>34-h7Po?jB|HcEbFu?}-BjchzW->didD{gZY6IX3tJ2^sW; z1T78#&PLJ5Na6Xd1^w>IRTv|WPq=BxM=D!b$#*^2gdM3R!;wEyIq*)HK6uyA%asz-Lq?#`n0c!gG}BzoZ9lLTpd-=|P6!d*c*z4i`5^*C zFq|NUYgA5>#XH zOjo3nK#_0h2UA+7%^6RP@qGlI{+PT6eA5A@C+nm3`|%PXXg4CF6wEU!08v~CBblib zrk*y>6*%1L8C59~rg|u)t=y?85>Dovc_5DYO^-xt^&20Qj7xjMc+)?&Cyc*f>A8u(xr z#HU6lrZzpUIG@^R-md0(z5r;$Lh+iY3wjPDvZv9eDD4iIU2SRL`^ui*&5zl;`e!(f zUbC+kpn@;oSC9qt`IN=JzL~t!H!ZHP?(&w&)_>7VF*u~HhRNFH1LYDx1_iVo*hEC- zA_S>(zZO1c&6?hG>=TgPt?qlo@xu@WWTb_|K_CZj>)xm6zjc!dR0jvdXw*2(+718* zWPP1@8rd57QuVBQw6u_i!(;1HG19Xxf}O3^05S_F52bj4p3mGaO;&YiWL_>JM%=gW z+lU{9HO6GOQ0WRTmC0zRt$-@e5$W;$jL zWp6HEu%UI-#m3J5Yhz}`#7avk9T|4DA_nG!i^0?=?t2U1VKnMb8-FUzJVXo&456rJ z@@hKORpc^r9|c8*DlUfL32rTK#PR@cuNqxhqB&$f_tBd_mfaYWT%jTF9!jaO|hOddkhyHl6r;~ewOij zNbc3Y>_rUNJpyyx?t&?oZB-LG5Ab`-Jlcj%XG(mXsF=-(MkB;x>Pru z+$g5lW%7#OKKky^x|G0_wp-1~;EV5T)JzMv%_alQUGe%S_kMEIvzrMPjmJ}O+MCiQ z_@OZ<-ksgxUg;oK(+J?-LaRWD39-2c7N7uEnOVt+B?oTz`dSXI~e-c0J~nI( zp{PF};O(%~xncbI5?)p)nO_sp^)&;x0ADuimf+2Tp1EtZ$RzPB6^{^2p1G4~hy{O)oA1^w^)&3`sb;s0yD`I}t%J+aSd{ynqo zXm4Zrt)2fDwo*=JUKa6lYG|nm(H5(eb?66(B~u>V&1fDN05_5ws_ynPL1=j-JKqQ`-g0JX{#*5kGcN3_o$!%5rG zrGPf905|qV`4560t|>@}$MB_hRPFiWKuNYOa2U>g7M*LA7^$U)AI|Hj1%LAvE1u+q zFix7gnS6FI0A6?2VH#~LORhnB#GNoIzfn9Rh00l!2dqOi&X`L;Lvv(|AV(~*F>OVI zV%&8u1vdl+5_pXV zv*no(TADcl;ih{SS-lEewm0}Cm96Uux5<(zdVwgoWoQSAt8%UZ{|~H~a|=pu)ZI~} zN}JMpmqqtiz>F&zVaA3`0P4CPMWUs#!e3~gXXUZof5n|F@NM+g?~bS2@9Fh_GbH~r z?)-Zl{H-k%a<#RwcT~6d+xY45&vnqL3g(G;faF87jJ;}08B&S=YxUSTp%x5Z7hVqZ zCx8S1A?8me=#16L_{}UMZ`Z(Z2MhV?&Uc5zQ3@*R~nTkW%V^NxE8}C@T;r==5EiOt|)4Ljcs?yb*aNB-1-l z#lda>FD5$r&t5OfPj1>Vloft7NpAEpGRT^RSevtE%WE)4#K5Z>KykW)})M^+>@6fM5N?~xQ*n5&yy`!Sx5>c z=f^WCW=c~Wh_onEe`%lFl3{MdR_B)p3P|p~HI~`nB_@hOoxkVQk3&R%Y$kKp97utr 
zFXqJ#9P*s6iLKSVxKS0SV_=U~TEWF}uC{u@X42<3{t#3e<)L)!5I$~(cLR)z+QYv% zo^ATzC;GVr9R{VZu_uFggSw?k09#|U#z@y8tx5#Pw0U|dIU#UHm0|-+VU`=EPXyG0 z2f15TdYCtfJd0cxFNmd26egIrS#wMVb5R@0J}3@<0Z$?3h_zqVUVVanT3>1nOCi>z zQAw!8QkoxLM#zcSRI0B}RJQy5amS-A8&HsJ0PHwNc=P8{Ut`=Ib>xR?DheWFCTq_- zg`V3;M1I(GDrLzplD92R5QO?uW`?22RCRjLc4042udz?KU>yPQT z(^RG+PASbbUjw<%t5N3%o9`tfc2NLyUgA|u2A9S{XIg8ubIn3rbZ-yn&y{gPxpQUl z*jS%n7{N||?UQ1R6XIeru(&#|biE~-Y7On9!i==ysG-Z;W!lM=B%C|w_MnrwurQ|? zE7QD9bH!0Y$pQfbbBKM1MmT$D_s(SPqvG|GhvNbD#ywqhwoO`1(F9Y)qrU5?d*a^f zNzlI3YNI)6cZ$dueT(f_ZR#gjXcyWfb2Nr5I*WW%t;~C9%f`_tdy75kU#2nXKLT9( z6Ek#`8Z;H>Zwt*)#U+6fo_Iyo0|{Tltzg%G#gd1wyd8hw+g|cFwG`OSY?iKah0_Vo zub%!X{<^R=54UcsOjr#sGw*rAW@G&-?X2N8N_sw5Ha`$TeI=6jqV%&<{uR|aHT zi3;iH9{UctEdbil(ttOHeiS%PKxuoK4k&MihZYl4sE}q$8cWJm z@$a;x1XH#N{Yo!xs2I>sz{PAJnzjF;^r}wg71RYi=OvC`&@3c$zK$aP2K)*2S{%Az zqX(Rf3E7)}OGzpC2a9pf<+#JY_vtoRTH;scRW45CSSH`JM8*4zl+0`l?HlA!d+Tq@ z<-KCkMIJarCTsU}@9=u+;O*o!WQC}}8FP5M#H^XAE#fxYnd!7cm>dXa>stCotwGi# zUpc2==QOmRLDT$10?#VcuS8tq3x)}|R8)E#q(it6zU=+fC5JW}_mCgTg%FXt|F8ITYW8UGrTf{wZz z;w~+^7=hTJB|F>L*9-V@ImA8b;V zi*tFc*QiN!)?lc#*Z8>pJh4jkX*n$8P<0U=Ygg7LfeCXL=#m`)iwc()8s;_;<3h=l z5=S$=kUnsK_3=P8x}2>Jly(#}ToU6#G*QVmdMt+QQlL$>g%N!maE6rK(1VYCeu_l& zT9o=`=`e7*c^ajd{Y;-?uRjL`XJk&PeJR3x9x5Z<&MsDr>&P)n79d8PxtJ}Fk10|% z2yTv2QI-7#JdP|a^*U!zc58h~DqLtjnK}0Rzq6(D=&jZJutn~;x7%fKv7}E&|r`p+B-|D7t_~ z1}nX+>qK8Bp!{yI3|Jn`NYkh3=GEY6ZHp5Z;-R*Wi)g|3e&4Mq!%1Tr3hA-J-kRdD zH9C@Jt>1jjJu~AYn?E>^L;_kpEDWeINVnEYECE(1i8#e=j{|>p@A4BLA(?@VQ}NtL z>{c*C_l6iK;e9zTLVMOC<2K)Q!EP%>J(8ukVFc{n~|xVC+IEX2))4wtN`-XAYU7gbJis8dM>_IM`&bAaWqE!?lcPmNi7}7_=_a;=bt7 z&+H$n@RjV+O2fCNEd7cRmMcqZbyW=}XX7Rv@yPQB&W%mZjS{&&euY&$0v`U!I0@WX z((9yDgk;C!Xn^`kGElDF{PH?PFIrZ^HjA@hw-)F_Hck7>nS0BSJ}BWUV9H84xlWHi z#;M}*f}=5c>)?<{T+&eo@cSg&*wzMCXzAxU8i!52)rx_PYHaV4lUH-7EEwaSq zy*}Qlc!hMHa*$NKLVGOMWL2JW49R}H-l=qsYdaa$m%FT1V5e1{Y8jMPXLp~T>Sd3E1q@holE^c4Ekxw5LQl~e??kn$}4TcrLz zj)1YKJ%&52Xc{p)_wDUi5Qj_g9d>g|O?7@tZJ)wA#hyE*XjoYR~jX@ zrjS_T54TXr8CDm(j$Cq=&ecFZYs(3{n2eKt&q)c+a)->^d=|GP98U$!!_H2cbdG_0 zG4j{bY|Yz=%kYVt>{7~0Cg8maG2}0Z*W(=SC&2r6GI!u{$`6up&K~Fu&aum)dhW;P zOttJ_0wa>u^%I9T@i(`nAJadp=O}duKPNbpuh{Ok??Ul*8JF$$Vlyra!QZR!NP=il z7xnkc3SLY{y*X8K>6UYg!t(aOD^q6W2WIKjg~rXOmZ)rxky~pJ=#c#&p0=&PZDMj! 
zHUuz^xxwduG|lZT2$%4^P&&;xG)8F`UT{V@-*~oXpNZ}jB_0Bo1D7M#^f(|C8Le#i zi$PrCKw@n5+_hn%v)T=+_N4FzfOjRL)Z^?lMViZU?r(t z=)qG8jYs>?-xss7KFe!coc}T8hT`uB5Zl+&vGc7c<#N-$rYLWmgxdScb&V_1Q z2=HF`#=8pg`BC)t$8RcEf0x5)i=bN+pb!@^5N!$lUFuwtLQ))$sy|=<7#iZ;#X_uv z{PE-NyZ8G4e)r+}k9S}tM>9)@f5V2qd3FK{%2Il^g#XLo`Cl_2|0FpofEO7Z$YHCG zOK8=>6e7w6E`i_4G6|9W z$o{rDCO4*;Gd_;JV*Ij59r88&{n4^V zWrhAqgd46Fe9{b|9zV(KE!~W!y5un2SVpzvjfT>jg?eZtN1T4r;dhY{g5RQf=b3`> z>8?w`U`)`8((f-A{J}MEplYcw93)LO!q4WYku?7!wznLt^n`rU??j^kYW!v-6>8^- zO|fQ2@r0*(M%IjI9r?0+kV_`E(v=}59G&{EvoKmWt(Z~#6is=?FEA4!eWUf91F4J@ zu|M!6mqkpjLvN&n0{tT}JI0&~7h?9cTT)e+&MUZAr37^p6VEO%3}GYmPoc^kgA^Wz zNDfKr3$JeQc)UC|B-bEB{&>^z5UF_k)CAZqO5Ormss&#QY<`EI;Uk9o6s=6l_sGsD zHbER3moT$K_?x`-F^oSzm56Cfv>u!Uc~&94Z&@M{cVPaSPws^1E*v%m0e@q%j6bA2 z^ZT#AssqBQnQkrLm4Uba!L|F(DiP+to{;}COh|o8Hotwh|5YDS)KDQ1g8fXF02swh zvF)bhYYVg^qOJ@N?1W85V)SzYtgK`q79)W)H10F(>-z-P`2c$D887!+=p47np1X0|pm} zZz`g=uRLb1my*G>ICDi?+c?cUJ=3YqB2}#styZ=`Cbh6-nIz6+9k)D`MAZLK@6!g*`b?zOnV92`>+WOaCi6FttER?AWOh4a_T= znIiAc>{b%(w&@jIiT}ysR=jA0Efy-Wh`0!=Wec6zl9R-uE4@|b?b>{MmsAEV@(7zv zrZvV1Lj-teoWr3kn_N7S5DiHX9nlV;Cu*;jry$neuv3w|L8_Q$fYkwvTd(+L;{~w09u}jblfR|LU5V zp9+<^IBJe8O(uO#>hBQ=MjecNxUV|6AY;#^Xqh;@)Y|@mJ46z1d}C7U#9f8KR`rPG zEKaMH!cy_h%Gt^TQ{{X5{<(*CECcf;DFs?`r1Er7A|MT6|JX2I%AvjD-_ zk$h&(ND4@f8x#!3kS<~BvPx6r!Sas4n*8ur5jV0xL)eJzg>isa$l|QFKia>m$D&OB zSb{=N8UxM5d=9uy_;C){ZPq1<9xjXV3htQeqe(lE_^2=Q0jmVn4OARjs|Vukea(?= z^~*_K)#ok;z7!x@Y=-01V~gRiMiBGV!t(Dye{w(I=O{e-2b!FJ8V~{%Dt!VO96b)h zsS7mQB`!_z9=GCi5ir9?Hsy&IwtJ*dga>q`u@TB zWgh3)9k>tdtDNrM*DaZmM@ubSk@^Fd^K+&cSXvjqPMsSo1nRi21bfC+%HS{%S{acJ zUJ*_i$;&xFju;~M0yeJ`63iRE-fYD@Br|3qz5qX<J4|ftag2vs;m+UbD3+s8$nd?8KA8^|cKq6zu_z{_v0rsLKFI)rza{RmnnY9? zZiyscOjPY10@h~ddRkS92b|?i=j)eifZ_Y_$Mb-#A$l?~t#c{ICsA0fr@-%Y-xGDD8VAZOD)*^B@*1w0u;s= z_iF82cRNf=Gcor5e0@jiLByslEVs_Jl1q-(MRgxyad~NX#R8r78B^MXTg$k&ytEC@ zMq7^)e|3P&YI_G3(BG;`mC`Kb~s$qxsea7g6bjUiiuZVzLi z3++M#j@3)$(vW8G$t&kylyFEalS|SFfAb@r9d<~<+S*R;UrcUNUx#a!Vf@mhgKUOO z8&EYsqrERlX)pEGb>(B56*DqzKmA`5n`fSf~0DPP}aBqX#cq-%02W)t6QW4LF^ zY~E2gP5Sl-YL=N&BX6zY%TtBfA*d0`r}9SY5h^Q!jHP(=xqqhulUCiBq#S}WL~^2e zsQ4Y_H?B`qr0aHEWzES!-4{z{OwtmDq=)5J?Jj*<^m;qg0d1v;Gk4HWoP4Ln2C7VJ z$S>RrcwujiLBK?Pv{BHSU?4fn3|LdRSzz@}a~buK)(D_E*8R`ETm8o!Jm>gs!H;t6 z9LJ2{IC0e2J99rjX1s%`9Rb|HfB0(-IoM;E&rM~wB6aVr!3D$M&*WGe+)oYQMs5ou zuI9S3j82I@Xh@Y85i1ox19s}r-khSo*aYp86haKQdOhQ8O@P@DmuCqn5$tv#Y|03` zL&j=99a9J?6DV)9{1xeUf4aejd`J4J{}Ji`b4Zc&Uy=S_LyEHYHik|H|6OhWU&Hg? zDx7s;L@o=^@IdsQqGC>d`SL>VM!UQd3j4VqLCV{(rBNEML=DM6q@YzIF_LRCA7WAo zJ(8(nw^a-ua#Vtb)MJjY$nKBD-C{xgCn8$`{M16fT0eJbLmS3OMHtsK6Oz?lM~v*7h6x*9EvGT zOV3}Y)PL)^_>ohnpXXhS9p?^p2-v@FeeU8_QWkJu9O1EhC#HK3#mDR6;UUtolgm%%6 z7cd)fh5&%iRm(2+(1?gSXT*oWB;8JkMyRe!D??GdAa(MxxGB+Cq`ZNZBVxxZ%F?h| zcO-ob9?TezbHlMDjy2%Df5|ZjbhBV)3I70osW6;jp{z946@q|Ar7Jhcd2CsfJuwE{ zQBNmn z@6O{#$?=Hbt`Ebr0v4x?^GAVCV8sVE;2feexH7Qt#7C`gso5vzPoO!P>l--YW^j;J z>6ja21dx&J26D`NSKk^Ub?|=ptoomScA+~Ns5m_ZzTzso`kxGGh_;BCN#8b|ou14@ z+|OS6G$gUMLwsuQpMXkV>B^&x-UXe78$UKKjf$d@$=)M+2`VkI^52PdT*DkcVPR;> znH?fC#8l4}JF>4sf4LCUeZ|vqhLba!6~!r8*}X#l_0(2eZGiv9Dj4{G>bY_M_0;}- zdiQ;R>)>Q%WUpXqWNG=&96d=D#1ml|>oa><%|w6~)qjpRkWLWT00|);KS9=jVFOf> zQs5N8YI<&zJJxacGSDyDv4}y6eLl^RhFu;v?x%pb2!BSi;|!lh`;&FMNcy9POMO(F zrNQ%u7Tn?5~B?w1(YRVS*xv2{RT3XX+}Ax}RK0=FNNR;s~_)~2wn%R=ey2@0iE(t98sA>6D2`v`Ym>hQwHfGA{LTH! 
zo2XT&6cx*^^MW(YCPzRgATA;=zt`1HV)6;hFy{EkeT(?0$`xHhv{hjP8P`UV6C^`} zRD3Q*Rl7u z4eg~L1ThuEre;(d=WpbgGex2qxbM^uK(i54jk02R`6yD6zeU}`j_FW>^QXE4y*QA} z4j(X-Nv_796b4GJS>p!?N{A?h9VzqfG9nq`!4DH>5)GvlHZj;P>PU{)iiuSR^3j_H;EqAz zW#Y8b{ZtrrV(mr^juDbFeg{Md?SfOs&@VZD8b;NEK>pcGyT;@~kc6NZCyrBbt60(K z%>o5hNTo5-LWgU188BL{Qn}7EbM>3<^^^6D0rU$J+0CN+p*`!PSxIhU)?-n5BIzC+ zFykMHK-FMK48!+cMTXHKVrDISY2xY z$;=LW&}yzO6)Qm|5R}yZ#1Uo2a4m*n7$Nt8{zvt+#8MK=yMO>r1j8GtO>2u1d>1-y=h0lDG)sWY|!U(dW+U!Nig*+%rzZ$K1@ukDLcj%XLci>sJE zbaxt|1i4EDa7V)W%dRW(gN=RG1m)s8SwyJOt%{!Cfri?|P7m#60JoEQ=Mq53sORhi zuTds@W^&v>j*?cxOlPDKCD{3m>>@F|(SQCB>R}I=iVRePl3Hd2-As!$*~`4nyazxb z?&jjDhec3w&9W;c|D0h4A2XuA!#wBi+UdsI(&3PXO^x_FXzXnQkjpr4X=&H0exVD= z=17;`&k^W~uqU`NZgdsuqZ&`Gn6AE@TVQX$W?rNdD7?D9{%YPO2Xh|eY9F|xwX7uDHq+B(C~iyDRAinPavEEe46y; zebasgA`US-^i(VK`_CZ1o4?;9pDdjjT_7^J1adN zAZdf(si$ME4Z?0o7@HV$X)DMXEBygrU36BIcQ1$;4-G%y4b31%g%gMATtb5t zc$=5REi#Q909k6E*bgM{v_jht%PubEh(%}9T}^4Pi)AKz#LQjHYM(d}2_@34(Oe`nbWT&-@HLkXd^gkp@GJV67D4-V zb1U*+l4?%wD10ge?_ls>z`5{&7!-@hI+K*YKhP03gw&=!;-aqYqVY74t5_W;+15qX8)Sjd4C^|0GIhA=gxH9hWU+ zmuf*HTM?L2XKLRPgbDf3NMwg4(P5Q|c(}BJD*9heY@_On|0X8i==*%D3SfuNst z9V3-4a9DafVt`|h&+`DXi#hU+@HN;Yc<=d=miiIcoNyK9?zxQ72hI5I2^RU5MRbp5 z_n|D?TRrl{v&W2~01{Ho$AH_RN z2kwBs4z7F1234j)2(|k_1IQTysC!SN=p62u>$8Wo11AQ42M0|u#17u4Z;x|cz}1(; zJu}V~`e}Fi8Ny;8Xd|e5*M{=SX(hOO52uu<9QqyMOP@7)w3`x1-x)54-rQ?E`1+60 z@O3wV^@MnXQX393Y@)4{+M!TOYz(q@r%Yl5cyXVQ3r7z`!V{;GYc>_xi_=YgY=fpL%Jn)0mFztfA}QcOgYkx2z-yD9`0|knOvrAyzL!*i)xzd zgi5mrh0x1Y>XZZ9_87y$Ws8Qz@^cLH7UFTYFtL80b=`waUZ$PrjBXo@f>+Of90iVc z$_^7x>A((YTs#GidBfjyR`fdpBm|yj&o9aeGy%gX1e#^s5Gy|K?And4 z2Zkl|CY;qo$(?j1s}4E;e?CA5qhm`n_-)J?l~P2G04#dRR76<0;e zhc&&`uo?T5KWM3vB}uFATKbf#BdFzDK@-C(I6m!Bc7Nt!uYL-hL4^!BTUC--=Ul{Y zNt)SdGV~Zv(B9ejRh|ilImxgl;~C%*_8J5M+Cr-ZpNvLj6v^F@fVNdZ-O&4X4*J_E za#f7l&4Ot9OjEz>?wjRlJ(F=huQl+cWn$vD%z#YOR_d1of3?GnP3zJb`2o)&VF`8p z4~x-;bRBa0ce~r}{{VdbXF;3#UkUTSR>I$kIzEU0X7Bn=qm9i>{$3ST$yN%x%80|$ zQj)Z=bw6`t9a(%Sw!wu?l7qC?d#b~IHBc-n7@!ew%@G5Fw-tF5bQ&2pSEY6C0nbBD z?*nr!4!TyH-tgOGy5FYTo;Dn0xZm%V|NMycrvkV%@33HQWa=z}10~I(D7Xn&wRXlx zS2M*3d6mnbm(?KuxszYv)P(f`$tM5XOy$AC(x(WmrQo7k>15khs|kI0>e^IX9z6f1pwA|=)NDf-cY(a)av0!yhGV`v^?=f>P6U;<uj4-u44i6%(>XHWi@A9*ueBwHgB_WlI$$Fd6BEa;xJmhWf_Z02}Wjd-_2)7Y;h}? 
z(1<~E8^JFK@8>%$F~HFRvEX*rS|TWQ3A23%Ib;)CB&jOUYj}yKt zZ)098C;>D!T<_UE7v6QdiQE3rt?2YE=-};Xow@m>XjOq)vhfmr%V)VzBg96!DP8kh zk$rOuR@<$B(+!w#Y)nIf1a_C%4 zs@3gKz=spbQs%VcCrlZKvBQxx==hU*J+z&}lp)7bCDj z9;T_cW+qFMj%}cDAVcv#lCrzpM1SR`NtJNV6SCv7A z&}F5sDfH^KCwnCxd|H*u%~>%84ofpA3O5G*Qsw0ZS>FmBHT95KaGB&}q%_(AnqQu; zz2lGWTVD|_Jj;|;z#SUB_oErDiTG}PD9vNT!oDV4KmBMMR_}JLuS>=#rz7p_6!BxN zk$w{n(ho3b5aJ~{xv7MA#yPZ(qoXMTx1c6QMriH_x2U?C93@Il3kUNko9|pu z(_8G0UD3G|w~v$CXSZvx@uI)`tuPMa15KeF)&X0NXSC3-rS&4pyG>VjRiiM z4Ho7hCQ8av&l~)R3R%{o)!$cPu5xvJj5tWUmTZ1v1;~~8{M%sHUrJId8pq?)LxG1+ zr#%;vUi)F%*AHHi_eV~a53R@B`T0D>i}%TCN~*4VQHPE`@JSNEM{~VGG;0-0?mVx`wn`@r=-@7okPq3<;2o=h`XqPfzcTZ9`JmZ zBekGTjQuTj$UCaL0FhbJAmya}j?p=&x`TEEp2G#Q^@7U~cvJX1b52C%t^V?P*JEVx z>(^ki@n?T`-&3FfA2#@pPIdG>SRW~m$f_H3iR6A2I2;(~nw^F%zo@^_m+vs|MK0e} zyIP3^Ov%9k5v*a-6*)Q%enZDfQ z%J{Yqdi|sg4uO0P#{p?pb=2&gp*tGVBlNGxo79?`VK(ZBJ`$XVB$-QN3O;1>$+FeT z1I4_o4#Rcyyb|)x_Tw@}-Q2~{*r=x;Y&-J6s542AA7>nfH)WU!4+Mr{7pOQH z4*b?|G7_d5n(iFbzx_oi1Wbc^r{XsRFW0wd!<6GgoKdsotV6Ux5vm=FsHWA;6*%M= zoLQ%p0N8{K7&B7VI-G?HCzl%0ErwB8?K%V3C~j!p6FscOzOlUnqme;B77`bMc|~+KlZG;X}uMNgnwj^$ILLDVC9!vS0XcLvNUuwxD9ena7-=}CW!&qldAw9Y$ zhY3nl0o+o_fW{gooh9fPP~5^mOi|H46I5JI!62)#o0ok+4D}hU8LstIb~SVEAEc#G z+K13;vCW+BBXopm z*Rj+7E>YnzK|KD%SWMS#8lg9HykB;PFd-=xsp)D8BTP!yj~ffU$Us#P;q`!g{)muF zC34F6HY=B8F~4#$wp&@sBjl0m!CU+>4!`INPcP&6e5y;t9;d|N4fP!~A^Kc{o+VEL z7y(Um3Ykj*iPgTfa-`i3NM)3#cxL%Ja->Ej;SWKCpvCG23*`C#N7*}u$GK=>!)?>3 znOGAywr$(CZL3kEiETEvjmEZ<1`QfFR^#-WZuGo+pZ7Y~`~8}k> z>(ltp5AkF z7?&!{#g0l3loVNL!)m+X8jQ!;X^t_FAr#z7)hZFLPL;qPk^5d|$y0pHIa@XvaOKRTN6ySrlXPIH_mW=dR(p%y%LLD+?VtuI!*I+}o`eN&4HPZ+A#Asv_rT%zR zkL*(U9RVaSyqIXd9n++!_?YiRr7Mr}tgTmGdtyVBM$uguY+L3Y8&AVUM>E^h zmlZUlMuaGLc3+8~wpf;9@N0=pYed>brXek3 z4B4C_#&HL{-y3vJZBxgrwG*>ImUzl7X}x$zn-X&*)`o!9UBi+sz7d83KpTD^yy-%s z-jMKk!TGEDL@h?UBxSXZJSb?KTBGKY=QyvO*lk;yaEm#8^MfdFNFAI?tzkkNE&)0& zjPoEt{c|4VaS7od8_=WOe2oMF(jjXf%Cp?CnhZgb+lS(HseG||FNtXgmt8)=sk1zC ze?%79O_(Sq7~l{k_Tx7OT=MrN&3c&yu5I)jveqb(s4WE&(ls$0pMD1T#{lu41J=IK z4X)tikfXWKt*Zpfp%#`d&b0_J!rr){CnWZ`20KKlm5NYv0;sSAq-IaQu_^OZfXw3u zDbc7d`jRr+o$sfno%J2*Z{eM9yatC!i@uJzRZ8u=+wUxnw_bOyS#%%HA;rcDSn_+MT=fxMm@H{y+tu#N8YR|LxL5DBlb;0J zm=ykb9<7saRfp#oGEU=jo7T3|At^S86!9F#glD1XWf_naMd7`&jS$ zi?!@H#6(U!%-B$)SAl>)TLEkkrE6}Y3YXY4&VZAZk98X z%5A=0z(e{rOrwqS68d~B_*REdP*>b5p zkbiK&U7~|j@E~%8#b*ddpdb#GXef3-B`0PJ>Z_0oyF>kIm{5lhW>VnvtrmCcaXnqUzP_qJRh8Y z3jZ;N{r5Q*<8Ryl-JkxSJm$(ShJPm_Gq4BzC0G!w{7YgN$V*H+wwNScX{3r~5{waB z7`~JTyYyCBKsh|@XVL0w%%=JYy~sP2a9FaJ^Vi-0g8sV`7e))~wv5(e_I>XCjI?@w zU!O;WoFFFQqVJ-ev~0NQ3?}76v%b4tQNtK7-UiMo%mA9@D&nkjNMZflmZ4a!8|=6E z2tj8K?(%p3+aoJ`*brVL3|{@WF)a&H8WmTG3$Y>omQIHw&l2gR@DNZBxrftnh7w@) zTyXJM#-U^*KGAmW?#W9xu0U-zx6T}x>@Qfrl*lB4-n@`E?Fdnhaa#mJhO)IbuT;}y z$8t)NlUVLH-ElE-3^vR&@4*(sBBSPY3}>s}#;#(Pp>FYnVsFnBMnVLb<$g)XD6u!9 z5uR@bX5SX6h1RFeE^8s{*jsU< zV%&H#p$qcf<$rI~7UGHLTmyVaLGhD&j?$}w&Ye1OAdYvFz}J-k6l`@0o)!z?8^qbD z+v50G=&n>@POCME+)Y}?p0E|7y_Agw3QUWdu3oF|dhHwA$imBqNs<$R%;&+HU56V| z$`y!|;RT1(()D+TOE>f_cbXI8-|{6Ehb&AS7sn|<+oo*+z^VZLQi3OlEaDZ8bnc!7 z)HRL)+XeY-qX${@J!=$7lCf54qz@}^S57Yg3*e3XGy1&Y%Kko+xotz9kl}+tT{vtV6_w>dY zK=;kRk<#<@sj%ft-o~XiAsTZ-8-R5@MvC1fN!}*DMBXNmtE*GLK4j!160{)nQ4i(=s~q^6_Umlta#D=O(bJRjORSsQeg^xQ*U)En`pSTDXtA-9ovD6f@(=H{V4*dC*e$KdoV1aPk#r0b5Y*xfPAG`@=qi*vxQq5$ z{I#yZnb`86l~*=A1mjH-&SQq{Pt9qeJE} zEFKsXF|rb6RQWkRN@hYu>B_`!qpEFhS~}mCL=g|n+KDE4)rW;bOqvBcu--;1`ivM(48lH<~V^JWE~`sQCORKXmYT}Ljb|BKZmH;+eT>4!H-*&pgloan3}a9A!4LWmI34Vbmy zmgq6vd0-Ue9A7;BueJ(8bfaaN{b{ocFT`HkVkS)VxpCVBQXiD0p3VvxL}mMTs;Es+ zz%DOUWhD-UsE`^5s$HKc*jvQzjWE4WY=x`Z$$@vfQZQopoc@YGg?ni25{;~md_%8o 
zX&MPr77Jf0H*$0>u+=a*XlQyIi_>+yBlc=A*^4e*Pm&MEsh@{fzl5-`$4qH9Rc4#W zyr@Idl_rp|QS?(4`)vYjatPf)5yJPL7P$iBw?RB;WQyx-?Uxhn+8gOO3Pxp#BSi}B zT&O1)l}8aWMlJbwi%De}U8&#sC2k0MArP9^Toq-Z$(2@ECalXU(#XiLGq61hvFVL3 zgjKft-Xc_tS5X&<5F6`6n;7%pvaWrIjCO&LbSbds0ZA5>C`LgqzY;x^fR>$(Lr52y zZb($}48_TLi+A`wD%u4}jGDSwNh>_0!GCl*F1jr_+GSo+M6@sxZGS7#zbzX4QTjka zyhDXwqus+G=i}bSSdGWbJ6jS_|Kf-`ZDw}_y3%Q$8iT@ey`I1cV24tQduWKz=D}n6 zd1NYg-@XZc9+T|{S=@P;c^W^J)lJsx5;yc7>n;4Y)k?R-`wz#oGV`@PIF9wOlwaw| zNkm1-FCFOD3`5v^e1OLHkd{E!96lYD7@KnpmW(QFHT4)uYc1dgls$bwbMJFvbJZNT zXA9hEQNH_bi{W+v{4kvgNj;9~2ZW;{8g1+sHV*T3b**|wsdrzs}u8ySI_{$N# zMZjnG&=ReT2+{_fWUp7NdJ09zhp)LO-qG-whs5*_?R{Xar#g0%E-MnNFI%+_ls9PK zYn50MlD&TKJ&Q2aSFB)xUmnXvXAF-UTl^4T!c_#Nb}w(7EYf)A(TqD#v#f0BxrLc`qeqmn~(0MT6kWnNOnDh5Vau!Ec- zl~OA$M!A!E$YP_|YzKU8JT|p4Do5AB8t0+G5oGOixWts1FSIdXFb*g*;;k*IjIIXY zZ6(I!)X+S3`M^l{H?#dJ2}pL*hJ*kJug_83;Y-euU-DsytWCeawWD$<;4D9>vRNM2 zz)f+?>&h(>#o+X7Z4Xr8{vy!&Wt|IuZM52woOHn^;VPVLKYAGIanqr9c@S1v1g)Q; zcFI9nsj|vGLM+q(Cylb&m?kf#9Mk1_6_OHYl;)E!^bycx;Zya%ARSN;as$m{txcAs zOqVjhi=;7w;)V>T&O*v0r!<(vrWvXXs3Wd8xUKw(XKTPoW>iX6kV8k}ic05pzsVZ* zJ^VnUOTcIVcdnsW8gP9MU{BOjuFIxXX3tS%NF!v)0%4z>V6U7ug4&Q4sV)QSd^I#&MKR}p(NP3g4lFrIiK zPH}0nCQG7!QifAvXmMxj$HmsS{D~o9gidCOOz0q2qY!n#TN+nENf)Hr4x$~Tz$*?# z2aZZ0rSN!L7XHRclf|=Lvvh?O$`@omRj-`dbXtTF3wCQ~Of8#Kv{UDc+I8Xy*f^Xp zHn&(s=;GrWBsU(LiGH@FKw0clb6jW)6 zXPi;%!x0S)^ID)HTB(y+X(;$}xmsAaFG-QXpRKHrUU6 zVtcmC{Tluf?auxj)=c*7C3%WxPkslO=EmXs%2D=FYHh*a9=wdZIrrU`1KOHkc1d-{ zJrnvMIOQbsXcvC&pL^%u+_=pQi`9R+Y}45ALa4*D^Mq7$ z%d_*`vW!v+TCVQ9mkJ5b=~jIC_~k+~Y|6@K2}Kza!t7m>Faxx1m%*VX%+3`;{wFS( z{!>=e_9>l;sqxG#B&NPo);_l$Uy&V1v9o+?KqMc>$E#;9u8J`nh6MNX>vy~G^zrUd zboM0V_J|J|J;f8N(;f)XsnjhqAIISucF_umsaE1qdBgK!(v|r;wZ4l_l!~iKKOZp7 z57B3}*~sxWX??HbbTw_9-!(1y)Z+5p(fRv#tJ0x~QgU@^-cG>He4(;@hVoQZ@`sho zhij|S;*?xI=fipVH03Ve#y?&R!7sGJ&x6v1av7l+dl8YQox^KS3_2Qs2ZzgDhcWLh zhK8CC?Bl%?(s0lNbq?i{CvCQtCwXmPDfnjkV1#deglmRxI<9g_rvvZ&FxRrwcNZsn zv!s&>zo<_&^T+Ij26yalxq4i+&pvW|-lF+@uSZdCo3nj&jsS=I>Cu~De)GUz^dmIJ zE5w73LiRo2RiqOka@W@NtfpYrX$X~L7Fu&%dIo*=GvjDUiXm0Bdog_&AVk@^DXs0sKX8T{1*QFmaeJZ z6Fo9>rN9h0)_g$?5TGf?**$MC3ux1qia0bKm|$@l94bF~CDqHn`Dr*$z#Z0gsq1`X z+V@EG34LqdZ~Khj@{axqk^jrZuIDl6bfepzW@ zmn2j5b|wCZB1b@ZcZH9CCwj_s)pL0bv#Lt%1)cFr@qH@AN{u6{eCt;OpsZ_R(+{qG zIlQpgve}%7FvDIp@HRmu)h-+z7HEvSSonHJ6{ndYen>V_iTFAjlZj*BxvgbKgxR^J zTj5Fn%|JR)@@giR^srvmWAcaRPc-@{W!O_gcgrH`>A46k$uWFDSI&#Ou-wDa`XK;9{N$@s9iIc9!`XJ%06w;x0Orz}nZ zi(B&u?hfo>_Qrti0&|s}M_zk*iKJT%6W}SQFTCA-7I^eLIZrs0nrzYPQ)=7;xW6n_AOlAXXOtSQvBD@*Wx$VlVtDr zWNn?8(w6sk*Z29D>wi8yf_DpwYN-@T#3#O}ySwDP6;t1~$UiLipt>-9e=l%S#?V3D z7bP$-*&r~mnk?T`_^5I!wLEQ?kGDCxqj}IC5wcePMwn~9RG@YT%da`@dpD_w@Ia}W zq^naDU86{Gj5oj!lPv8*^PaLFpAfr9J$`O#u^A3od`efmY!B@{1 znNJKHa72-8cImzIg>?H?_t_8|Clc#OH@lmXJ}=suX|brt%}~6V$=iP+K4s+iK@o|9 ze?jqJap7!%Ef8XdEA1@*3QFD%Yim!_ zEq41E+rzI` z-M@eKTdhO4{bANw#V(x2W^DIt$RCRGLO_Y7>DwO}w+m&O#>_tWxm)EBl@2@MS!A(N z0F-e$`k<_4Za6teWA^S2_6mAwc|ILvyL03GH&=<9l2`=x6xB9#TNs_Y?nV5WQ6w>q5 zWQN2m>?&yq=oc1NUai>K6k;u`TY6?vI#!tzI4>&GGjS}cV-DYH^Mkh27?R%vtFt&s zJWsc8AQDP}p~kC)k_p!lW*>h@Q)QKfe1$b8ncA}r8$4t)QhL;JVW^8C(4e=gF4r+)vS!TLRT zaCI;-bTR!8Ki1!xvC6XGqy+RWqNS-U7eGgQdy#JhHyi-XN=f)eN=cbyWkvW`QcAWn zPyebJ@I*+DPeNwJ1C+-Hk0IQPWoZmJT1F;6Z=7BE{*{vk-U%1wfpgL-I46-wWmUa% z>F8Ls0SSeS<&ODGGSuxoWM5x%2{Y-JFcvh;<1fbq6P>r{qY55S-Zx!m`gfRktnE#( zxi={J9%RC%jX9Q@Fvqu+v=24jm1{II7#RcAuq{o1AdlvwE@R+S3Ot|~+tufyM5BZ` zGMSfBPc#bzQ}!7=Byd`>s`TzgY~6NVcB0}sH!1hftFtpIq}G#m!h=LJ<>NPKT;)Kj zqQjVs03P?4VAQeM=u9G=gU?BMb&(`bT!ZRMqebq>;#G}kWa90G@l+ScT*Vzm7}BZ{ ziw?#j*z63b0*_LQ+>U(cO+2(-x&~3!`?sisf9NNfjD>G5a=^!9FQ4t5Yx%7?gJ*pf 
zL0p?v>(K?}mOCupE~BR(tzr!Khz>9=cbuWv`@}jc z0+N-rC@r_RhHSNj2pg5{?XmK{d}=B8)(3J#FCB_-(e*5EeKpE8GkzU0e-WQZ^yLyP zvl^apa<4jm`C+KGBiQr6N7qi>1%i6x1C=x99omw#fsKxgO2 zZWo+db&>mV%n+!VurWwG9+y2}d?22|%EQob4tXm-`C1~4TaLqCh=(Blkt@~&H1s@`TpW+ z)&13&$9Sy$+u9%f&WCWIFsYp07HXG{g?FMV%cy$d2LsdFeC9y#MP8{%!(=RFvKz22 z$spx3NGO-|);$QpG^eQ?2XGA8Clu1mwH6i7jrg?-$_=;b%|dumeSBX|)axA@jbYTX zK)HbwA<;>uJ{Q_)+vYZvsGxm(m(LwSPvOaOTg&)ANO9T4?2xs#R>|oa7fQ? zo)ClTG|EUt);`b^Wfr9K5*Q*716nH|2HkAu3>~cY+bdRpeXel%EjH8&t}rRAF;8lzEtX>CftM`VU;c{`-W_mS~~@OrK|-xGIi@^w8eSuHjtj zMCQl2+He!~mZ@~Zf>uo4m?ZN}x?P2ai&x7$QWVO4ION+}UsSRQg3wH*MZ9|G3cCc- zytvLqfSupC;>o!BR;2?whh;5?28nc_+RGBB(LaUc!dQajhuPwj;-SsVoHTNE8$|aQl(}0hdGTe6Zc~%#(I&- z#`GEeJO(KTUfmYwr4oH3R=8&no)cntyi; z|1ZMchORF5!uGbd;2o&HE*O*kmj7)JBxFex@}E@zki1{$#wh@=fJ=$K4idWRUL9O+ zTIFb;`4IK-Z~5P@bGXopK_%7KN;SJ#7stzK%;x(;)%F{&*Mu{2`KC+5`lBNug^!{Z zQ3=g@uN$OhOY5aX)G{VWzA>7Y*oxMw%<>sgG;s@ql;mR$50*uMBip$QVe?k@x3Cai zgXbQ-x8yvLmo8gfaBp9$Yn>ERZI&+Z@@7fltX1dj>&RGk_`c6zsr;n=XcfaomrQaTEgKHO$aIUD0p>%P`dgh*e{0Sf??<@FXV^l^ zo_pMfk2XDmT}G_bnjEh?q>e^$KT3*T`~hVLJ1 z70$c{Pud?!K2_>9ljq|!srg87T+}+=Be?;dokx;?a zPcvGrgY0wZiL2T`DbuNL@1;J5tEZfgw|2^422^G=kE>1^(YAa#+qloypSgpsM24Qlq$R5R zrfQN8F_NM*WQUr94;AYHex9GkoD)~tmK zLz2VW%DzokmWnyr?Q2FJ0fN>rsE9pY^gEDIq|@`Yye*8oF^q5E#T_(&Ao%Da=SI;t zsSImi`8HKXmt&ALyw<+usgC~^`EZo+ntu3Z3r`R@2?-s1PLn-D2scB>j28ISe~r=3 zMYuGJf#?92@-#nnnATf}>lUjyy6wD{0jgy+cb$8yQmq!`tsuGV(ZYZ8hpMel82wQP zI2M2ZM=buga|r*A#lPFzU&V#Jjp;wT++TYN4u($7rZSdxrb?!UCZwK$AE%&wzn7vT_-9d4U+(ayuIBLqP}o6Gbv zg2F0jFa}F+DP{ofRODggv{;sSGI=W9ZztnFei(|uI%WMSP4zu!#$%9q#f*YYIDH+X z7*;XF5{x}z4VWid8Lsp&h`l2jF$Jv&em!Rj!qquF8&Dzj;xf}Vn`L!UC2H)VUt4gi zK)<~fBjT0yQf_A0&x!PYeN{5eq1{o5qnVjB>qO&r--yc92a2`;CW{h+b%<*0Rien!34u$bO3QNB-v;gm^x zF66JUSPY-!xraKC?I>L28E3XwC2)KY25a-opFW-?@Qd#|+@OXx`is$SaP+s{nv59E zujO-W;?d2v&qowQ?ki6+&>5sZO&uI5eQFkBBpa<->@?m(QjgAG#}{1oQ(!Z5OnQnG zbFPd@KgoZ$uwsr@u3uv?YVKEid1(2qjIucSodX2BBvG5B9(SYs^P69-Aj`Z^wFTI) zL;j~>)BLw#|83EK91Hz!1^}wY1{D=7^ywRe&^j?veK zw|&!~%9U)wWB9^~JY$Cl<^n2^snndV$-x+AI^emyOtwc;{0mugdas)d>4wS@@C!32 z4X?Ac@5U~)fFO8$w?)NiE}>Nkg%Tc?WKM#(4{85v)lGD6Vzp=%f1Bnu9X)0l0D@IH z9CO`H`V0K}ych}u;4s=jef$S#*$g;^DTD&r<*f3{UadM*N-~8H1dhIsxB?LW zgpvA2RJvHV5M5J#BG6a{Le?2I0Ol8bYB%Kq%p|=imW?AFp~zggj4H5OhjV?|nI}~u zO)SDqYnru4#*7%MePjLJwaw0r#GN-I*6gy;wsXieVl4Wlu8|hdtixeBo3mvDYV*!A zqybWX{ZU|j`p!r61a7~P9H&9jC|%IH^chqprS1|m!H`*v4+%I4@_@Jpm?^wB{UPO> zOJ2uBLbWw5C#t6NgZ#s#z1|q@P-E*>mDlV!j#OWnbxYU_z|Q}Y)M?b&Cjd2W7%=w} zlY5w;`nGDNTiH@$l+kp*)uB0&KLU9_!9${N2S(B{|6Z#aN?0=)M&ro!B6*sX=Z)e_ znbJG2g>TWs7I$?+UNTfaUVZPWWZQV$j_*f(kSZv(0+Bw%Jg_Hs;{GBaAZIN8id6Ze zPn7i%$^0qcCTGL7|BYYpH=}F03B1kT*Mof1Tp;Hc*vnV6GPBMtn1PNu2uMn)-RPJ4 z@Oj1O;YiiNSSiEb#I5U{`D1wnPN~iK#d54dO0f4B1l=cuCypW8b_kef0k+_Ci$w<5_^6?+cAWaY(uuWip2>i!N^uGhW z(SQ4cmb8$QJ-BgHFm!UUbg{Izle9ClC()8t29twM&T}$8gtxfKQ zvqJasZ7oXEN3Bo$IM0#lID*1pTbwRa*y==mc3)dEllH?UTR@xfk%-Qxl1_p>& zdbsB=uk8^k4hzF-CituH&TQO*;I0@(QjQ<1L|)BZ?wQHgEEp;suo zfF(87s%;(hqeQPEVB<=YFr95d4?leGx#}vJ+?RJjvU{D%xCms4R?ZPvuY%u2j@LI9 zH}X>z#;F-CV;*j-hN(>k^kFr2)K-pGlx<%x#vMe<(OQIRimQ4PckWd*+ z`$<&+qNZS)XJZjmLwA>pO}h9Qo8(B#E$L@U64fWAC)=Jg%EJ{ zpMsRTgiG5UxkA>9YGVcYjoB11q6O%cc$nXA;@ZB_bm23mFvEpx+pqUdgl&risxvb} zhF9kcmW(Ql{=w_?<@MGg57?H||7UD>{%y;D!hC9`PL^h#e}f`QrbdP~hIYoL|JgHk zGqkb%1^E5_D@!|bc?Z)!jhp1ZG+0>w^F>1k2OCck_CH5fjViXvsOo6Gb}%$JYH^|T zB@$98Y_Rh%=#6h16H}7q-Zvm~jk|C;*SGdiXLPLk^rYyUsv6AKJ|D>VsTA+q8I{J% z@y8*0$xrg`ukCZ%`@P&exWAga%O+G-cH9g_KYRz8p+z3vOAy@+c9ee0AI|08x*uwt zEBr?I=jUxHc``8w(xEA1rOl^nD5t~O=95+CWdr9iKul*n-*OBV^^ z6Z{$BFys!)ynD~DBo}+TixXRd>UnJq-vpA`c$;lCB+Kl;Zbav-N$eMYe1sG1E+yOb 
zW3~@G9em=oKRJvL`zA9VkvczL3?zGD*F3bS+^GhkpZFIPiXvc3q1;yh>eY-RkzvGJauclcO z;tJ$cxQ%I4M(Woi(avj7K+ONhJtK8Y3fpS8DF@}=Xv+2By)cw(US>3Havi>vD6pBs zmXpfdeEx&=NhL?iF=y2~%H#A<{1T|XR=d82Lb!%;gUS!{0TFbF!h$Aswm%w80!NFv z8vM^JI5Yaw4AFUkGuBZUZfb~1PD;XaX=EAr4=9Nu8K9S&v^_Y$Q}VEI8#&i499r;< zrbjf~vNUvyl=+$VPLo$SJ>v_?r=_Mj5_Bu*yKm^7GFw80+u^gDs%);}%~2@@p|;1hmtIC0)509@EC#seaoi!R_ zMd@OA-syxLr*~UDDs(19UuG?yPI{#bBCD6d(F|XFd3;}^SCtt+(UerAeo-Rc(|PP^ z($lLH@0I3>-*s8Q=iufQvLogSXRb)YU?BRwoptxIU@d#B=w<4Am{^7w4|%9-f!Geu zQLy6suO(R4>^(fgTLHnWChuB8rD-f#RC2rL?sw(TV_?l~`J*W|M}Jys=FI_DYIF| zRy)flnNbmDiB227pOEH!IDVX1KdyZPD-GHep2KI!-8|}1TVUNT(sTFa1-guHNL0=R zmWrosPpK!G6gqC<3%?`k$WaxWoiE+#Yyu>vJ8{J!?v1NGQWb8Hj> zII+q8yvKyHoEF0a=a|X=i5dm{&N06m>VtP|5Yqg}uObH)5meHS8t4zx(h;*eL2Uf6?rEIf!Z+vOJM*YI6N3W;mZmZF zO)DvoR-@*|(%p^}D}C466XXfMLwx#Ye`vWt2e+u3&RATa})B zq$Ar~H?+L=Q;R9;8lVcN1w`X&)UXc%@Z-6jqDdC!u2?r@K!U=&1p=6gB?U*CxHr|@ z11I)Bofq?LBO{;qj^9XQ#Iw!AomNfdYoLtos&p;LbPe}Y3hls0%3Q zu^@Yj;8~FF@h2oaVxhJuZY~Um6vItRHPR(_?7pcGov>DSITlrFGO>9Q9aD}aTeoHu zv3PF9;s%yAWm0aFZm6gWL+Vgs*4zMLxezmz+?(^r%DZUHpsKERy1b5@Q3d{a1s0nb z8eR?4U_gDd z!o)O}NSok;(&Dv!fZKQWiX*?RK>md1mNNZIl?T}eglVp4KrvKCYP#(=Z`2fhq8kR@ zM_l#vRAo5`j!FR-v^%`!1FyDS(A0(K8?Gpv7RiTdlq33D*%0rq2;UGS6C2aApE1od zQvE;!{2vtjxIbFCDag_ord>yVw<9B#{zVT5LfcK%7TlZqlca?RInWyh0o>RCCI) z%t(&#nLM>J17BOD+3(%*``*KM1KI$=W=J8?8cw`D4mOe!=F&mswP)a7q=+cQE0d7_ z2pkGV-LXK_s5pSM`yAOHAwKLfP!Z`;+B zK~)=-jJO9^3T^S{lnL>?-FpLZBo^v!NhWECD2Ssr^KwMZyL zNI}S}Hv_vZODJ3EZ37M@r_HSe8M|F?sP`{;zLLG~wPzkz!+$et@s+5n5%%%ZRtXkI z@Z6~9a<^b)32v9f{TFg;Y#E}!DX-pcOSn=xQ`9<-9^5l_K(96^~~$obEUxa$Ju-`@M=Ni1(OJ*N|! zQ(_(%_Xr@1tFTD#c-pf#_vj)c7bii$6HI4RZk}r81Qb}QeMg)VgN4<@BfzKuWZ;gz zuZ-~@qw31H;E`$+42H*5h2m1fYD0fKq}A}sEPG@d}T$OH30FG~iB{(6s9p zroNT^kQLrNEr>zN%4PY0sj5!K-RN1T%umM6nDztk6{{#|Hco0NjYanLXt)m$PqeLS7}>})e&@y&bxH>Zj)`rEiC>ahI_uR{|WsF5@;7oQ%b=#ba* zT>;h*b5I}&gh+MrW=2ikt|;aOB{Q741tm9}lrlsISxC-?eSdiiUOA>VP>chwWG@Hb zAlV^v*aXruzw8csJlc9tAY0bEf`z$)XV_m+#=)<)V;&r3eExA&`)`6*s^3xO@2gsH zn6NW7{sq&3-+}pVNQ`X^ot^*eSfk>)BOtfKRyricemi{YA3FUmW~FGhMyt<&B})}X2~TV+H| zuH5CTMg6b{UsJFv8WL!e{*yqYb*8;4(EH70yBGl_gkjn)%8{}Em}KYc zpaB+{lY}v(w8k!=fC$Fj38E5c64;LW$C2IGJN0rq@JMn1Twwl($qMUV6Y!4T|HZ#^ z0R8`%fNM$rja!&H{Wlc%uUzu~`|mjYhD-hn2x0uMqLlHkCTl?zjFu}Ry~t4}CadP? zpdo9R_9(?FeU=`86ADmMP(<-uwL`3_IkucU#lH9n)Ms@HGrInznUU$N74VMiL4trQ zfs=V7?S69n_?F;P=d0mu1Ys*dk=ZTgsv)d#Qo=>fY6nizr}rzF? 
zDGy|e-%oGvvE@zEd4Aq=qIc?GN`Wm1D-Jk?hd2o>o(3U+_VSLdS}pQL(n=^mPnitT z@uNv*1IfAN3=s9ncFQ!jr$@5@*HbfMIaId7k!L1h!Yk?Yt3uXME2EM-drdhYN1$EO ziY5g4OvQS0O74gbhD%0C3xcj0HiLr@UY5CsGYvk^>j=VJxSBzDK-nmigZ~~Yn(7+> zAXgPV`}wNS8V%b*5kXSNpX}K+b%?r(H3>s<-ii3c zvib=&)_rb!kF&e9AcZY?W|;R^VPjYwsja%p$dGco?(++eb0?47(kKNqm6JSKuk^h< zw#XZ}g^qQe6Ygx7&>nFf9ds5mytMpbRb3&CK^jx?VjgZ7j+ywX%a)w3*QeHyS=}U# znIu1eV8lh&Dr;7qywXfs8k;}f@v=1EPfApKTcHbNatV>XZr!2EYIfp{!Z`b}hqs~Q zi>P^?|jQ^6D{u6A1SC{M>a>oyT-lB|64kuXV^f5^T^wl$y49C4 zTcD?NA2`r;Nikbg6ci$uUPFO5-s}(_V9*(#Oi-nge1Sc{7;xt%GN{0-rnXv z_yzNyQ94QWPazyW08eyxMm=4`yJ?NY`wnj ztR3|aLLXi*p^r8I}Q;Ho_-bm#4nBK^7hYdV-ulBLiLbI zBMygSR>EedRtIwVaOntmP{JwduqQJ9gw`kLyf5R|Ejz4Ruvlr-^#Nn?Fh4{X_N6J0 zL!Z#FCNDmG{c7HlWKgfROxU4%8fG*Zj><3=)kb|9tNXCy2mcK4NhTh!>MIY;8K-u< zl+7CQ|AMj*34#NxjIKPjHO4A_x0i^p{2W4vhk4%l#+Mc$im+g&xVRf3{351e};V2sNCIwVuGsL zCI-RDqR#EsEr!>XY>^4ezb|7J?7m(49^^xxk>fs(v+DlUj8mo>xfCN+Jb8yY#4)fe z!<=C?;rorbOq7gZAMLkAm53(xIyJ9HBRgVv)Mj{@90!?14wP@*x*)xy9mTafc~|d% zDIgs?X=43of2Ol1u*%_3MHCo!!J7_Ko+SlfE8T`e5`ENHFB5T1VTHfNa?#dIygd?` zUR_3Ah>VMlNDGxeM?5bSJZDZ!o!GVMb8N_-ES zf1u-(Isjmhl&sZof}}0JF@QFk$Pxkg{}_9#s5sDWTQtF?aCdi?;1VFXySux)1uNVo zxVsbFU4u(-hoHfO%dPI-oqu=lbM`$C3>Y=)jrA>=YpyxfS7K=UbN7`Q@iN`Wi#D{F z^$R|Tqo&w zqdA@-71DcJk)8-R+=Q~e6o1GS4XaFFs5DZUZkbP0xKz(~L_J~_{(K}cZ70pQ^^e>( zQtD?{c%+HtO)#X+P@r9S#%;1BPob_PrtDe?4w|Tyx?K42oZnV#_{B~#YsbK0Ccx0u zcV>dS)J@=}SmL{+JshimtPsC$+bge^Ut~A5%FBq-C`H>5e5y})z{K-=0qi$$vIi{e zpFJc)dL-UqvHC_v&d=Ou_*>WDG6-@+Mz_e3Oa`ty+~55uP7tp|1V*SX5XNo@t@3wR zx?+6gWK3SgZKBzD84J5k7J+c(W5M7XsS`zG*V9x)-Ok&A? zLnA|X5iJ_u=#d)imYRxjY}(0J-8P_N)PN?bFi&l=*QtI3^#;%|&uE?OH=vF$;~#V7 zzwyxjXc@WwVQj_pFMBH|+rM{jt_goCs0_ckN4XLt*M)DXWLgaJQ@b9wGl=bz;Lsqm zHQl-ELWi8C+ys>B3kgOU1%@+4)x0PSIkyinhuW@WZa6>k9z0Jy{CavqdWXTk zpjPuvHfg=^3<$VRR8@*D7H9p%B`I`IKCCqx^=&Y(Zd0%cG`wU97)>+Ip|36Ej*h9U z8EeG+;(V3}o*d!W@Hjse`tl=pOCWiD-I|CwY5WZ$9)^do+~e~HtA|Pw_)b#);$)d7 zzERJ0B;g$GZ8!4DxMFGL3hPpmtbSv@yfW9J_V`WIloQApuC>N~GW(ANjUlx!2w$KB zWZN)@*W@~AC!uL#1I-(pX#=pc=adsxKYFJY*W-AhWpzcEjM5d9>$BWZG{Gxn4QvWW z*b#fjoPhMlzlEV=?h~QEzlC)l(Ze0yqBIqZX{L681`tYtVY38pG1yA1;`B zn-JxyEHnDhakXQZaN;^e5@~ z)TGm#5>&pj{#OK)=Xd$~mur-=DM*FR&<-?hfJi~cPNty6ny@G6$JNXXv?Kn_pIfz? 
z`-se+2yBx>uoS6TgQE!6k&=CPneZ6`M9!pvDZ269wNq-I6<1mEa@JexM$x zG**ELuQfUMGXF6?{)d){-wRZ3#8a@$HzJ6bH79<_)YW)H>b9>G1ryUK*~H`X1XHSZ zH>u33^gmH%5&ZSrfvgT8%~V0tzZNonnnQRAc{q^si1(c#IG(a~_~=pVyF$^PLnwW^ zI)d#Ek`sq|C=bAapJSFu9dQ_QbCT1rA&f9Kq+4vB0<#Ks&fmO~gUJ~Du@u}m8{A_v zr+T%YyO`2jZQ(r|7;?>X&PRm@Z@NWl=(be#BzN{09KEA)bIHv>Z$uLWNzdj!6M%L41!RD!n)4q)F79%nd&FiPiDAaG!TM-R;TB&*G#|mi6;om?wU_q zq|r2oZunY6-PPU1W$PSGjY`Qhw~rNnKfIQ$JRG=r0^M=Yq17%|#Z5nX$uB>&zr^t1 zwsU`GkZ52_M-c?1>QlIaa25$x%ZHQw+Iv?vHjnYKB@|t)G5;ZDzoE}Vw%#_Pd(Flt z9q3mqdrf90IPl~-;b-JO07IiUA@zXfRHETIB}g-VgzWbIxMgg!C=ZRQ9bF`~C5q0* ze?nnVEN?9NT!U-ymtyG(L@AzzIqzla73@4nSAs@efK2^0FuywUwNGle;+AfZYro|V zVX38pY}sn)prZTFWGU>SgdqcpS>XS=8K(XnvwtN^=|3=eWe^Bw;tKNm`73Jwj?ie8 zRV8#5WPTKvB}WJy$Id+BYM%DbWE&VA?*bVaJp%HjHon%Qh?%2VF-F+?C8%v1NhS+u z?Tc%=vF3((c#gL4%91)TBerK# z2FDX)8e=MWD9+cqO!K1i3UDP@5;?3$)8|OAC$UwsYx)%KC36cGCD1zeCnyz(v4&@f zDV_qMELxKQ5 zKM+O&?g91|tm{<^W=lW<{60fGNPH3pA5Tq*9V-($btLPlGJ2!qx3R*o;pwUho=F9E z>_{kMM{m{eUbQ@*eOZ&0cvcC)Xi^fiSgLZA`;F2E)#TpQAid~2a+EiJbNWosl@t1C zX{CB0Q8-j<8eXcC1FpFZx?{@y_;I9C8U(_2bQuOpqTCM)qr6W>m-Mc}OX4kb-a ztpSxp;2dMvCKl2vViDl-b0D!>5leB%S3in0X2zwF?HxgCz)rgkdy$nh&nw zK&<(u#s6e^majiNC(mq_*{2XpazdbPC(@LG7aom+(sjzk4y!EWiG$Y5`p)K7$wfv& zP=#fNharJNREu{{&9B6VWY;IXw@iX2|1xg4jyU1bElERdoBR$9qqFHEOs|@6-_%2f ztj0S@uU^;We2DN-i``p94gSTxlI*U*pI~zCgG-9~%)-D7rJRYHKH3>{W`QS~h;<7) zf9|5XV^w&3Kr>~r?4}!98FbL$;D^ist_iRD!=y0gz*B-}UZMSnEIPSQ@v(4Vu4=Nl*-S>xSU#EUh z&tWBhQAn`GUuP*?8Ouw5h@l&lpS@q|_F zrxjVbRUroK#n03M!9;$HC!g(Q=B;R~*w348#|t`t+jTPd1?<|20WyH&DQ1yMskHJE z2)zo60n`zz2Te-xfh&TG)3qWZ*v?-lm8{m>Uq7yg%e%tZ=SJ{D?Mg{)H}exhkmj0) zProZ*JbY}Bo)(A(XhzGqUPbvdJ;3b}fL0nD(i~{R>s-#x#R9pf5T2c$&rk&q0ay8c z9|4%ks7qoEv^Pbyk-qc!GLIGVyWM~M)845!r)z_dC+>eBPye1t|8v&*_o9;N|Dc@x zr$E_%4W}w^%DC#to1MeETwPwo*bp+4637@eBdG>5r}(8w)FfdG26S2Aa!%oEqFgez z*Hphk-HK(+R?=SIzZCHJhNenJ2^Km#x1YNSwx3_$uAd3My*_ZhGa(7xs?O;5(_Z~9 z{~=g=Y^%>g@FXM4Q2S(&VF8yOY2~;qV(a8)VzH)i@_Vb926*t``j_KYS|o-S6}t>E z@1=9L&`K>cdHZi;63GbCCsr@65WU#9)ue3KM{gJ>)ae9647C+_D|3c5MZfMAQYJWl zN%4CYjnxdhm@ZH<`TIc-LNFmaz-&RqVjb>}g(4qWeW$vhi!c$;Z3>X%1do6@V*2q- zBrK%9Ll34u7>|-9Fs`L*>c?k7O4!>RSDk;O^hER=+f9N#w@R3~iIfn2}mC3*~D!N=I| zyu9)|t4=QKaPW$vN)i>`M*OA7lSc(`YO&WVbZ<)KTA#3BIg4q<@3i^E;KL>r#8B$j zFsDT4jH+tG8@3xZR3f;923Ayx(p={lRF&Yaf7+8>Dt&fJJW>@(fWgY^TZw)eUW-&t zZuw}Uixuzg#GO8w9&J)Y@Jp2sVP5fl7^*n3a-8-s~F4{<444NJiRf&!_ZIxXWCIHN&ZU5i|#WD#~a~?bkv!74=&d9?k`43GGKek3}y5u*VbeuXW}fI8VJV!~HX4lKsy8-dv|1&_f!FZZpl)cpBle~V7T zkkvw-gJV4CR>wFUy5Rsek?%>0Ia+2eWV82)PP-P-e3qM4j!n`&__0RBSO&_#2++v} zY{T3Tx*(8waYC>yX>%p>dW5>Mr8Fj*M&(Vk#jt7EC0Of1{F2(?OvdMd_RN@Qvk|== zs~If@z}o2V4CyxbDzqW;h;-rCaQk*k5|WdQN<0==yd1-FYYOdrs%em}K$)_9V)#S! 
zmJxyYn&^)dn05Jau@uxF>ih$i`*+>?@ViI+dxfH8YHVuh_TTt|pqa)5bZQ179l4l- z_#Q%1Kl#)Ifl@?V%7visD7*X^Q(N-)2k;J3X|8#?Ns9h&>_r`u!#jPVxzyIST zXxUmYEag+psI5K73-TZ>?d9_N#QmKppW(>-3$?>wIu)dd8`&1Eji=$^gB6KO>mDG; zRNZTAjt=nB`b;U3zrzV4S4H&fIw?ayy)->W1#}uX+}?jDbWfT-e=(TS#HzkP47PRN zuc(q(4%nKA*j)J{4DV@yrHtJW_lY)|ofROt+Z+klZEWd6`y}Pfu&t7;howyceBag0 zgoHEX^svGxxD~T!wOV%4qrAG8{WG|=E2(c`VBAtPowM*zSdC1cLo}`r27_+6kKQDH z!~p#O8)%)4T9dCP0)a<478p_wT;)bkXWkix7sSaC=*tTn?8)e2@SrhpG>8B&;hp?q*P{-TupO4n$sw3(Y{)xF!`fYv{h z`cz-AeuD`F5Jz(Z2X(3rT%I4S=F)sbzpTh5M;QvBf`g`2}`zff}g1jS<{Sfh@2#O%m3uP_lINvu( zD(XC9=30UQ)xu%Op;o3B>wT~XuSUtTSZTV;h*laLXWfbga|LM=`IpUhc4iG?8xL03 z&F`{TbziPer@G%KDHxBa26DL!rXRoqN7uE#8BQZ_Qz9jobbMTKVoHtFIdcZiP%PdR zmQ=G8UWE_UnAB-*-Hx{~ZT*15C~RMwseWCuLsbwkgSv9PdXTi=<3sNToK|-G=t7rU zy2IHu^x9Qxr8WkJ*H<}Uf5*a=fBxFFTdx_mGarrJ@C5C{Rr!xC7b61>-Bpf)lmK;> z?1HGtgvgaUQec8}Uy$Ow$^uz+T2&%k*aU%GlwynCc(9>08~TSjb2H}AIL%FJd)|s- zxjOlFi!E85u^ohtL5HS9sY*UUQl6$p@;7(vN!}op@M~|)4*0px0dwgmmcS!92+j-N z?PD~J35}!qwho5jY<)CCC{JK~Q6UrnJ$^FpvnF_rfed)3ycDUt1kbV3E94bt`5mlV z-ByPVw$yANMzrjgkxIpJHl5QJV4aGdvc|Vw5tQ}leax59Ei$MAT)s6po3c;3!7g5H z>sY9VXJ{8x_w1puj>+YkkXfYzMqLgKDgb&TNvcbnWnkdqn2sV^()I9{9mU*$rJP-_ z7`ts?b#u!G`IH_j{DTm(ctciO7)GG+rfd(K^-tY;2G(!yik)aE9JAB#i170R3&Lpvoik4 zYo%hed(~QzrouvHiwihD$^DC|MfK`D^|m&uZGOlo(`^b*?^Q46rCmLtHd%FKWhS!A z9*Ds680WihEz=>W1P&b=w>)Ng1S%l%P#Yr@RLC}Wp^ve&rBJHx`63`pS!OOv$%EE- zr!{a?ju7H0(y?_49fYRLysTvoTjM3Hyc7CO5{1(%fr;zvH|FFSF9;=mvfp6AXIzL@}xd`tf*o&TO5KK(A8zxUSvgS+&1ruakKo8vz? z;a^Ppzc*egfB7mmC8bo9Oh*t~6^(`4LdIb2OT#KEqRRL(AiuQmH`O|3^+^Acs zNU&f2Nzz@Eo=?XObr2yP0S zMMt3e9hAZhJG86`ubS6>wXV2p$ER53^wo#cmTy1xv9;Q9#sKQjub5L_v!^|ySoXf7 zu5(C@U@8oV0%wXuNZ4oyb2w8KE+|P zTTRTdSS__g(TMCuAmAOy2360vy8XTg4a4E8kX=ZeS3>7bBy?& zQN^;pD9jyy3U?>O8^wN?KcPV}hI!qh+EZz1!od?q(*II_d2il6{mEGlSzMT{#e9B1 z!3t`?{mYy{u?NrewwnKDtbiIwreJ=hgp>!y!spt&JSX&cbBkNw!b;}^Ly(Vs>wp@8D?y4*x<);X$-q9x?@aL1jAzGtVD zU$6%PlvAQ?m+WUskYa9EcoFt;%zEj3Lko}QNu&4$R{_E|u*{ICOV%BQdw7;kX_c3V zi?J-%Oi73Rl6fZ$lgP`$zGv`bztGk=bXfhj4-N%{1cdV9_r!3Kazd$Jzme2IR5jhfa0)gVaES}xrFUyIqG`UHJ(PuVZTReVT`Pn;n z;j-^0GT2LJas}s)d?j9b{N?$BZgmgXh+kdn#spYHI5^%gKD-FJB=SAWQ{k+`iBBmB znDIa*nbmdwS{0c>nAq0EewWTl*@>M3YjLz!$FxuYo^cEQ%YJ}F=x_#(cm zqt}Z7bmqXqWha!;>QV$gjrx^tbE8up{b>^eiN2ch)s{hX7FmyT!u^Cx8%BxaT8h&X zaV9+8>KFxd2==?s)}z4*4ZEC!oolWVn(KW-Ty0bb?ap|I1TvHWth)rreJlQ?Sr)lO zlF_*=XGa>rTGWNU)ru^~f@G#CHz_}o#FyEz6jmQJlhQ#npM#*ng^$c=mu*hr4~;RO zJ~G83Evo0Uet8!UTH`da)CSLrC^Ln)a=SLIQ~_kxH!}IRumV0b0NDD0b|r&frjB*j z=#31Kz8meqD3zo=UrhXR>&!DRsFE9PpD>ACI|GzhCNw=GUWXD$2DvTsCr9D}X*>Ev z?OpSfUK%nw6$DeB^E15^syXSMQgV#E^Frkeq7T#dwfQrwVs@d(YnN``Vc3msrIfyu zD3(ugely_dZj}#56RrQOw-(2uXuFeoYaVn6W^VMU1Ka`4J@;dxr`=n>nJG zkcHzi4K8(mDJ$C}H4Va>Eh@XCC3ax*kNhk^Fm|m33f7l@pgaE#R_@>V`Ty}S%GS{G zw*x~o$T)BQBRW4;3PaUGa|`Y$EBr~nPD!k2u~H1fAT=h zf62v^oCcXu)FJV6-eo&`|8N0W-M@nOz}cY%X471&R9DELYVoqFzwB1eG~tgtN6UPo zbfG<8NrLa&(@LkQiVzRJyuCuq`~IQ5vfVrrF(wnBQ*j44Kq$Y;#6qtXF=j~ak}8N3Ghp~fy$-G=3D1$J@Pai3YoBig40yy zMLmI_nI#piE_&$j@n#*mPhr<~59o%EObZozABzXA!r(0+WKt>T7l9>N_HWLekp^{R zl#|lBr2$bn=kTT9oNz z7k-IIQ>4uSIEx&kKH(Yzc-*^kmmh$1RAkRuAu;ZaP7CE7OkZz{m9OS&9^t#+{}DB1 zQK}nLAWZQ7A5rt~s&4!{YJR7<-z>5}FBL@HOzm9M4gVxFlWST3_xn(g9>D+WZD^IU zu03cYm?=HWg%~icpf#qB03i_(d_>kDtU{3tmj?qGp#qy1S#MkII5qR2L){G*41tRD z{LT;E=;wV=E@`3Bx324rOmlO$^WiCwTnqhn%*RG73d&qpjD*k41fpd5ENNe6G$6CU zXKx!DdgI2|x#IHBODiGrX2%53shd``!HOC!dbFp8Op*Z2BLlRvH5`QSVN_uDVov7d zFT}HsU*(4cvvXVdj}z z2GDR5hK7Q%Rk^v#6G{jsc`cr67TlbD?J4+KLTLI%$aeu4loUjaI=;>~pR?hUn!D`F zObzl}n`RsyH-t2YIMyw)WLrM3hgz1`<(0bY=oSyqt*$#q{uYy6sjCVqoMf7)U_T zctR3}#9a?_wUa!fcjIaHF6TTN@DhhMM0bA3jwcWU4_H8R=4lzKdTwvPwaX<}Se*uF 
z!kglwwJ*P#b3ivk{D{djNlrAn{R&RQ& zik|7>OO$fX*9Vss_lHLcLde)bu@^F=N_t3hl5i|Q=461n)7JbBR^}7h7(>i8_l-bo z2;dRpn#Ob+=7zT0oa&v<9z@&(qdA}mpuFWx66u|mdt$L;Mp@i*y=uWSmL>7<=Rlhj zL>JBj3gbWUo&OLk|D6C8{%0sVgB)iJ%}tq{9c`H8oeYg_O#dR(EC2QRe>zm-KedJA zos{ftY>W(zt^YnKOIH4=1oDXDhqn}wMyJ-#&xb)-jexjth63V<;(Sx#(K#_xUBd9d;FZ~`J$d}|qwtATbrjF6qe z0w_1ewRe>^YDss%xw8~g0N$s2be+t5aNu(Y z{W!kU2%E+LO$eSMyLXylhu4U{(?q-Dj)v*T#jf$&hkk{HL?d21PSKYB49Vn_+a=p( z!wO^ge&(wIKBL~gH1Z+pehVx)wB17C)M8UZ)FEO@hZw_3jbrj{=hpN2^iUUrg}!CP zOGuz(RUX<9>(xr1Cvpd>cY9$`xaF*)d;3?B74^6Gq{ZJ-N4HU{nN1dF3Hcb+yp=r~ zE94}}4&k2AlulbYa2N}c~)-}JQOtDyclSzM6JEc9nR_gDA zvzu*E9l^G{*P#&##Ne@7{8e?iL9UtJBJzbryp45Dl&CntvIJgBf->P2@bG9D$5~JX zl|Ys8&39nS`$aGTpnaTEoJCPGrmrBers4y(IMm#LnDfzG*`n(i)_9Y7@PhM|MlnbJ zl8wgib19Qw&uKg3sdKIJ4NvlH%`%pNapg0{U~uO-`YTiwnh6=Y6!nSGRZ9%y6&~)K z_<4R4?+p(5Jj2>|YO$|}JN|#l8-)am!n%S&{(o;!Ie&-zZw%+Zd(HeOzD+EhLHYd; zuK3@P9inUrV)3H$BWVFs;){sGM)IRHB3S9qh~Z#h3rUNo2V!gHpaB5;bWOm$`ZHQJ z#mpzcbkMEjoh&r+;EKtk*#`gJ+RrtRg`htqb2uc>r5Ax;ntnCLU<4hpsPU|Ie;Re8 z#ojTLjeSZhhT*3{OdbobmWc#?n<-fHQ(t|nGJ3XsyOZ-i4Y;;x>e{Zh_gduu{g3k` zU~ds!v#@27?WR)^x|)HErN#Fd2!;^!S%54M530}jaB49x-B996YLMH+;@hfHWyll7 zw(UhixkxPiP?swEb3}|A`Ru@Yp2#pHMp0s34PrAe5+DY_9P@r?nWvIKZkZR*zdZBI z=szfH*>2`@k^#B6g4S#F(^*U$iLq!VS`XO!_W8q&BK-4VMBa6~yB|c;G#6gknG4Ex=_eZCoJgW4@qY&2GyO7o`!mumK z37vlG32$dx)1W-gHzZ`1=xsAN?zQ}RbK@$2=}C+{tmPJaN#nHRcU|q-tuKi5im(z& zxRkD`w8JzqY*CpAg8EnBvD&>()C?inVZ&A1a=7lvBBV=ozQ7|d&&(PLRCUU8r4~}E z@WetRT?+WdWDD(t^5Ld6KgvLYW*KXj!(vhGgN1)#+Y}GIZ<5lXf+x8n0+3 zrQBQajq_&H4~!r@uc|fnuzbe}Lqt{QZS??hCU!tTq)wh~#(dNG`6oAujLg8HDs4%? zc_>WN=rDWAwwxj=q3_&$ z#m(%R%8h2vf2zLpM)_g?0cbA%Bme)KSpOgX$6pPwtfiBay_2lrKjabr3&V_7d2n9+ z1iGx`P$Y+FU16uORF0S%lPk5dbI@{_gTUWwnGa7x?5|Vfs7CcHnu&*fR@ITk20^}T zplg16iaQur>g>y|g|p_(_D#Xq*Jt>T82W73KZ&Dt>dqbl4Zj|VDk6V)=nAVI{3E9? zMSya;v>hvYrBi5L3+^eh8!2Y|@JmdX5mVf1^eVt6X!}ZFbN#>#6~fO(P+XnX5ocsw zC8Zp9NG3{nF0bDp6|um8%2l*M>Q1i)gR+P=X#W(_5Y_u-8q1CUxX70zvF#*+`LYl! 
ze%6h84L69;M8D39jnmcLnXhr$cg<=UrhmhDIHZ`qZs zOS^3+UD?saXexVFO!wHkZqky*on*C``2N$@ylTK<$P-10%@MJl$Bfy0$2F?Lp~J9m zz}m_^fDpl1v>yzG$UzXJN*73B9oA1vZ)zS(JZt~qSgyv8%Iz(u;Q`bX z1esl1pmWK9c93kNNmfc39|3rEdBBNBC>&esdHgoT!9ImV8Y6zWmmvb{^28E0re1yw z>raSz3<9}i`81+J|6M^TtHIHgkr<^k4_`z3;x7GiglS)t;SQ=(Ge&DpRhH(?>^(_b zluL}0L2|@Vy8eemDLcqe(yoj#hR5lTt;|Zx2#C#rKpE^wL}T03dw?r=&Q3}=tPHM7 z;QS03wCes1a`LaY_1u15CE0|KM6<Z>>i8 zP(t%lEY`(L9c2$8cVR2&Qbe&Dc6_~gheh`dX(Y>-Ys}Ftur^h)s5vd()-10*h@;8E ze7Y(-Yl{C*Q2@xm2me42@L2s1QGkDsDD1zB++Vyd6$>YO7nk3Zu0M!~wuYWYrhk9_ z_sT9BWRi==iOw$-6+J=u8N5*PT0@ER1JKA~j*4Bjr#OTs*q~3QTU8H!=~D`ymTl;b zdgGih>x9fx9ovgymX75*6psnzlhEu&XRPxS|4R4U!wb^8NDH>46`l|A8j(c?*z4TP zD_S(;KB$hPIKVGtI2B79rd;}2%cdh$$Bdpa6nz6aXK;xFTKszU%(V!>hZ3K2nwk=h z6v>xvlVmuMK4ek(?CuoLj=r~P-RJFhp6C(O`2%q^hx2g#4y~?c*%ZJY2fFX7+Mgs% zLvjrRKr!u3&tF6~2uGG>cEQOtg#|Xe6Zv|Ee zir`FT%D%4$>;_HSMy#O%i}y_DU@l00NT-TqQD?%x^ps9H$m5N5VH?oT^$Y_;gbdJPMktQOf4 zrw`s$1jdF257hN*o&i(qCtn)ycPnyep!<0n^a`UsCf2em6uqFRw)2*{Wx1)_hFz_D z6C~UXQC20k2s-%6SY9>APrZi3f6<_Dh!(W27cX5X7l*M{236!T{$bAQ66caxn`s_V z4!EIajXQpM@21p)!H1lF_l=FKsEKWorbCtdn!%_7^*;~>zKV@qUC{F_{wG!JU-xbv zzn|~_L>NT=>&zMS`;XofWDokk-@7?`f~0*+eqY3agg<_Bk^kB;*cz)zk~=&6odOzF zt)1Z&vHj#S+%hzzv_vC|=@XK~)+{kP>pn%QnO*eSaEMpgO^#-+f2h(Q%moC+nmtqR z@@GW11E$}GCdt#XWYFT(=ZsJACqYHnY`u}Ijc^h%j9fLPazG%y`*Wu zllp+!5_8UcBh%7F)!~qk$EIJ(T3BqFCDRP0;~AsBPkT|=VmrlLTLG{d^sL=US{u+k zntET^zG!!eJBZAvj===ex;6F{pJXsdLZcUV^hEzdVjvCPK57w$L0c-XY5j z0@)uc{q>Y6;cDzNRSv*6u9=G#n`9dEj66E1f$1D36+-UVEL2N|FPI|x;ij{E*ld!_ zJ~@FXnnhyhJRfqXXqDp+7F${4C@#=g7AYU&$8wQYalGiVx2_cnh87Zqg;G0YPxK3@v&pI<~}U=PGW)vX4HPo!^j0^B*GRKU4` zAxoVLZuT$r-WBUm$|}ik{W3DA{jUHq0~VRr942sROShHpZk?j7;e1V|4lt0kdW62W>wUyAo=k?cf`!U+LM*yHIsx(Q7%4`1cHi0%D1Bf~RM zX?8WB#2quCc$0o^QMq0SWIncrB*w7RsnQiXrfmE9ftQSZe=pR~Q0${Y7P`RIm$L@;`dL7>Xbe&Tr0jA|@ zp*tH#!M4`8q*c>d-}lS(qiM@ze`takhiFlS)fkhqs%>9Eu`d-27lp#T^)r+O*4-N; zY~S3DEz8E{23quRkLz(1d)**&tla7BQ&JbUCjb4VWR`?^gbUA6&c*O7`b`aQmS2qQ z)Xo#fI^F0Kj>|Lx*E6QCyPhF&yEOqwu!p%L2KaZzhPF^y{F!Z+E zcA0mLJk}gF*86atuzloyt)@JQbqH+%5^h8CVERzp#w+p~!r)58@rD8z*{*L?c@Ou6 zeJ6$pe|*~avxy0iMrIfXVGzHT%erbaU|B-|>zXByuJ(vSq8D)+m$vXj0&~1wmj0jQ z>o7Mnw2z=y?)Q)F;J?8aq2I6k_ayokxA2pxnW3wV%bzf!grS|uKkBwI2yOvg5d8gI zt3Wmyc&f;Lb`r~!HhBZW2u&PSb-<5!;Svi)ODS04P+F27Pqe-3TLTy})?1h~9($di zL5@S*jR!Y3&i+Zmt(H=|q*=vWmB0k@%5~nQnJusUWgAcT;>$TF2 z4RYA9Gdk)<;Tfi?LW?ip);8V7g|VJ(Qh2N9V6rNYRydG8xe5165_C?ZN)Su(eu`*w zx~@k9d>NxH51fZ#%Z-MgVBY$mjgHycT{oZ&)t}b351nER<=%p&F)Q(a=>DiS5NjfM z7*1J&p7rY`dN#feizi?KJ!G{Jdas@o*Tg@tS?iXgU!nT>v;Ae@MAk21iVK*YI0%F! 
z266h(iS&zdR(66C7HVf)BQ9^4N?`p!Wvyr;$uC@+)t>42Aqg=19R{fyZCu<*HhTP^ zr_>crI*l`pE zYFu)UPtxV)Ul-YZdK})~if;@aZ36{mj8K&U2fQ$hkbSG>cVtji+g^bgcnK+TcCbIu zuzl~b=jQW!f%|Maf@)w5##>GF%7hIn<{W^MB(FF2TOt_+uhxdzAxkht(&Ho+ok`b} z2I*wW+Gl5J1b7}L4>Cy%{Zp>O=C#oLE~yeBaDBfX-A50CA&$d`xroY=n1>;&po?lwd%GAs>{5WggCK)m>)wSJ_8t(j|uzKx+Y*^?g7ay%jgG zOvimi4Cz4I31o##jo7)z)tAxOdJb>(s_i9to3Pu#fyn8VD1_USWYbTWiuCFZgNg z4|TdGIV42=2`W*7gHMIaz1)v<0>k7uQRh1=Y&x+*fkO_!jTg9Ml8_cz<13UOhfH(8 zVSs>Y23|`*MuTU%*EXiv=(RMQxd7Y&)XT8M@k8C0`%@aJ?(N$e@B2;@$mc?b&{a3@ z7M?SM;2ZVd#<0a_+I;(K85JHy!6n={GSo68#7 zfw0~G<|xknpKjo9JuqdEkT*yVths@nfleX>ZbVwQmO%o=$MEt!MnVdhRb9B>D&v>0 zwWl0U#3S}sL1GMIXfGB!v8v zEU`N;!|5+KlWRp6{lMV7%9i{v_p>q>5$9)Iei-S{bhSqIH(TJzASS*hFifig=lnV8 z3a~D^zIU?W7qXgRB?=So+y1p?AESh_0D7Gz0`&{YZbI1NH|7=6h+v5R79=H@LUwSP zH>WfC;&ijD}sR9yk;EXK4ir*K-PtVCsIq6CWavQrj!mDcC;b3G5Q5* zQZODM3teQ*LQ-&t6+Q2ja)<^ze^V8S8hWA9jDxma$2q*ILNS}XKiB^RXO&IYH^)4s zQsk`TYFzd)KCZY;t+HR1&c3aK)`TkIp+2~mn?-Kh4Um{hX$dcMM?IQyN4sLZmt?oj z7KN1}S9p7sH)PHDAw5_VhYejeMdBMCrS0#!>T5Cn6zjp*o7%0-H^LYQ+M|)rg5+~zT+MhJo4ZeILDmS zK+v_1qtfimp?GbRr{Ix63qPnL#BHCxHi-kGc+4W(TQEI1mlA~+lGD$sh)dG;qY7gq zUyCLZV)@Ej2iw@fB!1Y*0g`0jTHK)V2tGa$&Hw^@Q`IOtW?gkW{|anaEY)qxy%>F|C}# zEq84E50Jh`#(e0bp_310kWJeGo_tq>6wj>8-R~|QFrTl7Z-A0#%6!+~Q1d-)(aBac z-u^_w7UA2=CqPIT%RgG~e^1O}zn{v#im}3fxmW$+=L{N`K}^^GJQ>hejX~nCe->(I za`yic5m8xn11XTLCvqvCra}Sszbk!m?BRewk+SEK9UZik#XvQp>eA{&`TDLltOSszg-j=I@FyoSwUjs?#FP@Tkhmd*g z{K70$yHiLYGL)&%IJ$W1pcP=kmg4E;`UzX@HYBR=Sr?p5Q>I^nEE+(-^!VhC&A0?h zEK$dO_q}e!mV8Wh6kGN&)nqtWLmZBP&M5S#aFcZ6`)`v0C5s(NN z`tkj-T6dIfd|BkX6DD`2+%Ed0MF!Pif6FRz`6OPO%0l@X5>L|isFl9LkKMglG0Q(@ ze}%B(<>OG#yz#q$Txik|2)S+9EYh#5mk-#3oP(o=s~BH?<*}~OMeQ&lzT*ivM>D2v z-gj+xEPRjP)8s|6-Ss&Vr|*T(^{!{aoGVtNeHvOk7%-sQCt}u-; z7F#N0Cdg`M^$I0_GC!(FbFClCzT!1UuQR9>(D)=kP|M!ZmmJ9d1CoKrd-Vpnh0jLE zGB|!mt^ws6#EmWZiDo5fvJfQR{8Z`B5J1H)q2H-XZaZHV(vC!PGZzO&)&A#q=Z?6V zDsR22did?w(!absxk<9CJsCy|G!&mzssu?7XlE~j;@ZB_JYKj1cr(y^W#p7b90Vd9;wD(<9u>vcW{>I&?hW(?@`WwOcHJRYAJW`29+chsWagvzu%O}XRRAl*}njrkRU zYtBC=aY>kgumVs8rT9mW{_nK#A3D?jZ(8{4^%rTu>fkM=y>;P;T}goPt?jV(^+CN) zfpm)s;KBqmPU{~sKtr){Cr1z#lgI@R9g;jJ`XWF^%`IG=-S7L9y3XV4G*3jL|bkLmf||mkI_dTSl}nkbrA5-g)It~^-HmdlNGmM=4fGx4=?eqHvt&Yql8mSPRz-yyz3Q6l@MFI%EV7s zTf4bha=pua3^zM1s1uYKA9+y8m1~k18-*EhkSmcommZiK{Uo!XQ&o+e9K+J~t3I&~ zAU0Qjm3d7qENPq=-+u5gA}@gxG}FSGnT9|+%ECPocvzcP-7J=ZgSecF3leDrM8l{y z$S{U+l!iKqEy_eJk+()LqbDlUur-k4+pYnkQCw_?+o9GlxxmCVYf`FafeO{YPF~3- z5*K|fogSHZa4Fyleg7Na*Nf}&QpVx8U;)rbiWmzzS zNi_9{7p-FIFO_^6(l!#RU5?c6VfmFMs1vo>QtZ+vG@~Y@mO;T{ABMeXlON@Z@+;|N zMlHF`f*_-hp-#$FA}dRj_)Wm|zCj?(4JFH#tzyA)AomOucVHS?h%3A4F{{*=;{=WM zK#^)J_1T-$WoIzA8xvZrp~g2SLvYjz3GCjf{RraDziZ0xfr3;RG3y}cZA`?`RK#^g zv9E@+Ar?Z>-d=~P;>}Fb4Ys%8F%LqeHoe{~iFx6mp1UnYve{r=G;ReujoK1v5dZP@ zN3m58RI^JRN~U24+~s4r}>+-k{A z<}(3S)3fN_e#pY+gVyVf^=gpx{ez_6GT3IIeSMNwxaNzxMZX+vJ?n!GU;abOT#$m9 z4ICWw0}x*}_V^dazOk!KkpAM$Bc95XS)^l%2I$!1m#>tq`#7!H{d-6FK~$Qny3G)( zsteu=Zx~(os@l^l`pnlJFu~xO`YH+6OTm1li(6qQW|UYRmE!RWDwVJeT;hUA8mXOA zlO6W{oLM33L!a}aUID5KQRitzsC(yoc$8PbYXa@`vIRY+EX`ipb`ruX?uK<9lD~-J zyoVgyz^3Mw_{E*}u|7jc8S4w#!#yV3xY_J@`dP09?RsKbx>5QQ)llrs8`}MW>|k{^ zawEpu0+=AsUmNK<+K2~5d9S5y&!71{75mj&Ly!D+H!>W%6YZh-9*pwv8}TVo$4gPf zi@)eS&NGoIb2%rvr`oDd^SiJ~GZG6Eb}STb_-BA;@SMBt`RWxL4gn;@rf$ikEMlG| zU;>kUTOjz6w}81I1Qj=pmmu2Qf=Rp3fU{idd?b9{u-^w>tIpv3*n(}>4_=D?7uQ+K zSDZ*1&EkC=Qy;C)!ey|YdXaZU8fLwy^qah&-Pl%8FmG+9dhr#T9{R&zbCrrpkek&U;Njc3BE}R-svtzyrlXsqGn42c97;y!=BIyqP7~aoeci2 zAB9FOvYi&CQjEdTTvw=>LiN@6fh&qjh*er!FsFHmON!1jZaN;CGZardNKHUSMD<44 z7t*=-`b}AtSKB1Qjy`lEJ`Sk7=*aa^$fJiD#-dywYBs!&#dIdSG#hC*_@i|_{Q-th 
zzC4vB?Ci!-A6n%Ce|RhB{K#qDfVs^xLy?PjxmdCik2*#QqyekOi44s|!=qCslav5w zrrOIvPb|=~CG1N`P1?e17GrUTA`Q+7=u2GIDb>Q&8VGOIdjoo@$p$mxEI`sjduny& zhGzjDb3IN=myXz~sJOWwvsn)i?&xO1M(#TMo_lz5s>AYosTh3^qY$fI64(X_MtuTq!5CQA;TIJZZ1l=#%EIZsQ(>C~N z{L=nJ4II91SuFm+k&8!j>zG9vU$maxj0j`JQ;?kszlXP^XLUtg`FKjUKVCcqR!5Q8 zuCv|c@=d#}uys;0v)`Ce%J)Vc3xufVP`yKzJ=&W95{IR~)FbW7#xp6*w-WjQq!}vK z7mtU6E%ZZJ7DuM1O0E~@&A6=UaSWC{9A3eh$y>cvTtkyPw)(u1#6{+RgdGUQoNPRS zr@R+U`4R_VRhn9A)F2yb`OSJnT*6mzh*$Lq>b7N`AA_Z08|83_w@Urg!ApQJ7S^9N z9eQywdQH4|RZ?c=G26S&BbXte%7-qYaJrDTjC$C&(~;V)us1ytnT38Qp*EBU%)Rn5 zN}1T!Z|6U|xaT zG5DyhSRa{)JZB*t;jPykz(-^X&$(czbGHX_9kFwvZ>YK*^CxeI&_07^5$bTS$V4V= zykLIL3f#Oayr+UiZzr6u?=N{B>^UJ0%6-3WpPQGBvJWZm<+WHy9br>*ZoO}}O?2Nd zOOzad*)Yu13X_GiY5K9`qP9!b{$Tldu9~1+l&aKTA7RcvU6&mm5MfRlR78gYEx9}+ zWGX##b`334YAHgCSDBP)2$DJJW2thKSZy{=NIazxbQ%tmV6B=vPfX~`BxQPF*^l%B zBxkffuXweMkQmCbyFEdqcNry_YPwr48?KviPcqrw|KhQ4>V?vDi(j2ezl>M3LwzCO z8?%3t$>=B}GaFhBA;XLT@wTT1y2(p4v8O(C{~~mP^8&2qe%o?<`~YCpv$!&A%!Wjd zn0eK$Hm}%1rOT!@PsgILyWI17Q>aP%6R)-_DdvIDPR=74Majk&p*|R)-AR*-3LT!` zGNh$FuyzyMUgvFb9Lqwl&Uv>JL5kLwFa)_J61_JjzYSL2k7j?+d_95z#`se)VPmHc zB9LsvyU`o4T}P4r+LAxh?F@A%<27t+4+?@8_Ay2pbvYU>U22Aaav5}-%dk^*3prHS z^=zbN)llEw@oVC$b}Ajl62@)j$_~}R%U)0l*pKYNp|z2E;pAb?%T`|ExLr8 z#=ai$?E`#$9}#5!1Je=*MkPUHKeb2~uWz@X2v~=Pc+%BTkPnH56kbPqbHr0Sd0WwN zn-l8Qjs1ZCzMzuF_PW0N$No(S_+%rhd&EG;t*7YF=}nD_U4sYn^IYpM=s_Yg70Mzr zEuBDVXdWQZfuAie{xMhn6sjgZnb^3V?fc@E@orG?^ZgO}WvS*D%6ai{j5}fJ8tEjv zxr;eh!HY`Z+&h)UEw|={&Q9EpU!NFx#kzD1$%8K1bHDS*9>wLbIQ%D{=3EaNyNcUO zlYT?ald$6grHS5@kIt9pmG?>9*kpZu!Uoh`=`u&(7L7RYQ1;@Vwk${3hiHt&rYTac za%M3yxmf($XZ;uq1!R&KjEM-ilUx7Qwpp@V<+LXcurBRLu6>>=am>6Q539H@YEf9qUxJ88AQ~zs8ekiJS1wa z$)^RL!Mdbyp%!uVLeV!m?7LnW`q(e;Jf663F(L~22xG3qikq8#Y>MXG*(9IO$xOw3 zlD^*_AxTtgXEIz-dgV~>@h|Y|FdUBDFO|29LaN;iV~s1(6~Be)1rcxQkvyRM9v0_) zWFZ%&f1Kpr2S%kt%+z&c0AQcXC7wD+Yr21=xo!&hmH3DObbc#FQ9z43q|*{>%N%L@ zYB-0qmw29z4xH9EObUpVy44BLaVrpqeSo(%$fIgT6aq0%sp;Hv>?1?ireubVM348u zM0*E*a!A6(n_|-XDQ)b1lwX7$*DxQ)T5D?` zkL6$Cy}Q$Goeg!Al&+1xbX-`S$6At<%Pa9Hs#_xkQIq21kfZBaah^9dPr%zb3AUAF zdeDg}DU_vj+_(|Z)+2{QGp6(7tHtv@ZV{Q~1DTJp84uF`43I^a`zBVnS1%7Kats-8=pt_Zs9V|)iRR3ZI?6h>dKPWE;{5FoOY$*2H(ikrRAQNvv3yk%n!ehaWQpRM zg?A@*iOLb~^pG~Su}RDH>0 z-*vrd#k?f`wlfl)7xt6*k11Fpk2oH(ff>G$ZxeBHOn zmBSSJ-KKSn!kP2i;%eI1@HlrvFeNINx;=>Q=qLUP_r1B{@scZy_bR4`Rr7SQh|J}0(K?11oYw378aQ*d)~TIHB=bAZ;wsZ&>j>hU zC~Zi+H%w-!GT+Y3NqTW$i#p8j5-q$()6I>2Q~eSV|iWqv)mo5s~&pmK%m&oNBpXxqryWQCy70$ zDdGN{Y$;RwNe*K{`kTzYFx{*fHP!(n?}o5vB*j|jBPP$cD3-ltS^>twx0)XfTLF57 zUu9ZrJj2YYXzw)4->t3wCpky9l}ZG}-Zjl=RmRU3D$m}af=@!hmk4S*F`Ub()(T6P zL{}F@n@ZRA6UKJo9sUbHxAX!Cz;u89)(rlwe|t$nx3_A>xWg9hjYGZXBjqIEGky9%v(N8mxiMIX3d{KgRWzZ~VQctJ z9j`4Ixu&LWKeb*^_VS%7V?WSC@^$!!4KZtG?sp&#nVEsp6MPO~iQ<6G*PQJ5H7qLW zK07qjRKhbJ$qQ~m;F!6=dx_9qj2*6u>6EM93X1{5v@R!V^lo;BNNDrcU>4>eB+VN{ zo`^Y2(lH+-iyN-V*rfU8KFk?f6n0u|0`hbw0&Xdey)r?RvLQB8ot3`vRq2|e6Z*z* z_Se4BRpGuI)o?F-_39A-?JL5CsQ)_}kmHs2u64sZE$%b|N3 ztcb-ppRmL=*}+T*r7nIOX@2|SlIzZ!v#rYx#C5<*WHe2&7dC%^)Gc#ft>T&K!yn?R zWKy@xa%ZT`{kkLCo_xCY8qh1d6&b1P)bjBT(3p0~0n+E3F&hnk1ZEBXT@R@2yzeg# zKRb>Rq^@V+1g;e1{E|Q$!+Q(U5>M z4zJBE+y{TW-J8GTHmd&c3jW|r=$+a32%1s`L`awLe?t^0`1$kq%Sq%f3=|ey98P7pTdg;zy$!XGpGOm zqj~yA!}K3KMVYTE%vWJk$?2;;W9|5lHbPnB%Vib!QwBluj7lB}3_+f{>1RbyzwjF3 zf#kHRe<^E|wph^10nfs@z>v>e1U-B``|| z)-%_kr!7Ga;D5khV@HT0aD~!b+aIZea}8+6r>$Ofw8m8=wZ1$oOZy#v&mqeuH2VV| zNK{(TijOz6TBM`?D03lA)>>_%#G@KW%#6MQJ2a7L(``goanc^C{~1|V&X%4ObPbnh zic?qFa>R}5TDOd1-q|QtMP0b10V`_jfwd2Z$&sC}7=krO^lOh=&Lyz51R?0HS51b9 z36pRwna}D}q|&+vtxw#(oLzXl8neZJ2A9q-(&XquG9_2x?Mf+65&6q0``ujR&@zo` 
zWgisN!jdPg%L29>VxYcQ2`ZMur;cAvv+BrhOzS~~LtJpu8iYM=H1{VrYE6!}90%;( zCt}RcrBJ&5?(jOR3uKqm-nE4_*aPPFG5s{abAv1sPk8R0=PcVG=o$PCLv5TPKH3G>( zwMRq!5}?USb9(kSAavfn3$D4k)d!%J#BumlJ06%@-Eff!88iB}1Dp7>_-2L0d`OAXx-&nUs#$P>Je}6{upCExur8UJb-(g-MV5wbf#MaOfHw{8i5~5IOH$q4i zOA!jF>)tsd=I&k-wnpxVUAg08H<2+2{_dE%^A+KPBGq5>HdC@s8GIYwhkyG`Mgsf@ zb#C*!kzl?Ik=l;lEocz|$FB}5t-2i8TB(d?`z>zs1XW{H2!G1i;daQAQffxCR%`revJ$XlmuC>yr%V7K;1LH3R2}Fw3?U3 zH8ksx$uY(2{5-2{RDQp`3ksR+S=*bm(R7g-1{U2%qb|rF<%PR$72(a{Dhw;C@%}IE z&K^^h!b-dH+mEJA)|j!0cSUzC4hxUtXY{@ox7nBP@uQXDUOm2qy$9k|(;F(YB+6_3hz{mPj8=J_o1HkYyhDD}sD?Z!p9Qq*q<-W3@4+Ykt*X|y3!+tHe#1^H(2hWudq+f`u_By%kxlon2NjR$k$Z=bo)Ai{Kiw2)z+>ctZ88*I!OLY|!QoUz!6Bn4gfkxa0pzhEBY z?mVzy0GIhOs)3~$?@|%tZAMmlk$0@axF6MB+D;eT4a){nio|Quj8G($Wt^Kbf}fg- zrB56WY`WPqdsffI)55;DN3V}Fg@0t6g1bf#I(pqbT$hN4ALncBqU#b~9||qDQ7K}g zNB8-*fq7Gq15NP;JiWis-@(uaoH1@P@3KyJd(Lkg9|rRq8WH4QG29%|pO5RAk>zeg zugL7mf>_5IARDbY^bv$@4vT4zdiORI4Z#DE8I~(e_3J;KNbC|Ov5S?nxGUZGT>Wj7 z**z6A`|vfnoc_Bz`#z!EN(&52&qJ>ekOyumh;tZC=3~ZIR z1YDv!?a*!PF8n;{4Kv?|omxP>kJmG?e97#}W4u2}f(;Yn2Tz*`5kOvn&T~>An`JV0 zIAdsSbQLZbw!tK3I_C|ZSv{q|G{l6zrLw(TJ_FU#Wwsis%D;yT!W$A!L{1|CYcZ!= zOO{6(3Pt7aiEZIa_J5;Vou=b0#v-~4Hgef#R3b!=1VSDm*hnu z%b`^p`_XN~p#`U~YsXiatYS=v<`Sh{Kp>2mS=VB6Ij36~f7P$7&J=CgP5Lk-mdc?x zeG^V&W!9Pu4y4yeADDW_*4|(@Fwp3U6SMIsd$9%~E4$#3+1uK-@3ktr|j6et*^)gwACJKSsW=t|2h# z2xggpQ;x=&24NBPAFbY_|Eqv5cRh0W^fjd_|3@dvzaz6q|Cv(%qGJEA_3A$h=jt$C zN()$@zSeVV>w|*kF#XW};No)zT0i;~6-LJlD9rq@?dsO$ErFAj5qORK!opCMTFj9)q)Z!C(@z$n1?E&q98z*yWcY5$5(5+}(B~=F!zOSzqh8TjTXwRXgghPLeux};tQ6a&N5cL-zv~DU* z?}rjNsV=nT0;!E%yt4Q%Pn|ofV_3L>YWuySc|a)B!>N7|YaTDvztrV%a3s(oD_+VN zqR23~d%~cPi6svEEnv0#r@s;u+@-V%nEs1f-58_V0B#1E$6P41MSk7Ct)iJx+70 zZp#FQOzgu;tc&(_-CQ-OW5PctO{RuCKtMYF2!iki-qO@8qaxw2m5G4(>%|*^x+X(U ztW@a{F4Q$cc=T%0;2)UZ(Xhy+H`h8AU&h>|$t19nde+rciYzyUKf%$j@KhD0d*;Up z0OI8rB~f6aXfydWq8x?yymA?@Zej7 zpx?{xvz-r>9lr|2$P<%*mh~IP^EH@$_=$UuWu3kDCN zT0?1L#?RQYV4EX;oQgruv3N_nT+zyhs!DbsDVpAD+A#1&Bo2=Bi&l=?Wa|~XqF$+Y z#g#%eiKcbT6n!sYfHU+moD{6Pd; z3e5rr5K7$;^|s-X=5TFuW{m8Ei!b!4r zk$f4Lx{I5O2Q=8is;}{DA{+cop$coo!!6;xRE`-!k3?!$H<|3)P;}qZ!jG;WyaXH6 zd)Spb67{My^R~6}Z;TO|ZM5sxs-GfaQ`5Y)2FBb(62qN|uSXV$=8g=&yP=ts}scKx&8ti z?m0{bPYgP`M(7<#pocKGkgJ1ymqPXPb##~s@Gv`fW^r!&gYaQ#TDZB7YrEW!QNxEm z^04DO?s&Bg7*{W~&4Q7%_T!rJ#Q{ z5){e1wNo73@#&jTu85C9<-3%rNECFL^!4e&0_*isMMU!`T_`jVl(5pFqMTZ+s5YA1HfniCPJ;)PseEwB@(<-9Oa@HwcJaeK=GBN;^#eqk`k{x4> zvX=^FQ{|Q*&ftI&J0#1hJk~Di#UeRnWHsdhL0iDA3_foPjLuyWCv%_%%e(TbU)Qy8 zy@;HIKK<(j%p^?Ew6sCGhBEualxn_U)}fLt&rWloylz}sI~EvF zMMl_3om03}?SDZ#4&>W>Z0b{twJ}}VW+6=?%}5~TMmBj>NJNcxQy4XQRIwiqpE3kI zb|tNa*N{cGjYH^OpIU39byjq43Ds!K+=op0t9C>dC#<@=$z>0vD4!K~F~7Y3i9Rzz zs})e@WxlCm27~>QY)1@jhh`lFTwDXOZQ2#-z%Anylwi&IiLr-v=tx@DqC?r$WzQW_ zxGWDFjm@uONBxmVat_4-KN6BeV^IW^I#Bs@nI|Uq^}8zv^1Bud1ii5trz~S`XN3Y# z^B5*q`|)Tmg#6x(d$t6xiaU>(-78@HZaB9b!SE5$g4-hdV8|W9GMjhMVRZT@vr@UV zsmuV?Kt=SZHEA>RK^a;yG+l$@HSB!)^h!mRe9f|wCngMnF@Sy_44bB(t`50I2U!W;$8{%;K^BUAXPN0+C>9NKq*UuiO7Z0F^K4O=XEX?H6K{EEv??m zyrf-D0A5ANZ9Li=Pqc1XJ2zJEqrGHorvPnN{W4o7)~}>5XykB=$H=c0cv37oarG8h@(;)+|S6)$s2VN}&Q#(^cJ>Gi}(!#yemh9u%{E|P=-jK8(G+q7tdW#8>p^T9rp!9I;!dc>EcPcM8~-!6_nQz zMVA(2m{F~*=K9Ex`=mzejuCgKdQV}0M0szJH z8TtNhj_w0v@5R%ewQ4*4GKuDZi6>Bi$A5#yx;j#~d8bPGKK>cVhXOBs-_s4NTD_ON znqZH-Vu8v%+!V50s zN|SHx%UiM?2*wV@b|l@N(Pl@Z`KIGf3xVt2w@ZNFdIacYUjmOq##To#_ci#XQP>-) z!ecKXFoPg!-5e*by1@(mEHs%2(3GadBh?LRrqoew^w)oK{rU;n8M^}@Fb zqj(g0@o>!iE9J$x%?+`BBZx&#i^N&1K4wfO)S58tAp9AG>tSsaH%5#QLHOHn+TJ}2 zFv|)YN(mWQOcAoztQj^-6MZ&RHaXpgfDqsc8<14iKDFgQzU{DNUNW50zMplf|BR9? 
zsEWeBjIIFx2X@6+sUV7o2_4)nEO+}=kw;2X2B~va!HDryGLA!1xFT7y6Eja7nQ))2 z&*6X`X)66Kv0nG0V)r-pDO)gO9*SNgV9&oRRv~yN`Z$=w;SWe$A4j8T5M@{tGPHMU zcY#SNk2j$;EgM~)KGKa$@I$z#OaAGR?(eLiFjV1AWgjVyn6%pPmSQ$!bHPK067`bP zvGEdKeiOS%2~P{YlCTwrR0<@<#7U)bc_*RS`T=u>k!c%EIbyv1Uc(bGfb`OXa!F?_ zHk%I{0N(WrQXLW{VuX1vLn+dLe$9cdEXi2e30Um*%@$oRo)}dcfJK-NLgHO(uol@+ zmmxZPB4fTH$BcrGcG_CV%wF&>Es%1IB7PX&|{Wo<`jWB{|RtC<`rfW z++iz<(uZMACG{QDZ#(|Q{OGQ934FPmB{1Vq4EFIit|4>1k<xgZ6$O`99VRMy6pDX*|Xu2pky| zg;9$P!xh;8?UHtAkXel^kY^!app_6_mpCt}Tj!+@6{UtWPR@WM?;BCS!PpumW6I}8*hEz&l$ zgG<|wZnPP`XOvbaP1%yrYwBOp3Q0w?b^Lj#^Z3APSAi!{Lmsb)W>J)-6$R*bW()BY zhj$LDq}}D5Dv~h=XevqdpyHT+q>dCRIr&M9OMK%rPJBkPlj|sbr6;IYP|KLkAb}^v z>9TL7Fbeu3&xc|>P5vNRLkgfh<7X{Kwj34gXYA9JNw#eleN*0Zv3!=ND?cnfE2-qn z+j+;1rqbj)FQO_&WhK_JyCRWq{7E;Kt9>Y_sd{iv3fv{Tu}~3zM{ZJJr7B7q5YI)d zRQ*OzsQJ0NClFsGZ-afJB9d6aqdYD>^1=~?>F2Q#Qc*#-f9v>xY0{f1E*0bt#)i^T z%&f?_S?|-wqb$@3*Yp!bN8U6MIBG{Siagw0WoCYs=Y$!mA{|#o175AaBvFkbAGuQI zb^xY@r*}n|(g<`}c8bC;IcYJ4en@1q611uEuoHJ_59=ToY@N+4zy%n7W8T2V?RYwb z3u@lQBXV&rnUzQ%hya1DMhgJpgtQ!9zoco)1F2ugp^Y=CGA6ENLrT{Ud48_lIZ2Uo z%*FJ5H^+!oPghwS} zpb!s-Q}ENajrKZof{lCD4;@qYhK3Blcbj$ul3th=!CzZrrr-IZ4*_zDNnjsE3aCQ7%y9_c4|8g!RaU{f1 z`Iey3@%!YEW;x~!o9!8<^N~^w?d=k#7F~=na;5B$(kVtBXGX}*<_9{ygeltTOldrp z+x~9DzMQJ1J=Q$0a1S2Y?XHIbhmFKzi`3-ea7ui#d*!Ucc=Q|zD)Zo4mb64GrXe9g z2pJ^_5Yo~DKNG};?+5if^fbE||CIah$Q#kiU4SUTGxX{<}=dsEJ@!CE3oYdXZW zwz2TD{IR)tVDxgG_@pQJhWg026O>=En=q&?_RR6SfVK*BePHC)nYLnQ|2TBnjdfy} ziKL)vQ*RaectLc97~jF~Ge3XZ}bzAUNm(_K=Rw)UW*%bdxwa| z?J{!l2d7kCBWw48Rz+um>Gl-KX3Zn#vM6atd8y@la*&Jm#4GZKE_BuO53Y+28z+@aLHmI8=`%P8-+L>2clfPDop*1DoAy9e34Tw*S9Pt-4uDZ zXfiGU*t}V#h7h2NSZU%iZJsn-v>h}8XlzPSUga>Y(KLXUg>$)s(JAI+Th*nFf7VsG zfsVIosx$PX6ntH=F=!MQvn!bJC=EVHrB!VIUN9~#qLkg9=XvIb)pNiSMmIAsKdu(> zr)PiVbP$@Z|B@J(+!=8$SDk^$%^@LkLSmdQj+if3_Ouyvm@Svm0vRMPXk@v+DOl)k z2=7uU39b@>A{{4YQs$hZKiD<0kuVZR&Eic$idPLQ8E--x3;=m+oW4SKv^+xA!0zRy ziyAsS-vGgr=k9T-PCJ-;_DIE%tH6a_?QqNn#J2j>ra=J$SN^nQ~;EN{E%t94(=%}c&hTMqq7u3_Y2YT>3| z624WnrWWtoGM)LY) zMllTqL~rSZl~s?S)(zr>K7ThSX46N6+9ab$W!R?M(%i5OtQ6Lab^Fwe()wtE6@iwM z5)Bz=_H(tficz!j;e7*dq!i_?36He%02Nag41=Cc_^GC>fKuIUqCrYdl{^?{z*vpyFZ@Immg+~`ez_GKF{a3}L5mY={0dm3C-M{!DC zEgSAB6|f_CHRUVAv6kebZ~2EBUYkF@+WhE4v)0A)9Nc5YGg}(_d zhPgYtq4F;LCDwdHi?MW7y!ZHurD2KyyOp zciT77Vw<`i<#&0l-b;4cZ}-EjkR71jhFz}L58WzT+_pLR5a)rNQmSl*cBN>{^Z8Qt z4T>?Jr79u00Z-VR6oS4`t6tQzOWT_cn+oGf!#^&5#;p<$TC_*?azctl)qsdDGvEpd zuk>=<>~diYB4%i7rahUz=83-Usx6h;8vW{0kl9j~*!p&uK2SVxwq_3^i^IK~CzOn; zDG9F_R%RkuHkESzat+DS}nv-^16%0oD?J zr{?*aK}duYWqNw1m}Cqr?@`0$>1|fY1lnLF0zF=5Cwr=o>q+&ry1Yl8uC_;-$*=y! 
z>rqR2@a&E{i&4yv0d9K@^ED5+EbYoHgKW)H_bkc*mh(=iY{G4CFg{n5F6nFRr-~~w zuOvSq-79CeuA6t~=hwQPV%y$kDD7bE>W`bZ`#l6*KLXZGi*#%j{NkG{EW6#f4RBKc ze?}iRPb-bmPN8&=RAnN{TWa}o79+);b=Ur}yVd=D7C2lN=o0bXm7n+c6Px8jsY2JaYvnjuAZ@5idvEwP5UV2Oh9e{0+^p!EPnJacv0IcVCsJ9>OWXz@tGrjB|{Gnuw*QQ=| z8Og-2JM`BGUaR)c)z5EQKp*g*{RnJ1fTGX9{}2fQD|XT^LVtOyV*mHh@83m2TK{a2 z{|Wv6K`wuRkQ|+@jUB$m|4$&&f2SY)Ww`L4%!5j$4VyUzOx{bqG-L{DLk7%>A3t3% z0_{V%WCVl4Yk~dih%P2&I%zE})Li^obOSJe2>trO5W0m3yIPQDe`wDLPd=@1KHhMC zyzd<~Uwvb%?UBbRQ4Yz&81T<+t)3SS{WYyaD#O*N5YC|Rng!)Pj#Z`wz#XAjG5O*w z3k55xMVX(v6Fc?qWiW&Xp%e1U^6@kJx<~ppK$Ry@6Y- z@!MQBRR2A_Ec{D7?L#sU;lzdf3P)fpo1uGpaGd(Me6zh{6~%!An^d_Tw4v(Dh|T>g z{cQ2@r!v`$GH8q_*$j`gM=31I?L$`p8p0UGf*n-K$ooD86Ra&pLlD0e*%einBuXw; z7M&!iP=2-eeymbw>{9E_K~G25shklewGjD(Hi?+U__P*mZ2*pPUBOr%Z6am|URq&0 zBfDs9(AwwF?lwGBI>Bc5X5g_m*|-{sZgbFWPz24DslW%Wm7uP42N-`StWmy# zH~arHc;ooz+5Ahq_IKd+FJ^$EvAwgg<9~Di{RR|hi2lbbHK=q52ydk6|Xo(n3 zIB z6JPV+-UmM!jW7*3*IZ${j5ITHO;?(E?xGA+yaTu`-#e#tZ0A{>q)g4ed?%DrHywlJ z4N8U)vzo`8N;VFnOSMEUTZ(9*e}RENrUAU9s;y2aJ1vT^b1l;7tQ2m74(&QS_LtXI zQ~lA=Cgi`M8=`@vzZG|)*#@V7ZOpsgCo)_pdra+}bTi1Dma{6WaXoa7eeCdvh;<=# zYBoNKyATmCG#ktGsJ&kxyWV<8h;CtXn5~I2=C`V#sUgP1#g{wvuQBKZhn;^83HOf| zM0`>E8YvS*GV4UVfJ(#1%o27U6>q0dt0`N0a%igQQ0pSF!8}|tRvW^7*28znY|70(e70LX9&oKCX0%IkeUA{ijC(p)GOK%8NvFwROBX35xjfS*kx{Grpx51xCO zo876buN+(_Fx)*97^K1OMMgF!*06RIl5lcOl-GyMV2Q;`&8m|EQJwz4Xw%8NQn`sg zP*oUx(M496GbnPz+;9g#$&59yk8QF_n_q?ZtTLA@>e|8On5L4nr)J=6M=*lLl$c!R zjiF7Hr3rESm=U!|)Y2Q9L;&hdmf;V^m)6V1!5=W1asp|9+ud03#?c=cd4$Xu;xYHvv{qjE6n2j)toTPQ+l-8g-A*cZ!a zQU1Nz;%!bkOO0l^+^`QLxkvmwrPcIf*vIELNi!#6tSM8ks2xH|Q4RXwV zIQj3KYF7eRvjzCL;mqLkHy9=#SgaxWZ!gIOq-U}9&gK>l&p7zF23`bmd|AZuyrN1_ z7v|sXpy!+$1a6^bR+Dl0ekS8`{?NOn))jX@2i50LCw;K`#wMnQd55}#%sViQ4fojo zAADw|YqRx}ujw@QKQuW1E*SVP4bJ}?v&PieNybqIVE3=Lca*Z0BeDeQN1FcNuT0i( z#x%DLG&ID38eNRYl++4g25g2xdQCVNeWzKW36cD(4+2_NP0d2Smu>nRHT5i;X&I-6 z0{t}3qq8e7Z@RzC=9<5Sn|;}0(ci8aEZT$h9i}anI*xGnX2Y(oAHd>JTQ9}Qattse zj?giY6nAn%V?%E3T;^Sa3?(d;V_+@g3LWC>wGF{kEm6KtCCNITj1-C&aT8TQIB!a& zz*;ph!uAD1MF0Zmd!6C{9YuTS@8ZFebrj>xm*Flg-;+lJcRI7)VgYg{-z-FV)e=Wn zdd|G)Nhm>aOF0#_!;-anZEusQt(Kpaz*c)}{*b?eH_fgDQYT|6HnzX>;rWkEev@yi zg)<}*t<41JW#?VA(zQ|;0XU&mGI~kzolcy<2eHl@vEj~*#Mg^JqD|`T&)aKo*hyPv=^vZl6 z)-&I3k#){sH8Ea^Rv1Y_8AEI_M}pRcf%H;~d7El?76I6!2uCNASESA7b;fUMXYRUs z0gq;cRRh03qxP8@gvw~J!!5?Nsi5~VhU-Flg)jYx1DtY;Bym6%=I=O>AP{G3zUgPg zw&Yp&hKlGo2V;|;a{&oeJ+@!S5u!Xrlr8WeHxHX9m-sm~GbJ+B@*HGO2B(d6o@E|s z2^`x!tFzcdeZVm=5wBHY$a|a-oqM>JgzULs zmdse+JOsif=tVS|$PV9B;%@!2*6!hvkx`M{k!$CH1SMrz7-GBE+2c_>VXXPVo+44K zZ}#2K-ubI}s(3zjE{$aCOD^zjHEo`j-zshDdPwPyZv++Yh4P11gJO32v@!tg;B z@dGaRrXkVqaPRxIPw172Bsy<`7^0&NK>*q(k9)u#`yIXgE8ayxSJ6?biRTY1>idsx z+~yN=nq-#1pS#!t6|gLsgi`*h%&yP8E|mtSVc?W$0Dp=!m^8%zJM^OlE0m)V(Ipxc zdygNiBABFGqX&B5lRU#dvp)RCUI&eLkjwVvEF$BXi!V}~fzMhb{n5g~^Rw&z*7X$B z;vh(Ufial>r>^JU5S8@*ta|@Jj{es!Ov%jH%Id!eIe#0b{0kthV(o~cg6d;~(zfB$ zNQ%lJQn;wj231!Sii+abBo9?2!^#5v>{8!q-E8Hu7mRL39z(wJN)GGax=`5^)P@)&XqGNRJ%L zGK|CC73`3Z8mEZC)*r^Iqu%g>6yKZ7dJODxk^ZV4yjk z)}S}!5ZtdpDAaK#PndX0%>E6{A{?KAG!N}<$Zxq!sRdj4`4 z$PP4=>VTTX*$YZAikD_A5H`%pUYs4CZ1_5k(9!NOXX?NOHDo<{30Y2sd!oD@Yds;# zsbYm7N{O9!?ZH^8GQw@Cc<>jmrWf4S+@FG5mX`6iKI&&N#7c!b9;B+aFZ(+cxAn#{30h2nYCdDA&-09xcfqDCP0? 
zksUuxGWWImNt_M+R&R;uc9^`k6g@{FY_^NS^`R`EGl)7WWtYTP&Ak{ypSd(hRw@#VifP5ivT1t>PiqDf8J6}o+3nS@W->jh81 zBNi=C>G+lpcPW*=16eQMC+F(utT=xDZI^QqHlg#E37*vdK100qJAM}mAQNBHYeJmAZ$0>+(xu#=0woccvKUT@ zxcS_ND8?MG7t7}HTLH%xAbCf8AHPH%webm0#n!_aHedfSO;c~ydbi)GctVFTX^~*) z$$SxIaYPWj#_XxYJS|x3>Tpk565gSm6kEQvcS@v&tSMUDw)dpW;t}c~l(xIdM};;* zEVFOxvC$b-^y{cVb!MR$__v8!AkT^>ZrRyOk;;!C+r){F$#s-$Ufw?)tOZw# z$@HsL9xFNr{D8T0YARa8D%X^L3k2Qdr^CmRWU-Ry+3z;_Q=KuS*VCJ1b}RY?jSq67 zzhD;p?a9^E!)W=JyOQYF9`L{Vf&RV9`(J|5e?tBLsPY;K1DpUtX3jR2|KhCK%B`6o z{;t0xvCsGhPlyO(&*y;%zFD=f2sEUct-gaUK5FE+1BT?w%%AR6L?>B(%61@5WR!t z;d-pyiP;ck2|~Ea3;~jF2#<(vsTS;PT;dw4`7y#gCN#$M2GJ}&ci$y~$n#XcvFS*I z0oa)=xZnx^Or4isMRu8bHl5*aQO5IHIPj5tR57DpWGgA=qcP-x9 zn&`5gyYy*HsOm)acWEysAC`H`zLOF$jO_6y48LFBpCxc-8E^t%#}#los$|$nVm$CF z)3}l=TAk|2MYkStoR=!i=6+ukZ3wt2!r16uq%X1tDP76o_TfCrICRsMWC87N0a&D* zu{e#epdJQSYw{{ok)w9y3FhQt__~la1d9S@kM8;wp6E^3MSLVna4HRsSTbmH(bHnV zDyz=*Be(PXz2vsM>MQgEQEqRdJqDj&GK?@wXBR$}4V1+QS%>}~%HA=$vUb}RuBudQ z+qP}nw(YE#l~l4~+qSKW?TT&NMt#|PpY!g!-}~Lu?)|k|n`_R0Pak7EWAxGcchMdU zay*k_Wc+YOHFOkVdYzNPr(~;xL{-Z(1I`FusfO5{&)6gr~5`P zy|W8^pnH@&5i+q?T+$xdsXnn)xe9OgyJy>vc%*bHR@6Ns8$L>F8b3oeE7Zou?Sp1v z=1I*--lYveQIQ+OjeJY=bMTC}lVj~JjtNBXRrNkM2j;enXH3;f7E8K z-1~TySSSNbX>MCzki@uNBW?O6E8Vyf*k_+z5>lEXaw`waDxFve>&ojV6cQYzDGp(I z4Y$%Vk~fhy<8KFacW6b1S%6XIkK@Q*2D26xQM;Z%{$v)tnRAox$d3FGoH3Rk%mUof zrwMoJVZIEKev04ZpoMru4tXF%jPF?GaYE~R?HN9xA@86Gu;98`6v2@Hlvndw-vO^ZlRF!~dv)4EP+4|Mw0?q^ifK1TxAWUI`|W%%r3y3elBPGS*OA zZ($nECW90W1X{f1?H%Q9V04T1N^ci7#@Yc4F=}6v4t0hp#w0o$ zCem$|BojKww)WZvim!pkg_w)KKP@xQ(tf3`-pNxICT1`F^;W8N!pqT=S?g(QsR07L9YTwCPU0y#i&E-BiuqzAEk3Mt^J@^)B7+it$0G2 zT??I70nrFI?ki?rqIYgnU-nJh#JZ~_ybn?WTAPt|Cxa+;xj}}YW+}bR99lh0exq@- zDQdM5QKY(q?1pBQMq~!rR{~)ZjVR-3IOA!E76tBpNo$WzG^v7bwCKO(tY0?h9QOlK zGfMY))lUZs_s4r7iiTP78gXZ)?-LEl{J4JfS`O15BHYo*cOb{?vgeo%U#F{J(kINN zbCVQ&ZdFiCj?EpY=yaE?b&N20h&iF4U?_nlhKN?}L##2U92&FT?9i@F=7ey=uD3|+jynavt5sd*4t_bh`-oA6JM|%KFaP~Cb~Ohc6dq%)sCn_K zghE97Nrm?LWZLQ|GyW$th56N2Dk{32xIja-0Svma^&u7EcPp75VJGROUd391po3Zo zism3`TnnCS5A(@KPE~_vi;uS7k7JvL4+SRTDYXn950H2x-<3f2^wMt98l?^Wq#*Ok z036$@R~dSJHFXjVT+JGZ z!B?sM_R}GDnsNMrGR41Mg`fabYtB_Qxy@kXK#&09thP#u*Eg-=vXBWB&BZOvski1Q z3E3X8aP?M7h6H1w2a2-QaEcN)l5EU4LnVs4ciNA)!kJ@zhl^r&KglG^OW$B;ro3`~ zW2dv1uX^NVZ=q)k4%eJ^$J&rY&me2O-K#HZH(k+>etp3&YGIxHd4JXbG^saxU&gwO zZ6WyM#DI$y69yp*OkxT>#&xeP=5Apn4e2y^0)9PNVer%qp9*YwagA~;qRw^hUii&n zZV$2ada`Hn`=NV$2Vq@C(u1tdGChk8QA17S*PNXpyIK9oPhl5bk*!^!jB)LRS5KXo z`Q~|%=Su*pd^){<>B44H$DS~Mc6NV2{IBK6-@_BeU*YM$sX6~<<(S4A> zHIk%V_Kg&02VE3vziF-lwN~NBnn<*hM{Jy@ga9_Yo5b|X0c}`X?aTdiiI4~J6;BhwHc=Ybt$4wz_&W9n-km0vS z%+7&6hNKtng%_;5Yp(A~iJ*%Q!`mIBBsG=XF7#c|4D7d>8(a0(;7#0uAuPqH>2mPf zineMUEOpg7?brzy@*L2E#9r|Jt|FZHjtGMwU)saY=rmRkzfb{&+J-T^}y(53+d^$|%##WIWCkcwh-ran#TT5LLUivx`{Mzc; zrv8<%UMaJFn5k@TjxoWyvqJR{j$~X}ypG|4qf`b9EpGOq4R5+%8#0efb3bv@#zy^> zno(&&3JVyon6@#Kl6s3n%U=N1$FCW)EP}mrK6pF(Ha;>8MacRGt+m!bW>xa zJ}}B40fzq)9TD~IIb(}(aQva#!A5(d7HdH0Tvu z8c`OgDuDrI0%B|A#;}{=d%eU$-wUX(1>3PlqoBLnmiT=g+laNjo!pl7Bt> zr++EnzfDU2RcLo;LwcjkzyJ9l8!|CZ7U0kom;@CTg7~VUfe-|N1p^uP1+1cFM6Ne( z;)1E2T0Bupw5)Q$b3Q+L=I26_S{_M1MPAd2mguSCw3T&ZR%7Fac=?Cl<-{4;awg87 zn5VA~9jVV9n?4=veSO)RE(ijlPB?<)P(zViJ{|-m$b*&q#-)gMbCIfi#AqR)y}5M? 
zKJID_ljwJdWWkXF!UTKJX$KScM_eGZ?)es(GdJF1RD1Y)5T^{EF%rd(9d7>AEy``kyq(O^UmI_GnYjP(w@ z1#gu=u>^Y8w)Esh9*ugZ>eqRBX1A7BtdD$zt zQqUneI1*^2->=>*Nq>-j%Z8pjCJvL(w`l6ii$d%!uJn~FbZcW;&h`~V7U zJfrG35Rz&Z!dNDlZ6{YFN0MF-Yoq9OG4LBBD$ZG|HPvWYP;T2-qYP$+Edf3#-C<@@ z8lo1Y!PTSbW(2YQVrtJptzHr}k!Uu6A%_@51^^<+Rj-DzY-ov>-IFV0lBBgl{HT*7 zA=KO{Ew0^(qJKCHvy)4gBcZTp|2jqnwtjyX^TD;w-(_2b1_A7*?!Y+xn__P-Ur=V{ zVlBNnizqgPhh>3?0q}W4D;p7E($>S>H|-1Cu}dwARa#`l@ksR6dHL0Gco3UZ5S!e- zyXghm{jlym8SyXG5JHTG-F~34HsC&S*X~T6j6+XQW1e02L6cVTZ6C4WAXu{B%SL4WlZeca&UCZ0u*5bk z3f4mNp{ap012Ug?e$)9^qs|=YugF&QDUuW(@oLq*m(t+p2;xi4@tiAn$S{~<04bG2{GLYn{2Z7FvJjfmoAvpP zXKl2C^ah#!F#eo0J7~a%QWpCWlXL;K@CM>>jz$gnV$@Mfb3cRk>Hr8Tx+9>TqT9uh z@o>XCH(a+xq;FtJR>KLUO@hVnc_d(28vCHPutmn0*qeEp@4ilGU_Y?c$7s4f-!YSZ za6PDpwSciPT8iw_epd=seTzF(teKy5eD^x2H}}PTINpp(J~XTx^6&s()c-8JGzhYK z-T}7CIDZud40SQQfqyd;+kv<_a-cCzseyyca(~M!(6IDG@fzd-#YcJ#J))>VJn6%% z!fji;MY9jy5gObhZLxX_bS*ezC_#kls^p1LP*>CAB<;o&uXpZ_LsGYg?1j?zpj*Bs zS=(6O+45JX-qX_B8w;s3zViW**gf=G8Dp6~TvFp<$x|b6)ghC@MQOPi9<8*MDh)C` zdazcexmCFjcZpxaW>R@tYRO&YV>4Y>mUD##r(0L2Z??M}Hv5Xawtz{cxV%N%xzDf) zDUi~2faP~WPB+6D8@OXN^5a$vMKWS+^#wUjd{daRmq$+Q^q-{sBE?O*YkcG2&Hfhx zJg1${7QKDr41wINFpNo_SBhIXp1yK_UlrYhwCGx)`nm7QNnF&@dLZU;C$WUH1oi_Ern~y9?PTqI_|Ovht%|J zdyqw8TgeI&tv>^QSc>j3*_n?g5pPacMaXM`wKUb5NGK(Ez;)L^YW+0Gh#mj#;YsorWEVJni$HXOt>&!%2 zgy{Zqu`1=If;DyKZCanfrkE1!vuIgjWfL`8)n$o8=Gw#*)mUn7Qu>_#O3sNIix{>9 zMAeq_XQsro`VWRh;BA}fe;_0+o8Kk0&BvpUc2GtZ8)vdak+sy|atChoKWlIH-++F) z(I%OOQI4sgyQWzbIR=D1!SJGJ?}yvoBsk>E55PLo>V}cyY?cllb{i2uA|y)hAa+Hw zn-`&na>sjBo<$HB*q>`KmNpZ0hea}ks}hxqC>!y4xRyy_dq$Dd9QokSpD)jZM7@qR z%^xH7L`nftf9{b280rAbpQ~1pJmbp8-+Pao+Qnxi54n#-w7N0rY8xYlYjuvrG@4>`TNNu%tG$c_jCxvL$~4pQKu6CGBnR`i!yNa7hS# zPW8|%T-mNp@63eMy@Z*whe0-7H*#(!MZbmx^^3#7DKY`BEh72rz3*bnnCYo42@Hb^ zh%&C7$2nt1+E{-QR)hbbC3ecDERfr&zQEbTy%bqWzFUFoiY_9U-QUO5uwcYg+mmZn zbB~$b@>Ny2iyLQpcLyy88jyEHbyC0Notnnmfi2EuMZ;Ibc@eMg_4D3!^sg@KE$?(o z+A>0Jzhz;bNw*Z$n7OgVI*Xvc|L`rTOH{MauLCqrUc?U=6SVk`d*a+ z(!M>8GxmB`L^By3=0GA)ADzp9X9Cxw^OU&tNe+Bn5a){1#ipcY8U^*z+RS-f$}7Nz z3!WJfZiLcHoEmrQoUYOFhpUh--bULfL|~+VjaPt_CUWzx%UVqN+~m5O#@fO=8@-cs z9Ncn{bdd`@uJg>gGrj$qcdSGVvyUb_ZZ&Oy-rE~ro4#>8kAYiRG6yh!6H)Huk#C}a zISbEe=I7n39o%f8GGE2C-pg(>vSY>>e&Xdi80U_X)v7TTZqX2As;7ee$;-WLovm*` zmU9hJ){7HL5ASX^tEZSs$JvX3Lg#>#7qY(5<`%#7R4CXV`am6hi`NWNAmTL~S2yN7^qo4QqMvk80SiIm$D}iltiJ-!i6w(`wn<|#S~(XUd*8ME@JiqVec=xAsoOG$AVy$fi&CFt``)l5#hnqgFp!7pm4>N z!jzSDmS{JoFVOU*!qETVT^o&lwEt?PGc93~=`$gXb~yQRWa(cP@jD?_=p3X@IJJ^0 zpVc&?`*BU6#GRmv8guxPudZx-Dqh!=P)(rpDM#sWZtFw`Q^k~L$m1{$U=yvIUM91r z66=|~D#o3O%;WkR$+Y1RVw3!4qmn8$T7M;$4EBjz3p_YM(Tj-gLHi9?n=*a6cK zhA{fLW5m}fst)9Xo0G_J-##Xi*zpa2=OT&xurm_l1UKaSDL1~B>d0({GMBKK{R>M~ z!Qo5Rdx{=`y>1IWaylbGcN@f;^ZlV0{KAx{H6?O4UPY{)BS4Bx-;yV4?;1mrbt`mn@-|&d{4oO>SV( zR)X8(ARK*JJ(6S+NV!&w6lUKVjdJPC=cvy`tIw*BK7aV@yYi~f#O})S(K}Z3?i$}w z|M64*Lhb3oo>$H9InBNl{*m|**4d+X7x|9rBg~%n8l`uq$9@EXv2D*K7TI}>|8S%{JYBQ#35(J_fThEt^TybD+8LW}7<=Cqvf7R7_l z<7`W%;mAuVcHWyC3#3z5lC+ADtUE_dShShY3*tO@MpB73qE};E2@++DCKjJgguFBF z2dxt(Dq!r)Ee^+n847UG7aMl4y<$1*^Kw8pt$I{hH^yI~R~%(!1q~IcJ6D0}C@7xw zYnpvsmlIVU5LGT0XW+5z=-lhev`YZuN$bI>aIGuq>d`Sf=H&Mx*jJRRdt6q~N()IH z+!9KM@=A;3MY~4WN5qsf5Zfn^T-azzjzE470LG0ZaNKerTXs0Z6$ddS%eHS5$fIeO3t&c?8;^t8c`lA(HHK^~;e8A%`EoMGf>)RV z7^7Z$uS~o198c!3b*+UA#DsaFWhDH42yZ)cYEIb*yJAu&=zTPn*$^}=+C=dSwzQI* zL^8M5)7lF;^z}6ETYsN7`8%s!3+{6Xu|BvavMZYAtSLwIN1Eo8DPtTx0YW_yOub&K zU*pevsIJNC%W2iyWWS=cufZ-34yU$oaeCTe?&0`7;PH1ryrbYPzw_noUNP?PBBu@Q ze)EnU!fjzj-Zl#1K`@;nO$??SC3hdcD2%#XPykSfwfil`ZC&!prk0osUZkg zK@R&BD=J*$rreUmZu?LuKV0@Wj?)(2mWg$={)??y1Zju@@hKw+bxl-2#gL3nKbCD? 
zOd)nqZw0%$khv7^=Kch@E?sX?O=KPvRoF%TQzQiimx!Z%5qEu16K)}Vr8a+;-jtU9Z(IBKr2UNew=J5<1}HAzu&2A!!&+iom;~U zvEBp}y7HGO?qZZ}l4j-MEvI%GOkhi}J0Nw`uxMw}wvaz{aC)s3_vvAF*2{xfq^v>(baVAzQ^hSsJcvwHlF>bj>Fep{81yw&v(}6S9;J41IqE?F?iX~CzS>4a9E1Ah=Sdsj}mnOEP)zG#ecFVt|UiBGP+9 zOu7!J&X&QnCYP7)7|1z6!%_6nnwd+nwWeqzFu36T0pww?a~M8)vM;8`Iq?hmpI#zM**ip4FJjukrLBF=q11poS-snCy=Q5}EHX z5cf4C?;K!CriC%7(I^gHPf26Q(^C#nA#bUXU2|xDpchalP!gCH^k;|{F0`!TBr-1C zn=i0dF!o{9ParO8?Ak842eabU43bw~hp=|qY){k}nr8`Te6eh}nYkryBXom!Qw~-@ki8~TTAe--r&CsNdJSG@yYBmvou$>Fmy6C zAu+Zw1OWck7OYfVRhm^m(GAvyMrqyrMTn7G852MdHUKRYD+O(lTS70BhG#)g5K}OI z1b?Ld1kXjx{IpfS8_~7b5-&`-%|txFmm#PAuzWh>Niwr&8)3kbH6`5BlGXt<&=oUuW0@a{oWe%x zrxnt2t+=vDfrEi&S3$k5!1OVx!EGDF38QD^CCQ`X%5s(s5 zUD~%2^BDS~pL&?@Q?krJb9tj5Gx@B<(g;^MfH7bYRCafNv%BZ)Mb)S6oLEw%uF ztWzRQd<<*orBX$YI|Z%)dli>bHLhf113bz7plPVm+#n%TjyS`b(Gd#F8b@3l{nAa- zzBs;co(`9NdjY8iX^&^Hw<(JD1nvt~OCWu-{D`^)Lm*}vjh$7PoV<+YJZd-$EYn>J z&rPj4v$rngk?XjEWu7d*2~aBy{Ffsoez{KR zkzb|@txxMTXdpkGfKNy7 zPbFJ=fTIn)ypy4^jj6nu8Nk$8`Crff&8#%GvHuh!Wd9cy6IHZ7H4acdigc2aDkY|g z@_uL)HViu{|IUMefYkV^OmOX}0dzH7uB>kl`GEZs;_hD(`eOEJ+L^V?MwOf5jPGhT zdCz_5&CKuT^8sb>y&k4E09!(tMx)8#NGo=&VPN#3iTo|#Qc4d+IBUdq@;8&|N|Nz2 zJSNr*2sTV4-~#PA<1w(IrGv1*ymj(r#~7@spP^%{*{#*EEeib%($YQA0{Ga=W@RE- z*s{yD>L1T!yNFy02}{eBmMe-V8j;Jn57rq2)A#)H?GPi)z1616Xt>~(+dCVZ%`I$j z^dO4ieT_e^G0s{4Mpk=I#l_ZR3~{VyqbSB&szg{^$KTTYs6+t5d+Z_Nmo|--oT4yz zZo_eof@?AOO%)B+ukndE1__={WG$%FSx3YXH5~zR;X_O@tTu?|n3+VbwrR9JQys&U zH$PC-k#0*w!z#rRpdRj_${j@&Vv55I5C&&+1$nT-i=b)2atF)qcf(>b`eF#-Gjl{q zMS2UWDAc>jSu#zuVbP>!CehdJsIzDW5-U}dWqehvBl$OCY+^*GHXtej%N_%1fRSXAR;apmNCWvhVbPGaNNd|Z{QyN+mOmaKGsFQgSsXg zDiwRF?S|q#qGc9T*Pb0C%zUs?pHCRtL&%LfXReKUAq9Z435xG=Aj=^H8k?)=>W84d zGY;XQ&4ovRSbRwiU7*{dr>sVF%`DtlAcAR;;r8{ITRbW!4CsulOcn zRhRG!EhG`S4)ADGow5H5li>wN9R|$3<64n!-~$-2+&}pUCzmz#-G6}|!^bz`+CKG6 zoP=p8g`*;sQ1kjJo)GiFKL}RvhYfd&}_6rCmP^f4|Tru2qour<;mEZ2kf9TwRIUyEK8giW6QQ8_0 z-W4a?us$2Z&lbnZpHMo;f5AAHeqrt%RW#!?qb-eUuKdp^KY)JzWMF<^w#xZKY~E55 zFA(n;qr#Qkaxb(&cR&7ZN~9*g#PZ{x9t3nf?x&|5~BURQa@x7DVAy zmKsY+{A9m`Y~>qS#x@d!{U8}87J{P>hch)ywS5rm*C$0JwaUYneN!B4hbM$W zntiWvd)VMMmz(6R|71gaK;;Bt0#ycXY;-FyiVTkQk+j#C*Y|x0*UfbnWDTRax!5^{ zhcfK2W*iPNS8nptVXj5X<-FQ(4YAkOP@0T2Zst#rK(viDR`#{qb*xv^)@Oq%9S;Rg zRydzpagvkPZcOZUoJZ1SZwc6AMS{b=$3dYKU?6dh0fHtNRPDa{B<1pUn^GK%edZr) zvSiqE4_oo0i!+PBR&tzR_P{1Qv6VzRY$Y3aFIGc!j|mhi6`h+;;W=j1@)t zb&?b*(EsRewxWr~1!*Zu)e%$^w4-J8_hWSx$qOdRVwgsG3(%Pyj6nk6)C?PJScp0? 
zDHFJ7Kqh=p;p;m?RBq$6BSr6Az6AP#3gzY&CvfK z+1{_SH+JkT-FY>^W9W zL|O7WC9hvdS+`u4(5uJNx(mT1NOo2#@^LMm(9iM?iIO*Vg~53DiXQa=hq<|rva4d9 z&+Z6W=lCQqq_~lP%b@Z2*=Wi1eWx727^vz+WD-B3_U*mL*ia+`+WFycf3a>6y|h^h zC4>Rz(EW5)SD6A`Z@!LWa?1ml6RnBlJ|Vi%_ey7Mk;cGZVaQe?Tx*9r)D2u7y#>@R z4q^OJqwp0da)_AO25nhJUbD#=x6eZMXM=Dc|M~J$piz&fKVP60qf$6Cf#Ktyme3oV z3tXk20QvDB0Qq}9`G3`$`mb6nQrT8{Rsg{lPDnmwL^%Y!}wc3ZyIIR@UsWIp$T2^&eyLvCV2Z0>IWuRzpC z7OCYim#F&o;Ox&FRu!2-(t4ZaMJM)KRO%2%GUTT!{OzD6G00>JFycD5?$==Y6i6rS zkSUzgd#fl~Tgk9Ufpneir{~Ntd(ertA5p7#$?2}Fce<^T7uO4X;vQKJLPTcl`3mHA z!f3>c<qp%T5`$6PsXmxFR#x0`^PDt$~ z4J14Gwqt*HS z1vwrD@I1N#cmyEVi89e?vILo>Oj9|LCR%d>0ei(Ck&{KA5N0aQ zQ+R?|_PaKbP)E65-eOrWv3Tq%U*Q7dohsNZ{c&=u6Q~{L$?xeZ00M9}3JDHkLV{Tc zOX$_VaYulNi?g$=9t5|bsHRBay6WEKp7KvFLa_DnIZX^S~ z4sZ0@r(q8nO;IN{P1B6>F9fVnu%PCCBB1+!Ty56+tD^cp?P6LRnpqo?eEv%!V{a~N zX!m(_aUvlxvvC1f2-?_uJ_C^aZyAH}udVx4yHB9-#(~!*Asy)Tc`ne$UdV7qWw@yFaXr_Q9I3S|hX%Oe6!T>K=nYlg;X zLHMgEI-S&7@LhUf&_kztUt%C0TU$PdC-sX|69n@EeXmmE1HMQruGKRS_t{vBwGbXp z2D|wb;(`2IN-$KQfxF(fx?}26wnaT>{?J492p_$E4 zcm8RFTh*HiIF*zT?^^6J#R@SGr_VIeh0}5ua8N0IdxO}0Rcp*$IFuxo#>AAyZ-~z0 zY2!{^B(XYT%oEvaxOP8r&sv4?v~OWsC#!3`PYgGuc&jbj;UfS3a2!wehJ6WXHTA_@ zg|DVheY9IPMsFKbm|j+}%g7K*>KB+a@)6c!qPcb%|6736J9bs-Xe;uQ_8GlXgksBN zaGxi8?YPGf+UT{{>foh@9^b{T|9*n8-*`Xn>gGS{77m`6XxPs~Dg7UZ^6y#@6n`Dc ze{CMhS~@w|JINY;GTQ!Ko3Q@vUaazI%aT4Ui!uOZtGJN2RTx4fr9_FPOv1<)W60gz zdttK1&e3Q)%x8w!}qVXi@&Wjb{`uW0P5p{AO3o-3|?`12ZJ ztbf{FtU>53K16c0sDw$Y+XXVQR_4-%s#$P3lVGe+XKBO9_Db$!42CP}^f}vVr+gK} zPTCFps^1)>7-L#XK1zDcH02PWPW507^lo!CA+1<4qROUZlg_p4sN09sBTE>s&17|E zbVj44#0#F}IxkDWh`aA$%`gh7RK)E#mT^qKbm}{Bnsu-1lpWZM=ZNh5?m0chpaN^t zHUp2ljVXQ7jG{my-e-U$nW6GM)A09Vky4bV#B-uVfDn?oqg-wTL&M}kL#iZtI>-UG zq$&;?Q@Ny0x5c;h7KvUk?WCy~tI&yY;(EXz%67dvHd1(@XeBIZ_MLatUvSQ35^>Zd*LHIA zi(7p$viSA>^Qu^-ZJxO>G~yVyr~Q=dKL-@VzyVlA2?E#OtG2mZyh}Yvx$&iDU45kO z=)w)gXqjPH%60o>kruX-Opc_xO2$$+$bS5?@?XvAj|Zg;YBm6DqR)Z8%ObFjixux-UJwHXT~3d;j}<3m0U0*5EJ~)+b~q>2^Y`!;e)yH` zsE5u<%gGiI(`$b~*QdxI7#%`#&%|M7Z{EHtT@IrVBYYzeF!*egyq1YOKpG<&@DIk4 zy^8E*4k$x8v?g`qt|)GJy6zx!yr#S%->Bjid_XG&`fEsPK&yKvk7J|_lq$wp*i%4v84~qot<_P}+{|^Awjh=d(egdfZf2?cp{sNHk zXPkC6b)x@^2}<%W*aRt`%Kf9A1Qcr44HIZZLq-2inOyfxk1{}n(NZa}KuYM1l|~qI z>4I&wv2`D6E#^9N`Ja|&n|7ROGI zGpO(*8+?Mw%;NzyoEzuQ+SdCw7hmH5LlVDn_^8(NmiaIwe;-7ySksV7j;gx?Ez^*K zVmFMH?B~}6hbO8Ce+*$~(tZ*HA}gXOIbakMJk|H{f?M%7+GgDg)bjwS0`+k@{lXvL zoevAi_0*)jh};JrooQ*1h2e##x-4NxLW`7K3yo5&L~jr3=>2_RQjYov`kpS=Gfu;Z z+!lWtHInc&I_`wjp_v$@7&`A3(BP0h&_*W*lJZL9LY;QQOD#wqtb4-{!+=yixj75l zrATa(_5DU=#cmn+jhYqLfg9rED8P$58~HRQK1$qzJH&AW(eB<@;=&e!k)+Mm>hI=V zHZd*JM3Y!1#omZnuUe(B!kxlIY5OSx(G~KG2Mx@uE_P$jG?9hdloCg<-G??x+SnVZu@9uwltp|eT61ZIo2U{DFI_{d z*iOkSgUlhQ00k?4^KOnrp!PYs8f-W| zH)mEI4#SwUk$Fv$~o{+xB_5i z*ur~F%H?cFrqFDGrZF$#yLv^%RLp_F~v+g zALy=nWb(le{+s{0ziVZKg&q^_FxsGqqXLM?8 zUts`VvRQS|ds=2$hTb(uNNa4*v9YQJKc$x&W#U*zsAra*eM#xy0$>h03+~b?R<(O! zghs}X?Ny&>W3f1LW))k;9ZVGbGRasK`&4)GTh%NX3d%Pfi3&_BNE~5JkskVt{O%Q5 zfV=Oj)lKi+%#~YR7t_ZT!*u;FYyGrD*9NQS2rmZuEEThycotB? 
zeB8r^bX5IB>{UkbVr?n|xHi8zt08(iOo#4#wX+T<~jC<*1EcfKPar_c9ej$yb z@1N4ck{durqIaN5#CXBhdc*Gz*4rIl_x9vSTpGB8gPPLMcXu;GKioi4fB`@Dicu1% z_*se}9y*9rnHghEv8zt=n?ta8mxoRPNjaroR44DwPx%!+^ec$8;#YMGQnCElY~SE% zTpbVpdGkL*k}>E+Z+*O75c-oonT9JsnB%;AIvfs?8sInAE4f9C~ zhDhA`T^y@pGdQhlgY6wrH|irLrefwO)0pN@`52oQg>!6QdkvTXUhS_NkDWf3t8USF ze_ptMFrC1-3J>N|FYxFyWW0)vMj`MbE|?$;=rV)#sxs+>=v)ARp=GCD{>4ccQK$#a z7=u=IqE+O?h@NYkNQQeOTjlqtU`;(aTyL(y+;;er9-cqEv%r~JaKSxx<2Kz!_{T_5 z(x3-|~V&VFnEgV}TP4S;@6XguYaB45IT_ZS&fI!60BW>ag2&@F^pJ^`cG_OD&4uNOH}t_?O5j|xDR4H3 zFK_B6?%~)uw@6N04>gA^akzTpyb)M6>FgpVlyIMNLzD$Bk>PeE3=>E0>=jzy+i-BR zLlR-J$rK>{T;XcL)0)!)bDUy3UBD797N9JZ{glmX`9>mfmL zVSL8cs$vw4E4#*J5|DTNeWOUE2*UL%*KbujF;EQbB96lMtU;FPcB4bi+pc3=6CiAJ zQf2QO%?j@RdU>mR%ODLCOON(a^f_4#FCEmLVOT{$)R-y^X2y`}m}c=xfsQ%9svMG< ziBX-@D+m0`E6321Y;|UnhOi~|v-Dtf^u-d2v4t8*Ax}@FrBV>U;FjlBDkbw8tI^b7 zIvEOZs#hZulZZ=6@<{AWYk*UwmzByHH3zp8Gw;Od9Bku{z3t_=G&a8%DP+lwTB1-U zk1k~OODgR2S)!&l7(HhkCr&W#?0GUjAmbm$UnzC@ZsuOw2hmcf&j}yDEMv1|CYZf`gWhw6k&k4&Ed%UA%iXr?Aib6uxk=9mrW)^$LNVf7Ab#4dCoi| z(g@!;9Xy)&(X(f-@$$mQ_nNQq0?It_TC(m1W_JN;H%XF^s?xhvnR2fhzo+T#@8Vf+ z$(~toum6t-QhER7d)j9Nq5h9C>fh@D>c1k$|7IboY-#IY^B3s<>r$w8s)nVBwox3G zM?n`yoMRMI5CRNG#|lFD$*5~Omy`3e{O5S|v0i@1!noG$ zms^h8FZE{G0VxfP$K&vm8;2PkT|TCZ&yRk8uIax>2qylhUpr}%zeCNhLcU6l9Msg+ zw_@d92X15?XIQvI{y4WSH%7vs$*|(L7-?ImARRMXwODX+cU!R9#T`M8;i|OF&uwmN zaZ_uXFWaEQ<|nMEU-iy!K1DAP+|t|5RP!k&m46Y7>uL;*`OtV%la{Vzhd7|ckoC!5 zEjc>3t?d1kb@Y`9^?dvZsRjbXZmWmPAN>Mh%lP^i_V=3ljwx89Xy}%n>0Kb?@X_#M zrpwRoa%8+x0wpI0Bii4nYdetuWkK35I7Zd_Wj@@42nRa(zf4|1bz zp!8HTC5Dc@xGLI;!B79gW5b=;S z-qr~^PkvzwApnF>b~GHy!};{=niqagM|*wKuC?!Z>#92hN~Sm&ZOtxm9BAhY6KFjZ z@%8kFpX*g<1kjuMzhi4*ELJVBX`nx7PcI)W$mVZC9BWiZwihYNqeWOW~+otdtPyws8!8bpukeZ3k+o*1d6%-n7ZvnMCZ&R#wrW#_8Wv0^0N zvve3w+m#fJU)5>|yP)X=#UVMqkm?v5ILdtJ#-O1CPXI^WC zH@^Dz8{~l7&lMAP@5)2!PW?=takXf-A>6eY2t@ZDQB{8Qr#+l0vrv?)-w>fUjxSCC z7TtOFh5XG(D5Xe+&_Pr~<)!4}zT=e_EenfXcex_}mPiqaBzUWjNcGflU5 zaT!Bxo@5lNB?N68J9!!7u$L+2v-$E^nZXCqc#<_nrOl`CrLHHt--KMet2IXF!-PLN`g@Q8lst)q^ z|5NO9XtYJ&$k(g`Q;)zya;A8CuqdYTQ*A;=WAn*5vy=8=8T+DqKDn~%H9*0Db?i6x;PG#hXc_D^Q+M}=_$nuwY3y2+ep!}0ntBM=0W_t zdXVQH$(*0$P{D=Y@X1@@q>tH3xfVHpqCvjRw_#V$VAyNHP=REW-|5NVO-fbqtQvPw z*qznOH2<;)L!?tDX0uIkXr1lLJtQT~P-|f{r@WzZVQkX=<7mrbYs3=;@t7dt?JJ^Y zjy-fV1yUFC@F1>o19Lu5&g=$o^v~dyOSt6p0xC(JvFXVuYEHO})#oD0soLJQ`J6!=#Y0NXnbT^*AJc4q&;|Mj{^-@cbJjr>I8nz&;Xg zs;>AtiFiD1@R^J>V%#;B&tRd>VqIbo)FxdI=V6i<@T*UE&#wWEj-Lk*N!EQvJigEx z{UAEPN;llrV%@wjJQ|+f(X#PKqxx(Zae#eDh}L~&$QI}lett_e*vq_IA%m8AgvPes zJ_toVw(2g#+gx6ouS1tlcgH$IRXR^c7^NCRNIcq}C*3-lse^)gw+JTq4&a{y%S2E_ zKbs^_#oi&R5xxSBKMhPMb>5JosCO>^EO|w%@kQo(p)QyAg*O4V!0NSf55=`a5$QhA z*0Sc}Nc5fRZ|eW&x(NNUzxu3f9?H*|KSlXY;9+$>+s$6`a2o$jOqLhNCy`%yl*Gcr`;#b zCm>|Ys4`DRAjA*x^91h0P7R-g8)F5g1LI$Mze3L*xnjOl=KBv7hj*VAU0nd{I~qDF zD~;u@MU0Rdl9e|~_7rD987U)RpyKRnq*b*&66{(Fbs2l$0oEMPn-!G-*@~7r?d+ zdND#3Bf0<)Xj0nJPcc8?{%~VQ7QQu+PEj#?(0ii;T+?;o+J6(k4v=^5-2!~3F`S2O zJ#nsp_dw*?DZ>HZd($v&%qI2ThSKCp+BuAsS=6S{=?dV4z~)3mv-9AL+N7ggIK2?? 
z8t@M(_xD*BbDYMlzY}#^DB>=|cchP&hk~>$9hfti|5Y^ZP6j`$$$5q)7Z!;i&Z=6V zVi@FMFWzTV$z>iInJ|PjY^{S}cpwS{>t4&m*`OW@K?Uzt?IR8@kwUSHIrZzz30FUe zg#q*nrCU&Kb14)lXlLf!kUn88t!#adkxcL|;Vg3n&BCcY5?UmGDHb$rZ6Ib(!G{aAdF*er@tN2 zS&vaDi}4tSbCgA^C4aQ=g7=svYXp=DRJYvAQ~8%)ex^}O3{%Z($fC_E+osEG78GRv zYuy;s00b8NZYAFRk5=Np$F$J@j_H42H~#53IM}&f(e0@wxM=rtgN1 z@QhKl?)!N`9^w0WTW9q~sLQ2pahUi)IW52~e-drYXVcdlK}9>iEK)zcBM!bnHY)7O zjaG^#RdB29t%e{Kwy&$@nz!kTL&5}ULiLW~1#|Y}UOn^y294gX4Z&>fX^j{To2y}s z{BEr4rJV&UaDjJR8!7ok#y#At~dj zZ|;#{jaJRgExAN429TAQ?Si5z($C$W8x+L2d-rlDbzo&b;#U$vh&y=i zTgo7&m}}c4(c>=8zrxo{=B8NkJAB{&hq?#qzp1bi{(pw=-@9C)@^{HAfm~Hqgn=P5 z2LXb}B*jZa4m4j9<5JX%vHO8;Z3|%$3$`)K8zDB8k;?G&_P{qa5&fQm^Ih+f-@()y zp=`6Ufr)A4hc`Mqd%nFrZfYuSJY0Q!-f4s7!o6Y$CsB1;6fC@0{2m;EUuw+Dl20W& zQY4U(oWmRp;mIdT>mOlh&}jXYL1TU*r?iAEV!d&FrDqU!R?NHP&G(1egjQ1@aRO=In+f0B;6fqV})gnZI7}A9~V$s_0!Qq9-Q^=pK%+ODrh@jjs zkD;0V1Fpf->R6dEQ669z0%)Y9YpzWy zAhy$Yn|iOA!3$JaAX}N1B0w2hfW=}&^&%JO^~5+=uh~ z+P+DtTTQhR092gDy(7ovxg*cfo#s%KKZt;iRFVZaRFsKMRptu>P^33eYs|eNH}i8v znkT#?FENccB%kY5r#7?~x~j_>5W9SUup$zdrr9@6gfAVYJb9+oi!I9!mi z)Kl!~J-E{ENIVptkuQ3M%HN4D^Wzm}QZm)5wBrb}a?ZN(^!4g_MvUO5kd45Gdf~|` zgdUus+{@@60oe(aSEQ&Vv@ppfZwiqepZt+7^to2LGL0Zke@-vV--sOH_M;Rd6}PkC z#GXk-Eb^ou7pNkS3AqMFeb9MuZQ3Z&7&3n<&KB!WsRD&I_VH_AnHK$QFG!^6dKp&3 zc*3-qW)sjKU8AV^s-!C2lt}e25;^Ma2Iq-*1a8YY*K6BUi5K`Z z)8ONe!8Mz=jODuXO!K6euSxu-L7ah)S5@6w2X;}yg+G< z55SYJAMg89$d0&6L(CyeFEi|-1Ql4}8JjGVP1)QOzW6~TB)2ZOTElU55fGM2ZH?N@ zlae?%a6U~L#s2Ks-pAEikZxt%!<8uwBZ?7zF))jB$IkRL?__81c~Gv{%u`qu1y$EUgKLd9H7T$$U#&8Jnp$dpz-qV1uJ4U)BQ!or zlrGH^!uq@k$sOf6&gIdE(hrD0@`B>e6_3oFa8H)e?ScT=MbmmpZ4Wb(*vM3Y&U93Q1?s#2Xk8a*M4J>V3N@HNs zPfEHLIUy43H6KfZ=>l7hnZ*VP z_2OK}1S>F?{QGZ1`3bv8-AI*0i=`n3Yg01m*cv}X3>}pN4v67Mw!$>+GLD7>13Oa4 zEgD;omZEUp;_fzU>IxdfxalAv6X2#KK@sgTQ^IwlDG`1l$Bof9d5nwb0lO8z#nwN! zy3&tSk}0q$vFCVukRw+d5~&0tdFAEfUKu^&(mz%8M; z>cd2@YqNbT$s27-fxrwwj1pZD@9!N}CHPS`$I}|U*jPU~aiFa264Uqpfk$zQd(?V6 zk9tJ_UEEd-d@Gp)@5kx0Q8uMh~VfMo&@WH{h1AET#cvHQ{Fzri&TAu4p`XHY7RAj z2a~@|qNg;k7i-k@)ycNzL#|)-i<&FxGMKzrrGsfWY$`N-i2u`pC{Rd1w%_XnxTX{_E z`_31!FWD=JOW#8N69t9enX~|F7&Uaphmsq!#nWEd?@4L}$?q7HPhub}=gY5b7 zXqNcwvSb`Q*p^%$ zdvk%XL<|VlVrC$V^&iaXv=&=GwsF+}=UHA~q>ycrEY_RyVKF?QL|KQK@bX#QY<$c$ z8q^XQ6ffbQc>u@hwk4ibcbq`Hj)W3~P;B6p_jM(K0KkHhSNUm7e4>O&wL02j1z$W_#ub*-|OIi z2hNlHlXTe|+Btp0XC(dwf|UR3_qU9-orA8Ph2ekZT`$hRmXNxn^J6)0M}%FFboSus zlMMBB_3Kdu$RR_Z>oRd^mj_!2%qb&h^a|jE3ZPHos82Q<3JcZS0+}~(p(Xm*F4!im zKhoXY+^*93oG#!Uficgh?mFK#uDae1H(rjqc|I|IK=sLkWC`YKXv|I>t48)#*`MAr zG36pzn+yizB11E!E`W$jm?;_JCO>;r#Aq+hg+yu-*bHPuUMC*b??GuCR&b3qM3YK4 z#r!rOG-V22=h&k-g=UIVygL!?BRzGs8WyCz9)=ur5Y$vmrdI#uN!+=7xzIEwI|3U)bgYv54cdFIk%6 zFs_K<|AVpTrj<^86sLCc6F9S<56tG>wTWnX&v#>*j|;Z;+;DoxnNyDPTSB*3Xb6&}aelDFs)dhl z2BnyUW;#d#Aa~LeE&=z)2K{_GggdQ!mgTITO#4(0Q3F#>XFXIKE!>|C3^ar5xJfS% zPp9@ym02hO3Tlw0`0)bH4~=^#gyDvK3-DWScTsx5{1R%@1XiV#sf0U;AgvLc{b=`# zr%@1!Ju)sq491997J9eP-mWr{vN+9@bpEP<;Q8i3Lac_LQBK~1deO^*J* z?T!iqvZl&~TSLa%EblE$QP!^wVfy-!k z8W_@Fqgt&RR(w21boEHZbnDbUjg1G$OV&@j}7XH{?edVE0dtxE2vqBE#evEhM6chUuRaL ziY7-ao{ue3SUN1r?$4FDXK8Oag6k6><Jt;tIdsavHBn<~ zZatlT9mL;8TZi_YN7|0Irv}Yaf-L;WX+I6xuwUE%3MknPq4Pjw(&K! 
zk&f`B9tgWYJYt%5Z?)+s3slExS{X)Y*z?l_X6vxd@L_TahL<$o;L%k-4lS2|LKfe| z5qUDt?K}9VByDhBKUs!jq2>knyvI1Vi8fT`m(N1R&?lKf}K&XrCgSq6#m$BD9|OfHss8N z)tL`CT3#>jWXL7@e&Z`tLL@0H8><0){Fd0?7-Az!x&!^?Gx*WZhn7vV2OH^tb&-9ew&6uOMp!!pnVhsc{R8%An~ES_e4uss#Y{M5v{3XoKkZ3 zjLdBnlTS~GtLaI}Q*Q}bAYvhsnTjg@$}!YQtLW^5ZR_B$YC51m)(&}V6m8WEN?W@o z_Ka$_BHan>$iJ2Z2U$D~3)U%KGkkA4B@qtO7H($j&MmI(^n;JvCK1Ilb>-Nnd1hF> z?+^KNmPm55nAW4$7kZIA*_}#j@21Uk=R=vY6KCq(#D?z>CXXP4#GjCqh;9wIqj5kL z!t{!K{!EZ&iT#;Vc8B-OK|v1%oDlQgXNfc72V3h?h{cGr;gW#593BHI}MHxr`C9tSp?SL!*GP0oF`5XIf80b ziqw|jlE4_-3EmN{CIWGKCx@?BhOdVX6$QQ9>%@mu^^U=Lg8uOM(QNv6+A>}*Vn%hp zB5r0uK5+$EdjV}B{gCqd0$|yxEsJ^x=kj6La7nxx)`I;c=S-j%c~Z$WxX;y_F``=(wJxW_=}Gk;;S8A&P{F&m)`3w@*(#@XR-LJ4 zNcC=J;}p3@>6WIt7cvKYZvbG^xD}!FxC~w)3VQSHhh4NU@ja5lK-RfX**IT~$@mnf zesRScuE@ln^Bz>f#frYE#xCX==nf`w31}m$`29{;=*|63%ecm&M&V`Tv&ad|NX92hQ?uq`kPMi^9K%w14L|m8#$#m8hjbN}%0NmRD09Pa9Vp+1j3G_SvA*bMCv6Po0i_l^gj((PH!kqg1L3?6LO?Lg3VTVnS z->glQ75<@!Cu9wZY!1uiC{uG48kH@}g9-V;^BmJB41YBFVGyBuW*(Pb$<|3W0&|dB zvFpJ6+N!Kzy-zzys^N8yisKZz{!p~qaG^PD>(Ga2mEV+x`+B7wQ-un(<4VMnz#JEh zgAp3+hvYsTBmtl-CPP7$5=PWXR7M)%2tyha=Imij^0e}c(=L?h&8_VG+a6OHy7O#^ z;|JrjF+6x4Hsg}R5t2qu!S4)Vq5OMV5^(A+w8G?uvxI)X9>S%RdP6qCDN_iW^@Yfd zS1old`k}|HWP2ya`OI2ELAotk2|N!6^9<8Z@*it*YQ3-ltB~rArnpYQ#Xgr#Viugv zr)?Rmc52^@>*Z^V9hcFaZgl2~;_E8;Zi_xIknX)P`p$>2u^g8vmTO`nfjPcRqyY zlH+zk%VYQ38jk&P$cAT-2iq;!4w~bnO|G?oN^jVb*_=X$2n;%vfzfXOzt_x zq0={`JEWP~2k1&vMpPEAz!|3-G9cbSkVrF<6x-ZQ)3=hC{q+b`nsFaWYQaNO0(e?< zHBrMk2*o@lZ7(ft&yIC=ENV~>K5QyR9M6|w#CT_r9?p!FEB$RwvlosK#ztBkhlFFC z!COchX7#*#zbZ_=Cy_W+%n1(`Q$V0fb|r%uY4sd8FipU#HM)%-^hoo6MTbqELyps7Y!3FZd_WH7OOxt%L@;mo}*p2LgypjYgrYb|ocN4Z{C z?G_|US!x-frLh6`2!7S0M3-5ZtKm8J&0uTz`AQIhe$W(*AgSh~6w`e~?>TDg+4Y0E z#+ctiDz~3-U6sf<1I>_5ud0lLHuuhZnuPp1d938`sg~XHHRbL?tTce$tN~Yj&Uf3K zM>Yx0j1}8hSqE)YA}>QvyKf2B9sy`uHgRElwY$?X;=7=yDEusx$YkmcigSohv7%S{ zF2cpUI4I{O^L-0wtsYh-Ddpx1*J4a!v@&^^ALj7x8Alz z#XE7AOUm&*>oN=%ks|SMgC+=i0r8Z@k3=iRRctXQydbSvna|H(W9|`}(?+$QjYIuL zw#T+GUn6Hzidd1}Au+%C^-NZjETq7$dYTA??$s6-q2x3~dpd8@nO$nq39OU#kOjL_ zpQ7XVn0ul3Te0T3R8QD(i-z7hiOFS}8A8Nz4OM%3bpm;x2v$NDSz%Aa;FeNX&Jjpi z6|E%;ue1LlJU0GtP@M6ooYyS7zu`Bumle^~TJ_GI7qX`2`zzaRT2%7Qon(MS7Fgtc z__;5N1Uuw*>Pxb~Gm!`15lZHHPn?!r_7}(S(W_JtV|`VSw&4JSeHpf|qV-XQdvbbx zTf_;gV?pX4*Jy(`^JhoHlP#SwEPUuC1BiCd+Bqd!hJzFWK(P;EuX5z--DYL_I{@hJ z@;yhxdb0yjzK-6J4oTQ!hTyTwIW6tlT=mMYckWMK(}!RS>CVyPlJPQT6$dFbLzji4 z_IqC{67k@jh?1X(LpXa9X2>7E7Q=<61Fq!~zWYt$fTQ#YqQ_U)Pwe0y z{SdCTEXjFwUxF<5cLxksmD))TgFw%Y@Agz(K`bklD~84=RMSq*T~gkuy@aFQgWY7> zFV4ME+^ihQu`bn z_8|}{%BbN=2U&=61Entc7J9^s9>+rQkSKkDqftK=asb~}5A9ejLb{P6Kki(${6SW12c6y~j?x}+Hsbh}oQZ9O&j)uh z_3OAoS6!>RunTDIu({7mu+DR4pK}ksk=&!{_%;RAFUgR=p3gh4PS5NS#OXn53}0-F z&f+u<`M394azmwZqJbcjkE^g=m~=V?qff*zB)u9%lFJe@8bi6Hd@jg7Ax_?IAe!?s z&(?V*Id5{D>h;N2@MVQ3UFhaYjWtJI*kPsT%G*_4nFy&BsVVdkRI^F?on0Di?x>;W zPy&NOE_*;shyP~E{TB-1x|onf^`2~0hvy2nX1wz+ed-Wm>9`$!)+8pJ5q)}sfwyV# zP5pD6O;~hoaKLMAFmsuHq)6-Z=W^~ zA$IUol{v&#o>}>!lgwO1SD&6p!|k*pQF^-~sM~GIkseIAN>gp{Xkxz^9S14!(rA6n zIiL!JIz?q$TlA?PEDZ?M+5&roo3UUwjjQg3K3Ci&(tBdSvJ8MX3VF=vai9m}T}<#% zLB-ctegmr}H7og@&Of*IxV#$zq$T%M(f8j-uKS7JpAoq=1I?c&?3bB+QA%7MB%W7% z$;Far75gMY9cLWpTwHP6oDjNceU-(Z6X}P_1E_Wy$=-kS?vN~sEJ^PL%Ik@ha|zi~afOEqJQ2zcqBcdi;v;=>XwPmeR9DuKuxy_$f3)wCydq?OjxG`nm>5 zR7xG{C1r+y5sJORO10xFrh0aj(CTFwUy&Ur1(0B{rr3HSV-O^vXYXLYA3n2cy7IdK zi)ckPuG5en8gOPZezt)y7u*!uaweO&vl4!eauh4L$zsD!aW(7h5XnvZ2gb_>I?S~@ zir;di+IgGfy&LBRTW4%OJw0pXI_ zbwmg8(~Vd^lB*DVLA;ZjP*u(R#vd0?0dC5TS-?#F&0lunZl=8~gRyhBKl(D{s7130 zYm+#i#Je+k+0AZ}G7je4PH&LfzR)akm~jS@5AZAsxT8mWG6VwD9!C3b!I*Ns121*_7Msd+AdO)hCzx9Q8f2%#Lzew7^zNtJtyPoIf+(&q-iIFFBvQu8%w)m 
zF@eabL^Db|&etpDOPdaW{Xqe!+qA`%h{)0i`sU;F(@ek`+B5W&)g zd*!hyVHu_w*=!4$8b-p;yLoWXN%EYCds~m)PpU=z)phx0XLRF@G1h$*TfE=7fG75L z^#%DaEP`D7-RI#q{0#^-+v(O8daS=Fjj1{n!|xxNE4E< z^kR{LAbI6*RiYHG%_~#9g2_iNRnAciIO=6&5X|5 zpAk50+w4zl-To9l=?&jG@rjX8k_EjU!0vW#d~8m5Fx(k!QuBPgM0;WW__n48II|#6 zjv~)D)4BkH-~EAC=cacxqORj)r$^yrqOa$4HZu}L&mzLj za+j0Z%u3auC#xPn(pW3OTjv0C#@=zNK8bG)Hx<&F#I1E9N0y(JxBD5I7(g?cY^bgA zd&r}Hr6H|=INcElpxCIU126vv&gOaBKBRdxDNP4VrCG;%+o^_9r13hUkKVc!bhOn- zk^Bv7qEa$R?oLV0Fiv~v2oh6z49ofjkZ1_6YW?sCyF6`USP~9IxtJk&vI?KsJTTBV z2;aaLf*>23#jrpx{zT9AD&oI2z*w3nej{b{xZmDmw$|d#^g7 zWCCQ?mri6kbPZ-I5u`LKSC+`bxS5JH@h0#ia>dtNlaR`Nh z`QF`>kSOx22!&WZn!)6tPYsCWqNia>wm}h8bT!Mg)NhS0$-I%W;nMJ|^*r5|=?GFK ziiSie_5A@^(7Nn%n2bWkX}b(5cxHA~hQVl9j1tD^CRtOA)A)W>BPRt|`0U4&nWi~J z>~p**GTCvzi)Wo&WJN|DigNS5FIvj=w2Z_(r;wA9@bACWGpRvgbI^OP;sSMmki>1J z@@~M(^#m)vfDFL;4^`HWoQ;qxy+4Hzv+Dt?|5U+UUsf}msBdn(D^M|tE~HwR0g@3R zQRSqzCLl(G&Xzh7MZP6j7NG`X-!qsRDyrJ6ZxaKI@1N|vT!*ALML%&0&2@uq4);r3 z56<6LAW416{VY6!uwVbF{ktbgX|cdTZIivz_th;O+9slX4MVcg^cl(6K1I!!rZz+jJ>@8lbr zyI+*wqiwv91kZ%!5=cb~_X`(V!Bjn*{?vIT^b!u{5WlZi4l2 z{G_sDcv`sn?F_!)IQSE%p*MIN5LA9n*Y~Qqk3vDJl#j2W2X@wP*3uug9~MhepcvR| z0HRtB8dP=D&=;q^&)lIE>6c7oP|daOb=gx0P>}?;l}q0WQOq(Y3lp?`DH4(B19>@6 z*e`Knf?>pp^{@;cG9sIfn`>p$|_b5aaeuLPVc{nm=icbr)0OY5{ zDa@M9qj*4t@q3|o{R!jeHcv2(cI7>K0=>tjwuS>kVJ`GEW8_5mf`6}t{)GDkisq6I z9XK&Sym)4yjJkwgCt8h8>UdKzMl^=PI{kutmcZ};`831X=?A%GX&HCgqH4D^Y!mui zgt+S4n&EtP8k4~;u(~Oz)UawM4($4fJf}%Jl?)? zQ5-7fttpKXA>+8vvM^)OW7Kwgty3!gF3e{m!vr@ z5VHxvw+XyH9b5B8N2_r>I1!kGjL7ggOJEEfaZ)a={vJ|eBZ?t`NqnXGPS870xY&k5 zU6s?)j>4!fGb9Y44`GvLEW%xi3R^3qf~g+amxLmAQ^5l7cQqQu(G8BX)1bdRek`C6 z5aBeWl7vSWobVtcq>08RO!i?WXjThqGz znDRzE)xls~jzOA66-Gsd+9^VdsL2q~ie<%w^!O2n68>QgY2?bv*yySFaNO5KNOR+` z?C_1J;5F+YE?e)*M`+sSvV|rzyvQhBEJ-1Be*o1%pj#*+G|kge$F}B5^=(+u$Xhk=lZpRhr%0qK3X$uEr0I)CBLiIi!)CVRv0)a$xn?KH!rgJE42u z49CmEC@MFzJRli)@ErTO^6+VKP*86yWfS%-$xvngi6)(D+;vwtm1I#ulu-VI`?-( z`=tC4hcdrBv6F&X7Lu#C2;}(0n~T`*h5L$<$nx+g+;&kQt9l{wsWO2gt;Qmjz!$~D zijzsVpmTyjrmBhCo5iAeR}A^fTQDaC!VRO`{@VH0JWSIW*NCQ$X{TI?sc-qT%i*H%+;dT0@3v)?aK_jgTQy7ls>hNmKGjvT>b$iMjisM&NlPSEvHf&v!7 z)H>Oer~OI;l=F^yV6B#7+7p)E)o?y-m{Q*! zaZ&Vvs?FHXUV7KsrFPc1hLxryW4rKO1p&V)l2UPx*|{&dsB^)I;b6p^F14OHAdkTMh0_vVf%D z*TF4nNiARuQ?XG3D6~_9OZxH>ndN4Ckt%hIM$|0LiNDp^B(hsG9L2deWzC;#yz=z? 
z9jA`DO!+2DmK_QV=o1s~tT7+qn)V!Gy!eZ)zx=eC8{InRpw0LK#4W&}JW}QXV2SR{ zk$yyBrZmp{6k-KAJ|enUg zOlkdXkE%T=3@fxR!+M2k{PThtQ(RF8nN2yEhPfSJq( zN$aF?&N+zudCusz+v2FX`WSGC z2MypE0%FPh!~glcAP9SQpUaw%=J}QT0Gfx`S}>j^Et$pd#>lq8MP2PmU}U*RpM5`< zS7MWwJwGqCGW9-Ld>7IUUu2w+7pTy`diPORNyOK=SSx|;XSF7>n5DrgIGz3T=RCC z>`~jR!4bmE6Z`szzVt3%z-DDPwDxW%r1m4S$DrnOnq>>p&8EM#%a5%w9~9#)FW9cT zSln{rF0|-hS{-+q*Bjr5f?$C&`aPpfmM*Cf&zqpvo}@Y$TDi(R*FU^c0E$oIWp)`Y z`^)b)qc3r_@|Ah2@URD^coZGB(ei$oZL>9Wf=Xj~s$sS}D8lZ!AZQII3Z`!aWlu|T zg5&hMS1`8&I?r%&H9f&O<6s{f#UtED0AFpHy&$G+E(cwVa6K<8j5Bwj-860ZOPdgH z21#!OX}3Uj-y(Z};B*R1emtyQb2#rj;q+wJT_eB006v?8yh`6B#Qjkk6-cQ?Ws z0@|m0FN7NJg4FoG3&~CKUx{g-6}MZCXIQGP#c4PKS~+_m!%?G1*N6e zj}~kv9+JsAjgyuwN>}B{!q=ihj;XAHYwIBbR zrCUW^F8F96KZRyjU%lotH}zrFRYgMJLU3Zgnl@65D2h@zO<`{UBtT&&n?WEd2EBtW zYRXk49dX1}NSweJn#YbCwmkBiFCqc~85pMmIY6VzSJLnDj zdBL=hVbEE^Sf#6U8q{TqpkhET*6rMBa!oaSc<)l(ITVwRi}3vgd(*Js(a^ z9BzpFB+4uLil(R+66_(3hyJP9nQT{!xaCeenQPa^x6ZsEqXaT9%U@Y5Ohy?NEI3WTxnJ(g>-v3DiMW#^uq!Gf=U|97MUL zMz4R^2Q&z>SH=CxpcScgeT@f7WdVIU@chE4$}oD2llQ{h!Z!$jY`h}%B=U!uoMAAA zy$WgLnr%|30ogqIc*}S5;P5y;@6f-!hb&|MQOF z{g-ezc3CX)@i(cE>wmCV{ynAw{^=b3Bi#M}>{0tqc>OK@@}FN5{=a&N+Kosv2~&+e)yfnr!%bqdL?%b^nukNB3ZUu2Lr_GP|4pIqiO__?s&_``LA)V@vcy z5bd8!E?EJzt4@`77C0Q3_^Un&Y$FfJev14E2!G=W`Dbu}%8v~1@%(^d4B26mRI3J5 z7-#2t!yUt*)+P$}Sy!`tUP0LQKSCDR;oK{~TcMzaPa|0Z*|lAU$i+qie+c`pUt8e| z6OXq_y79Z0&`Hv?2EDQd$pMSlX5_9WmAb^hw(;}SYC*BEi(||{BBZABKKTDM!*y5f znbV^6*Z?cW!*l6CyHAAC7*+O1%P5cpQY+vJDtz_kMKNqdzi94XzqN%ZrkZ~8cxJV5 zf8(yjYY`UviF1`>rieq|(JuYQ88Cnm8`8TOj0$jLi;a_L3*#7M0~L>Xo@a-30=Abs zYGXGMXSHf4;nKp}z*I)sH~iBGb_X9{o$vk&bUn}ps2-`zujE$0(=&(AD)P=$8+#QX zW6RyOJ{@gRzdo(F3ObonqjcMTyew`d0j(bvRQ=Dg%&d!KFgXi9LkZwzB~eD6SEc=` zl3?D9gOp>~g;HncQ0Q36(No}x0wb9};NZ~?MbO@ZEePWx6;U$Udm|%~8_|v`@iCS8 z;GXmPZ6b{Y6Smcy63tvZLxmoKy%WG}QT2+hV;pgUL8t}siNpejCHNzYCiRz}08jf} zMW)}(2RZRE5aoMR{#Hj{=;}Jm6k{`;blxMMEq8n5Q1vdT;Sw5o7wZg+Uwt3005TU7y4R3r*L+${~e^uI=x0+cx2?Uj+p6 z9--w`QRHeWd2ey`sX1Sc#oaq?Ht~L2c33Gn_Oi~cF<*ae5i-&?+F0LHnedyB{a-xc zf1l2>|9d+B{}2}cn-5I*zvkwD@+%cAoxgROyojaRHw<&(;DLehEttR?B|MtJ7iJp} z(q{XD+@AMSzlU&k(qbJ2=DFOXlICax_FBZXgCVr3P?oh0$H?ets`HNXPp9+e^IdcC zkFsl2N}4e}?F!Ib9o8*4uB^b5RRX*@bqyy0cp74EShjwP4Ol?5f^jJM!dn;tFQ(JD zKPQ3%0I<%289=ceTNbo6Yfu>O9nHfVx|og(+5^8i>xZ`J1%&V)PR2Lr8fRo@G%Xv|HPI zbz%xn$8J(9b@@l8_WXoKHgf_nfmTjA2}uA^ESFhJ_uD6IPP&;Gu91n_Y2+d|h7>1M zTYkp%7fpJr9Fv=@etW@^+K2A+fthBEGNOV#8QrT8KGClf;Q%7pT?t07$;uby(dY%) z7as0JL_CF#{@iS|fes%ipdmy`{xX~hrm&qLi)`#ik35!|J+=-^IFfN_WC-kvDnmH= zcBnZTTZMQ8djf@GNEiW-Mk6oWLvh_bTMg)|yMgEC_JzF}txsebve-+QT>x6xmh@a# z)=nf|x`T9-x^V5xaeKU-!K`$MV?6X~%Mz11YV-YLvCg~McchYwEbZ(cpnFH8gJo_c zv>aNeu)zCa_)L$e20yd1bN?x`Ib^x)VhewE;biH0<6=;la02W(I!9R)Gq-sRo}nJ^ zg-VpLEfsqytZBCL6}`n9zH{2`!Ht_!0Wnh_QWZZ=vt( z%_aWuWXtxXSK&Ye@rmw=Yb09fKzrNm&E-qkBdx!ApMO#+UkJ{J-bfZtr?`d@#v|4U z$)&9>`nC6F#u5qLS|?RwL6%9nR3m+xth|9_qK|3!=VCo}wK+x=@3 z{@337_eMLY3hsh2?C4b=K4{11L+Ikqf_wc_ly4hPj!?)LKpcv6Gnh0@na>V@e}Hdy z+K}z{DYMiphvOQnRQ$c2+j8YaPy%M77)G;Wdee!g_Q}Tm?<LThEvG zPzH{-KkirF!xQSy4BGDlBmg}UZD1)z2&wyRc5DcWVBd|(_!Cj~xR5`@O$2AYE~K{1 zjo1+&;9#b0{5dZ7Zr8%ozo3Wuvr--C5n!Kwji^g}8%`PM`2PThi)pIY16vku_RFoz zG3H7WD@-gf)&{gWUc&H%RJxewCKa9O&bmiOD?OjPFsbHnz zRBYR}ZQHh;if!ArZQHi3idi@N>~r>h&VFut-}_;$mbTVv`83D;_u0oB{U;Yv#Mx=0 z=Lyw7gLsKL3B(k=kP=)(N2}nbDq3YthR)yPFfFb40Fz5#N(3MIZ7zj??6DSl@*M2Q zmYb!Hnwf|%Cq9;FI@em1CL>sIk&_Nx2w|N2$PN};&T?EVY57)S|GV1JB(FS9rc$q1 ztQ%u0_?`s^N?(Q0z0V+S48YxYjaOl}XQ1*dQ}!V=5GLy@IthGUwwjn?K|113E=M-h z&8*;}xDbU)q9*u|$lp{!~Y>(*vxD13)Xk8vu+{5CTZxYiRYTci*TkwdJ)~+5gk?yp>2yf+n|UA?N;XhyaHh1ikQ~S) z1}9pi@sC3yZ1DgHm8g*XA#LELO0YP%)N-}KMhtp~ZI_>5DK`;p>ECsU7c0rnOy}p4 
z$Vc|lNYBRN?`bjzO*XpuE7CS3h^*w$=6N{99Vix|#Ve7sV2fnUZcuzi);&__2~(xf zN7aZd0!0~73y;HDO#&yAcUdK(!3Q@#p?b$?8MM=J4ud@6;H{_)czDg(IKE9uy zxClau(WQu_^2*BMvgsE~e!k9vA$Keh8Tl0i2adyY;2j2K*p0Rq=)ce>S(`#9^U6rq zp`VsG&n}pO!Tln+Wb!FIzj?h*TL8c(A1Ahf0JB<-TqHVX5S%@6nu0+g${=i-2J$&R zB!H~i!$)^~jLU#1wceINAO0m*pY|7pEFafV)z&5i`#TJgKdR?T%mHUt=M7 zV0wJ@0roJ8SKOI5B6>)>i5DR&(Qk#O#AiTv4SZK}i41*a0+#0Z#bd|jcO$?NAHp6L z=IAE)^hN9TD_lD#7z9JpzD;+;bnGjRM-W725DXm2Oo=)imqb_|H#QeAwjiJF!696S zOLKU3RElWOI?wj5I-!FJbHS}Ak&92NqCeZ#UFgr^e&>0s8p1L9!hE?N8m*kBxln2Z zNYW8nM?)b|d|8H>8n1Sf1jp#EAwoMab}ljc z?M~F}i(6>n5S7o1Fyd#B5V*!ji0bej$`UNwGfm6qRD$Nk8fCT+%R1@J;vKVPHxQdXrBo!n7)4x`%2?;>sGOdpO77??W4_@ z6RF)-c`j=L=k+y=rlg!W*6c;njC$f)eLk)WqASAk0QYs@H)h0Lz`P(3In-OPS{827N^&>p6lLGcUw<3#)&N6&uP)8V&k6Hn2q-a?UOna z2SoAXL&~=+plgUaM!e?Y?&53N%+X32F_tJsu3y^+T&GU2=5PP+v$PWapY#6HKWCz# zPW$_~48lnEwzjtIuLb81OR!L=>k$qduXci6^tIsj z)E4dN@new+N^DrqQUend-ouF{Lntwt zv70^oB0V&U&}mbZDoR<>7a2E?VB!uX+^9_8w5oe(162ayFerf0|GXIQ5S%|Ekb2VS zo7dH&68UEKD}dTj#opAeYdmHuE$4s<0b(x8us%-{VrKT%V5^whUiTf!{LTxv_glDt zi8e78#id$VM%Ha1p5)}Hi{#aMsdar(M?Q1=X|3B$2Lng)eg))#uWl;+kpmq=he1( zcuK1^js@K*L(fnl(bQn&`N3mZt>kXV?4!>`v~|D(0ll>fyZ8f4n?k7^iWxaIL}lwH zyZwhJv!gc!l^(pmY}32q!P}#)?~!bg9lJKnV6JODBa)v=;Cr3(m8KdQ-No3(o}DJ) z7?hIzt+Mb*n2GSdyAB0M-rPiN5~->vgC!jfC88s(P^k;<-r5uTcBmpdz2Q%e`X*dn z9F)Z>BQ{?OG%+xa+jGX+^q8xjN(+yQlY8brv`>qS4w=1H38!8LLb>E*iTElQ9ko3v z$+dJvnm2_sEEqjPNkhnMjp=#oebm0Hy+W=!r=5W#XP{Ohi{wc+zCJ6hP23bjqK}HG zH)l@{hhfHARGoEoP8(#zmTUM-+cpt5E`O#FJ*o$&{Z99@A+k)vU-E@7)f zrdf(kWkGF;_iTI4x^+<4}B$W$e{IFAP4>^O}A%Fg9AAVZtE4 zfD9VTaQzJ36EQ?C^V9CJnNU|^!<&$-TUw=#Q*0Bks=v{=4ky)1P;%R&2h9`f5j#3e zh{4^hgUj5D0D49nQNC}fG#-U+_d6qi-%`W1zh6p9OHXqZ!sXaOv5ZuH?7x@=^?7=3 z)spUXIkq9B`8%5t(Woij84V{bE9b=ue^x;JO;KRMr9Y_!}P*Yzp89MQ8&-Z zs-$Q?A(u2&6bm+f#2W{TZU>viPD{_pU}%jJ-6C$Zt9{x|Xl6S;TE`Lb%OK^t_8u*w zEZ{khkb`NHNy8S|Ax^2lKEFd#ZKMWdrpPj zQN5=s^0QY8i!T^eRIpN6mPptEV@GB{rOdK%liPc3fm*CuO=`1PS-rmYsYFo3*V_w@ zsReL*Cq0Mto|lxg;!{Y{qYcgw!>hvt+ee$6t!_r}hJGULZ88f>>qh_*I;VO@ zDT>EJwK3oX_9TO#1uD?VWx^DM)vyb@B1QPtu%cfy0ZIkk(LP6P{s*R66v_#z`HQ&N zaMs^0Hz|tEX>P>2&0&yNpVZ_~)mgfjWVy)J`~=gbA%~wAJl7!fPp((U@_f2T?ajGb zf*%pWI*>0&_YKT>toAP+)Ng7(<|VeeuTfjT-8W1dsJqZa1*?r?_ijL#u3a>B@lC(? 
zpwxr9q9lIptee|8YL9s}&>DpCmVu=_TrjrK&3=@4S}AUtv|#v+5O1I4m>9#A%R`zp zCtQ{j@v@^%cwhMfnGez~UN0g)%gmBdddD%$2%AZ0jY)F|M|JUAA;3=RvjXDS1FD)s zG1Yk0K?mZX8?IxlK|CblM?j%x(;lEWlGaF5W;qv(z^@N6oY}IyP*h49c_( zcVrP35jFL4^IZc+jNuwOn+wYEld9|er%!t*h@2m3Jcp4l=~%Lujqjgj|ihuCvOmf@2c&S{&2 z$2$~A+LP18JVf4FWZ@kS6#`wYfuQn?=2B24c>12d+l#!I#&)8WWDQT!%aEqg<7LEm zn`TkFEEg}jlPRO|D(HK<{!gsMF-p67+(K5FfYcR;=XeFWN}oQ?)NZ*9P_v&Tqw>XB z;24c81l~Ejw3)CcH#z)W?&@CWM%o#>JOe?L&xxBw99VR@AJQJ{(>gfgZ^DRp!ptg= zMC#N}CBwRWl%HNP<=nfybOuABwQPvsAUt^#69gc1=)2V4Qs%qCSA{PU)<2dHLQY~x zu+4ngS+JV?NFA`5>1Ykx2%yvZ3c13x$QbsVVe(sj>MhWl6^(nnSB&W0`SM-@wPs`2#@)}+dU}lP)%$!Woh5UmfY&b`Dj^$HE z_fLqS)RusQ<_tWe*zwH-B0M$dh8cvM_+aENx&|Dx20*HO3a;l6oCWWrgX*xC2RQ78z<5 z3H3fXju<8|bCvJ#b8i6J95c^G&1I^CGvyX=Jv!FS<(Kt@cYYrcN4a+xzMmSb?I(}u z2O+9+vy${v);@`BaqBm~x1mqQ8gHz?)2yr~_6nkh_}WOKMrDyrS%55 z(X}Re_-D*npRI5m2orTt<9HeGOEJcDkJ?bi%N(v+kBfDObB77H35Safdhd5Ieh3}L zWLmzDc-33(ep{~O_U)JQhqh77cpA>!DlOBny{b8jM&gxET)V8;p6n?dJM4+UOOJ} zdlhqXc9K@?Hc-Qq&|P!4B&)Ic4nwlXJ*X5;I(dQ0fbtuPSl`e?BRqZ)(tBv0Cp5Z0 z1fVL>{w5DbvYxxDqoyIZIl=?RS?@C4lj8ex1)dq~@^fO$3xB-LbCj?vd7{-bBZE+S zf6W1(`HK@a<1TC-a+nj`h5mdWSF!;I_3$1Hrdf77+rrw@&io zBUHb@ZrUq7(QW2R>}krkOz)FLrN62pJ^K2N+HA@Us2E=sE3eEBI-B+a`-36>Mdq8Qhagp1xJ7Gp(g{k-4T&-c9$>im6m6 z4MZi&2%5y22oJR_5{h84^`|(poL_-|8o*75HbXd3TNRF&eXU~B^g)T(I=#HdZ3k*D zpuYa(|B~x87hD&z^5_?7YGKPLHJiWlM2NnDHr>%FbeLDcSS50j=cPx1E-FDomcY?* z0E3_II8(&b(TD%#3@EHnwSUEc2F}29X?0{V^s3D|!XMNZTfmLoa=Sk_F6g3}IzMpu zb@=B7%lK^Eu(40?)u20X0f(66icdC=xr$Xs^CjjFTD;9!hxGM)#&}5 zWmJ5PuStIcq?haJVeNn$3)R1!T>cY~#`(vaR{siISQwj`{K*}#QP?&{+K`4OMBefX z-u7LfNcV#kfuKxZfc&LEZ7=eRdB-f{P=pTDSFj1U{Rs2W^Rflb1-4rNfWcF7gZ)mFeVRuFm&(sp zi@pI~h>IQBv*7DpM5<-6D6JYRSBuIs3v^Q;!}H{L)a0t; z)(A%1oqeTSq0{{=GhBA(5AE4G!%5H-?&U%>O!o+=MqE`} zXa@LC;LdCfdypT~6X}|LFD^t^H;*qwk<%v~h1j!*!A)MCDuRbAxhfjw8J#N9qVPX+ zw+P*DpT_W9Wej&ieTX)wE)6>7n>;xK%gks7E0k&Il#7;qrp0eJ5uS zVQ%_CKJl_<52A%+p1O1aA^61if!A_HX!QGf%NFHWaJ+M(t=zJ;kGrvtV2JSogq=Ae z$Rd;J;mteOOXA$En`)IlInuznWDDk-f(2WYaB44+-=H7AXBFP*Pa~X;Cv|dja7lk- z>D|rcfL>=T=y84XutSe*XU8y>#xXl3T1gcr^CQFILYJC6Ef#2<>)w!@)b>bYBaT+4 zdO9X=^6gk6Vt;Lc7~Hv%`(fduvyY=3;B?AY-f`CR+kzs>i=uYhuH_+z2XX?527R{b z(Qq_7AMI;mp^bH->{|k)_REiGhD&>gKUPMLkwjv6)OQ9qn-716TsB1=U!m9|dAMhM3wUZ2u z<0ru0=;P>-VkDrpY|GGLt{BTcACuIXx}lk@iqSl~S0^9*P*+qq7u8&x{yZNNKuMzy8W<`1BY;#OVCvLq9}16cqYtkt7i~qja;weM$y0Qx&N0Ma^Z(nn2wJ zb97Nc!G}vuFCjy3(G7FV%y)#}Qq|-+0*(Rj3~^-N>%^gQ7|Xl-$;X^Mt@j7 z{wI?4FJ551qLl(rxyzd_9*_tN0Ro}OU)sYPCONQa zN&EGA2foA8_Sb2Y{c1i0vh366VNL7W#)s4MT91d%faaRuv0L| z`{eH8>pQB2Te9?f6R42wI3+nEf<(GgpfX_(iZf9+tyzVKa!*-OGj(D>nTmMETYcAw z(~Y#gyQqi;yk0S~Fp`le!Y%xg;khjIWYZZA9fVr6lhCC436le8oMKE4vx%zyQdvTP z)+T9IRg90d;`A_5$;A;iWLyc4OQD$Br^Wb?9Jn>PKD)#muFslfc8$3ei$;23HKE#) zGq!Bq%W1$7c6^3jaxy-&`!Kzi$rGCWiHr-}1h^XQ!1xWQ@EONm4} z(v-OPJ)EL>Lf*9eiBEA$jLn~v`jL85DCXXodini%fsj`oSsg?(ZMxDV)l^g{xKKGs zBb>c}q=Id!q)v@1QdJ3gUs)4IKRO9^e1#d`Cb9Nw0*jDqhZ~JH!BNw@i0c5KjZi1K zbLVao+rk>@rA^~gaono4Z5DAm*)aRX9cn%Gn9}DQJxhGjPn)q!>(3E|`k6nvpr`w6lSDgx zJf`Zgh_v9mC~2Qh$cBA-VuqmVF2L=RHcZCv@X%@?zYMrcGYjl6L!z}2NBDj8Ay^V{ z3($)V!cD~L3;a+7hG{pfLv9vK0x=2WmLJk4HQ(XoY55Cx8g0g9#kpk>K@o8IondjXsxm+&6b-SE5_B%ofZiwIsgx z@om{>NsLgDpvTsek(*rHoZ0A1u=~8fj9z>o(`7z{Ah#XWpRNNJvK(ipy~tJB$1pie zrPC<1dS-^lAD$)>GMrc%fOvQf{K_5E__F-6B{Af1=Uhe9Z86bp=+%qY1MNd+|uaUR_qMx+SN3*cZ|!`kPBMm7xrN z_q870C#}KH$=PP<71x+eqy=ktUNuT*0pW529*TXqQ0>YUN~}Pih$Yd)I@%5 z5{z57xy?FEFK4Q>VVK>DG& z?fiNf$U|&_Rv@=HVTQdnl(qx}!F-7_%E(s1`a!0Fh~Afh8X-<^eH1@An2+<;ATftG zoFPn5Ol=vJ)5!|LbSmjDloOT?9t<3hRrg!fxtX+n4$7q$5;NZ#O~D%X?|W~;4yT@H z117QJd2qUh4a1o5>BZ$K(~_nUEH}zS$1I0$w6yQD6cQ_UZ$%w#nG@`kJ|QsyTVas+lxUe+z!~O|nVopY 
z?nDvqaf2ID+>`qN5Us;5J>=I6J{VCr@%0}u2rT1166A-)pFCzgpGa~|Nz+O|B5 zpAg;2xH84x<$e3$(78P`uxKFlRvmRzUEXqq8GhIFj8^pNGq%df<0dbl$N%NgmY{ja zCVA;l9P_wI4T@)w;L`^r(!e2qw8TS7NPS6m$ONo`1O?UX`jXpCuIG223ictJ;MhWz zs?d6dVGj!Zj8LTB#>W6L}1eWlJQS8pPC z|J|LQf5@-+6R_4({S7Ps=f+FxudMU`q}Kn%sRBX}zo8E@EHBeU&iaK&IsaH7zcuAz&(#w-Fj)mvk} zf}MdBwF`|+#Va~^0sqFV$-DZ*F)_m_iTA*UZCso1G7na^sjgjH=JF_<;)-w(i`DA} zfO}W_&Os;JcedsQs9g$Kt7+Ez{6Ii-_;rZH4!M#!-Q8Cn&qmh9LpjXdBfiQVl#$U_ z6Gd&8a$AiDg=}3o%l-+rx69D~tA_<}83>Z3g6ewG>{&j0q40}7DT)%}jm=kK{41=t zNZyGYO`0p_MM?3x2?wh0a}Ie12K`tF{-;%#cAVF~E8k;;Y6VpEEmmyNn!VSp5@{wj z|0R+i+c?r~Xffnuyo$(CTf~5^;WhnJ5e_jL{zxYcR{Qb3@u3okZGQrk$K+V5#`N!c zJHcf2K{%#;bol!E*z)KCYU;JYjTCF{7$wt)<#LmWAt3N%W~NEl`iP;hO*dMl(^;{JjVLDr`M}BEY6_&nRfAUd&7v+iWFdCIVJc6Vis7~x_nc>!s zQ1!CyuGK=8jY(=+MPX_E*%~hgw?dcYp2CaYWdgrX)W0{`s*Ge?>P+=TYc46^CgTg{ zV3H#Q_AYd?tY~i0vN0aP zV`OdVj3Zso_LsPeO?#-g7w1fU%N<785`CzPXvb%mA3s%~*gsCCL^(=A9pzh3PevKC z0|?!V66h_F$d{0=<{9?YV5QmoLKDs2tAcCpwNc>K$S(MDC=y%bG>P#GjCQX(cl4pC zJ*HmsLD{MhM_eI|bQ5$!5C`hU+P*2V(BO|XuI z;)bpF*J}^fJE7|5Sn1A zUiI=r4pRyF7&Ub3R*1bg8moqIZ0ZU1Q3da=&qIQF3|t;2H%2xeh{+dNA$yee?w6?K zERcqt*kcbXQ)%&q$J-`Z!gCDu(q(@qf(T1!9b!=D+OBLSbfZ+nA#;kMVcvZlmUv9m zE5;D7&S*#@THlk_iZS4}Ua@I9?(1eKCqjiw>k&OOa5Si-4{HvQQBRb;6u?RLFw-@X-r~Dx4TulNI z_l)!Uu9wJWLY4NAT!+GQU{madFV}?fMrLZ_w&a*SzAo)}*4DsN=H!y^P!W<=xl{KR zc(i45MIZcZ56Bw31QYhULoMWW)XM^-<*1azd-w7_py$n96r0|6C_Hl2MaD$PCX39W zDfC&5#cVJo^SU2ItmRD-<(zM43NXg@qy1rMbNWQarVDJzlm4a&f#ve|kT&?wkp6Gi z2zu5)L=Wfz>HstgQ80CKGz5CzDB1{ETFN;YIT-;#`hN%fA6Ih!3it|z6(A`YDPw$% z5D7&xXBDT5e*~!dSd%)Dd65EQUbFVulxDJFMXb2SV_oz(;q*{xw z#ig!cv=`xngASa`Hz$)(Ss$ukv_S7v)iSE^>9Bk!EDDUnX_L>XQ9@NjEi$M9~btDhBDU(&M`_X*xKiV)}rYfyp%&XM=0iJ{r=$?d}Lipx1g1gqwvCv zq?kKYKd+hl7}@j=BJbP9qR2tUjhep$cq$m(Qf5X}3U9W7Snu%r!{A`DZY2l;hsGek z6=*_5x}Tm))flKpIHWx ze{9PH8-3{P4*V&bZrYSAM36`0dg!9+KnO>k&3>sl{=pUk^+ToB4YK4Xg)Wb^6|RnK z4Wu4?9Vw8z@_^5AKEIRVvv+geKLNwl^l#z%Pl(z3uWKh9 z4u36{B&>~XNJxOqp8D6nZ=8Pr#s7$B_CK1ve#PIkvw=cAXo>T%5a`H?vMN>}YGIgN z7$^b~R1k_(h+d38jVRz&QjF=rpEx_PPzgxLOg$NRU-H6D8qv-8g;=`TYMC9Zfnq&$ z8y!Axpr8FI0gpA;_zGic4Z4T)OiU+J8c1nvQgcWX zwDp1~N|k0LTY9RS+KKRzShRNu7^}$PSJFJK;&55TDxM2%!-=gJ}l$few?W-?Maqf>G3)rCDLMnmxmoSZ!I>CuYpK{=P8*H z@JINf))s1Er|CO^v@`-Pi|@|5i4<(rEQ-oe5-07*cG&E@Z%dG&hd(Vr9o;1=L&57kS&^B5XOoxMZB0%PUiI2U%9z%jP zT4{-;MgS7F(gqyWhUzBk$P3SE5z+8h%5=4~)*sm)e`Hc9%(NYV z;pJ~S%0(nPgwc$fb%oz? zZHoz|K)*5*deI19&!W{(6D`FWlMf zfm(2X-ls$;0BzA^(RdIc3-UsxutH&pP)75O_k9lA{9q)xJ=l2O4ZDZFvH;xg z_!UQ+=6W+prQ$ns$*34$P3$3MXhV_#7?4h~1glyEL zYpl3Cj;unT{^iimu*j4TpoW|fOF3#1wNi8<*L7Xg-k3+o6hqy|PQ{5>9-_QcKWv%g7Mqc!X|ES4PX)k$apun?2MjZ$QXB`#b9(f+ zD9{M54v;uURaBlC4St%2oYsWe3Hw%G>sGR{)pI1e|FwPmU zMuC^n(|ZD+#n+@j%lgW;+oBw&7`{>$D-gjyO!{X z!5+!~%@McAg$+axZDD8VOMk0?vuNptrF;cTNlhTZ;j7=U%3$(yBE6>|nNPSz(EHYS z=B4q@vYIBVe#gqyt7G}#ha;z?jv2!_R0!|i<9$rT7WFsr`O*JcarFO+_y1p11epH? 
zAt+h_s|u2LLww@Af~g2nz5HE31JPjEb3uTx97-L3ow&ZBRKo%-o~BD$D-Zk|RRnAZ z+1m%7c)NS`9k}Gj9GBs7Ho8wUzL)|X$it`o-i~tGM(aG&V#tr z$$KCEBCfM?C^4jty(fHVCtKJq8jfpC!$-E2COiw&;aER<6uZ~Q5yFrcH;EQVB&<*x zbU1SZcv@u<*ar!oesRK=O{VJAJq7NXsIrVQLs1{qs3wl*hQiT zpoPpS>3jpZW7T$TWAvK#C909@afue0wBdi^mKLvzQ;Am>_z@+E^?yXHGtqZZV%*q9 z8lWJGL9H3CQhbhBH8#j4U` z(Pa1zZ_UIaS5oglC8BL*-SDgDK9JcmD z+-!+&E~EGo)eMoY>V?I#F&X+^%edLsV!L*1>~fbz^T!ZJFZ9@-EmIz$` zLkaE*b?8o>)%<(xYlTMD^CSpIp_9F`vVk{RiagpT7ZqD613b?PhQ-M64>4TNbQ47v`0;HGOO~m3L#?+U(pY>f9W>${)MGNO(6H#E_|Dd ziBXdYM1S#PX539grh{+(^hf1LyAzi9!`^d&q@zrb*rky%ax^OPWHwC{pG}iPF3~T7 zIRW-#tYNU{(LC4&D7WhoceQqf4ys-H=|+AN`zhI{J9s`o@+lNFJ0<8{9Po)K{!KgJ zpW0;qRtEjE&7%K5TMOC)r-*+MyCdY6tWg=!cqW!%z;beMEz}@W140R*3G;o`H!Gnbh0zMFys&Uz4(UOmS zneY(30~YuQKlu-T$o8(Q!tr_{L2>WGf35f&FG*$vu&kdq89Eac`eM(Bp?+OO??^*@ z=lqTh=Fxq*n@sS@!H4PO;O0>g;?2`za)(MjsN|aEH7ZQiUXFj-*B@n#qz$sy{2;h< z$9rgQs7PVjgBMyPj&PDZ?lNAlUHt_ycQr<`O-cq*@Ih)MFzlxAfvI;SMWGT>j7U6{ zjxJl1s0&sIXn@|Ta7Hs!_%z8GuAZhWnk7zlE&Ryk&oRv{elL$slE2bR#HLwJsM2Gk zNoW^W9gdwz=YFNbgmUMHSr9S>UmqA`QtU{CQ#*MiVrEN?Eo7kgT%ldL)Zm!$1CKH@ zKXu}uRRJm?tL$^Qeu*d)5w@5RpaS9r!!D_fe@xSoaB%aSn)KFsXh$_#GNGhF%R>06 zTS`kf#85oi|;)O(`D&Zg$`^AX`35B&wcw;KI>607}!{D;bUcAVM1)a@L{%WA6b7%lw-N{mvcJ{+%Sga8w;I*Rl!^X(|63GK8;LGe><``RTpXX8v0W1nBYt`YdLqp)gN z`DT7B#%Ahbts@p-(m>HYaP&@{Z|S8sK}z zz|FfAM7|m#1x@}D$Rv9@W{_|2AX$yW+;$?VH!VLg)=l!^a%(+nuOFp0gI5zL%n*Q7 z0|@*`b57B#aX)SBm130{#dV_ z*CgtX=KZ?d=mkni%o=2gN03 zO6HP>F4Gv-Sd7IM@32#&CR@26hPuqBXr-@CMfQ=0#_;<<#qE5-C6ozw9%xpV-U{*l z^5Q7=S_&+fxUCp?EVgGUZ)cX`G|H7I!5nPm=YpQ-vS-C>kfLhRW{)+za;HJh-4c7A zjaw&`05M)Co90e%v#I!Rm=j84EZcPc^%e0`X`c{51#)*=sOGmDNj6eaXI%NCn1q%Y zxrIR$_F1Qd##0zSr;%)YcrCeqWPz!bPZ<+~_X&P5C_LbK=Lzi^n(*#lEK>+SAmd7= z^y=BHqUtK+4HzX0?I!Ek;S_0F4`UFU7sjG-vfZJH=g_T&(#kUgGpsErm`tY5 z$TfwP{1vEE_zIqZ3`@Ei3OAt-(>RSB8E108@uYbWvydLVV(d*Sy-K|Y| zPt4J{nAmVvhnQBxYK)^I5wmZbeq4K3KM6sq1;0cs4!|_8?VftHsS0#}5B#C1PoDZQ z(5*Yjj@drVY-vs*1mT(@cQ^Yx9Q?`MIWOPlkIA`3FSgYpJmJ?q^xhG^JavkLm_fCm zmO3dd#0SGJ*^|t59oP&`Q|9=$_`2CSmIaWaCAkE%lCt4)Ez}a(Z-FnSSOEZ!E#>^S zXuwKC6^=@>lP#D9OZA&4E38cX!3lq58)6~O*=oYDAoO7?qx zP@|m5kj+n3slfxFiqPgvx;md4+Y{ZsMUF_T1u7sDpkDA#LZW(_n$IJz9g>_1=T z-aBj7ng?E3{d9ikRPCzTdzUk6^}1E!R6l!Vd|uy)tmv4nVXbG#;YLK|f>{589W|Mi zwB?b-2kA^vecmshe*Tf@tsv!gaKX22;9tS^_uB72{qO%W*#1r0Rk63Rk+OGn`Y(d* zUl|`v;{6BiCdmk6Vsh77>BK{{>p;Dx(x{N2q4ZlOmi2ZxPHg30^GlI-;ilg)043k*kD!wRd-%sES&uxbU2X_P zHQQ^(Q4LfGwq2=F+ef~-ZopP2T4P0a=Q5AlUya%7BgluVVA7;*sSJ%N?c$4kpRGQs zq?z!Rfe&83-k@Gvxe?g5qna`%3p7EXEy7D&Bpe)LGO5t{$?(oomqVsv(cQFgbaJJL zw0?-LND>Vf<}%d(i+bw1sz%Hisu4*{l2^8d>o=mX(VW0*H(&=XFMqzBF2m$ z*KRo1P8Z7S^4vOOuBUHw=;tb8>Z{5AbD~M zAIT5p+)_y+!T`Sa98Fdm)o;v(LtSScJjsQ`Wj5Y<^1(qhkS|?o$DDqCoDz~YO7KIV zJ;d!nZ(%a2rg)s zDtIB^HyX3UC|T@>lWfN&67&_f!iWI1U;=Lsx3)Jenh}I?t-S7=`%H+m#Xw1^%+8fQ zUpW$DXjF+{xA0oTHx6X;LZ?bPI8ZXNP| zY$-S5e%%=qAZRc`j;ld!Ad_hE1eNkDV?6)y&?Y{hiTwaSG>d;B>fgh<=xC=GwItyBWo?a~$0(MfN_sS_}*;vJo>)&x-;M%9L-XU#?_?u9($C~#K#@3|j+V^WCoenv}4UHa|gJ_~W=C6Kus`djMCxum8#sf}inir*QV~RGrjhW%Z zPIo98Q{x>j3}FFGxhq0DKp=L}2py798gam7UIx##bx$yqfZ~$9<~IG+5rf z@5;60@I30Mq#uNETrl>(X4FCfA*NM#ldEv5QJPCcpbE(LLvclx+IhUxZDSVRJZRj^ z^0=g_?}s(-rI9lITyvFW3ZSv24}=1>-n{B33<5&K07Hb1HE`hB0DU<`XNhXK!KDzi z&7!1Fwcb-dKGphKo%H3qo?e0&y7VBrud%m%lCAC zPEt%Q@fMZ%ouJ2mQfz=#QL{3{IIW?+rLntOc78ggN>?Y{+gYhSxq0h`8E9{>a{VqQ z4^QHeghBgqE$~#*4ps!TwD{3gS2V^i)O%rQpH$NbtpjJ|ks+G*jVi`OOEq14!XT2D6xqUfrrmaX?_AQf3k+#x^5Z zJgNO5co^hm^d{IvY_>k5SB9N@kwi74Z&-0*hR{!j-O$*BC&Md*WxV-%m`jcva?{o} zyAy{nildLVoVvUGulB3}KC@Wpq`?~mZL;T{LOlx!kO=fOk&tYtq^m-&eZ-mzNjLR_ zkaf2VA)!0SbUQ?%0-T&SA-rUg?DOL~CzyhwC<6t74$g-V67NW#M%K>!gGWyZ((kq! 
zNEG_)r%|5p!nvwi4Y4=j@`AY2UlFmtDoo42;0#p>X=OrUg(DnLF52RbJi0(B1LRZY zuJsb6d!Otb1VZ`lgd0S_$^nUsNgt>c9Z^!1K$AWsFNnx*|C|qWKlOjy0S67ye{np2 z4;*j*2^`=xcJII`>pyjfjis53sfn7CsVOsfP|W;i_;6LR1?MPKfm?p6!pi6_51jxe zZ2{CDXs)ESU1G}pP()9s)M*@9);I%r_#Fr#Q0OFguYXce_EL?d0nq5%ukLHlatChz z910`(!ki??xCT7d3c;j)`WTlOfH?l40V7XV?E(vm6}kddm?n6bY!No(k~ z2L~{34Wsjhk7pxSNi~kqQI9btTIC>;iv^e1ve!0E0z;dF?So|?v@MKlyH+?WjP=)c z|53#QzBE;P6PLFokT8#*nwhqFfRVAv3Uur+hnVbna4w+L{9tYR6Q6A5`|;=zC6oP7 z4IuspH#R>hW@d)w!4F#l=xaq+@P~!RclSkMNydQDP8b4>MK!%NVdE0i4@+0F*Dx8x zSznZL6^bMF_WAQDC9XP;OIVnSMz^Rdmug@dICMj;Df2S=;DenxpEPNV)*RHWl+26M z>Qsf>3$~3F-U+KA2;59v?(n=bB~=rVA^eb07T+av;Dd3hRGJ*uHIr+w%bL7PBoR!0 zLzonzIJLyj+MFeUb{K4G5(1j7*&uEP9OoyO-a;v&Q&z9+#1ysxHx=Kf$a@I+nkP_F zLO&6_XH|OljYgw5Lu;~JR{sb4+ar42yTr$eg&xdD&Ow&Xb9Q^Icvi!J>@dF`9f)e| zP95l)=W_zxXB-*+SDFa>7@xs=REan~C#RrabBrCyzS1p$AicHQ#k1o6C}PYS$IG4H z>Oj&sXgyQiP;~xBNo?5R-u4EZxjg@sxqdHF{zEeUJ7Y;$+Wftf2%NS4ypMlhzfd)6 zRRVQfKLs)ycvegA8qt98 zeXv))Z+~0XhS2{MRA1cbxWq8n>14@>4aH+*$F7vyX<;Xc?QYVdaw{cxz)w4gtvK24 zJHBFcp2R((EIT*+@ls>^qXLfg4sn$9fU6M(Oab-catn>P>n}&-_JbFCCi3 zcC5BaYhdRWk4;*wm~__hZ%(!QVVz!0J9-&zZ#ntB1(oXNjFdfLyl|x!fe;0}L=)6b z^2gNj*ZG2x`hfKC52BE~3{1=AxR4X@xS^k22u%^QV+66kI39&DI^RG#1B@vn$)XQ8 zV`!ZD-Z4#24|Q^*Z_<9DV#2jY@Fh!NN9&1FZVR}gtb6-pT~5w0;3NZVwaW~r$d8gm zXBuig=F!Ft3$Q&%o*v0sDn~`#8j-Rz*-93%9!6>tS@Af`-9gno+u9*Xc%ajt@iZs_ zN<+VY1NR#3EXzZ-qnVUxl1+BBlNYMB+2`Ji5yN;Z^H#8*P_=nJLRt>d<(>98Uzv*j zs@DfO-DzoRW(?t{8M{p=g~LM@h$neSe2+M-5-EMAs-5ZNi{A2xx7Y>g1CJ1*7rXQG z>63QM!)3ZgIKiKH5VP&M4Z|=*_$7U@l_#X?OG$-`C+Y|9y(oLW zuw4KY4!Q+rMQr&uy?#iLz#ErYEothFY`6^`W6#?1m%s=h5a4~gb;N8sADXIBgN#BS zpVoz`ZYrQ_vW?9RjG-ywIC2oZNvGLI!-Z(T82au$U94Mys`}Cd@o`Afe3g!yrPRxd z+cZ1q$n}d`aXwKtwApd=l!o(&)K?@CYy!~CZ8xs;G#N+HcUg`(RPClXjT@lTema9_MZ;~ z8%1ub?C2HFHQqdp-(gMc;(FJBSdE8#v#U0W35?mJedDT4kPZZyYX`On9!^i9dIYyt z@Xh=^uI zcCd6)vRTv2sZ&#lg&DDw8~Wo$5X%UW?Y)y;3sJp@~%j83$eJ!3<2E z?0^%_musR81~~yk!#Ld8@*AT$mJ{?G&zjf9mWQ+xNk|jBEPHp6-n#gp9r!cxb-cvZ z<;(j@s+7XP?osQOADY=gj;^pKSO}*&Ta6%XBKFD6aEdM^<8UFzp-yMXb@^0b)q?eT z2|-cg)076g46&=}Y}M%t{pp+9L3z**lLZn^QpoA$k0YS&Z+nDsim=w6jC)mWBd+^g z#Gu(;LlR+9dl)(nWP9T?vhWF3+KnXRHYn=%7i;Nc8tqGKz$kkrYR@{t zufXH(=WD3b$4=DvVcIH9C9B-4Ren|$?r2Rc&^zm(k=aVF_1p9D4_{D7{yk2&B7pnl|? 
z`Nbp#G-7CpZ$}wt(sG<;42sQ}Ip}mX58X;AHMY0Wr)hj8?o3b%1>=~u@pFv~WzUz2 zK_><_m#E;8eOY$hFPx7YO3MK^;dgoe^NGJ-$;EvYZnoEgd01e^B=sGp){f zm_gTkC07~(WJn#0qE0Y<2-=pl)bV-vhK+#>pZ?KYa!=Hl0m@1Qwg;w435E}ann@=R zIvDS)Qp(&mjysrlJ)=H|D0XtLaL`u`UekNz zXBqjhaUK?A7n>+*cYUhLUT)fK>ZY|VP77ex*3wINSL} ziZm^5s4{*iX*AH~+Ge7Rj=12Gj4Nso^JW4;b@me(S6Hp|prAo~5Kr0MU`*fS6{nxW8G-*%%*n6mil$C>RbBEmE-%=t=4X=T08g#o&E`&@3yn-j8d1Q*pnEXks_a?Q%>m zkWL!Ok1@8~mYzQ(tUVv#J*q->7#VB>!mUK788K%Qg`!*bPRPWpwf>;gdZfj8`uM$p zlyZy-g%+aw>zC#d**Tkan;}#SWPa2VYb_8l--tenx-1&J_;5B_kDyd&JOuzO3vX=E0@o_Ao zOzIaM|^V7Qqgb4}B#} zSEIPY^GKCA=LYyFNI*yvDmk7Eem;g{G%9l=$_DkARXWziYtCpo+t<4@YcxaVm9(`7 zFp@7Khr@*y3(DX15zZ|9$<{D!rA2b2LquhcNCvZF zdF6_zu9r2BVqVUj<`%q|pX5K$seX>2(F z=P$qEFZ$u4;Us=Srn!U_wzOkEDT=g#Thzaiv3HH zpMzg@WsFVL;U{&z-Qpd+fbCE~4Z*WquCF^&WQL*%(YQT|qLhr7LKD*}k^!`Y9Fha( zOuH>6rmQkmYu@1B3=a=i)PMFVPN`3RXW|%*Z@`3ZC-4pm?x^OPw@B#G%BbI~N5|Sk z9@$T}c{xcDHw?QZxOY%PqP!$cK~g@1ErHbB+|ysn1aEcU)dn7iUDqm_xwX4UnkoIn z#$PKQz}NFw72#5qV7UvGzr>E|dRc7|fIA-N>TIDj8mtH?Kn30=H+`0?OT&ShsO?e@lXYMrPQdt5^R32|uN99T0z^xkj{Ouv4&9qoGmFceCGGKdfC;^=)4i zUTrLXI5HRaJC@q&e3%y*p~}(3!(rpw-j+{7t_5-Tv*+dL9mLq`?q}|o^4EQ2%kKcr zts$^9R(w}a=>S?EV-X_74hI-)rvvK*u?3P+Kejx~-Yt=2oHI5^Vyra31hxCOSvqqo zInp+;)cCo6Xg4%C%zz))<;Z`ENPKE}mV8Ja@zrrs|C)(%-K28mF00+q=H3x#YqU7Z zM^J_t^u`&3fmnVPVz$U&Kn=nPDqE8V@($pVoMvS~t zQun&WtEPIqA+NNdCXo<;h^0p!>`KXydf3Uv%E`n7Ui>W=Y^%LW;59*Y?94wUUD+|- zlAu0CWw?}RebQLv*gtsDrK5mS70g^{aL$n{lbv-)m!`|dyO8bzH$O4+di959wc z<|Z36j5AKlB0rs7GSs0$4Zu1(nY_rxW<#x8hc}>&LfINa+S7vyxQFZ4JhUY>Cv8$N zv_(<{?=%ffw!Ff0U0OKpICJ{OQ^O&nB~5`E15sL4sU9=C&747M{-6R<^yA0QNowYB zb_nOoR26jtYz&5`UX3t^)j`~avPk?h=J95JqZ;_{;l@UkmYI58p3^@p$cY3f^6CgO z;T~%;yhTHW`cEsOntO^bO#nc}W*x(HmacbqP~Ic-f5I(cedx!XMgPxoeU5ba! z=IMBaA7vt|ueW!jFRZpHnws?5LT*N*?F#ksv&(W%t!^0}Rh^IYFbHn2dqQdhV_g`wUsB||D4vh3tN z4H{Aff=P(Y-D4^_3%xh}!Q>cP?~D2HX;9L#9Aq5K>J`h*-rqP}&pIEhZ6eAjbh*{X_0K&t2~TPmSPvR8`mla6p7Dxl6qZUX}_r~+P4g%sf(4pBQTk%abNGF^lqQL(yR zeO^lllrSQ%&y^x*UOY@9uWa$uR~EtQ zv+p{ZMPB3JuXD2!MU+JwKU=%fXagA-D=xkEUXA^Z$W(IXNpNX$ev44du67GRlEn%j zChmOo8qfi_N=O4TJK>~`XhxDMA4kP^wvyW{hhA%*3Y}V7G(&Qp)mEy{8jE|laOSF) z&$)~6w-JCg9DO+ba4OeX^VimR-h9I64H)J<$JWcoW_x?lB-k{ObGvmz#QT2KxTcI( zPuq$Z#r0I84em_u_-*&2Xb#Pt5SoBkUaszO(KfESF~cF$9jV{3A6)rHkJKf_B!Z%0 zu>d@Y=1@h;M&UVgrk{nxi21D19n{a7aZ&(vil!j`ebK29nxEREr}m@g?jmwUq&0rV zgx|U}6J9P1WUHV-UgweGv6^a9u5q38yLRixi>8YQZV&x|JV`z(y_vGDsPpauzZVZQ z{bkqH%1r@i4G*ihI`)>j>r<+g1$HHuldG1x9{2T>%TL%FB^TeXO+DDV52;vinAcZ0 zXL(~Kw#D!6zl5WmwXM{jhA?+$*))#B#|oqgvozxnalkg4onE2+hYk zd!^2Iz_Px>5mJig^%Apm&crTyCKy3>lApTY(*)Bz_`R!Te^Vd&aYkuxGxFS9F(B!( z=qP{5>TYCS1>uKAuX!iT#JY_?wnC3{4RT&6QnIfu5%k-gMvNlLp8`K$v$zXEwa2hz zHl7i3a6Mc2AD*sbb-1`*hv)J*OYR{uO*`T|Z{HK?7uVKj(Oe zBx}7wN$9&frVQ-So}HDiPKWyRT5LNjy2`-j)P$CN1S}5d96-X)TKU{s{rxeb!mfMC zGF%N?3J7m{{L4;1uD71AS#_#%bv?uCS%op%G@orC<2S#|u1Zk!!M9D zXL%ZQ`8!`F z3PXFLYOzkE!2!YTcE+0W@R3Zz7PNKfBxzk~g4RWU(AhgP-3Pl^t>-^9|9(ajaq1|q zUiEeX zsg_a@aq+ZtxoMPAFv^J)ffS8mQ4N%MGGw z5T(s6?#Z3x%@_)es>?X@1ce$m<=)29EVM*9(_q2ZbSlkfCU3bG^Aml(@&A}H=hes7 zRf12sp>%ia?O5O$U$25ksdnSd!Pu-7q~0s8TkwtYiOI}^y}0e!3IeNoy^IYlb7QK$ zlr?!NQp$~69%G8*=>xG_|BR*=O|Vp7KA#U=V*8@Gh5VbE7y>rQ-jP0eEdY3V&GcCK zKJkHf?liJN;qsJ@P#6ZwHj zxW_u~!}8$65s`FNL5Vk%v51o_m?)x?(Zx#f7=Q!C?+@{S6!j)Q8f4)KVXLI?nb^1Z zpqXsF=ExnD%?mQ1@{OBe$GF4XHm$kCxzid{YGZ#VPsYz|!lsm5NH12?1bp8n7? zj)}}flYoa+x&PvC{@z7m_~)MfnwF}Xn%kP%x%~2J{#U0LEVli7UKOKe={_TY`4uA@ z4sYu1x{w2{JenadDu%X2lxpMKs?x-I56g}QW6#ZSn!*pEkWcCiw$nS1f*%AUXE-U? 
zVezR-ek4dtdpn=`+MkZD**|+B8NOZvA3hM@(`XI_s>cL;b|G@Qf(pCFaj@AN+XXmx z(~76e*`;&dfZ}2Yp)VM<;XcIIVwjKl$2=o|W9FU)u({z_mN1u1JG9x7F35>Bg=1|6 z%gOK^E|(<`0D{Lbg^)M$4g@aFRm#5uKx|M!$mN|xJ9{dBb?hM&|Y8hf`$>9uw zW~Hq!budO|0(!BDSc&JvC8?;=<7#7(5h@vXyafvy_KGSC`8Z3RkAVKgey8txwRnsz z1KW*neF9{u88<}{F>p2c9|ya=7cj;eJuolQ1ffKblNdlUCk{!6v7r^s8r1Qb(1;~- zvoJmP1)$dFMkWV6GVYrmEo^FLe+14+z7ONr0r?t2Czz!N?@c;%WX1*n6cd&!)z}e? z+Q*1zZimWuBE6m^03VrH1yezvDR`m`tvlV6pLnNqLtYs;c;iGcxGqB*FoRpgr!6zU zbjLWUf=!b3{D?;nv1;8>cZm{>;|z`3Jd`x`S@K1>vq+EmxTJ9JAuBreZ`~c#95q%z zykf9Tr6%PF@{E|6+5uWQE+Y|{N9~N|%FMFa5{XQ15n9!u1urjRzh)6mNbS2YYw`sI zSd(}WOvttPNSiFC2-G?mlal%U|Tb6W+;&2zG$=&Q>>V}(* zt(PK>%s}Fdp`t~+c2RiNL*4uW4RWv4$jhKhp~CvHOl+_r0~Vji#;~ zzf^A#xTKVZAiKiSsLqZ){OKvf`<^mH58VAN_!rCJ_jfDv&%6Ci@*-wx26VM?5qAT> zV=17WiOs)=*#COfHlBZ{lN_~Qd;Bo{sO9VATa?1X10d+_TnK9OtL{W(8raI9RJElR zwVygOY^V>W$BtXx!TqE&sPKp-25XA>Ru^JQkPf)LT1d>y_S*Sxzub-+_`f{A3Gnpk zQL^xKG>l>&zyjb{7YrD4mJ6b>w2YX3#{%y)a!zBFV_2xB054w!c4w}b4OV;Uedfl9 z?A$nQgcBJ^(uu)`Y#mTq+WYFOe*yW4V2Ql+{5+$ce&CK%o{@hcs`!hC$9DUf04ol= z+`*vm2t^!3Xe8fSt)>ZQEy|cDXl4vbznJK;TC0>rj`|~DuX176_&G#Vm+>)01C#5s z;-+fZgBfW{fw>h|&xb1e*~U!QB;ZGG@~T6c%?7^l&Fn;OmE|~CBD8u>$%KYQbnqI$ z7?tn1;Y{1UX!vW~Q@LdPJwIUlgwn(!&^#p|mT5v^o*wwZv!&o=R%qLV(><0+ltgHy zKsw*JE*HqymYnr~^hc4bxV;HMEVB#eq9Q(MX$T>A@|W`lg%9x2VUi9g#R>jJE4n00YQr_t9qa(weQ_zxo|JsA_RE%(#KJ@K(u&6|_) zp=+bF151}LawE@yZ@RBJWrHTfqBZ4+&k9|>K>fJD9Nd>s0-91(>xY#Y_ur#1ORkeoVLX z0P?J!!8A+Fgf}Cwr9|amnzcohag1EbAVXq!P0ixvVWt7DzzhpU#0~xGWOLFSjtdEhmp3z*qHXT7aF~4 z<^7n_#~mkb0f8rSbvWq0e`reu0$zNHwlZu9uqJ0MvPKuOAKlwouXdIOCSGAXWRdBq z=kJ|kZ8(8!bZLk>ig8_ENX@!WoM!R+zKc}QDeM36~1 z`H?Aal>AvqRF?xc)k%>3y0TsVChA_s{s|K)yDQypv+_0Mz>O9)jeDxhca0=2$x4`V zA|ui$;S;atbxa;{r0hv#TCi-+DJA!Sfta{-HMfid>Roc6h0~GW3JTv6yoLzg>rBOU zCwy-HuUa)!KA@FV+2?^6^2@D{2+|WwS2!PTa%n}H?PhjJjd~GMd`uUiGz-5Aw|%?AXqL8DjI=Oq4i$(#Xq7ormU#DO2zTM(pom+15%>CC zuFTh-+NP=78vX8A-vkz>M2tM9pH3tc88hCv@7e|^BStZayqnKK$AA)-)WKW&Tvbq1 z4j_rL!Rwj!b|{2d0ca+@59{*) zd7i+7PJ*96kT=$MM~X`$D1L}LqF-c-KwK@TA~0w!{1-d^_vFj1FpU5M_4vmPLTtVV9@ z&m!q}ZbcQg(oubJEFyUi>#dMQ(AWcPK(V-C%)$brTsWR z9+(}Hd2ME)`r0mp%}RH!^g%LOW_*$+&p-b zE-t(=tgpQD<3&=9Qe`?ZA~A12isNcUcSU#JH!FM!gCKL#lAtByjl5zV%r*Wj0+F4# zqx4L2#|4=lYfrmKU;7auM`{M&N^4?Q;E#GpojC*D6};0@030g+Q@8*AX$k-Nw05R0 z?)FaB@BRi+e||pO_z4B@=j-pFot##I#*84A=c0k-%%#%*iGiYyTzYH!UjAT_x_I9i z4Vg0Y?v+5Yqg!8A(y@Zsk6Zcng->Uq%@KJvh$_bdNY?ph{56jtfcH!l7%eZ8g%yH+tx%`wWHNaFQ{`O+9kuU8asewiw zK+q`}H5ogtyTYJvq`o>b-+VB-U4f~+J7GjPp{C+Fk31k0RR2>XUX`_S^mYul?U2^3H<+?Fc2)*l6RH|I{cj` zVpL^+t#^O6BLP$uhDOe9LXuD@AiBL}jHV?+kO{J6UmOlvIdQ;)0eNS6l0JkjH^--! zIs$dSfS0^a$VH`2qQ1#^)`mU)7^WW{E~_oo?cez*?A7vJ_GS!(q!z! 
zrya!R5wzRPXW65VaD!7tkv&0g&u-e907+)7@HVamxo@!XQ;ZBEa95r&J68${Pq6pW=V3tB0TYig4XcNVKDJaO+sSgdpF%d;_=_j|h7Fu_cr ztdiam+>ehuf{FR9Zz@e)NrN;Kjwi)MBYav9KNO~>TtcM*sGNzRV z3GG^6i#FVMR22bKA6-G4rOQ$B;_%hZ>lJ<64FHoq2U* ziips6de<3;#TZ;4p1wI`#Mv9;gB4b1sX51WAlz0kk9 zBfoRW|I;1$@AQVEsv3WEOa3C0$EfHi&MKikkJ~MLE^bkRh8AzDtZAjevVVtKSQ&4q z5rg?7u&Hk6RcH_QB%<(3L`NqM3ESW9OfzBlHQV(oK_r%-82DdiR;Yns1AnkIN~# zw$1v4FjmGN+lQ4bp=H{n+<3@!cXN`dyp+kX2kPVjOv%0a-KH*pQ&V%!f{}RELWOBi|iop9N}KyBAm37^9yqhr#Db~I!9r9-USw z{7)(hpEGf)-1k zU!taDOJ|!6QUa2$X3y%ad{MyS6}qRRri}MkhrFZdmDdPV3T1fIHy~Nq%YMB2eoOMI z(IJX#zr-_j(6^YXCvNOZpP{_ltd(0<2uIz6D3~@WJsGF6Pi7Q*ZY-Ax*caZ^c)A%Z_7+D|P!4p|G4>Il4r>{?(hCW*>z^^AI3EH9|+T5a1 zhDTF&`g_f5h4(^De(&v{V18W_$R$6EL2x#%0AH2=WMsVG{6jVWxhB7LjsC{1s^CuV z?|M$~Pk-&``=g`e{C`q_|1wxC{;R_Kn4DTwA*9Yor&4UBQ$Nz@O;A8e$#x^6EiLzx zbp_c4$h&kL3Y8~Qm@0-r(iz5*o567*BE^(L!kXmV?tDMa%_`{U_k?T+uZXL{{_fJY z_MkZkc-U~pPN8$hPL_3(`p%;ccYb*XKw-M%T7*%}0@%6a1zfm(lBPBeJSRFmg%fn( zyEi-HhOA(+Avs@xv~zuUeoIU!y~J}MkhL?N)S57gK8r0}<49wIYoh5#?@_WvY?_vY z%bXxI9T12aOs2aEFUH#1%T!4QNOQVB6cF4_9oflMNA#6EO~Kt|haUN%t2s|*YS~a2 z~VRw*94gz1M~2#EMhZD~Y!f!QB#hFis>(iXjlwDCP91->q=&ooHYWV5>_zGAz= zJW7>?If8b_Co@0Z5;j91#mI~oJz+3JLC2(&%T0Djt!l7Yi?B-_U}t8)FZvQz#Y4s!3E|TO660yP!YpM~S6#^qvCF*Dfq8 zVHOHIsX0ePAw`h5F*_ii!(kl*L%c9y&`X)d2h`Bz$y`)HT)Vh&OArG-mp&iMX| z{<|t{#hXvVX0zG+Lp_6U26`G+M+jHq8;_u8hjGYlJlUcZVRsUH5qg$sBaX;c5X7@s zRO%e_x|S1F?v5D29v5F_ZZmae1cR^i=g1-?uh(#ARX_vbKy7~0hv4Z$<|({4E2t-8 zva(xB^g!G^httCLxC-XuBZh7ko;Mx9;}d2^YysnfDEb3yy-_#EE66g7^b|-W?=V}I z+#`aC$+DuJP)56)5SBDh&EcU4Wc{9K9Y=!Mi z6wS<>!GfMYOM@7-_o`rDxo5jTP>6%2bl^^EtkBtZPsd|^={z$@iNVwJy#~H(f~;L zOwg9uoK0|kcQBgpV}RB7OLMT$`p_)eVD{jQhA^iaZi@)eRg-tt$a8Eyf?FkW-VLh) zuL@9AZg)O6?U#zfqOCzmlL9zdCw<8PlDmkKU({H=A(180YOz#`$KGqR!p|;sW4@b1 zkX{ahxgOk?c&9$6Tr}p(m><`*m(5xn%$JwQ)XC)H>jLi{UR7bbUs!L=qcjVI!UOW7#4EawUpYA{bA*$ndj=zw2rP>)8nv3D_>EKTx%#RmEkl;%i=tr zqAUtS0KOR}PPe2nu9N3cB2%|AfPqyy3Z-9h!=yW*5VZ%9_e8wraC-hyTgY);pA&_DSe!ya z7s070`ZnQAics_7e7@D;k@WoekJ?+|(LVtP3}6EO$C<`|>czinZ*W@rm3qM5jsKvS zKW(i48iR-aWtoJ%NL*DSR7FaswbQGT0C~X>7*8V=q%n*6;OqVNdf&Bap|#+y!l$nn z<@wc1f9=Wi(u<&3$){K(Fq-S(D0l}%_#FW(x>p$r-?^o81mQ~KTu)V^Z0@=|66 zODEcxTRUd~3#MVk6S~tKGtE?lRb1 z@|XQ-6G4kq{EtlBTBMu=S-=*TMpVXAV1$HmQl%HersLbvb|1~`@|=zHuS5M^qAeKj z69YHUV3>d(lTM{qQgf2-%0@8Zj_`N2BwTmMpMM(p)NdNJdIj)cN~RIMm* zsmGu`_Gc)z_hkMh4rVN~BmNU8{8P3F$=2eV<)7y$rzASztY4Yje`deoF>%& zRVx18IMV$mP5c9`{%fn>|9K%Ycm*41ukloN5@*!Z^)laoBmn}h3Jwl5T5Txyq0-~ z*~_v8&a+$9%DIt0IUZ(uUyY~O69|B6_M?Z0u0ZQ1Y$0Ko1N@z==Umg*JItFquY-a1 zE3W37EbwE708I`$BUu0Mc?=m9jG~e9(e)v`moRs5;hb_?&b`USvBj>xb3FSWUnsP#tT4+r;ibNUfFFs=c$VR?oKxKqBX^BqQAF zyD5*3j>&MMaCPg6CI-g=QS0&syib9ZW5@Uhh)CCrm6kVL--@o~kcZJR#?Gnf7WyN5 zi+S4@zrs+WjIWM6)qzAI%U+rY=$4|Wk~;vAdQuxdishuQk;+zl1gea)LfTFb)VRP5-BPeZ{Ibq4!6q{tp8aqZhM(q z6wY$z^1`oj@mX=t2|s<;P$`9qXWq~|25 zGLV}gD$Ua{XJEJ0zVEu$h#Q;knDGW5I%xDTZBEtP{o{1Dmoed@6VX;?8&)ReMb!sg zM$=63mrXCVcF2)qdCen(3tHv$_5H5s?HV3kC_}gh*#QmZG%9)t|Li?UAd~0W$%^zC zaxt59C;zK*ny@w6EoS75Tog(zRMURDdrQ4QAcN;zgWkyPd$o|;KiUK``+sXEMSCIBFS&wR zp@cbN#6*$b<~dL07W1nVMJ+{5ZauYmdwkc_HW9Rqse_GNeaT3A89Q|>&{Ob z=3CB}0)KwrP<-=yA4eKJZi-8hKvy%fkiqflnH(ELU|}dj>AMYRFSm$b8?L2YO)$Mi zJOOvoH60BhF4J0JMAzd<#x*kx#cCRtoMTaLR}Dw02|*Q;n)_*~gZc!ei7yTtL>LIg z)EL%kI|3o1F$%N`BqB_bKGh1TD@~cJHIWL(XRy{@Qggfs-J7gsF0!~)@@A*NNl=zg z>VpD;LpG%wRY}l(#e@5ZQL4lqc;%0*|1?#RsS8U^G}bO?WhzTa@}_>4;}ls@Dya4A$tpcMz!w=w9HH0!3`d&<8VGZ|%HfFyVeiZrJr%C!-U-f3JpKKCMJXuf@ zKdc6fqL?b(NhHaNAdH2x8%*a=z@ri-2M0rAi^N4kqN(gP@-Izg!Q#Y2M@P;IAq(k4Ya-bl*O030UIsd91GtR|X*{MI@)$^|V+sH}D-}10 zM>qwOy7c@<#JDCzQGDQm5)=_6^;>`i^9cz3Es4m0cnp#PzIW$i&7QWzvc%KpM 
zw4_}7H-5!3ti$~1=5|kglz~4s2u6vFn1KjZa(-up#>`={@xBlSWz(brNt1I%j)T^z za+}luVi^&0i(f7>%4I4G=8AnqEQ^Q9*V0V+!93-zRfklUpQW zBDJbnq8`fJUnitpAak2e?&U*J<#5)!_v89a*qN3;sZ;BwL*G{%#JPf^l;=su*iH2B za3V`2#UUR$_4?XV9>E)p(bD7HJNX znYZ5{#cndAYH)ftoNR=nvRA7x==wHBq`Q-ye^l?3GViaxydC3dE@Bn?4BWaDb#zpQ(#;BwGG7dT&vrUVANv= zAK}7&C!`IK9pUQFP1JZt`e9pbjcNBV=YFs2Q zZiRW=04fEs;@BVa0>1hD+zY)rA7=@v&lPS5qW&3w$Pty?ZpU}`khsn%cvGMl3-xl- zu*c2K53l5goe8xcJhMlO;?*;=kEdUgV`a05LqJUZ8h-#r@tu{wJG6hs@q9qS1pPYv z`zfz$AG?pX(aV?U=VPp>hfR^{sHwAQoni3i{qFE)c{WZVp8$rDNy$yj^&rJDHxV(f zKjLS4x|!)iOa231{u16c_*?6CC>+X>IF5zg)QUCS#@|q%w1Xk&6WE?(Nc7gWe{m2P{$KHE|K=I;4=09y zz2|@9GW^ecT$9hec$fcSEneKo-uAz!4jupWXMli_8KYK265B7PX#}Tws21BNCY1zl z6edO_U%U7MyS8HHwglTXd|jwS7$-^~_qh(wvWCK77)Z1lI(eRUmHD?%TFn-}AIOg$ z&=Ac{_`(5mn^2bf>gG%GBUAbk-5u~E2lVw@J+5=DVjweXly(fAjek@gdz`LWm~4xf zlkH=3qnvq~%#_=}EN!qEBG#r&dPRoWYI%svpR{hxH1A&6ej-|W=cXg)o+6{vUi69p z#?O=`gu7R>ht|^P+i4-a^+?qdoMQ5@cySx$B17XWE-vyt#>+xU6d72`vEF?YZ^Y|f z4^t1f`YE3m1t-G$%czraE=W`6$k#HG?Gq3kGd*Gx(INJncl4*^z2QNqQ#?IU-p&9I z)gVd;xIk-}M=Gh5=vzFt5Frgw&or_aT?w3mECNYUNWv7kC%oB&$Rr`iKgu6wOn(md z%c~W#`Ly};IM>2Enkl1XIG%QtE?b^pbYs3?y`MG2^O(Y*N#v%_Ckx@%HMTfUn%9^k zVNr9|8nR@iy8Mc*y2aN`f!>K;w#8PBQC6V!HNUyHHj&e^3-L*DBmRC*Vexm@HQ+M8QSYCt z2-5{{hq89lglw4fyi>u&0~!UHxyD*tjfY$0rzJh?k)uECWFZ0lJ8pOjf_Pd{%*PF) z6eYNGcbrl=yiK>*mvh&6NhFraCm#Gh0vB{e;UQA#A(RhkMle5*6&X-FmK|Mx^(Gx= zERUwxe__Ds@M_={&My6k!z5uK&An&gV9#-&I_wd?|rF{jb=;2le-e_{5 zwLcM>wYM zDaWb~r3D25CaZ#Jx#jmGN$!D3;%0EhVeXQtwZ;ri-S!t?F|kP7{3_CcX>62pr=)W+ zDw*b%Cthj}cFD6mk(N2pZ~cINU!Qw3BV9=z&Se*D=}ZIoAIr90DzxE5N5 z7VD*~uFsEcPdA#QW>rsG?`?9q}D-trw#$! z?O?7EJQ9fTsaSG)?X`Nnpn@hU`{+U@ZlwDyfX&P;9;=so$=-?{JblH@ z*56aDPPsJ(oM^LL6ALGSkZ9hGIwMG-+GGhc6af0j{#dG^q||CanPwNzQoLL))%q@; zmp9*6OOD_$ImngtkKNKNk&R&Vr+;IVk(@4zhXQ*RX-hwS2z<7cmc-n}@=To&QwW)E z525SRB|(&X5ElF3N7$-xGYY)W{2ujfjyaNsR-1yOc(`+6nQkaj^&lJbMN9HNiCzoS zc&&F{ea8r}P)lcSc2T>K%5gpD7u18KB8E=z1Q_$sy@haC*xX_PRS=RoY!^FA5Y0_- z5FMNj4e0#7W?QvmpHXwN3wxDnrL|5<-@@{K>M8w?30|0Z_7GdWrfbkdHQvSJ)3$n_zK#~Ne~$T{#o$Xl#d%2AdmWgSl}JCP zCZ8H+euRbo8eI<8Lo@S(AbGwrYhGVU#Xq%&H%7DrLX}1A?AFdP&%wzd6Cf5g3zf-5 z|70K}BloUTASb^FN~@;>O?Jve$@}81i?fHqO{r6y^qD^61#(%en;Q#_O)1!uLN%e> zw*~RIXT*9=ODcmzXhKsn$Xr-7F>#pi`Mpfd*#^q3hj{DwpzMGf2tsn@XwQlh-F!2yt=EH7}IzwR}=zr>h;6dT7&~M<~9hc6&qnQor;! 
z&i22z8dFcAhPc|T8O1NLu1l;w`>oT>4I(nG*9yird*)Bg%E~yC!w9<;v9uOj#nA1D zsA8rWrzc<%AFMQwC{S^qk#K5-BJLIJ6J<|ws_tBbKgYkW6Kc0=zM$F2;$98-GN(NQ z$$lE=`8e%3bw#oA*UiR>x&J8vTy`DD{RHd1YkFynQhA1JNkHmDcb%s&YhT-Ter}iS zGJx7asYE8*2mN%B8x%d$A7fTH|7ojYx?C>{oguN!%jJ0`;~{ByH8%gnF-zs5c9MaE zKT0BSzo$ldLK~Wz)TaZmY#J)jRepE#|xaT1}2=th( zQKzyrBIT8H1WX>gK$TqEN7I@$$!Zx`1PVWsXD8jjW$RdOD?{JfhJ^mHuMR58_0b+g zD~6VyEER8=Y9GE;hU2nJ@ZK{990i+eW@OqUh}i?|On-ceDXg8^+;`m_gD}>gyx66$ zj?Y5qppnwQ%a}BY4Rv^BuVR=lGFVCwHA`8GL=-_Y9%k&}C8ZS`4m<1w3kBKbz!+fJ zJi>R^Qz>`M*P{5L^%iN;s7Sv$t^{{Z;U4vUpZ_EC3{3>AEwRpB{~|^V)0%SAK;zzfKeiOKxJ^*qlw;EGa zD$hhB^IR3B0d8vB^(1a77n*lkEC+FNI9V#SbeqiqZc!6%piILc^6}Nb@Umts7o}5A z!zKe2>2t-RCa4Q(5OWhLdMud)MV|sQW%5&KN$=xwjmcIyjBqDsS)Sh?9qx}X+rz_G z$uEoH8n~B|pK4A&MOC7)^UGb$tqJ)946or%<=dvL!F}3Y-3T1D#GLKNjeLzEa7t!U z`&_wnyDlMKEsO0DX6(|Vv&Cc(s_r{l%*Sxu$n)t~EHVZ!xXyD1daN=fd^F&z?n(1m znw@8a{e9(j{vgNJHg&ooKnr=gU|f|s9f9nIagn0V9KLqLJ<|@BGlsetV=uDG%kKGd z`8Z%asS<_8PHiW&6~1=k%9G_k%);^WL^7xcaPmsJkUnah7BxJtdis!rmN-2xBF8=@ zZp+P^>u&a9t2Jl(75q4Pa%A0wZrqkSjn2u(KC`6W%I*R6z%akT)$he>msdCnzpXWA zd3B%sd5l9k3#ZkHT(iS-uPUZXZ-_CKK*4Q!h0C{qG?eg6&$ocG1=lH|@O9S#N54CA zUn=Bk57M)SZFV?_A$VEkzFU_G^m4zTIRW>%kN4!lMH4+b&*ILrJ+Uv?^4+++RMRLF z$*Vv@1C2Y#U>Er*7yel-TZdsrb#8gu=c}u;M=;?{24=y;(KL!JS19C&K^IR4++7Iq z9Rz05_yCbEvVrrFzrq%_%|Onzxfi#jrZ1fTz!ubmf5{C2TmJMdV=Ifm(=j_vzTZ#Eh$ zO2dS#wCpkWPUpA3lYhi`fy0Zu7d*xjNr8FxazXMc&vyZRWah_OPK6cg4rzH6j16Ny z=)K|VaiD^(oLXq!yC3G|O_KtuSZEuc^&{6}NF64v zZpA?PFj|dL8RJZcuM0?Cl8ssw`L>7F7xQRHL?zxj74tfX`AQDTKbAYt98YUnrEO08 zTCab3@<{`aW6f6*lsSp&Sf>?^nLuH`OFkKY;R<>96W*YDk*Wnu3T*7-p}ai=>tlFL3?jnCJuxnarCw^sF>zMdyIGzXLT!; zgBUCm;f7xF!`c4%e9-$P0zf~ZJJc!h320!m#L@_ zKj^|hHpi;ZNnxxz817%zRSB6lmE|B;(VW1_3=XRzY0mUrOL)SY8;!s3*fV$jS=&4R z&45b6>npmONRV7j599jEP`8NmuFWc(cW`0j`NfNZw3HPJdf|yKd8>lBo}3&Y4?8&6GrmEIm0s`#qK@C{GjWI%=1&A z6vMIoNqpWddR}YpZqh8?&{NzvoSd(48-%9UhAFLM8OL$1H9jZQuLY(TqP>GTslZ(! zTBmLEj(1QV45~TJi>O^&tCDiT*q3KJn$(M2pN`9j{>uB?Wjk>JoWal zJU*%ee>S|IlC#jj+i)YJeC!Zi64Fjv`CkEOZv6JENj*B&bb~S}N_N{x!h3o>l@d;msRQ zG-zr+yCB>2rEc$ngA&Dp6f!w{ruBX_jQfqECGwJtxIwUi;uD4R2`i9Snc_Ku+P#i6 zb61+^JxnUlf}3qj69XGVd%HhKq0$6#1iPFQA)GHU`EYM82;Y4o%^UyBd-!LIcW|p< z=hvc3!zdP#ur|u?u|}uOt|$zrusf*FrM=bAGCCWc3V7&)pIDl+ZH@yY@xn({<5cs6ZN0e~vp+%T8;b!yPy7i_|p zozU@VBv!l(9k{rC@g8Hi2kE-fDduxnvW+Z>IeDcczPjsCjganJcjz@h6FJ-WSngoB zU$fHUoQ0QW9DS3DoSr&q>iMn4`vsWHzgnz@K$ZitnA2x^x4TwQX-mTm%Z^{kk-K3n zP`BKe&gpgPaQhZvcvtNwuV+(%g(qUY_^V=rnNk_heTSZo^NWhcEll=Vl@|m2SPaGj zEo7H74xEi%Iu#nddtV#(zAaDod;wXeZGU4Hu_)kAa!8FT?~hA1x1carwbZ{S2d##> zV2cWiLgEHIR#Q>uCG1MD!HPwuh~9^VDPtkfu24?5#vjcDP-8Kph^0go5c;iZjK-ud z%nCsd0Se<_zo#UK2!(tfab+9shR(d-FR4bDM2IeaW4&=z%^-#0#>GbdPs1Sqx*Ucp|?TMa?$P!FJ-yY%}d{IE;=C zU3ek87_@irsJ>L&7-(}u6S?f}#F5CQBlME#I+6>ka`8%pSt5uBtfP^LCqKKqq@t-w zaP-kvcV%IDasBR>a~*81r+$`T@*5Law@HAOMj={Z;-<1*SkvhD{Nw3ibX!=+7 z<5%|PV*BrrA9us2=^g5je5w`M3#Z?7YSoI(gy)y!++kzXM;Kk~+6@Q#tBei6f%7vD zk;US4)7a@so=UYOM&gCd3WKzl%;yFxdQ+c93T7*j!R0!=ZOogf@q9R261-U$ymMx! 
zDhF*-i$o#|6DT)ay%#!W$)!|VUZrS^y3kb!`GLSlKy(b{fLLn^Yb9lm5_*F%Tf~gh zz?aJ%U@I{x+;{EbYDF><1AB(ELS9ZHx*e9*iICS?B)D+B_+ESmO5#BXY8a zk~kk@z2Mj2V`aVL$)RR-8Qp=pxK8Fh@%IXweDzI;i}8fx70*Y2d#NP?6I9dXM9y_ z$hfX=D5LlPRJ3fEK<#CC!y zeN9t1_8!1RoV$Y?|G#w)X;Q*3k)PTA?B7Qg|3R|F|10Bv8q``E*;sm+Ix&cv0_{IH z!TyofU6pL@XBE--kCL0#lXp+Y4gqOYM$VjCpmVYT~os~r)O zltea$2!%KRjZ9=@myt#562wWGPm0c=xPHlmBk@FdvVxVy;>PNGF#;1VK9c- zrQ#w@F01V#_7iL>d1K9_UxgY_$k4?J7ULq6MYgTm8SHd zU;}H4Lm0YrmT%gAHdnElPhNZhifcqnqD$gfbiL>$-b{&2X3g7&wur6qVyoDR(HPOx zR$CRmw-lbSmGjvy<4xkAjtF_1c`TK+OrfN^Ebok@mo?v?et!Rb;@-nEvD9ak--EFq zM%`JAnPjv#=6QhejfFsC{1@75ft%sqKfTf6f_@5^SNFn$6Tw>7@}>&^AOwes@Id}i zb7v|ph)y}OtF2@?3^flfCuYHPEb#XdAwiL24v-}@_{gJuM46UglTGL$>#c$g)!^=^Zp9ojo}4XRW~U| z>t;#}Xti(5hGxPvD^*J_k*7x1v1M;arO3={!J|-?di#;LdZqwq&)Y1SZb)N{r$9Ag zE?YS37=k;e>MOt!NWGz?uZjGfMF`h2drGK5G*yLv%9_N!Zk3Cb8xw*^Ko^3YGnL>D zSJCnNIJN6QsdvGMwMo3Z@2f}~)WsnEVtiRRiwUk2S@zM1YO02hcB3cr>%e^~3;+zu zp;gS18}8wveqdtL;sgQ$90w<+czAzwR8^E`ZX6qbZ9{h;*`sBvTl<@u2c2%W81WST zNR*NJ3$1@ixK~Xm_pe^Y^|#n~EN&7xbOlcM*k&r|@@ULHI{_ufd=}%|wAxP}r@XHF z4S3y>i&s69ZyDEsK)%}yquvHt#iJ~61K=;-Ti>|OfVIYSe&l|K(9Q?r-;LGyE9EZA zp7%>cRUW6Ef1peFxqXWOMkige@o|-Is|1qMff$D7uK_7wx>NxgGKLqI=9zD((rbZt zWcOc3BEI|ec*WH_fp|qv{sjM$Qu>ASB4TWtOE!2D-u$XH#E!6^ZdQB$0h8_>vo{;w+{|5b?oKlBl{HL|oL{U^IWI>GmEerba_MZpCJxr)f(ViCC082cVd zN-&f=WIbp~G=N=$wX-Jtj>`parT_&pYJl85=yP6_^U|wy^d=cc#)hYv9}maf!P(~m z_hIcI62o#QFpO>v1+MR{muXDmtjXmg-61=S_*XM)f0Xs-)`;^;t*oV21zenqSq@RF zU5ez=S$vF6p_`n}rL8?9TxKX*i~S ztk#$c(zxF1My?Q(-#@x`xuI#idYomoMwlqkhaS&*9C>P%sz^e9yA&q@-92DG;z*?P zE2)l2E+#Jr_5D#@nk)fo@ z4syW}m$}+h=zP%&uD6v?Qs5lfT*vPJ%rox1OI(&8jU>M+HCTMl~99t-nkL zapB*p)1#LjpZgOFVs=W0sBG9MO=zRo&uNkFvbDEJ-QEzRw}(0Q`zdv^gdx+eadRvb z#CNVdE_nz%q}A+c@evAPgFr<00;ud5&2zd|`CeB8#ooggYB3`dicLd8E{_nS`iq=T zN9~c$K8qbfX|qa)kP*b@nUJOZ5eKR;%yNv+G35j+5IzCkZLPsPmG5d4%D?l3JDfcb z3}vNECCaQsfhpNHc8%glbY{9~rVUWP1#RCPgiZ{(>w*pY1ljfXv;QJdi0%G`O z4sJy3fTuXM&ZcL&lbL6yRBngXVW@#uW~5SA<)eDUc2beYYBjmBeF$#hB-=D(xlp@E z4Zhle){X&os0dwVg$m*^%E&vR#`acrMX92bDsw!1Ii#m3-nx^u*8(W8`Yh+7)PyNh!!cJfZOpw1ma>so8n{Gd z1e-`77lf^ypA{8{E#v%AjE95^Yc5lO7b`fwOvI628DS)o{VF;beTE)Vm$b>H`NnbO!GR94}S_G$B zv8>&7%yfC=qb5HJsU+~{=(LM&e0M43fT_H%sFawM8-cogXRT06g4dAj{I@P*tK-=` zlB7a}=8v1^CVF-VO!XzB8I#Byu45$hSCe4+zd*X&^)p%zIHj2l8E~qb(D_g z3B{jm4y*~F^lSw)VVoxo3tptD!!ZSEF_^kjJH;G(W3~1Wtm|`@mq4lYAy8s#$}L!B zb@TBgY}Y!c()mxCfI2<8t-Qxs1a|*34mCCu@I1$Y(WO`cF!<5)sjW@@0w_)NQkUM5 zU0rtthgUk0{W;4hU!|Vc0>E!vlowR&BUNJ-3N}g2_qTS~pDDv9(P7(=GJdtn|Fx}= zAlWXlVfVZVh!Wo?kU-ZHJjI)Qy4abe&^7fC38B`7N>&0~V{;sDDnpAxa*L#*Rqt@W z7$9XL_wYq;-*0x-9B+!elDuwZ2No0Lo(yjzykL>JatPz(nNsuspq~pn4FXpNZxc0 z(dq>X5Bl!eK4dOWzYY=GMWe9w@Cgm(V5jaM0?rpL~WgF2n0n}qEJb>5KTO{%| zB5vE+xdclJs$8{~Ch1z5{7c5Dv{)B0wp4%q4x=*C5LKMt4 zk{j%3YIg~pM#3c5yp3nFDp9_>3h#bfo_Q`1%N)X~z+$@Xd!1Qvou_u3cS&D^X!V^p zZfQ@tN(@-M$wKj=?X8Hr-_9pYHRBEr6F-3lbweD#ePS?|wdxxkr>EuAo};8H=rF#)*{3q0&!E z1ub?eVTFm3B;E13FkNB8cAyn-0s&#F>@eDwhW9>Ll~g(Nb<`{_O)KB-dM)m!=@~AX zkO_~Txk1egSV5N30_4$U`!X*{UG19Im@ow+hz}>x3I+;H8V3RrV(AMy+Pm}-h9u@E zxo5N3HIA!2~m5<16Rd+oE!D)o_|blrfwh8chYZVG48c5=%B?Z zHU@+yBf7X6Ia}Lhx4E|DDu-V2-eYN2uE{cn9kz`5-fO*Prr4G65V~g&_k^4dql3*< zS2w3)BVGh}Z>-S*;)}wOMo|Wv@+h!nG|43)RS_5nV8ciGB(U>Gi(3M zq)F)g9gwK6QqI1wvWJ9c#)f}nJRjvk;gyJr{Pk@@(tMRPZEa^gd1fTIo?&)L^Ij%U9?&Vdr)hfe zyq%|^*k&JjX%%aAC)YS(zLF}@b8&W`o}yo5Pe^+Pd!ke8L2M;;KjUtJBIA|Sp?6ed zej-OReV?AbuPSFZApC;wn^W~$;2LE8sDMCQ>5*iau(~gw5*Oa3I#%qhr(`uemaU(v zGhk#~lh|Ys`8Z*L6XY9C{vlMT1c-Inl!(C7tWK0e{+ul0$E=KfLzgaXT0t4v0M++M zmm0%YakPMMwW7oWtU#2(gE!n7YAi!%4SvZ8*)Fw$076m_)aYZMv zp~QBNQrn!VUsU02Z)OZm?4v%e0<9z_I?^;odG6`2Q2s=!u3$$DapeOK2SD>uS5B^e 
znO!|jQ5E_VDLT9$p4*5Syb1a*+f<&zvU6w*zVY36I&x`i0I%XmBn+ddO*AJlguPC} z7{Q|J8tr2mzXqgj{*d_EK&C=yvx{oiV_SZmi+;)41_ElN=^7>W6Tf?+S~~XH2{Dx| zq?9A5MiMQOiIL{oqK@(G{5>y~Yaew_6#ATYP(rNo*)@H<;@y^911pYbCYWTHgj0C|{EEk^))%_t;u@46|ucabez8z6;9T-)PEgE{xhA|!CNNr z!xgT=A^I)s_mO7uQhJNEk8w6t6=DhfG_ex(j7xfw4@01ai(UYX3_DCD^5Fl9mly0rthQU1b@)L>eqX*xrzbPMpSc4 zG6YH%yCy&^kOw||L2rWmGfp@}G+CMc;D#^_#g;j@F&xb)a0u!Q)U&h_Lg4t5uLm58iYDIK3$SZSpu~QGD1#bL(qJkj&rD?%x)5&5t;%RrBOj;?qs7j-UA#sImN(_G}?DP z12(-X<;|0PGXkj%^&4lGmTJG%SJks)E0tW@ni&&f%uIdd-8Fr#OTno)AChcpGe9?Z ziqPn{W-M4O^4-l?gyKG|14UN5Kj!YE=!X~LBC8?av1=F9Y+-g{+EX_k1Or14X*f9<#DB0>un zApRP%Yo5mkN1k7|QEs-ri|UVY^-cag80TG)w?}0!Q&XA8g&O$Wztz8$c1ytJ9md`) zk&8?hlcqOjT-SZ+swF_1*J^`?;2(ap?SBQi+z#J{~;FY*{vy|3n$%hg~%|-NlgY#6&XC*o>nRIH;Xc z(dsfeY)P4Pq!d3XANvSS?115(4PW(mQU1*-R6_W>RC`Gr$rVgJV!!alrwnOR@=HEfe9}3huOFU zmQ#GDF74Ppnx+F`Jg}}nZfie*Vz`XgS9>zRz1Z_ke(K5AnKT0k2r-ETY7ZV)S(&x&M<9(Hi#@9;X-&UqPXOX9|AGNB< zq_M7j#LcesG{F;)=Wf$n91E$0=kQgg=5l?oRe@0e*y&Y5^0PUQdPctsy69x)T*U&u z4@=D0u2n4|P>bIuFQC~+^^0sEH>hM)$~kUoPcO{!SmnfaU-xtw@PC??t?sY&u=?@P2krfbR zsW1@1t>CsT(=u>+t!i|hKp=P&E5zCkVX~T`#;#)r;|+3?o$K{h*ly*qD6Xz{#H^Cp zsYId2tVzrBLfS?9_DrQ0H$tyCY)2Ql<#IEElw;*Oe6c(aa;mtf#N0ZLuLj`qJyR?% zn*3or6Y$%1`F+wUbtUQ{Rjb#uQKK(h2lONIbM1cKtO*+?V2}i;kEO9TC)p7N9+@!o zEf>7+t?fOdIJuyqU%qCY2>A@%GpT{Mp+r-^HzOCeNQw93Q&OAhnTWI`c-_g9-+yjs zYn+ptTj+^_%Y4S+myHv^8oP&0W|afJrr_w#;*IN6s3ajLdAlvH^*4rzaik;ax7Ny| zgtVX48=*gUXtH~)ITvENj_APFDwQUlhAZyOyUcRhmGxC<1&C|p_{hEvjJj4Z-@ZNs zsyC$xu{8Fb-!Dx(U7a(L_$>zToK|Z#PSp%7z3$Bz)}$$#4;TdrdHnLvfZjFa#OB~R zgFTSwxP{nk9z^7!ejHXX1lHOtV@ra0tKWLV>jAqoG29{mAO;XxM zaK%{|UkGw$I(O%Eb%7!!ZN2w=>AL7IaYP;uRsRJ%(%!|jaCp^yx8_sP#EYg_tbnv< z5%(bc!T+eQN_oX?^{Mytqpc%fc(ZQ>A^$b06vS0Bgsl0slo?Oo&vDe*fgsT`_AxZ@ z475&k_|!bIFTg{1q3^8K=l1I+F5)oc%g9G}(>!XY)JZh;nXde03L?u3W~k{cT!cjG z?iaTe(HIj3m|8>owudJ5#WV1e?3EkE*%)_g6BdC|@nNzKryqBgZjec@kBm7+YYS~` z^8#L(mAlPJ|Mj1C2H!6MZz!41Z8#u+}JO)6M@}4(@WsKhAx~j>5q7m z*mMRyeKZCMTS2!04uJRta@NIa9-Y07Q?cCVZ`?+61RL+Cl-0pKoC@1(W3i?xFaC|R zSCm$L(^J!FS1lgdAjRXB?Xx6*B&JPg;n$Q!(Uh8^1Xasj$>oX_}DDo%(undMrqLC4=ILajI1n!1(K9xes+l-)8iETdHXG z)uFMCKY3Sgt8e_}#zxmw@8N{xQtY?z3nFQQo1a}q{*%uAc$MPs8U;VO&i;x1F0d@# z`d@ge##4WWXtsb;vC#(>XJfh@nGY8~$4rSSI6|d2XMI0P;a$&l3~>&vgJfcjjTK{T z^a{RYMAp+wvcOxT!ITq3bEy~_O7)$wMXbb&)v@-+qmdEt*eYJ-u|-FvD^#8?$jH11 z#(qD*VlDlUwe8q7*};#+utIBAu>iQ0Ev1$kx}OMj)8k+;U_7E1z@H+cZ!i5;fhpYM z7Kua!W5btWEKyB6WTUPJYMlWYPSkb+I0Zz(Xn%@D=PF_*EUq;^_`dX6aHH_j6c&FQ zP0vqC<7!Pxi;nu4cED!Vz|L<|hV_F2js}9MnYIi6gR1`3(1kF5xJHakTYHf>->nX9q>YQ6|1=}%bSsn;)=9lKBKlYHDG!paM zDPiL+N+z~SvCD~RvNju_GvwG1(3wLk;NP=(C3VvSvT2{;r%sz!A{#Iy&vHsA4urk)iql}Hgtf4^ zhK+3`1HUr1hcGT;oplUq@96?mKQ4!ugK(a_AFqwXt8YJL$lWhAg?eZQJ|t9dq+F!l zD6J=3E-rlD)4YmI4(QQ{Zg>UmR(h8Ud^Oe0yjczf2c>C?w%#FocSK>>XArzJRrX`PcwE4Y;?DA++ieuGiJS|Mc; zg8cq^f7LE=-U|_blwySsjE!X<&S)Pb8!QSeX41NAKMcaMS)Rjz3w2#Iyd9e(pOPt(^%IM`A=>L(Cc~qhU0>e?Bg;1as%Yt^o?xMS z!1jo(L4v8JaJo897*RNlP?%^PRrS8{(@IEU573eg$2{vQ81blKC8Igjg#*45{5%Q@P25VJk??aSgORA-1e!7cV%3rwn`OB4QH zOpSdMHXLF|eQ> zf!4bjYL?(gjvYy<>xU+K40<+Wwh4N+us(OWV(bkZrtprS z@Ip9UfoJ0`AR?w79&oWJ9OVH(m3 zuJdy5Zj*3GcsVTH{dnVhO_95W_NnuU_t*@}_YIOY%IB1Z2xRPUv9?${4~kHu?Mcq; zYW=n^tz)`9$_tgR;xs`PQeOFV_vF$Q#PYwBHg#Spe@$voBu?+{x>H|rR1K`yHxR3s zLM=sGGJSuu(_JeITTg}T+Et;devdT|i+~t$7xdMHP)}eL0c6Nh6l9~oV{z>f2xD}hr3y}4KaAHIEj+cMZ zr||Hlq{p0gqCmx#Dq*;)5N#Umc9$D@iAws^L*g5o{1@vogxb`5QC%!UA=e3@srpUNiUPccXhLCoucQgs+X#uEBKg#)AY}O4PxbP5!YdP zu?ltq7nK|MAuDw<)eOAb%YSiJbpG~GvmYt&TJx`Yg)m>>z^^&Q$yD3&X)m`QF7O)q z{1(G}LBQej(NX;)aPR+c_KrceHd(uFt+H*~=Bip{+r}!}wr$(C?NzpI+jiBd?zg|U zclSPL$Bq+`5p%{9&#yU0<`^S0ulrW=l)zn}d%aSE`&o|5`FjCqvMhb5+zmhabmOb@ 
zlxwogn&Fz(pv5mgF6w}f~4S#@WndC{_+ZWfxbN2O&9#ZOBPD024?P^h2_)&`qrSZFDdV9zJ|&NT;VDZ5FZ!c zGr{mwP~A}o+@sQUYd65otO2~0H#+{gZ*Oa$AR})}uf4VMC9oO@dv^%hr)b)zxOYeK z5qf?DdnK}s)aLchUC$qe3xL5ds!vxqyv3r27RPYJD14kGysotLXM(H*`H%WBDC|F@$g|2 z)MnV^!>GZVit6)Au7FVZ+B(+}@CDAvhAG@(J)PY@MT#U_|sQ7LX| zm?s#gc}F#VaK;r&F_r4W$}|S$sIz4^T9qY02cZs8B`Rl1B&;(4jYpv`tG(^?lk<`4 z7YsG`f-@<|P`KQ}Jl(+WbVQKa6W8|nk&QEht0}ikUmZ)2_O-y2S-doW^CS2e#`5fQC`0*-#I;XdkXyQoaHm@=PsBR+qB&0 za9laLO1EQ7B-~ds_fEt>OhKq+z7V~)f{MLiWWE4DUtng}t#=N#h(x**cTRq5wEk;U zVsF(JZ@l9vAU>c|O=oQa46F?ZTkf$r`V2Mfb{6W@%X`F@1C`p^x1NDuwnq=zDtd3T z-nKl-*_XEbA+*&XwvyUy5{>X={?$iZxJ!sb`YlYk3V_lY`$_xxiS%o%yCmpS%KVt8 zA!$>j=ce0%p-bWyo6#9w{X_K1>k#}28`Tl0Q1N0d)K-aogyN+9@y^UTCj~sP>MKcJ z%-|k3jlJLBgS?KB*&yu^vhLQPGfAz$fSd7jL4OAWjFW^Jbg5R4e-N^)`DT92<@5q& zo}|t4N?oWI1asg7a$vPh9zi^;ONh49oZ60PJ+h#sW&J@>{u~>zy_O`o9c-Xb%vPw? z{k|lTOo&}J8=zmov@S*1OO7vmK#~J!PfE{8m{?oK+}5{p07N~!b{?O*qJksUD4c3Q zDd%1xum#h>s&G%FA?j+EN&BrKN{ho|OiNwDx%G`=X@i-HinO|M&N>K}!XCL`e5YyL z)>EXly+cQK{m&u!Fl>gIB6&cda)<*5tZ9Shxc}&#X{(7~7*}dr|A}gt1UYpDrF$dD zqH^cd_kb&T6-4?$-=;h&pikGE? z*C634`EvgPi)`t?8(82n(wMDJOwlFh%#(OM(F_;LhT1Z!`8KxUVW%oNwlk?xr{@8M z5`LHa2ZUKjQ}KkYFFCtO8tgQbw2QLYuXvm ztg!6JN~=KHXj%rjPS>$VGg*u7oB0;7#o?<9F(R&lPG2oNFwi1$hu7A`u-|>{@*HA;?pE?$H6`Kg(=Ku)t?uXb)M`~^Xh13jR_r~dP<66+qrTK%XL0vTo zK1agZM&f1|QH(K51m$WtOnB00LwMJ!v~10m20MZnRTCuyG6gdNfz#+;=O9NQFTSIz zMmsa}Qt<{Uc5sQ=b*F#Q0y{D)ly&QOG{yx!ROHDF^dv)Prvx{$GOjlnlM&9$MQ#o^ z=1MPyjMAqyxt?5_rq3`a`Rw zD_t`2)}a-gN^}_M9H3RkF;1|!SGuwh$2>9CY+1Bw)h@Yd#>Ei|38$d-t;8q#=`4#! zwm9k$%9I;&*dm!5G?_Fu5v-OAkAIR=lBH#`Z`tF9oMkudH%aj-WMrNm;1f$!3O|q6Z04PA98(_3yxO^VCB_Jo$M^iP3uO)Xt=% z7{bPa@B{IMRH#<*tjd)tU#Zkdl|AF6CE^42bbFPJbPNwB)Y^$;?=;V(!?nB@tA5SL z#gsJDIhH-gKJ+i*zBY`p6(dezphJ>RsdCKZvx8RkevH*Np#>J<2?ep0Ec58*y9`=e z4x})pbR)-FZr>D6Vm1Oai^H}YBXuEptAR^8I#H%3yW{Lyx=w%K>*NUJtW*J-A0EjZ zkf{|@7-2bcfwzPxWItM^tfQSzFKh|oP*@3;FjR|Jf(t3=yX%T}YtCcudt-SLGngwy zyv0X1!zW8@wB{SGhM$TG%?+qZzCv?8(NIFXiE6$Ce&66qAz#gf?t(7>jg`(0)4%a! zJ@R8Dd77m%4~O_&46L?m_i)FP43qOdjfJxu$3$h^5?};RAex&8?MK0$C)qvex|SBS zWn_X;RLi2y8L$%wc(l@VX?n^2E~-$beHtBOsE-lf58g*$M*jpotv#eG$K8Ce_Fs`VD#T<*yRJD@pXRODJ0@WUV~o<-I) z9yVo0QW?qzbuE8y;5KWKupHyxB<0DMvaRAi!iM{bJOHdy^isz8*q@iPPu#f z40Yc&tqPC3-5v%%9VR+sf>&GvFFQy^0ff`kp?PNZ6FJad-I2SvE)}qYzi}#Y6y=`$ zwH~i|Bbs1y+p1~L#(xrT$_dQ@fhEInTC|#1JS+avA@Ak)9`;_wF-;jqH;lHD|K8N8 z{LO%mDecSSoQG=`-l7;hs95Lw*)0*Vl#&v*lf+c(a+TnUd4;F@t=}oRf(Ee2m1;uz z7|)7{K1%GfV{}f*V;#%CSbE;KD|(5r0Z1I2nVF*KN_G1N?&ih+*^^oTP(^*GrGDmm z@vQ8J&7?=emmI=L;E^89<{|Q~jV|*^>olRr4JQmG75O}^dG{yAjlJ5(bjHzh<=Sn5 znO!pMzXim)1AIXXT*Gv-eXV6f&x4Q`OhyLb^Xj&3hGJjg*n^`j1AWtbFDO%2z;#)WD0(fXSzV z5o^T)KRq67z#^k-j?Ur+9n;wXxFN?* z?h0Vv>DK(B6;iSJbKig~V`XMrZz)b=f|=Ia7L<+3Zx}i^tXR0K5-9Qw2gwo`&4-OW zW}lIlt_LFrr8tJcHezfgM5HcqY}3_=*}34%A{uVQEnGa+k54hMzZBVI}(ws#KH|lb&RsNpeSbKq16adn*3xG``rnpoboX! 
zei-gyEd)ngL3>A!%c=;r-*DWqZvM#z)16L1sc6suyYTY#9B<3huC1EVP&r3hG?t>P zTH32VIa_<>(9k|I^Fn%PRMobgSpxOslG3KS?|8a5o;dpYW+lMD9r^<)uE+oYuucAh zo%k=5MQZ~b%IOcYy_4JY2c1C##RjddJPA%D>&_$eSRC6#Gu_ku%A(S0?48!2v}Q0c z^9tYg&jr`Kv<$^}@O-hd&-CRoPTAO7b6l)*+(P-<+6c%XT`Q^^P5NDd08+~=DWJ06}u1A46RE1<*d60uIbDqBiqq2Dm#WMD`tx%H89+%q||f{FtXao~n!iZBdF$LxCxTMq1o9@DLR zmbgCVYe_SC%6X_yFIt4bR;Z%v%9iikvJpe91uug zzy8U49H&Cww|rlxrTGu=_`ff<)BQa&{^h>(?~CW(to~oh_4fRRM%H$Qe^VZcYsz!7 z@Ec%I3@jlzAhmfVauTJA*R&;?z*3btvZp|BRSjH~zBcmqw!U9EpJ8ac$#AsUALPmQ zvmuHxhHLj387rwPO!Z{{uy6p?L7zh3g_7P*dWkT!2W7dM59+8hO@{^>8f_w>{-W*k zo#%}uU_DR-x9{JpGF%n9bW0*ajeLWq9-^<&Q&t(TxR|fo7hICs2{FUJ$9y^}Q;L4u zB(CP5T@pdYo}UI43Jm)xP>hrYwMdz5qh+s!XLLLfOk8J$@4+!OnV96dl6|xw2Rd;l zwZtYESS@Dqq%A8T*rJ^LhB#d(c&8>=FzMT1^~@$L~mr#dM- z=yHLD*oIm9z3l|yEAW5;`ta))ALL}x2_t&jEo?1Snj!-v(%`HOa3GMy;U2Wvyd`;W=7 z3XPQ@%dBqb7Lf(5yiM)_&d@*be(efPC$1R*TMNT_1Q~8;jR`e=LUF$04j5q}xhhdi zow98OIem!+XjR0H>KYL4J~j!wE|~uB4W*}K*st)HzYKYZ-YgrYlh04~QQZnUL5B9Z zR(dwKi7(o*z_E*$bLld8_-k_c|Ath;--1(r9`TKFxSm z)P?5q%o|pvr

      ;p8dRajB<1$;YxraAoyO zVb@Z#E|)%jKfOP{JYe}i;?f(OQ9^Llo`d=6I*5&kg7Bvo(Db$r@BJ`F&v*jVe!T${ z@XlS<8Ky3U3FAQtoj&3d2#BrTFHigj;U{1lY<+37Bq6uXBYWpco477%(50t4-2*_4 z#aaj5blMM$d_V=Q-)PAp1DTXHCI!)eMU z)H0avYqPx_P{lY-KMS?oYf9KWKm?YLKB@<-4;}AK!Jw^9uXNtF6~`&fKdYCd_d@Fo zM5}RWX0PF5+vqYcMEDfYU^CexrnNFi9LqG?u1l!jO|vf4NS2ozs+sQ}NC8@0`nWis zN2u0HTnl*?Y+m?T=5IP~V2hA!VsMj$$B9gdDsco{V&ILcbbX4l6!vU<+fEp-H)5Mo9m)7X~)11NE@M>Z7a3Y+| zAr8(l`w4O#Z#bk6|Ku)6p;SQ)+#GYU;%C$ySj#oY%MD$u!3v7YZAeIm zDbZ!SFxL1yLx}^K;Y9Af4W3z{K~)^*jcrBfh4dExc8+)}%RRiWe;QHwnkeGfd}ken z|G=p*{wtWx{?0mo2lM|8%c}oRj`d$j>3{baV*YpT(zUVq_O1RGI@PG~FW|L63EC4XkPaaY{4Q!KjvTbE>GxD4TybH2lYS$NTJ0D)343RQ7`bIo! zW_%@cEcNB#ZAbEuefVjL0lsULlluU3B+V%M;QUUBe%^G0eC zwDgus7G(GoIpjl=#eTxe(Xo8{sIhvw2kJibSNiub9Bek&!X`5QP{s)RReu@KI0kk{cn>BH54{8-ejIM9eM%&(e5& z+#eFMV?$u6TjIpbk~3urSxSOoE9m6R(U^EN3gtk(Fw;d(#AR?yiyw|?#b7aY^BE>0 zT!ejpFX(1@U`|M#nYsq;l%6M8=`&P|&rzm;*uFwL0%wXqGH;?lqZgSQg)Nrmu)1l+ zkw^@+KQ@;gHc z^}l@_#%8~L95xUSR45=xz39CjzlD%2un6(tBGb>;N5)f`o@_7wTD&2s=>zPY`et70 zbnSvZ8!THRX)Y_2C~2Q&u{5h~jmH;Q6!cB9XQcWVJJf=pYF%OpVw%Pr%H|HDa#Mva zTY@Z67{j5@LHg3uluJ#^jMs|FKVwzpn8%tZVI*#H!{C&peHL*vj;hj|&w{EX1Ey(- zLSld+bi?$DlUQNC-7$&#co&_@%#1kqY_&E@Pd6DWi z3q>g$U`hg zPo@WzS6h2TAi;fx&oF&hoAoY=lo`bdNFO!rxW{3;ZJQgLqLj+%bwCkc2F5D|UHkn3j|$ zqn8w~W+bK8bJaNE{seUmTvrfzM$j&r6MKaSP2`C_<=Ky{SHvlVWn076d(=4N#vXBi zI%|envOTVQ^XZ?;dhF5M>hs@o?BV~|l>FPL@Ne2y!PQFte`LfI9CRJ*|Gn1#%hVxI z!CYaE7nw^#TtHkRw?q};N9l5s2yl6xVMgo{G$wCx!qu9CDUj~m2vEOv;ZOg3JZzpf zrgwrUHdA3;05Q^bwkO-i^No5QZ|^r)9axkggM~*Nfr`{5onhtZ!kfiftS*8KwWV#; zZ(TiRYK62-iJ&Te+hBq&3pVoBEtIH{N}QdRA{z5ED(G{TzIpDgjU)A6{4ko!q6***qz{n_+anKtb8%S*ERNR&41- z7L+X`F*U~`KcV{q^Q?YJMK-t2AY!(Tx$73$fV+b?yW2vs2jx=5Aww%5X6*{0$#oTYLJlg3f*ezlhDf+2Rd}S4o z?j{U%dVmK(i6@t@TVm7-N9rOf0#m$b0*f;+BlulUU?jbL5+D=Lk*d zb!c(GWJ-bAGc_?77|wc=a0)oYU(|Z_BST~F?_(Ffnu9-AYgvOHRmGYcAL*^jbX zoez(yt+#&c--Pr~zD?v1Dr?EmLsv6nfeCg0Mtum1k^70K+%oj7;6FP$w#$^hBX66% z^w^5@(uKxRoU{fKbXgeo?Re}q4nK9+gpN4ol~_VH)rWCxBuZ%&wq0d_ zKGH>ddDFap(^B#4fZWEgGp9wQk(Hak4c_jk}{9@T(weJzn6nuo{* zU94x~nmhz3^}TM)4u8516TFff5k?>6gGfb;FgBW0gieC)bwSkZWLAkF2+$sMDert1 z@DFco4sZ4J z%zcVYF3RkX7h2l2F+ED4Z*(N)b(EW-W@;T2IPi-f_+_9~a(m`p_%4RYy2mM=jT9`T z)neB8>AjBopAuJozG_hR;^Ks>j-`4KxbuAq4;S~TVP`>DpQP-una1cm?#a+VLfNFv zF56|jPHmgcZD^pEsx9FOW|qAL`A*rnQs zl>fVK|92DmSMeidYG-F{C#Cz3lIMT<{+BoCznE;5P;aCqgwMq#y;|OL9|~BNQGSC| zRZ{HS)3DYvC3qr>#nr_FQoFV7bg0F7%rh+NpQ#`sq+qDqMpGbW3* zy0r$r4Xa_&#s<<{!n5VJq}Un`?ve)>vzY=zqm2kFk*h=9U{%Ywn$ACV_1#G`^=g&c z(M_(Fb5^S=E4ugTX?v>$-eOLF9OV~lFqSg*eF7qH@>hY>=N%)M$E4btD$}JK2JEDa zw~!+op>E3b)lMon{QJhejsVo+*wkzfre?y zD6eUfPL?e=#5B${@NK9~4(mn)w-yahLyyHAE^*1K-8V5D!E19 zq~%M6RIV<=i?1Qf`2KtP5EtW#AO}VHSgedvokdIF!rZKtoyob}7tUVxOD$+ZvSa{V z?W`tjZ!MrwgOLIA4nq#Z`}11+(kyLmUoo&J8CuxcWZOYPBY&(L>A;aj&06xgIZY&6TpAf|^`1Y? 
zxDubGL8*&mRO%M>fP$(a>e%YI9Gcr^xeP4tI*a~f=Fy%^M?6vzmudNcTe=s|#U;CTW*-dIPv z!+b50SyJD5Ye=a0D09nNak8fPzHi7@-EJdU;};1c1_fJ|s4 zBf3lF?1QVjV6|s%4$VT{Ku>g9H@iV~2!2ItVaRsbJ=;A&)wv*-1TE8h*+Ru)F zP9arpiIO&CM)H&^mofRp>d|Bm1e53(+DSD;nkDm-TroJW zwMFRKrfd^-zT}ZzgYThpOE)ka2$b32O1K}(4A=Ssu0AznRhLlKP$h`U;*@t!eM-o6 zqf#tFMHmZFlD~|Mpb#CQw_#{;S=BAvCxv!atwq8vF%xnzr*0O4YF_#Wl<|QWqs8}7 z&0HzR7lsd1SgZg{OX9HB7bW&pj#+cGXHy_+84=&fJZ9OlM^EJ_Y(pTz30SuoRQIZ1 znxEcj$?+UGjW#r9qQ6VaMHGKJq8Tu+8Ou5y_7#`F+B&9VG5ju}**mBMh<{V7t3x@= znWE9I!0Qh95|-7b6D$>#{0-DDS}6HBZ;!Z*Cgrt9ue2FNp}Sc1)Y7%YnL5MGnEs(} zK>iB4$Vi?S$_+z3E^IvNk;7M>ok@$@(54?g0@ zdxBw?&VSnF(|$c;9s{Ocy`4tMI1Y{21B!R)0G@ZXgT{;0?4ihdecILG(BXV z;`9a~;);E>^>9KomhitxUIi>>(?$_5mnHL14zzl$Bsy}Id28`?`7t%Uih&tDeOe!~ z&EA`l3lO=ejPk_Z!om8kukYR-a&Jz%=bip{L)s`eB^i`NzWe9%s6PE}}M} z#K0!}hvz`^pK`w61*!JLFPsbbM>UYf#wKN%i}&qOKfI3c2yf6zA6-Ko zxHc46qi*Du=d;f-hQ=1QF#DVrCAY!mBX)-JZ3vTZ!YA~fC&+_{Vj148puIb?927`q z5J#hxke@4&XEt2qQ;=>HrO9@Fbo1|To;}Cscma49QC81S`E|?6Aa)|YE1kb!eQfcS zH$H=XioH1Z!c91yos=wdXv^74JIfCn%&qgMjmEBcm)CE8OngR+O9zCqyac^s zWIh~o7NFm&#l#P$2^+}DSHk_U$yoCQ{btXdYCi>+7>dg8ZpylJK&con+S-MzSv<2R zD%jidCX`S{o}(&^ffB^2O?!YARB2T1-OFPyW5@CUstjhfYUHvIst{7EZ`_MvKZfIt zTH0z0cwj>je{Je%Oc1f7umh}YVkKZH-ZCnC1}2Uz6dpT`_d;Ie*|Zou6MAjg9!AMZ zIj@+RYZFsevv!_6Rf|(zpFDhcBD$hO(7{He&+=r+Cf`MpnT0Y_q6gIqX{DFm-%$UD zzM}>I;GixR6@)D!1f%a!NQz`Poj$znP|q@KTSOlB&M$dd{ahv%xKf4?J-9^kdya^0})y-80w zk1*D)xLsgjU_PT~1pMAj@kyt+mWCYXc?mr`lf|T27tOt)jpy~&C8JGc2kMkC*9ZAS zV+IiF{HruZf*!-|DXt|RzAZY~Meui^^cWT7*FR;)jH8_bsJ`p%mH$xV`|o;N@bA{~ z|7M^3|ILr=uOfVc6T;su^4Og4qS4&m7DtER@c%}>j(N1mfDw)m0pbYhxZ z9z5;C&DN`R_o=S+x1g!7yHc4SfAnrxfg4HCVO%3PvbT82t)M{Lq4S5p7eIk_Oqwl# zg2)h|X`tO5x}##25Fmx!;Ypb_%dh6j-`#TZ%SaG!K{(9d2VE;nAAZ|I>nbO2ZWdZI z^y+r@sbXDQF%Y(?L?7R;Pq+60Pvfgp-2#7jt&X5UD>{l0z2bJuH40tj3K0naR!sI2 zA#E;Yrp#coDb|lW_4IcU!fd^@_iT0fUxFP`11v7Ip_S zk6{=7^Fq>N(hp9MPz*)!D~;Jw&oniY`*0DolenuErpspr{bEz#-I#_~m1%dt(lzG~ zGK?Mm(hr56QogK}bZJmQ7wFPS%zgjfKlSjBFhCxgioc2*Dx`o=ATeUu!G8&BcUDs{ zJR*Kctc^P1D-stlA%ZOM`V$IU{NO*CMgnV)^k<+?B5R#>?3ty88SHTZEfGZDDh2EC z%5R!GdRmn4_D9zHT?$hV)?_4aKI%kob*H3m$gsmIABL^bUPvL%E?|I(4`G*$_tv^a&4Ja)cQWz+@s=wnQYh#0Z!JJ6d z<8AC(LS+sFd!vZ4L)yJ4*@euo811(W`E2U|C@!((MsBc2MRw9bC1%zPt6O(b(8F<_ z?+pwXr*QO02xu?mOCo*it7hYh`Ez8*OhGVV+=9l_oJq^vk^&l}Q?sS8YD{9lJc6)G z(1>*rwdO+83nXl2We;w)aVH(JvJTd-*ih<6RKzti?LTO0`8`f1^0^o$ukRI`J?r{f z2`*Ec$KqK+y-ore;_yi!bJCdShgxYnl&eBlCk)G;)zTz-XIbW^{%Hj3GdVCLX;r># zH%pUcg|9!;`Wt|Cm8t;!g(h z*cK`I^=1>VAI3ODNW4jS1|hgLn?S~*S-vx7 z5Pz=|Y0i9-=?eFH>PKTmt@GN+s6S6 zt4fxFE25uHmM~qk0=`K5m}{P1CnSf3}yeX-*KT<7BbRNLI zsF3NDwmtLpL|8|MW<9zg;rhTtL)ZKh6kIthp^a*RKj80Cdj+n+e>}5;#Gp=FI81}) z`d7RoF*fu5OeuYE!muHY5US!4e-18-izpR461WZRFV0ewKkTzmGaClSicJc^%|$LG zj|=-I^NqI4dxp06&Kr675ZFFS0gH##TtsD&J+ill-+yKlaS(gNpkGMUWf&EeCj!cp zO;71|kS73`$CU1EH?Wol*t(?EJ`+pP3(W|wG|x!OJwl^#`wtq(el!40_A&|9(ZR1^ z>^k@;da0Xw5p!|OMicO%JQKbgT7B@w(M^Dktb&g@wEp&|>q%B}LFnR-nn4u;=@MQ)WZ=QF?v%o0 zk|klW?~+;e1nMg7UE0wAznrNXXFjQ;`*Py|_U^`5kn%+}g}+hw_Zui#}#1 z(dO(T-phsA0oqB~0l6iT!kAKd<_89SeQ@=Wk@c@gHP9 zp-E0+7x}e0u%*m3uX#*E&#_Km*0>q{A2#z0n-7|I>A=okp)>OfA9>)C*ru@ z)dJbJ>A>y5^rz9GhhKvdSTG^sOJm`&KEGlXoiM;yo_}E-Zf)Zgq`f(K&rfe>9}aiJ zpH8vlL&CJgm=>DC)+}0>j}C`QN+@uhyqNh5@`w7dE9MlO89@TLLyg<(&>qHnG71tS zKL~>2!&hO*kuW=;rtgS?vX;!$0N_yS=EMOSfhfoliE~7V`nelTXkWpw$!(1<7p-PC z4g+Ce`|Cs1f)B-pOQ{hVTFrH52gBKe2OwP&iqF~-5*P(1j$_&|V&Ih`zVxIzEtAPZ zt6NcWK-oe{6BRz*kW);`#v9oBGue;|nPvzL@;i|E>$lLFAWFZr1?O|Vz4q1Rpwk(l zj_)AVrtbfUG3z0W=0RsYg?3F6d|NV{vvgNmjgCXv^U(aGkzi&6eR-Hg)pwR!)wp7! 
zQqwzGOj@&568y_l4Fs)Bb->pxPK#tB3ka=THB{o~Gt%-JU4j{P0%DF$YZ%o}p)P=0 zjl6B~fcPU^?qJ1!1g?qc*mF3^5tZ^5$QOXLxQELgk&V!C7`g!#*xBf@VH5UB$e?G| z{oR5Yb#{%=E%4yb>`xtDC(~VZO_3M$MOcMp3lduL7Cz-lL8#u)Y7^p|-xh!;2VhQltC@MP(70FAo zndV$YvaujpXG#8ys-z0Arl#>13t6}>vvW|GM`14Fw}KQOBN15@prli=b#8>46CIqx zCEZgmy~eT`MFF}R^C5OUO)r{2PK>2HWt`file#b;#|7kJ-@k(^@_bAJS&1H;#&bi3 zs%#wgE;0KrAIUf(SZMtOGx##?eFSzq3&MmI5*?g~peY5VbU<-0M;=^R(^!8+E{A$h z7K5yc2pgJq1;7qiIJ90*7hqk}MWrhGwFK~4Gl!?=5gX(VUm4F52}wa!BWLcUT56i) zI^-Q2WNXTPtec5~dbO9*ZJG{+UfCfM_Ljz>j-$S{Yp15uU+{`|ViGZwyd6+K;7nZD z5>;KGxA@z#i^!AeD4&%BPIT?ilJaT>8bOy{KhF3I#0cb@ntLEou6mjkmXpYSE{+F; zHLvwI6f8!mzJVDTOsH*zUg>dO3b`Ef-*i42O}Xwvpj_!|Q0~rdp{=ZL;<6IOSvp9 zq~W&T=)$ky^=a+;=gwguJF{fr@#*C3c`(H(8E|5`r(m-Ecl2>%<_xdO?XrU%G0S5m zjEhv@^mPI!*1(X+jBd5;bwh=y2-gXQsMD)r#7=yE$UO-pccC8k<;=DE`EJKGyBr}whqwaM}IR$Byp<=Zk)H^v?}Y&7l*y-S61(ZPQ9?(XhWWpPaEL?M8;19EH5 z4b}8RWhabY+@&92%3SRKNqQl~z)kV9b4Vr8C#Qh`n?kN%2*_{DW84oZ9$POj zMe;yOg3VPI^H75M?0|+L5ra|g=g7!KEQ(yG3ew!p*mxmELR!7)?KHK$39C{k);)Y} zpQiBUiq&lYfF3Lz6PoVD@I^s?jq)m{S&k1%(U_)aP#V>(fRtj&vQk|0a=}hv-Y6fnwjj6F^n;x9OsuY?3&<3L^} z0isqdXEymGNZ6d#}lZxUpEclc0}pda}V$t?hEV&+N%Mzxt1jb!4Bjdlik zO|}Di#Vv7~wKhbYF1ZZRyhm1gfpJUavptk7M5Q9>EyZe?DeV%%$hU>sq@vN9QQcg`9 z1d>iBQ11#A&(!(A`S`*41;OuDOQqn zvJMh9dl^9`bf;ogYw$=B%n-2@z)sr;5H@$kJGY4NU)um{r_{G`gnpE~o)rQ47|6-z zctUnSTHi!JcSdAbvgLslS*JC&!dqX4Yli%p+V2I36x}Hs5Y3Ch4M}y=hj?aqm?c($ zwap532HKM8=I7u7v;M;w(b13|>@tr5mkZQcgX1tCMHLD){SD8#{dL>&<68R1fo)52 zcV|1^Ce=1ICvtG`1})*4|Hrinr5z=m51#+cvu)_Sfqf6Le0V53o>2Pdz(>%O>_SKXezSfAa2i zUH?lv!*dVC^U36>@!oaN8X^?mr{BOW)hl|+1M#~r*q6a*Dcq-9(d`P8hN#7t#wWov zrJiWV^UP#Cig8E^L=9pacsd~Xba%HJbgEDr*fTP+4mRoboDD=H)s5kC9xLzOII&cm zXZ%3HO$fOps(Kh=U$0jIrpK7iCA}at84GNL;{BdOfLFom&mjA9 zd#!^i^vUw$KE~vMeQxS#{lZ^SN|)T4^T873cl*vR8C`UHS1%~GntZ)O5^?_zXXgMU z%eJlS)$Y}{vD&u1+O}=mwr$(CZQHhOuEy(g_C5RFbK)W5Ma9gFs>-T}%E(bO=NRMv z{=Q*FE9F9l<<#Se>;6SK5RIu0lAF7F6~Tvk=^o}%iIo8gi}*1Em`$B6#>e#ze=#|K(l+D&JO)>aRy*62$?vchG&I4JrK4VR}3+bsB=ijsg~1c zOrQp8gV}1Ik*>mbbG>@ybr32NB)dQ`)B#7mKos8i1rePz7E&VF!gkUqeyiZg(I~ z2G~|#D+l4~Ml`yTU>(>{Se-C@gV{+NUZAX+X%0cNjG$!|c0&#}YQ;@JzQmhb64;st zlN9c@LP{MhREzE;@6GcFE5iYI8byq5yPzk4chPSh$1bnbV?CmZ@R}ibQH7*8>fS*} z_V%Ph$Eb7^b04u!L78{`uy4+GL-=VTcvY~G0o&sc-o@HA741D2Z4EwxcRqz(bH!GG z@~a6ReNqO0Emz2)PtBGJxmt4vWpjo&P0pZ|LbQ+ioozHy)TB>a1VL=%^i!@(3mR#^ zdW3?|;1#oOX?yQl;%?w)8iQ|z(-+yhy&=?>Ql|El#V3htj@cjtA~KzC=!xVI>0Kqb zRt!Qfq#j?T(?H;n(?e+OV%hvHXUBXgab8%8jHZj2L};k#RG-;oLK$&kTTiKI=~U7b zP4aT~DDEIa2O+VTd#=#axT#qzTce1SBU|{v0$YfFM-o-Ga1@SxnYpq^{CitSKo z+%g`?ud+XbhTspJx+Z#+>6z}?D$@($ZD(}0wX>HB*>K^11Uw&Cc(;cJ2aTNVgpbjV zRY;&MZAnHg&k(6R^@JZ?iutC8hXvN9On>UJ2>w`P>yp-pPudO80d?%(frC#WeUiOx z!etKeuTjMEQ?kXzLKYsmf9UyB`o(LOjl$ByQu>8$i{yoG71SN3ZsXU296o5-5+fF6 zxCf&l7$_9k0!f2Bd_!-`QdNkbo0@frG%Q;@=A(GJnX$S5X#lrEvqcz>jI-Y%IzpGZ zKq4HqNz6csv(9gc!&-xBg^F_*h-e#qGp7NPw9BqLGY`1p6D}I5`q;DwILmuhu?Ktu zflKy?Mj3>iC;p;KNlj>eS+vEv@v~m=H!#_Rs3VbBtT$vvDqVPt=50-;iV4OQ0|Hmh zd+r{iguP-`5IT~FbpOTOaV{-&-W7N4b;P&I8(cQ(Ie{YB?1wT|((KLWyv$i#Y6GhS z#+at9Z3?9l78^8VX)7MF6*t?8o~d!-S*-2CRDNHtAabvr*dt9bRu_PDfr^FsoSwQN zdSG2LfxO(+VX8%uR#!wLF{D*4T>=M$Erm2n^X#xZt>6aU6~gd=w+}puGmxAww<57T zmtFL&L8B37^ICGn6LPs{ubDQZy349rwEAVI(w`cT=eEVFnl(kGb%RDKCHKPBu+63C zKNXHqDVKj$FVvq%bR@o=7(j&oXF=^hts02_u3nhwI_c`#n9_)TYc5UoP5;%s`7cG; zx9jU)t($7%;=lreVX(juPXGeH1xfwne%plQ2zJq&0{SwRy3*$3zDv`G`QyF=e3A~g z|6E$TJBgTdh|lbFpW-sU-pcOo`uaf|r5NNgduJg@c^;&5qC%w%r8%!@JTcqSi@r9P zOS_3|l?0_rGK>xi3FdL^Q;xh&?wVGkc;!LMO!)MD*gl#{6I)+cF|01l#qprfel_zf)}!nZeVt3 z#@m!zRznf4H<7s^-#=uNYctm~@Z02FC_sh+Y2C94DyYg&klWH$J-Ia-qNVQmp1z|p 
z3{puwJIkfyJT@*0X~Tl&g1nQedbE}!MT0C;&X?Vwf`zb9^-0ZCnM zdS2{&l;Y}===|=&qeFIa#GNv(YrMhAY^#P>+=^U1#F18O)v!W62Yh;Gsp6+IUO6)S z`kRfBM|ffVoyQ@6CxIR+Q=p{pUZ>r$0}_N_5lll>pC`F&O(0c3MM-bquYSRjsRle5 zW={Au#(h@FoDP&xB*=WKSMq81z4&?V(^E0xfT0HtV0%Z~g&zwwmFN|P63a1&Rx`;f>Ivnn?E^6LDN-Nz3}CKd$r zQ+f=Ttue>4z3w~YTD5=ia}HEnb+Dxu{Kdm}Xs?)HFX981Wu5RtS7<+Aocduuofy5f za1iXxV$g=%TEbT*Vw{riu!&ZJMmT${ij#geg69Yv#{iU?U0fVJkB!$6{;@Y`tIw+- z?S$0ZT6(AgA*2!HgSY|V(ob-_gxpUdS2>Z>S^NCLnovLW6A{t9VHoCFE8!CQCHSzH zh)0*xv|=5m3DK{>f^PP|^gPZDCD?HtuJ``LG98nD;_8WQ)MCE6>^Y}=vw)p`|7(M9 zySfm2`GM~?eQ^B$T}=PUn5OtsOs$O^U2NtPPC}1>78s1e}bGjqDYStPTIJ zqJu<0Kv9VzwV*@KJ}q3)O4 zq97l4Cw^I^vRWiHS41P`PaqaQP24g2+BsFAGW*IaxOp}Tv6wcM{ruC+#Q7lmapV1< zBeluQ{y3BN2&5&tylyd0S1~dbTh9@h|2}J_jj<%2_-o8jde3vhj0-vOG#5Ouh zZl1WZF&z>(%b6w~Ah9d2uL!rhHCSWY#t?{4*4YF%Rwj*atBS=z4QAgOHwsP;Gyn<= z&RbOKE+SD_Qr~b6PHVYUlG8HxiITDM$jpp;IwYtON!6P<|Hz)&#`6TjZ#7M=GgMAY z$p~eh@Hn5bA;TTtIEod*u7josmS|xAJdD!iVAhUfj%uLPfl87~QSD2a5nOPo2}o!{ z!UShdME7LcL+|s!02Nv{ER~il9IELz-ZDcd#mRWnmX;$RQEgT~VdO%M58Q_hK zw33;Ef$I)^OYI+pXTnG96r!rSN6^*3n%TOGhz~Oh+*q&1D7_)^Cu(d+3OrWKG1kcSaKR{h#v>^ecc=AVR8m)EGgJfI(My zZ7co7<9d`kYK7iV@4}pqTZkzg*-+cxt1kgTx3~kw?G4-D>f5~#etVinF*A&jwA)eu zBzVw>SHRQ9A8a}$8^M+%B`3=K35nD+SO#Q3dPM*69rHORKJ5KS zYD5yEZm$sL$AW1t%a8M038GlCJg{V1i9x+djyAss_=!6=deKyy8+t7e^|Gn-1~HJy zgLRL;!$TY^a&g|EJ1bV?5pmW`ths2&Jt|H)4_1hU@UvyKq$k$ej~m5dNSBE4BL zwvN>dXy}_&d|i6m!nSa)tnYeD63&z6;_~vyKVA5(Y1*Qd-qOw(q1IW@ zDYLF2l- zP;BRIA*&vIEk)5@NiY_cIu$^4z$zk7WpX7nDx$!$^936ux+2f?i0HWf`Fwj+pAe@^ zubE9BoK3uyUAPn$Y%MPynrcWJ*)(=B62M)Ft#?K71B;0P`Oc74o+)GUm>Ki71$%;` zR1XVYrfm!JB$*O1GZk;CqxC?_(2#gLvEA+#_%ThXR0aE*bDQZ%(99EO1=~cOe;#xV zy%>wM#2d47=Nny* ziXBM1ouhjroRz3_ek~h~6y>od7N%z=AImz$uI)z!zmGCa6=)Sl>)GsoGPmt*vU72o zAja=@A@~aWjt5IU118f0KTWaA@qN^`&fDL+nHu82eQGWLgLiQ< zY2lU-GtBf=p#6yKYTPr}%wI1|5^-L;N13?0AzV z2LtO7Gm`;#UP3sQ$mE-gGg|#BJS8KCod6{vp@KREc!<^|W_m2!HgberX&C!q zgicejy{!ahQ%64v{wgqn`%c0Ii(KWm{A zHGahmwZEJ#(>?HP{wq$kl;SO?KNf@H)Hyy@e)6JJKisRMpCTD%DFCABz>_;P zSoOd>YICO?_lJE!1=H4@s6QxCzGj&9*n{Iz1CF*rKbp&6>sL2f!v}P!uUSP;Jrzzg zW9&sfeThGXu*U#rd;MRp{X(3pJzW)2jNk&aIwhy)zSo~y$&{7<97?=l(eyW|TWs`R z;uPq$rX6~C6*E9|`Ni2px0-RP)>TAEDFSCFg)GXo#7t$XBk`h3yfV18zHF7{<0Y=# zlP{*IB$a#w6{eF$64(^&k3bzZA<4){?z4@E=9#kp2;%Q%&}UPDq+SRFBxr$>Kj03s zhV+7KT>k`S7yHCFdgh2NN+cEP=vHk?;6Rr9o3i6;Wr9xwx;x3xOI#rGU(NHo6BK`#nuvgpiv zx%o$E<_E?BtgF_#mwvjYIloF%n`#ltEI47)1qP?3igT|CmN#@>nLDjHm0s`S$xSes z1?D1>>>cSB^n;=2cYA7}f99)$Nr1ShV?HtdOr371%i@LQf^l=%<`t&tzoFxx;ZSOXtZdN_CnPPvT)@ySC*^kzaIx!zB(5dL@Du(Uqq)4h2<+?hc{g4edQ^lb3Z0U*iUmhJziUKLcVkylZc`=qtSLZrB9224^``tyBZb1Lxfg zhRcK9O=i-Z%`7gjPw&k;;mr1@x`(HAY{ix==zH4yS$WK*JQZ(fkfL0 zg8ehD%Q@x~$U3v<^BBs{{I10xJwB-u4!`Qc#3Se-_3zOHgd0k#t)pg7Fbfy1Y@VWH zvHMlWx_xSlIOsQK_(9)-yOL{d-`;<3-U@N8MixKLIKbfi_hI3mWKgC*!@~c;8|fb& ziT{${Br2@`)=a)zX|Pvj0W0y#c1MTN=#cTQ29c9Px!?OQ@@~yNsE+omTRVIPmHg@9 zdCjk%HwdvC5C0}NxMCqCZZ6qAc78T~HvZGt+vAIm*Dr8gR2R$}r^JBD~A-0d8ZFI>}xE+Mn!AJJ=S?z;Rs>EyXH+4y*-L&^HW3+kKSs} z;YYX@T&jK12fkVTSP7CxBsD-YS$H|P2sNOOUXO5!zMl*CJwUI0uVCR^WcGTXL)_onhPTZQJN$AU3k(8q7E1#qGvjVY`Ln zJg3BnIKF@fg$nuVt)9<1_<12sfm)}dqv|i_LdNSB5_n_;dZ~2xNz!BOA9q2jmRtb1Kpqljq`Sb6?3K^{@BQcnQuru9s|9o zlKTvyW;>J{kA&cB&veR=#s{W}xmQi#&{2d5Qqp^K7#@lM6%Un<%`^>58Lr{e7sj&{6)&L8#FzQS0m zNeP5MuG~Gj?!n@;@;wLR%+{!Bxj-P=ccO7y!%_` z9KTeK2$J5TA8RHRG^lF4d?Nbz=bw4ZV$vRw$8~!}%%t^@Ys>Gs(%pj88anRv?zm|F z3>ed~y%%(^{BpxtfxPa}9F$t$8K^cMn|&PnP&-k;Nnd}3*(3u2v?pH*Gg(Y=Q4pC_ z6JWHuKVnhzP`dd$uFVU3iCjLm$yEO%2Eh3zuK#S5|Hy^^0<`|p4F3U>jjZjQjGT=A z3m%A6ymUYo#_&!m>NsF02Nf!ihYO7<8f{R&7FDRHpwJ!~LSH&$e8a5*$0R8fI z{wq<#>I?{`O<7iF>HzmPAU={kRy=JDV9>Cl6*QZ6AwQfdSrA9oMmRx!)^mUhzG$fG 
z+3ykUieizt?-eUjg{)GpX|^z?q*U_RJ~sU1Tkr@F^3^DLpGiu|$&ZkPZS~WyjU?Yp zZlGf6BW`=}-NtP@S=Zc$efH#f7qsVDOBFo=+Ab&wYl#tSNJ3v)wljqy@XK2yTM6%N zli0B<4}cD3Z{&L1f=ZJVceFPyLmY=hV$I>fwXDe*&!CVmgIkxz0-%!n#M-4$5+Y@_ zf<(>>EgQMu&29@<7O&&fps(O)R{%vIpzKDdt>d=$U$wY3^-^4_PYAxh6d5&9-jtG7n5%F*)VpL%2hMo7+ z9HQpV_SAin7l7xE5NH^_B|}VtNF67kt_e*1!~MRA}jyq7gM`Z3wwXt{U$w=kWMVizKS&>Q|A9vTb6X|;CsF@z&}fE#m!-c?ax?$E&ZlSNBPHKqgoCD{4&8=m&R9)a*lxE^=TCqeie zQa3W0>hL`I>#(Y76G)MMNcMXjAxw~8sryXr_z3P)@WpZph&vy=E_FG%U^$_!>Mj z26P|5^>Pe*`}ty#IH%}062WRB+IhCQl^_T`amT=U014_`)QW=tNa0EO{4lnW?en|t$~lrHMGozIR!c&o*sIc10D zJEn43^?#k?#u+ORMFnA#(iE+PglQTsEZq4D3V@g1k1Z;laPG6x`fxF(=vb2_|~RZuBF>B*bwla(zOUw1qmL36Wl!iD`&7)UMa9bp~B&;e3k zz_DX=EWp3`gO8|pH04A)X}2i=e&s| z-Wv$%I|OO;TD4fEf-sUK$F>ZKa7qt#Mgb6J-aTf1FY)o;EI%lJN^RGV!Zi6m7ABGZ zw|FDEeNd>^){R}v6Ld&oMe{%NS-nz#rCB$=`ucR1)cT(+2a z|9X1>=|&L;gt9bkr|J*Xf(bqbcQ~1?yam|Ur!zTDq&DuW+GpkRByZ}5psrI6+&}l$ zHFpqjZ`x>F0GietIZ*6hRJ)0&Ib&gFKJ#R@Gb;W zgzq)cC>R$rJ02eepo!u@1x<1e7#3s1C2GCJVzat5MzS2MO+HY&m22AXFL)ih=e6SC z&L!q}^jWaamOE|4+(&bH=^A}@0=g{Pa<@;*O zd*^IZE3;u@X6(RfK;@f9r!{JDu50DiP&T*{g)35LEHTGM90{b%=KXBT01ho(oRA(F z6Dn_2ermXEwhHTSNfyTKnpAb|HWV*&hZa%GIc4_YsB4c0;ks-{k=rF1@y~A_@CX6ArS6vUcL(lxJ zYCn=0)XgA>$KR@i%6EL8-jA$-`Hw2`&oRaIPuBct=l*}ouYa>?jSTk=iWN(XBVrZWkP$?42pTa(5Midi0-lvUhGmb+7;t$qA#tx=$sLg>p35F%{CJF~ zkvqAz`BSveaW1x%U%s3A} z4G%VH^5q4gjNtLg0K!cZ)j!N1;h(thrk^`>bsdQxIRO?sUgX9>`qohj78WO8c+*fb z^h+LzD6$IYVyEezhn0_yq}K95!nza$Vadw(YI|f?$0_d`7j#tLa5a894Tk=`=BGQd9f%Qz1Px}i z`(bAs`NP&nU^6G-)eP9icPgz7GnawY?wb_~9aZtCd5LE|Vv_N4c#S;hGPdaioj+ku=f8{{=QvXNuAbaNG(A>OHU6oY?^CxK zdS>vq+t_@U^_6tm-0~~Cb&_}@udHuu$z6YSIh`{I{a7FIoAWCsQtfc+_{!m0lg*`<_uK1GHV|M- zBS?3Uas})24D4j}v%2@_0_G>ZQIkyo$(jD^arj}8d>=s&NCT?M3X#<7^S zRmFM%KQXdsjt8lmNbVr_{qWrpNh>oND!8i*Jp`N%N3v`SSU*AM9l^v+df!AM4NRy7 z3Gr-VnUsyf32=W)soo)2YqBFBv`OqCy+g9SJq94crc4|{!iyKQgX$N@t4tqonmJL;GUvO3&XtCU z1(LItiS$9i)+oR)JZuS230-tZnoyb;a|UxNFm&$)!CO@$UZV#!$Ngk|1uSS{RfZa| z)h z95D|g1|DzK96v2+xck>g@6I5x4GX7$!^3|xg{CSUU04W%hsy%lv*LB5k?D5W%9C#z#C1sXJ zIh#0Jwh;LKt<{8UI{x#%B4brJwZwyqc439ma$XP&R+JQjN}GI zBj*LVoN#1bqwxb|+c5ge@k;B*HYV+3X`RH~#GX=*ys6xVPv8?rXl!(?FwP+yG8VRO$mLtui8q4z_1qZdH0Io>9;8VIwhn+e+@Jz zm9sg9LDXFSXqyRFj+)D8Z-mlGG$FMty{VPO(v=I_MHKbLbns{B=@Sgh9qWk(By!RR zJu|FX?5#oW*P1OPK@_oO!tmu9-VTi>;b(Lz26ic%G}FX64<|4m%; z@2S$+$llD@?Ke;2Z^p!bDGI#gt!$7!%;LbwQMd>6H|rN)>?w1u=S?GW&GgHqR&y3) zpF43RS{_;@9xlwURLs;_uKmAse>Td{c+#z~Pd>9W&dPKB$gBilC>FMm~M=Yf%E& z3ONfmWW(PR20 z36*RYh#L9IFXYNwx=eiDyxCqJuZq=sBQ(gpTV!l))l+BCsEoZ>$CBpsoOmOM3ub=V z5b@e7p_&#`;j7UO?lVU~tt{GG!sl_?ryBK>I>LXgSg@3!?haqOJZoGd9J0^7NO2F@JI9ljr59vm zpZn2`GkYr>ATBZ7j{$d72a^J!!Hx7ml?+wXYx2L|&=pTMs}qzbGkSFn9kRS4dX=Q% zJlcX#qYkM_``%$syUbqg?a-CN2`^@xZ~kJq(!-xb!+kUWRsW%H`RAHL>`&nR-2nVI zPV#R{pbyIZUt39eqYwS;Z%f19W}N?fXUh6768^u%vrdJw2&iD_CDUjHz9G>&;m}a3 zEVBYTL&uEuFk^s~V0iCD5xM-(-yzt}R3SGHzhli<#q+q~aLH4r)8+L7tP5upq^Km^ zNu;bLL!Yq<(4tF$!nv>ux*!>HD8$~-I~v|^!q_otkSf{Wd!l`84EWqEZe7Wvt9uIA z=jp5)3XgS`V~(m-qfPt~YS>87o5pFI35y7g8K<T?DOvmX(V zDc8*h2+~A*3-vz;+o^S+^oS5!)Qjn65U8opS(N1&`saRWdb4P_BM@$l2t}z)F~n%p zVb&@x&HswJ3afJ^E9)#pd%luty(dPHYtGO>AE-TH`|?Oy@6Zsl=t~?1IQ?+5DU^Sy zt2tGVPmVJ2?7|D+b?IIBnie)mN}t`Uv_9y%>q$&uh zy6fy3A3-rm5=MzdWsPUv5vf_`J%m^(S`QPKSfDlaZgbcaF|B3jY&YOIC-x|Pj{R;f zQL=NOl+Ad(xNY<8Ev7o+pmb`O`_-2z)#1{v<_eC8+30N1{fx-oT_b0kr?Q=ty$?T< z$1q}}SZGp=W|~8=FL-P-`ga?Qha?JNYt7*vMF0l%(?jmWA(U;d--~}j z#$oK6k3@6%&q?tgLb898|E)fh>3=iG|9Vw0a{SkM%uMCyzq)C>HDZFDPEfm{I8bJ% zCSP^o@427U`Kg^}2qblh$4wdK>eJW1J2s^^lxrR9PswTUk8<$*D&;AD7kQ6v-1G?j zBtXam8|=n(x$bi5a{24R>vF2g=NYw|^e6e&akWBdiCuRDHt?-_q@nyJ6DPpLfU#`I 
z=~G36*ZcNYhlcK(VP8D#X+eLp)ae3CCx72^1}rw3xfL4OGk8a+1m=|v#bc^wF@K4i z?{Zc*z5a~wKgiL^qnEwTiz9`)avz~Xs`<=rx5N&=^BM7j!j4i3ABsqhnM%MuV1!C{ zkXgGjK%iPvYLmE1h2crJ>cN1nf)xc4Jmx?`Xhw1$SSZmDmDfU zt6*jH&YsYS;~)od)dU`MM3AC9{xcROSUb;yY%y*lPX5$aNb2igkXYmwGai(sicVSM zER3@1UjKbnis}~KA7xHsjrtVj?wq41Qdr6!)gnQ*_kj9Ls7wMW@$Pfwgq%eNBL=Pw zYzF=wf2kz5HmZ2sJIMg=CT(xpC#T=s)ICzO2BuDGmO>HO?;NDg|wz9`}{v zFy=;Kh>waZN#QNlqnb&RZ!svx0WUg(HaWnIJztiKvRW1!W?EvIuPb`FH%w>PBcH{) z&qHh*odHUc4lOJ~f4&`j@jgqmf5q5nuQXeh83K%nWk}sN&g$TUGonIV`F?Nqr+(FZ zgd#3QN%-m$N|s%GN0hIuYxao?*r}zaLqpxLVKO-f?_r1kc`*X6H=Go(&hU`A??Z_e z@2l>BYf%d!L;55QJlu(Fl`;|zw>ysutx<%VrYM?VQDk9J=Wt)6fs3e$o~jgOMk&&S z9lb>!dq^tRXK>~=Sek$*gGP>YvkirFyJd&-sE_H%h{LmIinoQejIZU+JvnNd5@@v2 z7`K~)&T!|Zo1>$9dKO8-wo%)V5p!}&3GX;^f&2Jj?g~r=y=c&ngD+?`OKLpFSFdx! zT|8nbzbZ{27ZtA?Fp4yYId_ss!nsh#rCn&N?(P({d{f9}b#2KpDTY-+Ih@kBXB>y= z9VSFRUS6LtddXzX9chC3YHF;Ob89WyZ)r$lOFOgZ;JP8GL064S0^8WhK31!O)`U)1 z2(t$)Gkzvt%%f)4Vn*2=5sr~$t{v-?mHtNoJkMrtt z@0qL}x0Lt2%s#an$!)fas@nPGO>AbQcKV7J#>biC<`Bg1KK32Il>IIjTjdJzmXeNd zp+iJcW+3xq;zZK)dI#C_B)8k71z4l~Nt5&qx>NQSPj))Zwxg8yevgso#H)hn3+*AS zMpCS4*y%_Cx6P^XC1~+Tnf3$l%BN1`lx*p5@-D@>zq&mN((O>2MXEd*o7**LV;*T) zZqx^6gj3}c(9E${ttd2M|&@j+cV-9T5I#lh9^QQkpB(q+g+31w>#|IPW4%|{!d!#86 z($1?cZlhq{W~8J`Xj6h?5$&=JcgPFa3VX0dlwK*((+ueecH*8yiC<@gym!bY^5?C} zLcxr98d#O6FeIa5Lq<8#3v31y1-`~ySskA_R6*cD_|~DETF=Zxn+|9NXwA1vE7M{_ ztOLP>U9)VcW$VeW#oRq|IAyrPhu4}*O;UF2@eN0EoJQ5~`Bdk%CEIDi7JUs?Y}ZFY zFL4)Nws>}^o^er$AjL%QsY;+0BMIHHjlIm)LBU(S3Jt0Uw^4djIKy&@I%g5<=OcH; z{LYQS$HJ=g)trM5<`P0!zon(K>xj0~Ew*`9%C%D`uk$8P@r0>F50ef2SgNZ$BNg?{ z1c>mMyIQPHriz^PXumb&L#;1I8(4aF03rhYF$j4knF`P`Cc}S_3EIknS+Zk)(`ye} zQVI#ed}A{$1(&!c-O#sh9l4h9oGYo24;&TFyq(YBiVtaez8q2JPmw95LcrdC&+ zmTe0Z?t#+0ApJ6t{^W|HvT!@b;OPJ(VPm+W)k7^Y*{lk1IMT0?C%)y(^Yt!Q4!`br zNs}=i&W;wX$9f0*U9`SG!+Dy1P_{q)kESdCca~g#Q0tGb@%Pe5#^u9uMZr8bN{AiFIcKYz}c&#$O!qk zqXaEV<-=;*JcRjT5p^i8GR0I7Ug#7wp0`>spZmLETnzkZ7Wirln3~_LU}Pb_F_ziZ={DjdgwL3f2eYdm#_OZ z8|r)LRb2{QaV{DHHuow*l%5^^0uyebWM(kA=qc-)+JV3)K*h8AUHcbt?Hiqdp z+-*A+#8*Ujp`+nBMcgH|B#kmonN3Ks%I!vyrCb?|qA(sU6%8(-7IjW*eR_{vEb

      &Oq^I1zg0WLb}ObrbA0PI5}oAiK8W^y-^u)(C4d$U)o=uy0fzRRkB`eepSno^HgzLAP$$woS|UEdo0umD>< zjM9cKM2_?)c9`f#HN2^|&BRipED&Q%c+Z8WsF+FowFPqQN{L_d9kQc>6;qsy)*wex zp2t}WKr}FGN0ZhZs+P9TX!IJ62M^4j8%Vn*@^kn{^2|Qwoi7zJ00!W2haHXw$#V)= znZ1+Q`e03K-5rnZZ|HB)Z@`PD2~a+Wu9&G$+>DN+W2CAK7wF8TZaXFjQ!N7pnP&Im zXUQJe%0JVp$;%x$J~K_W_pQJ;Uy(+gPGl7PGG&89x1+eQ`H&(HPnq(n#GAXAM&{Md zT{1X5B{Qz%GphaIi~WhH)UPXE9fJ8#>A z5NNj1xiJGKOcQ#Qv%a7&|JVooa$9pM$VbC;pG}1pwCO2Fbfib<>{}1J0vU?8QAxsB zh6#d)?OfbST9-4)w8T%`nT@x|l@yMd=OuQ+>PJt5S#fF{cyl)2=2LR+;qyWAqL)nh zO5g8RzHiMiUKAcHsLWy6qR*R5;f&~2noP0}voX#9K*6QZ4xT8x8fz~TOymZz(RU+w ze3_z;n?zxIB5l&7U8-PPfF(?m(1+}U0_+g>J&ia6H1(gZ$!a{8PWagLa9XmsDAmSs zgP{_4&C8vSUIi$_am+(jnU?CXLDgY}$#2#fRMAE&!&i}1+GXL51j~6-y*o9FKwda# zQqC0s#v^LUnt>lCv6NoS#zR32p|?<@yX;DtWn7>7hO8ytJ35CGmyMJ3p2&4RE-@pc zN@wy5&y{9$dKUXM&YJY{7PmR)X%TvpC(fZP&Y9HDtsKCi?C5xG%$&O=_cm_e-eV$Vx_hNJxTNJbzg{TNJEAsIwdWJf5&VP=vmhaE|MZt-lf89H765%YeUJr%C4U(GmtoMRkvgKj4*#Rdt z&6c~~I4Nxm*v`bebm{3a`FvZ|booiM-zrF6lq8nyVvLVI!}I5sbOAWw zOO&X(d1>!`b(OMyG(?7;@dAt?`Mp$7sd~p)jE(}Y8*Z2|vD-ANoFrcK;w58s6=oK^ zt50`&{@3SFvb=+=nsv#Cd@M0K>oL`ES)s}?LDYx%X%Z8eemmp(#*seQZS7U1+Zpnw z0bKWWCpJ7O%1ZT=8aZDjAw&{ziFD=*)F5$>aGP6WzEA16xFfdt1Ge8F>@_?=XLarA z7^&+O(9*J?fwwWG>hcL`enw62ab+4Crj#!{LVH>Fx}}(I!1OQgNfUzT2%%8|0v8et zu^on!Z81bKMy)WzY~u8yQy8@Y6=WWWkMqy`fCW}CN6#Lw;^bBK+33v9u#Ebjr;~sv zIW%Bufo#2o2l(#Ihek-9a$dEwELROy>8*lKExLV!WI=}tfEL1iTMVpd5ni&e4Bsn` zvaWEG5lk=qls68x;Wq_r2su_n%^-t#Lzwj>5WQ_2mON!l05;1wUbjgJHsC(`__K<^ zE`qTHifih&a7D+Z)k@k~$h$cylUjHxuebnBaGqNm`e}&Ai#JlyB09Au`OHswi@p1z z2u$zw_gd1T$d~r5GNJGgg165fzGW35O|->MLLHoViO157ouiKebg~RH@3Kcy!WHkg zod)%`ncH8jY6(uX<_gOJ5AI*Dmw@qxzHL~LSOiG7>g_Wr#0F~IorK>RJV7h4TeIR+ z+=iug-xYA=G0Cth+!2Gzo8cWOkfZtZ`KcUxEdmBr-w`-kKaE}5J{7a2RN-v$H(Ya} z9hYRVMl+>uoqaZ?pZ0<{)QJP>lXJ%_P}EgYwe)M0K;2Y0DzrPI%p|>YG=QLLqXi=} z6yXRySayt*#mLwnoVB24P14!5+=ZpZY;N)4&?Q3smS7}8G`|&fq zq24^_t?GE5QtF~|p>9eSNmRWII1%+9ExhHll5YGj+MWY0=kEI_Gb6L1Wi+IWl!h5;XsBpN zDoJ~%rDSCk4I@&d2!$w-N{g%#B0C{6BCBCb`JelIyWgJM^L^rdfA9bKefse8_MCI> zS@)cC&pr3SQQfI}GE43ljj2#loqaAYu;)(KyXDhMj1|-;844b|raNeNjN^;Pp5w#^ zdRnL{+&w&a?Ry!Wd$uKMkt27!T06enbn8nW<-{wIdLPS6#O^4av^{xnX>psY!w^>0 zT*K@4+LFqzCiFV45i`VGWxbTy!~W`tHw@O6pPZM@y7y{@NZ&E%li%hfO>11WXv`81 zW&K#m{nwpO!Reo;=LyH*OYZddUpg>D?b|%}2ZL3ki`N|AJ+!$0s1q>;mrNRULuV?+ zTvr?GGdcZfeLo%Z)MFx5-#5$}`&?7tcv|LC>8$GLTvfAjPmO8e{THr3m^3h<%)Hma z(AID#X9r22As2RymK47tF~&TR}^mA?GcyD%({KVV|bae=bWO`k8l7ZT$il|9s|dy`j>pe4gp#Y--) z_WwL5@#lq;t0r&TwPTsVq%BGdXN;}VKg2RVHo$g*tXiMt5ulRrz3QE$z~>S7`?U2tKJq|N zS)utwgX~_G`5TRPxjeivZd~Pgwb-6&eA6DzF1N3&l~ z!Np~pz>Q0LHvjzQAUVcuO4_nyFZaXs5i>F#jd;KP;lrsd{%b#+9A>oTtje7`pJzY0 zQue6+d;Rg1YSZ@J{ng)UO4{>7Cm$F*I(Q=Ue(mXeEoYIpPa36G8n)gJ4&B`L_0!}# z0UIy0Or2e$r+vQ9ssl?jbi;1BO;c<6v5Vg+NWNvr$%6i`Zx_!OTjyZVvkyd=VhsT9z%;}goM}J6XzCMhvJI!-hql)Q^P91TGYfAU;XN&apj}j?ty%v zrqe|oZ>^n~?kwMY-YCO-i^3{}KucAeOAmiF{(YL$xKWNl8&<_%|1mP_=jRl^ zg4HIc<{Bn?)-09ot7m8ut1&xv^}b%Y^FqfRP*^1x(`V)UHFit;%vN-k6U*){ojvEF zgUrRPYjdjXJ*Opz-Ae6|7<4&~)$__am1bdazEhj#I-6we+jeQDPP}KH-4m@33Pzvv z21ixte{)*5?ctsGR?k}`vUb-Ge4A2_a!zdinutLC1Hp!&s`;nME$Lk>=UTKj3vfnh6(kGmXPe7@2! 
z^q`zwSd6^-tC(XS(s#@qdaiV}$+I>4gU6S>&zvUF=R@W0^7jJ8l4tvPX7*ZeUs2y` zj`qV0-2>Hc?W|OWFOYURyLL#_d>c!LBq5VqvOW1k6?673iT+Z&^SPwE-LMc@%YwGq zQ8In%+Eo1GRCfjrS^cakN#XSM8v8YY*?A8OOCs+aeeJlWyPE0dw4w{o9&CY2X|lrv zA`YBBl(=%&!%X5Zi;MThS zmCB*JkBG>Y1|Kv}n!Gn}zMWc@)3%bZpmi&kh*S@%J1BlvSG2EKw3BcBx`MDI_gej` zL3YDt^bb(mlrKH}Zu1zo`oly-LH$~#VQC7t0wYH}x@NXC{%+xt z@4};cYHnQ~=DaJcTiPdw%?3MP>B(o6WCfgcuCKnHmN{clTBeTK)`&iiZ#Pelzdh(> z*#lPQxabcVGp4G}OFSMnz$YVL*gYae__pv7)pWO%jC%LVX9M5&4ZY@I^-yPeb*4a& zY>Q99tKJ#>S)m%s);$#6<$Ty-=cyAt(>C9^wca4A`!j=t+VJ{$>pog8+^iD*b84&R zo@35YrIzDTo{b25tDQf!>Fel8F5>f)uWzuDx#nr$Up1=yWI^oYcRTe4nB9A`Shm3C z%a8SK3Due7M^>D2ST|nTe3opzy2z7tr+h~azt_eWv}f{|;dj%+cdAV-3*RuuieEH+ z|DL-ubp{_>dpu`fn%r>2m#UmT99Q|lA!u`p>qc!mtn#?`q_$}Y| zD(Gth*C|&2vQYRUe_=+DR{FL5tpyST!j)BK54CvOcgcyD=F_aa*Zev&*2r)hyz?!( zC-Ac6{6W5-h4N+^t}6Ok{_RJ1CBL4Qb+-p?9DiKs{n* z35V`aDvrs|IidMuhgVk8@s)kE;=|W?Sl7wuEr<%yx)xU-`bz9;>igew#SecwAzkYl z@mV~4sgI$JsNkCeVsCl{9XabXOfFJ**|-^NSIyWZCj6pq6n}2H*?Z4R!>=w3*p*~3 zd5_eU%I>SvPe{Hw7jUceXxS<8)!Nevzs#r@(eg%lox-x5w-?K0kBk+)x*{NFUc(xP z0NK_r88L%y2@H}H?(UdZidk2aPdT_Iwf55wy3*M{@clX{QCXg~&^4DO=fhzsR?)kBl^!`}s_J!PpW z3yWnnPScXPRI)P6RVzXusrR_b3j&SfW1o$F+poUgqoeO*QuJfkdYR9wN)}H~cQM)UYi2>*`;%>VZ3SwEU*2jqPbWNKW}xb8z9p|j z%^sGkZoJqisV))LaA0V}sJP3!_6L?m_KsA));#BT+fK?fJaKVfd zx1C&3cFARD;-mpiM-BTP-Jo2=IwvzgaX`ahr=5CgGFF<2SJzgYG#IlxXt(MkOWE$b z`YWFQy*=n$&)i9^LjrE-@6{}5P->6We+qZ2(VfT`U zm*)R$s@h(l?36VB{h?yzqRdaN(z)(aH8RRp_`Fs8z2WD(*Bk2tOMj&v@ml2CBK&LP zu%e{P`p@25NZwHlm0G9tVVBhs?_AMSt_v0oaL;PE@Ko>Ox$f^~D2Vm7J|_1nV7XNE z3OkXPc2@kc_fkAkM*b)msl4)t<>xi7BMN=3w_O=feP6OwE3iIQT-s6cR%PV+G@;j% zt6xP<)ARixE16xoc%*;C{qDgUSwk}#bB6ZFbzCk#W5(c|Ih&7GPZ;>9{7C=9&CC1W z^mRN|<|hKvX|4oRnm*Bq7kRmS!c9A+g zS3HCgJ>y!>CiR(HH(X=OT;<`f_+uqr=_KT*?tec1BlX_2xXGM^m2cN=nOS7zwE@~7j<^?JSU z@yO*+uUN6PYCVzV{KnyT28y0qCcOUm>RCyOvKG7e5?y|)Ol=ZOR;@l_>%L?CxPmcd zJI2Vd5MLnBXE^!d4Jq0#d#_q@-|uhNYR>27CSppxRB z{_*SbV}i@>FDXhE-u!FBX)kTPU2hD!J!y6r6{+fU)}WtJNWWAetq%deAIZ$$6nXA$ zYP8wI%NMoBeOhpK*W`?Y8`hoK_Th@| ziPe5Bhlgq}>~qs!d)$>B7x8!D2TiG{U zmD;#%tjO+TMG*lFJxlLBDM@pg)gx%ms)!P8w-cwcS=U~9>PM`8k(89P^5B@DsIy5c zZd-o}GurJt&1b{zZ?l5TZdX>77j2C=l6Ikc!=kwc(mM6)B=l3APCx9Xx>3QgYPnC! zx{bkw`fyZ#J6L#73;)uYIx$b4Y@S*;GG^?-c>TR|uidLss||aiEMw9jEwrG{rO{-& z>OfX^iHDxzgBN}q)IWKn(#@IarPr^{d%ZnHBk7d=L}?4*70y)?6XKGs+yeRBzE~|u zZkwLD{pa zC@s19wIe(dldq>NTeP}b=(dw~n~ZHh24Aa6c0%c>WGTaYNxwu3_#~Q=U)`K9K4{C3 z3Tw;xFEyOADvjIxy)K)*6FukqN#wiYNv*i~5enJa^D+y^>9vk+^AJ0-_*DM)qw_1D zOa9K7oTw5kYa<!d(FgL z`2!m+?hH+MeD2Hm+3H`v9=-Rj)wZ}oQ=%;O*6m^Hw}aOT^^KaGy3AbhK+36^XRWu| zCWh}>e6(_WO#J9)r;|g5f4S_Y5H~tMRcg<$itfKht~{)=G1LDZsTvo&W(81Es+h1kvf45l-ck21_TXd}Dji>LoxwmH8W)-J}nyqe4yRVy| zawGNloGOd9J}=~QgvaaV?=k!&u~PSrl!%Ic(cM=ItJT(AJGJn|>5!q~MGl4%Udb1f z4&PP#xVOmH?A_NXXOmT%lhen?X{=d)IKwS&hvJ@B5AVHv*(?!b7xEyy&*iam47V8g z3T^qk^McLME7@)zUT)L~Ja}yM)APrSZC|=uk7{}=HD|l>ns-C;E*3hZSt>l)+U!|* zHExCB-b$9h?c;kM))Y;MX)xb@xdd>|3F%GnXs|CrLM@ zM%m~jTU-vDuytR2ut9Z6@k0Rt-TYeIG5$+#en7}hKi>`3aw7YtH}7yi zV9ZA6qk5v}oF5(dz8i9W$rp9U<(K75$*PetvGv?7c8EW^}f4QBPqE?fA>y^Cg^AGo0T8F1jv`epAn03t2+P3J{lj5Tm&l82G zRJ;F-6#14Dt&k`kHF#6(7t$Fn9PyjY}(5&3gD|>rb}^ zn~B3BRn`o4Tc+KdEwoxP@0Bvpo+59mJ5>}jBZzg3@~ z)ru2u?2D^AIbChng`uU5;_HWg5O)~zLHtGk@aV?f+1-K$e>%0@Q&3}Tsk&K{NQuv! 
zSECjL%V~^U^Gn?>)XH&?(X1f>F+F6=j_XBkcTC@QNJX+wK~S%9GLZqDOx+Ts+Y6MW4(d$VQy zI?JuWWt*FfuLw10|Ke}l^(e+xM%F*=*v1nIqLCsc>|liX5Z0iUUhXsO_<=18?%eDF9*M_Temzp{F|G-h24_8 z^no(RerC&Cv`*P@cAkx@$m>O_@tc~?ZW~*Z`aY^x&D6bcQC)QYl)T`#qk{5-Ju~iJ zlNmfGYix9z#yZ=h^~NT4rE=xZ{Dl1S_pH*Hd}QUl%&>E%XT67gpBl94cFEJq{pZ|1 zzX_{+dTQXRJ1kel!rf1n>Jy(cY#mzwGn%(55eDU#)K5TR5{K zdGK|mnL{g-OtN2Z?0!=*T&?WakQ6(|a~E!}PyHSf$Je&Cb;0`2r{b4Is#t%tm}I~1 znd;YRe1{)oOm0=u|K3aN>h*e)1$(Qj_C2%?ciKA1{fdt2=bDS}8|tf1jCGqR(h{Mj znsahZWQxWWv8&f=^k=8$iZ2N&H1GC$mC}HlLwd_xnUpBBSNIE`^tMZ~GV)nc+qXN) z$SmE;cYB4WK|hC<`zxhyT~wXxk&`pFU+DHpLLf)%^Xyv^ z0#P5d6h$Y`xoEmUP~SR!?4b!>#l2ST9rRPXrE)P}K=;||7yL$jHYluIF>>0lpE4V~ zbJu!{ihXw9;4Kv1J!?XlFRS`e@Q3{8fhE0C-U^4M>IYW!x9*Ydqw~h=`TUP}CA2r* z8DtV>JUH21L{WA-pT?zkVT+ypp1!t_T$i*|JGCE% zZuFQSG*Ha$YGCD=xsS6?T*+~<>tm-@Yc!>YUHPbq5gvmY7JYtN5%??cSb5u2nf%3i z%@qMM>LoGVViPm;yftI3H`<)_l3W;E8+1miQv9Rvm+;VMi|6M&-e;I2vDC|V;TzVB zF@9@@uZ!pzIm6v4YJ^m|L!&fbxyf<8)sd^8@&y}=tjv}QOCEZ)&42zak!K=O*{uo@ zJyd>)H7#EEbW`Kv+;=hWCZ%ur*7td_!VQIs4Ic*dzi?dn7hj59-rCg;dX+n~!qvy` z|2;k6=+hx}7iuL7KTMYu8L&pDXnF07!8J4e&bP=tFIFtgmi&0=d8>GnhTyxFrxJ~g za*DlER@z+csW`An;(2=AjZqe=WzY5$p0R%OpmDoZi|R`A3nyw1HkBu=_%ZC|^I8X; zUh=2i25E$u%oTGS@?2`eL152AyQY5PyHOLCrYc*db_ zv1oE@%T9TKtDafeWOx!$SWK7+S;=1EQIsc-!1a38&b<9Y?#wO?s$pVAssk-xrS z?w(0&gI7&=ovgTCx7aplPVv}~OT)e#_tMaYcqqj(s=apX;y?%L=FFeviHPy~`&iBB?u??@)`;SiC5goBs z?S}a5II+Etc6Ow~sMx`o>7Vqd4UMO&= zzL$&3EBn<(_J56 zVJxq4ea%R#S@lNBfs%6D{e4BJ^zOFGeE&6v*|t@}ZPJ>xWtTU)G|Op6{=Rhfd0eT& z3-jp56`uQsWG7wOCoI^uTgQIXsjP{O?|yvJtzBO_@x+1~Gp;C}7^iXN#PQ=(ube2o z(I~a$=(+PZuYC^@n7O9prug?ClUBDy?mJie+V1-gpI4(czbKm@qV4B>)=q5MHoMR~ z6@vm3G3%B4RGK!KZ?HAo1AohS>NYI(p37j5OIz~8g$`erb-(SNo_lz}X}O5)MW#8c z0*AB-hzZB1y~xv+&b>cBRn$~;waCP6290K7an1(Wr#~v{`yc9iRQcxR6L%_Sm~L-t zWe=HR6+hi3Ih@53-r7|ci4uL-ObwaR_wx1e3s~eqO?CJ0==bolck{;M4dN_3EX834 z&*;tL#B*oMd!^`__j3^1bJ#Ck)*EWO;H(^75KTZ9g8IIM+QPtZZ9m$g(Aos@H`3zrMDmq312{ z!fMZOslGnpqw}U*d{*RUohdYaqD;2gwEjn(PH2ALKCg#x-g50hi;@d%7uzp^8C702 zo}XsF3<+BJy|>foi?%ntO1(>KcO~w=HPLkmYtH~lH`C^jS%-FuhBsY3`+82wtM9pE z%v(1Js@-$muKDe5=$K_9E1)fL@Eh5WEnkQF$%RVH@4M)v^y4Frd)AeB9}9&GO7jtVs77jGB(_jhRBsj~NQ*88SywWhgS6(fTml;{-2miCM`tNQvw zdU00%BZa%k8@BtZ6d4Fg@sH^#l;ak%v7*H9;37f(kAea==RKyv!|)eH;{>aP1#Vq; z4NlUuK9T8ZxnR=mmZ60KjYf_EMPd5kW8WNEdSl-i-<-215h{;I zdKjKDpIKMDwk37svz_VBmlbQd&MbcI-h4<=?Cr!p!i~EG5A8|#Wg;?mfwgOJqW%8* zo&&ZH^Lw~BWlyl)&l#g8Hcs#ssFf@#^UX9ltFm&IepBlCAo;_s2hKR`-}FPJt}%tL zyNl!I@ya$UHQ}*1rT+3)r(O7ZOnX}LjPy8ni62d$mTtd$ZK3`PwFO5lE;~ed$6wu* z+A956-%K>Fe9#9g^Hb^yzYkg+JsSIR_{F2sBJEE_ro{K3`jv`6Xk|@S%Jra_K3~OM1B6*%|9)P;PJ4QdUN7G2H<`|e z?&huQaq`&Jgz$0djY6hwckOR4IWD>7ORdF&(=(1L0&_4FFiSbyEzRl|U-n|IDT1o}N!_m>9Ztb9Wo@ zilB)`9&gnj7wx6kA_9EQS@;oT!7mXX zBOP5;f`g%-#od1;Hzm}af2jeKNk$TA+swWk^~{>T}qM@G!W{g!LC4&>O2TK%qUQv zJ}zDIf)vJN`g?!F1M<6$^mp-K`>yCE^?4ipU3$q~eOZX$TQT>S|IDw!1c*i&gg@LY zXLp_d6H6WePK;tVd>!tlcgb;h9f0G5-b$jrN(EnCEt~vi0R2ts&NBKYcC)Bac)_g+ zX1dU2$Oo*J<-o{dCDY%e%>~_0adTi_a@K<92m+{{UQUqc4uE^U($^CBvN&j)3IS3b zXrq=vgG6onKAs)j`}8#cb{N_IU>PR{)CBo)&|bYJa4krgCJ-2`Kw=aIt}EIy9L>sp za9x`LHhN{PNdD;Y_W)Z9U@H@_(}rT$?ea53Hx2^Eg3Wz=JUh4p6FU7uGy?+s z+?ECgIKf?~j{ih8khkC1;FLI8SK6$K@@Ig~57bnQKsgki%Ah2UoY-6+7c(apFSvit zO*)e9$!Evby#(p10{JMT>G(_oVN-Crlx_gr)e7{LVIUJJCOob=$RF6x9v{~M8~(N$ zEP(<~t3KXN(N1XMXYb(YM7)sI!5IhG_}v0b{7ij3J(t=$c;M`clS#<%@*Zu85(Ml% zApDgCb|27%PT0G;S7YxDW9q;}*kAd(t2j>fex7bl{y^nin0yC+Iy?KrW$pG+!<{M3 ztsCFs1(e1CrKARphoP^XDCOxQ0{lYY>YTq5j9T)t4}h>r)6d19$*z$G)!K;q9RYM| z;FZu-y^fCn3>2YIQa)o5!&{1sbDcUQMCmSrM?shy7Cj1@4f?hN0?=6)voYp& z-R*z~*8)B4uK7LM|*P1A746H@i6#2|0Jb<+)xmf!YINgc0f3Jd`KK?)hw^1RK 
z_ZN(PgEqGbV4&z>%8pO59!5#|g9Yq~EJ&c}s#eGNHqz_!SlOqEH3?^6W%+WZPz>(bdFxs{52K*%*UZh!mucs6 zVDpNH?-cQKyCDF|paaTaLo6b~3}c`j^uM{L<7)5i2voVcc{*yr-(8$!+OOVl+8~nJ z&Yq>03Sq4`ft}nJ1X55P&HbCzP@Sc}^=c&?MT3mPEm z;N*+O$}kfxJ#{VtVMb!q3M6f+Zin4mF|-lL0n6GB^_31DQi1z$PPu4KTQp@|(2KnIk8Em^8)Y zStug7*he%v^bF3oYWaOeX%?*&Y7Cvf)K zf^pLP{Wp#ezKjHJ?6jP^$Efd(9~5W16sFA!x%{S2^=}6QU9BK@8G^3E+c8}b5#n?Y zmI|t?rh{7vB2Vy>+|DG}r69YtxiZ~PrEQuFZa`THsjp20HL!Oj62 zFURN3K8%K5Cid+AJdWlO&wLvyd|}357HiWO7E6u;H#mla7(1=1hA0Vyv_r*R)7KXq z057eO>b+0ZXcaB`qg_UOdeO^ncKQO8MKjey;Qi-_Pa2_vI$b!uuoD1rci!M5>!EFgjYySb6jz z(6@(2ggekc<^%&R0{#3T?bC7%^kzKwzm*Epb^n^B~o0qfieTGyu446a4XYN=383H8+ z_DcYUV=)7&HUI{BaD8NaR&pIfV~Br1-XJlH!%E8 za5d2mHm_Z!K;q7BE@rOweopN#U3PYdPR>8i6o5eq!mbJu{VjaZi9G`L3c&VM3iTQ{o zV0Jc5_*Jx9>I8w!`2SUMi8jj(>B%YUKh7eQ-BDut^vJAN{ z@?Z+4-^4zGe_71s!Ia-&;<(DR2p>nKw!PND;K7hlXbrLG{R^N^0H^Lx$YI?FjNvaU z5C1GdQSI1$C<>==?-EE+2FvSM?*m1#11+{E2prLj3H;x+cyecIlz5Pnp!``KB~b5x z95Q?~`tnc6`BV6)Jdv9nNM3{2M_<_wkyY^c&Iw$^cZ|Q26VrBbwui|EY)94QZd3X( ze#lhtOCFHeKs|`#v%W81djiKh0cOW6osEu5(-{Irvs$IaY&HGlI9Qui#1*vLvP-H}Jbhz%yzQ+}fmq(fkd!{yL7I{e88EQs#AtXD;^{!Jfe$KE&iyc@p!`iTKm*7KS+%X7RrxVPJ(Z{gMPqIBpwf7$-ukb6ho#J z;1B6DH|@wE^|bn(v;>Kx<7G1YdE|sKP+T%~^7XV2K@Oums_#%=_7N`+m)blsYyEc+ zLmr4hhG5PPSB!vO0?3nnpg_r$!G|EG98`zZO6bU3;B=F5*Ri~#6tc}ImWYqg}LxUI+(o) zm>zr%qvhk}>j&8~oKmLsu&x&B_zGMG{ z9@=fRw^Fd16Bx=K3^t3{a%1ud@vUHL)4>VOCCG3;`LD>}9{S5;UlBePf2k)Z!BW5u zXQmt<|6GijDNg6iWbDy%gB?mi<*QH#M$e;h{X2uTpj6&&e1+HsP>Q*r+ao!sfWPKr zIwv`Re2E5ZkJ$aoT|;Uj704M)UV1IqDnm1uTrXpscQev zOhqR|*ar=tpK3rDKM+QGyqJraFs_h{X)i&*GAgc*I|s%D4b3cDVwHnRNnroJA*8a`m19(TL0F=Pd&o6V!uONF^SM&r)v@VzI75xH^VF z=vqZ0#K6gjm3a@G*J3w56qex(k{(Y$&4)^0Ctgt@D!X+;H_+3wiv|a_7-km9ptijn z&|%VPY=vG>3iSW4(_lzd*pW^1+yOUW&BH--I>ef7zhJ^>&88d4+V^Bl(sN6d(bAtc~y(kUeT&1#I?PY09FrzUe(bA zietYpigtFhCy#b?TcP6yX6=Zbwk@s~aLEQdqAUSZynDBH5#pH3%p0sSgd_6e200Ae z%C*yLK92=X2mvRS5g-HjF-V&ET|_L*CpY*Y)6dD-6B=S^;jyRn`@6dNYCAc&p|Fri zP-x$D9{g#0Z;13A;D=0~G~k?-Qd(+5(bd$StE0^j?<40a<9aEj96($HH6zoKXaNj{ zUM12X-YwNUoZNxIJHDcRCr1!DnNj2o#2C1CK2)oRP=>9_onF1-mKIuvob>*^JpoGv zD1inct>~c`3#GS4Zd68rl4Fj)GdVaOtqQFCY-iZqSTBKRWQns?7~^7G5ssRTW;@w4 zws9j1l-U|o?gWrgv^e{RMgaH|~OzLW-Vo)CDDvBE=f z42~8st;QVzw52`F{RLJn^62g!INx(s7iqVdTta3O0kPq_Q%aP z1b6eT&)sf<>07~01_dvU&$ICut|5vGdC(R5YX9RhP?C4BV6tx0po+hbkHF!^D?Hup z2FIne0}b#Y_In<-HTom}h-^TT1xQG5Ej}3|`CD(zAT*>uAsI)776J;?O)-*#3vhTa z6{Db(9r;y%S08q;=FfvGmNAEnA;)CFUec*X@nBjfXei|477Zfl)%4P%YG557z}B@0 zl1gS^Ahb%;o|V_O$Sz{=-V~9qeHJzCDIctVEg)4U5V~k$gmj#S69V$cgz6Av5F^sH zjju8%qGp3q=pG@HgDYAXKE>nqQkaZ`r@cQ9ga4L(JoX6?>IKS6dZ(NE7#O7!b8sg( zl~K{U$E#9^Y}j6_acwrRLITLuCRowe7(=IdPrDPeaiG3z_!CvIh!z{_pBC|Mt@$Mz z^A^_g1J*-UR~6@Dj1+U2k{vDTS|rmhid0D>WBbSBupDdfjmyiJl_Tr zNHNsd#N5tQM^oF1hc%~Pd6`%b+`9|PKyok57DJ*3$YZ;}i88lsBU4!MuH4)jxMu^> zb|;8?z667$P3_<0lknv(n>iV>C`6a6!j>XbDmem2DtZK6_Z%@@wBEB_4f`HjheP+W z_n_~hQL%=;V@J0lgY!k!_DT8a*-Q`<>O&`!o(wmPmC_}SNQM(<$G`;6C>*IN|NaJv z6#$kGWJiW&xp2vY+KhS1j@D4vrr2$(O!kG?w)TFyL>*wM2Q1{7*Di02g%&5)5Ylr} zhs_;}Vqg`r7r(uvAHP=nuc zFeGgP@SXJ-Fr_+wI!EK~nc-dvV$NMPjXon`DGA{C$VjX-0%OF?;mW~yRKJm?8Y``y zdlZbIFG!bUkw!FzOPe0vY1k%+%0}#1o0(wy*>4x27AG67cy!hl3>Xt^?8A{TvWaEz ziHHl^HW^E(1JLmRl=O+mw|55mmmq~Agry%eMyjOi;}_tSz-U++L)I%47F0k*BxUh)|b zs&lPtQwl~%uj?-wEg0B`)}<+M=NhE(taq?3GM#+^hnb-Z5bZF4VnESL$-y|(Yd}i)T_A8449-2Hl{kp z|1M||%MBEmw6Txrm|~{03shC$!Gy`(L__hw5G8^Mk1{Ywis3rJe=(kj*lj_cp?5?d zq3R{-M<*kEi&G?cq@*ZRX2;~*_KP;vN#+8lo#2G`3m7l-rEq*Cb1*P00>ydl%@NF? 
z-p#X0*a38{1x}JtK*ey={MIbmbo7!*>@}3|t@}#pbI3LQR{Ud>a6KNNxV|(__pnYgZmfyW1 zUKGUV0?YCsn4AFBC<^m`#bo9)K?XO5czrFt`py_&i6yXvl)l01|C+E)(q}kuK%tiW z+l_VmfK_4;X^_?69&a&4RBC7u;O5Cuu7VPwg9pm&+~7t9`?5TPejyC!DwpYffi(+* zFf0gR^!$i1{6W7s8aw^DE0G-Mg*^13<@ATA;3HT|!02=c5>_=~5@=2QLl0X=m_ikJ z7*cMe{97afa^rxnuE5tB1j-8#OmtFyq}7~Z3cHgF_JnrhrDqaw+Zj@-aW(tu;CzDh z3eahjsGyOu6Dpn_9Ubqfbu5l&42Cc&nkG3P1Q%HWSjnw(dJD#gE1fu;W|}*~5KtqH zF*IDMh2{zU0F99&7NpUN!DFx-0ktkPC;0e2yd%HdTo4I((G-@X+S~k1pu)ZHF~|t6Y03peAqmx8zw8XA+_ z2usNT#+wiT1;=p##En`^wXlvQ_;HuQc&z5+%gbvUK%wDWgC$NN4+9;iM1t==^SsT0 z1fm_X``IV}k^}-F>&f{_7!WNG#1>9pOe-zJ-hzN1aTyd02N^6h(2E4Fu7ZJ6dWjj% zRD0(UFB~(!dT%TM9}CWdbnQ3cIHZ%&{kcmQvR-o^idDR>u$PYh>~e1yi`51vhf)O7 zJ*Qxdl$*qNXU(19EQ{yK%`Uka`3rq*?zWySMXV)khR03Je>=Dfa)PaZ6;O|Ef zJdC&E!ZoiwTSFyKZX-ljYxA|P^dACt01Y6WG)&SDgTt<=(-QuC(`iJb)|R)RU4x;G zf^-?t{unskYv(M|pU1J^a#M>IpL1OP@OE$)s)IW#P+4$ifFMHlu8OL5yv`4g`I|j1 z26*54!sTLunMYP*cwDQ;Ox1={WNtE1jFI}BH5LwMScH7eK8~#LaWe?5sC$Kk_-0s(xu1g=l04Yac$R1q>`G8{} zG$40ux40$T*k?JfpyJVsMcIXbgnPWj;8ctRO95nxj&X>SL|)U8&;pWsxotq>C9Z&G z#}OhK1DBaP`A+7%miGOWC&I?{KNq>UZhSI0611Odhc_7M!+u^lyz{M|g5!7zm_4&$ zh@zvDV@H_XuC>h4t^B^a49%{a4-6vHKHCBenO2uBa|R_I*E56G1-^phiuDpKf^_@` zi!cVvP`YH*p*)>9jtyD{^$5`=L}V_I9I*oqrc)8b!)Av8$BJR4w}2Khu#+gkbWnP1 z76Laryv*IaoETCF0X2FRTUEF{VOdL4tLwSwA)HT^lpIPlsj zP$Z^&W|Tjwpg#jvXc6Lh3VwhxcuG9n=U-j7z4C}D4Eyw%jjnzC03OK_2yJd-gtQTH zM8!N>InWxsh2P(_0s41Am_lYb4RNn6qB{S^ePMzyj$;-5$-hAwVUn^XL#%-hIJEC2}i_X$+MFckqZR^zaa? zUu|@cRe&-J7(=48s^dcW4-01uD3L?kmYNXg4*psb(2{|femzEsRn*J}E<6U9QB!l7 zH@fFbO#;tC=-6Gbb2D2vIBBCq%LI-a8rlK6#q8R)7TBK!=1JPmfUg)DuObp`VZMwL zMbWCw?$lfkf)(cgqsjgF^$!e~wjVNl>kuVXkYq4@gyHanYsF81uh2!p8cAr+IhcAu zF@pBI^tQuvp$zHb(JjkHX#*r>E_&a#VQ7?};y9sf-x@g1{JQWc(`s|0_<+^9uv5$- z=-tNGqq9x^wd$c-G0e;6<;L=gy((>hNpO3zQ^}zhoMKQ+qU1|AN^vU+;aLm!5A1VI-(nZ4q!od z*k|_)%7Mf=woK?k4v_l@s$XQVe_Ie^z&kyyhP)ji_c9Pq-nOlr1{b_oaAMiXvfsnR zSBklY?EYGsY+jcN5Y1-!)kW_C6!+Ym&!aH7zn~R@dD6jJ!wPKxeGfK*am3LxXPI zP$Y4Oy^4e=Yl0!R2Q};oXH=%@&mMiiy?g|5&$;EzKS7e;L6TAgLyaJx6Y1H9JKcF z-ugNelGh|_tAMgOV_-3!uo$vb7`qfB;LSr_&I5`U1(M(_IP8ZWMR$p3G~eKNB|q z^r2NpeIsCZ$brF7Gdc7F3jXUcXkIRo-u}QD!R0pS#A^W7HJ5m&wWjc9VBEY|}ACwBUI8a`Iv* zk3A@-Z1UR$fC8sroC4xeWAUv|{eVB>5J!`i^Q#2optJ!FN|@ier3T}6Jc7$$qX^UP3um6@4`s0) zTXmjKxa=~<)Hy$7k}Ptqkx{z0&>7t(aA&TB4hKENuyC3}3!)1bOKY?P_?rOw(HzR* zGECxS_KSKKMz4$ls#b9M#n#7|DvFm~H|BGd2{;Om)IP@b8$6^ts9|fL0NDQ3g*v-@ zv1gbfis#7E49rZ78FpmE@O!n}wgtk{v_VzK5aq#Z42W79SJ@QZWZ>1nhJ5$QO}YJh z1K3cIB?IU0 z5MP=Ht~d>Ij99M#3yI-D6UIO*AQ0Te(FTt)S>NSx_k94mJSY|EYkq&i7-;tVMgI>2 z8dSZ>`+j+XD6r`jM7FwwMcw<#g@(Brx5$vQC4~K`4Q>nJwim=a0h@acsD*nv_#Uz| zb7>34Ni*+ntQQ4+X6;W8n)^VjReL>!VShkt{`^zrun4erCD1_D!oq%I{9H$KWR$?e z*X5imJc*_yJprKw5Zr8rORWFh9?RtixEeH=o;^UOv9c_8FY1z60Wv2O1H+z}!aqbA zcti-UsPeuSuk~9d#sZKv0HeuP22JS7rPzV&kT5}Rok4Wx6kDW2^BC(Ga8`!kY%Qcu z6eyq@--4&E{PMg6k06d-u}R__m{>}bED4+py*N;Cox^lq7uP}=0!~EBx3aV+dEl8hLX}5~phXGx1zPJNFjl0j zk(~{qo)c~jK=8(&&l&Rq^!+JV|7Zf|an|hiwMB*aA)B&y1Az=EbW0&L)Y5%lGmud3^tsiFl*#vDQ@j!<=vGnOP zFdg*L|7Ig=KSSd=p|i6dF{lli3ud__-mMP$d0#+XSeBsc-ApdJ+Fb{>OQt{&X<}yN z`Q9U0tovI#mmFoRFvyN5o?H8n|Cw^vGCB)9F6wzAE7m?C7)Ga%hUleY@9L-xjNMel zMyYStV}vxrnR9Mn6R$h; zh|{M>7K-u#rz-&qxg@g)jNxxMO~>812bZZ5i*%?eVdws-fKUlUOD^d$T$G|ji*tLT z+lCthga1bujBNYmq5HF7dGyRa70Bq+9_Kx5Jt|k zRh!#<5X_pv3CgGyaTzR~yieR3k6|*z7#8G-6-;n$ALz`arf7Fv@p;gzCx8cHf=;b* z&3iB&TBhv>n5K+xqcU7IL|Vlo_5rTY zTpj4=k1ho;s6E2sF12P48VZ6QYPKP;EI)#=&?{h01UW(whFh;xxtjBBULNZ4Wb^2& zTS3Q1ftw`NWAbq>O0Z&deG0%3wIZnv^*t*O9WSi)ux8Rql}=(z*rK?nt1w$>Ye<~G zCGfEVV3K)zdJcw3^HGak%3#t5+Fdb-3z7c4U~0uUPzq>;>-6A{Yd#km%%qO+fr^4K 
z9Wouifd5HrDdV&=q$%;=RUB3U+G>cteF#k%Sj2^zSySlMb)tjEZ^J=UCmu20xu~6mFV#6UkT1c+(awtw$vO^E+Gj~}%NFf+N_s4ReOUQQSUK5tYkHG6a1b&( zisaLzP<{P0s0K7hcPf8t-oaGjEwa7EzPU9d7hX3Gg>%7Vv2JM9I`>e02^0fk0q z>(no`zYNM%KEdaM4Bj1awDQ}{OmtRW4}{Joi0u0m6UoB^gX+?b-XCB60PXECMJm%} z@K6Zzjbo1sJ^|*P;7v%(p3gC6Y_f`fQ3Z5>Hy7A*$~*8gOprx_xx+WuyA&`dfPa)F z*8K1l#>`Q0!A~{udK(y-PlH^`3>N^I32aOwKsLX@Ah~VxpPCQlQZO7D+Q$DD4*L+T zJ*+_z<6TQc0{WoxWH&n2vV}Dn5o;R%9 zoo8?hd^LgEsR5&=b%WV-{<$Ci(@-Raj1;lyOXR5&)nX72L*PmV)gwP+S}6-+hpXJ~ z7pdgoq3;^{0H5ojA|gjDc;>)foqYvoIs>9-yfKcr__cn|+A0v>MJ_#0g6D=P2gIwE zz!1P7J)ibX^%vaOV@({`c_7afc!PifiS@Iq_kOs!fLIy)%wYW*Ab2+rOx|3%0dEy` zg5G6#&Y)L_&5CoU#V-W%r@$^p7O^vjV+2&1`oA3%b9rIzA~uh!dZ{sjD~*A=B7x3z zkN4Xz+;nf zv<}SMqr~$EtL%D1?kj=o?rV~C}dO~jN?Hb(}}?Txt%dm*wHR)R^M zIjnCiKqi9#Nfi<*gaDim{d5roRnh?$OY(DsdR z5OamO^$@YOdH0|QaiAM66?RH`CO}QSlU0+2=MKH>IHOXA2!la@$h;49FAZ7_)SE)X zlOCZK+1eNrFNepb;}6AC?)4$8rVm_XdVzRd!7M}xtoscxR?4Xucm~}FZ8tAPzY-bh?Tj5@^o^be-ZL>iH9LXK^AD-(k#{!?BXCq7qSFi zV=s)CK|C^DRP62!s6GML>{!15EUB$&ei$Upt@a3--me(F56~$SyF3hCahwA@8b{FC z6oi4|^bVA@b7L{O7sMmkP`SG(2+iz}29l)+$X7xzWQqe8WKEGl;gPZ|cz)^#$`v!Y zC>2>s5UO{6#Ek(mim>)Q3BNiLpuPbeaU&=lv<5?_J@7xy0PvY7?*cy$1||v2 z^5_c-Mx%9%39`zd#MDW3IRn%V7tYn=#uX*;aO-DU-=+!zUR28>;|`;6F5Jx70Efr? ztNI-S7m#oyM^9SN ztEBxlkLcZDfyakvKoJA_MH=m|Ef@v0Q(>BklV4_cZR2|E3~FDHf~a*V2i4Fj0t0?1 z&G>(bsTs5d$;dV)EoCMwas(`rOd_7fk@RpMMbQjocX#m!K2^f}&!MU6kVAF48?^?8 zZ+9{UWxSYdy~NY1Yz}73#lh`uW+1Frh+j*4R?JwHxTxe9$ej1To|QMn&5M z>kw+aH5H@g6-!cQ6ZDLDu8J7LTm_`L zXD8)VYz4-m2|Q%@`r|N0`8V>VSxug~GuaC))Z*R2Q|J<#H1Jr8SZG^1#>%u%UP1V| z1+P!mz~&VKL#M=zE794XS#%B%mcdfUyP&Nl7$Fth7zL}i&xPj;I0xW(*@rB3LjTA# zoEw2}+Lh6QM$c$K^hj>M;ukS^+6e!RzvE8(849;ZBbIJ*Sd5-Wz5~>d@nLHjri9iA zEh+;>f1o54X9e6cLz+-kzo``6PJo21Q=atXDuzt25j&cW(1vSZK7$8GtW=S>Ue^K( z2!}nMOaP*8U^I+jDAd~7sz<-Zrso4^o}4*Om$mlZ?`-cb6omE`=#3*4z3mpp+f_=% z%ru*&!=@m`fVi|%uiEiC-cV=T!=MmHeQ)1Dc&iF+G~B?EtPeaZn~t7)83@fjQiK)R zzr(<(u%h$y9d3AREgs(54;6O40P9B*@T|UIcnnh@7{ zd(z@t_9>ndO-$MZi-v|n9Niej$JBCa%lp-A4JyWMfJNwwT z)fjHOGx!6fM#?A038H#WEGVS{K}2vbOho5yet%!^Xii4m_u#Q;~DfV->@ zhTHCEI$tB_wjLyhV;<)3vS9MvlkJ_YXO#d3NXx;HJ%o^lZ6jswd9iXHw8OKMpbdh& zb{#qnL!)f_f4)`1yquc`DoGQ7?_YpN;1W4L-;_uKc;EftA=1XO;YwHeA^m{%DALug zRl%TXPLb8lF3K%yM(~9h0l_!nhb$xSpMjya2Wf05T?9RR&tiOFkPIsvh$Qo)$+Ix1 zPKvLI&h4@Gcla!Z$QkL1RqInnbW;G%Qajy&$bk3fsr-oM0PP#WXvt!-%%9Q}C8jVM zv}?orAG4MLemPjAEQe@e{^1;qpDCb4jgJgMMFq5g)EHfK@3k71L<;qY0S3vqZ^KM} zdrzk923h~=^H+qHf(dkks;LA)=m4}uSz$Thkb;y zZ$I(tP(Xjw%?65tygh{Dj1#?(MjEBg9$*6}maS0q04K)rso0CLQ2vI~+_|}dU=8rs zO+sx8*{~7+UvpmqZe#WRe?>B83>h-dWk@8XxI?aamMQMUz3$i@jJd{;P*RB~B~gk* zhC&D#GDU{SkPJ~&sAT$o)_Kp`=UwmK@3{xx-}C&>^LVbh$69Nzd9S_p+VX}^#wQF* zQG7SrZK&mv?_8R#(KGJ@|3~1OjEH9!Gk#VYn5?NrYRIlh&fkuHU=y{1(4>$qHXgxl zK4-|vG2BUtVS60HU}r?a=F20VNyftT0(raT8VPq&AUEuwNT~lEnti+omVN^&9x9gJ z;48+fgbIc8bLTCPq9&o-U$*dtc`*JT$~kwptJg3}ODFX*Rn{3VIjwi&uJ&n-MHK@> zn~Ft!vz0NZMR}4u$D@h#%k5#E{Drjv$g-i#=Z8(>b%?jqQ{HYzlN5)5Xt_BEKf(l4jVd{* zcH3kNE43x5b0R;R17hmh#IAAFBzg*&jI0hmbd13(e!yz6-LQ}-1fKq6RcFB_+x{)L zfog<$>E^=qashRI=(|j!-Z-hDwj7m95@k?JLa?cFpS2Wb+5!_C5hMJ)d^OrFqE0-PPnlG0~>7At`tJ z_=k&8T&oEzQkcL0&R8se82GJy9}TW|?g;hUX{}RgV1Y^CwcL*6`IAwb5A*D|g98~D zZxA*SP-1lafxK8d-5i#c&Mz)A0ZNh0hilLU&t(>utYK>NT&+F<25JS97%Ke37|csl z*5OIEGEz**EUn?6%Krq90){=1-!P%Db^c=vY7KXiJ$RjyYk@+mvKtP;$SLJ%+jt7) z^3<+TQxMwY1{2B--~3RjOHHU*tI>0w#IEme>=4Qe!25!7dDD3#k9BWPW_R+_24%^+ zp`PSu_vu@N!6}9sT#UR@X2#KD%pYY_fTIg=$P{5+amJy%OrR-(!HtlDj9EAA9V(Z- z4R0-5QD$LMhvV2RfZWbtOT@OB>%y;4jd(XSU3#mp%Q6~gN#*&B7`wv?;DM+2_x=(2 zGcb%m1}E)lVFkuwc`I+vkIxkWS?Z8x>3Fy~6749)Q#NAP?BzG3X{OBuV3DQbi&Yte 
z691jplVSHv{F^lA_`oH|hbRk^F?(GN#_P@G{2nG4#6xNCk<1%kQ>MKYnowC7$?k_4 zyuKClI3c6w0mzEG53aozAlCw93jwl0Z3d~7ScS@9t=^zKojf)(sWY&q#c!Xo7Ff5y z-^o6WC3SBHtN#lV?tK~t{*w59xu@NaJ3$tWYOW-hk+VLN<)WAdh@^W6}BNzm9T6X+)9A z5>q&1Ql?LOY;W|`8A2dm&$sgGco4r240%+5Y~Gzg+GCSzFTX)a2!7&{KkBXr{4g*_ zRz<4!V)%+=v$s;_=l4ZA7me!HV9%=c2x41{Iy+ki^kMwU2e9dT0veS|4FR1N6*9W| z7J81NCZ=&%Jw|#4@=mf>`j>@X!b0rf^l=;aqpdF+?ujcjWoJVHeq~+odE36w9at?7L82Dw}X!tzi zwj7VMdf{82O9LOpjeg(cF4YjSpbCww*pEt+;O!+mrBnX;d6K%VKDQj8G3?HfXcZmE zptInbXJ)ELRtEbZ$^7wBlbSn0dM`*u<_ookXgHiEEmz^f|DC@ie~;HpgOF?o?GnoR z+;GOAEWl3`^dFBrzPS>^x&0eRsWFie2@!aE!(JcuZqBp>zOLS@<5FIkeBXx0e!$|X zSx5Gqv>U~XXS?#cHr-E>oH8m8e)d6}IA4Z`uPV5hIFRyL} z$pZS}Sqzk^Yp<7VT}K}h!A+-}ZAi^n7;Ni^6)WCl*jBnm_U;*nrqs#Bm%dQ1`u1Z0 znFs!~mEghT_Zg(J2eU7?$}ArLYG}7KMOflflO0P(P-7hx8nB15eNN5O5Q9;kZZDtr zFc=1LsMU-C)5s|%V>xo)QT`)_YH_HGtm$#jh4ZmlUC4x{!n8dfWPWLFsrg1LpioiN)^Rb$W@})e7&N@*3!U4D?nM zU~epEuv&2Q+zxW#bM%_#+1tSR(46&30^spg3{Y*nSbDC;NF=_rdD@lJI2-EA*A`G?R^Z+c6IJL<0BeR9`FBrC zl8ZNM_;i7UFA++sLOfD5qmKCK^ps7zB*iBs4??Sk-;T8S8fh<=g$W#k8bykQPde%Y zm~}^5RwuIB*j^WvK2iJS?YK@9G~<3}rKIuCOplc-IL`mgay_;1&;4}JaT!Z&E|$0X ztOTbYr6^~J_gI6imzPy=bW5|wU;qB8anPeUZ2slB?dbE2&$&g`DJQ4#@$mna(7^U~ zfQIX%wok`D7!qrEuy?iz4mur<$~O1Jh2ae+2A`zk@K2%nJ#AQ^W0x3@MX@$#!zzk< z^fj%0OsdDH>wRb1{YIBF#?BeXKg$O~hQm?R7K9x5hY8WV4$tKEHPo89Ir!}E!jvP@ z+zZ)3|MYbR?y~pQGYvC}kNe!$>NFg2>k3Ynboi!3;jB{Y@H}34Z?4L8r7{g6FHG>G zP`=#Ws9_1lA#)B+>L|m|4Cqn-?Xs>%Cu$=v3!HKyoUwIzsoU5?}3)rGq;K89~VXC06#98JVDorFaEa$kZG?ehgYty z$dK(qfs=AxC^8JMba4kA#|#)n4SV|_A-PxkvkD_K`d^>y-NxUNQja_GVCjii3(Z%M z<(+S9FnASb{g!tO0g}Y<Qn$c4(onS zsOZ>+3|gg$capv1A8UuiE)0C|hpJO|gBF@>DlPD6W2VL72|A@Gr!2q+fxiu>6VJ*o~Y&N&GZhzsdPK%qK4H`I(a5%W|f8z;%gA1BxPAM zcTza{!_R<5=2)Z1y;kBT^r#tMU-=ywV_T5(xDfiFP$oqQ-M%;Q8MN)i6GQAKvmSQe zL+2-e^_{_b8LPHF!$@4L&YD3Uni3m>PW*@jn%afT4qaQ|LcfH#sC2i@Px8?zE!M!w zJ0aDQeI=V(GEN?cU~M|hYV+F8!ke3bc6vcOR4`~;2ZrO48ffP=KAu^H3l}F_xpJ)z zl#S3WS-Ec;(m4y19_Q*}-?jRdYDY6%Bf%93Hdi+W%PFJdk$AVK&d9D7E_)+iVSp_O zuu`}~dN5d}LDuP9dq0EgyiUT~4(-CBiRWjmrAJgLno2Ktt2g8101aeXD;wq*xEaL0 zEl(uU0CP-Waokx+=*ytha_+UUNQ@*=A_+^AOLi=d<@D3PaAhohJhdti61yjvKV z>E5rot-t)$^+&d$)$zkA&W^{Y`!iP7k~Zq*yxDfD{yBW%O03`%Y^RQ}ovz~;mcxsy zUDXplV=?fD6mRLr`$wLF2knmo51e<{J_#=|bjvcI;n#YsOQ|}s#O|SAA8GI$h@Gn$ z`kbi@PQ{1Y9NBBgMrZ{IgCBf$5>$90Rv}MnpMRZEy7&k)w<(Du5jR6Q4bf9A}1rq4_eWO$2p+xgx3bM&RZW)8L zOu=@>VP1ZR@kO%Jc>3~S0o;J?Q+73jQp+cjTyDxxvU+^)%YS|f9V=bXS=d{*mSM3k zaTY0b>PXgOPk#MPPAJan;GLsKV-rJE-kCj~yw)2L9nHUR%N@ql2@5uNRn^?8VKFB~ zFdt7>l_3Kp9jLr)#B|z8_rp45!Qrj#8U}NeQJ&J^Tu3K^->UlpoorA#A7K+?P|%k< z8O7Zjc|)JtdkuiwJ9*(s?iz@izD+6T+^z-EEy`*z*=Y1qsjY3s02BhbLkFP7*G}7!pFl zIrz1`^RolzbZu?TPnd9aa-C`H?z@SH^JhQV;RP7fXjJm#uHw^^jK>lXADZcY%o77@ zHvF0FzbarkBLA-pM_B{AE9uA5Jn_IM7|Z;4Xs3&UU0eC-b5txk3faiBrUkz-UM=<{ z;EH!v71XVRpS^Mw5ok6_W>tleY`w^Utq8*$czX7PvcRzl zIwl*l_T^<9N+$BgrO)ns^X2N{bi+mZIr}c2NqGy#pE?exd*9EPv;@tE!w}-hi}Wis zXB_Pi-v-nWA=Xue7^bTQrPGc=PA z5T}eN5gLq=z=xOH(TZk0Rl!bA);<#(GI&2R%cmo5RX39As%a&cw*>tkV~09aaOBS> zj6|tOAX4>&ihvra%&h|kUr5Y|Z7}ja8Yu7?7$SLnvN_XfF(V`+F{O2Kf0Yp^eRE?2 z+4`4V;|Kf=TdxN2Jp>z0hBEkoMCs$YN_Z-z;ckiZ|GSi;m*l^9d}C<&L4@H*K}SRj zro&q1|9FnCiw++C?BnRtVEcB^Bty`{t(hLB9)X1*Q;G~lEPi46&*8wd2bkpE;`6o| zreLc#n11+1;P|=bzOKvKk^=y^1=uQqKih$Ud&TP$6lz#j5}4LJreAWZvA{#B`CzRk zNlQWb4d{+c(8_jU;2bBMWdt8KoVTUm93c!-&`1FbOPg1OU*iufuebj4-7^}_-b^1t?E5k({}}&Z8(w1jL$3T*wp0obgDn- zlvCh5+_rUOA~-YvLXzjni_#?|J=gH?$vKuuM?(bf*|Z^V1u$qc{>W|H=)sIeG00w! 
z@^mEl8R@aJ(Ed);drgP#QekflWx&|0^9z!pZZ8>-0WAogSda?nmB0_#+*EHk!?L*A zX-HanOrqJ02-2I66nc){M4*A|GBG?nf+5M{b4&lTI6EKJ;Pph>Qk;aE$sUXuqZzD` zi#|m*1W%Gro1A}nUWdYKTaHBhjTbe@Su3zT5jUtXv8d&(f4|5CduBrMq(=NQfiYUl z))lXPYI2Iv4N|^Rw8lGaXiu~Sz@^Q9HHiT$o7Z;3QD}rq_5R-uWb($z8nl2t!yf&V zu5*0}JTm2M{tDv}5yO~j_2fh1by=dX1GrPbVj zJ;&4+u$pdAt;T|goNqD_ru2qFYE*1;a#{>-??)#mDko}MEg|al*XmVGfMm8N_w#Qt zB94XL;mNj;=sCyLS3qnG*;DLXQ2<_xB98QABWE*U#XgvuCok~xjJu1g@0z!KWHvBr z6eK1$XV>0iG+Ef7cG1LOl;oT1Z(H%u%RqG)tEwSn(*FZS<@F+=sj11S*!ZPjx|q+{ zT}*G&h1hA+y#4NM%M?TI92iag!2Bqj2 zCuSCQ^3-kR=m79>fS(~)Q*;r7SHkvVEB+4v1K+8b;rOn67$iY%cl(63nRCD;jQ(=y z>`P0uWl$)Li}KYZd~%MCv|dIwr6*)_LqdMNmnJGiY2N+SFBpxRul3-g7IS){Q{SZ+ zO6at(4pY12ISGsy-|T1*y0VfnDl4(#3BNToFeWLT&rHqD0w0+!F97;;CoW)ovkx}EioiXM$Hs2{E8;f(qFazOPKpx_}B zp3p6fiK-@^c<3#im}xHaTEVgDpt%J!OrAMU+r^NSyzTiN95M3#PO%ZGn2+J_h^)lo z91{Wj5~SE%7<}yaj3AKRy*8uBg*D$U%^XS&hUTrx&X3505}%_tL#eF4V!{KDMh;pB z@#2DJ&`7bGq{9qdu`7$yQ_D&5w=21P8(US!02o8pvBgdH9 zz{^Y~j}Bd2n^LAN9oy#vTjV<$51o`q+H><&Y)nFeu8#C3Z_TK;rp5r!XRv#zj_XcI zV1BMecm!z30w^^9`byjCGoXskm2j%ZIJ{%!5lLw2NvX{GkGH*Z9?bp? zXHjxfGyF0`R~`TsWR*B{RuHT%t6qH}ANWe@)>Nq4vi}&CB|KYaMJ3zcO%Jmgf!0j0w70@uSg7?-oR7yD|{6l`)&`~Il)Ce^_QYI?UKM_w!v2Of_a4-CG5fmFy>k3Y3pBk7`V)|3_=8);U^;h zU5#DtY!E85soeD$jupfFA61!n0PszUpIb(^55EPYTMD^VdR&4J?2e3frNj`L$%@vj z?7glnmh}Suz>sX8(TybDux8<{Lql}t5R-?zTw+IGK)4?(kWs8y6NW-ewoP3(6+>Vv z>weFZvnll8Iu3_A+Ngo`Jjsxh-TBEXv$`x+*CYqv|7PiqALl~hacenfp1_~k{C4o0 zvr&Ek3TfMtU^vRC);TRzI|J2}ArQEBtfF=OJ%`qwC@S zWt_P(hza5<1}7`E?U|r0O{N|v&qx7-Qc-xn)T!v3R88E8#Sa#X|7RHEQrz~lc>i{i z7U+w;E*$xA^24nPflBHVl}X6gnM|ePgtZ51MmRaY@iD}JfI?Txpb2ZJ7Y6S$J32Dz zj$#CN&jy!aN#P|r<*0)6YAs689fS-9j%DOlaK6iPDBa*F)fl|#)%S?Gzk#_jWR-fE z(JSUMM{lS9$Z-5dYX0kQN9Vl(e%D2cBP+WfOlG`(m#r^{eA31BniJzIj(!2A>YXLYC^uL;B&9&zV&l?dJfkyp^B`c^e?I>l^*EhCaOo!&_WVKy zrWO`_cSnhVR0v4KMRB9ibnrS}aQ+Ap~nVcW9Z6PsX!Ree-jB#}2)Fi6EtZwA#dBPl5&GQluVnUrQj zO!-l-f=D_odsHlY(svAz_vof-9pAk-SsN)pjnChG{9eG`i)_7>fd9fShOa0NL;f?k6-ZdukzW~+wv#tVjnv@en5RaAsokN2ayUsV`Vk+SG)7^Q9iQk zve=1zK6{kjMjH*(vWj|dP(E)!R90)=7}+i=n<^^MZ6nEE&QRDPk>9hhcnA%}%mik>+zR1nrgZZ5p z-F%fbPtk?(=_?)&>j%bvgp^38A9V{d0{bNu*GoNC+F^aQlE+hMUp3#q4J1*b-TIq* z&wNM^0=7n+lnoipiZZ3QrIUv3Fe%Q-#0z`-g5_O+OLmlWE5UeJ$ljq9UeE2P?S=ps zes=hPx&XWbfaRXAU?~P{xhZYpvA^PQu8Z&IrcurDIDL~z)LUgFSb50jsXv=-=i0y# z59SA@--6O6LeIadz#x^6v9^(HIaF^kmHG|LrhxfvqxF|)0>p8IhIRteqm>y`AVT$D zf-&r_$ecgh)i$jrmN_2;$iula)fl zI8ftE9JlXQ(o%2jSxc9I`|x8^8~%W3MH4opvb1_yqbt~ZR7Z`2$r+z#pCy9|S>=DWc6i=lO^5s6(KGBf-6NYj( z79Z~h=&vB-ts$f~wgqEw_@m&6goH>uq~UD=(Y1lR&yNqsd`lN*srp?_z^&Jo;p*iT zC3}PN6SWDe9|^4nm+~ldOl}x+bYQ58A1dqg9f4#8|5!@*+F~A|y-ASDpzMEsQ zqBMT%J{`;;Wpb8{Q|gze_w!x@uNn$oW$VURS&eL+b`H-8bFrGw4ZI^PDY<_fkOS+Q z1m4N};LEx*7Wb@8an8CEstcXOlkxlTPiY!^StwIWv5-=|8HGcc{Pq-SHgAYFWQR*P zmN;Aj45ad4CxQ3=NXBdTiAbxx^T}@ajT9Fv`li>49yC=h8kQujXnr&kzz0_Lfeu;q zPXCu&15KOM;O8o39b+FXYV4BSPYrn|&y$ti|g18cH$Z?5>xsO)*gd`D;yYg~jDKxRm-kgTi71o@ER*rz>T6D@0mK6r z-u>2~LlbV3ma(+?8;nKqj!j>l&smv;LFq^~#m;{-{w-{r3ZsrD2aC_0uEB>d^CJBCMAFttxlgzL2i8&wLhU7@Nxu&n zxMIwmoOfB9_2AgJglK-pPG2V3ZLU$*_7B4nhif7G%tuU#vRn3Rdc%BD%wX3GsxzO- zA5X8uJdcoFMJ#sHr;LKL0?(rl-cD0%;G<49Nq+OjTT#=&z!QKgcjG6PGEBul%PO0R zV|5)YabthaGHV;c*m59N^%kV=TFzh51q?CfNl>_SybL+cIT6FnlcJr6OFRrKlG8KNGb zv4YfevLK&#R#itA(sxcjJ%1HAM2~mL_W$fV8PVMf%gVVK&fUEDU`Xsp0(0N_t8OnW zBLRYz8?Z5Zco{C+7<(tap9%UcD(<;P4vS8+$S4#YdM^* zExYvpU*EEPRbA*-gW}H1Q3nq&UaLZbM9I3KsH+s^eA^eEc>f2ms1K|?RG4MQUlKMiLsE*_}jD{A22LaD8&o;JV48x5r03 zK=;#W`1V3USIWOkmr~a|jw1g%$E0sc3~f3j;~l&AJUk0Bz6n>=Of0PA4PKa=jB!TS zI=R2ctLXU5$~(Pn>x%T=?-8h!Ou;G#<#%?EIJyCRuF9aVl!C2zdfZ|vAJYY!%3>|A 
zWM|-(bLyd;SsBW*#GbafiyoqJ&vn5bnds!t$sjpZwmJH%RLpKz==VzMLfv;Ns} zzs7@~<*{~IbD3RGBP46*1vc?KUMB&D&`dfKp7-!ClRyA<_sG-Qmx^cvnD@`N(^`Wf zlGe01_|hjd)#{iQ|A!W1ByJLhQMF4AM#DsU42;WomH~fs;7sm4jf>!cFVv=`s!xBA zF(`i&?6?%Gt0lR(;|osLqRIjudv>%+RDMJQ>Bq&XnxEU7NT>eDpGlLw`vRG~?Op=i zmmVKjTPdSi1Sajm68^3IHoPiq2)!9tpd-s6R;Wx4JTqpyD*nkBE9qPc~oHTMjluyS!siE&bEt@lFBdPrC zJjY9}1i$}+Mph6Q!@Dv@%Pm6T9vc8{NvB7VjH4xJ1yBE6rsyZg0WX0Y5?+UA8J?9# zIi9*P_*jA$_SUQ+D?r;dNKTft+QcwCi@q-M)DNcOHWteuOJYTn%)F}>0nJ$KTT2U8 zB*!u;cIR!Zi=Hk-|D0_|L3(|XTGb`d{Sz6IeM_XBz4NUNYg_M`y=ib+I`qx$k%e&2rMDbOYI@N$H7mLG3(nLsmh5naCjdkf2!}!b`D@T<*)dPZS3X70BG51|Yz=Bguk+xzLG*98A3!xdNQ(RoY$LLoQZkEU#fk64VH9HTTA z6$35|JS2q6vmeV%gBi!b`N-raYKexB6-;L0E=xI>?ZkfSR+x#UM2e-PEz=;Ix6arW zA?V>s90GS-D(a8PNv-4H|B~z;&SwU#scFp89WDj3lo3nT>;uL0s9m(6 zc;9-aSn_&wq_cP>`AJM zta*$EslE+0tR$9tdOI)GvPiAp(d{%adQJZ-VBTy%mHE!YyBVS}bx%!na~sVEcYL5V zK+`hQf zOM|+tnAsCr`UF-W-TS7443%xSHfg>VPfUOC_~m<5pTyRVQ%2lCM%-2Xn)f( z+b+@N@EiCeMY8Z$4WkSo9%=X)fB4a)E=PAkkNyQ{c~to9ZwxxX;OS{qHtYe2XRnW` z`BEdGAjOIjJezWX(I}oNr{#$&p1Paw={bAifK=#ClZF?^hUTW5BP7K3f}rkyY6Jy- zEM)bBrglVGgL)9%;`I7SnI*^&Xd1mN4z|9`*p-&rw?saLu@wgm86X9mdq-a#F&d_@ zNZ-!?!$>UA+J?3G5V&Z6lZ8i5_6F1s;m+idW8G^E)pBR*V9p0KF=9w$4F7+l34`tl z1N#+u8W=KR!h?lq{OWAX1$sH(zz0g|CWRf1xCAQ8qr%fn zP`Uq}`<=x_%T4-iCR78;^gM~s9mxCD$2*`gb-{MovEJ}LM(0-PwT;{}s0x{9c&?2T z=3+6G!SXP%n5aAqUFl=i)pC55&c3`Aa~GHJN!u4D-}eBz<)B5LOfl!>y_`Rt1TTw$owL2RVIS-3)_i~ zqx5KdY{n2A2v55|ww|VbAbMs>RWvXpx|uBp8rr>C=VxwVWC{_!S^ zfRhXlKg`u2(xZz7!V_siD~;=ur&-xrFepWNFj*ynvIgQB~QZqoi>NvQ&@;05Ywn)F*6#^UBG`Pf1J|SS0IFYq~IShx=+q{+~JJ zFG(Vu{r&#+O=;&rZ?07qD)n18hOhL%(%ES0LJp42NQ&3ZfVA}8np4l!gIbNm7D-OK zInt9sI!N{#KK%8)&+pso>!Dx`6^u&=@`v?hTx=0$V<3tjTv__oV$|?o#1q@{mF>n+ z3`J3F&a6#yHgxt7$1|$kvx3GebpU&$^XxWALuS~7K#8)P3Q3x??`GeDG^M0JBpWJN zb8sl*QIZWj4NSl|<4T5V5TPvlYu%M}3RMayTMCp_G8v`fBWtKE%vr+r8Md`~;D@ga z7Dy~T`o#W59|7-34R4oG8r}dB7Og&Dz)R~rQthz^-UHrsn!_JBmhmbU`*rvRhLU17 zZ1UT!P=a--^-JX{vgVi~D@r`61^)#|ki5ZuIj{WIFuh z+lB)JV&Bls6&KJ(B^axFNUW~yWQNLzNU8+qa+dg4xdNLL$g1GrW$s?!Rfc7(qQJX* z_F}ZJX_1_f>N-{I(y`(`xev5Nt2>i?yPmkMM^O1WZaY45Q<*IA6b8>FoyvvfWcN>hJJY781bG2 zt2q#66q|Zwzc&OLaS0j`DR?~YJq?8)9%pr3!m7Xf_Ta{}0ZSgx{?GYTNV^WXOCQ?f z1C1W%l9iw_@gM4&7NZvLIovP=d^#Yy5teOxMQpq}7IhXFADo{RN}Z|X9i#HR|NZMw z+PN4tSXD4}%4ZDGe#~xN4MO5zbsn36cKqtnO_YR!@?st>vQgV-&?*LMc?7P5>TpP@ zT2!1gVO9Y&hE;ZU6U_UT;V7l@x=HR|=Pc3#9s`(^jCDV?V4``4)qIXg13DQa#)EWY|&c6(mPb6(R!s|rP4`d zUSYH>&e~?--`OFd!QqpACv^%R9|wAdBJ|1q$A4Fu9&eK1%d4nG&NCQ`pJy^?IsyNE zR)aJ2h74Ji+<&aQ!N7Ir;ltzawgNHp!hiMuD+lLPyO1-p$P-k+S!?2y(Ffi@H9_q- zxM$RAXhtJwMwrlyk-0ReW)Ug-)ULuBaFRC0&Y6FK>cGtryQKg5EU$($01NYE!UkL< zm3gV3PNsvG84!6ZvA(YNYq-qoGi_LWd^JrZB*xBu>8A((fjYbk$p&-BbIZ*z(l4NkztE=tA8a?(ed{E>_a&kDseO zsUWnX0FX$YZheB0SPRx~-dXbViPS~$zCBtf^d`iX39&sRc-o{Xqj1|Rl9AGe#-k+0 z*JKSdfhjypPMS0_AJ(--Ti4=dj8v&>7P?|ERAOC@DnaAvU5*4q1$nykP#B|74#Cgv z>3?*({v8#!OTZqOQVx7>^t2WhTbIGlkz>s{QDfhz9m z9#*pe7i&B*@#r}kgE$VFBa79Wx-uHYVc+0ZW#n;bo`qn9dGa8<>0;1Nax2$cw69(R za-?&fazZ~F^k6Jp)U*#_H?Rg;5xMb1q5A;oIOuLJpe^df&=k#nv_dVy`(+dLr0AdK z=~a#>{{hM_55Fq+VSH{iD?eEQF=kZ#6-Sx_G&vy|PHRLmXeH%fs{&yVw$3L4zCOnb zUuFZ~t&pvJ`D<&m2Hbpbo#<~kJ9*F^*JIlxVgb>>BU@W)#WId49JD7Vd3$=>Wgtz* z>v8>K{9K(U$R7Ve;Y?rPwTOviF3 zGjzpx=J@TTV}^)Bx^!X6gl2`YqE~QGS4rq@NGjv?p2#Ps#6`7@NQ~(kkq{RhfkSeA zCFCO~)LQa4JxTs5Rw9$81w$C(ZCQhhr-;w?!9V0FPQVvSPmy~#K=*}AxdS%1a59y=!;5b@Rs|y?(O!aM6WgcFSVrqrHl1#g z>%96iKcj-ZUN}!&$N<>G(Xow>zCo#&hV8#siWu|6u841GE>Sr+p|&>qf`SGuVgyQI z&Bm;cc=1u;x236K_@8xpA)%cG4#~21-X)CNo+fqjjN;K5MeF#t-($b8hpHY0yy{{d zOE)kqrLK+)5W^4BT&m^6TKqbe9<})dpkbqay&ozcYJ-S(ulH#%0|{le=6O?2 zg=OrWF}lc3pzH!pbrdLD9AK16k+MkJB9oKTQDKZo=@yqB8#)*bUU-qx+K0RLy4R{O 
zY5WG@pNb{p7KrWh=8ue@EydecsPm3A>1h5OEf2$u-m2?#74QATfJ^~GY!{+1&BSR1 zIGgjl(XOK<(295bjE@5Cl1ug)9X#|qETIy(CXcprU15m6&AVph(J}B;)<|Nj zT*r{EeNe9JmN%ltRfgq1nHT%tA7bG?F2k|FX>=a zhNZ+re8L?9W>+QL%SymSH19!?I<+-W4NN&}#B8RrKMkXk+s5lP7?b6D7{0&KoQ{KH z_o*cfCuJar_5dE-qPKl!KEgOWl~PO1+}vcbDg%d|L`t?02f-C=KrjcVHiO|@!gpRq zUO*>{T+4e4KYD}aKu`f%-cEa|A;Ytl{(pTt&9F})zAZbGul5-bNNKKI@bbn?AZe5y zX!He>R<-II8?qD@u@AmN@ER$|7_UqfGL z%PyN?S_Er!l3LoeUlHfI^k7JfiL;{{gl9ViF6==IKt;icxKPHx{+7duH`<$7TeN5P zwm*a1#?a5@0^qxC7$Ebhwjb8|uU!VtZ!??#uaFt48N{@!{d(T44`;gYe&-WZF&%h~ z%AuO~68=x^yusP}l6SzNK}Cp`AD4ZR=Xt2|v!F-r0INL9w7Av4Jz-59kQf`L2*s`o z`AbH@E9QirloTTUuct)enG9x}H9%H(?{`lXJp|QA0;$OY&b+=1=k8U*F@4Y%u)Tc^ zE-vQjEu$AaghkeX-pa$VQqfGDnUAT-87YzG%U)RYJ1gs6`x^40Nw>YlqNm0(+&~ts ztQkc<5?Q-JxQI+UZ0tClYjl|>_S@Ae4e0HN=~!l{Amwm^M#^nAk{YsVYJ!^c%X_!z znfF+rj1njpC2J`CcmuV1I!x~r$P1nZf=HL&|GI3q{UE3$bU9NH)En;=Sk;UyOjprj zIU)Y`T{Qf-G>v{2no@*P)a_v2lY-V`8BFVK?7}=_Rb0Zry<)?W{LDE|Wh9Q|8XR*wjbL(^Sb0(= zV=~%3UYd%Er%)J|8WU|b0y^!`)OLk&FPKylI1@)PPK(>dTW8(fk*dj+AxWwj9*7w*%uLph(XCq_E$X6LMQ)znK!j%6L+i&GQ+n&lK-4Rn{`g-Mc zh8svsG3Q3=_HRr=Ohj6Y$`3pdCoMKEMSn=da8ynjnHjn3l`g2KY(l|P);!O=#dPxe zy;9OeL<`>0eD9>P5YaneW=|pZ7Z)&~yIZim97^88#l{+UuQawdIR8HMPo9ZBvXBWO zU9msIuHpp}5IO>JQI?;RC!L|^KOcj#$eP=!MGV^E^8DVm(T>6mOCsKmD)DTN55U{G za2ryYiY;Zlf$&x>Gw|FTd!hbxJDM)UdX5dklX|pr<4>2mGYZq*xhZ%g58@)1Go6Y* z1kgTBz^h%>NE){Z7^Jdw>(tG453uyXA6e#pawTKot+lt@X{cNfq#GNLY@^qXpN7uL zJC84|Vn}XfZg^ztrkX)l$Yq7@`?zacz^2)GQVmhw z>%fk$+NzkE%DW_iZLf`(S{$q!2`jE_TOrz-H!=|pztk)#+NztoNPA1Ql)({LR~Jp% ztG;EBtT*i~qV1!}tY-iW7D}45qfx=yGQuyJ=PF?bGyKI;P(U z3TZ_HkiSPgSSBvJcW9Jgnb8?hcI|MT3SFZG@$}Z5-K$zd zQ%b^{%HF#BDl!IbI|D0{w@3_;(~~U5b}o|w@;gBQ93^M?k>6GxwTrD>0Spz8``7Afdz_%PFD83V1D+FvYQ zYNI4AC+7KTC6}*4+bN-Obzwx${?g%ll0gp*0F^1)EZn~FATiI3w#tRdVhrJ!z}Lz( zdn>&=NZ>1o$w*9To!nnNY+z7%h_zhpdBbUhz!IEzNu8-*U&6CjJ8XZq zj;_tX`@b@9Y)ue*2pTWLSK+1%Tq$HA;mg1j(#@WKSMA%n1bSAUah`kEm zg}yTKH-DrDJx*7H+QK(h7Gj9%&KSAW;W!C54Y(kX)t}6@k1m+CforHIKz@flH%oEs zE=*|(SBR)c(J5}I@zrrc&^G4Yr$%6>Uq8fI@OZN?Bk=FfbzTtnd!LWVRvA*J>+_uj z;fXIWTuUWyx6Y*1&JFRLR`$iXBb!fxg3@4Jyr7`y2&N#QTCC**tgqhIOVkFZN!EI1 z_3C^%ksQ!#;?Pgq=he|nm?hJWq0K_NyKG;2pWRfQjsU}O3s@dZ9~mQ|c>9&iaym2k z?>Q`|U>hkxL6@g8m^+h`!7NEE7h65D;3g1F#Omd(#)+>pLZxutc+{TvC88*xFNnCi z@aX)FZ$s%a;lG-T1)Y6^;adxG@#kDjOG%AOO1IAQ{9Q!a{#xIGG*e_P)a+@2@6t@h zr`8idc53bHOt~;|uUmMIzBISt7pzd89S(hmv6+w6m1js#wjR89LlM33rP;?J9GcxH z{d33n7zJ;&1DY^pPz1`iGuLdn_zE<)AF@@sGo3k?@i-XQ%syVnEdCNhgQx66)UTjj zaT&2jgKFZ1GOQ*8zw(xhdKdW8W#-$#uf`a0^YYwx0T#DR!Jb@K$jE%lDPpySC}hKE&V7u- z8|+hX&mzW~|9eWX%DfTyQTf8riTAf|X8u+D3rkb6pe;~1?2&Art_K*IVwmN(JWA#* zq3VW0E4X}p=fE^L-jWYEuTPaZ%s>^FJc^6_1l43Sv8Bc=ia4Dj!<&f`wiXzhFsuyn2iM(`+b@*ScWY(cD6n;>S6R4wbq?&2>;|v zwq73a{75jqEBGY$OxMt`@7N2NchCW~qPV^hW}u5Syx6g`AI?Yr-xrt_A?qJSE-+n6 zcItwiE;-8n3XJ}8NI0a{5b#jp2wn=!rPGK#Ulw=nUuN9PXnFslTm!pLT_nVt zf6v!@VjlRi04tF_+TRyskd{m9(drp$%L$1~O*4a9bZqb+Epek|c&KxeyP+t9GM&!2 zIlyWC^1eacgwvq!<%QUKRbhC{WNewk@RUldCrQxBBb8s$GviDP^a%f4)OpT5qLve! 
zSLNWR+r*?tz?GX`76;7x_T}2~AMy^;ELk=v)retQn&tm!-jsm>{QT}e?WF5{BeDOh zAvloUM1t4%Xs}V&uCZaW-rAc3HuyL^TvGwJXA23}Rx+q$`@6IA-OurEh0-5{O@^sF z3I&rz>g}x=mq{zgZCgnRetSD(+FVSwN6(+CH^7>U!sE$Ub-Z0xbU3fbm=$co+;@u7 z9fik$MF#A_of(7j&syEYKsIUZ#G21OO7D4eLRurUzq;Ktpne>+Icwvs)c-E~Gn{hK zGCJk>JCiQRo`E(?H?ydRwhRt4W;Qnt3wvchK=p>JStyjZd|wUJ{|6k=${}v=)-Am7 zdq!`Fx+L_cxmeile!MWtnJ|M~YNF1Ku|c&cpD2FoQ1~&ZRYCaIasuD%IL1d|9T%=c zQ&W>uG0`Cfm-1Z79objTxpEk8FdjZdQok*Qp;^>BUu{#t);iYk?Mb}drMemr^&eju zH9sEsnn4vR2LLd6`?;qjoG({*;vu=*|JqN^Z%+7SxtQVC+BZR zM%4~P$^N#e_n08dtm6_BGd$0~>*hzf^1MGM@1XYE51>J^-F47h2Bb8|+CGcwRd^D% zy{Q=~=`qoMZV?iuu2)ijq5-hqfQRxLboz%HCbKOnjrDsC+Qt4#6r6NuDDbyL+>phu zV;?bouP2P}qQ7wI-P5JD_YUqc6+g%%U#-~qKnx^P2dGfewtd!pBGK58nDmasobuxj zQ!m2YR>0%dv4IK-N?pXjl(})=e4Cg=JbG*ey-t{75z#+Jmpzd>uT0|B{m3-(qTQ^z zfd2It44pfNnkQ!@MaM+@wwvZVU1Tf+=544r2&=CNM$5CF-_|o&mk{AI1gjKbrn}wJ zgnODnU-v?jWMy~bMn<5xqS|IWF4a>3S85cKgge0*u4hWQ$JBUkFcw0Oi?$T}N!-GC zZbxIZ%8Ef#NiK_Sm3wjn@aDz(!UW#l+q1xHJ;4x~5*vfF;E06w5i}(r1uuTU`S(kR zqs#X?Pf2KN_Y~Y%5;*5zt%C*5X*(IG;^`ebcB*^FM5_X;z4g_~V*E_$CeFlt)$3eY zaOE7dwy~hH(;lYMT_Q>q(zsQ}c=3n~uTFt;uiOuI&_F180pjp}Mx@-Lz5MReU~Tw^ zi?k~|x}_AogE|Q7l0}lvhZvQ$CY!^cZ66VMf zDVI4C22?o}S*foI8nMerKwkM|No`2%9d^XUr36 yMM^ Date: Fri, 7 Dec 2012 13:45:52 -0800 Subject: [PATCH 138/226] Modified checkpoint testsuite to more comprehensively test checkpointing of various RDDs. Fixed checkpoint bug (splits referring to parent RDDs or parent splits) in UnionRDD and CoalescedRDD. Fixed bug in testing ShuffledRDD. Removed unnecessary and useless map-side combining step for narrow dependencies in CoGroupedRDD. Removed unncessary WeakReference stuff from many other RDDs. --- .../main/scala/spark/rdd/CartesianRDD.scala | 1 - .../main/scala/spark/rdd/CoGroupedRDD.scala | 9 +- .../main/scala/spark/rdd/CoalescedRDD.scala | 25 +- .../main/scala/spark/rdd/FilteredRDD.scala | 6 +- .../main/scala/spark/rdd/FlatMappedRDD.scala | 6 +- .../src/main/scala/spark/rdd/GlommedRDD.scala | 6 +- .../scala/spark/rdd/MapPartitionsRDD.scala | 6 +- .../spark/rdd/MapPartitionsWithSplitRDD.scala | 6 +- core/src/main/scala/spark/rdd/MappedRDD.scala | 6 +- core/src/main/scala/spark/rdd/PipedRDD.scala | 10 +- .../src/main/scala/spark/rdd/SampledRDD.scala | 12 +- .../main/scala/spark/rdd/ShuffledRDD.scala | 16 +- core/src/main/scala/spark/rdd/UnionRDD.scala | 19 +- .../test/scala/spark/CheckpointSuite.scala | 269 ++++++++++++++---- 14 files changed, 286 insertions(+), 111 deletions(-) diff --git a/core/src/main/scala/spark/rdd/CartesianRDD.scala b/core/src/main/scala/spark/rdd/CartesianRDD.scala index 9bfc3f8ca3..1d753a5168 100644 --- a/core/src/main/scala/spark/rdd/CartesianRDD.scala +++ b/core/src/main/scala/spark/rdd/CartesianRDD.scala @@ -1,7 +1,6 @@ package spark.rdd import spark._ -import java.lang.ref.WeakReference private[spark] class CartesianSplit(idx: Int, val s1: Split, val s2: Split) extends Split with Serializable { diff --git a/core/src/main/scala/spark/rdd/CoGroupedRDD.scala b/core/src/main/scala/spark/rdd/CoGroupedRDD.scala index adfecea966..57d472666b 100644 --- a/core/src/main/scala/spark/rdd/CoGroupedRDD.scala +++ b/core/src/main/scala/spark/rdd/CoGroupedRDD.scala @@ -17,6 +17,7 @@ import java.io.{ObjectOutputStream, IOException} private[spark] sealed trait CoGroupSplitDep extends Serializable private[spark] case class NarrowCoGroupSplitDep(rdd: RDD[_], splitIndex: Int, var split: Split = null) extends CoGroupSplitDep { + @throws(classOf[IOException]) private def writeObject(oos: ObjectOutputStream) { rdd.synchronized { @@ -50,12 +51,12 @@ class 
CoGroupedRDD[K](@transient var rdds: Seq[RDD[(_, _)]], part: Partitioner) var deps_ = { val deps = new ArrayBuffer[Dependency[_]] for ((rdd, index) <- rdds.zipWithIndex) { - val mapSideCombinedRDD = rdd.mapPartitions(aggr.combineValuesByKey(_), true) - if (mapSideCombinedRDD.partitioner == Some(part)) { - logInfo("Adding one-to-one dependency with " + mapSideCombinedRDD) - deps += new OneToOneDependency(mapSideCombinedRDD) + if (rdd.partitioner == Some(part)) { + logInfo("Adding one-to-one dependency with " + rdd) + deps += new OneToOneDependency(rdd) } else { logInfo("Adding shuffle dependency with " + rdd) + val mapSideCombinedRDD = rdd.mapPartitions(aggr.combineValuesByKey(_), true) deps += new ShuffleDependency[Any, ArrayBuffer[Any]](mapSideCombinedRDD, part) } } diff --git a/core/src/main/scala/spark/rdd/CoalescedRDD.scala b/core/src/main/scala/spark/rdd/CoalescedRDD.scala index 90c3b8bfd8..0b4499e2eb 100644 --- a/core/src/main/scala/spark/rdd/CoalescedRDD.scala +++ b/core/src/main/scala/spark/rdd/CoalescedRDD.scala @@ -1,9 +1,24 @@ package spark.rdd import spark._ -import java.lang.ref.WeakReference +import java.io.{ObjectOutputStream, IOException} -private class CoalescedRDDSplit(val index: Int, val parents: Array[Split]) extends Split +private[spark] case class CoalescedRDDSplit( + index: Int, + @transient rdd: RDD[_], + parentsIndices: Array[Int] + ) extends Split { + var parents: Seq[Split] = parentsIndices.map(rdd.splits(_)) + + @throws(classOf[IOException]) + private def writeObject(oos: ObjectOutputStream) { + rdd.synchronized { + // Update the reference to parent split at the time of task serialization + parents = parentsIndices.map(rdd.splits(_)) + oos.defaultWriteObject() + } + } +} /** * Coalesce the partitions of a parent RDD (`prev`) into fewer partitions, so that each partition of @@ -21,12 +36,12 @@ class CoalescedRDD[T: ClassManifest]( @transient var splits_ : Array[Split] = { val prevSplits = prev.splits if (prevSplits.length < maxPartitions) { - prevSplits.zipWithIndex.map{ case (s, idx) => new CoalescedRDDSplit(idx, Array(s)) } + prevSplits.map(_.index).map{idx => new CoalescedRDDSplit(idx, prev, Array(idx)) } } else { (0 until maxPartitions).map { i => val rangeStart = (i * prevSplits.length) / maxPartitions val rangeEnd = ((i + 1) * prevSplits.length) / maxPartitions - new CoalescedRDDSplit(i, prevSplits.slice(rangeStart, rangeEnd)) + new CoalescedRDDSplit(i, prev, (rangeStart until rangeEnd).toArray) }.toArray } } @@ -42,7 +57,7 @@ class CoalescedRDD[T: ClassManifest]( var deps_ : List[Dependency[_]] = List( new NarrowDependency(prev) { def getParents(id: Int): Seq[Int] = - splits(id).asInstanceOf[CoalescedRDDSplit].parents.map(_.index) + splits(id).asInstanceOf[CoalescedRDDSplit].parentsIndices } ) diff --git a/core/src/main/scala/spark/rdd/FilteredRDD.scala b/core/src/main/scala/spark/rdd/FilteredRDD.scala index 1370cf6faf..02f2e7c246 100644 --- a/core/src/main/scala/spark/rdd/FilteredRDD.scala +++ b/core/src/main/scala/spark/rdd/FilteredRDD.scala @@ -1,15 +1,13 @@ package spark.rdd -import spark.OneToOneDependency import spark.RDD import spark.Split -import java.lang.ref.WeakReference private[spark] class FilteredRDD[T: ClassManifest]( - prev: WeakReference[RDD[T]], + prev: RDD[T], f: T => Boolean) - extends RDD[T](prev.get) { + extends RDD[T](prev) { override def splits = firstParent[T].splits override def compute(split: Split) = firstParent[T].iterator(split).filter(f) diff --git a/core/src/main/scala/spark/rdd/FlatMappedRDD.scala 
b/core/src/main/scala/spark/rdd/FlatMappedRDD.scala index 6b2cc67568..cdc8ecdcfe 100644 --- a/core/src/main/scala/spark/rdd/FlatMappedRDD.scala +++ b/core/src/main/scala/spark/rdd/FlatMappedRDD.scala @@ -1,15 +1,13 @@ package spark.rdd -import spark.OneToOneDependency import spark.RDD import spark.Split -import java.lang.ref.WeakReference private[spark] class FlatMappedRDD[U: ClassManifest, T: ClassManifest]( - prev: WeakReference[RDD[T]], + prev: RDD[T], f: T => TraversableOnce[U]) - extends RDD[U](prev.get) { + extends RDD[U](prev) { override def splits = firstParent[T].splits override def compute(split: Split) = firstParent[T].iterator(split).flatMap(f) diff --git a/core/src/main/scala/spark/rdd/GlommedRDD.scala b/core/src/main/scala/spark/rdd/GlommedRDD.scala index 0f0b6ab0ff..df6f61c69d 100644 --- a/core/src/main/scala/spark/rdd/GlommedRDD.scala +++ b/core/src/main/scala/spark/rdd/GlommedRDD.scala @@ -1,13 +1,11 @@ package spark.rdd -import spark.OneToOneDependency import spark.RDD import spark.Split -import java.lang.ref.WeakReference private[spark] -class GlommedRDD[T: ClassManifest](prev: WeakReference[RDD[T]]) - extends RDD[Array[T]](prev.get) { +class GlommedRDD[T: ClassManifest](prev: RDD[T]) + extends RDD[Array[T]](prev) { override def splits = firstParent[T].splits override def compute(split: Split) = Array(firstParent[T].iterator(split).toArray).iterator } \ No newline at end of file diff --git a/core/src/main/scala/spark/rdd/MapPartitionsRDD.scala b/core/src/main/scala/spark/rdd/MapPartitionsRDD.scala index b04f56cfcc..23b9fb023b 100644 --- a/core/src/main/scala/spark/rdd/MapPartitionsRDD.scala +++ b/core/src/main/scala/spark/rdd/MapPartitionsRDD.scala @@ -1,16 +1,14 @@ package spark.rdd -import spark.OneToOneDependency import spark.RDD import spark.Split -import java.lang.ref.WeakReference private[spark] class MapPartitionsRDD[U: ClassManifest, T: ClassManifest]( - prev: WeakReference[RDD[T]], + prev: RDD[T], f: Iterator[T] => Iterator[U], preservesPartitioning: Boolean = false) - extends RDD[U](prev.get) { + extends RDD[U](prev) { override val partitioner = if (preservesPartitioning) firstParent[T].partitioner else None diff --git a/core/src/main/scala/spark/rdd/MapPartitionsWithSplitRDD.scala b/core/src/main/scala/spark/rdd/MapPartitionsWithSplitRDD.scala index 7a4b6ffb03..41955c1d7a 100644 --- a/core/src/main/scala/spark/rdd/MapPartitionsWithSplitRDD.scala +++ b/core/src/main/scala/spark/rdd/MapPartitionsWithSplitRDD.scala @@ -1,9 +1,7 @@ package spark.rdd -import spark.OneToOneDependency import spark.RDD import spark.Split -import java.lang.ref.WeakReference /** * A variant of the MapPartitionsRDD that passes the split index into the @@ -12,9 +10,9 @@ import java.lang.ref.WeakReference */ private[spark] class MapPartitionsWithSplitRDD[U: ClassManifest, T: ClassManifest]( - prev: WeakReference[RDD[T]], + prev: RDD[T], f: (Int, Iterator[T]) => Iterator[U]) - extends RDD[U](prev.get) { + extends RDD[U](prev) { override def splits = firstParent[T].splits override def compute(split: Split) = f(split.index, firstParent[T].iterator(split)) diff --git a/core/src/main/scala/spark/rdd/MappedRDD.scala b/core/src/main/scala/spark/rdd/MappedRDD.scala index 8fa1872e0a..6f8cb21fd3 100644 --- a/core/src/main/scala/spark/rdd/MappedRDD.scala +++ b/core/src/main/scala/spark/rdd/MappedRDD.scala @@ -1,15 +1,13 @@ package spark.rdd -import spark.OneToOneDependency import spark.RDD import spark.Split -import java.lang.ref.WeakReference private[spark] class MappedRDD[U: ClassManifest, T: 
ClassManifest]( - prev: WeakReference[RDD[T]], + prev: RDD[T], f: T => U) - extends RDD[U](prev.get) { + extends RDD[U](prev) { override def splits = firstParent[T].splits override def compute(split: Split) = firstParent[T].iterator(split).map(f) diff --git a/core/src/main/scala/spark/rdd/PipedRDD.scala b/core/src/main/scala/spark/rdd/PipedRDD.scala index d9293a9d1a..d2047375ea 100644 --- a/core/src/main/scala/spark/rdd/PipedRDD.scala +++ b/core/src/main/scala/spark/rdd/PipedRDD.scala @@ -8,11 +8,9 @@ import scala.collection.JavaConversions._ import scala.collection.mutable.ArrayBuffer import scala.io.Source -import spark.OneToOneDependency import spark.RDD import spark.SparkEnv import spark.Split -import java.lang.ref.WeakReference /** @@ -20,16 +18,16 @@ import java.lang.ref.WeakReference * (printing them one per line) and returns the output as a collection of strings. */ class PipedRDD[T: ClassManifest]( - prev: WeakReference[RDD[T]], + prev: RDD[T], command: Seq[String], envVars: Map[String, String]) - extends RDD[String](prev.get) { + extends RDD[String](prev) { - def this(prev: WeakReference[RDD[T]], command: Seq[String]) = this(prev, command, Map()) + def this(prev: RDD[T], command: Seq[String]) = this(prev, command, Map()) // Similar to Runtime.exec(), if we are given a single string, split it into words // using a standard StringTokenizer (i.e. by spaces) - def this(prev: WeakReference[RDD[T]], command: String) = this(prev, PipedRDD.tokenize(command)) + def this(prev: RDD[T], command: String) = this(prev, PipedRDD.tokenize(command)) override def splits = firstParent[T].splits diff --git a/core/src/main/scala/spark/rdd/SampledRDD.scala b/core/src/main/scala/spark/rdd/SampledRDD.scala index f273f257f8..c622e14a66 100644 --- a/core/src/main/scala/spark/rdd/SampledRDD.scala +++ b/core/src/main/scala/spark/rdd/SampledRDD.scala @@ -7,7 +7,6 @@ import cern.jet.random.engine.DRand import spark.RDD import spark.OneToOneDependency import spark.Split -import java.lang.ref.WeakReference private[spark] class SampledRDDSplit(val prev: Split, val seed: Int) extends Split with Serializable { @@ -15,14 +14,14 @@ class SampledRDDSplit(val prev: Split, val seed: Int) extends Split with Seriali } class SampledRDD[T: ClassManifest]( - prev: WeakReference[RDD[T]], + prev: RDD[T], withReplacement: Boolean, frac: Double, seed: Int) - extends RDD[T](prev.get) { + extends RDD[T](prev) { @transient - val splits_ = { + var splits_ : Array[Split] = { val rg = new Random(seed) firstParent[T].splits.map(x => new SampledRDDSplit(x, rg.nextInt)) } @@ -51,4 +50,9 @@ class SampledRDD[T: ClassManifest]( firstParent[T].iterator(split.prev).filter(x => (rand.nextDouble <= frac)) } } + + override def changeDependencies(newRDD: RDD[_]) { + dependencies_ = List(new OneToOneDependency(newRDD.asInstanceOf[RDD[Any]])) + splits_ = newRDD.splits + } } diff --git a/core/src/main/scala/spark/rdd/ShuffledRDD.scala b/core/src/main/scala/spark/rdd/ShuffledRDD.scala index 31774585f4..a9dd3f35ed 100644 --- a/core/src/main/scala/spark/rdd/ShuffledRDD.scala +++ b/core/src/main/scala/spark/rdd/ShuffledRDD.scala @@ -1,11 +1,7 @@ package spark.rdd -import spark.Partitioner -import spark.RDD -import spark.ShuffleDependency -import spark.SparkEnv -import spark.Split -import java.lang.ref.WeakReference +import spark._ +import scala.Some private[spark] class ShuffledRDDSplit(val idx: Int) extends Split { override val index = idx @@ -14,15 +10,15 @@ private[spark] class ShuffledRDDSplit(val idx: Int) extends Split { /** * The resulting RDD 
from a shuffle (e.g. repartitioning of data). - * @param parent the parent RDD. + * @param prev the parent RDD. * @param part the partitioner used to partition the RDD * @tparam K the key class. * @tparam V the value class. */ class ShuffledRDD[K, V]( - @transient prev: WeakReference[RDD[(K, V)]], + prev: RDD[(K, V)], part: Partitioner) - extends RDD[(K, V)](prev.get.context, List(new ShuffleDependency(prev.get, part))) { + extends RDD[(K, V)](prev.context, List(new ShuffleDependency(prev, part))) { override val partitioner = Some(part) @@ -37,7 +33,7 @@ class ShuffledRDD[K, V]( } override def changeDependencies(newRDD: RDD[_]) { - dependencies_ = Nil + dependencies_ = List(new OneToOneDependency(newRDD.asInstanceOf[RDD[Any]])) splits_ = newRDD.splits } } diff --git a/core/src/main/scala/spark/rdd/UnionRDD.scala b/core/src/main/scala/spark/rdd/UnionRDD.scala index 30eb8483b6..a5948dd1f1 100644 --- a/core/src/main/scala/spark/rdd/UnionRDD.scala +++ b/core/src/main/scala/spark/rdd/UnionRDD.scala @@ -3,18 +3,28 @@ package spark.rdd import scala.collection.mutable.ArrayBuffer import spark._ -import java.lang.ref.WeakReference +import java.io.{ObjectOutputStream, IOException} private[spark] class UnionSplit[T: ClassManifest]( - idx: Int, + idx: Int, rdd: RDD[T], - split: Split) + splitIndex: Int, + var split: Split = null) extends Split with Serializable { def iterator() = rdd.iterator(split) def preferredLocations() = rdd.preferredLocations(split) override val index: Int = idx + + @throws(classOf[IOException]) + private def writeObject(oos: ObjectOutputStream) { + rdd.synchronized { + // Update the reference to parent split at the time of task serialization + split = rdd.splits(splitIndex) + oos.defaultWriteObject() + } + } } class UnionRDD[T: ClassManifest]( @@ -27,7 +37,7 @@ class UnionRDD[T: ClassManifest]( val array = new Array[Split](rdds.map(_.splits.size).sum) var pos = 0 for (rdd <- rdds; split <- rdd.splits) { - array(pos) = new UnionSplit(pos, rdd, split) + array(pos) = new UnionSplit(pos, rdd, split.index) pos += 1 } array @@ -52,7 +62,6 @@ class UnionRDD[T: ClassManifest]( override def preferredLocations(s: Split): Seq[String] = s.asInstanceOf[UnionSplit[T]].preferredLocations() - override def changeDependencies(newRDD: RDD[_]) { deps_ = List(new OneToOneDependency(newRDD)) splits_ = newRDD.splits diff --git a/core/src/test/scala/spark/CheckpointSuite.scala b/core/src/test/scala/spark/CheckpointSuite.scala index 2cafef444c..51bd59e2b1 100644 --- a/core/src/test/scala/spark/CheckpointSuite.scala +++ b/core/src/test/scala/spark/CheckpointSuite.scala @@ -2,17 +2,16 @@ package spark import org.scalatest.{BeforeAndAfter, FunSuite} import java.io.File -import rdd.{BlockRDD, CoalescedRDD, MapPartitionsWithSplitRDD} +import spark.rdd._ import spark.SparkContext._ import storage.StorageLevel -import java.util.concurrent.Semaphore -import collection.mutable.ArrayBuffer class CheckpointSuite extends FunSuite with BeforeAndAfter with Logging { initLogging() var sc: SparkContext = _ var checkpointDir: File = _ + val partitioner = new HashPartitioner(2) before { checkpointDir = File.createTempFile("temp", "") @@ -40,7 +39,6 @@ class CheckpointSuite extends FunSuite with BeforeAndAfter with Logging { parCollection.checkpoint() assert(parCollection.dependencies === Nil) val result = parCollection.collect() - sleep(parCollection) // slightly extra time as loading classes for the first can take some time assert(sc.objectFile[Int](parCollection.getCheckpointFile.get).collect() === result) 
assert(parCollection.dependencies != Nil) assert(parCollection.collect() === result) @@ -53,7 +51,6 @@ class CheckpointSuite extends FunSuite with BeforeAndAfter with Logging { val blockRDD = new BlockRDD[String](sc, Array(blockId)) blockRDD.checkpoint() val result = blockRDD.collect() - sleep(blockRDD) assert(sc.objectFile[String](blockRDD.getCheckpointFile.get).collect() === result) assert(blockRDD.dependencies != Nil) assert(blockRDD.collect() === result) @@ -68,79 +65,247 @@ class CheckpointSuite extends FunSuite with BeforeAndAfter with Logging { testCheckpointing(_.mapPartitions(_.map(_.toString))) testCheckpointing(r => new MapPartitionsWithSplitRDD(r, (i: Int, iter: Iterator[Int]) => iter.map(_.toString) )) - testCheckpointing(_.map(x => (x % 2, 1)).reduceByKey(_ + _).mapValues(_.toString), 1000) - testCheckpointing(_.map(x => (x % 2, 1)).reduceByKey(_ + _).flatMapValues(x => 1 to x), 1000) + testCheckpointing(_.map(x => (x % 2, 1)).reduceByKey(_ + _).mapValues(_.toString)) + testCheckpointing(_.map(x => (x % 2, 1)).reduceByKey(_ + _).flatMapValues(x => 1 to x)) testCheckpointing(_.pipe(Seq("cat"))) } test("ShuffledRDD") { - testCheckpointing(_.map(x => (x % 2, 1)).reduceByKey(_ + _)) + // Creating ShuffledRDD directly as PairRDDFunctions.combineByKey produces a MapPartitionedRDD + testCheckpointing(rdd => { + new ShuffledRDD(rdd.map(x => (x % 2, 1)), partitioner) + }) } test("UnionRDD") { - testCheckpointing(_.union(sc.makeRDD(5 to 6, 4))) + def otherRDD = sc.makeRDD(1 to 10, 4) + testCheckpointing(_.union(otherRDD), false, true) + testParentCheckpointing(_.union(otherRDD), false, true) } test("CartesianRDD") { - testCheckpointing(_.cartesian(sc.makeRDD(5 to 6, 4)), 1000) + def otherRDD = sc.makeRDD(1 to 10, 4) + testCheckpointing(_.cartesian(otherRDD)) + testParentCheckpointing(_.cartesian(otherRDD), true, false) } test("CoalescedRDD") { testCheckpointing(new CoalescedRDD(_, 2)) + + // Test whether size of CoalescedRDD reduce in size after parent RDD is checkpointed + // Current implementation of CoalescedRDDSplit has transient reference to parent RDD, + // so does not serialize the RDD (not need to check its size). + testParentCheckpointing(new CoalescedRDD(_, 2), true, false) + + // Test that the CoalescedRDDSplit updates parent splits (CoalescedRDDSplit.parents) after + // the parent RDD has been checkpointed and parent splits have been changed to HadoopSplits. 
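[Editor's sketch, not part of the patch: the pattern this CoalescedRDD test exercises. A split keeps only its parents' indices plus a transient reference to the parent, and re-resolves the concrete parent splits inside writeObject, so a task serialized after the parent has been checkpointed ships the parent's new splits instead of stale ones. All names below are hypothetical stand-ins.]

import java.io.{IOException, ObjectOutputStream}

// Stands in for an RDD whose splits array may be replaced after checkpointing.
class ParentLike(@volatile var currentSplits: Array[String]) extends Serializable

class IndexBackedSplit(@transient val parent: ParentLike, val indices: Array[Int])
  extends Serializable {

  // Resolved eagerly for local use, but refreshed at serialization time below.
  var parents: Seq[String] = indices.map(parent.currentSplits(_))

  @throws(classOf[IOException])
  private def writeObject(oos: ObjectOutputStream) {
    // Refresh just before shipping: if the parent was checkpointed in the meantime,
    // its splits array has been replaced and the new entries are picked up here.
    parents = indices.map(parent.currentSplits(_))
    oos.defaultWriteObject()
  }
}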
+ // Note that this test is very specific to the current implementation of CoalescedRDDSplits + val ones = sc.makeRDD(1 to 100, 10).map(x => x) + ones.checkpoint // checkpoint that MappedRDD + val coalesced = new CoalescedRDD(ones, 2) + val splitBeforeCheckpoint = + serializeDeserialize(coalesced.splits.head.asInstanceOf[CoalescedRDDSplit]) + coalesced.count() // do the checkpointing + val splitAfterCheckpoint = + serializeDeserialize(coalesced.splits.head.asInstanceOf[CoalescedRDDSplit]) + assert( + splitAfterCheckpoint.parents.head != splitBeforeCheckpoint.parents.head, + "CoalescedRDDSplit.parents not updated after parent RDD checkpointed" + ) } test("CoGroupedRDD") { - val rdd2 = sc.makeRDD(5 to 6, 4).map(x => (x % 2, 1)) - testCheckpointing(rdd1 => rdd1.map(x => (x % 2, 1)).cogroup(rdd2)) - testCheckpointing(rdd1 => rdd1.map(x => (x % 2, x)).join(rdd2)) + // Test serialized size + // RDD with long lineage of one-to-one dependencies through cogroup transformations + val longLineageRDD1 = generateLongLineageRDDForCoGroupedRDD() + testCheckpointing(rdd1 => { + CheckpointSuite.cogroup(longLineageRDD1, rdd1.map(x => (x % 2, 1)), partitioner) + }, false, true) - // Special test to make sure that the CoGroupSplit of CoGroupedRDD do not - // hold on to the splits of its parent RDDs, as the splits of parent RDDs - // may change while checkpointing. Rather the splits of parent RDDs must - // be fetched at the time of serialization to ensure the latest splits to - // be sent along with the task. + val longLineageRDD2 = generateLongLineageRDDForCoGroupedRDD() + testParentCheckpointing(rdd1 => { + CheckpointSuite.cogroup(longLineageRDD2, sc.makeRDD(1 to 2, 2).map(x => (x % 2, 1)), partitioner) + }, false, true) + } + /** + * Test checkpointing of the final RDD generated by the given operation. By default, + * this method tests whether the size of serialized RDD has reduced after checkpointing or not. + * It can also test whether the size of serialized RDD splits has reduced after checkpointing or + * not, but this is not done by default as usually the splits do not refer to any RDD and + * therefore never store the lineage. + */ + def testCheckpointing[U: ClassManifest]( + op: (RDD[Int]) => RDD[U], + testRDDSize: Boolean = true, + testRDDSplitSize: Boolean = false + ) { + // Generate the final RDD using given RDD operation + val baseRDD = generateLongLineageRDD + val operatedRDD = op(baseRDD) + val parentRDD = operatedRDD.dependencies.headOption.orNull + val rddType = operatedRDD.getClass.getSimpleName + + // Find serialized sizes before and after the checkpoint + val (rddSizeBeforeCheckpoint, splitSizeBeforeCheckpoint) = getSerializedSizes(operatedRDD) + operatedRDD.checkpoint() + val result = operatedRDD.collect() + val (rddSizeAfterCheckpoint, splitSizeAfterCheckpoint) = getSerializedSizes(operatedRDD) + + // Test whether the checkpoint file has been created + assert(sc.objectFile[U](operatedRDD.getCheckpointFile.get).collect() === result) + + // Test whether dependencies have been changed from its earlier parent RDD + assert(operatedRDD.dependencies.head.rdd != parentRDD) + + // Test whether the splits have been changed to the new Hadoop splits + assert(operatedRDD.splits.toList === operatedRDD.checkpointData.cpRDDSplits.toList) + + // Test whether the data in the checkpointed RDD is same as original + assert(operatedRDD.collect() === result) + + // Test whether serialized size of the RDD has reduced. 
If the RDD + // does not have any dependency to another RDD (e.g., ParallelCollection, + // ShuffleRDD with ShuffleDependency), it may not reduce in size after checkpointing. + if (testRDDSize) { + println("Size of " + rddType + + "[" + rddSizeBeforeCheckpoint + " --> " + rddSizeAfterCheckpoint + "]") + assert( + rddSizeAfterCheckpoint < rddSizeBeforeCheckpoint, + "Size of " + rddType + " did not reduce after checkpointing " + + "[" + rddSizeBeforeCheckpoint + " --> " + rddSizeAfterCheckpoint + "]" + ) + } + + // Test whether serialized size of the splits has reduced. If the splits + // do not have any non-transient reference to another RDD or another RDD's splits, it + // does not refer to a lineage and therefore may not reduce in size after checkpointing. + // However, if the original splits before checkpointing do refer to a parent RDD, the splits + // must be forgotten after checkpointing (to remove all reference to parent RDDs) and + // replaced with the HadoopSplits of the checkpointed RDD. + if (testRDDSplitSize) { + println("Size of " + rddType + " splits " + + "[" + splitSizeBeforeCheckpoint + " --> " + splitSizeAfterCheckpoint + "]") + assert( + splitSizeAfterCheckpoint < splitSizeBeforeCheckpoint, + "Size of " + rddType + " splits did not reduce after checkpointing " + + "[" + splitSizeBeforeCheckpoint + " --> " + splitSizeAfterCheckpoint + "]" + ) + } + } + + /** + * Test whether checkpointing of the parent of the generated RDD also + * truncates the lineage or not. Some RDDs like CoGroupedRDD hold on to its parent + * RDDs splits. So even if the parent RDD is checkpointed and its splits changed, + * this RDD will remember the splits and therefore potentially the whole lineage. + */ + def testParentCheckpointing[U: ClassManifest]( + op: (RDD[Int]) => RDD[U], + testRDDSize: Boolean, + testRDDSplitSize: Boolean + ) { + // Generate the final RDD using given RDD operation + val baseRDD = generateLongLineageRDD + val operatedRDD = op(baseRDD) + val parentRDD = operatedRDD.dependencies.head.rdd + val rddType = operatedRDD.getClass.getSimpleName + val parentRDDType = parentRDD.getClass.getSimpleName + + // Find serialized sizes before and after the checkpoint + val (rddSizeBeforeCheckpoint, splitSizeBeforeCheckpoint) = getSerializedSizes(operatedRDD) + parentRDD.checkpoint() // checkpoint the parent RDD, not the generated one + val result = operatedRDD.collect() + val (rddSizeAfterCheckpoint, splitSizeAfterCheckpoint) = getSerializedSizes(operatedRDD) + + // Test whether the data in the checkpointed RDD is same as original + assert(operatedRDD.collect() === result) + + // Test whether serialized size of the RDD has reduced because of its parent being + // checkpointed. If this RDD or its parent RDD do not have any dependency + // to another RDD (e.g., ParallelCollection, ShuffleRDD with ShuffleDependency), it may + // not reduce in size after checkpointing. + if (testRDDSize) { + assert( + rddSizeAfterCheckpoint < rddSizeBeforeCheckpoint, + "Size of " + rddType + " did not reduce after parent checkpointing parent " + parentRDDType + + "[" + rddSizeBeforeCheckpoint + " --> " + rddSizeAfterCheckpoint + "]" + ) + } + + // Test whether serialized size of the splits has reduced because of its parent being + // checkpointed. If the splits do not have any non-transient reference to another RDD + // or another RDD's splits, it does not refer to a lineage and therefore may not reduce + // in size after checkpointing. 
However, if the splits do refer to the *splits* of a parent + // RDD, then these splits must update reference to the parent RDD splits as the parent RDD's + // splits must have changed after checkpointing. + if (testRDDSplitSize) { + assert( + splitSizeAfterCheckpoint < splitSizeBeforeCheckpoint, + "Size of " + rddType + " splits did not reduce after checkpointing parent " + parentRDDType + + "[" + splitSizeBeforeCheckpoint + " --> " + splitSizeAfterCheckpoint + "]" + ) + } + + } + + /** + * Generate an RDD with a long lineage of one-to-one dependencies. + */ + def generateLongLineageRDD(): RDD[Int] = { + var rdd = sc.makeRDD(1 to 100, 4) + for (i <- 1 to 20) { + rdd = rdd.map(x => x) + } + rdd + } + + /** + * Generate an RDD with a long lineage specifically for CoGroupedRDD. + * A CoGroupedRDD can have a long lineage only one of its parents have a long lineage + * and narrow dependency with this RDD. This method generate such an RDD by a sequence + * of cogroups and mapValues which creates a long lineage of narrow dependencies. + */ + def generateLongLineageRDDForCoGroupedRDD() = { val add = (x: (Seq[Int], Seq[Int])) => (x._1 ++ x._2).reduce(_ + _) - val ones = sc.parallelize(1 to 100, 1).map(x => (x,1)) - val reduced = ones.reduceByKey(_ + _) - val seqOfCogrouped = new ArrayBuffer[RDD[(Int, Int)]]() - seqOfCogrouped += reduced.cogroup(ones).mapValues[Int](add) + def ones: RDD[(Int, Int)] = sc.makeRDD(1 to 2, 2).map(x => (x % 2, 1)).reduceByKey(partitioner, _ + _) + + var cogrouped: RDD[(Int, (Seq[Int], Seq[Int]))] = ones.cogroup(ones) for(i <- 1 to 10) { - seqOfCogrouped += seqOfCogrouped.last.cogroup(ones).mapValues(add) + cogrouped = cogrouped.mapValues(add).cogroup(ones) } - val finalCogrouped = seqOfCogrouped.last - val intermediateCogrouped = seqOfCogrouped(5) - - val bytesBeforeCheckpoint = Utils.serialize(finalCogrouped.splits) - intermediateCogrouped.checkpoint() - finalCogrouped.count() - sleep(intermediateCogrouped) - val bytesAfterCheckpoint = Utils.serialize(finalCogrouped.splits) - println("Before = " + bytesBeforeCheckpoint.size + ", after = " + bytesAfterCheckpoint.size) - assert(bytesAfterCheckpoint.size < bytesBeforeCheckpoint.size, - "CoGroupedSplits still holds on to the splits of its parent RDDs") + cogrouped.mapValues(add) } - def testCheckpointing[U: ClassManifest](op: (RDD[Int]) => RDD[U], sleepTime: Long = 500) { - val parCollection = sc.makeRDD(1 to 4, 4) - val operatedRDD = op(parCollection) - operatedRDD.checkpoint() - val parentRDD = operatedRDD.dependencies.head.rdd - val result = operatedRDD.collect() - sleep(operatedRDD) - //println(parentRDD + ", " + operatedRDD.dependencies.head.rdd ) - assert(sc.objectFile[U](operatedRDD.getCheckpointFile.get).collect() === result) - assert(operatedRDD.dependencies.head.rdd != parentRDD) - assert(operatedRDD.collect() === result) + /** + * Get serialized sizes of the RDD and its splits + */ + def getSerializedSizes(rdd: RDD[_]): (Int, Int) = { + (Utils.serialize(rdd).size, Utils.serialize(rdd.splits).size) } - def sleep(rdd: RDD[_]) { - val startTime = System.currentTimeMillis() - val maxWaitTime = 5000 - while(rdd.isCheckpointed == false && System.currentTimeMillis() < startTime + maxWaitTime) { - Thread.sleep(50) - } - assert(rdd.isCheckpointed === true, "Waiting for checkpoint to complete took more than " + maxWaitTime + " ms") + /** + * Serialize and deserialize an object. 
This is useful to verify the objects + * contents after deserialization (e.g., the contents of an RDD split after + * it is sent to a slave along with a task) + */ + def serializeDeserialize[T](obj: T): T = { + val bytes = Utils.serialize(obj) + Utils.deserialize[T](bytes) } } + + +object CheckpointSuite { + // This is a custom cogroup function that does not use mapValues like + // the PairRDDFunctions.cogroup() + def cogroup[K, V](first: RDD[(K, V)], second: RDD[(K, V)], part: Partitioner) = { + println("First = " + first + ", second = " + second) + new CoGroupedRDD[K]( + Seq(first.asInstanceOf[RDD[(_, _)]], second.asInstanceOf[RDD[(_, _)]]), + part + ).asInstanceOf[RDD[(K, Seq[Seq[V]])]] + } + +} \ No newline at end of file From c36ca10241991d46f2f1513b2c0c5e369d8b34f9 Mon Sep 17 00:00:00 2001 From: Patrick Wendell Date: Sat, 3 Nov 2012 15:19:41 -0700 Subject: [PATCH 139/226] Adding locality aware parallelize --- core/src/main/scala/spark/ParallelCollection.scala | 11 +++++++++-- core/src/main/scala/spark/SparkContext.scala | 10 +++++++++- 2 files changed, 18 insertions(+), 3 deletions(-) diff --git a/core/src/main/scala/spark/ParallelCollection.scala b/core/src/main/scala/spark/ParallelCollection.scala index 9725017b61..4bd9e1bd54 100644 --- a/core/src/main/scala/spark/ParallelCollection.scala +++ b/core/src/main/scala/spark/ParallelCollection.scala @@ -2,6 +2,7 @@ package spark import scala.collection.immutable.NumericRange import scala.collection.mutable.ArrayBuffer +import scala.collection.Map private[spark] class ParallelCollectionSplit[T: ClassManifest]( val rddId: Long, @@ -24,7 +25,8 @@ private[spark] class ParallelCollectionSplit[T: ClassManifest]( private[spark] class ParallelCollection[T: ClassManifest]( @transient sc : SparkContext, @transient data: Seq[T], - numSlices: Int) + numSlices: Int, + locationPrefs : Map[Int,Seq[String]]) extends RDD[T](sc, Nil) { // TODO: Right now, each split sends along its full data, even if later down the RDD chain it gets // cached. It might be worthwhile to write the data to a file in the DFS and read it in the split @@ -40,7 +42,12 @@ private[spark] class ParallelCollection[T: ClassManifest]( override def compute(s: Split) = s.asInstanceOf[ParallelCollectionSplit[T]].iterator - override def preferredLocations(s: Split): Seq[String] = Nil + override def preferredLocations(s: Split): Seq[String] = { + locationPrefs.get(splits_.indexOf(s)) match { + case Some(s) => s + case _ => Nil + } + } } private object ParallelCollection { diff --git a/core/src/main/scala/spark/SparkContext.scala b/core/src/main/scala/spark/SparkContext.scala index d7b46bee38..7ae1aea993 100644 --- a/core/src/main/scala/spark/SparkContext.scala +++ b/core/src/main/scala/spark/SparkContext.scala @@ -194,7 +194,7 @@ class SparkContext( /** Distribute a local Scala collection to form an RDD. */ def parallelize[T: ClassManifest](seq: Seq[T], numSlices: Int = defaultParallelism): RDD[T] = { - new ParallelCollection[T](this, seq, numSlices) + new ParallelCollection[T](this, seq, numSlices, Map[Int, Seq[String]]()) } /** Distribute a local Scala collection to form an RDD. */ @@ -202,6 +202,14 @@ class SparkContext( parallelize(seq, numSlices) } + /** Distribute a local Scala collection to form an RDD, with one or more + * location preferences for each object. Create a new partition for each + * collection item. 
*/ + def makeLocalityConstrainedRDD[T: ClassManifest](seq: Seq[(T, Seq[String])]): RDD[T] = { + val indexToPrefs = seq.zipWithIndex.map(t => (t._2, t._1._2)).toMap + new ParallelCollection[T](this, seq.map(_._1), seq.size, indexToPrefs) + } + /** * Read a text file from HDFS, a local file system (available on all nodes), or any * Hadoop-supported file system URI, and return it as an RDD of Strings. From 3ff9710265d4bb518b89461cfb0fcc771e61a726 Mon Sep 17 00:00:00 2001 From: Patrick Wendell Date: Fri, 7 Dec 2012 15:01:15 -0800 Subject: [PATCH 140/226] Adding Flume InputDStream --- project/SparkBuild.scala | 3 +- .../spark/streaming/FlumeInputDStream.scala | 130 ++++++++++++++++++ .../spark/streaming/NetworkInputDStream.scala | 6 +- .../spark/streaming/NetworkInputTracker.scala | 13 +- .../spark/streaming/RawInputDStream.scala | 2 + .../spark/streaming/SocketInputDStream.scala | 2 + .../spark/streaming/StreamingContext.scala | 11 ++ .../streaming/examples/FlumeEventCount.scala | 29 ++++ .../spark/streaming/InputStreamsSuite.scala | 59 +++++++- 9 files changed, 248 insertions(+), 7 deletions(-) create mode 100644 streaming/src/main/scala/spark/streaming/FlumeInputDStream.scala create mode 100644 streaming/src/main/scala/spark/streaming/examples/FlumeEventCount.scala diff --git a/project/SparkBuild.scala b/project/SparkBuild.scala index 6ef2ac477a..05f3c59681 100644 --- a/project/SparkBuild.scala +++ b/project/SparkBuild.scala @@ -91,7 +91,8 @@ object SparkBuild extends Build { "org.eclipse.jetty" % "jetty-server" % "7.5.3.v20111011", "org.scalatest" %% "scalatest" % "1.6.1" % "test", "org.scalacheck" %% "scalacheck" % "1.9" % "test", - "com.novocode" % "junit-interface" % "0.8" % "test" + "com.novocode" % "junit-interface" % "0.8" % "test", + "org.apache.flume" % "flume-ng-sdk" % "1.2.0" % "compile" ), parallelExecution := false, /* Workaround for issue #206 (fixed after SBT 0.11.0) */ diff --git a/streaming/src/main/scala/spark/streaming/FlumeInputDStream.scala b/streaming/src/main/scala/spark/streaming/FlumeInputDStream.scala new file mode 100644 index 0000000000..9c403278c3 --- /dev/null +++ b/streaming/src/main/scala/spark/streaming/FlumeInputDStream.scala @@ -0,0 +1,130 @@ +package spark.streaming + +import java.io.{ObjectInput, ObjectOutput, Externalizable} +import spark.storage.StorageLevel +import org.apache.flume.source.avro.AvroSourceProtocol +import org.apache.flume.source.avro.AvroFlumeEvent +import org.apache.flume.source.avro.Status +import org.apache.avro.ipc.specific.SpecificResponder +import org.apache.avro.ipc.NettyServer +import java.net.InetSocketAddress +import collection.JavaConversions._ +import spark.Utils +import java.nio.ByteBuffer + +class FlumeInputDStream[T: ClassManifest]( + @transient ssc_ : StreamingContext, + host: String, + port: Int, + storageLevel: StorageLevel +) extends NetworkInputDStream[SparkFlumeEvent](ssc_) { + + override def createReceiver(): NetworkReceiver[SparkFlumeEvent] = { + new FlumeReceiver(id, host, port, storageLevel) + } +} + +/** + * A wrapper class for AvroFlumeEvent's with a custom serialization format. + * + * This is necessary because AvroFlumeEvent uses inner data structures + * which are not serializable. + */ +class SparkFlumeEvent() extends Externalizable { + var event : AvroFlumeEvent = new AvroFlumeEvent() + + /* De-serialize from bytes. 
*/ + def readExternal(in: ObjectInput) { + val bodyLength = in.readInt() + val bodyBuff = new Array[Byte](bodyLength) + in.read(bodyBuff) + + val numHeaders = in.readInt() + val headers = new java.util.HashMap[CharSequence, CharSequence] + + for (i <- 0 until numHeaders) { + val keyLength = in.readInt() + val keyBuff = new Array[Byte](keyLength) + in.read(keyBuff) + val key : String = Utils.deserialize(keyBuff) + + val valLength = in.readInt() + val valBuff = new Array[Byte](valLength) + in.read(valBuff) + val value : String = Utils.deserialize(valBuff) + + headers.put(key, value) + } + + event.setBody(ByteBuffer.wrap(bodyBuff)) + event.setHeaders(headers) + } + + /* Serialize to bytes. */ + def writeExternal(out: ObjectOutput) { + val body = event.getBody.array() + out.writeInt(body.length) + out.write(body) + + val numHeaders = event.getHeaders.size() + out.writeInt(numHeaders) + for ((k, v) <- event.getHeaders) { + val keyBuff = Utils.serialize(k.toString) + out.writeInt(keyBuff.length) + out.write(keyBuff) + val valBuff = Utils.serialize(v.toString) + out.writeInt(valBuff.length) + out.write(valBuff) + } + } +} + +object SparkFlumeEvent { + def fromAvroFlumeEvent(in : AvroFlumeEvent) : SparkFlumeEvent = { + val event = new SparkFlumeEvent + event.event = in + event + } +} + +/** A simple server that implements Flume's Avro protocol. */ +class FlumeEventServer(receiver : FlumeReceiver) extends AvroSourceProtocol { + override def append(event : AvroFlumeEvent) : Status = { + receiver.dataHandler += SparkFlumeEvent.fromAvroFlumeEvent(event) + Status.OK + } + + override def appendBatch(events : java.util.List[AvroFlumeEvent]) : Status = { + events.foreach (event => + receiver.dataHandler += SparkFlumeEvent.fromAvroFlumeEvent(event)) + Status.OK + } +} + +/** A NetworkReceiver which listens for events using the + * Flume Avro interface.*/ +class FlumeReceiver( + streamId: Int, + host: String, + port: Int, + storageLevel: StorageLevel + ) extends NetworkReceiver[SparkFlumeEvent](streamId) { + + lazy val dataHandler = new DataHandler(this, storageLevel) + + protected override def onStart() { + val responder = new SpecificResponder( + classOf[AvroSourceProtocol], new FlumeEventServer(this)); + val server = new NettyServer(responder, new InetSocketAddress(host, port)); + dataHandler.start() + server.start() + logInfo("Flume receiver started") + } + + protected override def onStop() { + dataHandler.stop() + logInfo("Flume receiver stopped") + } + + override def getLocationConstraint = Some(host) +} \ No newline at end of file diff --git a/streaming/src/main/scala/spark/streaming/NetworkInputDStream.scala b/streaming/src/main/scala/spark/streaming/NetworkInputDStream.scala index d3f37b8b0e..052fc8bb74 100644 --- a/streaming/src/main/scala/spark/streaming/NetworkInputDStream.scala +++ b/streaming/src/main/scala/spark/streaming/NetworkInputDStream.scala @@ -8,7 +8,6 @@ import spark.streaming.util.{RecurringTimer, SystemClock} import spark.storage.StorageLevel import java.nio.ByteBuffer -import java.util.concurrent.ArrayBlockingQueue import akka.actor.{Props, Actor} import akka.pattern.ask @@ -63,6 +62,9 @@ abstract class NetworkReceiver[T: ClassManifest](val streamId: Int) extends Seri /** This method will be called to stop receiving data. */ protected def onStop() + /** This method conveys a placement constraint (hostname) for this receiver. */ + def getLocationConstraint() : Option[String] = None + /** * This method starts the receiver. 
First is accesses all the lazy members to * materialize them. Then it calls the user-defined onStart() method to start @@ -151,6 +153,4 @@ abstract class NetworkReceiver[T: ClassManifest](val streamId: Int) extends Seri tracker ! DeregisterReceiver(streamId, msg) } } - } - diff --git a/streaming/src/main/scala/spark/streaming/NetworkInputTracker.scala b/streaming/src/main/scala/spark/streaming/NetworkInputTracker.scala index 73ba877085..56661c2615 100644 --- a/streaming/src/main/scala/spark/streaming/NetworkInputTracker.scala +++ b/streaming/src/main/scala/spark/streaming/NetworkInputTracker.scala @@ -98,7 +98,18 @@ class NetworkInputTracker( def startReceivers() { val receivers = networkInputStreams.map(_.createReceiver()) - val tempRDD = ssc.sc.makeRDD(receivers, receivers.size) + + // We only honor constraints if all receivers have them + val hasLocationConstraints = receivers.map(_.getLocationConstraint().isDefined).reduce(_ && _) + + val tempRDD = + if (hasLocationConstraints) { + val receiversWithConstraints = receivers.map(r => (r, Seq(r.getLocationConstraint().toString))) + ssc.sc.makeLocalityConstrainedRDD[NetworkReceiver[_]](receiversWithConstraints) + } + else { + ssc.sc.makeRDD(receivers, receivers.size) + } val startReceiver = (iterator: Iterator[NetworkReceiver[_]]) => { if (!iterator.hasNext) { diff --git a/streaming/src/main/scala/spark/streaming/RawInputDStream.scala b/streaming/src/main/scala/spark/streaming/RawInputDStream.scala index d5db8e787d..fd51ed47a5 100644 --- a/streaming/src/main/scala/spark/streaming/RawInputDStream.scala +++ b/streaming/src/main/scala/spark/streaming/RawInputDStream.scala @@ -31,6 +31,8 @@ class RawNetworkReceiver(streamId: Int, host: String, port: Int, storageLevel: S var blockPushingThread: Thread = null + override def getLocationConstraint = None + def onStart() { // Open a socket to the target address and keep reading from it logInfo("Connecting to " + host + ":" + port) diff --git a/streaming/src/main/scala/spark/streaming/SocketInputDStream.scala b/streaming/src/main/scala/spark/streaming/SocketInputDStream.scala index ff99d50b76..ebbb17a39a 100644 --- a/streaming/src/main/scala/spark/streaming/SocketInputDStream.scala +++ b/streaming/src/main/scala/spark/streaming/SocketInputDStream.scala @@ -34,6 +34,8 @@ class SocketReceiver[T: ClassManifest]( lazy protected val dataHandler = new DataHandler(this, storageLevel) + override def getLocationConstraint = None + protected def onStart() { logInfo("Connecting to " + host + ":" + port) val socket = new Socket(host, port) diff --git a/streaming/src/main/scala/spark/streaming/StreamingContext.scala b/streaming/src/main/scala/spark/streaming/StreamingContext.scala index 8153dd4567..ce47bcb2da 100644 --- a/streaming/src/main/scala/spark/streaming/StreamingContext.scala +++ b/streaming/src/main/scala/spark/streaming/StreamingContext.scala @@ -15,6 +15,7 @@ import org.apache.hadoop.io.LongWritable import org.apache.hadoop.io.Text import org.apache.hadoop.mapreduce.{InputFormat => NewInputFormat} import org.apache.hadoop.mapreduce.lib.input.TextInputFormat +import org.apache.flume.source.avro.AvroFlumeEvent import org.apache.hadoop.fs.Path import java.util.UUID import spark.util.MetadataCleaner @@ -166,6 +167,16 @@ class StreamingContext private ( inputStream } + def flumeStream ( + hostname: String, + port: Int, + storageLevel: StorageLevel = StorageLevel.MEMORY_AND_DISK_SER_2): DStream[SparkFlumeEvent] = { + val inputStream = new FlumeInputDStream(this, hostname, port, storageLevel) + 
graph.addInputStream(inputStream) + inputStream + } + + def rawNetworkStream[T: ClassManifest]( hostname: String, port: Int, diff --git a/streaming/src/main/scala/spark/streaming/examples/FlumeEventCount.scala b/streaming/src/main/scala/spark/streaming/examples/FlumeEventCount.scala new file mode 100644 index 0000000000..d76c92fdd5 --- /dev/null +++ b/streaming/src/main/scala/spark/streaming/examples/FlumeEventCount.scala @@ -0,0 +1,29 @@ +package spark.streaming.examples + +import spark.util.IntParam +import spark.storage.StorageLevel +import spark.streaming._ + +object FlumeEventCount { + def main(args: Array[String]) { + if (args.length != 4) { + System.err.println( + "Usage: FlumeEventCount ") + System.exit(1) + } + + val Array(master, host, IntParam(port), IntParam(batchMillis)) = args + + // Create the context and set the batch size + val ssc = new StreamingContext(master, "FlumeEventCount", + Milliseconds(batchMillis)) + + // Create a flume stream + val stream = ssc.flumeStream(host,port,StorageLevel.MEMORY_ONLY) + + // Print out the count of events received from this server in each batch + stream.count().map(cnt => "Received " + cnt + " flume events." ).print() + + ssc.start() + } +} diff --git a/streaming/src/test/scala/spark/streaming/InputStreamsSuite.scala b/streaming/src/test/scala/spark/streaming/InputStreamsSuite.scala index e98c096725..ed9a659092 100644 --- a/streaming/src/test/scala/spark/streaming/InputStreamsSuite.scala +++ b/streaming/src/test/scala/spark/streaming/InputStreamsSuite.scala @@ -1,6 +1,6 @@ package spark.streaming -import java.net.{SocketException, Socket, ServerSocket} +import java.net.{InetSocketAddress, SocketException, Socket, ServerSocket} import java.io.{File, BufferedWriter, OutputStreamWriter} import java.util.concurrent.{TimeUnit, ArrayBlockingQueue} import collection.mutable.{SynchronizedBuffer, ArrayBuffer} @@ -10,7 +10,14 @@ import spark.Logging import scala.util.Random import org.apache.commons.io.FileUtils import org.scalatest.BeforeAndAfter - +import org.apache.flume.source.avro.AvroSourceProtocol +import org.apache.flume.source.avro.AvroFlumeEvent +import org.apache.flume.source.avro.Status +import org.apache.avro.ipc.{specific, NettyTransceiver} +import org.apache.avro.ipc.specific.SpecificRequestor +import java.nio.ByteBuffer +import collection.JavaConversions._ +import java.nio.charset.Charset class InputStreamsSuite extends TestSuiteBase with BeforeAndAfter { @@ -123,6 +130,54 @@ class InputStreamsSuite extends TestSuiteBase with BeforeAndAfter { ssc.stop() } + test("flume input stream") { + // Set up the streaming context and input streams + val ssc = new StreamingContext(master, framework, batchDuration) + val flumeStream = ssc.flumeStream("localhost", 33333, StorageLevel.MEMORY_AND_DISK) + val outputBuffer = new ArrayBuffer[Seq[SparkFlumeEvent]] + with SynchronizedBuffer[Seq[SparkFlumeEvent]] + val outputStream = new TestOutputStream(flumeStream, outputBuffer) + ssc.registerOutputStream(outputStream) + ssc.start() + + val clock = ssc.scheduler.clock.asInstanceOf[ManualClock] + val input = Seq(1, 2, 3, 4, 5) + + val transceiver = new NettyTransceiver(new InetSocketAddress("localhost", 33333)); + val client = SpecificRequestor.getClient( + classOf[AvroSourceProtocol], transceiver); + + for (i <- 0 until input.size) { + val event = new AvroFlumeEvent + event.setBody(ByteBuffer.wrap(input(i).toString.getBytes())) + event.setHeaders(Map[CharSequence, CharSequence]("test" -> "header")) + client.append(event) + Thread.sleep(500) + 
clock.addToTime(batchDuration.milliseconds) + } + + val startTime = System.currentTimeMillis() + while (outputBuffer.size < input.size && System.currentTimeMillis() - startTime < maxWaitTimeMillis) { + logInfo("output.size = " + outputBuffer.size + ", input.size = " + input.size) + Thread.sleep(100) + } + Thread.sleep(1000) + val timeTaken = System.currentTimeMillis() - startTime + assert(timeTaken < maxWaitTimeMillis, "Operation timed out after " + timeTaken + " ms") + logInfo("Stopping context") + ssc.stop() + + val decoder = Charset.forName("UTF-8").newDecoder() + + assert(outputBuffer.size === input.length) + for (i <- 0 until outputBuffer.size) { + assert(outputBuffer(i).size === 1) + val str = decoder.decode(outputBuffer(i).head.event.getBody) + assert(str.toString === input(i).toString) + assert(outputBuffer(i).head.event.getHeaders.get("test") === "header") + } + } + test("file input stream") { // Create a temporary directory From 3e796bdd57297134ed40b20d7692cd9c8cd6efba Mon Sep 17 00:00:00 2001 From: Patrick Wendell Date: Fri, 7 Dec 2012 19:16:35 -0800 Subject: [PATCH 141/226] Changes in response to TD's review. --- core/src/main/scala/spark/SparkContext.scala | 6 ++--- .../spark/streaming/FlumeInputDStream.scala | 2 +- .../spark/streaming/NetworkInputDStream.scala | 4 ++-- .../spark/streaming/NetworkInputTracker.scala | 10 ++++---- .../spark/streaming/RawInputDStream.scala | 2 +- .../spark/streaming/SocketInputDStream.scala | 2 +- .../streaming/examples/FlumeEventCount.scala | 24 +++++++++++++++---- 7 files changed, 32 insertions(+), 18 deletions(-) diff --git a/core/src/main/scala/spark/SparkContext.scala b/core/src/main/scala/spark/SparkContext.scala index 7ae1aea993..3ccdbfe10e 100644 --- a/core/src/main/scala/spark/SparkContext.scala +++ b/core/src/main/scala/spark/SparkContext.scala @@ -203,9 +203,9 @@ class SparkContext( } /** Distribute a local Scala collection to form an RDD, with one or more - * location preferences for each object. Create a new partition for each - * collection item. */ - def makeLocalityConstrainedRDD[T: ClassManifest](seq: Seq[(T, Seq[String])]): RDD[T] = { + * location preferences (hostnames of Spark nodes) for each object. + * Create a new partition for each collection item. */ + def makeRDD[T: ClassManifest](seq: Seq[(T, Seq[String])]): RDD[T] = { val indexToPrefs = seq.zipWithIndex.map(t => (t._2, t._1._2)).toMap new ParallelCollection[T](this, seq.map(_._1), seq.size, indexToPrefs) } diff --git a/streaming/src/main/scala/spark/streaming/FlumeInputDStream.scala b/streaming/src/main/scala/spark/streaming/FlumeInputDStream.scala index 9c403278c3..2959ce4540 100644 --- a/streaming/src/main/scala/spark/streaming/FlumeInputDStream.scala +++ b/streaming/src/main/scala/spark/streaming/FlumeInputDStream.scala @@ -126,5 +126,5 @@ class FlumeReceiver( logInfo("Flume receiver stopped") } - override def getLocationConstraint = Some(host) + override def getLocationPreference = Some(host) } \ No newline at end of file diff --git a/streaming/src/main/scala/spark/streaming/NetworkInputDStream.scala b/streaming/src/main/scala/spark/streaming/NetworkInputDStream.scala index 052fc8bb74..4e4e9fc942 100644 --- a/streaming/src/main/scala/spark/streaming/NetworkInputDStream.scala +++ b/streaming/src/main/scala/spark/streaming/NetworkInputDStream.scala @@ -62,8 +62,8 @@ abstract class NetworkReceiver[T: ClassManifest](val streamId: Int) extends Seri /** This method will be called to stop receiving data. 
*/ protected def onStop() - /** This method conveys a placement constraint (hostname) for this receiver. */ - def getLocationConstraint() : Option[String] = None + /** This method conveys a placement preference (hostname) for this receiver. */ + def getLocationPreference() : Option[String] = None /** * This method starts the receiver. First is accesses all the lazy members to diff --git a/streaming/src/main/scala/spark/streaming/NetworkInputTracker.scala b/streaming/src/main/scala/spark/streaming/NetworkInputTracker.scala index 56661c2615..b421f795ee 100644 --- a/streaming/src/main/scala/spark/streaming/NetworkInputTracker.scala +++ b/streaming/src/main/scala/spark/streaming/NetworkInputTracker.scala @@ -99,13 +99,13 @@ class NetworkInputTracker( def startReceivers() { val receivers = networkInputStreams.map(_.createReceiver()) - // We only honor constraints if all receivers have them - val hasLocationConstraints = receivers.map(_.getLocationConstraint().isDefined).reduce(_ && _) + // Right now, we only honor preferences if all receivers have them + val hasLocationPreferences = receivers.map(_.getLocationPreference().isDefined).reduce(_ && _) val tempRDD = - if (hasLocationConstraints) { - val receiversWithConstraints = receivers.map(r => (r, Seq(r.getLocationConstraint().toString))) - ssc.sc.makeLocalityConstrainedRDD[NetworkReceiver[_]](receiversWithConstraints) + if (hasLocationPreferences) { + val receiversWithPreferences = receivers.map(r => (r, Seq(r.getLocationPreference().toString))) + ssc.sc.makeRDD[NetworkReceiver[_]](receiversWithPreferences) } else { ssc.sc.makeRDD(receivers, receivers.size) diff --git a/streaming/src/main/scala/spark/streaming/RawInputDStream.scala b/streaming/src/main/scala/spark/streaming/RawInputDStream.scala index fd51ed47a5..6acaa9aab1 100644 --- a/streaming/src/main/scala/spark/streaming/RawInputDStream.scala +++ b/streaming/src/main/scala/spark/streaming/RawInputDStream.scala @@ -31,7 +31,7 @@ class RawNetworkReceiver(streamId: Int, host: String, port: Int, storageLevel: S var blockPushingThread: Thread = null - override def getLocationConstraint = None + override def getLocationPreference = None def onStart() { // Open a socket to the target address and keep reading from it diff --git a/streaming/src/main/scala/spark/streaming/SocketInputDStream.scala b/streaming/src/main/scala/spark/streaming/SocketInputDStream.scala index ebbb17a39a..a9e37c0ff0 100644 --- a/streaming/src/main/scala/spark/streaming/SocketInputDStream.scala +++ b/streaming/src/main/scala/spark/streaming/SocketInputDStream.scala @@ -34,7 +34,7 @@ class SocketReceiver[T: ClassManifest]( lazy protected val dataHandler = new DataHandler(this, storageLevel) - override def getLocationConstraint = None + override def getLocationPreference = None protected def onStart() { logInfo("Connecting to " + host + ":" + port) diff --git a/streaming/src/main/scala/spark/streaming/examples/FlumeEventCount.scala b/streaming/src/main/scala/spark/streaming/examples/FlumeEventCount.scala index d76c92fdd5..e60ce483a3 100644 --- a/streaming/src/main/scala/spark/streaming/examples/FlumeEventCount.scala +++ b/streaming/src/main/scala/spark/streaming/examples/FlumeEventCount.scala @@ -4,19 +4,33 @@ import spark.util.IntParam import spark.storage.StorageLevel import spark.streaming._ +/** + * Produce a streaming count of events received from Flume. + * + * This should be used in conjunction with an AvroSink in Flume. It will start + * an Avro server on at the request host:port address and listen for requests. 
+ * Your Flume AvroSink should be pointed to this address. + * + * Usage: FlumeEventCount + * + * is a Spark master URL + * is the host the Flume receiver will be started on - a receiver + * creates a server and listens for flume events. + * is the port the Flume receiver will listen on. + */ object FlumeEventCount { def main(args: Array[String]) { - if (args.length != 4) { + if (args.length != 3) { System.err.println( - "Usage: FlumeEventCount ") + "Usage: FlumeEventCount ") System.exit(1) } - val Array(master, host, IntParam(port), IntParam(batchMillis)) = args + val Array(master, host, IntParam(port)) = args + val batchInterval = Milliseconds(2000) // Create the context and set the batch size - val ssc = new StreamingContext(master, "FlumeEventCount", - Milliseconds(batchMillis)) + val ssc = new StreamingContext(master, "FlumeEventCount", batchInterval) // Create a flume stream val stream = ssc.flumeStream(host,port,StorageLevel.MEMORY_ONLY) From e42721601898ff199ca1c6cfeae159ad3ef691e3 Mon Sep 17 00:00:00 2001 From: Tathagata Das Date: Sat, 8 Dec 2012 12:46:59 -0800 Subject: [PATCH 142/226] Removed unnecessary testcases. --- core/src/test/scala/spark/CheckpointSuite.scala | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/core/src/test/scala/spark/CheckpointSuite.scala b/core/src/test/scala/spark/CheckpointSuite.scala index 8622ce92aa..41d84cb01c 100644 --- a/core/src/test/scala/spark/CheckpointSuite.scala +++ b/core/src/test/scala/spark/CheckpointSuite.scala @@ -121,7 +121,7 @@ class CheckpointSuite extends FunSuite with BeforeAndAfter with Logging { assert(bytesAfterCheckpoint.size < bytesBeforeCheckpoint.size, "CoGroupedSplits still holds on to the splits of its parent RDDs") } - + /* /** * This test forces two ResultTasks of the same job to be launched before and after * the checkpointing of job's RDD is completed. @@ -149,7 +149,7 @@ class CheckpointSuite extends FunSuite with BeforeAndAfter with Logging { } testThreading(op1, op2) } - + */ def testCheckpointing[U: ClassManifest](op: (RDD[Int]) => RDD[U], sleepTime: Long = 500) { val parCollection = sc.makeRDD(1 to 4, 4) @@ -163,7 +163,7 @@ class CheckpointSuite extends FunSuite with BeforeAndAfter with Logging { assert(operatedRDD.dependencies.head.rdd != parentRDD) assert(operatedRDD.collect() === result) } - + /* def testThreading[U: ClassManifest, V: ClassManifest](op1: (RDD[Int]) => RDD[U], op2: (RDD[U]) => RDD[V]) { val parCollection = sc.makeRDD(1 to 2, 2) @@ -201,7 +201,7 @@ class CheckpointSuite extends FunSuite with BeforeAndAfter with Logging { val correctResult = secondRDD.collect() assert(result === correctResult) } - + */ def sleep(rdd: RDD[_]) { val startTime = System.currentTimeMillis() val maxWaitTime = 5000 From 746afc2e6513d5f32f261ec0dbf2823f78a5e960 Mon Sep 17 00:00:00 2001 From: Tathagata Das Date: Mon, 10 Dec 2012 23:36:37 -0800 Subject: [PATCH 143/226] Bunch of bug fixes related to checkpointing in RDDs. RDDCheckpointData object is used to lock all serialization and dependency changes for checkpointing. ResultTask converted to Externalizable and serialized RDD is cached like ShuffleMapTask. 
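[Editor's note: a minimal standalone sketch of the checkpoint state machine this commit introduces; the names here are illustrative only, the real implementation is the RDDCheckpointData class in the diff below. The point is that every transition from not-checkpointed through marked, in-progress, and checkpointed is taken under one shared lock, so task serialization and dependency rewrites never observe a half-finished checkpoint.]

object CheckpointStateSketch {
  private object Lock  // stands in for the shared lock (the RDDCheckpointData companion object)

  class CheckpointState extends Serializable {
    private var state = 0  // 0 = not checkpointed, 1 = marked, 2 = in progress, 3 = checkpointed

    def mark() { Lock.synchronized { if (state == 0) state = 1 } }
    def start(): Boolean = Lock.synchronized {
      // Only one caller wins the right to perform the (expensive) checkpoint.
      if (state == 1) { state = 2; true } else false
    }
    def finish() { Lock.synchronized { state = 3 } }
    def isCheckpointed: Boolean = Lock.synchronized { state == 3 }
  }

  def main(args: Array[String]) {
    val cp = new CheckpointState
    cp.mark()
    if (cp.start()) {
      // ... write the dataset out, atomically swap the RDD's dependencies, then ...
      cp.finish()
    }
    println("checkpointed: " + cp.isCheckpointed)
  }
}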
--- .../main/scala/spark/ParallelCollection.scala | 10 +- core/src/main/scala/spark/RDD.scala | 10 +- .../main/scala/spark/RDDCheckpointData.scala | 76 +++++-- core/src/main/scala/spark/SparkContext.scala | 5 +- core/src/main/scala/spark/rdd/BlockRDD.scala | 9 +- .../main/scala/spark/rdd/CartesianRDD.scala | 21 +- .../main/scala/spark/rdd/CoGroupedRDD.scala | 18 +- .../main/scala/spark/rdd/CoalescedRDD.scala | 8 +- core/src/main/scala/spark/rdd/HadoopRDD.scala | 4 + core/src/main/scala/spark/rdd/UnionRDD.scala | 15 +- .../scala/spark/scheduler/ResultTask.scala | 95 ++++++++- .../spark/scheduler/ShuffleMapTask.scala | 21 +- .../test/scala/spark/CheckpointSuite.scala | 201 +++++++++++++++--- 13 files changed, 396 insertions(+), 97 deletions(-) diff --git a/core/src/main/scala/spark/ParallelCollection.scala b/core/src/main/scala/spark/ParallelCollection.scala index 9725017b61..9d12af6912 100644 --- a/core/src/main/scala/spark/ParallelCollection.scala +++ b/core/src/main/scala/spark/ParallelCollection.scala @@ -28,10 +28,11 @@ private[spark] class ParallelCollection[T: ClassManifest]( extends RDD[T](sc, Nil) { // TODO: Right now, each split sends along its full data, even if later down the RDD chain it gets // cached. It might be worthwhile to write the data to a file in the DFS and read it in the split - // instead. UPDATE: With the new changes to enable checkpointing, this an be done. + // instead. + // UPDATE: A parallel collection can be checkpointed to HDFS, which achieves this goal. @transient - val splits_ = { + var splits_ : Array[Split] = { val slices = ParallelCollection.slice(data, numSlices).toArray slices.indices.map(i => new ParallelCollectionSplit(id, i, slices(i))).toArray } @@ -41,6 +42,11 @@ private[spark] class ParallelCollection[T: ClassManifest]( override def compute(s: Split) = s.asInstanceOf[ParallelCollectionSplit[T]].iterator override def preferredLocations(s: Split): Seq[String] = Nil + + override def changeDependencies(newRDD: RDD[_]) { + dependencies_ = List(new OneToOneDependency(newRDD.asInstanceOf[RDD[Any]])) + splits_ = newRDD.splits + } } private object ParallelCollection { diff --git a/core/src/main/scala/spark/RDD.scala b/core/src/main/scala/spark/RDD.scala index e9bd131e61..efa03d5185 100644 --- a/core/src/main/scala/spark/RDD.scala +++ b/core/src/main/scala/spark/RDD.scala @@ -163,7 +163,7 @@ abstract class RDD[T: ClassManifest]( final def iterator(split: Split): Iterator[T] = { if (isCheckpointed) { // ASSUMPTION: Checkpoint Hadoop RDD will have same number of splits as original - checkpointData.iterator(split.index) + checkpointData.iterator(split) } else if (storageLevel != StorageLevel.NONE) { SparkEnv.get.cacheTracker.getOrCompute[T](this, split, storageLevel) } else { @@ -556,16 +556,12 @@ abstract class RDD[T: ClassManifest]( @throws(classOf[IOException]) private def writeObject(oos: ObjectOutputStream) { - synchronized { - oos.defaultWriteObject() - } + oos.defaultWriteObject() } @throws(classOf[IOException]) private def readObject(ois: ObjectInputStream) { - synchronized { - ois.defaultReadObject() - } + ois.defaultReadObject() } } diff --git a/core/src/main/scala/spark/RDDCheckpointData.scala b/core/src/main/scala/spark/RDDCheckpointData.scala index eb4482acee..ff2ed4cdfc 100644 --- a/core/src/main/scala/spark/RDDCheckpointData.scala +++ b/core/src/main/scala/spark/RDDCheckpointData.scala @@ -1,12 +1,20 @@ package spark import org.apache.hadoop.fs.Path +import rdd.CoalescedRDD +import scheduler.{ResultTask, ShuffleMapTask} - +/** + * This class 
contains all the information of the regarding RDD checkpointing. + */ private[spark] class RDDCheckpointData[T: ClassManifest](rdd: RDD[T]) -extends Serializable { +extends Logging with Serializable { + /** + * This class manages the state transition of an RDD through checkpointing + * [ Not checkpointed --> marked for checkpointing --> checkpointing in progress --> checkpointed ] + */ class CheckpointState extends Serializable { var state = 0 @@ -20,24 +28,30 @@ extends Serializable { } val cpState = new CheckpointState() - var cpFile: Option[String] = None - var cpRDD: Option[RDD[T]] = None - var cpRDDSplits: Seq[Split] = Nil + @transient var cpFile: Option[String] = None + @transient var cpRDD: Option[RDD[T]] = None + @transient var cpRDDSplits: Seq[Split] = Nil + // Mark the RDD for checkpointing def markForCheckpoint() = { - rdd.synchronized { cpState.mark() } + RDDCheckpointData.synchronized { cpState.mark() } } + // Is the RDD already checkpointed def isCheckpointed() = { - rdd.synchronized { cpState.isCheckpointed } + RDDCheckpointData.synchronized { cpState.isCheckpointed } } + // Get the file to which this RDD was checkpointed to as a Option def getCheckpointFile() = { - rdd.synchronized { cpFile } + RDDCheckpointData.synchronized { cpFile } } + // Do the checkpointing of the RDD. Called after the first job using that RDD is over. def doCheckpoint() { - rdd.synchronized { + // If it is marked for checkpointing AND checkpointing is not already in progress, + // then set it to be in progress, else return + RDDCheckpointData.synchronized { if (cpState.isMarked && !cpState.isInProgress) { cpState.start() } else { @@ -45,24 +59,56 @@ extends Serializable { } } + // Save to file, and reload it as an RDD val file = new Path(rdd.context.checkpointDir, "rdd-" + rdd.id).toString rdd.saveAsObjectFile(file) - val newRDD = rdd.context.objectFile[T](file, rdd.splits.size) - rdd.synchronized { - rdd.changeDependencies(newRDD) + val newRDD = { + val hadoopRDD = rdd.context.objectFile[T](file, rdd.splits.size) + + val oldSplits = rdd.splits.size + val newSplits = hadoopRDD.splits.size + + logDebug("RDD splits = " + oldSplits + " --> " + newSplits) + if (newSplits < oldSplits) { + throw new Exception("# splits after checkpointing is less than before " + + "[" + oldSplits + " --> " + newSplits) + } else if (newSplits > oldSplits) { + new CoalescedRDD(hadoopRDD, rdd.splits.size) + } else { + hadoopRDD + } + } + logDebug("New RDD has " + newRDD.splits.size + " splits") + + // Change the dependencies and splits of the RDD + RDDCheckpointData.synchronized { cpFile = Some(file) cpRDD = Some(newRDD) cpRDDSplits = newRDD.splits + rdd.changeDependencies(newRDD) cpState.finish() + RDDCheckpointData.checkpointCompleted() + logInfo("Done checkpointing RDD " + rdd.id + ", new parent is RDD " + newRDD.id) } } + // Get preferred location of a split after checkpointing def preferredLocations(split: Split) = { - cpRDD.get.preferredLocations(split) + RDDCheckpointData.synchronized { + cpRDD.get.preferredLocations(split) + } } - def iterator(splitIndex: Int): Iterator[T] = { - cpRDD.get.iterator(cpRDDSplits(splitIndex)) + // Get iterator. This is called at the worker nodes. 
+ def iterator(split: Split): Iterator[T] = { + rdd.firstParent[T].iterator(split) + } +} + +private[spark] object RDDCheckpointData { + def checkpointCompleted() { + ShuffleMapTask.clearCache() + ResultTask.clearCache() } } diff --git a/core/src/main/scala/spark/SparkContext.scala b/core/src/main/scala/spark/SparkContext.scala index d7b46bee38..654b1c2eb7 100644 --- a/core/src/main/scala/spark/SparkContext.scala +++ b/core/src/main/scala/spark/SparkContext.scala @@ -40,9 +40,7 @@ import spark.partial.PartialResult import spark.rdd.HadoopRDD import spark.rdd.NewHadoopRDD import spark.rdd.UnionRDD -import spark.scheduler.ShuffleMapTask -import spark.scheduler.DAGScheduler -import spark.scheduler.TaskScheduler +import scheduler.{ResultTask, ShuffleMapTask, DAGScheduler, TaskScheduler} import spark.scheduler.local.LocalScheduler import spark.scheduler.cluster.{SparkDeploySchedulerBackend, SchedulerBackend, ClusterScheduler} import spark.scheduler.mesos.{CoarseMesosSchedulerBackend, MesosSchedulerBackend} @@ -486,6 +484,7 @@ class SparkContext( clearJars() SparkEnv.set(null) ShuffleMapTask.clearCache() + ResultTask.clearCache() logInfo("Successfully stopped SparkContext") } diff --git a/core/src/main/scala/spark/rdd/BlockRDD.scala b/core/src/main/scala/spark/rdd/BlockRDD.scala index 590f9eb738..0c8cdd10dd 100644 --- a/core/src/main/scala/spark/rdd/BlockRDD.scala +++ b/core/src/main/scala/spark/rdd/BlockRDD.scala @@ -2,7 +2,7 @@ package spark.rdd import scala.collection.mutable.HashMap -import spark.Dependency +import spark.OneToOneDependency import spark.RDD import spark.SparkContext import spark.SparkEnv @@ -17,7 +17,7 @@ class BlockRDD[T: ClassManifest](sc: SparkContext, @transient blockIds: Array[St extends RDD[T](sc, Nil) { @transient - val splits_ = (0 until blockIds.size).map(i => { + var splits_ : Array[Split] = (0 until blockIds.size).map(i => { new BlockRDDSplit(blockIds(i), i).asInstanceOf[Split] }).toArray @@ -43,5 +43,10 @@ class BlockRDD[T: ClassManifest](sc: SparkContext, @transient blockIds: Array[St override def preferredLocations(split: Split) = locations_(split.asInstanceOf[BlockRDDSplit].blockId) + + override def changeDependencies(newRDD: RDD[_]) { + dependencies_ = List(new OneToOneDependency(newRDD.asInstanceOf[RDD[Any]])) + splits_ = newRDD.splits + } } diff --git a/core/src/main/scala/spark/rdd/CartesianRDD.scala b/core/src/main/scala/spark/rdd/CartesianRDD.scala index 1d753a5168..9975e79b08 100644 --- a/core/src/main/scala/spark/rdd/CartesianRDD.scala +++ b/core/src/main/scala/spark/rdd/CartesianRDD.scala @@ -1,10 +1,27 @@ package spark.rdd import spark._ +import java.io.{ObjectOutputStream, IOException} private[spark] -class CartesianSplit(idx: Int, val s1: Split, val s2: Split) extends Split with Serializable { +class CartesianSplit( + idx: Int, + @transient rdd1: RDD[_], + @transient rdd2: RDD[_], + s1Index: Int, + s2Index: Int + ) extends Split { + var s1 = rdd1.splits(s1Index) + var s2 = rdd2.splits(s2Index) override val index: Int = idx + + @throws(classOf[IOException]) + private def writeObject(oos: ObjectOutputStream) { + // Update the reference to parent split at the time of task serialization + s1 = rdd1.splits(s1Index) + s2 = rdd2.splits(s2Index) + oos.defaultWriteObject() + } } private[spark] @@ -23,7 +40,7 @@ class CartesianRDD[T: ClassManifest, U:ClassManifest]( val array = new Array[Split](rdd1.splits.size * rdd2.splits.size) for (s1 <- rdd1.splits; s2 <- rdd2.splits) { val idx = s1.index * numSplitsInRdd2 + s2.index - array(idx) = new 
CartesianSplit(idx, s1, s2) + array(idx) = new CartesianSplit(idx, rdd1, rdd2, s1.index, s2.index) } array } diff --git a/core/src/main/scala/spark/rdd/CoGroupedRDD.scala b/core/src/main/scala/spark/rdd/CoGroupedRDD.scala index 57d472666b..e4e70b13ba 100644 --- a/core/src/main/scala/spark/rdd/CoGroupedRDD.scala +++ b/core/src/main/scala/spark/rdd/CoGroupedRDD.scala @@ -20,11 +20,9 @@ private[spark] case class NarrowCoGroupSplitDep(rdd: RDD[_], splitIndex: Int, va @throws(classOf[IOException]) private def writeObject(oos: ObjectOutputStream) { - rdd.synchronized { - // Update the reference to parent split at the time of task serialization - split = rdd.splits(splitIndex) - oos.defaultWriteObject() - } + // Update the reference to parent split at the time of task serialization + split = rdd.splits(splitIndex) + oos.defaultWriteObject() } } private[spark] case class ShuffleCoGroupSplitDep(shuffleId: Int) extends CoGroupSplitDep @@ -42,7 +40,8 @@ private[spark] class CoGroupAggregator { (b1, b2) => b1 ++ b2 }) with Serializable -class CoGroupedRDD[K](@transient var rdds: Seq[RDD[(_, _)]], part: Partitioner) +class +CoGroupedRDD[K](@transient var rdds: Seq[RDD[(_, _)]], part: Partitioner) extends RDD[(K, Seq[Seq[_]])](rdds.head.context, Nil) with Logging { val aggr = new CoGroupAggregator @@ -63,7 +62,9 @@ class CoGroupedRDD[K](@transient var rdds: Seq[RDD[(_, _)]], part: Partitioner) deps.toList } - override def dependencies = deps_ + // Pre-checkpoint dependencies deps_ should be transient (deps_) + // but post-checkpoint dependencies must not be transient (dependencies_) + override def dependencies = if (isCheckpointed) dependencies_ else deps_ @transient var splits_ : Array[Split] = { @@ -114,7 +115,8 @@ class CoGroupedRDD[K](@transient var rdds: Seq[RDD[(_, _)]], part: Partitioner) } override def changeDependencies(newRDD: RDD[_]) { - deps_ = List(new OneToOneDependency(newRDD.asInstanceOf[RDD[Any]])) + deps_ = null + dependencies_ = List(new OneToOneDependency(newRDD.asInstanceOf[RDD[Any]])) splits_ = newRDD.splits rdds = null } diff --git a/core/src/main/scala/spark/rdd/CoalescedRDD.scala b/core/src/main/scala/spark/rdd/CoalescedRDD.scala index 0b4499e2eb..088958942e 100644 --- a/core/src/main/scala/spark/rdd/CoalescedRDD.scala +++ b/core/src/main/scala/spark/rdd/CoalescedRDD.scala @@ -12,11 +12,9 @@ private[spark] case class CoalescedRDDSplit( @throws(classOf[IOException]) private def writeObject(oos: ObjectOutputStream) { - rdd.synchronized { - // Update the reference to parent split at the time of task serialization - parents = parentsIndices.map(rdd.splits(_)) - oos.defaultWriteObject() - } + // Update the reference to parent split at the time of task serialization + parents = parentsIndices.map(rdd.splits(_)) + oos.defaultWriteObject() } } diff --git a/core/src/main/scala/spark/rdd/HadoopRDD.scala b/core/src/main/scala/spark/rdd/HadoopRDD.scala index a12531ea89..af54f23ebc 100644 --- a/core/src/main/scala/spark/rdd/HadoopRDD.scala +++ b/core/src/main/scala/spark/rdd/HadoopRDD.scala @@ -115,4 +115,8 @@ class HadoopRDD[K, V]( val hadoopSplit = split.asInstanceOf[HadoopSplit] hadoopSplit.inputSplit.value.getLocations.filter(_ != "localhost") } + + override def checkpoint() { + // Do nothing. Hadoop RDD cannot be checkpointed. 
+ } } diff --git a/core/src/main/scala/spark/rdd/UnionRDD.scala b/core/src/main/scala/spark/rdd/UnionRDD.scala index a5948dd1f1..808729f18d 100644 --- a/core/src/main/scala/spark/rdd/UnionRDD.scala +++ b/core/src/main/scala/spark/rdd/UnionRDD.scala @@ -19,11 +19,9 @@ private[spark] class UnionSplit[T: ClassManifest]( @throws(classOf[IOException]) private def writeObject(oos: ObjectOutputStream) { - rdd.synchronized { - // Update the reference to parent split at the time of task serialization - split = rdd.splits(splitIndex) - oos.defaultWriteObject() - } + // Update the reference to parent split at the time of task serialization + split = rdd.splits(splitIndex) + oos.defaultWriteObject() } } @@ -55,7 +53,9 @@ class UnionRDD[T: ClassManifest]( deps.toList } - override def dependencies = deps_ + // Pre-checkpoint dependencies deps_ should be transient (deps_) + // but post-checkpoint dependencies must not be transient (dependencies_) + override def dependencies = if (isCheckpointed) dependencies_ else deps_ override def compute(s: Split): Iterator[T] = s.asInstanceOf[UnionSplit[T]].iterator() @@ -63,7 +63,8 @@ class UnionRDD[T: ClassManifest]( s.asInstanceOf[UnionSplit[T]].preferredLocations() override def changeDependencies(newRDD: RDD[_]) { - deps_ = List(new OneToOneDependency(newRDD)) + deps_ = null + dependencies_ = List(new OneToOneDependency(newRDD)) splits_ = newRDD.splits rdds = null } diff --git a/core/src/main/scala/spark/scheduler/ResultTask.scala b/core/src/main/scala/spark/scheduler/ResultTask.scala index 2ebd4075a2..bcb9e4956b 100644 --- a/core/src/main/scala/spark/scheduler/ResultTask.scala +++ b/core/src/main/scala/spark/scheduler/ResultTask.scala @@ -1,17 +1,73 @@ package spark.scheduler import spark._ +import java.io._ +import util.{MetadataCleaner, TimeStampedHashMap} +import java.util.zip.{GZIPInputStream, GZIPOutputStream} + +private[spark] object ResultTask { + + // A simple map between the stage id to the serialized byte array of a task. + // Served as a cache for task serialization because serialization can be + // expensive on the master node if it needs to launch thousands of tasks. 
+ val serializedInfoCache = new TimeStampedHashMap[Int, Array[Byte]] + + val metadataCleaner = new MetadataCleaner("ResultTask", serializedInfoCache.cleanup) + + def serializeInfo(stageId: Int, rdd: RDD[_], func: (TaskContext, Iterator[_]) => _): Array[Byte] = { + synchronized { + val old = serializedInfoCache.get(stageId).orNull + if (old != null) { + return old + } else { + val out = new ByteArrayOutputStream + val ser = SparkEnv.get.closureSerializer.newInstance + val objOut = ser.serializeStream(new GZIPOutputStream(out)) + objOut.writeObject(rdd) + objOut.writeObject(func) + objOut.close() + val bytes = out.toByteArray + serializedInfoCache.put(stageId, bytes) + return bytes + } + } + } + + def deserializeInfo(stageId: Int, bytes: Array[Byte]): (RDD[_], (TaskContext, Iterator[_]) => _) = { + synchronized { + val loader = Thread.currentThread.getContextClassLoader + val in = new GZIPInputStream(new ByteArrayInputStream(bytes)) + val ser = SparkEnv.get.closureSerializer.newInstance + val objIn = ser.deserializeStream(in) + val rdd = objIn.readObject().asInstanceOf[RDD[_]] + val func = objIn.readObject().asInstanceOf[(TaskContext, Iterator[_]) => _] + return (rdd, func) + } + } + + def clearCache() { + synchronized { + serializedInfoCache.clear() + } + } +} + private[spark] class ResultTask[T, U]( stageId: Int, - rdd: RDD[T], - func: (TaskContext, Iterator[T]) => U, - val partition: Int, + var rdd: RDD[T], + var func: (TaskContext, Iterator[T]) => U, + var partition: Int, @transient locs: Seq[String], val outputId: Int) - extends Task[U](stageId) { - - val split = rdd.splits(partition) + extends Task[U](stageId) with Externalizable { + + def this() = this(0, null, null, 0, null, 0) + var split = if (rdd == null) { + null + } else { + rdd.splits(partition) + } override def run(attemptId: Long): U = { val context = new TaskContext(stageId, partition, attemptId) @@ -21,4 +77,31 @@ private[spark] class ResultTask[T, U]( override def preferredLocations: Seq[String] = locs override def toString = "ResultTask(" + stageId + ", " + partition + ")" + + override def writeExternal(out: ObjectOutput) { + RDDCheckpointData.synchronized { + split = rdd.splits(partition) + out.writeInt(stageId) + val bytes = ResultTask.serializeInfo( + stageId, rdd, func.asInstanceOf[(TaskContext, Iterator[_]) => _]) + out.writeInt(bytes.length) + out.write(bytes) + out.writeInt(partition) + out.writeInt(outputId) + out.writeObject(split) + } + } + + override def readExternal(in: ObjectInput) { + val stageId = in.readInt() + val numBytes = in.readInt() + val bytes = new Array[Byte](numBytes) + in.readFully(bytes) + val (rdd_, func_) = ResultTask.deserializeInfo(stageId, bytes) + rdd = rdd_.asInstanceOf[RDD[T]] + func = func_.asInstanceOf[(TaskContext, Iterator[T]) => U] + partition = in.readInt() + val outputId = in.readInt() + split = in.readObject().asInstanceOf[Split] + } } diff --git a/core/src/main/scala/spark/scheduler/ShuffleMapTask.scala b/core/src/main/scala/spark/scheduler/ShuffleMapTask.scala index 683f5ebec3..5d28c40778 100644 --- a/core/src/main/scala/spark/scheduler/ShuffleMapTask.scala +++ b/core/src/main/scala/spark/scheduler/ShuffleMapTask.scala @@ -84,19 +84,22 @@ private[spark] class ShuffleMapTask( def this() = this(0, null, null, 0, null) var split = if (rdd == null) { - null - } else { + null + } else { rdd.splits(partition) } override def writeExternal(out: ObjectOutput) { - out.writeInt(stageId) - val bytes = ShuffleMapTask.serializeInfo(stageId, rdd, dep) - out.writeInt(bytes.length) - 
out.write(bytes) - out.writeInt(partition) - out.writeLong(generation) - out.writeObject(split) + RDDCheckpointData.synchronized { + split = rdd.splits(partition) + out.writeInt(stageId) + val bytes = ShuffleMapTask.serializeInfo(stageId, rdd, dep) + out.writeInt(bytes.length) + out.write(bytes) + out.writeInt(partition) + out.writeLong(generation) + out.writeObject(split) + } } override def readExternal(in: ObjectInput) { diff --git a/core/src/test/scala/spark/CheckpointSuite.scala b/core/src/test/scala/spark/CheckpointSuite.scala index 51bd59e2b1..7b323e089c 100644 --- a/core/src/test/scala/spark/CheckpointSuite.scala +++ b/core/src/test/scala/spark/CheckpointSuite.scala @@ -34,28 +34,6 @@ class CheckpointSuite extends FunSuite with BeforeAndAfter with Logging { } } - test("ParallelCollection") { - val parCollection = sc.makeRDD(1 to 4) - parCollection.checkpoint() - assert(parCollection.dependencies === Nil) - val result = parCollection.collect() - assert(sc.objectFile[Int](parCollection.getCheckpointFile.get).collect() === result) - assert(parCollection.dependencies != Nil) - assert(parCollection.collect() === result) - } - - test("BlockRDD") { - val blockId = "id" - val blockManager = SparkEnv.get.blockManager - blockManager.putSingle(blockId, "test", StorageLevel.MEMORY_ONLY) - val blockRDD = new BlockRDD[String](sc, Array(blockId)) - blockRDD.checkpoint() - val result = blockRDD.collect() - assert(sc.objectFile[String](blockRDD.getCheckpointFile.get).collect() === result) - assert(blockRDD.dependencies != Nil) - assert(blockRDD.collect() === result) - } - test("RDDs with one-to-one dependencies") { testCheckpointing(_.map(x => x.toString)) testCheckpointing(_.flatMap(x => 1 to x)) @@ -70,23 +48,76 @@ class CheckpointSuite extends FunSuite with BeforeAndAfter with Logging { testCheckpointing(_.pipe(Seq("cat"))) } + test("ParallelCollection") { + val parCollection = sc.makeRDD(1 to 4, 2) + val numSplits = parCollection.splits.size + parCollection.checkpoint() + assert(parCollection.dependencies === Nil) + val result = parCollection.collect() + assert(sc.objectFile[Int](parCollection.getCheckpointFile.get).collect() === result) + assert(parCollection.dependencies != Nil) + assert(parCollection.splits.length === numSplits) + assert(parCollection.splits.toList === parCollection.checkpointData.cpRDDSplits.toList) + assert(parCollection.collect() === result) + } + + test("BlockRDD") { + val blockId = "id" + val blockManager = SparkEnv.get.blockManager + blockManager.putSingle(blockId, "test", StorageLevel.MEMORY_ONLY) + val blockRDD = new BlockRDD[String](sc, Array(blockId)) + val numSplits = blockRDD.splits.size + blockRDD.checkpoint() + val result = blockRDD.collect() + assert(sc.objectFile[String](blockRDD.getCheckpointFile.get).collect() === result) + assert(blockRDD.dependencies != Nil) + assert(blockRDD.splits.length === numSplits) + assert(blockRDD.splits.toList === blockRDD.checkpointData.cpRDDSplits.toList) + assert(blockRDD.collect() === result) + } + test("ShuffledRDD") { - // Creating ShuffledRDD directly as PairRDDFunctions.combineByKey produces a MapPartitionedRDD testCheckpointing(rdd => { + // Creating ShuffledRDD directly as PairRDDFunctions.combineByKey produces a MapPartitionedRDD new ShuffledRDD(rdd.map(x => (x % 2, 1)), partitioner) }) } test("UnionRDD") { def otherRDD = sc.makeRDD(1 to 10, 4) + + // Test whether the size of UnionRDDSplits reduce in size after parent RDD is checkpointed. 
+ // Current implementation of UnionRDD has transient reference to parent RDDs, + // so only the splits will reduce in serialized size, not the RDD. testCheckpointing(_.union(otherRDD), false, true) testParentCheckpointing(_.union(otherRDD), false, true) } test("CartesianRDD") { - def otherRDD = sc.makeRDD(1 to 10, 4) - testCheckpointing(_.cartesian(otherRDD)) - testParentCheckpointing(_.cartesian(otherRDD), true, false) + def otherRDD = sc.makeRDD(1 to 10, 1) + testCheckpointing(new CartesianRDD(sc, _, otherRDD)) + + // Test whether size of CoalescedRDD reduce in size after parent RDD is checkpointed + // Current implementation of CoalescedRDDSplit has transient reference to parent RDD, + // so only the RDD will reduce in serialized size, not the splits. + testParentCheckpointing(new CartesianRDD(sc, _, otherRDD), true, false) + + // Test that the CartesianRDD updates parent splits (CartesianRDD.s1/s2) after + // the parent RDD has been checkpointed and parent splits have been changed to HadoopSplits. + // Note that this test is very specific to the current implementation of CartesianRDD. + val ones = sc.makeRDD(1 to 100, 10).map(x => x) + ones.checkpoint // checkpoint that MappedRDD + val cartesian = new CartesianRDD(sc, ones, ones) + val splitBeforeCheckpoint = + serializeDeserialize(cartesian.splits.head.asInstanceOf[CartesianSplit]) + cartesian.count() // do the checkpointing + val splitAfterCheckpoint = + serializeDeserialize(cartesian.splits.head.asInstanceOf[CartesianSplit]) + assert( + (splitAfterCheckpoint.s1 != splitBeforeCheckpoint.s1) && + (splitAfterCheckpoint.s2 != splitBeforeCheckpoint.s2), + "CartesianRDD.parents not updated after parent RDD checkpointed" + ) } test("CoalescedRDD") { @@ -94,7 +125,7 @@ class CheckpointSuite extends FunSuite with BeforeAndAfter with Logging { // Test whether size of CoalescedRDD reduce in size after parent RDD is checkpointed // Current implementation of CoalescedRDDSplit has transient reference to parent RDD, - // so does not serialize the RDD (not need to check its size). + // so only the RDD will reduce in serialized size, not the splits. 
testParentCheckpointing(new CoalescedRDD(_, 2), true, false) // Test that the CoalescedRDDSplit updates parent splits (CoalescedRDDSplit.parents) after @@ -145,13 +176,14 @@ class CheckpointSuite extends FunSuite with BeforeAndAfter with Logging { val operatedRDD = op(baseRDD) val parentRDD = operatedRDD.dependencies.headOption.orNull val rddType = operatedRDD.getClass.getSimpleName + val numSplits = operatedRDD.splits.length // Find serialized sizes before and after the checkpoint val (rddSizeBeforeCheckpoint, splitSizeBeforeCheckpoint) = getSerializedSizes(operatedRDD) operatedRDD.checkpoint() val result = operatedRDD.collect() val (rddSizeAfterCheckpoint, splitSizeAfterCheckpoint) = getSerializedSizes(operatedRDD) - + // Test whether the checkpoint file has been created assert(sc.objectFile[U](operatedRDD.getCheckpointFile.get).collect() === result) @@ -160,6 +192,9 @@ class CheckpointSuite extends FunSuite with BeforeAndAfter with Logging { // Test whether the splits have been changed to the new Hadoop splits assert(operatedRDD.splits.toList === operatedRDD.checkpointData.cpRDDSplits.toList) + + // Test whether the number of splits is same as before + assert(operatedRDD.splits.length === numSplits) // Test whether the data in the checkpointed RDD is same as original assert(operatedRDD.collect() === result) @@ -168,7 +203,7 @@ class CheckpointSuite extends FunSuite with BeforeAndAfter with Logging { // does not have any dependency to another RDD (e.g., ParallelCollection, // ShuffleRDD with ShuffleDependency), it may not reduce in size after checkpointing. if (testRDDSize) { - println("Size of " + rddType + + logInfo("Size of " + rddType + "[" + rddSizeBeforeCheckpoint + " --> " + rddSizeAfterCheckpoint + "]") assert( rddSizeAfterCheckpoint < rddSizeBeforeCheckpoint, @@ -184,7 +219,7 @@ class CheckpointSuite extends FunSuite with BeforeAndAfter with Logging { // must be forgotten after checkpointing (to remove all reference to parent RDDs) and // replaced with the HadoopSplits of the checkpointed RDD. if (testRDDSplitSize) { - println("Size of " + rddType + " splits " + logInfo("Size of " + rddType + " splits " + "[" + splitSizeBeforeCheckpoint + " --> " + splitSizeAfterCheckpoint + "]") assert( splitSizeAfterCheckpoint < splitSizeBeforeCheckpoint, @@ -294,14 +329,118 @@ class CheckpointSuite extends FunSuite with BeforeAndAfter with Logging { val bytes = Utils.serialize(obj) Utils.deserialize[T](bytes) } + /* + test("Consistency check for ResultTask") { + // Time -----------------------> + // Core 1: |<- count in thread 1, task 1 ->| |<-- checkpoint, task 1 ---->| |<- count in thread 2, task 2 ->| + // Core 2: |<- count in thread 1, task 2 ->| |<--- checkpoint, task 2 ---------->| |<- count in thread 2, task 1 ->| + // | + // checkpoint completed + sc.stop(); sc = null + System.clearProperty("spark.master.port") + + val dir = File.createTempFile("temp_", "") + dir.delete() + val ctxt = new SparkContext("local[2]", "ResultTask") + ctxt.setCheckpointDir(dir.toString) + + try { + val rdd = ctxt.makeRDD(1 to 2, 2).map(x => { + val state = CheckpointSuite.incrementState() + println("State = " + state) + if (state <= 3) { + // If executing the two tasks for the job comouting rdd.count + // of thread 1, or the first task for the recomputation due + // to checkpointing (saveing to HDFS), then do nothing + } else if (state == 4) { + // If executing the second task for the recomputation due to + // checkpointing. 
then prolong this task, to allow rdd.count + // of thread 2 to start before checkpoint of this RDD is completed + + Thread.sleep(1000) + println("State = " + state + " wake up") + } else { + // Else executing the tasks from thread 2 + Thread.sleep(1000) + println("State = " + state + " wake up") + } + + (x, 1) + }) + rdd.checkpoint() + val env = SparkEnv.get + + val thread1 = new Thread() { + override def run() { + try { + SparkEnv.set(env) + rdd.count() + } catch { + case e: Exception => CheckpointSuite.failed("Exception in thread 1", e) + } + } + } + thread1.start() + + val thread2 = new Thread() { + override def run() { + try { + SparkEnv.set(env) + CheckpointSuite.waitTillState(3) + println("\n\n\n\n") + rdd.count() + } catch { + case e: Exception => CheckpointSuite.failed("Exception in thread 2", e) + } + } + } + thread2.start() + + thread1.join() + thread2.join() + } finally { + dir.delete() + } + + assert(!CheckpointSuite.failed, CheckpointSuite.failureMessage) + + ctxt.stop() + + } + */ } object CheckpointSuite { + /* + var state = 0 + var failed = false + var failureMessage = "" + + def incrementState(): Int = { + this.synchronized { state += 1; this.notifyAll(); state } + } + + def getState(): Int = { + this.synchronized( state ) + } + + def waitTillState(s: Int) { + while(state < s) { + this.synchronized { this.wait() } + } + } + + def failed(msg: String, ex: Exception) { + failed = true + failureMessage += msg + "\n" + ex + "\n\n" + } + */ + // This is a custom cogroup function that does not use mapValues like // the PairRDDFunctions.cogroup() def cogroup[K, V](first: RDD[(K, V)], second: RDD[(K, V)], part: Partitioner) = { - println("First = " + first + ", second = " + second) + //println("First = " + first + ", second = " + second) new CoGroupedRDD[K]( Seq(first.asInstanceOf[RDD[(_, _)]], second.asInstanceOf[RDD[(_, _)]]), part From 2a87d816a24c62215d682e3a7af65489c0d6e708 Mon Sep 17 00:00:00 2001 From: Tathagata Das Date: Tue, 11 Dec 2012 01:44:43 -0800 Subject: [PATCH 144/226] Added clear property to JavaAPISuite to remove port binding errors. 
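Akka does not unbind from its port immediately when the actor system shuts down, so suites that start a fresh SparkContext right after stopping the previous one can hit port binding errors. The Scala suites already guard against this in their tear-down; a minimal sketch of that pattern, assuming the usual ScalaTest BeforeAndAfter wiring (compare the clean-up in CheckpointSuite above):

    after {
      if (sc != null) {
        sc.stop()
        sc = null
      }
      // Clear the driver port so the next suite's actor system can bind cleanly.
      System.clearProperty("spark.master.port")
    }

This change applies the same property reset to the Java API suite's tearDown().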
--- core/src/test/scala/spark/JavaAPISuite.java | 2 ++ 1 file changed, 2 insertions(+) diff --git a/core/src/test/scala/spark/JavaAPISuite.java b/core/src/test/scala/spark/JavaAPISuite.java index 5875506179..6bd9836a93 100644 --- a/core/src/test/scala/spark/JavaAPISuite.java +++ b/core/src/test/scala/spark/JavaAPISuite.java @@ -44,6 +44,8 @@ public class JavaAPISuite implements Serializable { public void tearDown() { sc.stop(); sc = null; + // To avoid Akka rebinding to the same port, since it doesn't unbind immediately on shutdown + System.clearProperty("spark.master.port"); } static class ReverseIntComparator implements Comparator, Serializable { From fa28f25619d6712e5f920f498ec03085ea208b4d Mon Sep 17 00:00:00 2001 From: Tathagata Das Date: Tue, 11 Dec 2012 13:59:43 -0800 Subject: [PATCH 145/226] Fixed bug in UnionRDD and CoGroupedRDD --- .../main/scala/spark/rdd/CoGroupedRDD.scala | 9 +- core/src/main/scala/spark/rdd/UnionRDD.scala | 12 +- .../test/scala/spark/CheckpointSuite.scala | 104 ------------------ 3 files changed, 10 insertions(+), 115 deletions(-) diff --git a/core/src/main/scala/spark/rdd/CoGroupedRDD.scala b/core/src/main/scala/spark/rdd/CoGroupedRDD.scala index e4e70b13ba..bc6d16ee8b 100644 --- a/core/src/main/scala/spark/rdd/CoGroupedRDD.scala +++ b/core/src/main/scala/spark/rdd/CoGroupedRDD.scala @@ -15,8 +15,11 @@ import spark.Split import java.io.{ObjectOutputStream, IOException} private[spark] sealed trait CoGroupSplitDep extends Serializable -private[spark] case class NarrowCoGroupSplitDep(rdd: RDD[_], splitIndex: Int, var split: Split = null) - extends CoGroupSplitDep { +private[spark] case class NarrowCoGroupSplitDep( + rdd: RDD[_], + splitIndex: Int, + var split: Split + ) extends CoGroupSplitDep { @throws(classOf[IOException]) private def writeObject(oos: ObjectOutputStream) { @@ -75,7 +78,7 @@ CoGroupedRDD[K](@transient var rdds: Seq[RDD[(_, _)]], part: Partitioner) case s: ShuffleDependency[_, _] => new ShuffleCoGroupSplitDep(s.shuffleId): CoGroupSplitDep case _ => - new NarrowCoGroupSplitDep(r, i): CoGroupSplitDep + new NarrowCoGroupSplitDep(r, i, r.splits(i)): CoGroupSplitDep } }.toList) } diff --git a/core/src/main/scala/spark/rdd/UnionRDD.scala b/core/src/main/scala/spark/rdd/UnionRDD.scala index 808729f18d..a84867492b 100644 --- a/core/src/main/scala/spark/rdd/UnionRDD.scala +++ b/core/src/main/scala/spark/rdd/UnionRDD.scala @@ -5,14 +5,10 @@ import scala.collection.mutable.ArrayBuffer import spark._ import java.io.{ObjectOutputStream, IOException} -private[spark] class UnionSplit[T: ClassManifest]( - idx: Int, - rdd: RDD[T], - splitIndex: Int, - var split: Split = null) - extends Split - with Serializable { - +private[spark] class UnionSplit[T: ClassManifest](idx: Int, rdd: RDD[T], splitIndex: Int) + extends Split { + var split: Split = rdd.splits(splitIndex) + def iterator() = rdd.iterator(split) def preferredLocations() = rdd.preferredLocations(split) override val index: Int = idx diff --git a/core/src/test/scala/spark/CheckpointSuite.scala b/core/src/test/scala/spark/CheckpointSuite.scala index 7b323e089c..909c55c91c 100644 --- a/core/src/test/scala/spark/CheckpointSuite.scala +++ b/core/src/test/scala/spark/CheckpointSuite.scala @@ -329,114 +329,10 @@ class CheckpointSuite extends FunSuite with BeforeAndAfter with Logging { val bytes = Utils.serialize(obj) Utils.deserialize[T](bytes) } - /* - test("Consistency check for ResultTask") { - // Time -----------------------> - // Core 1: |<- count in thread 1, task 1 ->| |<-- checkpoint, task 1 ---->| |<- 
count in thread 2, task 2 ->| - // Core 2: |<- count in thread 1, task 2 ->| |<--- checkpoint, task 2 ---------->| |<- count in thread 2, task 1 ->| - // | - // checkpoint completed - sc.stop(); sc = null - System.clearProperty("spark.master.port") - - val dir = File.createTempFile("temp_", "") - dir.delete() - val ctxt = new SparkContext("local[2]", "ResultTask") - ctxt.setCheckpointDir(dir.toString) - - try { - val rdd = ctxt.makeRDD(1 to 2, 2).map(x => { - val state = CheckpointSuite.incrementState() - println("State = " + state) - if (state <= 3) { - // If executing the two tasks for the job comouting rdd.count - // of thread 1, or the first task for the recomputation due - // to checkpointing (saveing to HDFS), then do nothing - } else if (state == 4) { - // If executing the second task for the recomputation due to - // checkpointing. then prolong this task, to allow rdd.count - // of thread 2 to start before checkpoint of this RDD is completed - - Thread.sleep(1000) - println("State = " + state + " wake up") - } else { - // Else executing the tasks from thread 2 - Thread.sleep(1000) - println("State = " + state + " wake up") - } - - (x, 1) - }) - rdd.checkpoint() - val env = SparkEnv.get - - val thread1 = new Thread() { - override def run() { - try { - SparkEnv.set(env) - rdd.count() - } catch { - case e: Exception => CheckpointSuite.failed("Exception in thread 1", e) - } - } - } - thread1.start() - - val thread2 = new Thread() { - override def run() { - try { - SparkEnv.set(env) - CheckpointSuite.waitTillState(3) - println("\n\n\n\n") - rdd.count() - } catch { - case e: Exception => CheckpointSuite.failed("Exception in thread 2", e) - } - } - } - thread2.start() - - thread1.join() - thread2.join() - } finally { - dir.delete() - } - - assert(!CheckpointSuite.failed, CheckpointSuite.failureMessage) - - ctxt.stop() - - } - */ } object CheckpointSuite { - /* - var state = 0 - var failed = false - var failureMessage = "" - - def incrementState(): Int = { - this.synchronized { state += 1; this.notifyAll(); state } - } - - def getState(): Int = { - this.synchronized( state ) - } - - def waitTillState(s: Int) { - while(state < s) { - this.synchronized { this.wait() } - } - } - - def failed(msg: String, ex: Exception) { - failed = true - failureMessage += msg + "\n" + ex + "\n\n" - } - */ - // This is a custom cogroup function that does not use mapValues like // the PairRDDFunctions.cogroup() def cogroup[K, V](first: RDD[(K, V)], second: RDD[(K, V)], part: Partitioner) = { From 8e74fac215e8b9cda7e35111c5116e3669c6eb97 Mon Sep 17 00:00:00 2001 From: Tathagata Das Date: Tue, 11 Dec 2012 15:36:12 -0800 Subject: [PATCH 146/226] Made checkpoint data in RDDs optional to further reduce serialized size. 
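Previously every RDD carried an RDDCheckpointData instance, which was serialized along with the RDD even when checkpointing was never requested. Making the field an Option that is only populated on the first checkpoint() call keeps un-checkpointed RDDs lean. A condensed sketch of the pattern (see the RDD.scala diff below for the full change):

    // The field starts out empty and is filled in lazily by checkpoint().
    protected[spark] var checkpointData: Option[RDDCheckpointData[T]] = None

    def checkpoint() {
      if (checkpointData.isEmpty) {
        checkpointData = Some(new RDDCheckpointData(this))
        checkpointData.get.markForCheckpoint()
      }
    }

    // Accessors fall back to sensible defaults when no checkpoint data exists.
    def isCheckpointed(): Boolean =
      if (checkpointData.isDefined) checkpointData.get.isCheckpointed() else false

    def getCheckpointFile(): Option[String] =
      if (checkpointData.isDefined) checkpointData.get.getCheckpointFile() else None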
--- core/src/main/scala/spark/RDD.scala | 19 +++++++++++-------- core/src/main/scala/spark/SparkContext.scala | 11 +++++++++++ .../test/scala/spark/CheckpointSuite.scala | 12 ++++++------ .../main/scala/spark/streaming/DStream.scala | 4 +--- 4 files changed, 29 insertions(+), 17 deletions(-) diff --git a/core/src/main/scala/spark/RDD.scala b/core/src/main/scala/spark/RDD.scala index efa03d5185..6c04769c82 100644 --- a/core/src/main/scala/spark/RDD.scala +++ b/core/src/main/scala/spark/RDD.scala @@ -112,7 +112,7 @@ abstract class RDD[T: ClassManifest]( // Variables relating to persistence private var storageLevel: StorageLevel = StorageLevel.NONE - protected[spark] val checkpointData = new RDDCheckpointData(this) + protected[spark] var checkpointData: Option[RDDCheckpointData[T]] = None /** Returns the first parent RDD */ protected[spark] def firstParent[U: ClassManifest] = { @@ -149,7 +149,7 @@ abstract class RDD[T: ClassManifest]( def getPreferredLocations(split: Split) = { if (isCheckpointed) { - checkpointData.preferredLocations(split) + checkpointData.get.preferredLocations(split) } else { preferredLocations(split) } @@ -163,7 +163,7 @@ abstract class RDD[T: ClassManifest]( final def iterator(split: Split): Iterator[T] = { if (isCheckpointed) { // ASSUMPTION: Checkpoint Hadoop RDD will have same number of splits as original - checkpointData.iterator(split) + checkpointData.get.iterator(split) } else if (storageLevel != StorageLevel.NONE) { SparkEnv.get.cacheTracker.getOrCompute[T](this, split, storageLevel) } else { @@ -516,21 +516,24 @@ abstract class RDD[T: ClassManifest]( * require recomputation. */ def checkpoint() { - checkpointData.markForCheckpoint() + if (checkpointData.isEmpty) { + checkpointData = Some(new RDDCheckpointData(this)) + checkpointData.get.markForCheckpoint() + } } /** * Return whether this RDD has been checkpointed or not */ def isCheckpointed(): Boolean = { - checkpointData.isCheckpointed() + if (checkpointData.isDefined) checkpointData.get.isCheckpointed() else false } /** * Gets the name of the file to which this RDD was checkpointed */ def getCheckpointFile(): Option[String] = { - checkpointData.getCheckpointFile() + if (checkpointData.isDefined) checkpointData.get.getCheckpointFile() else None } /** @@ -539,12 +542,12 @@ abstract class RDD[T: ClassManifest]( * potentially stored in memory). doCheckpoint() is called recursively on the parent RDDs. */ protected[spark] def doCheckpoint() { - checkpointData.doCheckpoint() + if (checkpointData.isDefined) checkpointData.get.doCheckpoint() dependencies.foreach(_.rdd.doCheckpoint()) } /** - * Changes the dependencies of this RDD from its original parents to the new [[spark.rdd.HadoopRDD]] + * Changes the dependencies of this RDD from its original parents to the new RDD * (`newRDD`) created from the checkpoint file. This method must ensure that all references * to the original parent RDDs must be removed to enable the parent RDDs to be garbage * collected. 
Subclasses of RDD may override this method for implementing their own changing diff --git a/core/src/main/scala/spark/SparkContext.scala b/core/src/main/scala/spark/SparkContext.scala index 654b1c2eb7..71ed4ef058 100644 --- a/core/src/main/scala/spark/SparkContext.scala +++ b/core/src/main/scala/spark/SparkContext.scala @@ -366,6 +366,17 @@ class SparkContext( .flatMap(x => Utils.deserialize[Array[T]](x._2.getBytes)) } + + protected[spark] def checkpointFile[T: ClassManifest]( + path: String, + minSplits: Int = defaultMinSplits + ): RDD[T] = { + val rdd = objectFile[T](path, minSplits) + rdd.checkpointData = Some(new RDDCheckpointData(rdd)) + rdd.checkpointData.get.cpFile = Some(path) + rdd + } + /** Build the union of a list of RDDs. */ def union[T: ClassManifest](rdds: Seq[RDD[T]]): RDD[T] = new UnionRDD(this, rdds) diff --git a/core/src/test/scala/spark/CheckpointSuite.scala b/core/src/test/scala/spark/CheckpointSuite.scala index 909c55c91c..0bffedb8db 100644 --- a/core/src/test/scala/spark/CheckpointSuite.scala +++ b/core/src/test/scala/spark/CheckpointSuite.scala @@ -57,7 +57,7 @@ class CheckpointSuite extends FunSuite with BeforeAndAfter with Logging { assert(sc.objectFile[Int](parCollection.getCheckpointFile.get).collect() === result) assert(parCollection.dependencies != Nil) assert(parCollection.splits.length === numSplits) - assert(parCollection.splits.toList === parCollection.checkpointData.cpRDDSplits.toList) + assert(parCollection.splits.toList === parCollection.checkpointData.get.cpRDDSplits.toList) assert(parCollection.collect() === result) } @@ -72,7 +72,7 @@ class CheckpointSuite extends FunSuite with BeforeAndAfter with Logging { assert(sc.objectFile[String](blockRDD.getCheckpointFile.get).collect() === result) assert(blockRDD.dependencies != Nil) assert(blockRDD.splits.length === numSplits) - assert(blockRDD.splits.toList === blockRDD.checkpointData.cpRDDSplits.toList) + assert(blockRDD.splits.toList === blockRDD.checkpointData.get.cpRDDSplits.toList) assert(blockRDD.collect() === result) } @@ -84,7 +84,7 @@ class CheckpointSuite extends FunSuite with BeforeAndAfter with Logging { } test("UnionRDD") { - def otherRDD = sc.makeRDD(1 to 10, 4) + def otherRDD = sc.makeRDD(1 to 10, 1) // Test whether the size of UnionRDDSplits reduce in size after parent RDD is checkpointed. 
// Current implementation of UnionRDD has transient reference to parent RDDs, @@ -191,7 +191,7 @@ class CheckpointSuite extends FunSuite with BeforeAndAfter with Logging { assert(operatedRDD.dependencies.head.rdd != parentRDD) // Test whether the splits have been changed to the new Hadoop splits - assert(operatedRDD.splits.toList === operatedRDD.checkpointData.cpRDDSplits.toList) + assert(operatedRDD.splits.toList === operatedRDD.checkpointData.get.cpRDDSplits.toList) // Test whether the number of splits is same as before assert(operatedRDD.splits.length === numSplits) @@ -289,8 +289,8 @@ class CheckpointSuite extends FunSuite with BeforeAndAfter with Logging { */ def generateLongLineageRDD(): RDD[Int] = { var rdd = sc.makeRDD(1 to 100, 4) - for (i <- 1 to 20) { - rdd = rdd.map(x => x) + for (i <- 1 to 50) { + rdd = rdd.map(x => x + 1) } rdd } diff --git a/streaming/src/main/scala/spark/streaming/DStream.scala b/streaming/src/main/scala/spark/streaming/DStream.scala index d290c5927e..69fefa21a0 100644 --- a/streaming/src/main/scala/spark/streaming/DStream.scala +++ b/streaming/src/main/scala/spark/streaming/DStream.scala @@ -372,9 +372,7 @@ extends Serializable with Logging { checkpointData.foreach { case(time, data) => { logInfo("Restoring checkpointed RDD for time " + time + " from file '" + data.toString + "'") - val rdd = ssc.sc.objectFile[T](data.toString) - // Set the checkpoint file name to identify this RDD as a checkpointed RDD by updateCheckpointData() - rdd.checkpointData.cpFile = Some(data.toString) + val rdd = ssc.sc.checkpointFile[T](data.toString) generatedRDDs += ((time, rdd)) } } From 72eed2b95edb3b0b213517c815e09c3886b11669 Mon Sep 17 00:00:00 2001 From: Tathagata Das Date: Mon, 17 Dec 2012 18:52:43 -0800 Subject: [PATCH 147/226] Converted CheckpointState in RDDCheckpointData to use scala Enumeration. --- .../main/scala/spark/RDDCheckpointData.scala | 48 +++++++++---------- 1 file changed, 22 insertions(+), 26 deletions(-) diff --git a/core/src/main/scala/spark/RDDCheckpointData.scala b/core/src/main/scala/spark/RDDCheckpointData.scala index ff2ed4cdfc..7613b338e6 100644 --- a/core/src/main/scala/spark/RDDCheckpointData.scala +++ b/core/src/main/scala/spark/RDDCheckpointData.scala @@ -4,46 +4,42 @@ import org.apache.hadoop.fs.Path import rdd.CoalescedRDD import scheduler.{ResultTask, ShuffleMapTask} +/** + * Enumeration to manage state transitions of an RDD through checkpointing + * [ Initialized --> marked for checkpointing --> checkpointing in progress --> checkpointed ] + */ +private[spark] object CheckpointState extends Enumeration { + type CheckpointState = Value + val Initialized, MarkedForCheckpoint, CheckpointingInProgress, Checkpointed = Value +} + /** * This class contains all the information of the regarding RDD checkpointing. 
*/ - private[spark] class RDDCheckpointData[T: ClassManifest](rdd: RDD[T]) extends Logging with Serializable { - /** - * This class manages the state transition of an RDD through checkpointing - * [ Not checkpointed --> marked for checkpointing --> checkpointing in progress --> checkpointed ] - */ - class CheckpointState extends Serializable { - var state = 0 + import CheckpointState._ - def mark() { if (state == 0) state = 1 } - def start() { assert(state == 1); state = 2 } - def finish() { assert(state == 2); state = 3 } - - def isMarked() = { state == 1 } - def isInProgress = { state == 2 } - def isCheckpointed = { state == 3 } - } - - val cpState = new CheckpointState() + var cpState = Initialized @transient var cpFile: Option[String] = None @transient var cpRDD: Option[RDD[T]] = None @transient var cpRDDSplits: Seq[Split] = Nil // Mark the RDD for checkpointing - def markForCheckpoint() = { - RDDCheckpointData.synchronized { cpState.mark() } + def markForCheckpoint() { + RDDCheckpointData.synchronized { + if (cpState == Initialized) cpState = MarkedForCheckpoint + } } // Is the RDD already checkpointed - def isCheckpointed() = { - RDDCheckpointData.synchronized { cpState.isCheckpointed } + def isCheckpointed(): Boolean = { + RDDCheckpointData.synchronized { cpState == Checkpointed } } - // Get the file to which this RDD was checkpointed to as a Option - def getCheckpointFile() = { + // Get the file to which this RDD was checkpointed to as an Option + def getCheckpointFile(): Option[String] = { RDDCheckpointData.synchronized { cpFile } } @@ -52,8 +48,8 @@ extends Logging with Serializable { // If it is marked for checkpointing AND checkpointing is not already in progress, // then set it to be in progress, else return RDDCheckpointData.synchronized { - if (cpState.isMarked && !cpState.isInProgress) { - cpState.start() + if (cpState == MarkedForCheckpoint) { + cpState = CheckpointingInProgress } else { return } @@ -87,7 +83,7 @@ extends Logging with Serializable { cpRDD = Some(newRDD) cpRDDSplits = newRDD.splits rdd.changeDependencies(newRDD) - cpState.finish() + cpState = Checkpointed RDDCheckpointData.checkpointCompleted() logInfo("Done checkpointing RDD " + rdd.id + ", new parent is RDD " + newRDD.id) } From 5184141936c18f12c6738caae6fceee4d15800e2 Mon Sep 17 00:00:00 2001 From: Tathagata Das Date: Tue, 18 Dec 2012 13:30:53 -0800 Subject: [PATCH 148/226] Introduced getSpits, getDependencies, and getPreferredLocations in RDD and RDDCheckpointData. 
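The public splits/dependencies/preferredLocations accessors on RDD become final and delegate either to the subclass-provided get* hooks or, once the RDD has been checkpointed, to the checkpoint data. Subclasses now override only the protected get* methods and clearDependencies(). A condensed sketch of the resulting shape (simplified from the RDD.scala diff below):

    // Subclasses implement these protected hooks.
    protected def getSplits(): Array[Split]
    protected def getDependencies(): List[Dependency[_]] = dependencies_
    protected def getPreferredLocations(split: Split): Seq[String] = Nil

    // The final public accessors transparently switch over after checkpointing.
    final def splits: Array[Split] =
      if (isCheckpointed) checkpointData.get.getSplits else getSplits

    final def preferredLocations(split: Split): Seq[String] =
      if (isCheckpointed) checkpointData.get.getPreferredLocations(split)
      else getPreferredLocations(split)

    final def dependencies: List[Dependency[_]] =
      if (isCheckpointed) dependencies_ else getDependencies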
--- .../main/scala/spark/PairRDDFunctions.scala | 4 +- .../main/scala/spark/ParallelCollection.scala | 9 +- core/src/main/scala/spark/RDD.scala | 127 +++++++++++------- .../main/scala/spark/RDDCheckpointData.scala | 10 +- core/src/main/scala/spark/rdd/BlockRDD.scala | 9 +- .../main/scala/spark/rdd/CartesianRDD.scala | 12 +- .../main/scala/spark/rdd/CoGroupedRDD.scala | 11 +- .../main/scala/spark/rdd/CoalescedRDD.scala | 10 +- .../main/scala/spark/rdd/FilteredRDD.scala | 2 +- .../main/scala/spark/rdd/FlatMappedRDD.scala | 2 +- .../src/main/scala/spark/rdd/GlommedRDD.scala | 2 +- core/src/main/scala/spark/rdd/HadoopRDD.scala | 4 +- .../scala/spark/rdd/MapPartitionsRDD.scala | 2 +- .../spark/rdd/MapPartitionsWithSplitRDD.scala | 2 +- core/src/main/scala/spark/rdd/MappedRDD.scala | 2 +- .../main/scala/spark/rdd/NewHadoopRDD.scala | 4 +- core/src/main/scala/spark/rdd/PipedRDD.scala | 2 +- .../src/main/scala/spark/rdd/SampledRDD.scala | 9 +- .../main/scala/spark/rdd/ShuffledRDD.scala | 7 +- core/src/main/scala/spark/rdd/UnionRDD.scala | 13 +- .../scala/spark/scheduler/DAGScheduler.scala | 2 +- .../test/scala/spark/CheckpointSuite.scala | 6 +- 22 files changed, 136 insertions(+), 115 deletions(-) diff --git a/core/src/main/scala/spark/PairRDDFunctions.scala b/core/src/main/scala/spark/PairRDDFunctions.scala index 1f82bd3ab8..09ac606cfb 100644 --- a/core/src/main/scala/spark/PairRDDFunctions.scala +++ b/core/src/main/scala/spark/PairRDDFunctions.scala @@ -628,7 +628,7 @@ private[spark] class MappedValuesRDD[K, V, U](prev: WeakReference[RDD[(K, V)]], f: V => U) extends RDD[(K, U)](prev.get) { - override def splits = firstParent[(K, V)].splits + override def getSplits = firstParent[(K, V)].splits override val partitioner = firstParent[(K, V)].partitioner override def compute(split: Split) = firstParent[(K, V)].iterator(split).map{case (k, v) => (k, f(v))} } @@ -637,7 +637,7 @@ private[spark] class FlatMappedValuesRDD[K, V, U](prev: WeakReference[RDD[(K, V)]], f: V => TraversableOnce[U]) extends RDD[(K, U)](prev.get) { - override def splits = firstParent[(K, V)].splits + override def getSplits = firstParent[(K, V)].splits override val partitioner = firstParent[(K, V)].partitioner override def compute(split: Split) = { firstParent[(K, V)].iterator(split).flatMap { case (k, v) => f(v).map(x => (k, x)) } diff --git a/core/src/main/scala/spark/ParallelCollection.scala b/core/src/main/scala/spark/ParallelCollection.scala index 9d12af6912..0bc5b2ff11 100644 --- a/core/src/main/scala/spark/ParallelCollection.scala +++ b/core/src/main/scala/spark/ParallelCollection.scala @@ -37,15 +37,12 @@ private[spark] class ParallelCollection[T: ClassManifest]( slices.indices.map(i => new ParallelCollectionSplit(id, i, slices(i))).toArray } - override def splits = splits_.asInstanceOf[Array[Split]] + override def getSplits = splits_.asInstanceOf[Array[Split]] override def compute(s: Split) = s.asInstanceOf[ParallelCollectionSplit[T]].iterator - - override def preferredLocations(s: Split): Seq[String] = Nil - override def changeDependencies(newRDD: RDD[_]) { - dependencies_ = List(new OneToOneDependency(newRDD.asInstanceOf[RDD[Any]])) - splits_ = newRDD.splits + override def clearDependencies() { + splits_ = null } } diff --git a/core/src/main/scala/spark/RDD.scala b/core/src/main/scala/spark/RDD.scala index 6c04769c82..f3e422fa5f 100644 --- a/core/src/main/scala/spark/RDD.scala +++ b/core/src/main/scala/spark/RDD.scala @@ -81,48 +81,33 @@ abstract class RDD[T: ClassManifest]( def this(@transient oneParent: RDD[_]) = 
this(oneParent.context , List(new OneToOneDependency(oneParent))) - // Methods that must be implemented by subclasses: - - /** Set of partitions in this RDD. */ - def splits: Array[Split] + // ======================================================================= + // Methods that should be implemented by subclasses of RDD + // ======================================================================= /** Function for computing a given partition. */ def compute(split: Split): Iterator[T] - /** How this RDD depends on any parent RDDs. */ - def dependencies: List[Dependency[_]] = dependencies_ + /** Set of partitions in this RDD. */ + protected def getSplits(): Array[Split] + + /** How this RDD depends on any parent RDDs. */ + protected def getDependencies(): List[Dependency[_]] = dependencies_ + + /** Optionally overridden by subclasses to specify placement preferences. */ + protected def getPreferredLocations(split: Split): Seq[String] = Nil - /** Record user function generating this RDD. */ - private[spark] val origin = Utils.getSparkCallSite - /** Optionally overridden by subclasses to specify how they are partitioned. */ val partitioner: Option[Partitioner] = None - /** Optionally overridden by subclasses to specify placement preferences. */ - def preferredLocations(split: Split): Seq[String] = Nil - - /** The [[spark.SparkContext]] that this RDD was created on. */ - def context = sc - private[spark] def elementClassManifest: ClassManifest[T] = classManifest[T] - + + // ======================================================================= + // Methods and fields available on all RDDs + // ======================================================================= + /** A unique ID for this RDD (within its SparkContext). */ val id = sc.newRddId() - - // Variables relating to persistence - private var storageLevel: StorageLevel = StorageLevel.NONE - - protected[spark] var checkpointData: Option[RDDCheckpointData[T]] = None - - /** Returns the first parent RDD */ - protected[spark] def firstParent[U: ClassManifest] = { - dependencies.head.rdd.asInstanceOf[RDD[U]] - } - - /** Returns the `i` th parent RDD */ - protected[spark] def parent[U: ClassManifest](i: Int) = dependencies(i).rdd.asInstanceOf[RDD[U]] - - // Methods available on all RDDs: /** * Set this RDD's storage level to persist its values across operations after the first time @@ -147,11 +132,39 @@ abstract class RDD[T: ClassManifest]( /** Get the RDD's current storage level, or StorageLevel.NONE if none is set. */ def getStorageLevel = storageLevel - def getPreferredLocations(split: Split) = { + /** + * Get the preferred location of a split, taking into account whether the + * RDD is checkpointed or not. + */ + final def preferredLocations(split: Split): Seq[String] = { if (isCheckpointed) { - checkpointData.get.preferredLocations(split) + checkpointData.get.getPreferredLocations(split) } else { - preferredLocations(split) + getPreferredLocations(split) + } + } + + /** + * Get the array of splits of this RDD, taking into account whether the + * RDD is checkpointed or not. + */ + final def splits: Array[Split] = { + if (isCheckpointed) { + checkpointData.get.getSplits + } else { + getSplits + } + } + + /** + * Get the array of splits of this RDD, taking into account whether the + * RDD is checkpointed or not. 
+ */ + final def dependencies: List[Dependency[_]] = { + if (isCheckpointed) { + dependencies_ + } else { + getDependencies } } @@ -536,6 +549,27 @@ abstract class RDD[T: ClassManifest]( if (checkpointData.isDefined) checkpointData.get.getCheckpointFile() else None } + // ======================================================================= + // Other internal methods and fields + // ======================================================================= + + private var storageLevel: StorageLevel = StorageLevel.NONE + + /** Record user function generating this RDD. */ + private[spark] val origin = Utils.getSparkCallSite + + private[spark] def elementClassManifest: ClassManifest[T] = classManifest[T] + + private[spark] var checkpointData: Option[RDDCheckpointData[T]] = None + + /** Returns the first parent RDD */ + protected[spark] def firstParent[U: ClassManifest] = { + dependencies.head.rdd.asInstanceOf[RDD[U]] + } + + /** The [[spark.SparkContext]] that this RDD was created on. */ + def context = sc + /** * Performs the checkpointing of this RDD by saving this . It is called by the DAGScheduler * after a job using this RDD has completed (therefore the RDD has been materialized and @@ -548,23 +582,18 @@ abstract class RDD[T: ClassManifest]( /** * Changes the dependencies of this RDD from its original parents to the new RDD - * (`newRDD`) created from the checkpoint file. This method must ensure that all references + * (`newRDD`) created from the checkpoint file. + */ + protected[spark] def changeDependencies(newRDD: RDD[_]) { + clearDependencies() + dependencies_ = List(new OneToOneDependency(newRDD)) + } + + /** + * Clears the dependencies of this RDD. This method must ensure that all references * to the original parent RDDs must be removed to enable the parent RDDs to be garbage * collected. Subclasses of RDD may override this method for implementing their own changing * logic. See [[spark.rdd.UnionRDD]] and [[spark.rdd.ShuffledRDD]] to get a better idea. 
*/ - protected[spark] def changeDependencies(newRDD: RDD[_]) { - dependencies_ = List(new OneToOneDependency(newRDD)) - } - - @throws(classOf[IOException]) - private def writeObject(oos: ObjectOutputStream) { - oos.defaultWriteObject() - } - - @throws(classOf[IOException]) - private def readObject(ois: ObjectInputStream) { - ois.defaultReadObject() - } - + protected[spark] def clearDependencies() { } } diff --git a/core/src/main/scala/spark/RDDCheckpointData.scala b/core/src/main/scala/spark/RDDCheckpointData.scala index 7613b338e6..e4c0912cdc 100644 --- a/core/src/main/scala/spark/RDDCheckpointData.scala +++ b/core/src/main/scala/spark/RDDCheckpointData.scala @@ -24,7 +24,6 @@ extends Logging with Serializable { var cpState = Initialized @transient var cpFile: Option[String] = None @transient var cpRDD: Option[RDD[T]] = None - @transient var cpRDDSplits: Seq[Split] = Nil // Mark the RDD for checkpointing def markForCheckpoint() { @@ -81,7 +80,6 @@ extends Logging with Serializable { RDDCheckpointData.synchronized { cpFile = Some(file) cpRDD = Some(newRDD) - cpRDDSplits = newRDD.splits rdd.changeDependencies(newRDD) cpState = Checkpointed RDDCheckpointData.checkpointCompleted() @@ -90,12 +88,18 @@ extends Logging with Serializable { } // Get preferred location of a split after checkpointing - def preferredLocations(split: Split) = { + def getPreferredLocations(split: Split) = { RDDCheckpointData.synchronized { cpRDD.get.preferredLocations(split) } } + def getSplits: Array[Split] = { + RDDCheckpointData.synchronized { + cpRDD.get.splits + } + } + // Get iterator. This is called at the worker nodes. def iterator(split: Split): Iterator[T] = { rdd.firstParent[T].iterator(split) diff --git a/core/src/main/scala/spark/rdd/BlockRDD.scala b/core/src/main/scala/spark/rdd/BlockRDD.scala index 0c8cdd10dd..68e570eb15 100644 --- a/core/src/main/scala/spark/rdd/BlockRDD.scala +++ b/core/src/main/scala/spark/rdd/BlockRDD.scala @@ -29,7 +29,7 @@ class BlockRDD[T: ClassManifest](sc: SparkContext, @transient blockIds: Array[St HashMap(blockIds.zip(locations):_*) } - override def splits = splits_ + override def getSplits = splits_ override def compute(split: Split): Iterator[T] = { val blockManager = SparkEnv.get.blockManager @@ -41,12 +41,11 @@ class BlockRDD[T: ClassManifest](sc: SparkContext, @transient blockIds: Array[St } } - override def preferredLocations(split: Split) = + override def getPreferredLocations(split: Split) = locations_(split.asInstanceOf[BlockRDDSplit].blockId) - override def changeDependencies(newRDD: RDD[_]) { - dependencies_ = List(new OneToOneDependency(newRDD.asInstanceOf[RDD[Any]])) - splits_ = newRDD.splits + override def clearDependencies() { + splits_ = null } } diff --git a/core/src/main/scala/spark/rdd/CartesianRDD.scala b/core/src/main/scala/spark/rdd/CartesianRDD.scala index 9975e79b08..116644bd52 100644 --- a/core/src/main/scala/spark/rdd/CartesianRDD.scala +++ b/core/src/main/scala/spark/rdd/CartesianRDD.scala @@ -45,9 +45,9 @@ class CartesianRDD[T: ClassManifest, U:ClassManifest]( array } - override def splits = splits_ + override def getSplits = splits_ - override def preferredLocations(split: Split) = { + override def getPreferredLocations(split: Split) = { val currSplit = split.asInstanceOf[CartesianSplit] rdd1.preferredLocations(currSplit.s1) ++ rdd2.preferredLocations(currSplit.s2) } @@ -66,11 +66,11 @@ class CartesianRDD[T: ClassManifest, U:ClassManifest]( } ) - override def dependencies = deps_ + override def getDependencies = deps_ - override def 
changeDependencies(newRDD: RDD[_]) { - deps_ = List(new OneToOneDependency(newRDD.asInstanceOf[RDD[Any]])) - splits_ = newRDD.splits + override def clearDependencies() { + deps_ = Nil + splits_ = null rdd1 = null rdd2 = null } diff --git a/core/src/main/scala/spark/rdd/CoGroupedRDD.scala b/core/src/main/scala/spark/rdd/CoGroupedRDD.scala index bc6d16ee8b..9cc95dc172 100644 --- a/core/src/main/scala/spark/rdd/CoGroupedRDD.scala +++ b/core/src/main/scala/spark/rdd/CoGroupedRDD.scala @@ -65,9 +65,7 @@ CoGroupedRDD[K](@transient var rdds: Seq[RDD[(_, _)]], part: Partitioner) deps.toList } - // Pre-checkpoint dependencies deps_ should be transient (deps_) - // but post-checkpoint dependencies must not be transient (dependencies_) - override def dependencies = if (isCheckpointed) dependencies_ else deps_ + override def getDependencies = deps_ @transient var splits_ : Array[Split] = { @@ -85,7 +83,7 @@ CoGroupedRDD[K](@transient var rdds: Seq[RDD[(_, _)]], part: Partitioner) array } - override def splits = splits_ + override def getSplits = splits_ override val partitioner = Some(part) @@ -117,10 +115,9 @@ CoGroupedRDD[K](@transient var rdds: Seq[RDD[(_, _)]], part: Partitioner) map.iterator } - override def changeDependencies(newRDD: RDD[_]) { + override def clearDependencies() { deps_ = null - dependencies_ = List(new OneToOneDependency(newRDD.asInstanceOf[RDD[Any]])) - splits_ = newRDD.splits + splits_ = null rdds = null } } diff --git a/core/src/main/scala/spark/rdd/CoalescedRDD.scala b/core/src/main/scala/spark/rdd/CoalescedRDD.scala index 088958942e..85d0fa9f6a 100644 --- a/core/src/main/scala/spark/rdd/CoalescedRDD.scala +++ b/core/src/main/scala/spark/rdd/CoalescedRDD.scala @@ -44,7 +44,7 @@ class CoalescedRDD[T: ClassManifest]( } } - override def splits = splits_ + override def getSplits = splits_ override def compute(split: Split): Iterator[T] = { split.asInstanceOf[CoalescedRDDSplit].parents.iterator.flatMap { @@ -59,11 +59,11 @@ class CoalescedRDD[T: ClassManifest]( } ) - override def dependencies = deps_ + override def getDependencies() = deps_ - override def changeDependencies(newRDD: RDD[_]) { - deps_ = List(new OneToOneDependency(newRDD)) - splits_ = newRDD.splits + override def clearDependencies() { + deps_ = Nil + splits_ = null prev = null } } diff --git a/core/src/main/scala/spark/rdd/FilteredRDD.scala b/core/src/main/scala/spark/rdd/FilteredRDD.scala index 02f2e7c246..309ed2399d 100644 --- a/core/src/main/scala/spark/rdd/FilteredRDD.scala +++ b/core/src/main/scala/spark/rdd/FilteredRDD.scala @@ -9,6 +9,6 @@ class FilteredRDD[T: ClassManifest]( f: T => Boolean) extends RDD[T](prev) { - override def splits = firstParent[T].splits + override def getSplits = firstParent[T].splits override def compute(split: Split) = firstParent[T].iterator(split).filter(f) } \ No newline at end of file diff --git a/core/src/main/scala/spark/rdd/FlatMappedRDD.scala b/core/src/main/scala/spark/rdd/FlatMappedRDD.scala index cdc8ecdcfe..1160e68bb8 100644 --- a/core/src/main/scala/spark/rdd/FlatMappedRDD.scala +++ b/core/src/main/scala/spark/rdd/FlatMappedRDD.scala @@ -9,6 +9,6 @@ class FlatMappedRDD[U: ClassManifest, T: ClassManifest]( f: T => TraversableOnce[U]) extends RDD[U](prev) { - override def splits = firstParent[T].splits + override def getSplits = firstParent[T].splits override def compute(split: Split) = firstParent[T].iterator(split).flatMap(f) } diff --git a/core/src/main/scala/spark/rdd/GlommedRDD.scala b/core/src/main/scala/spark/rdd/GlommedRDD.scala index df6f61c69d..4fab1a56fa 100644 
--- a/core/src/main/scala/spark/rdd/GlommedRDD.scala +++ b/core/src/main/scala/spark/rdd/GlommedRDD.scala @@ -6,6 +6,6 @@ import spark.Split private[spark] class GlommedRDD[T: ClassManifest](prev: RDD[T]) extends RDD[Array[T]](prev) { - override def splits = firstParent[T].splits + override def getSplits = firstParent[T].splits override def compute(split: Split) = Array(firstParent[T].iterator(split).toArray).iterator } \ No newline at end of file diff --git a/core/src/main/scala/spark/rdd/HadoopRDD.scala b/core/src/main/scala/spark/rdd/HadoopRDD.scala index af54f23ebc..fce190b860 100644 --- a/core/src/main/scala/spark/rdd/HadoopRDD.scala +++ b/core/src/main/scala/spark/rdd/HadoopRDD.scala @@ -67,7 +67,7 @@ class HadoopRDD[K, V]( .asInstanceOf[InputFormat[K, V]] } - override def splits = splits_ + override def getSplits = splits_ override def compute(theSplit: Split) = new Iterator[(K, V)] { val split = theSplit.asInstanceOf[HadoopSplit] @@ -110,7 +110,7 @@ class HadoopRDD[K, V]( } } - override def preferredLocations(split: Split) = { + override def getPreferredLocations(split: Split) = { // TODO: Filtering out "localhost" in case of file:// URLs val hadoopSplit = split.asInstanceOf[HadoopSplit] hadoopSplit.inputSplit.value.getLocations.filter(_ != "localhost") diff --git a/core/src/main/scala/spark/rdd/MapPartitionsRDD.scala b/core/src/main/scala/spark/rdd/MapPartitionsRDD.scala index 23b9fb023b..5f4acee041 100644 --- a/core/src/main/scala/spark/rdd/MapPartitionsRDD.scala +++ b/core/src/main/scala/spark/rdd/MapPartitionsRDD.scala @@ -12,6 +12,6 @@ class MapPartitionsRDD[U: ClassManifest, T: ClassManifest]( override val partitioner = if (preservesPartitioning) firstParent[T].partitioner else None - override def splits = firstParent[T].splits + override def getSplits = firstParent[T].splits override def compute(split: Split) = f(firstParent[T].iterator(split)) } \ No newline at end of file diff --git a/core/src/main/scala/spark/rdd/MapPartitionsWithSplitRDD.scala b/core/src/main/scala/spark/rdd/MapPartitionsWithSplitRDD.scala index 41955c1d7a..f0f3f2c7c7 100644 --- a/core/src/main/scala/spark/rdd/MapPartitionsWithSplitRDD.scala +++ b/core/src/main/scala/spark/rdd/MapPartitionsWithSplitRDD.scala @@ -14,6 +14,6 @@ class MapPartitionsWithSplitRDD[U: ClassManifest, T: ClassManifest]( f: (Int, Iterator[T]) => Iterator[U]) extends RDD[U](prev) { - override def splits = firstParent[T].splits + override def getSplits = firstParent[T].splits override def compute(split: Split) = f(split.index, firstParent[T].iterator(split)) } \ No newline at end of file diff --git a/core/src/main/scala/spark/rdd/MappedRDD.scala b/core/src/main/scala/spark/rdd/MappedRDD.scala index 6f8cb21fd3..44b542db93 100644 --- a/core/src/main/scala/spark/rdd/MappedRDD.scala +++ b/core/src/main/scala/spark/rdd/MappedRDD.scala @@ -9,6 +9,6 @@ class MappedRDD[U: ClassManifest, T: ClassManifest]( f: T => U) extends RDD[U](prev) { - override def splits = firstParent[T].splits + override def getSplits = firstParent[T].splits override def compute(split: Split) = firstParent[T].iterator(split).map(f) } \ No newline at end of file diff --git a/core/src/main/scala/spark/rdd/NewHadoopRDD.scala b/core/src/main/scala/spark/rdd/NewHadoopRDD.scala index c12df5839e..91f89e3c75 100644 --- a/core/src/main/scala/spark/rdd/NewHadoopRDD.scala +++ b/core/src/main/scala/spark/rdd/NewHadoopRDD.scala @@ -55,7 +55,7 @@ class NewHadoopRDD[K, V]( result } - override def splits = splits_ + override def getSplits = splits_ override def compute(theSplit: 
Split) = new Iterator[(K, V)] { val split = theSplit.asInstanceOf[NewHadoopSplit] @@ -89,7 +89,7 @@ class NewHadoopRDD[K, V]( } } - override def preferredLocations(split: Split) = { + override def getPreferredLocations(split: Split) = { val theSplit = split.asInstanceOf[NewHadoopSplit] theSplit.serializableHadoopSplit.value.getLocations.filter(_ != "localhost") } diff --git a/core/src/main/scala/spark/rdd/PipedRDD.scala b/core/src/main/scala/spark/rdd/PipedRDD.scala index d2047375ea..a88929e55e 100644 --- a/core/src/main/scala/spark/rdd/PipedRDD.scala +++ b/core/src/main/scala/spark/rdd/PipedRDD.scala @@ -29,7 +29,7 @@ class PipedRDD[T: ClassManifest]( // using a standard StringTokenizer (i.e. by spaces) def this(prev: RDD[T], command: String) = this(prev, PipedRDD.tokenize(command)) - override def splits = firstParent[T].splits + override def getSplits = firstParent[T].splits override def compute(split: Split): Iterator[String] = { val pb = new ProcessBuilder(command) diff --git a/core/src/main/scala/spark/rdd/SampledRDD.scala b/core/src/main/scala/spark/rdd/SampledRDD.scala index c622e14a66..da6f65765c 100644 --- a/core/src/main/scala/spark/rdd/SampledRDD.scala +++ b/core/src/main/scala/spark/rdd/SampledRDD.scala @@ -26,9 +26,9 @@ class SampledRDD[T: ClassManifest]( firstParent[T].splits.map(x => new SampledRDDSplit(x, rg.nextInt)) } - override def splits = splits_.asInstanceOf[Array[Split]] + override def getSplits = splits_.asInstanceOf[Array[Split]] - override def preferredLocations(split: Split) = + override def getPreferredLocations(split: Split) = firstParent[T].preferredLocations(split.asInstanceOf[SampledRDDSplit].prev) override def compute(splitIn: Split) = { @@ -51,8 +51,7 @@ class SampledRDD[T: ClassManifest]( } } - override def changeDependencies(newRDD: RDD[_]) { - dependencies_ = List(new OneToOneDependency(newRDD.asInstanceOf[RDD[Any]])) - splits_ = newRDD.splits + override def clearDependencies() { + splits_ = null } } diff --git a/core/src/main/scala/spark/rdd/ShuffledRDD.scala b/core/src/main/scala/spark/rdd/ShuffledRDD.scala index a9dd3f35ed..2caf33c21e 100644 --- a/core/src/main/scala/spark/rdd/ShuffledRDD.scala +++ b/core/src/main/scala/spark/rdd/ShuffledRDD.scala @@ -25,15 +25,14 @@ class ShuffledRDD[K, V]( @transient var splits_ = Array.tabulate[Split](part.numPartitions)(i => new ShuffledRDDSplit(i)) - override def splits = splits_ + override def getSplits = splits_ override def compute(split: Split): Iterator[(K, V)] = { val shuffledId = dependencies.head.asInstanceOf[ShuffleDependency[K, V]].shuffleId SparkEnv.get.shuffleFetcher.fetch[K, V](shuffledId, split.index) } - override def changeDependencies(newRDD: RDD[_]) { - dependencies_ = List(new OneToOneDependency(newRDD.asInstanceOf[RDD[Any]])) - splits_ = newRDD.splits + override def clearDependencies() { + splits_ = null } } diff --git a/core/src/main/scala/spark/rdd/UnionRDD.scala b/core/src/main/scala/spark/rdd/UnionRDD.scala index a84867492b..05ed6172d1 100644 --- a/core/src/main/scala/spark/rdd/UnionRDD.scala +++ b/core/src/main/scala/spark/rdd/UnionRDD.scala @@ -37,7 +37,7 @@ class UnionRDD[T: ClassManifest]( array } - override def splits = splits_ + override def getSplits = splits_ @transient var deps_ = { val deps = new ArrayBuffer[Dependency[_]] @@ -49,19 +49,16 @@ class UnionRDD[T: ClassManifest]( deps.toList } - // Pre-checkpoint dependencies deps_ should be transient (deps_) - // but post-checkpoint dependencies must not be transient (dependencies_) - override def dependencies = if (isCheckpointed) 
dependencies_ else deps_ + override def getDependencies = deps_ override def compute(s: Split): Iterator[T] = s.asInstanceOf[UnionSplit[T]].iterator() - override def preferredLocations(s: Split): Seq[String] = + override def getPreferredLocations(s: Split): Seq[String] = s.asInstanceOf[UnionSplit[T]].preferredLocations() - override def changeDependencies(newRDD: RDD[_]) { + override def clearDependencies() { deps_ = null - dependencies_ = List(new OneToOneDependency(newRDD)) - splits_ = newRDD.splits + splits_ = null rdds = null } } diff --git a/core/src/main/scala/spark/scheduler/DAGScheduler.scala b/core/src/main/scala/spark/scheduler/DAGScheduler.scala index 33d35b35d1..4b2570fa2b 100644 --- a/core/src/main/scala/spark/scheduler/DAGScheduler.scala +++ b/core/src/main/scala/spark/scheduler/DAGScheduler.scala @@ -575,7 +575,7 @@ class DAGScheduler(taskSched: TaskScheduler) extends TaskSchedulerListener with return cached } // If the RDD has some placement preferences (as is the case for input RDDs), get those - val rddPrefs = rdd.getPreferredLocations(rdd.splits(partition)).toList + val rddPrefs = rdd.preferredLocations(rdd.splits(partition)).toList if (rddPrefs != Nil) { return rddPrefs } diff --git a/core/src/test/scala/spark/CheckpointSuite.scala b/core/src/test/scala/spark/CheckpointSuite.scala index 0bffedb8db..19626d2450 100644 --- a/core/src/test/scala/spark/CheckpointSuite.scala +++ b/core/src/test/scala/spark/CheckpointSuite.scala @@ -57,7 +57,7 @@ class CheckpointSuite extends FunSuite with BeforeAndAfter with Logging { assert(sc.objectFile[Int](parCollection.getCheckpointFile.get).collect() === result) assert(parCollection.dependencies != Nil) assert(parCollection.splits.length === numSplits) - assert(parCollection.splits.toList === parCollection.checkpointData.get.cpRDDSplits.toList) + assert(parCollection.splits.toList === parCollection.checkpointData.get.getSplits.toList) assert(parCollection.collect() === result) } @@ -72,7 +72,7 @@ class CheckpointSuite extends FunSuite with BeforeAndAfter with Logging { assert(sc.objectFile[String](blockRDD.getCheckpointFile.get).collect() === result) assert(blockRDD.dependencies != Nil) assert(blockRDD.splits.length === numSplits) - assert(blockRDD.splits.toList === blockRDD.checkpointData.get.cpRDDSplits.toList) + assert(blockRDD.splits.toList === blockRDD.checkpointData.get.getSplits.toList) assert(blockRDD.collect() === result) } @@ -191,7 +191,7 @@ class CheckpointSuite extends FunSuite with BeforeAndAfter with Logging { assert(operatedRDD.dependencies.head.rdd != parentRDD) // Test whether the splits have been changed to the new Hadoop splits - assert(operatedRDD.splits.toList === operatedRDD.checkpointData.get.cpRDDSplits.toList) + assert(operatedRDD.splits.toList === operatedRDD.checkpointData.get.getSplits.toList) // Test whether the number of splits is same as before assert(operatedRDD.splits.length === numSplits) From f9c5b0a6fe8d728e16c60c0cf51ced0054e3a387 Mon Sep 17 00:00:00 2001 From: Tathagata Das Date: Thu, 20 Dec 2012 11:52:23 -0800 Subject: [PATCH 149/226] Changed checkpoint writing and reading process. 
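Instead of saving the RDD with saveAsObjectFile and re-reading it through objectFile (with an
optional CoalescedRDD to fix up the split count), the checkpoint data is now written one file
per split by CheckpointRDD.writeToFile and read back as a CheckpointRDD. The round trip can be
sketched as follows, in the spirit of the CheckpointRDD.main test added below (a local master,
the object name, and a writable scratch directory are assumptions for illustration, not part of
this patch):

    import spark.SparkContext
    import spark.rdd.CheckpointRDD

    object CheckpointRoundTrip {
      def main(args: Array[String]) {
        val sc = new SparkContext("local", "CheckpointRoundTrip")
        val rdd = sc.makeRDD(1 to 1000, 4)
        val path = "/tmp/checkpoint-round-trip"  // assumed scratch directory
        // Writes one "part-NNNNN" file per split using the configured Spark serializer
        sc.runJob(rdd, CheckpointRDD.writeToFile(path) _)
        // Reads the files back as an RDD with the same number of splits
        val restored = new CheckpointRDD[Int](sc, path)
        assert(restored.splits.length == rdd.splits.length)
        assert(restored.collect.toList == rdd.collect.toList)
        sc.stop()
      }
    }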
--- .../main/scala/spark/RDDCheckpointData.scala | 27 +--- .../main/scala/spark/rdd/CheckpointRDD.scala | 117 ++++++++++++++++++ core/src/main/scala/spark/rdd/HadoopRDD.scala | 5 +- 3 files changed, 124 insertions(+), 25 deletions(-) create mode 100644 core/src/main/scala/spark/rdd/CheckpointRDD.scala diff --git a/core/src/main/scala/spark/RDDCheckpointData.scala b/core/src/main/scala/spark/RDDCheckpointData.scala index e4c0912cdc..1aa9b9aa1e 100644 --- a/core/src/main/scala/spark/RDDCheckpointData.scala +++ b/core/src/main/scala/spark/RDDCheckpointData.scala @@ -1,7 +1,7 @@ package spark import org.apache.hadoop.fs.Path -import rdd.CoalescedRDD +import rdd.{CheckpointRDD, CoalescedRDD} import scheduler.{ResultTask, ShuffleMapTask} /** @@ -55,30 +55,13 @@ extends Logging with Serializable { } // Save to file, and reload it as an RDD - val file = new Path(rdd.context.checkpointDir, "rdd-" + rdd.id).toString - rdd.saveAsObjectFile(file) - - val newRDD = { - val hadoopRDD = rdd.context.objectFile[T](file, rdd.splits.size) - - val oldSplits = rdd.splits.size - val newSplits = hadoopRDD.splits.size - - logDebug("RDD splits = " + oldSplits + " --> " + newSplits) - if (newSplits < oldSplits) { - throw new Exception("# splits after checkpointing is less than before " + - "[" + oldSplits + " --> " + newSplits) - } else if (newSplits > oldSplits) { - new CoalescedRDD(hadoopRDD, rdd.splits.size) - } else { - hadoopRDD - } - } - logDebug("New RDD has " + newRDD.splits.size + " splits") + val path = new Path(rdd.context.checkpointDir, "rdd-" + rdd.id).toString + rdd.context.runJob(rdd, CheckpointRDD.writeToFile(path) _) + val newRDD = new CheckpointRDD[T](rdd.context, path) // Change the dependencies and splits of the RDD RDDCheckpointData.synchronized { - cpFile = Some(file) + cpFile = Some(path) cpRDD = Some(newRDD) rdd.changeDependencies(newRDD) cpState = Checkpointed diff --git a/core/src/main/scala/spark/rdd/CheckpointRDD.scala b/core/src/main/scala/spark/rdd/CheckpointRDD.scala new file mode 100644 index 0000000000..c673ab6aaa --- /dev/null +++ b/core/src/main/scala/spark/rdd/CheckpointRDD.scala @@ -0,0 +1,117 @@ +package spark.rdd + +import spark._ +import org.apache.hadoop.mapred.{FileInputFormat, SequenceFileInputFormat, JobConf, Reporter} +import org.apache.hadoop.conf.Configuration +import org.apache.hadoop.io.{NullWritable, BytesWritable} +import org.apache.hadoop.util.ReflectionUtils +import org.apache.hadoop.fs.Path +import java.io.{File, IOException, EOFException} +import java.text.NumberFormat + +private[spark] class CheckpointRDDSplit(idx: Int, val splitFile: String) extends Split { + override val index: Int = idx +} + +class CheckpointRDD[T: ClassManifest](sc: SparkContext, checkpointPath: String) + extends RDD[T](sc, Nil) { + + @transient val path = new Path(checkpointPath) + @transient val fs = path.getFileSystem(new Configuration()) + + @transient val splits_ : Array[Split] = { + val splitFiles = fs.listStatus(path).map(_.getPath.toString).filter(_.contains("part-")).sorted + splitFiles.zipWithIndex.map(x => new CheckpointRDDSplit(x._2, x._1)).toArray + } + + override def getSplits = splits_ + + override def getPreferredLocations(split: Split): Seq[String] = { + val status = fs.getFileStatus(path) + val locations = fs.getFileBlockLocations(status, 0, status.getLen) + locations.firstOption.toList.flatMap(_.getHosts).filter(_ != "localhost") + } + + override def compute(split: Split): Iterator[T] = { + CheckpointRDD.readFromFile(split.asInstanceOf[CheckpointRDDSplit].splitFile) + } + 
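+  // Note: compute() above deserializes a single "part-NNNNN" checkpoint file per split,
+  // using the same Spark serializer that the companion object's writeToFile (further
+  // below in this file) used when the checkpoint was written.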
+ override def checkpoint() { + // Do nothing. Hadoop RDD should not be checkpointed. + } +} + +private[spark] object CheckpointRDD extends Logging { + + def splitIdToFileName(splitId: Int): String = { + val numfmt = NumberFormat.getInstance() + numfmt.setMinimumIntegerDigits(5) + numfmt.setGroupingUsed(false) + "part-" + numfmt.format(splitId) + } + + def writeToFile[T](path: String, blockSize: Int = -1)(context: TaskContext, iterator: Iterator[T]) { + val outputDir = new Path(path) + val fs = outputDir.getFileSystem(new Configuration()) + + val finalOutputName = splitIdToFileName(context.splitId) + val finalOutputPath = new Path(outputDir, finalOutputName) + val tempOutputPath = new Path(outputDir, "." + finalOutputName + "-attempt-" + context.attemptId) + + if (fs.exists(tempOutputPath)) { + throw new IOException("Checkpoint failed: temporary path " + + tempOutputPath + " already exists") + } + val bufferSize = System.getProperty("spark.buffer.size", "65536").toInt + + val fileOutputStream = if (blockSize < 0) { + fs.create(tempOutputPath, false, bufferSize) + } else { + // This is mainly for testing purpose + fs.create(tempOutputPath, false, bufferSize, fs.getDefaultReplication, blockSize) + } + val serializer = SparkEnv.get.serializer.newInstance() + val serializeStream = serializer.serializeStream(fileOutputStream) + serializeStream.writeAll(iterator) + fileOutputStream.close() + + if (!fs.rename(tempOutputPath, finalOutputPath)) { + if (!fs.delete(finalOutputPath, true)) { + throw new IOException("Checkpoint failed: failed to delete earlier output of task " + + context.attemptId); + } + if (!fs.rename(tempOutputPath, finalOutputPath)) { + throw new IOException("Checkpoint failed: failed to save output of task: " + + context.attemptId) + } + } + } + + def readFromFile[T](path: String): Iterator[T] = { + val inputPath = new Path(path) + val fs = inputPath.getFileSystem(new Configuration()) + val bufferSize = System.getProperty("spark.buffer.size", "65536").toInt + val fileInputStream = fs.open(inputPath, bufferSize) + val serializer = SparkEnv.get.serializer.newInstance() + val deserializeStream = serializer.deserializeStream(fileInputStream) + deserializeStream.asIterator.asInstanceOf[Iterator[T]] + } + + // Test whether CheckpointRDD generate expected number of splits despite + // each split file having multiple blocks. This needs to be run on a + // cluster (mesos or standalone) using HDFS. + def main(args: Array[String]) { + import spark._ + + val Array(cluster, hdfsPath) = args + val sc = new SparkContext(cluster, "CheckpointRDD Test") + val rdd = sc.makeRDD(1 to 10, 10).flatMap(x => 1 to 10000) + val path = new Path(hdfsPath, "temp") + val fs = path.getFileSystem(new Configuration()) + sc.runJob(rdd, CheckpointRDD.writeToFile(path.toString, 10) _) + val cpRDD = new CheckpointRDD[Int](sc, path.toString) + assert(cpRDD.splits.length == rdd.splits.length, "Number of splits is not the same") + assert(cpRDD.collect.toList == rdd.collect.toList, "Data of splits not the same") + fs.delete(path) + } +} diff --git a/core/src/main/scala/spark/rdd/HadoopRDD.scala b/core/src/main/scala/spark/rdd/HadoopRDD.scala index fce190b860..eca51758e4 100644 --- a/core/src/main/scala/spark/rdd/HadoopRDD.scala +++ b/core/src/main/scala/spark/rdd/HadoopRDD.scala @@ -25,8 +25,7 @@ import spark.Split * A Spark split class that wraps around a Hadoop InputSplit. 
*/ private[spark] class HadoopSplit(rddId: Int, idx: Int, @transient s: InputSplit) - extends Split - with Serializable { + extends Split { val inputSplit = new SerializableWritable[InputSplit](s) @@ -117,6 +116,6 @@ class HadoopRDD[K, V]( } override def checkpoint() { - // Do nothing. Hadoop RDD cannot be checkpointed. + // Do nothing. Hadoop RDD should not be checkpointed. } } From fe777eb77dee3c5bc5a7a332098d27f517ad3fe4 Mon Sep 17 00:00:00 2001 From: Tathagata Das Date: Thu, 20 Dec 2012 13:39:27 -0800 Subject: [PATCH 150/226] Fixed bugs in CheckpointRDD and spark.CheckpointSuite. --- core/src/main/scala/spark/SparkContext.scala | 12 +++--------- core/src/main/scala/spark/rdd/CheckpointRDD.scala | 3 +++ core/src/test/scala/spark/CheckpointSuite.scala | 6 +++--- 3 files changed, 9 insertions(+), 12 deletions(-) diff --git a/core/src/main/scala/spark/SparkContext.scala b/core/src/main/scala/spark/SparkContext.scala index 71ed4ef058..362aa04e66 100644 --- a/core/src/main/scala/spark/SparkContext.scala +++ b/core/src/main/scala/spark/SparkContext.scala @@ -37,9 +37,7 @@ import spark.broadcast._ import spark.deploy.LocalSparkCluster import spark.partial.ApproximateEvaluator import spark.partial.PartialResult -import spark.rdd.HadoopRDD -import spark.rdd.NewHadoopRDD -import spark.rdd.UnionRDD +import rdd.{CheckpointRDD, HadoopRDD, NewHadoopRDD, UnionRDD} import scheduler.{ResultTask, ShuffleMapTask, DAGScheduler, TaskScheduler} import spark.scheduler.local.LocalScheduler import spark.scheduler.cluster.{SparkDeploySchedulerBackend, SchedulerBackend, ClusterScheduler} @@ -368,13 +366,9 @@ class SparkContext( protected[spark] def checkpointFile[T: ClassManifest]( - path: String, - minSplits: Int = defaultMinSplits + path: String ): RDD[T] = { - val rdd = objectFile[T](path, minSplits) - rdd.checkpointData = Some(new RDDCheckpointData(rdd)) - rdd.checkpointData.get.cpFile = Some(path) - rdd + new CheckpointRDD[T](this, path) } /** Build the union of a list of RDDs. 
*/ diff --git a/core/src/main/scala/spark/rdd/CheckpointRDD.scala b/core/src/main/scala/spark/rdd/CheckpointRDD.scala index c673ab6aaa..fbf8a9ef83 100644 --- a/core/src/main/scala/spark/rdd/CheckpointRDD.scala +++ b/core/src/main/scala/spark/rdd/CheckpointRDD.scala @@ -24,6 +24,9 @@ class CheckpointRDD[T: ClassManifest](sc: SparkContext, checkpointPath: String) splitFiles.zipWithIndex.map(x => new CheckpointRDDSplit(x._2, x._1)).toArray } + checkpointData = Some(new RDDCheckpointData[T](this)) + checkpointData.get.cpFile = Some(checkpointPath) + override def getSplits = splits_ override def getPreferredLocations(split: Split): Seq[String] = { diff --git a/core/src/test/scala/spark/CheckpointSuite.scala b/core/src/test/scala/spark/CheckpointSuite.scala index 19626d2450..6bc667bd4c 100644 --- a/core/src/test/scala/spark/CheckpointSuite.scala +++ b/core/src/test/scala/spark/CheckpointSuite.scala @@ -54,7 +54,7 @@ class CheckpointSuite extends FunSuite with BeforeAndAfter with Logging { parCollection.checkpoint() assert(parCollection.dependencies === Nil) val result = parCollection.collect() - assert(sc.objectFile[Int](parCollection.getCheckpointFile.get).collect() === result) + assert(sc.checkpointFile[Int](parCollection.getCheckpointFile.get).collect() === result) assert(parCollection.dependencies != Nil) assert(parCollection.splits.length === numSplits) assert(parCollection.splits.toList === parCollection.checkpointData.get.getSplits.toList) @@ -69,7 +69,7 @@ class CheckpointSuite extends FunSuite with BeforeAndAfter with Logging { val numSplits = blockRDD.splits.size blockRDD.checkpoint() val result = blockRDD.collect() - assert(sc.objectFile[String](blockRDD.getCheckpointFile.get).collect() === result) + assert(sc.checkpointFile[String](blockRDD.getCheckpointFile.get).collect() === result) assert(blockRDD.dependencies != Nil) assert(blockRDD.splits.length === numSplits) assert(blockRDD.splits.toList === blockRDD.checkpointData.get.getSplits.toList) @@ -185,7 +185,7 @@ class CheckpointSuite extends FunSuite with BeforeAndAfter with Logging { val (rddSizeAfterCheckpoint, splitSizeAfterCheckpoint) = getSerializedSizes(operatedRDD) // Test whether the checkpoint file has been created - assert(sc.objectFile[U](operatedRDD.getCheckpointFile.get).collect() === result) + assert(sc.checkpointFile[U](operatedRDD.getCheckpointFile.get).collect() === result) // Test whether dependencies have been changed from its earlier parent RDD assert(operatedRDD.dependencies.head.rdd != parentRDD) From 9ac4cb1c5fd7637ff9936f1ef54fa27f6f6aa214 Mon Sep 17 00:00:00 2001 From: Patrick Wendell Date: Fri, 21 Dec 2012 17:11:39 -0800 Subject: [PATCH 151/226] Adding a Twitter InputDStream with an example --- project/SparkBuild.scala | 5 +- .../spark/streaming/TwitterInputDStream.scala | 59 +++++++++++++++++++ .../streaming/examples/TwitterBasic.scala | 37 ++++++++++++ 3 files changed, 100 insertions(+), 1 deletion(-) create mode 100644 streaming/src/main/scala/spark/streaming/TwitterInputDStream.scala create mode 100644 streaming/src/main/scala/spark/streaming/examples/TwitterBasic.scala diff --git a/project/SparkBuild.scala b/project/SparkBuild.scala index 6ef2ac477a..618c7afc37 100644 --- a/project/SparkBuild.scala +++ b/project/SparkBuild.scala @@ -115,7 +115,8 @@ object SparkBuild extends Build { "Typesafe Repository" at "http://repo.typesafe.com/typesafe/releases/", "JBoss Repository" at "http://repository.jboss.org/nexus/content/repositories/releases/", "Spray Repository" at "http://repo.spray.cc/", - "Cloudera 
Repository" at "https://repository.cloudera.com/artifactory/cloudera-repos/" + "Cloudera Repository" at "https://repository.cloudera.com/artifactory/cloudera-repos/", + "Twitter4J Repository" at "http://twitter4j.org/maven2/" ), libraryDependencies ++= Seq( @@ -133,6 +134,8 @@ object SparkBuild extends Build { "com.typesafe.akka" % "akka-slf4j" % "2.0.3", "it.unimi.dsi" % "fastutil" % "6.4.4", "colt" % "colt" % "1.2.0", + "org.twitter4j" % "twitter4j-core" % "3.0.2", + "org.twitter4j" % "twitter4j-stream" % "3.0.2", "cc.spray" % "spray-can" % "1.0-M2.1", "cc.spray" % "spray-server" % "1.0-M2.1", "org.apache.mesos" % "mesos" % "0.9.0-incubating" diff --git a/streaming/src/main/scala/spark/streaming/TwitterInputDStream.scala b/streaming/src/main/scala/spark/streaming/TwitterInputDStream.scala new file mode 100644 index 0000000000..5d177e96de --- /dev/null +++ b/streaming/src/main/scala/spark/streaming/TwitterInputDStream.scala @@ -0,0 +1,59 @@ +package spark.streaming + +import spark.RDD +import spark.streaming.{Time, InputDStream} +import twitter4j._ +import twitter4j.auth.BasicAuthorization +import collection.mutable.ArrayBuffer +import collection.JavaConversions._ + +/* A stream of Twitter statuses, potentially filtered by one or more keywords. +* +* @constructor create a new Twitter stream using the supplied username and password to authenticate. +* An optional set of string filters can be used to restrict the set of tweets. The Twitter API is +* such that this may return a sampled subset of all tweets during each interval. +*/ +class TwitterInputDStream( + @transient ssc_ : StreamingContext, + username: String, + password: String, + filters: Seq[String] + ) extends InputDStream[Status](ssc_) { + val statuses: ArrayBuffer[Status] = ArrayBuffer() + var twitterStream: TwitterStream = _ + + override def start() = { + twitterStream = new TwitterStreamFactory() + .getInstance(new BasicAuthorization(username, password)) + twitterStream.addListener(new StatusListener { + def onStatus(status: Status) = { + statuses += status + } + // Unimplemented + def onDeletionNotice(statusDeletionNotice: StatusDeletionNotice) {} + def onTrackLimitationNotice(i: Int) {} + def onScrubGeo(l: Long, l1: Long) {} + def onStallWarning(stallWarning: StallWarning) {} + def onException(e: Exception) {} + }) + + val query: FilterQuery = new FilterQuery + if (filters.size > 0) { + query.track(filters.toArray) + twitterStream.filter(query) + } else { + twitterStream.sample() + } + } + + override def stop() = { + twitterStream.shutdown() + } + + override def compute(validTime: Time): Option[RDD[Status]] = { + // Flush the current tweet buffer + val rdd = Some(ssc.sc.parallelize(statuses)) + statuses.foreach(x => statuses -= x) + rdd + } +} diff --git a/streaming/src/main/scala/spark/streaming/examples/TwitterBasic.scala b/streaming/src/main/scala/spark/streaming/examples/TwitterBasic.scala new file mode 100644 index 0000000000..c7e380fbe1 --- /dev/null +++ b/streaming/src/main/scala/spark/streaming/examples/TwitterBasic.scala @@ -0,0 +1,37 @@ +package spark.streaming.examples + +import spark.streaming.StreamingContext._ +import spark.streaming.{TwitterInputDStream, Seconds, StreamingContext} + +object TwitterBasic { + def main(args: Array[String]) { + if (args.length != 3) { + System.err.println("Usage: TwitterBasic ") + System.exit(1) + } + + val Array(master, username, password) = args + + val ssc = new StreamingContext(master, "TwitterBasic", Seconds(2)) + val stream = new TwitterInputDStream(ssc, username, password, 
Seq()) + ssc.graph.addInputStream(stream) + + val hashTags = stream.flatMap( + status => status.getText.split(" ").filter(_.startsWith("#"))) + + // Word count over hashtags + val counts = hashTags.map((_, 1)).reduceByKeyAndWindow(_ + _, Seconds(60)) + + // TODO: Sorts on one node - should do with global sorting once streaming supports it + val topCounts = counts.collect().map(_.sortBy(-_._2).take(5)) + + topCounts.foreachRDD(rdd => { + val topList = rdd.take(1)(0) + println("\nPopular topics in last 60 seconds:") + topList.foreach(t => println("%s (%s tweets)".format(t._1, t._2))) + } + ) + + ssc.start() + } +} From bce84ceabb6e7be92568bc4933410dd095b7936c Mon Sep 17 00:00:00 2001 From: Patrick Wendell Date: Fri, 21 Dec 2012 20:57:46 -0800 Subject: [PATCH 152/226] Minor changes after review and general cleanup. - Added filters to Twitter example - Removed un-used import - Some code clean-up --- .../spark/streaming/TwitterInputDStream.scala | 1 - .../streaming/examples/TwitterBasic.scala | 29 ++++++++++++------- 2 files changed, 19 insertions(+), 11 deletions(-) diff --git a/streaming/src/main/scala/spark/streaming/TwitterInputDStream.scala b/streaming/src/main/scala/spark/streaming/TwitterInputDStream.scala index 5d177e96de..adf1ed15c9 100644 --- a/streaming/src/main/scala/spark/streaming/TwitterInputDStream.scala +++ b/streaming/src/main/scala/spark/streaming/TwitterInputDStream.scala @@ -1,7 +1,6 @@ package spark.streaming import spark.RDD -import spark.streaming.{Time, InputDStream} import twitter4j._ import twitter4j.auth.BasicAuthorization import collection.mutable.ArrayBuffer diff --git a/streaming/src/main/scala/spark/streaming/examples/TwitterBasic.scala b/streaming/src/main/scala/spark/streaming/examples/TwitterBasic.scala index c7e380fbe1..19b3cad6ad 100644 --- a/streaming/src/main/scala/spark/streaming/examples/TwitterBasic.scala +++ b/streaming/src/main/scala/spark/streaming/examples/TwitterBasic.scala @@ -5,15 +5,17 @@ import spark.streaming.{TwitterInputDStream, Seconds, StreamingContext} object TwitterBasic { def main(args: Array[String]) { - if (args.length != 3) { - System.err.println("Usage: TwitterBasic ") + if (args.length < 3) { + System.err.println("Usage: TwitterBasic " + + " [filter1] [filter2] ... 
[filter n]") System.exit(1) } - val Array(master, username, password) = args + val Array(master, username, password) = args.slice(0, 3) + val filters = args.slice(3, args.length) val ssc = new StreamingContext(master, "TwitterBasic", Seconds(2)) - val stream = new TwitterInputDStream(ssc, username, password, Seq()) + val stream = new TwitterInputDStream(ssc, username, password, filters) ssc.graph.addInputStream(stream) val hashTags = stream.flatMap( @@ -21,17 +23,24 @@ object TwitterBasic { // Word count over hashtags val counts = hashTags.map((_, 1)).reduceByKeyAndWindow(_ + _, Seconds(60)) - // TODO: Sorts on one node - should do with global sorting once streaming supports it val topCounts = counts.collect().map(_.sortBy(-_._2).take(5)) + // Print popular hashtags topCounts.foreachRDD(rdd => { - val topList = rdd.take(1)(0) - println("\nPopular topics in last 60 seconds:") - topList.foreach(t => println("%s (%s tweets)".format(t._1, t._2))) - } - ) + if (rdd.count() != 0) { + val topList = rdd.take(1)(0) + println("\nPopular topics in last 60 seconds:") + topList.foreach{case (tag, count) => println("%s (%s tweets)".format(tag, count))} + } + }) + // Print number of tweets in the window + stream.window(Seconds(60)).count().foreachRDD(rdd => + if (rdd.count() != 0) { + println("Window size: %s tweets".format(rdd.take(1)(0))) + } + ) ssc.start() } } From 0bc0a60d3001dd231e13057a838d4b6550e5a2b9 Mon Sep 17 00:00:00 2001 From: Tathagata Das Date: Thu, 27 Dec 2012 15:37:33 -0800 Subject: [PATCH 153/226] Modifications to make sure LocalScheduler terminate cleanly without errors when SparkContext is shutdown, to minimize spurious exception during master failure tests. --- core/src/main/scala/spark/SparkContext.scala | 22 ++++++++++--------- .../scheduler/local/LocalScheduler.scala | 8 +++++-- core/src/test/resources/log4j.properties | 2 +- .../scala/spark/ClosureCleanerSuite.scala | 2 ++ streaming/src/test/resources/log4j.properties | 13 ++++++----- 5 files changed, 29 insertions(+), 18 deletions(-) diff --git a/core/src/main/scala/spark/SparkContext.scala b/core/src/main/scala/spark/SparkContext.scala index caa9a1794b..0c8b0078a3 100644 --- a/core/src/main/scala/spark/SparkContext.scala +++ b/core/src/main/scala/spark/SparkContext.scala @@ -488,17 +488,19 @@ class SparkContext( if (dagScheduler != null) { dagScheduler.stop() dagScheduler = null + taskScheduler = null + // TODO: Cache.stop()? + env.stop() + // Clean up locally linked files + clearFiles() + clearJars() + SparkEnv.set(null) + ShuffleMapTask.clearCache() + ResultTask.clearCache() + logInfo("Successfully stopped SparkContext") + } else { + logInfo("SparkContext already stopped") } - taskScheduler = null - // TODO: Cache.stop()? 
- env.stop() - // Clean up locally linked files - clearFiles() - clearJars() - SparkEnv.set(null) - ShuffleMapTask.clearCache() - ResultTask.clearCache() - logInfo("Successfully stopped SparkContext") } /** diff --git a/core/src/main/scala/spark/scheduler/local/LocalScheduler.scala b/core/src/main/scala/spark/scheduler/local/LocalScheduler.scala index eb20fe41b2..17a0a4b103 100644 --- a/core/src/main/scala/spark/scheduler/local/LocalScheduler.scala +++ b/core/src/main/scala/spark/scheduler/local/LocalScheduler.scala @@ -81,7 +81,10 @@ private[spark] class LocalScheduler(threads: Int, maxFailures: Int, sc: SparkCon val accumUpdates = ser.deserialize[collection.mutable.Map[Long, Any]]( ser.serialize(Accumulators.values)) logInfo("Finished task " + idInJob) - listener.taskEnded(task, Success, resultToReturn, accumUpdates) + + // If the threadpool has not already been shutdown, notify DAGScheduler + if (!Thread.currentThread().isInterrupted) + listener.taskEnded(task, Success, resultToReturn, accumUpdates) } catch { case t: Throwable => { logError("Exception in task " + idInJob, t) @@ -91,7 +94,8 @@ private[spark] class LocalScheduler(threads: Int, maxFailures: Int, sc: SparkCon submitTask(task, idInJob) } else { // TODO: Do something nicer here to return all the way to the user - listener.taskEnded(task, new ExceptionFailure(t), null, null) + if (!Thread.currentThread().isInterrupted) + listener.taskEnded(task, new ExceptionFailure(t), null, null) } } } diff --git a/core/src/test/resources/log4j.properties b/core/src/test/resources/log4j.properties index 4c99e450bc..5ed388e91b 100644 --- a/core/src/test/resources/log4j.properties +++ b/core/src/test/resources/log4j.properties @@ -1,4 +1,4 @@ -# Set everything to be logged to the console +# Set everything to be logged to the file spark-tests.log log4j.rootCategory=INFO, file log4j.appender.file=org.apache.log4j.FileAppender log4j.appender.file.append=false diff --git a/core/src/test/scala/spark/ClosureCleanerSuite.scala b/core/src/test/scala/spark/ClosureCleanerSuite.scala index 7c0334d957..dfa2de80e6 100644 --- a/core/src/test/scala/spark/ClosureCleanerSuite.scala +++ b/core/src/test/scala/spark/ClosureCleanerSuite.scala @@ -47,6 +47,8 @@ object TestObject { val nums = sc.parallelize(Array(1, 2, 3, 4)) val answer = nums.map(_ + x).reduce(_ + _) sc.stop() + // To avoid Akka rebinding to the same port, since it doesn't unbind immediately on shutdown + System.clearProperty("spark.master.port") return answer } } diff --git a/streaming/src/test/resources/log4j.properties b/streaming/src/test/resources/log4j.properties index 02fe16866e..33bafebaab 100644 --- a/streaming/src/test/resources/log4j.properties +++ b/streaming/src/test/resources/log4j.properties @@ -1,8 +1,11 @@ -# Set everything to be logged to the console -log4j.rootCategory=WARN, console -log4j.appender.console=org.apache.log4j.ConsoleAppender -log4j.appender.console.layout=org.apache.log4j.PatternLayout -log4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss.SSS} %p %c{1}: %m%n +# Set everything to be logged to the file streaming-tests.log +log4j.rootCategory=INFO, file +log4j.appender.file=org.apache.log4j.FileAppender +log4j.appender.file.append=false +log4j.appender.file.file=streaming-tests.log +log4j.appender.file.layout=org.apache.log4j.PatternLayout +log4j.appender.file.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss.SSS} %p %c{1}: %m%n # Ignore messages below warning level from Jetty, because it's a bit verbose log4j.logger.org.eclipse.jetty=WARN + From 
9e644402c155b5fc68794a17c36ddd19d3242f4f Mon Sep 17 00:00:00 2001 From: Tathagata Das Date: Sat, 29 Dec 2012 18:31:51 -0800 Subject: [PATCH 154/226] Improved jekyll and scala docs. Made many classes and method private to remove them from scala docs. --- core/src/main/scala/spark/RDD.scala | 1 - docs/_plugins/copy_api_dirs.rb | 4 +- docs/streaming-programming-guide.md | 58 ++-- .../scala/spark/streaming/Checkpoint.scala | 5 +- .../main/scala/spark/streaming/DStream.scala | 253 +++++++++++------- .../spark/streaming/FlumeInputDStream.scala | 2 +- .../main/scala/spark/streaming/Interval.scala | 1 + .../src/main/scala/spark/streaming/Job.scala | 2 + .../scala/spark/streaming/JobManager.scala | 1 + .../spark/streaming/NetworkInputDStream.scala | 8 +- .../spark/streaming/NetworkInputTracker.scala | 8 +- .../streaming/PairDStreamFunctions.scala | 4 +- .../scala/spark/streaming/Scheduler.scala | 7 +- .../spark/streaming/StreamingContext.scala | 43 ++- .../spark/streaming/examples/GrepRaw.scala | 2 +- .../streaming/examples/TopKWordCountRaw.scala | 2 +- .../streaming/examples/WordCountRaw.scala | 2 +- .../examples/clickstream/PageViewStream.scala | 2 +- .../scala/spark/streaming/TestSuiteBase.scala | 2 +- 19 files changed, 236 insertions(+), 171 deletions(-) diff --git a/core/src/main/scala/spark/RDD.scala b/core/src/main/scala/spark/RDD.scala index 59e50a0b6b..1574533430 100644 --- a/core/src/main/scala/spark/RDD.scala +++ b/core/src/main/scala/spark/RDD.scala @@ -101,7 +101,6 @@ abstract class RDD[T: ClassManifest]( val partitioner: Option[Partitioner] = None - // ======================================================================= // Methods and fields available on all RDDs // ======================================================================= diff --git a/docs/_plugins/copy_api_dirs.rb b/docs/_plugins/copy_api_dirs.rb index e61c105449..7654511eeb 100644 --- a/docs/_plugins/copy_api_dirs.rb +++ b/docs/_plugins/copy_api_dirs.rb @@ -2,7 +2,7 @@ require 'fileutils' include FileUtils if ENV['SKIP_SCALADOC'] != '1' - projects = ["core", "examples", "repl", "bagel"] + projects = ["core", "examples", "repl", "bagel", "streaming"] puts "Moving to project root and building scaladoc." curr_dir = pwd @@ -11,7 +11,7 @@ if ENV['SKIP_SCALADOC'] != '1' puts "Running sbt/sbt doc from " + pwd + "; this may take a few minutes..." puts `sbt/sbt doc` - puts "moving back into docs dir." + puts "Moving back into docs dir." cd("docs") # Copy over the scaladoc from each project into the docs directory. diff --git a/docs/streaming-programming-guide.md b/docs/streaming-programming-guide.md index 90916545bc..7c421ac70f 100644 --- a/docs/streaming-programming-guide.md +++ b/docs/streaming-programming-guide.md @@ -2,33 +2,44 @@ layout: global title: Streaming (Alpha) Programming Guide --- + +{:toc} + +# Overview +A Spark Streaming application is very similar to a Spark application; it consists of a *driver program* that runs the user's `main` function and continuous executes various *parallel operations* on input streams of data. The main abstraction Spark Streaming provides is a *discretized stream* (DStream), which is a continuous sequence of RDDs (distributed collection of elements) representing a continuous stream of data. DStreams can created from live incoming data (such as data from a socket, Kafka, etc.) or it can be generated by transformation of existing DStreams using parallel operators like map, reduce, and window. 
The basic processing model is as follows: +(i) While a Spark Streaming driver program is running, the system receives data from various sources and and divides the data into batches. Each batch of data is treated as a RDD, that is a immutable and parallel collection of data. These input data RDDs are automatically persisted in memory (serialized by default) and replicated to two nodes for fault-tolerance. This sequence of RDDs is collectively referred to as an InputDStream. +(ii) Data received by InputDStreams are processed processed using DStream operations. Since all data is represented as RDDs and all DStream operations as RDD operations, data is automatically recovered in the event of node failures. + +This guide shows some how to start programming with DStreams. + # Initializing Spark Streaming The first thing a Spark Streaming program must do is create a `StreamingContext` object, which tells Spark how to access a cluster. A `StreamingContext` can be created from an existing `SparkContext`, or directly: {% highlight scala %} -new StreamingContext(master, jobName, [sparkHome], [jars]) -new StreamingContext(sparkContext) +import spark.SparkContext +import SparkContext._ + +new StreamingContext(master, frameworkName, batchDuration) +new StreamingContext(sparkContext, batchDuration) {% endhighlight %} -Once a context is instantiated, the batch interval must be set: +The `master` parameter is either the [Mesos master URL](running-on-mesos.html) (for running on a cluster)or the special "local" string (for local mode) that is used to create a Spark Context. For more information about this please refer to the [Spark programming guide](scala-programming-guide.html). + + +# Creating Input Sources - InputDStreams +The StreamingContext is used to creating InputDStreams from input sources: {% highlight scala %} -context.setBatchDuration(Milliseconds(2000)) +context.neworkStream(host, port) // Creates a stream that uses a TCP socket to read data from : +context.flumeStream(host, port) // Creates a stream populated by a Flume flow {% endhighlight %} +A complete list of input sources is available in the [DStream API doc](api/streaming/index.html#spark.streaming.StreamingContext). -# DStreams - Discretized Streams -The primary abstraction in Spark Streaming is a DStream. A DStream represents distributed collection which is computed periodically according to a specified batch interval. DStream's can be chained together to create complex chains of transformation on streaming data. DStreams can be created by operating on existing DStreams or from an input source. To creating DStreams from an input source, use the StreamingContext: - -{% highlight scala %} -context.neworkStream(host, port) // A stream that reads from a socket -context.flumeStream(hosts, ports) // A stream populated by a Flume flow -{% endhighlight %} - -# DStream Operators +## DStream Operations Once an input stream has been created, you can transform it using _stream operators_. Most of these operators return new DStreams which you can further transform. Eventually, you'll need to call an _output operator_, which forces evaluation of the stream by writing data out to an external source. -## Transformations +### Transformations DStreams support many of the transformations available on normal Spark RDD's: @@ -73,20 +84,13 @@ DStreams support many of the transformations available on normal Spark RDD's: cogroup(otherStream, [numTasks]) When called on streams of type (K, V) and (K, W), returns a stream of (K, Seq[V], Seq[W]) tuples. 
This operation is also called groupWith. - - -DStreams also support the following additional transformations: - -
      reduce(func) Create a new single-element stream by aggregating the elements of the stream using a function func (which takes two arguments and returns one). The function should be associative so that it can be computed correctly in parallel.
      - -## Windowed Transformations -Spark streaming features windowed computations, which allow you to report statistics over a sliding window of data. All window functions take a windowTime, which represents the width of the window and a slideTime, which represents the frequency during which the window is calculated. +Spark Streaming features windowed computations, which allow you to report statistics over a sliding window of data. All window functions take a windowTime, which represents the width of the window and a slideTime, which represents the frequency during which the window is calculated. @@ -128,7 +132,7 @@ Spark streaming features windowed computations, which allow you to report statis
      Transformation | Meaning
      -## Output Operators +### Output Operators When an output operator is called, it triggers the computation of a stream. Currently the following output operators are defined: @@ -140,22 +144,22 @@ When an output operator is called, it triggers the computation of a stream. Curr - + - + - + - + diff --git a/streaming/src/main/scala/spark/streaming/Checkpoint.scala b/streaming/src/main/scala/spark/streaming/Checkpoint.scala index 770f7b0cc0..11a7232d7b 100644 --- a/streaming/src/main/scala/spark/streaming/Checkpoint.scala +++ b/streaming/src/main/scala/spark/streaming/Checkpoint.scala @@ -8,6 +8,7 @@ import org.apache.hadoop.conf.Configuration import java.io._ +private[streaming] class Checkpoint(@transient ssc: StreamingContext, val checkpointTime: Time) extends Logging with Serializable { val master = ssc.sc.master @@ -30,6 +31,7 @@ class Checkpoint(@transient ssc: StreamingContext, val checkpointTime: Time) /** * Convenience class to speed up the writing of graph checkpoint to file */ +private[streaming] class CheckpointWriter(checkpointDir: String) extends Logging { val file = new Path(checkpointDir, "graph") val conf = new Configuration() @@ -65,7 +67,7 @@ class CheckpointWriter(checkpointDir: String) extends Logging { } - +private[streaming] object CheckpointReader extends Logging { def read(path: String): Checkpoint = { @@ -103,6 +105,7 @@ object CheckpointReader extends Logging { } } +private[streaming] class ObjectInputStreamWithLoader(inputStream_ : InputStream, loader: ClassLoader) extends ObjectInputStream(inputStream_) { override def resolveClass(desc: ObjectStreamClass): Class[_] = { try { diff --git a/streaming/src/main/scala/spark/streaming/DStream.scala b/streaming/src/main/scala/spark/streaming/DStream.scala index d5048aeed7..3834b57ed3 100644 --- a/streaming/src/main/scala/spark/streaming/DStream.scala +++ b/streaming/src/main/scala/spark/streaming/DStream.scala @@ -21,7 +21,7 @@ import org.apache.hadoop.conf.Configuration * A Discretized Stream (DStream), the basic abstraction in Spark Streaming, is a continuous * sequence of RDDs (of the same type) representing a continuous stream of data (see [[spark.RDD]] * for more details on RDDs). DStreams can either be created from live data (such as, data from - * HDFS. Kafka or Flume) or it can be generated by transformation existing DStreams using operations + * HDFS, Kafka or Flume) or it can be generated by transformation existing DStreams using operations * such as `map`, `window` and `reduceByKeyAndWindow`. While a Spark Streaming program is running, each * DStream periodically generates a RDD, either from live data or by transforming the RDD generated * by a parent DStream. 
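For reference, the renamed operators read as follows; this is only a minimal sketch built from
classes added earlier in this series, and the master URL, Twitter credentials, and the per-user
count computation are placeholders for illustration:

    import spark.SparkContext._
    import spark.streaming.{Seconds, StreamingContext, TwitterInputDStream}

    object TransformForeachSketch {
      def main(args: Array[String]) {
        val Array(master, username, password) = args
        val ssc = new StreamingContext(master, "TransformForeachSketch", Seconds(2))
        val tweets = new TwitterInputDStream(ssc, username, password, Seq())
        ssc.graph.addInputStream(tweets)

        // transform (formerly transformRDD): apply an arbitrary RDD-to-RDD function to each batch
        val userCounts = tweets.transform(rdd =>
          rdd.map(status => (status.getUser.getScreenName, 1)).reduceByKey(_ + _))

        // foreach (formerly foreachRDD): an output operator, so the stream gets materialized
        userCounts.foreach((rdd, time) =>
          println(time + ": " + rdd.count() + " users tweeted"))

        ssc.start()
      }
    }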
@@ -38,33 +38,28 @@ import org.apache.hadoop.conf.Configuration * - A function that is used to generate an RDD after each time interval */ -case class DStreamCheckpointData(rdds: HashMap[Time, Any]) - -abstract class DStream[T: ClassManifest] (@transient var ssc: StreamingContext) -extends Serializable with Logging { +abstract class DStream[T: ClassManifest] ( + @transient protected[streaming] var ssc: StreamingContext + ) extends Serializable with Logging { initLogging() - /** - * ---------------------------------------------- - * Methods that must be implemented by subclasses - * ---------------------------------------------- - */ + // ======================================================================= + // Methods that should be implemented by subclasses of DStream + // ======================================================================= - // Time interval at which the DStream generates an RDD + /** Time interval after which the DStream generates a RDD */ def slideTime: Time - // List of parent DStreams on which this DStream depends on + /** List of parent DStreams on which this DStream depends on */ def dependencies: List[DStream[_]] - // Key method that computes RDD for a valid time + /** Method that generates a RDD for the given time */ def compute (validTime: Time): Option[RDD[T]] - /** - * --------------------------------------- - * Other general fields and methods of DStream - * --------------------------------------- - */ + // ======================================================================= + // Methods and fields available on all DStreams + // ======================================================================= // RDDs generated, marked as protected[streaming] so that testsuites can access it @transient @@ -87,12 +82,15 @@ extends Serializable with Logging { // Reference to whole DStream graph protected[streaming] var graph: DStreamGraph = null - def isInitialized = (zeroTime != null) + protected[streaming] def isInitialized = (zeroTime != null) // Duration for which the DStream requires its parent DStream to remember each RDD created - def parentRememberDuration = rememberDuration + protected[streaming] def parentRememberDuration = rememberDuration - // Set caching level for the RDDs created by this DStream + /** Returns the StreamingContext associated with this DStream */ + def context() = ssc + + /** Persists the RDDs of this DStream with the given storage level */ def persist(level: StorageLevel): DStream[T] = { if (this.isInitialized) { throw new UnsupportedOperationException( @@ -102,11 +100,16 @@ extends Serializable with Logging { this } + /** Persists RDDs of this DStream with the default storage level (MEMORY_ONLY_SER) */ def persist(): DStream[T] = persist(StorageLevel.MEMORY_ONLY_SER) - - // Turn on the default caching level for this RDD + + /** Persists RDDs of this DStream with the default storage level (MEMORY_ONLY_SER) */ def cache(): DStream[T] = persist() + /** + * Enable periodic checkpointing of RDDs of this DStream + * @param interval Time interval after which generated RDD will be checkpointed + */ def checkpoint(interval: Time): DStream[T] = { if (isInitialized) { throw new UnsupportedOperationException( @@ -285,7 +288,7 @@ extends Serializable with Logging { * Generates a SparkStreaming job for the given time. This is an internal method that * should not be called directly. This default implementation creates a job * that materializes the corresponding RDD. Subclasses of DStream may override this - * (eg. PerRDDForEachDStream). + * (eg. 
ForEachDStream). */ protected[streaming] def generateJob(time: Time): Option[Job] = { getOrCompute(time) match { @@ -420,65 +423,96 @@ extends Serializable with Logging { generatedRDDs = new HashMap[Time, RDD[T]] () } - /** - * -------------- - * DStream operations - * -------------- - */ + // ======================================================================= + // DStream operations + // ======================================================================= + + /** Returns a new DStream by applying a function to all elements of this DStream. */ def map[U: ClassManifest](mapFunc: T => U): DStream[U] = { new MappedDStream(this, ssc.sc.clean(mapFunc)) } + /** + * Returns a new DStream by applying a function to all elements of this DStream, + * and then flattening the results + */ def flatMap[U: ClassManifest](flatMapFunc: T => Traversable[U]): DStream[U] = { new FlatMappedDStream(this, ssc.sc.clean(flatMapFunc)) } + /** Returns a new DStream containing only the elements that satisfy a predicate. */ def filter(filterFunc: T => Boolean): DStream[T] = new FilteredDStream(this, filterFunc) + /** + * Return a new DStream in which each RDD is generated by applying glom() to each RDD of + * this DStream. Applying glom() to an RDD coalesces all elements within each partition into + * an array. + */ def glom(): DStream[Array[T]] = new GlommedDStream(this) - def mapPartitions[U: ClassManifest](mapPartFunc: Iterator[T] => Iterator[U]): DStream[U] = { - new MapPartitionedDStream(this, ssc.sc.clean(mapPartFunc)) + /** + * Return a new DStream in which each RDD is generated by applying mapPartitions() to each RDDs + * of this DStream. Applying mapPartitions() to an RDD applies a function to each partition + * of the RDD. + */ + def mapPartitions[U: ClassManifest]( + mapPartFunc: Iterator[T] => Iterator[U], + preservePartitioning: Boolean = false + ): DStream[U] = { + new MapPartitionedDStream(this, ssc.sc.clean(mapPartFunc), preservePartitioning) } - def reduce(reduceFunc: (T, T) => T): DStream[T] = this.map(x => (null, x)).reduceByKey(reduceFunc, 1).map(_._2) + /** + * Returns a new DStream in which each RDD has a single element generated by reducing each RDD + * of this DStream. + */ + def reduce(reduceFunc: (T, T) => T): DStream[T] = + this.map(x => (null, x)).reduceByKey(reduceFunc, 1).map(_._2) + /** + * Returns a new DStream in which each RDD has a single element generated by counting each RDD + * of this DStream. + */ def count(): DStream[Int] = this.map(_ => 1).reduce(_ + _) - - def collect(): DStream[Seq[T]] = this.map(x => (null, x)).groupByKey(1).map(_._2) - def foreach(foreachFunc: T => Unit) { - val newStream = new PerElementForEachDStream(this, ssc.sc.clean(foreachFunc)) + /** + * Applies a function to each RDD in this DStream. This is an output operator, so + * this DStream will be registered as an output stream and therefore materialized. + */ + def foreach(foreachFunc: RDD[T] => Unit) { + foreach((r: RDD[T], t: Time) => foreachFunc(r)) + } + + /** + * Applies a function to each RDD in this DStream. This is an output operator, so + * this DStream will be registered as an output stream and therefore materialized. 
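+   * @param foreachFunc function to apply to each RDD generated by this DStream, along with
+   *                    the time at which that RDD was generated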
+ */ + def foreach(foreachFunc: (RDD[T], Time) => Unit) { + val newStream = new ForEachDStream(this, ssc.sc.clean(foreachFunc)) ssc.registerOutputStream(newStream) newStream } - def foreachRDD(foreachFunc: RDD[T] => Unit) { - foreachRDD((r: RDD[T], t: Time) => foreachFunc(r)) + /** + * Returns a new DStream in which each RDD is generated by applying a function + * on each RDD of this DStream. + */ + def transform[U: ClassManifest](transformFunc: RDD[T] => RDD[U]): DStream[U] = { + transform((r: RDD[T], t: Time) => transformFunc(r)) } - def foreachRDD(foreachFunc: (RDD[T], Time) => Unit) { - val newStream = new PerRDDForEachDStream(this, ssc.sc.clean(foreachFunc)) - ssc.registerOutputStream(newStream) - newStream - } - - def transformRDD[U: ClassManifest](transformFunc: RDD[T] => RDD[U]): DStream[U] = { - transformRDD((r: RDD[T], t: Time) => transformFunc(r)) - } - - def transformRDD[U: ClassManifest](transformFunc: (RDD[T], Time) => RDD[U]): DStream[U] = { + /** + * Returns a new DStream in which each RDD is generated by applying a function + * on each RDD of this DStream. + */ + def transform[U: ClassManifest](transformFunc: (RDD[T], Time) => RDD[U]): DStream[U] = { new TransformedDStream(this, ssc.sc.clean(transformFunc)) } - def toBlockingQueue() = { - val queue = new ArrayBlockingQueue[RDD[T]](10000) - this.foreachRDD(rdd => { - queue.add(rdd) - }) - queue - } - + /** + * Prints the first ten elements of each RDD generated in this DStream. This is an output + * operator, so this DStream will be registered as an output stream and there materialized. + */ def print() { def foreachFunc = (rdd: RDD[T], time: Time) => { val first11 = rdd.take(11) @@ -489,18 +523,42 @@ extends Serializable with Logging { if (first11.size > 10) println("...") println() } - val newStream = new PerRDDForEachDStream(this, ssc.sc.clean(foreachFunc)) + val newStream = new ForEachDStream(this, ssc.sc.clean(foreachFunc)) ssc.registerOutputStream(newStream) } + /** + * Return a new DStream which is computed based on windowed batches of this DStream. + * The new DStream generates RDDs with the same interval as this DStream. + * @param windowTime width of the window; must be a multiple of this DStream's interval. + * @return + */ def window(windowTime: Time): DStream[T] = window(windowTime, this.slideTime) + /** + * Return a new DStream which is computed based on windowed batches of this DStream. + * @param windowTime duration (i.e., width) of the window; + * must be a multiple of this DStream's interval + * @param slideTime sliding interval of the window (i.e., the interval after which + * the new DStream will generate RDDs); must be a multiple of this + * DStream's interval + */ def window(windowTime: Time, slideTime: Time): DStream[T] = { new WindowedDStream(this, windowTime, slideTime) } + /** + * Returns a new DStream which computed based on tumbling window on this DStream. + * This is equivalent to window(batchTime, batchTime). + * @param batchTime tumbling window duration; must be a multiple of this DStream's interval + */ def tumble(batchTime: Time): DStream[T] = window(batchTime, batchTime) + /** + * Returns a new DStream in which each RDD has a single element generated by reducing all + * elements in a window over this DStream. windowTime and slideTime are as defined in the + * window() operation. 
This is equivalent to window(windowTime, slideTime).reduce(reduceFunc) + */ def reduceByWindow(reduceFunc: (T, T) => T, windowTime: Time, slideTime: Time): DStream[T] = { this.window(windowTime, slideTime).reduce(reduceFunc) } @@ -516,17 +574,31 @@ extends Serializable with Logging { .map(_._2) } + /** + * Returns a new DStream in which each RDD has a single element generated by counting the number + * of elements in a window over this DStream. windowTime and slideTime are as defined in the + * window() operation. This is equivalent to window(windowTime, slideTime).count() + */ def countByWindow(windowTime: Time, slideTime: Time): DStream[Int] = { this.map(_ => 1).reduceByWindow(_ + _, _ - _, windowTime, slideTime) } + /** + * Returns a new DStream by unifying data of another DStream with this DStream. + * @param that Another DStream having the same interval (i.e., slideTime) as this DStream. + */ def union(that: DStream[T]): DStream[T] = new UnionDStream[T](Array(this, that)) - def slice(interval: Interval): Seq[RDD[T]] = { + /** + * Returns all the RDDs defined by the Interval object (both end times included) + */ + protected[streaming] def slice(interval: Interval): Seq[RDD[T]] = { slice(interval.beginTime, interval.endTime) } - // Get all the RDDs between fromTime to toTime (both included) + /** + * Returns all the RDDs between 'fromTime' to 'toTime' (both included) + */ def slice(fromTime: Time, toTime: Time): Seq[RDD[T]] = { val rdds = new ArrayBuffer[RDD[T]]() var time = toTime.floor(slideTime) @@ -540,20 +612,26 @@ extends Serializable with Logging { rdds.toSeq } + /** + * Saves each RDD in this DStream as a Sequence file of serialized objects. + */ def saveAsObjectFiles(prefix: String, suffix: String = "") { val saveFunc = (rdd: RDD[T], time: Time) => { val file = rddToFileName(prefix, suffix, time) rdd.saveAsObjectFile(file) } - this.foreachRDD(saveFunc) + this.foreach(saveFunc) } + /** + * Saves each RDD in this DStream as at text file, using string representation of elements. 
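+   * @param prefix prefix of the output path generated for each batch's RDD
+   * @param suffix optional suffix of the generated output path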
+ */ def saveAsTextFiles(prefix: String, suffix: String = "") { val saveFunc = (rdd: RDD[T], time: Time) => { val file = rddToFileName(prefix, suffix, time) rdd.saveAsTextFile(file) } - this.foreachRDD(saveFunc) + this.foreach(saveFunc) } def register() { @@ -561,6 +639,8 @@ extends Serializable with Logging { } } +private[streaming] +case class DStreamCheckpointData(rdds: HashMap[Time, Any]) abstract class InputDStream[T: ClassManifest] (@transient ssc_ : StreamingContext) extends DStream[T](ssc_) { @@ -583,6 +663,7 @@ abstract class InputDStream[T: ClassManifest] (@transient ssc_ : StreamingContex * TODO */ +private[streaming] class MappedDStream[T: ClassManifest, U: ClassManifest] ( parent: DStream[T], mapFunc: T => U @@ -602,6 +683,7 @@ class MappedDStream[T: ClassManifest, U: ClassManifest] ( * TODO */ +private[streaming] class FlatMappedDStream[T: ClassManifest, U: ClassManifest]( parent: DStream[T], flatMapFunc: T => Traversable[U] @@ -621,6 +703,7 @@ class FlatMappedDStream[T: ClassManifest, U: ClassManifest]( * TODO */ +private[streaming] class FilteredDStream[T: ClassManifest]( parent: DStream[T], filterFunc: T => Boolean @@ -640,9 +723,11 @@ class FilteredDStream[T: ClassManifest]( * TODO */ +private[streaming] class MapPartitionedDStream[T: ClassManifest, U: ClassManifest]( parent: DStream[T], - mapPartFunc: Iterator[T] => Iterator[U] + mapPartFunc: Iterator[T] => Iterator[U], + preservePartitioning: Boolean ) extends DStream[U](parent.ssc) { override def dependencies = List(parent) @@ -650,7 +735,7 @@ class MapPartitionedDStream[T: ClassManifest, U: ClassManifest]( override def slideTime: Time = parent.slideTime override def compute(validTime: Time): Option[RDD[U]] = { - parent.getOrCompute(validTime).map(_.mapPartitions[U](mapPartFunc)) + parent.getOrCompute(validTime).map(_.mapPartitions[U](mapPartFunc, preservePartitioning)) } } @@ -659,6 +744,7 @@ class MapPartitionedDStream[T: ClassManifest, U: ClassManifest]( * TODO */ +private[streaming] class GlommedDStream[T: ClassManifest](parent: DStream[T]) extends DStream[Array[T]](parent.ssc) { @@ -676,6 +762,7 @@ class GlommedDStream[T: ClassManifest](parent: DStream[T]) * TODO */ +private[streaming] class ShuffledDStream[K: ClassManifest, V: ClassManifest, C: ClassManifest]( parent: DStream[(K,V)], createCombiner: V => C, @@ -702,6 +789,7 @@ class ShuffledDStream[K: ClassManifest, V: ClassManifest, C: ClassManifest]( * TODO */ +private[streaming] class MapValuesDStream[K: ClassManifest, V: ClassManifest, U: ClassManifest]( parent: DStream[(K, V)], mapValueFunc: V => U @@ -720,7 +808,7 @@ class MapValuesDStream[K: ClassManifest, V: ClassManifest, U: ClassManifest]( /** * TODO */ - +private[streaming] class FlatMapValuesDStream[K: ClassManifest, V: ClassManifest, U: ClassManifest]( parent: DStream[(K, V)], flatMapValueFunc: V => TraversableOnce[U] @@ -779,38 +867,8 @@ class UnionDStream[T: ClassManifest](parents: Array[DStream[T]]) * TODO */ -class PerElementForEachDStream[T: ClassManifest] ( - parent: DStream[T], - foreachFunc: T => Unit - ) extends DStream[Unit](parent.ssc) { - - override def dependencies = List(parent) - - override def slideTime: Time = parent.slideTime - - override def compute(validTime: Time): Option[RDD[Unit]] = None - - override def generateJob(time: Time): Option[Job] = { - parent.getOrCompute(time) match { - case Some(rdd) => - val jobFunc = () => { - val sparkJobFunc = { - (iterator: Iterator[T]) => iterator.foreach(foreachFunc) - } - ssc.sc.runJob(rdd, sparkJobFunc) - } - Some(new Job(time, jobFunc)) - 
case None => None - } - } -} - - -/** - * TODO - */ - -class PerRDDForEachDStream[T: ClassManifest] ( +private[streaming] +class ForEachDStream[T: ClassManifest] ( parent: DStream[T], foreachFunc: (RDD[T], Time) => Unit ) extends DStream[Unit](parent.ssc) { @@ -838,6 +896,7 @@ class PerRDDForEachDStream[T: ClassManifest] ( * TODO */ +private[streaming] class TransformedDStream[T: ClassManifest, U: ClassManifest] ( parent: DStream[T], transformFunc: (RDD[T], Time) => RDD[U] diff --git a/streaming/src/main/scala/spark/streaming/FlumeInputDStream.scala b/streaming/src/main/scala/spark/streaming/FlumeInputDStream.scala index 2959ce4540..5ac7e5b08e 100644 --- a/streaming/src/main/scala/spark/streaming/FlumeInputDStream.scala +++ b/streaming/src/main/scala/spark/streaming/FlumeInputDStream.scala @@ -79,7 +79,7 @@ class SparkFlumeEvent() extends Externalizable { } } -object SparkFlumeEvent { +private[streaming] object SparkFlumeEvent { def fromAvroFlumeEvent(in : AvroFlumeEvent) : SparkFlumeEvent = { val event = new SparkFlumeEvent event.event = in diff --git a/streaming/src/main/scala/spark/streaming/Interval.scala b/streaming/src/main/scala/spark/streaming/Interval.scala index ffb7725ac9..fa0b7ce19d 100644 --- a/streaming/src/main/scala/spark/streaming/Interval.scala +++ b/streaming/src/main/scala/spark/streaming/Interval.scala @@ -1,5 +1,6 @@ package spark.streaming +private[streaming] case class Interval(beginTime: Time, endTime: Time) { def this(beginMs: Long, endMs: Long) = this(Time(beginMs), new Time(endMs)) diff --git a/streaming/src/main/scala/spark/streaming/Job.scala b/streaming/src/main/scala/spark/streaming/Job.scala index 0bcb6fd8dc..67bd8388bc 100644 --- a/streaming/src/main/scala/spark/streaming/Job.scala +++ b/streaming/src/main/scala/spark/streaming/Job.scala @@ -2,6 +2,7 @@ package spark.streaming import java.util.concurrent.atomic.AtomicLong +private[streaming] class Job(val time: Time, func: () => _) { val id = Job.getNewId() def run(): Long = { @@ -14,6 +15,7 @@ class Job(val time: Time, func: () => _) { override def toString = "streaming job " + id + " @ " + time } +private[streaming] object Job { val id = new AtomicLong(0) diff --git a/streaming/src/main/scala/spark/streaming/JobManager.scala b/streaming/src/main/scala/spark/streaming/JobManager.scala index 9bf9251519..fda7264a27 100644 --- a/streaming/src/main/scala/spark/streaming/JobManager.scala +++ b/streaming/src/main/scala/spark/streaming/JobManager.scala @@ -5,6 +5,7 @@ import spark.SparkEnv import java.util.concurrent.Executors +private[streaming] class JobManager(ssc: StreamingContext, numThreads: Int = 1) extends Logging { class JobHandler(ssc: StreamingContext, job: Job) extends Runnable { diff --git a/streaming/src/main/scala/spark/streaming/NetworkInputDStream.scala b/streaming/src/main/scala/spark/streaming/NetworkInputDStream.scala index 4e4e9fc942..4bf13dd50c 100644 --- a/streaming/src/main/scala/spark/streaming/NetworkInputDStream.scala +++ b/streaming/src/main/scala/spark/streaming/NetworkInputDStream.scala @@ -40,10 +40,10 @@ abstract class NetworkInputDStream[T: ClassManifest](@transient ssc_ : Streaming } -sealed trait NetworkReceiverMessage -case class StopReceiver(msg: String) extends NetworkReceiverMessage -case class ReportBlock(blockId: String, metadata: Any) extends NetworkReceiverMessage -case class ReportError(msg: String) extends NetworkReceiverMessage +private[streaming] sealed trait NetworkReceiverMessage +private[streaming] case class StopReceiver(msg: String) extends 
NetworkReceiverMessage +private[streaming] case class ReportBlock(blockId: String, metadata: Any) extends NetworkReceiverMessage +private[streaming] case class ReportError(msg: String) extends NetworkReceiverMessage abstract class NetworkReceiver[T: ClassManifest](val streamId: Int) extends Serializable with Logging { diff --git a/streaming/src/main/scala/spark/streaming/NetworkInputTracker.scala b/streaming/src/main/scala/spark/streaming/NetworkInputTracker.scala index b421f795ee..658498dfc1 100644 --- a/streaming/src/main/scala/spark/streaming/NetworkInputTracker.scala +++ b/streaming/src/main/scala/spark/streaming/NetworkInputTracker.scala @@ -11,10 +11,10 @@ import akka.pattern.ask import akka.util.duration._ import akka.dispatch._ -trait NetworkInputTrackerMessage -case class RegisterReceiver(streamId: Int, receiverActor: ActorRef) extends NetworkInputTrackerMessage -case class AddBlocks(streamId: Int, blockIds: Seq[String], metadata: Any) extends NetworkInputTrackerMessage -case class DeregisterReceiver(streamId: Int, msg: String) extends NetworkInputTrackerMessage +private[streaming] sealed trait NetworkInputTrackerMessage +private[streaming] case class RegisterReceiver(streamId: Int, receiverActor: ActorRef) extends NetworkInputTrackerMessage +private[streaming] case class AddBlocks(streamId: Int, blockIds: Seq[String], metadata: Any) extends NetworkInputTrackerMessage +private[streaming] case class DeregisterReceiver(streamId: Int, msg: String) extends NetworkInputTrackerMessage class NetworkInputTracker( diff --git a/streaming/src/main/scala/spark/streaming/PairDStreamFunctions.scala b/streaming/src/main/scala/spark/streaming/PairDStreamFunctions.scala index 720e63bba0..f9fef14196 100644 --- a/streaming/src/main/scala/spark/streaming/PairDStreamFunctions.scala +++ b/streaming/src/main/scala/spark/streaming/PairDStreamFunctions.scala @@ -281,7 +281,7 @@ extends Serializable { val file = rddToFileName(prefix, suffix, time) rdd.saveAsHadoopFile(file, keyClass, valueClass, outputFormatClass, conf) } - self.foreachRDD(saveFunc) + self.foreach(saveFunc) } def saveAsNewAPIHadoopFiles[F <: NewOutputFormat[K, V]]( @@ -303,7 +303,7 @@ extends Serializable { val file = rddToFileName(prefix, suffix, time) rdd.saveAsNewAPIHadoopFile(file, keyClass, valueClass, outputFormatClass, conf) } - self.foreachRDD(saveFunc) + self.foreach(saveFunc) } private def getKeyClass() = implicitly[ClassManifest[K]].erasure diff --git a/streaming/src/main/scala/spark/streaming/Scheduler.scala b/streaming/src/main/scala/spark/streaming/Scheduler.scala index 014021be61..fd1fa77a24 100644 --- a/streaming/src/main/scala/spark/streaming/Scheduler.scala +++ b/streaming/src/main/scala/spark/streaming/Scheduler.scala @@ -7,11 +7,8 @@ import spark.Logging import scala.collection.mutable.HashMap -sealed trait SchedulerMessage -case class InputGenerated(inputName: String, interval: Interval, reference: AnyRef = null) extends SchedulerMessage - -class Scheduler(ssc: StreamingContext) -extends Logging { +private[streaming] +class Scheduler(ssc: StreamingContext) extends Logging { initLogging() diff --git a/streaming/src/main/scala/spark/streaming/StreamingContext.scala b/streaming/src/main/scala/spark/streaming/StreamingContext.scala index ce47bcb2da..998fea849f 100644 --- a/streaming/src/main/scala/spark/streaming/StreamingContext.scala +++ b/streaming/src/main/scala/spark/streaming/StreamingContext.scala @@ -48,7 +48,7 @@ class StreamingContext private ( this(StreamingContext.createNewSparkContext(master, frameworkName), 
null, batchDuration) /** - * Recreates the StreamingContext from a checkpoint file. + * Re-creates a StreamingContext from a checkpoint file. * @param path Path either to the directory that was specified as the checkpoint directory, or * to the checkpoint file 'graph' or 'graph.bk'. */ @@ -61,7 +61,7 @@ class StreamingContext private ( "both SparkContext and checkpoint as null") } - val isCheckpointPresent = (cp_ != null) + protected[streaming] val isCheckpointPresent = (cp_ != null) val sc: SparkContext = { if (isCheckpointPresent) { @@ -71,9 +71,9 @@ class StreamingContext private ( } } - val env = SparkEnv.get + protected[streaming] val env = SparkEnv.get - val graph: DStreamGraph = { + protected[streaming] val graph: DStreamGraph = { if (isCheckpointPresent) { cp_.graph.setContext(this) cp_.graph.restoreCheckpointData() @@ -86,10 +86,10 @@ class StreamingContext private ( } } - private[streaming] val nextNetworkInputStreamId = new AtomicInteger(0) - private[streaming] var networkInputTracker: NetworkInputTracker = null + protected[streaming] val nextNetworkInputStreamId = new AtomicInteger(0) + protected[streaming] var networkInputTracker: NetworkInputTracker = null - private[streaming] var checkpointDir: String = { + protected[streaming] var checkpointDir: String = { if (isCheckpointPresent) { sc.setCheckpointDir(StreamingContext.getSparkCheckpointDir(cp_.checkpointDir), true) cp_.checkpointDir @@ -98,9 +98,9 @@ class StreamingContext private ( } } - private[streaming] var checkpointInterval: Time = if (isCheckpointPresent) cp_.checkpointInterval else null - private[streaming] var receiverJobThread: Thread = null - private[streaming] var scheduler: Scheduler = null + protected[streaming] var checkpointInterval: Time = if (isCheckpointPresent) cp_.checkpointInterval else null + protected[streaming] var receiverJobThread: Thread = null + protected[streaming] var scheduler: Scheduler = null def remember(duration: Time) { graph.remember(duration) @@ -117,11 +117,11 @@ class StreamingContext private ( } } - private[streaming] def getInitialCheckpoint(): Checkpoint = { + protected[streaming] def getInitialCheckpoint(): Checkpoint = { if (isCheckpointPresent) cp_ else null } - private[streaming] def getNewNetworkStreamId() = nextNetworkInputStreamId.getAndIncrement() + protected[streaming] def getNewNetworkStreamId() = nextNetworkInputStreamId.getAndIncrement() /** * Create an input stream that pulls messages form a Kafka Broker. @@ -188,7 +188,7 @@ class StreamingContext private ( } /** - * This function creates a input stream that monitors a Hadoop-compatible filesystem + * Creates a input stream that monitors a Hadoop-compatible filesystem * for new files and executes the necessary processing on them. */ def fileStream[ @@ -206,7 +206,7 @@ class StreamingContext private ( } /** - * This function create a input stream from an queue of RDDs. In each batch, + * Creates a input stream from an queue of RDDs. In each batch, * it will process either one or all of the RDDs returned by the queue */ def queueStream[T: ClassManifest]( @@ -231,22 +231,21 @@ class StreamingContext private ( } /** - * This function registers a InputDStream as an input stream that will be - * started (InputDStream.start() called) to get the input data streams. + * Registers an input stream that will be started (InputDStream.start() called) to get the + * input data. 
*/ def registerInputStream(inputStream: InputDStream[_]) { graph.addInputStream(inputStream) } /** - * This function registers a DStream as an output stream that will be - * computed every interval. + * Registers an output stream that will be computed every interval */ def registerOutputStream(outputStream: DStream[_]) { graph.addOutputStream(outputStream) } - def validate() { + protected def validate() { assert(graph != null, "Graph is null") graph.validate() @@ -304,7 +303,7 @@ class StreamingContext private ( object StreamingContext { - def createNewSparkContext(master: String, frameworkName: String): SparkContext = { + protected[streaming] def createNewSparkContext(master: String, frameworkName: String): SparkContext = { // Set the default cleaner delay to an hour if not already set. // This should be sufficient for even 1 second interval. @@ -318,7 +317,7 @@ object StreamingContext { new PairDStreamFunctions[K, V](stream) } - def rddToFileName[T](prefix: String, suffix: String, time: Time): String = { + protected[streaming] def rddToFileName[T](prefix: String, suffix: String, time: Time): String = { if (prefix == null) { time.millis.toString } else if (suffix == null || suffix.length ==0) { @@ -328,7 +327,7 @@ object StreamingContext { } } - def getSparkCheckpointDir(sscCheckpointDir: String): String = { + protected[streaming] def getSparkCheckpointDir(sscCheckpointDir: String): String = { new Path(sscCheckpointDir, UUID.randomUUID.toString).toString } } diff --git a/streaming/src/main/scala/spark/streaming/examples/GrepRaw.scala b/streaming/src/main/scala/spark/streaming/examples/GrepRaw.scala index 6cb2b4c042..7c4ee3b34c 100644 --- a/streaming/src/main/scala/spark/streaming/examples/GrepRaw.scala +++ b/streaming/src/main/scala/spark/streaming/examples/GrepRaw.scala @@ -26,7 +26,7 @@ object GrepRaw { val rawStreams = (1 to numStreams).map(_ => ssc.rawNetworkStream[String](host, port, StorageLevel.MEMORY_ONLY_SER_2)).toArray val union = new UnionDStream(rawStreams) - union.filter(_.contains("Alice")).count().foreachRDD(r => + union.filter(_.contains("Alice")).count().foreach(r => println("Grep count: " + r.collect().mkString)) ssc.start() } diff --git a/streaming/src/main/scala/spark/streaming/examples/TopKWordCountRaw.scala b/streaming/src/main/scala/spark/streaming/examples/TopKWordCountRaw.scala index fe4c2bf155..182dfd8a52 100644 --- a/streaming/src/main/scala/spark/streaming/examples/TopKWordCountRaw.scala +++ b/streaming/src/main/scala/spark/streaming/examples/TopKWordCountRaw.scala @@ -38,7 +38,7 @@ object TopKWordCountRaw { val counts = union.mapPartitions(splitAndCountPartitions) val windowedCounts = counts.reduceByKeyAndWindow(add _, subtract _, Seconds(30), Seconds(1), 10) val partialTopKWindowedCounts = windowedCounts.mapPartitions(topK(_, k)) - partialTopKWindowedCounts.foreachRDD(rdd => { + partialTopKWindowedCounts.foreach(rdd => { val collectedCounts = rdd.collect println("Collected " + collectedCounts.size + " words from partial top words") println("Top " + k + " words are " + topK(collectedCounts.toIterator, k).mkString(",")) diff --git a/streaming/src/main/scala/spark/streaming/examples/WordCountRaw.scala b/streaming/src/main/scala/spark/streaming/examples/WordCountRaw.scala index a29c81d437..9bcd30f4d7 100644 --- a/streaming/src/main/scala/spark/streaming/examples/WordCountRaw.scala +++ b/streaming/src/main/scala/spark/streaming/examples/WordCountRaw.scala @@ -36,7 +36,7 @@ object WordCountRaw { val union = new UnionDStream(lines.toArray) val counts = 
union.mapPartitions(splitAndCountPartitions) val windowedCounts = counts.reduceByKeyAndWindow(add _, subtract _, Seconds(30), Seconds(1), 10) - windowedCounts.foreachRDD(r => println("# unique words = " + r.count())) + windowedCounts.foreach(r => println("# unique words = " + r.count())) ssc.start() } diff --git a/streaming/src/main/scala/spark/streaming/examples/clickstream/PageViewStream.scala b/streaming/src/main/scala/spark/streaming/examples/clickstream/PageViewStream.scala index 68be6b7893..a191321d91 100644 --- a/streaming/src/main/scala/spark/streaming/examples/clickstream/PageViewStream.scala +++ b/streaming/src/main/scala/spark/streaming/examples/clickstream/PageViewStream.scala @@ -72,7 +72,7 @@ object PageViewStream { case "popularUsersSeen" => // Look for users in our existing dataset and print it out if we have a match pageViews.map(view => (view.userID, 1)) - .foreachRDD((rdd, time) => rdd.join(userList) + .foreach((rdd, time) => rdd.join(userList) .map(_._2._2) .take(10) .foreach(u => println("Saw user %s at time %s".format(u, time)))) diff --git a/streaming/src/test/scala/spark/streaming/TestSuiteBase.scala b/streaming/src/test/scala/spark/streaming/TestSuiteBase.scala index 8cc2f8ccfc..a44f738957 100644 --- a/streaming/src/test/scala/spark/streaming/TestSuiteBase.scala +++ b/streaming/src/test/scala/spark/streaming/TestSuiteBase.scala @@ -35,7 +35,7 @@ class TestInputStream[T: ClassManifest](ssc_ : StreamingContext, input: Seq[Seq[ * ArrayBuffer. This buffer is wiped clean on being restored from checkpoint. */ class TestOutputStream[T: ClassManifest](parent: DStream[T], val output: ArrayBuffer[Seq[T]]) - extends PerRDDForEachDStream[T](parent, (rdd: RDD[T], t: Time) => { + extends ForEachDStream[T](parent, (rdd: RDD[T], t: Time) => { val collected = rdd.collect() output += collected }) { From 7e0271b4387eaf27cd96f3057ce2465b1271a480 Mon Sep 17 00:00:00 2001 From: Tathagata Das Date: Sun, 30 Dec 2012 15:19:55 -0800 Subject: [PATCH 155/226] Refactored a whole lot to push all DStreams into the spark.streaming.dstream package. 
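In practical terms, after this repackaging code that touches the concrete DStream classes imports them from spark.streaming.dstream, while typical drivers keep working only with StreamingContext and the DStream operations; the bundled examples also switch from constructing UnionDStream directly to the ssc.union(...) helper. A minimal sketch of a driver against the reorganized API follows (the master string, application name, host and port are made up, and the three-argument StreamingContext constructor and the Seconds helper are assumed from the surrounding code):

import spark.storage.StorageLevel
import spark.streaming.{StreamingContext, Seconds}

// Hypothetical driver: grep over two raw network streams, mirroring the GrepRaw example.
val ssc = new StreamingContext("local[4]", "DStreamPackageSketch", Seconds(1))
val streams = (1 to 2).map(_ =>
  ssc.rawNetworkStream[String]("localhost", 9999, StorageLevel.MEMORY_ONLY_SER_2))
val union = ssc.union(streams)            // replaces new UnionDStream(streams.toArray)
union.filter(_.contains("Alice")).count().foreach(rdd =>
  println("Grep count: " + rdd.collect().mkString))
ssc.start()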
--- .../main/scala/spark/rdd/ShuffledRDD.scala | 1 + .../main/scala/spark/streaming/DStream.scala | 276 +----------------- .../scala/spark/streaming/DStreamGraph.scala | 1 + .../spark/streaming/NetworkInputTracker.scala | 2 + .../streaming/PairDStreamFunctions.scala | 7 +- .../scala/spark/streaming/Scheduler.scala | 3 - .../spark/streaming/StreamingContext.scala | 13 +- .../src/main/scala/spark/streaming/Time.scala | 11 +- .../{ => dstream}/CoGroupedDStream.scala | 3 +- .../{ => dstream}/ConstantInputDStream.scala | 3 +- .../streaming/{ => dstream}/DataHandler.scala | 2 +- .../{ => dstream}/FileInputDStream.scala | 3 +- .../streaming/dstream/FilteredDStream.scala | 21 ++ .../dstream/FlatMapValuedDStream.scala | 20 ++ .../streaming/dstream/FlatMappedDStream.scala | 20 ++ .../{ => dstream}/FlumeInputDStream.scala | 13 +- .../streaming/dstream/ForEachDStream.scala | 28 ++ .../streaming/dstream/GlommedDStream.scala | 17 ++ .../streaming/dstream/InputDStream.scala | 19 ++ .../KafkaInputDStream.scala | 14 +- .../dstream/MapPartitionedDStream.scala | 21 ++ .../streaming/dstream/MapValuedDStream.scala | 21 ++ .../streaming/dstream/MappedDStream.scala | 20 ++ .../{ => dstream}/NetworkInputDStream.scala | 7 +- .../{ => dstream}/QueueInputDStream.scala | 3 +- .../{ => dstream}/RawInputDStream.scala | 9 +- .../ReducedWindowedDStream.scala | 5 +- .../streaming/dstream/ShuffledDStream.scala | 27 ++ .../{ => dstream}/SocketInputDStream.scala | 8 +- .../{ => dstream}/StateDStream.scala | 5 +- .../dstream/TransformedDStream.scala | 19 ++ .../streaming/dstream/UnionDStream.scala | 39 +++ .../{ => dstream}/WindowedDStream.scala | 3 +- .../spark/streaming/examples/GrepRaw.scala | 2 +- .../streaming/examples/TopKWordCountRaw.scala | 2 +- .../streaming/examples/WordCountRaw.scala | 2 +- .../spark/streaming/CheckpointSuite.scala | 2 +- .../scala/spark/streaming/FailureSuite.scala | 2 +- .../spark/streaming/InputStreamsSuite.scala | 1 + .../scala/spark/streaming/TestSuiteBase.scala | 48 ++- .../streaming/WindowOperationsSuite.scala | 12 +- 41 files changed, 402 insertions(+), 333 deletions(-) rename streaming/src/main/scala/spark/streaming/{ => dstream}/CoGroupedDStream.scala (93%) rename streaming/src/main/scala/spark/streaming/{ => dstream}/ConstantInputDStream.scala (81%) rename streaming/src/main/scala/spark/streaming/{ => dstream}/DataHandler.scala (98%) rename streaming/src/main/scala/spark/streaming/{ => dstream}/FileInputDStream.scala (97%) create mode 100644 streaming/src/main/scala/spark/streaming/dstream/FilteredDStream.scala create mode 100644 streaming/src/main/scala/spark/streaming/dstream/FlatMapValuedDStream.scala create mode 100644 streaming/src/main/scala/spark/streaming/dstream/FlatMappedDStream.scala rename streaming/src/main/scala/spark/streaming/{ => dstream}/FlumeInputDStream.scala (96%) create mode 100644 streaming/src/main/scala/spark/streaming/dstream/ForEachDStream.scala create mode 100644 streaming/src/main/scala/spark/streaming/dstream/GlommedDStream.scala create mode 100644 streaming/src/main/scala/spark/streaming/dstream/InputDStream.scala rename streaming/src/main/scala/spark/streaming/{input => dstream}/KafkaInputDStream.scala (97%) create mode 100644 streaming/src/main/scala/spark/streaming/dstream/MapPartitionedDStream.scala create mode 100644 streaming/src/main/scala/spark/streaming/dstream/MapValuedDStream.scala create mode 100644 streaming/src/main/scala/spark/streaming/dstream/MappedDStream.scala rename streaming/src/main/scala/spark/streaming/{ => 
dstream}/NetworkInputDStream.scala (97%) rename streaming/src/main/scala/spark/streaming/{ => dstream}/QueueInputDStream.scala (91%) rename streaming/src/main/scala/spark/streaming/{ => dstream}/RawInputDStream.scala (95%) rename streaming/src/main/scala/spark/streaming/{ => dstream}/ReducedWindowedDStream.scala (98%) create mode 100644 streaming/src/main/scala/spark/streaming/dstream/ShuffledDStream.scala rename streaming/src/main/scala/spark/streaming/{ => dstream}/SocketInputDStream.scala (92%) rename streaming/src/main/scala/spark/streaming/{ => dstream}/StateDStream.scala (97%) create mode 100644 streaming/src/main/scala/spark/streaming/dstream/TransformedDStream.scala create mode 100644 streaming/src/main/scala/spark/streaming/dstream/UnionDStream.scala rename streaming/src/main/scala/spark/streaming/{ => dstream}/WindowedDStream.scala (93%) diff --git a/core/src/main/scala/spark/rdd/ShuffledRDD.scala b/core/src/main/scala/spark/rdd/ShuffledRDD.scala index f40b56be64..1b219473e0 100644 --- a/core/src/main/scala/spark/rdd/ShuffledRDD.scala +++ b/core/src/main/scala/spark/rdd/ShuffledRDD.scala @@ -1,6 +1,7 @@ package spark.rdd import spark.{Partitioner, RDD, SparkEnv, ShuffleDependency, Split, TaskContext} +import spark.SparkContext._ private[spark] class ShuffledRDDSplit(val idx: Int) extends Split { override val index = idx diff --git a/streaming/src/main/scala/spark/streaming/DStream.scala b/streaming/src/main/scala/spark/streaming/DStream.scala index 3834b57ed3..292ad3b9f9 100644 --- a/streaming/src/main/scala/spark/streaming/DStream.scala +++ b/streaming/src/main/scala/spark/streaming/DStream.scala @@ -1,17 +1,15 @@ package spark.streaming +import spark.streaming.dstream._ import StreamingContext._ import Time._ -import spark._ -import spark.SparkContext._ -import spark.rdd._ +import spark.{RDD, Logging} import spark.storage.StorageLevel import scala.collection.mutable.ArrayBuffer import scala.collection.mutable.HashMap -import java.util.concurrent.ArrayBlockingQueue import java.io.{ObjectInputStream, IOException, ObjectOutputStream} import org.apache.hadoop.fs.Path @@ -197,7 +195,7 @@ abstract class DStream[T: ClassManifest] ( "than " + rememberDuration.milliseconds + " milliseconds. But the Spark's metadata cleanup" + "delay is set to " + (metadataCleanerDelay / 60.0) + " minutes, which is not sufficient. Please set " + "the Java property 'spark.cleaner.delay' to more than " + - math.ceil(rememberDuration.millis.toDouble / 60000.0).toInt + " minutes." + math.ceil(rememberDuration.milliseconds.toDouble / 60000.0).toInt + " minutes." 
) dependencies.foreach(_.validate()) @@ -642,271 +640,3 @@ abstract class DStream[T: ClassManifest] ( private[streaming] case class DStreamCheckpointData(rdds: HashMap[Time, Any]) -abstract class InputDStream[T: ClassManifest] (@transient ssc_ : StreamingContext) - extends DStream[T](ssc_) { - - override def dependencies = List() - - override def slideTime = { - if (ssc == null) throw new Exception("ssc is null") - if (ssc.graph.batchDuration == null) throw new Exception("batchDuration is null") - ssc.graph.batchDuration - } - - def start() - - def stop() -} - - -/** - * TODO - */ - -private[streaming] -class MappedDStream[T: ClassManifest, U: ClassManifest] ( - parent: DStream[T], - mapFunc: T => U - ) extends DStream[U](parent.ssc) { - - override def dependencies = List(parent) - - override def slideTime: Time = parent.slideTime - - override def compute(validTime: Time): Option[RDD[U]] = { - parent.getOrCompute(validTime).map(_.map[U](mapFunc)) - } -} - - -/** - * TODO - */ - -private[streaming] -class FlatMappedDStream[T: ClassManifest, U: ClassManifest]( - parent: DStream[T], - flatMapFunc: T => Traversable[U] - ) extends DStream[U](parent.ssc) { - - override def dependencies = List(parent) - - override def slideTime: Time = parent.slideTime - - override def compute(validTime: Time): Option[RDD[U]] = { - parent.getOrCompute(validTime).map(_.flatMap(flatMapFunc)) - } -} - - -/** - * TODO - */ - -private[streaming] -class FilteredDStream[T: ClassManifest]( - parent: DStream[T], - filterFunc: T => Boolean - ) extends DStream[T](parent.ssc) { - - override def dependencies = List(parent) - - override def slideTime: Time = parent.slideTime - - override def compute(validTime: Time): Option[RDD[T]] = { - parent.getOrCompute(validTime).map(_.filter(filterFunc)) - } -} - - -/** - * TODO - */ - -private[streaming] -class MapPartitionedDStream[T: ClassManifest, U: ClassManifest]( - parent: DStream[T], - mapPartFunc: Iterator[T] => Iterator[U], - preservePartitioning: Boolean - ) extends DStream[U](parent.ssc) { - - override def dependencies = List(parent) - - override def slideTime: Time = parent.slideTime - - override def compute(validTime: Time): Option[RDD[U]] = { - parent.getOrCompute(validTime).map(_.mapPartitions[U](mapPartFunc, preservePartitioning)) - } -} - - -/** - * TODO - */ - -private[streaming] -class GlommedDStream[T: ClassManifest](parent: DStream[T]) - extends DStream[Array[T]](parent.ssc) { - - override def dependencies = List(parent) - - override def slideTime: Time = parent.slideTime - - override def compute(validTime: Time): Option[RDD[Array[T]]] = { - parent.getOrCompute(validTime).map(_.glom()) - } -} - - -/** - * TODO - */ - -private[streaming] -class ShuffledDStream[K: ClassManifest, V: ClassManifest, C: ClassManifest]( - parent: DStream[(K,V)], - createCombiner: V => C, - mergeValue: (C, V) => C, - mergeCombiner: (C, C) => C, - partitioner: Partitioner - ) extends DStream [(K,C)] (parent.ssc) { - - override def dependencies = List(parent) - - override def slideTime: Time = parent.slideTime - - override def compute(validTime: Time): Option[RDD[(K,C)]] = { - parent.getOrCompute(validTime) match { - case Some(rdd) => - Some(rdd.combineByKey[C](createCombiner, mergeValue, mergeCombiner, partitioner)) - case None => None - } - } -} - - -/** - * TODO - */ - -private[streaming] -class MapValuesDStream[K: ClassManifest, V: ClassManifest, U: ClassManifest]( - parent: DStream[(K, V)], - mapValueFunc: V => U - ) extends DStream[(K, U)](parent.ssc) { - - override def dependencies = 
List(parent) - - override def slideTime: Time = parent.slideTime - - override def compute(validTime: Time): Option[RDD[(K, U)]] = { - parent.getOrCompute(validTime).map(_.mapValues[U](mapValueFunc)) - } -} - - -/** - * TODO - */ -private[streaming] -class FlatMapValuesDStream[K: ClassManifest, V: ClassManifest, U: ClassManifest]( - parent: DStream[(K, V)], - flatMapValueFunc: V => TraversableOnce[U] - ) extends DStream[(K, U)](parent.ssc) { - - override def dependencies = List(parent) - - override def slideTime: Time = parent.slideTime - - override def compute(validTime: Time): Option[RDD[(K, U)]] = { - parent.getOrCompute(validTime).map(_.flatMapValues[U](flatMapValueFunc)) - } -} - - - -/** - * TODO - */ - -class UnionDStream[T: ClassManifest](parents: Array[DStream[T]]) - extends DStream[T](parents.head.ssc) { - - if (parents.length == 0) { - throw new IllegalArgumentException("Empty array of parents") - } - - if (parents.map(_.ssc).distinct.size > 1) { - throw new IllegalArgumentException("Array of parents have different StreamingContexts") - } - - if (parents.map(_.slideTime).distinct.size > 1) { - throw new IllegalArgumentException("Array of parents have different slide times") - } - - override def dependencies = parents.toList - - override def slideTime: Time = parents.head.slideTime - - override def compute(validTime: Time): Option[RDD[T]] = { - val rdds = new ArrayBuffer[RDD[T]]() - parents.map(_.getOrCompute(validTime)).foreach(_ match { - case Some(rdd) => rdds += rdd - case None => throw new Exception("Could not generate RDD from a parent for unifying at time " + validTime) - }) - if (rdds.size > 0) { - Some(new UnionRDD(ssc.sc, rdds)) - } else { - None - } - } -} - - -/** - * TODO - */ - -private[streaming] -class ForEachDStream[T: ClassManifest] ( - parent: DStream[T], - foreachFunc: (RDD[T], Time) => Unit - ) extends DStream[Unit](parent.ssc) { - - override def dependencies = List(parent) - - override def slideTime: Time = parent.slideTime - - override def compute(validTime: Time): Option[RDD[Unit]] = None - - override def generateJob(time: Time): Option[Job] = { - parent.getOrCompute(time) match { - case Some(rdd) => - val jobFunc = () => { - foreachFunc(rdd, time) - } - Some(new Job(time, jobFunc)) - case None => None - } - } -} - - -/** - * TODO - */ - -private[streaming] -class TransformedDStream[T: ClassManifest, U: ClassManifest] ( - parent: DStream[T], - transformFunc: (RDD[T], Time) => RDD[U] - ) extends DStream[U](parent.ssc) { - - override def dependencies = List(parent) - - override def slideTime: Time = parent.slideTime - - override def compute(validTime: Time): Option[RDD[U]] = { - parent.getOrCompute(validTime).map(transformFunc(_, validTime)) - } -} diff --git a/streaming/src/main/scala/spark/streaming/DStreamGraph.scala b/streaming/src/main/scala/spark/streaming/DStreamGraph.scala index d0a9ade61d..c72429370e 100644 --- a/streaming/src/main/scala/spark/streaming/DStreamGraph.scala +++ b/streaming/src/main/scala/spark/streaming/DStreamGraph.scala @@ -1,5 +1,6 @@ package spark.streaming +import dstream.InputDStream import java.io.{ObjectInputStream, IOException, ObjectOutputStream} import collection.mutable.ArrayBuffer import spark.Logging diff --git a/streaming/src/main/scala/spark/streaming/NetworkInputTracker.scala b/streaming/src/main/scala/spark/streaming/NetworkInputTracker.scala index 658498dfc1..a6ab44271f 100644 --- a/streaming/src/main/scala/spark/streaming/NetworkInputTracker.scala +++ 
b/streaming/src/main/scala/spark/streaming/NetworkInputTracker.scala @@ -1,5 +1,7 @@ package spark.streaming +import spark.streaming.dstream.{NetworkInputDStream, NetworkReceiver} +import spark.streaming.dstream.{StopReceiver, ReportBlock, ReportError} import spark.Logging import spark.SparkEnv diff --git a/streaming/src/main/scala/spark/streaming/PairDStreamFunctions.scala b/streaming/src/main/scala/spark/streaming/PairDStreamFunctions.scala index f9fef14196..b0a208e67f 100644 --- a/streaming/src/main/scala/spark/streaming/PairDStreamFunctions.scala +++ b/streaming/src/main/scala/spark/streaming/PairDStreamFunctions.scala @@ -1,6 +1,9 @@ package spark.streaming import spark.streaming.StreamingContext._ +import spark.streaming.dstream.{ReducedWindowedDStream, StateDStream} +import spark.streaming.dstream.{CoGroupedDStream, ShuffledDStream} +import spark.streaming.dstream.{MapValuedDStream, FlatMapValuedDStream} import spark.{Manifests, RDD, Partitioner, HashPartitioner} import spark.SparkContext._ @@ -218,13 +221,13 @@ extends Serializable { def mapValues[U: ClassManifest](mapValuesFunc: V => U): DStream[(K, U)] = { - new MapValuesDStream[K, V, U](self, mapValuesFunc) + new MapValuedDStream[K, V, U](self, mapValuesFunc) } def flatMapValues[U: ClassManifest]( flatMapValuesFunc: V => TraversableOnce[U] ): DStream[(K, U)] = { - new FlatMapValuesDStream[K, V, U](self, flatMapValuesFunc) + new FlatMapValuedDStream[K, V, U](self, flatMapValuesFunc) } def cogroup[W: ClassManifest](other: DStream[(K, W)]): DStream[(K, (Seq[V], Seq[W]))] = { diff --git a/streaming/src/main/scala/spark/streaming/Scheduler.scala b/streaming/src/main/scala/spark/streaming/Scheduler.scala index fd1fa77a24..aeb7c3eb0e 100644 --- a/streaming/src/main/scala/spark/streaming/Scheduler.scala +++ b/streaming/src/main/scala/spark/streaming/Scheduler.scala @@ -4,9 +4,6 @@ import util.{ManualClock, RecurringTimer, Clock} import spark.SparkEnv import spark.Logging -import scala.collection.mutable.HashMap - - private[streaming] class Scheduler(ssc: StreamingContext) extends Logging { diff --git a/streaming/src/main/scala/spark/streaming/StreamingContext.scala b/streaming/src/main/scala/spark/streaming/StreamingContext.scala index 998fea849f..ef73049a81 100644 --- a/streaming/src/main/scala/spark/streaming/StreamingContext.scala +++ b/streaming/src/main/scala/spark/streaming/StreamingContext.scala @@ -1,10 +1,10 @@ package spark.streaming -import spark.RDD -import spark.Logging -import spark.SparkEnv -import spark.SparkContext +import spark.streaming.dstream._ + +import spark.{RDD, Logging, SparkEnv, SparkContext} import spark.storage.StorageLevel +import spark.util.MetadataCleaner import scala.collection.mutable.Queue @@ -18,7 +18,6 @@ import org.apache.hadoop.mapreduce.lib.input.TextInputFormat import org.apache.flume.source.avro.AvroFlumeEvent import org.apache.hadoop.fs.Path import java.util.UUID -import spark.util.MetadataCleaner /** * A StreamingContext is the main entry point for Spark Streaming functionality. Besides the basic @@ -126,7 +125,7 @@ class StreamingContext private ( /** * Create an input stream that pulls messages form a Kafka Broker. * - * @param host Zookeper hostname. + * @param hostname Zookeper hostname. * @param port Zookeper port. * @param groupId The group id for this consumer. * @param topics Map of (topic_name -> numPartitions) to consume. 
Each partition is consumed @@ -319,7 +318,7 @@ object StreamingContext { protected[streaming] def rddToFileName[T](prefix: String, suffix: String, time: Time): String = { if (prefix == null) { - time.millis.toString + time.milliseconds.toString } else if (suffix == null || suffix.length ==0) { prefix + "-" + time.milliseconds } else { diff --git a/streaming/src/main/scala/spark/streaming/Time.scala b/streaming/src/main/scala/spark/streaming/Time.scala index 480d292d7c..2976e5e87b 100644 --- a/streaming/src/main/scala/spark/streaming/Time.scala +++ b/streaming/src/main/scala/spark/streaming/Time.scala @@ -1,6 +1,11 @@ package spark.streaming -case class Time(millis: Long) { +/** + * This class is simple wrapper class that represents time in UTC. + * @param millis Time in UTC long + */ + +case class Time(private val millis: Long) { def < (that: Time): Boolean = (this.millis < that.millis) @@ -15,7 +20,9 @@ case class Time(millis: Long) { def - (that: Time): Time = Time(millis - that.millis) def * (times: Int): Time = Time(millis * times) - + + def / (that: Time): Long = millis / that.millis + def floor(that: Time): Time = { val t = that.millis val m = math.floor(this.millis / t).toLong diff --git a/streaming/src/main/scala/spark/streaming/CoGroupedDStream.scala b/streaming/src/main/scala/spark/streaming/dstream/CoGroupedDStream.scala similarity index 93% rename from streaming/src/main/scala/spark/streaming/CoGroupedDStream.scala rename to streaming/src/main/scala/spark/streaming/dstream/CoGroupedDStream.scala index 61d088eddb..2e427dadf7 100644 --- a/streaming/src/main/scala/spark/streaming/CoGroupedDStream.scala +++ b/streaming/src/main/scala/spark/streaming/dstream/CoGroupedDStream.scala @@ -1,7 +1,8 @@ -package spark.streaming +package spark.streaming.dstream import spark.{RDD, Partitioner} import spark.rdd.CoGroupedRDD +import spark.streaming.{Time, DStream} class CoGroupedDStream[K : ClassManifest]( parents: Seq[DStream[(_, _)]], diff --git a/streaming/src/main/scala/spark/streaming/ConstantInputDStream.scala b/streaming/src/main/scala/spark/streaming/dstream/ConstantInputDStream.scala similarity index 81% rename from streaming/src/main/scala/spark/streaming/ConstantInputDStream.scala rename to streaming/src/main/scala/spark/streaming/dstream/ConstantInputDStream.scala index 80150708fd..41c3af4694 100644 --- a/streaming/src/main/scala/spark/streaming/ConstantInputDStream.scala +++ b/streaming/src/main/scala/spark/streaming/dstream/ConstantInputDStream.scala @@ -1,6 +1,7 @@ -package spark.streaming +package spark.streaming.dstream import spark.RDD +import spark.streaming.{Time, StreamingContext} /** * An input stream that always returns the same RDD on each timestep. Useful for testing. 
diff --git a/streaming/src/main/scala/spark/streaming/DataHandler.scala b/streaming/src/main/scala/spark/streaming/dstream/DataHandler.scala similarity index 98% rename from streaming/src/main/scala/spark/streaming/DataHandler.scala rename to streaming/src/main/scala/spark/streaming/dstream/DataHandler.scala index 05f307a8d1..d737ba1ecc 100644 --- a/streaming/src/main/scala/spark/streaming/DataHandler.scala +++ b/streaming/src/main/scala/spark/streaming/dstream/DataHandler.scala @@ -1,4 +1,4 @@ -package spark.streaming +package spark.streaming.dstream import java.util.concurrent.ArrayBlockingQueue import scala.collection.mutable.ArrayBuffer diff --git a/streaming/src/main/scala/spark/streaming/FileInputDStream.scala b/streaming/src/main/scala/spark/streaming/dstream/FileInputDStream.scala similarity index 97% rename from streaming/src/main/scala/spark/streaming/FileInputDStream.scala rename to streaming/src/main/scala/spark/streaming/dstream/FileInputDStream.scala index 88856364d2..8cdaff467b 100644 --- a/streaming/src/main/scala/spark/streaming/FileInputDStream.scala +++ b/streaming/src/main/scala/spark/streaming/dstream/FileInputDStream.scala @@ -1,7 +1,8 @@ -package spark.streaming +package spark.streaming.dstream import spark.RDD import spark.rdd.UnionRDD +import spark.streaming.{StreamingContext, Time} import org.apache.hadoop.fs.{FileSystem, Path, PathFilter} import org.apache.hadoop.conf.Configuration diff --git a/streaming/src/main/scala/spark/streaming/dstream/FilteredDStream.scala b/streaming/src/main/scala/spark/streaming/dstream/FilteredDStream.scala new file mode 100644 index 0000000000..1cbb4d536e --- /dev/null +++ b/streaming/src/main/scala/spark/streaming/dstream/FilteredDStream.scala @@ -0,0 +1,21 @@ +package spark.streaming.dstream + +import spark.streaming.{DStream, Time} +import spark.RDD + +private[streaming] +class FilteredDStream[T: ClassManifest]( + parent: DStream[T], + filterFunc: T => Boolean + ) extends DStream[T](parent.ssc) { + + override def dependencies = List(parent) + + override def slideTime: Time = parent.slideTime + + override def compute(validTime: Time): Option[RDD[T]] = { + parent.getOrCompute(validTime).map(_.filter(filterFunc)) + } +} + + diff --git a/streaming/src/main/scala/spark/streaming/dstream/FlatMapValuedDStream.scala b/streaming/src/main/scala/spark/streaming/dstream/FlatMapValuedDStream.scala new file mode 100644 index 0000000000..11ed8cf317 --- /dev/null +++ b/streaming/src/main/scala/spark/streaming/dstream/FlatMapValuedDStream.scala @@ -0,0 +1,20 @@ +package spark.streaming.dstream + +import spark.streaming.{DStream, Time} +import spark.RDD +import spark.SparkContext._ + +private[streaming] +class FlatMapValuedDStream[K: ClassManifest, V: ClassManifest, U: ClassManifest]( + parent: DStream[(K, V)], + flatMapValueFunc: V => TraversableOnce[U] + ) extends DStream[(K, U)](parent.ssc) { + + override def dependencies = List(parent) + + override def slideTime: Time = parent.slideTime + + override def compute(validTime: Time): Option[RDD[(K, U)]] = { + parent.getOrCompute(validTime).map(_.flatMapValues[U](flatMapValueFunc)) + } +} diff --git a/streaming/src/main/scala/spark/streaming/dstream/FlatMappedDStream.scala b/streaming/src/main/scala/spark/streaming/dstream/FlatMappedDStream.scala new file mode 100644 index 0000000000..a13b4c9ff9 --- /dev/null +++ b/streaming/src/main/scala/spark/streaming/dstream/FlatMappedDStream.scala @@ -0,0 +1,20 @@ +package spark.streaming.dstream + +import spark.streaming.{DStream, Time} +import spark.RDD + 
+private[streaming] +class FlatMappedDStream[T: ClassManifest, U: ClassManifest]( + parent: DStream[T], + flatMapFunc: T => Traversable[U] + ) extends DStream[U](parent.ssc) { + + override def dependencies = List(parent) + + override def slideTime: Time = parent.slideTime + + override def compute(validTime: Time): Option[RDD[U]] = { + parent.getOrCompute(validTime).map(_.flatMap(flatMapFunc)) + } +} + diff --git a/streaming/src/main/scala/spark/streaming/FlumeInputDStream.scala b/streaming/src/main/scala/spark/streaming/dstream/FlumeInputDStream.scala similarity index 96% rename from streaming/src/main/scala/spark/streaming/FlumeInputDStream.scala rename to streaming/src/main/scala/spark/streaming/dstream/FlumeInputDStream.scala index 5ac7e5b08e..7e988cadf4 100644 --- a/streaming/src/main/scala/spark/streaming/FlumeInputDStream.scala +++ b/streaming/src/main/scala/spark/streaming/dstream/FlumeInputDStream.scala @@ -1,15 +1,20 @@ -package spark.streaming +package spark.streaming.dstream -import java.io.{ObjectInput, ObjectOutput, Externalizable} +import spark.streaming.StreamingContext + +import spark.Utils import spark.storage.StorageLevel + import org.apache.flume.source.avro.AvroSourceProtocol import org.apache.flume.source.avro.AvroFlumeEvent import org.apache.flume.source.avro.Status import org.apache.avro.ipc.specific.SpecificResponder import org.apache.avro.ipc.NettyServer + +import scala.collection.JavaConversions._ + import java.net.InetSocketAddress -import collection.JavaConversions._ -import spark.Utils +import java.io.{ObjectInput, ObjectOutput, Externalizable} import java.nio.ByteBuffer class FlumeInputDStream[T: ClassManifest]( diff --git a/streaming/src/main/scala/spark/streaming/dstream/ForEachDStream.scala b/streaming/src/main/scala/spark/streaming/dstream/ForEachDStream.scala new file mode 100644 index 0000000000..41c629a225 --- /dev/null +++ b/streaming/src/main/scala/spark/streaming/dstream/ForEachDStream.scala @@ -0,0 +1,28 @@ +package spark.streaming.dstream + +import spark.RDD +import spark.streaming.{DStream, Job, Time} + +private[streaming] +class ForEachDStream[T: ClassManifest] ( + parent: DStream[T], + foreachFunc: (RDD[T], Time) => Unit + ) extends DStream[Unit](parent.ssc) { + + override def dependencies = List(parent) + + override def slideTime: Time = parent.slideTime + + override def compute(validTime: Time): Option[RDD[Unit]] = None + + override def generateJob(time: Time): Option[Job] = { + parent.getOrCompute(time) match { + case Some(rdd) => + val jobFunc = () => { + foreachFunc(rdd, time) + } + Some(new Job(time, jobFunc)) + case None => None + } + } +} diff --git a/streaming/src/main/scala/spark/streaming/dstream/GlommedDStream.scala b/streaming/src/main/scala/spark/streaming/dstream/GlommedDStream.scala new file mode 100644 index 0000000000..92ea503cae --- /dev/null +++ b/streaming/src/main/scala/spark/streaming/dstream/GlommedDStream.scala @@ -0,0 +1,17 @@ +package spark.streaming.dstream + +import spark.streaming.{DStream, Time} +import spark.RDD + +private[streaming] +class GlommedDStream[T: ClassManifest](parent: DStream[T]) + extends DStream[Array[T]](parent.ssc) { + + override def dependencies = List(parent) + + override def slideTime: Time = parent.slideTime + + override def compute(validTime: Time): Option[RDD[Array[T]]] = { + parent.getOrCompute(validTime).map(_.glom()) + } +} diff --git a/streaming/src/main/scala/spark/streaming/dstream/InputDStream.scala b/streaming/src/main/scala/spark/streaming/dstream/InputDStream.scala new file mode 
100644 index 0000000000..4959c66b06 --- /dev/null +++ b/streaming/src/main/scala/spark/streaming/dstream/InputDStream.scala @@ -0,0 +1,19 @@ +package spark.streaming.dstream + +import spark.streaming.{StreamingContext, DStream} + +abstract class InputDStream[T: ClassManifest] (@transient ssc_ : StreamingContext) + extends DStream[T](ssc_) { + + override def dependencies = List() + + override def slideTime = { + if (ssc == null) throw new Exception("ssc is null") + if (ssc.graph.batchDuration == null) throw new Exception("batchDuration is null") + ssc.graph.batchDuration + } + + def start() + + def stop() +} diff --git a/streaming/src/main/scala/spark/streaming/input/KafkaInputDStream.scala b/streaming/src/main/scala/spark/streaming/dstream/KafkaInputDStream.scala similarity index 97% rename from streaming/src/main/scala/spark/streaming/input/KafkaInputDStream.scala rename to streaming/src/main/scala/spark/streaming/dstream/KafkaInputDStream.scala index 7c642d4802..a46721af2f 100644 --- a/streaming/src/main/scala/spark/streaming/input/KafkaInputDStream.scala +++ b/streaming/src/main/scala/spark/streaming/dstream/KafkaInputDStream.scala @@ -1,17 +1,21 @@ -package spark.streaming +package spark.streaming.dstream + +import spark.Logging +import spark.storage.StorageLevel +import spark.streaming.{Time, DStreamCheckpointData, StreamingContext} import java.util.Properties import java.util.concurrent.Executors + import kafka.consumer._ import kafka.message.{Message, MessageSet, MessageAndMetadata} import kafka.serializer.StringDecoder import kafka.utils.{Utils, ZKGroupTopicDirs} import kafka.utils.ZkUtils._ + import scala.collection.mutable.HashMap import scala.collection.JavaConversions._ -import spark._ -import spark.RDD -import spark.storage.StorageLevel + // Key for a specific Kafka Partition: (broker, topic, group, part) case class KafkaPartitionKey(brokerId: Int, topic: String, groupId: String, partId: Int) @@ -24,7 +28,7 @@ case class KafkaDStreamCheckpointData(kafkaRdds: HashMap[Time, Any], savedOffsets: Map[KafkaPartitionKey, Long]) extends DStreamCheckpointData(kafkaRdds) /** - * Input stream that pulls messages form a Kafka Broker. + * Input stream that pulls messages from a Kafka Broker. * * @param host Zookeper hostname. * @param port Zookeper port. 
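The new InputDStream base class fixes dependencies and slideTime for all input streams and leaves start(), stop() and compute() to subclasses. As a rough sketch, a custom input stream that replays the same RDD every batch, essentially what the ConstantInputDStream moved into this package does, could look like the following (the class and variable names are made up):

import spark.RDD
import spark.streaming.{StreamingContext, Time}
import spark.streaming.dstream.InputDStream

class RepeatingRDDInputDStream[T: ClassManifest](
    @transient ssc_ : StreamingContext,
    rdd: RDD[T]
  ) extends InputDStream[T](ssc_) {

  // No receiver thread to manage, so start/stop are no-ops.
  override def start() {}

  override def stop() {}

  // Return the same RDD for every batch interval.
  override def compute(validTime: Time): Option[RDD[T]] = Some(rdd)
}

Such a stream would then be wired into the graph with ssc.registerInputStream(new RepeatingRDDInputDStream(ssc, someRdd)).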
diff --git a/streaming/src/main/scala/spark/streaming/dstream/MapPartitionedDStream.scala b/streaming/src/main/scala/spark/streaming/dstream/MapPartitionedDStream.scala new file mode 100644 index 0000000000..daf78c6893 --- /dev/null +++ b/streaming/src/main/scala/spark/streaming/dstream/MapPartitionedDStream.scala @@ -0,0 +1,21 @@ +package spark.streaming.dstream + +import spark.streaming.{DStream, Time} +import spark.RDD + +private[streaming] +class MapPartitionedDStream[T: ClassManifest, U: ClassManifest]( + parent: DStream[T], + mapPartFunc: Iterator[T] => Iterator[U], + preservePartitioning: Boolean + ) extends DStream[U](parent.ssc) { + + override def dependencies = List(parent) + + override def slideTime: Time = parent.slideTime + + override def compute(validTime: Time): Option[RDD[U]] = { + parent.getOrCompute(validTime).map(_.mapPartitions[U](mapPartFunc, preservePartitioning)) + } +} + diff --git a/streaming/src/main/scala/spark/streaming/dstream/MapValuedDStream.scala b/streaming/src/main/scala/spark/streaming/dstream/MapValuedDStream.scala new file mode 100644 index 0000000000..689caeef0e --- /dev/null +++ b/streaming/src/main/scala/spark/streaming/dstream/MapValuedDStream.scala @@ -0,0 +1,21 @@ +package spark.streaming.dstream + +import spark.streaming.{DStream, Time} +import spark.RDD +import spark.SparkContext._ + +private[streaming] +class MapValuedDStream[K: ClassManifest, V: ClassManifest, U: ClassManifest]( + parent: DStream[(K, V)], + mapValueFunc: V => U + ) extends DStream[(K, U)](parent.ssc) { + + override def dependencies = List(parent) + + override def slideTime: Time = parent.slideTime + + override def compute(validTime: Time): Option[RDD[(K, U)]] = { + parent.getOrCompute(validTime).map(_.mapValues[U](mapValueFunc)) + } +} + diff --git a/streaming/src/main/scala/spark/streaming/dstream/MappedDStream.scala b/streaming/src/main/scala/spark/streaming/dstream/MappedDStream.scala new file mode 100644 index 0000000000..786b9966f2 --- /dev/null +++ b/streaming/src/main/scala/spark/streaming/dstream/MappedDStream.scala @@ -0,0 +1,20 @@ +package spark.streaming.dstream + +import spark.streaming.{DStream, Time} +import spark.RDD + +private[streaming] +class MappedDStream[T: ClassManifest, U: ClassManifest] ( + parent: DStream[T], + mapFunc: T => U + ) extends DStream[U](parent.ssc) { + + override def dependencies = List(parent) + + override def slideTime: Time = parent.slideTime + + override def compute(validTime: Time): Option[RDD[U]] = { + parent.getOrCompute(validTime).map(_.map[U](mapFunc)) + } +} + diff --git a/streaming/src/main/scala/spark/streaming/NetworkInputDStream.scala b/streaming/src/main/scala/spark/streaming/dstream/NetworkInputDStream.scala similarity index 97% rename from streaming/src/main/scala/spark/streaming/NetworkInputDStream.scala rename to streaming/src/main/scala/spark/streaming/dstream/NetworkInputDStream.scala index 4bf13dd50c..41276da8bb 100644 --- a/streaming/src/main/scala/spark/streaming/NetworkInputDStream.scala +++ b/streaming/src/main/scala/spark/streaming/dstream/NetworkInputDStream.scala @@ -1,12 +1,13 @@ -package spark.streaming +package spark.streaming.dstream -import scala.collection.mutable.ArrayBuffer +import spark.streaming.{Time, StreamingContext, AddBlocks, RegisterReceiver, DeregisterReceiver} import spark.{Logging, SparkEnv, RDD} import spark.rdd.BlockRDD -import spark.streaming.util.{RecurringTimer, SystemClock} import spark.storage.StorageLevel +import scala.collection.mutable.ArrayBuffer + import java.nio.ByteBuffer import 
akka.actor.{Props, Actor} diff --git a/streaming/src/main/scala/spark/streaming/QueueInputDStream.scala b/streaming/src/main/scala/spark/streaming/dstream/QueueInputDStream.scala similarity index 91% rename from streaming/src/main/scala/spark/streaming/QueueInputDStream.scala rename to streaming/src/main/scala/spark/streaming/dstream/QueueInputDStream.scala index bb86e51932..024bf3bea4 100644 --- a/streaming/src/main/scala/spark/streaming/QueueInputDStream.scala +++ b/streaming/src/main/scala/spark/streaming/dstream/QueueInputDStream.scala @@ -1,10 +1,11 @@ -package spark.streaming +package spark.streaming.dstream import spark.RDD import spark.rdd.UnionRDD import scala.collection.mutable.Queue import scala.collection.mutable.ArrayBuffer +import spark.streaming.{Time, StreamingContext} class QueueInputDStream[T: ClassManifest]( @transient ssc: StreamingContext, diff --git a/streaming/src/main/scala/spark/streaming/RawInputDStream.scala b/streaming/src/main/scala/spark/streaming/dstream/RawInputDStream.scala similarity index 95% rename from streaming/src/main/scala/spark/streaming/RawInputDStream.scala rename to streaming/src/main/scala/spark/streaming/dstream/RawInputDStream.scala index 6acaa9aab1..996cc7dea8 100644 --- a/streaming/src/main/scala/spark/streaming/RawInputDStream.scala +++ b/streaming/src/main/scala/spark/streaming/dstream/RawInputDStream.scala @@ -1,12 +1,15 @@ -package spark.streaming +package spark.streaming.dstream + +import spark.{DaemonThread, Logging} +import spark.storage.StorageLevel +import spark.streaming.StreamingContext import java.net.InetSocketAddress import java.nio.ByteBuffer import java.nio.channels.{ReadableByteChannel, SocketChannel} import java.io.EOFException import java.util.concurrent.ArrayBlockingQueue -import spark._ -import spark.storage.StorageLevel + /** * An input stream that reads blocks of serialized objects from a given network address. 
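QueueInputDStream backs StreamingContext.queueStream, which the earlier StreamingContext changes describe as processing one or all of the queued RDDs per batch. A small sketch of driving a job from such a queue, handy for testing (the exact queueStream signature with oneAtATime and a default RDD is assumed here, and all names and values are made up):

import scala.collection.mutable.Queue
import spark.RDD
import spark.streaming.{StreamingContext, Seconds}

val ssc = new StreamingContext("local[2]", "QueueStreamSketch", Seconds(1))
val rddQueue = new Queue[RDD[Int]]()

// Assumed signature: queueStream(queue, oneAtATime, defaultRDD).
val inputStream = ssc.queueStream(rddQueue, true, null)
inputStream.map(_ * 2).foreach((rdd, time) =>
  println("Batch at " + time + ": " + rdd.collect().mkString(",")))

rddQueue += ssc.sc.makeRDD(1 to 10)   // enqueue one batch worth of data
ssc.start()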
diff --git a/streaming/src/main/scala/spark/streaming/ReducedWindowedDStream.scala b/streaming/src/main/scala/spark/streaming/dstream/ReducedWindowedDStream.scala similarity index 98% rename from streaming/src/main/scala/spark/streaming/ReducedWindowedDStream.scala rename to streaming/src/main/scala/spark/streaming/dstream/ReducedWindowedDStream.scala index f63a9e0011..2686de14d2 100644 --- a/streaming/src/main/scala/spark/streaming/ReducedWindowedDStream.scala +++ b/streaming/src/main/scala/spark/streaming/dstream/ReducedWindowedDStream.scala @@ -1,16 +1,15 @@ -package spark.streaming +package spark.streaming.dstream import spark.streaming.StreamingContext._ import spark.RDD -import spark.rdd.UnionRDD import spark.rdd.CoGroupedRDD import spark.Partitioner import spark.SparkContext._ import spark.storage.StorageLevel import scala.collection.mutable.ArrayBuffer -import collection.SeqProxy +import spark.streaming.{Interval, Time, DStream} class ReducedWindowedDStream[K: ClassManifest, V: ClassManifest]( parent: DStream[(K, V)], diff --git a/streaming/src/main/scala/spark/streaming/dstream/ShuffledDStream.scala b/streaming/src/main/scala/spark/streaming/dstream/ShuffledDStream.scala new file mode 100644 index 0000000000..6854bbe665 --- /dev/null +++ b/streaming/src/main/scala/spark/streaming/dstream/ShuffledDStream.scala @@ -0,0 +1,27 @@ +package spark.streaming.dstream + +import spark.{RDD, Partitioner} +import spark.SparkContext._ +import spark.streaming.{DStream, Time} + +private[streaming] +class ShuffledDStream[K: ClassManifest, V: ClassManifest, C: ClassManifest]( + parent: DStream[(K,V)], + createCombiner: V => C, + mergeValue: (C, V) => C, + mergeCombiner: (C, C) => C, + partitioner: Partitioner + ) extends DStream [(K,C)] (parent.ssc) { + + override def dependencies = List(parent) + + override def slideTime: Time = parent.slideTime + + override def compute(validTime: Time): Option[RDD[(K,C)]] = { + parent.getOrCompute(validTime) match { + case Some(rdd) => + Some(rdd.combineByKey[C](createCombiner, mergeValue, mergeCombiner, partitioner)) + case None => None + } + } +} diff --git a/streaming/src/main/scala/spark/streaming/SocketInputDStream.scala b/streaming/src/main/scala/spark/streaming/dstream/SocketInputDStream.scala similarity index 92% rename from streaming/src/main/scala/spark/streaming/SocketInputDStream.scala rename to streaming/src/main/scala/spark/streaming/dstream/SocketInputDStream.scala index a9e37c0ff0..af5b73ae8d 100644 --- a/streaming/src/main/scala/spark/streaming/SocketInputDStream.scala +++ b/streaming/src/main/scala/spark/streaming/dstream/SocketInputDStream.scala @@ -1,14 +1,10 @@ -package spark.streaming +package spark.streaming.dstream -import spark.streaming.util.{RecurringTimer, SystemClock} +import spark.streaming.StreamingContext import spark.storage.StorageLevel import java.io._ import java.net.Socket -import java.util.concurrent.ArrayBlockingQueue - -import scala.collection.mutable.ArrayBuffer -import scala.Serializable class SocketInputDStream[T: ClassManifest]( @transient ssc_ : StreamingContext, diff --git a/streaming/src/main/scala/spark/streaming/StateDStream.scala b/streaming/src/main/scala/spark/streaming/dstream/StateDStream.scala similarity index 97% rename from streaming/src/main/scala/spark/streaming/StateDStream.scala rename to streaming/src/main/scala/spark/streaming/dstream/StateDStream.scala index b7e4c1c30c..6e190b5564 100644 --- a/streaming/src/main/scala/spark/streaming/StateDStream.scala +++ 
b/streaming/src/main/scala/spark/streaming/dstream/StateDStream.scala @@ -1,11 +1,10 @@ -package spark.streaming +package spark.streaming.dstream import spark.RDD -import spark.rdd.BlockRDD import spark.Partitioner -import spark.rdd.MapPartitionsRDD import spark.SparkContext._ import spark.storage.StorageLevel +import spark.streaming.{Time, DStream} class StateDStream[K: ClassManifest, V: ClassManifest, S <: AnyRef : ClassManifest]( parent: DStream[(K, V)], diff --git a/streaming/src/main/scala/spark/streaming/dstream/TransformedDStream.scala b/streaming/src/main/scala/spark/streaming/dstream/TransformedDStream.scala new file mode 100644 index 0000000000..0337579514 --- /dev/null +++ b/streaming/src/main/scala/spark/streaming/dstream/TransformedDStream.scala @@ -0,0 +1,19 @@ +package spark.streaming.dstream + +import spark.RDD +import spark.streaming.{DStream, Time} + +private[streaming] +class TransformedDStream[T: ClassManifest, U: ClassManifest] ( + parent: DStream[T], + transformFunc: (RDD[T], Time) => RDD[U] + ) extends DStream[U](parent.ssc) { + + override def dependencies = List(parent) + + override def slideTime: Time = parent.slideTime + + override def compute(validTime: Time): Option[RDD[U]] = { + parent.getOrCompute(validTime).map(transformFunc(_, validTime)) + } +} diff --git a/streaming/src/main/scala/spark/streaming/dstream/UnionDStream.scala b/streaming/src/main/scala/spark/streaming/dstream/UnionDStream.scala new file mode 100644 index 0000000000..f1efb2ae72 --- /dev/null +++ b/streaming/src/main/scala/spark/streaming/dstream/UnionDStream.scala @@ -0,0 +1,39 @@ +package spark.streaming.dstream + +import spark.streaming.{DStream, Time} +import spark.RDD +import collection.mutable.ArrayBuffer +import spark.rdd.UnionRDD + +class UnionDStream[T: ClassManifest](parents: Array[DStream[T]]) + extends DStream[T](parents.head.ssc) { + + if (parents.length == 0) { + throw new IllegalArgumentException("Empty array of parents") + } + + if (parents.map(_.ssc).distinct.size > 1) { + throw new IllegalArgumentException("Array of parents have different StreamingContexts") + } + + if (parents.map(_.slideTime).distinct.size > 1) { + throw new IllegalArgumentException("Array of parents have different slide times") + } + + override def dependencies = parents.toList + + override def slideTime: Time = parents.head.slideTime + + override def compute(validTime: Time): Option[RDD[T]] = { + val rdds = new ArrayBuffer[RDD[T]]() + parents.map(_.getOrCompute(validTime)).foreach(_ match { + case Some(rdd) => rdds += rdd + case None => throw new Exception("Could not generate RDD from a parent for unifying at time " + validTime) + }) + if (rdds.size > 0) { + Some(new UnionRDD(ssc.sc, rdds)) + } else { + None + } + } +} diff --git a/streaming/src/main/scala/spark/streaming/WindowedDStream.scala b/streaming/src/main/scala/spark/streaming/dstream/WindowedDStream.scala similarity index 93% rename from streaming/src/main/scala/spark/streaming/WindowedDStream.scala rename to streaming/src/main/scala/spark/streaming/dstream/WindowedDStream.scala index e4d2a634f5..4b2621c497 100644 --- a/streaming/src/main/scala/spark/streaming/WindowedDStream.scala +++ b/streaming/src/main/scala/spark/streaming/dstream/WindowedDStream.scala @@ -1,8 +1,9 @@ -package spark.streaming +package spark.streaming.dstream import spark.RDD import spark.rdd.UnionRDD import spark.storage.StorageLevel +import spark.streaming.{Interval, Time, DStream} class WindowedDStream[T: ClassManifest]( diff --git 
a/streaming/src/main/scala/spark/streaming/examples/GrepRaw.scala b/streaming/src/main/scala/spark/streaming/examples/GrepRaw.scala index 7c4ee3b34c..dfaaf03f03 100644 --- a/streaming/src/main/scala/spark/streaming/examples/GrepRaw.scala +++ b/streaming/src/main/scala/spark/streaming/examples/GrepRaw.scala @@ -25,7 +25,7 @@ object GrepRaw { val rawStreams = (1 to numStreams).map(_ => ssc.rawNetworkStream[String](host, port, StorageLevel.MEMORY_ONLY_SER_2)).toArray - val union = new UnionDStream(rawStreams) + val union = ssc.union(rawStreams) union.filter(_.contains("Alice")).count().foreach(r => println("Grep count: " + r.collect().mkString)) ssc.start() diff --git a/streaming/src/main/scala/spark/streaming/examples/TopKWordCountRaw.scala b/streaming/src/main/scala/spark/streaming/examples/TopKWordCountRaw.scala index 182dfd8a52..338834bc3c 100644 --- a/streaming/src/main/scala/spark/streaming/examples/TopKWordCountRaw.scala +++ b/streaming/src/main/scala/spark/streaming/examples/TopKWordCountRaw.scala @@ -34,7 +34,7 @@ object TopKWordCountRaw { val lines = (1 to numStreams).map(_ => { ssc.rawNetworkStream[String]("localhost", port, StorageLevel.MEMORY_ONLY_SER_2) }) - val union = new UnionDStream(lines.toArray) + val union = ssc.union(lines) val counts = union.mapPartitions(splitAndCountPartitions) val windowedCounts = counts.reduceByKeyAndWindow(add _, subtract _, Seconds(30), Seconds(1), 10) val partialTopKWindowedCounts = windowedCounts.mapPartitions(topK(_, k)) diff --git a/streaming/src/main/scala/spark/streaming/examples/WordCountRaw.scala b/streaming/src/main/scala/spark/streaming/examples/WordCountRaw.scala index 9bcd30f4d7..d93335a8ce 100644 --- a/streaming/src/main/scala/spark/streaming/examples/WordCountRaw.scala +++ b/streaming/src/main/scala/spark/streaming/examples/WordCountRaw.scala @@ -33,7 +33,7 @@ object WordCountRaw { val lines = (1 to numStreams).map(_ => { ssc.rawNetworkStream[String]("localhost", port, StorageLevel.MEMORY_ONLY_SER_2) }) - val union = new UnionDStream(lines.toArray) + val union = ssc.union(lines) val counts = union.mapPartitions(splitAndCountPartitions) val windowedCounts = counts.reduceByKeyAndWindow(add _, subtract _, Seconds(30), Seconds(1), 10) windowedCounts.foreach(r => println("# unique words = " + r.count())) diff --git a/streaming/src/test/scala/spark/streaming/CheckpointSuite.scala b/streaming/src/test/scala/spark/streaming/CheckpointSuite.scala index 0d82b2f1ea..920388bba9 100644 --- a/streaming/src/test/scala/spark/streaming/CheckpointSuite.scala +++ b/streaming/src/test/scala/spark/streaming/CheckpointSuite.scala @@ -42,7 +42,7 @@ class CheckpointSuite extends TestSuiteBase with BeforeAndAfter { val stateStreamCheckpointInterval = Seconds(1) // this ensure checkpointing occurs at least once - val firstNumBatches = (stateStreamCheckpointInterval.millis / batchDuration.millis) * 2 + val firstNumBatches = (stateStreamCheckpointInterval / batchDuration) * 2 val secondNumBatches = firstNumBatches // Setup the streams diff --git a/streaming/src/test/scala/spark/streaming/FailureSuite.scala b/streaming/src/test/scala/spark/streaming/FailureSuite.scala index 5b414117fc..4aa428bf64 100644 --- a/streaming/src/test/scala/spark/streaming/FailureSuite.scala +++ b/streaming/src/test/scala/spark/streaming/FailureSuite.scala @@ -133,7 +133,7 @@ class FailureSuite extends TestSuiteBase with BeforeAndAfter { // Get the output buffer val outputStream = ssc.graph.getOutputStreams.head.asInstanceOf[TestOutputStream[V]] val output = outputStream.output - val 
waitTime = (batchDuration.millis * (numBatches.toDouble + 0.5)).toLong + val waitTime = (batchDuration.milliseconds * (numBatches.toDouble + 0.5)).toLong val startTime = System.currentTimeMillis() try { diff --git a/streaming/src/test/scala/spark/streaming/InputStreamsSuite.scala b/streaming/src/test/scala/spark/streaming/InputStreamsSuite.scala index ed9a659092..76b528bec3 100644 --- a/streaming/src/test/scala/spark/streaming/InputStreamsSuite.scala +++ b/streaming/src/test/scala/spark/streaming/InputStreamsSuite.scala @@ -1,5 +1,6 @@ package spark.streaming +import dstream.SparkFlumeEvent import java.net.{InetSocketAddress, SocketException, Socket, ServerSocket} import java.io.{File, BufferedWriter, OutputStreamWriter} import java.util.concurrent.{TimeUnit, ArrayBlockingQueue} diff --git a/streaming/src/test/scala/spark/streaming/TestSuiteBase.scala b/streaming/src/test/scala/spark/streaming/TestSuiteBase.scala index a44f738957..28bdd53c3c 100644 --- a/streaming/src/test/scala/spark/streaming/TestSuiteBase.scala +++ b/streaming/src/test/scala/spark/streaming/TestSuiteBase.scala @@ -1,12 +1,16 @@ package spark.streaming +import spark.streaming.dstream.{InputDStream, ForEachDStream} +import spark.streaming.util.ManualClock + import spark.{RDD, Logging} -import util.ManualClock + import collection.mutable.ArrayBuffer -import org.scalatest.FunSuite import collection.mutable.SynchronizedBuffer + import java.io.{ObjectInputStream, IOException} +import org.scalatest.FunSuite /** * This is an input stream just for the testsuites. This is equivalent to a checkpointable, @@ -70,6 +74,10 @@ trait TestSuiteBase extends FunSuite with Logging { def actuallyWait = false + /** + * Set up required DStreams to test the DStream operation using the sequence + * of input collections. + */ def setupStreams[U: ClassManifest, V: ClassManifest]( input: Seq[Seq[U]], operation: DStream[U] => DStream[V] @@ -90,6 +98,10 @@ trait TestSuiteBase extends FunSuite with Logging { ssc } + /** + * Set up required DStreams to test the binary operation using the two sequences + * of input collections.
+ */ def setupStreams[U: ClassManifest, V: ClassManifest, W: ClassManifest]( input1: Seq[Seq[U]], input2: Seq[Seq[V]], @@ -173,6 +185,11 @@ trait TestSuiteBase extends FunSuite with Logging { output } + /** + * Verify whether the output values after running a DStream operation + * are the same as the expected output values, by comparing the output + * collections either as lists (order matters) or sets (order does not matter) + */ def verifyOutput[V: ClassManifest]( output: Seq[Seq[V]], expectedOutput: Seq[Seq[V]], @@ -199,6 +216,10 @@ trait TestSuiteBase extends FunSuite with Logging { logInfo("Output verified successfully") } + /** + * Test unary DStream operation with a list of inputs, with number of + * batches to run same as the number of expected output values + */ def testOperation[U: ClassManifest, V: ClassManifest]( input: Seq[Seq[U]], operation: DStream[U] => DStream[V], @@ -208,6 +229,15 @@ trait TestSuiteBase extends FunSuite with Logging { testOperation[U, V](input, operation, expectedOutput, -1, useSet) } + /** + * Test unary DStream operation with a list of inputs + * @param input Sequence of input collections + * @param operation Unary DStream operation to be applied to the input + * @param expectedOutput Sequence of expected output collections + * @param numBatches Number of batches to run the operation for + * @param useSet Compare the output values with the expected output values + * as sets (order does not matter) or as lists (order matters) + */ def testOperation[U: ClassManifest, V: ClassManifest]( input: Seq[Seq[U]], operation: DStream[U] => DStream[V], @@ -221,6 +251,10 @@ trait TestSuiteBase extends FunSuite with Logging { verifyOutput[V](output, expectedOutput, useSet) } + /** + * Test binary DStream operation with two lists of inputs, with number of + * batches to run same as the number of expected output values + */ def testOperation[U: ClassManifest, V: ClassManifest, W: ClassManifest]( input1: Seq[Seq[U]], input2: Seq[Seq[V]], operation: (DStream[U], DStream[V]) => DStream[W], expectedOutput: Seq[Seq[W]], useSet: Boolean = false ) { testOperation[U, V, W](input1, input2, operation, expectedOutput, -1, useSet) } + /** + * Test binary DStream operation with two lists of inputs + * @param input1 First sequence of input collections + * @param input2 Second sequence of input collections + * @param operation Binary DStream operation to be applied to the 2 inputs + * @param expectedOutput Sequence of expected output collections + * @param numBatches Number of batches to run the operation for + * @param useSet Compare the output values with the expected output values + * as sets (order does not matter) or as lists (order matters) + */ def testOperation[U: ClassManifest, V: ClassManifest, W: ClassManifest]( input1: Seq[Seq[U]], input2: Seq[Seq[V]], diff --git a/streaming/src/test/scala/spark/streaming/WindowOperationsSuite.scala b/streaming/src/test/scala/spark/streaming/WindowOperationsSuite.scala index 3e20e16708..4bc5229465 100644 --- a/streaming/src/test/scala/spark/streaming/WindowOperationsSuite.scala +++ b/streaming/src/test/scala/spark/streaming/WindowOperationsSuite.scala @@ -209,7 +209,7 @@ class WindowOperationsSuite extends TestSuiteBase { val expectedOutput = bigGroupByOutput.map(_.map(x => (x._1, x._2.toSet))) val windowTime = Seconds(2) val slideTime = Seconds(1) - val numBatches = expectedOutput.size * (slideTime.millis / batchDuration.millis).toInt + val numBatches = expectedOutput.size * (slideTime / batchDuration).toInt val operation = (s: DStream[(String, Int)]) => {
s.groupByKeyAndWindow(windowTime, slideTime) .map(x => (x._1, x._2.toSet)) @@ -223,7 +223,7 @@ class WindowOperationsSuite extends TestSuiteBase { val expectedOutput = Seq( Seq(1), Seq(2), Seq(3), Seq(3), Seq(1), Seq(0)) val windowTime = Seconds(2) val slideTime = Seconds(1) - val numBatches = expectedOutput.size * (slideTime.millis / batchDuration.millis).toInt + val numBatches = expectedOutput.size * (slideTime / batchDuration).toInt val operation = (s: DStream[Int]) => s.countByWindow(windowTime, slideTime) testOperation(input, operation, expectedOutput, numBatches, true) } @@ -233,7 +233,7 @@ class WindowOperationsSuite extends TestSuiteBase { val expectedOutput = Seq( Seq(("a", 1)), Seq(("a", 1), ("b", 2)), Seq(("a", 1), ("b", 3))) val windowTime = Seconds(2) val slideTime = Seconds(1) - val numBatches = expectedOutput.size * (slideTime.millis / batchDuration.millis).toInt + val numBatches = expectedOutput.size * (slideTime / batchDuration).toInt val operation = (s: DStream[(String, Int)]) => { s.countByKeyAndWindow(windowTime, slideTime).map(x => (x._1, x._2.toInt)) } @@ -251,7 +251,7 @@ class WindowOperationsSuite extends TestSuiteBase { slideTime: Time = Seconds(1) ) { test("window - " + name) { - val numBatches = expectedOutput.size * (slideTime.millis / batchDuration.millis).toInt + val numBatches = expectedOutput.size * (slideTime / batchDuration).toInt val operation = (s: DStream[Int]) => s.window(windowTime, slideTime) testOperation(input, operation, expectedOutput, numBatches, true) } @@ -265,7 +265,7 @@ class WindowOperationsSuite extends TestSuiteBase { slideTime: Time = Seconds(1) ) { test("reduceByKeyAndWindow - " + name) { - val numBatches = expectedOutput.size * (slideTime.millis / batchDuration.millis).toInt + val numBatches = expectedOutput.size * (slideTime / batchDuration).toInt val operation = (s: DStream[(String, Int)]) => { s.reduceByKeyAndWindow(_ + _, windowTime, slideTime).persist() } @@ -281,7 +281,7 @@ class WindowOperationsSuite extends TestSuiteBase { slideTime: Time = Seconds(1) ) { test("reduceByKeyAndWindowInv - " + name) { - val numBatches = expectedOutput.size * (slideTime.millis / batchDuration.millis).toInt + val numBatches = expectedOutput.size * (slideTime / batchDuration).toInt val operation = (s: DStream[(String, Int)]) => { s.reduceByKeyAndWindow(_ + _, _ - _, windowTime, slideTime) .persist() From 18b9b3b99fd753899d19bd10f0dbef7d5c4ae8d7 Mon Sep 17 00:00:00 2001 From: Tathagata Das Date: Sun, 30 Dec 2012 20:00:42 -0800 Subject: [PATCH 156/226] More classes made private[streaming] to hide from scala docs. 
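As a rough sketch of the visibility pattern this commit applies (the class name below is hypothetical, not from the patch): a `private[streaming]` definition remains usable anywhere under the `spark.streaming` package, but is hidden from user code and therefore from the generated Scaladoc.

{% highlight scala %}
package spark.streaming.dstream

// Hypothetical helper: visible to everything under the spark.streaming
// package, invisible to user applications and excluded from public Scaladoc.
private[streaming]
class InternalBookkeeping {
  def describe(): String = "usable only inside spark.streaming"
}
{% endhighlight %}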
--- .gitignore | 2 + .../main/scala/spark/streaming/DStream.scala | 2 +- .../scala/spark/streaming/JobManager.scala | 2 +- .../scala/spark/streaming/Scheduler.scala | 2 +- .../spark/streaming/StreamingContext.scala | 108 +++++++++++++----- .../src/main/scala/spark/streaming/Time.scala | 30 +++-- .../streaming/dstream/CoGroupedDStream.scala | 1 + .../streaming/dstream/FileInputDStream.scala | 2 +- .../streaming/dstream/FlumeInputDStream.scala | 13 ++- .../streaming/dstream/KafkaInputDStream.scala | 6 +- .../streaming/dstream/RawInputDStream.scala | 2 + .../dstream/ReducedWindowedDStream.scala | 1 + .../dstream/SocketInputDStream.scala | 5 +- .../streaming/dstream/StateDStream.scala | 1 + .../streaming/dstream/UnionDStream.scala | 1 + .../streaming/dstream/WindowedDStream.scala | 2 +- .../scala/spark/streaming/util/Clock.scala | 8 +- .../spark/streaming/util/RecurringTimer.scala | 1 + 18 files changed, 137 insertions(+), 52 deletions(-) diff --git a/.gitignore b/.gitignore index c207409e3c..88d7b56181 100644 --- a/.gitignore +++ b/.gitignore @@ -12,6 +12,7 @@ third_party/libmesos.so third_party/libmesos.dylib conf/java-opts conf/spark-env.sh +conf/streaming-env.sh conf/log4j.properties docs/_site docs/api @@ -31,4 +32,5 @@ project/plugins/src_managed/ logs/ log/ spark-tests.log +streaming-tests.log dependency-reduced-pom.xml diff --git a/streaming/src/main/scala/spark/streaming/DStream.scala b/streaming/src/main/scala/spark/streaming/DStream.scala index 292ad3b9f9..beba9cfd4f 100644 --- a/streaming/src/main/scala/spark/streaming/DStream.scala +++ b/streaming/src/main/scala/spark/streaming/DStream.scala @@ -189,7 +189,7 @@ abstract class DStream[T: ClassManifest] ( val metadataCleanerDelay = spark.util.MetadataCleaner.getDelaySeconds logInfo("metadataCleanupDelay = " + metadataCleanerDelay) assert( - metadataCleanerDelay < 0 || rememberDuration < metadataCleanerDelay * 1000, + metadataCleanerDelay < 0 || rememberDuration.milliseconds < metadataCleanerDelay * 1000, "It seems you are doing some DStream window operation or setting a checkpoint interval " + "which requires " + this.getClass.getSimpleName + " to remember generated RDDs for more " + "than " + rememberDuration.milliseconds + " milliseconds. 
But the Spark's metadata cleanup" + diff --git a/streaming/src/main/scala/spark/streaming/JobManager.scala b/streaming/src/main/scala/spark/streaming/JobManager.scala index fda7264a27..3b910538e0 100644 --- a/streaming/src/main/scala/spark/streaming/JobManager.scala +++ b/streaming/src/main/scala/spark/streaming/JobManager.scala @@ -14,7 +14,7 @@ class JobManager(ssc: StreamingContext, numThreads: Int = 1) extends Logging { try { val timeTaken = job.run() logInfo("Total delay: %.5f s for job %s (execution: %.5f s)".format( - (System.currentTimeMillis() - job.time) / 1000.0, job.id, timeTaken / 1000.0)) + (System.currentTimeMillis() - job.time.milliseconds) / 1000.0, job.id, timeTaken / 1000.0)) } catch { case e: Exception => logError("Running " + job + " failed", e) diff --git a/streaming/src/main/scala/spark/streaming/Scheduler.scala b/streaming/src/main/scala/spark/streaming/Scheduler.scala index aeb7c3eb0e..eb40affe6d 100644 --- a/streaming/src/main/scala/spark/streaming/Scheduler.scala +++ b/streaming/src/main/scala/spark/streaming/Scheduler.scala @@ -22,7 +22,7 @@ class Scheduler(ssc: StreamingContext) extends Logging { val clockClass = System.getProperty("spark.streaming.clock", "spark.streaming.util.SystemClock") val clock = Class.forName(clockClass).newInstance().asInstanceOf[Clock] - val timer = new RecurringTimer(clock, ssc.graph.batchDuration, generateRDDs(_)) + val timer = new RecurringTimer(clock, ssc.graph.batchDuration.milliseconds, generateRDDs(_)) def start() { // If context was started from checkpoint, then restart timer such that diff --git a/streaming/src/main/scala/spark/streaming/StreamingContext.scala b/streaming/src/main/scala/spark/streaming/StreamingContext.scala index ef73049a81..7256e41af9 100644 --- a/streaming/src/main/scala/spark/streaming/StreamingContext.scala +++ b/streaming/src/main/scala/spark/streaming/StreamingContext.scala @@ -15,7 +15,6 @@ import org.apache.hadoop.io.LongWritable import org.apache.hadoop.io.Text import org.apache.hadoop.mapreduce.{InputFormat => NewInputFormat} import org.apache.hadoop.mapreduce.lib.input.TextInputFormat -import org.apache.flume.source.avro.AvroFlumeEvent import org.apache.hadoop.fs.Path import java.util.UUID @@ -101,14 +100,27 @@ class StreamingContext private ( protected[streaming] var receiverJobThread: Thread = null protected[streaming] var scheduler: Scheduler = null + /** + * Sets each DStream in this context to remember the RDDs it generated in the last given duration. + * DStreams remember RDDs only for a limited duration of time and release them for garbage + * collection. This method allows the developer to specify how long to remember the RDDs ( + * if the developer wishes to query old data outside the DStream computation). + * @param duration Minimum duration that each DStream should remember its RDDs + */ def remember(duration: Time) { graph.remember(duration) } - def checkpoint(dir: String, interval: Time = null) { - if (dir != null) { - sc.setCheckpointDir(StreamingContext.getSparkCheckpointDir(dir)) - checkpointDir = dir + /** + * Sets the context to periodically checkpoint the DStream operations for master + * fault-tolerance. By default, the graph will be checkpointed every batch interval.
+ * @param directory HDFS-compatible directory where the checkpoint data will be reliably stored + * @param interval checkpoint interval + */ + def checkpoint(directory: String, interval: Time = null) { + if (directory != null) { + sc.setCheckpointDir(StreamingContext.getSparkCheckpointDir(directory)) + checkpointDir = directory checkpointInterval = interval } else { checkpointDir = null @@ -122,9 +134,8 @@ class StreamingContext private ( protected[streaming] def getNewNetworkStreamId() = nextNetworkInputStreamId.getAndIncrement() - /** + /** * Create an input stream that pulls messages from a Kafka Broker. - * * @param hostname Zookeeper hostname. * @param port Zookeeper port. * @param groupId The group id for this consumer. @@ -147,6 +158,15 @@ class StreamingContext private ( inputStream } + /** + * Create an input stream from network source hostname:port. Data is received using + * a TCP socket and the received bytes are interpreted as UTF8 encoded \n delimited + * lines. + * @param hostname Hostname to connect to for receiving data + * @param port Port to connect to for receiving data + * @param storageLevel Storage level to use for storing the received objects + * (default: StorageLevel.MEMORY_AND_DISK_SER_2) + */ def networkTextStream( hostname: String, port: Int, @@ -155,6 +175,16 @@ class StreamingContext private ( networkStream[String](hostname, port, SocketReceiver.bytesToLines, storageLevel) } + /** + * Create an input stream from network source hostname:port. Data is received using + * a TCP socket and the received bytes are interpreted as objects using the given + * converter. + * @param hostname Hostname to connect to for receiving data + * @param port Port to connect to for receiving data + * @param converter Function to convert the byte stream to objects + * @param storageLevel Storage level to use for storing the received objects + * @tparam T Type of the objects received (after converting bytes to objects) + */ def networkStream[T: ClassManifest]( hostname: String, port: Int, @@ -166,16 +196,32 @@ class StreamingContext private ( inputStream } + /** + * Creates an input stream from a Flume source. + * @param hostname Hostname of the slave machine to which the flume data will be sent + * @param port Port of the slave machine to which the flume data will be sent + * @param storageLevel Storage level to use for storing the received objects + */ def flumeStream ( - hostname: String, - port: Int, - storageLevel: StorageLevel = StorageLevel.MEMORY_AND_DISK_SER_2): DStream[SparkFlumeEvent] = { + hostname: String, + port: Int, + storageLevel: StorageLevel = StorageLevel.MEMORY_AND_DISK_SER_2 + ): DStream[SparkFlumeEvent] = { val inputStream = new FlumeInputDStream(this, hostname, port, storageLevel) graph.addInputStream(inputStream) inputStream } - + /** + * Create an input stream from network source hostname:port, where data is received + * as serialized blocks (serialized using Spark's serializer) that can be directly + * pushed into the block manager without deserializing them. This is the most efficient + * way to receive data.
+ * @param hostname Hostname to connect to for receiving data + * @param port Port to connect to for receiving data + * @param storageLevel Storage level to use for storing the received objects + * @tparam T Type of the objects in the received blocks + */ def rawNetworkStream[T: ClassManifest]( hostname: String, port: Int, @@ -188,7 +234,11 @@ class StreamingContext private ( /** * Creates an input stream that monitors a Hadoop-compatible filesystem - * for new files and executes the necessary processing on them. + * for new files and reads them using the given key-value types and input format. + * @param directory HDFS directory to monitor for new files + * @tparam K Key type for reading HDFS file + * @tparam V Value type for reading HDFS file + * @tparam F Input format for reading HDFS file */ def fileStream[ K: ClassManifest, @@ -200,13 +250,23 @@ class StreamingContext private ( inputStream } + /** + * Creates an input stream that monitors a Hadoop-compatible filesystem + * for new files and reads them as text files (using key as LongWritable, value + * as Text and input format as TextInputFormat). + * @param directory HDFS directory to monitor for new files + */ def textFileStream(directory: String): DStream[String] = { fileStream[LongWritable, Text, TextInputFormat](directory).map(_._2.toString) } /** * Creates an input stream from a queue of RDDs. In each batch, - * it will process either one or all of the RDDs returned by the queue + * it will process either one or all of the RDDs returned by the queue. + * @param queue Queue of RDDs + * @param oneAtATime Whether only one RDD should be consumed from the queue in every interval + * @param defaultRDD Default RDD returned by the DStream when the queue is empty + * @tparam T Type of objects in the RDD */ def queueStream[T: ClassManifest]( queue: Queue[RDD[T]], @@ -218,13 +278,9 @@ class StreamingContext private ( inputStream } - def queueStream[T: ClassManifest](array: Array[RDD[T]]): DStream[T] = { - val queue = new Queue[RDD[T]] - val inputStream = queueStream(queue, true, null) - queue ++= array - inputStream - } - + /** + * Create a unified DStream from multiple DStreams of the same type and same interval + */ def union[T: ClassManifest](streams: Seq[DStream[T]]): DStream[T] = { new UnionDStream[T](streams.toArray) } @@ -256,7 +312,7 @@ class StreamingContext private ( } /** - * This function starts the execution of the streams. + * Starts the execution of the streams. */ def start() { if (checkpointDir != null && checkpointInterval == null && graph != null) { @@ -284,7 +340,7 @@ class StreamingContext private ( } /** - * This function stops the execution of the streams. + * Stops the execution of the streams. */ def stop() { try { @@ -302,6 +358,10 @@ class StreamingContext private ( object StreamingContext { + implicit def toPairDStreamFunctions[K: ClassManifest, V: ClassManifest](stream: DStream[(K,V)]) = { + new PairDStreamFunctions[K, V](stream) + } + protected[streaming] def createNewSparkContext(master: String, frameworkName: String): SparkContext = { // Set the default cleaner delay to an hour if not already set.
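The scaladocs added above describe the main ways of attaching input sources to a StreamingContext. As a rough, hedged usage sketch (the master string, hostnames, ports, and paths below are placeholders, not taken from the patch), a driver program might combine them like this:

{% highlight scala %}
import spark.streaming.{Seconds, StreamingContext}
import spark.streaming.StreamingContext._

object InputSourcesSketch {
  def main(args: Array[String]) {
    // Placeholder master, job name and batch duration.
    val ssc = new StreamingContext("local[2]", "InputSourcesSketch", Seconds(1))

    // UTF8-encoded, '\n'-delimited text received over a TCP socket.
    val socketLines = ssc.networkTextStream("localhost", 9999)

    // New files appearing in an HDFS-compatible directory, read as text.
    val fileLines = ssc.textFileStream("hdfs://namenode:8020/incoming")

    // Unified DStream over sources of the same type and slide interval.
    val allLines = ssc.union(Seq(socketLines, fileLines))
    allLines.count().print()

    ssc.start()
  }
}
{% endhighlight %}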
@@ -312,10 +372,6 @@ object StreamingContext { new SparkContext(master, frameworkName) } - implicit def toPairDStreamFunctions[K: ClassManifest, V: ClassManifest](stream: DStream[(K,V)]) = { - new PairDStreamFunctions[K, V](stream) - } - protected[streaming] def rddToFileName[T](prefix: String, suffix: String, time: Time): String = { if (prefix == null) { time.milliseconds.toString diff --git a/streaming/src/main/scala/spark/streaming/Time.scala b/streaming/src/main/scala/spark/streaming/Time.scala index 2976e5e87b..3c6fd5d967 100644 --- a/streaming/src/main/scala/spark/streaming/Time.scala +++ b/streaming/src/main/scala/spark/streaming/Time.scala @@ -1,16 +1,18 @@ package spark.streaming /** - * This class is simple wrapper class that represents time in UTC. - * @param millis Time in UTC long + * This is a simple class that represents time. Internally, it represents time as UTC. + * The recommended way to create instances of Time is to use helper objects + * [[spark.streaming.Milliseconds]], [[spark.streaming.Seconds]], and [[spark.streaming.Minutes]]. + * @param millis Time in UTC. */ case class Time(private val millis: Long) { def < (that: Time): Boolean = (this.millis < that.millis) - + def <= (that: Time): Boolean = (this.millis <= that.millis) - + def > (that: Time): Boolean = (this.millis > that.millis) def >= (that: Time): Boolean = (this.millis >= that.millis) @@ -45,23 +47,33 @@ case class Time(private val millis: Long) { def milliseconds: Long = millis } -object Time { +private[streaming] object Time { val zero = Time(0) implicit def toTime(long: Long) = Time(long) - - implicit def toLong(time: Time) = time.milliseconds } +/** + * Helper object that creates instance of [[spark.streaming.Time]] representing + * a given number of milliseconds. + */ object Milliseconds { def apply(milliseconds: Long) = Time(milliseconds) } +/** + * Helper object that creates instance of [[spark.streaming.Time]] representing + * a given number of seconds. + */ object Seconds { def apply(seconds: Long) = Time(seconds * 1000) -} +} -object Minutes { +/** + * Helper object that creates instance of [[spark.streaming.Time]] representing + * a given number of minutes. 
+ */ +object Minutes { def apply(minutes: Long) = Time(minutes * 60000) } diff --git a/streaming/src/main/scala/spark/streaming/dstream/CoGroupedDStream.scala b/streaming/src/main/scala/spark/streaming/dstream/CoGroupedDStream.scala index 2e427dadf7..bc23d423d3 100644 --- a/streaming/src/main/scala/spark/streaming/dstream/CoGroupedDStream.scala +++ b/streaming/src/main/scala/spark/streaming/dstream/CoGroupedDStream.scala @@ -4,6 +4,7 @@ import spark.{RDD, Partitioner} import spark.rdd.CoGroupedRDD import spark.streaming.{Time, DStream} +private[streaming] class CoGroupedDStream[K : ClassManifest]( parents: Seq[DStream[(_, _)]], partitioner: Partitioner diff --git a/streaming/src/main/scala/spark/streaming/dstream/FileInputDStream.scala b/streaming/src/main/scala/spark/streaming/dstream/FileInputDStream.scala index 8cdaff467b..cf72095324 100644 --- a/streaming/src/main/scala/spark/streaming/dstream/FileInputDStream.scala +++ b/streaming/src/main/scala/spark/streaming/dstream/FileInputDStream.scala @@ -10,7 +10,7 @@ import org.apache.hadoop.mapreduce.{InputFormat => NewInputFormat} import scala.collection.mutable.HashSet - +private[streaming] class FileInputDStream[K: ClassManifest, V: ClassManifest, F <: NewInputFormat[K,V] : ClassManifest]( @transient ssc_ : StreamingContext, directory: String, diff --git a/streaming/src/main/scala/spark/streaming/dstream/FlumeInputDStream.scala b/streaming/src/main/scala/spark/streaming/dstream/FlumeInputDStream.scala index 7e988cadf4..ff73225e0f 100644 --- a/streaming/src/main/scala/spark/streaming/dstream/FlumeInputDStream.scala +++ b/streaming/src/main/scala/spark/streaming/dstream/FlumeInputDStream.scala @@ -17,6 +17,7 @@ import java.net.InetSocketAddress import java.io.{ObjectInput, ObjectOutput, Externalizable} import java.nio.ByteBuffer +private[streaming] class FlumeInputDStream[T: ClassManifest]( @transient ssc_ : StreamingContext, host: String, @@ -93,6 +94,7 @@ private[streaming] object SparkFlumeEvent { } /** A simple server that implements Flume's Avro protocol. 
*/ +private[streaming] class FlumeEventServer(receiver : FlumeReceiver) extends AvroSourceProtocol { override def append(event : AvroFlumeEvent) : Status = { receiver.dataHandler += SparkFlumeEvent.fromAvroFlumeEvent(event) @@ -108,12 +110,13 @@ class FlumeEventServer(receiver : FlumeReceiver) extends AvroSourceProtocol { /** A NetworkReceiver which listens for events using the * Flume Avro interface.*/ +private[streaming] class FlumeReceiver( - streamId: Int, - host: String, - port: Int, - storageLevel: StorageLevel - ) extends NetworkReceiver[SparkFlumeEvent](streamId) { + streamId: Int, + host: String, + port: Int, + storageLevel: StorageLevel + ) extends NetworkReceiver[SparkFlumeEvent](streamId) { lazy val dataHandler = new DataHandler(this, storageLevel) diff --git a/streaming/src/main/scala/spark/streaming/dstream/KafkaInputDStream.scala b/streaming/src/main/scala/spark/streaming/dstream/KafkaInputDStream.scala index a46721af2f..175c75bcb9 100644 --- a/streaming/src/main/scala/spark/streaming/dstream/KafkaInputDStream.scala +++ b/streaming/src/main/scala/spark/streaming/dstream/KafkaInputDStream.scala @@ -21,10 +21,12 @@ import scala.collection.JavaConversions._ case class KafkaPartitionKey(brokerId: Int, topic: String, groupId: String, partId: Int) // NOT USED - Originally intended for fault-tolerance // Metadata for a Kafka Stream that it sent to the Master +private[streaming] case class KafkaInputDStreamMetadata(timestamp: Long, data: Map[KafkaPartitionKey, Long]) // NOT USED - Originally intended for fault-tolerance // Checkpoint data specific to a KafkaInputDstream -case class KafkaDStreamCheckpointData(kafkaRdds: HashMap[Time, Any], +private[streaming] +case class KafkaDStreamCheckpointData(kafkaRdds: HashMap[Time, Any], savedOffsets: Map[KafkaPartitionKey, Long]) extends DStreamCheckpointData(kafkaRdds) /** @@ -39,6 +41,7 @@ case class KafkaDStreamCheckpointData(kafkaRdds: HashMap[Time, Any], * By default the value is pulled from zookeper. * @param storageLevel RDD storage level. */ +private[streaming] class KafkaInputDStream[T: ClassManifest]( @transient ssc_ : StreamingContext, host: String, @@ -98,6 +101,7 @@ class KafkaInputDStream[T: ClassManifest]( } } +private[streaming] class KafkaReceiver(streamId: Int, host: String, port: Int, groupId: String, topics: Map[String, Int], initialOffsets: Map[KafkaPartitionKey, Long], storageLevel: StorageLevel) extends NetworkReceiver[Any](streamId) { diff --git a/streaming/src/main/scala/spark/streaming/dstream/RawInputDStream.scala b/streaming/src/main/scala/spark/streaming/dstream/RawInputDStream.scala index 996cc7dea8..aa2f31cea8 100644 --- a/streaming/src/main/scala/spark/streaming/dstream/RawInputDStream.scala +++ b/streaming/src/main/scala/spark/streaming/dstream/RawInputDStream.scala @@ -17,6 +17,7 @@ import java.util.concurrent.ArrayBlockingQueue * data into Spark Streaming, though it requires the sender to batch data and serialize it * in the format that the system is configured with. 
*/ +private[streaming] class RawInputDStream[T: ClassManifest]( @transient ssc_ : StreamingContext, host: String, @@ -29,6 +30,7 @@ class RawInputDStream[T: ClassManifest]( } } +private[streaming] class RawNetworkReceiver(streamId: Int, host: String, port: Int, storageLevel: StorageLevel) extends NetworkReceiver[Any](streamId) { diff --git a/streaming/src/main/scala/spark/streaming/dstream/ReducedWindowedDStream.scala b/streaming/src/main/scala/spark/streaming/dstream/ReducedWindowedDStream.scala index 2686de14d2..d289ed2a3f 100644 --- a/streaming/src/main/scala/spark/streaming/dstream/ReducedWindowedDStream.scala +++ b/streaming/src/main/scala/spark/streaming/dstream/ReducedWindowedDStream.scala @@ -11,6 +11,7 @@ import spark.storage.StorageLevel import scala.collection.mutable.ArrayBuffer import spark.streaming.{Interval, Time, DStream} +private[streaming] class ReducedWindowedDStream[K: ClassManifest, V: ClassManifest]( parent: DStream[(K, V)], reduceFunc: (V, V) => V, diff --git a/streaming/src/main/scala/spark/streaming/dstream/SocketInputDStream.scala b/streaming/src/main/scala/spark/streaming/dstream/SocketInputDStream.scala index af5b73ae8d..cbe4372299 100644 --- a/streaming/src/main/scala/spark/streaming/dstream/SocketInputDStream.scala +++ b/streaming/src/main/scala/spark/streaming/dstream/SocketInputDStream.scala @@ -6,6 +6,7 @@ import spark.storage.StorageLevel import java.io._ import java.net.Socket +private[streaming] class SocketInputDStream[T: ClassManifest]( @transient ssc_ : StreamingContext, host: String, @@ -19,7 +20,7 @@ class SocketInputDStream[T: ClassManifest]( } } - +private[streaming] class SocketReceiver[T: ClassManifest]( streamId: Int, host: String, @@ -50,7 +51,7 @@ class SocketReceiver[T: ClassManifest]( } - +private[streaming] object SocketReceiver { /** diff --git a/streaming/src/main/scala/spark/streaming/dstream/StateDStream.scala b/streaming/src/main/scala/spark/streaming/dstream/StateDStream.scala index 6e190b5564..175b3060c1 100644 --- a/streaming/src/main/scala/spark/streaming/dstream/StateDStream.scala +++ b/streaming/src/main/scala/spark/streaming/dstream/StateDStream.scala @@ -6,6 +6,7 @@ import spark.SparkContext._ import spark.storage.StorageLevel import spark.streaming.{Time, DStream} +private[streaming] class StateDStream[K: ClassManifest, V: ClassManifest, S <: AnyRef : ClassManifest]( parent: DStream[(K, V)], updateFunc: (Iterator[(K, Seq[V], Option[S])]) => Iterator[(K, S)], diff --git a/streaming/src/main/scala/spark/streaming/dstream/UnionDStream.scala b/streaming/src/main/scala/spark/streaming/dstream/UnionDStream.scala index f1efb2ae72..3bf4c2ecea 100644 --- a/streaming/src/main/scala/spark/streaming/dstream/UnionDStream.scala +++ b/streaming/src/main/scala/spark/streaming/dstream/UnionDStream.scala @@ -5,6 +5,7 @@ import spark.RDD import collection.mutable.ArrayBuffer import spark.rdd.UnionRDD +private[streaming] class UnionDStream[T: ClassManifest](parents: Array[DStream[T]]) extends DStream[T](parents.head.ssc) { diff --git a/streaming/src/main/scala/spark/streaming/dstream/WindowedDStream.scala b/streaming/src/main/scala/spark/streaming/dstream/WindowedDStream.scala index 4b2621c497..7718794cbf 100644 --- a/streaming/src/main/scala/spark/streaming/dstream/WindowedDStream.scala +++ b/streaming/src/main/scala/spark/streaming/dstream/WindowedDStream.scala @@ -5,7 +5,7 @@ import spark.rdd.UnionRDD import spark.storage.StorageLevel import spark.streaming.{Interval, Time, DStream} - +private[streaming] class WindowedDStream[T: 
ClassManifest]( parent: DStream[T], _windowTime: Time, diff --git a/streaming/src/main/scala/spark/streaming/util/Clock.scala b/streaming/src/main/scala/spark/streaming/util/Clock.scala index ed087e4ea8..974651f9f6 100644 --- a/streaming/src/main/scala/spark/streaming/util/Clock.scala +++ b/streaming/src/main/scala/spark/streaming/util/Clock.scala @@ -1,13 +1,12 @@ package spark.streaming.util -import spark.streaming._ - -trait Clock { +private[streaming] +trait Clock { def currentTime(): Long def waitTillTime(targetTime: Long): Long } - +private[streaming] class SystemClock() extends Clock { val minPollTime = 25L @@ -54,6 +53,7 @@ class SystemClock() extends Clock { } } +private[streaming] class ManualClock() extends Clock { var time = 0L diff --git a/streaming/src/main/scala/spark/streaming/util/RecurringTimer.scala b/streaming/src/main/scala/spark/streaming/util/RecurringTimer.scala index dc55fd902b..2e7f4169c9 100644 --- a/streaming/src/main/scala/spark/streaming/util/RecurringTimer.scala +++ b/streaming/src/main/scala/spark/streaming/util/RecurringTimer.scala @@ -1,5 +1,6 @@ package spark.streaming.util +private[streaming] class RecurringTimer(val clock: Clock, val period: Long, val callback: (Long) => Unit) { val minPollTime = 25L From 02497f0cd49a24ebc8b92d3471de250319fe56cd Mon Sep 17 00:00:00 2001 From: Tathagata Das Date: Tue, 1 Jan 2013 12:21:32 -0800 Subject: [PATCH 157/226] Updated Streaming Programming Guide. --- docs/_layouts/global.html | 12 +- docs/api.md | 5 +- docs/configuration.md | 11 ++ docs/streaming-programming-guide.md | 167 ++++++++++++++++-- .../spark/streaming/util/RecurringTimer.scala | 1 + 5 files changed, 179 insertions(+), 17 deletions(-) diff --git a/docs/_layouts/global.html b/docs/_layouts/global.html index d656b3e3de..a8be52f23e 100755 --- a/docs/_layouts/global.html +++ b/docs/_layouts/global.html @@ -47,11 +47,19 @@
[Navigation-menu hunk from docs/_layouts/global.html; the HTML list markup was lost in extraction. Recoverable changes: the Quick Start, Scala, and Java entries appear unchanged, the "Spark Streaming (Alpha)" entry is replaced by "Spark Streaming", and the "API (Scaladoc)" entry is also replaced, though its replacement text was not preserved.]
[Table rows describing DStream output operations, with the surrounding HTML table markup lost in extraction; each row shows the old cell followed by the new cell.]
print() : description changed from "Prints the contents of this DStream on the driver. At each interval, this will take at most ten elements from the DStream's RDD and print them." to "Prints first ten elements of every batch of data in a DStream on the driver."
saveAsObjectFile(prefix, [suffix]) renamed to saveAsObjectFiles(prefix, [suffix]) : "Save this DStream's contents as a SequenceFile of serialized objects. The file name at each batch interval is calculated based on prefix and suffix: "prefix-TIME_IN_MS[.suffix]"."
saveAsTextFile(prefix, suffix) renamed to saveAsTextFiles(prefix, [suffix]) : "Save this DStream's contents as text files. The file name at each batch interval is calculated based on prefix and suffix: "prefix-TIME_IN_MS[.suffix]"."
saveAsHadoopFiles(prefix, suffix) changed to saveAsHadoopFiles(prefix, [suffix]) : "Save this DStream's contents as a Hadoop file. The file name at each batch interval is calculated based on prefix and suffix: "prefix-TIME_IN_MS[.suffix]"."
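As a hedged sketch of the renamed output operations listed above (the host, port, and file prefix are placeholders; this is not code from the patch):

{% highlight scala %}
import spark.streaming.{Seconds, StreamingContext}
import spark.streaming.StreamingContext._

val ssc = new StreamingContext("local[2]", "OutputOpsSketch", Seconds(1))
val counts = ssc.networkTextStream("localhost", 9999)
  .flatMap(_.split(" "))
  .map(word => (word, 1))
  .reduceByKey(_ + _)

counts.print()                                  // first ten elements of every batch
counts.saveAsTextFiles("out/wordCounts", "txt") // writes out/wordCounts-TIME_IN_MS.txt per batch

ssc.start()
{% endhighlight %}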
spark.cleaner.delay (disable) + Duration (minutes) of how long Spark will remember any metadata (stages generated, tasks generated, etc.). + Periodic cleanups will ensure that metadata older than this duration will be forgotten. This is + useful for running Spark for many hours / days (for example, running 24/7 in case of Spark Streaming + applications). Note that any RDD that persists in memory for more than this duration will be cleared as well. +
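A minimal, hedged sketch of how this property might be set from a driver program (the 30-minute value is an arbitrary placeholder); as the streaming guide notes later in this patch, it has to be set before the SparkContext is created and should be at least as large as the largest window used:

{% highlight scala %}
import spark.streaming.{Seconds, StreamingContext}

object CleanerDelaySketch {
  def main(args: Array[String]) {
    // Must be set before the underlying SparkContext is constructed.
    System.setProperty("spark.cleaner.delay", "30") // minutes; placeholder value

    val ssc = new StreamingContext("local[2]", "CleanerDelaySketch", Seconds(1))
    // ... attach input streams whose largest window is well under 30 minutes ...
    ssc.start()
  }
}
{% endhighlight %}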
# Configuring Logging diff --git a/docs/streaming-programming-guide.md b/docs/streaming-programming-guide.md index 7c421ac70f..fc2ea2ef79 100644 --- a/docs/streaming-programming-guide.md +++ b/docs/streaming-programming-guide.md @@ -1,8 +1,9 @@ --- layout: global -title: Streaming (Alpha) Programming Guide +title: Spark Streaming Programming Guide --- +* This will become a table of contents (this text will be scraped). {:toc} # Overview A Spark Streaming application is very similar to a Spark application; it consist This guide shows some how to start programming with DStreams. # Initializing Spark Streaming -The first thing a Spark Streaming program must do is create a `StreamingContext` object, which tells Spark how to access a cluster. A `StreamingContext` can be created from an existing `SparkContext`, or directly: +The first thing a Spark Streaming program must do is create a `StreamingContext` object, which tells Spark how to access a cluster. A `StreamingContext` can be created by using {% highlight scala %} -import spark.SparkContext -import SparkContext._ +new StreamingContext(master, jobName, batchDuration) +{% endhighlight %} -new StreamingContext(master, frameworkName, batchDuration) +The `master` parameter is either the [Mesos master URL](running-on-mesos.html) (for running on a cluster) or the special "local" string (for local mode) that is used to create a Spark Context. For more information about this please refer to the [Spark programming guide](scala-programming-guide.html). The `jobName` is the name of the streaming job, which is the same as the jobName used in SparkContext. It is used to identify this job in the Mesos web UI. The `batchDuration` is the size of the batches (as explained earlier). This must be set carefully such that the cluster can keep up with the processing of the data streams. Starting with something conservative like 5 seconds may be a good start. See the [Performance Tuning](#setting-the-right-batch-size) section for a detailed discussion. + +This constructor creates a SparkContext object using the given `master` and `jobName` parameters. However, if you already have a SparkContext or you need to create a custom SparkContext by specifying a list of JARs, then a StreamingContext can be created from the existing SparkContext, by using +{% highlight scala %} new StreamingContext(sparkContext, batchDuration) {% endhighlight %} -The `master` parameter is either the [Mesos master URL](running-on-mesos.html) (for running on a cluster)or the special "local" string (for local mode) that is used to create a Spark Context. For more information about this please refer to the [Spark programming guide](scala-programming-guide.html). -# Creating Input Sources - InputDStreams +# Attaching Input Sources - InputDStreams The StreamingContext is used to create InputDStreams from input sources: {% highlight scala %} -context.neworkStream(host, port) // Creates a stream that uses a TCP socket to read data from : -context.flumeStream(host, port) // Creates a stream populated by a Flume flow +// Assuming ssc is the StreamingContext +ssc.networkStream(hostname, port) // Creates a stream that uses a TCP socket to read data from hostname:port +ssc.textFileStream(directory) // Creates a stream by monitoring and processing new files in an HDFS directory {% endhighlight %} -A complete list of input sources is available in the [DStream API doc](api/streaming/index.html#spark.streaming.StreamingContext).
+A complete list of input sources is available in the [StreamingContext API documentation](api/streaming/index.html#spark.streaming.StreamingContext). Data received from these sources can be processed using DStream operations, which are explained next. -## DStream Operations + + +# DStream Operations Once an input stream has been created, you can transform it using _stream operators_. Most of these operators return new DStreams which you can further transform. Eventually, you'll need to call an _output operator_, which forces evaluation of the stream by writing data out to an external source. -### Transformations +## Transformations DStreams support many of the transformations available on normal Spark RDD's: @@ -132,7 +138,7 @@ Spark Streaming features windowed computations, which allow you to report statis -### Output Operators +## Output Operations When an output operator is called, it triggers the computation of a stream. Currently the following output operators are defined: @@ -165,3 +171,138 @@ When an output operator is called, it triggers the computation of a stream. Curr
+## DStream Persistence +Similar to RDDs, DStreams also allow developers to persist the stream's data in memory. That is, using the `persist()` method on a DStream would automatically persist every RDD of that DStream in memory. This is useful if the data in the DStream will be computed multiple times (e.g., multiple DStream operations on the same data). For window-based operations like `reduceByWindow` and `reduceByKeyAndWindow` and state-based operations like `updateStateByKey`, this is implicitly true. Hence, DStreams generated by window-based operations are automatically persisted in memory, without the developer calling `persist()`. + +Note that, unlike RDDs, the default persistence level of DStreams keeps the data serialized in memory. This is further discussed in the [Performance Tuning](#memory-tuning) section. More information on different persistence levels can be found in the [Spark Programming Guide](scala-programming-guide.html#rdd-persistence). + +# Starting the Streaming computation +All the above DStream operations are completely lazy, that is, the operations will start executing only after the context is started by using +{% highlight scala %} +ssc.start() +{% endhighlight %} + +Conversely, the computation can be stopped by using +{% highlight scala %} +ssc.stop() +{% endhighlight %} + +# Example - WordCountNetwork.scala +A good example to start off is the spark.streaming.examples.WordCountNetwork. This example counts the words received from a network server every second. Given below are the relevant sections of the source code. You can find the full source code in /streaming/src/main/scala/spark/streaming/examples/WordCountNetwork.scala. + +{% highlight scala %} +import spark.streaming.{Seconds, StreamingContext} +import spark.streaming.StreamingContext._ +... + +// Create the context and set up a network input stream to receive from a host:port +val ssc = new StreamingContext(args(0), "WordCountNetwork", Seconds(1)) +val lines = ssc.networkTextStream(args(1), args(2).toInt) + +// Split the lines into words, count them, and print some of the counts on the master +val words = lines.flatMap(_.split(" ")) +val wordCounts = words.map(x => (x, 1)).reduceByKey(_ + _) +wordCounts.print() + +// Start the computation +ssc.start() +{% endhighlight %} + +To run this example on your local machine, you need to first run a Netcat server by using + +{% highlight bash %} +$ nc -lk 9999 +{% endhighlight %} + +Then, in a different terminal, you can start WordCountNetwork by using + +{% highlight bash %} +$ ./run spark.streaming.examples.WordCountNetwork local[2] localhost 9999 +{% endhighlight %} + +This will make WordCountNetwork connect to the netcat server. Any lines typed in the terminal running the netcat server will be counted and printed on screen. + + +
      +{% highlight bash %} +# TERMINAL 1 +# RUNNING NETCAT + +$ nc -lk 9999 +hello world + + + + + +... +{% endhighlight %} + +{% highlight bash %} +# TERMINAL 2: RUNNING WordCountNetwork +... +2012-12-31 18:47:10,446 INFO SparkContext: Job finished: run at ThreadPoolExecutor.java:886, took 0.038817 s +------------------------------------------- +Time: 1357008430000 ms +------------------------------------------- +(hello,1) +(world,1) + +2012-12-31 18:47:10,447 INFO JobManager: Total delay: 0.44700 s for job 8 (execution: 0.44000 s) +... +{% endhighlight %} +
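As a hedged illustration of the DStream persistence behaviour described above (the master string, host, and port are placeholders), a stream that feeds several downstream computations can be persisted explicitly:

{% highlight scala %}
import spark.streaming.{Seconds, StreamingContext}
import spark.streaming.StreamingContext._

val ssc = new StreamingContext("local[2]", "PersistSketch", Seconds(1))
val pairs = ssc.networkTextStream("localhost", 9999)
  .flatMap(_.split(" "))
  .map(word => (word, 1))

// Reused by two downstream computations, so cache its RDDs; DStreams
// default to serialized in-memory storage when persisted.
pairs.persist()

pairs.reduceByKey(_ + _).print()
pairs.reduceByKey((a, b) => math.max(a, b)).print()

ssc.start()
{% endhighlight %}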
+ + + +# Performance Tuning Getting the best performance of a Spark Streaming application on a cluster requires a bit of tuning. This section explains a number of the parameters and configurations that can be tuned to improve the performance of your application. At a high level, you need to consider two things:
1. Reducing the processing time of each batch of data by efficiently using cluster resources.
2. Setting the right batch size such that the data processing can keep up with the data ingestion.
+ +## Reducing the Processing Time of each Batch +There are a number of optimizations that can be done in Spark to minimize the processing time of each batch. These have been discussed in detail in the [Tuning Guide](tuning.html). This section highlights some of the most important ones. + +### Level of Parallelism +Cluster resources may be underutilized if the number of parallel tasks used in any stage of the computation is not high enough. For example, for distributed reduce operations like `reduceByKey` and `reduceByKeyAndWindow`, the default number of parallel tasks is 8. You can pass the level of parallelism as an argument (see the [`spark.PairDStreamFunctions`](api/streaming/index.html#spark.PairDStreamFunctions) documentation), or set the system property `spark.default.parallelism` to change the default. + +### Data Serialization +The overhead of data serialization can be significant, especially when sub-second batch sizes are to be achieved. There are two aspects to it. +* Serialization of RDD data in Spark: Please refer to the detailed discussion on data serialization in the [Tuning Guide](tuning.html). However, note that unlike Spark, by default RDDs are persisted as serialized byte arrays to minimize pauses related to GC. +* Serialization of input data: To ingest external data into Spark, data received as bytes (say, from the network) needs to be deserialized from bytes and re-serialized into Spark's serialization format. Hence, the deserialization overhead of input data may be a bottleneck. + +### Task Launching Overheads +If the number of tasks launched per second is high (say, 50 or more per second), then the overhead of sending out tasks to the slaves may be significant and will make it hard to achieve sub-second latencies. The overhead can be reduced by the following changes: +* Task Serialization: Using Kryo serialization for serializing tasks can reduce the task sizes, and therefore reduce the time taken to send them to the slaves. +* Execution mode: Running Spark in Standalone mode or coarse-grained Mesos mode leads to better task launch times than the fine-grained Mesos mode. Please refer to the [Running on Mesos guide](running-on-mesos.html) for more details. +These changes may reduce batch processing time by 100s of milliseconds, thus allowing sub-second batch sizes to be viable. + +## Setting the Right Batch Size +For a Spark Streaming application running on a cluster to be stable, the processing of the data streams must keep up with the rate of ingestion of the data streams. Depending on the type of computation, the batch size used may have significant impact on the rate of ingestion that can be sustained by the Spark Streaming application on fixed cluster resources. For example, let us consider the earlier WordCountNetwork example. For a particular data rate, the system may be able to keep up with reporting word counts every 2 seconds (i.e., batch size of 2 seconds), but not every 500 milliseconds. + +A good approach to figure out the right batch size for your application is to test it with a conservative batch size (say, 5-10 seconds) and a low data rate. To verify whether the system is able to keep up with the data rate, you can check the value of the end-to-end delay experienced by each processed batch (in the Spark master logs, find the line having the phrase "Total delay"). If the delay is maintained to be less than the batch size, then the system is stable.
Otherwise, if the delay is continuously increasing, it means that the system is unable to keep up and is therefore unstable. Once you have an idea of a stable configuration, you can try increasing the data rate and/or reducing the batch size. Note that a momentary increase in the delay due to temporary data rate increases may be fine as long as the delay reduces back to a low value (i.e., less than batch size). + +## 24/7 Operation +By default, Spark does not forget any of the metadata (RDDs generated, stages processed, etc.). But for a Spark Streaming application to operate 24/7, it is necessary for Spark to do periodic cleanup of its metadata. This can be enabled by setting the Java system property `spark.cleaner.delay` to the number of minutes you want any metadata to persist. For example, setting `spark.cleaner.delay` to 10 would cause Spark to periodically clean up all metadata and persisted RDDs that are older than 10 minutes. Note that this property needs to be set before the SparkContext is created. + +This value is closely tied with any window operation that is being used. Any window operation would require the input data to be persisted in memory for at least the duration of the window. Hence it is necessary to set the delay to at least the value of the largest window operation used in the Spark Streaming application. If this delay is set too low, the application will throw an exception saying so. + +## Memory Tuning +Tuning the memory usage and GC behavior of Spark applications has been discussed in great detail in the [Tuning Guide](tuning.html). It is recommended that you read it. In this section, we highlight a few customizations that are strongly recommended to minimize GC related pauses in Spark Streaming applications and achieve more consistent batch processing times. + +* Default persistence level of DStreams: Unlike RDDs, the default persistence level of DStreams serializes the data in memory (that is, [StorageLevel.MEMORY_ONLY_SER](api/core/index.html#spark.storage.StorageLevel$) for DStream compared to [StorageLevel.MEMORY_ONLY](api/core/index.html#spark.storage.StorageLevel$) for RDDs). Even though keeping the data serialized incurs higher serialization overhead, it significantly reduces GC pauses. + +* Concurrent garbage collector: Using the concurrent mark-and-sweep GC further minimizes the variability of GC pauses. Even though concurrent GC is known to reduce the overall processing throughput of the system, its use is still recommended to achieve more consistent batch processing times. + +# Master Fault-tolerance (Alpha) +TODO + +* Checkpointing of DStream graph + +* Recovery from master faults + +* Current state and future directions \ No newline at end of file diff --git a/streaming/src/main/scala/spark/streaming/util/RecurringTimer.scala b/streaming/src/main/scala/spark/streaming/util/RecurringTimer.scala index 2e7f4169c9..db715cc295 100644 --- a/streaming/src/main/scala/spark/streaming/util/RecurringTimer.scala +++ b/streaming/src/main/scala/spark/streaming/util/RecurringTimer.scala @@ -54,6 +54,7 @@ class RecurringTimer(val clock: Clock, val period: Long, val callback: (Long) => } } +private[streaming] object RecurringTimer { def main(args: Array[String]) { From 3dc87dd923578f20f2c6945be7d8c03797e76237 Mon Sep 17 00:00:00 2001 From: Tathagata Das Date: Tue, 1 Jan 2013 16:38:04 -0800 Subject: [PATCH 158/226] Fixed compilation bug in RDDSuite created during merge for mesos/master.
--- core/src/test/scala/spark/RDDSuite.scala | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/core/src/test/scala/spark/RDDSuite.scala b/core/src/test/scala/spark/RDDSuite.scala index eab09956bb..e5a59dc7d6 100644 --- a/core/src/test/scala/spark/RDDSuite.scala +++ b/core/src/test/scala/spark/RDDSuite.scala @@ -105,9 +105,9 @@ class RDDSuite extends FunSuite with BeforeAndAfter { sc = new SparkContext("local", "test") val onlySplit = new Split { override def index: Int = 0 } var shouldFail = true - val rdd = new RDD[Int](sc) { - override def splits: Array[Split] = Array(onlySplit) - override val dependencies = List[Dependency[_]]() + val rdd = new RDD[Int](sc, Nil) { + override def getSplits: Array[Split] = Array(onlySplit) + override val getDependencies = List[Dependency[_]]() override def compute(split: Split, context: TaskContext): Iterator[Int] = { if (shouldFail) { throw new Exception("injected failure") From 493d65ce651dffc79adcdada0eeeed6452b3cc47 Mon Sep 17 00:00:00 2001 From: Patrick Wendell Date: Wed, 2 Jan 2013 13:35:26 -0800 Subject: [PATCH 159/226] Several code-quality improvements to DataHandler. - Changed to more accurate name: BufferingBlockCreator - Docstring now correctly reflects the abstraction offered by the class - Made internal methods private - Fixed indentation problems --- .../streaming/BufferingBlockCreator.scala | 80 ++++++++++++++++++ .../scala/spark/streaming/DataHandler.scala | 83 ------------------- .../spark/streaming/FlumeInputDStream.scala | 2 +- .../spark/streaming/SocketInputDStream.scala | 2 +- .../streaming/input/KafkaInputDStream.scala | 5 +- 5 files changed, 84 insertions(+), 88 deletions(-) create mode 100644 streaming/src/main/scala/spark/streaming/BufferingBlockCreator.scala delete mode 100644 streaming/src/main/scala/spark/streaming/DataHandler.scala diff --git a/streaming/src/main/scala/spark/streaming/BufferingBlockCreator.scala b/streaming/src/main/scala/spark/streaming/BufferingBlockCreator.scala new file mode 100644 index 0000000000..efd2e75d40 --- /dev/null +++ b/streaming/src/main/scala/spark/streaming/BufferingBlockCreator.scala @@ -0,0 +1,80 @@ +package spark.streaming + +import java.util.concurrent.ArrayBlockingQueue +import scala.collection.mutable.ArrayBuffer +import spark.Logging +import spark.streaming.util.{RecurringTimer, SystemClock} +import spark.storage.StorageLevel + +/** + * Batches objects created by a [[spark.streaming.NetworkReceiver]] and puts them into + * appropriately named blocks at regular intervals. This class starts two threads, + * one to periodically start a new batch and prepare the previous batch of as a block, + * the other to push the blocks into the block manager. 
+ */ +class BufferingBlockCreator[T](receiver: NetworkReceiver[T], storageLevel: StorageLevel) + extends Serializable with Logging { + + case class Block(id: String, iterator: Iterator[T], metadata: Any = null) + + val clock = new SystemClock() + val blockInterval = 200L + val blockIntervalTimer = new RecurringTimer(clock, blockInterval, updateCurrentBuffer) + val blockStorageLevel = storageLevel + val blocksForPushing = new ArrayBlockingQueue[Block](1000) + val blockPushingThread = new Thread() { override def run() { keepPushingBlocks() } } + + var currentBuffer = new ArrayBuffer[T] + + def start() { + blockIntervalTimer.start() + blockPushingThread.start() + logInfo("Data handler started") + } + + def stop() { + blockIntervalTimer.stop() + blockPushingThread.interrupt() + logInfo("Data handler stopped") + } + + def += (obj: T) { + currentBuffer += obj + } + + private def createBlock(blockId: String, iterator: Iterator[T]) : Block = { + new Block(blockId, iterator) + } + + private def updateCurrentBuffer(time: Long) { + try { + val newBlockBuffer = currentBuffer + currentBuffer = new ArrayBuffer[T] + if (newBlockBuffer.size > 0) { + val blockId = "input-" + receiver.streamId + "- " + (time - blockInterval) + val newBlock = createBlock(blockId, newBlockBuffer.toIterator) + blocksForPushing.add(newBlock) + } + } catch { + case ie: InterruptedException => + logInfo("Block interval timer thread interrupted") + case e: Exception => + receiver.stop() + } + } + + private def keepPushingBlocks() { + logInfo("Block pushing thread started") + try { + while(true) { + val block = blocksForPushing.take() + receiver.pushBlock(block.id, block.iterator, block.metadata, storageLevel) + } + } catch { + case ie: InterruptedException => + logInfo("Block pushing thread interrupted") + case e: Exception => + receiver.stop() + } + } +} \ No newline at end of file diff --git a/streaming/src/main/scala/spark/streaming/DataHandler.scala b/streaming/src/main/scala/spark/streaming/DataHandler.scala deleted file mode 100644 index 05f307a8d1..0000000000 --- a/streaming/src/main/scala/spark/streaming/DataHandler.scala +++ /dev/null @@ -1,83 +0,0 @@ -package spark.streaming - -import java.util.concurrent.ArrayBlockingQueue -import scala.collection.mutable.ArrayBuffer -import spark.Logging -import spark.streaming.util.{RecurringTimer, SystemClock} -import spark.storage.StorageLevel - - -/** - * This is a helper object that manages the data received from the socket. It divides - * the object received into small batches of 100s of milliseconds, pushes them as - * blocks into the block manager and reports the block IDs to the network input - * tracker. It starts two threads, one to periodically start a new batch and prepare - * the previous batch of as a block, the other to push the blocks into the block - * manager. 
- */ - class DataHandler[T](receiver: NetworkReceiver[T], storageLevel: StorageLevel) - extends Serializable with Logging { - - case class Block(id: String, iterator: Iterator[T], metadata: Any = null) - - val clock = new SystemClock() - val blockInterval = 200L - val blockIntervalTimer = new RecurringTimer(clock, blockInterval, updateCurrentBuffer) - val blockStorageLevel = storageLevel - val blocksForPushing = new ArrayBlockingQueue[Block](1000) - val blockPushingThread = new Thread() { override def run() { keepPushingBlocks() } } - - var currentBuffer = new ArrayBuffer[T] - - def createBlock(blockId: String, iterator: Iterator[T]) : Block = { - new Block(blockId, iterator) - } - - def start() { - blockIntervalTimer.start() - blockPushingThread.start() - logInfo("Data handler started") - } - - def stop() { - blockIntervalTimer.stop() - blockPushingThread.interrupt() - logInfo("Data handler stopped") - } - - def += (obj: T) { - currentBuffer += obj - } - - def updateCurrentBuffer(time: Long) { - try { - val newBlockBuffer = currentBuffer - currentBuffer = new ArrayBuffer[T] - if (newBlockBuffer.size > 0) { - val blockId = "input-" + receiver.streamId + "- " + (time - blockInterval) - val newBlock = createBlock(blockId, newBlockBuffer.toIterator) - blocksForPushing.add(newBlock) - } - } catch { - case ie: InterruptedException => - logInfo("Block interval timer thread interrupted") - case e: Exception => - receiver.stop() - } - } - - def keepPushingBlocks() { - logInfo("Block pushing thread started") - try { - while(true) { - val block = blocksForPushing.take() - receiver.pushBlock(block.id, block.iterator, block.metadata, storageLevel) - } - } catch { - case ie: InterruptedException => - logInfo("Block pushing thread interrupted") - case e: Exception => - receiver.stop() - } - } - } \ No newline at end of file diff --git a/streaming/src/main/scala/spark/streaming/FlumeInputDStream.scala b/streaming/src/main/scala/spark/streaming/FlumeInputDStream.scala index 2959ce4540..02d9811669 100644 --- a/streaming/src/main/scala/spark/streaming/FlumeInputDStream.scala +++ b/streaming/src/main/scala/spark/streaming/FlumeInputDStream.scala @@ -110,7 +110,7 @@ class FlumeReceiver( storageLevel: StorageLevel ) extends NetworkReceiver[SparkFlumeEvent](streamId) { - lazy val dataHandler = new DataHandler(this, storageLevel) + lazy val dataHandler = new BufferingBlockCreator(this, storageLevel) protected override def onStart() { val responder = new SpecificResponder( diff --git a/streaming/src/main/scala/spark/streaming/SocketInputDStream.scala b/streaming/src/main/scala/spark/streaming/SocketInputDStream.scala index a9e37c0ff0..f7a34d2515 100644 --- a/streaming/src/main/scala/spark/streaming/SocketInputDStream.scala +++ b/streaming/src/main/scala/spark/streaming/SocketInputDStream.scala @@ -32,7 +32,7 @@ class SocketReceiver[T: ClassManifest]( storageLevel: StorageLevel ) extends NetworkReceiver[T](streamId) { - lazy protected val dataHandler = new DataHandler(this, storageLevel) + lazy protected val dataHandler = new BufferingBlockCreator(this, storageLevel) override def getLocationPreference = None diff --git a/streaming/src/main/scala/spark/streaming/input/KafkaInputDStream.scala b/streaming/src/main/scala/spark/streaming/input/KafkaInputDStream.scala index 7c642d4802..66f60519bc 100644 --- a/streaming/src/main/scala/spark/streaming/input/KafkaInputDStream.scala +++ b/streaming/src/main/scala/spark/streaming/input/KafkaInputDStream.scala @@ -102,7 +102,7 @@ class KafkaReceiver(streamId: Int, host: 
String, port: Int, groupId: String, val ZK_TIMEOUT = 10000 // Handles pushing data into the BlockManager - lazy protected val dataHandler = new DataHandler(this, storageLevel) + lazy protected val dataHandler = new BufferingBlockCreator(this, storageLevel) // Keeps track of the current offsets. Maps from (broker, topic, group, part) -> Offset lazy val offsets = HashMap[KafkaPartitionKey, Long]() // Connection to Kafka @@ -114,7 +114,6 @@ class KafkaReceiver(streamId: Int, host: String, port: Int, groupId: String, def onStart() { - // Starting the DataHandler that buffers blocks and pushes them into them BlockManager dataHandler.start() // In case we are using multiple Threads to handle Kafka Messages @@ -181,7 +180,7 @@ class KafkaReceiver(streamId: Int, host: String, port: Int, groupId: String, // NOT USED - Originally intended for fault-tolerance // class KafkaDataHandler(receiver: KafkaReceiver, storageLevel: StorageLevel) - // extends DataHandler[Any](receiver, storageLevel) { + // extends BufferingBlockCreator[Any](receiver, storageLevel) { // override def createBlock(blockId: String, iterator: Iterator[Any]) : Block = { // // Creates a new Block with Kafka-specific Metadata From 2ef993d159939e9dedf909991ec5c5789bdd3670 Mon Sep 17 00:00:00 2001 From: Patrick Wendell Date: Wed, 2 Jan 2013 14:17:59 -0800 Subject: [PATCH 160/226] BufferingBlockCreator -> NetworkReceiver.BlockGenerator --- .../streaming/BufferingBlockCreator.scala | 80 ------------------- .../streaming/dstream/FlumeInputDStream.scala | 10 +-- .../streaming/dstream/KafkaInputDStream.scala | 10 +-- .../dstream/NetworkInputDStream.scala | 75 +++++++++++++++++ .../dstream/SocketInputDStream.scala | 8 +- 5 files changed, 89 insertions(+), 94 deletions(-) delete mode 100644 streaming/src/main/scala/spark/streaming/BufferingBlockCreator.scala diff --git a/streaming/src/main/scala/spark/streaming/BufferingBlockCreator.scala b/streaming/src/main/scala/spark/streaming/BufferingBlockCreator.scala deleted file mode 100644 index efd2e75d40..0000000000 --- a/streaming/src/main/scala/spark/streaming/BufferingBlockCreator.scala +++ /dev/null @@ -1,80 +0,0 @@ -package spark.streaming - -import java.util.concurrent.ArrayBlockingQueue -import scala.collection.mutable.ArrayBuffer -import spark.Logging -import spark.streaming.util.{RecurringTimer, SystemClock} -import spark.storage.StorageLevel - -/** - * Batches objects created by a [[spark.streaming.NetworkReceiver]] and puts them into - * appropriately named blocks at regular intervals. This class starts two threads, - * one to periodically start a new batch and prepare the previous batch of as a block, - * the other to push the blocks into the block manager. 
- */ -class BufferingBlockCreator[T](receiver: NetworkReceiver[T], storageLevel: StorageLevel) - extends Serializable with Logging { - - case class Block(id: String, iterator: Iterator[T], metadata: Any = null) - - val clock = new SystemClock() - val blockInterval = 200L - val blockIntervalTimer = new RecurringTimer(clock, blockInterval, updateCurrentBuffer) - val blockStorageLevel = storageLevel - val blocksForPushing = new ArrayBlockingQueue[Block](1000) - val blockPushingThread = new Thread() { override def run() { keepPushingBlocks() } } - - var currentBuffer = new ArrayBuffer[T] - - def start() { - blockIntervalTimer.start() - blockPushingThread.start() - logInfo("Data handler started") - } - - def stop() { - blockIntervalTimer.stop() - blockPushingThread.interrupt() - logInfo("Data handler stopped") - } - - def += (obj: T) { - currentBuffer += obj - } - - private def createBlock(blockId: String, iterator: Iterator[T]) : Block = { - new Block(blockId, iterator) - } - - private def updateCurrentBuffer(time: Long) { - try { - val newBlockBuffer = currentBuffer - currentBuffer = new ArrayBuffer[T] - if (newBlockBuffer.size > 0) { - val blockId = "input-" + receiver.streamId + "- " + (time - blockInterval) - val newBlock = createBlock(blockId, newBlockBuffer.toIterator) - blocksForPushing.add(newBlock) - } - } catch { - case ie: InterruptedException => - logInfo("Block interval timer thread interrupted") - case e: Exception => - receiver.stop() - } - } - - private def keepPushingBlocks() { - logInfo("Block pushing thread started") - try { - while(true) { - val block = blocksForPushing.take() - receiver.pushBlock(block.id, block.iterator, block.metadata, storageLevel) - } - } catch { - case ie: InterruptedException => - logInfo("Block pushing thread interrupted") - case e: Exception => - receiver.stop() - } - } -} \ No newline at end of file diff --git a/streaming/src/main/scala/spark/streaming/dstream/FlumeInputDStream.scala b/streaming/src/main/scala/spark/streaming/dstream/FlumeInputDStream.scala index a6fa378d6e..ca70e72e56 100644 --- a/streaming/src/main/scala/spark/streaming/dstream/FlumeInputDStream.scala +++ b/streaming/src/main/scala/spark/streaming/dstream/FlumeInputDStream.scala @@ -97,13 +97,13 @@ private[streaming] object SparkFlumeEvent { private[streaming] class FlumeEventServer(receiver : FlumeReceiver) extends AvroSourceProtocol { override def append(event : AvroFlumeEvent) : Status = { - receiver.dataHandler += SparkFlumeEvent.fromAvroFlumeEvent(event) + receiver.blockGenerator += SparkFlumeEvent.fromAvroFlumeEvent(event) Status.OK } override def appendBatch(events : java.util.List[AvroFlumeEvent]) : Status = { events.foreach (event => - receiver.dataHandler += SparkFlumeEvent.fromAvroFlumeEvent(event)) + receiver.blockGenerator += SparkFlumeEvent.fromAvroFlumeEvent(event)) Status.OK } } @@ -118,19 +118,19 @@ class FlumeReceiver( storageLevel: StorageLevel ) extends NetworkReceiver[SparkFlumeEvent](streamId) { - lazy val dataHandler = new BufferingBlockCreator(this, storageLevel) + lazy val blockGenerator = new BlockGenerator(storageLevel) protected override def onStart() { val responder = new SpecificResponder( classOf[AvroSourceProtocol], new FlumeEventServer(this)); val server = new NettyServer(responder, new InetSocketAddress(host, port)); - dataHandler.start() + blockGenerator.start() server.start() logInfo("Flume receiver started") } protected override def onStop() { - dataHandler.stop() + blockGenerator.stop() logInfo("Flume receiver stopped") } diff --git 
a/streaming/src/main/scala/spark/streaming/dstream/KafkaInputDStream.scala b/streaming/src/main/scala/spark/streaming/dstream/KafkaInputDStream.scala index b1941fb427..25988a2ce7 100644 --- a/streaming/src/main/scala/spark/streaming/dstream/KafkaInputDStream.scala +++ b/streaming/src/main/scala/spark/streaming/dstream/KafkaInputDStream.scala @@ -110,19 +110,19 @@ class KafkaReceiver(streamId: Int, host: String, port: Int, groupId: String, val ZK_TIMEOUT = 10000 // Handles pushing data into the BlockManager - lazy protected val dataHandler = new BufferingBlockCreator(this, storageLevel) + lazy protected val blockGenerator = new BlockGenerator(storageLevel) // Keeps track of the current offsets. Maps from (broker, topic, group, part) -> Offset lazy val offsets = HashMap[KafkaPartitionKey, Long]() // Connection to Kafka var consumerConnector : ZookeeperConsumerConnector = null def onStop() { - dataHandler.stop() + blockGenerator.stop() } def onStart() { - dataHandler.start() + blockGenerator.start() // In case we are using multiple Threads to handle Kafka Messages val executorPool = Executors.newFixedThreadPool(topics.values.reduce(_ + _)) @@ -170,8 +170,8 @@ class KafkaReceiver(streamId: Int, host: String, port: Int, groupId: String, private class MessageHandler(stream: KafkaStream[String]) extends Runnable { def run() { logInfo("Starting MessageHandler.") - stream.takeWhile { msgAndMetadata => - dataHandler += msgAndMetadata.message + stream.takeWhile { msgAndMetadata => + blockGenerator += msgAndMetadata.message // Updating the offet. The key is (broker, topic, group, partition). val key = KafkaPartitionKey(msgAndMetadata.topicInfo.brokerId, msgAndMetadata.topic, diff --git a/streaming/src/main/scala/spark/streaming/dstream/NetworkInputDStream.scala b/streaming/src/main/scala/spark/streaming/dstream/NetworkInputDStream.scala index 41276da8bb..18e62a0e33 100644 --- a/streaming/src/main/scala/spark/streaming/dstream/NetworkInputDStream.scala +++ b/streaming/src/main/scala/spark/streaming/dstream/NetworkInputDStream.scala @@ -14,6 +14,8 @@ import akka.actor.{Props, Actor} import akka.pattern.ask import akka.dispatch.Await import akka.util.duration._ +import spark.streaming.util.{RecurringTimer, SystemClock} +import java.util.concurrent.ArrayBlockingQueue abstract class NetworkInputDStream[T: ClassManifest](@transient ssc_ : StreamingContext) extends InputDStream[T](ssc_) { @@ -154,4 +156,77 @@ abstract class NetworkReceiver[T: ClassManifest](val streamId: Int) extends Seri tracker ! DeregisterReceiver(streamId, msg) } } + + /** + * Batches objects created by a [[spark.streaming.NetworkReceiver]] and puts them into + * appropriately named blocks at regular intervals. This class starts two threads, + * one to periodically start a new batch and prepare the previous batch of as a block, + * the other to push the blocks into the block manager. 
+ */ + class BlockGenerator(storageLevel: StorageLevel) + extends Serializable with Logging { + + case class Block(id: String, iterator: Iterator[T], metadata: Any = null) + + val clock = new SystemClock() + val blockInterval = 200L + val blockIntervalTimer = new RecurringTimer(clock, blockInterval, updateCurrentBuffer) + val blockStorageLevel = storageLevel + val blocksForPushing = new ArrayBlockingQueue[Block](1000) + val blockPushingThread = new Thread() { override def run() { keepPushingBlocks() } } + + var currentBuffer = new ArrayBuffer[T] + + def start() { + blockIntervalTimer.start() + blockPushingThread.start() + logInfo("Data handler started") + } + + def stop() { + blockIntervalTimer.stop() + blockPushingThread.interrupt() + logInfo("Data handler stopped") + } + + def += (obj: T) { + currentBuffer += obj + } + + private def createBlock(blockId: String, iterator: Iterator[T]) : Block = { + new Block(blockId, iterator) + } + + private def updateCurrentBuffer(time: Long) { + try { + val newBlockBuffer = currentBuffer + currentBuffer = new ArrayBuffer[T] + if (newBlockBuffer.size > 0) { + val blockId = "input-" + NetworkReceiver.this.streamId + "- " + (time - blockInterval) + val newBlock = createBlock(blockId, newBlockBuffer.toIterator) + blocksForPushing.add(newBlock) + } + } catch { + case ie: InterruptedException => + logInfo("Block interval timer thread interrupted") + case e: Exception => + NetworkReceiver.this.stop() + } + } + + private def keepPushingBlocks() { + logInfo("Block pushing thread started") + try { + while(true) { + val block = blocksForPushing.take() + NetworkReceiver.this.pushBlock(block.id, block.iterator, block.metadata, storageLevel) + } + } catch { + case ie: InterruptedException => + logInfo("Block pushing thread interrupted") + case e: Exception => + NetworkReceiver.this.stop() + } + } + } } diff --git a/streaming/src/main/scala/spark/streaming/dstream/SocketInputDStream.scala b/streaming/src/main/scala/spark/streaming/dstream/SocketInputDStream.scala index 8374f131d6..8e4b20ea4c 100644 --- a/streaming/src/main/scala/spark/streaming/dstream/SocketInputDStream.scala +++ b/streaming/src/main/scala/spark/streaming/dstream/SocketInputDStream.scala @@ -29,7 +29,7 @@ class SocketReceiver[T: ClassManifest]( storageLevel: StorageLevel ) extends NetworkReceiver[T](streamId) { - lazy protected val dataHandler = new BufferingBlockCreator(this, storageLevel) + lazy protected val blockGenerator = new BlockGenerator(storageLevel) override def getLocationPreference = None @@ -37,16 +37,16 @@ class SocketReceiver[T: ClassManifest]( logInfo("Connecting to " + host + ":" + port) val socket = new Socket(host, port) logInfo("Connected to " + host + ":" + port) - dataHandler.start() + blockGenerator.start() val iterator = bytesToObjects(socket.getInputStream()) while(iterator.hasNext) { val obj = iterator.next - dataHandler += obj + blockGenerator += obj } } protected def onStop() { - dataHandler.stop() + blockGenerator.stop() } } From ecf9c0890160c69f1b64b36fa8fdea2f6dd973eb Mon Sep 17 00:00:00 2001 From: Matei Zaharia Date: Sat, 5 Jan 2013 20:54:08 -0500 Subject: [PATCH 161/226] Fix Accumulators in Java, and add a test for them --- core/src/main/scala/spark/Accumulators.scala | 18 +++++++- core/src/main/scala/spark/SparkContext.scala | 7 +-- .../spark/api/java/JavaSparkContext.scala | 23 ++++++---- core/src/test/scala/spark/JavaAPISuite.java | 44 +++++++++++++++++++ 4 files changed, 79 insertions(+), 13 deletions(-) diff --git a/core/src/main/scala/spark/Accumulators.scala 
b/core/src/main/scala/spark/Accumulators.scala index bacd0ace37..6280f25391 100644 --- a/core/src/main/scala/spark/Accumulators.scala +++ b/core/src/main/scala/spark/Accumulators.scala @@ -38,14 +38,28 @@ class Accumulable[R, T] ( */ def += (term: T) { value_ = param.addAccumulator(value_, term) } + /** + * Add more data to this accumulator / accumulable + * @param term the data to add + */ + def add(term: T) { value_ = param.addAccumulator(value_, term) } + /** * Merge two accumulable objects together - * + * * Normally, a user will not want to use this version, but will instead call `+=`. - * @param term the other Accumulable that will get merged with this + * @param term the other `R` that will get merged with this */ def ++= (term: R) { value_ = param.addInPlace(value_, term)} + /** + * Merge two accumulable objects together + * + * Normally, a user will not want to use this version, but will instead call `add`. + * @param term the other `R` that will get merged with this + */ + def merge(term: R) { value_ = param.addInPlace(value_, term)} + /** * Access the accumulator's current value; only allowed on master. */ diff --git a/core/src/main/scala/spark/SparkContext.scala b/core/src/main/scala/spark/SparkContext.scala index 4fd81bc63b..bbf8272eb3 100644 --- a/core/src/main/scala/spark/SparkContext.scala +++ b/core/src/main/scala/spark/SparkContext.scala @@ -382,11 +382,12 @@ class SparkContext( new Accumulator(initialValue, param) /** - * Create an [[spark.Accumulable]] shared variable, with a `+=` method + * Create an [[spark.Accumulable]] shared variable, to which tasks can add values with `+=`. + * Only the master can access the accumuable's `value`. * @tparam T accumulator type * @tparam R type that can be added to the accumulator */ - def accumulable[T,R](initialValue: T)(implicit param: AccumulableParam[T,R]) = + def accumulable[T, R](initialValue: T)(implicit param: AccumulableParam[T, R]) = new Accumulable(initialValue, param) /** @@ -404,7 +405,7 @@ class SparkContext( * Broadcast a read-only variable to the cluster, returning a [[spark.Broadcast]] object for * reading it in distributed functions. The variable will be sent to each cluster only once. */ - def broadcast[T](value: T) = env.broadcastManager.newBroadcast[T] (value, isLocal) + def broadcast[T](value: T) = env.broadcastManager.newBroadcast[T](value, isLocal) /** * Add a file to be downloaded into the working directory of this Spark job on every node. diff --git a/core/src/main/scala/spark/api/java/JavaSparkContext.scala b/core/src/main/scala/spark/api/java/JavaSparkContext.scala index b7725313c4..bf9ad7a200 100644 --- a/core/src/main/scala/spark/api/java/JavaSparkContext.scala +++ b/core/src/main/scala/spark/api/java/JavaSparkContext.scala @@ -10,7 +10,7 @@ import org.apache.hadoop.mapred.InputFormat import org.apache.hadoop.mapred.JobConf import org.apache.hadoop.mapreduce.{InputFormat => NewInputFormat} -import spark.{Accumulator, AccumulatorParam, RDD, SparkContext} +import spark.{Accumulable, AccumulableParam, Accumulator, AccumulatorParam, RDD, SparkContext} import spark.SparkContext.IntAccumulatorParam import spark.SparkContext.DoubleAccumulatorParam import spark.broadcast.Broadcast @@ -265,25 +265,32 @@ class JavaSparkContext(val sc: SparkContext) extends JavaSparkContextVarargsWork /** * Create an [[spark.Accumulator]] integer variable, which tasks can "add" values - * to using the `+=` method. Only the master can access the accumulator's `value`. + * to using the `add` method. 
Only the master can access the accumulator's `value`. */ - def intAccumulator(initialValue: Int): Accumulator[Int] = - sc.accumulator(initialValue)(IntAccumulatorParam) + def intAccumulator(initialValue: Int): Accumulator[java.lang.Integer] = + sc.accumulator(initialValue)(IntAccumulatorParam).asInstanceOf[Accumulator[java.lang.Integer]] /** * Create an [[spark.Accumulator]] double variable, which tasks can "add" values - * to using the `+=` method. Only the master can access the accumulator's `value`. + * to using the `add` method. Only the master can access the accumulator's `value`. */ - def doubleAccumulator(initialValue: Double): Accumulator[Double] = - sc.accumulator(initialValue)(DoubleAccumulatorParam) + def doubleAccumulator(initialValue: Double): Accumulator[java.lang.Double] = + sc.accumulator(initialValue)(DoubleAccumulatorParam).asInstanceOf[Accumulator[java.lang.Double]] /** * Create an [[spark.Accumulator]] variable of a given type, which tasks can "add" values - * to using the `+=` method. Only the master can access the accumulator's `value`. + * to using the `add` method. Only the master can access the accumulator's `value`. */ def accumulator[T](initialValue: T, accumulatorParam: AccumulatorParam[T]): Accumulator[T] = sc.accumulator(initialValue)(accumulatorParam) + /** + * Create an [[spark.Accumulable]] shared variable of the given type, to which tasks can + * "add" values with `add`. Only the master can access the accumuable's `value`. + */ + def accumulable[T, R](initialValue: T, param: AccumulableParam[T, R]): Accumulable[T, R] = + sc.accumulable(initialValue)(param) + /** * Broadcast a read-only variable to the cluster, returning a [[spark.Broadcast]] object for * reading it in distributed functions. The variable will be sent to each cluster only once. 
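[Editor's note] For readers following the Accumulable changes above, here is a minimal, hypothetical Scala sketch (not part of the patch) of the `accumulable` / `add` / `merge` usage this commit enables. It assumes a local master and the `AccumulableParam` methods referenced elsewhere in this series (addAccumulator, addInPlace, zero); the names IntSetParam and AccumulableSketch are made up for illustration:

import spark.{SparkContext, AccumulableParam}

// Accumulates Ints into a Set[Int]; tasks call add, only the master reads value.
object IntSetParam extends AccumulableParam[Set[Int], Int] {
  def addAccumulator(acc: Set[Int], elem: Int): Set[Int] = acc + elem  // add one element
  def addInPlace(a: Set[Int], b: Set[Int]): Set[Int] = a ++ b          // merge two partial results
  def zero(initial: Set[Int]): Set[Int] = Set.empty[Int]
}

object AccumulableSketch {
  def main(args: Array[String]) {
    val sc = new SparkContext("local", "AccumulableSketch")
    val seen = sc.accumulable(Set.empty[Int])(IntSetParam)
    sc.parallelize(1 to 10).foreach(x => seen.add(x))  // `+=` would work as well
    println(seen.value)                                // value is only readable on the master
    sc.stop()
  }
}
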
diff --git a/core/src/test/scala/spark/JavaAPISuite.java b/core/src/test/scala/spark/JavaAPISuite.java index 33d5fc2d89..b99e790093 100644 --- a/core/src/test/scala/spark/JavaAPISuite.java +++ b/core/src/test/scala/spark/JavaAPISuite.java @@ -581,4 +581,48 @@ public class JavaAPISuite implements Serializable { JavaPairRDD zipped = rdd.zip(doubles); zipped.count(); } + + @Test + public void accumulators() { + JavaRDD rdd = sc.parallelize(Arrays.asList(1, 2, 3, 4, 5)); + + final Accumulator intAccum = sc.intAccumulator(10); + rdd.foreach(new VoidFunction() { + public void call(Integer x) { + intAccum.add(x); + } + }); + Assert.assertEquals((Integer) 25, intAccum.value()); + + final Accumulator doubleAccum = sc.doubleAccumulator(10.0); + rdd.foreach(new VoidFunction() { + public void call(Integer x) { + doubleAccum.add((double) x); + } + }); + Assert.assertEquals((Double) 25.0, doubleAccum.value()); + + // Try a custom accumulator type + AccumulatorParam floatAccumulatorParam = new AccumulatorParam() { + public Float addInPlace(Float r, Float t) { + return r + t; + } + + public Float addAccumulator(Float r, Float t) { + return r + t; + } + + public Float zero(Float initialValue) { + return 0.0f; + } + }; + + final Accumulator floatAccum = sc.accumulator((Float) 10.0f, floatAccumulatorParam); + rdd.foreach(new VoidFunction() { + public void call(Integer x) { + floatAccum.add((float) x); + } + }); + Assert.assertEquals((Float) 25.0f, floatAccum.value()); + } } From 934ecc829aa06ce4d9ded3596b86b4733ed2a123 Mon Sep 17 00:00:00 2001 From: Tathagata Das Date: Sun, 6 Jan 2013 14:15:07 -0800 Subject: [PATCH 162/226] Removed streaming-env.sh.template --- conf/streaming-env.sh.template | 22 ---------------------- run | 4 ---- sentences.txt | 3 --- 3 files changed, 29 deletions(-) delete mode 100755 conf/streaming-env.sh.template delete mode 100644 sentences.txt diff --git a/conf/streaming-env.sh.template b/conf/streaming-env.sh.template deleted file mode 100755 index 1ea9ba5541..0000000000 --- a/conf/streaming-env.sh.template +++ /dev/null @@ -1,22 +0,0 @@ -#!/usr/bin/env bash - -# This file contains a few additional setting that are useful for -# running streaming jobs in Spark. Copy this file as streaming-env.sh . -# Note that this shell script will be read after spark-env.sh, so settings -# in this file may override similar settings (if present) in spark-env.sh . - - -# Using concurrent GC is strongly recommended as it can significantly -# reduce GC related pauses. - -SPARK_JAVA_OPTS+=" -XX:+UseConcMarkSweepGC" - -# Using Kryo serialization can improve serialization performance -# and therefore the throughput of the Spark Streaming programs. However, -# using Kryo serialization with custom classes may required you to -# register the classes with Kryo. Refer to the Spark documentation -# for more details. - -# SPARK_JAVA_OPTS+=" -Dspark.serializer=spark.KryoSerializer" - -export SPARK_JAVA_OPTS diff --git a/run b/run index 27506c33e2..2f61cb2a87 100755 --- a/run +++ b/run @@ -13,10 +13,6 @@ if [ -e $FWDIR/conf/spark-env.sh ] ; then . $FWDIR/conf/spark-env.sh fi -if [ -e $FWDIR/conf/streaming-env.sh ] ; then - . $FWDIR/conf/streaming-env.sh -fi - if [ "$SPARK_LAUNCH_WITH_SCALA" == "1" ]; then if [ `command -v scala` ]; then RUNNER="scala" diff --git a/sentences.txt b/sentences.txt deleted file mode 100644 index fedf96c66e..0000000000 --- a/sentences.txt +++ /dev/null @@ -1,3 +0,0 @@ -Hello world! -What's up? 
-There is no cow level From af8738dfb592eb37d4d6c91e42624e844d4e493b Mon Sep 17 00:00:00 2001 From: Tathagata Das Date: Sun, 6 Jan 2013 19:31:54 -0800 Subject: [PATCH 163/226] Moved Spark Streaming examples to examples sub-project. --- .../src/main/scala/spark/streaming/examples/FileStream.scala | 0 .../spark/streaming/examples/FileStreamWithCheckpoint.scala | 0 .../main/scala/spark/streaming/examples/FlumeEventCount.scala | 0 .../src/main/scala/spark/streaming/examples/GrepRaw.scala | 1 - .../main/scala/spark/streaming/examples/KafkaWordCount.scala | 0 .../src/main/scala/spark/streaming/examples/QueueStream.scala | 0 .../main/scala/spark/streaming/examples/TopKWordCountRaw.scala | 0 .../src/main/scala/spark/streaming/examples/WordCountHdfs.scala | 0 .../main/scala/spark/streaming/examples/WordCountNetwork.scala | 0 .../src/main/scala/spark/streaming/examples/WordCountRaw.scala | 0 .../streaming/examples/clickstream/PageViewGenerator.scala | 0 .../spark/streaming/examples/clickstream/PageViewStream.scala | 0 project/SparkBuild.scala | 2 +- 13 files changed, 1 insertion(+), 2 deletions(-) rename {streaming => examples}/src/main/scala/spark/streaming/examples/FileStream.scala (100%) rename {streaming => examples}/src/main/scala/spark/streaming/examples/FileStreamWithCheckpoint.scala (100%) rename {streaming => examples}/src/main/scala/spark/streaming/examples/FlumeEventCount.scala (100%) rename {streaming => examples}/src/main/scala/spark/streaming/examples/GrepRaw.scala (95%) rename {streaming => examples}/src/main/scala/spark/streaming/examples/KafkaWordCount.scala (100%) rename {streaming => examples}/src/main/scala/spark/streaming/examples/QueueStream.scala (100%) rename {streaming => examples}/src/main/scala/spark/streaming/examples/TopKWordCountRaw.scala (100%) rename {streaming => examples}/src/main/scala/spark/streaming/examples/WordCountHdfs.scala (100%) rename {streaming => examples}/src/main/scala/spark/streaming/examples/WordCountNetwork.scala (100%) rename {streaming => examples}/src/main/scala/spark/streaming/examples/WordCountRaw.scala (100%) rename {streaming => examples}/src/main/scala/spark/streaming/examples/clickstream/PageViewGenerator.scala (100%) rename {streaming => examples}/src/main/scala/spark/streaming/examples/clickstream/PageViewStream.scala (100%) diff --git a/streaming/src/main/scala/spark/streaming/examples/FileStream.scala b/examples/src/main/scala/spark/streaming/examples/FileStream.scala similarity index 100% rename from streaming/src/main/scala/spark/streaming/examples/FileStream.scala rename to examples/src/main/scala/spark/streaming/examples/FileStream.scala diff --git a/streaming/src/main/scala/spark/streaming/examples/FileStreamWithCheckpoint.scala b/examples/src/main/scala/spark/streaming/examples/FileStreamWithCheckpoint.scala similarity index 100% rename from streaming/src/main/scala/spark/streaming/examples/FileStreamWithCheckpoint.scala rename to examples/src/main/scala/spark/streaming/examples/FileStreamWithCheckpoint.scala diff --git a/streaming/src/main/scala/spark/streaming/examples/FlumeEventCount.scala b/examples/src/main/scala/spark/streaming/examples/FlumeEventCount.scala similarity index 100% rename from streaming/src/main/scala/spark/streaming/examples/FlumeEventCount.scala rename to examples/src/main/scala/spark/streaming/examples/FlumeEventCount.scala diff --git a/streaming/src/main/scala/spark/streaming/examples/GrepRaw.scala b/examples/src/main/scala/spark/streaming/examples/GrepRaw.scala similarity index 95% rename from 
streaming/src/main/scala/spark/streaming/examples/GrepRaw.scala rename to examples/src/main/scala/spark/streaming/examples/GrepRaw.scala index dfaaf03f03..812faa368a 100644 --- a/streaming/src/main/scala/spark/streaming/examples/GrepRaw.scala +++ b/examples/src/main/scala/spark/streaming/examples/GrepRaw.scala @@ -4,7 +4,6 @@ import spark.util.IntParam import spark.storage.StorageLevel import spark.streaming._ -import spark.streaming.StreamingContext._ import spark.streaming.util.RawTextHelper._ object GrepRaw { diff --git a/streaming/src/main/scala/spark/streaming/examples/KafkaWordCount.scala b/examples/src/main/scala/spark/streaming/examples/KafkaWordCount.scala similarity index 100% rename from streaming/src/main/scala/spark/streaming/examples/KafkaWordCount.scala rename to examples/src/main/scala/spark/streaming/examples/KafkaWordCount.scala diff --git a/streaming/src/main/scala/spark/streaming/examples/QueueStream.scala b/examples/src/main/scala/spark/streaming/examples/QueueStream.scala similarity index 100% rename from streaming/src/main/scala/spark/streaming/examples/QueueStream.scala rename to examples/src/main/scala/spark/streaming/examples/QueueStream.scala diff --git a/streaming/src/main/scala/spark/streaming/examples/TopKWordCountRaw.scala b/examples/src/main/scala/spark/streaming/examples/TopKWordCountRaw.scala similarity index 100% rename from streaming/src/main/scala/spark/streaming/examples/TopKWordCountRaw.scala rename to examples/src/main/scala/spark/streaming/examples/TopKWordCountRaw.scala diff --git a/streaming/src/main/scala/spark/streaming/examples/WordCountHdfs.scala b/examples/src/main/scala/spark/streaming/examples/WordCountHdfs.scala similarity index 100% rename from streaming/src/main/scala/spark/streaming/examples/WordCountHdfs.scala rename to examples/src/main/scala/spark/streaming/examples/WordCountHdfs.scala diff --git a/streaming/src/main/scala/spark/streaming/examples/WordCountNetwork.scala b/examples/src/main/scala/spark/streaming/examples/WordCountNetwork.scala similarity index 100% rename from streaming/src/main/scala/spark/streaming/examples/WordCountNetwork.scala rename to examples/src/main/scala/spark/streaming/examples/WordCountNetwork.scala diff --git a/streaming/src/main/scala/spark/streaming/examples/WordCountRaw.scala b/examples/src/main/scala/spark/streaming/examples/WordCountRaw.scala similarity index 100% rename from streaming/src/main/scala/spark/streaming/examples/WordCountRaw.scala rename to examples/src/main/scala/spark/streaming/examples/WordCountRaw.scala diff --git a/streaming/src/main/scala/spark/streaming/examples/clickstream/PageViewGenerator.scala b/examples/src/main/scala/spark/streaming/examples/clickstream/PageViewGenerator.scala similarity index 100% rename from streaming/src/main/scala/spark/streaming/examples/clickstream/PageViewGenerator.scala rename to examples/src/main/scala/spark/streaming/examples/clickstream/PageViewGenerator.scala diff --git a/streaming/src/main/scala/spark/streaming/examples/clickstream/PageViewStream.scala b/examples/src/main/scala/spark/streaming/examples/clickstream/PageViewStream.scala similarity index 100% rename from streaming/src/main/scala/spark/streaming/examples/clickstream/PageViewStream.scala rename to examples/src/main/scala/spark/streaming/examples/clickstream/PageViewStream.scala diff --git a/project/SparkBuild.scala b/project/SparkBuild.scala index a3f901a081..6ba3026bcc 100644 --- a/project/SparkBuild.scala +++ b/project/SparkBuild.scala @@ -23,7 +23,7 @@ object SparkBuild extends 
Build { lazy val repl = Project("repl", file("repl"), settings = replSettings) dependsOn (core) - lazy val examples = Project("examples", file("examples"), settings = examplesSettings) dependsOn (core) + lazy val examples = Project("examples", file("examples"), settings = examplesSettings) dependsOn (core) dependsOn (streaming) lazy val bagel = Project("bagel", file("bagel"), settings = bagelSettings) dependsOn (core) From 1346126485444afc065bf4951c4bedebe5c95ce4 Mon Sep 17 00:00:00 2001 From: Tathagata Das Date: Mon, 7 Jan 2013 12:11:27 -0800 Subject: [PATCH 164/226] Changed cleanup to clearOldValues for TimeStampedHashMap and TimeStampedHashSet. --- core/src/main/scala/spark/CacheTracker.scala | 4 ++-- core/src/main/scala/spark/MapOutputTracker.scala | 4 ++-- core/src/main/scala/spark/scheduler/DAGScheduler.scala | 6 +++--- core/src/main/scala/spark/scheduler/ResultTask.scala | 2 +- core/src/main/scala/spark/scheduler/ShuffleMapTask.scala | 2 +- core/src/main/scala/spark/util/TimeStampedHashMap.scala | 7 +++++-- core/src/main/scala/spark/util/TimeStampedHashSet.scala | 5 ++++- 7 files changed, 18 insertions(+), 12 deletions(-) diff --git a/core/src/main/scala/spark/CacheTracker.scala b/core/src/main/scala/spark/CacheTracker.scala index 7d320c4fe5..86ad737583 100644 --- a/core/src/main/scala/spark/CacheTracker.scala +++ b/core/src/main/scala/spark/CacheTracker.scala @@ -39,7 +39,7 @@ private[spark] class CacheTrackerActor extends Actor with Logging { private val slaveCapacity = new HashMap[String, Long] private val slaveUsage = new HashMap[String, Long] - private val metadataCleaner = new MetadataCleaner("CacheTrackerActor", locs.cleanup) + private val metadataCleaner = new MetadataCleaner("CacheTrackerActor", locs.clearOldValues) private def getCacheUsage(host: String): Long = slaveUsage.getOrElse(host, 0L) private def getCacheCapacity(host: String): Long = slaveCapacity.getOrElse(host, 0L) @@ -120,7 +120,7 @@ private[spark] class CacheTracker(actorSystem: ActorSystem, isMaster: Boolean, b // Remembers which splits are currently being loaded (on worker nodes) val loading = new HashSet[String] - val metadataCleaner = new MetadataCleaner("CacheTracker", registeredRddIds.cleanup) + val metadataCleaner = new MetadataCleaner("CacheTracker", registeredRddIds.clearOldValues) // Send a message to the trackerActor and get its result within a default timeout, or // throw a SparkException if this fails. 
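[Editor's note] As context for the cleanup-to-clearOldValues rename in this commit, the pattern is: metadata maps remember an insertion timestamp per entry, and a recurring cleaner periodically calls clearOldValues with a threshold time. Below is a minimal, self-contained sketch of that mechanism; the names TimestampedMap and CleanerSketch are hypothetical and this is not the actual spark.util code:

import java.util.concurrent.ConcurrentHashMap
import java.util.{Timer, TimerTask}

// A map that records when each key was inserted.
class TimestampedMap[A, B] {
  private val internal = new ConcurrentHashMap[A, (B, Long)]()

  def put(key: A, value: B) { internal.put(key, (value, System.currentTimeMillis)) }
  def get(key: A): Option[B] = Option(internal.get(key)).map(_._1)

  // Removes entries whose insertion timestamp is earlier than threshTime.
  def clearOldValues(threshTime: Long) {
    val it = internal.entrySet().iterator()
    while (it.hasNext) {
      if (it.next().getValue._2 < threshTime) it.remove()
    }
  }
}

object CleanerSketch {
  def main(args: Array[String]) {
    val cache = new TimestampedMap[Int, String]
    cache.put(1, "stage 1 serialized task info")

    // Analogous to wiring a recurring cleaner to cache.clearOldValues:
    // a daemon timer that drops entries older than delaySeconds.
    val delaySeconds = 60
    val timer = new Timer(true)
    timer.schedule(new TimerTask {
      def run() { cache.clearOldValues(System.currentTimeMillis - delaySeconds * 1000) }
    }, delaySeconds * 1000L, delaySeconds * 1000L)
  }
}
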
diff --git a/core/src/main/scala/spark/MapOutputTracker.scala b/core/src/main/scala/spark/MapOutputTracker.scala index 5ebdba0fc8..a2fa2d1ea7 100644 --- a/core/src/main/scala/spark/MapOutputTracker.scala +++ b/core/src/main/scala/spark/MapOutputTracker.scala @@ -178,8 +178,8 @@ private[spark] class MapOutputTracker(actorSystem: ActorSystem, isMaster: Boolea } def cleanup(cleanupTime: Long) { - mapStatuses.cleanup(cleanupTime) - cachedSerializedStatuses.cleanup(cleanupTime) + mapStatuses.clearOldValues(cleanupTime) + cachedSerializedStatuses.clearOldValues(cleanupTime) } def stop() { diff --git a/core/src/main/scala/spark/scheduler/DAGScheduler.scala b/core/src/main/scala/spark/scheduler/DAGScheduler.scala index 9387ba19a3..59f2099e91 100644 --- a/core/src/main/scala/spark/scheduler/DAGScheduler.scala +++ b/core/src/main/scala/spark/scheduler/DAGScheduler.scala @@ -599,15 +599,15 @@ class DAGScheduler(taskSched: TaskScheduler) extends TaskSchedulerListener with def cleanup(cleanupTime: Long) { var sizeBefore = idToStage.size - idToStage.cleanup(cleanupTime) + idToStage.clearOldValues(cleanupTime) logInfo("idToStage " + sizeBefore + " --> " + idToStage.size) sizeBefore = shuffleToMapStage.size - shuffleToMapStage.cleanup(cleanupTime) + shuffleToMapStage.clearOldValues(cleanupTime) logInfo("shuffleToMapStage " + sizeBefore + " --> " + shuffleToMapStage.size) sizeBefore = pendingTasks.size - pendingTasks.cleanup(cleanupTime) + pendingTasks.clearOldValues(cleanupTime) logInfo("pendingTasks " + sizeBefore + " --> " + pendingTasks.size) } diff --git a/core/src/main/scala/spark/scheduler/ResultTask.scala b/core/src/main/scala/spark/scheduler/ResultTask.scala index 7ec6564105..74a63c1af1 100644 --- a/core/src/main/scala/spark/scheduler/ResultTask.scala +++ b/core/src/main/scala/spark/scheduler/ResultTask.scala @@ -12,7 +12,7 @@ private[spark] object ResultTask { // expensive on the master node if it needs to launch thousands of tasks. val serializedInfoCache = new TimeStampedHashMap[Int, Array[Byte]] - val metadataCleaner = new MetadataCleaner("ResultTask", serializedInfoCache.cleanup) + val metadataCleaner = new MetadataCleaner("ResultTask", serializedInfoCache.clearOldValues) def serializeInfo(stageId: Int, rdd: RDD[_], func: (TaskContext, Iterator[_]) => _): Array[Byte] = { synchronized { diff --git a/core/src/main/scala/spark/scheduler/ShuffleMapTask.scala b/core/src/main/scala/spark/scheduler/ShuffleMapTask.scala index feb63abb61..19f5328eee 100644 --- a/core/src/main/scala/spark/scheduler/ShuffleMapTask.scala +++ b/core/src/main/scala/spark/scheduler/ShuffleMapTask.scala @@ -23,7 +23,7 @@ private[spark] object ShuffleMapTask { // expensive on the master node if it needs to launch thousands of tasks. 
val serializedInfoCache = new TimeStampedHashMap[Int, Array[Byte]] - val metadataCleaner = new MetadataCleaner("ShuffleMapTask", serializedInfoCache.cleanup) + val metadataCleaner = new MetadataCleaner("ShuffleMapTask", serializedInfoCache.clearOldValues) def serializeInfo(stageId: Int, rdd: RDD[_], dep: ShuffleDependency[_,_]): Array[Byte] = { synchronized { diff --git a/core/src/main/scala/spark/util/TimeStampedHashMap.scala b/core/src/main/scala/spark/util/TimeStampedHashMap.scala index 7e785182ea..bb7c5c01c8 100644 --- a/core/src/main/scala/spark/util/TimeStampedHashMap.scala +++ b/core/src/main/scala/spark/util/TimeStampedHashMap.scala @@ -7,7 +7,7 @@ import scala.collection.mutable.Map /** * This is a custom implementation of scala.collection.mutable.Map which stores the insertion * time stamp along with each key-value pair. Key-value pairs that are older than a particular - * threshold time can them be removed using the cleanup method. This is intended to be a drop-in + * threshold time can them be removed using the clearOldValues method. This is intended to be a drop-in * replacement of scala.collection.mutable.HashMap. */ class TimeStampedHashMap[A, B] extends Map[A, B]() with spark.Logging { @@ -74,7 +74,10 @@ class TimeStampedHashMap[A, B] extends Map[A, B]() with spark.Logging { } } - def cleanup(threshTime: Long) { + /** + * Removes old key-value pairs that have timestamp earlier than `threshTime` + */ + def clearOldValues(threshTime: Long) { val iterator = internalMap.entrySet().iterator() while(iterator.hasNext) { val entry = iterator.next() diff --git a/core/src/main/scala/spark/util/TimeStampedHashSet.scala b/core/src/main/scala/spark/util/TimeStampedHashSet.scala index 539dd75844..5f1cc93752 100644 --- a/core/src/main/scala/spark/util/TimeStampedHashSet.scala +++ b/core/src/main/scala/spark/util/TimeStampedHashSet.scala @@ -52,7 +52,10 @@ class TimeStampedHashSet[A] extends Set[A] { } } - def cleanup(threshTime: Long) { + /** + * Removes old values that have timestamp earlier than `threshTime` + */ + def clearOldValues(threshTime: Long) { val iterator = internalMap.entrySet().iterator() while(iterator.hasNext) { val entry = iterator.next() From 237bac36e9dca8828192994dad323b8da1619267 Mon Sep 17 00:00:00 2001 From: Tathagata Das Date: Mon, 7 Jan 2013 14:37:21 -0800 Subject: [PATCH 165/226] Renamed examples and added documentation. 
--- .../main/scala/spark/RDDCheckpointData.scala | 4 +- docs/streaming-programming-guide.md | 14 ++-- .../spark/streaming/examples/FileStream.scala | 46 ------------ .../examples/FileStreamWithCheckpoint.scala | 75 ------------------- .../streaming/examples/FlumeEventCount.scala | 2 +- .../spark/streaming/examples/GrepRaw.scala | 32 -------- .../streaming/examples/HdfsWordCount.scala | 36 +++++++++ ...ntNetwork.scala => NetworkWordCount.scala} | 17 ++++- .../streaming/examples/RawNetworkGrep.scala | 46 ++++++++++++ .../streaming/examples/TopKWordCountRaw.scala | 49 ------------ .../streaming/examples/WordCountHdfs.scala | 25 ------- .../streaming/examples/WordCountRaw.scala | 43 ----------- .../spark/streaming/StreamingContext.scala | 38 ++++++++-- .../streaming/dstream/FileInputDStream.scala | 16 +--- .../spark/streaming/InputStreamsSuite.scala | 2 +- 15 files changed, 141 insertions(+), 304 deletions(-) delete mode 100644 examples/src/main/scala/spark/streaming/examples/FileStream.scala delete mode 100644 examples/src/main/scala/spark/streaming/examples/FileStreamWithCheckpoint.scala delete mode 100644 examples/src/main/scala/spark/streaming/examples/GrepRaw.scala create mode 100644 examples/src/main/scala/spark/streaming/examples/HdfsWordCount.scala rename examples/src/main/scala/spark/streaming/examples/{WordCountNetwork.scala => NetworkWordCount.scala} (50%) create mode 100644 examples/src/main/scala/spark/streaming/examples/RawNetworkGrep.scala delete mode 100644 examples/src/main/scala/spark/streaming/examples/TopKWordCountRaw.scala delete mode 100644 examples/src/main/scala/spark/streaming/examples/WordCountHdfs.scala delete mode 100644 examples/src/main/scala/spark/streaming/examples/WordCountRaw.scala diff --git a/core/src/main/scala/spark/RDDCheckpointData.scala b/core/src/main/scala/spark/RDDCheckpointData.scala index 7af830940f..e270b6312e 100644 --- a/core/src/main/scala/spark/RDDCheckpointData.scala +++ b/core/src/main/scala/spark/RDDCheckpointData.scala @@ -65,7 +65,7 @@ extends Logging with Serializable { cpRDD = Some(newRDD) rdd.changeDependencies(newRDD) cpState = Checkpointed - RDDCheckpointData.checkpointCompleted() + RDDCheckpointData.clearTaskCaches() logInfo("Done checkpointing RDD " + rdd.id + ", new parent is RDD " + newRDD.id) } } @@ -90,7 +90,7 @@ extends Logging with Serializable { } private[spark] object RDDCheckpointData { - def checkpointCompleted() { + def clearTaskCaches() { ShuffleMapTask.clearCache() ResultTask.clearCache() } diff --git a/docs/streaming-programming-guide.md b/docs/streaming-programming-guide.md index fc2ea2ef79..05a88ce7bd 100644 --- a/docs/streaming-programming-guide.md +++ b/docs/streaming-programming-guide.md @@ -187,8 +187,8 @@ Conversely, the computation can be stopped by using ssc.stop() {% endhighlight %} -# Example - WordCountNetwork.scala -A good example to start off is the spark.streaming.examples.WordCountNetwork. This example counts the words received from a network server every second. Given below is the relevant sections of the source code. You can find the full source code in /streaming/src/main/scala/spark/streaming/examples/WordCountNetwork.scala. +# Example - NetworkWordCount.scala +A good example to start off is the spark.streaming.examples.NetworkWordCount. This example counts the words received from a network server every second. Given below is the relevant sections of the source code. You can find the full source code in /streaming/src/main/scala/spark/streaming/examples/WordCountNetwork.scala. 
{% highlight scala %} import spark.streaming.{Seconds, StreamingContext} @@ -196,7 +196,7 @@ import spark.streaming.StreamingContext._ ... // Create the context and set up a network input stream to receive from a host:port -val ssc = new StreamingContext(args(0), "WordCountNetwork", Seconds(1)) +val ssc = new StreamingContext(args(0), "NetworkWordCount", Seconds(1)) val lines = ssc.networkTextStream(args(1), args(2).toInt) // Split the lines into words, count them, and print some of the counts on the master @@ -214,13 +214,13 @@ To run this example on your local machine, you need to first run a Netcat server $ nc -lk 9999 {% endhighlight %} -Then, in a different terminal, you can start WordCountNetwork by using +Then, in a different terminal, you can start NetworkWordCount by using {% highlight bash %} -$ ./run spark.streaming.examples.WordCountNetwork local[2] localhost 9999 +$ ./run spark.streaming.examples.NetworkWordCount local[2] localhost 9999 {% endhighlight %} -This will make WordCountNetwork connect to the netcat server. Any lines typed in the terminal running the netcat server will be counted and printed on screen. +This will make NetworkWordCount connect to the netcat server. Any lines typed in the terminal running the netcat server will be counted and printed on screen. - + - + @@ -88,55 +88,60 @@ DStreams support many of the transformations available on normal Spark RDD's: - + - + + + + +
      @@ -240,7 +240,7 @@ hello world {% highlight bash %} -# TERMINAL 2: RUNNING WordCountNetwork +# TERMINAL 2: RUNNING NetworkWordCount ... 2012-12-31 18:47:10,446 INFO SparkContext: Job finished: run at ThreadPoolExecutor.java:886, took 0.038817 s ------------------------------------------- diff --git a/examples/src/main/scala/spark/streaming/examples/FileStream.scala b/examples/src/main/scala/spark/streaming/examples/FileStream.scala deleted file mode 100644 index 81938d30d4..0000000000 --- a/examples/src/main/scala/spark/streaming/examples/FileStream.scala +++ /dev/null @@ -1,46 +0,0 @@ -package spark.streaming.examples - -import spark.streaming.StreamingContext -import spark.streaming.StreamingContext._ -import spark.streaming.Seconds -import org.apache.hadoop.fs.Path -import org.apache.hadoop.conf.Configuration - - -object FileStream { - def main(args: Array[String]) { - if (args.length < 2) { - System.err.println("Usage: FileStream ") - System.exit(1) - } - - // Create the context - val ssc = new StreamingContext(args(0), "FileStream", Seconds(1)) - - // Create the new directory - val directory = new Path(args(1)) - val fs = directory.getFileSystem(new Configuration()) - if (fs.exists(directory)) throw new Exception("This directory already exists") - fs.mkdirs(directory) - fs.deleteOnExit(directory) - - // Create the FileInputDStream on the directory and use the - // stream to count words in new files created - val inputStream = ssc.textFileStream(directory.toString) - val words = inputStream.flatMap(_.split(" ")) - val wordCounts = words.map(x => (x, 1)).reduceByKey(_ + _) - wordCounts.print() - ssc.start() - - // Creating new files in the directory - val text = "This is a text file" - for (i <- 1 to 30) { - ssc.sc.parallelize((1 to (i * 10)).map(_ => text), 10) - .saveAsTextFile(new Path(directory, i.toString).toString) - Thread.sleep(1000) - } - Thread.sleep(5000) // Waiting for the file to be processed - ssc.stop() - System.exit(0) - } -} \ No newline at end of file diff --git a/examples/src/main/scala/spark/streaming/examples/FileStreamWithCheckpoint.scala b/examples/src/main/scala/spark/streaming/examples/FileStreamWithCheckpoint.scala deleted file mode 100644 index b7bc15a1d5..0000000000 --- a/examples/src/main/scala/spark/streaming/examples/FileStreamWithCheckpoint.scala +++ /dev/null @@ -1,75 +0,0 @@ -package spark.streaming.examples - -import spark.streaming._ -import spark.streaming.StreamingContext._ -import org.apache.hadoop.fs.Path -import org.apache.hadoop.conf.Configuration - -object FileStreamWithCheckpoint { - - def main(args: Array[String]) { - - if (args.size != 3) { - println("FileStreamWithCheckpoint ") - println("FileStreamWithCheckpoint restart ") - System.exit(-1) - } - - val directory = new Path(args(1)) - val checkpointDir = args(2) - - val ssc: StreamingContext = { - - if (args(0) == "restart") { - - // Recreated streaming context from specified checkpoint file - new StreamingContext(checkpointDir) - - } else { - - // Create directory if it does not exist - val fs = directory.getFileSystem(new Configuration()) - if (!fs.exists(directory)) fs.mkdirs(directory) - - // Create new streaming context - val ssc_ = new StreamingContext(args(0), "FileStreamWithCheckpoint", Seconds(1)) - ssc_.checkpoint(checkpointDir) - - // Setup the streaming computation - val inputStream = ssc_.textFileStream(directory.toString) - val words = inputStream.flatMap(_.split(" ")) - val wordCounts = words.map(x => (x, 1)).reduceByKey(_ + _) - wordCounts.print() - - ssc_ - } - } - - 
// Start the stream computation - startFileWritingThread(directory.toString) - ssc.start() - } - - def startFileWritingThread(directory: String) { - - val fs = new Path(directory).getFileSystem(new Configuration()) - - val fileWritingThread = new Thread() { - override def run() { - val r = new scala.util.Random() - val text = "This is a sample text file with a random number " - while(true) { - val number = r.nextInt() - val file = new Path(directory, number.toString) - val fos = fs.create(file) - fos.writeChars(text + number) - fos.close() - println("Created text file " + file) - Thread.sleep(1000) - } - } - } - fileWritingThread.start() - } - -} diff --git a/examples/src/main/scala/spark/streaming/examples/FlumeEventCount.scala b/examples/src/main/scala/spark/streaming/examples/FlumeEventCount.scala index e60ce483a3..461929fba2 100644 --- a/examples/src/main/scala/spark/streaming/examples/FlumeEventCount.scala +++ b/examples/src/main/scala/spark/streaming/examples/FlumeEventCount.scala @@ -5,7 +5,7 @@ import spark.storage.StorageLevel import spark.streaming._ /** - * Produce a streaming count of events received from Flume. + * Produces a count of events received from Flume. * * This should be used in conjunction with an AvroSink in Flume. It will start * an Avro server on at the request host:port address and listen for requests. diff --git a/examples/src/main/scala/spark/streaming/examples/GrepRaw.scala b/examples/src/main/scala/spark/streaming/examples/GrepRaw.scala deleted file mode 100644 index 812faa368a..0000000000 --- a/examples/src/main/scala/spark/streaming/examples/GrepRaw.scala +++ /dev/null @@ -1,32 +0,0 @@ -package spark.streaming.examples - -import spark.util.IntParam -import spark.storage.StorageLevel - -import spark.streaming._ -import spark.streaming.util.RawTextHelper._ - -object GrepRaw { - def main(args: Array[String]) { - if (args.length != 5) { - System.err.println("Usage: GrepRaw ") - System.exit(1) - } - - val Array(master, IntParam(numStreams), host, IntParam(port), IntParam(batchMillis)) = args - - // Create the context - val ssc = new StreamingContext(master, "GrepRaw", Milliseconds(batchMillis)) - - // Warm up the JVMs on master and slave for JIT compilation to kick in - warmUp(ssc.sc) - - - val rawStreams = (1 to numStreams).map(_ => - ssc.rawNetworkStream[String](host, port, StorageLevel.MEMORY_ONLY_SER_2)).toArray - val union = ssc.union(rawStreams) - union.filter(_.contains("Alice")).count().foreach(r => - println("Grep count: " + r.collect().mkString)) - ssc.start() - } -} diff --git a/examples/src/main/scala/spark/streaming/examples/HdfsWordCount.scala b/examples/src/main/scala/spark/streaming/examples/HdfsWordCount.scala new file mode 100644 index 0000000000..8530f5c175 --- /dev/null +++ b/examples/src/main/scala/spark/streaming/examples/HdfsWordCount.scala @@ -0,0 +1,36 @@ +package spark.streaming.examples + +import spark.streaming.{Seconds, StreamingContext} +import spark.streaming.StreamingContext._ + + +/** + * Counts words in new text files created in the given directory + * Usage: HdfsWordCount + * is the Spark master URL. + * is the directory that Spark Streaming will use to find and read new text files. + * + * To run this on your local machine on directory `localdir`, run this example + * `$ ./run spark.streaming.examples.HdfsWordCount local[2] localdir` + * Then create a text file in `localdir` and the words in the file will get counted. 
+ */ +object HdfsWordCount { + def main(args: Array[String]) { + if (args.length < 2) { + System.err.println("Usage: HdfsWordCount ") + System.exit(1) + } + + // Create the context + val ssc = new StreamingContext(args(0), "HdfsWordCount", Seconds(2)) + + // Create the FileInputDStream on the directory and use the + // stream to count words in new files created + val lines = ssc.textFileStream(args(1)) + val words = lines.flatMap(_.split(" ")) + val wordCounts = words.map(x => (x, 1)).reduceByKey(_ + _) + wordCounts.print() + ssc.start() + } +} + diff --git a/examples/src/main/scala/spark/streaming/examples/WordCountNetwork.scala b/examples/src/main/scala/spark/streaming/examples/NetworkWordCount.scala similarity index 50% rename from examples/src/main/scala/spark/streaming/examples/WordCountNetwork.scala rename to examples/src/main/scala/spark/streaming/examples/NetworkWordCount.scala index eadda60563..43c01d5db2 100644 --- a/examples/src/main/scala/spark/streaming/examples/WordCountNetwork.scala +++ b/examples/src/main/scala/spark/streaming/examples/NetworkWordCount.scala @@ -3,16 +3,27 @@ package spark.streaming.examples import spark.streaming.{Seconds, StreamingContext} import spark.streaming.StreamingContext._ -object WordCountNetwork { +/** + * Counts words in UTF8 encoded, '\n' delimited text received from the network every second. + * Usage: NetworkWordCount + * is the Spark master URL. In local mode, should be 'local[n]' with n > 1. + * and describe the TCP server that Spark Streaming would connect to receive data. + * + * To run this on your local machine, you need to first run a Netcat server + * `$ nc -lk 9999` + * and then run the example + * `$ ./run spark.streaming.examples.NetworkWordCount local[2] localhost 9999` + */ +object NetworkWordCount { def main(args: Array[String]) { if (args.length < 2) { - System.err.println("Usage: WordCountNetwork \n" + + System.err.println("Usage: NetworkWordCount \n" + "In local mode, should be 'local[n]' with n > 1") System.exit(1) } // Create the context and set the batch size - val ssc = new StreamingContext(args(0), "WordCountNetwork", Seconds(1)) + val ssc = new StreamingContext(args(0), "NetworkWordCount", Seconds(1)) // Create a NetworkInputDStream on target ip:port and count the // words in input stream of \n delimited test (eg. generated by 'nc') diff --git a/examples/src/main/scala/spark/streaming/examples/RawNetworkGrep.scala b/examples/src/main/scala/spark/streaming/examples/RawNetworkGrep.scala new file mode 100644 index 0000000000..2eec777c54 --- /dev/null +++ b/examples/src/main/scala/spark/streaming/examples/RawNetworkGrep.scala @@ -0,0 +1,46 @@ +package spark.streaming.examples + +import spark.util.IntParam +import spark.storage.StorageLevel + +import spark.streaming._ +import spark.streaming.util.RawTextHelper + +/** + * Receives text from multiple rawNetworkStreams and counts how many '\n' delimited + * lines have the word 'the' in them. This is useful for benchmarking purposes. This + * will only work with spark.streaming.util.RawTextSender running on all worker nodes + * and with Spark using Kryo serialization (set Java property "spark.serializer" to + * "spark.KryoSerializer"). + * Usage: RawNetworkGrep + * is the Spark master URL + * is the number rawNetworkStreams, which should be same as number + * of work nodes in the cluster + * is "localhost". + * is the port on which RawTextSender is running in the worker nodes. + * is the Spark Streaming batch duration in milliseconds. 
+ */ + +object RawNetworkGrep { + def main(args: Array[String]) { + if (args.length != 5) { + System.err.println("Usage: RawNetworkGrep ") + System.exit(1) + } + + val Array(master, IntParam(numStreams), host, IntParam(port), IntParam(batchMillis)) = args + + // Create the context + val ssc = new StreamingContext(master, "RawNetworkGrep", Milliseconds(batchMillis)) + + // Warm up the JVMs on master and slave for JIT compilation to kick in + RawTextHelper.warmUp(ssc.sc) + + val rawStreams = (1 to numStreams).map(_ => + ssc.rawNetworkStream[String](host, port, StorageLevel.MEMORY_ONLY_SER_2)).toArray + val union = ssc.union(rawStreams) + union.filter(_.contains("the")).count().foreach(r => + println("Grep count: " + r.collect().mkString)) + ssc.start() + } +} diff --git a/examples/src/main/scala/spark/streaming/examples/TopKWordCountRaw.scala b/examples/src/main/scala/spark/streaming/examples/TopKWordCountRaw.scala deleted file mode 100644 index 338834bc3c..0000000000 --- a/examples/src/main/scala/spark/streaming/examples/TopKWordCountRaw.scala +++ /dev/null @@ -1,49 +0,0 @@ -package spark.streaming.examples - -import spark.storage.StorageLevel -import spark.util.IntParam - -import spark.streaming._ -import spark.streaming.StreamingContext._ -import spark.streaming.util.RawTextHelper._ - -import java.util.UUID - -object TopKWordCountRaw { - - def main(args: Array[String]) { - if (args.length != 4) { - System.err.println("Usage: WordCountRaw <# streams> ") - System.exit(1) - } - - val Array(master, IntParam(numStreams), IntParam(port), checkpointDir) = args - val k = 10 - - // Create the context, and set the checkpoint directory. - // Checkpoint directory is necessary for achieving fault-tolerance, by saving counts - // periodically to HDFS - val ssc = new StreamingContext(master, "TopKWordCountRaw", Seconds(1)) - ssc.checkpoint(checkpointDir + "/" + UUID.randomUUID.toString, Seconds(1)) - - // Warm up the JVMs on master and slave for JIT compilation to kick in - /*warmUp(ssc.sc)*/ - - // Set up the raw network streams that will connect to localhost:port to raw test - // senders on the slaves and generate top K words of last 30 seconds - val lines = (1 to numStreams).map(_ => { - ssc.rawNetworkStream[String]("localhost", port, StorageLevel.MEMORY_ONLY_SER_2) - }) - val union = ssc.union(lines) - val counts = union.mapPartitions(splitAndCountPartitions) - val windowedCounts = counts.reduceByKeyAndWindow(add _, subtract _, Seconds(30), Seconds(1), 10) - val partialTopKWindowedCounts = windowedCounts.mapPartitions(topK(_, k)) - partialTopKWindowedCounts.foreach(rdd => { - val collectedCounts = rdd.collect - println("Collected " + collectedCounts.size + " words from partial top words") - println("Top " + k + " words are " + topK(collectedCounts.toIterator, k).mkString(",")) - }) - - ssc.start() - } -} diff --git a/examples/src/main/scala/spark/streaming/examples/WordCountHdfs.scala b/examples/src/main/scala/spark/streaming/examples/WordCountHdfs.scala deleted file mode 100644 index 867a8f42c4..0000000000 --- a/examples/src/main/scala/spark/streaming/examples/WordCountHdfs.scala +++ /dev/null @@ -1,25 +0,0 @@ -package spark.streaming.examples - -import spark.streaming.{Seconds, StreamingContext} -import spark.streaming.StreamingContext._ - -object WordCountHdfs { - def main(args: Array[String]) { - if (args.length < 2) { - System.err.println("Usage: WordCountHdfs ") - System.exit(1) - } - - // Create the context - val ssc = new StreamingContext(args(0), "WordCountHdfs", Seconds(2)) - - // Create 
the FileInputDStream on the directory and use the - // stream to count words in new files created - val lines = ssc.textFileStream(args(1)) - val words = lines.flatMap(_.split(" ")) - val wordCounts = words.map(x => (x, 1)).reduceByKey(_ + _) - wordCounts.print() - ssc.start() - } -} - diff --git a/examples/src/main/scala/spark/streaming/examples/WordCountRaw.scala b/examples/src/main/scala/spark/streaming/examples/WordCountRaw.scala deleted file mode 100644 index d93335a8ce..0000000000 --- a/examples/src/main/scala/spark/streaming/examples/WordCountRaw.scala +++ /dev/null @@ -1,43 +0,0 @@ -package spark.streaming.examples - -import spark.storage.StorageLevel -import spark.util.IntParam - -import spark.streaming._ -import spark.streaming.StreamingContext._ -import spark.streaming.util.RawTextHelper._ - -import java.util.UUID - -object WordCountRaw { - - def main(args: Array[String]) { - if (args.length != 4) { - System.err.println("Usage: WordCountRaw <# streams> ") - System.exit(1) - } - - val Array(master, IntParam(numStreams), IntParam(port), checkpointDir) = args - - // Create the context, and set the checkpoint directory. - // Checkpoint directory is necessary for achieving fault-tolerance, by saving counts - // periodically to HDFS - val ssc = new StreamingContext(master, "WordCountRaw", Seconds(1)) - ssc.checkpoint(checkpointDir + "/" + UUID.randomUUID.toString, Seconds(1)) - - // Warm up the JVMs on master and slave for JIT compilation to kick in - warmUp(ssc.sc) - - // Set up the raw network streams that will connect to localhost:port to raw test - // senders on the slaves and generate count of words of last 30 seconds - val lines = (1 to numStreams).map(_ => { - ssc.rawNetworkStream[String]("localhost", port, StorageLevel.MEMORY_ONLY_SER_2) - }) - val union = ssc.union(lines) - val counts = union.mapPartitions(splitAndCountPartitions) - val windowedCounts = counts.reduceByKeyAndWindow(add _, subtract _, Seconds(30), Seconds(1), 10) - windowedCounts.foreach(r => println("# unique words = " + r.count())) - - ssc.start() - } -} diff --git a/streaming/src/main/scala/spark/streaming/StreamingContext.scala b/streaming/src/main/scala/spark/streaming/StreamingContext.scala index 7256e41af9..215246ba2e 100644 --- a/streaming/src/main/scala/spark/streaming/StreamingContext.scala +++ b/streaming/src/main/scala/spark/streaming/StreamingContext.scala @@ -154,7 +154,7 @@ class StreamingContext private ( storageLevel: StorageLevel = StorageLevel.MEMORY_ONLY_SER_2 ): DStream[T] = { val inputStream = new KafkaInputDStream[T](this, hostname, port, groupId, topics, initialOffsets, storageLevel) - graph.addInputStream(inputStream) + registerInputStream(inputStream) inputStream } @@ -192,7 +192,7 @@ class StreamingContext private ( storageLevel: StorageLevel ): DStream[T] = { val inputStream = new SocketInputDStream[T](this, hostname, port, converter, storageLevel) - graph.addInputStream(inputStream) + registerInputStream(inputStream) inputStream } @@ -208,7 +208,7 @@ class StreamingContext private ( storageLevel: StorageLevel = StorageLevel.MEMORY_AND_DISK_SER_2 ): DStream[SparkFlumeEvent] = { val inputStream = new FlumeInputDStream(this, hostname, port, storageLevel) - graph.addInputStream(inputStream) + registerInputStream(inputStream) inputStream } @@ -228,13 +228,14 @@ class StreamingContext private ( storageLevel: StorageLevel = StorageLevel.MEMORY_AND_DISK_SER_2 ): DStream[T] = { val inputStream = new RawInputDStream[T](this, hostname, port, storageLevel) - graph.addInputStream(inputStream) + 
registerInputStream(inputStream) inputStream } /** * Creates a input stream that monitors a Hadoop-compatible filesystem * for new files and reads them using the given key-value types and input format. + * File names starting with . are ignored. * @param directory HDFS directory to monitor for new file * @tparam K Key type for reading HDFS file * @tparam V Value type for reading HDFS file @@ -244,16 +245,37 @@ class StreamingContext private ( K: ClassManifest, V: ClassManifest, F <: NewInputFormat[K, V]: ClassManifest - ](directory: String): DStream[(K, V)] = { + ] (directory: String): DStream[(K, V)] = { val inputStream = new FileInputDStream[K, V, F](this, directory) - graph.addInputStream(inputStream) + registerInputStream(inputStream) inputStream } + /** + * Creates a input stream that monitors a Hadoop-compatible filesystem + * for new files and reads them using the given key-value types and input format. + * @param directory HDFS directory to monitor for new file + * @param filter Function to filter paths to process + * @param newFilesOnly Should process only new files and ignore existing files in the directory + * @tparam K Key type for reading HDFS file + * @tparam V Value type for reading HDFS file + * @tparam F Input format for reading HDFS file + */ + def fileStream[ + K: ClassManifest, + V: ClassManifest, + F <: NewInputFormat[K, V]: ClassManifest + ] (directory: String, filter: Path => Boolean, newFilesOnly: Boolean): DStream[(K, V)] = { + val inputStream = new FileInputDStream[K, V, F](this, directory, filter, newFilesOnly) + registerInputStream(inputStream) + inputStream + } + + /** * Creates a input stream that monitors a Hadoop-compatible filesystem * for new files and reads them as text files (using key as LongWritable, value - * as Text and input format as TextInputFormat). + * as Text and input format as TextInputFormat). File names starting with . are ignored. 
* @param directory HDFS directory to monitor for new file */ def textFileStream(directory: String): DStream[String] = { @@ -274,7 +296,7 @@ class StreamingContext private ( defaultRDD: RDD[T] = null ): DStream[T] = { val inputStream = new QueueInputDStream(this, queue, oneAtATime, defaultRDD) - graph.addInputStream(inputStream) + registerInputStream(inputStream) inputStream } diff --git a/streaming/src/main/scala/spark/streaming/dstream/FileInputDStream.scala b/streaming/src/main/scala/spark/streaming/dstream/FileInputDStream.scala index cf72095324..1e6ad84b44 100644 --- a/streaming/src/main/scala/spark/streaming/dstream/FileInputDStream.scala +++ b/streaming/src/main/scala/spark/streaming/dstream/FileInputDStream.scala @@ -14,7 +14,7 @@ private[streaming] class FileInputDStream[K: ClassManifest, V: ClassManifest, F <: NewInputFormat[K,V] : ClassManifest]( @transient ssc_ : StreamingContext, directory: String, - filter: PathFilter = FileInputDStream.defaultPathFilter, + filter: Path => Boolean = FileInputDStream.defaultFilter, newFilesOnly: Boolean = true) extends InputDStream[(K, V)](ssc_) { @@ -60,7 +60,7 @@ class FileInputDStream[K: ClassManifest, V: ClassManifest, F <: NewInputFormat[K val latestModTimeFiles = new HashSet[String]() def accept(path: Path): Boolean = { - if (!filter.accept(path)) { + if (!filter(path)) { return false } else { val modTime = fs.getFileStatus(path).getModificationTime() @@ -95,16 +95,8 @@ class FileInputDStream[K: ClassManifest, V: ClassManifest, F <: NewInputFormat[K } } +private[streaming] object FileInputDStream { - val defaultPathFilter = new PathFilter with Serializable { - def accept(path: Path): Boolean = { - val file = path.getName() - if (file.startsWith(".") || file.endsWith("_tmp")) { - return false - } else { - return true - } - } - } + def defaultFilter(path: Path): Boolean = !path.getName().startsWith(".") } diff --git a/streaming/src/test/scala/spark/streaming/InputStreamsSuite.scala b/streaming/src/test/scala/spark/streaming/InputStreamsSuite.scala index 76b528bec3..00ee903c1e 100644 --- a/streaming/src/test/scala/spark/streaming/InputStreamsSuite.scala +++ b/streaming/src/test/scala/spark/streaming/InputStreamsSuite.scala @@ -318,7 +318,7 @@ class TestServer(port: Int) extends Logging { } } } catch { - case e: SocketException => println(e) + case e: SocketException => logInfo(e) } finally { logInfo("Connection closed") if (!clientSocket.isClosed) clientSocket.close() From 3b0a3b89ac508b57b8afbd1ca7024ee558a5d1af Mon Sep 17 00:00:00 2001 From: Tathagata Das Date: Mon, 7 Jan 2013 14:55:49 -0800 Subject: [PATCH 166/226] Added better docs for RDDCheckpointData --- core/src/main/scala/spark/RDDCheckpointData.scala | 10 +++++++++- core/src/main/scala/spark/rdd/CheckpointRDD.scala | 4 ++++ 2 files changed, 13 insertions(+), 1 deletion(-) diff --git a/core/src/main/scala/spark/RDDCheckpointData.scala b/core/src/main/scala/spark/RDDCheckpointData.scala index e270b6312e..d845a522e4 100644 --- a/core/src/main/scala/spark/RDDCheckpointData.scala +++ b/core/src/main/scala/spark/RDDCheckpointData.scala @@ -14,15 +14,23 @@ private[spark] object CheckpointState extends Enumeration { } /** - * This class contains all the information of the regarding RDD checkpointing. + * This class contains all the information related to RDD checkpointing. Each instance of this class + * is associated with a RDD. 
It manages process of checkpointing of the associated RDD, as well as, + * manages the post-checkpoint state by providing the updated splits, iterator and preferred locations + * of the checkpointed RDD. */ private[spark] class RDDCheckpointData[T: ClassManifest](rdd: RDD[T]) extends Logging with Serializable { import CheckpointState._ + // The checkpoint state of the associated RDD. var cpState = Initialized + + // The file to which the associated RDD has been checkpointed to @transient var cpFile: Option[String] = None + + // The CheckpointRDD created from the checkpoint file, that is, the new parent the associated RDD. @transient var cpRDD: Option[RDD[T]] = None // Mark the RDD for checkpointing diff --git a/core/src/main/scala/spark/rdd/CheckpointRDD.scala b/core/src/main/scala/spark/rdd/CheckpointRDD.scala index 1a88d402c3..86c63ca2f4 100644 --- a/core/src/main/scala/spark/rdd/CheckpointRDD.scala +++ b/core/src/main/scala/spark/rdd/CheckpointRDD.scala @@ -13,6 +13,10 @@ private[spark] class CheckpointRDDSplit(idx: Int, val splitFile: String) extends override val index: Int = idx } +/** + * This RDD represents a RDD checkpoint file (similar to HadoopRDD). + */ +private[spark] class CheckpointRDD[T: ClassManifest](sc: SparkContext, checkpointPath: String) extends RDD[T](sc, Nil) { From e60514d79e753f38db06f8d3df20e113ba4dc11a Mon Sep 17 00:00:00 2001 From: Tathagata Das Date: Mon, 7 Jan 2013 15:16:16 -0800 Subject: [PATCH 167/226] Fixed bug --- .../src/test/scala/spark/streaming/InputStreamsSuite.scala | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/streaming/src/test/scala/spark/streaming/InputStreamsSuite.scala b/streaming/src/test/scala/spark/streaming/InputStreamsSuite.scala index 00ee903c1e..e71ba6ddc1 100644 --- a/streaming/src/test/scala/spark/streaming/InputStreamsSuite.scala +++ b/streaming/src/test/scala/spark/streaming/InputStreamsSuite.scala @@ -318,7 +318,7 @@ class TestServer(port: Int) extends Logging { } } } catch { - case e: SocketException => logInfo(e) + case e: SocketException => logError("TestServer error", e) } finally { logInfo("Connection closed") if (!clientSocket.isClosed) clientSocket.close() From 4719e6d8fe6d93734f5bbe6c91dcc4616c1ed317 Mon Sep 17 00:00:00 2001 From: Tathagata Das Date: Mon, 7 Jan 2013 16:06:07 -0800 Subject: [PATCH 168/226] Changed locations for unit test logs. 
--- bagel/src/test/resources/log4j.properties | 4 ++-- core/src/test/resources/log4j.properties | 4 ++-- repl/src/test/resources/log4j.properties | 4 ++-- streaming/src/test/resources/log4j.properties | 4 ++-- 4 files changed, 8 insertions(+), 8 deletions(-) diff --git a/bagel/src/test/resources/log4j.properties b/bagel/src/test/resources/log4j.properties index 4c99e450bc..83d05cab2f 100644 --- a/bagel/src/test/resources/log4j.properties +++ b/bagel/src/test/resources/log4j.properties @@ -1,8 +1,8 @@ -# Set everything to be logged to the console +# Set everything to be logged to the file bagel/target/unit-tests.log log4j.rootCategory=INFO, file log4j.appender.file=org.apache.log4j.FileAppender log4j.appender.file.append=false -log4j.appender.file.file=spark-tests.log +log4j.appender.file.file=bagel/target/unit-tests.log log4j.appender.file.layout=org.apache.log4j.PatternLayout log4j.appender.file.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss.SSS} %p %c{1}: %m%n diff --git a/core/src/test/resources/log4j.properties b/core/src/test/resources/log4j.properties index 5ed388e91b..6ec89c0184 100644 --- a/core/src/test/resources/log4j.properties +++ b/core/src/test/resources/log4j.properties @@ -1,8 +1,8 @@ -# Set everything to be logged to the file spark-tests.log +# Set everything to be logged to the file core/target/unit-tests.log log4j.rootCategory=INFO, file log4j.appender.file=org.apache.log4j.FileAppender log4j.appender.file.append=false -log4j.appender.file.file=spark-tests.log +log4j.appender.file.file=core/target/unit-tests.log log4j.appender.file.layout=org.apache.log4j.PatternLayout log4j.appender.file.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss.SSS} %p %c{1}: %m%n diff --git a/repl/src/test/resources/log4j.properties b/repl/src/test/resources/log4j.properties index 4c99e450bc..cfb1a390e6 100644 --- a/repl/src/test/resources/log4j.properties +++ b/repl/src/test/resources/log4j.properties @@ -1,8 +1,8 @@ -# Set everything to be logged to the console +# Set everything to be logged to the repl/target/unit-tests.log log4j.rootCategory=INFO, file log4j.appender.file=org.apache.log4j.FileAppender log4j.appender.file.append=false -log4j.appender.file.file=spark-tests.log +log4j.appender.file.file=repl/target/unit-tests.log log4j.appender.file.layout=org.apache.log4j.PatternLayout log4j.appender.file.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss.SSS} %p %c{1}: %m%n diff --git a/streaming/src/test/resources/log4j.properties b/streaming/src/test/resources/log4j.properties index 33bafebaab..edfa1243fa 100644 --- a/streaming/src/test/resources/log4j.properties +++ b/streaming/src/test/resources/log4j.properties @@ -1,8 +1,8 @@ -# Set everything to be logged to the file streaming-tests.log +# Set everything to be logged to the file streaming/target/unit-tests.log log4j.rootCategory=INFO, file log4j.appender.file=org.apache.log4j.FileAppender log4j.appender.file.append=false -log4j.appender.file.file=streaming-tests.log +log4j.appender.file.file=streaming/target/unit-tests.log log4j.appender.file.layout=org.apache.log4j.PatternLayout log4j.appender.file.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss.SSS} %p %c{1}: %m%n From 8c1b87251210bb5553e6a3b6f9648b178b221a3b Mon Sep 17 00:00:00 2001 From: Tathagata Das Date: Mon, 7 Jan 2013 17:48:10 -0800 Subject: [PATCH 169/226] Moved Twitter example to the where the other examples are. 
--- .../examples/twitter}/TwitterBasic.scala | 15 ++++++--------- .../examples/twitter}/TwitterInputDStream.scala | 6 +++++- 2 files changed, 11 insertions(+), 10 deletions(-) rename {streaming/src/main/scala/spark/streaming/examples => examples/src/main/scala/spark/streaming/examples/twitter}/TwitterBasic.scala (77%) rename {streaming/src/main/scala/spark/streaming => examples/src/main/scala/spark/streaming/examples/twitter}/TwitterInputDStream.scala (92%) diff --git a/streaming/src/main/scala/spark/streaming/examples/TwitterBasic.scala b/examples/src/main/scala/spark/streaming/examples/twitter/TwitterBasic.scala similarity index 77% rename from streaming/src/main/scala/spark/streaming/examples/TwitterBasic.scala rename to examples/src/main/scala/spark/streaming/examples/twitter/TwitterBasic.scala index 19b3cad6ad..22a927e87f 100644 --- a/streaming/src/main/scala/spark/streaming/examples/TwitterBasic.scala +++ b/examples/src/main/scala/spark/streaming/examples/twitter/TwitterBasic.scala @@ -1,7 +1,7 @@ -package spark.streaming.examples +package spark.streaming.examples.twitter +import spark.streaming.{Seconds, StreamingContext} import spark.streaming.StreamingContext._ -import spark.streaming.{TwitterInputDStream, Seconds, StreamingContext} object TwitterBasic { def main(args: Array[String]) { @@ -24,19 +24,16 @@ object TwitterBasic { // Word count over hashtags val counts = hashTags.map((_, 1)).reduceByKeyAndWindow(_ + _, Seconds(60)) // TODO: Sorts on one node - should do with global sorting once streaming supports it - val topCounts = counts.collect().map(_.sortBy(-_._2).take(5)) - - // Print popular hashtags - topCounts.foreachRDD(rdd => { - if (rdd.count() != 0) { - val topList = rdd.take(1)(0) + counts.foreach(rdd => { + val topList = rdd.collect().sortBy(-_._2).take(5) + if (!topList.isEmpty) { println("\nPopular topics in last 60 seconds:") topList.foreach{case (tag, count) => println("%s (%s tweets)".format(tag, count))} } }) // Print number of tweets in the window - stream.window(Seconds(60)).count().foreachRDD(rdd => + stream.window(Seconds(60)).count().foreach(rdd => if (rdd.count() != 0) { println("Window size: %s tweets".format(rdd.take(1)(0))) } diff --git a/streaming/src/main/scala/spark/streaming/TwitterInputDStream.scala b/examples/src/main/scala/spark/streaming/examples/twitter/TwitterInputDStream.scala similarity index 92% rename from streaming/src/main/scala/spark/streaming/TwitterInputDStream.scala rename to examples/src/main/scala/spark/streaming/examples/twitter/TwitterInputDStream.scala index adf1ed15c9..1e842d2c8e 100644 --- a/streaming/src/main/scala/spark/streaming/TwitterInputDStream.scala +++ b/examples/src/main/scala/spark/streaming/examples/twitter/TwitterInputDStream.scala @@ -1,6 +1,10 @@ -package spark.streaming +package spark.streaming.examples.twitter import spark.RDD +import spark.streaming._ +import spark.streaming.dstream.InputDStream +import spark.streaming.StreamingContext._ + import twitter4j._ import twitter4j.auth.BasicAuthorization import collection.mutable.ArrayBuffer From 6c502e37934c737494c7288e63f0ed82b21604b5 Mon Sep 17 00:00:00 2001 From: Patrick Wendell Date: Mon, 7 Jan 2013 21:56:24 -0800 Subject: [PATCH 170/226] Making the Twitter example distributed. This adds a distributed (receiver-based) implementation of the Twitter dstream. It also changes the example to perform a distributed sort rather than collecting the dataset at one node. 
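A minimal sketch of the distributed-sort pattern described above, assuming a DStream[String] of hashtags; the helper name topicsByCount and its parameter are illustrative, not from the patch. Each windowed RDD is re-keyed by its count and sorted across the cluster via transform, instead of being collected and sorted on the driver.

    import spark.SparkContext._                        // brings sortByKey into scope on pair RDDs
    import spark.streaming.{DStream, Seconds}
    import spark.streaming.StreamingContext._          // pair operations on DStreams

    def topicsByCount(hashTags: DStream[String]): DStream[(Int, String)] = {
      hashTags.map((_, 1))
        .reduceByKeyAndWindow(_ + _, Seconds(60))      // (tag, count) over the last 60 seconds
        .map { case (tag, count) => (count, tag) }     // key by count so it can be sorted
        .transform(_.sortByKey(false))                 // distributed, descending sort per window
    }
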
--- .../examples/twitter/TwitterBasic.scala | 51 ++++++++++++------- .../twitter/TwitterInputDStream.scala | 46 ++++++++++------- 2 files changed, 61 insertions(+), 36 deletions(-) diff --git a/examples/src/main/scala/spark/streaming/examples/twitter/TwitterBasic.scala b/examples/src/main/scala/spark/streaming/examples/twitter/TwitterBasic.scala index 22a927e87f..377bc0c98e 100644 --- a/examples/src/main/scala/spark/streaming/examples/twitter/TwitterBasic.scala +++ b/examples/src/main/scala/spark/streaming/examples/twitter/TwitterBasic.scala @@ -1,8 +1,15 @@ package spark.streaming.examples.twitter -import spark.streaming.{Seconds, StreamingContext} import spark.streaming.StreamingContext._ +import spark.streaming.{Seconds, StreamingContext} +import spark.SparkContext._ +import spark.storage.StorageLevel +/** + * Calculates popular hashtags (topics) over sliding 10 and 60 second windows from a Twitter + * stream. The stream is instantiated with credentials and optionally filters supplied by the + * command line arguments. + */ object TwitterBasic { def main(args: Array[String]) { if (args.length < 3) { @@ -15,29 +22,39 @@ object TwitterBasic { val filters = args.slice(3, args.length) val ssc = new StreamingContext(master, "TwitterBasic", Seconds(2)) - val stream = new TwitterInputDStream(ssc, username, password, filters) - ssc.graph.addInputStream(stream) + val stream = new TwitterInputDStream(ssc, username, password, filters, + StorageLevel.MEMORY_ONLY_SER) + ssc.registerInputStream(stream) - val hashTags = stream.flatMap( - status => status.getText.split(" ").filter(_.startsWith("#"))) + val hashTags = stream.flatMap(status => status.getText.split(" ").filter(_.startsWith("#"))) - // Word count over hashtags - val counts = hashTags.map((_, 1)).reduceByKeyAndWindow(_ + _, Seconds(60)) - // TODO: Sorts on one node - should do with global sorting once streaming supports it - counts.foreach(rdd => { - val topList = rdd.collect().sortBy(-_._2).take(5) - if (!topList.isEmpty) { - println("\nPopular topics in last 60 seconds:") - topList.foreach{case (tag, count) => println("%s (%s tweets)".format(tag, count))} + val topCounts60 = hashTags.map((_, 1)).reduceByKeyAndWindow(_ + _, Seconds(60)) + .map{case (topic, count) => (count, topic)} + .transform(_.sortByKey(false)) + + val topCounts10 = hashTags.map((_, 1)).reduceByKeyAndWindow(_ + _, Seconds(10)) + .map{case (topic, count) => (count, topic)} + .transform(_.sortByKey(false)) + + + // Print popular hashtags + topCounts60.foreach(rdd => { + if (rdd.count() != 0) { + val topList = rdd.take(5) + println("\nPopular topics in last 60 seconds (%s total):".format(rdd.count())) + topList.foreach{case (count, tag) => println("%s (%s tweets)".format(tag, count))} } }) - // Print number of tweets in the window - stream.window(Seconds(60)).count().foreach(rdd => + topCounts10.foreach(rdd => { if (rdd.count() != 0) { - println("Window size: %s tweets".format(rdd.take(1)(0))) + val topList = rdd.take(5) + println("\nPopular topics in last 10 seconds (%s total):".format(rdd.count())) + topList.foreach{case (count, tag) => println("%s (%s tweets)".format(tag, count))} } - ) + }) + ssc.start() } + } diff --git a/examples/src/main/scala/spark/streaming/examples/twitter/TwitterInputDStream.scala b/examples/src/main/scala/spark/streaming/examples/twitter/TwitterInputDStream.scala index 1e842d2c8e..c7e4855f3b 100644 --- a/examples/src/main/scala/spark/streaming/examples/twitter/TwitterInputDStream.scala +++ 
b/examples/src/main/scala/spark/streaming/examples/twitter/TwitterInputDStream.scala @@ -1,13 +1,11 @@ package spark.streaming.examples.twitter -import spark.RDD +import spark._ import spark.streaming._ -import spark.streaming.dstream.InputDStream -import spark.streaming.StreamingContext._ - +import dstream.{NetworkReceiver, NetworkInputDStream} +import storage.StorageLevel import twitter4j._ import twitter4j.auth.BasicAuthorization -import collection.mutable.ArrayBuffer import collection.JavaConversions._ /* A stream of Twitter statuses, potentially filtered by one or more keywords. @@ -20,17 +18,31 @@ class TwitterInputDStream( @transient ssc_ : StreamingContext, username: String, password: String, - filters: Seq[String] - ) extends InputDStream[Status](ssc_) { - val statuses: ArrayBuffer[Status] = ArrayBuffer() - var twitterStream: TwitterStream = _ + filters: Seq[String], + storageLevel: StorageLevel + ) extends NetworkInputDStream[Status](ssc_) { - override def start() = { + override def createReceiver(): NetworkReceiver[Status] = { + new TwitterReceiver(id, username, password, filters, storageLevel) + } +} + +class TwitterReceiver(streamId: Int, + username: String, + password: String, + filters: Seq[String], + storageLevel: StorageLevel + ) extends NetworkReceiver[Status](streamId) { + var twitterStream: TwitterStream = _ + lazy val blockGenerator = new BlockGenerator(storageLevel) + + protected override def onStart() { + blockGenerator.start() twitterStream = new TwitterStreamFactory() .getInstance(new BasicAuthorization(username, password)) twitterStream.addListener(new StatusListener { def onStatus(status: Status) = { - statuses += status + blockGenerator += status } // Unimplemented def onDeletionNotice(statusDeletionNotice: StatusDeletionNotice) {} @@ -47,16 +59,12 @@ class TwitterInputDStream( } else { twitterStream.sample() } + logInfo("Twitter receiver started") } - override def stop() = { + protected override def onStop() { + blockGenerator.stop() twitterStream.shutdown() - } - - override def compute(validTime: Time): Option[RDD[Status]] = { - // Flush the current tweet buffer - val rdd = Some(ssc.sc.parallelize(statuses)) - statuses.foreach(x => statuses -= x) - rdd + logInfo("Twitter receiver stopped") } } From 156e8b47ef24cd1a54ee9f1141a91c20e26ac037 Mon Sep 17 00:00:00 2001 From: Tathagata Das Date: Wed, 9 Jan 2013 12:42:10 -0800 Subject: [PATCH 171/226] Split Time to Time (absolute instant of time) and Duration (duration of time). 
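A short usage sketch of the resulting algebra (object and value names are illustrative): instants and lengths of time no longer share one type, so subtracting two Times yields a Duration, while adding or subtracting a Duration shifts a Time. Keeping the two types apart lets the compiler reject code that mixes an absolute instant with an interval.

    import spark.streaming.{Duration, Minutes, Seconds, Time}

    object TimeVsDuration {
      def example() {
        val batch: Duration  = Seconds(2)           // Milliseconds/Seconds/Minutes now build Durations
        val window: Duration = Minutes(1)
        require(window.isMultipleOf(batch))         // 60000 ms is a multiple of 2000 ms
        val batchesPerWindow: Long = window / batch // Duration / Duration = Long (here 30)

        val now = new Time(System.currentTimeMillis)
        val windowStart: Time = now - window        // Time - Duration = Time
        val elapsed: Duration = now - windowStart   // Time - Time = Duration
        val aligned: Time     = now.floor(batch)    // round down to a batch boundary
      }
    }
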
--- .../scala/spark/streaming/Checkpoint.scala | 2 +- .../main/scala/spark/streaming/DStream.scala | 26 ++++---- .../scala/spark/streaming/DStreamGraph.scala | 8 +-- .../main/scala/spark/streaming/Duration.scala | 62 +++++++++++++++++++ .../main/scala/spark/streaming/Interval.scala | 30 +++------ .../streaming/PairDStreamFunctions.scala | 40 ++++++------ .../scala/spark/streaming/Scheduler.scala | 5 +- .../spark/streaming/StreamingContext.scala | 12 ++-- .../src/main/scala/spark/streaming/Time.scala | 55 ++++------------ .../streaming/dstream/CoGroupedDStream.scala | 4 +- .../streaming/dstream/FilteredDStream.scala | 4 +- .../dstream/FlatMapValuedDStream.scala | 4 +- .../streaming/dstream/FlatMappedDStream.scala | 4 +- .../streaming/dstream/ForEachDStream.scala | 4 +- .../streaming/dstream/GlommedDStream.scala | 4 +- .../streaming/dstream/InputDStream.scala | 4 +- .../dstream/MapPartitionedDStream.scala | 4 +- .../streaming/dstream/MapValuedDStream.scala | 4 +- .../streaming/dstream/MappedDStream.scala | 4 +- .../dstream/ReducedWindowedDStream.scala | 16 ++--- .../streaming/dstream/ShuffledDStream.scala | 4 +- .../streaming/dstream/StateDStream.scala | 4 +- .../dstream/TransformedDStream.scala | 4 +- .../streaming/dstream/UnionDStream.scala | 4 +- .../streaming/dstream/WindowedDStream.scala | 14 ++--- 25 files changed, 174 insertions(+), 152 deletions(-) create mode 100644 streaming/src/main/scala/spark/streaming/Duration.scala diff --git a/streaming/src/main/scala/spark/streaming/Checkpoint.scala b/streaming/src/main/scala/spark/streaming/Checkpoint.scala index 11a7232d7b..a9c6e65d62 100644 --- a/streaming/src/main/scala/spark/streaming/Checkpoint.scala +++ b/streaming/src/main/scala/spark/streaming/Checkpoint.scala @@ -17,7 +17,7 @@ class Checkpoint(@transient ssc: StreamingContext, val checkpointTime: Time) val jars = ssc.sc.jars val graph = ssc.graph val checkpointDir = ssc.checkpointDir - val checkpointInterval = ssc.checkpointInterval + val checkpointInterval: Duration = ssc.checkpointInterval def validate() { assert(master != null, "Checkpoint.master is null") diff --git a/streaming/src/main/scala/spark/streaming/DStream.scala b/streaming/src/main/scala/spark/streaming/DStream.scala index beba9cfd4f..7611598fde 100644 --- a/streaming/src/main/scala/spark/streaming/DStream.scala +++ b/streaming/src/main/scala/spark/streaming/DStream.scala @@ -2,7 +2,7 @@ package spark.streaming import spark.streaming.dstream._ import StreamingContext._ -import Time._ +//import Time._ import spark.{RDD, Logging} import spark.storage.StorageLevel @@ -47,7 +47,7 @@ abstract class DStream[T: ClassManifest] ( // ======================================================================= /** Time interval after which the DStream generates a RDD */ - def slideTime: Time + def slideTime: Duration /** List of parent DStreams on which this DStream depends on */ def dependencies: List[DStream[_]] @@ -67,14 +67,14 @@ abstract class DStream[T: ClassManifest] ( protected[streaming] var zeroTime: Time = null // Duration for which the DStream will remember each RDD created - protected[streaming] var rememberDuration: Time = null + protected[streaming] var rememberDuration: Duration = null // Storage level of the RDDs in the stream protected[streaming] var storageLevel: StorageLevel = StorageLevel.NONE // Checkpoint details protected[streaming] val mustCheckpoint = false - protected[streaming] var checkpointInterval: Time = null + protected[streaming] var checkpointInterval: Duration = null protected[streaming] var 
checkpointData = new DStreamCheckpointData(HashMap[Time, Any]()) // Reference to whole DStream graph @@ -108,7 +108,7 @@ abstract class DStream[T: ClassManifest] ( * Enable periodic checkpointing of RDDs of this DStream * @param interval Time interval after which generated RDD will be checkpointed */ - def checkpoint(interval: Time): DStream[T] = { + def checkpoint(interval: Duration): DStream[T] = { if (isInitialized) { throw new UnsupportedOperationException( "Cannot change checkpoint interval of an DStream after streaming context has started") @@ -224,7 +224,7 @@ abstract class DStream[T: ClassManifest] ( dependencies.foreach(_.setGraph(graph)) } - protected[streaming] def remember(duration: Time) { + protected[streaming] def remember(duration: Duration) { if (duration != null && duration > rememberDuration) { rememberDuration = duration logInfo("Duration for remembering RDDs set to " + rememberDuration + " for " + this) @@ -531,7 +531,7 @@ abstract class DStream[T: ClassManifest] ( * @param windowTime width of the window; must be a multiple of this DStream's interval. * @return */ - def window(windowTime: Time): DStream[T] = window(windowTime, this.slideTime) + def window(windowTime: Duration): DStream[T] = window(windowTime, this.slideTime) /** * Return a new DStream which is computed based on windowed batches of this DStream. @@ -541,7 +541,7 @@ abstract class DStream[T: ClassManifest] ( * the new DStream will generate RDDs); must be a multiple of this * DStream's interval */ - def window(windowTime: Time, slideTime: Time): DStream[T] = { + def window(windowTime: Duration, slideTime: Duration): DStream[T] = { new WindowedDStream(this, windowTime, slideTime) } @@ -550,22 +550,22 @@ abstract class DStream[T: ClassManifest] ( * This is equivalent to window(batchTime, batchTime). * @param batchTime tumbling window duration; must be a multiple of this DStream's interval */ - def tumble(batchTime: Time): DStream[T] = window(batchTime, batchTime) + def tumble(batchTime: Duration): DStream[T] = window(batchTime, batchTime) /** * Returns a new DStream in which each RDD has a single element generated by reducing all * elements in a window over this DStream. windowTime and slideTime are as defined in the * window() operation. This is equivalent to window(windowTime, slideTime).reduce(reduceFunc) */ - def reduceByWindow(reduceFunc: (T, T) => T, windowTime: Time, slideTime: Time): DStream[T] = { + def reduceByWindow(reduceFunc: (T, T) => T, windowTime: Duration, slideTime: Duration): DStream[T] = { this.window(windowTime, slideTime).reduce(reduceFunc) } def reduceByWindow( reduceFunc: (T, T) => T, invReduceFunc: (T, T) => T, - windowTime: Time, - slideTime: Time + windowTime: Duration, + slideTime: Duration ): DStream[T] = { this.map(x => (1, x)) .reduceByKeyAndWindow(reduceFunc, invReduceFunc, windowTime, slideTime, 1) @@ -577,7 +577,7 @@ abstract class DStream[T: ClassManifest] ( * of elements in a window over this DStream. windowTime and slideTime are as defined in the * window() operation. 
This is equivalent to window(windowTime, slideTime).count() */ - def countByWindow(windowTime: Time, slideTime: Time): DStream[Int] = { + def countByWindow(windowTime: Duration, slideTime: Duration): DStream[Int] = { this.map(_ => 1).reduceByWindow(_ + _, _ - _, windowTime, slideTime) } diff --git a/streaming/src/main/scala/spark/streaming/DStreamGraph.scala b/streaming/src/main/scala/spark/streaming/DStreamGraph.scala index c72429370e..bc4a40d7bc 100644 --- a/streaming/src/main/scala/spark/streaming/DStreamGraph.scala +++ b/streaming/src/main/scala/spark/streaming/DStreamGraph.scala @@ -12,8 +12,8 @@ final private[streaming] class DStreamGraph extends Serializable with Logging { private val outputStreams = new ArrayBuffer[DStream[_]]() private[streaming] var zeroTime: Time = null - private[streaming] var batchDuration: Time = null - private[streaming] var rememberDuration: Time = null + private[streaming] var batchDuration: Duration = null + private[streaming] var rememberDuration: Duration = null private[streaming] var checkpointInProgress = false private[streaming] def start(time: Time) { @@ -41,7 +41,7 @@ final private[streaming] class DStreamGraph extends Serializable with Logging { } } - private[streaming] def setBatchDuration(duration: Time) { + private[streaming] def setBatchDuration(duration: Duration) { this.synchronized { if (batchDuration != null) { throw new Exception("Batch duration already set as " + batchDuration + @@ -51,7 +51,7 @@ final private[streaming] class DStreamGraph extends Serializable with Logging { batchDuration = duration } - private[streaming] def remember(duration: Time) { + private[streaming] def remember(duration: Duration) { this.synchronized { if (rememberDuration != null) { throw new Exception("Batch duration already set as " + batchDuration + diff --git a/streaming/src/main/scala/spark/streaming/Duration.scala b/streaming/src/main/scala/spark/streaming/Duration.scala new file mode 100644 index 0000000000..d2728d9dca --- /dev/null +++ b/streaming/src/main/scala/spark/streaming/Duration.scala @@ -0,0 +1,62 @@ +package spark.streaming + +class Duration (private val millis: Long) { + + def < (that: Duration): Boolean = (this.millis < that.millis) + + def <= (that: Duration): Boolean = (this.millis <= that.millis) + + def > (that: Duration): Boolean = (this.millis > that.millis) + + def >= (that: Duration): Boolean = (this.millis >= that.millis) + + def + (that: Duration): Duration = new Duration(millis + that.millis) + + def - (that: Duration): Duration = new Duration(millis - that.millis) + + def * (times: Int): Duration = new Duration(millis * times) + + def / (that: Duration): Long = millis / that.millis + + def isMultipleOf(that: Duration): Boolean = + (this.millis % that.millis == 0) + + def min(that: Duration): Duration = if (this < that) this else that + + def max(that: Duration): Duration = if (this > that) this else that + + def isZero: Boolean = (this.millis == 0) + + override def toString: String = (millis.toString + " ms") + + def toFormattedString: String = millis.toString + + def milliseconds: Long = millis +} + + +/** + * Helper object that creates instance of [[spark.streaming.Duration]] representing + * a given number of milliseconds. + */ +object Milliseconds { + def apply(milliseconds: Long) = new Duration(milliseconds) +} + +/** + * Helper object that creates instance of [[spark.streaming.Duration]] representing + * a given number of seconds. 
+ */ +object Seconds { + def apply(seconds: Long) = new Duration(seconds * 1000) +} + +/** + * Helper object that creates instance of [[spark.streaming.Duration]] representing + * a given number of minutes. + */ +object Minutes { + def apply(minutes: Long) = new Duration(minutes * 60000) +} + + diff --git a/streaming/src/main/scala/spark/streaming/Interval.scala b/streaming/src/main/scala/spark/streaming/Interval.scala index fa0b7ce19d..dc21dfb722 100644 --- a/streaming/src/main/scala/spark/streaming/Interval.scala +++ b/streaming/src/main/scala/spark/streaming/Interval.scala @@ -1,16 +1,16 @@ package spark.streaming private[streaming] -case class Interval(beginTime: Time, endTime: Time) { - def this(beginMs: Long, endMs: Long) = this(Time(beginMs), new Time(endMs)) +class Interval(val beginTime: Time, val endTime: Time) { + def this(beginMs: Long, endMs: Long) = this(new Time(beginMs), new Time(endMs)) - def duration(): Time = endTime - beginTime + def duration(): Duration = endTime - beginTime - def + (time: Time): Interval = { + def + (time: Duration): Interval = { new Interval(beginTime + time, endTime + time) } - def - (time: Time): Interval = { + def - (time: Duration): Interval = { new Interval(beginTime - time, endTime - time) } @@ -27,24 +27,14 @@ case class Interval(beginTime: Time, endTime: Time) { def >= (that: Interval) = !(this < that) - def next(): Interval = { - this + (endTime - beginTime) - } - - def isZero = (beginTime.isZero && endTime.isZero) - - def toFormattedString = beginTime.toFormattedString + "-" + endTime.toFormattedString - - override def toString = "[" + beginTime + ", " + endTime + "]" + override def toString = "[" + beginTime + ", " + endTime + "]" } object Interval { - def zero() = new Interval (Time.zero, Time.zero) - - def currentInterval(intervalDuration: Time): Interval = { - val time = Time(System.currentTimeMillis) - val intervalBegin = time.floor(intervalDuration) - Interval(intervalBegin, intervalBegin + intervalDuration) + def currentInterval(duration: Duration): Interval = { + val time = new Time(System.currentTimeMillis) + val intervalBegin = time.floor(duration) + new Interval(intervalBegin, intervalBegin + duration) } } diff --git a/streaming/src/main/scala/spark/streaming/PairDStreamFunctions.scala b/streaming/src/main/scala/spark/streaming/PairDStreamFunctions.scala index b0a208e67f..dd64064138 100644 --- a/streaming/src/main/scala/spark/streaming/PairDStreamFunctions.scala +++ b/streaming/src/main/scala/spark/streaming/PairDStreamFunctions.scala @@ -69,21 +69,21 @@ extends Serializable { self.map(x => (x._1, 1L)).reduceByKey((x: Long, y: Long) => x + y, numPartitions) } - def groupByKeyAndWindow(windowTime: Time, slideTime: Time): DStream[(K, Seq[V])] = { + def groupByKeyAndWindow(windowTime: Duration, slideTime: Duration): DStream[(K, Seq[V])] = { groupByKeyAndWindow(windowTime, slideTime, defaultPartitioner()) } def groupByKeyAndWindow( - windowTime: Time, - slideTime: Time, + windowTime: Duration, + slideTime: Duration, numPartitions: Int ): DStream[(K, Seq[V])] = { groupByKeyAndWindow(windowTime, slideTime, defaultPartitioner(numPartitions)) } def groupByKeyAndWindow( - windowTime: Time, - slideTime: Time, + windowTime: Duration, + slideTime: Duration, partitioner: Partitioner ): DStream[(K, Seq[V])] = { self.window(windowTime, slideTime).groupByKey(partitioner) @@ -91,23 +91,23 @@ extends Serializable { def reduceByKeyAndWindow( reduceFunc: (V, V) => V, - windowTime: Time + windowTime: Duration ): DStream[(K, V)] = { 
reduceByKeyAndWindow(reduceFunc, windowTime, self.slideTime, defaultPartitioner()) } def reduceByKeyAndWindow( reduceFunc: (V, V) => V, - windowTime: Time, - slideTime: Time + windowTime: Duration, + slideTime: Duration ): DStream[(K, V)] = { reduceByKeyAndWindow(reduceFunc, windowTime, slideTime, defaultPartitioner()) } def reduceByKeyAndWindow( reduceFunc: (V, V) => V, - windowTime: Time, - slideTime: Time, + windowTime: Duration, + slideTime: Duration, numPartitions: Int ): DStream[(K, V)] = { reduceByKeyAndWindow(reduceFunc, windowTime, slideTime, defaultPartitioner(numPartitions)) @@ -115,8 +115,8 @@ extends Serializable { def reduceByKeyAndWindow( reduceFunc: (V, V) => V, - windowTime: Time, - slideTime: Time, + windowTime: Duration, + slideTime: Duration, partitioner: Partitioner ): DStream[(K, V)] = { val cleanedReduceFunc = ssc.sc.clean(reduceFunc) @@ -134,8 +134,8 @@ extends Serializable { def reduceByKeyAndWindow( reduceFunc: (V, V) => V, invReduceFunc: (V, V) => V, - windowTime: Time, - slideTime: Time + windowTime: Duration, + slideTime: Duration ): DStream[(K, V)] = { reduceByKeyAndWindow( @@ -145,8 +145,8 @@ extends Serializable { def reduceByKeyAndWindow( reduceFunc: (V, V) => V, invReduceFunc: (V, V) => V, - windowTime: Time, - slideTime: Time, + windowTime: Duration, + slideTime: Duration, numPartitions: Int ): DStream[(K, V)] = { @@ -157,8 +157,8 @@ extends Serializable { def reduceByKeyAndWindow( reduceFunc: (V, V) => V, invReduceFunc: (V, V) => V, - windowTime: Time, - slideTime: Time, + windowTime: Duration, + slideTime: Duration, partitioner: Partitioner ): DStream[(K, V)] = { @@ -169,8 +169,8 @@ extends Serializable { } def countByKeyAndWindow( - windowTime: Time, - slideTime: Time, + windowTime: Duration, + slideTime: Duration, numPartitions: Int = self.ssc.sc.defaultParallelism ): DStream[(K, Long)] = { diff --git a/streaming/src/main/scala/spark/streaming/Scheduler.scala b/streaming/src/main/scala/spark/streaming/Scheduler.scala index eb40affe6d..10845e3a5e 100644 --- a/streaming/src/main/scala/spark/streaming/Scheduler.scala +++ b/streaming/src/main/scala/spark/streaming/Scheduler.scala @@ -22,7 +22,8 @@ class Scheduler(ssc: StreamingContext) extends Logging { val clockClass = System.getProperty("spark.streaming.clock", "spark.streaming.util.SystemClock") val clock = Class.forName(clockClass).newInstance().asInstanceOf[Clock] - val timer = new RecurringTimer(clock, ssc.graph.batchDuration.milliseconds, generateRDDs(_)) + val timer = new RecurringTimer(clock, ssc.graph.batchDuration.milliseconds, + longTime => generateRDDs(new Time(longTime))) def start() { // If context was started from checkpoint, then restart timer such that @@ -41,7 +42,7 @@ class Scheduler(ssc: StreamingContext) extends Logging { timer.restart(graph.zeroTime.milliseconds) logInfo("Scheduler's timer restarted") } else { - val firstTime = Time(timer.start()) + val firstTime = new Time(timer.start()) graph.start(firstTime - ssc.graph.batchDuration) logInfo("Scheduler's timer started") } diff --git a/streaming/src/main/scala/spark/streaming/StreamingContext.scala b/streaming/src/main/scala/spark/streaming/StreamingContext.scala index 215246ba2e..ee8314df3f 100644 --- a/streaming/src/main/scala/spark/streaming/StreamingContext.scala +++ b/streaming/src/main/scala/spark/streaming/StreamingContext.scala @@ -26,7 +26,7 @@ import java.util.UUID class StreamingContext private ( sc_ : SparkContext, cp_ : Checkpoint, - batchDur_ : Time + batchDur_ : Duration ) extends Logging { /** @@ -34,7 +34,7 @@ 
class StreamingContext private ( * @param sparkContext Existing SparkContext * @param batchDuration The time interval at which streaming data will be divided into batches */ - def this(sparkContext: SparkContext, batchDuration: Time) = this(sparkContext, null, batchDuration) + def this(sparkContext: SparkContext, batchDuration: Duration) = this(sparkContext, null, batchDuration) /** * Creates a StreamingContext by providing the details necessary for creating a new SparkContext. @@ -42,7 +42,7 @@ class StreamingContext private ( * @param frameworkName A name for your job, to display on the cluster web UI * @param batchDuration The time interval at which streaming data will be divided into batches */ - def this(master: String, frameworkName: String, batchDuration: Time) = + def this(master: String, frameworkName: String, batchDuration: Duration) = this(StreamingContext.createNewSparkContext(master, frameworkName), null, batchDuration) /** @@ -96,7 +96,7 @@ class StreamingContext private ( } } - protected[streaming] var checkpointInterval: Time = if (isCheckpointPresent) cp_.checkpointInterval else null + protected[streaming] var checkpointInterval: Duration = if (isCheckpointPresent) cp_.checkpointInterval else null protected[streaming] var receiverJobThread: Thread = null protected[streaming] var scheduler: Scheduler = null @@ -107,7 +107,7 @@ class StreamingContext private ( * if the developer wishes to query old data outside the DStream computation). * @param duration Minimum duration that each DStream should remember its RDDs */ - def remember(duration: Time) { + def remember(duration: Duration) { graph.remember(duration) } @@ -117,7 +117,7 @@ class StreamingContext private ( * @param directory HDFS-compatible directory where the checkpoint data will be reliably stored * @param interval checkpoint interval */ - def checkpoint(directory: String, interval: Time = null) { + def checkpoint(directory: String, interval: Duration = null) { if (directory != null) { sc.setCheckpointDir(StreamingContext.getSparkCheckpointDir(directory)) checkpointDir = directory diff --git a/streaming/src/main/scala/spark/streaming/Time.scala b/streaming/src/main/scala/spark/streaming/Time.scala index 3c6fd5d967..069df82e52 100644 --- a/streaming/src/main/scala/spark/streaming/Time.scala +++ b/streaming/src/main/scala/spark/streaming/Time.scala @@ -7,7 +7,7 @@ package spark.streaming * @param millis Time in UTC. 
*/ -case class Time(private val millis: Long) { +class Time(private val millis: Long) { def < (that: Time): Boolean = (this.millis < that.millis) @@ -17,63 +17,32 @@ case class Time(private val millis: Long) { def >= (that: Time): Boolean = (this.millis >= that.millis) - def + (that: Time): Time = Time(millis + that.millis) - - def - (that: Time): Time = Time(millis - that.millis) - - def * (times: Int): Time = Time(millis * times) + def + (that: Duration): Time = new Time(millis + that.milliseconds) - def / (that: Time): Long = millis / that.millis + def - (that: Time): Duration = new Duration(millis - that.millis) - def floor(that: Time): Time = { - val t = that.millis + def - (that: Duration): Time = new Time(millis - that.milliseconds) + + def floor(that: Duration): Time = { + val t = that.milliseconds val m = math.floor(this.millis / t).toLong - Time(m * t) + new Time(m * t) } - def isMultipleOf(that: Time): Boolean = - (this.millis % that.millis == 0) + def isMultipleOf(that: Duration): Boolean = + (this.millis % that.milliseconds == 0) def min(that: Time): Time = if (this < that) this else that def max(that: Time): Time = if (this > that) this else that - def isZero: Boolean = (this.millis == 0) - override def toString: String = (millis.toString + " ms") - def toFormattedString: String = millis.toString - def milliseconds: Long = millis } -private[streaming] object Time { - val zero = Time(0) - +/*private[streaming] object Time { implicit def toTime(long: Long) = Time(long) } - -/** - * Helper object that creates instance of [[spark.streaming.Time]] representing - * a given number of milliseconds. - */ -object Milliseconds { - def apply(milliseconds: Long) = Time(milliseconds) -} - -/** - * Helper object that creates instance of [[spark.streaming.Time]] representing - * a given number of seconds. - */ -object Seconds { - def apply(seconds: Long) = Time(seconds * 1000) -} - -/** - * Helper object that creates instance of [[spark.streaming.Time]] representing - * a given number of minutes. 
- */ -object Minutes { - def apply(minutes: Long) = Time(minutes * 60000) -} +*/ diff --git a/streaming/src/main/scala/spark/streaming/dstream/CoGroupedDStream.scala b/streaming/src/main/scala/spark/streaming/dstream/CoGroupedDStream.scala index bc23d423d3..ca178fd384 100644 --- a/streaming/src/main/scala/spark/streaming/dstream/CoGroupedDStream.scala +++ b/streaming/src/main/scala/spark/streaming/dstream/CoGroupedDStream.scala @@ -2,7 +2,7 @@ package spark.streaming.dstream import spark.{RDD, Partitioner} import spark.rdd.CoGroupedRDD -import spark.streaming.{Time, DStream} +import spark.streaming.{Time, DStream, Duration} private[streaming] class CoGroupedDStream[K : ClassManifest]( @@ -24,7 +24,7 @@ class CoGroupedDStream[K : ClassManifest]( override def dependencies = parents.toList - override def slideTime = parents.head.slideTime + override def slideTime: Duration = parents.head.slideTime override def compute(validTime: Time): Option[RDD[(K, Seq[Seq[_]])]] = { val part = partitioner diff --git a/streaming/src/main/scala/spark/streaming/dstream/FilteredDStream.scala b/streaming/src/main/scala/spark/streaming/dstream/FilteredDStream.scala index 1cbb4d536e..76b9e58029 100644 --- a/streaming/src/main/scala/spark/streaming/dstream/FilteredDStream.scala +++ b/streaming/src/main/scala/spark/streaming/dstream/FilteredDStream.scala @@ -1,6 +1,6 @@ package spark.streaming.dstream -import spark.streaming.{DStream, Time} +import spark.streaming.{Duration, DStream, Time} import spark.RDD private[streaming] @@ -11,7 +11,7 @@ class FilteredDStream[T: ClassManifest]( override def dependencies = List(parent) - override def slideTime: Time = parent.slideTime + override def slideTime: Duration = parent.slideTime override def compute(validTime: Time): Option[RDD[T]] = { parent.getOrCompute(validTime).map(_.filter(filterFunc)) diff --git a/streaming/src/main/scala/spark/streaming/dstream/FlatMapValuedDStream.scala b/streaming/src/main/scala/spark/streaming/dstream/FlatMapValuedDStream.scala index 11ed8cf317..28e9a456ac 100644 --- a/streaming/src/main/scala/spark/streaming/dstream/FlatMapValuedDStream.scala +++ b/streaming/src/main/scala/spark/streaming/dstream/FlatMapValuedDStream.scala @@ -1,6 +1,6 @@ package spark.streaming.dstream -import spark.streaming.{DStream, Time} +import spark.streaming.{Duration, DStream, Time} import spark.RDD import spark.SparkContext._ @@ -12,7 +12,7 @@ class FlatMapValuedDStream[K: ClassManifest, V: ClassManifest, U: ClassManifest] override def dependencies = List(parent) - override def slideTime: Time = parent.slideTime + override def slideTime: Duration = parent.slideTime override def compute(validTime: Time): Option[RDD[(K, U)]] = { parent.getOrCompute(validTime).map(_.flatMapValues[U](flatMapValueFunc)) diff --git a/streaming/src/main/scala/spark/streaming/dstream/FlatMappedDStream.scala b/streaming/src/main/scala/spark/streaming/dstream/FlatMappedDStream.scala index a13b4c9ff9..ef305b66f1 100644 --- a/streaming/src/main/scala/spark/streaming/dstream/FlatMappedDStream.scala +++ b/streaming/src/main/scala/spark/streaming/dstream/FlatMappedDStream.scala @@ -1,6 +1,6 @@ package spark.streaming.dstream -import spark.streaming.{DStream, Time} +import spark.streaming.{Duration, DStream, Time} import spark.RDD private[streaming] @@ -11,7 +11,7 @@ class FlatMappedDStream[T: ClassManifest, U: ClassManifest]( override def dependencies = List(parent) - override def slideTime: Time = parent.slideTime + override def slideTime: Duration = parent.slideTime override def 
compute(validTime: Time): Option[RDD[U]] = { parent.getOrCompute(validTime).map(_.flatMap(flatMapFunc)) diff --git a/streaming/src/main/scala/spark/streaming/dstream/ForEachDStream.scala b/streaming/src/main/scala/spark/streaming/dstream/ForEachDStream.scala index 41c629a225..f8af0a38a7 100644 --- a/streaming/src/main/scala/spark/streaming/dstream/ForEachDStream.scala +++ b/streaming/src/main/scala/spark/streaming/dstream/ForEachDStream.scala @@ -1,7 +1,7 @@ package spark.streaming.dstream import spark.RDD -import spark.streaming.{DStream, Job, Time} +import spark.streaming.{Duration, DStream, Job, Time} private[streaming] class ForEachDStream[T: ClassManifest] ( @@ -11,7 +11,7 @@ class ForEachDStream[T: ClassManifest] ( override def dependencies = List(parent) - override def slideTime: Time = parent.slideTime + override def slideTime: Duration = parent.slideTime override def compute(validTime: Time): Option[RDD[Unit]] = None diff --git a/streaming/src/main/scala/spark/streaming/dstream/GlommedDStream.scala b/streaming/src/main/scala/spark/streaming/dstream/GlommedDStream.scala index 92ea503cae..19cccea735 100644 --- a/streaming/src/main/scala/spark/streaming/dstream/GlommedDStream.scala +++ b/streaming/src/main/scala/spark/streaming/dstream/GlommedDStream.scala @@ -1,6 +1,6 @@ package spark.streaming.dstream -import spark.streaming.{DStream, Time} +import spark.streaming.{Duration, DStream, Time} import spark.RDD private[streaming] @@ -9,7 +9,7 @@ class GlommedDStream[T: ClassManifest](parent: DStream[T]) override def dependencies = List(parent) - override def slideTime: Time = parent.slideTime + override def slideTime: Duration = parent.slideTime override def compute(validTime: Time): Option[RDD[Array[T]]] = { parent.getOrCompute(validTime).map(_.glom()) diff --git a/streaming/src/main/scala/spark/streaming/dstream/InputDStream.scala b/streaming/src/main/scala/spark/streaming/dstream/InputDStream.scala index 4959c66b06..50f0f45796 100644 --- a/streaming/src/main/scala/spark/streaming/dstream/InputDStream.scala +++ b/streaming/src/main/scala/spark/streaming/dstream/InputDStream.scala @@ -1,13 +1,13 @@ package spark.streaming.dstream -import spark.streaming.{StreamingContext, DStream} +import spark.streaming.{Duration, StreamingContext, DStream} abstract class InputDStream[T: ClassManifest] (@transient ssc_ : StreamingContext) extends DStream[T](ssc_) { override def dependencies = List() - override def slideTime = { + override def slideTime: Duration = { if (ssc == null) throw new Exception("ssc is null") if (ssc.graph.batchDuration == null) throw new Exception("batchDuration is null") ssc.graph.batchDuration diff --git a/streaming/src/main/scala/spark/streaming/dstream/MapPartitionedDStream.scala b/streaming/src/main/scala/spark/streaming/dstream/MapPartitionedDStream.scala index daf78c6893..e9ca668aa6 100644 --- a/streaming/src/main/scala/spark/streaming/dstream/MapPartitionedDStream.scala +++ b/streaming/src/main/scala/spark/streaming/dstream/MapPartitionedDStream.scala @@ -1,6 +1,6 @@ package spark.streaming.dstream -import spark.streaming.{DStream, Time} +import spark.streaming.{Duration, DStream, Time} import spark.RDD private[streaming] @@ -12,7 +12,7 @@ class MapPartitionedDStream[T: ClassManifest, U: ClassManifest]( override def dependencies = List(parent) - override def slideTime: Time = parent.slideTime + override def slideTime: Duration = parent.slideTime override def compute(validTime: Time): Option[RDD[U]] = { parent.getOrCompute(validTime).map(_.mapPartitions[U](mapPartFunc, 
preservePartitioning)) diff --git a/streaming/src/main/scala/spark/streaming/dstream/MapValuedDStream.scala b/streaming/src/main/scala/spark/streaming/dstream/MapValuedDStream.scala index 689caeef0e..ebc7d0698b 100644 --- a/streaming/src/main/scala/spark/streaming/dstream/MapValuedDStream.scala +++ b/streaming/src/main/scala/spark/streaming/dstream/MapValuedDStream.scala @@ -1,6 +1,6 @@ package spark.streaming.dstream -import spark.streaming.{DStream, Time} +import spark.streaming.{Duration, DStream, Time} import spark.RDD import spark.SparkContext._ @@ -12,7 +12,7 @@ class MapValuedDStream[K: ClassManifest, V: ClassManifest, U: ClassManifest]( override def dependencies = List(parent) - override def slideTime: Time = parent.slideTime + override def slideTime: Duration = parent.slideTime override def compute(validTime: Time): Option[RDD[(K, U)]] = { parent.getOrCompute(validTime).map(_.mapValues[U](mapValueFunc)) diff --git a/streaming/src/main/scala/spark/streaming/dstream/MappedDStream.scala b/streaming/src/main/scala/spark/streaming/dstream/MappedDStream.scala index 786b9966f2..3af8e7ab88 100644 --- a/streaming/src/main/scala/spark/streaming/dstream/MappedDStream.scala +++ b/streaming/src/main/scala/spark/streaming/dstream/MappedDStream.scala @@ -1,6 +1,6 @@ package spark.streaming.dstream -import spark.streaming.{DStream, Time} +import spark.streaming.{Duration, DStream, Time} import spark.RDD private[streaming] @@ -11,7 +11,7 @@ class MappedDStream[T: ClassManifest, U: ClassManifest] ( override def dependencies = List(parent) - override def slideTime: Time = parent.slideTime + override def slideTime: Duration = parent.slideTime override def compute(validTime: Time): Option[RDD[U]] = { parent.getOrCompute(validTime).map(_.map[U](mapFunc)) diff --git a/streaming/src/main/scala/spark/streaming/dstream/ReducedWindowedDStream.scala b/streaming/src/main/scala/spark/streaming/dstream/ReducedWindowedDStream.scala index d289ed2a3f..a685a778ce 100644 --- a/streaming/src/main/scala/spark/streaming/dstream/ReducedWindowedDStream.scala +++ b/streaming/src/main/scala/spark/streaming/dstream/ReducedWindowedDStream.scala @@ -9,15 +9,15 @@ import spark.SparkContext._ import spark.storage.StorageLevel import scala.collection.mutable.ArrayBuffer -import spark.streaming.{Interval, Time, DStream} +import spark.streaming.{Duration, Interval, Time, DStream} private[streaming] class ReducedWindowedDStream[K: ClassManifest, V: ClassManifest]( parent: DStream[(K, V)], reduceFunc: (V, V) => V, invReduceFunc: (V, V) => V, - _windowTime: Time, - _slideTime: Time, + _windowTime: Duration, + _slideTime: Duration, partitioner: Partitioner ) extends DStream[(K,V)](parent.ssc) { @@ -39,15 +39,15 @@ class ReducedWindowedDStream[K: ClassManifest, V: ClassManifest]( super.persist(StorageLevel.MEMORY_ONLY_SER) reducedStream.persist(StorageLevel.MEMORY_ONLY_SER) - def windowTime: Time = _windowTime + def windowTime: Duration = _windowTime override def dependencies = List(reducedStream) - override def slideTime: Time = _slideTime + override def slideTime: Duration = _slideTime override val mustCheckpoint = true - override def parentRememberDuration: Time = rememberDuration + windowTime + override def parentRememberDuration: Duration = rememberDuration + windowTime override def persist(storageLevel: StorageLevel): DStream[(K,V)] = { super.persist(storageLevel) @@ -55,7 +55,7 @@ class ReducedWindowedDStream[K: ClassManifest, V: ClassManifest]( this } - override def checkpoint(interval: Time): DStream[(K, V)] = { + override def 
checkpoint(interval: Duration): DStream[(K, V)] = { super.checkpoint(interval) //reducedStream.checkpoint(interval) this @@ -66,7 +66,7 @@ class ReducedWindowedDStream[K: ClassManifest, V: ClassManifest]( val invReduceF = invReduceFunc val currentTime = validTime - val currentWindow = Interval(currentTime - windowTime + parent.slideTime, currentTime) + val currentWindow = new Interval(currentTime - windowTime + parent.slideTime, currentTime) val previousWindow = currentWindow - slideTime logDebug("Window time = " + windowTime) diff --git a/streaming/src/main/scala/spark/streaming/dstream/ShuffledDStream.scala b/streaming/src/main/scala/spark/streaming/dstream/ShuffledDStream.scala index 6854bbe665..7612804b96 100644 --- a/streaming/src/main/scala/spark/streaming/dstream/ShuffledDStream.scala +++ b/streaming/src/main/scala/spark/streaming/dstream/ShuffledDStream.scala @@ -2,7 +2,7 @@ package spark.streaming.dstream import spark.{RDD, Partitioner} import spark.SparkContext._ -import spark.streaming.{DStream, Time} +import spark.streaming.{Duration, DStream, Time} private[streaming] class ShuffledDStream[K: ClassManifest, V: ClassManifest, C: ClassManifest]( @@ -15,7 +15,7 @@ class ShuffledDStream[K: ClassManifest, V: ClassManifest, C: ClassManifest]( override def dependencies = List(parent) - override def slideTime: Time = parent.slideTime + override def slideTime: Duration = parent.slideTime override def compute(validTime: Time): Option[RDD[(K,C)]] = { parent.getOrCompute(validTime) match { diff --git a/streaming/src/main/scala/spark/streaming/dstream/StateDStream.scala b/streaming/src/main/scala/spark/streaming/dstream/StateDStream.scala index 175b3060c1..ce4f486825 100644 --- a/streaming/src/main/scala/spark/streaming/dstream/StateDStream.scala +++ b/streaming/src/main/scala/spark/streaming/dstream/StateDStream.scala @@ -4,7 +4,7 @@ import spark.RDD import spark.Partitioner import spark.SparkContext._ import spark.storage.StorageLevel -import spark.streaming.{Time, DStream} +import spark.streaming.{Duration, Time, DStream} private[streaming] class StateDStream[K: ClassManifest, V: ClassManifest, S <: AnyRef : ClassManifest]( @@ -18,7 +18,7 @@ class StateDStream[K: ClassManifest, V: ClassManifest, S <: AnyRef : ClassManife override def dependencies = List(parent) - override def slideTime = parent.slideTime + override def slideTime: Duration = parent.slideTime override val mustCheckpoint = true diff --git a/streaming/src/main/scala/spark/streaming/dstream/TransformedDStream.scala b/streaming/src/main/scala/spark/streaming/dstream/TransformedDStream.scala index 0337579514..5a2c5bc0f0 100644 --- a/streaming/src/main/scala/spark/streaming/dstream/TransformedDStream.scala +++ b/streaming/src/main/scala/spark/streaming/dstream/TransformedDStream.scala @@ -1,7 +1,7 @@ package spark.streaming.dstream import spark.RDD -import spark.streaming.{DStream, Time} +import spark.streaming.{Duration, DStream, Time} private[streaming] class TransformedDStream[T: ClassManifest, U: ClassManifest] ( @@ -11,7 +11,7 @@ class TransformedDStream[T: ClassManifest, U: ClassManifest] ( override def dependencies = List(parent) - override def slideTime: Time = parent.slideTime + override def slideTime: Duration = parent.slideTime override def compute(validTime: Time): Option[RDD[U]] = { parent.getOrCompute(validTime).map(transformFunc(_, validTime)) diff --git a/streaming/src/main/scala/spark/streaming/dstream/UnionDStream.scala b/streaming/src/main/scala/spark/streaming/dstream/UnionDStream.scala index 
3bf4c2ecea..224a19842b 100644 --- a/streaming/src/main/scala/spark/streaming/dstream/UnionDStream.scala +++ b/streaming/src/main/scala/spark/streaming/dstream/UnionDStream.scala @@ -1,6 +1,6 @@ package spark.streaming.dstream -import spark.streaming.{DStream, Time} +import spark.streaming.{Duration, DStream, Time} import spark.RDD import collection.mutable.ArrayBuffer import spark.rdd.UnionRDD @@ -23,7 +23,7 @@ class UnionDStream[T: ClassManifest](parents: Array[DStream[T]]) override def dependencies = parents.toList - override def slideTime: Time = parents.head.slideTime + override def slideTime: Duration = parents.head.slideTime override def compute(validTime: Time): Option[RDD[T]] = { val rdds = new ArrayBuffer[RDD[T]]() diff --git a/streaming/src/main/scala/spark/streaming/dstream/WindowedDStream.scala b/streaming/src/main/scala/spark/streaming/dstream/WindowedDStream.scala index 7718794cbf..45689b25ce 100644 --- a/streaming/src/main/scala/spark/streaming/dstream/WindowedDStream.scala +++ b/streaming/src/main/scala/spark/streaming/dstream/WindowedDStream.scala @@ -3,13 +3,13 @@ package spark.streaming.dstream import spark.RDD import spark.rdd.UnionRDD import spark.storage.StorageLevel -import spark.streaming.{Interval, Time, DStream} +import spark.streaming.{Duration, Interval, Time, DStream} private[streaming] class WindowedDStream[T: ClassManifest]( parent: DStream[T], - _windowTime: Time, - _slideTime: Time) + _windowTime: Duration, + _slideTime: Duration) extends DStream[T](parent.ssc) { if (!_windowTime.isMultipleOf(parent.slideTime)) @@ -22,16 +22,16 @@ class WindowedDStream[T: ClassManifest]( parent.persist(StorageLevel.MEMORY_ONLY_SER) - def windowTime: Time = _windowTime + def windowTime: Duration = _windowTime override def dependencies = List(parent) - override def slideTime: Time = _slideTime + override def slideTime: Duration = _slideTime - override def parentRememberDuration: Time = rememberDuration + windowTime + override def parentRememberDuration: Duration = rememberDuration + windowTime override def compute(validTime: Time): Option[RDD[T]] = { - val currentWindow = Interval(validTime - windowTime + parent.slideTime, validTime) + val currentWindow = new Interval(validTime - windowTime + parent.slideTime, validTime) Some(new UnionRDD(ssc.sc, parent.slice(currentWindow))) } } From 365506fb038a76ff3810957f5bc5823f5f16af40 Mon Sep 17 00:00:00 2001 From: Tathagata Das Date: Wed, 9 Jan 2013 14:29:25 -0800 Subject: [PATCH 172/226] Changed variable name form ***Time to ***Duration to keep things consistent. 
--- .../scala/spark/streaming/Checkpoint.scala | 2 +- .../main/scala/spark/streaming/DStream.scala | 90 +++++++++---------- .../main/scala/spark/streaming/Duration.scala | 2 +- .../streaming/PairDStreamFunctions.scala | 70 +++++++-------- .../scala/spark/streaming/Scheduler.scala | 4 +- .../spark/streaming/StreamingContext.scala | 12 +-- .../src/main/scala/spark/streaming/Time.scala | 22 ++--- .../streaming/dstream/CoGroupedDStream.scala | 4 +- .../streaming/dstream/FilteredDStream.scala | 2 +- .../dstream/FlatMapValuedDStream.scala | 2 +- .../streaming/dstream/FlatMappedDStream.scala | 2 +- .../streaming/dstream/ForEachDStream.scala | 2 +- .../streaming/dstream/GlommedDStream.scala | 2 +- .../streaming/dstream/InputDStream.scala | 2 +- .../dstream/MapPartitionedDStream.scala | 2 +- .../streaming/dstream/MapValuedDStream.scala | 2 +- .../streaming/dstream/MappedDStream.scala | 2 +- .../dstream/ReducedWindowedDStream.scala | 34 +++---- .../streaming/dstream/ShuffledDStream.scala | 2 +- .../streaming/dstream/StateDStream.scala | 4 +- .../dstream/TransformedDStream.scala | 2 +- .../streaming/dstream/UnionDStream.scala | 4 +- .../streaming/dstream/WindowedDStream.scala | 24 ++--- .../streaming/BasicOperationsSuite.scala | 18 ++-- .../scala/spark/streaming/TestSuiteBase.scala | 2 +- .../streaming/WindowOperationsSuite.scala | 48 +++++----- 26 files changed, 176 insertions(+), 186 deletions(-) diff --git a/streaming/src/main/scala/spark/streaming/Checkpoint.scala b/streaming/src/main/scala/spark/streaming/Checkpoint.scala index a9c6e65d62..2f3adb39c2 100644 --- a/streaming/src/main/scala/spark/streaming/Checkpoint.scala +++ b/streaming/src/main/scala/spark/streaming/Checkpoint.scala @@ -17,7 +17,7 @@ class Checkpoint(@transient ssc: StreamingContext, val checkpointTime: Time) val jars = ssc.sc.jars val graph = ssc.graph val checkpointDir = ssc.checkpointDir - val checkpointInterval: Duration = ssc.checkpointInterval + val checkpointDuration: Duration = ssc.checkpointDuration def validate() { assert(master != null, "Checkpoint.master is null") diff --git a/streaming/src/main/scala/spark/streaming/DStream.scala b/streaming/src/main/scala/spark/streaming/DStream.scala index 7611598fde..c89fb7723e 100644 --- a/streaming/src/main/scala/spark/streaming/DStream.scala +++ b/streaming/src/main/scala/spark/streaming/DStream.scala @@ -47,7 +47,7 @@ abstract class DStream[T: ClassManifest] ( // ======================================================================= /** Time interval after which the DStream generates a RDD */ - def slideTime: Duration + def slideDuration: Duration /** List of parent DStreams on which this DStream depends on */ def dependencies: List[DStream[_]] @@ -74,7 +74,7 @@ abstract class DStream[T: ClassManifest] ( // Checkpoint details protected[streaming] val mustCheckpoint = false - protected[streaming] var checkpointInterval: Duration = null + protected[streaming] var checkpointDuration: Duration = null protected[streaming] var checkpointData = new DStreamCheckpointData(HashMap[Time, Any]()) // Reference to whole DStream graph @@ -114,7 +114,7 @@ abstract class DStream[T: ClassManifest] ( "Cannot change checkpoint interval of an DStream after streaming context has started") } persist() - checkpointInterval = interval + checkpointDuration = interval this } @@ -130,16 +130,16 @@ abstract class DStream[T: ClassManifest] ( } zeroTime = time - // Set the checkpoint interval to be slideTime or 10 seconds, which ever is larger - if (mustCheckpoint && checkpointInterval == null) { - 
checkpointInterval = slideTime.max(Seconds(10)) - logInfo("Checkpoint interval automatically set to " + checkpointInterval) + // Set the checkpoint interval to be slideDuration or 10 seconds, which ever is larger + if (mustCheckpoint && checkpointDuration == null) { + checkpointDuration = slideDuration.max(Seconds(10)) + logInfo("Checkpoint interval automatically set to " + checkpointDuration) } // Set the minimum value of the rememberDuration if not already set - var minRememberDuration = slideTime - if (checkpointInterval != null && minRememberDuration <= checkpointInterval) { - minRememberDuration = checkpointInterval * 2 // times 2 just to be sure that the latest checkpoint is not forgetten + var minRememberDuration = slideDuration + if (checkpointDuration != null && minRememberDuration <= checkpointDuration) { + minRememberDuration = checkpointDuration * 2 // times 2 just to be sure that the latest checkpoint is not forgetten } if (rememberDuration == null || rememberDuration < minRememberDuration) { rememberDuration = minRememberDuration @@ -153,37 +153,37 @@ abstract class DStream[T: ClassManifest] ( assert(rememberDuration != null, "Remember duration is set to null") assert( - !mustCheckpoint || checkpointInterval != null, + !mustCheckpoint || checkpointDuration != null, "The checkpoint interval for " + this.getClass.getSimpleName + " has not been set. " + " Please use DStream.checkpoint() to set the interval." ) assert( - checkpointInterval == null || checkpointInterval >= slideTime, + checkpointDuration == null || checkpointDuration >= slideDuration, "The checkpoint interval for " + this.getClass.getSimpleName + " has been set to " + - checkpointInterval + " which is lower than its slide time (" + slideTime + "). " + - "Please set it to at least " + slideTime + "." + checkpointDuration + " which is lower than its slide time (" + slideDuration + "). " + + "Please set it to at least " + slideDuration + "." ) assert( - checkpointInterval == null || checkpointInterval.isMultipleOf(slideTime), + checkpointDuration == null || checkpointDuration.isMultipleOf(slideDuration), "The checkpoint interval for " + this.getClass.getSimpleName + " has been set to " + - checkpointInterval + " which not a multiple of its slide time (" + slideTime + "). " + - "Please set it to a multiple " + slideTime + "." + checkpointDuration + " which not a multiple of its slide time (" + slideDuration + "). " + + "Please set it to a multiple " + slideDuration + "." ) assert( - checkpointInterval == null || storageLevel != StorageLevel.NONE, + checkpointDuration == null || storageLevel != StorageLevel.NONE, "" + this.getClass.getSimpleName + " has been marked for checkpointing but the storage " + "level has not been set to enable persisting. Please use DStream.persist() to set the " + "storage level to use memory for better checkpointing performance." ) assert( - checkpointInterval == null || rememberDuration > checkpointInterval, + checkpointDuration == null || rememberDuration > checkpointDuration, "The remember duration for " + this.getClass.getSimpleName + " has been set to " + rememberDuration + " which is not more than the checkpoint interval (" + - checkpointInterval + "). Please set it to higher than " + checkpointInterval + "." + checkpointDuration + "). Please set it to higher than " + checkpointDuration + "." 
) val metadataCleanerDelay = spark.util.MetadataCleaner.getDelaySeconds @@ -200,9 +200,9 @@ abstract class DStream[T: ClassManifest] ( dependencies.foreach(_.validate()) - logInfo("Slide time = " + slideTime) + logInfo("Slide time = " + slideDuration) logInfo("Storage level = " + storageLevel) - logInfo("Checkpoint interval = " + checkpointInterval) + logInfo("Checkpoint interval = " + checkpointDuration) logInfo("Remember duration = " + rememberDuration) logInfo("Initialized and validated " + this) } @@ -232,11 +232,11 @@ abstract class DStream[T: ClassManifest] ( dependencies.foreach(_.remember(parentRememberDuration)) } - /** This method checks whether the 'time' is valid wrt slideTime for generating RDD */ + /** This method checks whether the 'time' is valid wrt slideDuration for generating RDD */ protected def isTimeValid(time: Time): Boolean = { if (!isInitialized) { throw new Exception (this + " has not been initialized") - } else if (time <= zeroTime || ! (time - zeroTime).isMultipleOf(slideTime)) { + } else if (time <= zeroTime || ! (time - zeroTime).isMultipleOf(slideDuration)) { false } else { true @@ -266,7 +266,7 @@ abstract class DStream[T: ClassManifest] ( newRDD.persist(storageLevel) logInfo("Persisting RDD " + newRDD.id + " for time " + time + " to " + storageLevel + " at time " + time) } - if (checkpointInterval != null && (time - zeroTime).isMultipleOf(checkpointInterval)) { + if (checkpointDuration != null && (time - zeroTime).isMultipleOf(checkpointDuration)) { newRDD.checkpoint() logInfo("Marking RDD " + newRDD.id + " for time " + time + " for checkpointing at time " + time) } @@ -528,21 +528,21 @@ abstract class DStream[T: ClassManifest] ( /** * Return a new DStream which is computed based on windowed batches of this DStream. * The new DStream generates RDDs with the same interval as this DStream. - * @param windowTime width of the window; must be a multiple of this DStream's interval. + * @param windowDuration width of the window; must be a multiple of this DStream's interval. * @return */ - def window(windowTime: Duration): DStream[T] = window(windowTime, this.slideTime) + def window(windowDuration: Duration): DStream[T] = window(windowDuration, this.slideDuration) /** * Return a new DStream which is computed based on windowed batches of this DStream. - * @param windowTime duration (i.e., width) of the window; + * @param windowDuration duration (i.e., width) of the window; * must be a multiple of this DStream's interval - * @param slideTime sliding interval of the window (i.e., the interval after which + * @param slideDuration sliding interval of the window (i.e., the interval after which * the new DStream will generate RDDs); must be a multiple of this * DStream's interval */ - def window(windowTime: Duration, slideTime: Duration): DStream[T] = { - new WindowedDStream(this, windowTime, slideTime) + def window(windowDuration: Duration, slideDuration: Duration): DStream[T] = { + new WindowedDStream(this, windowDuration, slideDuration) } /** @@ -554,36 +554,36 @@ abstract class DStream[T: ClassManifest] ( /** * Returns a new DStream in which each RDD has a single element generated by reducing all - * elements in a window over this DStream. windowTime and slideTime are as defined in the - * window() operation. This is equivalent to window(windowTime, slideTime).reduce(reduceFunc) + * elements in a window over this DStream. windowDuration and slideDuration are as defined in the + * window() operation. 
This is equivalent to window(windowDuration, slideDuration).reduce(reduceFunc) */ - def reduceByWindow(reduceFunc: (T, T) => T, windowTime: Duration, slideTime: Duration): DStream[T] = { - this.window(windowTime, slideTime).reduce(reduceFunc) + def reduceByWindow(reduceFunc: (T, T) => T, windowDuration: Duration, slideDuration: Duration): DStream[T] = { + this.window(windowDuration, slideDuration).reduce(reduceFunc) } def reduceByWindow( reduceFunc: (T, T) => T, invReduceFunc: (T, T) => T, - windowTime: Duration, - slideTime: Duration + windowDuration: Duration, + slideDuration: Duration ): DStream[T] = { this.map(x => (1, x)) - .reduceByKeyAndWindow(reduceFunc, invReduceFunc, windowTime, slideTime, 1) + .reduceByKeyAndWindow(reduceFunc, invReduceFunc, windowDuration, slideDuration, 1) .map(_._2) } /** * Returns a new DStream in which each RDD has a single element generated by counting the number - * of elements in a window over this DStream. windowTime and slideTime are as defined in the - * window() operation. This is equivalent to window(windowTime, slideTime).count() + * of elements in a window over this DStream. windowDuration and slideDuration are as defined in the + * window() operation. This is equivalent to window(windowDuration, slideDuration).count() */ - def countByWindow(windowTime: Duration, slideTime: Duration): DStream[Int] = { - this.map(_ => 1).reduceByWindow(_ + _, _ - _, windowTime, slideTime) + def countByWindow(windowDuration: Duration, slideDuration: Duration): DStream[Int] = { + this.map(_ => 1).reduceByWindow(_ + _, _ - _, windowDuration, slideDuration) } /** * Returns a new DStream by unifying data of another DStream with this DStream. - * @param that Another DStream having the same interval (i.e., slideTime) as this DStream. + * @param that Another DStream having the same slideDuration as this DStream. 
*/ def union(that: DStream[T]): DStream[T] = new UnionDStream[T](Array(this, that)) @@ -599,13 +599,13 @@ abstract class DStream[T: ClassManifest] ( */ def slice(fromTime: Time, toTime: Time): Seq[RDD[T]] = { val rdds = new ArrayBuffer[RDD[T]]() - var time = toTime.floor(slideTime) + var time = toTime.floor(slideDuration) while (time >= zeroTime && time >= fromTime) { getOrCompute(time) match { case Some(rdd) => rdds += rdd case None => //throw new Exception("Could not get RDD for time " + time) } - time -= slideTime + time -= slideDuration } rdds.toSeq } diff --git a/streaming/src/main/scala/spark/streaming/Duration.scala b/streaming/src/main/scala/spark/streaming/Duration.scala index d2728d9dca..e4dc579a17 100644 --- a/streaming/src/main/scala/spark/streaming/Duration.scala +++ b/streaming/src/main/scala/spark/streaming/Duration.scala @@ -1,6 +1,6 @@ package spark.streaming -class Duration (private val millis: Long) { +case class Duration (private val millis: Long) { def < (that: Duration): Boolean = (this.millis < that.millis) diff --git a/streaming/src/main/scala/spark/streaming/PairDStreamFunctions.scala b/streaming/src/main/scala/spark/streaming/PairDStreamFunctions.scala index dd64064138..482d01300d 100644 --- a/streaming/src/main/scala/spark/streaming/PairDStreamFunctions.scala +++ b/streaming/src/main/scala/spark/streaming/PairDStreamFunctions.scala @@ -21,14 +21,10 @@ extends Serializable { def ssc = self.ssc - def defaultPartitioner(numPartitions: Int = self.ssc.sc.defaultParallelism) = { + private[streaming] def defaultPartitioner(numPartitions: Int = self.ssc.sc.defaultParallelism) = { new HashPartitioner(numPartitions) } - /* ---------------------------------- */ - /* DStream operations for key-value pairs */ - /* ---------------------------------- */ - def groupByKey(): DStream[(K, Seq[V])] = { groupByKey(defaultPartitioner()) } @@ -69,59 +65,59 @@ extends Serializable { self.map(x => (x._1, 1L)).reduceByKey((x: Long, y: Long) => x + y, numPartitions) } - def groupByKeyAndWindow(windowTime: Duration, slideTime: Duration): DStream[(K, Seq[V])] = { - groupByKeyAndWindow(windowTime, slideTime, defaultPartitioner()) + def groupByKeyAndWindow(windowDuration: Duration, slideDuration: Duration): DStream[(K, Seq[V])] = { + groupByKeyAndWindow(windowDuration, slideDuration, defaultPartitioner()) } def groupByKeyAndWindow( - windowTime: Duration, - slideTime: Duration, + windowDuration: Duration, + slideDuration: Duration, numPartitions: Int ): DStream[(K, Seq[V])] = { - groupByKeyAndWindow(windowTime, slideTime, defaultPartitioner(numPartitions)) + groupByKeyAndWindow(windowDuration, slideDuration, defaultPartitioner(numPartitions)) } def groupByKeyAndWindow( - windowTime: Duration, - slideTime: Duration, + windowDuration: Duration, + slideDuration: Duration, partitioner: Partitioner ): DStream[(K, Seq[V])] = { - self.window(windowTime, slideTime).groupByKey(partitioner) + self.window(windowDuration, slideDuration).groupByKey(partitioner) } def reduceByKeyAndWindow( reduceFunc: (V, V) => V, - windowTime: Duration + windowDuration: Duration ): DStream[(K, V)] = { - reduceByKeyAndWindow(reduceFunc, windowTime, self.slideTime, defaultPartitioner()) + reduceByKeyAndWindow(reduceFunc, windowDuration, self.slideDuration, defaultPartitioner()) } def reduceByKeyAndWindow( reduceFunc: (V, V) => V, - windowTime: Duration, - slideTime: Duration + windowDuration: Duration, + slideDuration: Duration ): DStream[(K, V)] = { - reduceByKeyAndWindow(reduceFunc, windowTime, slideTime, 
defaultPartitioner()) + reduceByKeyAndWindow(reduceFunc, windowDuration, slideDuration, defaultPartitioner()) } def reduceByKeyAndWindow( reduceFunc: (V, V) => V, - windowTime: Duration, - slideTime: Duration, + windowDuration: Duration, + slideDuration: Duration, numPartitions: Int ): DStream[(K, V)] = { - reduceByKeyAndWindow(reduceFunc, windowTime, slideTime, defaultPartitioner(numPartitions)) + reduceByKeyAndWindow(reduceFunc, windowDuration, slideDuration, defaultPartitioner(numPartitions)) } def reduceByKeyAndWindow( reduceFunc: (V, V) => V, - windowTime: Duration, - slideTime: Duration, + windowDuration: Duration, + slideDuration: Duration, partitioner: Partitioner ): DStream[(K, V)] = { val cleanedReduceFunc = ssc.sc.clean(reduceFunc) self.reduceByKey(cleanedReduceFunc, partitioner) - .window(windowTime, slideTime) + .window(windowDuration, slideDuration) .reduceByKey(cleanedReduceFunc, partitioner) } @@ -134,51 +130,51 @@ extends Serializable { def reduceByKeyAndWindow( reduceFunc: (V, V) => V, invReduceFunc: (V, V) => V, - windowTime: Duration, - slideTime: Duration + windowDuration: Duration, + slideDuration: Duration ): DStream[(K, V)] = { reduceByKeyAndWindow( - reduceFunc, invReduceFunc, windowTime, slideTime, defaultPartitioner()) + reduceFunc, invReduceFunc, windowDuration, slideDuration, defaultPartitioner()) } def reduceByKeyAndWindow( reduceFunc: (V, V) => V, invReduceFunc: (V, V) => V, - windowTime: Duration, - slideTime: Duration, + windowDuration: Duration, + slideDuration: Duration, numPartitions: Int ): DStream[(K, V)] = { reduceByKeyAndWindow( - reduceFunc, invReduceFunc, windowTime, slideTime, defaultPartitioner(numPartitions)) + reduceFunc, invReduceFunc, windowDuration, slideDuration, defaultPartitioner(numPartitions)) } def reduceByKeyAndWindow( reduceFunc: (V, V) => V, invReduceFunc: (V, V) => V, - windowTime: Duration, - slideTime: Duration, + windowDuration: Duration, + slideDuration: Duration, partitioner: Partitioner ): DStream[(K, V)] = { val cleanedReduceFunc = ssc.sc.clean(reduceFunc) val cleanedInvReduceFunc = ssc.sc.clean(invReduceFunc) new ReducedWindowedDStream[K, V]( - self, cleanedReduceFunc, cleanedInvReduceFunc, windowTime, slideTime, partitioner) + self, cleanedReduceFunc, cleanedInvReduceFunc, windowDuration, slideDuration, partitioner) } def countByKeyAndWindow( - windowTime: Duration, - slideTime: Duration, + windowDuration: Duration, + slideDuration: Duration, numPartitions: Int = self.ssc.sc.defaultParallelism ): DStream[(K, Long)] = { self.map(x => (x._1, 1L)).reduceByKeyAndWindow( (x: Long, y: Long) => x + y, (x: Long, y: Long) => x - y, - windowTime, - slideTime, + windowDuration, + slideDuration, numPartitions ) } diff --git a/streaming/src/main/scala/spark/streaming/Scheduler.scala b/streaming/src/main/scala/spark/streaming/Scheduler.scala index 10845e3a5e..c04ed37de8 100644 --- a/streaming/src/main/scala/spark/streaming/Scheduler.scala +++ b/streaming/src/main/scala/spark/streaming/Scheduler.scala @@ -14,7 +14,7 @@ class Scheduler(ssc: StreamingContext) extends Logging { val concurrentJobs = System.getProperty("spark.streaming.concurrentJobs", "1").toInt val jobManager = new JobManager(ssc, concurrentJobs) - val checkpointWriter = if (ssc.checkpointInterval != null && ssc.checkpointDir != null) { + val checkpointWriter = if (ssc.checkpointDuration != null && ssc.checkpointDir != null) { new CheckpointWriter(ssc.checkpointDir) } else { null @@ -65,7 +65,7 @@ class Scheduler(ssc: StreamingContext) extends Logging { } private def 
doCheckpoint(time: Time) { - if (ssc.checkpointInterval != null && (time - graph.zeroTime).isMultipleOf(ssc.checkpointInterval)) { + if (ssc.checkpointDuration != null && (time - graph.zeroTime).isMultipleOf(ssc.checkpointDuration)) { val startTime = System.currentTimeMillis() ssc.graph.updateCheckpointData(time) checkpointWriter.write(new Checkpoint(ssc, time)) diff --git a/streaming/src/main/scala/spark/streaming/StreamingContext.scala b/streaming/src/main/scala/spark/streaming/StreamingContext.scala index ee8314df3f..14500bdcb1 100644 --- a/streaming/src/main/scala/spark/streaming/StreamingContext.scala +++ b/streaming/src/main/scala/spark/streaming/StreamingContext.scala @@ -96,7 +96,7 @@ class StreamingContext private ( } } - protected[streaming] var checkpointInterval: Duration = if (isCheckpointPresent) cp_.checkpointInterval else null + protected[streaming] var checkpointDuration: Duration = if (isCheckpointPresent) cp_.checkpointDuration else null protected[streaming] var receiverJobThread: Thread = null protected[streaming] var scheduler: Scheduler = null @@ -121,10 +121,10 @@ class StreamingContext private ( if (directory != null) { sc.setCheckpointDir(StreamingContext.getSparkCheckpointDir(directory)) checkpointDir = directory - checkpointInterval = interval + checkpointDuration = interval } else { checkpointDir = null - checkpointInterval = null + checkpointDuration = null } } @@ -327,7 +327,7 @@ class StreamingContext private ( graph.validate() assert( - checkpointDir == null || checkpointInterval != null, + checkpointDir == null || checkpointDuration != null, "Checkpoint directory has been set, but the graph checkpointing interval has " + "not been set. Please use StreamingContext.checkpoint() to set the interval." ) @@ -337,8 +337,8 @@ class StreamingContext private ( * Starts the execution of the streams. */ def start() { - if (checkpointDir != null && checkpointInterval == null && graph != null) { - checkpointInterval = graph.batchDuration + if (checkpointDir != null && checkpointDuration == null && graph != null) { + checkpointDuration = graph.batchDuration } validate() diff --git a/streaming/src/main/scala/spark/streaming/Time.scala b/streaming/src/main/scala/spark/streaming/Time.scala index 069df82e52..5daeb761dd 100644 --- a/streaming/src/main/scala/spark/streaming/Time.scala +++ b/streaming/src/main/scala/spark/streaming/Time.scala @@ -1,14 +1,15 @@ package spark.streaming /** - * This is a simple class that represents time. Internally, it represents time as UTC. - * The recommended way to create instances of Time is to use helper objects - * [[spark.streaming.Milliseconds]], [[spark.streaming.Seconds]], and [[spark.streaming.Minutes]]. - * @param millis Time in UTC. + * This is a simple class that represents an absolute instant of time. + * Internally, it represents time as the difference, measured in milliseconds, between the current + * time and midnight, January 1, 1970 UTC. This is the same format as what is returned by + * System.currentTimeMillis. 
*/ +case class Time(private val millis: Long) { + + def milliseconds: Long = millis -class Time(private val millis: Long) { - def < (that: Time): Boolean = (this.millis < that.millis) def <= (that: Time): Boolean = (this.millis <= that.millis) @@ -38,11 +39,4 @@ class Time(private val millis: Long) { override def toString: String = (millis.toString + " ms") - def milliseconds: Long = millis -} - -/*private[streaming] object Time { - implicit def toTime(long: Long) = Time(long) -} -*/ - +} \ No newline at end of file diff --git a/streaming/src/main/scala/spark/streaming/dstream/CoGroupedDStream.scala b/streaming/src/main/scala/spark/streaming/dstream/CoGroupedDStream.scala index ca178fd384..ddb1bf6b28 100644 --- a/streaming/src/main/scala/spark/streaming/dstream/CoGroupedDStream.scala +++ b/streaming/src/main/scala/spark/streaming/dstream/CoGroupedDStream.scala @@ -18,13 +18,13 @@ class CoGroupedDStream[K : ClassManifest]( throw new IllegalArgumentException("Array of parents have different StreamingContexts") } - if (parents.map(_.slideTime).distinct.size > 1) { + if (parents.map(_.slideDuration).distinct.size > 1) { throw new IllegalArgumentException("Array of parents have different slide times") } override def dependencies = parents.toList - override def slideTime: Duration = parents.head.slideTime + override def slideDuration: Duration = parents.head.slideDuration override def compute(validTime: Time): Option[RDD[(K, Seq[Seq[_]])]] = { val part = partitioner diff --git a/streaming/src/main/scala/spark/streaming/dstream/FilteredDStream.scala b/streaming/src/main/scala/spark/streaming/dstream/FilteredDStream.scala index 76b9e58029..e993164f99 100644 --- a/streaming/src/main/scala/spark/streaming/dstream/FilteredDStream.scala +++ b/streaming/src/main/scala/spark/streaming/dstream/FilteredDStream.scala @@ -11,7 +11,7 @@ class FilteredDStream[T: ClassManifest]( override def dependencies = List(parent) - override def slideTime: Duration = parent.slideTime + override def slideDuration: Duration = parent.slideDuration override def compute(validTime: Time): Option[RDD[T]] = { parent.getOrCompute(validTime).map(_.filter(filterFunc)) diff --git a/streaming/src/main/scala/spark/streaming/dstream/FlatMapValuedDStream.scala b/streaming/src/main/scala/spark/streaming/dstream/FlatMapValuedDStream.scala index 28e9a456ac..cabd34f5f2 100644 --- a/streaming/src/main/scala/spark/streaming/dstream/FlatMapValuedDStream.scala +++ b/streaming/src/main/scala/spark/streaming/dstream/FlatMapValuedDStream.scala @@ -12,7 +12,7 @@ class FlatMapValuedDStream[K: ClassManifest, V: ClassManifest, U: ClassManifest] override def dependencies = List(parent) - override def slideTime: Duration = parent.slideTime + override def slideDuration: Duration = parent.slideDuration override def compute(validTime: Time): Option[RDD[(K, U)]] = { parent.getOrCompute(validTime).map(_.flatMapValues[U](flatMapValueFunc)) diff --git a/streaming/src/main/scala/spark/streaming/dstream/FlatMappedDStream.scala b/streaming/src/main/scala/spark/streaming/dstream/FlatMappedDStream.scala index ef305b66f1..a69af60589 100644 --- a/streaming/src/main/scala/spark/streaming/dstream/FlatMappedDStream.scala +++ b/streaming/src/main/scala/spark/streaming/dstream/FlatMappedDStream.scala @@ -11,7 +11,7 @@ class FlatMappedDStream[T: ClassManifest, U: ClassManifest]( override def dependencies = List(parent) - override def slideTime: Duration = parent.slideTime + override def slideDuration: Duration = parent.slideDuration override def compute(validTime: Time): 
Option[RDD[U]] = { parent.getOrCompute(validTime).map(_.flatMap(flatMapFunc)) diff --git a/streaming/src/main/scala/spark/streaming/dstream/ForEachDStream.scala b/streaming/src/main/scala/spark/streaming/dstream/ForEachDStream.scala index f8af0a38a7..ee69ea5177 100644 --- a/streaming/src/main/scala/spark/streaming/dstream/ForEachDStream.scala +++ b/streaming/src/main/scala/spark/streaming/dstream/ForEachDStream.scala @@ -11,7 +11,7 @@ class ForEachDStream[T: ClassManifest] ( override def dependencies = List(parent) - override def slideTime: Duration = parent.slideTime + override def slideDuration: Duration = parent.slideDuration override def compute(validTime: Time): Option[RDD[Unit]] = None diff --git a/streaming/src/main/scala/spark/streaming/dstream/GlommedDStream.scala b/streaming/src/main/scala/spark/streaming/dstream/GlommedDStream.scala index 19cccea735..b589cbd4d5 100644 --- a/streaming/src/main/scala/spark/streaming/dstream/GlommedDStream.scala +++ b/streaming/src/main/scala/spark/streaming/dstream/GlommedDStream.scala @@ -9,7 +9,7 @@ class GlommedDStream[T: ClassManifest](parent: DStream[T]) override def dependencies = List(parent) - override def slideTime: Duration = parent.slideTime + override def slideDuration: Duration = parent.slideDuration override def compute(validTime: Time): Option[RDD[Array[T]]] = { parent.getOrCompute(validTime).map(_.glom()) diff --git a/streaming/src/main/scala/spark/streaming/dstream/InputDStream.scala b/streaming/src/main/scala/spark/streaming/dstream/InputDStream.scala index 50f0f45796..980ca5177e 100644 --- a/streaming/src/main/scala/spark/streaming/dstream/InputDStream.scala +++ b/streaming/src/main/scala/spark/streaming/dstream/InputDStream.scala @@ -7,7 +7,7 @@ abstract class InputDStream[T: ClassManifest] (@transient ssc_ : StreamingContex override def dependencies = List() - override def slideTime: Duration = { + override def slideDuration: Duration = { if (ssc == null) throw new Exception("ssc is null") if (ssc.graph.batchDuration == null) throw new Exception("batchDuration is null") ssc.graph.batchDuration diff --git a/streaming/src/main/scala/spark/streaming/dstream/MapPartitionedDStream.scala b/streaming/src/main/scala/spark/streaming/dstream/MapPartitionedDStream.scala index e9ca668aa6..848afecfad 100644 --- a/streaming/src/main/scala/spark/streaming/dstream/MapPartitionedDStream.scala +++ b/streaming/src/main/scala/spark/streaming/dstream/MapPartitionedDStream.scala @@ -12,7 +12,7 @@ class MapPartitionedDStream[T: ClassManifest, U: ClassManifest]( override def dependencies = List(parent) - override def slideTime: Duration = parent.slideTime + override def slideDuration: Duration = parent.slideDuration override def compute(validTime: Time): Option[RDD[U]] = { parent.getOrCompute(validTime).map(_.mapPartitions[U](mapPartFunc, preservePartitioning)) diff --git a/streaming/src/main/scala/spark/streaming/dstream/MapValuedDStream.scala b/streaming/src/main/scala/spark/streaming/dstream/MapValuedDStream.scala index ebc7d0698b..6055aa6a05 100644 --- a/streaming/src/main/scala/spark/streaming/dstream/MapValuedDStream.scala +++ b/streaming/src/main/scala/spark/streaming/dstream/MapValuedDStream.scala @@ -12,7 +12,7 @@ class MapValuedDStream[K: ClassManifest, V: ClassManifest, U: ClassManifest]( override def dependencies = List(parent) - override def slideTime: Duration = parent.slideTime + override def slideDuration: Duration = parent.slideDuration override def compute(validTime: Time): Option[RDD[(K, U)]] = { 
parent.getOrCompute(validTime).map(_.mapValues[U](mapValueFunc)) diff --git a/streaming/src/main/scala/spark/streaming/dstream/MappedDStream.scala b/streaming/src/main/scala/spark/streaming/dstream/MappedDStream.scala index 3af8e7ab88..20818a0cab 100644 --- a/streaming/src/main/scala/spark/streaming/dstream/MappedDStream.scala +++ b/streaming/src/main/scala/spark/streaming/dstream/MappedDStream.scala @@ -11,7 +11,7 @@ class MappedDStream[T: ClassManifest, U: ClassManifest] ( override def dependencies = List(parent) - override def slideTime: Duration = parent.slideTime + override def slideDuration: Duration = parent.slideDuration override def compute(validTime: Time): Option[RDD[U]] = { parent.getOrCompute(validTime).map(_.map[U](mapFunc)) diff --git a/streaming/src/main/scala/spark/streaming/dstream/ReducedWindowedDStream.scala b/streaming/src/main/scala/spark/streaming/dstream/ReducedWindowedDStream.scala index a685a778ce..733d5c4a25 100644 --- a/streaming/src/main/scala/spark/streaming/dstream/ReducedWindowedDStream.scala +++ b/streaming/src/main/scala/spark/streaming/dstream/ReducedWindowedDStream.scala @@ -16,19 +16,19 @@ class ReducedWindowedDStream[K: ClassManifest, V: ClassManifest]( parent: DStream[(K, V)], reduceFunc: (V, V) => V, invReduceFunc: (V, V) => V, - _windowTime: Duration, - _slideTime: Duration, + _windowDuration: Duration, + _slideDuration: Duration, partitioner: Partitioner ) extends DStream[(K,V)](parent.ssc) { - assert(_windowTime.isMultipleOf(parent.slideTime), - "The window duration of ReducedWindowedDStream (" + _slideTime + ") " + - "must be multiple of the slide duration of parent DStream (" + parent.slideTime + ")" + assert(_windowDuration.isMultipleOf(parent.slideDuration), + "The window duration of ReducedWindowedDStream (" + _slideDuration + ") " + + "must be multiple of the slide duration of parent DStream (" + parent.slideDuration + ")" ) - assert(_slideTime.isMultipleOf(parent.slideTime), - "The slide duration of ReducedWindowedDStream (" + _slideTime + ") " + - "must be multiple of the slide duration of parent DStream (" + parent.slideTime + ")" + assert(_slideDuration.isMultipleOf(parent.slideDuration), + "The slide duration of ReducedWindowedDStream (" + _slideDuration + ") " + + "must be multiple of the slide duration of parent DStream (" + parent.slideDuration + ")" ) // Reduce each batch of data using reduceByKey which will be further reduced by window @@ -39,15 +39,15 @@ class ReducedWindowedDStream[K: ClassManifest, V: ClassManifest]( super.persist(StorageLevel.MEMORY_ONLY_SER) reducedStream.persist(StorageLevel.MEMORY_ONLY_SER) - def windowTime: Duration = _windowTime + def windowDuration: Duration = _windowDuration override def dependencies = List(reducedStream) - override def slideTime: Duration = _slideTime + override def slideDuration: Duration = _slideDuration override val mustCheckpoint = true - override def parentRememberDuration: Duration = rememberDuration + windowTime + override def parentRememberDuration: Duration = rememberDuration + windowDuration override def persist(storageLevel: StorageLevel): DStream[(K,V)] = { super.persist(storageLevel) @@ -66,11 +66,11 @@ class ReducedWindowedDStream[K: ClassManifest, V: ClassManifest]( val invReduceF = invReduceFunc val currentTime = validTime - val currentWindow = new Interval(currentTime - windowTime + parent.slideTime, currentTime) - val previousWindow = currentWindow - slideTime + val currentWindow = new Interval(currentTime - windowDuration + parent.slideDuration, currentTime) + val 
previousWindow = currentWindow - slideDuration - logDebug("Window time = " + windowTime) - logDebug("Slide time = " + slideTime) + logDebug("Window time = " + windowDuration) + logDebug("Slide time = " + slideDuration) logDebug("ZeroTime = " + zeroTime) logDebug("Current window = " + currentWindow) logDebug("Previous window = " + previousWindow) @@ -87,11 +87,11 @@ class ReducedWindowedDStream[K: ClassManifest, V: ClassManifest]( // // Get the RDDs of the reduced values in "old time steps" - val oldRDDs = reducedStream.slice(previousWindow.beginTime, currentWindow.beginTime - parent.slideTime) + val oldRDDs = reducedStream.slice(previousWindow.beginTime, currentWindow.beginTime - parent.slideDuration) logDebug("# old RDDs = " + oldRDDs.size) // Get the RDDs of the reduced values in "new time steps" - val newRDDs = reducedStream.slice(previousWindow.endTime + parent.slideTime, currentWindow.endTime) + val newRDDs = reducedStream.slice(previousWindow.endTime + parent.slideDuration, currentWindow.endTime) logDebug("# new RDDs = " + newRDDs.size) // Get the RDD of the reduced value of the previous window diff --git a/streaming/src/main/scala/spark/streaming/dstream/ShuffledDStream.scala b/streaming/src/main/scala/spark/streaming/dstream/ShuffledDStream.scala index 7612804b96..1f9548bfb8 100644 --- a/streaming/src/main/scala/spark/streaming/dstream/ShuffledDStream.scala +++ b/streaming/src/main/scala/spark/streaming/dstream/ShuffledDStream.scala @@ -15,7 +15,7 @@ class ShuffledDStream[K: ClassManifest, V: ClassManifest, C: ClassManifest]( override def dependencies = List(parent) - override def slideTime: Duration = parent.slideTime + override def slideDuration: Duration = parent.slideDuration override def compute(validTime: Time): Option[RDD[(K,C)]] = { parent.getOrCompute(validTime) match { diff --git a/streaming/src/main/scala/spark/streaming/dstream/StateDStream.scala b/streaming/src/main/scala/spark/streaming/dstream/StateDStream.scala index ce4f486825..a1ec2f5454 100644 --- a/streaming/src/main/scala/spark/streaming/dstream/StateDStream.scala +++ b/streaming/src/main/scala/spark/streaming/dstream/StateDStream.scala @@ -18,14 +18,14 @@ class StateDStream[K: ClassManifest, V: ClassManifest, S <: AnyRef : ClassManife override def dependencies = List(parent) - override def slideTime: Duration = parent.slideTime + override def slideDuration: Duration = parent.slideDuration override val mustCheckpoint = true override def compute(validTime: Time): Option[RDD[(K, S)]] = { // Try to get the previous state RDD - getOrCompute(validTime - slideTime) match { + getOrCompute(validTime - slideDuration) match { case Some(prevStateRDD) => { // If previous state RDD exists diff --git a/streaming/src/main/scala/spark/streaming/dstream/TransformedDStream.scala b/streaming/src/main/scala/spark/streaming/dstream/TransformedDStream.scala index 5a2c5bc0f0..99660d9dee 100644 --- a/streaming/src/main/scala/spark/streaming/dstream/TransformedDStream.scala +++ b/streaming/src/main/scala/spark/streaming/dstream/TransformedDStream.scala @@ -11,7 +11,7 @@ class TransformedDStream[T: ClassManifest, U: ClassManifest] ( override def dependencies = List(parent) - override def slideTime: Duration = parent.slideTime + override def slideDuration: Duration = parent.slideDuration override def compute(validTime: Time): Option[RDD[U]] = { parent.getOrCompute(validTime).map(transformFunc(_, validTime)) diff --git a/streaming/src/main/scala/spark/streaming/dstream/UnionDStream.scala 
b/streaming/src/main/scala/spark/streaming/dstream/UnionDStream.scala index 224a19842b..00bad5da34 100644 --- a/streaming/src/main/scala/spark/streaming/dstream/UnionDStream.scala +++ b/streaming/src/main/scala/spark/streaming/dstream/UnionDStream.scala @@ -17,13 +17,13 @@ class UnionDStream[T: ClassManifest](parents: Array[DStream[T]]) throw new IllegalArgumentException("Array of parents have different StreamingContexts") } - if (parents.map(_.slideTime).distinct.size > 1) { + if (parents.map(_.slideDuration).distinct.size > 1) { throw new IllegalArgumentException("Array of parents have different slide times") } override def dependencies = parents.toList - override def slideTime: Duration = parents.head.slideTime + override def slideDuration: Duration = parents.head.slideDuration override def compute(validTime: Time): Option[RDD[T]] = { val rdds = new ArrayBuffer[RDD[T]]() diff --git a/streaming/src/main/scala/spark/streaming/dstream/WindowedDStream.scala b/streaming/src/main/scala/spark/streaming/dstream/WindowedDStream.scala index 45689b25ce..cbf0c88108 100644 --- a/streaming/src/main/scala/spark/streaming/dstream/WindowedDStream.scala +++ b/streaming/src/main/scala/spark/streaming/dstream/WindowedDStream.scala @@ -8,30 +8,30 @@ import spark.streaming.{Duration, Interval, Time, DStream} private[streaming] class WindowedDStream[T: ClassManifest]( parent: DStream[T], - _windowTime: Duration, - _slideTime: Duration) + _windowDuration: Duration, + _slideDuration: Duration) extends DStream[T](parent.ssc) { - if (!_windowTime.isMultipleOf(parent.slideTime)) - throw new Exception("The window duration of WindowedDStream (" + _slideTime + ") " + - "must be multiple of the slide duration of parent DStream (" + parent.slideTime + ")") + if (!_windowDuration.isMultipleOf(parent.slideDuration)) + throw new Exception("The window duration of WindowedDStream (" + _slideDuration + ") " + + "must be multiple of the slide duration of parent DStream (" + parent.slideDuration + ")") - if (!_slideTime.isMultipleOf(parent.slideTime)) - throw new Exception("The slide duration of WindowedDStream (" + _slideTime + ") " + - "must be multiple of the slide duration of parent DStream (" + parent.slideTime + ")") + if (!_slideDuration.isMultipleOf(parent.slideDuration)) + throw new Exception("The slide duration of WindowedDStream (" + _slideDuration + ") " + + "must be multiple of the slide duration of parent DStream (" + parent.slideDuration + ")") parent.persist(StorageLevel.MEMORY_ONLY_SER) - def windowTime: Duration = _windowTime + def windowDuration: Duration = _windowDuration override def dependencies = List(parent) - override def slideTime: Duration = _slideTime + override def slideDuration: Duration = _slideDuration - override def parentRememberDuration: Duration = rememberDuration + windowTime + override def parentRememberDuration: Duration = rememberDuration + windowDuration override def compute(validTime: Time): Option[RDD[T]] = { - val currentWindow = new Interval(validTime - windowTime + parent.slideTime, validTime) + val currentWindow = new Interval(validTime - windowDuration + parent.slideDuration, validTime) Some(new UnionRDD(ssc.sc, parent.slice(currentWindow))) } } diff --git a/streaming/src/test/scala/spark/streaming/BasicOperationsSuite.scala b/streaming/src/test/scala/spark/streaming/BasicOperationsSuite.scala index dc38ef4912..f9e03c607d 100644 --- a/streaming/src/test/scala/spark/streaming/BasicOperationsSuite.scala +++ b/streaming/src/test/scala/spark/streaming/BasicOperationsSuite.scala @@ 
-196,18 +196,18 @@ class BasicOperationsSuite extends TestSuiteBase { // MappedStream should remember till 7 seconds: 10, 9, 8, 7, 6, 5, 4, 3, 2 // WindowedStream2 - assert(windowedStream2.generatedRDDs.contains(Seconds(10))) - assert(windowedStream2.generatedRDDs.contains(Seconds(8))) - assert(!windowedStream2.generatedRDDs.contains(Seconds(6))) + assert(windowedStream2.generatedRDDs.contains(Time(10000))) + assert(windowedStream2.generatedRDDs.contains(Time(8000))) + assert(!windowedStream2.generatedRDDs.contains(Time(6000))) // WindowedStream1 - assert(windowedStream1.generatedRDDs.contains(Seconds(10))) - assert(windowedStream1.generatedRDDs.contains(Seconds(4))) - assert(!windowedStream1.generatedRDDs.contains(Seconds(3))) + assert(windowedStream1.generatedRDDs.contains(Time(10000))) + assert(windowedStream1.generatedRDDs.contains(Time(4000))) + assert(!windowedStream1.generatedRDDs.contains(Time(3000))) // MappedStream - assert(mappedStream.generatedRDDs.contains(Seconds(10))) - assert(mappedStream.generatedRDDs.contains(Seconds(2))) - assert(!mappedStream.generatedRDDs.contains(Seconds(1))) + assert(mappedStream.generatedRDDs.contains(Time(10000))) + assert(mappedStream.generatedRDDs.contains(Time(2000))) + assert(!mappedStream.generatedRDDs.contains(Time(1000))) } } diff --git a/streaming/src/test/scala/spark/streaming/TestSuiteBase.scala b/streaming/src/test/scala/spark/streaming/TestSuiteBase.scala index 28bdd53c3c..a76f61d4ad 100644 --- a/streaming/src/test/scala/spark/streaming/TestSuiteBase.scala +++ b/streaming/src/test/scala/spark/streaming/TestSuiteBase.scala @@ -26,7 +26,7 @@ class TestInputStream[T: ClassManifest](ssc_ : StreamingContext, input: Seq[Seq[ def compute(validTime: Time): Option[RDD[T]] = { logInfo("Computing RDD for time " + validTime) - val index = ((validTime - zeroTime) / slideTime - 1).toInt + val index = ((validTime - zeroTime) / slideDuration - 1).toInt val selectedInput = if (index < input.size) input(index) else Seq[T]() val rdd = ssc.sc.makeRDD(selectedInput, numPartitions) logInfo("Created RDD " + rdd.id + " with " + selectedInput) diff --git a/streaming/src/test/scala/spark/streaming/WindowOperationsSuite.scala b/streaming/src/test/scala/spark/streaming/WindowOperationsSuite.scala index 4bc5229465..fa117cfcf0 100644 --- a/streaming/src/test/scala/spark/streaming/WindowOperationsSuite.scala +++ b/streaming/src/test/scala/spark/streaming/WindowOperationsSuite.scala @@ -207,11 +207,11 @@ class WindowOperationsSuite extends TestSuiteBase { test("groupByKeyAndWindow") { val input = bigInput val expectedOutput = bigGroupByOutput.map(_.map(x => (x._1, x._2.toSet))) - val windowTime = Seconds(2) - val slideTime = Seconds(1) - val numBatches = expectedOutput.size * (slideTime / batchDuration).toInt + val windowDuration = Seconds(2) + val slideDuration = Seconds(1) + val numBatches = expectedOutput.size * (slideDuration / batchDuration).toInt val operation = (s: DStream[(String, Int)]) => { - s.groupByKeyAndWindow(windowTime, slideTime) + s.groupByKeyAndWindow(windowDuration, slideDuration) .map(x => (x._1, x._2.toSet)) .persist() } @@ -221,21 +221,21 @@ class WindowOperationsSuite extends TestSuiteBase { test("countByWindow") { val input = Seq(Seq(1), Seq(1), Seq(1, 2), Seq(0), Seq(), Seq() ) val expectedOutput = Seq( Seq(1), Seq(2), Seq(3), Seq(3), Seq(1), Seq(0)) - val windowTime = Seconds(2) - val slideTime = Seconds(1) - val numBatches = expectedOutput.size * (slideTime / batchDuration).toInt - val operation = (s: DStream[Int]) => 
s.countByWindow(windowTime, slideTime) + val windowDuration = Seconds(2) + val slideDuration = Seconds(1) + val numBatches = expectedOutput.size * (slideDuration / batchDuration).toInt + val operation = (s: DStream[Int]) => s.countByWindow(windowDuration, slideDuration) testOperation(input, operation, expectedOutput, numBatches, true) } test("countByKeyAndWindow") { val input = Seq(Seq(("a", 1)), Seq(("b", 1), ("b", 2)), Seq(("a", 10), ("b", 20))) val expectedOutput = Seq( Seq(("a", 1)), Seq(("a", 1), ("b", 2)), Seq(("a", 1), ("b", 3))) - val windowTime = Seconds(2) - val slideTime = Seconds(1) - val numBatches = expectedOutput.size * (slideTime / batchDuration).toInt + val windowDuration = Seconds(2) + val slideDuration = Seconds(1) + val numBatches = expectedOutput.size * (slideDuration / batchDuration).toInt val operation = (s: DStream[(String, Int)]) => { - s.countByKeyAndWindow(windowTime, slideTime).map(x => (x._1, x._2.toInt)) + s.countByKeyAndWindow(windowDuration, slideDuration).map(x => (x._1, x._2.toInt)) } testOperation(input, operation, expectedOutput, numBatches, true) } @@ -247,12 +247,12 @@ class WindowOperationsSuite extends TestSuiteBase { name: String, input: Seq[Seq[Int]], expectedOutput: Seq[Seq[Int]], - windowTime: Time = Seconds(2), - slideTime: Time = Seconds(1) + windowDuration: Duration = Seconds(2), + slideDuration: Duration = Seconds(1) ) { test("window - " + name) { - val numBatches = expectedOutput.size * (slideTime / batchDuration).toInt - val operation = (s: DStream[Int]) => s.window(windowTime, slideTime) + val numBatches = expectedOutput.size * (slideDuration / batchDuration).toInt + val operation = (s: DStream[Int]) => s.window(windowDuration, slideDuration) testOperation(input, operation, expectedOutput, numBatches, true) } } @@ -261,13 +261,13 @@ class WindowOperationsSuite extends TestSuiteBase { name: String, input: Seq[Seq[(String, Int)]], expectedOutput: Seq[Seq[(String, Int)]], - windowTime: Time = Seconds(2), - slideTime: Time = Seconds(1) + windowDuration: Duration = Seconds(2), + slideDuration: Duration = Seconds(1) ) { test("reduceByKeyAndWindow - " + name) { - val numBatches = expectedOutput.size * (slideTime / batchDuration).toInt + val numBatches = expectedOutput.size * (slideDuration / batchDuration).toInt val operation = (s: DStream[(String, Int)]) => { - s.reduceByKeyAndWindow(_ + _, windowTime, slideTime).persist() + s.reduceByKeyAndWindow(_ + _, windowDuration, slideDuration).persist() } testOperation(input, operation, expectedOutput, numBatches, true) } @@ -277,13 +277,13 @@ class WindowOperationsSuite extends TestSuiteBase { name: String, input: Seq[Seq[(String, Int)]], expectedOutput: Seq[Seq[(String, Int)]], - windowTime: Time = Seconds(2), - slideTime: Time = Seconds(1) + windowDuration: Duration = Seconds(2), + slideDuration: Duration = Seconds(1) ) { test("reduceByKeyAndWindowInv - " + name) { - val numBatches = expectedOutput.size * (slideTime / batchDuration).toInt + val numBatches = expectedOutput.size * (slideDuration / batchDuration).toInt val operation = (s: DStream[(String, Int)]) => { - s.reduceByKeyAndWindow(_ + _, _ - _, windowTime, slideTime) + s.reduceByKeyAndWindow(_ + _, _ - _, windowDuration, slideDuration) .persist() .checkpoint(Seconds(100)) // Large value to avoid effect of RDD checkpointing } From 22445fbea9ed1575e49a1f9bb2251d98a57b9e4e Mon Sep 17 00:00:00 2001 From: Ryan LeCompte Date: Fri, 11 Jan 2013 13:30:49 -0800 Subject: [PATCH 173/226] attempt to sleep for more accurate time period, minor cleanup --- 
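For context on the change below: instead of polling with a fixed 5 ms sleep, the new waitToWrite computes how far ahead of the target rate the writer is and sleeps for that long. An illustrative calculation of that arithmetic (the numbers here are made up and do not appear in the patch): at a target of 10,000 bytes/sec, with 30,000 bytes written since the last sync and 1 second elapsed, the writer should sleep about 2 seconds.

    import java.util.concurrent.TimeUnit._

    // Hypothetical values, mirroring the sleep-time formula introduced below.
    val bytesPerSec = 10000L
    val bytesWrittenSinceSync = 30000L
    val elapsedSecs = 1L
    val sleepTimeMs = MILLISECONDS.convert(bytesWrittenSinceSync / bytesPerSec - elapsedSecs, SECONDS)
    assert(sleepTimeMs == 2000)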
.../spark/util/RateLimitedOutputStream.scala | 19 +++++++++++-------- 1 file changed, 11 insertions(+), 8 deletions(-) diff --git a/core/src/main/scala/spark/util/RateLimitedOutputStream.scala b/core/src/main/scala/spark/util/RateLimitedOutputStream.scala index d11ed163ce..3050213709 100644 --- a/core/src/main/scala/spark/util/RateLimitedOutputStream.scala +++ b/core/src/main/scala/spark/util/RateLimitedOutputStream.scala @@ -1,8 +1,10 @@ package spark.util import java.io.OutputStream +import java.util.concurrent.TimeUnit._ class RateLimitedOutputStream(out: OutputStream, bytesPerSec: Int) extends OutputStream { + val SyncIntervalNs = NANOSECONDS.convert(10, SECONDS) var lastSyncTime = System.nanoTime() var bytesWrittenSinceSync: Long = 0 @@ -28,20 +30,21 @@ class RateLimitedOutputStream(out: OutputStream, bytesPerSec: Int) extends Outpu def waitToWrite(numBytes: Int) { while (true) { - val now = System.nanoTime() - val elapsed = math.max(now - lastSyncTime, 1) - val rate = bytesWrittenSinceSync.toDouble / (elapsed / 1.0e9) + val now = System.nanoTime + val elapsedSecs = SECONDS.convert(max(now - lastSyncTime, 1), NANOSECONDS) + val rate = bytesWrittenSinceSync.toDouble / elapsedSecs if (rate < bytesPerSec) { // It's okay to write; just update some variables and return bytesWrittenSinceSync += numBytes - if (now > lastSyncTime + (1e10).toLong) { - // Ten seconds have passed since lastSyncTime; let's resync + if (now > lastSyncTime + SyncIntervalNs) { + // Sync interval has passed; let's resync lastSyncTime = now bytesWrittenSinceSync = numBytes } - return } else { - Thread.sleep(5) + // Calculate how much time we should sleep to bring ourselves to the desired rate. + val sleepTime = MILLISECONDS.convert((bytesWrittenSinceSync / bytesPerSec - elapsedSecs), SECONDS) + if (sleepTime > 0) Thread.sleep(sleepTime) } } } @@ -53,4 +56,4 @@ class RateLimitedOutputStream(out: OutputStream, bytesPerSec: Int) extends Outpu override def close() { out.close() } -} \ No newline at end of file +} From ff10b3aa0970cc7224adc6bc73d99a7ffa30219f Mon Sep 17 00:00:00 2001 From: Ryan LeCompte Date: Fri, 11 Jan 2013 21:03:57 -0800 Subject: [PATCH 174/226] add missing return --- core/src/main/scala/spark/util/RateLimitedOutputStream.scala | 1 + 1 file changed, 1 insertion(+) diff --git a/core/src/main/scala/spark/util/RateLimitedOutputStream.scala b/core/src/main/scala/spark/util/RateLimitedOutputStream.scala index 3050213709..ed459c2544 100644 --- a/core/src/main/scala/spark/util/RateLimitedOutputStream.scala +++ b/core/src/main/scala/spark/util/RateLimitedOutputStream.scala @@ -41,6 +41,7 @@ class RateLimitedOutputStream(out: OutputStream, bytesPerSec: Int) extends Outpu lastSyncTime = now bytesWrittenSinceSync = numBytes } + return } else { // Calculate how much time we should sleep to bring ourselves to the desired rate. 
val sleepTime = MILLISECONDS.convert((bytesWrittenSinceSync / bytesPerSec - elapsedSecs), SECONDS) From 0cfea7a2ec467717fbe110f9b15163bea2719575 Mon Sep 17 00:00:00 2001 From: Ryan LeCompte Date: Fri, 11 Jan 2013 23:48:07 -0800 Subject: [PATCH 175/226] add unit test --- .../spark/util/RateLimitedOutputStream.scala | 2 +- .../util/RateLimitedOutputStreamSuite.scala | 22 +++++++++++++++++++ 2 files changed, 23 insertions(+), 1 deletion(-) create mode 100644 core/src/test/scala/spark/util/RateLimitedOutputStreamSuite.scala diff --git a/core/src/main/scala/spark/util/RateLimitedOutputStream.scala b/core/src/main/scala/spark/util/RateLimitedOutputStream.scala index ed459c2544..16db7549b2 100644 --- a/core/src/main/scala/spark/util/RateLimitedOutputStream.scala +++ b/core/src/main/scala/spark/util/RateLimitedOutputStream.scala @@ -31,7 +31,7 @@ class RateLimitedOutputStream(out: OutputStream, bytesPerSec: Int) extends Outpu def waitToWrite(numBytes: Int) { while (true) { val now = System.nanoTime - val elapsedSecs = SECONDS.convert(max(now - lastSyncTime, 1), NANOSECONDS) + val elapsedSecs = SECONDS.convert(math.max(now - lastSyncTime, 1), NANOSECONDS) val rate = bytesWrittenSinceSync.toDouble / elapsedSecs if (rate < bytesPerSec) { // It's okay to write; just update some variables and return diff --git a/core/src/test/scala/spark/util/RateLimitedOutputStreamSuite.scala b/core/src/test/scala/spark/util/RateLimitedOutputStreamSuite.scala new file mode 100644 index 0000000000..1dc45e0433 --- /dev/null +++ b/core/src/test/scala/spark/util/RateLimitedOutputStreamSuite.scala @@ -0,0 +1,22 @@ +package spark.util + +import org.scalatest.FunSuite +import java.io.ByteArrayOutputStream +import java.util.concurrent.TimeUnit._ + +class RateLimitedOutputStreamSuite extends FunSuite { + + private def benchmark[U](f: => U): Long = { + val start = System.nanoTime + f + System.nanoTime - start + } + + test("write") { + val underlying = new ByteArrayOutputStream + val data = "X" * 1000 + val stream = new RateLimitedOutputStream(underlying, 100) + val elapsedNs = benchmark { stream.write(data.getBytes("UTF-8")) } + assert(SECONDS.convert(elapsedNs, NANOSECONDS) == 4) + } +} From 2c77eeebb66a3d1337d45b5001be2b48724f9fd5 Mon Sep 17 00:00:00 2001 From: Ryan LeCompte Date: Sat, 12 Jan 2013 00:13:45 -0800 Subject: [PATCH 176/226] correct test params --- .../test/scala/spark/util/RateLimitedOutputStreamSuite.scala | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/core/src/test/scala/spark/util/RateLimitedOutputStreamSuite.scala b/core/src/test/scala/spark/util/RateLimitedOutputStreamSuite.scala index 1dc45e0433..b392075482 100644 --- a/core/src/test/scala/spark/util/RateLimitedOutputStreamSuite.scala +++ b/core/src/test/scala/spark/util/RateLimitedOutputStreamSuite.scala @@ -14,8 +14,8 @@ class RateLimitedOutputStreamSuite extends FunSuite { test("write") { val underlying = new ByteArrayOutputStream - val data = "X" * 1000 - val stream = new RateLimitedOutputStream(underlying, 100) + val data = "X" * 41000 + val stream = new RateLimitedOutputStream(underlying, 10000) val elapsedNs = benchmark { stream.write(data.getBytes("UTF-8")) } assert(SECONDS.convert(elapsedNs, NANOSECONDS) == 4) } From ea20ae661888d871f70d5ed322cfe924c5a31dba Mon Sep 17 00:00:00 2001 From: Ryan LeCompte Date: Sat, 12 Jan 2013 09:18:00 -0800 Subject: [PATCH 177/226] add one extra test --- .../src/test/scala/spark/util/RateLimitedOutputStreamSuite.scala | 1 + 1 file changed, 1 insertion(+) diff --git 
a/core/src/test/scala/spark/util/RateLimitedOutputStreamSuite.scala b/core/src/test/scala/spark/util/RateLimitedOutputStreamSuite.scala index b392075482..794063fb6d 100644 --- a/core/src/test/scala/spark/util/RateLimitedOutputStreamSuite.scala +++ b/core/src/test/scala/spark/util/RateLimitedOutputStreamSuite.scala @@ -18,5 +18,6 @@ class RateLimitedOutputStreamSuite extends FunSuite { val stream = new RateLimitedOutputStream(underlying, 10000) val elapsedNs = benchmark { stream.write(data.getBytes("UTF-8")) } assert(SECONDS.convert(elapsedNs, NANOSECONDS) == 4) + assert(underlying.toString("UTF-8") == data) } } From addff2c466d4b76043e612d4d28ab9de7f003298 Mon Sep 17 00:00:00 2001 From: Ryan LeCompte Date: Sat, 12 Jan 2013 09:57:29 -0800 Subject: [PATCH 178/226] add comment --- core/src/main/scala/spark/util/RateLimitedOutputStream.scala | 1 + 1 file changed, 1 insertion(+) diff --git a/core/src/main/scala/spark/util/RateLimitedOutputStream.scala b/core/src/main/scala/spark/util/RateLimitedOutputStream.scala index 16db7549b2..ed3d2b66bb 100644 --- a/core/src/main/scala/spark/util/RateLimitedOutputStream.scala +++ b/core/src/main/scala/spark/util/RateLimitedOutputStream.scala @@ -44,6 +44,7 @@ class RateLimitedOutputStream(out: OutputStream, bytesPerSec: Int) extends Outpu return } else { // Calculate how much time we should sleep to bring ourselves to the desired rate. + // Based on throttler in Kafka (https://github.com/kafka-dev/kafka/blob/master/core/src/main/scala/kafka/utils/Throttler.scala) val sleepTime = MILLISECONDS.convert((bytesWrittenSinceSync / bytesPerSec - elapsedSecs), SECONDS) if (sleepTime > 0) Thread.sleep(sleepTime) } From 2305a2c1d91273a93ee6b571b0cd4bcaa1b2969d Mon Sep 17 00:00:00 2001 From: Ryan LeCompte Date: Sun, 13 Jan 2013 10:01:56 -0800 Subject: [PATCH 179/226] more code cleanup --- .../spark/util/RateLimitedOutputStream.scala | 63 ++++++++++--------- 1 file changed, 32 insertions(+), 31 deletions(-) diff --git a/core/src/main/scala/spark/util/RateLimitedOutputStream.scala b/core/src/main/scala/spark/util/RateLimitedOutputStream.scala index ed3d2b66bb..10790a9eee 100644 --- a/core/src/main/scala/spark/util/RateLimitedOutputStream.scala +++ b/core/src/main/scala/spark/util/RateLimitedOutputStream.scala @@ -1,11 +1,14 @@ package spark.util +import scala.annotation.tailrec + import java.io.OutputStream import java.util.concurrent.TimeUnit._ class RateLimitedOutputStream(out: OutputStream, bytesPerSec: Int) extends OutputStream { val SyncIntervalNs = NANOSECONDS.convert(10, SECONDS) - var lastSyncTime = System.nanoTime() + val ChunkSize = 8192 + var lastSyncTime = System.nanoTime var bytesWrittenSinceSync: Long = 0 override def write(b: Int) { @@ -17,37 +20,13 @@ class RateLimitedOutputStream(out: OutputStream, bytesPerSec: Int) extends Outpu write(bytes, 0, bytes.length) } - override def write(bytes: Array[Byte], offset: Int, length: Int) { - val CHUNK_SIZE = 8192 - var pos = 0 - while (pos < length) { - val writeSize = math.min(length - pos, CHUNK_SIZE) + @tailrec + override final def write(bytes: Array[Byte], offset: Int, length: Int) { + val writeSize = math.min(length - offset, ChunkSize) + if (writeSize > 0) { waitToWrite(writeSize) - out.write(bytes, offset + pos, writeSize) - pos += writeSize - } - } - - def waitToWrite(numBytes: Int) { - while (true) { - val now = System.nanoTime - val elapsedSecs = SECONDS.convert(math.max(now - lastSyncTime, 1), NANOSECONDS) - val rate = bytesWrittenSinceSync.toDouble / elapsedSecs - if (rate < bytesPerSec) { - // It's 
okay to write; just update some variables and return - bytesWrittenSinceSync += numBytes - if (now > lastSyncTime + SyncIntervalNs) { - // Sync interval has passed; let's resync - lastSyncTime = now - bytesWrittenSinceSync = numBytes - } - return - } else { - // Calculate how much time we should sleep to bring ourselves to the desired rate. - // Based on throttler in Kafka (https://github.com/kafka-dev/kafka/blob/master/core/src/main/scala/kafka/utils/Throttler.scala) - val sleepTime = MILLISECONDS.convert((bytesWrittenSinceSync / bytesPerSec - elapsedSecs), SECONDS) - if (sleepTime > 0) Thread.sleep(sleepTime) - } + out.write(bytes, offset, writeSize) + write(bytes, offset + writeSize, length) } } @@ -58,4 +37,26 @@ class RateLimitedOutputStream(out: OutputStream, bytesPerSec: Int) extends Outpu override def close() { out.close() } + + @tailrec + private def waitToWrite(numBytes: Int) { + val now = System.nanoTime + val elapsedSecs = SECONDS.convert(math.max(now - lastSyncTime, 1), NANOSECONDS) + val rate = bytesWrittenSinceSync.toDouble / elapsedSecs + if (rate < bytesPerSec) { + // It's okay to write; just update some variables and return + bytesWrittenSinceSync += numBytes + if (now > lastSyncTime + SyncIntervalNs) { + // Sync interval has passed; let's resync + lastSyncTime = now + bytesWrittenSinceSync = numBytes + } + } else { + // Calculate how much time we should sleep to bring ourselves to the desired rate. + // Based on throttler in Kafka (https://github.com/kafka-dev/kafka/blob/master/core/src/main/scala/kafka/utils/Throttler.scala) + val sleepTime = MILLISECONDS.convert((bytesWrittenSinceSync / bytesPerSec - elapsedSecs), SECONDS) + if (sleepTime > 0) Thread.sleep(sleepTime) + waitToWrite(numBytes) + } + } } From c31931af7eb01fbe2bb276bb6f428248128832b0 Mon Sep 17 00:00:00 2001 From: Ryan LeCompte Date: Sun, 13 Jan 2013 10:39:47 -0800 Subject: [PATCH 180/226] switch to uppercase constants --- .../main/scala/spark/util/RateLimitedOutputStream.scala | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/core/src/main/scala/spark/util/RateLimitedOutputStream.scala b/core/src/main/scala/spark/util/RateLimitedOutputStream.scala index 10790a9eee..e3f00ea8c7 100644 --- a/core/src/main/scala/spark/util/RateLimitedOutputStream.scala +++ b/core/src/main/scala/spark/util/RateLimitedOutputStream.scala @@ -6,8 +6,8 @@ import java.io.OutputStream import java.util.concurrent.TimeUnit._ class RateLimitedOutputStream(out: OutputStream, bytesPerSec: Int) extends OutputStream { - val SyncIntervalNs = NANOSECONDS.convert(10, SECONDS) - val ChunkSize = 8192 + val SYNC_INTERVAL = NANOSECONDS.convert(10, SECONDS) + val CHUNK_SIZE = 8192 var lastSyncTime = System.nanoTime var bytesWrittenSinceSync: Long = 0 @@ -22,7 +22,7 @@ class RateLimitedOutputStream(out: OutputStream, bytesPerSec: Int) extends Outpu @tailrec override final def write(bytes: Array[Byte], offset: Int, length: Int) { - val writeSize = math.min(length - offset, ChunkSize) + val writeSize = math.min(length - offset, CHUNK_SIZE) if (writeSize > 0) { waitToWrite(writeSize) out.write(bytes, offset, writeSize) @@ -46,7 +46,7 @@ class RateLimitedOutputStream(out: OutputStream, bytesPerSec: Int) extends Outpu if (rate < bytesPerSec) { // It's okay to write; just update some variables and return bytesWrittenSinceSync += numBytes - if (now > lastSyncTime + SyncIntervalNs) { + if (now > lastSyncTime + SYNC_INTERVAL) { // Sync interval has passed; let's resync lastSyncTime = now bytesWrittenSinceSync = numBytes From 
0a2e33334125cb3ae5e54f8333ea5608779399fc Mon Sep 17 00:00:00 2001 From: Tathagata Das Date: Sun, 13 Jan 2013 16:18:39 -0800 Subject: [PATCH 181/226] Removed stream id from the constructor of NetworkReceiver to make it easier for PluggableNetworkInputDStream. --- .../twitter/TwitterInputDStream.scala | 15 +++---- .../spark/streaming/NetworkInputTracker.scala | 34 +++++++++++---- .../streaming/dstream/FlumeInputDStream.scala | 5 +-- .../streaming/dstream/KafkaInputDStream.scala | 6 +-- .../dstream/NetworkInputDStream.scala | 42 ++++++++++++++----- .../streaming/dstream/RawInputDStream.scala | 6 +-- .../dstream/SocketInputDStream.scala | 5 +-- 7 files changed, 76 insertions(+), 37 deletions(-) diff --git a/examples/src/main/scala/spark/streaming/examples/twitter/TwitterInputDStream.scala b/examples/src/main/scala/spark/streaming/examples/twitter/TwitterInputDStream.scala index c7e4855f3b..99ed4cdc1c 100644 --- a/examples/src/main/scala/spark/streaming/examples/twitter/TwitterInputDStream.scala +++ b/examples/src/main/scala/spark/streaming/examples/twitter/TwitterInputDStream.scala @@ -23,16 +23,17 @@ class TwitterInputDStream( ) extends NetworkInputDStream[Status](ssc_) { override def createReceiver(): NetworkReceiver[Status] = { - new TwitterReceiver(id, username, password, filters, storageLevel) + new TwitterReceiver(username, password, filters, storageLevel) } } -class TwitterReceiver(streamId: Int, - username: String, - password: String, - filters: Seq[String], - storageLevel: StorageLevel - ) extends NetworkReceiver[Status](streamId) { +class TwitterReceiver( + username: String, + password: String, + filters: Seq[String], + storageLevel: StorageLevel + ) extends NetworkReceiver[Status] { + var twitterStream: TwitterStream = _ lazy val blockGenerator = new BlockGenerator(storageLevel) diff --git a/streaming/src/main/scala/spark/streaming/NetworkInputTracker.scala b/streaming/src/main/scala/spark/streaming/NetworkInputTracker.scala index a6ab44271f..e4152f3a61 100644 --- a/streaming/src/main/scala/spark/streaming/NetworkInputTracker.scala +++ b/streaming/src/main/scala/spark/streaming/NetworkInputTracker.scala @@ -18,7 +18,10 @@ private[streaming] case class RegisterReceiver(streamId: Int, receiverActor: Act private[streaming] case class AddBlocks(streamId: Int, blockIds: Seq[String], metadata: Any) extends NetworkInputTrackerMessage private[streaming] case class DeregisterReceiver(streamId: Int, msg: String) extends NetworkInputTrackerMessage - +/** + * This class manages the execution of the receivers of NetworkInputDStreams. + */ +private[streaming] class NetworkInputTracker( @transient ssc: StreamingContext, @transient networkInputStreams: Array[NetworkInputDStream[_]]) @@ -32,16 +35,20 @@ class NetworkInputTracker( var currentTime: Time = null + /** Start the actor and receiver execution thread. */ def start() { ssc.env.actorSystem.actorOf(Props(new NetworkInputTrackerActor), "NetworkInputTracker") receiverExecutor.start() } + /** Stop the receiver execution thread. */ def stop() { + // TODO: stop the actor as well receiverExecutor.interrupt() receiverExecutor.stopReceivers() } + /** Return all the blocks received from a receiver. */ def getBlockIds(receiverId: Int, time: Time): Array[String] = synchronized { val queue = receivedBlockIds.synchronized { receivedBlockIds.getOrElse(receiverId, new Queue[String]()) @@ -53,6 +60,7 @@ class NetworkInputTracker( result.toArray } + /** Actor to receive messages from the receivers. 
*/ private class NetworkInputTrackerActor extends Actor { def receive = { case RegisterReceiver(streamId, receiverActor) => { @@ -83,7 +91,8 @@ class NetworkInputTracker( } } } - + + /** This thread class runs all the receivers on the cluster. */ class ReceiverExecutor extends Thread { val env = ssc.env @@ -97,13 +106,22 @@ class NetworkInputTracker( stopReceivers() } } - + + /** + * Get the receivers from the NetworkInputDStreams, distributes them to the + * worker nodes as a parallel collection, and runs them. + */ def startReceivers() { - val receivers = networkInputStreams.map(_.createReceiver()) + val receivers = networkInputStreams.map(nis => { + val rcvr = nis.createReceiver() + rcvr.setStreamId(nis.id) + rcvr + }) // Right now, we only honor preferences if all receivers have them val hasLocationPreferences = receivers.map(_.getLocationPreference().isDefined).reduce(_ && _) + // Create the parallel collection of receivers to distributed them on the worker nodes val tempRDD = if (hasLocationPreferences) { val receiversWithPreferences = receivers.map(r => (r, Seq(r.getLocationPreference().toString))) @@ -113,21 +131,21 @@ class NetworkInputTracker( ssc.sc.makeRDD(receivers, receivers.size) } + // Function to start the receiver on the worker node val startReceiver = (iterator: Iterator[NetworkReceiver[_]]) => { if (!iterator.hasNext) { throw new Exception("Could not start receiver as details not found.") } iterator.next().start() } + // Distribute the receivers and start them ssc.sc.runJob(tempRDD, startReceiver) } + /** Stops the receivers. */ def stopReceivers() { - //implicit val ec = env.actorSystem.dispatcher + // Signal the receivers to stop receiverInfo.values.foreach(_ ! StopReceiver) - //val listOfFutures = receiverInfo.values.map(_.ask(StopReceiver)(timeout)).toList - //val futureOfList = Future.sequence(listOfFutures) - //Await.result(futureOfList, timeout) } } } diff --git a/streaming/src/main/scala/spark/streaming/dstream/FlumeInputDStream.scala b/streaming/src/main/scala/spark/streaming/dstream/FlumeInputDStream.scala index ca70e72e56..efc7058480 100644 --- a/streaming/src/main/scala/spark/streaming/dstream/FlumeInputDStream.scala +++ b/streaming/src/main/scala/spark/streaming/dstream/FlumeInputDStream.scala @@ -26,7 +26,7 @@ class FlumeInputDStream[T: ClassManifest]( ) extends NetworkInputDStream[SparkFlumeEvent](ssc_) { override def createReceiver(): NetworkReceiver[SparkFlumeEvent] = { - new FlumeReceiver(id, host, port, storageLevel) + new FlumeReceiver(host, port, storageLevel) } } @@ -112,11 +112,10 @@ class FlumeEventServer(receiver : FlumeReceiver) extends AvroSourceProtocol { * Flume Avro interface.*/ private[streaming] class FlumeReceiver( - streamId: Int, host: String, port: Int, storageLevel: StorageLevel - ) extends NetworkReceiver[SparkFlumeEvent](streamId) { + ) extends NetworkReceiver[SparkFlumeEvent] { lazy val blockGenerator = new BlockGenerator(storageLevel) diff --git a/streaming/src/main/scala/spark/streaming/dstream/KafkaInputDStream.scala b/streaming/src/main/scala/spark/streaming/dstream/KafkaInputDStream.scala index 25988a2ce7..2b4740bdf7 100644 --- a/streaming/src/main/scala/spark/streaming/dstream/KafkaInputDStream.scala +++ b/streaming/src/main/scala/spark/streaming/dstream/KafkaInputDStream.scala @@ -96,15 +96,15 @@ class KafkaInputDStream[T: ClassManifest]( } */ def createReceiver(): NetworkReceiver[T] = { - new KafkaReceiver(id, host, port, groupId, topics, initialOffsets, storageLevel) + new KafkaReceiver(host, port, groupId, topics, 
initialOffsets, storageLevel) .asInstanceOf[NetworkReceiver[T]] } } private[streaming] -class KafkaReceiver(streamId: Int, host: String, port: Int, groupId: String, +class KafkaReceiver(host: String, port: Int, groupId: String, topics: Map[String, Int], initialOffsets: Map[KafkaPartitionKey, Long], - storageLevel: StorageLevel) extends NetworkReceiver[Any](streamId) { + storageLevel: StorageLevel) extends NetworkReceiver[Any] { // Timeout for establishing a connection to Zookeper in ms. val ZK_TIMEOUT = 10000 diff --git a/streaming/src/main/scala/spark/streaming/dstream/NetworkInputDStream.scala b/streaming/src/main/scala/spark/streaming/dstream/NetworkInputDStream.scala index 18e62a0e33..aa6be95f30 100644 --- a/streaming/src/main/scala/spark/streaming/dstream/NetworkInputDStream.scala +++ b/streaming/src/main/scala/spark/streaming/dstream/NetworkInputDStream.scala @@ -17,6 +17,15 @@ import akka.util.duration._ import spark.streaming.util.{RecurringTimer, SystemClock} import java.util.concurrent.ArrayBlockingQueue +/** + * Abstract class for defining any InputDStream that has to start a receiver on worker + * nodes to receive external data. Specific implementations of NetworkInputDStream must + * define the createReceiver() function that creates the receiver object of type + * [[spark.streaming.dstream.NetworkReceiver]] that will be sent to the workers to receive + * data. + * @param ssc_ Streaming context that will execute this input stream + * @tparam T Class type of the object of this stream + */ abstract class NetworkInputDStream[T: ClassManifest](@transient ssc_ : StreamingContext) extends InputDStream[T](ssc_) { @@ -25,7 +34,7 @@ abstract class NetworkInputDStream[T: ClassManifest](@transient ssc_ : Streaming val id = ssc.getNewNetworkStreamId() /** - * This method creates the receiver object that will be sent to the workers + * Creates the receiver object that will be sent to the worker nodes * to receive data. This method needs to defined by any specific implementation * of a NetworkInputDStream. */ @@ -48,7 +57,11 @@ private[streaming] case class StopReceiver(msg: String) extends NetworkReceiverM private[streaming] case class ReportBlock(blockId: String, metadata: Any) extends NetworkReceiverMessage private[streaming] case class ReportError(msg: String) extends NetworkReceiverMessage -abstract class NetworkReceiver[T: ClassManifest](val streamId: Int) extends Serializable with Logging { +/** + * Abstract class of a receiver that can be run on worker nodes to receive external data. See + * [[spark.streaming.dstream.NetworkInputDStream]] for an explanation. + */ +abstract class NetworkReceiver[T: ClassManifest]() extends Serializable with Logging { initLogging() @@ -59,17 +72,22 @@ abstract class NetworkReceiver[T: ClassManifest](val streamId: Int) extends Seri lazy protected val receivingThread = Thread.currentThread() - /** This method will be called to start receiving data. */ + protected var streamId: Int = -1 + + /** + * This method will be called to start receiving data. All your receiver + * starting code should be implemented by defining this function. + */ protected def onStart() /** This method will be called to stop receiving data. */ protected def onStop() - /** This method conveys a placement preference (hostname) for this receiver. */ + /** Conveys a placement preference (hostname) for this receiver. */ def getLocationPreference() : Option[String] = None /** - * This method starts the receiver. First is accesses all the lazy members to + * Starts the receiver. 
First is accesses all the lazy members to * materialize them. Then it calls the user-defined onStart() method to start * other threads, etc required to receiver the data. */ @@ -92,7 +110,7 @@ abstract class NetworkReceiver[T: ClassManifest](val streamId: Int) extends Seri } /** - * This method stops the receiver. First it interrupts the main receiving thread, + * Stops the receiver. First it interrupts the main receiving thread, * that is, the thread that called receiver.start(). Then it calls the user-defined * onStop() method to stop other threads and/or do cleanup. */ @@ -103,7 +121,7 @@ abstract class NetworkReceiver[T: ClassManifest](val streamId: Int) extends Seri } /** - * This method stops the receiver and reports to exception to the tracker. + * Stops the receiver and reports to exception to the tracker. * This should be called whenever an exception has happened on any thread * of the receiver. */ @@ -115,7 +133,7 @@ abstract class NetworkReceiver[T: ClassManifest](val streamId: Int) extends Seri /** - * This method pushes a block (as iterator of values) into the block manager. + * Pushes a block (as iterator of values) into the block manager. */ def pushBlock(blockId: String, iterator: Iterator[T], metadata: Any, level: StorageLevel) { val buffer = new ArrayBuffer[T] ++ iterator @@ -125,7 +143,7 @@ abstract class NetworkReceiver[T: ClassManifest](val streamId: Int) extends Seri } /** - * This method pushes a block (as bytes) into the block manager. + * Pushes a block (as bytes) into the block manager. */ def pushBlock(blockId: String, bytes: ByteBuffer, metadata: Any, level: StorageLevel) { env.blockManager.putBytes(blockId, bytes, level) @@ -157,6 +175,10 @@ abstract class NetworkReceiver[T: ClassManifest](val streamId: Int) extends Seri } } + protected[streaming] def setStreamId(id: Int) { + streamId = id + } + /** * Batches objects created by a [[spark.streaming.NetworkReceiver]] and puts them into * appropriately named blocks at regular intervals. 
This class starts two threads, @@ -202,7 +224,7 @@ abstract class NetworkReceiver[T: ClassManifest](val streamId: Int) extends Seri val newBlockBuffer = currentBuffer currentBuffer = new ArrayBuffer[T] if (newBlockBuffer.size > 0) { - val blockId = "input-" + NetworkReceiver.this.streamId + "- " + (time - blockInterval) + val blockId = "input-" + NetworkReceiver.this.streamId + "-" + (time - blockInterval) val newBlock = createBlock(blockId, newBlockBuffer.toIterator) blocksForPushing.add(newBlock) } diff --git a/streaming/src/main/scala/spark/streaming/dstream/RawInputDStream.scala b/streaming/src/main/scala/spark/streaming/dstream/RawInputDStream.scala index aa2f31cea8..290fab1ce0 100644 --- a/streaming/src/main/scala/spark/streaming/dstream/RawInputDStream.scala +++ b/streaming/src/main/scala/spark/streaming/dstream/RawInputDStream.scala @@ -26,13 +26,13 @@ class RawInputDStream[T: ClassManifest]( ) extends NetworkInputDStream[T](ssc_ ) with Logging { def createReceiver(): NetworkReceiver[T] = { - new RawNetworkReceiver(id, host, port, storageLevel).asInstanceOf[NetworkReceiver[T]] + new RawNetworkReceiver(host, port, storageLevel).asInstanceOf[NetworkReceiver[T]] } } private[streaming] -class RawNetworkReceiver(streamId: Int, host: String, port: Int, storageLevel: StorageLevel) - extends NetworkReceiver[Any](streamId) { +class RawNetworkReceiver(host: String, port: Int, storageLevel: StorageLevel) + extends NetworkReceiver[Any] { var blockPushingThread: Thread = null diff --git a/streaming/src/main/scala/spark/streaming/dstream/SocketInputDStream.scala b/streaming/src/main/scala/spark/streaming/dstream/SocketInputDStream.scala index 8e4b20ea4c..d42027092b 100644 --- a/streaming/src/main/scala/spark/streaming/dstream/SocketInputDStream.scala +++ b/streaming/src/main/scala/spark/streaming/dstream/SocketInputDStream.scala @@ -16,18 +16,17 @@ class SocketInputDStream[T: ClassManifest]( ) extends NetworkInputDStream[T](ssc_) { def createReceiver(): NetworkReceiver[T] = { - new SocketReceiver(id, host, port, bytesToObjects, storageLevel) + new SocketReceiver(host, port, bytesToObjects, storageLevel) } } private[streaming] class SocketReceiver[T: ClassManifest]( - streamId: Int, host: String, port: Int, bytesToObjects: InputStream => Iterator[T], storageLevel: StorageLevel - ) extends NetworkReceiver[T](streamId) { + ) extends NetworkReceiver[T] { lazy protected val blockGenerator = new BlockGenerator(storageLevel) From 0dbd411a562396e024c513936fde46b0d2f6d59d Mon Sep 17 00:00:00 2001 From: Tathagata Das Date: Sun, 13 Jan 2013 21:08:35 -0800 Subject: [PATCH 182/226] Added documentation for PairDStreamFunctions. --- .../main/scala/spark/PairRDDFunctions.scala | 6 +- docs/streaming-programming-guide.md | 45 +-- .../main/scala/spark/streaming/DStream.scala | 35 ++- .../streaming/PairDStreamFunctions.scala | 293 +++++++++++++++++- .../spark/streaming/util/RawTextHelper.scala | 2 +- 5 files changed, 331 insertions(+), 50 deletions(-) diff --git a/core/src/main/scala/spark/PairRDDFunctions.scala b/core/src/main/scala/spark/PairRDDFunctions.scala index 07ae2d647c..d95b66ad78 100644 --- a/core/src/main/scala/spark/PairRDDFunctions.scala +++ b/core/src/main/scala/spark/PairRDDFunctions.scala @@ -199,9 +199,9 @@ class PairRDDFunctions[K: ClassManifest, V: ClassManifest]( } /** - * Merge the values for each key using an associative reduce function. This will also perform - * the merging locally on each mapper before sending results to a reducer, similarly to a - * "combiner" in MapReduce. 
+ * Return an RDD containing all pairs of elements with matching keys in `this` and `other`. Each + * pair of elements will be returned as a (k, (v1, v2)) tuple, where (k, v1) is in `this` and + * (k, v2) is in `other`. Uses the given Partitioner to partition the output RDD. */ def join[W](other: RDD[(K, W)], partitioner: Partitioner): RDD[(K, (V, W))] = { this.cogroup(other, partitioner).flatMapValues { diff --git a/docs/streaming-programming-guide.md b/docs/streaming-programming-guide.md index 05a88ce7bd..b6da7af654 100644 --- a/docs/streaming-programming-guide.md +++ b/docs/streaming-programming-guide.md @@ -43,7 +43,7 @@ A complete list of input sources is available in the [StreamingContext API docum # DStream Operations -Once an input stream has been created, you can transform it using _stream operators_. Most of these operators return new DStreams which you can further transform. Eventually, you'll need to call an _output operator_, which forces evaluation of the stream by writing data out to an external source. +Once an input DStream has been created, you can transform it using _DStream operators_. Most of these operators return new DStreams which you can further transform. Eventually, you'll need to call an _output operator_, which forces evaluation of the DStream by writing data out to an external source. ## Transformations @@ -53,11 +53,11 @@ DStreams support many of the transformations available on normal Spark RDD's:
      Transformation    Meaning
       map(func)
-      Return a new stream formed by passing each element of the source through a function func.
+      Returns a new DStream formed by passing each element of the source through a function func.
       filter(func)
-      Return a new stream formed by selecting those elements of the source on which func returns true.
+      Returns a new stream formed by selecting those elements of the source on which func returns true.
      flatMap(func)
       cogroup(otherStream, [numTasks])
-      When called on streams of type (K, V) and (K, W), returns a stream of (K, Seq[V], Seq[W]) tuples. This operation is also called groupWith.
+      When called on DStream of type (K, V) and (K, W), returns a DStream of (K, Seq[V], Seq[W]) tuples.
       reduce(func)
-      Create a new single-element stream by aggregating the elements of the stream using a function func (which takes two arguments and returns one). The function should be associative so that it can be computed correctly in parallel.
+      Returns a new DStream of single-element RDDs by aggregating the elements of the stream using a function func (which takes two arguments and returns one). The function should be associative so that it can be computed correctly in parallel.
      transform(func) Returns a new DStream by applying func (a RDD-to-RDD function) to every RDD of the stream. This can be used to do arbitrary RDD operations on the DStream.
      -Spark Streaming features windowed computations, which allow you to report statistics over a sliding window of data. All window functions take a windowTime, which represents the width of the window and a slideTime, which represents the frequency during which the window is calculated. +Spark Streaming features windowed computations, which allow you to report statistics over a sliding window of data. All window functions take a windowDuration, which represents the width of the window and a slideTime, which represents the frequency during which the window is calculated. - - + - - + - - + -
      Transformation    Meaning
-      window(windowTime, slideTime)
-      Return a new stream which is computed based on windowed batches of the source stream. windowTime is the width of the window and slideTime is the frequency during which the window is calculated. Both times must be multiples of the batch interval.
+      window(windowDuration, slideTime)
+      Return a new stream which is computed based on windowed batches of the source stream. windowDuration is the width of the window and slideTime is the frequency during which the window is calculated. Both times must be multiples of the batch interval.
-      countByWindow(windowTime, slideTime)
-      Return a sliding count of elements in the stream. windowTime and slideTime are exactly as defined in window().
+      countByWindow(windowDuration, slideTime)
+      Return a sliding count of elements in the stream. windowDuration and slideDuration are exactly as defined in window().
-      reduceByWindow(func, windowTime, slideTime)
-      Return a new single-element stream, created by aggregating elements in the stream over a sliding interval using func. The function should be associative so that it can be computed correctly in parallel. windowTime and slideTime are exactly as defined in window().
+      reduceByWindow(func, windowDuration, slideDuration)
+      Return a new single-element stream, created by aggregating elements in the stream over a sliding interval using func. The function should be associative so that it can be computed correctly in parallel. windowDuration and slideDuration are exactly as defined in window().
-      groupByKeyAndWindow(windowTime, slideTime, [numTasks])
+      groupByKeyAndWindow(windowDuration, slideDuration, [numTasks])
       When called on a stream of (K, V) pairs, returns a stream of (K, Seq[V]) pairs over a sliding window.
-Note: By default, this uses only 8 parallel tasks to do the grouping. You can pass an optional numTasks argument to set a different number of tasks. windowTime and slideTime are exactly as defined in window().
+Note: By default, this uses only 8 parallel tasks to do the grouping. You can pass an optional numTasks argument to set a different number of tasks. windowDuration and slideDuration are exactly as defined in window().
       reduceByKeyAndWindow(func, [numTasks])
       When called on a stream of (K, V) pairs, returns a stream of (K, V) pairs where the values for each key are aggregated using the given reduce function over batches within a sliding window. Like in groupByKeyAndWindow, the number of reduce tasks is configurable through an optional second argument.
-      windowTime and slideTime are exactly as defined in window().
+      windowDuration and slideDuration are exactly as defined in window().
       countByKeyAndWindow([numTasks])
       When called on a stream of (K, V) pairs, returns a stream of (K, Int) pairs where the values for each key are the count within a sliding window. Like in countByKeyAndWindow, the number of reduce tasks is configurable through an optional second argument.
-      windowTime and slideTime are exactly as defined in window().
+      windowDuration and slideDuration are exactly as defined in window().
      +A complete list of DStream operations is available in the API documentation of [DStream](api/streaming/index.html#spark.streaming.DStream) and [PairDStreamFunctions](api/streaming/index.html#spark.streaming.PairDStreamFunctions). ## Output Operations When an output operator is called, it triggers the computation of a stream. Currently the following output operators are defined: @@ -144,7 +149,7 @@ When an output operator is called, it triggers the computation of a stream. Curr - + @@ -155,18 +160,18 @@ When an output operator is called, it triggers the computation of a stream. Curr - - + - +
      Operator    Meaning
-      foreachRDD(func)
+      foreach(func)
       The fundamental output operator. Applies a function, func, to each RDD generated from the stream. This function should have side effects, such as printing output, saving the RDD to external files, or writing it over the network to an external system.
       saveAsObjectFiles(prefix, [suffix])
-      Save this DStream's contents as a SequenceFile of serialized objects. The file name at each batch interval is calculated based on prefix and suffix: "prefix-TIME_IN_MS[.suffix]".
+      Save this DStream's contents as a SequenceFile of serialized objects. The file name at each batch interval is generated based on prefix and suffix: "prefix-TIME_IN_MS[.suffix]".
       saveAsTextFiles(prefix, [suffix])
-      Save this DStream's contents as a text files. The file name at each batch interval is calculated based on prefix and suffix: "prefix-TIME_IN_MS[.suffix]".
+      Save this DStream's contents as a text files. The file name at each batch interval is generated based on prefix and suffix: "prefix-TIME_IN_MS[.suffix]".
       saveAsHadoopFiles(prefix, [suffix])
-      Save this DStream's contents as a Hadoop file. The file name at each batch interval is calculated based on prefix and suffix: "prefix-TIME_IN_MS[.suffix]".
+      Save this DStream's contents as a Hadoop file. The file name at each batch interval is generated based on prefix and suffix: "prefix-TIME_IN_MS[.suffix]".
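As a rough, hedged sketch of how the windowed and output operations documented above fit together (the names `lines`, `counts` and the chosen durations are illustrative only; it assumes an existing input DStream[String] obtained from one of the sources described earlier, a batch interval that divides the window durations, and the implicit pair-DStream conversions being importable from StreamingContext, as with SparkContext for RDDs):

    import spark.streaming.{DStream, Seconds}
    import spark.streaming.StreamingContext._  // assumed home of the DStream-to-PairDStreamFunctions implicit

    def windowedWordCounts(lines: DStream[String]): DStream[(String, Int)] = {
      val words = lines.flatMap(_.split(" "))
      val pairs = words.map(word => (word, 1))
      // Count each word over a 30-second window that slides every 10 seconds;
      // both durations must be multiples of the batch interval.
      val counts = pairs.reduceByKeyAndWindow((a: Int, b: Int) => a + b, Seconds(30), Seconds(10))
      // Output operator: save every windowed batch as text files named "counts-TIME_IN_MS".
      counts.saveAsTextFiles("counts")
      counts
    }
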
      diff --git a/streaming/src/main/scala/spark/streaming/DStream.scala b/streaming/src/main/scala/spark/streaming/DStream.scala index c89fb7723e..d94548a4f3 100644 --- a/streaming/src/main/scala/spark/streaming/DStream.scala +++ b/streaming/src/main/scala/spark/streaming/DStream.scala @@ -471,7 +471,7 @@ abstract class DStream[T: ClassManifest] ( * Returns a new DStream in which each RDD has a single element generated by counting each RDD * of this DStream. */ - def count(): DStream[Int] = this.map(_ => 1).reduce(_ + _) + def count(): DStream[Long] = this.map(_ => 1L).reduce(_ + _) /** * Applies a function to each RDD in this DStream. This is an output operator, so @@ -529,17 +529,16 @@ abstract class DStream[T: ClassManifest] ( * Return a new DStream which is computed based on windowed batches of this DStream. * The new DStream generates RDDs with the same interval as this DStream. * @param windowDuration width of the window; must be a multiple of this DStream's interval. - * @return */ def window(windowDuration: Duration): DStream[T] = window(windowDuration, this.slideDuration) /** * Return a new DStream which is computed based on windowed batches of this DStream. - * @param windowDuration duration (i.e., width) of the window; - * must be a multiple of this DStream's interval + * @param windowDuration width of the window; must be a multiple of this DStream's + * batching interval * @param slideDuration sliding interval of the window (i.e., the interval after which - * the new DStream will generate RDDs); must be a multiple of this - * DStream's interval + * the new DStream will generate RDDs); must be a multiple of this + * DStream's batching interval */ def window(windowDuration: Duration, slideDuration: Duration): DStream[T] = { new WindowedDStream(this, windowDuration, slideDuration) @@ -548,16 +547,22 @@ abstract class DStream[T: ClassManifest] ( /** * Returns a new DStream which computed based on tumbling window on this DStream. * This is equivalent to window(batchTime, batchTime). - * @param batchTime tumbling window duration; must be a multiple of this DStream's interval + * @param batchTime tumbling window duration; must be a multiple of this DStream's + * batching interval */ def tumble(batchTime: Duration): DStream[T] = window(batchTime, batchTime) /** * Returns a new DStream in which each RDD has a single element generated by reducing all - * elements in a window over this DStream. windowDuration and slideDuration are as defined in the - * window() operation. This is equivalent to window(windowDuration, slideDuration).reduce(reduceFunc) + * elements in a window over this DStream. windowDuration and slideDuration are as defined + * in the window() operation. This is equivalent to + * window(windowDuration, slideDuration).reduce(reduceFunc) */ - def reduceByWindow(reduceFunc: (T, T) => T, windowDuration: Duration, slideDuration: Duration): DStream[T] = { + def reduceByWindow( + reduceFunc: (T, T) => T, + windowDuration: Duration, + slideDuration: Duration + ): DStream[T] = { this.window(windowDuration, slideDuration).reduce(reduceFunc) } @@ -577,8 +582,8 @@ abstract class DStream[T: ClassManifest] ( * of elements in a window over this DStream. windowDuration and slideDuration are as defined in the * window() operation. 
This is equivalent to window(windowDuration, slideDuration).count() */ - def countByWindow(windowDuration: Duration, slideDuration: Duration): DStream[Int] = { - this.map(_ => 1).reduceByWindow(_ + _, _ - _, windowDuration, slideDuration) + def countByWindow(windowDuration: Duration, slideDuration: Duration): DStream[Long] = { + this.map(_ => 1L).reduceByWindow(_ + _, _ - _, windowDuration, slideDuration) } /** @@ -612,6 +617,8 @@ abstract class DStream[T: ClassManifest] ( /** * Saves each RDD in this DStream as a Sequence file of serialized objects. + * The file name at each batch interval is generated based on `prefix` and + * `suffix`: "prefix-TIME_IN_MS.suffix". */ def saveAsObjectFiles(prefix: String, suffix: String = "") { val saveFunc = (rdd: RDD[T], time: Time) => { @@ -622,7 +629,9 @@ abstract class DStream[T: ClassManifest] ( } /** - * Saves each RDD in this DStream as at text file, using string representation of elements. + * Saves each RDD in this DStream as at text file, using string representation + * of elements. The file name at each batch interval is generated based on + * `prefix` and `suffix`: "prefix-TIME_IN_MS.suffix". */ def saveAsTextFiles(prefix: String, suffix: String = "") { val saveFunc = (rdd: RDD[T], time: Time) => { diff --git a/streaming/src/main/scala/spark/streaming/PairDStreamFunctions.scala b/streaming/src/main/scala/spark/streaming/PairDStreamFunctions.scala index 482d01300d..3952457339 100644 --- a/streaming/src/main/scala/spark/streaming/PairDStreamFunctions.scala +++ b/streaming/src/main/scala/spark/streaming/PairDStreamFunctions.scala @@ -25,34 +25,76 @@ extends Serializable { new HashPartitioner(numPartitions) } + /** + * Creates a new DStream by applying `groupByKey` on each RDD of `this` DStream. + * Therefore, the values for each key in `this` DStream's RDDs are grouped into a + * single sequence to generate the RDDs of the new DStream. Hash partitioning is + * used to generate the RDDs with Spark's default number of partitions. + */ def groupByKey(): DStream[(K, Seq[V])] = { groupByKey(defaultPartitioner()) } + /** + * Creates a new DStream by applying `groupByKey` on each RDD of `this` DStream. + * Therefore, the values for each key in `this` DStream's RDDs are grouped into a + * single sequence to generate the RDDs of the new DStream. Hash partitioning is + * used to generate the RDDs with `numPartitions` partitions. + */ def groupByKey(numPartitions: Int): DStream[(K, Seq[V])] = { groupByKey(defaultPartitioner(numPartitions)) } + /** + * Creates a new DStream by applying `groupByKey` on each RDD of `this` DStream. + * Therefore, the values for each key in `this` DStream's RDDs are grouped into a + * single sequence to generate the RDDs of the new DStream. [[spark.Partitioner]] + * is used to control the partitioning of each RDD. + */ def groupByKey(partitioner: Partitioner): DStream[(K, Seq[V])] = { val createCombiner = (v: V) => ArrayBuffer[V](v) val mergeValue = (c: ArrayBuffer[V], v: V) => (c += v) val mergeCombiner = (c1: ArrayBuffer[V], c2: ArrayBuffer[V]) => (c1 ++ c2) - combineByKey(createCombiner, mergeValue, mergeCombiner, partitioner).asInstanceOf[DStream[(K, Seq[V])]] + combineByKey(createCombiner, mergeValue, mergeCombiner, partitioner) + .asInstanceOf[DStream[(K, Seq[V])]] } + /** + * Creates a new DStream by applying `reduceByKey` on each RDD of `this` DStream. + * Therefore, the values for each key in `this` DStream's RDDs is merged using the + * associative reduce function to generate the RDDs of the new DStream. 
+ * Hash partitioning is used to generate the RDDs with Spark's default number of partitions. + */ def reduceByKey(reduceFunc: (V, V) => V): DStream[(K, V)] = { reduceByKey(reduceFunc, defaultPartitioner()) } + /** + * Creates a new DStream by applying `reduceByKey` on each RDD of `this` DStream. + * Therefore, the values for each key in `this` DStream's RDDs is merged using the + * associative reduce function to generate the RDDs of the new DStream. + * Hash partitioning is used to generate the RDDs with `numPartitions` partitions. + */ def reduceByKey(reduceFunc: (V, V) => V, numPartitions: Int): DStream[(K, V)] = { reduceByKey(reduceFunc, defaultPartitioner(numPartitions)) } + /** + * Creates a new DStream by applying `reduceByKey` on each RDD of `this` DStream. + * Therefore, the values for each key in `this` DStream's RDDs is merged using the + * associative reduce function to generate the RDDs of the new DStream. + * [[spark.Partitioner]] is used to control the partitioning of each RDD. + */ def reduceByKey(reduceFunc: (V, V) => V, partitioner: Partitioner): DStream[(K, V)] = { val cleanedReduceFunc = ssc.sc.clean(reduceFunc) combineByKey((v: V) => v, cleanedReduceFunc, cleanedReduceFunc, partitioner) } + /** + * Generic function to combine elements of each key in DStream's RDDs using custom function. + * This is similar to the combineByKey for RDDs. Please refer to combineByKey in + * [[spark.PairRDDFunctions]] for more information. + */ def combineByKey[C: ClassManifest]( createCombiner: V => C, mergeValue: (C, V) => C, @@ -61,14 +103,52 @@ extends Serializable { new ShuffledDStream[K, V, C](self, createCombiner, mergeValue, mergeCombiner, partitioner) } + /** + * Creates a new DStream by counting the number of values of each key in each RDD + * of `this` DStream. Hash partitioning is used to generate the RDDs with Spark's + * `numPartitions` partitions. + */ def countByKey(numPartitions: Int = self.ssc.sc.defaultParallelism): DStream[(K, Long)] = { self.map(x => (x._1, 1L)).reduceByKey((x: Long, y: Long) => x + y, numPartitions) } + /** + * Creates a new DStream by applying `groupByKey` over a sliding window on `this` DStream. + * This is similar to `DStream.groupByKey()` but applies it over a sliding window. + * The new DStream generates RDDs with the same interval as this DStream. + * Hash partitioning is used to generate the RDDs with Spark's default number of partitions. + * @param windowDuration width of the window; must be a multiple of this DStream's + * batching interval + */ + def groupByKeyAndWindow(windowDuration: Duration): DStream[(K, Seq[V])] = { + groupByKeyAndWindow(windowDuration, self.slideDuration, defaultPartitioner()) + } + + /** + * Creates a new DStream by applying `groupByKey` over a sliding window on `this` DStream. + * This is similar to `DStream.groupByKey()` but applies it over a sliding window. + * Hash partitioning is used to generate the RDDs with Spark's default number of partitions. + * @param windowDuration width of the window; must be a multiple of this DStream's + * batching interval + * @param slideDuration sliding interval of the window (i.e., the interval after which + * the new DStream will generate RDDs); must be a multiple of this + * DStream's batching interval + */ def groupByKeyAndWindow(windowDuration: Duration, slideDuration: Duration): DStream[(K, Seq[V])] = { groupByKeyAndWindow(windowDuration, slideDuration, defaultPartitioner()) } + /** + * Creates a new DStream by applying `groupByKey` over a sliding window on `this` DStream. 
+ * This is similar to `DStream.groupByKey()` but applies it over a sliding window. + * Hash partitioning is used to generate the RDDs with `numPartitions` partitions. + * @param windowDuration width of the window; must be a multiple of this DStream's + * batching interval + * @param slideDuration sliding interval of the window (i.e., the interval after which + * the new DStream will generate RDDs); must be a multiple of this + * DStream's batching interval + * @param numPartitions Number of partitions of each RDD in the new DStream. + */ def groupByKeyAndWindow( windowDuration: Duration, slideDuration: Duration, @@ -77,6 +157,16 @@ extends Serializable { groupByKeyAndWindow(windowDuration, slideDuration, defaultPartitioner(numPartitions)) } + /** + * Creates a new DStream by applying `groupByKey` over a sliding window on `this` DStream. + * This is similar to `DStream.groupByKey()` but applies it over a sliding window. + * @param windowDuration width of the window; must be a multiple of this DStream's + * batching interval + * @param slideDuration sliding interval of the window (i.e., the interval after which + * the new DStream will generate RDDs); must be a multiple of this + * DStream's batching interval + * @param partitioner Partitioner for controlling the partitioning of each RDD in the new DStream. + */ def groupByKeyAndWindow( windowDuration: Duration, slideDuration: Duration, @@ -85,6 +175,15 @@ extends Serializable { self.window(windowDuration, slideDuration).groupByKey(partitioner) } + /** + * Creates a new DStream by applying `reduceByKey` over a sliding window on `this` DStream. + * This is similar to `DStream.reduceByKey()` but applies it over a sliding window. + * The new DStream generates RDDs with the same interval as this DStream. + * Hash partitioning is used to generate the RDDs with Spark's default number of partitions. + * @param reduceFunc associative reduce function + * @param windowDuration width of the window; must be a multiple of this DStream's + * batching interval + */ def reduceByKeyAndWindow( reduceFunc: (V, V) => V, windowDuration: Duration @@ -92,6 +191,17 @@ extends Serializable { reduceByKeyAndWindow(reduceFunc, windowDuration, self.slideDuration, defaultPartitioner()) } + /** + * Creates a new DStream by applying `reduceByKey` over a sliding window on `this` DStream. + * This is similar to `DStream.reduceByKey()` but applies it over a sliding window. + * Hash partitioning is used to generate the RDDs with Spark's default number of partitions. + * @param reduceFunc associative reduce function + * @param windowDuration width of the window; must be a multiple of this DStream's + * batching interval + * @param slideDuration sliding interval of the window (i.e., the interval after which + * the new DStream will generate RDDs); must be a multiple of this + * DStream's batching interval + */ def reduceByKeyAndWindow( reduceFunc: (V, V) => V, windowDuration: Duration, @@ -100,6 +210,18 @@ extends Serializable { reduceByKeyAndWindow(reduceFunc, windowDuration, slideDuration, defaultPartitioner()) } + /** + * Creates a new DStream by applying `reduceByKey` over a sliding window on `this` DStream. + * This is similar to `DStream.reduceByKey()` but applies it over a sliding window. + * Hash partitioning is used to generate the RDDs with `numPartitions` partitions. 
+ * @param reduceFunc associative reduce function + * @param windowDuration width of the window; must be a multiple of this DStream's + * batching interval + * @param slideDuration sliding interval of the window (i.e., the interval after which + * the new DStream will generate RDDs); must be a multiple of this + * DStream's batching interval + * @param numPartitions Number of partitions of each RDD in the new DStream. + */ def reduceByKeyAndWindow( reduceFunc: (V, V) => V, windowDuration: Duration, @@ -109,6 +231,17 @@ extends Serializable { reduceByKeyAndWindow(reduceFunc, windowDuration, slideDuration, defaultPartitioner(numPartitions)) } + /** + * Creates a new DStream by applying `reduceByKey` over a sliding window on `this` DStream. + * This is similar to `DStream.reduceByKey()` but applies it over a sliding window. + * @param reduceFunc associative reduce function + * @param windowDuration width of the window; must be a multiple of this DStream's + * batching interval + * @param slideDuration sliding interval of the window (i.e., the interval after which + * the new DStream will generate RDDs); must be a multiple of this + * DStream's batching interval + * @param partitioner Partitioner for controlling the partitioning of each RDD in the new DStream. + */ def reduceByKeyAndWindow( reduceFunc: (V, V) => V, windowDuration: Duration, @@ -121,12 +254,23 @@ extends Serializable { .reduceByKey(cleanedReduceFunc, partitioner) } - // This method is the efficient sliding window reduce operation, - // which requires the specification of an inverse reduce function, - // so that new elements introduced in the window can be "added" using - // reduceFunc to the previous window's result and old elements can be - // "subtracted using invReduceFunc. - + /** + * Creates a new DStream by reducing over a window in a smarter way. + * The reduced value of over a new window is calculated incrementally by using the + * old window's reduce value : + * 1. reduce the new values that entered the window (e.g., adding new counts) + * 2. "inverse reduce" the old values that left the window (e.g., subtracting old counts) + * This is more efficient that reduceByKeyAndWindow without "inverse reduce" function. + * However, it is applicable to only "invertible reduce functions". + * Hash partitioning is used to generate the RDDs with Spark's default number of partitions. + * @param reduceFunc associative reduce function + * @param invReduceFunc inverse function + * @param windowDuration width of the window; must be a multiple of this DStream's + * batching interval + * @param slideDuration sliding interval of the window (i.e., the interval after which + * the new DStream will generate RDDs); must be a multiple of this + * DStream's batching interval + */ def reduceByKeyAndWindow( reduceFunc: (V, V) => V, invReduceFunc: (V, V) => V, @@ -138,6 +282,24 @@ extends Serializable { reduceFunc, invReduceFunc, windowDuration, slideDuration, defaultPartitioner()) } + /** + * Creates a new DStream by reducing over a window in a smarter way. + * The reduced value of over a new window is calculated incrementally by using the + * old window's reduce value : + * 1. reduce the new values that entered the window (e.g., adding new counts) + * 2. "inverse reduce" the old values that left the window (e.g., subtracting old counts) + * This is more efficient that reduceByKeyAndWindow without "inverse reduce" function. + * However, it is applicable to only "invertible reduce functions". 
+ * Hash partitioning is used to generate the RDDs with `numPartitions` partitions. + * @param reduceFunc associative reduce function + * @param invReduceFunc inverse function + * @param windowDuration width of the window; must be a multiple of this DStream's + * batching interval + * @param slideDuration sliding interval of the window (i.e., the interval after which + * the new DStream will generate RDDs); must be a multiple of this + * DStream's batching interval + * @param numPartitions Number of partitions of each RDD in the new DStream. + */ def reduceByKeyAndWindow( reduceFunc: (V, V) => V, invReduceFunc: (V, V) => V, @@ -150,6 +312,23 @@ extends Serializable { reduceFunc, invReduceFunc, windowDuration, slideDuration, defaultPartitioner(numPartitions)) } + /** + * Creates a new DStream by reducing over a window in a smarter way. + * The reduced value of over a new window is calculated incrementally by using the + * old window's reduce value : + * 1. reduce the new values that entered the window (e.g., adding new counts) + * 2. "inverse reduce" the old values that left the window (e.g., subtracting old counts) + * This is more efficient that reduceByKeyAndWindow without "inverse reduce" function. + * However, it is applicable to only "invertible reduce functions". + * @param reduceFunc associative reduce function + * @param invReduceFunc inverse function + * @param windowDuration width of the window; must be a multiple of this DStream's + * batching interval + * @param slideDuration sliding interval of the window (i.e., the interval after which + * the new DStream will generate RDDs); must be a multiple of this + * DStream's batching interval + * @param partitioner Partitioner for controlling the partitioning of each RDD in the new DStream. + */ def reduceByKeyAndWindow( reduceFunc: (V, V) => V, invReduceFunc: (V, V) => V, @@ -164,6 +343,16 @@ extends Serializable { self, cleanedReduceFunc, cleanedInvReduceFunc, windowDuration, slideDuration, partitioner) } + /** + * Creates a new DStream by counting the number of values for each key over a window. + * Hash partitioning is used to generate the RDDs with `numPartitions` partitions. + * @param windowDuration width of the window; must be a multiple of this DStream's + * batching interval + * @param slideDuration sliding interval of the window (i.e., the interval after which + * the new DStream will generate RDDs); must be a multiple of this + * DStream's batching interval + * @param numPartitions Number of partitions of each RDD in the new DStream. + */ def countByKeyAndWindow( windowDuration: Duration, slideDuration: Duration, @@ -179,17 +368,30 @@ extends Serializable { ) } - // TODO: - // - // - // - // + /** + * Creates a new "state" DStream where the state for each key is updated by applying + * the given function on the previous state of the key and the new values of the key from + * `this` DStream. Hash partitioning is used to generate the RDDs with Spark's default + * number of partitions. + * @param updateFunc State update function. If `this` function returns None, then + * corresponding state key-value pair will be eliminated. + * @tparam S State type + */ def updateStateByKey[S <: AnyRef : ClassManifest]( updateFunc: (Seq[V], Option[S]) => Option[S] ): DStream[(K, S)] = { updateStateByKey(updateFunc, defaultPartitioner()) } + /** + * Creates a new "state" DStream where the state for each key is updated by applying + * the given function on the previous state of the key and the new values of the key from + * `this` DStream. 
Hash partitioning is used to generate the RDDs with `numPartitions` partitions. + * @param updateFunc State update function. If `this` function returns None, then + * corresponding state key-value pair will be eliminated. + * @param numPartitions Number of partitions of each RDD in the new DStream. + * @tparam S State type + */ def updateStateByKey[S <: AnyRef : ClassManifest]( updateFunc: (Seq[V], Option[S]) => Option[S], numPartitions: Int @@ -197,6 +399,15 @@ extends Serializable { updateStateByKey(updateFunc, defaultPartitioner(numPartitions)) } + /** + * Creates a new "state" DStream where the state for each key is updated by applying + * the given function on the previous state of the key and the new values of the key from + * `this` DStream. [[spark.Partitioner]] is used to control the partitioning of each RDD. + * @param updateFunc State update function. If `this` function returns None, then + * corresponding state key-value pair will be eliminated. + * @param partitioner Partitioner for controlling the partitioning of each RDD in the new DStream. + * @tparam S State type + */ def updateStateByKey[S <: AnyRef : ClassManifest]( updateFunc: (Seq[V], Option[S]) => Option[S], partitioner: Partitioner @@ -207,6 +418,19 @@ extends Serializable { updateStateByKey(newUpdateFunc, partitioner, true) } + /** + * Creates a new "state" DStream where the state for each key is updated by applying + * the given function on the previous state of the key and the new values of the key from + * `this` DStream. [[spark.Partitioner]] is used to control the partitioning of each RDD. + * @param updateFunc State update function. If `this` function returns None, then + * corresponding state key-value pair will be eliminated. Note, that + * this function may generate a different a tuple with a different key + * than the input key. It is up to the developer to decide whether to + * remember the partitioner despite the key being changed. + * @param partitioner Partitioner for controlling the partitioning of each RDD in the new DStream. + * @param rememberPartitioner Whether to remember the paritioner object in the generated RDDs. + * @tparam S State type + */ def updateStateByKey[S <: AnyRef : ClassManifest]( updateFunc: (Iterator[(K, Seq[V], Option[S])]) => Iterator[(K, S)], partitioner: Partitioner, @@ -226,10 +450,24 @@ extends Serializable { new FlatMapValuedDStream[K, V, U](self, flatMapValuesFunc) } + /** + * Cogroups `this` DStream with `other` DStream. Each RDD of the new DStream will + * be generated by cogrouping RDDs from`this`and `other` DStreams. Therefore, for + * each key k in corresponding RDDs of `this` or `other` DStreams, the generated RDD + * will contains a tuple with the list of values for that key in both RDDs. + * HashPartitioner is used to partition each generated RDD into default number of partitions. + */ def cogroup[W: ClassManifest](other: DStream[(K, W)]): DStream[(K, (Seq[V], Seq[W]))] = { cogroup(other, defaultPartitioner()) } + /** + * Cogroups `this` DStream with `other` DStream. Each RDD of the new DStream will + * be generated by cogrouping RDDs from`this`and `other` DStreams. Therefore, for + * each key k in corresponding RDDs of `this` or `other` DStreams, the generated RDD + * will contains a tuple with the list of values for that key in both RDDs. + * Partitioner is used to partition each generated RDD. 
+ */ def cogroup[W: ClassManifest]( other: DStream[(K, W)], partitioner: Partitioner @@ -249,11 +487,24 @@ extends Serializable { } } + /** + * Joins `this` DStream with `other` DStream. Each RDD of the new DStream will + * be generated by joining RDDs from `this` and `other` DStreams. HashPartitioner is used + * to partition each generated RDD into default number of partitions. + */ def join[W: ClassManifest](other: DStream[(K, W)]): DStream[(K, (V, W))] = { join[W](other, defaultPartitioner()) } - def join[W: ClassManifest](other: DStream[(K, W)], partitioner: Partitioner): DStream[(K, (V, W))] = { + /** + * Joins `this` DStream with `other` DStream, that is, each RDD of the new DStream will + * be generated by joining RDDs from `this` and other DStream. Uses the given + * Partitioner to partition each generated RDD. + */ + def join[W: ClassManifest]( + other: DStream[(K, W)], + partitioner: Partitioner + ): DStream[(K, (V, W))] = { this.cogroup(other, partitioner) .flatMapValues{ case (vs, ws) => @@ -261,6 +512,10 @@ extends Serializable { } } + /** + * Saves each RDD in `this` DStream as a Hadoop file. The file name at each batch interval is generated + * based on `prefix` and `suffix`: "prefix-TIME_IN_MS.suffix" + */ def saveAsHadoopFiles[F <: OutputFormat[K, V]]( prefix: String, suffix: String @@ -268,6 +523,10 @@ extends Serializable { saveAsHadoopFiles(prefix, suffix, getKeyClass, getValueClass, fm.erasure.asInstanceOf[Class[F]]) } + /** + * Saves each RDD in `this` DStream as a Hadoop file. The file name at each batch interval is generated + * based on `prefix` and `suffix`: "prefix-TIME_IN_MS.suffix" + */ def saveAsHadoopFiles( prefix: String, suffix: String, @@ -283,6 +542,10 @@ extends Serializable { self.foreach(saveFunc) } + /** + * Saves each RDD in `this` DStream as a Hadoop file. The file name at each batch interval is generated + * based on `prefix` and `suffix`: "prefix-TIME_IN_MS.suffix". + */ def saveAsNewAPIHadoopFiles[F <: NewOutputFormat[K, V]]( prefix: String, suffix: String @@ -290,6 +553,10 @@ extends Serializable { saveAsNewAPIHadoopFiles(prefix, suffix, getKeyClass, getValueClass, fm.erasure.asInstanceOf[Class[F]]) } + /** + * Saves each RDD in `this` DStream as a Hadoop file. The file name at each batch interval is generated + * based on `prefix` and `suffix`: "prefix-TIME_IN_MS.suffix". + */ def saveAsNewAPIHadoopFiles( prefix: String, suffix: String, diff --git a/streaming/src/main/scala/spark/streaming/util/RawTextHelper.scala b/streaming/src/main/scala/spark/streaming/util/RawTextHelper.scala index f31ae39a16..03749d4a94 100644 --- a/streaming/src/main/scala/spark/streaming/util/RawTextHelper.scala +++ b/streaming/src/main/scala/spark/streaming/util/RawTextHelper.scala @@ -81,7 +81,7 @@ object RawTextHelper { * before real workload starts. 
*/ def warmUp(sc: SparkContext) { - for(i <- 0 to 4) { + for(i <- 0 to 1) { sc.parallelize(1 to 200000, 1000) .map(_ % 1331).map(_.toString) .mapPartitions(splitAndCountPartitions).reduceByKey(_ + _, 10) From 6cc8592f26553525e11213830b596fc397243439 Mon Sep 17 00:00:00 2001 From: Tathagata Das Date: Sun, 13 Jan 2013 21:20:49 -0800 Subject: [PATCH 183/226] Fixed bug --- .../test/scala/spark/streaming/WindowOperationsSuite.scala | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/streaming/src/test/scala/spark/streaming/WindowOperationsSuite.scala b/streaming/src/test/scala/spark/streaming/WindowOperationsSuite.scala index fa117cfcf0..f9ba1f20f0 100644 --- a/streaming/src/test/scala/spark/streaming/WindowOperationsSuite.scala +++ b/streaming/src/test/scala/spark/streaming/WindowOperationsSuite.scala @@ -224,7 +224,9 @@ class WindowOperationsSuite extends TestSuiteBase { val windowDuration = Seconds(2) val slideDuration = Seconds(1) val numBatches = expectedOutput.size * (slideDuration / batchDuration).toInt - val operation = (s: DStream[Int]) => s.countByWindow(windowDuration, slideDuration) + val operation = (s: DStream[Int]) => { + s.countByWindow(windowDuration, slideDuration).map(_.toInt) + } testOperation(input, operation, expectedOutput, numBatches, true) } From f90f794cde479f4de425e9be0158a136a57666a2 Mon Sep 17 00:00:00 2001 From: Tathagata Das Date: Sun, 13 Jan 2013 21:25:57 -0800 Subject: [PATCH 184/226] Minor name fix --- streaming/src/main/scala/spark/streaming/DStream.scala | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/streaming/src/main/scala/spark/streaming/DStream.scala b/streaming/src/main/scala/spark/streaming/DStream.scala index d94548a4f3..fbe3cebd6d 100644 --- a/streaming/src/main/scala/spark/streaming/DStream.scala +++ b/streaming/src/main/scala/spark/streaming/DStream.scala @@ -547,10 +547,10 @@ abstract class DStream[T: ClassManifest] ( /** * Returns a new DStream which computed based on tumbling window on this DStream. * This is equivalent to window(batchTime, batchTime). - * @param batchTime tumbling window duration; must be a multiple of this DStream's + * @param batchDuration tumbling window duration; must be a multiple of this DStream's * batching interval */ - def tumble(batchTime: Duration): DStream[T] = window(batchTime, batchTime) + def tumble(batchDuration: Duration): DStream[T] = window(batchDuration, batchDuration) /** * Returns a new DStream in which each RDD has a single element generated by reducing all From 131be5d62ef6b770de5106eb268a45bca385b599 Mon Sep 17 00:00:00 2001 From: Tathagata Das Date: Mon, 14 Jan 2013 03:28:25 -0800 Subject: [PATCH 185/226] Fixed bug in RDD checkpointing. 
--- core/src/main/scala/spark/rdd/CheckpointRDD.scala | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/core/src/main/scala/spark/rdd/CheckpointRDD.scala b/core/src/main/scala/spark/rdd/CheckpointRDD.scala index 86c63ca2f4..6f00f6ac73 100644 --- a/core/src/main/scala/spark/rdd/CheckpointRDD.scala +++ b/core/src/main/scala/spark/rdd/CheckpointRDD.scala @@ -80,12 +80,12 @@ private[spark] object CheckpointRDD extends Logging { val serializer = SparkEnv.get.serializer.newInstance() val serializeStream = serializer.serializeStream(fileOutputStream) serializeStream.writeAll(iterator) - fileOutputStream.close() + serializeStream.close() if (!fs.rename(tempOutputPath, finalOutputPath)) { if (!fs.delete(finalOutputPath, true)) { throw new IOException("Checkpoint failed: failed to delete earlier output of task " - + context.attemptId); + + context.attemptId) } if (!fs.rename(tempOutputPath, finalOutputPath)) { throw new IOException("Checkpoint failed: failed to save output of task: " @@ -119,7 +119,7 @@ private[spark] object CheckpointRDD extends Logging { val rdd = sc.makeRDD(1 to 10, 10).flatMap(x => 1 to 10000) val path = new Path(hdfsPath, "temp") val fs = path.getFileSystem(new Configuration()) - sc.runJob(rdd, CheckpointRDD.writeToFile(path.toString, 10) _) + sc.runJob(rdd, CheckpointRDD.writeToFile(path.toString, 1024) _) val cpRDD = new CheckpointRDD[Int](sc, path.toString) assert(cpRDD.splits.length == rdd.splits.length, "Number of splits is not the same") assert(cpRDD.collect.toList == rdd.collect.toList, "Data of splits not the same") From b607c9e9165a996289e4fb78bf7f2792121183d0 Mon Sep 17 00:00:00 2001 From: Patrick Wendell Date: Thu, 3 Jan 2013 18:07:39 -0800 Subject: [PATCH 186/226] A very rough, early cut at some Java functionality for Streaming. 
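The new JavaDStream and JavaStreamingContext wrappers expose the Scala streaming API to Java through anonymous Function classes. A rough usage sketch, mirroring the main() method of the JavaAPISuite added below; the master string, application name, host, port and batch duration are placeholders:

    import spark.api.java.function.Function;
    import spark.api.java.function.Function2;
    import spark.streaming.Time;
    import spark.streaming.api.java.JavaStreamingContext;

    public class JavaStreamingSketch {
      public static void main(String[] args) {
        JavaStreamingContext ssc =
            new JavaStreamingContext("local[2]", "example", new Time(1000));

        ssc.networkTextStream("localhost", 9999)
            .map(new Function<String, Integer>() {
              @Override
              public Integer call(String line) throws Exception {
                return line.length();          // length of each received line
              }
            })
            .reduce(new Function2<Integer, Integer, Integer>() {
              @Override
              public Integer call(Integer a, Integer b) throws Exception {
                return a + b;                  // total characters per batch
              }
            })
            .print();                          // print each batch's result

        ssc.start();
      }
    }
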
--- project/SparkBuild.scala | 3 +- .../scala/spark/streaming/JavaAPISuite.java | 64 +++++++++++++ .../streaming/api/java/JavaDStream.scala | 95 +++++++++++++++++++ .../api/java/JavaStreamingContext.scala | 29 ++++++ 4 files changed, 190 insertions(+), 1 deletion(-) create mode 100644 streaming/src/main/scala/spark/streaming/JavaAPISuite.java create mode 100644 streaming/src/main/scala/spark/streaming/api/java/JavaDStream.scala create mode 100644 streaming/src/main/scala/spark/streaming/api/java/JavaStreamingContext.scala diff --git a/project/SparkBuild.scala b/project/SparkBuild.scala index d5cda347a4..39db4be842 100644 --- a/project/SparkBuild.scala +++ b/project/SparkBuild.scala @@ -162,7 +162,8 @@ object SparkBuild extends Build { def streamingSettings = sharedSettings ++ Seq( name := "spark-streaming", libraryDependencies ++= Seq( - "com.github.sgroschupf" % "zkclient" % "0.1") + "com.github.sgroschupf" % "zkclient" % "0.1", + "junit" % "junit" % "4.8.1") ) ++ assemblySettings ++ extraAssemblySettings def extraAssemblySettings() = Seq(test in assembly := {}) ++ Seq( diff --git a/streaming/src/main/scala/spark/streaming/JavaAPISuite.java b/streaming/src/main/scala/spark/streaming/JavaAPISuite.java new file mode 100644 index 0000000000..bcaaa4fa80 --- /dev/null +++ b/streaming/src/main/scala/spark/streaming/JavaAPISuite.java @@ -0,0 +1,64 @@ +package spark.streaming; + +import org.junit.After; +import org.junit.Before; +import org.junit.Test; +import spark.api.java.JavaRDD; +import spark.api.java.function.Function; +import spark.api.java.function.Function2; +import spark.streaming.api.java.JavaStreamingContext; + +import java.io.Serializable; + +// The test suite itself is Serializable so that anonymous Function implementations can be +// serialized, as an alternative to converting these anonymous classes to static inner classes; +// see http://stackoverflow.com/questions/758570/. 
+public class JavaAPISuite implements Serializable { + private transient JavaStreamingContext sc; + + @Before + public void setUp() { + sc = new JavaStreamingContext("local[2]", "test", new Time(1000)); + } + + @After + public void tearDown() { + sc.stop(); + sc = null; + // To avoid Akka rebinding to the same port, since it doesn't unbind immediately on shutdown + System.clearProperty("spark.master.port"); + } + + @Test + public void simpleTest() { + sc.textFileStream("/var/log/syslog").print(); + sc.start(); + } + + public static void main(String[] args) { + JavaStreamingContext sc = new JavaStreamingContext("local[2]", "test", new Time(1000)); + + sc.networkTextStream("localhost", 12345).map(new Function() { + @Override + public Integer call(String s) throws Exception { + return s.length(); + } + }).reduce(new Function2() { + @Override + public Integer call(Integer i1, Integer i2) throws Exception { + return i1 + i2; + } + }).foreach(new Function2, Time, Void>() { + @Override + public Void call(JavaRDD integerJavaRDD, Time t) throws Exception { + System.out.println("Contents @ " + t.toFormattedString()); + for (int i: integerJavaRDD.collect()) { + System.out.println(i + "\n"); + } + return null; + } + }); + + sc.start(); + } +} diff --git a/streaming/src/main/scala/spark/streaming/api/java/JavaDStream.scala b/streaming/src/main/scala/spark/streaming/api/java/JavaDStream.scala new file mode 100644 index 0000000000..e9391642f8 --- /dev/null +++ b/streaming/src/main/scala/spark/streaming/api/java/JavaDStream.scala @@ -0,0 +1,95 @@ +package spark.streaming.api.java + +import java.util.{List => JList} + +import scala.collection.JavaConversions._ + +import spark.streaming._ +import spark.api.java.JavaRDD +import spark.api.java.function.{Function2 => JFunction2, Function => JFunction, _} +import java.util +import spark.RDD + +class JavaDStream[T](val dstream: DStream[T])(implicit val classManifest: ClassManifest[T]) { + def print() = dstream.print() + + // TODO move to type-specific implementations + def cache() : JavaDStream[T] = { + dstream.cache() + } + + def count() : JavaDStream[Int] = { + dstream.count() + } + + def countByWindow(windowTime: Time, slideTime: Time) : JavaDStream[Int] = { + dstream.countByWindow(windowTime, slideTime) + } + + def compute(validTime: Time): JavaRDD[T] = { + dstream.compute(validTime) match { + case Some(rdd) => new JavaRDD(rdd) + case None => null + } + } + + def context(): StreamingContext = dstream.context() + + def window(windowTime: Time) = { + dstream.window(windowTime) + } + + def window(windowTime: Time, slideTime: Time): JavaDStream[T] = { + dstream.window(windowTime, slideTime) + } + + def tumble(batchTime: Time): JavaDStream[T] = { + dstream.tumble(batchTime) + } + + def map[R](f: JFunction[T, R]): JavaDStream[R] = { + new JavaDStream(dstream.map(f)(f.returnType()))(f.returnType()) + } + + def filter(f: JFunction[T, java.lang.Boolean]): JavaDStream[T] = { + dstream.filter((x => f(x).booleanValue())) + } + + def glom(): JavaDStream[JList[T]] = { + new JavaDStream(dstream.glom().map(x => new java.util.ArrayList[T](x.toSeq))) + } + + // TODO: Other map partitions + def mapPartitions[U](f: FlatMapFunction[java.util.Iterator[T], U]): JavaDStream[U] = { + def fn = (x: Iterator[T]) => asScalaIterator(f.apply(asJavaIterator(x)).iterator()) + new JavaDStream(dstream.mapPartitions(fn)(f.elementType()))(f.elementType()) + } + + def reduce(f: JFunction2[T, T, T]): JavaDStream[T] = dstream.reduce(f) + + def reduceByWindow( + reduceFunc: JFunction2[T, T, T], + 
invReduceFunc: JFunction2[T, T, T], + windowTime: Time, + slideTime: Time): JavaDStream[T] = { + dstream.reduceByWindow(reduceFunc, invReduceFunc, windowTime, slideTime) + } + + def slice(fromTime: Time, toTime: Time): JList[JavaRDD[T]] = { + new util.ArrayList(dstream.slice(fromTime, toTime).map(new JavaRDD(_)).toSeq) + } + + def foreach(foreachFunc: JFunction[JavaRDD[T], Void]) = { + dstream.foreach(rdd => foreachFunc.call(new JavaRDD(rdd))) + } + + def foreach(foreachFunc: JFunction2[JavaRDD[T], Time, Void]) = { + dstream.foreach((rdd, time) => foreachFunc.call(new JavaRDD(rdd), time)) + } +} + +object JavaDStream { + implicit def fromDStream[T: ClassManifest](dstream: DStream[T]): JavaDStream[T] = + new JavaDStream[T](dstream) + +} \ No newline at end of file diff --git a/streaming/src/main/scala/spark/streaming/api/java/JavaStreamingContext.scala b/streaming/src/main/scala/spark/streaming/api/java/JavaStreamingContext.scala new file mode 100644 index 0000000000..46f8cffd0b --- /dev/null +++ b/streaming/src/main/scala/spark/streaming/api/java/JavaStreamingContext.scala @@ -0,0 +1,29 @@ +package spark.streaming.api.java + +import scala.collection.JavaConversions._ + +import spark.streaming._ +import dstream.SparkFlumeEvent +import spark.storage.StorageLevel + +class JavaStreamingContext(val ssc: StreamingContext) { + def this(master: String, frameworkName: String, batchDuration: Time) = + this(new StreamingContext(master, frameworkName, batchDuration)) + + def textFileStream(directory: String): JavaDStream[String] = { + ssc.textFileStream(directory) + } + + def networkTextStream(hostname: String, port: Int): JavaDStream[String] = { + ssc.networkTextStream(hostname, port) + } + + def flumeStream(hostname: String, port: Int, storageLevel: StorageLevel): + JavaDStream[SparkFlumeEvent] = { + ssc.flumeStream(hostname, port, storageLevel) + } + + def start() = ssc.start() + def stop() = ssc.stop() + +} From 867a7455e27af9e8a6b95c87c882c0eebcaed0ad Mon Sep 17 00:00:00 2001 From: Patrick Wendell Date: Fri, 4 Jan 2013 11:19:20 -0800 Subject: [PATCH 187/226] Adding some initial tests to streaming API. 
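JavaTestUtils (added below) wraps the Scala test harness so Java tests can feed batches of in-memory data through a TestInputStream, collect each batch's output, and compare it against expected lists. A sketch of that pattern as a JUnit test body, following the shape of the testMap case added below; the test name is hypothetical and `sc` is the JavaStreamingContext built in setUp():

    @Test
    public void testLineLengths() {
      List<List<String>> input = Arrays.asList(
          Arrays.asList("hello", "world"),
          Arrays.asList("goodnight", "moon"));

      // One input partition per batch of test data.
      JavaDStream<String> stream = JavaTestUtils.attachTestInputStream(sc, input, 1);

      JavaDStream<Integer> lengths = stream.map(new Function<String, Integer>() {
        @Override
        public Integer call(String s) throws Exception {
          return s.length();
        }
      });

      // Collect the output and run two batches, expecting two batches back;
      // each inner list holds one batch interval's results.
      JavaTestUtils.attachTestOutputStream(lengths);
      List<List<Integer>> result = JavaTestUtils.runStreams(sc, 2, 2);

      Assert.assertEquals(
          Arrays.asList(Arrays.asList(5, 5), Arrays.asList(9, 4)), result);
    }
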
--- .../api/java/JavaStreamingContext.scala | 1 + streaming/src/test/scala/JavaTestUtils.scala | 42 +++++++++++++++++++ .../scala/spark/streaming/JavaAPISuite.java | 39 +++++++++++++++-- 3 files changed, 79 insertions(+), 3 deletions(-) create mode 100644 streaming/src/test/scala/JavaTestUtils.scala rename streaming/src/{main => test}/scala/spark/streaming/JavaAPISuite.java (58%) diff --git a/streaming/src/main/scala/spark/streaming/api/java/JavaStreamingContext.scala b/streaming/src/main/scala/spark/streaming/api/java/JavaStreamingContext.scala index 46f8cffd0b..19cd032fc1 100644 --- a/streaming/src/main/scala/spark/streaming/api/java/JavaStreamingContext.scala +++ b/streaming/src/main/scala/spark/streaming/api/java/JavaStreamingContext.scala @@ -1,6 +1,7 @@ package spark.streaming.api.java import scala.collection.JavaConversions._ +import java.util.{List => JList} import spark.streaming._ import dstream.SparkFlumeEvent diff --git a/streaming/src/test/scala/JavaTestUtils.scala b/streaming/src/test/scala/JavaTestUtils.scala new file mode 100644 index 0000000000..776b0e6bb6 --- /dev/null +++ b/streaming/src/test/scala/JavaTestUtils.scala @@ -0,0 +1,42 @@ +package spark.streaming + +import collection.mutable.{SynchronizedBuffer, ArrayBuffer} +import java.util.{List => JList} +import spark.streaming.api.java.{JavaDStream, JavaStreamingContext} +import spark.streaming._ +import java.util.ArrayList +import collection.JavaConversions._ + +/** Exposes core test functionality in a Java-friendly way. */ +object JavaTestUtils extends TestSuiteBase { + def attachTestInputStream[T](ssc: JavaStreamingContext, + data: JList[JList[T]], numPartitions: Int) = { + val seqData = data.map(Seq(_:_*)) + + implicit val cm: ClassManifest[T] = + implicitly[ClassManifest[AnyRef]].asInstanceOf[ClassManifest[T]] + val dstream = new TestInputStream[T](ssc.ssc, seqData, numPartitions) + ssc.ssc.registerInputStream(dstream) + new JavaDStream[T](dstream) + } + + def attachTestOutputStream[T](dstream: JavaDStream[T]) = { + implicit val cm: ClassManifest[T] = + implicitly[ClassManifest[AnyRef]].asInstanceOf[ClassManifest[T]] + val ostream = new TestOutputStream(dstream.dstream, + new ArrayBuffer[Seq[T]] with SynchronizedBuffer[Seq[T]]) + dstream.dstream.ssc.registerOutputStream(ostream) + } + + def runStreams[V]( + ssc: JavaStreamingContext, numBatches: Int, numExpectedOutput: Int): JList[JList[V]] = { + implicit val cm: ClassManifest[V] = + implicitly[ClassManifest[AnyRef]].asInstanceOf[ClassManifest[V]] + val res = runStreams[V](ssc.ssc, numBatches, numExpectedOutput) + val out = new ArrayList[JList[V]]() + res.map(entry => out.append(new ArrayList[V](entry))) + out + } + +} + diff --git a/streaming/src/main/scala/spark/streaming/JavaAPISuite.java b/streaming/src/test/scala/spark/streaming/JavaAPISuite.java similarity index 58% rename from streaming/src/main/scala/spark/streaming/JavaAPISuite.java rename to streaming/src/test/scala/spark/streaming/JavaAPISuite.java index bcaaa4fa80..5327edfd5d 100644 --- a/streaming/src/main/scala/spark/streaming/JavaAPISuite.java +++ b/streaming/src/test/scala/spark/streaming/JavaAPISuite.java @@ -1,14 +1,20 @@ package spark.streaming; +import org.junit.Assert; import org.junit.After; import org.junit.Before; import org.junit.Test; import spark.api.java.JavaRDD; import spark.api.java.function.Function; import spark.api.java.function.Function2; +import spark.streaming.JavaTestUtils; +import spark.streaming.api.java.JavaDStream; import spark.streaming.api.java.JavaStreamingContext; import 
java.io.Serializable; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; // The test suite itself is Serializable so that anonymous Function implementations can be // serialized, as an alternative to converting these anonymous classes to static inner classes; @@ -30,9 +36,36 @@ public class JavaAPISuite implements Serializable { } @Test - public void simpleTest() { - sc.textFileStream("/var/log/syslog").print(); - sc.start(); + public void testCount() { + List> inputData = Arrays.asList( + Arrays.asList(1,2,3,4), Arrays.asList(3,4,5), Arrays.asList(3)); + + JavaDStream stream = JavaTestUtils.attachTestInputStream(sc, inputData, 1); + JavaDStream count = stream.count(); + JavaTestUtils.attachTestOutputStream(count); + List> result = JavaTestUtils.runStreams(sc, 3, 3); + + Assert.assertTrue(result.equals( + Arrays.asList(Arrays.asList(4), Arrays.asList(3), Arrays.asList(1)))); + } + + @Test + public void testMap() { + List> inputData = Arrays.asList( + Arrays.asList("hello", "world"), Arrays.asList("goodnight", "moon")); + + JavaDStream stream = JavaTestUtils.attachTestInputStream(sc, inputData, 1); + JavaDStream letterCount = stream.map(new Function() { + @Override + public Integer call(String s) throws Exception { + return s.length(); + } + }); + JavaTestUtils.attachTestOutputStream(letterCount); + List> result = JavaTestUtils.runStreams(sc, 2, 2); + + Assert.assertTrue(result.equals( + Arrays.asList(Arrays.asList(5, 5), Arrays.asList(9, 4)))); } public static void main(String[] args) { From b0974e6c1d1d8e03d9b70660070f66977ea2e797 Mon Sep 17 00:00:00 2001 From: Patrick Wendell Date: Fri, 4 Jan 2013 11:21:46 -0800 Subject: [PATCH 188/226] Remving main method from tests --- .../scala/spark/streaming/JavaAPISuite.java | 27 ------------------- 1 file changed, 27 deletions(-) diff --git a/streaming/src/test/scala/spark/streaming/JavaAPISuite.java b/streaming/src/test/scala/spark/streaming/JavaAPISuite.java index 5327edfd5d..9cf0341cce 100644 --- a/streaming/src/test/scala/spark/streaming/JavaAPISuite.java +++ b/streaming/src/test/scala/spark/streaming/JavaAPISuite.java @@ -67,31 +67,4 @@ public class JavaAPISuite implements Serializable { Assert.assertTrue(result.equals( Arrays.asList(Arrays.asList(5, 5), Arrays.asList(9, 4)))); } - - public static void main(String[] args) { - JavaStreamingContext sc = new JavaStreamingContext("local[2]", "test", new Time(1000)); - - sc.networkTextStream("localhost", 12345).map(new Function() { - @Override - public Integer call(String s) throws Exception { - return s.length(); - } - }).reduce(new Function2() { - @Override - public Integer call(Integer i1, Integer i2) throws Exception { - return i1 + i2; - } - }).foreach(new Function2, Time, Void>() { - @Override - public Void call(JavaRDD integerJavaRDD, Time t) throws Exception { - System.out.println("Contents @ " + t.toFormattedString()); - for (int i: integerJavaRDD.collect()) { - System.out.println(i + "\n"); - } - return null; - } - }); - - sc.start(); - } } From 91b3d414481cd06e0b75f621550a38042f9b2ffd Mon Sep 17 00:00:00 2001 From: Patrick Wendell Date: Fri, 4 Jan 2013 16:13:52 -0800 Subject: [PATCH 189/226] Better equality test (thanks Josh) --- streaming/src/test/scala/spark/streaming/JavaAPISuite.java | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/streaming/src/test/scala/spark/streaming/JavaAPISuite.java b/streaming/src/test/scala/spark/streaming/JavaAPISuite.java index 9cf0341cce..13dfd921cf 100644 --- 
a/streaming/src/test/scala/spark/streaming/JavaAPISuite.java +++ b/streaming/src/test/scala/spark/streaming/JavaAPISuite.java @@ -45,8 +45,8 @@ public class JavaAPISuite implements Serializable { JavaTestUtils.attachTestOutputStream(count); List> result = JavaTestUtils.runStreams(sc, 3, 3); - Assert.assertTrue(result.equals( - Arrays.asList(Arrays.asList(4), Arrays.asList(3), Arrays.asList(1)))); + Assert.assertEquals(result, + Arrays.asList(Arrays.asList(4), Arrays.asList(3), Arrays.asList(1))); } @Test @@ -64,7 +64,6 @@ public class JavaAPISuite implements Serializable { JavaTestUtils.attachTestOutputStream(letterCount); List> result = JavaTestUtils.runStreams(sc, 2, 2); - Assert.assertTrue(result.equals( - Arrays.asList(Arrays.asList(5, 5), Arrays.asList(9, 4)))); + Assert.assertEquals(result, Arrays.asList(Arrays.asList(5, 5), Arrays.asList(9, 4))); } } From 22a8c7be9aebe46c7ee332228967039be811043b Mon Sep 17 00:00:00 2001 From: Patrick Wendell Date: Fri, 4 Jan 2013 18:30:48 -0800 Subject: [PATCH 190/226] Adding more tests --- .../streaming/api/java/JavaDStream.scala | 2 +- .../scala/spark/streaming/JavaAPISuite.java | 181 +++++++++++++++++- 2 files changed, 173 insertions(+), 10 deletions(-) diff --git a/streaming/src/main/scala/spark/streaming/api/java/JavaDStream.scala b/streaming/src/main/scala/spark/streaming/api/java/JavaDStream.scala index e9391642f8..d0fa06ba7b 100644 --- a/streaming/src/main/scala/spark/streaming/api/java/JavaDStream.scala +++ b/streaming/src/main/scala/spark/streaming/api/java/JavaDStream.scala @@ -35,7 +35,7 @@ class JavaDStream[T](val dstream: DStream[T])(implicit val classManifest: ClassM def context(): StreamingContext = dstream.context() - def window(windowTime: Time) = { + def window(windowTime: Time): JavaDStream[T] = { dstream.window(windowTime) } diff --git a/streaming/src/test/scala/spark/streaming/JavaAPISuite.java b/streaming/src/test/scala/spark/streaming/JavaAPISuite.java index 13dfd921cf..9833478221 100644 --- a/streaming/src/test/scala/spark/streaming/JavaAPISuite.java +++ b/streaming/src/test/scala/spark/streaming/JavaAPISuite.java @@ -1,19 +1,20 @@ package spark.streaming; +import com.google.common.collect.Lists; import org.junit.Assert; import org.junit.After; import org.junit.Before; import org.junit.Test; -import spark.api.java.JavaRDD; +import spark.api.java.function.FlatMapFunction; import spark.api.java.function.Function; -import spark.api.java.function.Function2; import spark.streaming.JavaTestUtils; import spark.streaming.api.java.JavaDStream; import spark.streaming.api.java.JavaStreamingContext; import java.io.Serializable; -import java.util.ArrayList; import java.util.Arrays; +import java.util.Collections; +import java.util.Iterator; import java.util.List; // The test suite itself is Serializable so that anonymous Function implementations can be @@ -38,21 +39,31 @@ public class JavaAPISuite implements Serializable { @Test public void testCount() { List> inputData = Arrays.asList( - Arrays.asList(1,2,3,4), Arrays.asList(3,4,5), Arrays.asList(3)); + Arrays.asList(1,2,3,4), + Arrays.asList(3,4,5), + Arrays.asList(3)); + + List> expected = Arrays.asList( + Arrays.asList(4), + Arrays.asList(3), + Arrays.asList(1)); JavaDStream stream = JavaTestUtils.attachTestInputStream(sc, inputData, 1); JavaDStream count = stream.count(); JavaTestUtils.attachTestOutputStream(count); List> result = JavaTestUtils.runStreams(sc, 3, 3); - - Assert.assertEquals(result, - Arrays.asList(Arrays.asList(4), Arrays.asList(3), Arrays.asList(1))); + 
assertOrderInvariantEquals(expected, result); } @Test public void testMap() { List> inputData = Arrays.asList( - Arrays.asList("hello", "world"), Arrays.asList("goodnight", "moon")); + Arrays.asList("hello", "world"), + Arrays.asList("goodnight", "moon")); + + List> expected = Arrays.asList( + Arrays.asList(5,5), + Arrays.asList(9,4)); JavaDStream stream = JavaTestUtils.attachTestInputStream(sc, inputData, 1); JavaDStream letterCount = stream.map(new Function() { @@ -64,6 +75,158 @@ public class JavaAPISuite implements Serializable { JavaTestUtils.attachTestOutputStream(letterCount); List> result = JavaTestUtils.runStreams(sc, 2, 2); - Assert.assertEquals(result, Arrays.asList(Arrays.asList(5, 5), Arrays.asList(9, 4))); + assertOrderInvariantEquals(expected, result); } + + @Test + public void testWindow() { + List> inputData = Arrays.asList( + Arrays.asList(1,2,3), + Arrays.asList(4,5,6), + Arrays.asList(7,8,9)); + + List> expected = Arrays.asList( + Arrays.asList(1,2,3), + Arrays.asList(4,5,6,1,2,3), + Arrays.asList(7,8,9,4,5,6), + Arrays.asList(7,8,9)); + + JavaDStream stream = JavaTestUtils.attachTestInputStream(sc, inputData, 1); + JavaDStream windowedRDD = stream.window(new Time(2000)); + JavaTestUtils.attachTestOutputStream(windowedRDD); + List> result = JavaTestUtils.runStreams(sc, 4, 4); + + assertOrderInvariantEquals(expected, result); + } + + @Test + public void testWindowWithSlideTime() { + List> inputData = Arrays.asList( + Arrays.asList(1,2,3), + Arrays.asList(4,5,6), + Arrays.asList(7,8,9), + Arrays.asList(10,11,12), + Arrays.asList(13,14,15), + Arrays.asList(16,17,18)); + + List> expected = Arrays.asList( + Arrays.asList(1,2,3,4,5,6), + Arrays.asList(1,2,3,4,5,6,7,8,9,10,11,12), + Arrays.asList(7,8,9,10,11,12,13,14,15,16,17,18), + Arrays.asList(13,14,15,16,17,18)); + + JavaDStream stream = JavaTestUtils.attachTestInputStream(sc, inputData, 1); + JavaDStream windowedRDD = stream.window(new Time(4000), new Time(2000)); + JavaTestUtils.attachTestOutputStream(windowedRDD); + List> result = JavaTestUtils.runStreams(sc, 8, 4); + + assertOrderInvariantEquals(expected, result); + } + + @Test + public void testTumble() { + List> inputData = Arrays.asList( + Arrays.asList(1,2,3), + Arrays.asList(4,5,6), + Arrays.asList(7,8,9), + Arrays.asList(10,11,12), + Arrays.asList(13,14,15), + Arrays.asList(16,17,18)); + + List> expected = Arrays.asList( + Arrays.asList(1,2,3,4,5,6), + Arrays.asList(7,8,9,10,11,12), + Arrays.asList(13,14,15,16,17,18)); + + JavaDStream stream = JavaTestUtils.attachTestInputStream(sc, inputData, 1); + JavaDStream windowedRDD = stream.tumble(new Time(2000)); + JavaTestUtils.attachTestOutputStream(windowedRDD); + List> result = JavaTestUtils.runStreams(sc, 6, 3); + + assertOrderInvariantEquals(expected, result); + } + + @Test + public void testFilter() { + List> inputData = Arrays.asList( + Arrays.asList("giants", "dodgers"), + Arrays.asList("yankees", "red socks")); + + List> expected = Arrays.asList( + Arrays.asList("giants"), + Arrays.asList("yankees")); + + JavaDStream stream = JavaTestUtils.attachTestInputStream(sc, inputData, 1); + JavaDStream filtered = stream.filter(new Function() { + @Override + public Boolean call(String s) throws Exception { + return s.contains("a"); + } + }); + JavaTestUtils.attachTestOutputStream(filtered); + List> result = JavaTestUtils.runStreams(sc, 2, 2); + + assertOrderInvariantEquals(expected, result); + } + + @Test + public void testGlom() { + List> inputData = Arrays.asList( + Arrays.asList("giants", "dodgers"), + 
Arrays.asList("yankees", "red socks")); + + List>> expected = Arrays.asList( + Arrays.asList(Arrays.asList("giants", "dodgers")), + Arrays.asList(Arrays.asList("yankees", "red socks"))); + + JavaDStream stream = JavaTestUtils.attachTestInputStream(sc, inputData, 1); + JavaDStream glommed = stream.glom(); + JavaTestUtils.attachTestOutputStream(glommed); + List>> result = JavaTestUtils.runStreams(sc, 2, 2); + + Assert.assertEquals(expected, result); + } + + @Test + public void testMapPartitions() { + List> inputData = Arrays.asList( + Arrays.asList("giants", "dodgers"), + Arrays.asList("yankees", "red socks")); + + List> expected = Arrays.asList( + Arrays.asList("GIANTSDODGERS"), + Arrays.asList("YANKEESRED SOCKS")); + + JavaDStream stream = JavaTestUtils.attachTestInputStream(sc, inputData, 1); + JavaDStream mapped = stream.mapPartitions(new FlatMapFunction, String>() { + @Override + public Iterable call(Iterator in) { + String out = ""; + while (in.hasNext()) { + out = out + in.next().toUpperCase(); + } + return Lists.newArrayList(out); + } + }); + JavaTestUtils.attachTestOutputStream(mapped); + List>> result = JavaTestUtils.runStreams(sc, 2, 2); + + Assert.assertEquals(expected, result); + } + + /* + * Performs an order-invariant comparison of lists representing two RDD streams. This allows + * us to account for ordering variation within individual RDD's which occurs during windowing. + */ + public static void assertOrderInvariantEquals( + List> expected, List> actual) { + for (List list: expected) { + Collections.sort(list); + } + for (List list: actual) { + Collections.sort(list); + } + Assert.assertEquals(expected, actual); + } + } From 0d0bab25bd0dfefdd5a91d22a4e81d347d255cf3 Mon Sep 17 00:00:00 2001 From: Patrick Wendell Date: Fri, 4 Jan 2013 20:28:08 -0800 Subject: [PATCH 191/226] Reduce tests --- .../scala/spark/streaming/JavaAPISuite.java | 69 +++++++++++++++++-- 1 file changed, 63 insertions(+), 6 deletions(-) diff --git a/streaming/src/test/scala/spark/streaming/JavaAPISuite.java b/streaming/src/test/scala/spark/streaming/JavaAPISuite.java index 9833478221..2d1b0f35f9 100644 --- a/streaming/src/test/scala/spark/streaming/JavaAPISuite.java +++ b/streaming/src/test/scala/spark/streaming/JavaAPISuite.java @@ -7,6 +7,7 @@ import org.junit.Before; import org.junit.Test; import spark.api.java.function.FlatMapFunction; import spark.api.java.function.Function; +import spark.api.java.function.Function2; import spark.streaming.JavaTestUtils; import spark.streaming.api.java.JavaDStream; import spark.streaming.api.java.JavaStreamingContext; @@ -92,8 +93,8 @@ public class JavaAPISuite implements Serializable { Arrays.asList(7,8,9)); JavaDStream stream = JavaTestUtils.attachTestInputStream(sc, inputData, 1); - JavaDStream windowedRDD = stream.window(new Time(2000)); - JavaTestUtils.attachTestOutputStream(windowedRDD); + JavaDStream windowed = stream.window(new Time(2000)); + JavaTestUtils.attachTestOutputStream(windowed); List> result = JavaTestUtils.runStreams(sc, 4, 4); assertOrderInvariantEquals(expected, result); @@ -116,8 +117,8 @@ public class JavaAPISuite implements Serializable { Arrays.asList(13,14,15,16,17,18)); JavaDStream stream = JavaTestUtils.attachTestInputStream(sc, inputData, 1); - JavaDStream windowedRDD = stream.window(new Time(4000), new Time(2000)); - JavaTestUtils.attachTestOutputStream(windowedRDD); + JavaDStream windowed = stream.window(new Time(4000), new Time(2000)); + JavaTestUtils.attachTestOutputStream(windowed); List> result = JavaTestUtils.runStreams(sc, 8, 4); 
assertOrderInvariantEquals(expected, result); @@ -139,8 +140,8 @@ public class JavaAPISuite implements Serializable { Arrays.asList(13,14,15,16,17,18)); JavaDStream stream = JavaTestUtils.attachTestInputStream(sc, inputData, 1); - JavaDStream windowedRDD = stream.tumble(new Time(2000)); - JavaTestUtils.attachTestOutputStream(windowedRDD); + JavaDStream windowed = stream.tumble(new Time(2000)); + JavaTestUtils.attachTestOutputStream(windowed); List> result = JavaTestUtils.runStreams(sc, 6, 3); assertOrderInvariantEquals(expected, result); @@ -214,6 +215,62 @@ public class JavaAPISuite implements Serializable { Assert.assertEquals(expected, result); } + private class IntegerSum extends Function2 { + @Override + public Integer call(Integer i1, Integer i2) throws Exception { + return i1 + i2; + } + } + + private class IntegerDifference extends Function2 { + @Override + public Integer call(Integer i1, Integer i2) throws Exception { + return i1 - i2; + } + } + + @Test + public void testReduce() { + List> inputData = Arrays.asList( + Arrays.asList(1,2,3), + Arrays.asList(4,5,6), + Arrays.asList(7,8,9)); + + List> expected = Arrays.asList( + Arrays.asList(6), + Arrays.asList(15), + Arrays.asList(24)); + + JavaDStream stream = JavaTestUtils.attachTestInputStream(sc, inputData, 1); + JavaDStream reduced = stream.reduce(new IntegerSum()); + JavaTestUtils.attachTestOutputStream(reduced); + List> result = JavaTestUtils.runStreams(sc, 3, 3); + + Assert.assertEquals(expected, result); + } + + @Test + public void testReduceByWindow() { + List> inputData = Arrays.asList( + Arrays.asList(1,2,3), + Arrays.asList(4,5,6), + Arrays.asList(7,8,9)); + + List> expected = Arrays.asList( + Arrays.asList(6), + Arrays.asList(21), + Arrays.asList(39), + Arrays.asList(24)); + + JavaDStream stream = JavaTestUtils.attachTestInputStream(sc, inputData, 1); + JavaDStream reducedWindowed = stream.reduceByWindow(new IntegerSum(), + new IntegerDifference(), new Time(2000), new Time(1000)); + JavaTestUtils.attachTestOutputStream(reducedWindowed); + List> result = JavaTestUtils.runStreams(sc, 4, 4); + + Assert.assertEquals(expected, result); + } + /* * Performs an order-invariant comparison of lists representing two RDD streams. This allows * us to account for ordering variation within individual RDD's which occurs during windowing. 
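The expected output in testReduceByWindow above follows from the way a windowed reduce with an inverse function can be maintained incrementally: each new window value is the previous one, plus the reduction of batches entering the window, minus the reduction of batches leaving it. A worked check against the per-batch sums 6, 15 and 24, with a 2 second window sliding every 1 second (an interpretation of the test data, not additional Spark behavior):

    t = 1s:  6                    only the first batch is in the window
    t = 2s:  6 + 15       = 21    second batch enters
    t = 3s:  21 + 24 - 6  = 39    third batch enters, first leaves
    t = 4s:  39 - 15      = 24    no new data, second batch leaves

which matches the expected lists [6], [21], [39], [24].
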
From f144e0413a1e42d193a86fa04af769e2da9dc58b Mon Sep 17 00:00:00 2001 From: Patrick Wendell Date: Sat, 5 Jan 2013 15:06:20 -0800 Subject: [PATCH 192/226] Adding transform and union --- .../streaming/api/java/JavaDStream.scala | 14 +++++ .../scala/spark/streaming/JavaAPISuite.java | 62 +++++++++++++++++-- 2 files changed, 72 insertions(+), 4 deletions(-) diff --git a/streaming/src/main/scala/spark/streaming/api/java/JavaDStream.scala b/streaming/src/main/scala/spark/streaming/api/java/JavaDStream.scala index d0fa06ba7b..56e54c719a 100644 --- a/streaming/src/main/scala/spark/streaming/api/java/JavaDStream.scala +++ b/streaming/src/main/scala/spark/streaming/api/java/JavaDStream.scala @@ -86,6 +86,20 @@ class JavaDStream[T](val dstream: DStream[T])(implicit val classManifest: ClassM def foreach(foreachFunc: JFunction2[JavaRDD[T], Time, Void]) = { dstream.foreach((rdd, time) => foreachFunc.call(new JavaRDD(rdd), time)) } + + def transform[U](transformFunc: JFunction[JavaRDD[T], JavaRDD[U]]): JavaDStream[U] = { + implicit val cm: ClassManifest[U] = + implicitly[ClassManifest[AnyRef]].asInstanceOf[ClassManifest[U]] + def scalaTransform (in: RDD[T]): RDD[U] = { + transformFunc.call(new JavaRDD[T](in)).rdd + } + dstream.transform(scalaTransform(_)) + } + // TODO: transform with time + + def union(that: JavaDStream[T]): JavaDStream[T] = { + dstream.union(that.dstream) + } } object JavaDStream { diff --git a/streaming/src/test/scala/spark/streaming/JavaAPISuite.java b/streaming/src/test/scala/spark/streaming/JavaAPISuite.java index 2d1b0f35f9..c4629c8d97 100644 --- a/streaming/src/test/scala/spark/streaming/JavaAPISuite.java +++ b/streaming/src/test/scala/spark/streaming/JavaAPISuite.java @@ -5,6 +5,7 @@ import org.junit.Assert; import org.junit.After; import org.junit.Before; import org.junit.Test; +import spark.api.java.JavaRDD; import spark.api.java.function.FlatMapFunction; import spark.api.java.function.Function; import spark.api.java.function.Function2; @@ -13,10 +14,7 @@ import spark.streaming.api.java.JavaDStream; import spark.streaming.api.java.JavaStreamingContext; import java.io.Serializable; -import java.util.Arrays; -import java.util.Collections; -import java.util.Iterator; -import java.util.List; +import java.util.*; // The test suite itself is Serializable so that anonymous Function implementations can be // serialized, as an alternative to converting these anonymous classes to static inner classes; @@ -271,6 +269,62 @@ public class JavaAPISuite implements Serializable { Assert.assertEquals(expected, result); } + @Test + public void testTransform() { + List> inputData = Arrays.asList( + Arrays.asList(1,2,3), + Arrays.asList(4,5,6), + Arrays.asList(7,8,9)); + + List> expected = Arrays.asList( + Arrays.asList(3,4,5), + Arrays.asList(6,7,8), + Arrays.asList(9,10,11)); + + JavaDStream stream = JavaTestUtils.attachTestInputStream(sc, inputData, 1); + JavaDStream transformed = stream.transform(new Function, JavaRDD>() { + @Override + public JavaRDD call(JavaRDD in) throws Exception { + return in.map(new Function() { + @Override + public Integer call(Integer i) throws Exception { + return i + 2; + } + }); + }}); + JavaTestUtils.attachTestOutputStream(transformed); + List> result = JavaTestUtils.runStreams(sc, 3, 3); + + assertOrderInvariantEquals(expected, result); + } + + @Test + public void testUnion() { + List> inputData1 = Arrays.asList( + Arrays.asList(1,1), + Arrays.asList(2,2), + Arrays.asList(3,3)); + + List> inputData2 = Arrays.asList( + Arrays.asList(4,4), + Arrays.asList(5,5), + 
Arrays.asList(6,6)); + + List> expected = Arrays.asList( + Arrays.asList(1,1,4,4), + Arrays.asList(2,2,5,5), + Arrays.asList(3,3,6,6)); + + JavaDStream stream1 = JavaTestUtils.attachTestInputStream(sc, inputData1, 2); + JavaDStream stream2 = JavaTestUtils.attachTestInputStream(sc, inputData2, 2); + + JavaDStream unioned = stream1.union(stream2); + JavaTestUtils.attachTestOutputStream(unioned); + List> result = JavaTestUtils.runStreams(sc, 3, 3); + + assertOrderInvariantEquals(expected, result); + } + /* * Performs an order-invariant comparison of lists representing two RDD streams. This allows * us to account for ordering variation within individual RDD's which occurs during windowing. From 6e514a8d3511891a3f7221c594171477a0b5a38f Mon Sep 17 00:00:00 2001 From: Patrick Wendell Date: Mon, 7 Jan 2013 11:02:03 -0800 Subject: [PATCH 193/226] PairDStream and DStreamLike --- .../streaming/api/java/JavaDStream.scala | 102 +------------ .../streaming/api/java/JavaDStreamLike.scala | 109 ++++++++++++++ .../streaming/api/java/JavaPairDStream.scala | 134 ++++++++++++++++++ streaming/src/test/scala/JavaTestUtils.scala | 6 +- .../scala/spark/streaming/JavaAPISuite.java | 108 ++++++++++++++ 5 files changed, 359 insertions(+), 100 deletions(-) create mode 100644 streaming/src/main/scala/spark/streaming/api/java/JavaDStreamLike.scala create mode 100644 streaming/src/main/scala/spark/streaming/api/java/JavaPairDStream.scala diff --git a/streaming/src/main/scala/spark/streaming/api/java/JavaDStream.scala b/streaming/src/main/scala/spark/streaming/api/java/JavaDStream.scala index 56e54c719a..9e2823d81f 100644 --- a/streaming/src/main/scala/spark/streaming/api/java/JavaDStream.scala +++ b/streaming/src/main/scala/spark/streaming/api/java/JavaDStream.scala @@ -1,109 +1,17 @@ package spark.streaming.api.java -import java.util.{List => JList} +import spark.streaming.DStream +import spark.api.java.function.{Function => JFunction} -import scala.collection.JavaConversions._ - -import spark.streaming._ -import spark.api.java.JavaRDD -import spark.api.java.function.{Function2 => JFunction2, Function => JFunction, _} -import java.util -import spark.RDD - -class JavaDStream[T](val dstream: DStream[T])(implicit val classManifest: ClassManifest[T]) { - def print() = dstream.print() - - // TODO move to type-specific implementations - def cache() : JavaDStream[T] = { - dstream.cache() - } - - def count() : JavaDStream[Int] = { - dstream.count() - } - - def countByWindow(windowTime: Time, slideTime: Time) : JavaDStream[Int] = { - dstream.countByWindow(windowTime, slideTime) - } - - def compute(validTime: Time): JavaRDD[T] = { - dstream.compute(validTime) match { - case Some(rdd) => new JavaRDD(rdd) - case None => null - } - } - - def context(): StreamingContext = dstream.context() - - def window(windowTime: Time): JavaDStream[T] = { - dstream.window(windowTime) - } - - def window(windowTime: Time, slideTime: Time): JavaDStream[T] = { - dstream.window(windowTime, slideTime) - } - - def tumble(batchTime: Time): JavaDStream[T] = { - dstream.tumble(batchTime) - } - - def map[R](f: JFunction[T, R]): JavaDStream[R] = { - new JavaDStream(dstream.map(f)(f.returnType()))(f.returnType()) - } +class JavaDStream[T](val dstream: DStream[T])(implicit val classManifest: ClassManifest[T]) + extends JavaDStreamLike[T, JavaDStream[T]] { def filter(f: JFunction[T, java.lang.Boolean]): JavaDStream[T] = { dstream.filter((x => f(x).booleanValue())) } - - def glom(): JavaDStream[JList[T]] = { - new JavaDStream(dstream.glom().map(x => new 
java.util.ArrayList[T](x.toSeq))) - } - - // TODO: Other map partitions - def mapPartitions[U](f: FlatMapFunction[java.util.Iterator[T], U]): JavaDStream[U] = { - def fn = (x: Iterator[T]) => asScalaIterator(f.apply(asJavaIterator(x)).iterator()) - new JavaDStream(dstream.mapPartitions(fn)(f.elementType()))(f.elementType()) - } - - def reduce(f: JFunction2[T, T, T]): JavaDStream[T] = dstream.reduce(f) - - def reduceByWindow( - reduceFunc: JFunction2[T, T, T], - invReduceFunc: JFunction2[T, T, T], - windowTime: Time, - slideTime: Time): JavaDStream[T] = { - dstream.reduceByWindow(reduceFunc, invReduceFunc, windowTime, slideTime) - } - - def slice(fromTime: Time, toTime: Time): JList[JavaRDD[T]] = { - new util.ArrayList(dstream.slice(fromTime, toTime).map(new JavaRDD(_)).toSeq) - } - - def foreach(foreachFunc: JFunction[JavaRDD[T], Void]) = { - dstream.foreach(rdd => foreachFunc.call(new JavaRDD(rdd))) - } - - def foreach(foreachFunc: JFunction2[JavaRDD[T], Time, Void]) = { - dstream.foreach((rdd, time) => foreachFunc.call(new JavaRDD(rdd), time)) - } - - def transform[U](transformFunc: JFunction[JavaRDD[T], JavaRDD[U]]): JavaDStream[U] = { - implicit val cm: ClassManifest[U] = - implicitly[ClassManifest[AnyRef]].asInstanceOf[ClassManifest[U]] - def scalaTransform (in: RDD[T]): RDD[U] = { - transformFunc.call(new JavaRDD[T](in)).rdd - } - dstream.transform(scalaTransform(_)) - } - // TODO: transform with time - - def union(that: JavaDStream[T]): JavaDStream[T] = { - dstream.union(that.dstream) - } } object JavaDStream { implicit def fromDStream[T: ClassManifest](dstream: DStream[T]): JavaDStream[T] = new JavaDStream[T](dstream) - -} \ No newline at end of file +} diff --git a/streaming/src/main/scala/spark/streaming/api/java/JavaDStreamLike.scala b/streaming/src/main/scala/spark/streaming/api/java/JavaDStreamLike.scala new file mode 100644 index 0000000000..daea56f50c --- /dev/null +++ b/streaming/src/main/scala/spark/streaming/api/java/JavaDStreamLike.scala @@ -0,0 +1,109 @@ +package spark.streaming.api.java + +import java.util.{List => JList} + +import scala.collection.JavaConversions._ + +import spark.streaming._ +import spark.api.java.JavaRDD +import spark.api.java.function.{Function2 => JFunction2, Function => JFunction, _} +import java.util +import spark.RDD +import JavaDStream._ + +trait JavaDStreamLike[T, This <: JavaDStreamLike[T, This]] extends Serializable { + implicit val classManifest: ClassManifest[T] + + def dstream: DStream[T] + + def print() = dstream.print() + + // TODO move to type-specific implementations + def cache() : JavaDStream[T] = { + dstream.cache() + } + + def count() : JavaDStream[Int] = { + dstream.count() + } + + def countByWindow(windowTime: Time, slideTime: Time) : JavaDStream[Int] = { + dstream.countByWindow(windowTime, slideTime) + } + + def compute(validTime: Time): JavaRDD[T] = { + dstream.compute(validTime) match { + case Some(rdd) => new JavaRDD(rdd) + case None => null + } + } + + def context(): StreamingContext = dstream.context() + + def window(windowTime: Time): JavaDStream[T] = { + dstream.window(windowTime) + } + + def window(windowTime: Time, slideTime: Time): JavaDStream[T] = { + dstream.window(windowTime, slideTime) + } + + def tumble(batchTime: Time): JavaDStream[T] = { + dstream.tumble(batchTime) + } + + def map[R](f: JFunction[T, R]): JavaDStream[R] = { + new JavaDStream(dstream.map(f)(f.returnType()))(f.returnType()) + } + + def map[K, V](f: PairFunction[T, K, V]): JavaPairDStream[K, V] = { + def cm = 
implicitly[ClassManifest[AnyRef]].asInstanceOf[ClassManifest[Tuple2[K, V]]] + new JavaPairDStream(dstream.map(f)(cm))(f.keyType(), f.valueType()) + } + + def glom(): JavaDStream[JList[T]] = { + new JavaDStream(dstream.glom().map(x => new java.util.ArrayList[T](x.toSeq))) + } + + // TODO: Other map partitions + def mapPartitions[U](f: FlatMapFunction[java.util.Iterator[T], U]): JavaDStream[U] = { + def fn = (x: Iterator[T]) => asScalaIterator(f.apply(asJavaIterator(x)).iterator()) + new JavaDStream(dstream.mapPartitions(fn)(f.elementType()))(f.elementType()) + } + + def reduce(f: JFunction2[T, T, T]): JavaDStream[T] = dstream.reduce(f) + + def reduceByWindow( + reduceFunc: JFunction2[T, T, T], + invReduceFunc: JFunction2[T, T, T], + windowTime: Time, + slideTime: Time): JavaDStream[T] = { + dstream.reduceByWindow(reduceFunc, invReduceFunc, windowTime, slideTime) + } + + def slice(fromTime: Time, toTime: Time): JList[JavaRDD[T]] = { + new util.ArrayList(dstream.slice(fromTime, toTime).map(new JavaRDD(_)).toSeq) + } + + def foreach(foreachFunc: JFunction[JavaRDD[T], Void]) = { + dstream.foreach(rdd => foreachFunc.call(new JavaRDD(rdd))) + } + + def foreach(foreachFunc: JFunction2[JavaRDD[T], Time, Void]) = { + dstream.foreach((rdd, time) => foreachFunc.call(new JavaRDD(rdd), time)) + } + + def transform[U](transformFunc: JFunction[JavaRDD[T], JavaRDD[U]]): JavaDStream[U] = { + implicit val cm: ClassManifest[U] = + implicitly[ClassManifest[AnyRef]].asInstanceOf[ClassManifest[U]] + def scalaTransform (in: RDD[T]): RDD[U] = { + transformFunc.call(new JavaRDD[T](in)).rdd + } + dstream.transform(scalaTransform(_)) + } + // TODO: transform with time + + def union(that: JavaDStream[T]): JavaDStream[T] = { + dstream.union(that.dstream) + } +} \ No newline at end of file diff --git a/streaming/src/main/scala/spark/streaming/api/java/JavaPairDStream.scala b/streaming/src/main/scala/spark/streaming/api/java/JavaPairDStream.scala new file mode 100644 index 0000000000..01dda24fde --- /dev/null +++ b/streaming/src/main/scala/spark/streaming/api/java/JavaPairDStream.scala @@ -0,0 +1,134 @@ +package spark.streaming.api.java + +import java.util.{List => JList} + +import scala.collection.JavaConversions._ + +import spark.streaming._ +import spark.streaming.StreamingContext._ +import spark.api.java.function.{Function => JFunction, Function2 => JFunction2} +import spark.Partitioner + +class JavaPairDStream[K, V](val dstream: DStream[(K, V)])( + implicit val kManifiest: ClassManifest[K], + implicit val vManifest: ClassManifest[V]) + extends JavaDStreamLike[(K, V), JavaPairDStream[K, V]] { + + def filter(f: JFunction[(K, V), java.lang.Boolean]): JavaPairDStream[K, V] = { + dstream.filter((x => f(x).booleanValue())) + } + + def groupByKey(): JavaPairDStream[K, JList[V]] = { + dstream.groupByKey().mapValues(seqAsJavaList _) + } + + def groupByKey(numPartitions: Int): JavaPairDStream[K, JList[V]] = { + dstream.groupByKey(numPartitions).mapValues(seqAsJavaList _) + } + + def groupByKey(partitioner: Partitioner): JavaPairDStream[K, JList[V]] = { + dstream.groupByKey(partitioner).mapValues(seqAsJavaList _) + } + + def reduceByKey(func: JFunction2[V, V, V]): JavaPairDStream[K, V] = { + dstream.reduceByKey(func) + } + + def reduceByKey(func: JFunction2[V, V, V], numPartitions: Int): JavaPairDStream[K, V] = { + dstream.reduceByKey(func, numPartitions) + } + + // TODO: TEST BELOW + def combineByKey[C](createCombiner: Function[V, C], + mergeValue: JFunction2[C, V, C], + mergeCombiners: JFunction2[C, C, C], + partitioner: 
Partitioner): JavaPairDStream[K, C] = { + implicit val cm: ClassManifest[C] = + implicitly[ClassManifest[AnyRef]].asInstanceOf[ClassManifest[C]] + dstream.combineByKey(createCombiner, mergeValue, mergeCombiners, partitioner) + } + + def countByKey(numPartitions: Int): JavaPairDStream[K, Long] = { + dstream.countByKey(numPartitions); + } + + def countByKey(): JavaPairDStream[K, Long] = { + dstream.countByKey(); + } + + def groupByKeyAndWindow(windowTime: Time, slideTime: Time): JavaPairDStream[K, JList[V]] = { + dstream.groupByKeyAndWindow(windowTime, slideTime).mapValues(seqAsJavaList _) + } + + def groupByKeyAndWindow(windowTime: Time, slideTime: Time, numPartitions: Int): + JavaPairDStream[K, JList[V]] = { + dstream.groupByKeyAndWindow(windowTime, slideTime, numPartitions).mapValues(seqAsJavaList _) + } + + def groupByKeyAndWindow(windowTime: Time, slideTime: Time, partitioner: Partitioner): + JavaPairDStream[K, JList[V]] = { + dstream.groupByKeyAndWindow(windowTime, slideTime, partitioner).mapValues(seqAsJavaList _) + } + + def reduceByKeyAndWindow(reduceFunc: Function2[V, V, V], windowTime: Time): + JavaPairDStream[K, V] = { + dstream.reduceByKeyAndWindow(reduceFunc, windowTime) + } + + def reduceByKeyAndWindow(reduceFunc: Function2[V, V, V], windowTime: Time, slideTime: Time): + JavaPairDStream[K, V] = { + dstream.reduceByKeyAndWindow(reduceFunc, windowTime, slideTime) + } + + def reduceByKeyAndWindow(reduceFunc: Function2[V, V, V], windowTime: Time, slideTime: Time, + numPartitions: Int): JavaPairDStream[K, V] = { + dstream.reduceByKeyAndWindow(reduceFunc, windowTime, slideTime, numPartitions) + } + + def reduceByKeyAndWindow(reduceFunc: Function2[V, V, V], windowTime: Time, slideTime: Time, + partitioner: Partitioner): JavaPairDStream[K, V] = { + dstream.reduceByKeyAndWindow(reduceFunc, windowTime, slideTime, partitioner) + } + + def reduceByKeyAndWindow(reduceFunc: Function2[V, V, V], invReduceFunc: Function2[V, V, V], + windowTime: Time, slideTime: Time): JavaPairDStream[K, V] = { + dstream.reduceByKeyAndWindow(reduceFunc, invReduceFunc, windowTime, slideTime) + } + + def reduceByKeyAndWindow(reduceFunc: Function2[V, V, V], invReduceFunc: Function2[V, V, V], + windowTime: Time, slideTime: Time, numPartitions: Int): JavaPairDStream[K, V] = { + dstream.reduceByKeyAndWindow(reduceFunc, invReduceFunc, windowTime, slideTime, numPartitions) + } + + def reduceByKeyAndWindow(reduceFunc: Function2[V, V, V], invReduceFunc: Function2[V, V, V], + windowTime: Time, slideTime: Time, partitioner: Partitioner) + : JavaPairDStream[K, V] = { + dstream.reduceByKeyAndWindow(reduceFunc, invReduceFunc, windowTime, slideTime, partitioner) + } + + def countByKeyAndWindow(windowTime: Time, slideTime: Time): JavaPairDStream[K, Long] = { + dstream.countByKeyAndWindow(windowTime, slideTime) + } + + def countByKeyAndWindow(windowTime: Time, slideTime: Time, numPartitions: Int) + : JavaPairDStream[K, Long] = { + dstream.countByKeyAndWindow(windowTime, slideTime, numPartitions) + } + + override val classManifest: ClassManifest[(K, V)] = + implicitly[ClassManifest[AnyRef]].asInstanceOf[ClassManifest[Tuple2[K, V]]] +} + +object JavaPairDStream { + implicit def fromPairDStream[K: ClassManifest, V: ClassManifest](dstream: DStream[(K, V)]): + JavaPairDStream[K, V] = + new JavaPairDStream[K, V](dstream) + + def fromJavaDStream[K, V](dstream: JavaDStream[(K, V)]): JavaPairDStream[K, V] = { + implicit val cmk: ClassManifest[K] = + implicitly[ClassManifest[AnyRef]].asInstanceOf[ClassManifest[K]] + implicit val cmv: 
ClassManifest[V] = + implicitly[ClassManifest[AnyRef]].asInstanceOf[ClassManifest[V]] + new JavaPairDStream[K, V](dstream.dstream) + } +} diff --git a/streaming/src/test/scala/JavaTestUtils.scala b/streaming/src/test/scala/JavaTestUtils.scala index 776b0e6bb6..9f3a80df8b 100644 --- a/streaming/src/test/scala/JavaTestUtils.scala +++ b/streaming/src/test/scala/JavaTestUtils.scala @@ -2,7 +2,7 @@ package spark.streaming import collection.mutable.{SynchronizedBuffer, ArrayBuffer} import java.util.{List => JList} -import spark.streaming.api.java.{JavaDStream, JavaStreamingContext} +import api.java.{JavaPairDStream, JavaDStreamLike, JavaDStream, JavaStreamingContext} import spark.streaming._ import java.util.ArrayList import collection.JavaConversions._ @@ -20,7 +20,8 @@ object JavaTestUtils extends TestSuiteBase { new JavaDStream[T](dstream) } - def attachTestOutputStream[T](dstream: JavaDStream[T]) = { + def attachTestOutputStream[T, This <: spark.streaming.api.java.JavaDStreamLike[T,This]] + (dstream: JavaDStreamLike[T, This]) = { implicit val cm: ClassManifest[T] = implicitly[ClassManifest[AnyRef]].asInstanceOf[ClassManifest[T]] val ostream = new TestOutputStream(dstream.dstream, @@ -37,6 +38,5 @@ object JavaTestUtils extends TestSuiteBase { res.map(entry => out.append(new ArrayList[V](entry))) out } - } diff --git a/streaming/src/test/scala/spark/streaming/JavaAPISuite.java b/streaming/src/test/scala/spark/streaming/JavaAPISuite.java index c4629c8d97..c1373e6275 100644 --- a/streaming/src/test/scala/spark/streaming/JavaAPISuite.java +++ b/streaming/src/test/scala/spark/streaming/JavaAPISuite.java @@ -5,12 +5,15 @@ import org.junit.Assert; import org.junit.After; import org.junit.Before; import org.junit.Test; +import scala.Tuple2; import spark.api.java.JavaRDD; import spark.api.java.function.FlatMapFunction; import spark.api.java.function.Function; import spark.api.java.function.Function2; +import spark.api.java.function.PairFunction; import spark.streaming.JavaTestUtils; import spark.streaming.api.java.JavaDStream; +import spark.streaming.api.java.JavaPairDStream; import spark.streaming.api.java.JavaStreamingContext; import java.io.Serializable; @@ -340,4 +343,109 @@ public class JavaAPISuite implements Serializable { Assert.assertEquals(expected, actual); } + + // PairDStream Functions + @Test + public void testPairFilter() { + List> inputData = Arrays.asList( + Arrays.asList("giants", "dodgers"), + Arrays.asList("yankees", "red socks")); + + List>> expected = Arrays.asList( + Arrays.asList(new Tuple2("giants", 6)), + Arrays.asList(new Tuple2("yankees", 7))); + + JavaDStream stream = JavaTestUtils.attachTestInputStream(sc, inputData, 1); + JavaPairDStream pairStream = stream.map( + new PairFunction() { + @Override + public Tuple2 call(String in) throws Exception { + return new Tuple2(in, in.length()); + } + }); + + JavaPairDStream filtered = pairStream.filter( + new Function, Boolean>() { + @Override + public Boolean call(Tuple2 in) throws Exception { + return in._1().contains("a"); + } + }); + JavaTestUtils.attachTestOutputStream(filtered); + List>> result = JavaTestUtils.runStreams(sc, 2, 2); + + Assert.assertEquals(expected, result); + } + + @Test + public void testPairGroupByKey() { + List>> inputData = Arrays.asList( + Arrays.asList(new Tuple2("california", "dodgers"), + new Tuple2("california", "giants"), + new Tuple2("new york", "yankees"), + new Tuple2("new york", "mets")), + Arrays.asList(new Tuple2("california", "sharks"), + new Tuple2("california", "ducks"), + new Tuple2("new 
york", "rangers"), + new Tuple2("new york", "islanders"))); + + + List>>> expected = Arrays.asList( + Arrays.asList( + new Tuple2>("california", Arrays.asList("dodgers", "giants")), + new Tuple2>("new york", Arrays.asList("yankees", "mets"))), + Arrays.asList( + new Tuple2>("california", Arrays.asList("sharks", "ducks")), + new Tuple2>("new york", Arrays.asList("rangers", "islanders")))); + + JavaDStream> stream = JavaTestUtils.attachTestInputStream(sc, inputData, 1); + JavaPairDStream pairStream = JavaPairDStream.fromJavaDStream(stream); + + JavaPairDStream> grouped = pairStream.groupByKey(); + JavaTestUtils.attachTestOutputStream(grouped); + List>>> result = JavaTestUtils.runStreams(sc, 2, 2); + + Assert.assertEquals(expected, result); + } + + @Test + public void testPairReduceByKey() { + List>> inputData = Arrays.asList( + Arrays.asList( + new Tuple2("california", 1), + new Tuple2("california", 3), + new Tuple2("new york", 4), + new Tuple2("new york", 1)), + Arrays.asList( + new Tuple2("california", 5), + new Tuple2("california", 5), + new Tuple2("new york", 3), + new Tuple2("new york", 1))); + + + List>> expected = Arrays.asList( + Arrays.asList( + new Tuple2("california", 4), + new Tuple2("new york", 5)), + Arrays.asList( + new Tuple2("california", 10), + new Tuple2("new york", 4))); + + JavaDStream> stream = JavaTestUtils.attachTestInputStream( + sc, inputData, 1); + JavaPairDStream pairStream = JavaPairDStream.fromJavaDStream(stream); + + JavaPairDStream reduced = pairStream.reduceByKey( + new Function2() { + @Override + public Integer call(Integer i1, Integer i2) throws Exception { + return i1 + i2; + } + }); + + JavaTestUtils.attachTestOutputStream(reduced); + List>> result = JavaTestUtils.runStreams(sc, 2, 2); + + Assert.assertEquals(expected, result); + } } From 056f5efc557a8fcb8871d5abbee082b6398ba78c Mon Sep 17 00:00:00 2001 From: Patrick Wendell Date: Mon, 7 Jan 2013 11:26:38 -0800 Subject: [PATCH 194/226] More pair functions --- .../streaming/api/java/JavaPairDStream.scala | 43 ++++++++++++++++++- 1 file changed, 42 insertions(+), 1 deletion(-) diff --git a/streaming/src/main/scala/spark/streaming/api/java/JavaPairDStream.scala b/streaming/src/main/scala/spark/streaming/api/java/JavaPairDStream.scala index 01dda24fde..cb80a2f3e7 100644 --- a/streaming/src/main/scala/spark/streaming/api/java/JavaPairDStream.scala +++ b/streaming/src/main/scala/spark/streaming/api/java/JavaPairDStream.scala @@ -6,7 +6,7 @@ import scala.collection.JavaConversions._ import spark.streaming._ import spark.streaming.StreamingContext._ -import spark.api.java.function.{Function => JFunction, Function2 => JFunction2} +import spark.api.java.function.{Function => JFunction, Function2 => JFunction2, FlatMapFunction} import spark.Partitioner class JavaPairDStream[K, V](val dstream: DStream[(K, V)])( @@ -115,6 +115,47 @@ class JavaPairDStream[K, V](val dstream: DStream[(K, V)])( dstream.countByKeyAndWindow(windowTime, slideTime, numPartitions) } + def mapValues[U](f: JFunction[V, U]): JavaPairDStream[K, U] = { + implicit val cm: ClassManifest[U] = + implicitly[ClassManifest[AnyRef]].asInstanceOf[ClassManifest[U]] + dstream.mapValues(f) + } + + def flatMapValues[U](f: JFunction[V, java.lang.Iterable[U]]): JavaPairDStream[K, U] = { + import scala.collection.JavaConverters._ + def fn = (x: V) => f.apply(x).asScala + implicit val cm: ClassManifest[U] = + implicitly[ClassManifest[AnyRef]].asInstanceOf[ClassManifest[U]] + dstream.flatMapValues(fn) + } + + def cogroup[W](other: JavaPairDStream[K, W]): 
JavaPairDStream[K, (JList[V], JList[W])] = { + implicit val cm: ClassManifest[W] = + implicitly[ClassManifest[AnyRef]].asInstanceOf[ClassManifest[W]] + dstream.cogroup(other.dstream).mapValues(t => (seqAsJavaList(t._1), seqAsJavaList((t._2)))) + } + + def cogroup[W](other: JavaPairDStream[K, W], partitioner: Partitioner) + : JavaPairDStream[K, (JList[V], JList[W])] = { + implicit val cm: ClassManifest[W] = + implicitly[ClassManifest[AnyRef]].asInstanceOf[ClassManifest[W]] + dstream.cogroup(other.dstream, partitioner) + .mapValues(t => (seqAsJavaList(t._1), seqAsJavaList((t._2)))) + } + + def join[W](other: JavaPairDStream[K, W]): JavaPairDStream[K, (V, W)] = { + implicit val cm: ClassManifest[W] = + implicitly[ClassManifest[AnyRef]].asInstanceOf[ClassManifest[W]] + dstream.join(other.dstream) + } + + def join[W](other: JavaPairDStream[K, W], partitioner: Partitioner) + : JavaPairDStream[K, (V, W)] = { + implicit val cm: ClassManifest[W] = + implicitly[ClassManifest[AnyRef]].asInstanceOf[ClassManifest[W]] + dstream.join(other.dstream, partitioner) + } + override val classManifest: ClassManifest[(K, V)] = implicitly[ClassManifest[AnyRef]].asInstanceOf[ClassManifest[Tuple2[K, V]]] } From 74182010a4916c5b03ec74c54c21d89bcab36723 Mon Sep 17 00:00:00 2001 From: Patrick Wendell Date: Mon, 7 Jan 2013 20:19:57 -0800 Subject: [PATCH 195/226] Style cleanup and moving functions --- .../streaming/api/java/JavaDStream.scala | 26 +++- .../streaming/api/java/JavaDStreamLike.scala | 41 +----- .../streaming/api/java/JavaPairDStream.scala | 128 ++++++++++++++---- 3 files changed, 128 insertions(+), 67 deletions(-) diff --git a/streaming/src/main/scala/spark/streaming/api/java/JavaDStream.scala b/streaming/src/main/scala/spark/streaming/api/java/JavaDStream.scala index 9e2823d81f..9bf595e0bc 100644 --- a/streaming/src/main/scala/spark/streaming/api/java/JavaDStream.scala +++ b/streaming/src/main/scala/spark/streaming/api/java/JavaDStream.scala @@ -1,14 +1,36 @@ package spark.streaming.api.java -import spark.streaming.DStream +import spark.streaming.{Time, DStream} import spark.api.java.function.{Function => JFunction} +import spark.api.java.JavaRDD +import java.util.{List => JList} class JavaDStream[T](val dstream: DStream[T])(implicit val classManifest: ClassManifest[T]) extends JavaDStreamLike[T, JavaDStream[T]] { - def filter(f: JFunction[T, java.lang.Boolean]): JavaDStream[T] = { + def filter(f: JFunction[T, java.lang.Boolean]): JavaDStream[T] = dstream.filter((x => f(x).booleanValue())) + + def cache(): JavaDStream[T] = dstream.cache() + + def compute(validTime: Time): JavaRDD[T] = { + dstream.compute(validTime) match { + case Some(rdd) => new JavaRDD(rdd) + case None => null + } } + + def window(windowTime: Time): JavaDStream[T] = + dstream.window(windowTime) + + def window(windowTime: Time, slideTime: Time): JavaDStream[T] = + dstream.window(windowTime, slideTime) + + def tumble(batchTime: Time): JavaDStream[T] = + dstream.tumble(batchTime) + + def union(that: JavaDStream[T]): JavaDStream[T] = + dstream.union(that.dstream) } object JavaDStream { diff --git a/streaming/src/main/scala/spark/streaming/api/java/JavaDStreamLike.scala b/streaming/src/main/scala/spark/streaming/api/java/JavaDStreamLike.scala index daea56f50c..b11859ceaf 100644 --- a/streaming/src/main/scala/spark/streaming/api/java/JavaDStreamLike.scala +++ b/streaming/src/main/scala/spark/streaming/api/java/JavaDStreamLike.scala @@ -18,40 +18,17 @@ trait JavaDStreamLike[T, This <: JavaDStreamLike[T, This]] extends Serializable def print() = 
dstream.print() - // TODO move to type-specific implementations - def cache() : JavaDStream[T] = { - dstream.cache() - } - - def count() : JavaDStream[Int] = { - dstream.count() - } + def count(): JavaDStream[Int] = dstream.count() def countByWindow(windowTime: Time, slideTime: Time) : JavaDStream[Int] = { dstream.countByWindow(windowTime, slideTime) } - def compute(validTime: Time): JavaRDD[T] = { - dstream.compute(validTime) match { - case Some(rdd) => new JavaRDD(rdd) - case None => null - } - } + def glom(): JavaDStream[JList[T]] = + new JavaDStream(dstream.glom().map(x => new java.util.ArrayList[T](x.toSeq))) def context(): StreamingContext = dstream.context() - def window(windowTime: Time): JavaDStream[T] = { - dstream.window(windowTime) - } - - def window(windowTime: Time, slideTime: Time): JavaDStream[T] = { - dstream.window(windowTime, slideTime) - } - - def tumble(batchTime: Time): JavaDStream[T] = { - dstream.tumble(batchTime) - } - def map[R](f: JFunction[T, R]): JavaDStream[R] = { new JavaDStream(dstream.map(f)(f.returnType()))(f.returnType()) } @@ -61,10 +38,6 @@ trait JavaDStreamLike[T, This <: JavaDStreamLike[T, This]] extends Serializable new JavaPairDStream(dstream.map(f)(cm))(f.keyType(), f.valueType()) } - def glom(): JavaDStream[JList[T]] = { - new JavaDStream(dstream.glom().map(x => new java.util.ArrayList[T](x.toSeq))) - } - // TODO: Other map partitions def mapPartitions[U](f: FlatMapFunction[java.util.Iterator[T], U]): JavaDStream[U] = { def fn = (x: Iterator[T]) => asScalaIterator(f.apply(asJavaIterator(x)).iterator()) @@ -85,11 +58,11 @@ trait JavaDStreamLike[T, This <: JavaDStreamLike[T, This]] extends Serializable new util.ArrayList(dstream.slice(fromTime, toTime).map(new JavaRDD(_)).toSeq) } - def foreach(foreachFunc: JFunction[JavaRDD[T], Void]) = { + def foreach(foreachFunc: JFunction[JavaRDD[T], Void]) { dstream.foreach(rdd => foreachFunc.call(new JavaRDD(rdd))) } - def foreach(foreachFunc: JFunction2[JavaRDD[T], Time, Void]) = { + def foreach(foreachFunc: JFunction2[JavaRDD[T], Time, Void]) { dstream.foreach((rdd, time) => foreachFunc.call(new JavaRDD(rdd), time)) } @@ -102,8 +75,4 @@ trait JavaDStreamLike[T, This <: JavaDStreamLike[T, This]] extends Serializable dstream.transform(scalaTransform(_)) } // TODO: transform with time - - def union(that: JavaDStream[T]): JavaDStream[T] = { - dstream.union(that.dstream) - } } \ No newline at end of file diff --git a/streaming/src/main/scala/spark/streaming/api/java/JavaPairDStream.scala b/streaming/src/main/scala/spark/streaming/api/java/JavaPairDStream.scala index cb80a2f3e7..f6dfbb2345 100644 --- a/streaming/src/main/scala/spark/streaming/api/java/JavaPairDStream.scala +++ b/streaming/src/main/scala/spark/streaming/api/java/JavaPairDStream.scala @@ -6,43 +6,64 @@ import scala.collection.JavaConversions._ import spark.streaming._ import spark.streaming.StreamingContext._ -import spark.api.java.function.{Function => JFunction, Function2 => JFunction2, FlatMapFunction} +import spark.api.java.function.{Function => JFunction, Function2 => JFunction2} import spark.Partitioner +import org.apache.hadoop.mapred.{JobConf, OutputFormat} +import org.apache.hadoop.mapreduce.{OutputFormat => NewOutputFormat} +import org.apache.hadoop.conf.Configuration +import spark.api.java.{JavaPairRDD, JavaRDD} class JavaPairDStream[K, V](val dstream: DStream[(K, V)])( implicit val kManifiest: ClassManifest[K], implicit val vManifest: ClassManifest[V]) extends JavaDStreamLike[(K, V), JavaPairDStream[K, V]] { - def filter(f: JFunction[(K, 
V), java.lang.Boolean]): JavaPairDStream[K, V] = { + // Common to all DStream's + def filter(f: JFunction[(K, V), java.lang.Boolean]): JavaPairDStream[K, V] = dstream.filter((x => f(x).booleanValue())) + + def cache(): JavaPairDStream[K, V] = dstream.cache() + + def compute(validTime: Time): JavaPairRDD[K, V] = { + dstream.compute(validTime) match { + case Some(rdd) => new JavaPairRDD(rdd) + case None => null + } } - def groupByKey(): JavaPairDStream[K, JList[V]] = { + def window(windowTime: Time): JavaPairDStream[K, V] = + dstream.window(windowTime) + + def window(windowTime: Time, slideTime: Time): JavaPairDStream[K, V] = + dstream.window(windowTime, slideTime) + + def tumble(batchTime: Time): JavaPairDStream[K, V] = + dstream.tumble(batchTime) + + def union(that: JavaPairDStream[K, V]): JavaPairDStream[K, V] = + dstream.union(that.dstream) + + // Only for PairDStreams... + def groupByKey(): JavaPairDStream[K, JList[V]] = dstream.groupByKey().mapValues(seqAsJavaList _) - } - def groupByKey(numPartitions: Int): JavaPairDStream[K, JList[V]] = { + def groupByKey(numPartitions: Int): JavaPairDStream[K, JList[V]] = dstream.groupByKey(numPartitions).mapValues(seqAsJavaList _) - } - def groupByKey(partitioner: Partitioner): JavaPairDStream[K, JList[V]] = { + def groupByKey(partitioner: Partitioner): JavaPairDStream[K, JList[V]] = dstream.groupByKey(partitioner).mapValues(seqAsJavaList _) - } - def reduceByKey(func: JFunction2[V, V, V]): JavaPairDStream[K, V] = { + def reduceByKey(func: JFunction2[V, V, V]): JavaPairDStream[K, V] = dstream.reduceByKey(func) - } - def reduceByKey(func: JFunction2[V, V, V], numPartitions: Int): JavaPairDStream[K, V] = { + def reduceByKey(func: JFunction2[V, V, V], numPartitions: Int): JavaPairDStream[K, V] = dstream.reduceByKey(func, numPartitions) - } // TODO: TEST BELOW def combineByKey[C](createCombiner: Function[V, C], - mergeValue: JFunction2[C, V, C], - mergeCombiners: JFunction2[C, C, C], - partitioner: Partitioner): JavaPairDStream[K, C] = { + mergeValue: JFunction2[C, V, C], + mergeCombiners: JFunction2[C, C, C], + partitioner: Partitioner): JavaPairDStream[K, C] = { implicit val cm: ClassManifest[C] = implicitly[ClassManifest[AnyRef]].asInstanceOf[ClassManifest[C]] dstream.combineByKey(createCombiner, mergeValue, mergeCombiners, partitioner) @@ -60,28 +81,31 @@ class JavaPairDStream[K, V](val dstream: DStream[(K, V)])( dstream.groupByKeyAndWindow(windowTime, slideTime).mapValues(seqAsJavaList _) } - def groupByKeyAndWindow(windowTime: Time, slideTime: Time, numPartitions: Int): - JavaPairDStream[K, JList[V]] = { + def groupByKeyAndWindow(windowTime: Time, slideTime: Time, numPartitions: Int) + :JavaPairDStream[K, JList[V]] = { dstream.groupByKeyAndWindow(windowTime, slideTime, numPartitions).mapValues(seqAsJavaList _) } - def groupByKeyAndWindow(windowTime: Time, slideTime: Time, partitioner: Partitioner): - JavaPairDStream[K, JList[V]] = { + def groupByKeyAndWindow(windowTime: Time, slideTime: Time, partitioner: Partitioner) + :JavaPairDStream[K, JList[V]] = { dstream.groupByKeyAndWindow(windowTime, slideTime, partitioner).mapValues(seqAsJavaList _) } - def reduceByKeyAndWindow(reduceFunc: Function2[V, V, V], windowTime: Time): - JavaPairDStream[K, V] = { + def reduceByKeyAndWindow(reduceFunc: Function2[V, V, V], windowTime: Time) + :JavaPairDStream[K, V] = { dstream.reduceByKeyAndWindow(reduceFunc, windowTime) } - def reduceByKeyAndWindow(reduceFunc: Function2[V, V, V], windowTime: Time, slideTime: Time): - JavaPairDStream[K, V] = { + def 
reduceByKeyAndWindow(reduceFunc: Function2[V, V, V], windowTime: Time, slideTime: Time) + :JavaPairDStream[K, V] = { dstream.reduceByKeyAndWindow(reduceFunc, windowTime, slideTime) } - def reduceByKeyAndWindow(reduceFunc: Function2[V, V, V], windowTime: Time, slideTime: Time, - numPartitions: Int): JavaPairDStream[K, V] = { + def reduceByKeyAndWindow( + reduceFunc: Function2[V, V, V], + windowTime: Time, + slideTime: Time, + numPartitions: Int): JavaPairDStream[K, V] = { dstream.reduceByKeyAndWindow(reduceFunc, windowTime, slideTime, numPartitions) } @@ -136,7 +160,7 @@ class JavaPairDStream[K, V](val dstream: DStream[(K, V)])( } def cogroup[W](other: JavaPairDStream[K, W], partitioner: Partitioner) - : JavaPairDStream[K, (JList[V], JList[W])] = { + : JavaPairDStream[K, (JList[V], JList[W])] = { implicit val cm: ClassManifest[W] = implicitly[ClassManifest[AnyRef]].asInstanceOf[ClassManifest[W]] dstream.cogroup(other.dstream, partitioner) @@ -150,19 +174,65 @@ class JavaPairDStream[K, V](val dstream: DStream[(K, V)])( } def join[W](other: JavaPairDStream[K, W], partitioner: Partitioner) - : JavaPairDStream[K, (V, W)] = { + : JavaPairDStream[K, (V, W)] = { implicit val cm: ClassManifest[W] = implicitly[ClassManifest[AnyRef]].asInstanceOf[ClassManifest[W]] dstream.join(other.dstream, partitioner) } + def saveAsHadoopFiles[F <: OutputFormat[K, V]](prefix: String, suffix: String) { + dstream.saveAsHadoopFiles(prefix, suffix) + } + + def saveAsHadoopFiles( + prefix: String, + suffix: String, + keyClass: Class[_], + valueClass: Class[_], + outputFormatClass: Class[_ <: OutputFormat[_, _]]) { + dstream.saveAsHadoopFiles(prefix, suffix, keyClass, valueClass, outputFormatClass) + } + + def saveAsHadoopFiles( + prefix: String, + suffix: String, + keyClass: Class[_], + valueClass: Class[_], + outputFormatClass: Class[_ <: OutputFormat[_, _]], + conf: JobConf) { + dstream.saveAsHadoopFiles(prefix, suffix, keyClass, valueClass, outputFormatClass, conf) + } + + def saveAsNewAPIHadoopFiles[F <: NewOutputFormat[K, V]](prefix: String, suffix: String) { + dstream.saveAsNewAPIHadoopFiles(prefix, suffix) + } + + def saveAsNewAPIHadoopFiles( + prefix: String, + suffix: String, + keyClass: Class[_], + valueClass: Class[_], + outputFormatClass: Class[_ <: NewOutputFormat[_, _]]) { + dstream.saveAsNewAPIHadoopFiles(prefix, suffix, keyClass, valueClass, outputFormatClass) + } + + def saveAsNewAPIHadoopFiles( + prefix: String, + suffix: String, + keyClass: Class[_], + valueClass: Class[_], + outputFormatClass: Class[_ <: NewOutputFormat[_, _]], + conf: Configuration = new Configuration) { + dstream.saveAsNewAPIHadoopFiles(prefix, suffix, keyClass, valueClass, outputFormatClass, conf) + } + override val classManifest: ClassManifest[(K, V)] = implicitly[ClassManifest[AnyRef]].asInstanceOf[ClassManifest[Tuple2[K, V]]] } object JavaPairDStream { - implicit def fromPairDStream[K: ClassManifest, V: ClassManifest](dstream: DStream[(K, V)]): - JavaPairDStream[K, V] = + implicit def fromPairDStream[K: ClassManifest, V: ClassManifest](dstream: DStream[(K, V)]) + :JavaPairDStream[K, V] = new JavaPairDStream[K, V](dstream) def fromJavaDStream[K, V](dstream: JavaDStream[(K, V)]): JavaPairDStream[K, V] = { From 7e1049d8f1b155a4bd742e84927c4cc83bb71cb6 Mon Sep 17 00:00:00 2001 From: Patrick Wendell Date: Mon, 7 Jan 2013 21:13:55 -0800 Subject: [PATCH 196/226] Squashing a few TODOs --- .../streaming/api/java/JavaDStreamLike.scala | 19 +++++++++++++++---- 1 file changed, 15 insertions(+), 4 deletions(-) diff --git 
a/streaming/src/main/scala/spark/streaming/api/java/JavaDStreamLike.scala b/streaming/src/main/scala/spark/streaming/api/java/JavaDStreamLike.scala index b11859ceaf..05d89918b2 100644 --- a/streaming/src/main/scala/spark/streaming/api/java/JavaDStreamLike.scala +++ b/streaming/src/main/scala/spark/streaming/api/java/JavaDStreamLike.scala @@ -38,12 +38,17 @@ trait JavaDStreamLike[T, This <: JavaDStreamLike[T, This]] extends Serializable new JavaPairDStream(dstream.map(f)(cm))(f.keyType(), f.valueType()) } - // TODO: Other map partitions def mapPartitions[U](f: FlatMapFunction[java.util.Iterator[T], U]): JavaDStream[U] = { def fn = (x: Iterator[T]) => asScalaIterator(f.apply(asJavaIterator(x)).iterator()) new JavaDStream(dstream.mapPartitions(fn)(f.elementType()))(f.elementType()) } + def mapPartitions[K, V](f: PairFlatMapFunction[java.util.Iterator[T], K, V]) + : JavaPairDStream[K, V] = { + def fn = (x: Iterator[T]) => asScalaIterator(f.apply(asJavaIterator(x)).iterator()) + new JavaPairDStream(dstream.mapPartitions(fn))(f.keyType(), f.valueType()) + } + def reduce(f: JFunction2[T, T, T]): JavaDStream[T] = dstream.reduce(f) def reduceByWindow( @@ -69,10 +74,16 @@ trait JavaDStreamLike[T, This <: JavaDStreamLike[T, This]] extends Serializable def transform[U](transformFunc: JFunction[JavaRDD[T], JavaRDD[U]]): JavaDStream[U] = { implicit val cm: ClassManifest[U] = implicitly[ClassManifest[AnyRef]].asInstanceOf[ClassManifest[U]] - def scalaTransform (in: RDD[T]): RDD[U] = { + def scalaTransform (in: RDD[T]): RDD[U] = transformFunc.call(new JavaRDD[T](in)).rdd - } dstream.transform(scalaTransform(_)) } - // TODO: transform with time + + def transform[U](transformFunc: JFunction2[JavaRDD[T], Time, JavaRDD[U]]): JavaDStream[U] = { + implicit val cm: ClassManifest[U] = + implicitly[ClassManifest[AnyRef]].asInstanceOf[ClassManifest[U]] + def scalaTransform (in: RDD[T], time: Time): RDD[U] = + transformFunc.call(new JavaRDD[T](in), time).rdd + dstream.transform(scalaTransform(_, _)) + } } \ No newline at end of file From 560c312c6060914c9c38cb98d3587685f10f7311 Mon Sep 17 00:00:00 2001 From: Patrick Wendell Date: Wed, 9 Jan 2013 19:27:32 -0800 Subject: [PATCH 197/226] Docs, some tests, and work on StreamingContext --- .../streaming/api/java/JavaDStream.scala | 33 ++ .../streaming/api/java/JavaDStreamLike.scala | 59 +++ .../streaming/api/java/JavaPairDStream.scala | 46 ++- .../api/java/JavaStreamingContext.scala | 152 +++++++- .../scala/spark/streaming/JavaAPISuite.java | 367 ++++++++++++++++-- 5 files changed, 621 insertions(+), 36 deletions(-) diff --git a/streaming/src/main/scala/spark/streaming/api/java/JavaDStream.scala b/streaming/src/main/scala/spark/streaming/api/java/JavaDStream.scala index 9bf595e0bc..1e5c279e2c 100644 --- a/streaming/src/main/scala/spark/streaming/api/java/JavaDStream.scala +++ b/streaming/src/main/scala/spark/streaming/api/java/JavaDStream.scala @@ -4,15 +4,25 @@ import spark.streaming.{Time, DStream} import spark.api.java.function.{Function => JFunction} import spark.api.java.JavaRDD import java.util.{List => JList} +import spark.storage.StorageLevel class JavaDStream[T](val dstream: DStream[T])(implicit val classManifest: ClassManifest[T]) extends JavaDStreamLike[T, JavaDStream[T]] { + /** Returns a new DStream containing only the elements that satisfy a predicate. 
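The transform wrapper above, together with the Time-taking overload added alongside it, lets a Java caller apply an arbitrary RDD-to-RDD function to every batch. A short, hedged fragment, assuming an existing JavaDStream<String> named lines (an illustrative name) and that JavaRDD exposes distinct() as in the core Java API:

JavaDStream<String> deduped = lines.transform(
    new Function<JavaRDD<String>, JavaRDD<String>>() {
      @Override
      public JavaRDD<String> call(JavaRDD<String> rdd) {
        // Any RDD-level operation can be used here; this one drops duplicate
        // records within each batch.
        return rdd.distinct();
      }
    });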
*/ def filter(f: JFunction[T, java.lang.Boolean]): JavaDStream[T] = dstream.filter((x => f(x).booleanValue())) + /** Persists RDDs of this DStream with the default storage level (MEMORY_ONLY_SER) */ def cache(): JavaDStream[T] = dstream.cache() + /** Persists RDDs of this DStream with the default storage level (MEMORY_ONLY_SER) */ + def persist(): JavaDStream[T] = dstream.cache() + + /** Persists the RDDs of this DStream with the given storage level */ + def persist(storageLevel: StorageLevel): JavaDStream[T] = dstream.persist(storageLevel) + + /** Method that generates a RDD for the given time */ def compute(validTime: Time): JavaRDD[T] = { dstream.compute(validTime) match { case Some(rdd) => new JavaRDD(rdd) @@ -20,15 +30,38 @@ class JavaDStream[T](val dstream: DStream[T])(implicit val classManifest: ClassM } } + /** + * Return a new DStream which is computed based on windowed batches of this DStream. + * The new DStream generates RDDs with the same interval as this DStream. + * @param windowTime width of the window; must be a multiple of this DStream's interval. + * @return + */ def window(windowTime: Time): JavaDStream[T] = dstream.window(windowTime) + /** + * Return a new DStream which is computed based on windowed batches of this DStream. + * @param windowTime duration (i.e., width) of the window; + * must be a multiple of this DStream's interval + * @param slideTime sliding interval of the window (i.e., the interval after which + * the new DStream will generate RDDs); must be a multiple of this + * DStream's interval + */ def window(windowTime: Time, slideTime: Time): JavaDStream[T] = dstream.window(windowTime, slideTime) + /** + * Returns a new DStream which computed based on tumbling window on this DStream. + * This is equivalent to window(batchTime, batchTime). + * @param batchTime tumbling window duration; must be a multiple of this DStream's interval + */ def tumble(batchTime: Time): JavaDStream[T] = dstream.tumble(batchTime) + /** + * Returns a new DStream by unifying data of another DStream with this DStream. + * @param that Another DStream having the same interval (i.e., slideTime) as this DStream. + */ def union(that: JavaDStream[T]): JavaDStream[T] = dstream.union(that.dstream) } diff --git a/streaming/src/main/scala/spark/streaming/api/java/JavaDStreamLike.scala b/streaming/src/main/scala/spark/streaming/api/java/JavaDStreamLike.scala index 05d89918b2..23a0aaaefd 100644 --- a/streaming/src/main/scala/spark/streaming/api/java/JavaDStreamLike.scala +++ b/streaming/src/main/scala/spark/streaming/api/java/JavaDStreamLike.scala @@ -16,41 +16,81 @@ trait JavaDStreamLike[T, This <: JavaDStreamLike[T, This]] extends Serializable def dstream: DStream[T] + /** + * Prints the first ten elements of each RDD generated in this DStream. This is an output + * operator, so this DStream will be registered as an output stream and there materialized. + */ def print() = dstream.print() + /** + * Returns a new DStream in which each RDD has a single element generated by counting each RDD + * of this DStream. + */ def count(): JavaDStream[Int] = dstream.count() + /** + * Returns a new DStream in which each RDD has a single element generated by counting the number + * of elements in a window over this DStream. windowTime and slideTime are as defined in the + * window() operation. 
This is equivalent to window(windowTime, slideTime).count() + */ def countByWindow(windowTime: Time, slideTime: Time) : JavaDStream[Int] = { dstream.countByWindow(windowTime, slideTime) } + /** + * Return a new DStream in which each RDD is generated by applying glom() to each RDD of + * this DStream. Applying glom() to an RDD coalesces all elements within each partition into + * an array. + */ def glom(): JavaDStream[JList[T]] = new JavaDStream(dstream.glom().map(x => new java.util.ArrayList[T](x.toSeq))) + /** Returns the StreamingContext associated with this DStream */ def context(): StreamingContext = dstream.context() + /** Returns a new DStream by applying a function to all elements of this DStream. */ def map[R](f: JFunction[T, R]): JavaDStream[R] = { new JavaDStream(dstream.map(f)(f.returnType()))(f.returnType()) } + /** Returns a new DStream by applying a function to all elements of this DStream. */ def map[K, V](f: PairFunction[T, K, V]): JavaPairDStream[K, V] = { def cm = implicitly[ClassManifest[AnyRef]].asInstanceOf[ClassManifest[Tuple2[K, V]]] new JavaPairDStream(dstream.map(f)(cm))(f.keyType(), f.valueType()) } + /** + * Return a new DStream in which each RDD is generated by applying mapPartitions() to each RDDs + * of this DStream. Applying mapPartitions() to an RDD applies a function to each partition + * of the RDD. + */ def mapPartitions[U](f: FlatMapFunction[java.util.Iterator[T], U]): JavaDStream[U] = { def fn = (x: Iterator[T]) => asScalaIterator(f.apply(asJavaIterator(x)).iterator()) new JavaDStream(dstream.mapPartitions(fn)(f.elementType()))(f.elementType()) } + /** + * Return a new DStream in which each RDD is generated by applying mapPartitions() to each RDDs + * of this DStream. Applying mapPartitions() to an RDD applies a function to each partition + * of the RDD. + */ def mapPartitions[K, V](f: PairFlatMapFunction[java.util.Iterator[T], K, V]) : JavaPairDStream[K, V] = { def fn = (x: Iterator[T]) => asScalaIterator(f.apply(asJavaIterator(x)).iterator()) new JavaPairDStream(dstream.mapPartitions(fn))(f.keyType(), f.valueType()) } + /** + * Returns a new DStream in which each RDD has a single element generated by reducing each RDD + * of this DStream. + */ def reduce(f: JFunction2[T, T, T]): JavaDStream[T] = dstream.reduce(f) + /** + * Returns a new DStream in which each RDD has a single element generated by reducing all + * elements in a window over this DStream. windowTime and slideTime are as defined in the + * window() operation. This is equivalent to window(windowTime, slideTime).reduce(reduceFunc) + */ def reduceByWindow( reduceFunc: JFunction2[T, T, T], invReduceFunc: JFunction2[T, T, T], @@ -59,18 +99,33 @@ trait JavaDStreamLike[T, This <: JavaDStreamLike[T, This]] extends Serializable dstream.reduceByWindow(reduceFunc, invReduceFunc, windowTime, slideTime) } + /** + * Returns all the RDDs between 'fromTime' to 'toTime' (both included) + */ def slice(fromTime: Time, toTime: Time): JList[JavaRDD[T]] = { new util.ArrayList(dstream.slice(fromTime, toTime).map(new JavaRDD(_)).toSeq) } + /** + * Applies a function to each RDD in this DStream. This is an output operator, so + * this DStream will be registered as an output stream and therefore materialized. + */ def foreach(foreachFunc: JFunction[JavaRDD[T], Void]) { dstream.foreach(rdd => foreachFunc.call(new JavaRDD(rdd))) } + /** + * Applies a function to each RDD in this DStream. This is an output operator, so + * this DStream will be registered as an output stream and therefore materialized. 
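The foreach wrappers documented above register an output operation that runs once per generated RDD. A hedged fragment, assuming a JavaDStream<String> named words (an illustrative name) and the spark.api.java.function.Function import used elsewhere in these patches:

words.foreach(new Function<JavaRDD<String>, Void>() {
  @Override
  public Void call(JavaRDD<String> rdd) {
    // Runs on the driver once per batch; rdd.count() triggers the actual job.
    System.out.println("records in this batch: " + rdd.count());
    return null;
  }
});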
+ */ def foreach(foreachFunc: JFunction2[JavaRDD[T], Time, Void]) { dstream.foreach((rdd, time) => foreachFunc.call(new JavaRDD(rdd), time)) } + /** + * Returns a new DStream in which each RDD is generated by applying a function + * on each RDD of this DStream. + */ def transform[U](transformFunc: JFunction[JavaRDD[T], JavaRDD[U]]): JavaDStream[U] = { implicit val cm: ClassManifest[U] = implicitly[ClassManifest[AnyRef]].asInstanceOf[ClassManifest[U]] @@ -79,6 +134,10 @@ trait JavaDStreamLike[T, This <: JavaDStreamLike[T, This]] extends Serializable dstream.transform(scalaTransform(_)) } + /** + * Returns a new DStream in which each RDD is generated by applying a function + * on each RDD of this DStream. + */ def transform[U](transformFunc: JFunction2[JavaRDD[T], Time, JavaRDD[U]]): JavaDStream[U] = { implicit val cm: ClassManifest[U] = implicitly[ClassManifest[AnyRef]].asInstanceOf[ClassManifest[U]] diff --git a/streaming/src/main/scala/spark/streaming/api/java/JavaPairDStream.scala b/streaming/src/main/scala/spark/streaming/api/java/JavaPairDStream.scala index f6dfbb2345..f36b870046 100644 --- a/streaming/src/main/scala/spark/streaming/api/java/JavaPairDStream.scala +++ b/streaming/src/main/scala/spark/streaming/api/java/JavaPairDStream.scala @@ -12,18 +12,31 @@ import org.apache.hadoop.mapred.{JobConf, OutputFormat} import org.apache.hadoop.mapreduce.{OutputFormat => NewOutputFormat} import org.apache.hadoop.conf.Configuration import spark.api.java.{JavaPairRDD, JavaRDD} +import spark.storage.StorageLevel class JavaPairDStream[K, V](val dstream: DStream[(K, V)])( implicit val kManifiest: ClassManifest[K], implicit val vManifest: ClassManifest[V]) extends JavaDStreamLike[(K, V), JavaPairDStream[K, V]] { - // Common to all DStream's + // ======================================================================= + // Methods common to all DStream's + // ======================================================================= + + /** Returns a new DStream containing only the elements that satisfy a predicate. */ def filter(f: JFunction[(K, V), java.lang.Boolean]): JavaPairDStream[K, V] = dstream.filter((x => f(x).booleanValue())) + /** Persists RDDs of this DStream with the default storage level (MEMORY_ONLY_SER) */ def cache(): JavaPairDStream[K, V] = dstream.cache() + /** Persists RDDs of this DStream with the default storage level (MEMORY_ONLY_SER) */ + def persist(): JavaPairDStream[K, V] = dstream.cache() + + /** Persists the RDDs of this DStream with the given storage level */ + def persist(storageLevel: StorageLevel): JavaPairDStream[K, V] = dstream.persist(storageLevel) + + /** Method that generates a RDD for the given time */ def compute(validTime: Time): JavaPairRDD[K, V] = { dstream.compute(validTime) match { case Some(rdd) => new JavaPairRDD(rdd) @@ -31,19 +44,45 @@ class JavaPairDStream[K, V](val dstream: DStream[(K, V)])( } } + /** + * Return a new DStream which is computed based on windowed batches of this DStream. + * The new DStream generates RDDs with the same interval as this DStream. + * @param windowTime width of the window; must be a multiple of this DStream's interval. + * @return + */ def window(windowTime: Time): JavaPairDStream[K, V] = dstream.window(windowTime) + /** + * Return a new DStream which is computed based on windowed batches of this DStream. 
+ * @param windowTime duration (i.e., width) of the window; + * must be a multiple of this DStream's interval + * @param slideTime sliding interval of the window (i.e., the interval after which + * the new DStream will generate RDDs); must be a multiple of this + * DStream's interval + */ def window(windowTime: Time, slideTime: Time): JavaPairDStream[K, V] = dstream.window(windowTime, slideTime) + /** + * Returns a new DStream which computed based on tumbling window on this DStream. + * This is equivalent to window(batchTime, batchTime). + * @param batchTime tumbling window duration; must be a multiple of this DStream's interval + */ def tumble(batchTime: Time): JavaPairDStream[K, V] = dstream.tumble(batchTime) + /** + * Returns a new DStream by unifying data of another DStream with this DStream. + * @param that Another DStream having the same interval (i.e., slideTime) as this DStream. + */ def union(that: JavaPairDStream[K, V]): JavaPairDStream[K, V] = dstream.union(that.dstream) - // Only for PairDStreams... + // ======================================================================= + // Methods only for PairDStream's + // ======================================================================= + def groupByKey(): JavaPairDStream[K, JList[V]] = dstream.groupByKey().mapValues(seqAsJavaList _) @@ -59,8 +98,7 @@ class JavaPairDStream[K, V](val dstream: DStream[(K, V)])( def reduceByKey(func: JFunction2[V, V, V], numPartitions: Int): JavaPairDStream[K, V] = dstream.reduceByKey(func, numPartitions) - // TODO: TEST BELOW - def combineByKey[C](createCombiner: Function[V, C], + def combineByKey[C](createCombiner: JFunction[V, C], mergeValue: JFunction2[C, V, C], mergeCombiners: JFunction2[C, C, C], partitioner: Partitioner): JavaPairDStream[K, C] = { diff --git a/streaming/src/main/scala/spark/streaming/api/java/JavaStreamingContext.scala b/streaming/src/main/scala/spark/streaming/api/java/JavaStreamingContext.scala index 19cd032fc1..f96b4fbd7d 100644 --- a/streaming/src/main/scala/spark/streaming/api/java/JavaStreamingContext.scala +++ b/streaming/src/main/scala/spark/streaming/api/java/JavaStreamingContext.scala @@ -4,27 +4,173 @@ import scala.collection.JavaConversions._ import java.util.{List => JList} import spark.streaming._ -import dstream.SparkFlumeEvent +import dstream._ import spark.storage.StorageLevel +import spark.api.java.function.{Function => JFunction, Function2 => JFunction2} +import org.apache.hadoop.mapreduce.{InputFormat => NewInputFormat} +import java.io.InputStream class JavaStreamingContext(val ssc: StreamingContext) { def this(master: String, frameworkName: String, batchDuration: Time) = this(new StreamingContext(master, frameworkName, batchDuration)) - def textFileStream(directory: String): JavaDStream[String] = { - ssc.textFileStream(directory) + // TODOs: + // - Test StreamingContext functions + // - Test to/from Hadoop functions + // - Add checkpoint()/remember() + // - Support creating your own streams + // - Add Kafka Stream + + /** + * Create a input stream from network source hostname:port. Data is received using + * a TCP socket and the receive bytes is interpreted as UTF8 encoded \n delimited + * lines. 
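The windowed groupings and reductions above all take a window duration and a slide duration; as the API stands here, before the later Duration port, both are expressed as Time values in milliseconds. A hedged fragment mirroring the JavaAPISuite usage, assuming a JavaPairDStream<String, Integer> named wordCounts (an illustrative name):

Function2<Integer, Integer, Integer> sum = new Function2<Integer, Integer, Integer>() {
  @Override
  public Integer call(Integer a, Integer b) {
    return a + b;
  }
};

// Sum per key over a 2000 ms window that slides forward every 1000 ms.
JavaPairDStream<String, Integer> windowedCounts =
    wordCounts.reduceByKeyAndWindow(sum, new Time(2000), new Time(1000));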
+ * @param hostname Hostname to connect to for receiving data + * @param port Port to connect to for receiving data + * @param storageLevel Storage level to use for storing the received objects + * (default: StorageLevel.MEMORY_AND_DISK_SER_2) + */ + def networkTextStream(hostname: String, port: Int, storageLevel: StorageLevel) + : JavaDStream[String] = { + ssc.networkTextStream(hostname, port, storageLevel) } + /** + * Create a input stream from network source hostname:port. Data is received using + * a TCP socket and the receive bytes is interpreted as UTF8 encoded \n delimited + * lines. + * @param hostname Hostname to connect to for receiving data + * @param port Port to connect to for receiving data + */ def networkTextStream(hostname: String, port: Int): JavaDStream[String] = { ssc.networkTextStream(hostname, port) } + /** + * Create a input stream from network source hostname:port. Data is received using + * a TCP socket and the receive bytes it interepreted as object using the given + * converter. + * @param hostname Hostname to connect to for receiving data + * @param port Port to connect to for receiving data + * @param converter Function to convert the byte stream to objects + * @param storageLevel Storage level to use for storing the received objects + * @tparam T Type of the objects received (after converting bytes to objects) + */ + def networkStream[T]( + hostname: String, + port: Int, + converter: JFunction[InputStream, java.lang.Iterable[T]], + storageLevel: StorageLevel) + : JavaDStream[T] = { + import scala.collection.JavaConverters._ + def fn = (x: InputStream) => converter.apply(x).toIterator + implicit val cmt: ClassManifest[T] = + implicitly[ClassManifest[AnyRef]].asInstanceOf[ClassManifest[T]] + ssc.networkStream(hostname, port, fn, storageLevel) + } + + /** + * Creates a input stream that monitors a Hadoop-compatible filesystem + * for new files and reads them as text files (using key as LongWritable, value + * as Text and input format as TextInputFormat). File names starting with . are ignored. + * @param directory HDFS directory to monitor for new file + */ + def textFileStream(directory: String): JavaDStream[String] = { + ssc.textFileStream(directory) + } + + /** + * Create a input stream from network source hostname:port, where data is received + * as serialized blocks (serialized using the Spark's serializer) that can be directly + * pushed into the block manager without deserializing them. This is the most efficient + * way to receive data. + * @param hostname Hostname to connect to for receiving data + * @param port Port to connect to for receiving data + * @param storageLevel Storage level to use for storing the received objects + * @tparam T Type of the objects in the received blocks + */ + def rawNetworkStream[T]( + hostname: String, + port: Int, + storageLevel: StorageLevel): JavaDStream[T] = { + implicit val cmt: ClassManifest[T] = + implicitly[ClassManifest[AnyRef]].asInstanceOf[ClassManifest[T]] + JavaDStream.fromDStream(ssc.rawNetworkStream(hostname, port, storageLevel)) + } + + /** + * Create a input stream from network source hostname:port, where data is received + * as serialized blocks (serialized using the Spark's serializer) that can be directly + * pushed into the block manager without deserializing them. This is the most efficient + * way to receive data. 
+ * @param hostname Hostname to connect to for receiving data + * @param port Port to connect to for receiving data + * @tparam T Type of the objects in the received blocks + */ + def rawNetworkStream[T](hostname: String, port: Int): JavaDStream[T] = { + implicit val cmt: ClassManifest[T] = + implicitly[ClassManifest[AnyRef]].asInstanceOf[ClassManifest[T]] + JavaDStream.fromDStream(ssc.rawNetworkStream(hostname, port)) + } + + /** + * Creates a input stream that monitors a Hadoop-compatible filesystem + * for new files and reads them using the given key-value types and input format. + * File names starting with . are ignored. + * @param directory HDFS directory to monitor for new file + * @tparam K Key type for reading HDFS file + * @tparam V Value type for reading HDFS file + * @tparam F Input format for reading HDFS file + */ + def fileStream[K, V, F <: NewInputFormat[K, V]](directory: String): JavaPairDStream[K, V] = { + implicit val cmk: ClassManifest[K] = + implicitly[ClassManifest[AnyRef]].asInstanceOf[ClassManifest[K]] + implicit val cmv: ClassManifest[V] = + implicitly[ClassManifest[AnyRef]].asInstanceOf[ClassManifest[V]] + implicit val cmf: ClassManifest[F] = + implicitly[ClassManifest[AnyRef]].asInstanceOf[ClassManifest[F]] + ssc.fileStream[K, V, F](directory); + } + + /** + * Creates a input stream from a Flume source. + * @param hostname Hostname of the slave machine to which the flume data will be sent + * @param port Port of the slave machine to which the flume data will be sent + * @param storageLevel Storage level to use for storing the received objects + */ def flumeStream(hostname: String, port: Int, storageLevel: StorageLevel): JavaDStream[SparkFlumeEvent] = { ssc.flumeStream(hostname, port, storageLevel) } + + /** + * Creates a input stream from a Flume source. + * @param hostname Hostname of the slave machine to which the flume data will be sent + * @param port Port of the slave machine to which the flume data will be sent + */ + def flumeStream(hostname: String, port: Int): + JavaDStream[SparkFlumeEvent] = { + ssc.flumeStream(hostname, port) + } + + // NOT SUPPORTED: registerInputStream + + /** + * Registers an output stream that will be computed every interval + */ + def registerOutputStream(outputStream: JavaDStreamLike[_, _]) { + ssc.registerOutputStream(outputStream.dstream) + } + + /** + * Starts the execution of the streams. + */ def start() = ssc.start() + + /** + * Sstops the execution of the streams. 
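The context methods above are enough to assemble a small end-to-end job. A hedged sketch that watches a directory and counts identical lines within each batch; the master string, application name, and directory path are placeholders, and the Time-based constructor reflects the API at this point in the series:

import scala.Tuple2;
import spark.api.java.function.Function2;
import spark.api.java.function.PairFunction;
import spark.streaming.Time;
import spark.streaming.api.java.JavaDStream;
import spark.streaming.api.java.JavaPairDStream;
import spark.streaming.api.java.JavaStreamingContext;

public class DirectoryLineCount {
  public static void main(String[] args) {
    JavaStreamingContext ssc =
        new JavaStreamingContext("local[2]", "DirectoryLineCount", new Time(1000));

    // Monitor an HDFS-compatible directory for newly written text files.
    JavaDStream<String> lines = ssc.textFileStream("hdfs://namenode:8020/incoming");

    JavaPairDStream<String, Integer> ones = lines.map(
        new PairFunction<String, String, Integer>() {
          @Override
          public Tuple2<String, Integer> call(String line) {
            return new Tuple2<String, Integer>(line, 1);
          }
        });

    JavaPairDStream<String, Integer> counts = ones.reduceByKey(
        new Function2<Integer, Integer, Integer>() {
          @Override
          public Integer call(Integer a, Integer b) {
            return a + b;
          }
        });

    // print() registers the stream as an output stream, so start() has work to do.
    counts.print();
    ssc.start();
  }
}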
+ */ def stop() = ssc.stop() } diff --git a/streaming/src/test/scala/spark/streaming/JavaAPISuite.java b/streaming/src/test/scala/spark/streaming/JavaAPISuite.java index c1373e6275..fa3a5801dd 100644 --- a/streaming/src/test/scala/spark/streaming/JavaAPISuite.java +++ b/streaming/src/test/scala/spark/streaming/JavaAPISuite.java @@ -6,6 +6,7 @@ import org.junit.After; import org.junit.Before; import org.junit.Test; import scala.Tuple2; +import spark.HashPartitioner; import spark.api.java.JavaRDD; import spark.api.java.function.FlatMapFunction; import spark.api.java.function.Function; @@ -377,18 +378,31 @@ public class JavaAPISuite implements Serializable { Assert.assertEquals(expected, result); } + List>> stringStringKVStream = Arrays.asList( + Arrays.asList(new Tuple2("california", "dodgers"), + new Tuple2("california", "giants"), + new Tuple2("new york", "yankees"), + new Tuple2("new york", "mets")), + Arrays.asList(new Tuple2("california", "sharks"), + new Tuple2("california", "ducks"), + new Tuple2("new york", "rangers"), + new Tuple2("new york", "islanders"))); + + List>> stringIntKVStream = Arrays.asList( + Arrays.asList( + new Tuple2("california", 1), + new Tuple2("california", 3), + new Tuple2("new york", 4), + new Tuple2("new york", 1)), + Arrays.asList( + new Tuple2("california", 5), + new Tuple2("california", 5), + new Tuple2("new york", 3), + new Tuple2("new york", 1))); + @Test public void testPairGroupByKey() { - List>> inputData = Arrays.asList( - Arrays.asList(new Tuple2("california", "dodgers"), - new Tuple2("california", "giants"), - new Tuple2("new york", "yankees"), - new Tuple2("new york", "mets")), - Arrays.asList(new Tuple2("california", "sharks"), - new Tuple2("california", "ducks"), - new Tuple2("new york", "rangers"), - new Tuple2("new york", "islanders"))); - + List>> inputData = stringStringKVStream; List>>> expected = Arrays.asList( Arrays.asList( @@ -410,18 +424,7 @@ public class JavaAPISuite implements Serializable { @Test public void testPairReduceByKey() { - List>> inputData = Arrays.asList( - Arrays.asList( - new Tuple2("california", 1), - new Tuple2("california", 3), - new Tuple2("new york", 4), - new Tuple2("new york", 1)), - Arrays.asList( - new Tuple2("california", 5), - new Tuple2("california", 5), - new Tuple2("new york", 3), - new Tuple2("new york", 1))); - + List>> inputData = stringIntKVStream; List>> expected = Arrays.asList( Arrays.asList( @@ -435,17 +438,323 @@ public class JavaAPISuite implements Serializable { sc, inputData, 1); JavaPairDStream pairStream = JavaPairDStream.fromJavaDStream(stream); - JavaPairDStream reduced = pairStream.reduceByKey( - new Function2() { - @Override - public Integer call(Integer i1, Integer i2) throws Exception { - return i1 + i2; - } - }); + JavaPairDStream reduced = pairStream.reduceByKey(new IntegerSum()); JavaTestUtils.attachTestOutputStream(reduced); List>> result = JavaTestUtils.runStreams(sc, 2, 2); Assert.assertEquals(expected, result); } + + @Test + public void testCombineByKey() { + List>> inputData = stringIntKVStream; + + List>> expected = Arrays.asList( + Arrays.asList( + new Tuple2("california", 4), + new Tuple2("new york", 5)), + Arrays.asList( + new Tuple2("california", 10), + new Tuple2("new york", 4))); + + JavaDStream> stream = JavaTestUtils.attachTestInputStream( + sc, inputData, 1); + JavaPairDStream pairStream = JavaPairDStream.fromJavaDStream(stream); + + JavaPairDStream combined = pairStream.combineByKey( + new Function() { + @Override + public Integer call(Integer i) throws Exception { + 
return i; + } + }, new IntegerSum(), new IntegerSum(), new HashPartitioner(2)); + + JavaTestUtils.attachTestOutputStream(combined); + List>> result = JavaTestUtils.runStreams(sc, 2, 2); + + Assert.assertEquals(expected, result); + } + + @Test + public void testCountByKey() { + List>> inputData = stringStringKVStream; + + List>> expected = Arrays.asList( + Arrays.asList( + new Tuple2("california", 2L), + new Tuple2("new york", 2L)), + Arrays.asList( + new Tuple2("california", 2L), + new Tuple2("new york", 2L))); + + JavaDStream> stream = JavaTestUtils.attachTestInputStream( + sc, inputData, 1); + JavaPairDStream pairStream = JavaPairDStream.fromJavaDStream(stream); + + // TODO: Below fails with compile error with ... wtf? + JavaPairDStream counted = pairStream.countByKey(); + JavaTestUtils.attachTestOutputStream(counted); + List>> result = JavaTestUtils.runStreams(sc, 2, 2); + + Assert.assertEquals(expected, result); + } + + @Test + public void testGroupByKeyAndWindow() { + List>> inputData = stringStringKVStream; + + List>>> expected = Arrays.asList( + Arrays.asList(new Tuple2>("california", Arrays.asList("dodgers", "giants")), + new Tuple2>("new york", Arrays.asList("yankees", "mets"))), + Arrays.asList(new Tuple2>("california", + Arrays.asList("sharks", "ducks", "dodgers", "giants")), + new Tuple2>("new york", Arrays.asList("rangers", "islanders", "yankees", "mets"))), + Arrays.asList(new Tuple2>("california", Arrays.asList("sharks", "ducks")), + new Tuple2>("new york", Arrays.asList("rangers", "islanders")))); + + JavaDStream> stream = JavaTestUtils.attachTestInputStream(sc, inputData, 1); + JavaPairDStream pairStream = JavaPairDStream.fromJavaDStream(stream); + + JavaPairDStream> groupWindowed = + pairStream.groupByKeyAndWindow(new Time(2000), new Time(1000)); + JavaTestUtils.attachTestOutputStream(groupWindowed); + List>>> result = JavaTestUtils.runStreams(sc, 3, 3); + + Assert.assertEquals(expected, result); + } + + @Test + public void testReduceByKeyAndWindow() { + List>> inputData = stringIntKVStream; + + List>> expected = Arrays.asList( + Arrays.asList(new Tuple2("california", 4), + new Tuple2("new york", 5)), + Arrays.asList(new Tuple2("california", 14), + new Tuple2("new york", 9)), + Arrays.asList(new Tuple2("california", 10), + new Tuple2("new york", 4))); + + JavaDStream> stream = JavaTestUtils.attachTestInputStream(sc, inputData, 1); + JavaPairDStream pairStream = JavaPairDStream.fromJavaDStream(stream); + + JavaPairDStream reduceWindowed = + pairStream.reduceByKeyAndWindow(new IntegerSum(), new Time(2000), new Time(1000)); + JavaTestUtils.attachTestOutputStream(reduceWindowed); + List>> result = JavaTestUtils.runStreams(sc, 3, 3); + + Assert.assertEquals(expected, result); + } + + @Test + public void testReduceByKeyAndWindowWithInverse() { + List>> inputData = stringIntKVStream; + + List>> expected = Arrays.asList( + Arrays.asList(new Tuple2("california", 4), + new Tuple2("new york", 5)), + Arrays.asList(new Tuple2("california", 14), + new Tuple2("new york", 9)), + Arrays.asList(new Tuple2("california", 10), + new Tuple2("new york", 4))); + + JavaDStream> stream = JavaTestUtils.attachTestInputStream(sc, inputData, 1); + JavaPairDStream pairStream = JavaPairDStream.fromJavaDStream(stream); + + JavaPairDStream reduceWindowed = + pairStream.reduceByKeyAndWindow(new IntegerSum(), new IntegerDifference(), new Time(2000), new Time(1000)); + JavaTestUtils.attachTestOutputStream(reduceWindowed); + List>> result = JavaTestUtils.runStreams(sc, 3, 3); + + Assert.assertEquals(expected, 
result); + } + + @Test + public void testCountByKeyAndWindow() { + List>> inputData = stringStringKVStream; + + List>> expected = Arrays.asList( + Arrays.asList( + new Tuple2("california", 2L), + new Tuple2("new york", 2L)), + Arrays.asList( + new Tuple2("california", 4L), + new Tuple2("new york", 4L)), + Arrays.asList( + new Tuple2("california", 2L), + new Tuple2("new york", 2L))); + + JavaDStream> stream = JavaTestUtils.attachTestInputStream( + sc, inputData, 1); + JavaPairDStream pairStream = JavaPairDStream.fromJavaDStream(stream); + + // TODO: Below fails with compile error with ... wtf? + JavaPairDStream counted = pairStream.countByKeyAndWindow(new Time(2000), new Time(1000)); + JavaTestUtils.attachTestOutputStream(counted); + List>> result = JavaTestUtils.runStreams(sc, 3, 3); + + Assert.assertEquals(expected, result); + } + + @Test + public void testMapValues() { + List>> inputData = stringStringKVStream; + + List>> expected = Arrays.asList( + Arrays.asList(new Tuple2("california", "DODGERS"), + new Tuple2("california", "GIANTS"), + new Tuple2("new york", "YANKEES"), + new Tuple2("new york", "METS")), + Arrays.asList(new Tuple2("california", "SHARKS"), + new Tuple2("california", "DUCKS"), + new Tuple2("new york", "RANGERS"), + new Tuple2("new york", "ISLANDERS"))); + + JavaDStream> stream = JavaTestUtils.attachTestInputStream( + sc, inputData, 1); + JavaPairDStream pairStream = JavaPairDStream.fromJavaDStream(stream); + + JavaPairDStream mapped = pairStream.mapValues(new Function() { + @Override + public String call(String s) throws Exception { + return s.toUpperCase(); + } + }); + + JavaTestUtils.attachTestOutputStream(mapped); + List>> result = JavaTestUtils.runStreams(sc, 2, 2); + + Assert.assertEquals(expected, result); + } + + @Test + public void testFlatMapValues() { + List>> inputData = stringStringKVStream; + + List>> expected = Arrays.asList( + Arrays.asList(new Tuple2("california", "dodgers1"), + new Tuple2("california", "dodgers2"), + new Tuple2("california", "giants1"), + new Tuple2("california", "giants2"), + new Tuple2("new york", "yankees1"), + new Tuple2("new york", "yankees2"), + new Tuple2("new york", "mets1"), + new Tuple2("new york", "mets2")), + Arrays.asList(new Tuple2("california", "sharks1"), + new Tuple2("california", "sharks2"), + new Tuple2("california", "ducks1"), + new Tuple2("california", "ducks2"), + new Tuple2("new york", "rangers1"), + new Tuple2("new york", "rangers2"), + new Tuple2("new york", "islanders1"), + new Tuple2("new york", "islanders2"))); + + JavaDStream> stream = JavaTestUtils.attachTestInputStream( + sc, inputData, 1); + JavaPairDStream pairStream = JavaPairDStream.fromJavaDStream(stream); + + + JavaPairDStream flatMapped = pairStream.flatMapValues( + new Function>() { + @Override + public Iterable call(String in) { + List out = new ArrayList(); + out.add(in + "1"); + out.add(in + "2"); + return out; + } + }); + + JavaTestUtils.attachTestOutputStream(flatMapped); + List>> result = JavaTestUtils.runStreams(sc, 2, 2); + + Assert.assertEquals(expected, result); + } + + @Test + public void testCoGroup() { + List>> stringStringKVStream1 = Arrays.asList( + Arrays.asList(new Tuple2("california", "dodgers"), + new Tuple2("new york", "yankees")), + Arrays.asList(new Tuple2("california", "sharks"), + new Tuple2("new york", "rangers"))); + + List>> stringStringKVStream2 = Arrays.asList( + Arrays.asList(new Tuple2("california", "giants"), + new Tuple2("new york", "mets")), + Arrays.asList(new Tuple2("california", "ducks"), + new Tuple2("new 
york", "islanders"))); + + + List, List>>>> expected = Arrays.asList( + Arrays.asList( + new Tuple2, List>>("california", + new Tuple2, List>(Arrays.asList("dodgers"), Arrays.asList("giants"))), + new Tuple2, List>>("new york", + new Tuple2, List>(Arrays.asList("yankees"), Arrays.asList("mets")))), + Arrays.asList( + new Tuple2, List>>("california", + new Tuple2, List>(Arrays.asList("sharks"), Arrays.asList("ducks"))), + new Tuple2, List>>("new york", + new Tuple2, List>(Arrays.asList("rangers"), Arrays.asList("islanders"))))); + + + JavaDStream> stream1 = JavaTestUtils.attachTestInputStream( + sc, stringStringKVStream1, 1); + JavaPairDStream pairStream1 = JavaPairDStream.fromJavaDStream(stream1); + + JavaDStream> stream2 = JavaTestUtils.attachTestInputStream( + sc, stringStringKVStream2, 1); + JavaPairDStream pairStream2 = JavaPairDStream.fromJavaDStream(stream2); + + JavaPairDStream, List>> grouped = pairStream1.cogroup(pairStream2); + JavaTestUtils.attachTestOutputStream(grouped); + List>> result = JavaTestUtils.runStreams(sc, 2, 2); + + Assert.assertEquals(expected, result); + } + + @Test + public void testJoin() { + List>> stringStringKVStream1 = Arrays.asList( + Arrays.asList(new Tuple2("california", "dodgers"), + new Tuple2("new york", "yankees")), + Arrays.asList(new Tuple2("california", "sharks"), + new Tuple2("new york", "rangers"))); + + List>> stringStringKVStream2 = Arrays.asList( + Arrays.asList(new Tuple2("california", "giants"), + new Tuple2("new york", "mets")), + Arrays.asList(new Tuple2("california", "ducks"), + new Tuple2("new york", "islanders"))); + + + List>>> expected = Arrays.asList( + Arrays.asList( + new Tuple2>("california", + new Tuple2("dodgers", "giants")), + new Tuple2>("new york", + new Tuple2("yankees", "mets"))), + Arrays.asList( + new Tuple2>("california", + new Tuple2("sharks", "ducks")), + new Tuple2>("new york", + new Tuple2("rangers", "islanders")))); + + + JavaDStream> stream1 = JavaTestUtils.attachTestInputStream( + sc, stringStringKVStream1, 1); + JavaPairDStream pairStream1 = JavaPairDStream.fromJavaDStream(stream1); + + JavaDStream> stream2 = JavaTestUtils.attachTestInputStream( + sc, stringStringKVStream2, 1); + JavaPairDStream pairStream2 = JavaPairDStream.fromJavaDStream(stream2); + + JavaPairDStream> joined = pairStream1.join(pairStream2); + JavaTestUtils.attachTestOutputStream(joined); + List>> result = JavaTestUtils.runStreams(sc, 2, 2); + + Assert.assertEquals(expected, result); + } } From 2fe39a4468798b5b125c4c3436ee1180b3a7b470 Mon Sep 17 00:00:00 2001 From: Patrick Wendell Date: Wed, 9 Jan 2013 21:59:06 -0800 Subject: [PATCH 198/226] Some docs for the JavaTestUtils --- streaming/src/test/scala/JavaTestUtils.scala | 28 +++++++++++++++----- 1 file changed, 22 insertions(+), 6 deletions(-) diff --git a/streaming/src/test/scala/JavaTestUtils.scala b/streaming/src/test/scala/JavaTestUtils.scala index 9f3a80df8b..24ebc15e38 100644 --- a/streaming/src/test/scala/JavaTestUtils.scala +++ b/streaming/src/test/scala/JavaTestUtils.scala @@ -2,15 +2,22 @@ package spark.streaming import collection.mutable.{SynchronizedBuffer, ArrayBuffer} import java.util.{List => JList} -import api.java.{JavaPairDStream, JavaDStreamLike, JavaDStream, JavaStreamingContext} +import spark.streaming.api.java.{JavaPairDStream, JavaDStreamLike, JavaDStream, JavaStreamingContext} import spark.streaming._ import java.util.ArrayList import collection.JavaConversions._ -/** Exposes core test functionality in a Java-friendly way. 
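The tests above all follow the same harness pattern: attach an in-memory input stream, apply the transformation under test, attach an output stream, then run a fixed number of batches and compare the collected output. A hedged, stripped-down version of that pattern, assuming the suite's JavaStreamingContext field named sc:

List<List<Integer>> input = Arrays.asList(
    Arrays.asList(1, 2, 3),
    Arrays.asList(4, 5, 6));

// One partition per generated RDD.
JavaDStream<Integer> stream = JavaTestUtils.attachTestInputStream(sc, input, 1);

JavaDStream<Integer> doubled = stream.map(new Function<Integer, Integer>() {
  @Override
  public Integer call(Integer i) {
    return i * 2;
  }
});

// Run two batches and expect two output RDDs, returned as one list per batch.
JavaTestUtils.attachTestOutputStream(doubled);
List<List<Integer>> result = JavaTestUtils.runStreams(sc, 2, 2);
Assert.assertEquals(
    Arrays.asList(Arrays.asList(2, 4, 6), Arrays.asList(8, 10, 12)), result);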
*/ +/** Exposes streaming test functionality in a Java-friendly way. */ object JavaTestUtils extends TestSuiteBase { - def attachTestInputStream[T](ssc: JavaStreamingContext, - data: JList[JList[T]], numPartitions: Int) = { + + /** + * Create a [[spark.streaming.TestInputStream]] and attach it to the supplied context. + * The stream will be derived from the supplied lists of Java objects. + **/ + def attachTestInputStream[T]( + ssc: JavaStreamingContext, + data: JList[JList[T]], + numPartitions: Int) = { val seqData = data.map(Seq(_:_*)) implicit val cm: ClassManifest[T] = @@ -20,8 +27,12 @@ object JavaTestUtils extends TestSuiteBase { new JavaDStream[T](dstream) } - def attachTestOutputStream[T, This <: spark.streaming.api.java.JavaDStreamLike[T,This]] - (dstream: JavaDStreamLike[T, This]) = { + /** + * Attach a provided stream to it's associated StreamingContext as a + * [[spark.streaming.TestOutputStream]]. + **/ + def attachTestOutputStream[T, This <: spark.streaming.api.java.JavaDStreamLike[T,This]]( + dstream: JavaDStreamLike[T, This]) = { implicit val cm: ClassManifest[T] = implicitly[ClassManifest[AnyRef]].asInstanceOf[ClassManifest[T]] val ostream = new TestOutputStream(dstream.dstream, @@ -29,6 +40,11 @@ object JavaTestUtils extends TestSuiteBase { dstream.dstream.ssc.registerOutputStream(ostream) } + /** + * Process all registered streams for a numBatches batches, failing if + * numExpectedOutput RDD's are not generated. Generated RDD's are collected + * and returned, represented as a list for each batch interval. + */ def runStreams[V]( ssc: JavaStreamingContext, numBatches: Int, numExpectedOutput: Int): JList[JList[V]] = { implicit val cm: ClassManifest[V] = From 5004eec37c01db3b96d665b0d9606002af209eda Mon Sep 17 00:00:00 2001 From: Patrick Wendell Date: Wed, 9 Jan 2013 22:02:07 -0800 Subject: [PATCH 199/226] Import Cleanup --- .../main/scala/spark/streaming/api/java/JavaPairDStream.scala | 2 +- .../scala/spark/streaming/api/java/JavaStreamingContext.scala | 1 - streaming/src/test/scala/spark/streaming/JavaAPISuite.java | 4 ++-- 3 files changed, 3 insertions(+), 4 deletions(-) diff --git a/streaming/src/main/scala/spark/streaming/api/java/JavaPairDStream.scala b/streaming/src/main/scala/spark/streaming/api/java/JavaPairDStream.scala index f36b870046..a19a476724 100644 --- a/streaming/src/main/scala/spark/streaming/api/java/JavaPairDStream.scala +++ b/streaming/src/main/scala/spark/streaming/api/java/JavaPairDStream.scala @@ -11,7 +11,7 @@ import spark.Partitioner import org.apache.hadoop.mapred.{JobConf, OutputFormat} import org.apache.hadoop.mapreduce.{OutputFormat => NewOutputFormat} import org.apache.hadoop.conf.Configuration -import spark.api.java.{JavaPairRDD, JavaRDD} +import spark.api.java.JavaPairRDD import spark.storage.StorageLevel class JavaPairDStream[K, V](val dstream: DStream[(K, V)])( diff --git a/streaming/src/main/scala/spark/streaming/api/java/JavaStreamingContext.scala b/streaming/src/main/scala/spark/streaming/api/java/JavaStreamingContext.scala index f96b4fbd7d..37ce037d5c 100644 --- a/streaming/src/main/scala/spark/streaming/api/java/JavaStreamingContext.scala +++ b/streaming/src/main/scala/spark/streaming/api/java/JavaStreamingContext.scala @@ -62,7 +62,6 @@ class JavaStreamingContext(val ssc: StreamingContext) { converter: JFunction[InputStream, java.lang.Iterable[T]], storageLevel: StorageLevel) : JavaDStream[T] = { - import scala.collection.JavaConverters._ def fn = (x: InputStream) => converter.apply(x).toIterator implicit val cmt: ClassManifest[T] 
= implicitly[ClassManifest[AnyRef]].asInstanceOf[ClassManifest[T]] diff --git a/streaming/src/test/scala/spark/streaming/JavaAPISuite.java b/streaming/src/test/scala/spark/streaming/JavaAPISuite.java index fa3a5801dd..9e8438d04c 100644 --- a/streaming/src/test/scala/spark/streaming/JavaAPISuite.java +++ b/streaming/src/test/scala/spark/streaming/JavaAPISuite.java @@ -1,8 +1,8 @@ package spark.streaming; import com.google.common.collect.Lists; -import org.junit.Assert; import org.junit.After; +import org.junit.Assert; import org.junit.Before; import org.junit.Test; import scala.Tuple2; @@ -12,10 +12,10 @@ import spark.api.java.function.FlatMapFunction; import spark.api.java.function.Function; import spark.api.java.function.Function2; import spark.api.java.function.PairFunction; -import spark.streaming.JavaTestUtils; import spark.streaming.api.java.JavaDStream; import spark.streaming.api.java.JavaPairDStream; import spark.streaming.api.java.JavaStreamingContext; +import spark.streaming.JavaTestUtils; import java.io.Serializable; import java.util.*; From b36c4f7cce53446753ecc0ce6f9bdccb12b3350b Mon Sep 17 00:00:00 2001 From: Patrick Wendell Date: Thu, 10 Jan 2013 19:29:22 -0800 Subject: [PATCH 200/226] More work on StreamingContext --- .../api/java/JavaStreamingContext.scala | 47 ++++++++++++++++++- 1 file changed, 45 insertions(+), 2 deletions(-) diff --git a/streaming/src/main/scala/spark/streaming/api/java/JavaStreamingContext.scala b/streaming/src/main/scala/spark/streaming/api/java/JavaStreamingContext.scala index 37ce037d5c..e8cd03847a 100644 --- a/streaming/src/main/scala/spark/streaming/api/java/JavaStreamingContext.scala +++ b/streaming/src/main/scala/spark/streaming/api/java/JavaStreamingContext.scala @@ -9,6 +9,7 @@ import spark.storage.StorageLevel import spark.api.java.function.{Function => JFunction, Function2 => JFunction2} import org.apache.hadoop.mapreduce.{InputFormat => NewInputFormat} import java.io.InputStream +import java.util.{Map => JMap} class JavaStreamingContext(val ssc: StreamingContext) { def this(master: String, frameworkName: String, batchDuration: Time) = @@ -17,10 +18,31 @@ class JavaStreamingContext(val ssc: StreamingContext) { // TODOs: // - Test StreamingContext functions // - Test to/from Hadoop functions - // - Add checkpoint()/remember() - // - Support creating your own streams + // - Support registering InputStreams // - Add Kafka Stream + + /** + * Create an input stream that pulls messages form a Kafka Broker. + * @param hostname Zookeper hostname. + * @param port Zookeper port. + * @param groupId The group id for this consumer. + * @param topics Map of (topic_name -> numPartitions) to consume. Each partition is consumed + * in its own thread. + * @param initialOffsets Optional initial offsets for each of the partitions to consume. + * By default the value is pulled from zookeper. + * @param storageLevel RDD storage level. Defaults to memory-only. + */ + def kafkaStream[T]( + hostname: String, + port: Int, + groupId: String, + topics: JMap[String, Int]) + : DStream[T] = { + implicit val cmt: ClassManifest[T] = + implicitly[ClassManifest[AnyRef]].asInstanceOf[ClassManifest[T]] + ssc.kafkaStream(hostname, port, groupId, Map(topics.toSeq: _*)) + } /** * Create a input stream from network source hostname:port. 
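Patch 200 also adds checkpoint() and remember() to JavaStreamingContext; their hunks follow below. A hedged fragment showing how a Java caller might use them, assuming an existing context named ssc; the Scala side gives the checkpoint interval a default of null, so it is passed explicitly here, and the HDFS path is a placeholder:

// Checkpoint DStream metadata reliably; a null interval means the default
// (once every batch interval).
ssc.checkpoint("hdfs://namenode:8020/streaming-checkpoints", null);

// Keep each DStream's generated RDDs around for at least a minute so older
// data can still be queried outside the normal DStream computation.
ssc.remember(new Time(60000));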
Data is received using * a TCP socket and the receive bytes is interpreted as UTF8 encoded \n delimited @@ -162,6 +184,27 @@ class JavaStreamingContext(val ssc: StreamingContext) { ssc.registerOutputStream(outputStream.dstream) } + /** + * Sets the context to periodically checkpoint the DStream operations for master + * fault-tolerance. By default, the graph will be checkpointed every batch interval. + * @param directory HDFS-compatible directory where the checkpoint data will be reliably stored + * @param interval checkpoint interval + */ + def checkpoint(directory: String, interval: Time = null) { + ssc.checkpoint(directory, interval) + } + + /** + * Sets each DStreams in this context to remember RDDs it generated in the last given duration. + * DStreams remember RDDs only for a limited duration of time and releases them for garbage + * collection. This method allows the developer to specify how to long to remember the RDDs ( + * if the developer wishes to query old data outside the DStream computation). + * @param duration Minimum duration that each DStream should remember its RDDs + */ + def remember(duration: Time) { + ssc.remember(duration) + } + /** * Starts the execution of the streams. */ From c2537057f9ed8723d2c33a1636edf9c9547cdc66 Mon Sep 17 00:00:00 2001 From: Patrick Wendell Date: Thu, 10 Jan 2013 19:29:33 -0800 Subject: [PATCH 201/226] Fixing issue with types --- .../streaming/api/java/JavaPairDStream.scala | 18 ++++++++++++------ .../scala/spark/streaming/JavaAPISuite.java | 8 ++++---- 2 files changed, 16 insertions(+), 10 deletions(-) diff --git a/streaming/src/main/scala/spark/streaming/api/java/JavaPairDStream.scala b/streaming/src/main/scala/spark/streaming/api/java/JavaPairDStream.scala index a19a476724..fa46ca9267 100644 --- a/streaming/src/main/scala/spark/streaming/api/java/JavaPairDStream.scala +++ b/streaming/src/main/scala/spark/streaming/api/java/JavaPairDStream.scala @@ -1,6 +1,7 @@ package spark.streaming.api.java import java.util.{List => JList} +import java.lang.{Long => JLong} import scala.collection.JavaConversions._ @@ -13,6 +14,7 @@ import org.apache.hadoop.mapreduce.{OutputFormat => NewOutputFormat} import org.apache.hadoop.conf.Configuration import spark.api.java.JavaPairRDD import spark.storage.StorageLevel +import java.lang class JavaPairDStream[K, V](val dstream: DStream[(K, V)])( implicit val kManifiest: ClassManifest[K], @@ -107,12 +109,12 @@ class JavaPairDStream[K, V](val dstream: DStream[(K, V)])( dstream.combineByKey(createCombiner, mergeValue, mergeCombiners, partitioner) } - def countByKey(numPartitions: Int): JavaPairDStream[K, Long] = { - dstream.countByKey(numPartitions); + def countByKey(numPartitions: Int): JavaPairDStream[K, JLong] = { + JavaPairDStream.scalaToJavaLong(dstream.countByKey(numPartitions)); } - def countByKey(): JavaPairDStream[K, Long] = { - dstream.countByKey(); + def countByKey(): JavaPairDStream[K, JLong] = { + JavaPairDStream.scalaToJavaLong(dstream.countByKey()); } def groupByKeyAndWindow(windowTime: Time, slideTime: Time): JavaPairDStream[K, JList[V]] = { @@ -168,8 +170,8 @@ class JavaPairDStream[K, V](val dstream: DStream[(K, V)])( dstream.reduceByKeyAndWindow(reduceFunc, invReduceFunc, windowTime, slideTime, partitioner) } - def countByKeyAndWindow(windowTime: Time, slideTime: Time): JavaPairDStream[K, Long] = { - dstream.countByKeyAndWindow(windowTime, slideTime) + def countByKeyAndWindow(windowTime: Time, slideTime: Time): JavaPairDStream[K, JLong] = { + 
JavaPairDStream.scalaToJavaLong(dstream.countByKeyAndWindow(windowTime, slideTime)) } def countByKeyAndWindow(windowTime: Time, slideTime: Time, numPartitions: Int) @@ -280,4 +282,8 @@ object JavaPairDStream { implicitly[ClassManifest[AnyRef]].asInstanceOf[ClassManifest[V]] new JavaPairDStream[K, V](dstream.dstream) } + + def scalaToJavaLong[K: ClassManifest](dstream: JavaPairDStream[K, Long]): JavaPairDStream[K, JLong] = { + StreamingContext.toPairDStreamFunctions(dstream.dstream).mapValues(new JLong(_)) + } } diff --git a/streaming/src/test/scala/spark/streaming/JavaAPISuite.java b/streaming/src/test/scala/spark/streaming/JavaAPISuite.java index 9e8438d04c..6584d861ed 100644 --- a/streaming/src/test/scala/spark/streaming/JavaAPISuite.java +++ b/streaming/src/test/scala/spark/streaming/JavaAPISuite.java @@ -1,6 +1,7 @@ package spark.streaming; import com.google.common.collect.Lists; +import com.google.common.collect.Maps; import org.junit.After; import org.junit.Assert; import org.junit.Before; @@ -492,8 +493,7 @@ public class JavaAPISuite implements Serializable { sc, inputData, 1); JavaPairDStream pairStream = JavaPairDStream.fromJavaDStream(stream); - // TODO: Below fails with compile error with ... wtf? - JavaPairDStream counted = pairStream.countByKey(); + JavaPairDStream counted = pairStream.countByKey(); JavaTestUtils.attachTestOutputStream(counted); List>> result = JavaTestUtils.runStreams(sc, 2, 2); @@ -589,8 +589,8 @@ public class JavaAPISuite implements Serializable { sc, inputData, 1); JavaPairDStream pairStream = JavaPairDStream.fromJavaDStream(stream); - // TODO: Below fails with compile error with ... wtf? - JavaPairDStream counted = pairStream.countByKeyAndWindow(new Time(2000), new Time(1000)); + JavaPairDStream counted = + pairStream.countByKeyAndWindow(new Time(2000), new Time(1000)); JavaTestUtils.attachTestOutputStream(counted); List>> result = JavaTestUtils.runStreams(sc, 3, 3); From 280b6d018691810bbb3dd3155f059132b4475995 Mon Sep 17 00:00:00 2001 From: Patrick Wendell Date: Thu, 10 Jan 2013 19:52:45 -0800 Subject: [PATCH 202/226] Porting to new Duration class --- .../streaming/api/java/JavaDStream.scala | 28 +++---- .../streaming/api/java/JavaDStreamLike.scala | 24 +++--- .../streaming/api/java/JavaPairDStream.scala | 76 +++++++++---------- .../api/java/JavaStreamingContext.scala | 8 +- .../scala/spark/streaming/JavaAPISuite.java | 20 ++--- 5 files changed, 78 insertions(+), 78 deletions(-) diff --git a/streaming/src/main/scala/spark/streaming/api/java/JavaDStream.scala b/streaming/src/main/scala/spark/streaming/api/java/JavaDStream.scala index 1e5c279e2c..f85864df5d 100644 --- a/streaming/src/main/scala/spark/streaming/api/java/JavaDStream.scala +++ b/streaming/src/main/scala/spark/streaming/api/java/JavaDStream.scala @@ -1,6 +1,6 @@ package spark.streaming.api.java -import spark.streaming.{Time, DStream} +import spark.streaming.{Duration, Time, DStream} import spark.api.java.function.{Function => JFunction} import spark.api.java.JavaRDD import java.util.{List => JList} @@ -22,7 +22,7 @@ class JavaDStream[T](val dstream: DStream[T])(implicit val classManifest: ClassM /** Persists the RDDs of this DStream with the given storage level */ def persist(storageLevel: StorageLevel): JavaDStream[T] = dstream.persist(storageLevel) - /** Method that generates a RDD for the given time */ + /** Method that generates a RDD for the given duration */ def compute(validTime: Time): JavaRDD[T] = { dstream.compute(validTime) match { case Some(rdd) => new JavaRDD(rdd) @@ -33,34 
+33,34 @@ class JavaDStream[T](val dstream: DStream[T])(implicit val classManifest: ClassM /** * Return a new DStream which is computed based on windowed batches of this DStream. * The new DStream generates RDDs with the same interval as this DStream. - * @param windowTime width of the window; must be a multiple of this DStream's interval. + * @param windowDuration width of the window; must be a multiple of this DStream's interval. * @return */ - def window(windowTime: Time): JavaDStream[T] = - dstream.window(windowTime) + def window(windowDuration: Duration): JavaDStream[T] = + dstream.window(windowDuration) /** * Return a new DStream which is computed based on windowed batches of this DStream. - * @param windowTime duration (i.e., width) of the window; + * @param windowDuration duration (i.e., width) of the window; * must be a multiple of this DStream's interval - * @param slideTime sliding interval of the window (i.e., the interval after which + * @param slideDuration sliding interval of the window (i.e., the interval after which * the new DStream will generate RDDs); must be a multiple of this * DStream's interval */ - def window(windowTime: Time, slideTime: Time): JavaDStream[T] = - dstream.window(windowTime, slideTime) + def window(windowDuration: Duration, slideDuration: Duration): JavaDStream[T] = + dstream.window(windowDuration, slideDuration) /** * Returns a new DStream which computed based on tumbling window on this DStream. - * This is equivalent to window(batchTime, batchTime). - * @param batchTime tumbling window duration; must be a multiple of this DStream's interval + * This is equivalent to window(batchDuration, batchDuration). + * @param batchDuration tumbling window duration; must be a multiple of this DStream's interval */ - def tumble(batchTime: Time): JavaDStream[T] = - dstream.tumble(batchTime) + def tumble(batchDuration: Duration): JavaDStream[T] = + dstream.tumble(batchDuration) /** * Returns a new DStream by unifying data of another DStream with this DStream. - * @param that Another DStream having the same interval (i.e., slideTime) as this DStream. + * @param that Another DStream having the same interval (i.e., slideDuration) as this DStream. */ def union(that: JavaDStream[T]): JavaDStream[T] = dstream.union(that.dstream) diff --git a/streaming/src/main/scala/spark/streaming/api/java/JavaDStreamLike.scala b/streaming/src/main/scala/spark/streaming/api/java/JavaDStreamLike.scala index 23a0aaaefd..cb58c1351d 100644 --- a/streaming/src/main/scala/spark/streaming/api/java/JavaDStreamLike.scala +++ b/streaming/src/main/scala/spark/streaming/api/java/JavaDStreamLike.scala @@ -30,11 +30,11 @@ trait JavaDStreamLike[T, This <: JavaDStreamLike[T, This]] extends Serializable /** * Returns a new DStream in which each RDD has a single element generated by counting the number - * of elements in a window over this DStream. windowTime and slideTime are as defined in the - * window() operation. This is equivalent to window(windowTime, slideTime).count() + * of elements in a window over this DStream. windowDuration and slideDuration are as defined in the + * window() operation. 
This is equivalent to window(windowDuration, slideDuration).count() */ - def countByWindow(windowTime: Time, slideTime: Time) : JavaDStream[Int] = { - dstream.countByWindow(windowTime, slideTime) + def countByWindow(windowDuration: Duration, slideDuration: Duration) : JavaDStream[Int] = { + dstream.countByWindow(windowDuration, slideDuration) } /** @@ -88,22 +88,22 @@ trait JavaDStreamLike[T, This <: JavaDStreamLike[T, This]] extends Serializable /** * Returns a new DStream in which each RDD has a single element generated by reducing all - * elements in a window over this DStream. windowTime and slideTime are as defined in the - * window() operation. This is equivalent to window(windowTime, slideTime).reduce(reduceFunc) + * elements in a window over this DStream. windowDuration and slideDuration are as defined in the + * window() operation. This is equivalent to window(windowDuration, slideDuration).reduce(reduceFunc) */ def reduceByWindow( reduceFunc: JFunction2[T, T, T], invReduceFunc: JFunction2[T, T, T], - windowTime: Time, - slideTime: Time): JavaDStream[T] = { - dstream.reduceByWindow(reduceFunc, invReduceFunc, windowTime, slideTime) + windowDuration: Duration, + slideDuration: Duration): JavaDStream[T] = { + dstream.reduceByWindow(reduceFunc, invReduceFunc, windowDuration, slideDuration) } /** - * Returns all the RDDs between 'fromTime' to 'toTime' (both included) + * Returns all the RDDs between 'fromDuration' to 'toDuration' (both included) */ - def slice(fromTime: Time, toTime: Time): JList[JavaRDD[T]] = { - new util.ArrayList(dstream.slice(fromTime, toTime).map(new JavaRDD(_)).toSeq) + def slice(fromDuration: Duration, toDuration: Duration): JList[JavaRDD[T]] = { + new util.ArrayList(dstream.slice(fromDuration, toDuration).map(new JavaRDD(_)).toSeq) } /** diff --git a/streaming/src/main/scala/spark/streaming/api/java/JavaPairDStream.scala b/streaming/src/main/scala/spark/streaming/api/java/JavaPairDStream.scala index fa46ca9267..03336d040d 100644 --- a/streaming/src/main/scala/spark/streaming/api/java/JavaPairDStream.scala +++ b/streaming/src/main/scala/spark/streaming/api/java/JavaPairDStream.scala @@ -38,7 +38,7 @@ class JavaPairDStream[K, V](val dstream: DStream[(K, V)])( /** Persists the RDDs of this DStream with the given storage level */ def persist(storageLevel: StorageLevel): JavaPairDStream[K, V] = dstream.persist(storageLevel) - /** Method that generates a RDD for the given time */ + /** Method that generates a RDD for the given Duration */ def compute(validTime: Time): JavaPairRDD[K, V] = { dstream.compute(validTime) match { case Some(rdd) => new JavaPairRDD(rdd) @@ -49,34 +49,34 @@ class JavaPairDStream[K, V](val dstream: DStream[(K, V)])( /** * Return a new DStream which is computed based on windowed batches of this DStream. * The new DStream generates RDDs with the same interval as this DStream. - * @param windowTime width of the window; must be a multiple of this DStream's interval. + * @param windowDuration width of the window; must be a multiple of this DStream's interval. * @return */ - def window(windowTime: Time): JavaPairDStream[K, V] = - dstream.window(windowTime) + def window(windowDuration: Duration): JavaPairDStream[K, V] = + dstream.window(windowDuration) /** * Return a new DStream which is computed based on windowed batches of this DStream. 
- * @param windowTime duration (i.e., width) of the window; + * @param windowDuration duration (i.e., width) of the window; * must be a multiple of this DStream's interval - * @param slideTime sliding interval of the window (i.e., the interval after which + * @param slideDuration sliding interval of the window (i.e., the interval after which * the new DStream will generate RDDs); must be a multiple of this * DStream's interval */ - def window(windowTime: Time, slideTime: Time): JavaPairDStream[K, V] = - dstream.window(windowTime, slideTime) + def window(windowDuration: Duration, slideDuration: Duration): JavaPairDStream[K, V] = + dstream.window(windowDuration, slideDuration) /** * Returns a new DStream which computed based on tumbling window on this DStream. - * This is equivalent to window(batchTime, batchTime). - * @param batchTime tumbling window duration; must be a multiple of this DStream's interval + * This is equivalent to window(batchDuration, batchDuration). + * @param batchDuration tumbling window duration; must be a multiple of this DStream's interval */ - def tumble(batchTime: Time): JavaPairDStream[K, V] = - dstream.tumble(batchTime) + def tumble(batchDuration: Duration): JavaPairDStream[K, V] = + dstream.tumble(batchDuration) /** * Returns a new DStream by unifying data of another DStream with this DStream. - * @param that Another DStream having the same interval (i.e., slideTime) as this DStream. + * @param that Another DStream having the same interval (i.e., slideDuration) as this DStream. */ def union(that: JavaPairDStream[K, V]): JavaPairDStream[K, V] = dstream.union(that.dstream) @@ -117,66 +117,66 @@ class JavaPairDStream[K, V](val dstream: DStream[(K, V)])( JavaPairDStream.scalaToJavaLong(dstream.countByKey()); } - def groupByKeyAndWindow(windowTime: Time, slideTime: Time): JavaPairDStream[K, JList[V]] = { - dstream.groupByKeyAndWindow(windowTime, slideTime).mapValues(seqAsJavaList _) + def groupByKeyAndWindow(windowDuration: Duration, slideDuration: Duration): JavaPairDStream[K, JList[V]] = { + dstream.groupByKeyAndWindow(windowDuration, slideDuration).mapValues(seqAsJavaList _) } - def groupByKeyAndWindow(windowTime: Time, slideTime: Time, numPartitions: Int) + def groupByKeyAndWindow(windowDuration: Duration, slideDuration: Duration, numPartitions: Int) :JavaPairDStream[K, JList[V]] = { - dstream.groupByKeyAndWindow(windowTime, slideTime, numPartitions).mapValues(seqAsJavaList _) + dstream.groupByKeyAndWindow(windowDuration, slideDuration, numPartitions).mapValues(seqAsJavaList _) } - def groupByKeyAndWindow(windowTime: Time, slideTime: Time, partitioner: Partitioner) + def groupByKeyAndWindow(windowDuration: Duration, slideDuration: Duration, partitioner: Partitioner) :JavaPairDStream[K, JList[V]] = { - dstream.groupByKeyAndWindow(windowTime, slideTime, partitioner).mapValues(seqAsJavaList _) + dstream.groupByKeyAndWindow(windowDuration, slideDuration, partitioner).mapValues(seqAsJavaList _) } - def reduceByKeyAndWindow(reduceFunc: Function2[V, V, V], windowTime: Time) + def reduceByKeyAndWindow(reduceFunc: Function2[V, V, V], windowDuration: Duration) :JavaPairDStream[K, V] = { - dstream.reduceByKeyAndWindow(reduceFunc, windowTime) + dstream.reduceByKeyAndWindow(reduceFunc, windowDuration) } - def reduceByKeyAndWindow(reduceFunc: Function2[V, V, V], windowTime: Time, slideTime: Time) + def reduceByKeyAndWindow(reduceFunc: Function2[V, V, V], windowDuration: Duration, slideDuration: Duration) :JavaPairDStream[K, V] = { - dstream.reduceByKeyAndWindow(reduceFunc, 
windowTime, slideTime) + dstream.reduceByKeyAndWindow(reduceFunc, windowDuration, slideDuration) } def reduceByKeyAndWindow( reduceFunc: Function2[V, V, V], - windowTime: Time, - slideTime: Time, + windowDuration: Duration, + slideDuration: Duration, numPartitions: Int): JavaPairDStream[K, V] = { - dstream.reduceByKeyAndWindow(reduceFunc, windowTime, slideTime, numPartitions) + dstream.reduceByKeyAndWindow(reduceFunc, windowDuration, slideDuration, numPartitions) } - def reduceByKeyAndWindow(reduceFunc: Function2[V, V, V], windowTime: Time, slideTime: Time, + def reduceByKeyAndWindow(reduceFunc: Function2[V, V, V], windowDuration: Duration, slideDuration: Duration, partitioner: Partitioner): JavaPairDStream[K, V] = { - dstream.reduceByKeyAndWindow(reduceFunc, windowTime, slideTime, partitioner) + dstream.reduceByKeyAndWindow(reduceFunc, windowDuration, slideDuration, partitioner) } def reduceByKeyAndWindow(reduceFunc: Function2[V, V, V], invReduceFunc: Function2[V, V, V], - windowTime: Time, slideTime: Time): JavaPairDStream[K, V] = { - dstream.reduceByKeyAndWindow(reduceFunc, invReduceFunc, windowTime, slideTime) + windowDuration: Duration, slideDuration: Duration): JavaPairDStream[K, V] = { + dstream.reduceByKeyAndWindow(reduceFunc, invReduceFunc, windowDuration, slideDuration) } def reduceByKeyAndWindow(reduceFunc: Function2[V, V, V], invReduceFunc: Function2[V, V, V], - windowTime: Time, slideTime: Time, numPartitions: Int): JavaPairDStream[K, V] = { - dstream.reduceByKeyAndWindow(reduceFunc, invReduceFunc, windowTime, slideTime, numPartitions) + windowDuration: Duration, slideDuration: Duration, numPartitions: Int): JavaPairDStream[K, V] = { + dstream.reduceByKeyAndWindow(reduceFunc, invReduceFunc, windowDuration, slideDuration, numPartitions) } def reduceByKeyAndWindow(reduceFunc: Function2[V, V, V], invReduceFunc: Function2[V, V, V], - windowTime: Time, slideTime: Time, partitioner: Partitioner) + windowDuration: Duration, slideDuration: Duration, partitioner: Partitioner) : JavaPairDStream[K, V] = { - dstream.reduceByKeyAndWindow(reduceFunc, invReduceFunc, windowTime, slideTime, partitioner) + dstream.reduceByKeyAndWindow(reduceFunc, invReduceFunc, windowDuration, slideDuration, partitioner) } - def countByKeyAndWindow(windowTime: Time, slideTime: Time): JavaPairDStream[K, JLong] = { - JavaPairDStream.scalaToJavaLong(dstream.countByKeyAndWindow(windowTime, slideTime)) + def countByKeyAndWindow(windowDuration: Duration, slideDuration: Duration): JavaPairDStream[K, JLong] = { + JavaPairDStream.scalaToJavaLong(dstream.countByKeyAndWindow(windowDuration, slideDuration)) } - def countByKeyAndWindow(windowTime: Time, slideTime: Time, numPartitions: Int) + def countByKeyAndWindow(windowDuration: Duration, slideDuration: Duration, numPartitions: Int) : JavaPairDStream[K, Long] = { - dstream.countByKeyAndWindow(windowTime, slideTime, numPartitions) + dstream.countByKeyAndWindow(windowDuration, slideDuration, numPartitions) } def mapValues[U](f: JFunction[V, U]): JavaPairDStream[K, U] = { diff --git a/streaming/src/main/scala/spark/streaming/api/java/JavaStreamingContext.scala b/streaming/src/main/scala/spark/streaming/api/java/JavaStreamingContext.scala index e8cd03847a..5a712d18c7 100644 --- a/streaming/src/main/scala/spark/streaming/api/java/JavaStreamingContext.scala +++ b/streaming/src/main/scala/spark/streaming/api/java/JavaStreamingContext.scala @@ -12,7 +12,7 @@ import java.io.InputStream import java.util.{Map => JMap} class JavaStreamingContext(val ssc: StreamingContext) { - def 
this(master: String, frameworkName: String, batchDuration: Time) = + def this(master: String, frameworkName: String, batchDuration: Duration) = this(new StreamingContext(master, frameworkName, batchDuration)) // TODOs: @@ -190,18 +190,18 @@ class JavaStreamingContext(val ssc: StreamingContext) { * @param directory HDFS-compatible directory where the checkpoint data will be reliably stored * @param interval checkpoint interval */ - def checkpoint(directory: String, interval: Time = null) { + def checkpoint(directory: String, interval: Duration = null) { ssc.checkpoint(directory, interval) } /** * Sets each DStreams in this context to remember RDDs it generated in the last given duration. - * DStreams remember RDDs only for a limited duration of time and releases them for garbage + * DStreams remember RDDs only for a limited duration of duration and releases them for garbage * collection. This method allows the developer to specify how to long to remember the RDDs ( * if the developer wishes to query old data outside the DStream computation). * @param duration Minimum duration that each DStream should remember its RDDs */ - def remember(duration: Time) { + def remember(duration: Duration) { ssc.remember(duration) } diff --git a/streaming/src/test/scala/spark/streaming/JavaAPISuite.java b/streaming/src/test/scala/spark/streaming/JavaAPISuite.java index 6584d861ed..26ff5b1ccd 100644 --- a/streaming/src/test/scala/spark/streaming/JavaAPISuite.java +++ b/streaming/src/test/scala/spark/streaming/JavaAPISuite.java @@ -29,7 +29,7 @@ public class JavaAPISuite implements Serializable { @Before public void setUp() { - sc = new JavaStreamingContext("local[2]", "test", new Time(1000)); + sc = new JavaStreamingContext("local[2]", "test", new Duration(1000)); } @After @@ -96,7 +96,7 @@ public class JavaAPISuite implements Serializable { Arrays.asList(7,8,9)); JavaDStream stream = JavaTestUtils.attachTestInputStream(sc, inputData, 1); - JavaDStream windowed = stream.window(new Time(2000)); + JavaDStream windowed = stream.window(new Duration(2000)); JavaTestUtils.attachTestOutputStream(windowed); List> result = JavaTestUtils.runStreams(sc, 4, 4); @@ -104,7 +104,7 @@ public class JavaAPISuite implements Serializable { } @Test - public void testWindowWithSlideTime() { + public void testWindowWithSlideDuration() { List> inputData = Arrays.asList( Arrays.asList(1,2,3), Arrays.asList(4,5,6), @@ -120,7 +120,7 @@ public class JavaAPISuite implements Serializable { Arrays.asList(13,14,15,16,17,18)); JavaDStream stream = JavaTestUtils.attachTestInputStream(sc, inputData, 1); - JavaDStream windowed = stream.window(new Time(4000), new Time(2000)); + JavaDStream windowed = stream.window(new Duration(4000), new Duration(2000)); JavaTestUtils.attachTestOutputStream(windowed); List> result = JavaTestUtils.runStreams(sc, 8, 4); @@ -143,7 +143,7 @@ public class JavaAPISuite implements Serializable { Arrays.asList(13,14,15,16,17,18)); JavaDStream stream = JavaTestUtils.attachTestInputStream(sc, inputData, 1); - JavaDStream windowed = stream.tumble(new Time(2000)); + JavaDStream windowed = stream.tumble(new Duration(2000)); JavaTestUtils.attachTestOutputStream(windowed); List> result = JavaTestUtils.runStreams(sc, 6, 3); @@ -267,7 +267,7 @@ public class JavaAPISuite implements Serializable { JavaDStream stream = JavaTestUtils.attachTestInputStream(sc, inputData, 1); JavaDStream reducedWindowed = stream.reduceByWindow(new IntegerSum(), - new IntegerDifference(), new Time(2000), new Time(1000)); + new IntegerDifference(), new 
Duration(2000), new Duration(1000)); JavaTestUtils.attachTestOutputStream(reducedWindowed); List> result = JavaTestUtils.runStreams(sc, 4, 4); @@ -517,7 +517,7 @@ public class JavaAPISuite implements Serializable { JavaPairDStream pairStream = JavaPairDStream.fromJavaDStream(stream); JavaPairDStream> groupWindowed = - pairStream.groupByKeyAndWindow(new Time(2000), new Time(1000)); + pairStream.groupByKeyAndWindow(new Duration(2000), new Duration(1000)); JavaTestUtils.attachTestOutputStream(groupWindowed); List>>> result = JavaTestUtils.runStreams(sc, 3, 3); @@ -540,7 +540,7 @@ public class JavaAPISuite implements Serializable { JavaPairDStream pairStream = JavaPairDStream.fromJavaDStream(stream); JavaPairDStream reduceWindowed = - pairStream.reduceByKeyAndWindow(new IntegerSum(), new Time(2000), new Time(1000)); + pairStream.reduceByKeyAndWindow(new IntegerSum(), new Duration(2000), new Duration(1000)); JavaTestUtils.attachTestOutputStream(reduceWindowed); List>> result = JavaTestUtils.runStreams(sc, 3, 3); @@ -563,7 +563,7 @@ public class JavaAPISuite implements Serializable { JavaPairDStream pairStream = JavaPairDStream.fromJavaDStream(stream); JavaPairDStream reduceWindowed = - pairStream.reduceByKeyAndWindow(new IntegerSum(), new IntegerDifference(), new Time(2000), new Time(1000)); + pairStream.reduceByKeyAndWindow(new IntegerSum(), new IntegerDifference(), new Duration(2000), new Duration(1000)); JavaTestUtils.attachTestOutputStream(reduceWindowed); List>> result = JavaTestUtils.runStreams(sc, 3, 3); @@ -590,7 +590,7 @@ public class JavaAPISuite implements Serializable { JavaPairDStream pairStream = JavaPairDStream.fromJavaDStream(stream); JavaPairDStream counted = - pairStream.countByKeyAndWindow(new Time(2000), new Time(1000)); + pairStream.countByKeyAndWindow(new Duration(2000), new Duration(1000)); JavaTestUtils.attachTestOutputStream(counted); List>> result = JavaTestUtils.runStreams(sc, 3, 3); From 5bcb048167fe0b90f749910233342c09fff3fce7 Mon Sep 17 00:00:00 2001 From: Patrick Wendell Date: Fri, 11 Jan 2013 11:18:06 -0800 Subject: [PATCH 203/226] More work on InputStreams --- .../streaming/api/java/JavaPairDStream.scala | 3 +- .../api/java/JavaStreamingContext.scala | 68 +++++++++++++++--- .../scala/spark/streaming/JavaAPISuite.java | 69 ++++++++++++++++++- 3 files changed, 130 insertions(+), 10 deletions(-) diff --git a/streaming/src/main/scala/spark/streaming/api/java/JavaPairDStream.scala b/streaming/src/main/scala/spark/streaming/api/java/JavaPairDStream.scala index 03336d040d..b25a3f109c 100644 --- a/streaming/src/main/scala/spark/streaming/api/java/JavaPairDStream.scala +++ b/streaming/src/main/scala/spark/streaming/api/java/JavaPairDStream.scala @@ -283,7 +283,8 @@ object JavaPairDStream { new JavaPairDStream[K, V](dstream.dstream) } - def scalaToJavaLong[K: ClassManifest](dstream: JavaPairDStream[K, Long]): JavaPairDStream[K, JLong] = { + def scalaToJavaLong[K: ClassManifest](dstream: JavaPairDStream[K, Long]) + : JavaPairDStream[K, JLong] = { StreamingContext.toPairDStreamFunctions(dstream.dstream).mapValues(new JLong(_)) } } diff --git a/streaming/src/main/scala/spark/streaming/api/java/JavaStreamingContext.scala b/streaming/src/main/scala/spark/streaming/api/java/JavaStreamingContext.scala index 5a712d18c7..2833793b94 100644 --- a/streaming/src/main/scala/spark/streaming/api/java/JavaStreamingContext.scala +++ b/streaming/src/main/scala/spark/streaming/api/java/JavaStreamingContext.scala @@ -2,6 +2,7 @@ package spark.streaming.api.java import 
scala.collection.JavaConversions._ import java.util.{List => JList} +import java.lang.{Long => JLong, Integer => JInt} import spark.streaming._ import dstream._ @@ -18,9 +19,26 @@ class JavaStreamingContext(val ssc: StreamingContext) { // TODOs: // - Test StreamingContext functions // - Test to/from Hadoop functions - // - Support registering InputStreams - // - Add Kafka Stream + // - Support creating and registering InputStreams + /** + * Create an input stream that pulls messages form a Kafka Broker. + * @param hostname Zookeper hostname. + * @param port Zookeper port. + * @param groupId The group id for this consumer. + * @param topics Map of (topic_name -> numPartitions) to consume. Each partition is consumed + * in its own thread. + */ + def kafkaStream[T]( + hostname: String, + port: Int, + groupId: String, + topics: JMap[String, JInt]) + : JavaDStream[T] = { + implicit val cmt: ClassManifest[T] = + implicitly[ClassManifest[AnyRef]].asInstanceOf[ClassManifest[T]] + ssc.kafkaStream[T](hostname, port, groupId, Map(topics.mapValues(_.intValue()).toSeq: _*)) + } /** * Create an input stream that pulls messages form a Kafka Broker. @@ -31,18 +49,54 @@ class JavaStreamingContext(val ssc: StreamingContext) { * in its own thread. * @param initialOffsets Optional initial offsets for each of the partitions to consume. * By default the value is pulled from zookeper. - * @param storageLevel RDD storage level. Defaults to memory-only. */ def kafkaStream[T]( hostname: String, port: Int, groupId: String, - topics: JMap[String, Int]) - : DStream[T] = { + topics: JMap[String, JInt], + initialOffsets: JMap[KafkaPartitionKey, JLong]) + : JavaDStream[T] = { implicit val cmt: ClassManifest[T] = implicitly[ClassManifest[AnyRef]].asInstanceOf[ClassManifest[T]] - ssc.kafkaStream(hostname, port, groupId, Map(topics.toSeq: _*)) + ssc.kafkaStream[T]( + hostname, + port, + groupId, + Map(topics.mapValues(_.intValue()).toSeq: _*), + Map(initialOffsets.mapValues(_.longValue()).toSeq: _*)) } + + /** + * Create an input stream that pulls messages form a Kafka Broker. + * @param hostname Zookeper hostname. + * @param port Zookeper port. + * @param groupId The group id for this consumer. + * @param topics Map of (topic_name -> numPartitions) to consume. Each partition is consumed + * in its own thread. + * @param initialOffsets Optional initial offsets for each of the partitions to consume. + * By default the value is pulled from zookeper. + * @param storageLevel RDD storage level. Defaults to memory-only + */ + def kafkaStream[T]( + hostname: String, + port: Int, + groupId: String, + topics: JMap[String, JInt], + initialOffsets: JMap[KafkaPartitionKey, JLong], + storageLevel: StorageLevel) + : JavaDStream[T] = { + implicit val cmt: ClassManifest[T] = + implicitly[ClassManifest[AnyRef]].asInstanceOf[ClassManifest[T]] + ssc.kafkaStream[T]( + hostname, + port, + groupId, + Map(topics.mapValues(_.intValue()).toSeq: _*), + Map(initialOffsets.mapValues(_.longValue()).toSeq: _*), + storageLevel) + } + /** * Create a input stream from network source hostname:port. 
Data is received using * a TCP socket and the receive bytes is interpreted as UTF8 encoded \n delimited @@ -175,8 +229,6 @@ class JavaStreamingContext(val ssc: StreamingContext) { ssc.flumeStream(hostname, port) } - // NOT SUPPORTED: registerInputStream - /** * Registers an output stream that will be computed every interval */ diff --git a/streaming/src/test/scala/spark/streaming/JavaAPISuite.java b/streaming/src/test/scala/spark/streaming/JavaAPISuite.java index 26ff5b1ccd..7475b9536b 100644 --- a/streaming/src/test/scala/spark/streaming/JavaAPISuite.java +++ b/streaming/src/test/scala/spark/streaming/JavaAPISuite.java @@ -2,6 +2,7 @@ package spark.streaming; import com.google.common.collect.Lists; import com.google.common.collect.Maps; +import org.apache.hadoop.mapreduce.lib.input.SequenceFileInputFormat; import org.junit.After; import org.junit.Assert; import org.junit.Before; @@ -13,12 +14,15 @@ import spark.api.java.function.FlatMapFunction; import spark.api.java.function.Function; import spark.api.java.function.Function2; import spark.api.java.function.PairFunction; +import spark.storage.StorageLevel; import spark.streaming.api.java.JavaDStream; import spark.streaming.api.java.JavaPairDStream; import spark.streaming.api.java.JavaStreamingContext; import spark.streaming.JavaTestUtils; +import spark.streaming.dstream.KafkaPartitionKey; +import sun.org.mozilla.javascript.annotations.JSFunction; -import java.io.Serializable; +import java.io.*; import java.util.*; // The test suite itself is Serializable so that anonymous Function implementations can be @@ -757,4 +761,67 @@ public class JavaAPISuite implements Serializable { Assert.assertEquals(expected, result); } + + // Input stream tests. These mostly just test that we can instantiate a given InputStream with + // Java arguments and assign it to a JavaDStream without producing type errors. Testing of the + // InputStream functionality is deferred to the existing Scala tests. 
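  // Editor's sketch (illustrative, not part of the original patch): the constructors exercised
  // below pair naturally with the DStream transformations added earlier in this series. Assuming
  // the networkTextStream(), countByWindow() and print() methods introduced in these commits, a
  // minimal end-to-end use of the Java API would look roughly like this (host and port are
  // placeholder values):
  private void exampleUsageSketch() {
    // Create a context with a 1 second batch interval, read lines from a TCP source,
    // count elements over a 2 second window sliding every 1 second, and print the counts.
    JavaStreamingContext ssc = new JavaStreamingContext("local[2]", "example", new Duration(1000));
    JavaDStream<String> lines = ssc.networkTextStream("localhost", 9999);
    lines.countByWindow(new Duration(2000), new Duration(1000)).print();
    ssc.start();
  }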
+ @Test + public void testKafkaStream() { + HashMap topics = Maps.newHashMap(); + HashMap offsets = Maps.newHashMap(); + JavaDStream test1 = sc.kafkaStream("localhost", 12345, "group", topics); + JavaDStream test2 = sc.kafkaStream("localhost", 12345, "group", topics, offsets); + JavaDStream test3 = sc.kafkaStream("localhost", 12345, "group", topics, offsets, + StorageLevel.MEMORY_AND_DISK()); + } + + @Test + public void testNetworkTextStream() { + JavaDStream test = sc.networkTextStream("localhost", 12345); + } + + @Test + public void testNetworkString() { + class Converter extends Function> { + public Iterable call(InputStream in) { + BufferedReader reader = new BufferedReader(new InputStreamReader(in)); + List out = new ArrayList(); + try { + while (true) { + String line = reader.readLine(); + if (line == null) { break; } + out.add(line); + } + } catch (IOException e) { } + return out; + } + } + + JavaDStream test = sc.networkStream( + "localhost", + 12345, + new Converter(), + StorageLevel.MEMORY_ONLY()); + } + + @Test + public void testTextFileStream() { + JavaDStream test = sc.textFileStream("/tmp/foo"); + } + + @Test + public void testRawNetworkStream() { + JavaDStream test = sc.rawNetworkStream("localhost", 12345); + } + + @Test + public void testFlumeStream() { + JavaDStream test = sc.flumeStream("localhost", 12345); + } + + @Test + public void testFileStream() { + JavaPairDStream foo = + sc.fileStream("/tmp/foo"); + } } From 3461cd99b7b680be9c9dc263382b42f30c9edd7d Mon Sep 17 00:00:00 2001 From: Patrick Wendell Date: Fri, 11 Jan 2013 12:05:04 -0800 Subject: [PATCH 204/226] Flume example and bug fix --- .../examples/JavaFlumeEventCount.java | 50 +++++++++++++++++++ .../streaming/api/java/JavaDStreamLike.scala | 9 +++- 2 files changed, 57 insertions(+), 2 deletions(-) create mode 100644 examples/src/main/scala/spark/streaming/examples/JavaFlumeEventCount.java diff --git a/examples/src/main/scala/spark/streaming/examples/JavaFlumeEventCount.java b/examples/src/main/scala/spark/streaming/examples/JavaFlumeEventCount.java new file mode 100644 index 0000000000..6592d9bc2e --- /dev/null +++ b/examples/src/main/scala/spark/streaming/examples/JavaFlumeEventCount.java @@ -0,0 +1,50 @@ +package spark.streaming.examples; + +import spark.api.java.function.Function; +import spark.streaming.*; +import spark.streaming.api.java.*; +import spark.streaming.dstream.SparkFlumeEvent; + +/** + * Produces a count of events received from Flume. + * + * This should be used in conjunction with an AvroSink in Flume. It will start + * an Avro server on at the request host:port address and listen for requests. + * Your Flume AvroSink should be pointed to this address. + * + * Usage: FlumeEventCount + * + * is a Spark master URL + * is the host the Flume receiver will be started on - a receiver + * creates a server and listens for flume events. + * is the port the Flume receiver will listen on. 
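 *
 * Editor's illustration (not part of the original patch): assuming the repository's top-level
 * `run` script, one plausible invocation would be
 *   ./run spark.streaming.examples.JavaFlumeEventCount local[2] localhost 41414
 * where local[2] is the Spark master URL and 41414 is an arbitrary example port for the
 * Flume AvroSink to point at.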
+ */ +public class JavaFlumeEventCount { + public static void main(String[] args) { + if (args.length != 3) { + System.err.println("Usage: JavaFlumeEventCount "); + System.exit(1); + } + + String master = args[0]; + String host = args[1]; + int port = Integer.parseInt(args[2]); + + Duration batchInterval = new Duration(2000); + + JavaStreamingContext sc = new JavaStreamingContext(master, "FlumeEventCount", batchInterval); + + JavaDStream flumeStream = sc.flumeStream("localhost", port); + + flumeStream.count(); + + flumeStream.count().map(new Function() { + @Override + public String call(Integer in) { + return "Received " + in + " flume events."; + } + }).print(); + + sc.start(); + } +} diff --git a/streaming/src/main/scala/spark/streaming/api/java/JavaDStreamLike.scala b/streaming/src/main/scala/spark/streaming/api/java/JavaDStreamLike.scala index cb58c1351d..91bcca9afa 100644 --- a/streaming/src/main/scala/spark/streaming/api/java/JavaDStreamLike.scala +++ b/streaming/src/main/scala/spark/streaming/api/java/JavaDStreamLike.scala @@ -1,6 +1,7 @@ package spark.streaming.api.java import java.util.{List => JList} +import java.lang.{Integer => JInt} import scala.collection.JavaConversions._ @@ -16,6 +17,10 @@ trait JavaDStreamLike[T, This <: JavaDStreamLike[T, This]] extends Serializable def dstream: DStream[T] + implicit def scalaIntToJavaInteger(in: DStream[Int]): JavaDStream[JInt] = { + in.map(new JInt(_)) + } + /** * Prints the first ten elements of each RDD generated in this DStream. This is an output * operator, so this DStream will be registered as an output stream and there materialized. @@ -26,14 +31,14 @@ trait JavaDStreamLike[T, This <: JavaDStreamLike[T, This]] extends Serializable * Returns a new DStream in which each RDD has a single element generated by counting each RDD * of this DStream. */ - def count(): JavaDStream[Int] = dstream.count() + def count(): JavaDStream[JInt] = dstream.count() /** * Returns a new DStream in which each RDD has a single element generated by counting the number * of elements in a window over this DStream. windowDuration and slideDuration are as defined in the * window() operation. This is equivalent to window(windowDuration, slideDuration).count() */ - def countByWindow(windowDuration: Duration, slideDuration: Duration) : JavaDStream[Int] = { + def countByWindow(windowDuration: Duration, slideDuration: Duration) : JavaDStream[JInt] = { dstream.countByWindow(windowDuration, slideDuration) } From a292ed8d8af069ee1318cdf7c00d3db8d3ba8db9 Mon Sep 17 00:00:00 2001 From: Patrick Wendell Date: Fri, 11 Jan 2013 12:34:36 -0800 Subject: [PATCH 205/226] Some style cleanup --- .../streaming/api/java/JavaDStreamLike.scala | 9 +- .../streaming/api/java/JavaPairDStream.scala | 137 +++++++++++------- 2 files changed, 93 insertions(+), 53 deletions(-) diff --git a/streaming/src/main/scala/spark/streaming/api/java/JavaDStreamLike.scala b/streaming/src/main/scala/spark/streaming/api/java/JavaDStreamLike.scala index 91bcca9afa..80d8865725 100644 --- a/streaming/src/main/scala/spark/streaming/api/java/JavaDStreamLike.scala +++ b/streaming/src/main/scala/spark/streaming/api/java/JavaDStreamLike.scala @@ -97,10 +97,11 @@ trait JavaDStreamLike[T, This <: JavaDStreamLike[T, This]] extends Serializable * window() operation. 
This is equivalent to window(windowDuration, slideDuration).reduce(reduceFunc) */ def reduceByWindow( - reduceFunc: JFunction2[T, T, T], - invReduceFunc: JFunction2[T, T, T], - windowDuration: Duration, - slideDuration: Duration): JavaDStream[T] = { + reduceFunc: JFunction2[T, T, T], + invReduceFunc: JFunction2[T, T, T], + windowDuration: Duration, + slideDuration: Duration + ): JavaDStream[T] = { dstream.reduceByWindow(reduceFunc, invReduceFunc, windowDuration, slideDuration) } diff --git a/streaming/src/main/scala/spark/streaming/api/java/JavaPairDStream.scala b/streaming/src/main/scala/spark/streaming/api/java/JavaPairDStream.scala index b25a3f109c..eeb1f07939 100644 --- a/streaming/src/main/scala/spark/streaming/api/java/JavaPairDStream.scala +++ b/streaming/src/main/scala/spark/streaming/api/java/JavaPairDStream.scala @@ -101,9 +101,10 @@ class JavaPairDStream[K, V](val dstream: DStream[(K, V)])( dstream.reduceByKey(func, numPartitions) def combineByKey[C](createCombiner: JFunction[V, C], - mergeValue: JFunction2[C, V, C], - mergeCombiners: JFunction2[C, C, C], - partitioner: Partitioner): JavaPairDStream[K, C] = { + mergeValue: JFunction2[C, V, C], + mergeCombiners: JFunction2[C, C, C], + partitioner: Partitioner + ): JavaPairDStream[K, C] = { implicit val cm: ClassManifest[C] = implicitly[ClassManifest[AnyRef]].asInstanceOf[ClassManifest[C]] dstream.combineByKey(createCombiner, mergeValue, mergeCombiners, partitioner) @@ -117,18 +118,24 @@ class JavaPairDStream[K, V](val dstream: DStream[(K, V)])( JavaPairDStream.scalaToJavaLong(dstream.countByKey()); } - def groupByKeyAndWindow(windowDuration: Duration, slideDuration: Duration): JavaPairDStream[K, JList[V]] = { + def groupByKeyAndWindow(windowDuration: Duration, slideDuration: Duration) + : JavaPairDStream[K, JList[V]] = { dstream.groupByKeyAndWindow(windowDuration, slideDuration).mapValues(seqAsJavaList _) } def groupByKeyAndWindow(windowDuration: Duration, slideDuration: Duration, numPartitions: Int) :JavaPairDStream[K, JList[V]] = { - dstream.groupByKeyAndWindow(windowDuration, slideDuration, numPartitions).mapValues(seqAsJavaList _) + dstream.groupByKeyAndWindow(windowDuration, slideDuration, numPartitions) + .mapValues(seqAsJavaList _) } - def groupByKeyAndWindow(windowDuration: Duration, slideDuration: Duration, partitioner: Partitioner) - :JavaPairDStream[K, JList[V]] = { - dstream.groupByKeyAndWindow(windowDuration, slideDuration, partitioner).mapValues(seqAsJavaList _) + def groupByKeyAndWindow( + windowDuration: Duration, + slideDuration: Duration, + partitioner: Partitioner + ):JavaPairDStream[K, JList[V]] = { + dstream.groupByKeyAndWindow(windowDuration, slideDuration, partitioner) + .mapValues(seqAsJavaList _) } def reduceByKeyAndWindow(reduceFunc: Function2[V, V, V], windowDuration: Duration) @@ -136,46 +143,78 @@ class JavaPairDStream[K, V](val dstream: DStream[(K, V)])( dstream.reduceByKeyAndWindow(reduceFunc, windowDuration) } - def reduceByKeyAndWindow(reduceFunc: Function2[V, V, V], windowDuration: Duration, slideDuration: Duration) - :JavaPairDStream[K, V] = { + def reduceByKeyAndWindow( + reduceFunc: Function2[V, V, V], + windowDuration: Duration, + slideDuration: Duration + ):JavaPairDStream[K, V] = { dstream.reduceByKeyAndWindow(reduceFunc, windowDuration, slideDuration) } def reduceByKeyAndWindow( - reduceFunc: Function2[V, V, V], - windowDuration: Duration, - slideDuration: Duration, - numPartitions: Int): JavaPairDStream[K, V] = { + reduceFunc: Function2[V, V, V], + windowDuration: Duration, + 
slideDuration: Duration, + numPartitions: Int + ): JavaPairDStream[K, V] = { dstream.reduceByKeyAndWindow(reduceFunc, windowDuration, slideDuration, numPartitions) } - def reduceByKeyAndWindow(reduceFunc: Function2[V, V, V], windowDuration: Duration, slideDuration: Duration, - partitioner: Partitioner): JavaPairDStream[K, V] = { + def reduceByKeyAndWindow( + reduceFunc: Function2[V, V, V], + windowDuration: Duration, + slideDuration: Duration, + partitioner: Partitioner + ): JavaPairDStream[K, V] = { dstream.reduceByKeyAndWindow(reduceFunc, windowDuration, slideDuration, partitioner) } - def reduceByKeyAndWindow(reduceFunc: Function2[V, V, V], invReduceFunc: Function2[V, V, V], - windowDuration: Duration, slideDuration: Duration): JavaPairDStream[K, V] = { + def reduceByKeyAndWindow( + reduceFunc: Function2[V, V, V], + invReduceFunc: Function2[V, V, V], + windowDuration: Duration, + slideDuration: Duration + ): JavaPairDStream[K, V] = { dstream.reduceByKeyAndWindow(reduceFunc, invReduceFunc, windowDuration, slideDuration) } - def reduceByKeyAndWindow(reduceFunc: Function2[V, V, V], invReduceFunc: Function2[V, V, V], - windowDuration: Duration, slideDuration: Duration, numPartitions: Int): JavaPairDStream[K, V] = { - dstream.reduceByKeyAndWindow(reduceFunc, invReduceFunc, windowDuration, slideDuration, numPartitions) + def reduceByKeyAndWindow( + reduceFunc: Function2[V, V, V], + invReduceFunc: Function2[V, V, V], + windowDuration: Duration, + slideDuration: Duration, + numPartitions: Int + ): JavaPairDStream[K, V] = { + dstream.reduceByKeyAndWindow( + reduceFunc, + invReduceFunc, + windowDuration, + slideDuration, + numPartitions) } - def reduceByKeyAndWindow(reduceFunc: Function2[V, V, V], invReduceFunc: Function2[V, V, V], - windowDuration: Duration, slideDuration: Duration, partitioner: Partitioner) - : JavaPairDStream[K, V] = { - dstream.reduceByKeyAndWindow(reduceFunc, invReduceFunc, windowDuration, slideDuration, partitioner) + def reduceByKeyAndWindow( + reduceFunc: Function2[V, V, V], + invReduceFunc: Function2[V, V, V], + windowDuration: Duration, + slideDuration: Duration, + partitioner: Partitioner + ): JavaPairDStream[K, V] = { + dstream.reduceByKeyAndWindow( + reduceFunc, + invReduceFunc, + windowDuration, + slideDuration, + partitioner) } - def countByKeyAndWindow(windowDuration: Duration, slideDuration: Duration): JavaPairDStream[K, JLong] = { + def countByKeyAndWindow(windowDuration: Duration, slideDuration: Duration) + : JavaPairDStream[K, JLong] = { JavaPairDStream.scalaToJavaLong(dstream.countByKeyAndWindow(windowDuration, slideDuration)) } def countByKeyAndWindow(windowDuration: Duration, slideDuration: Duration, numPartitions: Int) - : JavaPairDStream[K, Long] = { + : JavaPairDStream[K, Long] = { dstream.countByKeyAndWindow(windowDuration, slideDuration, numPartitions) } @@ -225,21 +264,21 @@ class JavaPairDStream[K, V](val dstream: DStream[(K, V)])( } def saveAsHadoopFiles( - prefix: String, - suffix: String, - keyClass: Class[_], - valueClass: Class[_], - outputFormatClass: Class[_ <: OutputFormat[_, _]]) { + prefix: String, + suffix: String, + keyClass: Class[_], + valueClass: Class[_], + outputFormatClass: Class[_ <: OutputFormat[_, _]]) { dstream.saveAsHadoopFiles(prefix, suffix, keyClass, valueClass, outputFormatClass) } def saveAsHadoopFiles( - prefix: String, - suffix: String, - keyClass: Class[_], - valueClass: Class[_], - outputFormatClass: Class[_ <: OutputFormat[_, _]], - conf: JobConf) { + prefix: String, + suffix: String, + keyClass: Class[_], + 
valueClass: Class[_], + outputFormatClass: Class[_ <: OutputFormat[_, _]], + conf: JobConf) { dstream.saveAsHadoopFiles(prefix, suffix, keyClass, valueClass, outputFormatClass, conf) } @@ -248,21 +287,21 @@ class JavaPairDStream[K, V](val dstream: DStream[(K, V)])( } def saveAsNewAPIHadoopFiles( - prefix: String, - suffix: String, - keyClass: Class[_], - valueClass: Class[_], - outputFormatClass: Class[_ <: NewOutputFormat[_, _]]) { + prefix: String, + suffix: String, + keyClass: Class[_], + valueClass: Class[_], + outputFormatClass: Class[_ <: NewOutputFormat[_, _]]) { dstream.saveAsNewAPIHadoopFiles(prefix, suffix, keyClass, valueClass, outputFormatClass) } def saveAsNewAPIHadoopFiles( - prefix: String, - suffix: String, - keyClass: Class[_], - valueClass: Class[_], - outputFormatClass: Class[_ <: NewOutputFormat[_, _]], - conf: Configuration = new Configuration) { + prefix: String, + suffix: String, + keyClass: Class[_], + valueClass: Class[_], + outputFormatClass: Class[_ <: NewOutputFormat[_, _]], + conf: Configuration = new Configuration) { dstream.saveAsNewAPIHadoopFiles(prefix, suffix, keyClass, valueClass, outputFormatClass, conf) } From d182a57cae6455804773db23d9498d2dcdd02172 Mon Sep 17 00:00:00 2001 From: Patrick Wendell Date: Mon, 14 Jan 2013 10:03:55 -0800 Subject: [PATCH 206/226] Two changes: - Updating countByX() types based on bug fix - Porting new documentation to Java --- .../examples/JavaFlumeEventCount.java | 4 +- .../streaming/api/java/JavaDStreamLike.scala | 10 +- .../streaming/api/java/JavaPairDStream.scala | 264 ++++++++++++++++++ 3 files changed, 271 insertions(+), 7 deletions(-) diff --git a/examples/src/main/scala/spark/streaming/examples/JavaFlumeEventCount.java b/examples/src/main/scala/spark/streaming/examples/JavaFlumeEventCount.java index 6592d9bc2e..151b71eb81 100644 --- a/examples/src/main/scala/spark/streaming/examples/JavaFlumeEventCount.java +++ b/examples/src/main/scala/spark/streaming/examples/JavaFlumeEventCount.java @@ -38,9 +38,9 @@ public class JavaFlumeEventCount { flumeStream.count(); - flumeStream.count().map(new Function() { + flumeStream.count().map(new Function() { @Override - public String call(Integer in) { + public String call(Long in) { return "Received " + in + " flume events."; } }).print(); diff --git a/streaming/src/main/scala/spark/streaming/api/java/JavaDStreamLike.scala b/streaming/src/main/scala/spark/streaming/api/java/JavaDStreamLike.scala index 80d8865725..4257ecd583 100644 --- a/streaming/src/main/scala/spark/streaming/api/java/JavaDStreamLike.scala +++ b/streaming/src/main/scala/spark/streaming/api/java/JavaDStreamLike.scala @@ -1,7 +1,7 @@ package spark.streaming.api.java import java.util.{List => JList} -import java.lang.{Integer => JInt} +import java.lang.{Long => JLong} import scala.collection.JavaConversions._ @@ -17,8 +17,8 @@ trait JavaDStreamLike[T, This <: JavaDStreamLike[T, This]] extends Serializable def dstream: DStream[T] - implicit def scalaIntToJavaInteger(in: DStream[Int]): JavaDStream[JInt] = { - in.map(new JInt(_)) + implicit def scalaIntToJavaLong(in: DStream[Long]): JavaDStream[JLong] = { + in.map(new JLong(_)) } /** @@ -31,14 +31,14 @@ trait JavaDStreamLike[T, This <: JavaDStreamLike[T, This]] extends Serializable * Returns a new DStream in which each RDD has a single element generated by counting each RDD * of this DStream. 
*/ - def count(): JavaDStream[JInt] = dstream.count() + def count(): JavaDStream[JLong] = dstream.count() /** * Returns a new DStream in which each RDD has a single element generated by counting the number * of elements in a window over this DStream. windowDuration and slideDuration are as defined in the * window() operation. This is equivalent to window(windowDuration, slideDuration).count() */ - def countByWindow(windowDuration: Duration, slideDuration: Duration) : JavaDStream[JInt] = { + def countByWindow(windowDuration: Duration, slideDuration: Duration) : JavaDStream[JLong] = { dstream.countByWindow(windowDuration, slideDuration) } diff --git a/streaming/src/main/scala/spark/streaming/api/java/JavaPairDStream.scala b/streaming/src/main/scala/spark/streaming/api/java/JavaPairDStream.scala index eeb1f07939..c761fdd3bd 100644 --- a/streaming/src/main/scala/spark/streaming/api/java/JavaPairDStream.scala +++ b/streaming/src/main/scala/spark/streaming/api/java/JavaPairDStream.scala @@ -85,21 +85,66 @@ class JavaPairDStream[K, V](val dstream: DStream[(K, V)])( // Methods only for PairDStream's // ======================================================================= + /** + * Creates a new DStream by applying `groupByKey` on each RDD of `this` DStream. + * Therefore, the values for each key in `this` DStream's RDDs are grouped into a + * single sequence to generate the RDDs of the new DStream. Hash partitioning is + * used to generate the RDDs with Spark's default number of partitions. + */ def groupByKey(): JavaPairDStream[K, JList[V]] = dstream.groupByKey().mapValues(seqAsJavaList _) + /** + * Creates a new DStream by applying `groupByKey` on each RDD of `this` DStream. + * Therefore, the values for each key in `this` DStream's RDDs are grouped into a + * single sequence to generate the RDDs of the new DStream. Hash partitioning is + * used to generate the RDDs with `numPartitions` partitions. + */ def groupByKey(numPartitions: Int): JavaPairDStream[K, JList[V]] = dstream.groupByKey(numPartitions).mapValues(seqAsJavaList _) + /** + * Creates a new DStream by applying `groupByKey` on each RDD of `this` DStream. + * Therefore, the values for each key in `this` DStream's RDDs are grouped into a + * single sequence to generate the RDDs of the new DStream. [[spark.Partitioner]] + * is used to control the partitioning of each RDD. + */ def groupByKey(partitioner: Partitioner): JavaPairDStream[K, JList[V]] = dstream.groupByKey(partitioner).mapValues(seqAsJavaList _) + /** + * Creates a new DStream by applying `reduceByKey` on each RDD of `this` DStream. + * Therefore, the values for each key in `this` DStream's RDDs is merged using the + * associative reduce function to generate the RDDs of the new DStream. + * Hash partitioning is used to generate the RDDs with Spark's default number of partitions. + */ def reduceByKey(func: JFunction2[V, V, V]): JavaPairDStream[K, V] = dstream.reduceByKey(func) + /** + * Creates a new DStream by applying `reduceByKey` on each RDD of `this` DStream. + * Therefore, the values for each key in `this` DStream's RDDs is merged using the + * associative reduce function to generate the RDDs of the new DStream. + * Hash partitioning is used to generate the RDDs with `numPartitions` partitions. + */ def reduceByKey(func: JFunction2[V, V, V], numPartitions: Int): JavaPairDStream[K, V] = dstream.reduceByKey(func, numPartitions) + /** + * Creates a new DStream by applying `reduceByKey` on each RDD of `this` DStream. 
+ * Therefore, the values for each key in `this` DStream's RDDs is merged using the + * associative reduce function to generate the RDDs of the new DStream. + * [[spark.Partitioner]] is used to control the partitioning of each RDD. + */ + def reduceByKey(func: JFunction2[V, V, V], partitioner: Partitioner): JavaPairDStream[K, V] = { + dstream.reduceByKey(func, partitioner) + } + + /** + * Generic function to combine elements of each key in DStream's RDDs using custom function. + * This is similar to the combineByKey for RDDs. Please refer to combineByKey in + * [[spark.PairRDDFunctions]] for more information. + */ def combineByKey[C](createCombiner: JFunction[V, C], mergeValue: JFunction2[C, V, C], mergeCombiners: JFunction2[C, C, C], @@ -110,25 +155,78 @@ class JavaPairDStream[K, V](val dstream: DStream[(K, V)])( dstream.combineByKey(createCombiner, mergeValue, mergeCombiners, partitioner) } + /** + * Creates a new DStream by counting the number of values of each key in each RDD + * of `this` DStream. Hash partitioning is used to generate the RDDs. + */ def countByKey(numPartitions: Int): JavaPairDStream[K, JLong] = { JavaPairDStream.scalaToJavaLong(dstream.countByKey(numPartitions)); } + + /** + * Creates a new DStream by counting the number of values of each key in each RDD + * of `this` DStream. Hash partitioning is used to generate the RDDs with Spark's + * `numPartitions` partitions. + */ def countByKey(): JavaPairDStream[K, JLong] = { JavaPairDStream.scalaToJavaLong(dstream.countByKey()); } + /** + * Creates a new DStream by applying `groupByKey` over a sliding window on `this` DStream. + * This is similar to `DStream.groupByKey()` but applies it over a sliding window. + * The new DStream generates RDDs with the same interval as this DStream. + * Hash partitioning is used to generate the RDDs with Spark's default number of partitions. + * @param windowDuration width of the window; must be a multiple of this DStream's + * batching interval + */ + def groupByKeyAndWindow(windowDuration: Duration): JavaPairDStream[K, JList[V]] = { + dstream.groupByKeyAndWindow(windowDuration).mapValues(seqAsJavaList _) + } + + /** + * Creates a new DStream by applying `groupByKey` over a sliding window on `this` DStream. + * This is similar to `DStream.groupByKey()` but applies it over a sliding window. + * Hash partitioning is used to generate the RDDs with Spark's default number of partitions. + * @param windowDuration width of the window; must be a multiple of this DStream's + * batching interval + * @param slideDuration sliding interval of the window (i.e., the interval after which + * the new DStream will generate RDDs); must be a multiple of this + * DStream's batching interval + */ def groupByKeyAndWindow(windowDuration: Duration, slideDuration: Duration) : JavaPairDStream[K, JList[V]] = { dstream.groupByKeyAndWindow(windowDuration, slideDuration).mapValues(seqAsJavaList _) } + /** + * Creates a new DStream by applying `groupByKey` over a sliding window on `this` DStream. + * This is similar to `DStream.groupByKey()` but applies it over a sliding window. + * Hash partitioning is used to generate the RDDs with `numPartitions` partitions. 
+ * @param windowDuration width of the window; must be a multiple of this DStream's + * batching interval + * @param slideDuration sliding interval of the window (i.e., the interval after which + * the new DStream will generate RDDs); must be a multiple of this + * DStream's batching interval + * @param numPartitions Number of partitions of each RDD in the new DStream. + */ def groupByKeyAndWindow(windowDuration: Duration, slideDuration: Duration, numPartitions: Int) :JavaPairDStream[K, JList[V]] = { dstream.groupByKeyAndWindow(windowDuration, slideDuration, numPartitions) .mapValues(seqAsJavaList _) } + /** + * Creates a new DStream by applying `groupByKey` over a sliding window on `this` DStream. + * This is similar to `DStream.groupByKey()` but applies it over a sliding window. + * @param windowDuration width of the window; must be a multiple of this DStream's + * batching interval + * @param slideDuration sliding interval of the window (i.e., the interval after which + * the new DStream will generate RDDs); must be a multiple of this + * DStream's batching interval + * @param partitioner Partitioner for controlling the partitioning of each RDD in the new DStream. + */ def groupByKeyAndWindow( windowDuration: Duration, slideDuration: Duration, @@ -138,11 +236,31 @@ class JavaPairDStream[K, V](val dstream: DStream[(K, V)])( .mapValues(seqAsJavaList _) } + /** + * Creates a new DStream by applying `reduceByKey` over a sliding window on `this` DStream. + * This is similar to `DStream.reduceByKey()` but applies it over a sliding window. + * The new DStream generates RDDs with the same interval as this DStream. + * Hash partitioning is used to generate the RDDs with Spark's default number of partitions. + * @param reduceFunc associative reduce function + * @param windowDuration width of the window; must be a multiple of this DStream's + * batching interval + */ def reduceByKeyAndWindow(reduceFunc: Function2[V, V, V], windowDuration: Duration) :JavaPairDStream[K, V] = { dstream.reduceByKeyAndWindow(reduceFunc, windowDuration) } + /** + * Creates a new DStream by applying `reduceByKey` over a sliding window on `this` DStream. + * This is similar to `DStream.reduceByKey()` but applies it over a sliding window. + * Hash partitioning is used to generate the RDDs with Spark's default number of partitions. + * @param reduceFunc associative reduce function + * @param windowDuration width of the window; must be a multiple of this DStream's + * batching interval + * @param slideDuration sliding interval of the window (i.e., the interval after which + * the new DStream will generate RDDs); must be a multiple of this + * DStream's batching interval + */ def reduceByKeyAndWindow( reduceFunc: Function2[V, V, V], windowDuration: Duration, @@ -151,6 +269,18 @@ class JavaPairDStream[K, V](val dstream: DStream[(K, V)])( dstream.reduceByKeyAndWindow(reduceFunc, windowDuration, slideDuration) } + /** + * Creates a new DStream by applying `reduceByKey` over a sliding window on `this` DStream. + * This is similar to `DStream.reduceByKey()` but applies it over a sliding window. + * Hash partitioning is used to generate the RDDs with `numPartitions` partitions. 
+ * @param reduceFunc associative reduce function + * @param windowDuration width of the window; must be a multiple of this DStream's + * batching interval + * @param slideDuration sliding interval of the window (i.e., the interval after which + * the new DStream will generate RDDs); must be a multiple of this + * DStream's batching interval + * @param numPartitions Number of partitions of each RDD in the new DStream. + */ def reduceByKeyAndWindow( reduceFunc: Function2[V, V, V], windowDuration: Duration, @@ -160,6 +290,17 @@ class JavaPairDStream[K, V](val dstream: DStream[(K, V)])( dstream.reduceByKeyAndWindow(reduceFunc, windowDuration, slideDuration, numPartitions) } + /** + * Creates a new DStream by applying `reduceByKey` over a sliding window on `this` DStream. + * This is similar to `DStream.reduceByKey()` but applies it over a sliding window. + * @param reduceFunc associative reduce function + * @param windowDuration width of the window; must be a multiple of this DStream's + * batching interval + * @param slideDuration sliding interval of the window (i.e., the interval after which + * the new DStream will generate RDDs); must be a multiple of this + * DStream's batching interval + * @param partitioner Partitioner for controlling the partitioning of each RDD in the new DStream. + */ def reduceByKeyAndWindow( reduceFunc: Function2[V, V, V], windowDuration: Duration, @@ -169,6 +310,24 @@ class JavaPairDStream[K, V](val dstream: DStream[(K, V)])( dstream.reduceByKeyAndWindow(reduceFunc, windowDuration, slideDuration, partitioner) } + + /** + * Creates a new DStream by reducing over a window in a smarter way. + * The reduced value of over a new window is calculated incrementally by using the + * old window's reduce value : + * 1. reduce the new values that entered the window (e.g., adding new counts) + * 2. "inverse reduce" the old values that left the window (e.g., subtracting old counts) + * This is more efficient that reduceByKeyAndWindow without "inverse reduce" function. + * However, it is applicable to only "invertible reduce functions". + * Hash partitioning is used to generate the RDDs with Spark's default number of partitions. + * @param reduceFunc associative reduce function + * @param invReduceFunc inverse function + * @param windowDuration width of the window; must be a multiple of this DStream's + * batching interval + * @param slideDuration sliding interval of the window (i.e., the interval after which + * the new DStream will generate RDDs); must be a multiple of this + * DStream's batching interval + */ def reduceByKeyAndWindow( reduceFunc: Function2[V, V, V], invReduceFunc: Function2[V, V, V], @@ -178,6 +337,24 @@ class JavaPairDStream[K, V](val dstream: DStream[(K, V)])( dstream.reduceByKeyAndWindow(reduceFunc, invReduceFunc, windowDuration, slideDuration) } + /** + * Creates a new DStream by reducing over a window in a smarter way. + * The reduced value of over a new window is calculated incrementally by using the + * old window's reduce value : + * 1. reduce the new values that entered the window (e.g., adding new counts) + * 2. "inverse reduce" the old values that left the window (e.g., subtracting old counts) + * This is more efficient that reduceByKeyAndWindow without "inverse reduce" function. + * However, it is applicable to only "invertible reduce functions". + * Hash partitioning is used to generate the RDDs with `numPartitions` partitions. 
+ * @param reduceFunc associative reduce function + * @param invReduceFunc inverse function + * @param windowDuration width of the window; must be a multiple of this DStream's + * batching interval + * @param slideDuration sliding interval of the window (i.e., the interval after which + * the new DStream will generate RDDs); must be a multiple of this + * DStream's batching interval + * @param numPartitions Number of partitions of each RDD in the new DStream. + */ def reduceByKeyAndWindow( reduceFunc: Function2[V, V, V], invReduceFunc: Function2[V, V, V], @@ -193,6 +370,23 @@ class JavaPairDStream[K, V](val dstream: DStream[(K, V)])( numPartitions) } + /** + * Creates a new DStream by reducing over a window in a smarter way. + * The reduced value of over a new window is calculated incrementally by using the + * old window's reduce value : + * 1. reduce the new values that entered the window (e.g., adding new counts) + * 2. "inverse reduce" the old values that left the window (e.g., subtracting old counts) + * This is more efficient that reduceByKeyAndWindow without "inverse reduce" function. + * However, it is applicable to only "invertible reduce functions". + * @param reduceFunc associative reduce function + * @param invReduceFunc inverse function + * @param windowDuration width of the window; must be a multiple of this DStream's + * batching interval + * @param slideDuration sliding interval of the window (i.e., the interval after which + * the new DStream will generate RDDs); must be a multiple of this + * DStream's batching interval + * @param partitioner Partitioner for controlling the partitioning of each RDD in the new DStream. + */ def reduceByKeyAndWindow( reduceFunc: Function2[V, V, V], invReduceFunc: Function2[V, V, V], @@ -208,16 +402,38 @@ class JavaPairDStream[K, V](val dstream: DStream[(K, V)])( partitioner) } + /** + * Creates a new DStream by counting the number of values for each key over a window. + * Hash partitioning is used to generate the RDDs with `numPartitions` partitions. + * @param windowDuration width of the window; must be a multiple of this DStream's + * batching interval + * @param slideDuration sliding interval of the window (i.e., the interval after which + * the new DStream will generate RDDs); must be a multiple of this + * DStream's batching interval + */ def countByKeyAndWindow(windowDuration: Duration, slideDuration: Duration) : JavaPairDStream[K, JLong] = { JavaPairDStream.scalaToJavaLong(dstream.countByKeyAndWindow(windowDuration, slideDuration)) } + /** + * Creates a new DStream by counting the number of values for each key over a window. + * Hash partitioning is used to generate the RDDs with `numPartitions` partitions. + * @param windowDuration width of the window; must be a multiple of this DStream's + * batching interval + * @param slideDuration sliding interval of the window (i.e., the interval after which + * the new DStream will generate RDDs); must be a multiple of this + * DStream's batching interval + * @param numPartitions Number of partitions of each RDD in the new DStream. 
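As a rough usage sketch of the windowed per-key count, assuming a hypothetical stream of (userId, event) pairs and placeholder durations:

    import spark.streaming.{Seconds, DStream}
    import spark.streaming.StreamingContext._

    // Number of events per user over the last 30 seconds, recomputed every 10 seconds.
    def eventRates(events: DStream[(String, String)]): DStream[(String, Long)] =
      events.countByKeyAndWindow(Seconds(30), Seconds(10))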
+ */ def countByKeyAndWindow(windowDuration: Duration, slideDuration: Duration, numPartitions: Int) : JavaPairDStream[K, Long] = { dstream.countByKeyAndWindow(windowDuration, slideDuration, numPartitions) } + + // TODO: Update State + def mapValues[U](f: JFunction[V, U]): JavaPairDStream[K, U] = { implicit val cm: ClassManifest[U] = implicitly[ClassManifest[AnyRef]].asInstanceOf[ClassManifest[U]] @@ -232,12 +448,26 @@ class JavaPairDStream[K, V](val dstream: DStream[(K, V)])( dstream.flatMapValues(fn) } + /** + * Cogroups `this` DStream with `other` DStream. Each RDD of the new DStream will + * be generated by cogrouping RDDs from`this`and `other` DStreams. Therefore, for + * each key k in corresponding RDDs of `this` or `other` DStreams, the generated RDD + * will contains a tuple with the list of values for that key in both RDDs. + * HashPartitioner is used to partition each generated RDD into default number of partitions. + */ def cogroup[W](other: JavaPairDStream[K, W]): JavaPairDStream[K, (JList[V], JList[W])] = { implicit val cm: ClassManifest[W] = implicitly[ClassManifest[AnyRef]].asInstanceOf[ClassManifest[W]] dstream.cogroup(other.dstream).mapValues(t => (seqAsJavaList(t._1), seqAsJavaList((t._2)))) } + /** + * Cogroups `this` DStream with `other` DStream. Each RDD of the new DStream will + * be generated by cogrouping RDDs from`this`and `other` DStreams. Therefore, for + * each key k in corresponding RDDs of `this` or `other` DStreams, the generated RDD + * will contains a tuple with the list of values for that key in both RDDs. + * Partitioner is used to partition each generated RDD. + */ def cogroup[W](other: JavaPairDStream[K, W], partitioner: Partitioner) : JavaPairDStream[K, (JList[V], JList[W])] = { implicit val cm: ClassManifest[W] = @@ -246,12 +476,22 @@ class JavaPairDStream[K, V](val dstream: DStream[(K, V)])( .mapValues(t => (seqAsJavaList(t._1), seqAsJavaList((t._2)))) } + /** + * Joins `this` DStream with `other` DStream. Each RDD of the new DStream will + * be generated by joining RDDs from `this` and `other` DStreams. HashPartitioner is used + * to partition each generated RDD into default number of partitions. + */ def join[W](other: JavaPairDStream[K, W]): JavaPairDStream[K, (V, W)] = { implicit val cm: ClassManifest[W] = implicitly[ClassManifest[AnyRef]].asInstanceOf[ClassManifest[W]] dstream.join(other.dstream) } + /** + * Joins `this` DStream with `other` DStream, that is, each RDD of the new DStream will + * be generated by joining RDDs from `this` and other DStream. Uses the given + * Partitioner to partition each generated RDD. + */ def join[W](other: JavaPairDStream[K, W], partitioner: Partitioner) : JavaPairDStream[K, (V, W)] = { implicit val cm: ClassManifest[W] = @@ -259,10 +499,18 @@ class JavaPairDStream[K, V](val dstream: DStream[(K, V)])( dstream.join(other.dstream, partitioner) } + /** + * Saves each RDD in `this` DStream as a Hadoop file. The file name at each batch interval is generated + * based on `prefix` and `suffix`: "prefix-TIME_IN_MS.suffix" + */ def saveAsHadoopFiles[F <: OutputFormat[K, V]](prefix: String, suffix: String) { dstream.saveAsHadoopFiles(prefix, suffix) } + /** + * Saves each RDD in `this` DStream as a Hadoop file. 
The file name at each batch interval is generated + * based on `prefix` and `suffix`: "prefix-TIME_IN_MS.suffix" + */ def saveAsHadoopFiles( prefix: String, suffix: String, @@ -272,6 +520,10 @@ class JavaPairDStream[K, V](val dstream: DStream[(K, V)])( dstream.saveAsHadoopFiles(prefix, suffix, keyClass, valueClass, outputFormatClass) } + /** + * Saves each RDD in `this` DStream as a Hadoop file. The file name at each batch interval is generated + * based on `prefix` and `suffix`: "prefix-TIME_IN_MS.suffix" + */ def saveAsHadoopFiles( prefix: String, suffix: String, @@ -282,10 +534,18 @@ class JavaPairDStream[K, V](val dstream: DStream[(K, V)])( dstream.saveAsHadoopFiles(prefix, suffix, keyClass, valueClass, outputFormatClass, conf) } + /** + * Saves each RDD in `this` DStream as a Hadoop file. The file name at each batch interval is generated + * based on `prefix` and `suffix`: "prefix-TIME_IN_MS.suffix". + */ def saveAsNewAPIHadoopFiles[F <: NewOutputFormat[K, V]](prefix: String, suffix: String) { dstream.saveAsNewAPIHadoopFiles(prefix, suffix) } + /** + * Saves each RDD in `this` DStream as a Hadoop file. The file name at each batch interval is generated + * based on `prefix` and `suffix`: "prefix-TIME_IN_MS.suffix". + */ def saveAsNewAPIHadoopFiles( prefix: String, suffix: String, @@ -295,6 +555,10 @@ class JavaPairDStream[K, V](val dstream: DStream[(K, V)])( dstream.saveAsNewAPIHadoopFiles(prefix, suffix, keyClass, valueClass, outputFormatClass) } + /** + * Saves each RDD in `this` DStream as a Hadoop file. The file name at each batch interval is generated + * based on `prefix` and `suffix`: "prefix-TIME_IN_MS.suffix". + */ def saveAsNewAPIHadoopFiles( prefix: String, suffix: String, From 6069446356d1daf28054b87ff1a3bf724a22df03 Mon Sep 17 00:00:00 2001 From: Patrick Wendell Date: Mon, 14 Jan 2013 10:34:13 -0800 Subject: [PATCH 207/226] Making comments consistent w/ Spark style --- .../main/scala/spark/streaming/DStream.scala | 52 +++--- .../streaming/PairDStreamFunctions.scala | 170 ++++++++---------- .../streaming/api/java/JavaDStream.scala | 14 +- .../streaming/api/java/JavaDStreamLike.scala | 26 +-- .../streaming/api/java/JavaPairDStream.scala | 166 ++++++++--------- 5 files changed, 196 insertions(+), 232 deletions(-) diff --git a/streaming/src/main/scala/spark/streaming/DStream.scala b/streaming/src/main/scala/spark/streaming/DStream.scala index fbe3cebd6d..036763fe2f 100644 --- a/streaming/src/main/scala/spark/streaming/DStream.scala +++ b/streaming/src/main/scala/spark/streaming/DStream.scala @@ -98,10 +98,10 @@ abstract class DStream[T: ClassManifest] ( this } - /** Persists RDDs of this DStream with the default storage level (MEMORY_ONLY_SER) */ + /** Persist RDDs of this DStream with the default storage level (MEMORY_ONLY_SER) */ def persist(): DStream[T] = persist(StorageLevel.MEMORY_ONLY_SER) - /** Persists RDDs of this DStream with the default storage level (MEMORY_ONLY_SER) */ + /** Persist RDDs of this DStream with the default storage level (MEMORY_ONLY_SER) */ def cache(): DStream[T] = persist() /** @@ -119,7 +119,7 @@ abstract class DStream[T: ClassManifest] ( } /** - * This method initializes the DStream by setting the "zero" time, based on which + * Initialize the DStream by setting the "zero" time, based on which * the validity of future times is calculated. This method also recursively initializes * its parent DStreams. */ @@ -244,7 +244,7 @@ abstract class DStream[T: ClassManifest] ( } /** - * Retrieves a precomputed RDD of this DStream, or computes the RDD. 
This is an internal + * Retrieve a precomputed RDD of this DStream, or computes the RDD. This is an internal * method that should not be called directly. */ protected[streaming] def getOrCompute(time: Time): Option[RDD[T]] = { @@ -283,7 +283,7 @@ abstract class DStream[T: ClassManifest] ( } /** - * Generates a SparkStreaming job for the given time. This is an internal method that + * Generate a SparkStreaming job for the given time. This is an internal method that * should not be called directly. This default implementation creates a job * that materializes the corresponding RDD. Subclasses of DStream may override this * (eg. ForEachDStream). @@ -302,7 +302,7 @@ abstract class DStream[T: ClassManifest] ( } /** - * Dereferences RDDs that are older than rememberDuration. + * Dereference RDDs that are older than rememberDuration. */ protected[streaming] def forgetOldRDDs(time: Time) { val keys = generatedRDDs.keys @@ -328,7 +328,7 @@ abstract class DStream[T: ClassManifest] ( } /** - * Refreshes the list of checkpointed RDDs that will be saved along with checkpoint of + * Refresh the list of checkpointed RDDs that will be saved along with checkpoint of * this stream. This is an internal method that should not be called directly. This is * a default implementation that saves only the file names of the checkpointed RDDs to * checkpointData. Subclasses of DStream (especially those of InputDStream) may override @@ -373,7 +373,7 @@ abstract class DStream[T: ClassManifest] ( } /** - * Restores the RDDs in generatedRDDs from the checkpointData. This is an internal method + * Restore the RDDs in generatedRDDs from the checkpointData. This is an internal method * that should not be called directly. This is a default implementation that recreates RDDs * from the checkpoint file names stored in checkpointData. Subclasses of DStream that * override the updateCheckpointData() method would also need to override this method. @@ -425,20 +425,20 @@ abstract class DStream[T: ClassManifest] ( // DStream operations // ======================================================================= - /** Returns a new DStream by applying a function to all elements of this DStream. */ + /** Return a new DStream by applying a function to all elements of this DStream. */ def map[U: ClassManifest](mapFunc: T => U): DStream[U] = { new MappedDStream(this, ssc.sc.clean(mapFunc)) } /** - * Returns a new DStream by applying a function to all elements of this DStream, + * Return a new DStream by applying a function to all elements of this DStream, * and then flattening the results */ def flatMap[U: ClassManifest](flatMapFunc: T => Traversable[U]): DStream[U] = { new FlatMappedDStream(this, ssc.sc.clean(flatMapFunc)) } - /** Returns a new DStream containing only the elements that satisfy a predicate. */ + /** Return a new DStream containing only the elements that satisfy a predicate. */ def filter(filterFunc: T => Boolean): DStream[T] = new FilteredDStream(this, filterFunc) /** @@ -461,20 +461,20 @@ abstract class DStream[T: ClassManifest] ( } /** - * Returns a new DStream in which each RDD has a single element generated by reducing each RDD + * Return a new DStream in which each RDD has a single element generated by reducing each RDD * of this DStream. 
*/ def reduce(reduceFunc: (T, T) => T): DStream[T] = this.map(x => (null, x)).reduceByKey(reduceFunc, 1).map(_._2) /** - * Returns a new DStream in which each RDD has a single element generated by counting each RDD + * Return a new DStream in which each RDD has a single element generated by counting each RDD * of this DStream. */ def count(): DStream[Long] = this.map(_ => 1L).reduce(_ + _) /** - * Applies a function to each RDD in this DStream. This is an output operator, so + * Apply a function to each RDD in this DStream. This is an output operator, so * this DStream will be registered as an output stream and therefore materialized. */ def foreach(foreachFunc: RDD[T] => Unit) { @@ -482,7 +482,7 @@ abstract class DStream[T: ClassManifest] ( } /** - * Applies a function to each RDD in this DStream. This is an output operator, so + * Apply a function to each RDD in this DStream. This is an output operator, so * this DStream will be registered as an output stream and therefore materialized. */ def foreach(foreachFunc: (RDD[T], Time) => Unit) { @@ -492,7 +492,7 @@ abstract class DStream[T: ClassManifest] ( } /** - * Returns a new DStream in which each RDD is generated by applying a function + * Return a new DStream in which each RDD is generated by applying a function * on each RDD of this DStream. */ def transform[U: ClassManifest](transformFunc: RDD[T] => RDD[U]): DStream[U] = { @@ -500,7 +500,7 @@ abstract class DStream[T: ClassManifest] ( } /** - * Returns a new DStream in which each RDD is generated by applying a function + * Return a new DStream in which each RDD is generated by applying a function * on each RDD of this DStream. */ def transform[U: ClassManifest](transformFunc: (RDD[T], Time) => RDD[U]): DStream[U] = { @@ -508,7 +508,7 @@ abstract class DStream[T: ClassManifest] ( } /** - * Prints the first ten elements of each RDD generated in this DStream. This is an output + * Print the first ten elements of each RDD generated in this DStream. This is an output * operator, so this DStream will be registered as an output stream and there materialized. */ def print() { @@ -545,7 +545,7 @@ abstract class DStream[T: ClassManifest] ( } /** - * Returns a new DStream which computed based on tumbling window on this DStream. + * Return a new DStream which computed based on tumbling window on this DStream. * This is equivalent to window(batchTime, batchTime). * @param batchDuration tumbling window duration; must be a multiple of this DStream's * batching interval @@ -553,7 +553,7 @@ abstract class DStream[T: ClassManifest] ( def tumble(batchDuration: Duration): DStream[T] = window(batchDuration, batchDuration) /** - * Returns a new DStream in which each RDD has a single element generated by reducing all + * Return a new DStream in which each RDD has a single element generated by reducing all * elements in a window over this DStream. windowDuration and slideDuration are as defined * in the window() operation. This is equivalent to * window(windowDuration, slideDuration).reduce(reduceFunc) @@ -578,7 +578,7 @@ abstract class DStream[T: ClassManifest] ( } /** - * Returns a new DStream in which each RDD has a single element generated by counting the number + * Return a new DStream in which each RDD has a single element generated by counting the number * of elements in a window over this DStream. windowDuration and slideDuration are as defined in the * window() operation. 
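To illustrate the per-batch aggregates, windowing, and output operators documented in this file, a minimal sketch; `lines` is assumed to be an existing DStream[String] and the durations are placeholders.

    import spark.streaming.{Seconds, DStream}

    def monitor(lines: DStream[String]) {
      lines.count().print()                                // one element count per batch
      val last30 = lines.window(Seconds(30), Seconds(10))  // last 30s of data, every 10s
      last30.count().print()                               // count over the sliding window
    }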
This is equivalent to window(windowDuration, slideDuration).count() */ @@ -587,20 +587,20 @@ abstract class DStream[T: ClassManifest] ( } /** - * Returns a new DStream by unifying data of another DStream with this DStream. + * Return a new DStream by unifying data of another DStream with this DStream. * @param that Another DStream having the same slideDuration as this DStream. */ def union(that: DStream[T]): DStream[T] = new UnionDStream[T](Array(this, that)) /** - * Returns all the RDDs defined by the Interval object (both end times included) + * Return all the RDDs defined by the Interval object (both end times included) */ protected[streaming] def slice(interval: Interval): Seq[RDD[T]] = { slice(interval.beginTime, interval.endTime) } /** - * Returns all the RDDs between 'fromTime' to 'toTime' (both included) + * Return all the RDDs between 'fromTime' to 'toTime' (both included) */ def slice(fromTime: Time, toTime: Time): Seq[RDD[T]] = { val rdds = new ArrayBuffer[RDD[T]]() @@ -616,7 +616,7 @@ abstract class DStream[T: ClassManifest] ( } /** - * Saves each RDD in this DStream as a Sequence file of serialized objects. + * Save each RDD in this DStream as a Sequence file of serialized objects. * The file name at each batch interval is generated based on `prefix` and * `suffix`: "prefix-TIME_IN_MS.suffix". */ @@ -629,7 +629,7 @@ abstract class DStream[T: ClassManifest] ( } /** - * Saves each RDD in this DStream as at text file, using string representation + * Save each RDD in this DStream as at text file, using string representation * of elements. The file name at each batch interval is generated based on * `prefix` and `suffix`: "prefix-TIME_IN_MS.suffix". */ diff --git a/streaming/src/main/scala/spark/streaming/PairDStreamFunctions.scala b/streaming/src/main/scala/spark/streaming/PairDStreamFunctions.scala index 3952457339..f63279512b 100644 --- a/streaming/src/main/scala/spark/streaming/PairDStreamFunctions.scala +++ b/streaming/src/main/scala/spark/streaming/PairDStreamFunctions.scala @@ -26,29 +26,23 @@ extends Serializable { } /** - * Creates a new DStream by applying `groupByKey` on each RDD of `this` DStream. - * Therefore, the values for each key in `this` DStream's RDDs are grouped into a - * single sequence to generate the RDDs of the new DStream. Hash partitioning is - * used to generate the RDDs with Spark's default number of partitions. + * Create a new DStream by applying `groupByKey` to each RDD. Hash partitioning is used to + * generate the RDDs with Spark's default number of partitions. */ def groupByKey(): DStream[(K, Seq[V])] = { groupByKey(defaultPartitioner()) } /** - * Creates a new DStream by applying `groupByKey` on each RDD of `this` DStream. - * Therefore, the values for each key in `this` DStream's RDDs are grouped into a - * single sequence to generate the RDDs of the new DStream. Hash partitioning is - * used to generate the RDDs with `numPartitions` partitions. + * Create a new DStream by applying `groupByKey` to each RDD. Hash partitioning is used to + * generate the RDDs with `numPartitions` partitions. */ def groupByKey(numPartitions: Int): DStream[(K, Seq[V])] = { groupByKey(defaultPartitioner(numPartitions)) } /** - * Creates a new DStream by applying `groupByKey` on each RDD of `this` DStream. - * Therefore, the values for each key in `this` DStream's RDDs are grouped into a - * single sequence to generate the RDDs of the new DStream. [[spark.Partitioner]] + * Create a new DStream by applying `groupByKey` on each RDD. 
The supplied [[spark.Partitioner]] * is used to control the partitioning of each RDD. */ def groupByKey(partitioner: Partitioner): DStream[(K, Seq[V])] = { @@ -60,30 +54,27 @@ extends Serializable { } /** - * Creates a new DStream by applying `reduceByKey` on each RDD of `this` DStream. - * Therefore, the values for each key in `this` DStream's RDDs is merged using the - * associative reduce function to generate the RDDs of the new DStream. - * Hash partitioning is used to generate the RDDs with Spark's default number of partitions. + * Create a new DStream by applying `reduceByKey` to each RDD. The values for each key are + * merged using the associative reduce function. Hash partitioning is used to generate the RDDs + * with Spark's default number of partitions. */ def reduceByKey(reduceFunc: (V, V) => V): DStream[(K, V)] = { reduceByKey(reduceFunc, defaultPartitioner()) } /** - * Creates a new DStream by applying `reduceByKey` on each RDD of `this` DStream. - * Therefore, the values for each key in `this` DStream's RDDs is merged using the - * associative reduce function to generate the RDDs of the new DStream. - * Hash partitioning is used to generate the RDDs with `numPartitions` partitions. + * Create a new DStream by applying `reduceByKey` to each RDD. The values for each key are + * merged using the supplied reduce function. Hash partitioning is used to generate the RDDs + * with `numPartitions` partitions. */ def reduceByKey(reduceFunc: (V, V) => V, numPartitions: Int): DStream[(K, V)] = { reduceByKey(reduceFunc, defaultPartitioner(numPartitions)) } /** - * Creates a new DStream by applying `reduceByKey` on each RDD of `this` DStream. - * Therefore, the values for each key in `this` DStream's RDDs is merged using the - * associative reduce function to generate the RDDs of the new DStream. - * [[spark.Partitioner]] is used to control the partitioning of each RDD. + * Create a new DStream by applying `reduceByKey` to each RDD. The values for each key are + * merged using the supplied reduce function. [[spark.Partitioner]] is used to control the + * partitioning of each RDD. */ def reduceByKey(reduceFunc: (V, V) => V, partitioner: Partitioner): DStream[(K, V)] = { val cleanedReduceFunc = ssc.sc.clean(reduceFunc) @@ -91,9 +82,9 @@ extends Serializable { } /** - * Generic function to combine elements of each key in DStream's RDDs using custom function. - * This is similar to the combineByKey for RDDs. Please refer to combineByKey in - * [[spark.PairRDDFunctions]] for more information. + * Combine elements of each key in DStream's RDDs using custom function. This is similar to the + * combineByKey for RDDs. Please refer to combineByKey in [[spark.PairRDDFunctions]] for more + * information. */ def combineByKey[C: ClassManifest]( createCombiner: V => C, @@ -104,19 +95,18 @@ extends Serializable { } /** - * Creates a new DStream by counting the number of values of each key in each RDD - * of `this` DStream. Hash partitioning is used to generate the RDDs with Spark's - * `numPartitions` partitions. + * Create a new DStream by counting the number of values of each key in each RDD. Hash + * partitioning is used to generate the RDDs with Spark's `numPartitions` partitions. */ def countByKey(numPartitions: Int = self.ssc.sc.defaultParallelism): DStream[(K, Long)] = { self.map(x => (x._1, 1L)).reduceByKey((x: Long, y: Long) => x + y, numPartitions) } /** - * Creates a new DStream by applying `groupByKey` over a sliding window on `this` DStream. 
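For the per-batch (non-windowed) keyed operations above, a brief sketch on a hypothetical pair stream; the result types noted in the comments follow the signatures documented here.

    import spark.streaming.DStream
    import spark.streaming.StreamingContext._

    def perBatchByKey(pairs: DStream[(String, Int)]) {
      val grouped = pairs.groupByKey()         // DStream[(String, Seq[Int])]
      val summed  = pairs.reduceByKey(_ + _)   // DStream[(String, Int)]
      val counts  = pairs.countByKey()         // DStream[(String, Long)]
      summed.print()
    }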
- * This is similar to `DStream.groupByKey()` but applies it over a sliding window. - * The new DStream generates RDDs with the same interval as this DStream. - * Hash partitioning is used to generate the RDDs with Spark's default number of partitions. + * Creates a new DStream by applying `groupByKey` over a sliding window. This is similar to + * `DStream.groupByKey()` but applies it over a sliding window. The new DStream generates RDDs + * with the same interval as this DStream. Hash partitioning is used to generate the RDDs with + * Spark's default number of partitions. * @param windowDuration width of the window; must be a multiple of this DStream's * batching interval */ @@ -125,9 +115,9 @@ extends Serializable { } /** - * Creates a new DStream by applying `groupByKey` over a sliding window on `this` DStream. - * This is similar to `DStream.groupByKey()` but applies it over a sliding window. - * Hash partitioning is used to generate the RDDs with Spark's default number of partitions. + * Create a new DStream by applying `groupByKey` over a sliding window. Similar to + * `DStream.groupByKey()`, but applies it over a sliding window. Hash partitioning is used to + * generate the RDDs with Spark's default number of partitions. * @param windowDuration width of the window; must be a multiple of this DStream's * batching interval * @param slideDuration sliding interval of the window (i.e., the interval after which @@ -139,8 +129,8 @@ extends Serializable { } /** - * Creates a new DStream by applying `groupByKey` over a sliding window on `this` DStream. - * This is similar to `DStream.groupByKey()` but applies it over a sliding window. + * Create a new DStream by applying `groupByKey` over a sliding window on `this` DStream. + * Similar to `DStream.groupByKey()`, but applies it over a sliding window. * Hash partitioning is used to generate the RDDs with `numPartitions` partitions. * @param windowDuration width of the window; must be a multiple of this DStream's * batching interval @@ -158,8 +148,8 @@ extends Serializable { } /** - * Creates a new DStream by applying `groupByKey` over a sliding window on `this` DStream. - * This is similar to `DStream.groupByKey()` but applies it over a sliding window. + * Create a new DStream by applying `groupByKey` over a sliding window on `this` DStream. + * Similar to `DStream.groupByKey()`, but applies it over a sliding window. * @param windowDuration width of the window; must be a multiple of this DStream's * batching interval * @param slideDuration sliding interval of the window (i.e., the interval after which @@ -176,10 +166,10 @@ extends Serializable { } /** - * Creates a new DStream by applying `reduceByKey` over a sliding window on `this` DStream. - * This is similar to `DStream.reduceByKey()` but applies it over a sliding window. - * The new DStream generates RDDs with the same interval as this DStream. - * Hash partitioning is used to generate the RDDs with Spark's default number of partitions. + * Create a new DStream by applying `reduceByKey` over a sliding window on `this` DStream. + * Similar to `DStream.reduceByKey()`, but applies it over a sliding window. The new DStream + * generates RDDs with the same interval as this DStream. Hash partitioning is used to generate + * the RDDs with Spark's default number of partitions. 
* @param reduceFunc associative reduce function * @param windowDuration width of the window; must be a multiple of this DStream's * batching interval @@ -192,9 +182,9 @@ extends Serializable { } /** - * Creates a new DStream by applying `reduceByKey` over a sliding window on `this` DStream. - * This is similar to `DStream.reduceByKey()` but applies it over a sliding window. - * Hash partitioning is used to generate the RDDs with Spark's default number of partitions. + * Create a new DStream by applying `reduceByKey` over a sliding window. This is similar to + * `DStream.reduceByKey()` but applies it over a sliding window. Hash partitioning is used to + * generate the RDDs with Spark's default number of partitions. * @param reduceFunc associative reduce function * @param windowDuration width of the window; must be a multiple of this DStream's * batching interval @@ -211,9 +201,9 @@ extends Serializable { } /** - * Creates a new DStream by applying `reduceByKey` over a sliding window on `this` DStream. - * This is similar to `DStream.reduceByKey()` but applies it over a sliding window. - * Hash partitioning is used to generate the RDDs with `numPartitions` partitions. + * Create a new DStream by applying `reduceByKey` over a sliding window. This is similar to + * `DStream.reduceByKey()` but applies it over a sliding window. Hash partitioning is used to + * generate the RDDs with `numPartitions` partitions. * @param reduceFunc associative reduce function * @param windowDuration width of the window; must be a multiple of this DStream's * batching interval @@ -232,8 +222,8 @@ extends Serializable { } /** - * Creates a new DStream by applying `reduceByKey` over a sliding window on `this` DStream. - * This is similar to `DStream.reduceByKey()` but applies it over a sliding window. + * Create a new DStream by applying `reduceByKey` over a sliding window. Similar to + * `DStream.reduceByKey()`, but applies it over a sliding window. * @param reduceFunc associative reduce function * @param windowDuration width of the window; must be a multiple of this DStream's * batching interval @@ -255,9 +245,8 @@ extends Serializable { } /** - * Creates a new DStream by reducing over a window in a smarter way. - * The reduced value of over a new window is calculated incrementally by using the - * old window's reduce value : + * Create a new DStream by reducing over a using incremental computation. + * The reduced value of over a new window is calculated using the old window's reduce value : * 1. reduce the new values that entered the window (e.g., adding new counts) * 2. "inverse reduce" the old values that left the window (e.g., subtracting old counts) * This is more efficient that reduceByKeyAndWindow without "inverse reduce" function. @@ -283,9 +272,8 @@ extends Serializable { } /** - * Creates a new DStream by reducing over a window in a smarter way. - * The reduced value of over a new window is calculated incrementally by using the - * old window's reduce value : + * Create a new DStream by reducing over a using incremental computation. + * The reduced value of over a new window is calculated using the old window's reduce value : * 1. reduce the new values that entered the window (e.g., adding new counts) * 2. "inverse reduce" the old values that left the window (e.g., subtracting old counts) * This is more efficient that reduceByKeyAndWindow without "inverse reduce" function. @@ -313,9 +301,8 @@ extends Serializable { } /** - * Creates a new DStream by reducing over a window in a smarter way. 
- * The reduced value of over a new window is calculated incrementally by using the - * old window's reduce value : + * Create a new DStream by reducing over a using incremental computation. + * The reduced value of over a new window is calculated using the old window's reduce value : * 1. reduce the new values that entered the window (e.g., adding new counts) * 2. "inverse reduce" the old values that left the window (e.g., subtracting old counts) * This is more efficient that reduceByKeyAndWindow without "inverse reduce" function. @@ -344,7 +331,7 @@ extends Serializable { } /** - * Creates a new DStream by counting the number of values for each key over a window. + * Create a new DStream by counting the number of values for each key over a window. * Hash partitioning is used to generate the RDDs with `numPartitions` partitions. * @param windowDuration width of the window; must be a multiple of this DStream's * batching interval @@ -369,10 +356,9 @@ extends Serializable { } /** - * Creates a new "state" DStream where the state for each key is updated by applying - * the given function on the previous state of the key and the new values of the key from - * `this` DStream. Hash partitioning is used to generate the RDDs with Spark's default - * number of partitions. + * Create a new "state" DStream where the state for each key is updated by applying + * the given function on the previous state of the key and the new values of each key. + * Hash partitioning is used to generate the RDDs with Spark's default number of partitions. * @param updateFunc State update function. If `this` function returns None, then * corresponding state key-value pair will be eliminated. * @tparam S State type @@ -384,9 +370,9 @@ extends Serializable { } /** - * Creates a new "state" DStream where the state for each key is updated by applying - * the given function on the previous state of the key and the new values of the key from - * `this` DStream. Hash partitioning is used to generate the RDDs with `numPartitions` partitions. + * Create a new "state" DStream where the state for each key is updated by applying + * the given function on the previous state of the key and the new values of each key. + * Hash partitioning is used to generate the RDDs with `numPartitions` partitions. * @param updateFunc State update function. If `this` function returns None, then * corresponding state key-value pair will be eliminated. * @param numPartitions Number of partitions of each RDD in the new DStream. @@ -400,9 +386,9 @@ extends Serializable { } /** - * Creates a new "state" DStream where the state for each key is updated by applying - * the given function on the previous state of the key and the new values of the key from - * `this` DStream. [[spark.Partitioner]] is used to control the partitioning of each RDD. + * Create a new "state" DStream where the state for each key is updated by applying + * the given function on the previous state of the key and the new values of the key. + * [[spark.Partitioner]] is used to control the partitioning of each RDD. * @param updateFunc State update function. If `this` function returns None, then * corresponding state key-value pair will be eliminated. * @param partitioner Partitioner for controlling the partitioning of each RDD in the new DStream. 
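A rough sketch of `updateStateByKey` keeping a running count per key across batches. The state type, update logic, and stream name are illustrative, and checkpointing is assumed to be enabled on the StreamingContext for stateful operators.

    import spark.streaming.DStream
    import spark.streaming.StreamingContext._

    def runningCounts(pairs: DStream[(String, Int)]): DStream[(String, Int)] = {
      // New values seen for the key in this batch, plus whatever count was remembered so far.
      val updateFunc = (newValues: Seq[Int], state: Option[Int]) =>
        Some(newValues.sum + state.getOrElse(0))
      pairs.updateStateByKey[Int](updateFunc)
    }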
@@ -419,9 +405,9 @@ extends Serializable { } /** - * Creates a new "state" DStream where the state for each key is updated by applying - * the given function on the previous state of the key and the new values of the key from - * `this` DStream. [[spark.Partitioner]] is used to control the partitioning of each RDD. + * Create a new "state" DStream where the state for each key is updated by applying + * the given function on the previous state of the key and the new values of each key. + * [[spark.Partitioner]] is used to control the partitioning of each RDD. * @param updateFunc State update function. If `this` function returns None, then * corresponding state key-value pair will be eliminated. Note, that * this function may generate a different a tuple with a different key @@ -451,22 +437,19 @@ extends Serializable { } /** - * Cogroups `this` DStream with `other` DStream. Each RDD of the new DStream will - * be generated by cogrouping RDDs from`this`and `other` DStreams. Therefore, for - * each key k in corresponding RDDs of `this` or `other` DStreams, the generated RDD - * will contains a tuple with the list of values for that key in both RDDs. - * HashPartitioner is used to partition each generated RDD into default number of partitions. + * Cogroup `this` DStream with `other` DStream. For each key k in corresponding RDDs of `this` + * or `other` DStreams, the generated RDD will contain a tuple with the list of values for that + * key in both RDDs. HashPartitioner is used to partition each generated RDD into default number + * of partitions. */ def cogroup[W: ClassManifest](other: DStream[(K, W)]): DStream[(K, (Seq[V], Seq[W]))] = { cogroup(other, defaultPartitioner()) } /** - * Cogroups `this` DStream with `other` DStream. Each RDD of the new DStream will - * be generated by cogrouping RDDs from`this`and `other` DStreams. Therefore, for - * each key k in corresponding RDDs of `this` or `other` DStreams, the generated RDD - * will contains a tuple with the list of values for that key in both RDDs. - * Partitioner is used to partition each generated RDD. + * Cogroup `this` DStream with `other` DStream. For each key k in corresponding RDDs of `this` + * or `other` DStreams, the generated RDD will contain a tuple with the list of values for that + * key in both RDDs. Partitioner is used to partition each generated RDD. */ def cogroup[W: ClassManifest]( other: DStream[(K, W)], @@ -488,8 +471,7 @@ extends Serializable { } /** - * Joins `this` DStream with `other` DStream. Each RDD of the new DStream will - * be generated by joining RDDs from `this` and `other` DStreams. HashPartitioner is used + * Join `this` DStream with `other` DStream. HashPartitioner is used * to partition each generated RDD into default number of partitions. */ def join[W: ClassManifest](other: DStream[(K, W)]): DStream[(K, (V, W))] = { @@ -497,7 +479,7 @@ extends Serializable { } /** - * Joins `this` DStream with `other` DStream, that is, each RDD of the new DStream will + * Join `this` DStream with `other` DStream, that is, each RDD of the new DStream will * be generated by joining RDDs from `this` and other DStream. Uses the given * Partitioner to partition each generated RDD. */ @@ -513,7 +495,7 @@ extends Serializable { } /** - * Saves each RDD in `this` DStream as a Hadoop file. The file name at each batch interval is generated + * Save each RDD in `this` DStream as a Hadoop file.
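A minimal sketch of the cogroup and join operators documented above, on two hypothetical keyed streams; the result types noted in the comments follow the documented signatures.

    import spark.streaming.DStream
    import spark.streaming.StreamingContext._

    def combineStreams(views: DStream[(String, Int)], clicks: DStream[(String, Int)]) = {
      val grouped = views.cogroup(clicks)   // DStream[(String, (Seq[Int], Seq[Int]))]
      val joined  = views.join(clicks)      // DStream[(String, (Int, Int))], keys present on both sides
      (grouped, joined)
    }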
The file name at each batch interval is generated * based on `prefix` and `suffix`: "prefix-TIME_IN_MS.suffix" */ def saveAsHadoopFiles[F <: OutputFormat[K, V]]( @@ -524,7 +506,7 @@ extends Serializable { } /** - * Saves each RDD in `this` DStream as a Hadoop file. The file name at each batch interval is generated + * Save each RDD in `this` DStream as a Hadoop file. The file name at each batch interval is generated * based on `prefix` and `suffix`: "prefix-TIME_IN_MS.suffix" */ def saveAsHadoopFiles( @@ -543,8 +525,8 @@ extends Serializable { } /** - * Saves each RDD in `this` DStream as a Hadoop file. The file name at each batch interval is generated - * based on `prefix` and `suffix`: "prefix-TIME_IN_MS.suffix". + * Save each RDD in `this` DStream as a Hadoop file. The file name at each batch interval is + * generated based on `prefix` and `suffix`: "prefix-TIME_IN_MS.suffix". */ def saveAsNewAPIHadoopFiles[F <: NewOutputFormat[K, V]]( prefix: String, @@ -554,8 +536,8 @@ extends Serializable { } /** - * Saves each RDD in `this` DStream as a Hadoop file. The file name at each batch interval is generated - * based on `prefix` and `suffix`: "prefix-TIME_IN_MS.suffix". + * Save each RDD in `this` DStream as a Hadoop file. The file name at each batch interval is + * generated based on `prefix` and `suffix`: "prefix-TIME_IN_MS.suffix". */ def saveAsNewAPIHadoopFiles( prefix: String, diff --git a/streaming/src/main/scala/spark/streaming/api/java/JavaDStream.scala b/streaming/src/main/scala/spark/streaming/api/java/JavaDStream.scala index f85864df5d..32faef5670 100644 --- a/streaming/src/main/scala/spark/streaming/api/java/JavaDStream.scala +++ b/streaming/src/main/scala/spark/streaming/api/java/JavaDStream.scala @@ -9,20 +9,20 @@ import spark.storage.StorageLevel class JavaDStream[T](val dstream: DStream[T])(implicit val classManifest: ClassManifest[T]) extends JavaDStreamLike[T, JavaDStream[T]] { - /** Returns a new DStream containing only the elements that satisfy a predicate. */ + /** Return a new DStream containing only the elements that satisfy a predicate. */ def filter(f: JFunction[T, java.lang.Boolean]): JavaDStream[T] = dstream.filter((x => f(x).booleanValue())) - /** Persists RDDs of this DStream with the default storage level (MEMORY_ONLY_SER) */ + /** Persist RDDs of this DStream with the default storage level (MEMORY_ONLY_SER) */ def cache(): JavaDStream[T] = dstream.cache() - /** Persists RDDs of this DStream with the default storage level (MEMORY_ONLY_SER) */ + /** Persist RDDs of this DStream with the default storage level (MEMORY_ONLY_SER) */ def persist(): JavaDStream[T] = dstream.cache() - /** Persists the RDDs of this DStream with the given storage level */ + /** Persist the RDDs of this DStream with the given storage level */ def persist(storageLevel: StorageLevel): JavaDStream[T] = dstream.persist(storageLevel) - /** Method that generates a RDD for the given duration */ + /** Generate an RDD for the given duration */ def compute(validTime: Time): JavaRDD[T] = { dstream.compute(validTime) match { case Some(rdd) => new JavaRDD(rdd) @@ -51,7 +51,7 @@ class JavaDStream[T](val dstream: DStream[T])(implicit val classManifest: ClassM dstream.window(windowDuration, slideDuration) /** - * Returns a new DStream which computed based on tumbling window on this DStream. + * Return a new DStream which computed based on tumbling window on this DStream. * This is equivalent to window(batchDuration, batchDuration). 
* @param batchDuration tumbling window duration; must be a multiple of this DStream's interval */ @@ -59,7 +59,7 @@ class JavaDStream[T](val dstream: DStream[T])(implicit val classManifest: ClassM dstream.tumble(batchDuration) /** - * Returns a new DStream by unifying data of another DStream with this DStream. + * Return a new DStream by unifying data of another DStream with this DStream. * @param that Another DStream having the same interval (i.e., slideDuration) as this DStream. */ def union(that: JavaDStream[T]): JavaDStream[T] = diff --git a/streaming/src/main/scala/spark/streaming/api/java/JavaDStreamLike.scala b/streaming/src/main/scala/spark/streaming/api/java/JavaDStreamLike.scala index 4257ecd583..32df665a98 100644 --- a/streaming/src/main/scala/spark/streaming/api/java/JavaDStreamLike.scala +++ b/streaming/src/main/scala/spark/streaming/api/java/JavaDStreamLike.scala @@ -22,19 +22,19 @@ trait JavaDStreamLike[T, This <: JavaDStreamLike[T, This]] extends Serializable } /** - * Prints the first ten elements of each RDD generated in this DStream. This is an output + * Print the first ten elements of each RDD generated in this DStream. This is an output * operator, so this DStream will be registered as an output stream and there materialized. */ def print() = dstream.print() /** - * Returns a new DStream in which each RDD has a single element generated by counting each RDD + * Return a new DStream in which each RDD has a single element generated by counting each RDD * of this DStream. */ def count(): JavaDStream[JLong] = dstream.count() /** - * Returns a new DStream in which each RDD has a single element generated by counting the number + * Return a new DStream in which each RDD has a single element generated by counting the number * of elements in a window over this DStream. windowDuration and slideDuration are as defined in the * window() operation. This is equivalent to window(windowDuration, slideDuration).count() */ @@ -50,15 +50,15 @@ trait JavaDStreamLike[T, This <: JavaDStreamLike[T, This]] extends Serializable def glom(): JavaDStream[JList[T]] = new JavaDStream(dstream.glom().map(x => new java.util.ArrayList[T](x.toSeq))) - /** Returns the StreamingContext associated with this DStream */ + /** Return the StreamingContext associated with this DStream */ def context(): StreamingContext = dstream.context() - /** Returns a new DStream by applying a function to all elements of this DStream. */ + /** Return a new DStream by applying a function to all elements of this DStream. */ def map[R](f: JFunction[T, R]): JavaDStream[R] = { new JavaDStream(dstream.map(f)(f.returnType()))(f.returnType()) } - /** Returns a new DStream by applying a function to all elements of this DStream. */ + /** Return a new DStream by applying a function to all elements of this DStream. */ def map[K, V](f: PairFunction[T, K, V]): JavaPairDStream[K, V] = { def cm = implicitly[ClassManifest[AnyRef]].asInstanceOf[ClassManifest[Tuple2[K, V]]] new JavaPairDStream(dstream.map(f)(cm))(f.keyType(), f.valueType()) @@ -86,13 +86,13 @@ trait JavaDStreamLike[T, This <: JavaDStreamLike[T, This]] extends Serializable } /** - * Returns a new DStream in which each RDD has a single element generated by reducing each RDD + * Return a new DStream in which each RDD has a single element generated by reducing each RDD * of this DStream. 
*/ def reduce(f: JFunction2[T, T, T]): JavaDStream[T] = dstream.reduce(f) /** - * Returns a new DStream in which each RDD has a single element generated by reducing all + * Return a new DStream in which each RDD has a single element generated by reducing all * elements in a window over this DStream. windowDuration and slideDuration are as defined in the * window() operation. This is equivalent to window(windowDuration, slideDuration).reduce(reduceFunc) */ @@ -106,14 +106,14 @@ trait JavaDStreamLike[T, This <: JavaDStreamLike[T, This]] extends Serializable } /** - * Returns all the RDDs between 'fromDuration' to 'toDuration' (both included) + * Return all the RDDs between 'fromDuration' to 'toDuration' (both included) */ def slice(fromDuration: Duration, toDuration: Duration): JList[JavaRDD[T]] = { new util.ArrayList(dstream.slice(fromDuration, toDuration).map(new JavaRDD(_)).toSeq) } /** - * Applies a function to each RDD in this DStream. This is an output operator, so + * Apply a function to each RDD in this DStream. This is an output operator, so * this DStream will be registered as an output stream and therefore materialized. */ def foreach(foreachFunc: JFunction[JavaRDD[T], Void]) { @@ -121,7 +121,7 @@ trait JavaDStreamLike[T, This <: JavaDStreamLike[T, This]] extends Serializable } /** - * Applies a function to each RDD in this DStream. This is an output operator, so + * Apply a function to each RDD in this DStream. This is an output operator, so * this DStream will be registered as an output stream and therefore materialized. */ def foreach(foreachFunc: JFunction2[JavaRDD[T], Time, Void]) { @@ -129,7 +129,7 @@ trait JavaDStreamLike[T, This <: JavaDStreamLike[T, This]] extends Serializable } /** - * Returns a new DStream in which each RDD is generated by applying a function + * Return a new DStream in which each RDD is generated by applying a function * on each RDD of this DStream. */ def transform[U](transformFunc: JFunction[JavaRDD[T], JavaRDD[U]]): JavaDStream[U] = { @@ -141,7 +141,7 @@ trait JavaDStreamLike[T, This <: JavaDStreamLike[T, This]] extends Serializable } /** - * Returns a new DStream in which each RDD is generated by applying a function + * Return a new DStream in which each RDD is generated by applying a function * on each RDD of this DStream. */ def transform[U](transformFunc: JFunction2[JavaRDD[T], Time, JavaRDD[U]]): JavaDStream[U] = { diff --git a/streaming/src/main/scala/spark/streaming/api/java/JavaPairDStream.scala b/streaming/src/main/scala/spark/streaming/api/java/JavaPairDStream.scala index c761fdd3bd..16b476ec90 100644 --- a/streaming/src/main/scala/spark/streaming/api/java/JavaPairDStream.scala +++ b/streaming/src/main/scala/spark/streaming/api/java/JavaPairDStream.scala @@ -86,19 +86,15 @@ class JavaPairDStream[K, V](val dstream: DStream[(K, V)])( // ======================================================================= /** - * Creates a new DStream by applying `groupByKey` on each RDD of `this` DStream. - * Therefore, the values for each key in `this` DStream's RDDs are grouped into a - * single sequence to generate the RDDs of the new DStream. Hash partitioning is - * used to generate the RDDs with Spark's default number of partitions. + * Create a new DStream by applying `groupByKey` to each RDD. Hash partitioning is used to + * generate the RDDs with Spark's default number of partitions. 
*/ def groupByKey(): JavaPairDStream[K, JList[V]] = dstream.groupByKey().mapValues(seqAsJavaList _) /** - * Creates a new DStream by applying `groupByKey` on each RDD of `this` DStream. - * Therefore, the values for each key in `this` DStream's RDDs are grouped into a - * single sequence to generate the RDDs of the new DStream. Hash partitioning is - * used to generate the RDDs with `numPartitions` partitions. + * Create a new DStream by applying `groupByKey` to each RDD. Hash partitioning is used to + * generate the RDDs with `numPartitions` partitions. */ def groupByKey(numPartitions: Int): JavaPairDStream[K, JList[V]] = dstream.groupByKey(numPartitions).mapValues(seqAsJavaList _) @@ -113,37 +109,34 @@ class JavaPairDStream[K, V](val dstream: DStream[(K, V)])( dstream.groupByKey(partitioner).mapValues(seqAsJavaList _) /** - * Creates a new DStream by applying `reduceByKey` on each RDD of `this` DStream. - * Therefore, the values for each key in `this` DStream's RDDs is merged using the - * associative reduce function to generate the RDDs of the new DStream. - * Hash partitioning is used to generate the RDDs with Spark's default number of partitions. + * Create a new DStream by applying `reduceByKey` to each RDD. The values for each key are + * merged using the associative reduce function. Hash partitioning is used to generate the RDDs + * with Spark's default number of partitions. */ def reduceByKey(func: JFunction2[V, V, V]): JavaPairDStream[K, V] = dstream.reduceByKey(func) /** - * Creates a new DStream by applying `reduceByKey` on each RDD of `this` DStream. - * Therefore, the values for each key in `this` DStream's RDDs is merged using the - * associative reduce function to generate the RDDs of the new DStream. - * Hash partitioning is used to generate the RDDs with `numPartitions` partitions. + * Create a new DStream by applying `reduceByKey` to each RDD. The values for each key are + * merged using the supplied reduce function. Hash partitioning is used to generate the RDDs + * with `numPartitions` partitions. */ def reduceByKey(func: JFunction2[V, V, V], numPartitions: Int): JavaPairDStream[K, V] = dstream.reduceByKey(func, numPartitions) /** - * Creates a new DStream by applying `reduceByKey` on each RDD of `this` DStream. - * Therefore, the values for each key in `this` DStream's RDDs is merged using the - * associative reduce function to generate the RDDs of the new DStream. - * [[spark.Partitioner]] is used to control the partitioning of each RDD. + * Create a new DStream by applying `reduceByKey` to each RDD. The values for each key are + * merged using the supplied reduce function. [[spark.Partitioner]] is used to control the + * partitioning of each RDD. */ def reduceByKey(func: JFunction2[V, V, V], partitioner: Partitioner): JavaPairDStream[K, V] = { dstream.reduceByKey(func, partitioner) } /** - * Generic function to combine elements of each key in DStream's RDDs using custom function. - * This is similar to the combineByKey for RDDs. Please refer to combineByKey in - * [[spark.PairRDDFunctions]] for more information. + * Combine elements of each key in DStream's RDDs using custom function. This is similar to the + * combineByKey for RDDs. Please refer to combineByKey in [[spark.PairRDDFunctions]] for more + * information. 
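Since the Java wrapper delegates to the Scala pair-DStream operations, a hedged Scala sketch of `combineByKey` computing per-key averages; the explicit HashPartitioner, its partition count, and the stream are illustrative assumptions.

    import spark.HashPartitioner
    import spark.streaming.DStream
    import spark.streaming.StreamingContext._

    def perKeyAverages(values: DStream[(String, Double)]): DStream[(String, Double)] = {
      val sumAndCount = values.combineByKey[(Double, Long)](
        v => (v, 1L),                            // createCombiner: first value seen for a key
        (acc, v) => (acc._1 + v, acc._2 + 1),    // mergeValue: fold a value into the combiner
        (a, b) => (a._1 + b._1, a._2 + b._2),    // mergeCombiners: merge partial combiners
        new HashPartitioner(4))
      sumAndCount.mapValues { case (sum, n) => sum / n }
    }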
*/ def combineByKey[C](createCombiner: JFunction[V, C], mergeValue: JFunction2[C, V, C], @@ -156,8 +149,8 @@ class JavaPairDStream[K, V](val dstream: DStream[(K, V)])( } /** - * Creates a new DStream by counting the number of values of each key in each RDD - * of `this` DStream. Hash partitioning is used to generate the RDDs. + * Create a new DStream by counting the number of values of each key in each RDD. Hash + * partitioning is used to generate the RDDs with Spark's `numPartitions` partitions. */ def countByKey(numPartitions: Int): JavaPairDStream[K, JLong] = { JavaPairDStream.scalaToJavaLong(dstream.countByKey(numPartitions)); @@ -165,19 +158,18 @@ class JavaPairDStream[K, V](val dstream: DStream[(K, V)])( /** - * Creates a new DStream by counting the number of values of each key in each RDD - * of `this` DStream. Hash partitioning is used to generate the RDDs with Spark's - * `numPartitions` partitions. + * Create a new DStream by counting the number of values of each key in each RDD. Hash + * partitioning is used to generate the RDDs with the default number of partitions. */ def countByKey(): JavaPairDStream[K, JLong] = { JavaPairDStream.scalaToJavaLong(dstream.countByKey()); } /** - * Creates a new DStream by applying `groupByKey` over a sliding window on `this` DStream. - * This is similar to `DStream.groupByKey()` but applies it over a sliding window. - * The new DStream generates RDDs with the same interval as this DStream. - * Hash partitioning is used to generate the RDDs with Spark's default number of partitions. + * Creates a new DStream by applying `groupByKey` over a sliding window. This is similar to + * `DStream.groupByKey()` but applies it over a sliding window. The new DStream generates RDDs + * with the same interval as this DStream. Hash partitioning is used to generate the RDDs with + * Spark's default number of partitions. * @param windowDuration width of the window; must be a multiple of this DStream's * batching interval */ @@ -186,9 +178,9 @@ class JavaPairDStream[K, V](val dstream: DStream[(K, V)])( } /** - * Creates a new DStream by applying `groupByKey` over a sliding window on `this` DStream. - * This is similar to `DStream.groupByKey()` but applies it over a sliding window. - * Hash partitioning is used to generate the RDDs with Spark's default number of partitions. + * Create a new DStream by applying `groupByKey` over a sliding window. Similar to + * `DStream.groupByKey()`, but applies it over a sliding window. Hash partitioning is used to + * generate the RDDs with Spark's default number of partitions. * @param windowDuration width of the window; must be a multiple of this DStream's * batching interval * @param slideDuration sliding interval of the window (i.e., the interval after which @@ -201,8 +193,8 @@ class JavaPairDStream[K, V](val dstream: DStream[(K, V)])( } /** - * Creates a new DStream by applying `groupByKey` over a sliding window on `this` DStream. - * This is similar to `DStream.groupByKey()` but applies it over a sliding window. + * Create a new DStream by applying `groupByKey` over a sliding window on `this` DStream. + * Similar to `DStream.groupByKey()`, but applies it over a sliding window. * Hash partitioning is used to generate the RDDs with `numPartitions` partitions. 
* @param windowDuration width of the window; must be a multiple of this DStream's * batching interval @@ -218,8 +210,8 @@ class JavaPairDStream[K, V](val dstream: DStream[(K, V)])( } /** - * Creates a new DStream by applying `groupByKey` over a sliding window on `this` DStream. - * This is similar to `DStream.groupByKey()` but applies it over a sliding window. + * Create a new DStream by applying `groupByKey` over a sliding window on `this` DStream. + * Similar to `DStream.groupByKey()`, but applies it over a sliding window. * @param windowDuration width of the window; must be a multiple of this DStream's * batching interval * @param slideDuration sliding interval of the window (i.e., the interval after which @@ -237,10 +229,10 @@ class JavaPairDStream[K, V](val dstream: DStream[(K, V)])( } /** - * Creates a new DStream by applying `reduceByKey` over a sliding window on `this` DStream. - * This is similar to `DStream.reduceByKey()` but applies it over a sliding window. - * The new DStream generates RDDs with the same interval as this DStream. - * Hash partitioning is used to generate the RDDs with Spark's default number of partitions. + * Create a new DStream by applying `reduceByKey` over a sliding window on `this` DStream. + * Similar to `DStream.reduceByKey()`, but applies it over a sliding window. The new DStream + * generates RDDs with the same interval as this DStream. Hash partitioning is used to generate + * the RDDs with Spark's default number of partitions. * @param reduceFunc associative reduce function * @param windowDuration width of the window; must be a multiple of this DStream's * batching interval @@ -251,9 +243,9 @@ class JavaPairDStream[K, V](val dstream: DStream[(K, V)])( } /** - * Creates a new DStream by applying `reduceByKey` over a sliding window on `this` DStream. - * This is similar to `DStream.reduceByKey()` but applies it over a sliding window. - * Hash partitioning is used to generate the RDDs with Spark's default number of partitions. + * Create a new DStream by applying `reduceByKey` over a sliding window. This is similar to + * `DStream.reduceByKey()` but applies it over a sliding window. Hash partitioning is used to + * generate the RDDs with Spark's default number of partitions. * @param reduceFunc associative reduce function * @param windowDuration width of the window; must be a multiple of this DStream's * batching interval @@ -270,9 +262,9 @@ class JavaPairDStream[K, V](val dstream: DStream[(K, V)])( } /** - * Creates a new DStream by applying `reduceByKey` over a sliding window on `this` DStream. - * This is similar to `DStream.reduceByKey()` but applies it over a sliding window. - * Hash partitioning is used to generate the RDDs with `numPartitions` partitions. + * Create a new DStream by applying `reduceByKey` over a sliding window. This is similar to + * `DStream.reduceByKey()` but applies it over a sliding window. Hash partitioning is used to + * generate the RDDs with `numPartitions` partitions. * @param reduceFunc associative reduce function * @param windowDuration width of the window; must be a multiple of this DStream's * batching interval @@ -291,8 +283,8 @@ class JavaPairDStream[K, V](val dstream: DStream[(K, V)])( } /** - * Creates a new DStream by applying `reduceByKey` over a sliding window on `this` DStream. - * This is similar to `DStream.reduceByKey()` but applies it over a sliding window. + * Create a new DStream by applying `reduceByKey` over a sliding window. 
Similar to + * `DStream.reduceByKey()`, but applies it over a sliding window. * @param reduceFunc associative reduce function * @param windowDuration width of the window; must be a multiple of this DStream's * batching interval @@ -310,11 +302,9 @@ class JavaPairDStream[K, V](val dstream: DStream[(K, V)])( dstream.reduceByKeyAndWindow(reduceFunc, windowDuration, slideDuration, partitioner) } - /** - * Creates a new DStream by reducing over a window in a smarter way. - * The reduced value of over a new window is calculated incrementally by using the - * old window's reduce value : + * Create a new DStream by reducing over a sliding window using incremental computation. + * The reduced value over a new window is calculated using the old window's reduce value: * 1. reduce the new values that entered the window (e.g., adding new counts) * 2. "inverse reduce" the old values that left the window (e.g., subtracting old counts) * This is more efficient that reduceByKeyAndWindow without "inverse reduce" function. @@ -338,9 +328,8 @@ class JavaPairDStream[K, V](val dstream: DStream[(K, V)])( } /** - * Creates a new DStream by reducing over a window in a smarter way. - * The reduced value of over a new window is calculated incrementally by using the - * old window's reduce value : + * Create a new DStream by reducing over a sliding window using incremental computation. + * The reduced value over a new window is calculated using the old window's reduce value: * 1. reduce the new values that entered the window (e.g., adding new counts) * 2. "inverse reduce" the old values that left the window (e.g., subtracting old counts) * This is more efficient that reduceByKeyAndWindow without "inverse reduce" function. @@ -371,9 +360,8 @@ class JavaPairDStream[K, V](val dstream: DStream[(K, V)])( } /** - * Creates a new DStream by reducing over a window in a smarter way. - * The reduced value of over a new window is calculated incrementally by using the - * old window's reduce value : + * Create a new DStream by reducing over a sliding window using incremental computation. + * The reduced value over a new window is calculated using the old window's reduce value: * 1. reduce the new values that entered the window (e.g., adding new counts) * 2. "inverse reduce" the old values that left the window (e.g., subtracting old counts) * This is more efficient that reduceByKeyAndWindow without "inverse reduce" function. @@ -403,7 +391,7 @@ class JavaPairDStream[K, V](val dstream: DStream[(K, V)])( } /** - * Creates a new DStream by counting the number of values for each key over a window. + * Create a new DStream by counting the number of values for each key over a window. * Hash partitioning is used to generate the RDDs with `numPartitions` partitions. * @param windowDuration width of the window; must be a multiple of this DStream's * batching interval @@ -417,7 +405,7 @@ class JavaPairDStream[K, V](val dstream: DStream[(K, V)])( } /** - * Creates a new DStream by counting the number of values for each key over a window. + * Create a new DStream by counting the number of values for each key over a window. * Hash partitioning is used to generate the RDDs with `numPartitions` partitions. * @param windowDuration width of the window; must be a multiple of this DStream's * batching interval @@ -449,24 +437,19 @@ class JavaPairDStream[K, V](val dstream: DStream[(K, V)])( } /** - * Cogroups `this` DStream with `other` DStream. Each RDD of the new DStream will - * be generated by cogrouping RDDs from`this`and `other` DStreams.
Therefore, for - * each key k in corresponding RDDs of `this` or `other` DStreams, the generated RDD - * will contains a tuple with the list of values for that key in both RDDs. - * HashPartitioner is used to partition each generated RDD into default number of partitions. + * Cogroup `this` DStream with `other` DStream. For each key k in corresponding RDDs of `this` + * or `other` DStreams, the generated RDD will contains a tuple with the list of values for that + * key in both RDDs. HashPartitioner is used to partition each generated RDD into default number + * of partitions. */ - def cogroup[W](other: JavaPairDStream[K, W]): JavaPairDStream[K, (JList[V], JList[W])] = { - implicit val cm: ClassManifest[W] = - implicitly[ClassManifest[AnyRef]].asInstanceOf[ClassManifest[W]] - dstream.cogroup(other.dstream).mapValues(t => (seqAsJavaList(t._1), seqAsJavaList((t._2)))) + def cogroup[W: ClassManifest](other: DStream[(K, W)]): DStream[(K, (Seq[V], Seq[W]))] = { + cogroup(other, defaultPartitioner()) } /** - * Cogroups `this` DStream with `other` DStream. Each RDD of the new DStream will - * be generated by cogrouping RDDs from`this`and `other` DStreams. Therefore, for - * each key k in corresponding RDDs of `this` or `other` DStreams, the generated RDD - * will contains a tuple with the list of values for that key in both RDDs. - * Partitioner is used to partition each generated RDD. + * Cogroup `this` DStream with `other` DStream. For each key k in corresponding RDDs of `this` + * or `other` DStreams, the generated RDD will contains a tuple with the list of values for that + * key in both RDDs. Partitioner is used to partition each generated RDD. */ def cogroup[W](other: JavaPairDStream[K, W], partitioner: Partitioner) : JavaPairDStream[K, (JList[V], JList[W])] = { @@ -477,8 +460,7 @@ class JavaPairDStream[K, V](val dstream: DStream[(K, V)])( } /** - * Joins `this` DStream with `other` DStream. Each RDD of the new DStream will - * be generated by joining RDDs from `this` and `other` DStreams. HashPartitioner is used + * Join `this` DStream with `other` DStream. HashPartitioner is used * to partition each generated RDD into default number of partitions. */ def join[W](other: JavaPairDStream[K, W]): JavaPairDStream[K, (V, W)] = { @@ -488,7 +470,7 @@ class JavaPairDStream[K, V](val dstream: DStream[(K, V)])( } /** - * Joins `this` DStream with `other` DStream, that is, each RDD of the new DStream will + * Join `this` DStream with `other` DStream, that is, each RDD of the new DStream will * be generated by joining RDDs from `this` and other DStream. Uses the given * Partitioner to partition each generated RDD. */ @@ -500,16 +482,16 @@ class JavaPairDStream[K, V](val dstream: DStream[(K, V)])( } /** - * Saves each RDD in `this` DStream as a Hadoop file. The file name at each batch interval is generated - * based on `prefix` and `suffix`: "prefix-TIME_IN_MS.suffix" + * Save each RDD in `this` DStream as a Hadoop file. The file name at each batch interval is + * generated based on `prefix` and `suffix`: "prefix-TIME_IN_MS.suffix". */ def saveAsHadoopFiles[F <: OutputFormat[K, V]](prefix: String, suffix: String) { dstream.saveAsHadoopFiles(prefix, suffix) } /** - * Saves each RDD in `this` DStream as a Hadoop file. The file name at each batch interval is generated - * based on `prefix` and `suffix`: "prefix-TIME_IN_MS.suffix" + * Save each RDD in `this` DStream as a Hadoop file. The file name at each batch interval is + * generated based on `prefix` and `suffix`: "prefix-TIME_IN_MS.suffix". 
*/ def saveAsHadoopFiles( prefix: String, @@ -521,8 +503,8 @@ class JavaPairDStream[K, V](val dstream: DStream[(K, V)])( } /** - * Saves each RDD in `this` DStream as a Hadoop file. The file name at each batch interval is generated - * based on `prefix` and `suffix`: "prefix-TIME_IN_MS.suffix" + * Save each RDD in `this` DStream as a Hadoop file. The file name at each batch interval is + * generated based on `prefix` and `suffix`: "prefix-TIME_IN_MS.suffix". */ def saveAsHadoopFiles( prefix: String, @@ -535,16 +517,16 @@ class JavaPairDStream[K, V](val dstream: DStream[(K, V)])( } /** - * Saves each RDD in `this` DStream as a Hadoop file. The file name at each batch interval is generated - * based on `prefix` and `suffix`: "prefix-TIME_IN_MS.suffix". + * Save each RDD in `this` DStream as a Hadoop file. The file name at each batch interval is + * generated based on `prefix` and `suffix`: "prefix-TIME_IN_MS.suffix". */ def saveAsNewAPIHadoopFiles[F <: NewOutputFormat[K, V]](prefix: String, suffix: String) { dstream.saveAsNewAPIHadoopFiles(prefix, suffix) } /** - * Saves each RDD in `this` DStream as a Hadoop file. The file name at each batch interval is generated - * based on `prefix` and `suffix`: "prefix-TIME_IN_MS.suffix". + * Save each RDD in `this` DStream as a Hadoop file. The file name at each batch interval is + * generated based on `prefix` and `suffix`: "prefix-TIME_IN_MS.suffix". */ def saveAsNewAPIHadoopFiles( prefix: String, @@ -556,8 +538,8 @@ class JavaPairDStream[K, V](val dstream: DStream[(K, V)])( } /** - * Saves each RDD in `this` DStream as a Hadoop file. The file name at each batch interval is generated - * based on `prefix` and `suffix`: "prefix-TIME_IN_MS.suffix". + * Save each RDD in `this` DStream as a Hadoop file. The file name at each batch interval is + * generated based on `prefix` and `suffix`: "prefix-TIME_IN_MS.suffix". */ def saveAsNewAPIHadoopFiles( prefix: String, From ae5290f4a2fbeb51f5dc6e7add38f9c012ab7311 Mon Sep 17 00:00:00 2001 From: Patrick Wendell Date: Mon, 14 Jan 2013 13:31:32 -0800 Subject: [PATCH 208/226] Bug fix --- .../main/scala/spark/streaming/api/java/JavaPairDStream.scala | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/streaming/src/main/scala/spark/streaming/api/java/JavaPairDStream.scala b/streaming/src/main/scala/spark/streaming/api/java/JavaPairDStream.scala index 16b476ec90..6f4336a011 100644 --- a/streaming/src/main/scala/spark/streaming/api/java/JavaPairDStream.scala +++ b/streaming/src/main/scala/spark/streaming/api/java/JavaPairDStream.scala @@ -419,7 +419,6 @@ class JavaPairDStream[K, V](val dstream: DStream[(K, V)])( dstream.countByKeyAndWindow(windowDuration, slideDuration, numPartitions) } - // TODO: Update State def mapValues[U](f: JFunction[V, U]): JavaPairDStream[K, U] = { @@ -443,7 +442,7 @@ class JavaPairDStream[K, V](val dstream: DStream[(K, V)])( * of partitions. 
*/ def cogroup[W: ClassManifest](other: DStream[(K, W)]): DStream[(K, (Seq[V], Seq[W]))] = { - cogroup(other, defaultPartitioner()) + dstream.cogroup(other) } /** From 38d9a3a8630a38aa0cb9e6a13256816cfa9ab5a6 Mon Sep 17 00:00:00 2001 From: Patrick Wendell Date: Mon, 14 Jan 2013 13:31:49 -0800 Subject: [PATCH 209/226] Remove AnyRef constraint in updateState --- .../main/scala/spark/streaming/PairDStreamFunctions.scala | 8 ++++---- .../main/scala/spark/streaming/dstream/StateDStream.scala | 2 +- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/streaming/src/main/scala/spark/streaming/PairDStreamFunctions.scala b/streaming/src/main/scala/spark/streaming/PairDStreamFunctions.scala index f63279512b..fbcf061126 100644 --- a/streaming/src/main/scala/spark/streaming/PairDStreamFunctions.scala +++ b/streaming/src/main/scala/spark/streaming/PairDStreamFunctions.scala @@ -363,7 +363,7 @@ extends Serializable { * corresponding state key-value pair will be eliminated. * @tparam S State type */ - def updateStateByKey[S <: AnyRef : ClassManifest]( + def updateStateByKey[S: ClassManifest]( updateFunc: (Seq[V], Option[S]) => Option[S] ): DStream[(K, S)] = { updateStateByKey(updateFunc, defaultPartitioner()) @@ -378,7 +378,7 @@ extends Serializable { * @param numPartitions Number of partitions of each RDD in the new DStream. * @tparam S State type */ - def updateStateByKey[S <: AnyRef : ClassManifest]( + def updateStateByKey[S: ClassManifest]( updateFunc: (Seq[V], Option[S]) => Option[S], numPartitions: Int ): DStream[(K, S)] = { @@ -394,7 +394,7 @@ extends Serializable { * @param partitioner Partitioner for controlling the partitioning of each RDD in the new DStream. * @tparam S State type */ - def updateStateByKey[S <: AnyRef : ClassManifest]( + def updateStateByKey[S: ClassManifest]( updateFunc: (Seq[V], Option[S]) => Option[S], partitioner: Partitioner ): DStream[(K, S)] = { @@ -417,7 +417,7 @@ extends Serializable { * @param rememberPartitioner Whether to remember the paritioner object in the generated RDDs. 
* @tparam S State type */ - def updateStateByKey[S <: AnyRef : ClassManifest]( + def updateStateByKey[S: ClassManifest]( updateFunc: (Iterator[(K, Seq[V], Option[S])]) => Iterator[(K, S)], partitioner: Partitioner, rememberPartitioner: Boolean diff --git a/streaming/src/main/scala/spark/streaming/dstream/StateDStream.scala b/streaming/src/main/scala/spark/streaming/dstream/StateDStream.scala index a1ec2f5454..b4506c74aa 100644 --- a/streaming/src/main/scala/spark/streaming/dstream/StateDStream.scala +++ b/streaming/src/main/scala/spark/streaming/dstream/StateDStream.scala @@ -7,7 +7,7 @@ import spark.storage.StorageLevel import spark.streaming.{Duration, Time, DStream} private[streaming] -class StateDStream[K: ClassManifest, V: ClassManifest, S <: AnyRef : ClassManifest]( +class StateDStream[K: ClassManifest, V: ClassManifest, S: ClassManifest]( parent: DStream[(K, V)], updateFunc: (Iterator[(K, Seq[V], Option[S])]) => Iterator[(K, S)], partitioner: Partitioner, From 8ad6220bd376b04084604cf49b4537c97a16257d Mon Sep 17 00:00:00 2001 From: Patrick Wendell Date: Mon, 14 Jan 2013 14:54:47 -0800 Subject: [PATCH 210/226] Bugfix --- .../scala/spark/streaming/api/java/JavaPairDStream.scala | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/streaming/src/main/scala/spark/streaming/api/java/JavaPairDStream.scala b/streaming/src/main/scala/spark/streaming/api/java/JavaPairDStream.scala index 6f4336a011..0cccb083c5 100644 --- a/streaming/src/main/scala/spark/streaming/api/java/JavaPairDStream.scala +++ b/streaming/src/main/scala/spark/streaming/api/java/JavaPairDStream.scala @@ -441,8 +441,10 @@ class JavaPairDStream[K, V](val dstream: DStream[(K, V)])( * key in both RDDs. HashPartitioner is used to partition each generated RDD into default number * of partitions. 
*/ - def cogroup[W: ClassManifest](other: DStream[(K, W)]): DStream[(K, (Seq[V], Seq[W]))] = { - dstream.cogroup(other) + def cogroup[W](other: JavaPairDStream[K, W]): JavaPairDStream[K, (JList[V], JList[W])] = { + implicit val cm: ClassManifest[W] = + implicitly[ClassManifest[AnyRef]].asInstanceOf[ClassManifest[W]] + dstream.cogroup(other.dstream).mapValues(t => (seqAsJavaList(t._1), seqAsJavaList((t._2)))) } /** From a0013beb039c8569e9e69f96fce0c341d1a1d180 Mon Sep 17 00:00:00 2001 From: Patrick Wendell Date: Mon, 14 Jan 2013 15:15:02 -0800 Subject: [PATCH 211/226] Stash --- .../streaming/api/java/JavaPairDStream.scala | 29 +++++++++++++++- .../scala/spark/streaming/JavaAPISuite.java | 33 +++++++++++++++++++ 2 files changed, 61 insertions(+), 1 deletion(-) diff --git a/streaming/src/main/scala/spark/streaming/api/java/JavaPairDStream.scala b/streaming/src/main/scala/spark/streaming/api/java/JavaPairDStream.scala index 0cccb083c5..49a0f27b5b 100644 --- a/streaming/src/main/scala/spark/streaming/api/java/JavaPairDStream.scala +++ b/streaming/src/main/scala/spark/streaming/api/java/JavaPairDStream.scala @@ -15,6 +15,7 @@ import org.apache.hadoop.conf.Configuration import spark.api.java.JavaPairRDD import spark.storage.StorageLevel import java.lang +import com.google.common.base.Optional class JavaPairDStream[K, V](val dstream: DStream[(K, V)])( implicit val kManifiest: ClassManifest[K], @@ -419,7 +420,33 @@ class JavaPairDStream[K, V](val dstream: DStream[(K, V)])( dstream.countByKeyAndWindow(windowDuration, slideDuration, numPartitions) } - // TODO: Update State + /** + * Create a new "state" DStream where the state for each key is updated by applying + * the given function on the previous state of the key and the new values of each key. + * Hash partitioning is used to generate the RDDs with Spark's default number of partitions. + * @param updateFunc State update function. If `this` function returns None, then + * corresponding state key-value pair will be eliminated. 
+ * @tparam S State type + */ + def updateStateByKey[S](updateFunc: JFunction2[JList[V], Optional[S], Optional[S]]) + : JavaPairDStream[K, S] = { + implicit val cm: ClassManifest[S] = + implicitly[ClassManifest[AnyRef]].asInstanceOf[ClassManifest[S]] + + def scalaFunc(values: Seq[V], state: Option[S]): Option[S] = { + val list: JList[V] = values + val scalaState: Optional[S] = state match { + case Some(s) => Optional.of(s) + case _ => Optional.absent() + } + val result: Optional[S] = updateFunc.apply(list, scalaState) + result.isPresent match { + case true => Some(result.get()) + case _ => None + } + } + dstream.updateStateByKey(scalaFunc _) + } def mapValues[U](f: JFunction[V, U]): JavaPairDStream[K, U] = { implicit val cm: ClassManifest[U] = diff --git a/streaming/src/test/scala/spark/streaming/JavaAPISuite.java b/streaming/src/test/scala/spark/streaming/JavaAPISuite.java index 7475b9536b..d95ab485f8 100644 --- a/streaming/src/test/scala/spark/streaming/JavaAPISuite.java +++ b/streaming/src/test/scala/spark/streaming/JavaAPISuite.java @@ -1,5 +1,6 @@ package spark.streaming; +import com.google.common.base.Optional; import com.google.common.collect.Lists; import com.google.common.collect.Maps; import org.apache.hadoop.mapreduce.lib.input.SequenceFileInputFormat; @@ -551,6 +552,38 @@ public class JavaAPISuite implements Serializable { Assert.assertEquals(expected, result); } + @Test + public void testUpdateStateByKey() { + List>> inputData = stringIntKVStream; + + List>> expected = Arrays.asList( + Arrays.asList(new Tuple2("california", 4), + new Tuple2("new york", 5)), + Arrays.asList(new Tuple2("california", 14), + new Tuple2("new york", 9)), + Arrays.asList(new Tuple2("california", 10), + new Tuple2("new york", 4))); + + JavaDStream> stream = JavaTestUtils.attachTestInputStream(sc, inputData, 1); + JavaPairDStream pairStream = JavaPairDStream.fromJavaDStream(stream); + + JavaPairDStream updated = pairStream.updateStateByKey( + new Function2, Optional, Optional>(){ + @Override + public Optional call(List values, Optional state) { + int out = 0; + for (Integer v: values) { + out = out + v; + } + return Optional.of(out); + } + }); + JavaTestUtils.attachTestOutputStream(updated); + List>> result = JavaTestUtils.runStreams(sc, 3, 3); + + Assert.assertEquals(expected, result); + } + @Test public void testReduceByKeyAndWindowWithInverse() { List>> inputData = stringIntKVStream; From 1638fcb0dce296da22ffc90127d5148a8fab745e Mon Sep 17 00:00:00 2001 From: Tathagata Das Date: Mon, 14 Jan 2013 17:18:39 -0800 Subject: [PATCH 212/226] Fixed updateStateByKey to work with primitive types. --- .../main/scala/spark/streaming/PairDStreamFunctions.scala | 8 ++++---- .../main/scala/spark/streaming/dstream/StateDStream.scala | 2 +- .../test/scala/spark/streaming/BasicOperationsSuite.scala | 6 +++--- 3 files changed, 8 insertions(+), 8 deletions(-) diff --git a/streaming/src/main/scala/spark/streaming/PairDStreamFunctions.scala b/streaming/src/main/scala/spark/streaming/PairDStreamFunctions.scala index 3952457339..3dbef69868 100644 --- a/streaming/src/main/scala/spark/streaming/PairDStreamFunctions.scala +++ b/streaming/src/main/scala/spark/streaming/PairDStreamFunctions.scala @@ -377,7 +377,7 @@ extends Serializable { * corresponding state key-value pair will be eliminated. 
* @tparam S State type */ - def updateStateByKey[S <: AnyRef : ClassManifest]( + def updateStateByKey[S: ClassManifest]( updateFunc: (Seq[V], Option[S]) => Option[S] ): DStream[(K, S)] = { updateStateByKey(updateFunc, defaultPartitioner()) @@ -392,7 +392,7 @@ extends Serializable { * @param numPartitions Number of partitions of each RDD in the new DStream. * @tparam S State type */ - def updateStateByKey[S <: AnyRef : ClassManifest]( + def updateStateByKey[S: ClassManifest]( updateFunc: (Seq[V], Option[S]) => Option[S], numPartitions: Int ): DStream[(K, S)] = { @@ -408,7 +408,7 @@ extends Serializable { * @param partitioner Partitioner for controlling the partitioning of each RDD in the new DStream. * @tparam S State type */ - def updateStateByKey[S <: AnyRef : ClassManifest]( + def updateStateByKey[S: ClassManifest]( updateFunc: (Seq[V], Option[S]) => Option[S], partitioner: Partitioner ): DStream[(K, S)] = { @@ -431,7 +431,7 @@ extends Serializable { * @param rememberPartitioner Whether to remember the paritioner object in the generated RDDs. * @tparam S State type */ - def updateStateByKey[S <: AnyRef : ClassManifest]( + def updateStateByKey[S: ClassManifest]( updateFunc: (Iterator[(K, Seq[V], Option[S])]) => Iterator[(K, S)], partitioner: Partitioner, rememberPartitioner: Boolean diff --git a/streaming/src/main/scala/spark/streaming/dstream/StateDStream.scala b/streaming/src/main/scala/spark/streaming/dstream/StateDStream.scala index a1ec2f5454..b4506c74aa 100644 --- a/streaming/src/main/scala/spark/streaming/dstream/StateDStream.scala +++ b/streaming/src/main/scala/spark/streaming/dstream/StateDStream.scala @@ -7,7 +7,7 @@ import spark.storage.StorageLevel import spark.streaming.{Duration, Time, DStream} private[streaming] -class StateDStream[K: ClassManifest, V: ClassManifest, S <: AnyRef : ClassManifest]( +class StateDStream[K: ClassManifest, V: ClassManifest, S: ClassManifest]( parent: DStream[(K, V)], updateFunc: (Iterator[(K, Seq[V], Option[S])]) => Iterator[(K, S)], partitioner: Partitioner, diff --git a/streaming/src/test/scala/spark/streaming/BasicOperationsSuite.scala b/streaming/src/test/scala/spark/streaming/BasicOperationsSuite.scala index f9e03c607d..f73f9b1823 100644 --- a/streaming/src/test/scala/spark/streaming/BasicOperationsSuite.scala +++ b/streaming/src/test/scala/spark/streaming/BasicOperationsSuite.scala @@ -151,10 +151,10 @@ class BasicOperationsSuite extends TestSuiteBase { ) val updateStateOperation = (s: DStream[String]) => { - val updateFunc = (values: Seq[Int], state: Option[RichInt]) => { - Some(new RichInt(values.foldLeft(0)(_ + _) + state.map(_.self).getOrElse(0))) + val updateFunc = (values: Seq[Int], state: Option[Int]) => { + Some(values.foldLeft(0)(_ + _) + state.getOrElse(0)) } - s.map(x => (x, 1)).updateStateByKey[RichInt](updateFunc).map(t => (t._1, t._2.self)) + s.map(x => (x, 1)).updateStateByKey[Int](updateFunc) } testOperation(inputData, updateStateOperation, outputData, true) From a805ac4a7cdd520b6141dd885c780c526bb54ba6 Mon Sep 17 00:00:00 2001 From: Tathagata Das Date: Wed, 16 Jan 2013 10:55:26 -0800 Subject: [PATCH 213/226] Disabled checkpoint for PairwiseRDD (pySpark). 
--- core/src/main/scala/spark/api/python/PythonRDD.scala | 1 + 1 file changed, 1 insertion(+) diff --git a/core/src/main/scala/spark/api/python/PythonRDD.scala b/core/src/main/scala/spark/api/python/PythonRDD.scala index 276035a9ad..0138b22d38 100644 --- a/core/src/main/scala/spark/api/python/PythonRDD.scala +++ b/core/src/main/scala/spark/api/python/PythonRDD.scala @@ -138,6 +138,7 @@ private class PairwiseRDD(prev: RDD[Array[Byte]]) extends case Seq(a, b) => (a, b) case x => throw new Exception("PairwiseRDD: unexpected value: " + x) } + override def checkpoint() { } val asJavaPairRDD : JavaPairRDD[Array[Byte], Array[Byte]] = JavaPairRDD.fromRDD(this) } From 2a872335c5c7b5481c927272447e4a344ef59dda Mon Sep 17 00:00:00 2001 From: Patrick Wendell Date: Wed, 16 Jan 2013 13:17:45 -0800 Subject: [PATCH 214/226] Bug fix and test cleanup --- .../streaming/api/java/JavaPairDStream.scala | 4 ++-- .../scala/spark/streaming/JavaAPISuite.java | 17 ++++++++++------- 2 files changed, 12 insertions(+), 9 deletions(-) diff --git a/streaming/src/main/scala/spark/streaming/api/java/JavaPairDStream.scala b/streaming/src/main/scala/spark/streaming/api/java/JavaPairDStream.scala index 49a0f27b5b..1c5b864ff0 100644 --- a/streaming/src/main/scala/spark/streaming/api/java/JavaPairDStream.scala +++ b/streaming/src/main/scala/spark/streaming/api/java/JavaPairDStream.scala @@ -433,7 +433,7 @@ class JavaPairDStream[K, V](val dstream: DStream[(K, V)])( implicit val cm: ClassManifest[S] = implicitly[ClassManifest[AnyRef]].asInstanceOf[ClassManifest[S]] - def scalaFunc(values: Seq[V], state: Option[S]): Option[S] = { + val scalaFunc: (Seq[V], Option[S]) => Option[S] = (values, state) => { val list: JList[V] = values val scalaState: Optional[S] = state match { case Some(s) => Optional.of(s) @@ -445,7 +445,7 @@ class JavaPairDStream[K, V](val dstream: DStream[(K, V)])( case _ => None } } - dstream.updateStateByKey(scalaFunc _) + dstream.updateStateByKey(scalaFunc) } def mapValues[U](f: JFunction[V, U]): JavaPairDStream[K, U] = { diff --git a/streaming/src/test/scala/spark/streaming/JavaAPISuite.java b/streaming/src/test/scala/spark/streaming/JavaAPISuite.java index d95ab485f8..549fb5b733 100644 --- a/streaming/src/test/scala/spark/streaming/JavaAPISuite.java +++ b/streaming/src/test/scala/spark/streaming/JavaAPISuite.java @@ -52,15 +52,15 @@ public class JavaAPISuite implements Serializable { Arrays.asList(3,4,5), Arrays.asList(3)); - List> expected = Arrays.asList( - Arrays.asList(4), - Arrays.asList(3), - Arrays.asList(1)); + List> expected = Arrays.asList( + Arrays.asList(4L), + Arrays.asList(3L), + Arrays.asList(1L)); JavaDStream stream = JavaTestUtils.attachTestInputStream(sc, inputData, 1); JavaDStream count = stream.count(); JavaTestUtils.attachTestOutputStream(count); - List> result = JavaTestUtils.runStreams(sc, 3, 3); + List> result = JavaTestUtils.runStreams(sc, 3, 3); assertOrderInvariantEquals(expected, result); } @@ -561,8 +561,8 @@ public class JavaAPISuite implements Serializable { new Tuple2("new york", 5)), Arrays.asList(new Tuple2("california", 14), new Tuple2("new york", 9)), - Arrays.asList(new Tuple2("california", 10), - new Tuple2("new york", 4))); + Arrays.asList(new Tuple2("california", 14), + new Tuple2("new york", 9))); JavaDStream> stream = JavaTestUtils.attachTestInputStream(sc, inputData, 1); JavaPairDStream pairStream = JavaPairDStream.fromJavaDStream(stream); @@ -572,6 +572,9 @@ public class JavaAPISuite implements Serializable { @Override public Optional call(List values, Optional state) 
{ int out = 0; + if (state.isPresent()) { + out = out + state.get(); + } for (Integer v: values) { out = out + v; } From 8e6cbbc6c7434b53c63e19a1c9c2dca1f24de654 Mon Sep 17 00:00:00 2001 From: Patrick Wendell Date: Wed, 16 Jan 2013 13:50:02 -0800 Subject: [PATCH 215/226] Adding other updateState functions --- .../streaming/api/java/JavaPairDStream.scala | 62 +++++++++++++++---- 1 file changed, 49 insertions(+), 13 deletions(-) diff --git a/streaming/src/main/scala/spark/streaming/api/java/JavaPairDStream.scala b/streaming/src/main/scala/spark/streaming/api/java/JavaPairDStream.scala index 1c5b864ff0..8c76d8c1d8 100644 --- a/streaming/src/main/scala/spark/streaming/api/java/JavaPairDStream.scala +++ b/streaming/src/main/scala/spark/streaming/api/java/JavaPairDStream.scala @@ -420,6 +420,23 @@ class JavaPairDStream[K, V](val dstream: DStream[(K, V)])( dstream.countByKeyAndWindow(windowDuration, slideDuration, numPartitions) } + private def convertUpdateStateFunction[S](in: JFunction2[JList[V], Optional[S], Optional[S]]): + (Seq[V], Option[S]) => Option[S] = { + val scalaFunc: (Seq[V], Option[S]) => Option[S] = (values, state) => { + val list: JList[V] = values + val scalaState: Optional[S] = state match { + case Some(s) => Optional.of(s) + case _ => Optional.absent() + } + val result: Optional[S] = in.apply(list, scalaState) + result.isPresent match { + case true => Some(result.get()) + case _ => None + } + } + scalaFunc + } + /** * Create a new "state" DStream where the state for each key is updated by applying * the given function on the previous state of the key and the new values of each key. @@ -432,20 +449,39 @@ class JavaPairDStream[K, V](val dstream: DStream[(K, V)])( : JavaPairDStream[K, S] = { implicit val cm: ClassManifest[S] = implicitly[ClassManifest[AnyRef]].asInstanceOf[ClassManifest[S]] + dstream.updateStateByKey(convertUpdateStateFunction(updateFunc)) + } - val scalaFunc: (Seq[V], Option[S]) => Option[S] = (values, state) => { - val list: JList[V] = values - val scalaState: Optional[S] = state match { - case Some(s) => Optional.of(s) - case _ => Optional.absent() - } - val result: Optional[S] = updateFunc.apply(list, scalaState) - result.isPresent match { - case true => Some(result.get()) - case _ => None - } - } - dstream.updateStateByKey(scalaFunc) + /** + * Create a new "state" DStream where the state for each key is updated by applying + * the given function on the previous state of the key and the new values of each key. + * Hash partitioning is used to generate the RDDs with `numPartitions` partitions. + * @param updateFunc State update function. If `this` function returns None, then + * corresponding state key-value pair will be eliminated. + * @param numPartitions Number of partitions of each RDD in the new DStream. + * @tparam S State type + */ + def updateStateByKey[S: ClassManifest]( + updateFunc: JFunction2[JList[V], Optional[S], Optional[S]], + numPartitions: Int) + : JavaPairDStream[K, S] = { + dstream.updateStateByKey(convertUpdateStateFunction(updateFunc), numPartitions) + } + + /** + * Create a new "state" DStream where the state for each key is updated by applying + * the given function on the previous state of the key and the new values of the key. + * [[spark.Partitioner]] is used to control the partitioning of each RDD. + * @param updateFunc State update function. If `this` function returns None, then + * corresponding state key-value pair will be eliminated. 
+ * @param partitioner Partitioner for controlling the partitioning of each RDD in the new DStream. + * @tparam S State type */ + def updateStateByKey[S: ClassManifest]( + updateFunc: JFunction2[JList[V], Optional[S], Optional[S]], + partitioner: Partitioner + ): JavaPairDStream[K, S] = { + dstream.updateStateByKey(convertUpdateStateFunction(updateFunc), partitioner) } def mapValues[U](f: JFunction[V, U]): JavaPairDStream[K, U] = { From d5570c7968baba1c1fe86c68dc1c388fae23907b Mon Sep 17 00:00:00 2001 From: Patrick Wendell Date: Wed, 16 Jan 2013 21:07:49 -0800 Subject: [PATCH 216/226] Adding checkpointing to Java API --- .../scala/spark/api/java/JavaRDDLike.scala | 28 +++++++++++++++++++ .../spark/api/java/JavaSparkContext.scala | 26 +++++++++++++++++ core/src/test/scala/spark/JavaAPISuite.java | 27 ++++++++++++++++++ 3 files changed, 81 insertions(+) diff --git a/core/src/main/scala/spark/api/java/JavaRDDLike.scala b/core/src/main/scala/spark/api/java/JavaRDDLike.scala index 81d3a94466..958f5c26a1 100644 --- a/core/src/main/scala/spark/api/java/JavaRDDLike.scala +++ b/core/src/main/scala/spark/api/java/JavaRDDLike.scala @@ -9,6 +9,7 @@ import spark.api.java.JavaPairRDD._ import spark.api.java.function.{Function2 => JFunction2, Function => JFunction, _} import spark.partial.{PartialResult, BoundedDouble} import spark.storage.StorageLevel +import com.google.common.base.Optional trait JavaRDDLike[T, This <: JavaRDDLike[T, This]] extends Serializable { @@ -298,4 +299,31 @@ trait JavaRDDLike[T, This <: JavaRDDLike[T, This]] extends Serializable { * Save this RDD as a SequenceFile of serialized objects. */ def saveAsObjectFile(path: String) = rdd.saveAsObjectFile(path) + + /** + * Mark this RDD for checkpointing. The RDD will be saved to a file inside `checkpointDir` + * (set using setCheckpointDir()) and all references to its parent RDDs will be removed. + * This is used to truncate very long lineages. In the current implementation, Spark will save + * this RDD to a file (using saveAsObjectFile()) after the first job using this RDD is done. + * Hence, it is strongly recommended to use checkpoint() on RDDs when + * (i) checkpoint() is called before any job has been executed on this RDD. + * (ii) This RDD has been made to persist in memory. Otherwise saving it on a file will + * require recomputation. + */ + def checkpoint() = rdd.checkpoint() + + /** + * Return whether this RDD has been checkpointed or not + */ + def isCheckpointed(): Boolean = rdd.isCheckpointed() + + /** + * Gets the name of the file to which this RDD was checkpointed + */ + def getCheckpointFile(): Optional[String] = { + rdd.getCheckpointFile match { + case Some(file) => Optional.of(file) + case _ => Optional.absent() + } + } } diff --git a/core/src/main/scala/spark/api/java/JavaSparkContext.scala b/core/src/main/scala/spark/api/java/JavaSparkContext.scala index bf9ad7a200..22bfa2280d 100644 --- a/core/src/main/scala/spark/api/java/JavaSparkContext.scala +++ b/core/src/main/scala/spark/api/java/JavaSparkContext.scala @@ -342,6 +342,32 @@ class JavaSparkContext(val sc: SparkContext) extends JavaSparkContextVarargsWork def clearFiles() { sc.clearFiles() } + + /** + * Set the directory under which RDDs are going to be checkpointed. This method will + * create this directory and will throw an exception if the path already exists (to avoid + * overwriting existing files). The directory will be deleted on exit + * if indicated.
+ */ def setCheckpointDir(dir: String, useExisting: Boolean) { + sc.setCheckpointDir(dir, useExisting) + } + + /** + * Set the directory under which RDDs are going to be checkpointed. This method will + * create this directory and will throw an exception if the path already exists (to avoid + * overwriting existing files). The directory will be deleted on exit + * if indicated. + */ + def setCheckpointDir(dir: String) { + sc.setCheckpointDir(dir) + } + + protected def checkpointFile[T](path: String): JavaRDD[T] = { + implicit val cm: ClassManifest[T] = + implicitly[ClassManifest[AnyRef]].asInstanceOf[ClassManifest[T]] + new JavaRDD(sc.checkpointFile(path)) + } } object JavaSparkContext { diff --git a/core/src/test/scala/spark/JavaAPISuite.java b/core/src/test/scala/spark/JavaAPISuite.java index b99e790093..0b5354774b 100644 --- a/core/src/test/scala/spark/JavaAPISuite.java +++ b/core/src/test/scala/spark/JavaAPISuite.java @@ -625,4 +625,31 @@ public class JavaAPISuite implements Serializable { }); Assert.assertEquals((Float) 25.0f, floatAccum.value()); } + + @Test + public void checkpointAndComputation() { + File tempDir = Files.createTempDir(); + JavaRDD rdd = sc.parallelize(Arrays.asList(1, 2, 3, 4, 5)); + sc.setCheckpointDir(tempDir.getAbsolutePath(), true); + Assert.assertEquals(false, rdd.isCheckpointed()); + rdd.checkpoint(); + rdd.count(); // Forces the DAG to cause a checkpoint + Assert.assertEquals(true, rdd.isCheckpointed()); + Assert.assertEquals(Arrays.asList(1, 2, 3, 4, 5), rdd.collect()); + } + + @Test + public void checkpointAndRestore() { + File tempDir = Files.createTempDir(); + JavaRDD rdd = sc.parallelize(Arrays.asList(1, 2, 3, 4, 5)); + sc.setCheckpointDir(tempDir.getAbsolutePath(), true); + Assert.assertEquals(false, rdd.isCheckpointed()); + rdd.checkpoint(); + rdd.count(); // Forces the DAG to cause a checkpoint + Assert.assertEquals(true, rdd.isCheckpointed()); + + Assert.assertTrue(rdd.getCheckpointFile().isPresent()); + JavaRDD recovered = sc.checkpointFile(rdd.getCheckpointFile().get()); + Assert.assertEquals(Arrays.asList(1, 2, 3, 4, 5), recovered.collect()); + } } From 61b877c688d6c9a4e9c4d8f22ca0cadae29895bb Mon Sep 17 00:00:00 2001 From: Patrick Wendell Date: Thu, 17 Jan 2013 09:04:56 -0800 Subject: [PATCH 217/226] Adding flatMap --- .../streaming/api/java/JavaDStreamLike.scala | 29 +++++++ .../scala/spark/streaming/JavaAPISuite.java | 81 ++++++++++++++++++- 2 files changed, 106 insertions(+), 4 deletions(-) diff --git a/streaming/src/main/scala/spark/streaming/api/java/JavaDStreamLike.scala b/streaming/src/main/scala/spark/streaming/api/java/JavaDStreamLike.scala index 32df665a98..b93cb7865a 100644 --- a/streaming/src/main/scala/spark/streaming/api/java/JavaDStreamLike.scala +++ b/streaming/src/main/scala/spark/streaming/api/java/JavaDStreamLike.scala @@ -65,6 +65,27 @@ trait JavaDStreamLike[T, This <: JavaDStreamLike[T, This]] extends Serializable } /** + * Return a new DStream by applying a function to all elements of this DStream, + * and then flattening the results + */ + def flatMap[U](f: FlatMapFunction[T, U]): JavaDStream[U] = { + import scala.collection.JavaConverters._ + def fn = (x: T) => f.apply(x).asScala + new JavaDStream(dstream.flatMap(fn)(f.elementType()))(f.elementType()) + } + + /** + * Return a new DStream by applying a function to all elements of this DStream, + * and then flattening the results + */ + def flatMap[K, V](f: PairFlatMapFunction[T, K, V]): JavaPairDStream[K, V] = { + import
scala.collection.JavaConverters._ + def fn = (x: T) => f.apply(x).asScala + def cm = implicitly[ClassManifest[AnyRef]].asInstanceOf[ClassManifest[Tuple2[K, V]]] + new JavaPairDStream(dstream.flatMap(fn)(cm))(f.keyType(), f.valueType()) + } + + /** * Return a new DStream in which each RDD is generated by applying mapPartitions() to each RDDs * of this DStream. Applying mapPartitions() to an RDD applies a function to each partition * of the RDD. @@ -151,4 +172,12 @@ trait JavaDStreamLike[T, This <: JavaDStreamLike[T, This]] extends Serializable transformFunc.call(new JavaRDD[T](in), time).rdd dstream.transform(scalaTransform(_, _)) } + + /** + * Enable periodic checkpointing of RDDs of this DStream + * @param interval Time interval after which generated RDD will be checkpointed + */ + def checkpoint(interval: Duration) = { + dstream.checkpoint(interval) + } } \ No newline at end of file diff --git a/streaming/src/test/scala/spark/streaming/JavaAPISuite.java b/streaming/src/test/scala/spark/streaming/JavaAPISuite.java index 549fb5b733..41fd9f99ff 100644 --- a/streaming/src/test/scala/spark/streaming/JavaAPISuite.java +++ b/streaming/src/test/scala/spark/streaming/JavaAPISuite.java @@ -11,10 +11,7 @@ import org.junit.Test; import scala.Tuple2; import spark.HashPartitioner; import spark.api.java.JavaRDD; -import spark.api.java.function.FlatMapFunction; -import spark.api.java.function.Function; -import spark.api.java.function.Function2; -import spark.api.java.function.PairFunction; +import spark.api.java.function.*; import spark.storage.StorageLevel; import spark.streaming.api.java.JavaDStream; import spark.streaming.api.java.JavaPairDStream; @@ -308,6 +305,82 @@ public class JavaAPISuite implements Serializable { assertOrderInvariantEquals(expected, result); } + @Test + public void testFlatMap() { + List> inputData = Arrays.asList( + Arrays.asList("go", "giants"), + Arrays.asList("boo", "dodgers"), + Arrays.asList("athletics")); + + List> expected = Arrays.asList( + Arrays.asList("g","o","g","i","a","n","t","s"), + Arrays.asList("b", "o", "o", "d","o","d","g","e","r","s"), + Arrays.asList("a","t","h","l","e","t","i","c","s")); + + JavaDStream stream = JavaTestUtils.attachTestInputStream(sc, inputData, 1); + JavaDStream flatMapped = stream.flatMap(new FlatMapFunction() { + @Override + public Iterable call(String x) { + return Lists.newArrayList(x.split("(?!^)")); + } + }); + JavaTestUtils.attachTestOutputStream(flatMapped); + List> result = JavaTestUtils.runStreams(sc, 3, 3); + + assertOrderInvariantEquals(expected, result); + } + + @Test + public void testPairFlatMap() { + List> inputData = Arrays.asList( + Arrays.asList("giants"), + Arrays.asList("dodgers"), + Arrays.asList("athletics")); + + List>> expected = Arrays.asList( + Arrays.asList( + new Tuple2(6, "g"), + new Tuple2(6, "i"), + new Tuple2(6, "a"), + new Tuple2(6, "n"), + new Tuple2(6, "t"), + new Tuple2(6, "s")), + Arrays.asList( + new Tuple2(7, "d"), + new Tuple2(7, "o"), + new Tuple2(7, "d"), + new Tuple2(7, "g"), + new Tuple2(7, "e"), + new Tuple2(7, "r"), + new Tuple2(7, "s")), + Arrays.asList( + new Tuple2(9, "a"), + new Tuple2(9, "t"), + new Tuple2(9, "h"), + new Tuple2(9, "l"), + new Tuple2(9, "e"), + new Tuple2(9, "t"), + new Tuple2(9, "i"), + new Tuple2(9, "c"), + new Tuple2(9, "s"))); + + JavaDStream stream = JavaTestUtils.attachTestInputStream(sc, inputData, 1); + JavaPairDStream flatMapped = stream.flatMap(new PairFlatMapFunction() { + @Override + public Iterable> call(String in) throws Exception { + List> out = 
Lists.newArrayList(); + for (String letter: in.split("(?!^)")) { + out.add(new Tuple2(in.length(), letter)); + } + return out; + } + }); + JavaTestUtils.attachTestOutputStream(flatMapped); + List>> result = JavaTestUtils.runStreams(sc, 3, 3); + + Assert.assertEquals(expected, result); + } + @Test public void testUnion() { List> inputData1 = Arrays.asList( From 82b8707c6bbb3926e59c241b6e6d5ead5467aae7 Mon Sep 17 00:00:00 2001 From: Patrick Wendell Date: Thu, 17 Jan 2013 12:21:44 -0800 Subject: [PATCH 218/226] Checkpointing in Streaming java API --- .../api/java/JavaStreamingContext.scala | 19 +++++- streaming/src/test/scala/JavaTestUtils.scala | 9 ++- .../scala/spark/streaming/JavaAPISuite.java | 68 +++++++++++++++++++ 3 files changed, 93 insertions(+), 3 deletions(-) diff --git a/streaming/src/main/scala/spark/streaming/api/java/JavaStreamingContext.scala b/streaming/src/main/scala/spark/streaming/api/java/JavaStreamingContext.scala index 2833793b94..ebbc516b38 100644 --- a/streaming/src/main/scala/spark/streaming/api/java/JavaStreamingContext.scala +++ b/streaming/src/main/scala/spark/streaming/api/java/JavaStreamingContext.scala @@ -13,14 +13,29 @@ import java.io.InputStream import java.util.{Map => JMap} class JavaStreamingContext(val ssc: StreamingContext) { - def this(master: String, frameworkName: String, batchDuration: Duration) = - this(new StreamingContext(master, frameworkName, batchDuration)) // TODOs: // - Test StreamingContext functions // - Test to/from Hadoop functions // - Support creating and registering InputStreams + + /** + * Creates a StreamingContext. + * @param master Name of the Spark Master + * @param frameworkName Name to be used when registering with the scheduler + * @param batchDuration The time interval at which streaming data will be divided into batches + */ + def this(master: String, frameworkName: String, batchDuration: Duration) = + this(new StreamingContext(master, frameworkName, batchDuration)) + + /** + * Re-creates a StreamingContext from a checkpoint file. + * @param path Path either to the directory that was specified as the checkpoint directory, or + * to the checkpoint file 'graph' or 'graph.bk'. + */ + def this(path: String) = this (new StreamingContext(path)) + /** * Create an input stream that pulls messages form a Kafka Broker. * @param hostname Zookeper hostname. diff --git a/streaming/src/test/scala/JavaTestUtils.scala b/streaming/src/test/scala/JavaTestUtils.scala index 24ebc15e38..56349837e5 100644 --- a/streaming/src/test/scala/JavaTestUtils.scala +++ b/streaming/src/test/scala/JavaTestUtils.scala @@ -8,7 +8,7 @@ import java.util.ArrayList import collection.JavaConversions._ /** Exposes streaming test functionality in a Java-friendly way. */ -object JavaTestUtils extends TestSuiteBase { +trait JavaTestBase extends TestSuiteBase { /** * Create a [[spark.streaming.TestInputStream]] and attach it to the supplied context. 
@@ -56,3 +56,10 @@ object JavaTestUtils extends TestSuiteBase { } } +object JavaTestUtils extends JavaTestBase { + +} + +object JavaCheckpointTestUtils extends JavaTestBase { + override def actuallyWait = true +} \ No newline at end of file diff --git a/streaming/src/test/scala/spark/streaming/JavaAPISuite.java b/streaming/src/test/scala/spark/streaming/JavaAPISuite.java index 41fd9f99ff..8a63e8cd3f 100644 --- a/streaming/src/test/scala/spark/streaming/JavaAPISuite.java +++ b/streaming/src/test/scala/spark/streaming/JavaAPISuite.java @@ -3,6 +3,7 @@ package spark.streaming; import com.google.common.base.Optional; import com.google.common.collect.Lists; import com.google.common.collect.Maps; +import com.google.common.io.Files; import org.apache.hadoop.mapreduce.lib.input.SequenceFileInputFormat; import org.junit.After; import org.junit.Assert; @@ -17,6 +18,7 @@ import spark.streaming.api.java.JavaDStream; import spark.streaming.api.java.JavaPairDStream; import spark.streaming.api.java.JavaStreamingContext; import spark.streaming.JavaTestUtils; +import spark.streaming.JavaCheckpointTestUtils; import spark.streaming.dstream.KafkaPartitionKey; import sun.org.mozilla.javascript.annotations.JSFunction; @@ -871,6 +873,72 @@ public class JavaAPISuite implements Serializable { Assert.assertEquals(expected, result); } + @Test + public void testCheckpointMasterRecovery() throws InterruptedException { + List> inputData = Arrays.asList( + Arrays.asList("this", "is"), + Arrays.asList("a", "test"), + Arrays.asList("counting", "letters")); + + List> expectedInitial = Arrays.asList( + Arrays.asList(4,2)); + List> expectedFinal = Arrays.asList( + Arrays.asList(1,4), + Arrays.asList(8,7)); + + + File tempDir = Files.createTempDir(); + sc.checkpoint(tempDir.getAbsolutePath(), new Duration(1000)); + + JavaDStream stream = JavaCheckpointTestUtils.attachTestInputStream(sc, inputData, 1); + JavaDStream letterCount = stream.map(new Function() { + @Override + public Integer call(String s) throws Exception { + return s.length(); + } + }); + JavaCheckpointTestUtils.attachTestOutputStream(letterCount); + List> initialResult = JavaTestUtils.runStreams(sc, 1, 1); + + assertOrderInvariantEquals(expectedInitial, initialResult); + Thread.sleep(1000); + + sc.stop(); + sc = new JavaStreamingContext(tempDir.getAbsolutePath()); + sc.start(); + List> finalResult = JavaCheckpointTestUtils.runStreams(sc, 2, 2); + assertOrderInvariantEquals(expectedFinal, finalResult); + } + + /** TEST DISABLED: Pending a discussion about checkpoint() semantics with TD + @Test + public void testCheckpointofIndividualStream() throws InterruptedException { + List> inputData = Arrays.asList( + Arrays.asList("this", "is"), + Arrays.asList("a", "test"), + Arrays.asList("counting", "letters")); + + List> expected = Arrays.asList( + Arrays.asList(4,2), + Arrays.asList(1,4), + Arrays.asList(8,7)); + + JavaDStream stream = JavaCheckpointTestUtils.attachTestInputStream(sc, inputData, 1); + JavaDStream letterCount = stream.map(new Function() { + @Override + public Integer call(String s) throws Exception { + return s.length(); + } + }); + JavaCheckpointTestUtils.attachTestOutputStream(letterCount); + + letterCount.checkpoint(new Duration(1000)); + + List> result1 = JavaCheckpointTestUtils.runStreams(sc, 3, 3); + assertOrderInvariantEquals(expected, result1); + } + */ + // Input stream tests. These mostly just test that we can instantiate a given InputStream with // Java arguments and assign it to a JavaDStream without producing type errors. 
Testing of the // InputStream functionality is deferred to the existing Scala tests. From 2261e62ee52495599b7a8717884e878497d343ea Mon Sep 17 00:00:00 2001 From: Patrick Wendell Date: Thu, 17 Jan 2013 18:35:24 -0800 Subject: [PATCH 219/226] Style cleanup --- .../streaming/api/java/JavaDStream.scala | 22 ++++++++++++++++++- .../streaming/api/java/JavaPairDStream.scala | 1 - .../api/java/JavaStreamingContext.scala | 6 ++++- 3 files changed, 26 insertions(+), 3 deletions(-) diff --git a/streaming/src/main/scala/spark/streaming/api/java/JavaDStream.scala b/streaming/src/main/scala/spark/streaming/api/java/JavaDStream.scala index 32faef5670..e21e54d3e5 100644 --- a/streaming/src/main/scala/spark/streaming/api/java/JavaDStream.scala +++ b/streaming/src/main/scala/spark/streaming/api/java/JavaDStream.scala @@ -6,6 +6,26 @@ import spark.api.java.JavaRDD import java.util.{List => JList} import spark.storage.StorageLevel +/** + * A Discretized Stream (DStream), the basic abstraction in Spark Streaming, is a continuous + * sequence of RDDs (of the same type) representing a continuous stream of data (see [[spark.RDD]] + * for more details on RDDs). DStreams can either be created from live data (such as data from + * HDFS, Kafka or Flume) or they can be generated by transforming existing DStreams using operations + * such as `map`, `window` and `reduceByKeyAndWindow`. While a Spark Streaming program is running, each + * DStream periodically generates an RDD, either from live data or by transforming the RDD generated + * by a parent DStream. + * + * This class contains the basic operations available on all DStreams, such as `map`, `filter` and + * `window`. In addition, [[spark.streaming.api.java.JavaPairDStream]] contains operations available + * only on DStreams of key-value pairs, such as `groupByKeyAndWindow` and `join`. These operations + * are automatically available on any DStream of the right type (e.g., DStream[(Int, Int)]) through + * implicit conversions when `spark.streaming.StreamingContext._` is imported.
* + * Each DStream internally is characterized by a few basic properties: + * - A list of other DStreams that the DStream depends on + * - A time interval at which the DStream generates an RDD + * - A function that is used to generate an RDD after each time interval + */ class JavaDStream[T](val dstream: DStream[T])(implicit val classManifest: ClassManifest[T]) extends JavaDStreamLike[T, JavaDStream[T]] { @@ -69,4 +89,4 @@ class JavaDStream[T](val dstream: DStream[T])(implicit val classManifest: ClassM object JavaDStream { implicit def fromDStream[T: ClassManifest](dstream: DStream[T]): JavaDStream[T] = new JavaDStream[T](dstream) -} +} \ No newline at end of file diff --git a/streaming/src/main/scala/spark/streaming/api/java/JavaPairDStream.scala b/streaming/src/main/scala/spark/streaming/api/java/JavaPairDStream.scala index 8c76d8c1d8..ef10c091ca 100644 --- a/streaming/src/main/scala/spark/streaming/api/java/JavaPairDStream.scala +++ b/streaming/src/main/scala/spark/streaming/api/java/JavaPairDStream.scala @@ -14,7 +14,6 @@ import org.apache.hadoop.mapreduce.{OutputFormat => NewOutputFormat} import org.apache.hadoop.conf.Configuration import spark.api.java.JavaPairRDD import spark.storage.StorageLevel -import java.lang import com.google.common.base.Optional class JavaPairDStream[K, V](val dstream: DStream[(K, V)])( implicit val kManifiest: ClassManifest[K], diff --git a/streaming/src/main/scala/spark/streaming/api/java/JavaStreamingContext.scala b/streaming/src/main/scala/spark/streaming/api/java/JavaStreamingContext.scala index ebbc516b38..7e1c2a999f 100644 --- a/streaming/src/main/scala/spark/streaming/api/java/JavaStreamingContext.scala +++ b/streaming/src/main/scala/spark/streaming/api/java/JavaStreamingContext.scala @@ -12,10 +12,14 @@ import org.apache.hadoop.mapreduce.{InputFormat => NewInputFormat} import java.io.InputStream import java.util.{Map => JMap} +/** + * A StreamingContext is the main entry point for Spark Streaming functionality. Besides the basic + * information (such as cluster URL and job name) to internally create a SparkContext, it provides + * methods used to create DStreams from various input sources.
+ */ class JavaStreamingContext(val ssc: StreamingContext) { // TODOs: - // - Test StreamingContext functions // - Test to/from Hadoop functions // - Support creating and registering InputStreams From 70ba994d6d6f9e62269168e6a8a61ffce736a4d2 Mon Sep 17 00:00:00 2001 From: Patrick Wendell Date: Thu, 17 Jan 2013 18:38:30 -0800 Subject: [PATCH 220/226] Import fixup --- .../src/main/scala/spark/streaming/api/java/JavaDStream.scala | 1 - .../scala/spark/streaming/api/java/JavaStreamingContext.scala | 1 - streaming/src/test/scala/spark/streaming/JavaAPISuite.java | 1 - 3 files changed, 3 deletions(-) diff --git a/streaming/src/main/scala/spark/streaming/api/java/JavaDStream.scala b/streaming/src/main/scala/spark/streaming/api/java/JavaDStream.scala index e21e54d3e5..2e7466b16c 100644 --- a/streaming/src/main/scala/spark/streaming/api/java/JavaDStream.scala +++ b/streaming/src/main/scala/spark/streaming/api/java/JavaDStream.scala @@ -3,7 +3,6 @@ package spark.streaming.api.java import spark.streaming.{Duration, Time, DStream} import spark.api.java.function.{Function => JFunction} import spark.api.java.JavaRDD -import java.util.{List => JList} import spark.storage.StorageLevel /** diff --git a/streaming/src/main/scala/spark/streaming/api/java/JavaStreamingContext.scala b/streaming/src/main/scala/spark/streaming/api/java/JavaStreamingContext.scala index 7e1c2a999f..accac82e09 100644 --- a/streaming/src/main/scala/spark/streaming/api/java/JavaStreamingContext.scala +++ b/streaming/src/main/scala/spark/streaming/api/java/JavaStreamingContext.scala @@ -1,7 +1,6 @@ package spark.streaming.api.java import scala.collection.JavaConversions._ -import java.util.{List => JList} import java.lang.{Long => JLong, Integer => JInt} import spark.streaming._ diff --git a/streaming/src/test/scala/spark/streaming/JavaAPISuite.java b/streaming/src/test/scala/spark/streaming/JavaAPISuite.java index 8a63e8cd3f..374793b57e 100644 --- a/streaming/src/test/scala/spark/streaming/JavaAPISuite.java +++ b/streaming/src/test/scala/spark/streaming/JavaAPISuite.java @@ -20,7 +20,6 @@ import spark.streaming.api.java.JavaStreamingContext; import spark.streaming.JavaTestUtils; import spark.streaming.JavaCheckpointTestUtils; import spark.streaming.dstream.KafkaPartitionKey; -import sun.org.mozilla.javascript.annotations.JSFunction; import java.io.*; import java.util.*; From 6fba7683c29b64a33a6daa28cc56bb5d20574314 Mon Sep 17 00:00:00 2001 From: Patrick Wendell Date: Thu, 17 Jan 2013 18:46:24 -0800 Subject: [PATCH 221/226] Small doc fix --- .../scala/spark/streaming/examples/JavaFlumeEventCount.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/src/main/scala/spark/streaming/examples/JavaFlumeEventCount.java b/examples/src/main/scala/spark/streaming/examples/JavaFlumeEventCount.java index 151b71eb81..cddce16e39 100644 --- a/examples/src/main/scala/spark/streaming/examples/JavaFlumeEventCount.java +++ b/examples/src/main/scala/spark/streaming/examples/JavaFlumeEventCount.java @@ -12,7 +12,7 @@ import spark.streaming.dstream.SparkFlumeEvent; * an Avro server on at the request host:port address and listen for requests. * Your Flume AvroSink should be pointed to this address. 
* - * Usage: FlumeEventCount + * Usage: JavaFlumeEventCount * * is a Spark master URL * is the host the Flume receiver will be started on - a receiver From e0165bf7141086e28f88cd68ab7bc6249061c924 Mon Sep 17 00:00:00 2001 From: Patrick Wendell Date: Thu, 17 Jan 2013 21:07:09 -0800 Subject: [PATCH 222/226] Adding queueStream and some slight refactoring --- .../streaming/examples/JavaQueueStream.java | 62 ++++++ project/SparkBuild.scala | 3 +- .../api/java/JavaStreamingContext.scala | 58 ++++++ .../scala/spark/streaming/JavaAPISuite.java | 186 ++++++++++-------- 4 files changed, 226 insertions(+), 83 deletions(-) create mode 100644 examples/src/main/scala/spark/streaming/examples/JavaQueueStream.java diff --git a/examples/src/main/scala/spark/streaming/examples/JavaQueueStream.java b/examples/src/main/scala/spark/streaming/examples/JavaQueueStream.java new file mode 100644 index 0000000000..43c3cd4dfa --- /dev/null +++ b/examples/src/main/scala/spark/streaming/examples/JavaQueueStream.java @@ -0,0 +1,62 @@ +package spark.streaming.examples; + +import com.google.common.collect.Lists; +import scala.Tuple2; +import spark.api.java.JavaRDD; +import spark.api.java.function.Function2; +import spark.api.java.function.PairFunction; +import spark.streaming.Duration; +import spark.streaming.api.java.JavaDStream; +import spark.streaming.api.java.JavaPairDStream; +import spark.streaming.api.java.JavaStreamingContext; + +import java.util.LinkedList; +import java.util.List; +import java.util.Queue; + +public class JavaQueueStream { + public static void main(String[] args) throws InterruptedException { + if (args.length < 1) { + System.err.println("Usage: JavaQueueStream "); + System.exit(1); + } + + // Create the context + JavaStreamingContext ssc = new JavaStreamingContext(args[0], "QueueStream", new Duration(1000)); + + // Create the queue through which RDDs can be pushed to + // a QueueInputDStream + Queue> rddQueue = new LinkedList>(); + + // Create and push some RDDs into the queue + List list = Lists.newArrayList(); + for (int i = 0; i < 1000; i++) { + list.add(i); + } + + for (int i = 0; i < 30; i++) { + rddQueue.add(ssc.sc().parallelize(list)); + } + + + // Create the QueueInputDStream and use it do some processing + JavaDStream inputStream = ssc.queueStream(rddQueue); + JavaPairDStream mappedStream = inputStream.map( + new PairFunction() { + @Override + public Tuple2 call(Integer i) throws Exception { + return new Tuple2(i % 10, 1); + } + }); + JavaPairDStream reducedStream = mappedStream.reduceByKey( + new Function2() { + @Override + public Integer call(Integer i1, Integer i2) throws Exception { + return i1 + i2; + } + }); + + reducedStream.print(); + ssc.start(); + } +} diff --git a/project/SparkBuild.scala b/project/SparkBuild.scala index 39db4be842..d5cda347a4 100644 --- a/project/SparkBuild.scala +++ b/project/SparkBuild.scala @@ -162,8 +162,7 @@ object SparkBuild extends Build { def streamingSettings = sharedSettings ++ Seq( name := "spark-streaming", libraryDependencies ++= Seq( - "com.github.sgroschupf" % "zkclient" % "0.1", - "junit" % "junit" % "4.8.1") + "com.github.sgroschupf" % "zkclient" % "0.1") ) ++ assemblySettings ++ extraAssemblySettings def extraAssemblySettings() = Seq(test in assembly := {}) ++ Seq( diff --git a/streaming/src/main/scala/spark/streaming/api/java/JavaStreamingContext.scala b/streaming/src/main/scala/spark/streaming/api/java/JavaStreamingContext.scala index accac82e09..f82e6a37cc 100644 --- 
a/streaming/src/main/scala/spark/streaming/api/java/JavaStreamingContext.scala +++ b/streaming/src/main/scala/spark/streaming/api/java/JavaStreamingContext.scala @@ -10,6 +10,7 @@ import spark.api.java.function.{Function => JFunction, Function2 => JFunction2} import org.apache.hadoop.mapreduce.{InputFormat => NewInputFormat} import java.io.InputStream import java.util.{Map => JMap} +import spark.api.java.{JavaSparkContext, JavaRDD} /** * A StreamingContext is the main entry point for Spark Streaming functionality. Besides the basic @@ -39,6 +40,9 @@ class JavaStreamingContext(val ssc: StreamingContext) { */ def this(path: String) = this (new StreamingContext(path)) + /** The underlying SparkContext */ + val sc: JavaSparkContext = new JavaSparkContext(ssc.sc) + /** * Create an input stream that pulls messages form a Kafka Broker. * @param hostname Zookeper hostname. @@ -254,6 +258,60 @@ class JavaStreamingContext(val ssc: StreamingContext) { ssc.registerOutputStream(outputStream.dstream) } + /** + * Creates a input stream from an queue of RDDs. In each batch, + * it will process either one or all of the RDDs returned by the queue. + * + * NOTE: changes to the queue after the stream is created will not be recognized. + * @param queue Queue of RDDs + * @tparam T Type of objects in the RDD + */ + def queueStream[T](queue: java.util.Queue[JavaRDD[T]]): JavaDStream[T] = { + implicit val cm: ClassManifest[T] = + implicitly[ClassManifest[AnyRef]].asInstanceOf[ClassManifest[T]] + val sQueue = new scala.collection.mutable.Queue[spark.RDD[T]] + sQueue.enqueue(queue.map(_.rdd).toSeq: _*) + ssc.queueStream(sQueue) + } + + /** + * Creates a input stream from an queue of RDDs. In each batch, + * it will process either one or all of the RDDs returned by the queue. + * + * NOTE: changes to the queue after the stream is created will not be recognized. + * @param queue Queue of RDDs + * @param oneAtATime Whether only one RDD should be consumed from the queue in every interval + * @tparam T Type of objects in the RDD + */ + def queueStream[T](queue: java.util.Queue[JavaRDD[T]], oneAtATime: Boolean): JavaDStream[T] = { + implicit val cm: ClassManifest[T] = + implicitly[ClassManifest[AnyRef]].asInstanceOf[ClassManifest[T]] + val sQueue = new scala.collection.mutable.Queue[spark.RDD[T]] + sQueue.enqueue(queue.map(_.rdd).toSeq: _*) + ssc.queueStream(sQueue, oneAtATime) + } + + /** + * Creates a input stream from an queue of RDDs. In each batch, + * it will process either one or all of the RDDs returned by the queue. + * + * NOTE: changes to the queue after the stream is created will not be recognized. + * @param queue Queue of RDDs + * @param oneAtATime Whether only one RDD should be consumed from the queue in every interval + * @param defaultRDD Default RDD is returned by the DStream when the queue is empty + * @tparam T Type of objects in the RDD + */ + def queueStream[T]( + queue: java.util.Queue[JavaRDD[T]], + oneAtATime: Boolean, + defaultRDD: JavaRDD[T]): JavaDStream[T] = { + implicit val cm: ClassManifest[T] = + implicitly[ClassManifest[AnyRef]].asInstanceOf[ClassManifest[T]] + val sQueue = new scala.collection.mutable.Queue[spark.RDD[T]] + sQueue.enqueue(queue.map(_.rdd).toSeq: _*) + ssc.queueStream(sQueue, oneAtATime, defaultRDD.rdd) + } + /** * Sets the context to periodically checkpoint the DStream operations for master * fault-tolerance. By default, the graph will be checkpointed every batch interval. 
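
For reference, a minimal sketch (not part of the patch) of how the two extra queueStream overloads added above might be called from Java. Unlike the JavaQueueStream example earlier in this patch, it passes oneAtATime and a default RDD explicitly; the class name, master URL, batch size and sample data are placeholders, and the output pattern simply mirrors JavaQueueStream.

import scala.Tuple2;
import spark.api.java.JavaRDD;
import spark.api.java.function.Function2;
import spark.api.java.function.PairFunction;
import spark.streaming.Duration;
import spark.streaming.api.java.JavaDStream;
import spark.streaming.api.java.JavaPairDStream;
import spark.streaming.api.java.JavaStreamingContext;

import java.util.Arrays;
import java.util.LinkedList;
import java.util.Queue;

// Sketch only: exercises queueStream(queue, oneAtATime, defaultRDD).
public class JavaQueueStreamDefault {
  public static void main(String[] args) {
    JavaStreamingContext ssc =
        new JavaStreamingContext("local[2]", "QueueStreamDefault", new Duration(1000));

    // RDDs queued up front; changes to the queue after this point are not picked up.
    Queue<JavaRDD<Integer>> rddQueue = new LinkedList<JavaRDD<Integer>>();
    rddQueue.add(ssc.sc().parallelize(Arrays.asList(1, 2, 3)));
    rddQueue.add(ssc.sc().parallelize(Arrays.asList(4, 5, 6)));

    // oneAtATime = true consumes a single queued RDD per batch; once the queue is
    // drained, every batch emits defaultRDD instead of an empty RDD.
    JavaRDD<Integer> defaultRDD = ssc.sc().parallelize(Arrays.asList(0));
    JavaDStream<Integer> inputStream = ssc.queueStream(rddQueue, true, defaultRDD);

    // Same output pattern as the JavaQueueStream example: count values modulo 10.
    JavaPairDStream<Integer, Integer> reduced = inputStream.map(
        new PairFunction<Integer, Integer, Integer>() {
          @Override
          public Tuple2<Integer, Integer> call(Integer i) throws Exception {
            return new Tuple2<Integer, Integer>(i % 10, 1);
          }
        }).reduceByKey(
        new Function2<Integer, Integer, Integer>() {
          @Override
          public Integer call(Integer i1, Integer i2) throws Exception {
            return i1 + i2;
          }
        });

    reduced.print();
    ssc.start();
  }
}
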
diff --git a/streaming/src/test/scala/spark/streaming/JavaAPISuite.java b/streaming/src/test/scala/spark/streaming/JavaAPISuite.java index 374793b57e..8c94e13e65 100644 --- a/streaming/src/test/scala/spark/streaming/JavaAPISuite.java +++ b/streaming/src/test/scala/spark/streaming/JavaAPISuite.java @@ -12,6 +12,7 @@ import org.junit.Test; import scala.Tuple2; import spark.HashPartitioner; import spark.api.java.JavaRDD; +import spark.api.java.JavaSparkContext; import spark.api.java.function.*; import spark.storage.StorageLevel; import spark.streaming.api.java.JavaDStream; @@ -28,17 +29,17 @@ import java.util.*; // serialized, as an alternative to converting these anonymous classes to static inner classes; // see http://stackoverflow.com/questions/758570/. public class JavaAPISuite implements Serializable { - private transient JavaStreamingContext sc; + private transient JavaStreamingContext ssc; @Before public void setUp() { - sc = new JavaStreamingContext("local[2]", "test", new Duration(1000)); + ssc = new JavaStreamingContext("local[2]", "test", new Duration(1000)); } @After public void tearDown() { - sc.stop(); - sc = null; + ssc.stop(); + ssc = null; // To avoid Akka rebinding to the same port, since it doesn't unbind immediately on shutdown System.clearProperty("spark.master.port"); } @@ -55,10 +56,10 @@ public class JavaAPISuite implements Serializable { Arrays.asList(3L), Arrays.asList(1L)); - JavaDStream stream = JavaTestUtils.attachTestInputStream(sc, inputData, 1); + JavaDStream stream = JavaTestUtils.attachTestInputStream(ssc, inputData, 1); JavaDStream count = stream.count(); JavaTestUtils.attachTestOutputStream(count); - List> result = JavaTestUtils.runStreams(sc, 3, 3); + List> result = JavaTestUtils.runStreams(ssc, 3, 3); assertOrderInvariantEquals(expected, result); } @@ -72,7 +73,7 @@ public class JavaAPISuite implements Serializable { Arrays.asList(5,5), Arrays.asList(9,4)); - JavaDStream stream = JavaTestUtils.attachTestInputStream(sc, inputData, 1); + JavaDStream stream = JavaTestUtils.attachTestInputStream(ssc, inputData, 1); JavaDStream letterCount = stream.map(new Function() { @Override public Integer call(String s) throws Exception { @@ -80,7 +81,7 @@ public class JavaAPISuite implements Serializable { } }); JavaTestUtils.attachTestOutputStream(letterCount); - List> result = JavaTestUtils.runStreams(sc, 2, 2); + List> result = JavaTestUtils.runStreams(ssc, 2, 2); assertOrderInvariantEquals(expected, result); } @@ -98,10 +99,10 @@ public class JavaAPISuite implements Serializable { Arrays.asList(7,8,9,4,5,6), Arrays.asList(7,8,9)); - JavaDStream stream = JavaTestUtils.attachTestInputStream(sc, inputData, 1); + JavaDStream stream = JavaTestUtils.attachTestInputStream(ssc, inputData, 1); JavaDStream windowed = stream.window(new Duration(2000)); JavaTestUtils.attachTestOutputStream(windowed); - List> result = JavaTestUtils.runStreams(sc, 4, 4); + List> result = JavaTestUtils.runStreams(ssc, 4, 4); assertOrderInvariantEquals(expected, result); } @@ -122,10 +123,10 @@ public class JavaAPISuite implements Serializable { Arrays.asList(7,8,9,10,11,12,13,14,15,16,17,18), Arrays.asList(13,14,15,16,17,18)); - JavaDStream stream = JavaTestUtils.attachTestInputStream(sc, inputData, 1); + JavaDStream stream = JavaTestUtils.attachTestInputStream(ssc, inputData, 1); JavaDStream windowed = stream.window(new Duration(4000), new Duration(2000)); JavaTestUtils.attachTestOutputStream(windowed); - List> result = JavaTestUtils.runStreams(sc, 8, 4); + List> result = 
JavaTestUtils.runStreams(ssc, 8, 4); assertOrderInvariantEquals(expected, result); } @@ -145,10 +146,10 @@ public class JavaAPISuite implements Serializable { Arrays.asList(7,8,9,10,11,12), Arrays.asList(13,14,15,16,17,18)); - JavaDStream stream = JavaTestUtils.attachTestInputStream(sc, inputData, 1); + JavaDStream stream = JavaTestUtils.attachTestInputStream(ssc, inputData, 1); JavaDStream windowed = stream.tumble(new Duration(2000)); JavaTestUtils.attachTestOutputStream(windowed); - List> result = JavaTestUtils.runStreams(sc, 6, 3); + List> result = JavaTestUtils.runStreams(ssc, 6, 3); assertOrderInvariantEquals(expected, result); } @@ -163,7 +164,7 @@ public class JavaAPISuite implements Serializable { Arrays.asList("giants"), Arrays.asList("yankees")); - JavaDStream stream = JavaTestUtils.attachTestInputStream(sc, inputData, 1); + JavaDStream stream = JavaTestUtils.attachTestInputStream(ssc, inputData, 1); JavaDStream filtered = stream.filter(new Function() { @Override public Boolean call(String s) throws Exception { @@ -171,7 +172,7 @@ public class JavaAPISuite implements Serializable { } }); JavaTestUtils.attachTestOutputStream(filtered); - List> result = JavaTestUtils.runStreams(sc, 2, 2); + List> result = JavaTestUtils.runStreams(ssc, 2, 2); assertOrderInvariantEquals(expected, result); } @@ -186,10 +187,10 @@ public class JavaAPISuite implements Serializable { Arrays.asList(Arrays.asList("giants", "dodgers")), Arrays.asList(Arrays.asList("yankees", "red socks"))); - JavaDStream stream = JavaTestUtils.attachTestInputStream(sc, inputData, 1); + JavaDStream stream = JavaTestUtils.attachTestInputStream(ssc, inputData, 1); JavaDStream glommed = stream.glom(); JavaTestUtils.attachTestOutputStream(glommed); - List>> result = JavaTestUtils.runStreams(sc, 2, 2); + List>> result = JavaTestUtils.runStreams(ssc, 2, 2); Assert.assertEquals(expected, result); } @@ -204,7 +205,7 @@ public class JavaAPISuite implements Serializable { Arrays.asList("GIANTSDODGERS"), Arrays.asList("YANKEESRED SOCKS")); - JavaDStream stream = JavaTestUtils.attachTestInputStream(sc, inputData, 1); + JavaDStream stream = JavaTestUtils.attachTestInputStream(ssc, inputData, 1); JavaDStream mapped = stream.mapPartitions(new FlatMapFunction, String>() { @Override public Iterable call(Iterator in) { @@ -216,7 +217,7 @@ public class JavaAPISuite implements Serializable { } }); JavaTestUtils.attachTestOutputStream(mapped); - List>> result = JavaTestUtils.runStreams(sc, 2, 2); + List>> result = JavaTestUtils.runStreams(ssc, 2, 2); Assert.assertEquals(expected, result); } @@ -247,10 +248,10 @@ public class JavaAPISuite implements Serializable { Arrays.asList(15), Arrays.asList(24)); - JavaDStream stream = JavaTestUtils.attachTestInputStream(sc, inputData, 1); + JavaDStream stream = JavaTestUtils.attachTestInputStream(ssc, inputData, 1); JavaDStream reduced = stream.reduce(new IntegerSum()); JavaTestUtils.attachTestOutputStream(reduced); - List> result = JavaTestUtils.runStreams(sc, 3, 3); + List> result = JavaTestUtils.runStreams(ssc, 3, 3); Assert.assertEquals(expected, result); } @@ -268,15 +269,38 @@ public class JavaAPISuite implements Serializable { Arrays.asList(39), Arrays.asList(24)); - JavaDStream stream = JavaTestUtils.attachTestInputStream(sc, inputData, 1); + JavaDStream stream = JavaTestUtils.attachTestInputStream(ssc, inputData, 1); JavaDStream reducedWindowed = stream.reduceByWindow(new IntegerSum(), new IntegerDifference(), new Duration(2000), new Duration(1000)); 
JavaTestUtils.attachTestOutputStream(reducedWindowed); - List> result = JavaTestUtils.runStreams(sc, 4, 4); + List> result = JavaTestUtils.runStreams(ssc, 4, 4); Assert.assertEquals(expected, result); } + @Test + public void testQueueStream() { + List> expected = Arrays.asList( + Arrays.asList(1,2,3), + Arrays.asList(4,5,6), + Arrays.asList(7,8,9)); + + JavaSparkContext jsc = new JavaSparkContext(ssc.ssc().sc()); + JavaRDD rdd1 = ssc.sc().parallelize(Arrays.asList(1,2,3)); + JavaRDD rdd2 = ssc.sc().parallelize(Arrays.asList(4,5,6)); + JavaRDD rdd3 = ssc.sc().parallelize(Arrays.asList(7,8,9)); + + LinkedList> rdds = Lists.newLinkedList(); + rdds.add(rdd1); + rdds.add(rdd2); + rdds.add(rdd3); + + JavaDStream stream = ssc.queueStream(rdds); + JavaTestUtils.attachTestOutputStream(stream); + List> result = JavaTestUtils.runStreams(ssc, 3, 3); + Assert.assertEquals(expected, result); + } + @Test public void testTransform() { List> inputData = Arrays.asList( @@ -289,7 +313,7 @@ public class JavaAPISuite implements Serializable { Arrays.asList(6,7,8), Arrays.asList(9,10,11)); - JavaDStream stream = JavaTestUtils.attachTestInputStream(sc, inputData, 1); + JavaDStream stream = JavaTestUtils.attachTestInputStream(ssc, inputData, 1); JavaDStream transformed = stream.transform(new Function, JavaRDD>() { @Override public JavaRDD call(JavaRDD in) throws Exception { @@ -301,7 +325,7 @@ public class JavaAPISuite implements Serializable { }); }}); JavaTestUtils.attachTestOutputStream(transformed); - List> result = JavaTestUtils.runStreams(sc, 3, 3); + List> result = JavaTestUtils.runStreams(ssc, 3, 3); assertOrderInvariantEquals(expected, result); } @@ -318,7 +342,7 @@ public class JavaAPISuite implements Serializable { Arrays.asList("b", "o", "o", "d","o","d","g","e","r","s"), Arrays.asList("a","t","h","l","e","t","i","c","s")); - JavaDStream stream = JavaTestUtils.attachTestInputStream(sc, inputData, 1); + JavaDStream stream = JavaTestUtils.attachTestInputStream(ssc, inputData, 1); JavaDStream flatMapped = stream.flatMap(new FlatMapFunction() { @Override public Iterable call(String x) { @@ -326,7 +350,7 @@ public class JavaAPISuite implements Serializable { } }); JavaTestUtils.attachTestOutputStream(flatMapped); - List> result = JavaTestUtils.runStreams(sc, 3, 3); + List> result = JavaTestUtils.runStreams(ssc, 3, 3); assertOrderInvariantEquals(expected, result); } @@ -365,7 +389,7 @@ public class JavaAPISuite implements Serializable { new Tuple2(9, "c"), new Tuple2(9, "s"))); - JavaDStream stream = JavaTestUtils.attachTestInputStream(sc, inputData, 1); + JavaDStream stream = JavaTestUtils.attachTestInputStream(ssc, inputData, 1); JavaPairDStream flatMapped = stream.flatMap(new PairFlatMapFunction() { @Override public Iterable> call(String in) throws Exception { @@ -377,7 +401,7 @@ public class JavaAPISuite implements Serializable { } }); JavaTestUtils.attachTestOutputStream(flatMapped); - List>> result = JavaTestUtils.runStreams(sc, 3, 3); + List>> result = JavaTestUtils.runStreams(ssc, 3, 3); Assert.assertEquals(expected, result); } @@ -399,12 +423,12 @@ public class JavaAPISuite implements Serializable { Arrays.asList(2,2,5,5), Arrays.asList(3,3,6,6)); - JavaDStream stream1 = JavaTestUtils.attachTestInputStream(sc, inputData1, 2); - JavaDStream stream2 = JavaTestUtils.attachTestInputStream(sc, inputData2, 2); + JavaDStream stream1 = JavaTestUtils.attachTestInputStream(ssc, inputData1, 2); + JavaDStream stream2 = JavaTestUtils.attachTestInputStream(ssc, inputData2, 2); JavaDStream unioned = 
stream1.union(stream2); JavaTestUtils.attachTestOutputStream(unioned); - List> result = JavaTestUtils.runStreams(sc, 3, 3); + List> result = JavaTestUtils.runStreams(ssc, 3, 3); assertOrderInvariantEquals(expected, result); } @@ -436,7 +460,7 @@ public class JavaAPISuite implements Serializable { Arrays.asList(new Tuple2("giants", 6)), Arrays.asList(new Tuple2("yankees", 7))); - JavaDStream stream = JavaTestUtils.attachTestInputStream(sc, inputData, 1); + JavaDStream stream = JavaTestUtils.attachTestInputStream(ssc, inputData, 1); JavaPairDStream pairStream = stream.map( new PairFunction() { @Override @@ -453,7 +477,7 @@ public class JavaAPISuite implements Serializable { } }); JavaTestUtils.attachTestOutputStream(filtered); - List>> result = JavaTestUtils.runStreams(sc, 2, 2); + List>> result = JavaTestUtils.runStreams(ssc, 2, 2); Assert.assertEquals(expected, result); } @@ -492,12 +516,12 @@ public class JavaAPISuite implements Serializable { new Tuple2>("california", Arrays.asList("sharks", "ducks")), new Tuple2>("new york", Arrays.asList("rangers", "islanders")))); - JavaDStream> stream = JavaTestUtils.attachTestInputStream(sc, inputData, 1); + JavaDStream> stream = JavaTestUtils.attachTestInputStream(ssc, inputData, 1); JavaPairDStream pairStream = JavaPairDStream.fromJavaDStream(stream); JavaPairDStream> grouped = pairStream.groupByKey(); JavaTestUtils.attachTestOutputStream(grouped); - List>>> result = JavaTestUtils.runStreams(sc, 2, 2); + List>>> result = JavaTestUtils.runStreams(ssc, 2, 2); Assert.assertEquals(expected, result); } @@ -515,13 +539,13 @@ public class JavaAPISuite implements Serializable { new Tuple2("new york", 4))); JavaDStream> stream = JavaTestUtils.attachTestInputStream( - sc, inputData, 1); + ssc, inputData, 1); JavaPairDStream pairStream = JavaPairDStream.fromJavaDStream(stream); JavaPairDStream reduced = pairStream.reduceByKey(new IntegerSum()); JavaTestUtils.attachTestOutputStream(reduced); - List>> result = JavaTestUtils.runStreams(sc, 2, 2); + List>> result = JavaTestUtils.runStreams(ssc, 2, 2); Assert.assertEquals(expected, result); } @@ -539,7 +563,7 @@ public class JavaAPISuite implements Serializable { new Tuple2("new york", 4))); JavaDStream> stream = JavaTestUtils.attachTestInputStream( - sc, inputData, 1); + ssc, inputData, 1); JavaPairDStream pairStream = JavaPairDStream.fromJavaDStream(stream); JavaPairDStream combined = pairStream.combineByKey( @@ -551,7 +575,7 @@ public class JavaAPISuite implements Serializable { }, new IntegerSum(), new IntegerSum(), new HashPartitioner(2)); JavaTestUtils.attachTestOutputStream(combined); - List>> result = JavaTestUtils.runStreams(sc, 2, 2); + List>> result = JavaTestUtils.runStreams(ssc, 2, 2); Assert.assertEquals(expected, result); } @@ -569,12 +593,12 @@ public class JavaAPISuite implements Serializable { new Tuple2("new york", 2L))); JavaDStream> stream = JavaTestUtils.attachTestInputStream( - sc, inputData, 1); + ssc, inputData, 1); JavaPairDStream pairStream = JavaPairDStream.fromJavaDStream(stream); JavaPairDStream counted = pairStream.countByKey(); JavaTestUtils.attachTestOutputStream(counted); - List>> result = JavaTestUtils.runStreams(sc, 2, 2); + List>> result = JavaTestUtils.runStreams(ssc, 2, 2); Assert.assertEquals(expected, result); } @@ -592,13 +616,13 @@ public class JavaAPISuite implements Serializable { Arrays.asList(new Tuple2>("california", Arrays.asList("sharks", "ducks")), new Tuple2>("new york", Arrays.asList("rangers", "islanders")))); - JavaDStream> stream = 
JavaTestUtils.attachTestInputStream(sc, inputData, 1); + JavaDStream> stream = JavaTestUtils.attachTestInputStream(ssc, inputData, 1); JavaPairDStream pairStream = JavaPairDStream.fromJavaDStream(stream); JavaPairDStream> groupWindowed = pairStream.groupByKeyAndWindow(new Duration(2000), new Duration(1000)); JavaTestUtils.attachTestOutputStream(groupWindowed); - List>>> result = JavaTestUtils.runStreams(sc, 3, 3); + List>>> result = JavaTestUtils.runStreams(ssc, 3, 3); Assert.assertEquals(expected, result); } @@ -615,13 +639,13 @@ public class JavaAPISuite implements Serializable { Arrays.asList(new Tuple2("california", 10), new Tuple2("new york", 4))); - JavaDStream> stream = JavaTestUtils.attachTestInputStream(sc, inputData, 1); + JavaDStream> stream = JavaTestUtils.attachTestInputStream(ssc, inputData, 1); JavaPairDStream pairStream = JavaPairDStream.fromJavaDStream(stream); JavaPairDStream reduceWindowed = pairStream.reduceByKeyAndWindow(new IntegerSum(), new Duration(2000), new Duration(1000)); JavaTestUtils.attachTestOutputStream(reduceWindowed); - List>> result = JavaTestUtils.runStreams(sc, 3, 3); + List>> result = JavaTestUtils.runStreams(ssc, 3, 3); Assert.assertEquals(expected, result); } @@ -638,7 +662,7 @@ public class JavaAPISuite implements Serializable { Arrays.asList(new Tuple2("california", 14), new Tuple2("new york", 9))); - JavaDStream> stream = JavaTestUtils.attachTestInputStream(sc, inputData, 1); + JavaDStream> stream = JavaTestUtils.attachTestInputStream(ssc, inputData, 1); JavaPairDStream pairStream = JavaPairDStream.fromJavaDStream(stream); JavaPairDStream updated = pairStream.updateStateByKey( @@ -656,7 +680,7 @@ public class JavaAPISuite implements Serializable { } }); JavaTestUtils.attachTestOutputStream(updated); - List>> result = JavaTestUtils.runStreams(sc, 3, 3); + List>> result = JavaTestUtils.runStreams(ssc, 3, 3); Assert.assertEquals(expected, result); } @@ -673,13 +697,13 @@ public class JavaAPISuite implements Serializable { Arrays.asList(new Tuple2("california", 10), new Tuple2("new york", 4))); - JavaDStream> stream = JavaTestUtils.attachTestInputStream(sc, inputData, 1); + JavaDStream> stream = JavaTestUtils.attachTestInputStream(ssc, inputData, 1); JavaPairDStream pairStream = JavaPairDStream.fromJavaDStream(stream); JavaPairDStream reduceWindowed = pairStream.reduceByKeyAndWindow(new IntegerSum(), new IntegerDifference(), new Duration(2000), new Duration(1000)); JavaTestUtils.attachTestOutputStream(reduceWindowed); - List>> result = JavaTestUtils.runStreams(sc, 3, 3); + List>> result = JavaTestUtils.runStreams(ssc, 3, 3); Assert.assertEquals(expected, result); } @@ -700,13 +724,13 @@ public class JavaAPISuite implements Serializable { new Tuple2("new york", 2L))); JavaDStream> stream = JavaTestUtils.attachTestInputStream( - sc, inputData, 1); + ssc, inputData, 1); JavaPairDStream pairStream = JavaPairDStream.fromJavaDStream(stream); JavaPairDStream counted = pairStream.countByKeyAndWindow(new Duration(2000), new Duration(1000)); JavaTestUtils.attachTestOutputStream(counted); - List>> result = JavaTestUtils.runStreams(sc, 3, 3); + List>> result = JavaTestUtils.runStreams(ssc, 3, 3); Assert.assertEquals(expected, result); } @@ -726,7 +750,7 @@ public class JavaAPISuite implements Serializable { new Tuple2("new york", "ISLANDERS"))); JavaDStream> stream = JavaTestUtils.attachTestInputStream( - sc, inputData, 1); + ssc, inputData, 1); JavaPairDStream pairStream = JavaPairDStream.fromJavaDStream(stream); JavaPairDStream mapped = 
pairStream.mapValues(new Function() { @@ -737,7 +761,7 @@ public class JavaAPISuite implements Serializable { }); JavaTestUtils.attachTestOutputStream(mapped); - List>> result = JavaTestUtils.runStreams(sc, 2, 2); + List>> result = JavaTestUtils.runStreams(ssc, 2, 2); Assert.assertEquals(expected, result); } @@ -765,7 +789,7 @@ public class JavaAPISuite implements Serializable { new Tuple2("new york", "islanders2"))); JavaDStream> stream = JavaTestUtils.attachTestInputStream( - sc, inputData, 1); + ssc, inputData, 1); JavaPairDStream pairStream = JavaPairDStream.fromJavaDStream(stream); @@ -781,7 +805,7 @@ public class JavaAPISuite implements Serializable { }); JavaTestUtils.attachTestOutputStream(flatMapped); - List>> result = JavaTestUtils.runStreams(sc, 2, 2); + List>> result = JavaTestUtils.runStreams(ssc, 2, 2); Assert.assertEquals(expected, result); } @@ -815,16 +839,16 @@ public class JavaAPISuite implements Serializable { JavaDStream> stream1 = JavaTestUtils.attachTestInputStream( - sc, stringStringKVStream1, 1); + ssc, stringStringKVStream1, 1); JavaPairDStream pairStream1 = JavaPairDStream.fromJavaDStream(stream1); JavaDStream> stream2 = JavaTestUtils.attachTestInputStream( - sc, stringStringKVStream2, 1); + ssc, stringStringKVStream2, 1); JavaPairDStream pairStream2 = JavaPairDStream.fromJavaDStream(stream2); JavaPairDStream, List>> grouped = pairStream1.cogroup(pairStream2); JavaTestUtils.attachTestOutputStream(grouped); - List>> result = JavaTestUtils.runStreams(sc, 2, 2); + List>> result = JavaTestUtils.runStreams(ssc, 2, 2); Assert.assertEquals(expected, result); } @@ -858,16 +882,16 @@ public class JavaAPISuite implements Serializable { JavaDStream> stream1 = JavaTestUtils.attachTestInputStream( - sc, stringStringKVStream1, 1); + ssc, stringStringKVStream1, 1); JavaPairDStream pairStream1 = JavaPairDStream.fromJavaDStream(stream1); JavaDStream> stream2 = JavaTestUtils.attachTestInputStream( - sc, stringStringKVStream2, 1); + ssc, stringStringKVStream2, 1); JavaPairDStream pairStream2 = JavaPairDStream.fromJavaDStream(stream2); JavaPairDStream> joined = pairStream1.join(pairStream2); JavaTestUtils.attachTestOutputStream(joined); - List>> result = JavaTestUtils.runStreams(sc, 2, 2); + List>> result = JavaTestUtils.runStreams(ssc, 2, 2); Assert.assertEquals(expected, result); } @@ -887,9 +911,9 @@ public class JavaAPISuite implements Serializable { File tempDir = Files.createTempDir(); - sc.checkpoint(tempDir.getAbsolutePath(), new Duration(1000)); + ssc.checkpoint(tempDir.getAbsolutePath(), new Duration(1000)); - JavaDStream stream = JavaCheckpointTestUtils.attachTestInputStream(sc, inputData, 1); + JavaDStream stream = JavaCheckpointTestUtils.attachTestInputStream(ssc, inputData, 1); JavaDStream letterCount = stream.map(new Function() { @Override public Integer call(String s) throws Exception { @@ -897,15 +921,15 @@ public class JavaAPISuite implements Serializable { } }); JavaCheckpointTestUtils.attachTestOutputStream(letterCount); - List> initialResult = JavaTestUtils.runStreams(sc, 1, 1); + List> initialResult = JavaTestUtils.runStreams(ssc, 1, 1); assertOrderInvariantEquals(expectedInitial, initialResult); Thread.sleep(1000); - sc.stop(); - sc = new JavaStreamingContext(tempDir.getAbsolutePath()); - sc.start(); - List> finalResult = JavaCheckpointTestUtils.runStreams(sc, 2, 2); + ssc.stop(); + ssc = new JavaStreamingContext(tempDir.getAbsolutePath()); + ssc.start(); + List> finalResult = JavaCheckpointTestUtils.runStreams(ssc, 2, 2); 
assertOrderInvariantEquals(expectedFinal, finalResult); } @@ -922,7 +946,7 @@ public class JavaAPISuite implements Serializable { Arrays.asList(1,4), Arrays.asList(8,7)); - JavaDStream stream = JavaCheckpointTestUtils.attachTestInputStream(sc, inputData, 1); + JavaDStream stream = JavaCheckpointTestUtils.attachTestInputStream(ssc, inputData, 1); JavaDStream letterCount = stream.map(new Function() { @Override public Integer call(String s) throws Exception { @@ -933,7 +957,7 @@ public class JavaAPISuite implements Serializable { letterCount.checkpoint(new Duration(1000)); - List> result1 = JavaCheckpointTestUtils.runStreams(sc, 3, 3); + List> result1 = JavaCheckpointTestUtils.runStreams(ssc, 3, 3); assertOrderInvariantEquals(expected, result1); } */ @@ -945,15 +969,15 @@ public class JavaAPISuite implements Serializable { public void testKafkaStream() { HashMap topics = Maps.newHashMap(); HashMap offsets = Maps.newHashMap(); - JavaDStream test1 = sc.kafkaStream("localhost", 12345, "group", topics); - JavaDStream test2 = sc.kafkaStream("localhost", 12345, "group", topics, offsets); - JavaDStream test3 = sc.kafkaStream("localhost", 12345, "group", topics, offsets, + JavaDStream test1 = ssc.kafkaStream("localhost", 12345, "group", topics); + JavaDStream test2 = ssc.kafkaStream("localhost", 12345, "group", topics, offsets); + JavaDStream test3 = ssc.kafkaStream("localhost", 12345, "group", topics, offsets, StorageLevel.MEMORY_AND_DISK()); } @Test public void testNetworkTextStream() { - JavaDStream test = sc.networkTextStream("localhost", 12345); + JavaDStream test = ssc.networkTextStream("localhost", 12345); } @Test @@ -973,7 +997,7 @@ public class JavaAPISuite implements Serializable { } } - JavaDStream test = sc.networkStream( + JavaDStream test = ssc.networkStream( "localhost", 12345, new Converter(), @@ -982,22 +1006,22 @@ public class JavaAPISuite implements Serializable { @Test public void testTextFileStream() { - JavaDStream test = sc.textFileStream("/tmp/foo"); + JavaDStream test = ssc.textFileStream("/tmp/foo"); } @Test public void testRawNetworkStream() { - JavaDStream test = sc.rawNetworkStream("localhost", 12345); + JavaDStream test = ssc.rawNetworkStream("localhost", 12345); } @Test public void testFlumeStream() { - JavaDStream test = sc.flumeStream("localhost", 12345); + JavaDStream test = ssc.flumeStream("localhost", 12345); } @Test public void testFileStream() { JavaPairDStream foo = - sc.fileStream("/tmp/foo"); + ssc.fileStream("/tmp/foo"); } } From c46dd2de78ae0c13060d0a9d2dea110c655659f0 Mon Sep 17 00:00:00 2001 From: Patrick Wendell Date: Thu, 17 Jan 2013 21:43:17 -0800 Subject: [PATCH 223/226] Moving tests to appropriate directory --- .../src/test/{scala/spark/streaming => java}/JavaAPISuite.java | 0 streaming/src/test/{scala => java}/JavaTestUtils.scala | 0 2 files changed, 0 insertions(+), 0 deletions(-) rename streaming/src/test/{scala/spark/streaming => java}/JavaAPISuite.java (100%) rename streaming/src/test/{scala => java}/JavaTestUtils.scala (100%) diff --git a/streaming/src/test/scala/spark/streaming/JavaAPISuite.java b/streaming/src/test/java/JavaAPISuite.java similarity index 100% rename from streaming/src/test/scala/spark/streaming/JavaAPISuite.java rename to streaming/src/test/java/JavaAPISuite.java diff --git a/streaming/src/test/scala/JavaTestUtils.scala b/streaming/src/test/java/JavaTestUtils.scala similarity index 100% rename from streaming/src/test/scala/JavaTestUtils.scala rename to streaming/src/test/java/JavaTestUtils.scala From 
12b72b3e73798a5a2cc6c745610e135b1d6825a6 Mon Sep 17 00:00:00 2001 From: Patrick Wendell Date: Thu, 17 Jan 2013 22:37:56 -0800 Subject: [PATCH 224/226] NetworkWordCount example --- .../examples/JavaNetworkWordCount.java | 62 +++++++++++++++++++ .../streaming/examples/NetworkWordCount.scala | 2 +- 2 files changed, 63 insertions(+), 1 deletion(-) create mode 100644 examples/src/main/scala/spark/streaming/examples/JavaNetworkWordCount.java diff --git a/examples/src/main/scala/spark/streaming/examples/JavaNetworkWordCount.java b/examples/src/main/scala/spark/streaming/examples/JavaNetworkWordCount.java new file mode 100644 index 0000000000..4299febfd6 --- /dev/null +++ b/examples/src/main/scala/spark/streaming/examples/JavaNetworkWordCount.java @@ -0,0 +1,62 @@ +package spark.streaming.examples; + +import com.google.common.collect.Lists; +import scala.Tuple2; +import spark.api.java.function.FlatMapFunction; +import spark.api.java.function.Function2; +import spark.api.java.function.PairFunction; +import spark.streaming.Duration; +import spark.streaming.api.java.JavaDStream; +import spark.streaming.api.java.JavaPairDStream; +import spark.streaming.api.java.JavaStreamingContext; + +/** + * Counts words in UTF8 encoded, '\n' delimited text received from the network every second. + * Usage: NetworkWordCount + * is the Spark master URL. In local mode, should be 'local[n]' with n > 1. + * and describe the TCP server that Spark Streaming would connect to receive data. + * + * To run this on your local machine, you need to first run a Netcat server + * `$ nc -lk 9999` + * and then run the example + * `$ ./run spark.streaming.examples.JavaNetworkWordCount local[2] localhost 9999` + */ +public class JavaNetworkWordCount { + public static void main(String[] args) { + if (args.length < 2) { + System.err.println("Usage: NetworkWordCount \n" + + "In local mode, should be 'local[n]' with n > 1"); + System.exit(1); + } + + // Create the context with a 1 second batch size + JavaStreamingContext ssc = new JavaStreamingContext( + args[0], "NetworkWordCount", new Duration(1000)); + + // Create a NetworkInputDStream on target ip:port and count the + // words in input stream of \n delimited test (eg. 
generated by 'nc') + JavaDStream lines = ssc.networkTextStream(args[1], Integer.parseInt(args[2])); + JavaDStream words = lines.flatMap(new FlatMapFunction() { + @Override + public Iterable call(String x) { + return Lists.newArrayList(x.split(" ")); + } + }); + JavaPairDStream wordCounts = words.map( + new PairFunction() { + @Override + public Tuple2 call(String s) throws Exception { + return new Tuple2(s, 1); + } + }).reduceByKey(new Function2() { + @Override + public Integer call(Integer i1, Integer i2) throws Exception { + return i1 + i2; + } + }); + + wordCounts.print(); + ssc.start(); + + } +} diff --git a/examples/src/main/scala/spark/streaming/examples/NetworkWordCount.scala b/examples/src/main/scala/spark/streaming/examples/NetworkWordCount.scala index 43c01d5db2..32f7d57bea 100644 --- a/examples/src/main/scala/spark/streaming/examples/NetworkWordCount.scala +++ b/examples/src/main/scala/spark/streaming/examples/NetworkWordCount.scala @@ -22,7 +22,7 @@ object NetworkWordCount { System.exit(1) } - // Create the context and set the batch size + // Create the context with a 1 second batch size val ssc = new StreamingContext(args(0), "NetworkWordCount", Seconds(1)) // Create a NetworkInputDStream on target ip:port and count the From 214345ceace634ec9cc83c4c85b233b699e0d219 Mon Sep 17 00:00:00 2001 From: Tathagata Das Date: Sat, 19 Jan 2013 23:50:17 -0800 Subject: [PATCH 225/226] Fixed issue https://spark-project.atlassian.net/browse/STREAMING-29, along with updates to doc comments in SparkContext.checkpoint(). --- core/src/main/scala/spark/RDD.scala | 17 ++++++++--------- .../main/scala/spark/RDDCheckpointData.scala | 2 +- core/src/main/scala/spark/SparkContext.scala | 13 +++++++------ .../main/scala/spark/streaming/DStream.scala | 8 +++++++- 4 files changed, 23 insertions(+), 17 deletions(-) diff --git a/core/src/main/scala/spark/RDD.scala b/core/src/main/scala/spark/RDD.scala index a9f2e86455..e0d2eabb1d 100644 --- a/core/src/main/scala/spark/RDD.scala +++ b/core/src/main/scala/spark/RDD.scala @@ -549,17 +549,16 @@ abstract class RDD[T: ClassManifest]( } /** - * Mark this RDD for checkpointing. The RDD will be saved to a file inside `checkpointDir` - * (set using setCheckpointDir()) and all references to its parent RDDs will be removed. - * This is used to truncate very long lineages. In the current implementation, Spark will save - * this RDD to a file (using saveAsObjectFile()) after the first job using this RDD is done. - * Hence, it is strongly recommended to use checkpoint() on RDDs when - * (i) checkpoint() is called before the any job has been executed on this RDD. - * (ii) This RDD has been made to persist in memory. Otherwise saving it on a file will - * require recomputation. + * Mark this RDD for checkpointing. It will be saved to a file inside the checkpoint + * directory set with SparkContext.setCheckpointDir() and all references to its parent + * RDDs will be removed. This function must be called before any job has been + * executed on this RDD. It is strongly recommended that this RDD is persisted in + * memory, otherwise saving it on a file will require recomputation. 
*/ def checkpoint() { - if (checkpointData.isEmpty) { + if (context.checkpointDir.isEmpty) { + throw new Exception("Checkpoint directory has not been set in the SparkContext") + } else if (checkpointData.isEmpty) { checkpointData = Some(new RDDCheckpointData(this)) checkpointData.get.markForCheckpoint() } diff --git a/core/src/main/scala/spark/RDDCheckpointData.scala b/core/src/main/scala/spark/RDDCheckpointData.scala index d845a522e4..18df530b7d 100644 --- a/core/src/main/scala/spark/RDDCheckpointData.scala +++ b/core/src/main/scala/spark/RDDCheckpointData.scala @@ -63,7 +63,7 @@ extends Logging with Serializable { } // Save to file, and reload it as an RDD - val path = new Path(rdd.context.checkpointDir, "rdd-" + rdd.id).toString + val path = new Path(rdd.context.checkpointDir.get, "rdd-" + rdd.id).toString rdd.context.runJob(rdd, CheckpointRDD.writeToFile(path) _) val newRDD = new CheckpointRDD[T](rdd.context, path) diff --git a/core/src/main/scala/spark/SparkContext.scala b/core/src/main/scala/spark/SparkContext.scala index 88cf357ebf..7f3259d982 100644 --- a/core/src/main/scala/spark/SparkContext.scala +++ b/core/src/main/scala/spark/SparkContext.scala @@ -184,7 +184,7 @@ class SparkContext( private var dagScheduler = new DAGScheduler(taskScheduler) - private[spark] var checkpointDir: String = null + private[spark] var checkpointDir: Option[String] = None // Methods for creating RDDs @@ -595,10 +595,11 @@ class SparkContext( } /** - * Set the directory under which RDDs are going to be checkpointed. This method will - * create this directory and will throw an exception of the path already exists (to avoid - * overwriting existing files may be overwritten). The directory will be deleted on exit - * if indicated. + * Set the directory under which RDDs are going to be checkpointed. The directory must + * be a HDFS path if running on a cluster. If the directory does not exist, it will + * be created. If the directory exists and useExisting is set to true, then the + * exisiting directory will be used. Otherwise an exception will be thrown to + * prevent accidental overriding of checkpoint files in the existing directory. */ def setCheckpointDir(dir: String, useExisting: Boolean = false) { val path = new Path(dir) @@ -610,7 +611,7 @@ class SparkContext( fs.mkdirs(path) } } - checkpointDir = dir + checkpointDir = Some(dir) } /** Default level of parallelism to use when not given by user (e.g. for reduce tasks) */ diff --git a/streaming/src/main/scala/spark/streaming/DStream.scala b/streaming/src/main/scala/spark/streaming/DStream.scala index fbe3cebd6d..c4442b6a0c 100644 --- a/streaming/src/main/scala/spark/streaming/DStream.scala +++ b/streaming/src/main/scala/spark/streaming/DStream.scala @@ -154,10 +154,16 @@ abstract class DStream[T: ClassManifest] ( assert( !mustCheckpoint || checkpointDuration != null, - "The checkpoint interval for " + this.getClass.getSimpleName + " has not been set. " + + "The checkpoint interval for " + this.getClass.getSimpleName + " has not been set." + " Please use DStream.checkpoint() to set the interval." ) + assert( + checkpointDuration == null || ssc.sc.checkpointDir.isDefined, + "The checkpoint directory has not been set. Please use StreamingContext.checkpoint()" + + " or SparkContext.checkpoint() to set the checkpoint directory." 
+ ) + assert( checkpointDuration == null || checkpointDuration >= slideDuration, "The checkpoint interval for " + this.getClass.getSimpleName + " has been set to " + From 33bad85bb9143d41bc5de2068f7e8a8c39928225 Mon Sep 17 00:00:00 2001 From: Tathagata Das Date: Sun, 20 Jan 2013 03:51:11 -0800 Subject: [PATCH 226/226] Fixed streaming testsuite bugs --- streaming/src/test/java/JavaAPISuite.java | 2 ++ .../test/scala/spark/streaming/BasicOperationsSuite.scala | 5 +++++ .../src/test/scala/spark/streaming/CheckpointSuite.scala | 6 +++--- streaming/src/test/scala/spark/streaming/FailureSuite.scala | 3 +++ .../src/test/scala/spark/streaming/InputStreamsSuite.scala | 3 +++ .../src/test/scala/spark/streaming/TestSuiteBase.scala | 6 +++--- .../test/scala/spark/streaming/WindowOperationsSuite.scala | 5 +++++ 7 files changed, 24 insertions(+), 6 deletions(-) diff --git a/streaming/src/test/java/JavaAPISuite.java b/streaming/src/test/java/JavaAPISuite.java index 8c94e13e65..c84e7331c7 100644 --- a/streaming/src/test/java/JavaAPISuite.java +++ b/streaming/src/test/java/JavaAPISuite.java @@ -34,12 +34,14 @@ public class JavaAPISuite implements Serializable { @Before public void setUp() { ssc = new JavaStreamingContext("local[2]", "test", new Duration(1000)); + ssc.checkpoint("checkpoint", new Duration(1000)); } @After public void tearDown() { ssc.stop(); ssc = null; + // To avoid Akka rebinding to the same port, since it doesn't unbind immediately on shutdown System.clearProperty("spark.master.port"); } diff --git a/streaming/src/test/scala/spark/streaming/BasicOperationsSuite.scala b/streaming/src/test/scala/spark/streaming/BasicOperationsSuite.scala index f73f9b1823..bfdf32c73e 100644 --- a/streaming/src/test/scala/spark/streaming/BasicOperationsSuite.scala +++ b/streaming/src/test/scala/spark/streaming/BasicOperationsSuite.scala @@ -8,6 +8,11 @@ class BasicOperationsSuite extends TestSuiteBase { override def framework() = "BasicOperationsSuite" + after { + // To avoid Akka rebinding to the same port, since it doesn't unbind immediately on shutdown + System.clearProperty("spark.master.port") + } + test("map") { val input = Seq(1 to 4, 5 to 8, 9 to 12) testOperation( diff --git a/streaming/src/test/scala/spark/streaming/CheckpointSuite.scala b/streaming/src/test/scala/spark/streaming/CheckpointSuite.scala index 920388bba9..d2f32c189b 100644 --- a/streaming/src/test/scala/spark/streaming/CheckpointSuite.scala +++ b/streaming/src/test/scala/spark/streaming/CheckpointSuite.scala @@ -15,9 +15,11 @@ class CheckpointSuite extends TestSuiteBase with BeforeAndAfter { } after { - if (ssc != null) ssc.stop() FileUtils.deleteDirectory(new File(checkpointDir)) + + // To avoid Akka rebinding to the same port, since it doesn't unbind immediately on shutdown + System.clearProperty("spark.master.port") } var ssc: StreamingContext = null @@ -26,8 +28,6 @@ class CheckpointSuite extends TestSuiteBase with BeforeAndAfter { override def batchDuration = Milliseconds(500) - override def checkpointDir = "checkpoint" - override def checkpointInterval = batchDuration override def actuallyWait = true diff --git a/streaming/src/test/scala/spark/streaming/FailureSuite.scala b/streaming/src/test/scala/spark/streaming/FailureSuite.scala index 4aa428bf64..7493ac1207 100644 --- a/streaming/src/test/scala/spark/streaming/FailureSuite.scala +++ b/streaming/src/test/scala/spark/streaming/FailureSuite.scala @@ -22,6 +22,9 @@ class FailureSuite extends TestSuiteBase with BeforeAndAfter { after { FailureSuite.reset() 
FileUtils.deleteDirectory(new File(checkpointDir)) + + // To avoid Akka rebinding to the same port, since it doesn't unbind immediately on shutdown + System.clearProperty("spark.master.port") } override def framework = "CheckpointSuite" diff --git a/streaming/src/test/scala/spark/streaming/InputStreamsSuite.scala b/streaming/src/test/scala/spark/streaming/InputStreamsSuite.scala index e71ba6ddc1..d7ba7a5d17 100644 --- a/streaming/src/test/scala/spark/streaming/InputStreamsSuite.scala +++ b/streaming/src/test/scala/spark/streaming/InputStreamsSuite.scala @@ -40,6 +40,9 @@ class InputStreamsSuite extends TestSuiteBase with BeforeAndAfter { FileUtils.deleteDirectory(testDir) testDir = null } + + // To avoid Akka rebinding to the same port, since it doesn't unbind immediately on shutdown + System.clearProperty("spark.master.port") } test("network input stream") { diff --git a/streaming/src/test/scala/spark/streaming/TestSuiteBase.scala b/streaming/src/test/scala/spark/streaming/TestSuiteBase.scala index a76f61d4ad..49129f3964 100644 --- a/streaming/src/test/scala/spark/streaming/TestSuiteBase.scala +++ b/streaming/src/test/scala/spark/streaming/TestSuiteBase.scala @@ -10,7 +10,7 @@ import collection.mutable.SynchronizedBuffer import java.io.{ObjectInputStream, IOException} -import org.scalatest.FunSuite +import org.scalatest.{BeforeAndAfter, FunSuite} /** * This is a input stream just for the testsuites. This is equivalent to a checkpointable, @@ -56,7 +56,7 @@ class TestOutputStream[T: ClassManifest](parent: DStream[T], val output: ArrayBu * This is the base trait for Spark Streaming testsuites. This provides basic functionality * to run user-defined set of input on user-defined stream operations, and verify the output. */ -trait TestSuiteBase extends FunSuite with Logging { +trait TestSuiteBase extends FunSuite with BeforeAndAfter with Logging { def framework = "TestSuiteBase" @@ -64,7 +64,7 @@ trait TestSuiteBase extends FunSuite with Logging { def batchDuration = Seconds(1) - def checkpointDir = null.asInstanceOf[String] + def checkpointDir = "checkpoint" def checkpointInterval = batchDuration diff --git a/streaming/src/test/scala/spark/streaming/WindowOperationsSuite.scala b/streaming/src/test/scala/spark/streaming/WindowOperationsSuite.scala index f9ba1f20f0..0c6e928835 100644 --- a/streaming/src/test/scala/spark/streaming/WindowOperationsSuite.scala +++ b/streaming/src/test/scala/spark/streaming/WindowOperationsSuite.scala @@ -11,6 +11,11 @@ class WindowOperationsSuite extends TestSuiteBase { override def batchDuration = Seconds(1) + after { + // To avoid Akka rebinding to the same port, since it doesn't unbind immediately on shutdown + System.clearProperty("spark.master.port") + } + val largerSlideInput = Seq( Seq(("a", 1)), Seq(("a", 2)), // 1st window from here
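
To make the checkpointing contract from the last two patches concrete, here is a minimal sketch (not part of the patch series; all names and values are assumed placeholders): the inverse-function form of reduceByKeyAndWindow must be checkpointed, so the context's checkpoint directory has to be set via JavaStreamingContext.checkpoint() before the stream graph is started, exactly as the updated test setUp now does. The master URL, host, port, durations and the "checkpoint" directory below are illustrative only.

import scala.Tuple2;
import spark.api.java.function.Function2;
import spark.api.java.function.PairFunction;
import spark.streaming.Duration;
import spark.streaming.api.java.JavaDStream;
import spark.streaming.api.java.JavaPairDStream;
import spark.streaming.api.java.JavaStreamingContext;

// Sketch only: illustrates why the test setUp now calls ssc.checkpoint(...).
public class JavaWindowedWordCount {
  public static void main(String[] args) {
    JavaStreamingContext ssc =
        new JavaStreamingContext("local[2]", "WindowedWordCount", new Duration(1000));

    // Required before start(): the inverse-function form of reduceByKeyAndWindow
    // must checkpoint, and the new assertion in DStream fails fast if no checkpoint
    // directory has been set on the context.
    ssc.checkpoint("checkpoint", new Duration(1000));

    JavaDStream<String> words = ssc.networkTextStream("localhost", 9999);
    JavaPairDStream<String, Integer> ones = words.map(
        new PairFunction<String, String, Integer>() {
          @Override
          public Tuple2<String, Integer> call(String s) throws Exception {
            return new Tuple2<String, Integer>(s, 1);
          }
        });

    // Sum over a 30 second window sliding every second, subtracting the counts that
    // fall out of the window instead of recomputing the whole window each batch.
    JavaPairDStream<String, Integer> windowedCounts = ones.reduceByKeyAndWindow(
        new Function2<Integer, Integer, Integer>() {
          @Override
          public Integer call(Integer i1, Integer i2) throws Exception { return i1 + i2; }
        },
        new Function2<Integer, Integer, Integer>() {
          @Override
          public Integer call(Integer i1, Integer i2) throws Exception { return i1 - i2; }
        },
        new Duration(30000), new Duration(1000));

    windowedCounts.print();
    ssc.start();
  }
}
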

zQ(Lrna|Gn(oIzA7bFe})rSdFopWG^Rx*|_6Q5fHvV*KHvr|H688^yy4!o*u!FKo_a zjtO_K(#CgR2c9K(V3~PLcf*v)P>OzRN>zTcAAcU*!i#p42Na2e4;)Kp2pP0{C$my` zxM!0haLmHXJg~XUPEWqLF=o9?c3kW@`$4c^xc@OrG8MiC`H2MnpzyUDniO^n^}=q8 zHnOu*3k>SBexrn>B_eDUvm86#h7F1*96QotrfHkUV~r_Pn-B&*qV~CgO0*KL#$@q~ zh}w4~E$7VprD9*9E+iAZ5~2~ZP4m+=t+bEBUlR>xuQ)W7Rv zt-w1+AWW(@P;)ih*O@#u{>e3FryY%lP%AtQ%>S^Sonu7OUEzpbrY^ywO_r4%KT3tN zj|D1`6mAnKp2SmO;M^kd3jkV@GQ2oA9c`;ZWBr;CjgFyu0X#sZxkj}KYVh#MW5FGmk8L;2RKTz1mv zlhTiKHj6u;1hH|y!B(3Y_J&`{8Ty83g;Jk`v@PGV$!?LzY3H4WUT4))9B?N-zqVYl z5sdIfp|$S45sU+#^ak0-S2CE|P~1v6zIvj|zoNFYb{r<0Ubj5aL*6~Dcv8rX#pSA=%Y#uGld1My#x;Yz{ zcq?BX8KtJDdVdifH#5J! z%cV|c%q4o-lUx3*Sj81J)q-s{1n9P;0aT1J$O@cj#QWKz=J*9LS}c z(9n>D$fG*50K;C#g%y6-=fO!yt!!1$AENzsA?4+{Dh2Lg zP^V4M_op~NC%b+b!p(xA#i;=&Jjr);R?Wx8(v-Hrtu|)jW!Nn$sN~J58lPL?W98Rv z)~-x55uU0{Z=7w^uTz|VCCR_nHq+>HcKu0_)s*0SlSxAb-%WYC?H{5OW_X_81?2oz z+muon!EHm~Yynl* z`jATb^M-kbFhjgy?z9^~hy&6VxL!3gd+vMEcJkdCsIIku1w5;?*=)8ALoThwzt><)c;;K=(C*i^WY;69Fbv%A03r3n9^utBTCLFrgdg$w zgUZJ@ho>j~(k9iDMB0eAcVDLV5t0GNFvNN!xY|fyxgI}0soKaTL)p(;&0I*hW@rZY zL$WF%`1y2HY4`N+KNkHJ4hkEc-*dt<)PK$i|802xw@LP2=u~+l10yqMBO(J!J%|5E zw2YGfXT`_7ely;fO+r(@V;mVeNVnI0208Cyg}DuzaXcAp|Z&`hKY*7 zBCP&hr(wRGT|{Kx(oZ)}d-)a*%9815RvVLU<3fCYjUuciO^FR*bl@eKv@CkHCuI~r`h`!xTl5bd)Up=!rm1@r4 z*@3GI9@ zcQ&%8mC!S`(EES*`sUzDyKL{+w$rg~+qUhFZQHhO+qT)UI_cQZEo(f33ate%4Q#wzg6xCU)P#CZ%t6wuP;Yh?|{-gNd|-qmzlv|CT$M8`v0InaG;B z{?EF9Srld~TYp<>Bly%Lg`|+n=R=S`E7sFVR;!>yk`qp$NX}Ex6YVVAn*;AHNGJ;X zCfCvzGM%!Sy(`3cT8NMjguTRRyX1PhJgs%V{lin+LYgTkLUn89CCk7eWE<4lTS=?2 z3UtPD6VK6UdI^J*)Oishgll*XEwYyJ*{q7OzZ2I6JEMKVdbDmx@{$5L(@cFciJHQ} z#*xc8jZyO;24wbf2`2LhcJwfyNAJliiuiHjJwDr7i;&0b@qWWhE6H}FY0wh7YSIiJu07?vfj5>D_(Yn;?jQpda zbyL~7+&O7fp<^fm1oX02n`JwU6j{0}37w>cPDW~>=d#-c^ZBakpUiSa9b`O%$Ni)< z$*gUcroY8xn8T{0pb98wCNcsrOpc$xI81&8TUy;LAzOx~s+s4Z4wdaD3)-YFjpG9o z#v_Y^A&$a=?YO+!fqql6NHfKZxgYTpDa>r2;VtuRGG?iTTFhL;j#yG%$lI#-(cTG+ z?LtL=+jT6Gj5f1uWmiJ$6f8oG2(^*LUkr~hTxOtlSa1-}61rBZraM_$Cq+=d!~B$$ zS7a*r!=7cDe^two2p5eKu}!1jl_H~b9d?=g1Ju+F1|RHTzPOW=dKS_9vo6OWA#ZY4DV}2I6TDFzLx_!I>|(+O?#v~KdRa>uEj2W6k@E%Fo`zl zm!n~)M-a^m>@OGl4_uob3xi6WLDmqI7tE7fn}%t&HNx?$KZk4oBaPA(++0QmPqs}h5@=6)imTTFG&6oCI!X0@e^_-EicV5ktrxvz z-n@^#F{>7E!WW?{@24Es%a`nrp1trdpG#-}`Is*OdiQ~wFuCn3_%Vnxp3hx#ejA9; zf}1>?HN*!AKu7`$Vw`A#qSl-^o45%eMf0{PaLp}{gE5!L_JIR z-Q3h;aFPOsR?2vPtZEL%?=e~^-$sv_G^slmRH>{LuqB+dTgb!3OUEQG%t?Gd{RLP7 zoahG#T&SR2m{GjlkG?kRpDM=_D4ZkapDls)IM<^A3uQ)R6jgQNY80@00!Z}L%lHZK zBZ0sUMN(M$P%}yzZ_J>nG)t`jYiQ#B#E7hZum(g7dT?o=jD2Ir4v?RO?x@;5ekjpU zb&v*Tdxrc`c{U{5<#od#nf)`S7UxOT5Ni=kNh16C7?|x-xV$9rE}`!XfGvo-mw_@m zQwWm?h_e^+=REo7D2FkpTB$B%11cCjdgbA@C8u=w9A2)Gyf8ax1C^tl2T^912 z`B~0Q!72!*M@8Qif~KpfJkVZ>2rzI(>u3!|32CG)LyN<5Im-W(2TKKQb}q3$MC=xf z^5G;H-avG>$be_+{`ggccD|5`>A@zn!*3B9v)Nftq(c;c%^o-uw45j`W~g&lrMho& z?u?t5mT-{2eB!(um!=0h0eoWs-BckvKVljYu)ZeE(H4j^jE@DS)mMn*T%RuJls1i0 zKqSmeDYQ1(YUKH;e*}cj%l$}W!D11` z4Oa6?FLyMMm1EDwq>ehP)ZlI-{MhSZIwE4|1tIrS=6yQl?N2P77&g=SsR)es62LW5zbS-HxIQ0&S%ng*P-k?I&879y zF^;T>mV`j=70gR1h-^qEu1tLn{4qN{sOb!BWDvx)*8me89;GrGnPqA@R`e-5cEmh@V3tDPr|>^ZP1Gpz{&4_FuIolB7!Xi7GgP-CaC z0?uDdAEkwj9O6Kk+6Seo26ar;Zi4tCuEC#?m$MNBA}2t+=qr>%O`+dGKYtc%^(u&B zr5Ihrvz`zuf!=v8w1a-;tH%1$o-{2(Em6QjUSEmuG^l1NV|wNVnIHK8s6%w z38zeIh%3`n#;usZ8N!se3MPPaRCG{f%p)fnqEMv?*EBcKRVGO*n4`pt`4`X%N=CrU zq(+d8n3DR1ks>RkTlh12&jnCa0rf_eQzudexDJPV2K${Mw0$~2mn2r^p)!d}Rs2Yz z9vO^JG|>b?r%!|m^k5=knYAVT^e7zn9Sjr3Onfz-MKqjBp%hy<0{VmXBJCaHAKA6jU&7i=5A9~j;yUdUg;dPNx-1n<^RVS2Bhp>*)O%&p( zvhj~pt81i6F=#3Xyz1BdvpYoVTKpTNDyoU9D)HRcldr#hvW=Rwq6Fj-<5kYzM(Na+AdjLEMgl;z#(A;9yT;1NUuT zk|i1iy#^e_V(&c-A?{wsLG&>h5UZ=GT|&{Ys_y{{ 
zd#@RwS!urr=%k%wyr+MbbdCo-xMdg$F_cs_C2PD&82RW{rnwsEiMP16Gi&%B-Z#gv z07!zxb{yu@zu6crcHS#wTw3bJT~MZP9=KM^aU=a{dwcSMXJl+}3 z=?&>;$ai9s?l5ZDnb7m4z)cBZY7%PEcZzrBvj*P5G=<7!X+{sxBz)vDgGcpiZl9E9 zRVRiSjaLGU=!ljJYg(j?dFWBu)}oXxHwNdy^>8po6nA&rI*bn!;tu4DS;`bXBlI z#SwdKq+am`bEu8w;oe3%t*e8e@7<7kh7!Za`a1%+6K5U0S_&+0M4#gznK!d2NqA^- zcwg5GaPBT*9~J?E@%VYqQG|Tv!aW;eIvs#_RSW#C&f*p1YZtTC;n|xi4c%LPB;j3^ z-0W|gexgDpY@OAvzx@1I*$8cEoHeR1Mg1qK8&Vvj$2_C!Z(6{lS=6~2HeymUpDj7h zX`j@xO6+}O^0YN$6c8VXcBED zTRSbmu_>w7Y}6{E>6$Mnn=kNItxiuN*H48laV~HVX>&3WbXL`n@9@z^6WnHe^kbc^ zERk(j$-U37PcU78|R2+@6M^n&sWo9$D#Ojrs! z{U>)XK+Q4EY+Z2HPA3{}M~h%usWwJRna9gB-Rku`YeTo8k1DPp8?V}*)qTyRW@EN9 zu-{*39e%KNz-~-H&#|}8v@`Ib!j2SK7gT~yzrm@&rv6!SahFYk&=#V-1>3fAHnU$d z92eKJHF5G3_!W;5&n7xg+p(^Ywu&|>yVXRph{Mgm-Y{cxINBEDbs+4xnQZJ>jXb@v z$!r?_$9sLXI>w>^(E1Tfm;RDM?t}^+e-i6_Ekdi>tE7|*AJ>$(mH-)>@*TFhJ}0TL z+0-XD-7p<44W)$D%e7Q2;MO*CRC+}bo} zL!Yira~fzA{Z|W~xTW}M1E%|Yh!%%{gE2%%(O}pJS_fk|_$|#Kc&sXz&LfYL0S>=X z2o-euRw1}cQcs(u?@^+w&2 z3C8`(5uOBtLnzj_MW75eG0I)!2=%HTIOi~fQlJ4ub(gTyAWe3=U`0IkiK7<75}6&| zZKN_A>66LfN|JkJ2%|8?+m8(Ff^3qFNLCftR%Y;yz|o)J>x~ZZrB?gVqYH_OMTKe9 zW*RX`4=W=VgeyrbhQvxux@NBSz={HuxUp9Zt5(C{11&%0CO;F1`nx#i&t)7mbUfRq zP7}@)W7TFS7#L(7k{<3to0dhVI(9>OX2Z!d)J1Wcq!l7I`h7IpDi8tx8Oo8XFuZn8YS)#8;}^5$D0>2=Fs1ZC#f z-f*ru->V`iark0+D?yt%b`?K{FZaNPKmBL3COX9DEUmJW(SecbiSw4N2JJ$|g(7-# zhKgfuTL&}o)TndPVG3+ehW*Edt2F~eH|Ef&E^SI@1lt8<3$vq5=hnc*j%jNQOFQee z`^unz?P0jSYN2r|qx@8ziPT#~$om7PSk|_VkRZ>nfUU@;5g0*4xkUjheZi&x1yP{N^rqxU zPBr)!7?(3E9iW&@|D=A1)1wvvKr}Z-vclYmvB|;emCr;YCiO&G!F;J;MnkWxXUhT~ z0Qws=6qb}l3eXuKL1NCWxiFUd(O{fo3h9#U!k4ItJyU$ifM~Q+5XioH6~RJRks3B$ z5MgQS!~Bp^$)Y>pEm{Aba*9oMP}kk=IfYLY9l`PSfZid!Y#rD zn)}{7DJ(tdpQT@k&Jv^OiGQeQhA40su+b+wlErNU7Cl5RM}|dW<}>z8aa&rcP0xNT zXdxrLM=bR>aqndXY?$pf>tgB2eO+dSv^MGbxwdea?b_5fx?ycQBW_O%NGzamuPQNu z;u99cO7{z`-!CfJ#mHEADq_#GqT@=7U z>T^BF1sh9~z5SN)pv>1TMEP09@O?cg2-d`ePYO)c3`YD3I8yF3(>@XOhw{t-p$A5f zAPjrge>-K);T;~PDu>jpKBgRh_CwN@h*%;p;d?$R?z4G9MnO>;}8UKoqCh0ajF z^hT;8(*o&(Gq2JH-Z7oroLwSI>&$K1jj<--Qn*z;FmcVKm~mSB&yb*;C}o_ntMQ0Q zT9g*O#4T{4$+a&102Qm@;Sv>Q5nz>Uk^|mmf*h`fvejj^7xhu7>JqfgnY3e7)MZv( z`N1y>SO5`RjiNLBjyW{uCk`r_8lFy@6>BS#Bc1d@?%Z^4+^r`20w~Vg!gz4BMXyls zGW&fT7`r~XCys80P(n)R9T4R(&EG~Wx-?Yuy#@Vq34L|S`sk};5|Bp3{Sg%fl2D$p z6#FiU`nbgXYePfBOlAEOAwgyLD{s(5{NagFo<+M4+icUP;{Vd6JJ2HjE1&{CW%i3}T}>Bf9> z&m&c@N-TfzpsYM8;&=stbV&It)t6mjKmt)Vx|;WI2V4;&fSI_USKkZ``@13! 
z83Q@PZ&w0xRwb7_FuCQnx?$~XsSfPl5c>E9h9V)QqvYfgAHz>e@t+|hRgJ>$hlUa9 zpx+EWBcdIjb9JbG5)$JU(q;9_xs~4JuNXdrHR9aMA7o_?j9Z${ozl#7`!Ov(2e%Wa z*J#ttCQKcTjlnPS*gI^$1h2o0*J0A_nQ;0Nsb5D%fGPjxk!jrod5 zaR1gfa5x@{$XKI~kL3~_wLzSm%egeQmWIzdOK}L-ulpTi^%lfJ@OWlX93{ zL;ji7%k3xYmepEd_BkW;83kwzrS|$xj;=lA!uAb^+X9TPgYT6(uP%~yZlb+?lI7eM zs{AN=w$bF1wskL$xmPTl6T5Pi&$N?Ph!yc)%P%L({?HID0I2ve6W6wLM;ks_T6!Y} z-H)3E`UC^~l!)mi3;MJeysJ8F&2apI5ZDIP0wLK{Z;p73(F|m-$s<&3p9PRx&}viA z<&Fik1TaK?{M&Ald(I3g&vulHc`mj2oK4v9bE*>LnE5k7!jy^BPgjd-?hzX1u-5Ea zM36%Iw=H_1_MZpG?@DSokRtxFVU(^7Acwy-S4Ko7SL}+EcEhjcr)Hq{F-myqkluFppbUykcg4O8leFxiPL;s9<(UES&A0h~R2UDl@}a7Z!gRtRSKboiPT^=qIN(Z|8zG;5qZzYS z?nTQ;&ZO#^$P9({;afbUEp9&oSpq%A@!>s>3duRI_ zzxrov(LdlTB_neaV`r=X4OBUrJ3ATMy4uLvy8a#es$?nmm)#z4aKj{26Co5(wProM zvHNpK8o40QN(Dv2vwa8d@qkmxxzLhcv4Rv~xDWG<80M9g6bS6lN1Y?nlk-fwtIy}_ zD{LPQ9(|e$-7RkYsboL>dy?(NZN(zOgE*8gsa@LvjJH&bQ!+NM@$n2gD<;yUZuUSZ z&|asu4HTap&%@6PNMJ3qw2ftzb8;sW{oJawN&Ca{YVpGWQIwT#Z zf-Dd*^3EZ}?+om-mRw8T_41#5%#moc2FgMhjrx08DaKg_=S~C(X~r03H~d!2Uj1jN z+bC!a7|ikgs2RG)&0OkjC3H$-A2b@^g~5Qh`nzD?5L(Kk2PEm&ff^x&o?A^pes!1g zP)ACT)3^L-C7l5-&>G?~t4i)^hd1@4fj$0Ku}yoXw{N zjz#ytAED$5ohMhr3+U1)@cNeKI%Ez<&Q@H&jl!7q#S#r^c>5mEHy_n$ZeXrSxZpC#Iold45O!hYnTT!3nn=b0nYK|snQD*d>z?|}KsQPSmmf~c|&iX^kdhWr(0 zJHoU9Mk4=YVEU-t>)7My^ZEIX+6R)uC=6Y(uW8ut0wUCvSB1NW9;AhO*=A-R`kig8 z9z$dOoo!5!zOHYcqm*gvwT4_XoejI^(UNc$N%vQ{u|ufsg`EM$R)HG&4v2(8NfOv{zVr#I zR^wY9o8(}L{c&q1d~!#ynmn=yn|sA3Fj0ehg$=9rVwn3vq6kY2{j^VCL}!W9uCV;G z0L?)RMmq_y*j+FK$YZ9hi2Dm2170<7L&t1$7^9%Ns#@xr6IC|)f(lW4ssIq1#Cp9J zo>O-s-5CZc0HI|_tkTH5&D(Y40hp1XwZtJC0|zQ^{d_)npnA zyFc*M^+$`uAq)n6!j32!n?@L%TKkO!N1dCu=#&c!D{J&61#Pu7T2RgYS81so2wEw+ zP`y>G638ec^onNE>k~~A+LC*2rULAQ5Y5zZkh%V1d5qB_3*RBhPuv32k-y|o0+Q=@ zR-QA8QKRH-LdYQU<>-K`O|)alwx)|L3lbN=7fg?9S&&}*9t(H3%1>&}#Kv2~FV$a( z_D9-q5qaXo^Mt@mwsu8}@Mk1xmH`QOluce30j8=${U*EdTwF{|U7n zR* z;@fvEmdBq6kWJxb6oNloM^+A3p6C;8{UY1yIeW?5+4?lw>&p!)->(KEIUNZcIlB&L zMqV>51G-HLpEC8MfoQ`K)^)2BNwdU=5ht?=XQR0=6NaZa9!$t*-EzLhKMuU*gxL*Y zAqMp~9B}94^+!p2kHx9aZnI$|_^bbFhG|DmCtjy!K{e0f3f%nm?)qfjq&zZfUK8HWqY6xLeE;pvM)2q0ZW+MZm63d=He5J^7eqW zRP3>AdlXQuJSn`_0ZMk@(CiW4xP(Sn7>8{}KT$rMO79&iJ*v1_XH9`~Se)W4!;y7oG_(_E(L50njs%2jFFz0Pfg%O#%^ zY*^B%={JSoXTp5s*tX)>|BQ>{28{rn-2af%3^rzgzd1=h)SG{DWW+}apE+QQE2JBr z9LCK;xy8B2cTE81+o%5MP53$IMmaoS(ID$B+jIa!M$E;tzwI61Sa~>#vF)$ADPocH z=iZYF8}Ik4o}25r>*ka{=|)?RnWDthyYLK;!v-yP{+2eqKBvyHKdv7oz?iJ-Hosfok?&;kF48=?-WjkApTwLP=V#gEp2 z0~wb+OQ^gh#ot06yGs2*p$cq@lur5#_Mh8xU#xPxV$eSV0M@9GN}kVMMzo z+`ZUa7DxsSCeN;Tep&MLxZoh}Xr|DFoB6aKN?5?+L@80Cpyk=IzXf|FiAKl@ShgC7 zNejgL$_OEnnn;SxVfiLPLxQoit=JV8ORNaSqIRkcjD`Am2E;_&Ts6^*DA+)MV76-Y zgUh2(U`8tZJhXa33(9~;JC zARulfHO-z?_V9pYKbmaN+ea>MOOp&AA8NwY23es_a;L4CUz`j;-c(hGKATgcthBJL zm(RCRK~mQ^nTM)-kZKHttWd8DmkbJI^a+kIGH}H7E%(5^d%L%A`ae zApm98jbep{RV@phc1jUy*A~qamzcw%$c`Is^W(%p9^ipgVC`ph@{;pB^U2_g^ouhk zV%E^-I@c#qTfTMxI(L%Ab%8cY+f7~@QPK_zX)~v+j{~P7pEd@-?`=t~PU82zF&wg!Dg zGuwf_Y**LHc4h5y2|p~Tbl5t5Fle4nH0mFV%ep+}wK#d?zvWAi)tlX9*!njt_WoF1 z!?}?qAH%8=zu0sooyc@aNsd^ItF`P%iG2T&%i|YfM)m@56i3?cO5;CjycTHNhH@PU zy{^f?w&#%~VCdIzy8`9P@Lbp2REJ$}ATY0r9oRy9hS&_0RPcw;?pX@q-*7uFeA_ir zC3fe_uuhY6o{o5fYzvNflW58!j*5glP^F)ZT*P}v;THGL71dYn>r^$}BT!jZN(;G* z-pXK~7#*@prB6XqbeOD-dkm1FRHX<&EEHn0eG)XW(Dp1jon8aehlbc)$!jv90 zhjQnGlT(j>(m1C=L&HJl4);&?o-OWJ2>*CC2q>NF6B;*qOEmu>a6$!5rFEoQGsSuP z0>o_{JUhrVMn*{tA_4Ot@m?mE;ARDgP1RVL9Rjv^yO>xgsLl~p)C)1IfSMyV`YuA`8}&ef`#=N^c_j7TjZlKV=@=b$Uu|PM<#kmV-NKiR@7HatZtZt zwm<>}clw%(ne>57W~Ahc4&e`jp!XMmZ;{n!kmR_8b7F9>Un=bRhex$t%&CpjVwydbvtFaZ zg-N~454SA@+?gcZ%;We`Lpk&ZKg1p4p`olcyFH-h`^sZoLlI;rrAC8Zp&heyYBa*7 
zAeKe0pJYMN!2*zs154Q9h53m@oQ4$jiGOCQ#lY_>UBS6ri=H$pLjr*x%AJFYy|Go< zL~v4&@Q)VBo}5{5E%7%Ejc`(AKl!bjX4Oy0ct0YbazrJ31kCh0*9PpPYWnooMpR&zL^{ALWB?7Rbgi%3w1bw{ z0*XaAz;AG_>~8>V+2u!Lu4vYf*0z?V-$@5B9?qzZ;TiDhS#7H|yO|6LbG&A(47ic2 zae7Bb(pHLHju^nu>oKU|Vkpem>&cJmp2R!kXWQOih6e0FdABl%0?)PvO$%+j5f075 z`rFnDCikYm9LegBR&}CMZ<0`2@oV6ts(v0r!>v2<@OoaPtIA=h}Re?5BwWoB<_t%_$Pq;m3HM9{Ro!3TPj`Drpbz{!z7*cYS6>Qr3xwkEv zS1lz_6Mt_s(vIqPpuKR$*=o$(m|y0<7Ny-3#@;PqiTh<8H#uPedGHaGsNSc|uB+?}v-YvJh$Yd(Q!Cn|!iCKo~@JQ%e zSmRYaIe4Kpo(T23WjzbHdAxssene?6-?pX?$B+pmb%o`3QcXc9$>N$YQ=nXg`y&(~BionC=wW z=LH7*v}L$$;ugQ;n|6<8X8(SRp-c`Se<*+s{{(v+SX0~`Y|sqFIeZ&M!lMJ zxH^FsB=sFPjWUJTiG^uR_-Z|oa-yQd$W*ixl;7aK7n~DuZWRdh!J-c`fY>>O{nC7l zB4#~{^2!Zrb=0$u%8Db?&a>>$yqMlByXr1AJ`_MF{rh?hJ8q1q?J}nNgP+5UK(9T1 z9?Z(FZGME`vOHAY)I=W6lXsKG)EvcNd=9#B!9R<_p&rCM9s?Hn09FH?avlRCOtU1h7HE>w~2*^M0rmrmq!(z;9M;^RUT)w zlY;@$RqnxJ1X!7NjC>3cN+H_e=U9j7Wt3|u8bI&eS^h~g8XibBv_UX zsmyax8pq2J9?exW8Y`-E5n)w*5PVHT{U#ysOIONcDpU_0&3g}7+IZb791m}(?n=gc z&AARQfl79S`W_-D){rNJnh=cVrUB9=L;&SO)9CWl^sRZuk(i$ zv@`{;_1_BxwH_=}D_?_JPHVMlPLU(aeD)Pp$nGsF?8i6xZ9iX9{bN4CK8XKe9vAJXWjlU?|n7_91OO0E=8qSC~G!=aCDoJG?gto)KJh`@l@rhzupUDnQWs2#* z@3r7s;eQpkd%%3wsCzYj3u)YVI1bc9bo><1wuV1+y!K*P5tASc~Sl^C=6?;ZS6o65q-b50mp~nzVi{ z9Zbxbikb^`6+I;>h}zxe-7=YWAKzI8ztU67Rjau?2;zcR$046%`suu z3+$DJ(SFK0-OG+2uIz#i8*uFycA<0tNb3e2a>!UkBlU-#MF>uesbgm}*A&EuWbG3p z|2!-a-?seg6-_wgN=5<}0ALR9|A!nf|NV;gH)~(i#OWJ3a8Pu%u`w{TGWqvV$HCc# zn1TMEuWG@H8#dq4mhT)AbZjBbMYMrpmZjjN&<=0m@;G?{MFoI){%y9)mNl2gniWgp zXG#PlaqhqDgbdH0gMT!)xR`N!axx7+P8Fh36mzYf(cY~bbG#B!@@=Z+9r7V%y%IR!J6 zLl(oKFN9cc>L0@tquJiB38y`YI5Es_jHioR?S#gj;i1A{WW1raPH$h|&b)y>MLUo) zJlGsc56zOLvvAi3YLlgu@H?$|RrTH2LAP2cyHGNPY*8uF!3C$W7BaUE&d-onI=9H^lFm;`#+KP~LnI=@vd9eiJ4SIkox9HrvxBhEEk2a}n<|@I&_)Opk z8nZ(mv<-%$i)hyYv`Y^VTdwE#pD%eN+)C7YiZxBh4}TklhDUg(?8jyf0M|0Wr0wp8 z042kp_clp4b;S1wh%IAx$4l8t&;QVV%su;z9(-FODSW@||KGMq|4xMZ*D#Updn9k{ zZ1lgur~jHZ{(Hgqfbs9i;$Pw_QCo+9j2r)zc~h+XFNkWPLy;U_M7W}6RW;9VKM*Y` zl-L|q+7g)$ZX=(Xy3^U^m+)8Y4hz(GA{6ku!U)%506ckk{rIKyQ;z4+rsvG%6TV&# zVA=?jP}f250#aoQ5k?1Fks2%w0hxCsS5|(dK1YisI5f<~iF7bQ!?_CN>ThTZ$5O%~ zThGf$7RsR<`Xtwv724V5Oce*QL+QlMoKDn^60kHefIIX&(g0XmV6Hq5kqvlN9*xJ- z7Lys_d6PWkc?`oa}M7Iw>EaPC=pgtGJB2|X3%w_uSS%~^*WF?Gu1YDl#d z`GLRAX0$>+ol4pUcdoMrw=`=|#5bxH+|Gs9buq4G#Fz=B@-LKwqX@_6H0GF?=3>Qh zyuTBpP^{Is;h$K8sVk{N?i21^f~A*ZDN2gU5Tyu2Seg&VlP>jXmxCjf6ILNxC*;My zMr^ztL&px7$UHfdwYU37V5me>VKLZiNj-uMo}(m0;|bB@*fn8*f6ZZ!s%Vq(75xcc zhXvjeFB4>gM0bn6gE2;_fnV!AS~IrF69Y6%(^r>kOm*!j#8=HeY=ZV+a zyayhy)@N~Iv7wA8XK&9n4hPw2nU%M2p7X^_)n2slAb^~pUuqBwF`g&tL6x;dkyvR8 zU2~uezFqWcPtcpI9+7mJJ`^CQC@x%bvp|m3nY{5@*h`0K;bEK(Mk_8Lrx)J#IUTPI%pX5)|EjCM(x0 zQ>DkOKp3o_$abHVB9`fyQ9nW+@t%6uGHNpQ__3}78hy|1P7wmf^MRv``Hb!XNG}qM zqI)eg;q1a$Z(RTt zqauJPlxaq+KkG{a--sOgqqv#xeYkk~L%$igw35D)<3qqB$h-r7lK-B%_=#1}IetE~ z`Q4_cHgkJ>J^-%4YC$%AxMjVXOMyjZ_(M;fPbc}IR>8&aV?A>-fF`@8+&w6`eoKQ} zl=>!{QW_gOisdz&fpadbdHM@lo(bnY6W}2(FZe22DT7ROk`4z(Hb8@`0Mac9Jx0^I@M*MsCytoOoz}*GT)b40`IpsR(X#Po4yrj4}_3yi?i5k#ZX!ri-7 zP;=_08>A*2(?aVMa!tW>LRNR!*IOgsP)F#ib#@@8v}wRf;l8+iTr9omHQ)vlWKS`d zJ-xBer+6JAeG)MjNDY08Ggr4Ml4^=$IMy**#i-^Q?CJL>5V@1h{mQt-ZxOQW8`|tT zo149N?dgVgyohb2h7a&xb#QA0SK0mDfakw!;olATzadf6zw5y1JA25H?q8ZaIa5QtuN86`X@CAY_)1V36L$T!6|DNHu4Wy}$R+V+qc9g0->__Ei|9x11;`+j2He zB203rL}1+nF}F8U6`659${J+*^BB6#?>2+|@*b_{?zS|P`Q2jl3rH(cyLn9IEwYjd zSIBLfHMQpiIi{sdpI?M8iG5!jp*uoji?wo{P^iJJPtSg9-$jS+ZK3ImBZT*|W5qJ0 z-l-(hoxDPEC#8V$XGQ=bm`P<*kJ9EGMo46@h9hMHJN78~h+w8g7(`@L`lA#R0$t6W>+dQ!QDEx-|E8u&v=-3J0*S9kmpO0o0Zi zD=^=X-6+$;-*ewf|8j-FJWsy z2SM&`NPJD1Xg`QIC=2VlyiD=#F;q!J{F@d 
zT;sIHFeVIH*%UTN-Ma4Djg56NNsdkbMBn3d$=ol>9J=;BTQC=4LxKK)2=^f*C%RVi z$cbtrjO{wu$s6*?lXM|wL00&|aSHWfxS^BLYXF`m3n!0zx&#>4 z2;(TL?)wrnk#&rLQa(K2(Eh?>Pi7pcjUMUwsDpd0C^9;n-$>)D=-NIjnclI=FBt1& z#-7(9+88sz;e=0wIr0hZE(sI~`7LywQJL8_Q6nX-I%Z+^@Z-WRm%xf^vFe0e5pgpzvHJ^Q{pa;KI??)jm!OQ!p6GNodp7jB+3BBy1R1OV5SkJZ zw?ZR14NmQ)0EV~Yn&_@-YH*p&_gn)%=SQ^y1BZv-h+K6a5%j*ze&u|=KYV}# zPz#|6RC)&bY`711!Ufd-&S|9OtKjUJ<&@3#b#Cct%9aCs+6>q+)d|V7vP%Wegh&sb zfaEeWO>M_*#Jg|bprBzzAT+rWuUkO9AWrAri_VY^qBp-W)W_R4>c7uu!qPd}D6^Ff zJLeL{7h-y>diCnHCJwb9^dG{GjxhzGX&6HrnfH6vvUAQaYEK&mts*Pd6q=Jg1L8B7 zcsHspCySi&nNlcT9`;9+B1=RcV34K^Vx;Tqw+5U0Rb<$VWk@#0oFf_xbq$aT8wBU& z%h6I>5DZRTh_N=0_N~j#JOH6(&>i!3*twk^tGx-eV+K$#_9+a1j7UZ`5W6t&8YYV< zqtp|-kn}piuD2TKKpH84r};IhkJ@tnfP?hgqkyDPfaid>;fM{@_EX2pbb~DGRoIXRC`m}F{D3Ml$PH-?rja5oZC)OU& zm;@&YPO&~z4+@XF_&$E#Wjr~VK{ZX^{1kFFw)Uf}C!Q;gcD7pGuRA(F%TAlZCAtSK zI){JaP5kqB9IE^b1etPf(6w#BQmXmxH$$$0q`oPYQ0XXJ8AHs}V%+AJYQe3;l?lVc z5JH@{A;qoi<&>`4V%_P(c>%ol@PwfRU@Wl3)HCE5edPpQp#}qRE;P9Y>*9U|(qQCv zfmWlQW&`v;+lo|zJyVMCkL*e)Pix2|XYNYO7d|3ZwA#fS+-VTrzDb( zS@7hYOA@>=qKJ2ktE&eb2tAC4>D60YaPXLUCQi=lah5a6?n-dSSkvc`8uM57ANbv9 zf|rDYTaGB9O6X}=(((tcHD#7CR#^wNJE&VtcA{H0wN(^f&X>vM1w>(TJ#o&6+# zct8R5TKoalt{Abnr&W|$V-3wzr!A&OmRNAsg^(lH+LJNNE#zY4_)xE)h-n%v41XFp z`Rs%M-8NuH23lDY_BPGWTCR&WOOX~*sVG13h4MsV8&bKxk=?cQfh&H z=@wP2tlI#(oH)6pJMFcUZL+tdDUvnMnf^q+ZgE-5eCE8zuz76YTfi8PY@yJXu(VGV zqQw&w1tLv8L1;5suxWr1iGKW|qxTq4l&?>+&^bo02y4GX9nzGj{zz^B`D8p+OJUqf z9xY3qSrvaAT6I;(>fCZTdq$82rzrEu-qn zwkFU32?RK}ySux)y9NmE?(XjH5ZqmZ1b26LcXtaC-s$e!a{GJlj`sr?=M4Vs+H1{O zv#RFZMi_olNWLV*K-%u-YH^l_PdGy|}emQHpet~L97`)TvV|f<* z88_|Jb4czz@i=c!f9kg3XVAMkGHFo*t7-NqNA7{E(?q@OG5Wx4?mkN$RYt-7 zC&@@U6G~pvn9yMo_Xi0|QlLY8tY2CZS;si_9m&0-{~iEqTdo9KE)(q;A^R&8JA3vl zLYdgvRI@LIA&l%!c_5Pnaav9>ceDp28O}}>QN8dON z)WHGTuc86hFg(ul?3vXr!2*Cc;5O3MCl6&LWM#A9MDgSIk)TtgcaY!i+z4_V1~s6P zNfz+WiuT`~&EI$KUzCELgPXMhoxpFu|0ECo866s>WC2WDLFO4Pm-#Gch5|2;NNB|m z8ZHOFUpn#8wkRimGubi9FV-Y^rFrqbspthx>-i+1+!_gt66Sxwy z6r8Eak?};UjtMaQb<~NQEX}}lVqb@`p88p&g<_Fh3{)bxaXD2eAe3zrzt14 zfVo$#>e_?ZF;D7QOJ5wK=O$oBaVteKuc?iUyY$=4y=ETmh%X&eB&&F47Ya=KkrIQi zK=1>l+gO=iiFxP1RKxgV)3*I2>%x!5AQTe%3c?6ym~qp%5o`H-O9Yx0Zek8IsEZUS z%L07+At^Ubp-B!7DcuYK0^B;4>T6A$$)Avm^dLh0GSZUE$`2Rjs3UQ^!2?B_r9#$B zhGWloOpJ+KP(^krdQb1iL`5$EBAB@v$v63KFnPHG*|BnHe-8`KlxMD6u@)$WEmu|! 
zo}LUz*%%B?P(3a^Wrw_*Us=iKBE}~eH#W>;1_FhtsHN@D^UKoxOy+&X3Hnu2u2jl$ z!NtT4a;Qn79U6@mZAvfG8=|Qy>Si6siA7qxVFu)4QxCgBNX-T2z91_k1z+AH$Hbm< z{QIb9*hS^d7d!bqCIkn~3Q;QsLd>TMyaRthRe7@qu$tmyN4T=6cbpB~v^@{swd<7mD1d-`ckSDlRcx{yV{4=&w8ZkJe4m$ktNN4VaDlC&q!gygz$1 zWm9`2J;T5IxZfDr|Ex_;Q z_SL@1I;_e?@qJZ4O20loBrsIKCXes%_NIgA7qbeU$B&q&XI1MBL|QQ`=}WW=%Uysebj_3oPmF?7 zOe+R!$G{Hns@H02QY(yf3ZAhZaB5x7jFI4)H(n1vKtiJwd6I*uo#v+Z2m=60>-Tks z`w5}GQ14<&m<{NST^UX&n+Ow;NaEU|rs-L1lwq7`iHJOMWRjXCLVh_Lm+gGe1dj`s zm{;wIoR8IW==cb0t*|SfNqdpgwRIsy;YhY)M;-dS;*-zOBo^h&N}R-<&}_m*{~Dis zmLp=NMWBVjYFbfXC0wbU)4)A09M*BNS14Di$k82Sv!Ej$!?)Tgn;bX3XrVJKe}in9 z;=PLlg2uR|4o+?DIhb@Sf5NOep2KD6fcgwZWj3-#aU_$LV*H^(5u|1VCOub|fx~_A z+`SjE1qRNA<{S3U$7x}AE$Sqn-@wP%MBo4bf^X};3c`Q$27ecXf6@QHQ?EimotEQ2 zGukSZfIJe=CNR5DRbR+2f=r258IgQbi;8FASp!)s@rfv~Ag0$@GR4|t*!ya$dZrj$ zB61Q)BW11&B0cAZT^PHVT<`#+*_d2>emy_n@PDetP#l!qO55rB{;Bsh%)#s%3t8;1 zEED^5riuqw)j90rPdwXMj=-3<`{c*C0jLpMZF5Jr)=?M-Uxr}p`GHTJJHg}o&30$4 z$=ubfg6g2Fjr3llaRU(EU)M2V9znCSmwQ|%+oS#IiOpprn5|zS%Ls<)7i$XG0q!Qm z$|M-^Hq>T6Z7_BUD`x@5v62EpY1`|;?i zGr9M{#z&S4de7%Y@Ft&`xG$56Lk39#7F^1UXY|z3c78dhOX3C?pMoJ;Hfy z&8xQUC=xSN$k-?&^rxGZL|Oo;-SiaMjf_2neH5>2n`f>?+s}?8T2ds|ttzTo<+BIT z`R@!$DT+-j=^j}*?NxHTB1(G9=yw{Mmnbkv!XVW(a8CV07wm{Rgv>9I#8G}XF!jcG z-+D8#4Ab{{QLCvvcLrj^9cPl@=J4+lN-p&@?u0E}VY{nU zM#{A1sGHx#%bOed)flOB1>lUqIF>X(^>=Jcll688{`@Z)^S{qShJT%h|NHd&kHk1h*}`T{6qzT1`g92$6gp7V z_XG4g@kf_jD_PZ!y`tuOK<$VgMB;NdvjDPr176j5x!@=b=r_q&-*WJJ+#Hck}J^v3_Z~ zN0opd7*Mlua?2rz2oT(s5$uqJ%jRSBHLZYrxu+4NixP4uq{Ctl9|79Re*c$!#*{cs z2_uf8$231)ED>~4X*EDFTA^Z6IRMOBWi6K)#W8R;_9R!Wl;Ldm^mN-HSPF%$dKN*V zM>U8kJ0@W!8!h}r^E&Oo9e=PH4_c=p5e7St@-~8r#-(v=>e4!5!7|aduTKwqzLq~c zsJ>2m?&o)vL;I{V5j}El5e;X~(iwu{(jERP*+PM%h@~#OV0|8?u>lEW>WnT5#g);d zD;t=5y;wz4GQnrSefUc%-@fe7epy}9YigH8?}ZPQXyW8fMiqS@eSHccIPKH}+b%$i zs+Wsum3%ioD56O^Q&mCxJiGZP^}Y&yMMMnz8K+FCJR|mLV|HT{6?&ccYB{SV_7u*0 zyl!j5au*BZx6dLW*z+~lC>^YJIGa-i>d=V6439ppkmT+FOe+>Z}XP{DkQ3A6)^J+*IJzR@NcNcP9fV*&h15cCOtNBvjFKh&dQ(x z#9oOtTms<4ety0uPHf{j!|o}*iQPF z!wJ=57fSX4OkAo(AUeKn3cl6NK<5W05=Si(a z!}tD9UHHyCSsySCbn7i+6?4E3c_K=rk zfO_k&Gj~O!gtbE~U}ukDZCyA$CqsMObZ>-9{{{E9+PV9CV+}R6Zrn}~wHQ-N_Bh{A zfn{EhbFzpSA=)L=Z?CR3>e-|w@FqzH|6esb)nBjfpL$;bgWqVefBJHMA0q#UBvbmG zB=f#FQsFG`64q8mF6?5B^REWxIq6FJe?w0bDnU>I=q|Q)aeAN*x?YWEq%@h`G^}Od~};b(lexLZ}ajA%%h)vb00$MLK2}$-rR8(qY4AHy-qxRlHWWTzRnkDufGUWl)suWQow*4ggu^g6Q?#p|B zIOY6psoGgS>j>MFANTCe@pjSe$w>E>kv-&!tqfSKRVM?BVdsS{sZ9c2ZTuoV0|m59 zeiXsi_9R9awr)oqW_XZvRm)Au56%7b-^7n*`f7cs*JSRs&o1Kn0)uCQ&;R@-|)all(p;OzQ$NQT>piMNcnTpvBwY!$nwWqde4ihsm*ciyl)%l7L zV!OPGb2(6i41e>_f3E$EQAE2VO|>ZBSJ-csG*ma%%50$q=ZXxOrzv@8MGAfDGbba>T?p5eH;kS|5MtIIb*GScXFv6= zcnuyfa*$90;)OzeidE$)u`yUb{na~Iw&?u}kBCHZSZgp@x7;1}8(ijiPVf9BI$!^t z+nolb=PmX#@{i7)Uxy$Khg*h6PMC(F6Z%fa5Ci3W3JdcNCRcFB90Cl2x&G#ik_d1$ z!fr&}dq+Pq`m|bL;*S}!h{;9si1YslEw_^Iq*MpqHiG|k+x)xDWcmxTe-%LgQ~$v6 zCv;zc!Ur5*CbxmjEj-MUhmUTKiy&!6QNrElcW586Ec_`3nRYdkKLbr@6kZmLx z&{{O@gU7w!G+A{Z4_SR$w6aAgFg#Ss#Y&%)7QWRFY|OX4B{-R>aC&r zIe<3EZ;hyG)KY%Dqb-c9osOyh1pPJmmp`5uq3+P4Y{*yRqutDC&q;bk>Urm%8W-YK zjhXJquIA}$Ou@V$;uLG4=$z;5Go|aXgSBKDgyIIFBUR{rtoGtY43W@)02aBh@uqd; zZmLWXvM+Xt0=xkVBgBY;4d^Q2V&dEu7mDNiis$d_)T>RA3|dd&Fj$9m&uKG+7N2MQ7+n~c~REMo=)Vr+Q?xCMaRPg3eGrp zRQgwBCzB#X1?G++3+O`ypn2m$bY2Z3?A9!?uH{ zu5h*hLR`x%kKAp z+f)c2exFy|TpUt30zs|(KS53R7pN_5Oz42E*sOfZRq#oYQbgiI z=X=SIbV-gCv>^QG>To&Se3o>0#qF()J^xTpNSsig77W7OOYKlgPw`5xvS1Nf< z7fjYD>Rv~#vrHpn;SX|-=upMbhC^7eZVP*R z&|wiMv}jR0-r}aTT+gQqS@g%WWKxvMV}?WF&;aP%`xdU)j(N1_mRo^(EMx)96meIB z)~_l;Rg1(*VN72X{r3ZNsX*a``JxQ@!8^fiq>>W zYy58l2qc8F8GdwY_&Ex+;SIQz3J*cdR)ckhhe=nhvgvxpAJc+mxmYeZ&y^TDj%-<* 
z(Y{CzHj1dtp6>WwX!UD>H;@+EvRPiZ`GZb~Ex9*UEEMA>Fyq226C4BJj_c4Z3Etr1 zmD6)<5M|`tI6qZOi2MvbrrUO7{Y-JeC*Z#^S4|v4k9LQ|j>N5mSHWMwPavq9B};&Y zY8(A*6ME=5c1wMrFq)(@gtNV)s`Fg=#5X}UW(j6i0G*@4(@)0QLCWhQwotC1`n*57)1&<{Cx11h@PqOr;cLsiPGhahb zKEcr}hMc-#ULZhca?WkLMnTyyl3ST@cbeU&4CoI^Hf`6#C{s#v_ikyIMD4^aONbb@ zHh&3j;H(hL1vlBErm!xmHP%g5osV0I*Co*@0!Im0lrjtwsG|!@Bp9~YA7Q3~MHQNI z#m^HqpLu0SE5&ATZKzitvHeDFbH5_w!BNFn@_0l%FVnOj&d7|m1GDzje`%F#)Fj)h zMbd!2mh;|%1A+Mj+JAFbBzjaXk@c2THZNy;mVc{y_BOgOnP6miJ3aux$h?DUDRc(G zO|&Hm-j)gy!N%8H`kw1JzwHR#F2oAlPprFMG=}4^P3U6<;_IONf&J2dRNj`u{?4+g z=@7F8lvmSXd~ZmnUS4ZS=Qp5pWOdjJ0fBz-KY=dte}VoRzHR%T6{oL3_JnP!bc&8s#H$&m&d=vGD)}IJrlLSg5(XM`#|8= z2Zq-rMqtcVC4a1B>1N{W$(+J;_?QhO{pzRnO8}1gptW>D-8BodlryGF}_z;5(rfl4+eH(LlU{MBa_CfxAVCU17(Z`zyOQPk z!fs5oNsmvPnUjaxV%1v0rQx9AMu`e~L#_0^<`(5XJ$$x&d*(ZE`b6^uy;B=ZqUi@~ zNBqJF{b@|!JqdAH@=G%&t%!M(r_G+e)&)ud8akABoEWhnZ=++*?o4hwF)~B(FO(Jf z`=MAAO~}uRh`E8-!>ToqShcl$CRM1-fb8T7*Y6E*!lzB@;*)$XUXN73wO*;!&8 zpVIn1$H5ww=U<$lh8~=0^kQ7{$kM>GS8u9MQAo+B_Bp??*k4{_22RAYVi2%8ibB2v z+j~Hfc^v%O8+2aAP`Gb^>p(bVy@M-s&rhM}P7p&Va5slm0Gk27PqwO!@F=lrq#w5p z;ssMqGUE}g|CO`Ut7U$;4!9EdS1`0rW~Y8i=C4ixAp5sqp)(Y_M6(pcyGsM2s<>5& zoa*z0qdFlZp{gd8D>rP6eFuTrXm|j)W6eB;{!pa^X1gJyN^nRI%r@3+I|grohD*0j z1^j$4n~Jqg?EDRc2WbMo5P=}v|1a|I-?i`m3kV&6=PFRqK<8j*NhfEoXJBdchk{7y zkMI8m;UDn|e}WL0AukKLwk@6Zlo+d|m2`sWU=_T0dS1 z5%5Wp3r#x`Ljp>IWPj*?e$q>?pB>{l++cdl`g#9y>hht^btsnR#cnGUWf2L`iv{As zU8_(YL`W&--R08fcw`&Hi-a=;gFeLQsOF6w=BrmU zr@rGB46tEtV*~JzsD|G+X`^(LR+rtTN%WO*p-@?-9t9;A+Ix{oJC^sn0cI@v zB~2+ZLZ7?o6X_Bko-DHHlcT|INrP<(A_M?TciA9Qc3}l%>+}qumCz{+BlK5nnS=Y$ z?F#Ee%=?^+iQY*kB)8uK)I_bdmgHW+YTHE|X6-VK8yh-WBphhpAcVgS0K*V^U`~Dt za$YujmdmLALVqK3;CW~}?4UC9ABBiEWu9a+%gZs8+a~SplMn$?<#}z>lGF*-V+BGp zdL|6{pVjM6g8>mVl3Yg2jN(;$O4m?icnLI$HijcIn~&5|L5}+qObF-Ct9xl*(iCd; zuIZ5r@E4V}Wvl#NRBD~pidT(d<%1zidu!|&*PAjJQ7bXR8-(&rNtfqyvS53kLaQE! 
zeX`R`G4XPfC{(WDIZ#Lypt3A^^K32|9Jzy>(fIVX;L$p_pnAAKwX@p((gMT z{v()-Qjn4B0nSk@{AR*EkvkQT5^xiRF=cU?wdIkJ=D<$8C?Y_eES^Ec-eb{rx`mN^ zeEB3Zs#1Yzzw2YN#t-e^e$?S*S6_ZOS<4h;H<&0GMWY#xfW_TnRVAuarji3=x$z<4$;Z{lz~YE6AkzaxOmx8#IV4ruUE zxY#M#jWO)`YC>LW$0^wxGXV1p7GOAII=p~bc}Iu2KQkmRT3jN{`}G+s5^ZmQQdf=N z8>$4jvLNk?tD>q1CllqDeoe+13R@2-<}x~R)$Cg4Utm8B@Pj3{gcaC@B@_ z_B(%1wumiEbutI48wG$5?{9VEzaJd_U$^C-ywX1(+#d~0zv{6n)*?!$jVKYrj4yiN z$N27tP`x-cLkt6cdvQ_yFf|x`$blt{0))sYQT(X3KIi1d>2W9PFgYj ziW~_XG?-@9ucKA4$&Kc;lK2CxkBTzp?akd56GI?tcyf_JG``1*Br*M%U$bJ-H$7Mv z1V*(qfh>bKgGM(8T_-I=e&c^RbEvTcIq^O029X6(tKq06+oJXe%9HTj3dGIN=v1dR ztXC|R*W92mH*XZiw_HvvXWgG%N_0^sYAr`0n~(fTzCngRq3#hbog5WeiWV7R2YNX- zQZ=@rcv%vsHNO;1ZMiWnGW=~s;GW%>n4j+NyF=zzicE1tdsIwqxEMoi1?HBz$x8UnMN}`O{4`hi= zx^L><7-EK+{EN49kI`*0-f)(4XS))nQ<9l!!=@fb4xp7R^o5-MfZ{ta1IP2>1;O7sHTGATxD+KM*ocU*m#aW?{eY zEaCh9_=G%EH#UB$mp-gHmabGK5J?Ro!HiG#`Xe1|QTqKlMCSR* z0wr2yIW(^|ssKTaP@|Je;u6`_tiFl6DEban1E7)>2XjApJ**r7sn%^DLaDJecHVn6 z7p2WjPxml)vsi8i$C6bGIdvt)K3szG(nq(>**O~qp+c>`u|+y+){A9!!U!dY&#sKHA;=EgNJXSO5KO>IBygH3NRbO)$T!wp3ervjgXC=zf#u`28n zcp;hl3CU67Z(a`L(tN9fg!ha~u<0v*2J)N>-+%TVq?IT-`gVH(h4 zik3lJtiI5kFqh3Eowd)t{_M|Wo9ACBq`6-izWQWt{oMhnb zGUTH;31m;av%<1E#&Ffoy|@kjvx~92?*)j-b}=5=-VbgnHBRa(?i+}TIyWVm#PHt~qH zmN$7*@$#CuRhvG|n|y7+0JgT|$3Zu{1n~%kAOmG>f^;GhuOu`p>{;eH3eiE1frGGN zC+Qg(_GE{!F!zB%?lb3X6u0YXq;}IA_qEs}JlIE_h;MAgMlY`iTPo(>86Vx&;4S1N zl=@nmTTWA#v4$&WW4Mp!+ShK8v zr%Pndx$+VVV?){k5|U?ZvHq4W=9K=f|6kfA7&t&EpG zp9YY?Rhy7XXi;J$6oZ4-{xs}a;PiQmk2tj<1e{i1lz-z;wMdbO73R@9Zxk^a^U1m9jn|RojvyfhI^mx?E0!K0>J%U#al5(Bax?U&| z@!I}bU&F#b@L)N1^T#6Yq!sme1AhV5Kh22LsTL#2+Waz!cuK4C9PF7=k>DK<@hTpOk^ zuZAS@CfOOerpnS_-Vm(amClIT5sk0AkiZQ0J8kBfB1CWwqxyMl*f z1=z}9DI(T_-LDuow)*5DzxLMO?4bvIiO;=f*v}77LoEBS^nWbLdb4-(HdV*4EZRw| z;%>+NFif-?AFjM-v(-Ur%-IX)Xl^!^Nc(Ebqf4M@Hjf?kX=b>l+Uazq*nffV?% zx~uWr76n6E1)udreO|aCBO_VI?ni5Ckgm8nvBY;9)x3)+R}L0uqe1Z{Ts!PRItn_* zU(FD`Kcl+se^k4XA%=!&n%If;xAcBTBDnuje6A2n4|(v4#Xb5mOzft0wkMXPs?;}p zLlHK&zH%0?b5I3iO{Q9>baBjzH+V0_QJ7_RWP|vKk!8U3be|KYmnIb7l&bu(k*VI` zDa789&p{#>vcQ$%2#;b-6S0U8i|A9#(5V2*yvo@2bCiJkW247gt;Vq0sydIiIn9@g z_;PoN!L(|0QIJX8ie6Xo&lJ5=;lo-D$~~A+`h^SXEDuK?-;l&zTU{Y0i}?bO(vH|* zveabmA_mN_M?Vdw)lT|Cylj-Msak22(Rc&YXg2O?sI?C&UD{j>P6rfTcSNP``0sS3 z-mzo4oj%{=snfX@@PO1CND*j0+2H$_!ek8=uOp5;S#qPSe1Sgi+mD!v8Z8Y9{-q9f zUUk6`X|thJ`8cnB*nluE}*&uEF*O82a1MN7NYGCj0Q^TJ<^v&yLS@UveXonLgyj==t*m zDnG#+!|GiliC2g41vdySc>*+KzMdW0J}nR+n?Ayi6;qk=i-G_M>O`kI84{DTQ}N2v z;8f}_cBt8V-+X!lgj{!#Tq9&}3Ojoj(Ou0a&CZoTAU1pz?#E(q+vmJC=zxuw zP6FYgsp3w-J4GY6E2gLBBnd1PM5J=D+0Huz21VjF3 zE&OlA&fhcEKOA3Q9L+5MBi=R!s=UwN*110gyw}q@HNTeV?nVs)83h6 zIzcDund02Acp&cMDq&7oy_N5Xf+h!$))%BY!a&KMn#caz~~ z2W>zPMc!pt18!*Tz=yhL!2YIm3+Xd}lRcU>tC&mX@grD(P+1$3IceZXc;Y|sWKaL9Hd}YP~H%3G7Dk&YO&p-0AacjEW0qk;mh_+U_8lo+;asWQqhX^X}9M z6yI2C^maOM2d7n9gh)?j?gK(~*#h0Mr@ByL%q_MMU?#BiFjkydgAA0$Sv8h=SgjI* z6A3kM-H;(ARirJG2=}q@gpLoTsnb96+6{lirm9F)r zh5U*_f%4hvyBxlySP#m5M{7Aa(ysm$Hhk;QU+U%gUBQFpnd&U1spb~3FcZvErR{r+ zB&GmdKSs-0FGtrJpJ7Jz{(-rhz2`j^NR<4Do;oXB#5#Z6dLO7p*P0;dCJL=xkrj)K2s>_s@cq%$HQ6k;l?=78Qlre6eb2(PDcCL zxCB(*#oBPRcTk;V0rIl0M*YUMCg>ZDTcaI&oPwtXYsX{DJhc4fzDqpyCriG4BX!>BX)F`i?ii&#o+#bDIc zh0(Cg+}lI(iaEhx=+E9quSHJkFPyq%4sLA68-Xzv7swZnNgLnMBn@_J{vy>RP5Yi_ z*SuDz?FbuK={f;>N5e2S(4pZ?EL2QZF}Ptdy-x7XLY?r)r4eNn++Mk5RSN zrzvm4RD>4PQ_P#0mqoC6C$5n`%ddEw04e?h9E|%~nNm^gf(LE8W6>xTZ=`seHsI?M zoSd@G^dLE zy`c1pUAUHE*GD-_&bZM>BRV0P>2=!d)C$h2oA20+7Lb2C5ZE@o zYe_lQwG;WskF8Z&db0k$2!V`Hs_~5fe?8C&@;E zhwfO54Y=oKwZs6TU#?lZCl{;r6Y~qTA8dDp-(lGndV%4{SDBep648nv)st%;2o*>z2HTlOd&A7YvgnVN5%#NDqzDugNFLz-$wd6MyQ7R_T=MVn`lY$ 
zhYu3dmA{%J5WoLE{w<~*ZC?lSd}jZ}^Zk4L`}Yy}hZJ7M%--I{UPkXP@A5x~xc?B6 zB>_jbzlV{K+93hV!0H1vD4IynLa|&0E8*o!kQ0$}wQ*g6;`fCxp!ogV{wF1rVw@{K z_pL(cS2FxEDA44EIxY`SK zi)~#g-gmKjWtz)I;??D}dM1LoUbtsaIM0$7%-M`Jhk$$D%~J<9Mas|Xn@89oQY4uJ zLl^xs)Ag9BMOKD-`>{e<1ic}>TQs$a8br8q!}PeH&pQ;$==$W3?rTFTkhKf$$|TCB zZVHzoA#iekxDC!J{xkuF{zS>d9hzU|PGdDjm^lat2aN8k#NgSpZP{;nO&=q8HrTHi z^$sj^e*sn|ym8}&JA^*?Yh));M;Ec4s0R`)nd1knLZ$Z*@C-?|93fIof?G#ja2P7; z@5M)yLMJud^4yulc9h|5qooeSGis%0D@~oUZi*Gcr$GtJd>4Un2io-rQgnF=Ia!>n z)|dz>HPUy{;Y7I@sAS(~KUhW;Rb5HD`$2g%Z=z27ohZ&h;%PJqAxTOsmk^EXvX!>G z^K9vnix2FOA6FWhzPS|HNBr^ycd~*6Jenqr;m`QQk!~WlQ($qTn3-f*(mo=XM905X zm_0rH!dymPmOe*69B1j}03(CGyNb_28<-ON$SmKg{u2JpV9cv){?So3^GQE$!+{7p zPDc)OMXd{_#iZc0l3_I<+pj#^_T`9<`I`o|LZ{e_t;GAFE;uwk;4D)v6;Qm?+d2?RNVk&%SuQL$UJc~ zR;QHmxKJHsDyCF5IXmU|AfK1i4GV@M@qSpTqb7fR?9SLr)2s5Z4RpVr1h;tr(P?oP~My^ z&h|$+j28Q!PQgoUmWtBVlRSgYi_M2zifuHVy1FRA%QUSGMj9{_ueRwP$#_O}ua8wu zf=QMOsEYN1arf*bYx~P)V30_5=n}+n`k~O2DVarQM&LxTT(UxyGxIVYi;bEZ^_A*d z9C=#A6^e%{FY6Bz(Jy{__$hqD4$3ZK^jaxwpypjGz|7@~o9I!^qb8XPj3cs-BgAepuNcA=uM`KiS}aj6?1H*h73Aye4j;- zQf$A@x9j)xafpnv1jmuDK#OcyI#|eF*!i#^Th!CrW_DMMwzGUi1+j66(IZ#o9T6Io zZMGpy?}1s%3ze2~$B?l#ruJh%I-Jfg+Yeik?xVhUS{1f&WJdx>Xcbb5r04Rfa;OVp zP;>@SBxTDIBMg-=Ai2b;z^uPKx^m8?9x1^}aO=v6o0)v}!Reh=`tPv6${Z z`w9=E`aHME2**l1YmxS1FNIl)&50@E%&mTg9Q5Bt+($ z(Tx%F#A{bqtB|I64EC1!HWlM0y=0vutvc$QCwH2hcP~yFj_v`{Cq_->W2bn#w%oZ# zuF~qMl8SZY*Q+L~e2@2%U3F4qJ4^`Zl*c#<>9c%!^zd_o@3x`wpk4>nQ(vE9M1-J$u*5l3cnjG;JpWmj%e`+fr3D?h9d%g<4XY-8mXkP z7P06$jn)N@s(r!N`6P;2`qNfB3xizc8Y7A7B9Goybx6~t7`H6!>0g%n zc1E%`iPcJBmR6Cll(S!N%vT!E+|93zCF3F+Wc1l#jrTRZThlhpO~Ss}SpqNa?1PXx ztbl~$JeAlP3c^c6OS5L5PIb=(Sb3p9PbSzVHbYhIy#_C1i0)9Oizv`*erx_#F!TPk zTcXpmH7`@vv$s`Z)6Yy^&!RNh;5?^o>u4;*1SHSIIlY@hzJeqmZr>>cM!&PDT!ne>|up)t|r3={jsgnPyBu zMDh788*4s55+DP|bUm`f%es($G-3Tcy4?N(08~ussL0GEnOPRCSrV-^M+sH7%GDNY z!L)~rtcR3`j9*x9eD3a>;sdE9PGi??#g7-QkFA?p#v7gQFDacM{O}uOx)UWyGiBVl z$cCWNR3uK7-zigOZYsky7r;l$6$J=&DTilPgeBGqZC4W360N2D^76p*Tv#~Nzv`q& z>Dk;A)q5*JiLXj-`NjiHkJ4H~_f~@=O;KG)^s+h~^eJseq*HcnoGwdHr0^f<{gQHb2XU4ZBLewcnROKv$fa^zQ6mbv6v!qEU303JwXCWtL&Fopgh7wQo#z6@L=#F4j%H@6ee%%>11~1Q zbd<9xa(*sHwOmMS#|4p`#a%B1cooxNRQJ9h!;3^@nXE66^|(mi-3s!`>hmaq>~*7g z84!7nq;9T)Cp@x0v~T6&YmA!~<9*Fc&%BCB)Aa?8Eg9=_lk3Y~J`Th*dZm{1Qf;_V z0R+1u!FBR9=ad49A(?S=r#_o}zFI}0(Vi>mwY!WnuCzrJ9is>2Czd8C>xJ?^zMCw3 z-c_SCYAz7}wEx8|ia+9z9SIm4%ajqI8k^?-z0g!8RPn4AzBf@g^doq5d2T+6b|9p? zCn68^Fqb4kB^$5FTd6KKqOvU%wCt)py~8SBBjwKwZ}`z);=T3q$`$W!knIC~l}6Qt zkstdal;LTZu>t7_j3v$ZXtyiDC(re>pT;7bEPP5wleib#H3*>QwQ zVh<(_d_gjvYIwM_HXJzWBI_Ds0MzHkZGL-KXh3gmkTPGmc^*G~++yiY zXIUN4{Nn3-DDs7%SNw-EIbMQp1p3jJQrEZ5t|FMyHUXV+l_!A)`OVzo&1t54`MzDc8}K~3qyrO~;$%6c z8?9&kpOB3Qd(-L9qx;36ZP0u|v=+xDWaa}l2VK}rhk61@b;6t8iShN6WA$u^j6c3&s0R1Dvi zWSgaxEsfQp)m9ljUL%KqiOv<{ZKQcJr48y1m)CjE{X z-nz2(c_VPU0p}voN}siTJP#SGoD0B8Cqn)Cq#90@@vtFCNf^1ABbPz)j3$GIcG1ah zz!k1GG;*kEC)sm-$8@@%O$(|%giTctyfNL35&BGxNKfdL;pH2ylUZMPvt$wa_Olh? 
zj7VB~Bu@lGvcW4=ub{cV5hy4dDNJL~Wd(7vdR`v~FsS7A*Z!3|ua}>2F%GrO5ki4k z57i+|Wq(DG0-^qhnvK5Ka-dXM9b=>T?w8da`3{B9Ryu^J|G()Lxk$x8?R!F zN%`S)cqt{SyE;=-ek79WU5)Cgtdr;&969#mwORc3s=Ol@Zis{3`!%3>e|xdWebj3M zi5V7jTmMX5bdS(me?3*Q)dOrofo8O|dS#`JgCmN)DymG|&(M;A)hCW&;|58I1J(W# zKzmTnVDbwZ&#o)SGXj^2RdkQMl>>2(4s zr-#@N%JJc{ww@SI!YbhP;ulfdMz=LtX%JBslQOywPIjIaZ9B=N0+BcMD2qh*ORecJ z=%oxpyWy@J?<@JL6HzQjL9BT)}$6tYHuv~9~gQp3Z z-r1)_-j2JbU}WvlImjWMX};YUzQ>Si&o12$=tq3OZ|8_H@4Y?{NuK#XiN6aRr8vpt zAc;}-+;_rd#@x%0@w$1{!ez^Nv^+#)0pp?O0_0v-cm(TTa#ZMe<@{PnpTXUGw_GnE zUA>M1CoeXG@H1h~dzL^m16>HdL!)KAyP4_Pq-j18QR+G(jknBz5Rb&%3LN=;*bvb1 zPwXhh@;6JJc4LP~*$$6pZa*~qa9oJ5!5(gehH8y|U2SfDJ&pyjj@xs{q9)jXM~_n) zmVoS)bnPNA*Z%z39`K66iaT8?=3#p}g}oKj4NipqR_jGCchSua31yj=>F<3)I??#h z^N5>OJTvO^iRFbLF@e{^2XeN0cJj;l^ko%yc=)ghQ?G4jEK zcKvKTS8%32AE3c}LLM-DhPIgTgKcbl91-yzFwOj=$Av5`19n9d4m{55F*Ozz+Dn6gRi&YMWW;9{WhEx% z(dLI>BpAYbi(OF`YrYDSjTqQ{EvW3(tKBsJRntCS1C7p0u%olfrw_7sFMRybSzVJZ z+uI>mM_>KwkNY0CIp9)jQBUnQd<~eQk!^jx%;Ijbwj&{UiFzR9PnxpnThy3Myu*qm z3+4t>H@$kizK1Y!|HAuC-D>ux=E(;fltCc=vljh#!|*qm^cU}_^pE6_f3WF)7z+NL znEk=0M+DUqKq>c=AvTzUkUV+&(ove$;;S1D#H>0>5$XL%2nqN_iPO+we3#DzY{^^A zu&){Vy-mNnTxNM(G@IO>zYI=(n9xt^t|()-?Qc7T4)(lnc1!{L5~GD69ZdVkA>=%8 z&vPuohVG0!cqCp_aFWQ49^SS4tL(F&-0OsD9m6xv zwm0Fb*tTukwr$%sD|%wvcEz@BS8P|DsyLZ+pVNKj^t|)G=6b*8`Q_W*d#!b^8$hX2 zY(|<6$w~duP{%MPwD*0Zgy4z!HF<6)E>sbob3WEip7pBsu-G_QSTsRRspB}gi+KU zEviU3^*9=tgVt4!A;*tX_At`}TL^PC+lXsL8g&c9Uc*`|D`(MUQ}-}jwadbuyB(${R@|&3L5|4$kV=ltSn5_W z-XGY8Xv_z{C0t94b^({}5bPf;nq2D89WFv}4KQwXmAb#GIB%g1D1X119x4|0ZIn=J zts#RQ$xK>bMZeQ+Uua=~ZbTm$r>B{Z_wpi}W~YZ|=#4TgRsH6##?g)S5mk&=l|xVS zNz>|VS!=rncAo#GDuG24QC2>>EasK6`zTLZREpCwM)@;TPPOfvh{`pEWReMQK5e2A zN{SC&Sdiu}ySZXae84+j8cQRe@VE>UFmlrO^^GtOURiHFHwSZVUGPJMHry=F-?weU z`o2eMEA$>_{tb+f!Hdqfe>TzmN83cKIUfWl=e_NKI(Y0J8qv2jrc|&1LFM zcnb!me<E_Bx~ttW$Yo(dBX<_W0Fq?f4lU^oc(_KPM=(`adSmAK=-r`}}KsY|_wF#$H9`YtgPMY1$PdprK)a1tR`B zA2wtVkZu`1^aDaHY{*EKxJe`kEl!JLGJ!XJjA`}ZkaWg#RL0a{s+B6aqVtfmhdg% z^N^AnzO$3i)TbCZI4K?i1NliuN)oxX%YmX8j?}-v=(L9c6qXD^<1ni+HQwBmwT>Pg zA`Z5Q`Y^$X(7ECOp`ZtA&&ewo`I$#u39J8!WO%MM@8Doa$xWW_y9FHrM3On41bKo> zQ70TRG#d1*!A>s}8<0z``2Y|MMw4|;uR#@i&Gi-#4T}YHCL{|vRDIDnVw*gwsR5khQ%~C{vw*13CLPfjB$a6%$y- zSUIOULAK>!FJZwBgkb^JH#iS9ywjavXE z$?7YSnFmND$eK085#lN6bH@j`D?meojKLq*70pVmndnCZq)}}|dh$52q!Ip$HZJX~ z3oc$JA(W%d{8@Kk2%`@yd+kE`6EP{`bgEN@-b^%WSDb7La*DwE;h0RGq`T9hY2$Qd zMcg;d7LAl8B|`CD#U6^?@*G9oRDkd zn}}^gm0s1l>xPVEag^3gN{BGB34zU5@J1_Hc+rnLR??^KVI9vD+Lg2(CF^6Z*0W@T z6PM}2@HxmkbKOQ zN8ZU+!j&*aR)z0qfb2D{>9k0R$NM6U`#&*9Avoj~IaB~?gTY=eylF_!vl zxXB{)qm;CBZ!@_`9U05(51&u91k48;5?Kd%FobMY0YQCtq3(8XyQ_EWs9d@6dv;xt zjW>T+D{GcYNydv1(a>z*-PoSmwaG%h?gGswo^A9`kl#gHsCrO{`|XYMWNay znBzX)NzlO-<6e=7BDu~!Jt3S&kgo3Z);b8IQSr@NE!#J?Fv%b>_-8p}joHZUBJr79 zuNW06NEItHPE-o^;~j#wn)bNap(!HvAvjxNDKv{3qcy1-TsS0z!u4 z(-L+@5|?1&TF%%gr{;3y47(lnj5_SYeQTVhz5tfaLFQJ@{u|n)UT8(5=Q!?^z5OSN zE0T#>8T?(X?>^`^vxPa0Gn`4ji_83}?lC0?tTF6ND(6=4_!5}qU}k4QoZDgqGtf}c z5l~6KTXb^p#{(q?UM8NcEGw)IrSZm59U?mt!L_BlR+UwS4I$ZD2+a6S`NU@%2HFhT zKUH%za_=@67Q&S%r0`tA7DJ6x+66Hp(uRVW)1c_#zk9O>TD2M*Ol}0Mxa-Ca6kPu> zK1`Ru25JeB-m+v`==W6XfbcE2C({-#@CMXT(Js5iG{d8U&i$}90wd258CUjMWQoHo zDj#xFCivz7zK%;Bh|+NVJ^AvYwA^%>By4NSK(aPr_nj5+8zU3QyH3Ds^CUxe4R325 z=4L)WNVIKeG3d*x0Y)DpSvPI~Gz{$QVsmZ>Ic_Uil7>S9Eb>HkHa9P1q>2SMD=unQ zHqnq~!Q%9z&&dgf_a!)j>)98GvI%Z+9Go^u3FiUTlMCtRYdF*Q7LC=Opb!L{Y?9)Iz| zV~%Uqd65+WyNZnztS=er!PqmP6so7E1r-mF4Yl6D$Cq5?ytzrR(C}$IX;0t8IGT(a z=NdyWpT-i&>6G1UVB^Pgkt?!O{dq;R@9HCCszgl7wgPgL$^4umqJY&UtJP?G!y3>eJ8uvMUk2}4XGN7d$(JlWCSx&5!`wIY4| z1kW%=jx~v%P}cgrV4!kh`$aBt;CsM+bX)|1K(vAADc2p2sPpX$T+BB8Xy5#rVY%C5 
zBWt1c1wp{00zkYe(!K!4eIt=N3$cqM3~7y+uCEz!RtOgtfrb*VZ27=6nce+~d7$5_ zoMNC{>YfBUb6SP^%=8m6XLqX<2wR-Xmeo^5e4%KY)dQ@UnWGA@HJ9IIy6zK#q3;x+)CmykG{!513!!M{!p!qqDwa&NdJBDt}DbiD&Y|{y6x^>Rfs(w|o$CT;o z3n*a-%rd5+T_?Z+XucKZcfQcXjG7i@-<`So0>>T2rn{z$6kN8Z{qLTu}2 z)LbbdGr~(pNBlv(7i^ z*S89Y9>DflwGxB2WRiyGEBzlsEAAKghdLgDgkrevkhv$m|fW&<3?t=7el9so_}&2L~ejxQ>Nj zn$AoUQ-zldhJDQ?VqmmUuA;h>W%8`|d`bm_eRB@d38&%dU_A3Sk!%R%q@eA5fCAYC zAWddg0E19y^1@_|bkYt#(w%6bAIGdiyxD{PS*se6Z1A-FT_DZA<{f-)Lrtu^0^HZq zU6)EQP`dho#qfxJ`TV_Zva$&zbuJ2%LD=Fue5#hkM?=@T%aC^5>Y9obK?C_)eLYbP zn{}dk=iLATG~^M&pvEAp+6K4#_ovB&c8X+9AIi!?que+h7ScgjwmD9rH0z!&hAwqY zvYQhScE?Cd?O}BzTM)pMKpaM-%U-GFo%ck#E-fujYJXn?et-gqB98G1bq1UMZ);P!94Tr5kaD(DLJm@8K^u=H<(xNqUaEz~d}i}X17DeU1&e7YO>L!A+c7zv#K)iMquXRi#y zcL%0&gW!Bk)T-#>H~f_4>RXuP1B0IwM+pKUR6rH$(s1=~I;*^PpnF%7DPFAV4`CK8)R=IsoO((%_Yrk^v``K`Fhf&p5DkHZUwP2|+g|x!oGzgSE+J@Nf371E$FuXEl=&U;W)iN1q$6hArbc?R1hjfjtJjYqED;ousJ8=g zV;jauKJS~h3swnE)E?Yrf?A*))0qSF^Ph;Ja@$(x&(8}N@m~zwe^Toe{?AMI|4i)p zBN)KaMczrt-p0nr(AfGfBlTpJRi!_q@w_yK00Jh8Fp9hwCTJu!R9u}e`A|55rseZ3 zIkPYUIV^5SB0aq`i0BkeQoar2vGtu}GUXHsqImP!9XZ}tZBI9M3+P{h(hMYM$^_Pp z*5U*6jrE+hBc7I2TQ()nf0n|KOgU&!xQR9FQOO2 z0>F@eG3nCkjb!w7-(T=Q6m6t$LFXj6esMbz%(LBN#YZZPrH=lxs2^) zZs;MSg^Mo}OXuH_$rV;1md;Mk`h~>@!1dEMs;!BTXCQM#I>Qu-t1Bd%yrUx?0YkCQ z+U|*9@Ww!ckYTkK1FyJ`_h%@q8<~a!?01l0{VeUw;zyM@2E)eNqo^H=W~GPUGxix# zVmK=c9|cGUo53&>z){iHv7lHZ@@Soj$d=Gxqsf@mimx1=TL&REXMd5e>JS_fAGQwm zm)mOFJd5}M2a(Y%k}(7u0xsXWFzLPchJKX-ufSD>!stlvcv)z+D6G~TQLdzn6TkDI zyk}?`p77&I0k>FSf7qf=m5(RQ+A6hGhanu~nx@D`?Xm@N3JdX4wjD!asn!Ge5^7l= ztA^k{oY*6h_rmOl(x^=VQVavbG#>?%n;w37pFP#c5l~ZP(zW{P{b$lZ4v5^LUx7{yvlp2M> z)GJ<+9n7MiW;!I>F}7=Cl!4_GUtwh1wq04gM&*}!V?1R{`3f*~gkp1pn^{zPyiU)e z{R$Ap6+p3PN!X6^N*ux6DAo>aypdw}`_s^y1|>~<=ks<2{uk)?PdwiG@7wh^9zfx-c`uelV{eOP_sd)M8mp}jg-%V1nnzk~ID#AM)0w7yP zC=im-Mh1brRfjz91})Hu6?%eHQy^$%b#;L0Cqu(z2tudG_8TME6q#v7<05$Ft73}p zG);>Up>{jGv)Rl2Q~S#~-{#!|f6ptF!Iy33GEMqhT0TqeEW$P(-ZO8}%b83+i_qrd zl1ODJ-1;FjQOuv}qNjO!%WIhvYQkb?c)NOO6ulGJ&`*xz1`Na?1WZ z6$kbih9s^$j#O>p^Aew?7}TH_)F1Cpf^Q_ zAi)VwZD_qUSWiI|?MjLeUvMfXAiP0rs?7%W)7xM@8`S~r{tviLS)ts_Cr9R@3Dz90 z^rJ!%mBS<5PV(0LmIe#7CC5yQ=%Dlm&zu^C!!G)W(>NKlH3=YmgQVT>r+hd#H@;VW z5(pk>!rPxI(_elsL0aJ;Qwhp4=|r8+I~9DRD@H6$O1EBxvhuFCt4pbiN5lHCX*#j+(*4EC(R6_ZlRM_&DN02K{o!=5z1_o4PZ$0n$ zQAK#*Vxq~amuP1l7U!!c-WW$1K{y(Yz(U-SGRI#WQ&vI<)zf1wEgzTN`}P0dvE;Yr+0wl>huBbwYdt@$&A%Z?wyHhsgDGw5^Guw- zv}@{B(Fo+92k+ls(DM5S65qh_d@Q9;0R7kuUq&*SQ_{aZv-{Fi&YeW-xN3HV{@|Z&GU1);y4JVUnvGXjDst1jIG_xMrOEQwkAL0Jom}Jg<3GGcvFS@ z)}f098eUYTiM=*f0>tZ}kn{&1nr4Wn6;^Kw6#z$&!re_v<`ga}0{%LSL~x#q=yh<( zK0O~59|W*Qc=!<8!EW}2a#9f?>>g%$)t!~C+N z7&2tSR50U{pv>Le|Ft9XSfVQodt90Hy-#6n%!A+B72Rk_&UC%j%L|2JSHzFkvtL8?wFHV7k+ZZb4`LPfN6ya+*JSz=ws zZk_cEeJU=c%nBEbS^jPJM(An0b*%QO21V1hD=2+4`K#&fSN5z$xk``B&j_pHFmA*Z zt+#q$^LO#amW<4`(w5ql`wkpcITq_J_38;{;ptcvI6w_>sNBQz>=2sSu04Xi4 zvMRW-bOuwpQyrJn?VCPxU0KcX`6Y9Im2*CGC2f-CNv%ZQ<1F9b4;R+&e!oZbzZ@Ad zm;EAu#P(TrkKIp^X?dNWIQ9!~97|zE<7fk6 z8{cmz>a}z(E28O0>o|-rwSHHdcK%?7t0!anRBtZW*EROd>O*E6Xz8^Z&Io_(1QWbM#Vbn;}Dm(lDqr9pR}0v77Q zfUnFQ5m63rylZy8+wvbow|?Id2D=EIwNn0R7o7i#ctEHw)TrFL6|t4Bs+ORofm zn<}pEYWK7h2Pv7_QZvrOub@XzSu8o^5y-*h?nueaKwy7nr6DNIPsTG*uNe1hQnFGy z7ko&XoxDScow`S<@h{LCn%d&;XFeOtT<#{d#aiC@rfl6c#NN=2rSqP!COG%(2EFs2 zcG_3Jqu}%QmC8G~{_5wewm)m|E_;o!0_)-NrwWjh&@l9il<)}xy4;7tGst$kM?s^0 zj3@#?$t;Gcd=>pZ?+0KJcR^zzFK!sO-u z(PztB=$Q;4%LA zlfS=)^HX*FbLWo-{=ZkQf42WLe?Jno|4PlH7cn(6bhU92wzqZol*KrIX6Fgpo0uB^ z<-?`2uJq}!%$q2XGN=lIf-*>Lg|vuzg(cDk5-k=)qE&o2QH!~EW`_P?B#QqA{4oID z2c4p3`E(|(`P)W~VbvmD<>GQO>ypp6-P`Z|`4+YpgbJp-{(`$bja{aXfx~)59cQ;4 
zU^}&@nxFfrC36Bhl(W3_W8@!>qvPA3m}ylbN2!6dm7{UqzF&hKaf0DZY`4?^un!@~ zo8vLKj1dEjweRxsLJ1i$Jrv@Fh(bu38C}kA(*jU2ywq_z&d&Avkl`L|LJe86DZz?Z zH`!$Ky$|GHoMnoRudgCPZ0td5g1OmVb<;-}omvSWR1>gU zVijZWDW+?|>?YhA*{zx%5{N8fFlMLnGw?7xBToyyc^LYk9Hlg`*a4{&)2(?y(Lrp; z5Jz&CW?FJwQI!e)lL{(fh=VREoUQ$I%*VdoYv`6P4zrHaR~gygG=*NEdx}s(6ho(3 zT*<`X>S!dx#umPTf64^qkGoL6#ayI&Uc2F;qH)4N=?o(tsVxA;bCfu`5BQlmhvGYj z;hLIh+}*Pz+R+wBB&?rkYj7hz-?2zdD@mb|Hz`!)0Aq($bI?esQDjXniR7{iC)FCt zWVEQ}#bfxVFxPm&oVsT>2lOFN(i!=fBi73ZE&m{~5$dEbS~^NdBtcAxi%Ok4w-~ zmQ^iKQBV|h>Of6s`O=XJ@<=SstD4|DWmCVR0n*LrLEePB9|;&0_k(W6z&{-yC!6Pt z!tLsro$a=trVmGE==HyDU5 zN8^kJ5M8&~gcIC1c{Sa{1@M}stsbwxY(x|51FHKRajF~FVv_eUFht9CMob4DlnPGD>#^y8I>%Rv-4rj?%bl7!jp)bjvJ? z){aEGenV18MOl{ism)?AeqP%?|irI@J>9; zw}1OGSi^sdmA$XauhXZJuub;HvWl6^wXp9POR}Gw{Sv&o@84_UKVaerO9d29&R>u@aNJmurhzppU{V7D2-%YDlI)6# zjaGF~lD-Lpkr5m~-{7 z@W0FgC_#wF$9+cO_kxLO@E~)T;}6kaZzv4|?a_o&7=o?t_Z4}>G`OFpva({4_)w-l zNK6YRpYVV%JE8OlIxR=J0z#mc)GT-O*m$`jXaE&Pde!tZ_wKSyV!&?ec>30`A_cQg zwpML9R!Ei0zjaudn#^cVDaCy8ZSP%!T#jfatdsp(^Is&;)gYQ|Sy1j8>sU7Ju}SjS z?zA7?mlr1-ea&4k+<+gxAsogq@oY~QD7a+Wf#&0-l!xIzsp<>{1$S6xK$7jzBwNqO z{;=rJQW})p4d!j}E)+&J{2jo{Om2AkgA*|KFLuu z0VU7(PeLC}IfSRw=eo+{zjR;!M9%+{+4HBo)rtPk?d89HHA(&m7X8DU#@WTt$;H*- zuXn^%Y3>izc9wJ&2OEH;B&k^eD2O>g-<=O2Iw#6 z;$?Gw1wotDj&AIk%gL>pZGJx>48z_XG0l9Vy40e;{D%nqoLh-9=X`+Yr1JFaYWRW( z*)yfEb1koFty=?dKi#1aDNi<3GI{!I?VrP@hliF@aN91##~w5g9kZv#D8Y)(sS#e3 zjSEksA-BU0Q{$sgj-QDJo{hZRBPYBvU2MNZr3hX$rdr`G|M&}_Nk16_E)-YC@I(%Y zatpbP&X>ToJV#TUXx?8G9MoBmVYm9 zF0fMDL}!v~$l~#$y?1mID4{K=cg*aoKKE_~j_g}cRVv#|I&LHQ>PO1W66T&tXc0NhX4V7BU+mEj$geZx2RFSoG*Tnaa&NSg((qyDABya z7Sx7tNM%ZO%a-c@bNf;!+o*o{d44!P%h3NF+K~SHO8>p`{C5(^e+tp3AF-j+zyGS} zYWk@vBw_x~um5F?o&D)a{6}D!GL-2#kpezM0Y$Y&L_L~<5P=A&RRyXrlus_5a7S%6 zX+wv|n<^iY%&Woc&se6dbUOwif?%@E?5>CDw&RVFEB?>(MPZyns1J{P7#@KS0qBkh z>q=2rh#vu-p6xgQ?Y`eP1yt`HunD`7ZsfNHNKoX0uM3I#QM6Q4!u*6CSmWE?Jg!C` zr?7-REU}0gFr{!uKJ^ILp=z=tAwrC2ewmqohYO6A;|$fq<{j0njWa)CGXvBaU4Xo5 zziGxM?_7w=R`(o9*X1-~3)$WRmYJP?N8>z0t|QblhS}f&Q^fVQ9*V=%$RD+5C5cu* z=SSy^WaTSBmv5|RNlATIKHEPY)1{)t9#Y%1m%<~Dpp!wd_I{1)_?jyF;A<@JakF<| znFx#R>q&J%F%W0jP>F>3V!PW7#UGYUYvm5-UXG-Fz9b9SUBeU%c7cDOjWWz4bkEOd{qcNd`zrzL1H#(x4X-AX%?5ee;95oR)1>A~yEeW0 ziiM^GPb;Fw@e*;mMo$|)cIDC%Wl3HdJZ)0?pk64@`n4QgN0}NuQkYfenTz?CLvOt$ zH>!f`jqhH~<{fZAM~I6NhgEE|9Y#0j*S?%BGI`mV&BnZCxu{iEeKb%N!sJH{$jq)= zdl^iicZ+hH722LNT*~>(UtoN&Hojy_4DG=dd|k7D!39N&JO<7a%%nIMyk2n0m;CVM zs{#fQ0PQKeDAk&=HA5_b%~v9QxKGkRb~^ih37Jvly4vom)m%XZLqsvrZ0t=P-Sq&> z07Goc0FqT?anj7P7s^<{G&4h68dZzaQ6c4X_i-WaVlSxDD^CEDNQ}M~DZ2;pCYxrp zdGnK*I@wiUqiM5UIWGVzkwk6ISJB}v#$5F~m5@BmhrOhh=b8yr$vOcf09If2$nJ}d@B<$Q z3F8}K?c)d{*|D2O?>N2*QCA%GqFxbk1KVc;HLpcv&2y96FIM%-Q<}l2o$QY!RTKI@ zG$GTSz<=ePrWQT&;?hQ6AGq2btZN?crCMj_PPM@|wD&EWHs46dJNxb?Pot;z!P139 zGo&&do3RueMO~H)M#qY!CuJoqv^sJa+uqwd;kAwhY8%%r_Vz2bxYdqe$g+8En6+FV zEgKyDi1AZBjVrc0PszzqAkp`*+~Ch3ZKv5Zl^7Taox36K=|{8rZLALo?&v4CpJoiq zR$-|hGc;P5U~GBYo?%Q~Heoh-jcfZe#(ja)`$u|R5Z8FZh8+t2%wpTTD?e)-r(b&# z%uU|+GNcs;^rleW{RerX$Pw$8j9zWxEG?YF=LK-tcgNK@=d$WhR&V)&-|Mz|&AuIG z%KTU{`(7oj35)a3$!bc5s2p8v4c{D6^%~+l<|x!oZc+qF7^h(;?AZB&q z#DZ9a;$>f9^6@DJP6#SL7psi|{VZpJcJ-;L_wNptV=X53Lffpxz^QXR#Mo^Vn+3ii zZkOyBTprMcpE3^w~>dxLkT`v;Ry!+h7_nX#8qcTzHa zVTyPkSm5x!mDx-W=tzE6xJSFgm0B6Lx$mK*qhDAGN=k^3tfZnPPR6v5HZ7;5@3jeM z4#0{ed_XvFFIe6f-=eWhKEVEX=Rzc}CR09}a_#?udH&f6{s#;GOWN`Gd#B)JZ{qqH z)$(sF_}`m{Dm7bmY*7?HG6CiOTdQJw3(D^oK*N!Z{+Gmc5$peRaq4a5?$<$=~+>niez`H@b={jxGq01X4{}4ox9s6)`pyuikC2va-ZQ z+3cl?ULS{@G%OhfW2Xj+C|I*JWc09;`trVo?}pbR`)jx3memTV~=-3RQ7}1IDFwbmdGw21b0(%OiuJ z1D_E?Vak?gJFUnqp|rAXZuc_{Jh*BmvRYlr2Dlb8Sb9SYW!K;*9u~B`ES$JQNh1+} 
zH#SqGxl?iof1`199G)91ciRHfLNZW%2)B$xG$3EqC!YNNA+pO?ZiHp~oHYqVUeIQ%y^ zODA*hfGn5}S=A;2`6W>R=Y`R6-NqKchmWjfd@Q=3uP<^R8r1Uu=@F?>Rjx&3Wk+&J zQi(lA9YieIVDY!N4KoDj_Rcy;_ptjQBf1U?o+-9tF;QD39#0UabcJY09a$%D=I=Lc z-05o$i8XfFyvl{f$do82)L`n9N498u%demX(2}6fxRm$Yu39R;6yrMAPzWOfo2&Hol zDZtoUU<{aA9TE{lwv^+-rWmH-^j&p@P_xV&hz1{NadzMh%)m}BuXA(+WU_q3^ms*i zUG1rbIZVO>xisLCb=7g%TQAPz62SCG!E}%a^S}O}B6={b2GFt`?)NdQ?~;MZ<$YdKUFIHmU)6HfP{-xI zNEep+aH9Zm4Pgc($FHrTJftovreo1VWA+6C`Ay7>JzVW&}WWITfdy=u&qH zL_b349zp(rgOiyXi`wqAltMzQz8q;4i0wLa&zV@eZzwkGAyWVfvyLJ_~ zO9ud#ipT8Ei*^f!Q}01+E?i$ozNgf$eDm|DiY2sFd|~!IHM?r^>G|bwi8q(?>mH|v zFQ(^3>z%h4zG#FgZI-_+2j0St)qA;#Pg`upiA!t>e@*TkE8X!OL zEPBhcg26!!3dQ<*PG|7lutV-oc)Aw4JZg@5gJ*SxJ~n01y&SVtS)FS%SF_f@3m|$& zMFUHfTPxBi`M7p1|<8*_kCqTDAhjI863H^KG6zVBoWYmwC(NcSztwiPfp2mQ!yp3~BB2FTw z46$*@`=X^LIyv>8rRYqahHM1NyU3FGEhGxYH0_e)bwTP^t+M3g;ffM}^U@PJUOmXl zweqyasIpJj!m0*^&Qkk15*?H_fix-V-iDK}Mtd^77Fdo5TsHG`aDru3gP%IiDc zPl7n%Zv6J8c-6ZA0^~+QAdE04FRbW`@K>f{V$xoQ3@8Ssz7vqrIhk)~80qW=)FZ<` zJwjI#445ZljBTbpDc=7i&Kn`48nb>P2a}&y@jvLnKi5nDxrYA&uvHyQ3|;;mUgRuk zrzQRGwNjM^qzCc{>idd@o@JFo8b-kv!EawR$soAnK+=o8j*tLhmJKNiFSx}oo7FWU zRjL|gHA+e^ywLIHoyIp(+MXcd#1Yy-oFcB>%Ur#5U%l9^`J0jE*1!F-K9uJ!On=zw zeCRRWdJZqeHcsQA81^17Y_*oTLsmLonX1Zlzomm3Bh`)rm&eJtN4nl>|Ak8RkljN z`8KOi*cYl5VZCzEXnBAO>UtCOT;)YlrOg{3Y@DeEA6pr&9QUJ(Wl_V@Uu~l?$~I)u zZ(ytZN*>!!Cor9Fy!y^Kg}Wn(%gkj#iGjY+z5vEBs5EZHd(=CA_d)$5t{(F7yK2a^#Z ztRM zVLAf$BzD5+W~^54+&-GD&v$3~{D{g#eVjNY_>!Dve`5^YY_$=iJcc*0YEg^?P#&iR z4Ob97pU|`0M9O7K(55C0#$T`1-KiHQi5DqLTpE-W%}>YY*3EWfY{>#9`&G7lfd}VI zZGvGaGd_TUv|uq1IIBOOMQUm_Au=>tA9%5Rs#DTE>0h#)=5a>*IZUU&-$-dA`EQ0E z8}Qqb^xB>ZZ86$_uE+q}8n0rc5-J%L^`gK;19fOBnno9Qoi=7tA~GjZatJL+Y$VM& z3iQoCx|k?U1o5Svs5*p8cMY&9iLsNKh~VNhkc~C)X^cig+O7~&{dCNs5^aR)zwDV> zcFq=i5L;h{r~ZLZA@GAkakswRdo-I_DIwEpBVwJUVs3JZb@5sJ>SchnDXBZ|ED8%|COV`_HB5Y}X!n=Pu zJpIswW;z+dodSB1dj0UMo|r-Lvl^GFw;Y4wLFG+`Rwq=AX-+G5xD7x^uSVb$+g~`< zVHmyCtVwN@~EO3dT8-$Ie(jyY!ZNeTuvo?*lU?@dPFTQ#d zND6OlSyi^Re*IeuXN0>%OfNV$;>Ds!+60{@)Rh6LL1UytwKEcw0mb<$#of9UpZpy> zNK2(ty-)Y!p`TB(Ekd2C+(kZT@%qbFsln>ODp>*XKqYJO9`H}nj)vxZa`-R@jN@-+ zly`zz6+_^Rc?Ls8oV#ZoIQ$}Fb=NCVP$f>>xT#!+f96fx**5022#AxB}CvUoNPO;k7D*qzQ;Ip3s*M~zxG)0Ze8bHU&IRCvZW8@U(j z>7)R$*jAwV%rm*bql}Y%y2&q??Jc2sTI*#+p zB-s!FZzz1UP#5N9`=frPmFC|1^Kp{j_ne`Q8c&y{gv_;ql~CA;9-kZo#eV@|b(if* z`h;uhcV}W}pia8*?x2C!<=Nz4UW(HDRVX#4JxL*wAKj5d3gPhs4Zsc1w$msVK&REa zDODm4TT&Hvii{edG8Ob6hX~zKh4TN}PX6Jc?49)EHO_6UTVN&<&RixvG4lR6_6B*5 z>@A7?rd1Y|zm(zDenE8qUV3+4e@7ik4g3MK*pA*qoPjp8QakX_MK2rs@=TZ9^Cq)S z{mBq0Rq=)!vD}vo`;k9v{?7YVk)^0<52y^ShTlK`_n~v=BR2C8?|7vDwVYgQ&9`?9 zZcptq>)Bz>;#<0$L%YXqi;_m{-Uq1&IEWjcneWI~+D=)d;>j#{i{HrZR&-+a3gP`5B5-frYj`0_;XwsbMF|oNmY+-C^cf(vuoPm ztvnD?;SCK}zNE0IwxHDcAmqmo26w2TI+FHGy;kG)J`39XSY|0C)xxFQdlqnATyRBv z=lwdfpXLb^36w)*#{+o2P6iG`ovW)DTVM(+&Fx(xk&o*fCas~2Ux^~z=9QsaLX=RE zd@Dq1E^ai&WUl%OU5i6q7f7!)@kG^>5bXxBsSy~IRmzDcNL;hX5aakx;{+(ijxmnK z8TyHNLIel>W9sJ+U(S3Wum_o4{y>F@zJC7+gnT9G+t62%%!rfl<*>0Y^S~S`ZxFY- z%s@GVpePX>I)lzLyJ3MudN``!*AZpxd0;%p*X>jyzF)VEIlN$azixx#(C$s4JNQ66 zxWMjJ9Xr^g((Pm`ULjmwWiW<4h%|jUhIe5F1z!3ppGTGlyZ3f{t$^xrf-oIdso~xn z1`pb}(CUB|>#X(Q$QSWwf|$$88cg0XZlaH1g6D3Jpzr5`G0Ol8OcXW1>ceJCTP>93 z;O3Mv*feUudpSJC5$=lA>-Pu>Gw?-kP#q$L$XtJD<$v+@PVteaTN`M1&~ehSla6iM zwr$&1M-{tc+ji2iZQFJ_?&Q?W>^=LNea_i+Q*}|lx~cX4*SntefLvv~Q>F5{IvdQu5<@yx**;`R1U&ywPx7mjYN2nY5yI^7t#-I z>GyBgL%`X)$CqD3=S&`T!*p~^wxmiU3;E%GAMv5wBzNuMG)Yfj%ZS4_e1G6Gba}~C zi;$l_S^iH~9KyeG=zral{uv+fx2L>@(wscXyKQOE4@&vZ!IW10P2{yLxgtMQA_PRW zIQRB!AW333=`}F!)mnF1_nyw%{f2JVe+Xda7~#dIdrdx1K4jkAJuY2+x~0cJB4N9i zwido{)v#sO=k0r)<&#rhiE#-DXRB?-goXVuXAgzH=0@rcwkd8@a%2iSaKpX>+C 
zO?8J=ie-(lP!8FC-G*x_fqWvZJ!gOvU)kDtGI_ZWHIqn}=C@cRix9xu%4=>_ zxNknkCR>N*sU$dKgL7-4vhZl`hn?AO>@lJ^swRP#Z^!xgxzz61H}y3r;)=c|CXQ~n zVH?8HP8>(57>q!^-&dg6$*vD}uLaG&$N?9bcuH)Q?K&ev8V~H|yKO;J8umwIsn5FI zmr^{zGIIU2ePN6MbN(nCeiFw${J1=QGZWc!$fR-HxJ~UKCVUkWL^=6LN+crV=ldDQ z@Y9<=C7NknZnrPMjn30QHah=3__O}+lLVBd0D%A}7aJ2tWjlKdBN>Cg7vG7>I!<$< zC_HEEXlMc6-*aWt`L#YbA={rfNd(UQh-Dv`m;K~Ne?6-~l`fo9q3uK8;0Lte$3K?V zgUOUgVVt|24|yjCwAX2#a&ikZ&00Qg@;q)j0iDvS{XRhXp&l{si?BVDmTPc(?CWrs zqx~kDTlz3%skNeMF7$&9h?B4AiI>)GZm}MN&76vfxVKzQ+M6c3ZJEaT4!L4H7?i&b zKn^pvIv>cAPv{_4w9XpHir)7ZM;Ls6Arq;wZV1M^t+ zw=~oJ&Bk=#FdLh#x4pMccmSomE$E}&o9F}Smr6TZFeVYCSVf@?@~#`jG|i--0BKux z!%ov}Q0X&wd?wi97kgkjERCx)96Dyh;G$={E{loI|h0^q9oNfhjFzZS2$It z%F`?)nZxVb{*7wCxR>VWKj6RQW0+(rzd%vYE!X=1Zg1v_-#}QH&@o9vtO)846w@ad z!&-{8wMT3EB({m`)vY1|!?p1Mn3H}avo>uOSCV)^;hAG%Djq-7;WB1V#JxxCebqzO zY%B`_D!T<88deO(XIJ)w1uPZC4#+A5BcF}Q^k-J9{!sZsULXbxVv+FP~`q|_dGHb-)@LZk@#{2je-|x%UG0eFwx92 z@|fyErzpZOrH!DLD1OuB{T}Q8?w{E59lIeuXNG*dv-jz- z&F-#u1!4(0|0Kh?nF;_8^WQHn;HN*eXLNrv5&r*ess2MhVE9+_1Rn3dm=M&eWl}wU2J#dDZeu%yFFuv|=qo4IOU$lsq zW!y|JJC1xayv+RGUT#tOKR;qBs-V6^tGn^^(0|C5HhtCENlt|Am2HsRF+ZS#fPZA~ zb04|UJ~>qZ#93EO(rc>91jquzSz6W5eYb&r8b(q69s2ImhgvqaTt03OU7)}yWJ%lL zS;`7x`m8|=q(9xOYahV@C+7-p>4Wfv3erZ51(Y(FvuEOs~(W@-%@ zyb!3yGt8`v>Br!C*2EdK39~H6Rj^f95pv1I&#X(qu9|tRgDiB-f-9_55YoD7MBr#Y zmC6=|ySayuk}KNQ;?JhZM$zi~=5fYUvCNd_3&xls+77kLu&S|_K)W5OE- zg+G-!VbNQfca<9zzGKju$>Gd52xCZ@hKs3+h^5ppuU9>_&Y|j*hjUm(1 zO9O3ODi4!Q@PY4OZeHF2!*rv~nFSrXrzY5}oIv1a$p*ELnI(LpRhI`;aShnTLl_?JdZN7k>dhtZHs~34S2spge4F&Sc7c=qb zv$Q>4W_>0;PFKuvsOhc|{LpX;JCS0NtV8rfrg9#go_rgOABkai`g&13PE3Gyf<8hk zde-_zCJp4x-DjAMx)cb73NEp_P6nY2!ti#8|4W5|uEiwx# zZ$S%|+vPkIpDMY6@llI;vFK0cHM-dbW4qzA-EE1YK9MO5WgHQ?Ees9hs&R;MrI;o4 zQu%Aihq{=s*#c~R;s0oT{|&Y&{e3|GFWn1xMx;zk?12kDrN8LhBJTDUjwaF;PR=H_ z|6cq3!2>flur;I_7V)PPkV2%?y;c-%wancv z9FFWPJW(VxXs(w)`K`if>0&I3pLrw3ktH)m9txWhV(s|Ab>w~YvUikR{YkX8KbCx0 zp2K_c7rW3=0MPZ0FWJ%m4qA<<)5wceXsl(gwHJ;Q14}883bxilgU~&FD$&Y|O8dDA zMb3!{);i0;K}Tz3`ar6RkxNDs{LCKYKV{O>)+6-W;X9V^)3$Z6GOmTY9Yx|wfzRjj{8NC)0WotoU z`J5hRn4$=ojG!H`&4euFMW(nFIcp}T`NCIwiJ_zpwEnyqX6*F{)87ZCyYLama?u>e zU^l$jGomkt*TA(SjXqHTls*J8#g(R(EaRy}c(R!sp!%R-4T8>>EOP`ATd=vANZ49G zh*8GwjkXyBHGZUzF_MAkE@B=_9=xA7wq=xP_bh1=hwCAMqiU_mJb&=3)RJ-@P3f${p8G^;7tN!Fv{com zJu1chJH`Sr+3-?>*()zu7TH6A;HKqg^t_J4&Y4Zv{ZerU=mb`5EPG8lqSY*1&#L3d z86BdIFcsn`0ltZwCodcibf4z4w1umBWB-|zuHXl=?CBMUx^SiwgyYh@56~~=!`_1~ zNGc9@G(DlEtZ1d|?$Q9;1-M_+rG+vlly!bW6ey8Pxp({Z{*zU?SL}7cP`6%>TAs&5 zp0nBXg&eHs)0qp)X}1ye=w$zWP`~|sQ2&w(ikUdenVLEQBhUX3W&O37N>o&kTa`!QEk0k(*E@c4UZ$?lUpAOzB>L!Ys;39HJCpdd|kzV>1|+|!PFY7MHoR1DFO==25h zWWwMqsWU%K%t!1@#AwOQvJfVz8iK9D^H=n$n{q@zH%w!f8)f*nZ=n~HW4>oCp|N%w zeU&8N;N!sv$4S12EaOBVLEdaOq-ULdt7tUMv@jZbs5ldU&AhQ-#?)c~Gwh71`060n zoi8LLB8zm3#L}KPlCSNeH?}YtbuV)ld1Q!0anf_pGrSTHt8&!gdCABms2u8Ph!Tr@ z5FenUvwinmJn>D%CBylKA5T>@u)d;qz&QAvVgqE*#4$)}_#$1+>L}bZvVcxmt;vHn zF?rNZ&n$ao!nPs@`cA@EpS;08AVHeh?MqNK?2o#YEf$F0p5@qsCze1J24R9RYjQ9^0P~OC~{$C4b^3FoO+BS z6gym?1M-P|HMaG42jd_oz}@@3Ghk{LkW{{W0XJYy%zh)m*68S}kvQ@|8xngV`!xd3IsPu!zb{_@)Dr&Z zvSkBQ4q2F*I2)P&-940aOd{OwBB|$LtIMEP0!OGluutU;%``M4Xv48N8>=s=lj$1FYND>c#J75s3BSE*PuPq zYIo_C{Nq%mdqt4N4S(=HIgWt5&-@`+n};VXAqU+`C68vb+^IkoL`%I9wumXrWu(4) zRy%8_p)rMwy_`hoKfQY`1_k!ng!gS!%SfCxkNHQwe@@8FY}V}20uZcCbi0qap~cUl z)*{2;PDIZM=u`nw?G_F69m;Alxch8f9YJ)V5KHEU&{@1B>iZn)JnW8nnT8W90Oli~ z`;_7s9KfG%#+bZ4Tv>as07YTkBYNc$ZR!~cSl#cK$Sa9qN`Go;r_w5EOMkyA)s;^Y5yTQ;MOwAXDP+)`_La*4FN`@E zo5wPnX2_Zmf@p|?r}B(^U$@~aFcd26o0^?QOIRKh_8yq7f&;PO>La=})^|--PaV1f zbn=U_C=*1RG8e!OQ*+Wb&|i?F|GL|iDJcsYB<|MH(lBJ7x6YcQbjoLrCIE?84fPSD 
[GIT binary patch literal data (base85-encoded) omitted]