diff --git a/.license_header_template b/.license_header_template new file mode 100644 index 0000000..31a6e18 --- /dev/null +++ b/.license_header_template @@ -0,0 +1,13 @@ +@@===----------------------------------------------------------------------===@@ +@@ +@@ This source file is part of the swift-libp2p open source project +@@ +@@ Copyright (c) YEARS swift-libp2p project authors +@@ Licensed under MIT +@@ +@@ See LICENSE for license information +@@ See CONTRIBUTORS for the list of swift-libp2p project authors +@@ +@@ SPDX-License-Identifier: MIT +@@ +@@===----------------------------------------------------------------------===@@ diff --git a/.licenseignore b/.licenseignore new file mode 100644 index 0000000..e78e0b8 --- /dev/null +++ b/.licenseignore @@ -0,0 +1,10 @@ +.github/workflows/configs/.flake8 +**/*.yml +*.md +*.txt +Package.swift +Package.resolved +.gitignore +.swift-format +.licenseignore +.license_header_template diff --git a/.swift-format b/.swift-format new file mode 100644 index 0000000..7e8ae73 --- /dev/null +++ b/.swift-format @@ -0,0 +1,68 @@ +{ + "version" : 1, + "indentation" : { + "spaces" : 4 + }, + "tabWidth" : 4, + "fileScopedDeclarationPrivacy" : { + "accessLevel" : "private" + }, + "spacesAroundRangeFormationOperators" : false, + "indentConditionalCompilationBlocks" : false, + "indentSwitchCaseLabels" : false, + "lineBreakAroundMultilineExpressionChainComponents" : false, + "lineBreakBeforeControlFlowKeywords" : false, + "lineBreakBeforeEachArgument" : true, + "lineBreakBeforeEachGenericRequirement" : true, + "lineLength" : 120, + "maximumBlankLines" : 1, + "respectsExistingLineBreaks" : true, + "prioritizeKeepingFunctionOutputTogether" : true, + "noAssignmentInExpressions" : { + "allowedFunctions" : [ + "XCTAssertNoThrow", + "XCTAssertThrowsError" + ] + }, + "rules" : { + "AllPublicDeclarationsHaveDocumentation" : false, + "AlwaysUseLiteralForEmptyCollectionInit" : false, + "AlwaysUseLowerCamelCase" : false, + "AmbiguousTrailingClosureOverload" : true, + "BeginDocumentationCommentWithOneLineSummary" : false, + "DoNotUseSemicolons" : true, + "DontRepeatTypeInStaticProperties" : true, + "FileScopedDeclarationPrivacy" : true, + "FullyIndirectEnum" : true, + "GroupNumericLiterals" : true, + "IdentifiersMustBeASCII" : true, + "NeverForceUnwrap" : false, + "NeverUseForceTry" : false, + "NeverUseImplicitlyUnwrappedOptionals" : false, + "NoAccessLevelOnExtensionDeclaration" : true, + "NoAssignmentInExpressions" : true, + "NoBlockComments" : true, + "NoCasesWithOnlyFallthrough" : true, + "NoEmptyTrailingClosureParentheses" : true, + "NoLabelsInCasePatterns" : true, + "NoLeadingUnderscores" : false, + "NoParensAroundConditions" : true, + "NoVoidReturnOnFunctionSignature" : true, + "OmitExplicitReturns" : true, + "OneCasePerLine" : true, + "OneVariableDeclarationPerLine" : true, + "OnlyOneTrailingClosureArgument" : true, + "OrderedImports" : true, + "ReplaceForEachWithForLoop" : true, + "ReturnVoidInsteadOfEmptyTuple" : true, + "UseEarlyExits" : false, + "UseExplicitNilCheckInConditions" : false, + "UseLetInEveryBoundCaseVariable" : false, + "UseShorthandTypeNames" : true, + "UseSingleLinePropertyGetter" : false, + "UseSynthesizedInitializer" : false, + "UseTripleSlashForDocumentationComments" : true, + "UseWhereClausesInForLoops" : false, + "ValidateDocumentationComments" : false + } +} diff --git a/LICENSE.md b/LICENSE.md new file mode 100644 index 0000000..55aaec7 --- /dev/null +++ b/LICENSE.md @@ -0,0 +1,19 @@ +Copyright (c) 2025 swift-libp2p + +Permission is hereby 
granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, +DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR +OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE +OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/Package.swift b/Package.swift index 98f3096..f472c95 100644 --- a/Package.swift +++ b/Package.swift @@ -1,5 +1,17 @@ // swift-tools-version: 5.6 -// The swift-tools-version declares the minimum version of Swift required to build this package. +//===----------------------------------------------------------------------===// +// +// This source file is part of the swift-libp2p open source project +// +// Copyright (c) 2022-2025 swift-libp2p project authors +// Licensed under MIT +// +// See LICENSE for license information +// See CONTRIBUTORS for the list of swift-libp2p project authors +// +// SPDX-License-Identifier: MIT +// +//===----------------------------------------------------------------------===// import PackageDescription @@ -7,20 +19,22 @@ let package = Package( name: "swift-libp2p-pubsub", platforms: [ .macOS(.v10_15), - .iOS(.v13) + .iOS(.v13), ], products: [ // Products define the executables and libraries a package produces, and make them visible to other packages. .library( name: "LibP2PPubSub", - targets: ["LibP2PPubSub"]), + targets: ["LibP2PPubSub"] + ) ], dependencies: [ // Dependencies declare other packages that this package depends on. - // .package(url: /* package url */, from: "1.0.0"), - .package(url: "https://github.com/swift-libp2p/swift-libp2p.git", .upToNextMajor(from: "0.1.0")), - .package(url: "https://github.com/swift-libp2p/swift-libp2p-noise.git", .upToNextMajor(from: "0.1.0")), - .package(url: "https://github.com/swift-libp2p/swift-libp2p-mplex.git", .upToNextMajor(from: "0.1.0")), + .package(url: "https://github.com/swift-libp2p/swift-libp2p.git", .upToNextMinor(from: "0.2.0")), + + // Test dependencies + .package(url: "https://github.com/swift-libp2p/swift-libp2p-noise.git", .upToNextMinor(from: "0.1.0")), + .package(url: "https://github.com/swift-libp2p/swift-libp2p-mplex.git", .upToNextMinor(from: "0.1.0")), ], targets: [ // Targets are the basic building blocks of a package. A target can define a module or a test suite. 
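Worth calling out in the dependencies block above: the core swift-libp2p pin moves from `.upToNextMajor(from: "0.1.0")` to `.upToNextMinor(from: "0.2.0")`, and the Noise/MPLEX packages become test-only dependencies with the same tighter rule. A minimal sketch of what each rule resolves to in a hypothetical downstream manifest (the package name and target below are illustrative, not part of this repository), assuming standard SwiftPM semantic-versioning resolution:

// swift-tools-version: 5.6
import PackageDescription

let package = Package(
    name: "PinningSketch",
    dependencies: [
        // .upToNextMajor(from: "0.1.0") accepts 0.1.0 ..< 1.0.0, so a source-breaking
        // 0.2.0 or 0.9.0 release could be pulled in by a routine `swift package update`.
        // .upToNextMinor(from: "0.2.0") accepts only 0.2.0 ..< 0.3.0, i.e. patch releases
        // of the 0.2 line, which is the safer pin while the libp2p packages are pre-1.0.
        .package(url: "https://github.com/swift-libp2p/swift-libp2p.git", .upToNextMinor(from: "0.2.0"))
    ],
    targets: [
        .target(name: "PinningSketch", dependencies: [.product(name: "LibP2P", package: "swift-libp2p")])
    ]
)

For a pre-1.0 dependency, where a minor bump is conventionally allowed to break source compatibility, the `.upToNextMinor` pin keeps resolution inside one 0.x line until a stable 1.0 exists.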
@@ -28,11 +42,11 @@ let package = Package( .target( name: "LibP2PPubSub", dependencies: [ - .product(name: "LibP2P", package: "swift-libp2p"), + .product(name: "LibP2P", package: "swift-libp2p") ], resources: [ .copy("Protobufs/RPC.proto"), - .copy("Protobufs/RPC2.proto") + .copy("Protobufs/RPC2.proto"), ] ), .testTarget( @@ -41,6 +55,7 @@ let package = Package( "LibP2PPubSub", .product(name: "LibP2PNoise", package: "swift-libp2p-noise"), .product(name: "LibP2PMPLEX", package: "swift-libp2p-mplex"), - ]), + ] + ), ] ) diff --git a/Sources/LibP2PPubSub/LibP2PPubSub.swift b/Sources/LibP2PPubSub/LibP2PPubSub.swift index 89d319a..e8365d7 100644 --- a/Sources/LibP2PPubSub/LibP2PPubSub.swift +++ b/Sources/LibP2PPubSub/LibP2PPubSub.swift @@ -1,15 +1,22 @@ +//===----------------------------------------------------------------------===// // -// PubSub.swift +// This source file is part of the swift-libp2p open source project // +// Copyright (c) 2022-2025 swift-libp2p project authors +// Licensed under MIT // -// Created by Brandon Toms on 7/28/21. +// See LICENSE for license information +// See CONTRIBUTORS for the list of swift-libp2p project authors // +// SPDX-License-Identifier: MIT +// +//===----------------------------------------------------------------------===// import LibP2P -fileprivate protocol RPCValidator { - func validate(message:RPC, from:PeerID) -> Bool - func validateExtended(message:RPC, from:PeerID) -> BasePubSub.ValidationResult +private protocol RPCValidator { + func validate(message: RPC, from: PeerID) -> Bool + func validateExtended(message: RPC, from: PeerID) -> BasePubSub.ValidationResult } /// The Base PubSub Class all PubSub implementations should build off of @@ -30,14 +37,14 @@ fileprivate protocol RPCValidator { /// The JS implementation wraps the inbound and outbound stream in a [PeerStreams](https://github.com/libp2p/js-libp2p-interfaces/blob/master/packages/libp2p-pubsub/src/peer-streams.ts) object to simplify reading / writing. open class BasePubSub { //public static var multicodec: String { "" } - + public typealias Topic = String //typealias FancyValidator = (uuid:String, exec:(Pubsub_Pb_Message) -> EventLoopFuture) //typealias FancyValidatorExtended = (uuid:String, exec:(Pubsub_Pb_Message) -> EventLoopFuture) typealias Validator = (PubSubMessage) -> Bool typealias ValidatorExtended = (PubSubMessage) -> ValidationResult - - public enum Errors:Error { + + public enum Errors: Error { case debugNameMustBeSet case invalidMulticodecLength case alreadyRunning @@ -45,69 +52,69 @@ open class BasePubSub { case invalidTopic case invalidSubscription case notImplemented - + /// Overrides case noRPCDecoder case noRPCEncoder - + /// Message Processing case duplicateMessage case signaturePolicyViolation case noIDFunctionForTopic case failedMessageValidation - + /// Peerstates & MessageStates case invalidPeerStateConformance case invalidMessageStateConformance } - + public static let MessagePrefix = "libp2p-pubsub:".data(using: .utf8)! - + //public var multicodec: String = "" - public var peerID:PeerID { self.libp2p!.peerID } - - private weak var libp2p:Application! 
- private let elg:EventLoopGroup - public let eventLoop:EventLoop - public private(set) var state:ServiceLifecycleState //State - private let multicodecs:[SemVerProtocol] - private let messageSignaturePolicy:PubSub.SignaturePolicy - private let canRelayMessages:Bool - private let emitSelf:Bool - private var messageSequenceNumber:Int = 0 - + public var peerID: PeerID { self.libp2p!.peerID } + + private weak var libp2p: Application! + private let elg: EventLoopGroup + public let eventLoop: EventLoop + public private(set) var state: ServiceLifecycleState //State + private let multicodecs: [SemVerProtocol] + private let messageSignaturePolicy: PubSub.SignaturePolicy + private let canRelayMessages: Bool + private let emitSelf: Bool + private var messageSequenceNumber: Int = 0 + /// A simple seen cache to store message id's that we've encountered recently (seenTTL) //internal var seenCache:[Data:UInt64] = [:] - internal let seenCache:SeenCache - - internal var messageCache:MessageStateProtocol - internal var peerState:PeerStateProtocol - + internal let seenCache: SeenCache + + internal var messageCache: MessageStateProtocol + internal var peerState: PeerStateProtocol + //private var validators:[Topic:[FancyValidator]] = [:] //private var validatorsExt:[Topic:[FancyValidatorExtended]] = [:] - internal var validators:[Topic:[Validator]] = [:] - internal var validatorsExt:[Topic:[ValidatorExtended]] = [:] - internal var messageIDFunctions:[Topic:(PubSubMessage) -> Data] = [:] - internal var topicSignaturePolicy:[Topic:PubSub.SignaturePolicy] = [:] - - private var topology:MulticodecTopology! - - internal var logger:Logger - - private var runLoopTask:RepeatedTask? = nil - private let runLoopInterval:TimeAmount + internal var validators: [Topic: [Validator]] = [:] + internal var validatorsExt: [Topic: [ValidatorExtended]] = [:] + internal var messageIDFunctions: [Topic: (PubSubMessage) -> Data] = [:] + internal var topicSignaturePolicy: [Topic: PubSub.SignaturePolicy] = [:] + + private var topology: MulticodecTopology! + + internal var logger: Logger + + private var runLoopTask: RepeatedTask? = nil + private let runLoopInterval: TimeAmount //private var handler:LibP2P.ProtocolHandler! - - internal var subscriptions:[Topic:PubSub.SubscriptionHandler] - internal var subscriptionTopics:[Topic] { + + internal var subscriptions: [Topic: PubSub.SubscriptionHandler] + internal var subscriptionTopics: [Topic] { get { self.subscriptions.keys.map { $0 } } } - + internal enum PubSubEvent { case inbound(_PubSubEvent) case outbound(_PubSubEvent) - - var description:String { + + var description: String { switch self { case .inbound(let event): return "Inbound(\(event.description))" @@ -116,69 +123,80 @@ open class BasePubSub { } } } - + internal enum _PubSubEvent { - case subscriptionChange(PeerID, [String:Bool]) + case subscriptionChange(PeerID, [String: Bool]) case graft(PeerID, String) case prune(PeerID, String) case iHave(PeerID, [Data]) case iWant(PeerID, [Data]) case message(PeerID, [PubSubMessage]) - - var description:String { + + var description: String { switch self { case .message(let peerID, let messages): - return "message(\(peerID), [\(messages.map({ "(\($0.topicIds.first ?? "???") -> \(String(data: $0.data, encoding: .utf8) ?? "Not UTF8")"}).joined(separator: ", "))]" + return + "message(\(peerID), [\(messages.map({ "(\($0.topicIds.first ?? "???") -> \(String(data: $0.data, encoding: .utf8) ?? 
"Not UTF8")"}).joined(separator: ", "))]" default: return "\(self)" } } } - - internal var _eventHandler:((PubSubEvent) -> Void)? - + + internal var _eventHandler: ((PubSubEvent) -> Void)? + //private var topics: - - internal init(group:EventLoopGroup, libp2p:Application, peerState:PeerStateProtocol, messageCache:MessageStateProtocol, debugName:String, multicodecs:[String], globalSignaturePolicy:PubSub.SignaturePolicy = .strictSign, canRelayMessages:Bool = false, emitSelf:Bool = false) throws { + + internal init( + group: EventLoopGroup, + libp2p: Application, + peerState: PeerStateProtocol, + messageCache: MessageStateProtocol, + debugName: String, + multicodecs: [String], + globalSignaturePolicy: PubSub.SignaturePolicy = .strictSign, + canRelayMessages: Bool = false, + emitSelf: Bool = false + ) throws { guard !debugName.isEmpty && debugName != "" else { throw Errors.debugNameMustBeSet } - + self.multicodecs = multicodecs.compactMap { SemVerProtocol($0) } guard self.multicodecs.count > 0 else { throw Errors.invalidMulticodecLength } - + self.logger = Logger(label: "\(debugName)[\(libp2p.peerID.shortDescription)][\(UUID().uuidString.prefix(5))]") - self.logger.logLevel = .info //libp2p.logger.logLevel //LOG_LEVEL - + self.logger.logLevel = .info //libp2p.logger.logLevel //LOG_LEVEL + self.libp2p = libp2p self.elg = group self.eventLoop = elg.next() self.state = .stopped - + self.seenCache = SeenCache(eventLoop: elg.next(), logger: self.logger) self.peerState = peerState self.messageCache = messageCache - + self.messageSignaturePolicy = globalSignaturePolicy self.canRelayMessages = canRelayMessages self.emitSelf = emitSelf self.subscriptions = [:] - + self.runLoopInterval = .seconds(1) - + /// Set up our Network Topology Filter for our specified multicodecs self.topology = nil } - + /// Starts our PubSub Service /// - Throws: PubSub.Errors when we attempt to start an already started PubSub service /// - Note: This kicks off a `scheduleRepeatedAsyncTask` public func start() throws { guard self.state == .stopped else { throw Errors.alreadyRunning } self.state = .starting - + /// Init Message Cache //self.messageCache = MCache(eventLoop: self.elg.next(), historyWindows: 5, gossipWindows: 3) try self.messageCache.start() - + /// Init Peering State //self.peerState = PeeringState(eventLoop: self.elg.next()) try self.peerState.start() @@ -186,126 +204,140 @@ open class BasePubSub { // Let our peerstate know of our subscriptions (for mesh / fanout distinction) let _ = self.peerState.subscribeSelf(to: sub.key, on: nil) } - + /// Set up our Network Topology Filter for our specified multicodecs /// - TODO: Have a way to unregister from the topology... - libp2p.topology.register(TopologyRegistration( - protocol: multicodecs.first!.stringValue, - handler: TopologyHandler( - onConnect: onPeerConnected, - onDisconnect: onPeerDisconnected + libp2p.topology.register( + TopologyRegistration( + protocol: multicodecs.first!.stringValue, + handler: TopologyHandler( + onConnect: onPeerConnected, + onDisconnect: onPeerDisconnected + ) ) - )) - + ) + /// Set our state to started self.state = .started - + /// Register floodsub on our LibP2P node /// - TODO: We should probably have a way to control if a hard coded route handler is announced or not... 
// app.registrar.regsiter(multicodecs) - + /// Kick off our run loop runLoopTask = self.eventLoop.scheduleRepeatedAsyncTask(initialDelay: .zero, delay: runLoopInterval, runLoop) } - + public func stop() throws { guard self.state == .starting || self.state == .started else { throw Errors.alreadyStopped } self.state = .stopping - + /// Deinit Message Cache //try self.messageCache.stop().wait() try self.messageCache.stop() - + /// Deinit PeerState //try self.peerState.stop().wait() try self.peerState.stop() - + /// Deinit Network Topology Filter for our specified multicodecs //self.topology.deinitialize() self.topology = nil - + /// Remove our support for the pubsub protocol // app.registrar.unregister(multicodecs) - + /// Cancel our run loop runLoopTask?.cancel() - + /// Set our state to Stopped self.state = .stopped } - + /// Called once every X seconds, this method should handle shifting our MessageCache, updating the PeerState, relaying messages and anything else we need to do to stay in sync with the PubSub network - private func runLoop(_:RepeatedTask) -> EventLoopFuture { + private func runLoop(_: RepeatedTask) -> EventLoopFuture { guard self.state == .started else { return self.eventLoop.makeSucceededVoidFuture() } self.logger.trace("RunLoop executing") - + /// Call Heartbeat on our services... return [ self.seenCache.trim(), self.heartbeat(), self.messageCache.heartbeat(), - self.peerState.heartbeat() + self.peerState.heartbeat(), ].flatten(on: self.eventLoop) } - + /// PubSub implementations should override this method in order to handle reccuring/repeated tasks public func heartbeat() -> EventLoopFuture { self.eventLoop.makeSucceededVoidFuture() } - -// private func trimSeenCache() -> EventLoopFuture { -// self.eventLoop.submit { -// /// Trim the seenCache of expired messages -// var expired = DispatchTime.now().uptimeNanoseconds -// if expired > 120_000_000_000 { expired -= 120_000_000_000 } else { return } -// self.seenCache = self.seenCache.compactMapValues { time in -// time < expired ? nil : time -// } -// } -// } - + + // private func trimSeenCache() -> EventLoopFuture { + // self.eventLoop.submit { + // /// Trim the seenCache of expired messages + // var expired = DispatchTime.now().uptimeNanoseconds + // if expired > 120_000_000_000 { expired -= 120_000_000_000 } else { return } + // self.seenCache = self.seenCache.compactMapValues { time in + // time < expired ? nil : time + // } + // } + // } + /// Called when we discover a new peer that supports the PubSub protocol /// /// We take this opportunity to add the peer to our PeeringState, we then attempt to reach /// out to the peer to discover what topics they're subscribed too and the peers they know about - private func onPeerConnected(peer:PeerID, connection:Connection) { + private func onPeerConnected(peer: PeerID, connection: Connection) { /// When running multiple local instances, our global notification center / event bus can propogate events for our own peer. So we make sure we disregard event related to us... 
guard peer != self.peerID else { return } - - guard self.state == .started else { self.logger.warning("Ignoring onPeerConnected notification received while in state '\(state)'"); return } - + + guard self.state == .started else { + self.logger.warning("Ignoring onPeerConnected notification received while in state '\(state)'") + return + } + self.logger.info("Peer Connected: \(peer.b58String)") self.peerState.addNewPeer(peer, on: nil).whenComplete { _ in self.logger.trace("Peer Stored: \(peer.b58String)") } - + guard let codec = self.multicodecs.first?.stringValue else { return } - + // -TODO: Maybe we take this opportunity to open a PubSub stream if one doesn't already exist... if let conn = connection as? BasicConnectionLight { - self.logger.debug("PubSub::Attempting to auto-dial \(connection.remotePeer?.description ?? "nil") for outbound `\(codec)` stream") + self.logger.debug( + "PubSub::Attempting to auto-dial \(connection.remotePeer?.description ?? "nil") for outbound `\(codec)` stream" + ) conn.newStream(forProtocol: codec, mode: .ifOutboundDoesntAlreadyExist) } } - + /// Called when a Peer that support the PubSub protocol disconnects from us (no longer reachable, goes offline, etc) /// /// We take this opportunity to remove the peer from our peering state. /// We also update any metadata in our databases and perform any necessary cleanup - private func onPeerDisconnected(peer:PeerID) { - guard self.state == .started else { self.logger.warning("Ignoring onPeerDisconnected notification received while in state '\(state)'"); return } + private func onPeerDisconnected(peer: PeerID) { + guard self.state == .started else { + self.logger.warning("Ignoring onPeerDisconnected notification received while in state '\(state)'") + return + } self.logger.info("Peer Disconnected: \(peer.b58String)") let _ = peerState.onPeerDisconnected(peer) } - /// This is the main entry point for all PubSub logic /// Both inbound and outbound events get passed though here and routed to their specific handlers... - public func processRequest(_ req:Request) -> EventLoopFuture> { + public func processRequest(_ req: Request) -> EventLoopFuture> { switch req.event { case .ready: // This should always succeed... 
- guard let stream = req.connection.hasStream(forProtocol: multicodecs.first!.stringValue, direction: req.streamDirection) else { + guard + let stream = req.connection.hasStream( + forProtocol: multicodecs.first!.stringValue, + direction: req.streamDirection + ) + else { self.logger.warning("Failed to find stream associated with new inbound request") return req.eventLoop.makeSucceededFuture(.stayOpen) } @@ -315,7 +347,7 @@ open class BasePubSub { case .outbound: return self.handleNewOutboundStream(req, stream: stream) } - + case .data: guard case .inbound = req.streamDirection else { self.logger.warning("We dont accept data on outbound PubSub streams") @@ -323,7 +355,7 @@ open class BasePubSub { } //self.logger.info("TODO::HandleInboundData \(data)") return self.handleInboundData(req) - + case .closed: switch req.streamDirection { case .inbound: @@ -332,7 +364,7 @@ open class BasePubSub { self.handleClosedOutboundStream(req) } return self.eventLoop.makeSucceededFuture(.close) - + case .error(let error): switch req.streamDirection { case .inbound: @@ -343,30 +375,34 @@ open class BasePubSub { return self.eventLoop.makeSucceededFuture(.close) } } - - func handleNewInboundStream(_ req:Request, stream:LibP2PCore.Stream) -> EventLoopFuture> { + + func handleNewInboundStream(_ req: Request, stream: LibP2PCore.Stream) -> EventLoopFuture> { //self.logger.info("TODO::HandleNewInboundStream") guard let remotePeer = req.remotePeer else { - self.logger.warning("Received new inbound stream without an authenticated remote peer attached! Closing stream!") + self.logger.warning( + "Received new inbound stream without an authenticated remote peer attached! Closing stream!" + ) return req.eventLoop.makeSucceededFuture(.close) } // Alert our peerstore of the new peer and associated stream return peerState.attachInboundStream(remotePeer, inboundStream: stream, on: req.eventLoop).flatMap { // Check to see if we have an outbound stream to this peer for /floodsub/1.0.0 -// if req.connection.hasStream(forProtocol: self.multicodecs.first!.stringValue, direction: .outbound) == nil { -// // If not, then open one... -// req.logger.info("PubSub::Attempting to auto-dial \(req.remotePeer?.description ?? "nil") for outbound \(self.multicodecs.first!.stringValue) stream") -// (req.connection as? BasicConnectionLight)?.newStream(forProtocol: self.multicodecs.first!.stringValue, mode: .ifOutboundDoesntAlreadyExist) -// } - - return req.eventLoop.makeSucceededFuture(.stayOpen) + // if req.connection.hasStream(forProtocol: self.multicodecs.first!.stringValue, direction: .outbound) == nil { + // // If not, then open one... + // req.logger.info("PubSub::Attempting to auto-dial \(req.remotePeer?.description ?? "nil") for outbound \(self.multicodecs.first!.stringValue) stream") + // (req.connection as? BasicConnectionLight)?.newStream(forProtocol: self.multicodecs.first!.stringValue, mode: .ifOutboundDoesntAlreadyExist) + // } + + req.eventLoop.makeSucceededFuture(.stayOpen) } } - - func handleNewOutboundStream(_ req:Request, stream:LibP2PCore.Stream) -> EventLoopFuture> { + + func handleNewOutboundStream(_ req: Request, stream: LibP2PCore.Stream) -> EventLoopFuture> { self.logger.debug("TODO::HandleNewOutboundStream") guard let remotePeer = req.remotePeer else { - self.logger.warning("Received new outbound stream without an authenticated remote peer attached! Closing stream!") + self.logger.warning( + "Received new outbound stream without an authenticated remote peer attached! Closing stream!" 
+ ) return req.eventLoop.makeSucceededFuture(.close) } // Store the new outbound (write) stream in our peerstate @@ -375,13 +411,13 @@ open class BasePubSub { guard let subs = try? self.generateSubscriptionPayload() else { self.logger.warning("Failed to generate subscription payload for new \(remotePeer)") return .stayOpen - } // Or should we close here?? + } // Or should we close here?? self.logger.trace("Sharing our subscriptions with our new \(remotePeer)") return .respond(req.allocator.buffer(bytes: subs)) } } - - func handleClosedInboundStream(_ req:Request) { + + func handleClosedInboundStream(_ req: Request) { // Tell our peerstate that the inbound (read) stream for this peer has been closed... // Maybe we follow suit and close our outbound (write) stream as well... self.logger.debug("TODO::HandleClosedInboundStream") @@ -390,8 +426,8 @@ open class BasePubSub { self.logger.trace("Detached inbound stream for \(remotePeer)") } } - - func handleClosedOutboundStream(_ req:Request) { + + func handleClosedOutboundStream(_ req: Request) { // Tell our peerstate that our outbound (write) stream for this peer has been closed... // Maybe we attempt to reopen? Or we request the inbound (read) stream to close as well... self.logger.debug("TODO::HandleClosedOutboundStream") @@ -400,15 +436,15 @@ open class BasePubSub { self.logger.trace("Detached outbound stream for \(remotePeer)") } } - - func handleErrorInboundStream(_ req:Request, error:Error) { + + func handleErrorInboundStream(_ req: Request, error: Error) { self.logger.debug("TODO::HandleErrorInboundStream -> \(error)") } - - func handleErrorOutboundStream(_ req:Request, error:Error) { + + func handleErrorOutboundStream(_ req: Request, error: Error) { self.logger.debug("TODO::HandleErrorOutboundStream -> \(error)") } - + /// As the base pub sub implementation there are a few things we can do to offload some boiler plate code from our specific Router implementations... /// /// 1) We ensure the inbound data can be decoded into an RPCMessageCore compliant object @@ -421,76 +457,78 @@ open class BasePubSub { /// 5) Store the message in our Message Cache /// 6) Alert the Router of the message /// 7) Pass the message along to any subscribers - func handleInboundData(_ request:Request) -> EventLoopFuture> { + func handleInboundData(_ request: Request) -> EventLoopFuture> { /// Record the time for metrics purposes let tic = DispatchTime.now().uptimeNanoseconds - + /// Ensure the request has a remotePeer installed on it guard let remotePeer = request.remotePeer else { self.logger.warning("Failed to determine message originator (RemotePeer)") return request.eventLoop.makeSucceededFuture(.close) } - + /// Ask our router to decode the inbound data as an RPCMessageCore compliant object guard let rpc = try? self.decodeRPC(Data(request.payload.readableBytesView)) else { self.logger.warning("Failed to decode RPC PubSub Message") - self.logger.warning("UTF8: \(String(data: Data(request.payload.readableBytesView), encoding: .utf8) ?? "Not UTF8")") + self.logger.warning( + "UTF8: \(String(data: Data(request.payload.readableBytesView), encoding: .utf8) ?? "Not UTF8")" + ) self.logger.warning("Hex: \(Data(request.payload.readableBytesView).asString(base: .base16))") /// Do we close the stream? Or keep it open and give them another shot... return request.eventLoop.makeSucceededFuture(.close) } - - var tasks:[EventLoopFuture] = [] - + + var tasks: [EventLoopFuture] = [] + /// If message contains subscriptions, process them... 
- tasks.append( self.processSubscriptions(rpc, peer: remotePeer) ) - + tasks.append(self.processSubscriptions(rpc, peer: remotePeer)) + /// Give the router a chance to process the entire RPC /// - Note: Floodsub / Randomsub might choose to disregard this by not overriding the method, but more complex routers like Gossipsub will need to override this in order to process extra data like control messages - tasks.append( self.processInboundRPC(rpc, from: remotePeer, request: request) ) - + tasks.append(self.processInboundRPC(rpc, from: remotePeer, request: request)) + /// Handle the published messages (one at a time) -// tasks.append(contentsOf: rpc.messages.compactMap { message -> EventLoopFuture in -// /// Process this message -// self.processPubSubMessage(message).flatMap { message -> EventLoopFuture in -// /// If we get to this point, it means that the message is new and valid -// -// /// Pass each message onto our specific implementations -// return self.processInboundMessage(message, from: remotePeer, request: request).flatMap { -// -// /// Let our eventhandler know of the message... -// self._eventHandler?(.message(remotePeer, [message])) -// -// /// Alert the SubscriptionHandlers interested in this message -// if let handler = self.subscriptions[message.topicIds.first!] { -// self.logger.trace("Forwarding new valid message to handler") -// let _ = handler.on?(.data(message)) -// } else { -// self.logger.warning("No Subscription Handler for topic:`\(message.topicIds.first!)`") -// } -// -// -// /// We're finally done processing this message... -// return self.eventLoop.makeSucceededVoidFuture() -// } -// } -// }) - + // tasks.append(contentsOf: rpc.messages.compactMap { message -> EventLoopFuture in + // /// Process this message + // self.processPubSubMessage(message).flatMap { message -> EventLoopFuture in + // /// If we get to this point, it means that the message is new and valid + // + // /// Pass each message onto our specific implementations + // return self.processInboundMessage(message, from: remotePeer, request: request).flatMap { + // + // /// Let our eventhandler know of the message... + // self._eventHandler?(.message(remotePeer, [message])) + // + // /// Alert the SubscriptionHandlers interested in this message + // if let handler = self.subscriptions[message.topicIds.first!] { + // self.logger.trace("Forwarding new valid message to handler") + // let _ = handler.on?(.data(message)) + // } else { + // self.logger.warning("No Subscription Handler for topic:`\(message.topicIds.first!)`") + // } + // + // + // /// We're finally done processing this message... + // return self.eventLoop.makeSucceededVoidFuture() + // } + // } + // }) + /// Handle the published messages (all at once) tasks.append( self.batchProcessPubSubMessages(rpc.messages).flatMap { newMessages -> EventLoopFuture in guard !newMessages.isEmpty else { return request.eventLoop.makeSucceededVoidFuture() } - + /// Give our router implementation a chance to process the new messages... return self.processInboundMessages(newMessages, from: remotePeer, request: request).flatMap { - + /// - TODO: Event sub, possibly remove later... self._eventHandler?(.inbound(.message(remotePeer, newMessages))) - + /// Sort the messages based on topic (if a message contains multiple topic ids, this will duplicate the message for each topic) /// Example message "🍍" has topicIds "food" and "fruit", the message "🍍" will appear twice in the dictionary below. 
Allowing us to notify both the Food and Fruit Subscription handlers seperately let messagesPerTopic = self.sortMessagesByTopic(newMessages) - + /// Pass the messages onto any SubscriptionHandlers at this point for (topic, msgs) in messagesPerTopic { if let handler = self.subscriptions[topic] { @@ -502,18 +540,18 @@ open class BasePubSub { self.logger.warning("No Subscription Handler for topic:`\(topic)`") } } - + /// We're finally done processing this message... return request.eventLoop.makeSucceededVoidFuture() - + } } ) - + return EventLoopFuture.whenAllComplete(tasks, on: request.eventLoop).map { results in self.logger.debug("Processed Inbound RPC Message in \(DispatchTime.now().uptimeNanoseconds - tic)ns") - var successes:Int = 0 - results.forEach { if case .success = $0 { successes += 1 } } + var successes: Int = 0 + for result in results { if case .success = result { successes += 1 } } if successes != results.count { self.logger.trace("Performed \(results.count) tasks") self.logger.trace("\(successes) successes") @@ -522,87 +560,87 @@ open class BasePubSub { return .stayOpen } } -// func handleInboundData2(_ request:Request) -> EventLoopFuture> { -// /// Record the time for metrics purposes -// let tic = DispatchTime.now().uptimeNanoseconds -// -// /// Ensure the request has a remotePeer installed on it -// guard let remotePeer = request.remotePeer else { -// self.logger.warning("Failed to determine message originator (RemotePeer)") -// return request.eventLoop.makeSucceededFuture(.close) -// } -// -// /// Ask our router to decode the inbound data as an RPCMessageCore compliant object -// guard let rpc = try? self.decodeRPC(Data(request.payload.readableBytesView)) else { -// self.logger.warning("Failed to decode RPC PubSub Message") -// self.logger.info("UTF8: \(String(data: Data(request.payload.readableBytesView), encoding: .utf8) ?? "Not UTF8")") -// self.logger.info("Hex: \(Data(request.payload.readableBytesView).asString(base: .base16))") -// /// Do we close the stream? Or keep it open and give them another shot... -// return request.eventLoop.makeSucceededFuture(.close) -// } -// -// -// -// /// If message contains subscriptions, process them... -// return self.processSubscriptions(rpc, peer: remotePeer).flatMap { -// /// Give the router a chance to process the entire RPC -// /// - Note: Floodsub / Randomsub might choose to disregard this by not overriding the method, but more complex routers like Gossipsub will need to override this in order to process extra data like control messages -// self.processInboundRPC(rpc, from: remotePeer, request: request).flatMap { -// /// Handle the published messages (all at once) -// self.batchProcessPubSubMessages(rpc.messages).flatMap { newMessages -> EventLoopFuture in -// guard !newMessages.isEmpty else { return request.eventLoop.makeSucceededVoidFuture() } -// -// /// - TODO: Event sub, possibly remove later... -// self._eventHandler?(.message(remotePeer, newMessages)) -// -// /// Sort the messages based on topic (if a message contains multiple topic ids, this will duplicate the message for each topic) -// /// Example message "🍍" has topicIds "food" and "fruit", the message "🍍" will appear twice in the dictionary below. 
Allowing us to notify both the Food and Fruit Subscription handlers seperately -// let messagesPerTopic = self.sortMessagesByTopic(newMessages) -// -// /// Pass the messages onto any SubscriptionHandlers at this point -// for (topic, msgs) in messagesPerTopic { -// if let handler = self.subscriptions[topic] { -// for message in msgs { -// let _ = handler.on?(.data(message)) -// } -// } else { -// self.logger.warning("No Subscription Handler for topic:`\(topic)`") -// } -// } -// -// /// We're finally done processing this message... -// return request.eventLoop.makeSucceededVoidFuture() -// }.map { -// self.logger.debug("Processed Inbound RPC Message in \(DispatchTime.now().uptimeNanoseconds - tic)ns") -// return .stayOpen -// } -// } -// } -// } - - private func processSubscriptions(_ rpc:RPCMessageCore, peer remotePeer:PeerID) -> EventLoopFuture { + // func handleInboundData2(_ request:Request) -> EventLoopFuture> { + // /// Record the time for metrics purposes + // let tic = DispatchTime.now().uptimeNanoseconds + // + // /// Ensure the request has a remotePeer installed on it + // guard let remotePeer = request.remotePeer else { + // self.logger.warning("Failed to determine message originator (RemotePeer)") + // return request.eventLoop.makeSucceededFuture(.close) + // } + // + // /// Ask our router to decode the inbound data as an RPCMessageCore compliant object + // guard let rpc = try? self.decodeRPC(Data(request.payload.readableBytesView)) else { + // self.logger.warning("Failed to decode RPC PubSub Message") + // self.logger.info("UTF8: \(String(data: Data(request.payload.readableBytesView), encoding: .utf8) ?? "Not UTF8")") + // self.logger.info("Hex: \(Data(request.payload.readableBytesView).asString(base: .base16))") + // /// Do we close the stream? Or keep it open and give them another shot... + // return request.eventLoop.makeSucceededFuture(.close) + // } + // + // + // + // /// If message contains subscriptions, process them... + // return self.processSubscriptions(rpc, peer: remotePeer).flatMap { + // /// Give the router a chance to process the entire RPC + // /// - Note: Floodsub / Randomsub might choose to disregard this by not overriding the method, but more complex routers like Gossipsub will need to override this in order to process extra data like control messages + // self.processInboundRPC(rpc, from: remotePeer, request: request).flatMap { + // /// Handle the published messages (all at once) + // self.batchProcessPubSubMessages(rpc.messages).flatMap { newMessages -> EventLoopFuture in + // guard !newMessages.isEmpty else { return request.eventLoop.makeSucceededVoidFuture() } + // + // /// - TODO: Event sub, possibly remove later... + // self._eventHandler?(.message(remotePeer, newMessages)) + // + // /// Sort the messages based on topic (if a message contains multiple topic ids, this will duplicate the message for each topic) + // /// Example message "🍍" has topicIds "food" and "fruit", the message "🍍" will appear twice in the dictionary below. 
Allowing us to notify both the Food and Fruit Subscription handlers seperately + // let messagesPerTopic = self.sortMessagesByTopic(newMessages) + // + // /// Pass the messages onto any SubscriptionHandlers at this point + // for (topic, msgs) in messagesPerTopic { + // if let handler = self.subscriptions[topic] { + // for message in msgs { + // let _ = handler.on?(.data(message)) + // } + // } else { + // self.logger.warning("No Subscription Handler for topic:`\(topic)`") + // } + // } + // + // /// We're finally done processing this message... + // return request.eventLoop.makeSucceededVoidFuture() + // }.map { + // self.logger.debug("Processed Inbound RPC Message in \(DispatchTime.now().uptimeNanoseconds - tic)ns") + // return .stayOpen + // } + // } + // } + // } + + private func processSubscriptions(_ rpc: RPCMessageCore, peer remotePeer: PeerID) -> EventLoopFuture { /// Make sure we have subscription messages to work on, otherwise just return... guard rpc.subs.count > 0 else { return self.eventLoop.makeSucceededVoidFuture() } - + /// Create a [Topic:Subscribed] dictionary from the subscription options messages - var subs:[String:Bool] = [:] - rpc.subs.forEach { - subs[$0.topicID] = $0.subscribe + var subs: [String: Bool] = [:] + for sub in rpc.subs { + subs[sub.topicID] = sub.subscribe } - + /// Log the subscription changes... self.logger.debug("\(remotePeer)::Subscriptions: \(subs)") - + /// - TODO: Event sub, possibly remove later... _eventHandler?(.inbound(.subscriptionChange(remotePeer, subs))) - + /// Update our PeeringState with the new subscription changes return self.peerState.update(subscriptions: subs, for: remotePeer, on: nil).map { /// Notify our handlers of the subscription changes now that they're reflected in our PeeringState self.notifyHandlers(for: subs, peer: remotePeer) } } - + public func generateSubscriptionPayload() throws -> [UInt8] { /// Generate an RPC Message containing all of our subscriptions... var rpc = RPC() @@ -612,12 +650,12 @@ open class BasePubSub { subOpt.subscribe = true return subOpt } - + let payload = try rpc.serializedData() return putUVarInt(UInt64(payload.count)) + payload } - - private func notifyHandlers(for subs:[String:Bool], peer remotePeer:PeerID) { + + private func notifyHandlers(for subs: [String: Bool], peer remotePeer: PeerID) { for sub in subs { guard let handler = self.subscriptions[sub.key] else { self.logger.warning("No subscription handler for topic:`\(sub.key)`") @@ -630,7 +668,7 @@ open class BasePubSub { } } } - + /// Processes a single PubSubMessage at a time /// - Returns: The message if it's new (unseen) and valid /// @@ -640,50 +678,62 @@ open class BasePubSub { /// 3) Ensuring we haven't already seen the message /// 4) Validating the Message (by running it through the appropriate installed validators) /// 5) Storing the message - private func processPubSubMessage(_ message:PubSubMessage) -> EventLoopFuture { + private func processPubSubMessage(_ message: PubSubMessage) -> EventLoopFuture { /// Ensure the message conforms to our MessageSignaturePolicy guard passesMessageSignaturePolicy(message) else { self.logger.warning("Failed signature policy, discarding message") return self.eventLoop.makeFailedFuture(Errors.signaturePolicyViolation) } - + /// Derive the message id using the overidable messageID function guard let messageIDFunc = self.messageIDFunctions[message.topicIds.first!] else { - self.logger.warning("No MessageIDFunction defined for topic '\(message.topicIds.first!)'. 
Dropping Message.") + self.logger.warning( + "No MessageIDFunction defined for topic '\(message.topicIds.first!)'. Dropping Message." + ) return self.eventLoop.makeFailedFuture(Errors.noIDFunctionForTopic) } - + /// Get the messages ID let id = messageIDFunc(message) - + self.logger.trace("Message ID `\(id.asString(base: .base16))`") self.logger.trace("\(message.description)") - + /// Check to ensure we haven't seen this message already... return self.messageCache.exists(messageID: id, on: nil).flatMap { exists -> EventLoopFuture in - guard exists == false else { self.logger.trace("Dropping Duplicate Message"); return self.eventLoop.makeFailedFuture(Errors.duplicateMessage) } - + guard exists == false else { + self.logger.trace("Dropping Duplicate Message") + return self.eventLoop.makeFailedFuture(Errors.duplicateMessage) + } + /// Validate the unseen message before storing it in our message cache... return self.validate(message: message).flatMap { valid -> EventLoopFuture in - guard valid else { self.logger.warning("Dropping Invalid Message: \(message)"); return self.eventLoop.makeFailedFuture(Errors.failedMessageValidation) } - + guard valid else { + self.logger.warning("Dropping Invalid Message: \(message)") + return self.eventLoop.makeFailedFuture(Errors.failedMessageValidation) + } + /// Store the message in our message cache - self.logger.trace("Storing Message: \(id.asString(base: .base16))"); + self.logger.trace("Storing Message: \(id.asString(base: .base16))") /// - Note: We can run into issues where we end up saving duplicate messages cause when we check for existance they haven't been saved yet, and by the time we get around to saving them, theirs multiple copies ready to be stored. /// We temporarily added the `valid` flag to the `put` method to double check existance of a message before forwarding it and alerting our handler. - return self.messageCache.put(messageID: id, message: (topic: message.topicIds.first!, data: message), on: nil).flatMap { valid -> EventLoopFuture in + return self.messageCache.put( + messageID: id, + message: (topic: message.topicIds.first!, data: message), + on: nil + ).flatMap { valid -> EventLoopFuture in guard valid else { self.logger.warning("Encountered Duplicate Message While Attempting To Store In Message Cache") return self.eventLoop.makeFailedFuture(Errors.duplicateMessage) } - + /// Pass each message onto our specific implementations return self.eventLoop.makeSucceededFuture(message) } } } } - + /// Processes a batch of PubSubMessage, in an attempt to reduce eventLoop hops... 
/// - Returns: All of the new (unseen) valid messages /// @@ -693,133 +743,151 @@ open class BasePubSub { /// 3) Ensuring we haven't already seen the message /// 4) Validating the Message (by running it through the appropriate installed validators) /// 5) Storing the message - private func batchProcessPubSubMessages(_ messages:[PubSubMessage]) -> EventLoopFuture<[PubSubMessage]> { - return self.ensureSignaturePolicyConformance(messages).flatMap { signedMessages -> EventLoopFuture<[PubSubMessage]> in + private func batchProcessPubSubMessages(_ messages: [PubSubMessage]) -> EventLoopFuture<[PubSubMessage]> { + self.ensureSignaturePolicyConformance(messages).flatMap { signedMessages -> EventLoopFuture<[PubSubMessage]> in guard !signedMessages.isEmpty else { return self.eventLoop.makeSucceededFuture([]) } - + /// Compute the message ID for each message - return self.computeMessageIds(signedMessages).flatMap { identifiedMessages -> EventLoopFuture<[PubSubMessage]> in + return self.computeMessageIds(signedMessages).flatMap { + identifiedMessages -> EventLoopFuture<[PubSubMessage]> in guard !identifiedMessages.isEmpty else { return self.eventLoop.makeSucceededFuture([]) } - + /// Using the computed ID's, discard any messages that we've already seen / encountered - return self.discardKnownMessagesUsingSeenCache(identifiedMessages).flatMap { newMessages -> EventLoopFuture<[PubSubMessage]> in + return self.discardKnownMessagesUsingSeenCache(identifiedMessages).flatMap { + newMessages -> EventLoopFuture<[PubSubMessage]> in guard !newMessages.isEmpty else { return self.eventLoop.makeSucceededFuture([]) } - + /// Store the new / unique messages in our Seen & MessaheCache - return self.storeMessages(newMessages).flatMap { storedMessages -> EventLoopFuture<[PubSubMessage]> in - + return self.storeMessages(newMessages).flatMap { + storedMessages -> EventLoopFuture<[PubSubMessage]> in + /// Return the new / unique messages for further processing - return self.eventLoop.makeSucceededFuture(storedMessages.map { $0.value }) + self.eventLoop.makeSucceededFuture(storedMessages.map { $0.value }) } } } } } - - + /// This should really just call our implementers -// private func onMessage(_ stream:LibP2P.Stream, request:Request) -> EventLoopFuture { -// let payload = Data(request.payload.readableBytesView) -// self.logger.info("Raw Msg:\(payload.asString(base: .base16))") -// -// var pl = payload -// -// /// The inbound message is uvarint length prefixed -// let lengthPrefix = uVarInt(pl.bytes) -// if lengthPrefix.bytesRead > 0 { -// pl = pl.dropFirst(lengthPrefix.bytesRead) -// } -// -// return self.processInboundMessage(pl, from: stream, request: request) -// -// /// Return -// //return request.eventLoop.makeSucceededFuture(nil) -// } - + // private func onMessage(_ stream:LibP2P.Stream, request:Request) -> EventLoopFuture { + // let payload = Data(request.payload.readableBytesView) + // self.logger.info("Raw Msg:\(payload.asString(base: .base16))") + // + // var pl = payload + // + // /// The inbound message is uvarint length prefixed + // let lengthPrefix = uVarInt(pl.bytes) + // if lengthPrefix.bytesRead > 0 { + // pl = pl.dropFirst(lengthPrefix.bytesRead) + // } + // + // return self.processInboundMessage(pl, from: stream, request: request) + // + // /// Return + // //return request.eventLoop.makeSucceededFuture(nil) + // } + /// Can be overridden by our custom router implementations - internal func processInboundRPC(_ rpc:RPCMessageCore, from:PeerID, request:Request) -> EventLoopFuture { - return 
request.eventLoop.makeSucceededVoidFuture() + internal func processInboundRPC(_ rpc: RPCMessageCore, from: PeerID, request: Request) -> EventLoopFuture { + request.eventLoop.makeSucceededVoidFuture() } - + /// Has to be overriden by our custom router implementations - internal func processInboundMessage(_ msg:PubSubMessage, from:PeerID, request:Request) -> EventLoopFuture { - assertionFailure("Your PubSub implementation must override the PubSub::ProcessInboundMessage(:Data, :Stream, :LibP2P.ProtocolRequest) method") + internal func processInboundMessage(_ msg: PubSubMessage, from: PeerID, request: Request) -> EventLoopFuture { + assertionFailure( + "Your PubSub implementation must override the PubSub::ProcessInboundMessage(:Data, :Stream, :LibP2P.ProtocolRequest) method" + ) return request.eventLoop.makeSucceededVoidFuture() } - + /// Has to be overriden by our custom router implementations - internal func processInboundMessages(_ messages:[PubSubMessage], from:PeerID, request:Request) -> EventLoopFuture { - assertionFailure("Your PubSub implementation must override the PubSub::ProcessInboundMessage(:Data, :Stream, :LibP2P.ProtocolRequest) method") + internal func processInboundMessages( + _ messages: [PubSubMessage], + from: PeerID, + request: Request + ) -> EventLoopFuture { + assertionFailure( + "Your PubSub implementation must override the PubSub::ProcessInboundMessage(:Data, :Stream, :LibP2P.ProtocolRequest) method" + ) return request.eventLoop.makeSucceededVoidFuture() } - + /// Has to be overriden by our custom router implementations - internal func decodeRPC(_ data:Data) throws -> RPCMessageCore { + internal func decodeRPC(_ data: Data) throws -> RPCMessageCore { assertionFailure("Your PubSub implementation must override the PubSub::decodeRPC(_ data:Data) method") throw Errors.noRPCDecoder } - - internal func encodeRPC(_ rpc:RPCMessageCore) throws -> Data { + + internal func encodeRPC(_ rpc: RPCMessageCore) throws -> Data { assertionFailure("Your PubSub implementation must override the PubSub::encodeRPC(_ rpc:RPCMessageCore) method") throw Errors.noRPCEncoder } - + /// Checks to make sure the message conforms to our Message Signing Policy /// - Parameter message: The RPC Message to check signature validity for /// - Returns: `True` if the message conforms to our policy and is safe to process further. /// `False` if the message doesn't conform to our policy, or if the signature is invalid, we should discrad the message immediately in this case. /// /// - Warning: We should discard any message that fails this check (results in a false return value) - func passesMessageSignaturePolicy(_ message:PubSubMessage) -> Bool { - guard let topic = message.topicIds.first, let policy = self.topicSignaturePolicy[topic] else { + func passesMessageSignaturePolicy(_ message: PubSubMessage) -> Bool { + guard let topic = message.topicIds.first, let policy = self.topicSignaturePolicy[topic] else { self.logger.warning("No Signature Policy for `\(message.topicIds.first ?? "")` Dropping Message.") return false } switch policy { case .strictNoSign: guard message.signature.isEmpty && message.key.isEmpty else { - //guard !message.hasSignature && !message.hasKey else { - self.logger.warning("Message Signature Policy Mismatch. Current Policy == Strict No Sign and the Message is signed. Dropping Message.") + //guard !message.hasSignature && !message.hasKey else { + self.logger.warning( + "Message Signature Policy Mismatch. Current Policy == Strict No Sign and the Message is signed. Dropping Message." 
+ ) return false } return true case .strictSign: guard !message.signature.isEmpty && !message.key.isEmpty else { - //guard message.hasSignature && message.hasKey else { - self.logger.warning("Message Signature Policy Mismatch. Current Policy == Strict Sign and the Message isn't signed. Dropping Message.") + //guard message.hasSignature && message.hasKey else { + self.logger.warning( + "Message Signature Policy Mismatch. Current Policy == Strict Sign and the Message isn't signed. Dropping Message." + ) return false } /// Validate Message Signature return (try? self.verifyMessageSignature(message)) == true } } - - func publish(topics:[Topic], messages:[RPC.Message], on loop:EventLoop? = nil) -> EventLoopFuture { + + func publish(topics: [Topic], messages: [RPC.Message], on loop: EventLoop? = nil) -> EventLoopFuture { print("TODO::Publishing Topics:\(topics) -> \(messages)") return self.eventLoop.makeSucceededVoidFuture().hop(to: loop ?? eventLoop) } - + public func publish(topic: String, data: Data, on: EventLoop?) -> EventLoopFuture { self.eventLoop.makeFailedFuture(Errors.notImplemented) } - + public func publish(topic: String, bytes: [UInt8], on: EventLoop?) -> EventLoopFuture { self.eventLoop.makeFailedFuture(Errors.notImplemented) } - + public func publish(topic: String, buffer: ByteBuffer, on: EventLoop?) -> EventLoopFuture { self.eventLoop.makeFailedFuture(Errors.notImplemented) } - - func publish(msg:RPC.Message, on loop:EventLoop? = nil) -> EventLoopFuture { + + func publish(msg: RPC.Message, on loop: EventLoop? = nil) -> EventLoopFuture { /// Get all streams subscribed to the topic... guard let topic = msg.topicIds.first else { return self.eventLoop.makeFailedFuture(Errors.invalidTopic) } - + return self.getPeersSubscribed(to: topic).flatMap { subscribers -> EventLoopFuture in - if subscribers.isEmpty { self.logger.trace("No known peers subscribed to topic: \(topic). Proceeding with message signing and local storage.") } - + if subscribers.isEmpty { + self.logger.trace( + "No known peers subscribed to topic: \(topic). Proceeding with message signing and local storage." + ) + } + var msgToSend = RPC.Message() - + /// Sign the message if necessary... do { if let policy = self.topicSignaturePolicy[topic] { @@ -829,14 +897,15 @@ open class BasePubSub { let bytes = try BasePubSub.MessagePrefix + msg.serializedData() msgToSend = msg msgToSend.signature = try self.peerID.signature(for: bytes) - msgToSend.key = try Data(self.peerID.marshalPublicKey()) // pubkey.data and marshalPublicKey have an extra 0801 prepended + // pubkey.data and marshalPublicKey have an extra 0801 prepended + msgToSend.key = try Data(self.peerID.marshalPublicKey()) self.logger.trace("Signed Message: \(msgToSend)") case .strictNoSign: // Nothing to do... msgToSend = msg } } - + /// Construct the RPC message var rpc = RPC() rpc.msgs = [msgToSend] @@ -844,86 +913,94 @@ open class BasePubSub { var payload = try rpc.serializedData() /// prepend a varint length prefix payload = Data(putUVarInt(UInt64(payload.count)) + payload.bytes) - + self.logger.trace("\(payload.asString(base: .base16))") - + /// Store the message in our message cache... if let msgID = self.messageIDFunctions[topic]?(msgToSend) { let _ = self.messageCache.put(messageID: msgID, message: (topic, msgToSend), on: nil) /// Do we also add it to our seenCache?? self.seenCache.put(messageID: msgID) } - + /// For each peer subscribed to the topic, send the message their way... 
for subscriber in subscribers { self.logger.debug("Attempting to send message to \(subscriber.id)") self._eventHandler?(.outbound(.message(subscriber.id, [msg]))) try? subscriber.write(payload.bytes) } - + /// This is gossipsub related... we should move this into the gsub logic if subscribers.isEmpty { - self.logger.warning("This message was signed and stored locally. If your attached router supports publishing past messages then the message might be published to the network eventually.") + self.logger.warning( + "This message was signed and stored locally. If your attached router supports publishing past messages then the message might be published to the network eventually." + ) } - + return self.eventLoop.makeSucceededVoidFuture() } catch { return self.eventLoop.makeFailedFuture(error) } }.hop(to: loop ?? eventLoop) } - - public func subscribe(topic:Topic, on loop:EventLoop? = nil) -> EventLoopFuture { + + public func subscribe(topic: Topic, on loop: EventLoop? = nil) -> EventLoopFuture { self.logger.debug("TODO::Subscribe to topic: \(topic)") return self.eventLoop.makeSucceededVoidFuture().hop(to: loop ?? eventLoop) } - - public func subscribe(_ config:PubSub.SubscriptionConfig, on loop:EventLoop? = nil) -> EventLoopFuture { + + public func subscribe(_ config: PubSub.SubscriptionConfig, on loop: EventLoop? = nil) -> EventLoopFuture { /// Ensure the Topic we're subscribing to is valid... - guard !config.topic.isEmpty, config.topic != "" else { return self.eventLoop.makeFailedFuture(Errors.invalidTopic) } + guard !config.topic.isEmpty, config.topic != "" else { + return self.eventLoop.makeFailedFuture(Errors.invalidTopic) + } self.logger.debug("Subscribing to topic `\(config.topic)`") return self.eventLoop.submit { /// Ensure that the topic and subscription handler has been set in our Subscription list by the specific PubSub implementation - guard self.subscriptions[config.topic] != nil else { throw Errors.invalidSubscription } - + guard self.subscriptions[config.topic] != nil else { throw Errors.invalidSubscription } + /// Assign our Topic specific MessageSignaturePolicy self.assignSignaturePolicy(for: config.topic, policy: config.signaturePolicy) - + /// Allow all messages on this topic let _ = self.addValidator(for: config.topic, validator: config.validator.validationFunction) - + /// Assign our MessageID function for the specified topic (different topics can use differnt message ID functions) self.logger.trace("Using the \(config.messageIDFunc) as our MessageID function for topic:'\(config.topic)'") self.assignMessageIDFunction(for: config.topic, config.messageIDFunc.messageIDFunction) - + /// Let our peerstate know of our subscriptions (for mesh / fanout distinction) let _ = self.peerState.subscribeSelf(to: config.topic, on: nil) - + /// Alert everyone we know of to our new subscription let _ = self.peerState.getAllPeers(on: loop).map { subs in - //let _ = self.peerState.peersSubscribedTo(topic: config.topic, on: loop).map { subs in - guard subs.count > 0 else { self.logger.warning("No subscribers to share subscription with"); return } + //let _ = self.peerState.peersSubscribedTo(topic: config.topic, on: loop).map { subs in + guard subs.count > 0 else { + self.logger.warning("No subscribers to share subscription with") + return + } guard let subPayload = try? self.generateSubPayload(forTopics: [config.topic]) else { - self.logger.warning("Failed to serialize subscription payload. 
Unable to alert peers of subscription") + self.logger.warning( + "Failed to serialize subscription payload. Unable to alert peers of subscription" + ) return } - - subs.forEach { - try? $0.write(subPayload) - self._eventHandler?(.outbound(.subscriptionChange($0.id, [config.topic: true]))) + + for sub in subs { + try? sub.write(subPayload) + self._eventHandler?(.outbound(.subscriptionChange(sub.id, [config.topic: true]))) } } }.hop(to: loop ?? eventLoop) } - - + public func getPeersSubscribed(to topic: String, on: EventLoop?) -> EventLoopFuture<[PeerID]> { self.peerState.peersSubscribedTo(topic: topic, on: on).map { subscribers -> [PeerID] in subscribers.map { $0.id } } } - - public func generateUnsubPayload(forTopics topics:[String]) throws -> [UInt8] { + + public func generateUnsubPayload(forTopics topics: [String]) throws -> [UInt8] { /// Generate an RPC Message containing all of our subscriptions... var rpc = RPC() rpc.subscriptions = topics.map { @@ -932,12 +1009,12 @@ open class BasePubSub { subOpt.subscribe = false return subOpt } - + let payload = try rpc.serializedData() return putUVarInt(UInt64(payload.count)) + payload } - - public func generateSubPayload(forTopics topics:[String]) throws -> [UInt8] { + + public func generateSubPayload(forTopics topics: [String]) throws -> [UInt8] { /// Generate an RPC Message containing all of our subscriptions... var rpc = RPC() rpc.subscriptions = topics.map { @@ -946,38 +1023,38 @@ open class BasePubSub { subOpt.subscribe = true return subOpt } - + let payload = try rpc.serializedData() return putUVarInt(UInt64(payload.count)) + payload } - + /// This method simply removes any references to the subscribed topic. /// It does NOT handle gracefully unsubscribing from a topic on a network. /// For example, letting peers know of the unsubscription by sending RPC messages. /// Gracefully unsubscribing is left to the specific PubSub implemtation - public func unsubscribe(topic:Topic, on loop:EventLoop? = nil) -> EventLoopFuture { + public func unsubscribe(topic: Topic, on loop: EventLoop? = nil) -> EventLoopFuture { self.logger.debug("Unsubscribing from topic: \(topic)") - + /// Alert all of the peers we know of to our unsubscription return self.peerState.getAllPeers(on: loop).flatMap { subs -> EventLoopFuture in - //return self.peerState.peersSubscribedTo(topic: topic, on: loop).flatMap { subs -> EventLoopFuture in + //return self.peerState.peersSubscribedTo(topic: topic, on: loop).flatMap { subs -> EventLoopFuture in do { let rpc = try self.generateUnsubPayload(forTopics: [topic]) - subs.forEach { - if (try? $0.write(rpc)) != nil { - self.logger.trace("Sent our `\(topic)` unsub to \($0.id)") - self._eventHandler?(.outbound(.subscriptionChange($0.id, [topic: false]))) + for sub in subs { + if (try? sub.write(rpc)) != nil { + self.logger.trace("Sent our `\(topic)` unsub to \(sub.id)") + self._eventHandler?(.outbound(.subscriptionChange(sub.id, [topic: false]))) } else { - self.logger.trace("Failed to send our `\(topic)` unsub to \($0.id)") + self.logger.trace("Failed to send our `\(topic)` unsub to \(sub.id)") } } - + return self.eventLoop.flatSubmit { self.validators.removeValue(forKey: topic) self.messageIDFunctions.removeValue(forKey: topic) self.topicSignaturePolicy.removeValue(forKey: topic) self.subscriptions.removeValue(forKey: topic) - + // Let our peerstate know of our unsubscription return self.peerState.unsubscribeSelf(from: topic, on: nil).transform(to: ()) } @@ -987,27 +1064,30 @@ open class BasePubSub { } }.hop(to: loop ?? 
eventLoop) } - + } /// Batch message functions extension BasePubSub { /// Given an array of `RPC.Message`s, this method will ensure each message conforms to our SignaturePolicy, dropping/discarding the messages that don't - internal func ensureSignaturePolicyConformance(_ messages:[PubSubMessage]) -> EventLoopFuture<[PubSubMessage]> { + internal func ensureSignaturePolicyConformance(_ messages: [PubSubMessage]) -> EventLoopFuture<[PubSubMessage]> { self.eventLoop.submit { messages.filter { self.passesMessageSignaturePolicy($0) } } } - + /// Given an array of `RPC.Message`s, this method will compute the Message ID for each message (or drop the message if it's invalid) and returns an `[ID:RPC.Message]` dictionary - internal func computeMessageIds(_ messages:[PubSubMessage]) -> EventLoopFuture<[Data:PubSubMessage]> { + internal func computeMessageIds(_ messages: [PubSubMessage]) -> EventLoopFuture<[Data: PubSubMessage]> { self.eventLoop.submit { - var msgs:[Data:PubSubMessage] = [:] - messages.forEach { message in + var msgs: [Data: PubSubMessage] = [:] + for message in messages { /// Ensure the message has a topic and that we have a messageIDFunc registered for that topic - guard let firstTopic = message.topicIds.first, let messageIDFunc = self.messageIDFunctions[firstTopic] else { - self.logger.warning("No MessageIDFunction defined for topic '\(message.topicIds.first ?? "")'. Dropping Message.") - return + guard let firstTopic = message.topicIds.first, let messageIDFunc = self.messageIDFunctions[firstTopic] + else { + self.logger.warning( + "No MessageIDFunction defined for topic '\(message.topicIds.first ?? "")'. Dropping Message." + ) + continue } /// Compute the message id and insert it into our dictionary msgs[Data(messageIDFunc(message))] = message @@ -1015,67 +1095,72 @@ extension BasePubSub { return msgs } } - + /// Given a dictionary of Messages and their IDs, this method will discard any messages that are already present in our message cache, returning a dictionary of new and unique messages /// I think this should sort on our seenCache instead of the messageCache -// internal func discardKnownMessagesUsingMessageCache(_ messages:[Data:PubSubMessage]) -> EventLoopFuture<[Data:PubSubMessage]> { -// let ids = messages.keys.map { $0 } -// return self.messageCache.filter(ids: Set(ids), returningOnly: .unknown, on: self.eventLoop).map { unknownIDs -> [Data:PubSubMessage] in -// var newMessages:[Data:PubSubMessage] = [:] -// unknownIDs.forEach { newMessages[$0] = messages[$0] } -// return newMessages -// } -// } - - internal func discardKnownMessagesUsingSeenCache(_ messages:[Data:PubSubMessage]) -> EventLoopFuture<[Data:PubSubMessage]> { + // internal func discardKnownMessagesUsingMessageCache(_ messages:[Data:PubSubMessage]) -> EventLoopFuture<[Data:PubSubMessage]> { + // let ids = messages.keys.map { $0 } + // return self.messageCache.filter(ids: Set(ids), returningOnly: .unknown, on: self.eventLoop).map { unknownIDs -> [Data:PubSubMessage] in + // var newMessages:[Data:PubSubMessage] = [:] + // unknownIDs.forEach { newMessages[$0] = messages[$0] } + // return newMessages + // } + // } + + internal func discardKnownMessagesUsingSeenCache( + _ messages: [Data: PubSubMessage] + ) -> EventLoopFuture<[Data: PubSubMessage]> { let ids = messages.keys.map { $0 } - return self.seenCache.filter(ids: Set(ids), returningOnly: .unknown, on: self.eventLoop).map { unknownIDs -> [Data:PubSubMessage] in - var newMessages:[Data:PubSubMessage] = [:] - unknownIDs.forEach { newMessages[$0] = messages[$0] 
} + return self.seenCache.filter(ids: Set(ids), returningOnly: .unknown, on: self.eventLoop).map { + unknownIDs -> [Data: PubSubMessage] in + var newMessages: [Data: PubSubMessage] = [:] + for unknownID in unknownIDs { newMessages[unknownID] = messages[unknownID] } return newMessages } } - + /// Given a dictionary of Messages, this method will validate each message using the appropriate validation function, and silently discard any messages that fail to validate for any reason. Returns a dictionary of Valid RPC.Messages indexed by their ID - internal func validateMessages(_ messages:[Data:PubSubMessage]) -> EventLoopFuture<[Data:PubSubMessage]> { - var validMessages:[Data:PubSubMessage] = [:] + internal func validateMessages(_ messages: [Data: PubSubMessage]) -> EventLoopFuture<[Data: PubSubMessage]> { + var validMessages: [Data: PubSubMessage] = [:] return messages.map { message in self.validate(message: message.value, on: self.eventLoop).map { valid in validMessages[message.key] = message.value } }.flatten(on: self.eventLoop).map { - return validMessages + validMessages } } - + /// Given a dictionary of MessageIDs and their Message, this function will /// - Store the messageIDs in our SeenCache (for duplicate message dropping) /// - Store the messages in our MessageCache for further propogation / forwarding... - internal func storeMessages(_ messages:[Data:PubSubMessage]) -> EventLoopFuture<[Data:PubSubMessage]> { + internal func storeMessages(_ messages: [Data: PubSubMessage]) -> EventLoopFuture<[Data: PubSubMessage]> { /// Store the message IDs in our SeenCache self.seenCache.put(messageIDs: messages.keys.map { $0 }).flatMap { /// Then store the complete message in our MessageCache self.messageCache.put(messages: messages, on: self.eventLoop) } } - - internal func sortMessagesByTopic(_ messages:[Data:PubSubMessage]) -> [String:[(id:Data, message:PubSubMessage)]] { - var messagesByTopic:[String:[(Data, PubSubMessage)]] = [:] + + internal func sortMessagesByTopic( + _ messages: [Data: PubSubMessage] + ) -> [String: [(id: Data, message: PubSubMessage)]] { + var messagesByTopic: [String: [(Data, PubSubMessage)]] = [:] for message in messages { for topic in message.value.topicIds { if messagesByTopic[topic] == nil { messagesByTopic[topic] = [] } - messagesByTopic[topic]?.append( (message.key, message.value) ) + messagesByTopic[topic]?.append((message.key, message.value)) } } return messagesByTopic } - - internal func sortMessagesByTopic(_ messages:[PubSubMessage]) -> [String:[PubSubMessage]] { - var messagesByTopic:[String:[PubSubMessage]] = [:] + + internal func sortMessagesByTopic(_ messages: [PubSubMessage]) -> [String: [PubSubMessage]] { + var messagesByTopic: [String: [PubSubMessage]] = [:] for message in messages { for topic in message.topicIds { if messagesByTopic[topic] == nil { messagesByTopic[topic] = [] } - messagesByTopic[topic]?.append( message ) + messagesByTopic[topic]?.append(message) } } return messagesByTopic @@ -1083,48 +1168,64 @@ extension BasePubSub { } extension BasePubSub { - - public func getTopics(on loop:EventLoop? = nil) -> EventLoopFuture<[Topic]> { - return self.peerState.topicSubscriptions(on: loop) + + public func getTopics(on loop: EventLoop? = nil) -> EventLoopFuture<[Topic]> { + self.peerState.topicSubscriptions(on: loop) } - -// public func getPeersSubscribed(to topic: Topic, on loop:EventLoop? 
= nil) -> EventLoopFuture<[PeerID]> { -// return self.peerState.peersSubscribedTo(topic: topic, on: loop) -// } - - func getPeersSubscribed(to topic: Topic, on loop:EventLoop? = nil) -> EventLoopFuture<[PubSub.Subscriber]> { - return self.peerState.peersSubscribedTo(topic: topic, on: loop) + + // public func getPeersSubscribed(to topic: Topic, on loop:EventLoop? = nil) -> EventLoopFuture<[PeerID]> { + // return self.peerState.peersSubscribedTo(topic: topic, on: loop) + // } + + func getPeersSubscribed(to topic: Topic, on loop: EventLoop? = nil) -> EventLoopFuture<[PubSub.Subscriber]> { + self.peerState.peersSubscribedTo(topic: topic, on: loop) } - - func validate(message: PubSubMessage, on loop:EventLoop? = nil) -> EventLoopFuture { + + func validate(message: PubSubMessage, on loop: EventLoop? = nil) -> EventLoopFuture { self.eventLoop.submit { () -> Bool in - guard let topic = message.topicIds.first else { self.logger.warning("No message topic"); return false } - guard let validators = self.validators[topic] else { print("Warning! No Validators found for Topic: '\(topic)'. Failing message validation by default."); return false } - return validators.allSatisfy { $0(message) } //TODO: Fail this if it takes to long to return + guard let topic = message.topicIds.first else { + self.logger.warning("No message topic") + return false + } + guard let validators = self.validators[topic] else { + print("Warning! No Validators found for Topic: '\(topic)'. Failing message validation by default.") + return false + } + return validators.allSatisfy { $0(message) } //TODO: Fail this if it takes to long to return }.hop(to: loop ?? eventLoop) } - + /// Warning! We currently only validate the message using the first validation function in the tpoics array /// - TODO: Make this more efficient by failing quickly after the first failed / rejected validation function... - func validateExtended(message:PubSubMessage, on loop:EventLoop? = nil) -> EventLoopFuture { + func validateExtended(message: PubSubMessage, on loop: EventLoop? = nil) -> EventLoopFuture { self.eventLoop.submit { () -> ValidationResult in - guard let topic = message.topicIds.first else { self.logger.warning("No message topic"); return .reject } - guard let validators = self.validatorsExt[topic] else { print("Warning! No ExtValidators found for Topic: '\(topic)'. Failing message validation by default."); return .reject } + guard let topic = message.topicIds.first else { + self.logger.warning("No message topic") + return .reject + } + guard let validators = self.validatorsExt[topic] else { + print("Warning! No ExtValidators found for Topic: '\(topic)'. Failing message validation by default.") + return .reject + } /// Run the message through all of our validation functions and store the results... let validationResults = validators.map { $0(message) } /// If any validation functions failed, reject the message - if validationResults.contains( .reject ) { return .reject } + if validationResults.contains(.reject) { return .reject } /// If any validations functions requested a throttle, throttle the message - if validationResults.contains( .throttle ) { return .throttle } + if validationResults.contains(.throttle) { return .throttle } /// If any validation functions requested the message to be ignored, ignore the message - if validationResults.contains( .ignore ) { return .ignore } + if validationResults.contains(.ignore) { return .ignore } /// Otherwise, all validation functions accepted the message return .accept }.hop(to: loop ?? 
eventLoop) } - + /// Adds a validation function (validator) to the specified topic - func addValidator(for topic:Topic, validator:@escaping Validator, on loop:EventLoop? = nil) -> EventLoopFuture { + func addValidator( + for topic: Topic, + validator: @escaping Validator, + on loop: EventLoop? = nil + ) -> EventLoopFuture { self.eventLoop.submit { () -> Void in if var existingValidators = self.validators[topic] { existingValidators.append(validator) @@ -1134,9 +1235,13 @@ extension BasePubSub { } }.hop(to: loop ?? eventLoop) } - + /// Adds an extended validation function (validatorExt) to the specified topic - func addValidatorExtended(for topic:Topic, validator:@escaping ValidatorExtended, on loop:EventLoop? = nil) -> EventLoopFuture { + func addValidatorExtended( + for topic: Topic, + validator: @escaping ValidatorExtended, + on loop: EventLoop? = nil + ) -> EventLoopFuture { self.eventLoop.submit { () -> Void in if var existingValidators = self.validatorsExt[topic] { existingValidators.append(validator) @@ -1146,62 +1251,65 @@ extension BasePubSub { } }.hop(to: loop ?? eventLoop) } - + /// Removes all validators bound to the specified topic - func removeValidators(for topic:Topic, on loop:EventLoop? = nil) -> EventLoopFuture { + func removeValidators(for topic: Topic, on loop: EventLoop? = nil) -> EventLoopFuture { self.eventLoop.submit { () -> Void in self.validators.removeValue(forKey: topic) }.hop(to: loop ?? eventLoop) } - + /// Removes all validators bound to the specified topic - func removeValidatorsExtended(for topic:Topic, on loop:EventLoop? = nil) -> EventLoopFuture { + func removeValidatorsExtended(for topic: Topic, on loop: EventLoop? = nil) -> EventLoopFuture { self.eventLoop.submit { () -> Void in self.validatorsExt.removeValue(forKey: topic) }.hop(to: loop ?? eventLoop) } - - func assignMessageIDFunction(for topic:Topic, _ idFunc:@escaping (PubSubMessage) -> Data) { + + func assignMessageIDFunction(for topic: Topic, _ idFunc: @escaping (PubSubMessage) -> Data) { self.messageIDFunctions[topic] = idFunc } - - func assignSignaturePolicy(for topic:Topic, policy:PubSub.SignaturePolicy) { + + func assignSignaturePolicy(for topic: Topic, policy: PubSub.SignaturePolicy) { self.topicSignaturePolicy[topic] = policy } - + /// Do we actually increment this for every message we send / per topic? /// Or do we just return a random UInt64 (what JS appears to do) func nextMessageSequenceNumber() -> [UInt8] { - return Array(withUnsafeBytes(of: UInt64.random(in: 0...UInt64.max).bigEndian) { $0 }) + Array(withUnsafeBytes(of: UInt64.random(in: 0...UInt64.max).bigEndian) { $0 }) //self.messageSequenceNumber += 1 //return self.messageSequenceNumber } - - internal func verifyMessageSignature(_ message:PubSubMessage) throws -> Bool { + + internal func verifyMessageSignature(_ message: PubSubMessage) throws -> Bool { //self.mainLoop.submit { () -> Bool in guard let key = try? 
PeerID(marshaledPublicKey: message.key) else { self.logger.warning("Failed to recover public key from message data") return false } - + if key.b58String != message.from.asString(base: .base58btc) { self.logger.warning("Message Key does NOT belong to sender") return false } - + let messageWithoutSignature = try RPC.Message.with { msg in msg.from = message.from msg.data = message.data msg.seqno = message.seqno msg.topicIds = message.topicIds }.serializedData() - - let verified = try key.isValidSignature(message.signature, for: BasePubSub.MessagePrefix + messageWithoutSignature) - + + let verified = try key.isValidSignature( + message.signature, + for: BasePubSub.MessagePrefix + messageWithoutSignature + ) + return verified == true //} } - + /// ValidationResult represents the decision of an extended validator enum ValidationResult { /// Accept is a validation decision that indicates a valid message that should be accepted and delivered to the application and forwarded to the network. @@ -1213,79 +1321,78 @@ extension BasePubSub { /// Used Internally to throttle messages from a particular peer case throttle } - + enum SubscriptionEvent { case newPeer(PeerID) case data(PubSubMessage) case error(Error) - - internal var rawValue:String { + + internal var rawValue: String { switch self { - case .newPeer: return "newPeer" - case .data: return "data" - case .error: return "error" + case .newPeer: return "newPeer" + case .data: return "data" + case .error: return "error" } } } - + /// Code for multiple async validators per topic... - + /// return (try? validators.allSatisfy({ validator in /// try validator.exec(message).wait() /// })) ?? false -// func validate(message: Pubsub_Pb_Message) -> EventLoopFuture { -// self.mainLoop.flatSubmit { () -> EventLoopFuture in -// guard let validators = self.validators[message.topic] else { print("Warnin! No Validators found for Topic: '\(message.topic)'. Failing message validation by default."); return self.mainLoop.makeSucceededFuture(false) } -// -// let promise = self.mainLoop.makePromise(of: Bool.self) -// let _ = validators.map { $0.exec(message).cascade(to: promise) } -// -// return promise.futureResult -// } -// } -// -// /// Warning! We currently only validate the message using the first validation function in the tpoics array -// func validateExtended(message:Pubsub_Pb_Message) -> EventLoopFuture { -// self.mainLoop.flatSubmit { () -> EventLoopFuture in -// guard let validator = self.validatorsExt[message.topic]?.first else { print("Warnin! No Validators found for Topic: '\(message.topic)'. Failing message validation by default."); return self.mainLoop.makeSucceededFuture(.reject) } -// return validator.exec(message) -// } -// } -// -// func addValidator(for topic:Topic, validator:FancyValidator, on loop:EventLoop? = nil) -> EventLoopFuture { -// self.mainLoop.submit { () -> Void in -// if self.validators[topic] != nil { -// /// Ensure the validator doesn't already exist in our array -// guard !self.validators[topic]!.contains(where: { $0.uuid == validator.uuid }) else { return } -// /// Add the new validator to our array -// self.validators[topic]!.append(validator) -// } else { -// /// Create the topic and add the first validator -// self.validators[topic] = [validator] -// } -// } -// } -// -// func addValidatorExtended(for topic:Topic, validator:FancyValidatorExtended, on loop:EventLoop? 
= nil) -> EventLoopFuture { -// self.mainLoop.submit { () -> Void in -// if self.validatorsExt[topic] != nil { -// /// Ensure the validator doesn't already exist in our array -// guard !self.validatorsExt[topic]!.contains(where: { $0.uuid == validator.uuid }) else { return } -// /// Add the new validator to our array -// self.validatorsExt[topic]!.append(validator) -// } else { -// self.validatorsExt[topic] = [validator] -// } -// } -// } - + // func validate(message: Pubsub_Pb_Message) -> EventLoopFuture { + // self.mainLoop.flatSubmit { () -> EventLoopFuture in + // guard let validators = self.validators[message.topic] else { print("Warnin! No Validators found for Topic: '\(message.topic)'. Failing message validation by default."); return self.mainLoop.makeSucceededFuture(false) } + // + // let promise = self.mainLoop.makePromise(of: Bool.self) + // let _ = validators.map { $0.exec(message).cascade(to: promise) } + // + // return promise.futureResult + // } + // } + // + // /// Warning! We currently only validate the message using the first validation function in the tpoics array + // func validateExtended(message:Pubsub_Pb_Message) -> EventLoopFuture { + // self.mainLoop.flatSubmit { () -> EventLoopFuture in + // guard let validator = self.validatorsExt[message.topic]?.first else { print("Warnin! No Validators found for Topic: '\(message.topic)'. Failing message validation by default."); return self.mainLoop.makeSucceededFuture(.reject) } + // return validator.exec(message) + // } + // } + // + // func addValidator(for topic:Topic, validator:FancyValidator, on loop:EventLoop? = nil) -> EventLoopFuture { + // self.mainLoop.submit { () -> Void in + // if self.validators[topic] != nil { + // /// Ensure the validator doesn't already exist in our array + // guard !self.validators[topic]!.contains(where: { $0.uuid == validator.uuid }) else { return } + // /// Add the new validator to our array + // self.validators[topic]!.append(validator) + // } else { + // /// Create the topic and add the first validator + // self.validators[topic] = [validator] + // } + // } + // } + // + // func addValidatorExtended(for topic:Topic, validator:FancyValidatorExtended, on loop:EventLoop? 
= nil) -> EventLoopFuture { + // self.mainLoop.submit { () -> Void in + // if self.validatorsExt[topic] != nil { + // /// Ensure the validator doesn't already exist in our array + // guard !self.validatorsExt[topic]!.contains(where: { $0.uuid == validator.uuid }) else { return } + // /// Add the new validator to our array + // self.validatorsExt[topic]!.append(validator) + // } else { + // self.validatorsExt[topic] = [validator] + // } + // } + // } } protocol LibP2P_PubSub_MessageCacheProtocol { - func put(message:PubSubMessage, with id:String, on loop:EventLoop) -> EventLoopFuture - func get(messageID:String, on loop:EventLoop) -> EventLoopFuture - func exists(messageID:String, on loop:EventLoop) -> EventLoopFuture + func put(message: PubSubMessage, with id: String, on loop: EventLoop) -> EventLoopFuture + func get(messageID: String, on loop: EventLoop) -> EventLoopFuture + func exists(messageID: String, on loop: EventLoop) -> EventLoopFuture func heartbeat() -> EventLoopFuture } @@ -1293,19 +1400,17 @@ protocol LibP2P_PubSub_MessageCacheProtocol { /// - Add, Update, Delete Peer /// - Topic <-> Peer relationships protocol PubSub_PeeringState { - func put(_ peer:PeerID) -> EventLoopFuture + func put(_ peer: PeerID) -> EventLoopFuture } /// Defines the basic operations for a PubSub Router Subscription State /// - Subscribe / Unsubscribe protocol PubSub_Subscriptions { - + } extension Array where Element == String { - mutating func appendIfNotPresent(_ item:String) { + mutating func appendIfNotPresent(_ item: String) { if !self.contains(item) { self.append(item) } } } - - diff --git a/Sources/LibP2PPubSub/Protobufs/RPC+RPCMessageCore.swift b/Sources/LibP2PPubSub/Protobufs/RPC+RPCMessageCore.swift index c983e82..45a3302 100644 --- a/Sources/LibP2PPubSub/Protobufs/RPC+RPCMessageCore.swift +++ b/Sources/LibP2PPubSub/Protobufs/RPC+RPCMessageCore.swift @@ -1,29 +1,35 @@ +//===----------------------------------------------------------------------===// // -// RPC+RPCMessageCore.swift -// +// This source file is part of the swift-libp2p open source project // -// Created by Brandon Toms on 4/23/22. 
+// Copyright (c) 2022-2025 swift-libp2p project authors +// Licensed under MIT // +// See LICENSE for license information +// See CONTRIBUTORS for the list of swift-libp2p project authors +// +// SPDX-License-Identifier: MIT +// +//===----------------------------------------------------------------------===// import LibP2P -extension RPC.SubOpts:SubOptsCore { } +extension RPC.SubOpts: SubOptsCore {} -extension RPC.Message:PubSubMessage { } +extension RPC.Message: PubSubMessage {} -extension RPC:RPCMessageCore { +extension RPC: RPCMessageCore { var subs: [SubOptsCore] { self.subscriptions.map { $0 as SubOptsCore } } - + var messages: [PubSubMessage] { self.msgs.map { $0 as PubSubMessage } } } - extension RPC { - init(_ rpc:RPCMessageCore) throws { + init(_ rpc: RPCMessageCore) throws { self.msgs = rpc.messages.map { var msg = RPC.Message() msg.data = $0.data diff --git a/Sources/LibP2PPubSub/Protobufs/RPC.pb.swift b/Sources/LibP2PPubSub/Protobufs/RPC.pb.swift index f4ceca1..f2077ee 100644 --- a/Sources/LibP2PPubSub/Protobufs/RPC.pb.swift +++ b/Sources/LibP2PPubSub/Protobufs/RPC.pb.swift @@ -1,3 +1,17 @@ +//===----------------------------------------------------------------------===// +// +// This source file is part of the swift-libp2p open source project +// +// Copyright (c) 2022-2025 swift-libp2p project authors +// Licensed under MIT +// +// See LICENSE for license information +// See CONTRIBUTORS for the list of swift-libp2p project authors +// +// SPDX-License-Identifier: MIT +// +//===----------------------------------------------------------------------===// + // DO NOT EDIT. // swift-format-ignore-file // diff --git a/Sources/LibP2PPubSub/Protobufs/RPC.proto b/Sources/LibP2PPubSub/Protobufs/RPC.proto index bc1a01f..ef6833c 100644 --- a/Sources/LibP2PPubSub/Protobufs/RPC.proto +++ b/Sources/LibP2PPubSub/Protobufs/RPC.proto @@ -1,3 +1,17 @@ +//===----------------------------------------------------------------------===// +// +// This source file is part of the swift-libp2p open source project +// +// Copyright (c) 2022-2025 swift-libp2p project authors +// Licensed under MIT +// +// See LICENSE for license information +// See CONTRIBUTORS for the list of swift-libp2p project authors +// +// SPDX-License-Identifier: MIT +// +//===----------------------------------------------------------------------===// + syntax = "proto2"; package pubsub.pb; @@ -32,18 +46,18 @@ message ControlMessage { message ControlIHave { optional string topicID = 1; - // implementors from other languages should use bytes here - go protobuf emits invalid utf8 strings + // implementors from other languages should use bytes here - go protobuf + // emits invalid utf8 strings repeated string messageIDs = 2; } message ControlIWant { - // implementors from other languages should use bytes here - go protobuf emits invalid utf8 strings + // implementors from other languages should use bytes here - go protobuf + // emits invalid utf8 strings repeated string messageIDs = 1; } -message ControlGraft { - optional string topicID = 1; -} +message ControlGraft { optional string topicID = 1; } message ControlPrune { optional string topicID = 1; diff --git a/Sources/LibP2PPubSub/Protobufs/RPC2.pb.swift b/Sources/LibP2PPubSub/Protobufs/RPC2.pb.swift index a48ed75..229b2c6 100644 --- a/Sources/LibP2PPubSub/Protobufs/RPC2.pb.swift +++ b/Sources/LibP2PPubSub/Protobufs/RPC2.pb.swift @@ -1,3 +1,17 @@ +//===----------------------------------------------------------------------===// +// +// This source file is part of the swift-libp2p 
open source project +// +// Copyright (c) 2022-2025 swift-libp2p project authors +// Licensed under MIT +// +// See LICENSE for license information +// See CONTRIBUTORS for the list of swift-libp2p project authors +// +// SPDX-License-Identifier: MIT +// +//===----------------------------------------------------------------------===// + // DO NOT EDIT. // swift-format-ignore-file // diff --git a/Sources/LibP2PPubSub/Protobufs/RPC2.proto b/Sources/LibP2PPubSub/Protobufs/RPC2.proto index 141dd00..c7d6b83 100644 --- a/Sources/LibP2PPubSub/Protobufs/RPC2.proto +++ b/Sources/LibP2PPubSub/Protobufs/RPC2.proto @@ -1,52 +1,62 @@ +//===----------------------------------------------------------------------===// +// +// This source file is part of the swift-libp2p open source project +// +// Copyright (c) 2022-2025 swift-libp2p project authors +// Licensed under MIT +// +// See LICENSE for license information +// See CONTRIBUTORS for the list of swift-libp2p project authors +// +// SPDX-License-Identifier: MIT +// +//===----------------------------------------------------------------------===// + syntax = "proto2"; message RPC { - repeated SubOpts subscriptions = 1; - repeated Message msgs = 2; - optional ControlMessage control = 3; - - message SubOpts { - optional bool subscribe = 1; // subscribe or unsubcribe - optional string topicID = 2; - } - - message Message { - optional bytes from = 1; - optional bytes data = 2; - optional bytes seqno = 3; - repeated string topicIDs = 4; - optional bytes signature = 5; - optional bytes key = 6; - } - - message ControlMessage { - repeated ControlIHave ihave = 1; - repeated ControlIWant iwant = 2; - repeated ControlGraft graft = 3; - repeated ControlPrune prune = 4; - } - - message ControlIHave { - optional string topicID = 1; - repeated bytes messageIDs = 2; - } - - message ControlIWant { - repeated bytes messageIDs = 1; - } - - message ControlGraft { - optional string topicID = 1; - } - - message ControlPrune { - optional string topicID = 1; - repeated PeerInfo peers = 2; - optional uint64 backoff = 3; - } - - message PeerInfo { - optional bytes peerID = 1; - optional bytes signedPeerRecord = 2; - } + repeated SubOpts subscriptions = 1; + repeated Message msgs = 2; + optional ControlMessage control = 3; + + message SubOpts { + optional bool subscribe = 1; // subscribe or unsubcribe + optional string topicID = 2; + } + + message Message { + optional bytes from = 1; + optional bytes data = 2; + optional bytes seqno = 3; + repeated string topicIDs = 4; + optional bytes signature = 5; + optional bytes key = 6; + } + + message ControlMessage { + repeated ControlIHave ihave = 1; + repeated ControlIWant iwant = 2; + repeated ControlGraft graft = 3; + repeated ControlPrune prune = 4; + } + + message ControlIHave { + optional string topicID = 1; + repeated bytes messageIDs = 2; + } + + message ControlIWant { repeated bytes messageIDs = 1; } + + message ControlGraft { optional string topicID = 1; } + + message ControlPrune { + optional string topicID = 1; + repeated PeerInfo peers = 2; + optional uint64 backoff = 3; + } + + message PeerInfo { + optional bytes peerID = 1; + optional bytes signedPeerRecord = 2; + } } diff --git a/Sources/LibP2PPubSub/Routers/Floodsub/Application+Floodsub.swift b/Sources/LibP2PPubSub/Routers/Floodsub/Application+Floodsub.swift index 76d6685..0001878 100644 --- a/Sources/LibP2PPubSub/Routers/Floodsub/Application+Floodsub.swift +++ b/Sources/LibP2PPubSub/Routers/Floodsub/Application+Floodsub.swift @@ -1,9 +1,16 @@ 
+//===----------------------------------------------------------------------===// // -// Application+Floodsub.swift -// +// This source file is part of the swift-libp2p open source project // -// Created by Brandon Toms on 4/19/22. +// Copyright (c) 2022-2025 swift-libp2p project authors +// Licensed under MIT // +// See LICENSE for license information +// See CONTRIBUTORS for the list of swift-libp2p project authors +// +// SPDX-License-Identifier: MIT +// +//===----------------------------------------------------------------------===// import LibP2P @@ -17,11 +24,16 @@ extension Application.PubSubServices.Provider { } } } - - public static func floodsub(emitSelf:Bool) -> Self { + + public static func floodsub(emitSelf: Bool) -> Self { .init { $0.pubsub.use { app -> FloodSub in - let fsub = try! FloodSub(group: app.eventLoopGroup, libp2p: app, debugName: "Floodsub", emitSelf: emitSelf) + let fsub = try! FloodSub( + group: app.eventLoopGroup, + libp2p: app, + debugName: "Floodsub", + emitSelf: emitSelf + ) app.lifecycle.use(fsub) return fsub } @@ -30,10 +42,12 @@ extension Application.PubSubServices.Provider { } extension Application.PubSubServices { - - public var floodsub:FloodSub { + + public var floodsub: FloodSub { guard let fsub = self.service(for: FloodSub.self) else { - fatalError("Floodsub accessed without instantiating it first. Use app.pubsub.use(.floodsub) to initialize a shared Floodsub instance.") + fatalError( + "Floodsub accessed without instantiating it first. Use app.pubsub.use(.floodsub) to initialize a shared Floodsub instance." + ) } return fsub } diff --git a/Sources/LibP2PPubSub/Routers/Floodsub/Floodsub.swift b/Sources/LibP2PPubSub/Routers/Floodsub/Floodsub.swift index b6be906..706ede8 100644 --- a/Sources/LibP2PPubSub/Routers/Floodsub/Floodsub.swift +++ b/Sources/LibP2PPubSub/Routers/Floodsub/Floodsub.swift @@ -1,9 +1,16 @@ +//===----------------------------------------------------------------------===// // -// Floodsub.swift -// +// This source file is part of the swift-libp2p open source project // -// Created by Brandon Toms on 4/18/22. +// Copyright (c) 2022-2025 swift-libp2p project authors +// Licensed under MIT // +// See LICENSE for license information +// See CONTRIBUTORS for the list of swift-libp2p project authors +// +// SPDX-License-Identifier: MIT +// +//===----------------------------------------------------------------------===// import LibP2P @@ -17,89 +24,108 @@ import LibP2P /// - publish(msg:) /// - Local PeerID /// - nextMessageSequenceNumber -public class FloodSub:BasePubSub, PubSubCore, LifecycleHandler { +public class FloodSub: BasePubSub, PubSubCore, LifecycleHandler { public static var multicodec: String = "/floodsub/1.0.0" - - public init(group: EventLoopGroup, libp2p: Application, debugName: String = "Floodsub", emitSelf: Bool = false) throws { + + public init( + group: EventLoopGroup, + libp2p: Application, + debugName: String = "Floodsub", + emitSelf: Bool = false + ) throws { /// Init our PeerState /// - TODO: Use a simple array instead... 
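// Illustrative usage (not part of the diffed sources): with the provider added in
// Application+Floodsub.swift above, an application opts in to this router and later reaches the
// shared instance roughly like so (`app` is assumed to be an already configured LibP2P `Application`):
//
//     app.pubsub.use(.floodsub(emitSelf: false))   // register FloodSub as the PubSub service
//     let fsub = app.pubsub.floodsub               // access the shared instance elsewhere
//
// The `app.pubsub.floodsub` accessor traps if the provider was never registered, as the
// fatalError message in the extension above notes.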
- let peerState = BasicPeerState(eventLoop: group.next()) //PeeringState(eventLoop: group.next()) - + let peerState = BasicPeerState(eventLoop: group.next()) //PeeringState(eventLoop: group.next()) + /// Init our Message Cache let messageCache = BasicMessageCache(eventLoop: group.next(), timeToLiveInSeconds: 30) - + /// Register our floodsub route handler try registerFloodsubRoute(libp2p) - + /// Init super - try super.init(group: group, libp2p: libp2p, peerState: peerState, messageCache: messageCache, debugName: debugName, multicodecs: [FloodSub.multicodec], globalSignaturePolicy: .strictNoSign, canRelayMessages: true, emitSelf: emitSelf) - + try super.init( + group: group, + libp2p: libp2p, + peerState: peerState, + messageCache: messageCache, + debugName: debugName, + multicodecs: [FloodSub.multicodec], + globalSignaturePolicy: .strictNoSign, + canRelayMessages: true, + emitSelf: emitSelf + ) + //self.logger[metadataKey: "Floodsub"] = .string("\(UUID().uuidString.prefix(5))") } - + public func didBoot(_ application: Application) throws { try? self.start() } - + public func shutdown(_ application: Application) { try? self.stop() } - + /// We can override methods if we need to, just make sure to call super... public override func start() throws { guard self.state == .stopped else { throw Errors.alreadyRunning } try super.start() // Do whatever we need to do... } - + /// We can override methods if we need to, just make sure to call super... public override func stop() throws { guard self.state == .starting || self.state == .started else { throw Errors.alreadyStopped } // Do whatever we need to do... try super.stop() } - + /// We have to override / implement this method so our BasePubSub implementation isn't constrained to a particular RPC PubSub Message Type override func decodeRPC(_ data: Data) throws -> RPCMessageCore { try RPC(contiguousBytes: data) } - + /// We have to override / implement this method so our BasePubSub implementation isn't constrained to a particular RPC PubSub Message Type override func encodeRPC(_ rpc: RPCMessageCore) throws -> Data { - return try RPC(rpc).serializedData() + try RPC(rpc).serializedData() } - + /// The methods that we must implement will be enforced by the compiler when we conform to PubSub - override internal func processInboundRPC(_ rpc:RPCMessageCore, from:PeerID, request:Request) -> EventLoopFuture { + override internal func processInboundRPC( + _ rpc: RPCMessageCore, + from: PeerID, + request: Request + ) -> EventLoopFuture { /// Floodsub doesn't have to do any processing on the main RPC message; therefore we just return. - return self.eventLoop.makeSucceededVoidFuture() + self.eventLoop.makeSucceededVoidFuture() } - + /// Publish arbitrary data, bundled as an RPC message under the specified topic override public func publish(topic: String, data: Data, on: EventLoop?) -> EventLoopFuture { self.logger.info("Attempting to publish data as RPC Message") - + var msg = RPC.Message() msg.data = data msg.from = Data(self.peerID.bytes) msg.seqno = Data(self.nextMessageSequenceNumber()) msg.topicIds = [topic] - + return self.publish(msg: msg) } - + /// Convenience method for publishing an RPC message as bytes override public func publish(topic: String, bytes: [UInt8], on: EventLoop?) -> EventLoopFuture { self.publish(topic: topic, data: Data(bytes), on: on) } - + /// Convenience method for publishing an RPC message as a ByteBuffer override public func publish(topic: String, buffer: ByteBuffer, on: EventLoop?) 
-> EventLoopFuture { self.publish(topic: topic, data: Data(buffer.readableBytesView), on: on) } - + /// Attempts to subscribe to the specified topic - func subscribe(topic:Topic) throws -> PubSub.SubscriptionHandler { + func subscribe(topic: Topic) throws -> PubSub.SubscriptionHandler { let defaultConfig = LibP2PCore.PubSub.SubscriptionConfig( topic: topic, signaturePolicy: .strictNoSign, @@ -108,182 +134,196 @@ public class FloodSub:BasePubSub, PubSubCore, LifecycleHandler { ) return try self.subscribe(defaultConfig) } - + /// Attempts to subscribe to the specified topic - public func subscribe(_ config:PubSub.SubscriptionConfig) throws -> PubSub.SubscriptionHandler { + public func subscribe(_ config: PubSub.SubscriptionConfig) throws -> PubSub.SubscriptionHandler { /// Ensure the Topic we're subscribing to is valid... guard !config.topic.isEmpty, config.topic != "" else { throw Errors.invalidTopic } - + self.logger.info("Subscribing to topic: \(config.topic)") - + /// Init Subscription handler let subHandler = PubSub.SubscriptionHandler(pubSub: self, topic: config.topic) self.subscriptions[config.topic] = subHandler - + /// Let the base/parent PubSub implementation know of the subscription... let _ = self.subscribe(config, on: nil) - + /// return the subscription handler return subHandler } - -// public override func unsubscribe(topic:Topic, on loop:EventLoop? = nil) -> EventLoopFuture { -// -// return super.unsubscribe(topic: topic, on: loop) -// } - - private func publish(message:RPC.Message, to:Topic) -> EventLoopFuture { + + // public override func unsubscribe(topic:Topic, on loop:EventLoop? = nil) -> EventLoopFuture { + // + // return super.unsubscribe(topic: topic, on: loop) + // } + + private func publish(message: RPC.Message, to: Topic) -> EventLoopFuture { self.logger.info("TODO::Publish RPC PubSub Message") return self.eventLoop.makeSucceededVoidFuture() } - + /// This will only be called for new (unseen) messages that have passed signature policies and the installed validators // func processInboundMessage(PubSubMessage) { } /// This will be called whenever we receive a new subscription message from a remote peer // func processNewSubscriptions(subs, fromPeer:PeerID) { } - - override internal func processInboundMessages(_ messages:[PubSubMessage], from:PeerID, request:Request) -> EventLoopFuture { + + override internal func processInboundMessages( + _ messages: [PubSubMessage], + from: PeerID, + request: Request + ) -> EventLoopFuture { messages.map { self.processInboundMessage($0, from: from, request: request) }.flatten(on: request.eventLoop) } - - override internal func processInboundMessage(_ message: PubSubMessage, from: PeerID, request: Request) -> EventLoopFuture { - + + override internal func processInboundMessage( + _ message: PubSubMessage, + from: PeerID, + request: Request + ) -> EventLoopFuture { + guard let message = message as? 
RPC.Message else { self.logger.error("Floodsub was passed a PubSubMessage that wasn't an RPC.Message") return self.eventLoop.makeSucceededVoidFuture() } - + /// The message has already been vetted /// Forward the message onto any other subscribers to this topic (excluding the sender) - return self.peerState.peersSubscribedTo(topic: message.topicIds.first!, on: nil).flatMap { subscribers -> EventLoopFuture in - + return self.peerState.peersSubscribedTo(topic: message.topicIds.first!, on: nil).flatMap { + subscribers -> EventLoopFuture in + guard subscribers.count > 0 else { return self.eventLoop.makeSucceededVoidFuture() } - - self.logger.info("Checking \(subscribers.count) `\(message.topicIds.first!)` subscribers for message propogation") - + + self.logger.info( + "Checking \(subscribers.count) `\(message.topicIds.first!)` subscribers for message propogation" + ) + var forwardedRPC = RPC() forwardedRPC.msgs = [message] let payload = try! forwardedRPC.serializedData() - + return subscribers.map { peerStreams in - guard peerStreams.id != from else { self.logger.info("Skipping OP"); return self.eventLoop.makeSucceededVoidFuture() } + guard peerStreams.id != from else { + self.logger.info("Skipping OP") + return self.eventLoop.makeSucceededVoidFuture() + } self.logger.info("Forwarding message to subscriber \(peerStreams.id)") - + try? peerStreams.write(putUVarInt(UInt64(payload.count)) + payload) return self.eventLoop.makeSucceededVoidFuture() }.flatten(on: self.eventLoop) } } - -// internal func processInboundMessage(_ msg:Data, from stream: LibP2P.Stream, request: Request) -> EventLoopFuture { -// /// This should only ever be an RPC message. -// guard let rpc = try? RPC(serializedData: msg) else { -// self.logger.warning("Failed to decode RPC PubSub Message") -// self.logger.info("UTF8: \(String(data: Data(request.payload.readableBytesView), encoding: .utf8) ?? "Not UTF8")") -// self.logger.info("Hex: \(Data(request.payload.readableBytesView).asString(base: .base16))") -// return request.eventLoop.makeSucceededFuture(nil) -// } -// -// guard let remotePeer = request.remotePeer else { -// //guard let remotePeer = stream.connection?.remotePeer else { -// self.logger.warning("Failed to determine message originator (RemotePeer)") -// return request.eventLoop.makeSucceededFuture(nil) -// } -// -// /// Handle the RPC Control Messages (for Floodsub this is only just a list of subscription changes) -// if rpc.subscriptions.count > 0 { -// var subs:[String:Bool] = [:] -// rpc.subscriptions.forEach { -// subs[$0.topicID] = $0.subscribe -// } -// self.logger.info("\(remotePeer)::Subscriptions: \(subs)") -// -// /// - TODO: Event sub, possibly remove later... -// _eventHandler?(.subscriptionChange(remotePeer, subs)) -// -// let _ = self.peerState.update(subscriptions: subs, for: remotePeer) -// -// /// Notify our subscription handlers of any relevant peers -// for sub in subs { -// if sub.1 == true, let handler = self.subscriptions[sub.key] { -// self.logger.info("Notifying `\(sub.key)` subscription handler of new subscriber/peer") -// let _ = handler.on?(.newPeer(remotePeer)) -// } -// /// - TODO: Should we alert our subscription handler when a peer unsubscribes from a topic? 
-// } -// } -// -// /// Handle the published messages -// let _ = rpc.msgs.compactMap { message -> EventLoopFuture in -// -// /// Ensure the message conforms to our MessageSignaturePolicy -// guard passesMessageSignaturePolicy(message) else { -// self.logger.warning("Failed signature policy, discarding message") -// return self.eventLoop.makeSucceededVoidFuture() -// } -// -// /// Derive the message id using the overidable messageID function -// guard let messageIDFunc = self.messageIDFunctions[message.topicIds.first!] else { -// self.logger.warning("No MessageIDFunction defined for topic '\(message.topicIds.first!)'. Dropping Message.") -// return self.eventLoop.makeSucceededVoidFuture() -// } -// -// let id = messageIDFunc(message) -// -// self.logger.info("Message ID `\(id.asString(base: .base16))`") -// self.logger.info("\(message.description)") -// -// /// Check to ensure we haven't seen this message already... -// return self.messageCache.exists(messageID: id, on: nil).flatMap { exists -> EventLoopFuture in -// guard exists == false else { self.logger.trace("Dropping Duplicate Message"); return self.eventLoop.makeSucceededVoidFuture() } -// -// /// Validate the unseen message before storing it in our message cache... -// return self.validate(message: message).flatMap { valid -> EventLoopFuture in -// guard valid else { self.logger.warning("Dropping Invalid Message: \(message)"); return self.eventLoop.makeSucceededVoidFuture() } -// -// /// Store the message in our message cache -// self.logger.info("Storing Message: \(id.asString(base: .base16))"); -// /// - Note: We can run into issues where we end up saving duplicate messages cause when we check for existance they haven't been saved yet, and by the time we get around to saving them, theirs multiple copies ready to be stored. -// /// We temporarily added the `valid` flag to the `put` method to double check existance of a message before forwarding it and alerting our handler. -// return self.messageCache.put(messageID: id, message: (topic: message.topicIds.first!, data: message), on: nil).flatMap { valid in -// guard valid else { self.logger.warning("Encountered Duplicate Message While Attempting To Store In Message Cache"); return self.eventLoop.makeSucceededVoidFuture() } -// -// /// Should we pass the message onto any SubscriptionHandlers at this point? -// if let handler = self.subscriptions[message.topicIds.first!] { -// self.logger.trace("Forwarding message to handler: ID:\(id.asString(base: .base16))") -// let _ = handler.on?(.data(message)) -// } else { -// self.logger.warning("No Subscription Handler for topic:`\(message.topicIds.first!)`") -// } -// -// /// - TODO: Event sub, possibly remove later... -// self._eventHandler?(.message(remotePeer, [message])) -// -// /// Forward the message onto any other subscribers to this topic (excluding the sender) -// return self.peerState.peersSubscribedTo2(topic: message.topicIds.first!, on: nil).flatMap { subscribers -> EventLoopFuture in -// -// guard subscribers.count > 0 else { return self.eventLoop.makeSucceededVoidFuture() } -// -// var forwardedRPC = RPC() -// forwardedRPC.msgs = [message] -// let payload = try! 
forwardedRPC.serializedData() -// -// return subscribers.map { (peerID, stream) in -// guard peerID != remotePeer else { return self.eventLoop.makeSucceededVoidFuture() } -// self.logger.trace("Forwarding message to subscriber \(peerID)") -// -// return stream.write(putUVarInt(UInt64(payload.count)) + payload) -// }.flatten(on: self.eventLoop) -// } -// } -// } -// } -// } -// -// /// Return the response if any... -// return self.eventLoop.makeSucceededFuture(nil) -// } - + + // internal func processInboundMessage(_ msg:Data, from stream: LibP2P.Stream, request: Request) -> EventLoopFuture { + // /// This should only ever be an RPC message. + // guard let rpc = try? RPC(serializedData: msg) else { + // self.logger.warning("Failed to decode RPC PubSub Message") + // self.logger.info("UTF8: \(String(data: Data(request.payload.readableBytesView), encoding: .utf8) ?? "Not UTF8")") + // self.logger.info("Hex: \(Data(request.payload.readableBytesView).asString(base: .base16))") + // return request.eventLoop.makeSucceededFuture(nil) + // } + // + // guard let remotePeer = request.remotePeer else { + // //guard let remotePeer = stream.connection?.remotePeer else { + // self.logger.warning("Failed to determine message originator (RemotePeer)") + // return request.eventLoop.makeSucceededFuture(nil) + // } + // + // /// Handle the RPC Control Messages (for Floodsub this is only just a list of subscription changes) + // if rpc.subscriptions.count > 0 { + // var subs:[String:Bool] = [:] + // rpc.subscriptions.forEach { + // subs[$0.topicID] = $0.subscribe + // } + // self.logger.info("\(remotePeer)::Subscriptions: \(subs)") + // + // /// - TODO: Event sub, possibly remove later... + // _eventHandler?(.subscriptionChange(remotePeer, subs)) + // + // let _ = self.peerState.update(subscriptions: subs, for: remotePeer) + // + // /// Notify our subscription handlers of any relevant peers + // for sub in subs { + // if sub.1 == true, let handler = self.subscriptions[sub.key] { + // self.logger.info("Notifying `\(sub.key)` subscription handler of new subscriber/peer") + // let _ = handler.on?(.newPeer(remotePeer)) + // } + // /// - TODO: Should we alert our subscription handler when a peer unsubscribes from a topic? + // } + // } + // + // /// Handle the published messages + // let _ = rpc.msgs.compactMap { message -> EventLoopFuture in + // + // /// Ensure the message conforms to our MessageSignaturePolicy + // guard passesMessageSignaturePolicy(message) else { + // self.logger.warning("Failed signature policy, discarding message") + // return self.eventLoop.makeSucceededVoidFuture() + // } + // + // /// Derive the message id using the overidable messageID function + // guard let messageIDFunc = self.messageIDFunctions[message.topicIds.first!] else { + // self.logger.warning("No MessageIDFunction defined for topic '\(message.topicIds.first!)'. Dropping Message.") + // return self.eventLoop.makeSucceededVoidFuture() + // } + // + // let id = messageIDFunc(message) + // + // self.logger.info("Message ID `\(id.asString(base: .base16))`") + // self.logger.info("\(message.description)") + // + // /// Check to ensure we haven't seen this message already... + // return self.messageCache.exists(messageID: id, on: nil).flatMap { exists -> EventLoopFuture in + // guard exists == false else { self.logger.trace("Dropping Duplicate Message"); return self.eventLoop.makeSucceededVoidFuture() } + // + // /// Validate the unseen message before storing it in our message cache... 
+ // return self.validate(message: message).flatMap { valid -> EventLoopFuture in + // guard valid else { self.logger.warning("Dropping Invalid Message: \(message)"); return self.eventLoop.makeSucceededVoidFuture() } + // + // /// Store the message in our message cache + // self.logger.info("Storing Message: \(id.asString(base: .base16))"); + // /// - Note: We can run into issues where we end up saving duplicate messages cause when we check for existance they haven't been saved yet, and by the time we get around to saving them, theirs multiple copies ready to be stored. + // /// We temporarily added the `valid` flag to the `put` method to double check existance of a message before forwarding it and alerting our handler. + // return self.messageCache.put(messageID: id, message: (topic: message.topicIds.first!, data: message), on: nil).flatMap { valid in + // guard valid else { self.logger.warning("Encountered Duplicate Message While Attempting To Store In Message Cache"); return self.eventLoop.makeSucceededVoidFuture() } + // + // /// Should we pass the message onto any SubscriptionHandlers at this point? + // if let handler = self.subscriptions[message.topicIds.first!] { + // self.logger.trace("Forwarding message to handler: ID:\(id.asString(base: .base16))") + // let _ = handler.on?(.data(message)) + // } else { + // self.logger.warning("No Subscription Handler for topic:`\(message.topicIds.first!)`") + // } + // + // /// - TODO: Event sub, possibly remove later... + // self._eventHandler?(.message(remotePeer, [message])) + // + // /// Forward the message onto any other subscribers to this topic (excluding the sender) + // return self.peerState.peersSubscribedTo2(topic: message.topicIds.first!, on: nil).flatMap { subscribers -> EventLoopFuture in + // + // guard subscribers.count > 0 else { return self.eventLoop.makeSucceededVoidFuture() } + // + // var forwardedRPC = RPC() + // forwardedRPC.msgs = [message] + // let payload = try! forwardedRPC.serializedData() + // + // return subscribers.map { (peerID, stream) in + // guard peerID != remotePeer else { return self.eventLoop.makeSucceededVoidFuture() } + // self.logger.trace("Forwarding message to subscriber \(peerID)") + // + // return stream.write(putUVarInt(UInt64(payload.count)) + payload) + // }.flatten(on: self.eventLoop) + // } + // } + // } + // } + // } + // + // /// Return the response if any... + // return self.eventLoop.makeSucceededFuture(nil) + // } + } diff --git a/Sources/LibP2PPubSub/Routers/Floodsub/MessageStore/BasicMessageCache.swift b/Sources/LibP2PPubSub/Routers/Floodsub/MessageStore/BasicMessageCache.swift index e781fa7..6d1ab25 100644 --- a/Sources/LibP2PPubSub/Routers/Floodsub/MessageStore/BasicMessageCache.swift +++ b/Sources/LibP2PPubSub/Routers/Floodsub/MessageStore/BasicMessageCache.swift @@ -1,72 +1,79 @@ +//===----------------------------------------------------------------------===// // -// BasicMessageCache.swift -// +// This source file is part of the swift-libp2p open source project // -// Created by Brandon Toms on 4/23/22. 
+// Copyright (c) 2022-2025 swift-libp2p project authors +// Licensed under MIT // +// See LICENSE for license information +// See CONTRIBUTORS for the list of swift-libp2p project authors +// +// SPDX-License-Identifier: MIT +// +//===----------------------------------------------------------------------===// -import LibP2P import CoreFoundation +import LibP2P /// BasicMessageCache - A simple message cache that uses time based message expiration /// /// Creates and maintains a dictionary with messages keyed by their ID for quick access /// Alongside the deictionary it also keeps an ordered (newest -> oldest) list of message ids along with their arivale time for message expiration / deletion /// Every heartbeat we trim the message cache of all expired messages -class BasicMessageCache:MessageStateProtocol { +class BasicMessageCache: MessageStateProtocol { typealias MessageID = Data typealias Message = (topic: String, data: PubSubMessage) - + /// The eventloop that this Message Cache is constrained to - internal let eventLoop:EventLoop + internal let eventLoop: EventLoop /// The Cache - var messages:[MessageID:PubSubMessage] - var expirations:[(MessageID, CFAbsoluteTime)] + var messages: [MessageID: PubSubMessage] + var expirations: [(MessageID, CFAbsoluteTime)] /// Our Logger - var logger:Logger + var logger: Logger /// Our State var state: ServiceLifecycleState - + /// The duration to keep messages for - private let ttl:Double - - required init(eventLoop:EventLoop, timeToLiveInSeconds ttl:Double = 120) { + private let ttl: Double + + required init(eventLoop: EventLoop, timeToLiveInSeconds ttl: Double = 120) { self.eventLoop = eventLoop self.ttl = ttl self.state = .stopped self.messages = [:] self.expirations = [] - + self.logger = Logger(label: "com.swift.libp2p.pubsub.messagecache[\(UUID().uuidString.prefix(5))]") self.logger.logLevel = .trace //self.logger[metadataKey: "MessageCache"] = .string("\(UUID().uuidString.prefix(5))") - + self.logger.debug("Instantiated") } - + func start() throws { guard self.state == .stopped else { throw BasePubSub.Errors.alreadyRunning } self.logger.trace("Starting") - + // Do stuff here, maybe re init our caches?? - + self.state = .started } - + func stop() throws { guard self.state == .started || self.state == .starting else { throw BasePubSub.Errors.alreadyStopped } if self.state == .stopping { self.logger.trace("Force Quiting!") } self.logger.trace("Stopping") - + // Do stuff here, maybe clear our caches?? - + self.state = .stopped } - + /// Adds a message to the current window and the cache - func put(messageID:MessageID, message:Message, on loop:EventLoop? = nil) -> EventLoopFuture { + func put(messageID: MessageID, message: Message, on loop: EventLoop? = nil) -> EventLoopFuture { eventLoop.submit { () -> Bool in /// blindly overwrites any existing entries with the specified messageID if self.messages[messageID] == nil { @@ -78,12 +85,12 @@ class BasicMessageCache:MessageStateProtocol { } }.hop(to: loop ?? eventLoop) } - + /// Given a dictionary of messages to store, this method will attempt to add each one and return a dictionary of the added messages. - func put(messages:[Data:PubSubMessage], on loop:EventLoop? = nil) -> EventLoopFuture<[Data:PubSubMessage]> { - eventLoop.submit { () -> [Data:PubSubMessage] in + func put(messages: [Data: PubSubMessage], on loop: EventLoop? 
= nil) -> EventLoopFuture<[Data: PubSubMessage]> { + eventLoop.submit { () -> [Data: PubSubMessage] in /// blindly overwrites any existing entries with the specified messageID - var added:[Data:PubSubMessage] = [:] + var added: [Data: PubSubMessage] = [:] for message in messages { //guard let topic = message.value.topicIds.first else { continue } if self.messages[message.key] == nil { @@ -94,9 +101,9 @@ class BasicMessageCache:MessageStateProtocol { return added }.hop(to: loop ?? eventLoop) } - + /// Retrieves a message from the cache by its ID, if it is still present. - func get(messageID:MessageID, on loop:EventLoop? = nil) -> EventLoopFuture { + func get(messageID: MessageID, on loop: EventLoop? = nil) -> EventLoopFuture { eventLoop.submit { () -> Message? in if let msg = self.messages[messageID] { return (topic: msg.topicIds.first!, data: msg) @@ -104,14 +111,18 @@ class BasicMessageCache:MessageStateProtocol { return nil }.hop(to: loop ?? eventLoop) } - - func exists(messageID:MessageID, on loop:EventLoop? = nil) -> EventLoopFuture { + + func exists(messageID: MessageID, on loop: EventLoop? = nil) -> EventLoopFuture { eventLoop.submit { () -> Bool in self.messages[messageID] != nil }.hop(to: loop ?? eventLoop) } - - func filter(ids: Set, returningOnly filter: PubSub.MessageState.FilterType, on loop: EventLoop?) -> EventLoopFuture<[Data]> { + + func filter( + ids: Set, + returningOnly filter: PubSub.MessageState.FilterType, + on loop: EventLoop? + ) -> EventLoopFuture<[Data]> { eventLoop.submit { () -> [Data] in switch filter { case .known, .full: @@ -125,15 +136,15 @@ class BasicMessageCache:MessageStateProtocol { } }.hop(to: loop ?? eventLoop) } - + /// Retrieves the message IDs for messages in the most recent history windows, scoped to a given topic. /// - Note: The number of windows to examine is controlled by the gossipLength parameter - func getGossipIDs(topic:String, on loop:EventLoop? = nil) -> EventLoopFuture<[MessageID]> { + func getGossipIDs(topic: String, on loop: EventLoop? = nil) -> EventLoopFuture<[MessageID]> { eventLoop.submit { () -> [MessageID] in self.messages.map { $0.key } }.hop(to: loop ?? eventLoop) } - + func heartbeat() -> EventLoopFuture { eventLoop.submit { var deleted = 0 @@ -151,7 +162,6 @@ class BasicMessageCache:MessageStateProtocol { } } - /// SeenCache - A cache of recently seen MessageIDs /// /// - Note: Used for dropping duplicate messages by the BasePubSub implementation @@ -159,33 +169,33 @@ class BasicMessageCache:MessageStateProtocol { class SeenCache { typealias MessageID = Data typealias Timestamp = Double - + /// The eventloop that this MessageID Cache is constrained to - internal let eventLoop:EventLoop - + internal let eventLoop: EventLoop + /// The Cache - private var cache:[MessageID:Timestamp] - + private var cache: [MessageID: Timestamp] + /// Our Logger - var logger:Logger - + var logger: Logger + /// The duration to keep messages for in seconds - private let ttl:Double - - required init(eventLoop:EventLoop, timeToLiveInSeconds ttl:Double = 120, logger:Logger) { + private let ttl: Double + + required init(eventLoop: EventLoop, timeToLiveInSeconds ttl: Double = 120, logger: Logger) { self.eventLoop = eventLoop self.ttl = ttl self.cache = [:] - + self.logger = logger self.logger[metadataKey: "SeenCache"] = .string("[\(UUID().uuidString.prefix(5))][TTL:\(self.ttl)]") - + self.logger.debug("Instantiated") } - + /// Adds a message to the current window and the cache @discardableResult - func put(messageID:MessageID, on loop:EventLoop? 
= nil) -> EventLoopFuture { + func put(messageID: MessageID, on loop: EventLoop? = nil) -> EventLoopFuture { eventLoop.submit { () -> Bool in if self.cache[messageID] == nil { self.cache[messageID] = CFAbsoluteTimeGetCurrent() @@ -195,9 +205,9 @@ class SeenCache { } }.hop(to: loop ?? eventLoop) } - + @discardableResult - func put(messageIDs:[MessageID], on loop:EventLoop? = nil) -> EventLoopFuture { + func put(messageIDs: [MessageID], on loop: EventLoop? = nil) -> EventLoopFuture { eventLoop.submit { let time = CFAbsoluteTimeGetCurrent() for messageID in messageIDs { @@ -207,14 +217,18 @@ class SeenCache { } }.hop(to: loop ?? eventLoop) } - - func hasSeen(messageID:MessageID, on loop:EventLoop? = nil) -> EventLoopFuture { + + func hasSeen(messageID: MessageID, on loop: EventLoop? = nil) -> EventLoopFuture { eventLoop.submit { () -> Bool in self.cache[messageID] == nil ? false : true }.hop(to: loop ?? eventLoop) } - - func filter(ids: Set, returningOnly filter: PubSub.MessageState.FilterType, on loop: EventLoop?) -> EventLoopFuture<[Data]> { + + func filter( + ids: Set, + returningOnly filter: PubSub.MessageState.FilterType, + on loop: EventLoop? + ) -> EventLoopFuture<[Data]> { eventLoop.submit { () -> [Data] in switch filter { case .known, .full: @@ -228,11 +242,11 @@ class SeenCache { } }.hop(to: loop ?? eventLoop) } - + func trim() -> EventLoopFuture { eventLoop.submit { var deleted = 0 - + /// Trim the seenCache of expired messages let expired = CFAbsoluteTimeGetCurrent() - self.ttl self.cache = self.cache.compactMapValues { time in @@ -243,11 +257,10 @@ class SeenCache { return time } } - + if deleted > 0 { self.logger.trace("Deleted \(deleted) expired messages. Messages in cache \(self.cache.count)") } } } } - diff --git a/Sources/LibP2PPubSub/Routers/Floodsub/PeerStore/BasicPeerStore.swift b/Sources/LibP2PPubSub/Routers/Floodsub/PeerStore/BasicPeerStore.swift index efd5291..8347705 100644 --- a/Sources/LibP2PPubSub/Routers/Floodsub/PeerStore/BasicPeerStore.swift +++ b/Sources/LibP2PPubSub/Routers/Floodsub/PeerStore/BasicPeerStore.swift @@ -1,66 +1,73 @@ +//===----------------------------------------------------------------------===// // -// BasicPeerStore.swift -// +// This source file is part of the swift-libp2p open source project // -// Created by Brandon Toms on 4/23/22. +// Copyright (c) 2022-2025 swift-libp2p project authors +// Licensed under MIT // +// See LICENSE for license information +// See CONTRIBUTORS for the list of swift-libp2p project authors +// +// SPDX-License-Identifier: MIT +// +//===----------------------------------------------------------------------===// import LibP2P -class BasicPeerState:PeerStateProtocol { - +class BasicPeerState: PeerStateProtocol { + typealias Topic = String typealias PID = String //typealias Subscriber = (id:PeerID, inbound:LibP2P.Stream?, outbound:LibP2P.Stream?) - + var state: ServiceLifecycleState - + /// A set of ids of all known peers that support floodsub. - var peers:[PID:PubSub.Subscriber] + var peers: [PID: PubSub.Subscriber] /// A map of subscribed topics to the set of peers in our overlay mesh for that topic. - var mesh:[Topic:[PID]] + var mesh: [Topic: [PID]] /// Like mesh, fanout is a map of topics to a set of peers, however, the fanout map contains topics to which we are NOT subscribed. 
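Note on the two caches declared here: `mesh` tracks peers for topics this node is subscribed to, while `fanout` tracks peers for topics it only knows about. When the node later subscribes to a topic, `subscribeSelf` promotes that topic's fanout entry into the mesh and returns the promoted peers as graft candidates. A rough standalone sketch of that bookkeeping, using plain dictionaries rather than the `BasicPeerState` class in this diff (all names below are illustrative only, not the package API):

    // Illustrative sketch only: simplified stand-ins for the mesh/fanout caches.
    typealias Topic = String
    typealias PID = String

    var mesh: [Topic: [PID]] = [:]    // topics we are subscribed to -> overlay peers
    var fanout: [Topic: [PID]] = [:]  // topics we are NOT subscribed to -> known peers

    /// On subscribing, promote any peers tracked in `fanout` for this topic into `mesh`.
    func subscribeSelf(to topic: Topic) -> [PID] {
        if let existing = mesh[topic] { return existing }        // already subscribed
        let promoted = fanout.removeValue(forKey: topic) ?? []   // fanout -> mesh upgrade
        mesh[topic] = promoted
        return promoted                                          // candidates for GRAFT messages
    }

The returned peer IDs are the candidates the router can send GRAFT messages to, matching the behaviour documented on `subscribeSelf(to:on:)` further down in this file.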
- var fanout:[Topic:[PID]] - + var fanout: [Topic: [PID]] + /// The eventloop that this PeeringState is constrained to - internal let eventLoop:EventLoop + internal let eventLoop: EventLoop /// Our Logger - private var logger:Logger - - required init(eventLoop:EventLoop) { + private var logger: Logger + + required init(eventLoop: EventLoop) { print("PubSub::PeeringState Instantiated...") self.eventLoop = eventLoop self.logger = Logger(label: "com.swift.libp2p.pubsub.pstate[\(UUID().uuidString.prefix(5))]") - self.logger.logLevel = .trace // LOG_LEVEL + self.logger.logLevel = .trace // LOG_LEVEL self.state = .stopped - + /// Initialize our caches self.peers = [:] self.mesh = [:] self.fanout = [:] } - + func start() throws { guard self.state == .stopped else { throw BasePubSub.Errors.alreadyRunning } self.logger.info("Starting") - + // Do stuff here, maybe re init our caches?? self.state = .started } - + func stop() throws { guard self.state == .started || self.state == .starting else { throw BasePubSub.Errors.alreadyStopped } if self.state == .stopping { self.logger.info("Force Quiting!") } self.logger.info("Stopping") - + // Do stuff here, maybe clear our caches?? - + self.state = .stopped } - - func onPeerConnected(peerID peer: PeerID, stream:LibP2PCore.Stream) -> EventLoopFuture { + + func onPeerConnected(peerID peer: PeerID, stream: LibP2PCore.Stream) -> EventLoopFuture { eventLoop.submit { if self.peers[peer.b58String] == nil { switch stream.direction { @@ -77,46 +84,56 @@ class BasicPeerState:PeerStateProtocol { case .outbound: self.peers[peer.b58String]?.attachOutbound(stream: stream) } - self.logger.warning("Received a peer connected event for a peer that was already present in our PeeringState") + self.logger.warning( + "Received a peer connected event for a peer that was already present in our PeeringState" + ) } } } - - func attachInboundStream(_ peerID: PeerID, inboundStream: LibP2PCore.Stream, on loop:EventLoop? = nil) -> EventLoopFuture { + + func attachInboundStream( + _ peerID: PeerID, + inboundStream: LibP2PCore.Stream, + on loop: EventLoop? = nil + ) -> EventLoopFuture { eventLoop.submit { self.peers[peerID.b58String, default: .init(id: peerID)].attachInbound(stream: inboundStream) -// if self.peers[peerID.b58String] == nil { -// //Add the new peer to our `peers` list -// self.peers[peerID.b58String] = .init(id: peerID, inbound: inboundStream) -// self.logger.info("Added \(peerID) to our peering state (peers2)") -// } else { -// self.peers[peerID.b58String]?.attachInbound(stream: inboundStream) -// //self.logger.warning("Received a peer connected event for a peer that was already present in our PeeringState") -// } + // if self.peers[peerID.b58String] == nil { + // //Add the new peer to our `peers` list + // self.peers[peerID.b58String] = .init(id: peerID, inbound: inboundStream) + // self.logger.info("Added \(peerID) to our peering state (peers2)") + // } else { + // self.peers[peerID.b58String]?.attachInbound(stream: inboundStream) + // //self.logger.warning("Received a peer connected event for a peer that was already present in our PeeringState") + // } }.hop(to: loop ?? self.eventLoop) } - - func attachOutboundStream(_ peerID: PeerID, outboundStream: LibP2PCore.Stream, on loop:EventLoop? = nil) -> EventLoopFuture { + + func attachOutboundStream( + _ peerID: PeerID, + outboundStream: LibP2PCore.Stream, + on loop: EventLoop? 
= nil + ) -> EventLoopFuture { eventLoop.submit { self.peers[peerID.b58String, default: .init(id: peerID)].attachOutbound(stream: outboundStream) -// if self.peers[peerID.b58String] == nil { -// //Add the new peer to our `peers` list -// self.peers[peerID.b58String] = .init(id: peerID, outbound: outboundStream) -// self.logger.info("Added \(peerID) to our peering state (peers2)") -// } else { -// self.peers[peerID.b58String]?.attachOutbound(stream: outboundStream) -// //self.logger.warning("Received a peer connected event for a peer that was already present in our PeeringState") -// } + // if self.peers[peerID.b58String] == nil { + // //Add the new peer to our `peers` list + // self.peers[peerID.b58String] = .init(id: peerID, outbound: outboundStream) + // self.logger.info("Added \(peerID) to our peering state (peers2)") + // } else { + // self.peers[peerID.b58String]?.attachOutbound(stream: outboundStream) + // //self.logger.warning("Received a peer connected event for a peer that was already present in our PeeringState") + // } }.hop(to: loop ?? self.eventLoop) } - - func detachInboundStream(_ peerID: PeerID, on loop:EventLoop? = nil) -> EventLoopFuture { + + func detachInboundStream(_ peerID: PeerID, on loop: EventLoop? = nil) -> EventLoopFuture { eventLoop.submit { self.peers[peerID.b58String]?.detachInboundStream() }.hop(to: loop ?? self.eventLoop) } - - func detachOutboundStream(_ peerID: PeerID, on loop:EventLoop? = nil) -> EventLoopFuture { + + func detachOutboundStream(_ peerID: PeerID, on loop: EventLoop? = nil) -> EventLoopFuture { eventLoop.submit { self.peers[peerID.b58String]?.detachOutboundStream() }.hop(to: loop ?? self.eventLoop) @@ -127,30 +144,32 @@ class BasicPeerState:PeerStateProtocol { self.peers.removeValue(forKey: peer.b58String) } } - + /// Adds a new peer (who supports our base PubSub protocol (aka floodsub / gossipsub)) to the peers cache - func addNewPeer(_ peer:PeerID, on loop:EventLoop? = nil) -> EventLoopFuture { + func addNewPeer(_ peer: PeerID, on loop: EventLoop? = nil) -> EventLoopFuture { eventLoop.submit { () -> Bool in if self.peers[peer.b58String] == nil { self.peers[peer.b58String] = .init(id: peer) self.logger.info("Added \(peer) to our peering state") return true } else { - self.logger.warning("Received a peer connected event for a peer that was already present in our PeeringState") + self.logger.warning( + "Received a peer connected event for a peer that was already present in our PeeringState" + ) return false } }.hop(to: loop ?? self.eventLoop) } - + /// Removes the specified peer from our peers cache - func removePeer(_ peer:PeerID, on loop:EventLoop? = nil) -> EventLoopFuture { + func removePeer(_ peer: PeerID, on loop: EventLoop? = nil) -> EventLoopFuture { eventLoop.submit { self.peers.removeValue(forKey: peer.b58String) }.hop(to: loop ?? self.eventLoop) } - + /// This is called when we receive an RPC message from a peer containing the topics - func update(topics:[Topic], for peer:PeerID, on loop:EventLoop? = nil) -> EventLoopFuture { + func update(topics: [Topic], for peer: PeerID, on loop: EventLoop? = nil) -> EventLoopFuture { eventLoop.submit { let pid = peer.b58String for topic in topics { @@ -160,7 +179,7 @@ class BasicPeerState:PeerStateProtocol { subs.append(pid) self.mesh[topic] = subs } - } else { // add the (topic:peer) entry to our fanout cache + } else { // add the (topic:peer) entry to our fanout cache if var subs = self.fanout[topic] { /// Add the peer to the existing topic entry... 
if !subs.contains(pid) { @@ -175,9 +194,9 @@ class BasicPeerState:PeerStateProtocol { } }.hop(to: loop ?? self.eventLoop) } - + /// This is called when we receive an RPC message from a peer containing the topics - func update(subscriptions:[Topic:Bool], for peer:PeerID, on loop:EventLoop? = nil) -> EventLoopFuture { + func update(subscriptions: [Topic: Bool], for peer: PeerID, on loop: EventLoop? = nil) -> EventLoopFuture { eventLoop.submit { let pid = peer.b58String for (topic, subscribed) in subscriptions { @@ -189,7 +208,7 @@ class BasicPeerState:PeerStateProtocol { self.mesh[topic] = subs self.logger.trace("Added \(peer) to mesh[\(topic)] cache") } - } else { // add the (topic:peer) entry to our fanout cache + } else { // add the (topic:peer) entry to our fanout cache if var subs = self.fanout[topic] { /// Add the peer to the existing topic entry... if !subs.contains(pid) { @@ -203,7 +222,7 @@ class BasicPeerState:PeerStateProtocol { self.logger.trace("Added \(peer) to new fanout[\(topic)] cache") } } - } else { // Unregister this PID from our fanout and mesh for the specified topic + } else { // Unregister this PID from our fanout and mesh for the specified topic if let subs = self.mesh[topic], subs.contains(pid) { self.mesh[topic]?.removeAll(where: { $0 == pid }) self.logger.trace("Removed \(peer) from mesh[\(topic)] cache") @@ -217,24 +236,24 @@ class BasicPeerState:PeerStateProtocol { self.logger.info("Updated subscriptions for \(peer)") }.hop(to: loop ?? self.eventLoop) } - - func topicSubscriptions(on loop:EventLoop? = nil) -> EventLoopFuture<[Topic]> { + + func topicSubscriptions(on loop: EventLoop? = nil) -> EventLoopFuture<[Topic]> { eventLoop.submit { () -> [Topic] in self.mesh.map { $0.key } }.hop(to: loop ?? eventLoop) } - + /// This method updates our PeerState to reflect a new subscription /// /// It will... /// - Create a new entry in our Subscription Mesh for the specified topic /// - Bootstrap the new entry with any known peers that also subscribe to the topic /// Returns a list of PeerIDs that can be used to send grafting messages to - func subscribeSelf(to topic:Topic, on loop:EventLoop? = nil) -> EventLoopFuture<[PID]> { + func subscribeSelf(to topic: Topic, on loop: EventLoop? = nil) -> EventLoopFuture<[PID]> { eventLoop.submit { () -> [PID] in /// Make sure we're not already subscribed... if let peers = self.mesh[topic] { return peers } - + /// Check to see if we're aware of the topic (is it in our fanout set) if let knownTopic = self.fanout.removeValue(forKey: topic) { self.logger.trace("Upgrading `\(topic)` subscription from fanout to mesh") @@ -249,13 +268,13 @@ class BasicPeerState:PeerStateProtocol { } } } - + /// This method updates our PeerState to reflect a subscription removal /// /// It will remove the latest known peer subscription state from our Subcription Mesh and transfer /// that state into our fanout set for future reference. /// Returns a list of PeerIDs that can be used to send unsub messages to - func unsubscribeSelf(from topic:Topic, on loop:EventLoop? = nil) -> EventLoopFuture<[PID]> { + func unsubscribeSelf(from topic: Topic, on loop: EventLoop? = nil) -> EventLoopFuture<[PID]> { eventLoop.submit { () -> [PID] in guard self.state == .started || self.state == .stopping else { return [] } /// Check to see if we're aware of the topic (is it in our fanout set) @@ -268,44 +287,43 @@ class BasicPeerState:PeerStateProtocol { } else { self.logger.trace("Unsubscribing self from `\(topic)`") } - + return [] } } - func metaPeerIDs(on loop:EventLoop? 
= nil) -> EventLoopFuture<[Topic:[PeerID]]> { - eventLoop.submit { () -> [Topic:[PeerID]] in - var metaPeers:[Topic:[PeerID]] = [:] - self.fanout.forEach { topic, pids in + func metaPeerIDs(on loop: EventLoop? = nil) -> EventLoopFuture<[Topic: [PeerID]]> { + eventLoop.submit { () -> [Topic: [PeerID]] in + var metaPeers: [Topic: [PeerID]] = [:] + for (topic, pids) in self.fanout { metaPeers[topic] = self.idsToPeers(pids) } return metaPeers }.hop(to: loop ?? eventLoop) } - - private func idToPeer(_ id:PID) -> PeerID? { + + private func idToPeer(_ id: PID) -> PeerID? { guard eventLoop.inEventLoop else { return nil } return self.peers[id]?.id } - - private func idsToPeers(_ ids:[PID]) -> [PeerID] { + + private func idsToPeers(_ ids: [PID]) -> [PeerID] { guard eventLoop.inEventLoop else { return [] } return ids.compactMap { self.peers[$0]?.id } } - + /// Returns a list of all known peers subscribed to the specified topic /// /// - TODO: Make this mo better... right now we do a ton of work to extract the PeerID for each subscriber (this could be solved if we changed peers to a dictionary with the PID as the key). - func peersSubscribedTo(topic:Topic, on loop:EventLoop? = nil) -> EventLoopFuture<[PeerID]> { + func peersSubscribedTo(topic: Topic, on loop: EventLoop? = nil) -> EventLoopFuture<[PeerID]> { eventLoop.submit { () -> [PeerID] in let subbed = self.mesh[topic] ?? [] let known = self.fanout[topic] ?? [] - return self.idsToPeers( subbed + known ) + return self.idsToPeers(subbed + known) }.hop(to: loop ?? eventLoop) } - - - func peersSubscribedTo(topic:Topic, on loop:EventLoop? = nil) -> EventLoopFuture<[PubSub.Subscriber]> { + + func peersSubscribedTo(topic: Topic, on loop: EventLoop? = nil) -> EventLoopFuture<[PubSub.Subscriber]> { eventLoop.submit { () -> [PubSub.Subscriber] in let subbed = self.mesh[topic] ?? [] return subbed.compactMap { @@ -313,14 +331,14 @@ class BasicPeerState:PeerStateProtocol { } }.hop(to: loop ?? eventLoop) } - + func getAllPeers(on loop: EventLoop?) -> EventLoopFuture<[PubSub.Subscriber]> { eventLoop.submit { () -> [PubSub.Subscriber] in self.peers.map { $0.value } }.hop(to: loop ?? eventLoop) } - - func streamsFor(_ peer: PeerID, on loop:EventLoop? = nil) -> EventLoopFuture { + + func streamsFor(_ peer: PeerID, on loop: EventLoop? = nil) -> EventLoopFuture { eventLoop.submit { () throws -> PubSub.Subscriber in if let p = self.peers[peer.b58String] { return p @@ -329,120 +347,118 @@ class BasicPeerState:PeerStateProtocol { } }.hop(to: loop ?? 
eventLoop) } - - typealias Subscriptions = (full:[Topic], meta:[Topic]) - + + typealias Subscriptions = (full: [Topic], meta: [Topic]) + /// Returns the subscriber info (PeerID and Stream) for the specified b58string peer id - func subscriptionForID(_ id:PID) -> EventLoopFuture<(PubSub.Subscriber, Subscriptions)> { + func subscriptionForID(_ id: PID) -> EventLoopFuture<(PubSub.Subscriber, Subscriptions)> { eventLoop.submit { () throws -> (PubSub.Subscriber, Subscriptions) in guard let sub = self.peers[id] else { throw Errors.unknownPeerID } - let full:[Topic] = self.mesh.compactMap { topic in + let full: [Topic] = self.mesh.compactMap { topic in if topic.value.contains(id) { return topic.key } return nil } - let meta:[Topic] = self.fanout.compactMap { topic in + let meta: [Topic] = self.fanout.compactMap { topic in if topic.value.contains(id) { return topic.key } return nil } - + return (sub, (full: full, meta: meta)) } } - - - + /// This method returns true if the peer is a full peers /// false if the peer is a meta data only peer /// and throws an error if the peer id is unknown /// - TODO: shouldn't this be topic specific?? -// func isFullPeer(_ peer: PeerID) -> EventLoopFuture { -// eventLoop.submit { () -> Bool in -// var isPeer = false -// var isFull = false -// let id = peer.b58String -// -// /// Check our mesh cache for the peer id -// for (_, subs) in self.mesh { -// if subs.contains(id) { -// isPeer = true -// isFull = true -// break -// } -// } -// /// If we found the peer in our Mesh cach, they're a full peer, return true! -// if isPeer && isFull { return true } -// -// /// Lets proceed to check the fanout... -// for (_, subs) in self.fanout { -// if subs.contains(id) { -// isPeer = true -// isFull = false -// break -// } -// } -// -// /// We found the peer but they're a metadata only peer... -// if isPeer { return false } -// -// /// If we don't have record of this peer, throw an error -// self.logger.error("Error while checking isFullPeer, unknown PeerID:\(peer)") -// throw Errors.unknownPeerID -// } -// } - -// func makeFullPeer(_ peer: PeerID, for topic: String) -> EventLoopFuture { -// eventLoop.submit { () -> Void in -// let pid = peer.b58String -// if var subs = self.mesh[topic] { -// if !subs.contains( pid ) { -// /// Add the peer to our mesh cache -// subs.append(pid) -// self.mesh[topic] = subs -// } else { -// /// The peer is already a full peer in our mesh cache -// return -// } -// } else { -// /// We don't have an entry for this topic yet -// self.mesh[topic] = [pid] -// } -// -// /// Make sure we remove the PID from our fanout cache -// self.fanout[topic]?.removeAll(where: { $0 == pid } ) -// } -// } -// -// func makeMetaPeer(_ peer: PeerID, for topic: String) -> EventLoopFuture { -// eventLoop.submit { () -> Void in -// let pid = peer.b58String -// if var subs = self.fanout[topic] { -// if !subs.contains( pid ) { -// /// Add the peer to our fanout cache -// subs.append(pid) -// self.fanout[topic] = subs -// //self.logger.info("Added peer \(pid) to fanout on topic \(topic)") -// } else { -// /// The peer is already a meta data only peer in our fanout cache -// //self.logger.info("Peer is already a meta peer, nothing to do") -// return -// } -// } else { -// /// We don't have an entry for this topic yet -// self.fanout[topic] = [pid] -// //self.logger.info("Created new fanout for topic \(topic). 
And downgraded peer \(pid) to meta peer.") -// } -// -// /// Make sure we remove the PID from our full message mesh cache -// self.mesh[topic]?.removeAll(where: { $0 == pid } ) -// //self.logger.info("Removed \(pid) from mesh[\(topic)]") -// //self.logger.info("Remaining Full Peers for topic `\(topic)` -> \(self.mesh[topic]?.compactMap { $0.prefix(5) }.joined(separator: ", ") ?? "nil")") -// //self.logger.info("\(self.mesh)") -// } -// } - - enum Errors:Error { + // func isFullPeer(_ peer: PeerID) -> EventLoopFuture { + // eventLoop.submit { () -> Bool in + // var isPeer = false + // var isFull = false + // let id = peer.b58String + // + // /// Check our mesh cache for the peer id + // for (_, subs) in self.mesh { + // if subs.contains(id) { + // isPeer = true + // isFull = true + // break + // } + // } + // /// If we found the peer in our Mesh cach, they're a full peer, return true! + // if isPeer && isFull { return true } + // + // /// Lets proceed to check the fanout... + // for (_, subs) in self.fanout { + // if subs.contains(id) { + // isPeer = true + // isFull = false + // break + // } + // } + // + // /// We found the peer but they're a metadata only peer... + // if isPeer { return false } + // + // /// If we don't have record of this peer, throw an error + // self.logger.error("Error while checking isFullPeer, unknown PeerID:\(peer)") + // throw Errors.unknownPeerID + // } + // } + + // func makeFullPeer(_ peer: PeerID, for topic: String) -> EventLoopFuture { + // eventLoop.submit { () -> Void in + // let pid = peer.b58String + // if var subs = self.mesh[topic] { + // if !subs.contains( pid ) { + // /// Add the peer to our mesh cache + // subs.append(pid) + // self.mesh[topic] = subs + // } else { + // /// The peer is already a full peer in our mesh cache + // return + // } + // } else { + // /// We don't have an entry for this topic yet + // self.mesh[topic] = [pid] + // } + // + // /// Make sure we remove the PID from our fanout cache + // self.fanout[topic]?.removeAll(where: { $0 == pid } ) + // } + // } + // + // func makeMetaPeer(_ peer: PeerID, for topic: String) -> EventLoopFuture { + // eventLoop.submit { () -> Void in + // let pid = peer.b58String + // if var subs = self.fanout[topic] { + // if !subs.contains( pid ) { + // /// Add the peer to our fanout cache + // subs.append(pid) + // self.fanout[topic] = subs + // //self.logger.info("Added peer \(pid) to fanout on topic \(topic)") + // } else { + // /// The peer is already a meta data only peer in our fanout cache + // //self.logger.info("Peer is already a meta peer, nothing to do") + // return + // } + // } else { + // /// We don't have an entry for this topic yet + // self.fanout[topic] = [pid] + // //self.logger.info("Created new fanout for topic \(topic). And downgraded peer \(pid) to meta peer.") + // } + // + // /// Make sure we remove the PID from our full message mesh cache + // self.mesh[topic]?.removeAll(where: { $0 == pid } ) + // //self.logger.info("Removed \(pid) from mesh[\(topic)]") + // //self.logger.info("Remaining Full Peers for topic `\(topic)` -> \(self.mesh[topic]?.compactMap { $0.prefix(5) }.joined(separator: ", ") ?? 
"nil")") + // //self.logger.info("\(self.mesh)") + // } + // } + + enum Errors: Error { case unknownPeerID case unknownTopic } - + } diff --git a/Sources/LibP2PPubSub/Routers/Floodsub/Route+Floodsub.swift b/Sources/LibP2PPubSub/Routers/Floodsub/Route+Floodsub.swift index 0d0195c..92cefb2 100644 --- a/Sources/LibP2PPubSub/Routers/Floodsub/Route+Floodsub.swift +++ b/Sources/LibP2PPubSub/Routers/Floodsub/Route+Floodsub.swift @@ -1,23 +1,30 @@ +//===----------------------------------------------------------------------===// // -// Route+Floodsub.swift -// +// This source file is part of the swift-libp2p open source project // -// Created by Brandon Toms on 4/18/22. +// Copyright (c) 2022-2025 swift-libp2p project authors +// Licensed under MIT // +// See LICENSE for license information +// See CONTRIBUTORS for the list of swift-libp2p project authors +// +// SPDX-License-Identifier: MIT +// +//===----------------------------------------------------------------------===// import LibP2P -func registerFloodsubRoute(_ app:Application) throws { +func registerFloodsubRoute(_ app: Application) throws { app.group("floodsub") { fsub in fsub.on("1.0.0", handlers: [.varIntFrameDecoder]) { req -> EventLoopFuture> in - + guard req.application.isRunning else { req.logger.error("Floodsub::Recieved Request After App Shutdown") return req.eventLoop.makeFailedFuture(BasePubSub.Errors.alreadyStopped) } return req.application.pubsub.floodsub.processRequest(req) - + } } } diff --git a/Sources/LibP2PPubSub/Routers/Gossipsub/Application+Gossipsub.swift b/Sources/LibP2PPubSub/Routers/Gossipsub/Application+Gossipsub.swift index f454f8e..211c262 100644 --- a/Sources/LibP2PPubSub/Routers/Gossipsub/Application+Gossipsub.swift +++ b/Sources/LibP2PPubSub/Routers/Gossipsub/Application+Gossipsub.swift @@ -1,9 +1,16 @@ +//===----------------------------------------------------------------------===// // -// Application+Gossipsub.swift -// +// This source file is part of the swift-libp2p open source project // -// Created by Brandon Toms on 4/19/22. +// Copyright (c) 2022-2025 swift-libp2p project authors +// Licensed under MIT // +// See LICENSE for license information +// See CONTRIBUTORS for the list of swift-libp2p project authors +// +// SPDX-License-Identifier: MIT +// +//===----------------------------------------------------------------------===// import LibP2P @@ -17,8 +24,8 @@ extension Application.PubSubServices.Provider { } } } - - public static func gossipsub(emitSelf:Bool) -> Self { + + public static func gossipsub(emitSelf: Bool) -> Self { .init { $0.pubsub.use { app -> GossipSub in let gsub = try! GossipSub(group: app.eventLoopGroup, libp2p: app, emitSelf: emitSelf) @@ -30,33 +37,35 @@ extension Application.PubSubServices.Provider { } extension Application.PubSubServices { - - public var gossipsub:GossipSub { + + public var gossipsub: GossipSub { guard let gsub = self.service(for: GossipSub.self) else { - fatalError("Gossipsub accessed without instantiating it first. Use app.pubsub.use(.gossipsub) to initialize a shared Gossipsub instance.") + fatalError( + "Gossipsub accessed without instantiating it first. Use app.pubsub.use(.gossipsub) to initialize a shared Gossipsub instance." 
+ ) } return gsub } - -// public var gossipsub: GossipSub { -// let lock = self.application.locks.lock(for: Key.self) -// lock.lock() -// defer { lock.unlock() } -// if let existing = self.application.storage[Key.self] { -// return existing -// } -// let new = ClientBootstrap(group: self.application.eventLoopGroup) -// // Enable SO_REUSEADDR. -// .channelOption(ChannelOptions.socketOption(.so_reuseaddr), value: 1) -// .channelInitializer { channel in -// // Do we install the upgrader here or do we let the Connection install the handlers??? -// //channel.pipeline.addHandlers(upgrader.channelHandlers(mode: .initiator)) // The MSS Handler itself needs to have access to the Connection Delegate -// channel.eventLoop.makeSucceededVoidFuture() -// } -// -// self.application.storage.set(Key.self, to: new) -// -// return new -// } - + + // public var gossipsub: GossipSub { + // let lock = self.application.locks.lock(for: Key.self) + // lock.lock() + // defer { lock.unlock() } + // if let existing = self.application.storage[Key.self] { + // return existing + // } + // let new = ClientBootstrap(group: self.application.eventLoopGroup) + // // Enable SO_REUSEADDR. + // .channelOption(ChannelOptions.socketOption(.so_reuseaddr), value: 1) + // .channelInitializer { channel in + // // Do we install the upgrader here or do we let the Connection install the handlers??? + // //channel.pipeline.addHandlers(upgrader.channelHandlers(mode: .initiator)) // The MSS Handler itself needs to have access to the Connection Delegate + // channel.eventLoop.makeSucceededVoidFuture() + // } + // + // self.application.storage.set(Key.self, to: new) + // + // return new + // } + } diff --git a/Sources/LibP2PPubSub/Routers/Gossipsub/Gossipsub.swift b/Sources/LibP2PPubSub/Routers/Gossipsub/Gossipsub.swift index bb88bf4..82c27c8 100644 --- a/Sources/LibP2PPubSub/Routers/Gossipsub/Gossipsub.swift +++ b/Sources/LibP2PPubSub/Routers/Gossipsub/Gossipsub.swift @@ -1,9 +1,16 @@ +//===----------------------------------------------------------------------===// // -// Gossipsub.swift -// +// This source file is part of the swift-libp2p open source project // -// Created by Brandon Toms on 4/18/22. 
+// Copyright (c) 2022-2025 swift-libp2p project authors +// Licensed under MIT // +// See LICENSE for license information +// See CONTRIBUTORS for the list of swift-libp2p project authors +// +// SPDX-License-Identifier: MIT +// +//===----------------------------------------------------------------------===// import LibP2P import LibP2PCore @@ -20,134 +27,163 @@ import LibP2PCore /// /// - Note: [Gossipsub v1.0.0 Spec](https://github.com/libp2p/specs/blob/master/pubsub/gossipsub/gossipsub-v1.0.md) /// - Note: The methods that we must implement will be enforced by the compiler when we conform to PubSub -public class GossipSub:BasePubSub, PubSubCore, LifecycleHandler { +public class GossipSub: BasePubSub, PubSubCore, LifecycleHandler { public static var multicodec: String = "/meshsub/1.0.0" - + public let lowerOutboundDegree = 4 public let targetOutboundDegree = 6 public let upperOutboundDegree = 12 //public let lazyOutboundDegree = 6 - - public let heartbeatInterval:TimeAmount = .seconds(1) + + public let heartbeatInterval: TimeAmount = .seconds(1) public let mcacheWindowLength = 8 public let mcacheGossipLength = 5 - - public let seenTTL:TimeAmount = .seconds(120) - - private var eventList:[PubSubEvent] = [] - - public init(group: EventLoopGroup, libp2p: Application, debugName: String = "Gossipsub", emitSelf: Bool = false) throws { + + public let seenTTL: TimeAmount = .seconds(120) + + private var eventList: [PubSubEvent] = [] + + public init( + group: EventLoopGroup, + libp2p: Application, + debugName: String = "Gossipsub", + emitSelf: Bool = false + ) throws { // Init our PeerState let peerState = PeeringState(eventLoop: group.next()) - + // Init our Message Cache - let messageCache = MessageCache(eventLoop: group.next(), historyWindows: mcacheWindowLength, gossipWindows: mcacheGossipLength) - + let messageCache = MessageCache( + eventLoop: group.next(), + historyWindows: mcacheWindowLength, + gossipWindows: mcacheGossipLength + ) + // Regsiter our /meshsub/1.0.0 route try registerGossipsubRoute(libp2p) - + // Init super - try super.init(group: group, libp2p: libp2p, peerState: peerState, messageCache: messageCache, debugName: debugName, multicodecs: [GossipSub.multicodec], globalSignaturePolicy: .strictSign, canRelayMessages: true, emitSelf: emitSelf) - + try super.init( + group: group, + libp2p: libp2p, + peerState: peerState, + messageCache: messageCache, + debugName: debugName, + multicodecs: [GossipSub.multicodec], + globalSignaturePolicy: .strictSign, + canRelayMessages: true, + emitSelf: emitSelf + ) + self._eventHandler = { event in self.eventList.append(event) } } - + public func dumpEventList() { self.logger.notice("*** Event List ***") - self.eventList.forEach { event in + for event in self.eventList { self.logger.notice("\(event.description)") } self.logger.notice("******************") } - + public func didBoot(_ application: Application) throws { try? self.start() } - + public func shutdown(_ application: Application) { try? self.stop() } - + /// We can override methods if we need to, just make sure to call super... public override func start() throws { guard self.state == .stopped else { throw Errors.alreadyRunning } try super.start() // Do whatever else we need to do... } - + /// We can override methods if we need to, just make sure to call super... public override func stop() throws { guard self.state == .starting || self.state == .started else { throw Errors.alreadyStopped } // Do whatever we need to do... 
try super.stop() } - + /// The current implementation would send a seperate IHave message for each topic a meta peer has in common with us. I'm not sure if this is correct according to the specs. /// Ex: if peerA and us and meta peers on topics "news" and "fruit" we would send two iHave messages, one with messageIDs for news and another with messageIDs for fruit public override func heartbeat() -> EventLoopFuture { /// Publish iHave messages if necessary self.logger.trace("Heartbeat called") let tic = Date().timeIntervalSince1970 - - var tasks:[EventLoopFuture] = [] - + + var tasks: [EventLoopFuture] = [] + /// Mesh Maintenance - tasks.append(self.peerState.topicSubscriptions(on: self.eventLoop).flatMap { subscriptions -> EventLoopFuture in - guard let ps = self.peerState as? PeeringState else { return self.eventLoop.makeFailedFuture(Errors.invalidPeerStateConformance) } - - return subscriptions.map { topic in - guard let subs = ps.mesh[topic]?.count else { return self.eventLoop.makeSucceededVoidFuture() } - if subs < self.lowerOutboundDegree { - self.logger.trace("\(topic) doesn't have enough subscribers (Has: \(subs), Wants: \(self.lowerOutboundDegree)) attempting to graft some peers") - return self.getMetaPeers().flatMap { metaPeers in - guard let metaSubsForTopic = metaPeers[topic] else { - self.logger.trace("No peers in fanout to graft") - return self.eventLoop.makeSucceededVoidFuture() + tasks.append( + self.peerState.topicSubscriptions(on: self.eventLoop).flatMap { subscriptions -> EventLoopFuture in + guard let ps = self.peerState as? PeeringState else { + return self.eventLoop.makeFailedFuture(Errors.invalidPeerStateConformance) + } + + return subscriptions.map { topic in + guard let subs = ps.mesh[topic]?.count else { return self.eventLoop.makeSucceededVoidFuture() } + if subs < self.lowerOutboundDegree { + self.logger.trace( + "\(topic) doesn't have enough subscribers (Has: \(subs), Wants: \(self.lowerOutboundDegree)) attempting to graft some peers" + ) + return self.getMetaPeers().flatMap { metaPeers in + guard let metaSubsForTopic = metaPeers[topic] else { + self.logger.trace("No peers in fanout to graft") + return self.eventLoop.makeSucceededVoidFuture() + } + return metaSubsForTopic.prefix(self.targetOutboundDegree - subs).map { + self.graft(peer: $0, for: topic, andSend: true, includingRecentIHaves: true) + }.flatten(on: self.eventLoop) } - return metaSubsForTopic.prefix(self.targetOutboundDegree - subs).map { - self.graft(peer: $0, for: topic, andSend: true, includingRecentIHaves: true) - }.flatten(on: self.eventLoop) - } - } else if subs > self.upperOutboundDegree { - self.logger.trace("\(topic) has too many subscribers (Has: \(subs), Wants: \(self.upperOutboundDegree)) attempting to prune some peers") - return self.getPeersSubscribed(to: topic).flatMap { fullPeers in - guard fullPeers.count > self.upperOutboundDegree else { return self.eventLoop.makeSucceededVoidFuture() } - return fullPeers.prefix(fullPeers.count - self.targetOutboundDegree).map { - self.prune(peer: $0.id, for: topic, andSend: true) - }.flatten(on: self.eventLoop) + } else if subs > self.upperOutboundDegree { + self.logger.trace( + "\(topic) has too many subscribers (Has: \(subs), Wants: \(self.upperOutboundDegree)) attempting to prune some peers" + ) + return self.getPeersSubscribed(to: topic).flatMap { fullPeers in + guard fullPeers.count > self.upperOutboundDegree else { + return self.eventLoop.makeSucceededVoidFuture() + } + return fullPeers.prefix(fullPeers.count - self.targetOutboundDegree).map { 
+ self.prune(peer: $0.id, for: topic, andSend: true) + }.flatten(on: self.eventLoop) + } + } else { + return self.eventLoop.makeSucceededVoidFuture() } - } else { - return self.eventLoop.makeSucceededVoidFuture() - } - }.flatten(on: self.eventLoop).transform(to: ()) - }) - + }.flatten(on: self.eventLoop).transform(to: ()) + } + ) + /// Fanout Maintenance - + /// Send iHaves to meta peers tasks.append(self.sendIHaveMessagesToMetaPeers()) - + return EventLoopFuture.whenAllComplete(tasks, on: self.eventLoop).map({ res in self.logger.trace("Heartbeat completed in \(Int((Date().timeIntervalSince1970 - tic) * 1_000_000))us") }) } - + /// We have to override / implement this method so our BasePubSub implementation isn't constrained to a particular RPC PubSub Message Type override func decodeRPC(_ data: Data) throws -> RPCMessageCore { try RPC(contiguousBytes: data) } - + /// We have to override / implement this method so our BasePubSub implementation isn't constrained to a particular RPC PubSub Message Type override func encodeRPC(_ rpc: RPCMessageCore) throws -> Data { - return try RPC(rpc).serializedData() + try RPC(rpc).serializedData() } - -// public func on(_ topic:String, closure:@escaping((PubSub.SubscriptionEvent) -> EventLoopFuture)) { -// self.subscribe(topic: topic, on: closure) -// } - + + // public func on(_ topic:String, closure:@escaping((PubSub.SubscriptionEvent) -> EventLoopFuture)) { + // self.subscribe(topic: topic, on: closure) + // } + /// Itterates through each meta peer, creates an iHave control message for each topic containing any associated message id's that we may have in our Message Cache, and sends it to them func sendIHaveMessagesToMetaPeers() -> EventLoopFuture { /// Itterate through our subscriptions and create a [topic:[ControlIHave]] dictionary @@ -164,14 +200,16 @@ public class GossipSub:BasePubSub, PubSubCore, LifecycleHandler { self.logger.trace("No iHave Control messages to send to peers") return self.eventLoop.makeSucceededVoidFuture() } - guard let ps = self.peerState as? PeeringState else { return self.eventLoop.makeFailedFuture(Errors.invalidPeerStateConformance) } - var messagesSent:Int = 0 + guard let ps = self.peerState as? PeeringState else { + return self.eventLoop.makeFailedFuture(Errors.invalidPeerStateConformance) + } + var messagesSent: Int = 0 /// For peer in metaPeer return metaPeers.compactMap { peer -> EventLoopFuture in /// Grab the subscription information for this peer id ps.subscriptionForID(peer).map { subscriber, subscriptions -> Void in guard subscriptions.meta.count > 0 else { return } - + /// Gen RPC Message var rpc = RPC() rpc.control = RPC.ControlMessage() @@ -180,35 +218,39 @@ public class GossipSub:BasePubSub, PubSubCore, LifecycleHandler { /// Append Control Message for topic to RPC iHaveMsgs.first(where: { $0.topicID == topic }) } - + /// If RPC.ControlMessages.count > 0 guard rpc.control.ihave.count > 0 else { return } - + guard var payload = try? rpc.serializedData() else { return } payload = putUVarInt(UInt64(payload.count)) + payload - + try? subscriber.write(payload.bytes) - self._eventHandler?(.outbound(.iHave(subscriber.id, rpc.control.ihave.compactMap { try? $0.serializedData() }))) - + self._eventHandler?( + .outbound(.iHave(subscriber.id, rpc.control.ihave.compactMap { try? 
$0.serializedData() })) + ) + messagesSent += 1 - + return } - + }.flatten(on: self.eventLoop).map { if messagesSent > 0 { self.logger.debug("Sent iHave Control messages to \(messagesSent) meta peers") } } - } + } } - + private func generateIHaveMessages() -> EventLoopFuture<[RPC.ControlIHave]> { - guard let mc = self.messageCache as? MessageCache else { return self.eventLoop.makeFailedFuture(Errors.invalidMessageStateConformance) } - return self.eventLoop.flatSubmit({ //-> [String:[RPC.ControlIHave]] in - var iHaves:[RPC.ControlIHave] = [] + guard let mc = self.messageCache as? MessageCache else { + return self.eventLoop.makeFailedFuture(Errors.invalidMessageStateConformance) + } + return self.eventLoop.flatSubmit({ //-> [String:[RPC.ControlIHave]] in + var iHaves: [RPC.ControlIHave] = [] return self.subscriptions.keys.compactMap { topic in mc.getGossipIDs(topic: topic).map { messageIDs in iHaves.append( @@ -219,30 +261,36 @@ public class GossipSub:BasePubSub, PubSubCore, LifecycleHandler { ) } }.flatten(on: self.eventLoop).map { - return iHaves + iHaves } }) } /// Get MetaPeer b58String ID's with an array of their subscriptions (instead of the other way around) - private func getMetaPeers() -> EventLoopFuture<[String:[PeerID]]> { - guard let ps = self.peerState as? PeeringState else { return self.eventLoop.makeFailedFuture(Errors.invalidPeerStateConformance) } + private func getMetaPeers() -> EventLoopFuture<[String: [PeerID]]> { + guard let ps = self.peerState as? PeeringState else { + return self.eventLoop.makeFailedFuture(Errors.invalidPeerStateConformance) + } return ps.metaPeerIDs() } - - private func getMetaPeers(forTopic topic:String) -> EventLoopFuture<[PeerID]> { - guard let ps = self.peerState as? PeeringState else { return self.eventLoop.makeFailedFuture(Errors.invalidPeerStateConformance) } + + private func getMetaPeers(forTopic topic: String) -> EventLoopFuture<[PeerID]> { + guard let ps = self.peerState as? PeeringState else { + return self.eventLoop.makeFailedFuture(Errors.invalidPeerStateConformance) + } return ps.metaPeerIDs().map { $0[topic] ?? [] } } - + private func getPeerIDs() -> EventLoopFuture<[String]> { - guard let ps = self.peerState as? PeeringState else { return self.eventLoop.makeFailedFuture(Errors.invalidPeerStateConformance) } + guard let ps = self.peerState as? 
PeeringState else { + return self.eventLoop.makeFailedFuture(Errors.invalidPeerStateConformance) + } return ps.eventLoop.submit { //self.logger.trace("Gossip Peers Count \(ps.peers.count)") - return ps.peers.keys.map { $0 } + ps.peers.keys.map { $0 } } } - + /// FRUIT TOPIC let FruitSubscription = PubSub.SubscriptionConfig( /// The subscription topic string @@ -254,7 +302,7 @@ public class GossipSub:BasePubSub, PubSubCore, LifecycleHandler { /// The custom messageID function to use for this topic messageIDFunc: .concatFromAndSequenceFields ) - + /// Process the inbound messages however you'd like /// /// This method will get called once the default implementation determines the message is valid ( @@ -262,39 +310,39 @@ public class GossipSub:BasePubSub, PubSubCore, LifecycleHandler { /// - conforms to our signing policy /// - contains the appropriate messageID /// - and isn't a duplicate -// func processRPCMessage(_ message: RPC) -> EventLoopFuture { -// self.logger.info("TODO::ProcessRPCMessage") -// self.logger.info("\(message)") -// return self.eventLoop.makeSucceededVoidFuture() -// } - + // func processRPCMessage(_ message: RPC) -> EventLoopFuture { + // self.logger.info("TODO::ProcessRPCMessage") + // self.logger.info("\(message)") + // return self.eventLoop.makeSucceededVoidFuture() + // } + /// Publish arbitrary data, bundled as an RPC message under the specified topic public override func publish(topic: String, data: Data, on: EventLoop?) -> EventLoopFuture { self.logger.debug("Attempting to publish data as RPC Message") - + var msg = RPC.Message() msg.data = data msg.from = Data(self.peerID.bytes) msg.seqno = Data(self.nextMessageSequenceNumber()) msg.topicIds = [topic] - + return self.publish(msg: msg) } - + /// Convenience method for publishing an RPC message as bytes public override func publish(topic: String, bytes: [UInt8], on: EventLoop?) -> EventLoopFuture { self.publish(topic: topic, data: Data(bytes), on: on) } - + /// Convenience method for publishing an RPC message as a ByteBuffer public override func publish(topic: String, buffer: ByteBuffer, on: EventLoop?) -> EventLoopFuture { self.publish(topic: topic, data: Data(buffer.readableBytesView), on: on) } - + /// Attempts to subscribe to the specified topic /// - Warning: This method assumes some defaults such as `stringNoSign` and `acceptAll` message validation. /// - Warning: This is probably not what you want! Consider using `subscribe(PubSub.SubscriptionConfig)` instead - func subscribe(topic:Topic) throws -> PubSub.SubscriptionHandler { + func subscribe(topic: Topic) throws -> PubSub.SubscriptionHandler { let defaultConfig = PubSub.SubscriptionConfig( topic: topic, signaturePolicy: .strictNoSign, @@ -303,22 +351,22 @@ public class GossipSub:BasePubSub, PubSubCore, LifecycleHandler { ) return try self.subscribe(defaultConfig) } - + /// Attempts to subscribe to the specified topic - public func subscribe(_ config:PubSub.SubscriptionConfig) throws -> PubSub.SubscriptionHandler { + public func subscribe(_ config: PubSub.SubscriptionConfig) throws -> PubSub.SubscriptionHandler { /// Ensure the Topic we're subscribing to is valid... guard !config.topic.isEmpty, config.topic != "" else { throw Errors.invalidTopic } - + self.logger.info("Subscribing to topic: \(config.topic)") - + /// Init Subscription handler let subHandler = PubSub.SubscriptionHandler(pubSub: self, topic: config.topic) self.subscriptions[config.topic] = subHandler - + /// Let the base/parent PubSub implementation know of the subscription... 
let _ = self.subscribe(config, on: nil).flatMap { self.getMetaPeers(forTopic: config.topic).flatMap { subscribers -> EventLoopFuture in - //self.getPeersSubscribed(to: config.topic).flatMap { subscribers -> EventLoopFuture in + //self.getPeersSubscribed(to: config.topic).flatMap { subscribers -> EventLoopFuture in guard !subscribers.isEmpty else { return self.eventLoop.makeSucceededVoidFuture() } let graftSubscribers = subscribers.prefix(self.targetOutboundDegree) self.logger.trace("Sending Graft Messages to \(graftSubscribers.count) subscribers") @@ -328,26 +376,29 @@ public class GossipSub:BasePubSub, PubSubCore, LifecycleHandler { }.flatten(on: self.eventLoop) } } - + /// return the subscription handler return subHandler } - - public func subscribe(_ config:PubSub.SubscriptionConfig, closure:@escaping ((PubSub.SubscriptionEvent) -> EventLoopFuture)) throws { + + public func subscribe( + _ config: PubSub.SubscriptionConfig, + closure: @escaping ((PubSub.SubscriptionEvent) -> EventLoopFuture) + ) throws { /// Ensure the Topic we're subscribing to is valid... guard !config.topic.isEmpty, config.topic != "" else { throw Errors.invalidTopic } - + self.logger.info("Subscribing to topic: \(config.topic)") - + /// Init Subscription handler let subHandler = PubSub.SubscriptionHandler(pubSub: self, topic: config.topic) subHandler.on = closure self.subscriptions[config.topic] = subHandler - + /// Let the base/parent PubSub implementation know of the subscription... let _ = self.subscribe(config, on: nil).flatMap { self.getMetaPeers(forTopic: config.topic).flatMap { subscribers -> EventLoopFuture in - //self.getPeersSubscribed(to: config.topic).flatMap { subscribers -> EventLoopFuture in + //self.getPeersSubscribed(to: config.topic).flatMap { subscribers -> EventLoopFuture in guard !subscribers.isEmpty else { return self.eventLoop.makeSucceededVoidFuture() } let graftSubscribers = subscribers.prefix(self.targetOutboundDegree) self.logger.trace("Sending Graft Messages to \(graftSubscribers.count) subscribers") @@ -358,21 +409,23 @@ public class GossipSub:BasePubSub, PubSubCore, LifecycleHandler { } } } - + override public func unsubscribe(topic: BasePubSub.Topic, on loop: EventLoop? = nil) -> EventLoopFuture { /// If we have peers that are interested in this topic, let them know that we're unsubscribing... self.logger.info("Unsubscribing from topic: \(topic)") return self.getPeersSubscribed(to: topic).flatMap { peers -> EventLoopFuture in guard peers.count > 0 else { return self.eventLoop.makeSucceededVoidFuture() } - + return peers.map { peer -> EventLoopFuture in - self.logger.trace("Sending \(peer.id) a prune message before unsubscribing, because they were a full peer. ✊") + self.logger.trace( + "Sending \(peer.id) a prune message before unsubscribing, because they were a full peer. ✊" + ) return self.prune(peer: peer.id, for: topic, andSend: true) }.flatten(on: self.eventLoop).flatMap { // Call unsub on our base class... - return super.unsubscribe(topic: topic, on: loop) + super.unsubscribe(topic: topic, on: loop) } - + /// Generate an RPC Message containing our unsubscription... /// /// - Note: We just send the prune message here because our base pub sub handles sending subscription updates @@ -384,31 +437,34 @@ public class GossipSub:BasePubSub, PubSubCore, LifecycleHandler { /// - Note: I think this is because our Stream.write future never completes so we never actually send the updated subscriptions... 
(this is what was happening) } } - - private func publish(message:RPC.Message, to:Topic) -> EventLoopFuture { + + private func publish(message: RPC.Message, to: Topic) -> EventLoopFuture { self.logger.debug("TODO::Publish RPC PubSub Message") return self.eventLoop.makeSucceededVoidFuture() } - /// Our chance to process the inbound RPC /// /// - Note: Our BasePubSub handles subscriptions all we need to do here is handle what's unique to GossipSub, which is the Control Messages... - override internal func processInboundRPC(_ rpc:RPCMessageCore, from:PeerID, request:Request) -> EventLoopFuture { + override internal func processInboundRPC( + _ rpc: RPCMessageCore, + from: PeerID, + request: Request + ) -> EventLoopFuture { guard let rpc = rpc as? RPC else { self.logger.error("Gossipsub was passed a RPCMessageCore that wasn't an RPC") return self.eventLoop.makeSucceededVoidFuture() } - + /// Process Control Messages return self.processControlMessages(rpc, peer: request.remotePeer!).flatMap { res -> EventLoopFuture in /// If the inbound control messages warrent a response, we'll send an RPC message back to the remotePeer now self.replyToControlIfNecessary(res, request: request) } - + //return self.processMessages(rpc, peer: from) } - + /// Our chance to process individual PubSub Message /// /// This method will get called once the default implementation determines the message is valid @@ -416,12 +472,16 @@ public class GossipSub:BasePubSub, PubSubCore, LifecycleHandler { /// - conforms to our signing policy /// - contains the appropriate messageID /// - and isn't a duplicate - internal override func processInboundMessage(_ msg: PubSubMessage, from: PeerID, request: Request) -> EventLoopFuture { + internal override func processInboundMessage( + _ msg: PubSubMessage, + from: PeerID, + request: Request + ) -> EventLoopFuture { //self.processInboundMessageFlatMap(msg, from: from, request: request) self.logger.warning("TODO::Process Inbound Message") return self.eventLoop.makeSucceededVoidFuture() } - + /// Our chance to process a batch of PubSub Messages /// /// This method will get called once the default implementation determines the message is valid @@ -429,32 +489,36 @@ public class GossipSub:BasePubSub, PubSubCore, LifecycleHandler { /// - conforms to our signing policy /// - contains the appropriate messageID /// - and isn't a duplicate - internal override func processInboundMessages(_ messages:[PubSubMessage], from: PeerID, request: Request) -> EventLoopFuture { + internal override func processInboundMessages( + _ messages: [PubSubMessage], + from: PeerID, + request: Request + ) -> EventLoopFuture { self.logger.trace("Batch Processing Inbound Messages") - + /// Sort the messages based on topic (if a message contains multiple topic ids, this will duplicate the message for each topic) /// Example message "🍍" has topicIds "food" and "fruit", the message "🍍" will appear twice in the dictionary below. 
Allowing us to notify both the Food and Fruit Subscription handlers seperately let messagesPerTopic = self.sortMessagesByTopic(messages) - + /// Build a list of messages to send //var messagesToSend - + /// Forward the messages onto any other subscribers to this topic (excluding the sender) return messagesPerTopic.compactMap { (topic, msgs) -> EventLoopFuture in /// Note: This method will result in multiple messages being sent to a peer with multiple common subscriptions to us /// Example: PeerA and us are both subscribed to topics Food and Fruit, we'll send an RPC message forwarding all Food messages and another RPC message forwarding all Fruit messages /// We should try and bundle those messages together into a single RPC message self.peerState.peersSubscribedTo(topic: topic, on: nil).flatMap { subscribers -> EventLoopFuture in - + /// Ensure there's subscribers to send the messages to, otherwise bail guard subscribers.count > 0 else { return self.eventLoop.makeSucceededVoidFuture() } - + /// Prepare the message var forwardedRPC = RPC() forwardedRPC.msgs = msgs.compactMap { $0 as? RPC.Message } var payload = try! forwardedRPC.serializedData() payload = putUVarInt(UInt64(payload.count)) + payload - + /// Send the message to each peer subscribed to this topic return subscribers.compactMap { peerStreams -> EventLoopFuture in guard peerStreams.id != from else { return self.eventLoop.makeSucceededVoidFuture() } @@ -463,72 +527,84 @@ public class GossipSub:BasePubSub, PubSubCore, LifecycleHandler { return self.eventLoop.makeSucceededVoidFuture() }.flatten(on: self.eventLoop) } - + }.flatten(on: self.eventLoop) - + /// Send all of the above messages... } - + } /// RPC Control Message Logic extension GossipSub { - - private func processControlMessages(_ rpc:RPC, peer:PeerID) -> EventLoopFuture<(graftRejections:[RPC.ControlPrune], iWantResponses:[RPC.Message], iWant:RPC.ControlIWant?)> { + + private func processControlMessages( + _ rpc: RPC, + peer: PeerID + ) -> EventLoopFuture<(graftRejections: [RPC.ControlPrune], iWantResponses: [RPC.Message], iWant: RPC.ControlIWant?)> + { guard rpc.hasControl else { return self.eventLoop.makeSucceededFuture(([], [], nil)) } return self.processGrafts(rpc.control.graft, peer: peer) .and(self.processPrunes(rpc.control.prune, peer: peer)) .and(self.processIWants(rpc.control.iwant, peer: peer)) .and(self.processIHavesUsingSeenCache(rpc.control.ihave, peer: peer)).map { res in - return (res.0.0.0, res.0.1, res.1) - } + (res.0.0.0, res.0.1, res.1) + } } - + /// Processes Inbound Control Graft Messages /// /// [Process Graft Spec](https://github.com/libp2p/specs/blob/master/pubsub/gossipsub/gossipsub-v1.0.md#graft) /// - On receiving a `GRAFT(topic)` message, the router will check to see if it is indeed subscribed to the topic identified in the message. /// - If so, the router will add the sender to `mesh[topic]`. 
/// - If the router is no longer subscribed to the topic, it will respond with a `PRUNE(topic)` message to inform the sender that it should remove its mesh link - private func processGrafts(_ grafts:[RPC.ControlGraft], peer remotePeer:PeerID) -> EventLoopFuture<[RPC.ControlPrune]> { + private func processGrafts( + _ grafts: [RPC.ControlGraft], + peer remotePeer: PeerID + ) -> EventLoopFuture<[RPC.ControlPrune]> { guard grafts.count > 0 else { return self.eventLoop.makeSucceededFuture([]) } - + return grafts.compactMap { graft -> EventLoopFuture in self.logger.trace("RPC::Control - Handling Graft(\(graft.topicID)) Message") - + if self.subscriptions[graft.topicID] != nil { /// Check to see if this peer is already a full peer, or in our fanout... - self.logger.trace("Received a Graft Message for a topic `\(graft.topicID)` we're subscribed to! Checking \(remotePeer)'s current peer status") - + self.logger.trace( + "Received a Graft Message for a topic `\(graft.topicID)` we're subscribed to! Checking \(remotePeer)'s current peer status" + ) + /// This peice of code will reject an inbound graft request immediately if we're above our `UpperOutboundDegree` limit for this topic. /// Pros: It's nice to be in control of wether or not a remote peer can force a grafting /// Cons: It seems to prevent some peers from ever joining our mesh -// guard let ps = self.peerState as? PeeringState else { return self.eventLoop.makeFailedFuture(Errors.invalidPeerStateConformance) } -// // - TODO: Bad Access Code Error when attempting to read dictionary value count -// guard (ps.mesh[graft.topicID]?.count ?? 0) < self.upperOutboundDegree else { -// self.logger.trace("Adding \(remotePeer) to our mesh would exceed our upper network degree bounds") -// return ps.newMetaPeer(remotePeer, for: graft.topicID).flatMap { -// self.logger.trace("Rejecting Graft request for \(remotePeer)") -// return self.eventLoop.makeSucceededFuture( -// RPC.ControlPrune.with { pruneMsg in -// pruneMsg.topicID = graft.topicID -// } -// ) -// } -// } - + // guard let ps = self.peerState as? PeeringState else { return self.eventLoop.makeFailedFuture(Errors.invalidPeerStateConformance) } + // // - TODO: Bad Access Code Error when attempting to read dictionary value count + // guard (ps.mesh[graft.topicID]?.count ?? 0) < self.upperOutboundDegree else { + // self.logger.trace("Adding \(remotePeer) to our mesh would exceed our upper network degree bounds") + // return ps.newMetaPeer(remotePeer, for: graft.topicID).flatMap { + // self.logger.trace("Rejecting Graft request for \(remotePeer)") + // return self.eventLoop.makeSucceededFuture( + // RPC.ControlPrune.with { pruneMsg in + // pruneMsg.topicID = graft.topicID + // } + // ) + // } + // } + // - TODO: Event sub, possibly remove later... self._eventHandler?(.inbound(.graft(remotePeer, graft.topicID))) - + // - TODO: Do we need to respond with a graft message to confirm? or does no response == confirmation?? // - No Response = Confirmation // - Prune Response = Rejection - return self.graft(peer: remotePeer, for: graft.topicID, andSend: true, includingRecentIHaves: true).transform(to: nil) - + return self.graft(peer: remotePeer, for: graft.topicID, andSend: true, includingRecentIHaves: true) + .transform(to: nil) + } else { // We're not subscribed to the topic, reject the graft message by sending a prune message - self.logger.debug("Received a Graft Message for a topic `\(graft.topicID)` that we're not subscribed to. 
Responding with a Prune Message...") - + self.logger.debug( + "Received a Graft Message for a topic `\(graft.topicID)` that we're not subscribed to. Responding with a Prune Message..." + ) + return self.eventLoop.makeSucceededFuture( RPC.ControlPrune.with { pruneMsg in pruneMsg.topicID = graft.topicID @@ -540,52 +616,60 @@ extension GossipSub { optionalPrunes.compactMap { $0 } } } - - private func processPrunes(_ prunes:[RPC.ControlPrune], peer remotePeer:PeerID) -> EventLoopFuture { + + private func processPrunes(_ prunes: [RPC.ControlPrune], peer remotePeer: PeerID) -> EventLoopFuture { guard prunes.count > 0 else { return self.eventLoop.makeSucceededVoidFuture() } - + return prunes.map { prune in self.logger.debug("RPC::Control - Handling Prune(\(prune.topicID)) Message") - + // - TODO: Event sub, possibly remove later... self._eventHandler?(.inbound(.prune(remotePeer, prune.topicID))) - + return self.prune(peer: remotePeer, for: prune.topicID, andSend: false) - + }.flatten(on: self.eventLoop) } - + /// Processes an array of iWant Control messages in an inbound RPC message. If we contain any of the desired messages we may have in our message cache - private func processIWants(_ iWants:[RPC.ControlIWant], peer remotePeer:PeerID) -> EventLoopFuture<[RPC.Message]> { + private func processIWants(_ iWants: [RPC.ControlIWant], peer remotePeer: PeerID) -> EventLoopFuture<[RPC.Message]> + { guard iWants.count > 0 else { return self.eventLoop.makeSucceededFuture([]) } /// Pull the messageIds out from each iWant message /// Reduce the array of array of message ids into a single array /// Instantiate a Set with the flattened array which results in... /// A unique set of requested Message IDs. - let ids = Set( iWants.map { iWant in iWant.messageIds }.reduce([], +) ) - + let ids = Set(iWants.map { iWant in iWant.messageIds }.reduce([], +)) + /// Ask our message cache for the message associated with each id - return (self.messageCache as! MessageCache).get(messageIDs: ids, on: self.eventLoop).map { $0.compactMap { msg in msg.data as? RPC.Message } }.always { _ in + return (self.messageCache as! MessageCache).get(messageIDs: ids, on: self.eventLoop).map { + $0.compactMap { msg in msg.data as? RPC.Message } + }.always { _ in /// - TODO: Event sub, possibly remove later... self._eventHandler?(.inbound(.iWant(remotePeer, ids.map { $0 }))) } } - + /// Given a set of iHave messages, this method checks our Message Cache for ID's that we haven't seen and bundles them into a single RPC.ControlIWant message /// This version uses the MCache as our source of seen messages (not what the spec says) - private func processIHavesUsingMCache(_ iHaves:[RPC.ControlIHave], peer remotePeer:PeerID) -> EventLoopFuture { + private func processIHavesUsingMCache( + _ iHaves: [RPC.ControlIHave], + peer remotePeer: PeerID + ) -> EventLoopFuture { guard iHaves.count > 0 else { return self.eventLoop.makeSucceededFuture(nil) } - + /// Get a unique set of message IDs (across all topics) to check our message cache for... - let ids:Set = Set(iHaves.compactMap { - /// Ensure we're subscribed to the topic - guard self.subscriptions[$0.topicID] != nil else { return nil } - return $0.messageIds - }.reduce([], +)) - + let ids: Set = Set( + iHaves.compactMap { + /// Ensure we're subscribed to the topic + guard self.subscriptions[$0.topicID] != nil else { return nil } + return $0.messageIds + }.reduce([], +) + ) + /// - TODO: Event sub, possibly remove later... 
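// A minimal, self-contained sketch of the iWant de-duplication step used by
// `processIWants` above: the message IDs of every inbound iWant control message
// are flattened into a single unique Set before the message cache is queried,
// so each requested ID is only looked up once. `IWant` is a stand-in for
// RPC.ControlIWant; `flatMap` here is equivalent to the `map { ... }.reduce([], +)`
// used in the source.
import Foundation

struct IWant {
    let messageIds: [Data]
}

func uniqueRequestedIDs(_ iWants: [IWant]) -> Set<Data> {
    // Flatten all requested IDs across every iWant message, then de-duplicate.
    Set(iWants.flatMap { $0.messageIds })
}

// Usage: two iWants that both ask for the same ID still yield only two unique IDs.
let a = Data([0x01]), b = Data([0x02])
let requested = uniqueRequestedIDs([IWant(messageIds: [a, b]), IWant(messageIds: [b])])
assert(requested.count == 2)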
self._eventHandler?(.inbound(.iHave(remotePeer, ids.map { $0 }))) - + /// For each messageID return self.messageCache.filter(ids: ids, returningOnly: .unknown, on: self.eventLoop).map { needed in /// If we don't have any needed messageID's return nil @@ -597,27 +681,32 @@ extension GossipSub { } } } - + /// This version uses the base pub subs `SeenCache` as our source of seen messages (what the spec says) - private func processIHavesUsingSeenCache(_ iHaves:[RPC.ControlIHave], peer remotePeer:PeerID) -> EventLoopFuture { + private func processIHavesUsingSeenCache( + _ iHaves: [RPC.ControlIHave], + peer remotePeer: PeerID + ) -> EventLoopFuture { guard iHaves.count > 0 else { return self.eventLoop.makeSucceededFuture(nil) } - + /// Get a unique set of message IDs (across all topics) to check our message cache for... - let ids:Set = Set(iHaves.compactMap { - /// Ensure we're subscribed to the topic - guard self.subscriptions[$0.topicID] != nil else { return nil } - return $0.messageIds - }.reduce([], +)) - + let ids: Set = Set( + iHaves.compactMap { + /// Ensure we're subscribed to the topic + guard self.subscriptions[$0.topicID] != nil else { return nil } + return $0.messageIds + }.reduce([], +) + ) + guard !ids.isEmpty else { self.logger.warning("We're discarding received iHave messages...") self.logger.warning("\(iHaves.map { "\($0.topicID) - \($0.messageIds.count)" }.joined(separator: "\n"))") return self.eventLoop.makeSucceededFuture(nil) } - + /// - TODO: Event sub, possibly remove later... self._eventHandler?(.inbound(.iHave(remotePeer, ids.map { $0 }))) - + /// For each messageID /// The spec says we should check our seen cache instead of mcache return self.seenCache.filter(ids: ids, returningOnly: .unknown, on: self.eventLoop).map { needed in @@ -630,20 +719,25 @@ extension GossipSub { } } } - - - private func replyToControlIfNecessary(_ res:(graftRejections:[RPC.ControlPrune], iWantResponses:[RPC.Message], iWant:RPC.ControlIWant?), request:Request) -> EventLoopFuture { + + private func replyToControlIfNecessary( + _ res: (graftRejections: [RPC.ControlPrune], iWantResponses: [RPC.Message], iWant: RPC.ControlIWant?), + request: Request + ) -> EventLoopFuture { if !res.graftRejections.isEmpty || !res.iWantResponses.isEmpty || res.iWant != nil { - guard let stream = request.connection.hasStream(forProtocol: GossipSub.multicodec, direction: .outbound) else { + guard let stream = request.connection.hasStream(forProtocol: GossipSub.multicodec, direction: .outbound) + else { self.logger.warning("Failed to find outbound gossipsub stream to peer \(request.remotePeer!)") self.logger.warning("Skipping Control Message Response") return self.eventLoop.makeSucceededVoidFuture() } self.logger.trace("We have \(res.graftRejections.count) graft rejection messages") self.logger.trace("We have \(res.iWantResponses.count) messages to send in response to iWant requests") - self.logger.trace("We have \(res.iWant?.messageIds.count ?? 0) iWants in response to the iHaves we received") - - res.graftRejections.forEach { reject in + self.logger.trace( + "We have \(res.iWant?.messageIds.count ?? 
0) iWants in response to the iHaves we received" + ) + + for reject in res.graftRejections { self._eventHandler?(.outbound(.prune(request.remotePeer!, reject.topicID))) } if !res.iWantResponses.isEmpty { @@ -652,7 +746,7 @@ extension GossipSub { if let want = res.iWant { self._eventHandler?(.outbound(.iWant(request.remotePeer!, want.messageIds))) } - + /// We need to respond to the sender with an RPC message var rpc = RPC() rpc.msgs = res.iWantResponses @@ -660,14 +754,14 @@ extension GossipSub { ctrl.iwant = res.iWant == nil ? [] : [res.iWant!] ctrl.prune = res.graftRejections } - + var payload = try! rpc.serializedData() payload = putUVarInt(UInt64(payload.count)) + payload - + /// Respond to the remote peer self.logger.debug("Responding to Control Message") let _ = stream.write(payload.bytes) - + } else { self.logger.trace("No Control Response Necessary") } @@ -678,7 +772,7 @@ extension GossipSub { /// These should probably be moved to Gossipsub... extension GossipSub { /// Constructs and sends an RPC Prune Control message to the specified peer and topic - private func _sendPrune(peer:PubSub.Subscriber, for topic:Topic) -> EventLoopFuture { + private func _sendPrune(peer: PubSub.Subscriber, for topic: Topic) -> EventLoopFuture { /// Construct the Prune RPC Message let rpcPrune = RPC.with { rpc in rpc.control = RPC.ControlMessage.with { ctrlMsg in @@ -698,15 +792,20 @@ extension GossipSub { /// Send it try? peer.write(prunePayload.bytes) self._eventHandler?(.outbound(.prune(peer.id, topic))) - + /// Complete the future... return self.eventLoop.makeSucceededVoidFuture() } - func prune(peer:PeerID, for topic:Topic, andSend sendPruneMessage:Bool) -> EventLoopFuture { - guard let ps = self.peerState as? PeeringState else { return self.eventLoop.makeFailedFuture(Errors.invalidPeerStateConformance) } + func prune(peer: PeerID, for topic: Topic, andSend sendPruneMessage: Bool) -> EventLoopFuture { + guard let ps = self.peerState as? PeeringState else { + return self.eventLoop.makeFailedFuture(Errors.invalidPeerStateConformance) + } /// Ensure that the peer is in fact a full peer in our PeerState return ps.isFullPeer(peer).flatMap { isFullPeer in - guard isFullPeer else { self.logger.debug("\(peer) isn't a full peer, no pruning necessary"); return self.eventLoop.makeSucceededVoidFuture() } + guard isFullPeer else { + self.logger.debug("\(peer) isn't a full peer, no pruning necessary") + return self.eventLoop.makeSucceededVoidFuture() + } self.logger.trace("Pruning full peer \(peer)") return ps.makeMetaPeer(peer, for: topic).flatMap { guard sendPruneMessage else { return self.eventLoop.makeSucceededVoidFuture() } @@ -718,9 +817,12 @@ extension GossipSub { } } - /// Constructs and sends an RPC Graft Control message to the specified peer and topic - private func _sendGraft(peer:PubSub.Subscriber, for topic:Topic, withRecentIHaves:[Data]? = nil) -> EventLoopFuture { + private func _sendGraft( + peer: PubSub.Subscriber, + for topic: Topic, + withRecentIHaves: [Data]? = nil + ) -> EventLoopFuture { /// Construct the Graft RPC Message let rpcGraft = RPC.with { rpc in rpc.control = RPC.ControlMessage.with { ctrlMsg in @@ -751,7 +853,7 @@ extension GossipSub { if let ids = withRecentIHaves { self._eventHandler?(.outbound(.iHave(peer.id, ids))) } - + /// We optimisticaly update this peer locally to be a full message (grafted peer). /// If they deny this grafting request then they'll send back a prune request, at which point we update our peering state to reflect the failed grafting... 
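// A self-contained sketch of the length-prefix framing applied whenever an RPC
// message is written to a stream: the serialized protobuf payload is prefixed
// with its byte count encoded as an unsigned varint. `encodeUVarInt(_:)` below
// is a local illustration of what the library's `putUVarInt` helper produces,
// not the library implementation itself.
import Foundation

/// Encodes a UInt64 as an unsigned varint (7 bits per byte, MSB = continuation flag).
func encodeUVarInt(_ value: UInt64) -> [UInt8] {
    var value = value
    var bytes: [UInt8] = []
    repeat {
        var byte = UInt8(value & 0x7F)
        value >>= 7
        if value != 0 { byte |= 0x80 }  // more bytes follow
        bytes.append(byte)
    } while value != 0
    return bytes
}

/// Frames an already-serialized RPC payload for writing to a stream.
func frame(_ payload: Data) -> [UInt8] {
    encodeUVarInt(UInt64(payload.count)) + Array(payload)
}

// Usage: a 300-byte payload gets a two-byte varint prefix (0xAC 0x02).
let framed = frame(Data(repeating: 0, count: 300))
assert(Array(framed.prefix(2)) == [0xAC, 0x02])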
/// - TODO: Update Peering State @@ -759,10 +861,20 @@ extension GossipSub { /// Complete the future... return self.eventLoop.makeSucceededVoidFuture() } - func graft(peer:PeerID, for topic:Topic, andSend sendGraftMessage:Bool, includingRecentIHaves:Bool = false) -> EventLoopFuture { - guard let ps = self.peerState as? PeeringState else { return self.eventLoop.makeFailedFuture(Errors.invalidPeerStateConformance) } + func graft( + peer: PeerID, + for topic: Topic, + andSend sendGraftMessage: Bool, + includingRecentIHaves: Bool = false + ) -> EventLoopFuture { + guard let ps = self.peerState as? PeeringState else { + return self.eventLoop.makeFailedFuture(Errors.invalidPeerStateConformance) + } return ps.isFullPeer(peer).flatMap { isFullPeer -> EventLoopFuture in - guard !isFullPeer else { self.logger.debug("\(peer) is already a full peer, no grafting necessary"); return self.eventLoop.makeSucceededVoidFuture() } + guard !isFullPeer else { + self.logger.debug("\(peer) is already a full peer, no grafting necessary") + return self.eventLoop.makeSucceededVoidFuture() + } self.logger.trace("Grafting meta peer \(peer)") return ps.makeFullPeer(peer, for: topic).flatMap { guard sendGraftMessage else { return self.eventLoop.makeSucceededVoidFuture() } @@ -770,7 +882,9 @@ extension GossipSub { self.logger.trace("Sending Graft Message to \(peer)") if includingRecentIHaves { return (self.messageCache as! MessageCache).getGossipIDs(topic: topic).flatMap { ids in - guard !ids.isEmpty else { return self._sendGraft(peer: peerStreams, for: topic, withRecentIHaves: nil) } + guard !ids.isEmpty else { + return self._sendGraft(peer: peerStreams, for: topic, withRecentIHaves: nil) + } return self._sendGraft(peer: peerStreams, for: topic, withRecentIHaves: ids) } } else { @@ -783,7 +897,7 @@ extension GossipSub { /// This method constructs and publishes an IWant message containign the specified Message IDs. /// If the peer has these messages, we can expect an RPC message containing them to arrive on our handler. - func requestMessages(_ ids:[Data], from peer:PeerID) -> EventLoopFuture { + func requestMessages(_ ids: [Data], from peer: PeerID) -> EventLoopFuture { guard ids.count > 0 else { return self.eventLoop.makeSucceededVoidFuture() } return self.peerState.streamsFor(peer, on: nil).flatMap { stream in let rpcWant = RPC.with { rpc in @@ -812,7 +926,7 @@ extension GossipSub { } } - func publishIHave(_ ids:[String], for topic:Topic) -> EventLoopFuture { + func publishIHave(_ ids: [String], for topic: Topic) -> EventLoopFuture { self.getPeersSubscribed(to: topic).flatMap { subscribers in guard subscribers.count > 0 else { return self.eventLoop.makeSucceededVoidFuture() } @@ -849,519 +963,517 @@ extension GossipSub { /// Old code extension GossipSub { -// internal func processInboundMessageMap(_ msg:Data, from stream: Stream, request: LibP2P.ProtocolRequest) { -// self.logger.info("Processing Inbound Message Using our Map Method") -// -// /// This should only ever be an RPC message. -// guard let rpc = try? RPC(serializedData: msg) else { -// self.logger.warning("Failed to decode RPC PubSub Message") -// self.logger.info("UTF8: \(String(data: request.payload, encoding: .utf8) ?? 
"Not UTF8")") -// self.logger.info("Hex: \(request.payload.asString(base: .base16))") -// return //request.eventLoop.makeSucceededFuture(nil) -// } -// -// guard let remotePeer = stream.connection?.remotePeer else { -// self.logger.warning("Failed to determine message originator (RemotePeer)") -// return //request.eventLoop.makeSucceededFuture(nil) -// } -// -// let tic = Date().timeIntervalSince1970 -// -// /// Forward Messages to Full Peers -// let _ = self.processSubscriptions(rpc, peer: remotePeer).map { -// /// Process Control Messages -// self.processControlMessages(rpc, peer: remotePeer).map { res in -// /// If the inbound control messages warrent a response, we'll send an RPC message back to the remotePeer now -// self.replyToControlIfNecessary(res, stream: stream).map { -// /// Process Messages -// /// Ensure each message conforms to our signature policy, discard any that don't -// self.ensureSignaturePolicyConformance(rpc.msgs).map { signedMessages in -// /// Compute the message ID for each message -// self.computeMessageIds(signedMessages).map { identifiedMessages in -// /// Using the computed ID's, discard any messages that we've already seen / encountered -// self.discardKnownMessages(identifiedMessages).map { newMessages in -// /// Store the new / unique messages in our MessaheCache -// self.storeMessages(newMessages).map { storedMessages in -// -// /// - TODO: Event sub, possibly remove later... -// self._eventHandler?(.message(remotePeer, storedMessages.map { $0.value })) -// -// /// Sort the messages based on topic (if a message contains multiple topic ids, this will duplicate the message for each topic) -// /// Example message "🍍" has topicIds "food" and "fruit", the message "🍍" will appear twice in the dictionary below. Allowing us to notify both the Food and Fruit Subscription handlers seperately -// let messagesPerTopic = self.sortMessagesByTopic(storedMessages) -// -// /// Pass the messages onto any SubscriptionHandlers at this point -// for (topic, msgs) in messagesPerTopic { -// if let handler = self.subscriptions[topic] { -// for (_, message) in msgs { -// let _ = handler.on?(.data(message)) -// } -// } else { -// self.logger.warning("No Subscription Handler for topic:`\(topic)`") -// } -// } -// -// /// Forward the messages onto any other subscribers to this topic (excluding the sender) -// let _ = messagesPerTopic.map { (topic, msgs) in -// /// Note: This method will result in multiple messages being sent to a peer with multiple common subscriptions to us -// /// Example: PeerA and us are both subscribed to topics Food and Fruit, we'll send an RPC message forwarding all Food messages and another RPC message forwarding all Fruit messages -// /// We should try and bundle those messages together into a single RPC message -// self.peerState.peersSubscribedTo2(topic: topic, on: nil).map { subscribers in -// -// /// Ensure there's subscribers to send the messages to, otherwise bail -// guard subscribers.count > 0 else { return } -// -// /// Prepare the message -// var forwardedRPC = RPC() -// forwardedRPC.msgs = msgs.map { $0.message } -// var payload = try! 
forwardedRPC.serializedData() -// payload = putUVarInt(UInt64(payload.count)) + payload -// -// /// Send the message to each peer subscribed to this topic -// let _ = subscribers.map { (peerID, stream) in -// guard peerID != remotePeer else { return } -// self.logger.info("Forwarding message to subscriber \(peerID)") -// -// let _ = stream.write(payload) -// } -// } -// } -// } -// } -// } -// } -// } -// } -// }.always { result in -// let toc = Int((Date().timeIntervalSince1970 - tic) * 1_000_000) -// self.logger.info("Processed \(rpc.msgs.count) Inbound Messages from Peer \(remotePeer) in \(toc)us") -// } -// } - + // internal func processInboundMessageMap(_ msg:Data, from stream: Stream, request: LibP2P.ProtocolRequest) { + // self.logger.info("Processing Inbound Message Using our Map Method") + // + // /// This should only ever be an RPC message. + // guard let rpc = try? RPC(serializedData: msg) else { + // self.logger.warning("Failed to decode RPC PubSub Message") + // self.logger.info("UTF8: \(String(data: request.payload, encoding: .utf8) ?? "Not UTF8")") + // self.logger.info("Hex: \(request.payload.asString(base: .base16))") + // return //request.eventLoop.makeSucceededFuture(nil) + // } + // + // guard let remotePeer = stream.connection?.remotePeer else { + // self.logger.warning("Failed to determine message originator (RemotePeer)") + // return //request.eventLoop.makeSucceededFuture(nil) + // } + // + // let tic = Date().timeIntervalSince1970 + // + // /// Forward Messages to Full Peers + // let _ = self.processSubscriptions(rpc, peer: remotePeer).map { + // /// Process Control Messages + // self.processControlMessages(rpc, peer: remotePeer).map { res in + // /// If the inbound control messages warrent a response, we'll send an RPC message back to the remotePeer now + // self.replyToControlIfNecessary(res, stream: stream).map { + // /// Process Messages + // /// Ensure each message conforms to our signature policy, discard any that don't + // self.ensureSignaturePolicyConformance(rpc.msgs).map { signedMessages in + // /// Compute the message ID for each message + // self.computeMessageIds(signedMessages).map { identifiedMessages in + // /// Using the computed ID's, discard any messages that we've already seen / encountered + // self.discardKnownMessages(identifiedMessages).map { newMessages in + // /// Store the new / unique messages in our MessaheCache + // self.storeMessages(newMessages).map { storedMessages in + // + // /// - TODO: Event sub, possibly remove later... + // self._eventHandler?(.message(remotePeer, storedMessages.map { $0.value })) + // + // /// Sort the messages based on topic (if a message contains multiple topic ids, this will duplicate the message for each topic) + // /// Example message "🍍" has topicIds "food" and "fruit", the message "🍍" will appear twice in the dictionary below. 
Allowing us to notify both the Food and Fruit Subscription handlers seperately + // let messagesPerTopic = self.sortMessagesByTopic(storedMessages) + // + // /// Pass the messages onto any SubscriptionHandlers at this point + // for (topic, msgs) in messagesPerTopic { + // if let handler = self.subscriptions[topic] { + // for (_, message) in msgs { + // let _ = handler.on?(.data(message)) + // } + // } else { + // self.logger.warning("No Subscription Handler for topic:`\(topic)`") + // } + // } + // + // /// Forward the messages onto any other subscribers to this topic (excluding the sender) + // let _ = messagesPerTopic.map { (topic, msgs) in + // /// Note: This method will result in multiple messages being sent to a peer with multiple common subscriptions to us + // /// Example: PeerA and us are both subscribed to topics Food and Fruit, we'll send an RPC message forwarding all Food messages and another RPC message forwarding all Fruit messages + // /// We should try and bundle those messages together into a single RPC message + // self.peerState.peersSubscribedTo2(topic: topic, on: nil).map { subscribers in + // + // /// Ensure there's subscribers to send the messages to, otherwise bail + // guard subscribers.count > 0 else { return } + // + // /// Prepare the message + // var forwardedRPC = RPC() + // forwardedRPC.msgs = msgs.map { $0.message } + // var payload = try! forwardedRPC.serializedData() + // payload = putUVarInt(UInt64(payload.count)) + payload + // + // /// Send the message to each peer subscribed to this topic + // let _ = subscribers.map { (peerID, stream) in + // guard peerID != remotePeer else { return } + // self.logger.info("Forwarding message to subscriber \(peerID)") + // + // let _ = stream.write(payload) + // } + // } + // } + // } + // } + // } + // } + // } + // } + // }.always { result in + // let toc = Int((Date().timeIntervalSince1970 - tic) * 1_000_000) + // self.logger.info("Processed \(rpc.msgs.count) Inbound Messages from Peer \(remotePeer) in \(toc)us") + // } + // } + // This should only handle processing messages... -// internal func processInboundMessageFlatMap2(_ msg:Data, from stream: LibP2P.Stream, request: Request) { -// self.logger.info("Processing Inbound Message Using our FlatMap Method") -// /// This should only ever be an RPC message. -// guard let rpc = try? RPC(serializedData: msg) else { -// self.logger.warning("Failed to decode RPC PubSub Message") -// self.logger.info("UTF8: \(String(data: Data(request.payload.readableBytesView), encoding: .utf8) ?? 
"Not UTF8")") -// self.logger.info("Hex: \(Data(request.payload.readableBytesView).asString(base: .base16))") -// return //request.eventLoop.makeSucceededFuture(nil) -// } -// -// guard let remotePeer = stream.connection?.remotePeer else { -// self.logger.warning("Failed to determine message originator (RemotePeer)") -// return //request.eventLoop.makeSucceededFuture(nil) -// } -// -// self.logger.info("Subscription: \(rpc.subscriptions)") -// self.logger.info("Controls: \(rpc.control)") -// self.logger.info("Messages: \(rpc.msgs)") -// -// let tic = Date().timeIntervalSince1970 -// -// /// Forward Messages to Full Peers -// let _ = self.processSubscriptions(rpc, peer: remotePeer).flatMap { _ -> EventLoopFuture in -// /// Process Control Messages -// self.processControlMessages(rpc, peer: remotePeer).flatMap { res -> EventLoopFuture in -// /// If the inbound control messages warrent a response, we'll send an RPC message back to the remotePeer now -// self.replyToControlIfNecessary(res, stream: stream).flatMap { _ -> EventLoopFuture in -// /// Process Messages -// guard !rpc.msgs.isEmpty else { return self.eventLoop.makeSucceededVoidFuture() } -// /// Ensure each message conforms to our signature policy, discard any that don't -// return self.ensureSignaturePolicyConformance(rpc.msgs).flatMap { signedMessages -> EventLoopFuture in -// guard !signedMessages.isEmpty else { return self.eventLoop.makeSucceededVoidFuture() } -// -// /// Compute the message ID for each message -// return self.computeMessageIds(signedMessages).flatMap { identifiedMessages -> EventLoopFuture in -// guard !identifiedMessages.isEmpty else { return self.eventLoop.makeSucceededVoidFuture() } -// -// /// Using the computed ID's, discard any messages that we've already seen / encountered -// return self.discardKnownMessages(identifiedMessages).flatMap { newMessages -> EventLoopFuture in -// guard !newMessages.isEmpty else { return self.eventLoop.makeSucceededVoidFuture() } -// -// /// Store the new / unique messages in our MessaheCache -// return self.storeMessages(newMessages).flatMap { storedMessages -> EventLoopFuture in -// guard !storedMessages.isEmpty else { return self.eventLoop.makeSucceededVoidFuture() } -// -// /// - TODO: Event sub, possibly remove later... -// self._eventHandler?(.message(remotePeer, storedMessages.map { $0.value })) -// -// /// Sort the messages based on topic (if a message contains multiple topic ids, this will duplicate the message for each topic) -// /// Example message "🍍" has topicIds "food" and "fruit", the message "🍍" will appear twice in the dictionary below. 
Allowing us to notify both the Food and Fruit Subscription handlers seperately -// let messagesPerTopic = self.sortMessagesByTopic(storedMessages) -// -// /// Pass the messages onto any SubscriptionHandlers at this point -// for (topic, msgs) in messagesPerTopic { -// if let handler = self.subscriptions[topic] { -// for (_, message) in msgs { -// let _ = handler.on?(.data(message)) -// } -// } else { -// self.logger.warning("No Subscription Handler for topic:`\(topic)`") -// } -// } -// -// /// Forward the messages onto any other subscribers to this topic (excluding the sender) -// return messagesPerTopic.compactMap { (topic, msgs) -> EventLoopFuture in -// /// Note: This method will result in multiple messages being sent to a peer with multiple common subscriptions to us -// /// Example: PeerA and us are both subscribed to topics Food and Fruit, we'll send an RPC message forwarding all Food messages and another RPC message forwarding all Fruit messages -// /// We should try and bundle those messages together into a single RPC message -// self.peerState.peersSubscribedTo2(topic: topic, on: nil).flatMap { subscribers -> EventLoopFuture in -// -// /// Ensure there's subscribers to send the messages to, otherwise bail -// guard subscribers.count > 0 else { return self.eventLoop.makeSucceededVoidFuture() } -// -// /// Prepare the message -// var forwardedRPC = RPC() -// forwardedRPC.msgs = msgs.compactMap { $0.message as? RPC.Message } -// var payload = try! forwardedRPC.serializedData() -// payload = putUVarInt(UInt64(payload.count)) + payload -// -// /// Send the message to each peer subscribed to this topic -// return subscribers.compactMap { (peerID, stream) -> EventLoopFuture in -// guard peerID != remotePeer else { return self.eventLoop.makeSucceededVoidFuture() } -// self.logger.info("Forwarding message to subscriber \(peerID)") -// -// return stream.write(payload.bytes) -// }.flatten(on: self.eventLoop) -// } -// -// }.flatten(on: self.eventLoop) -// } -// } -// } -// } -// } -// } -// }.always { result in -// let toc = Int((Date().timeIntervalSince1970 - tic) * 1_000_000) -// self.logger.info("Processed \(rpc.msgs.count) Inbound Messages from Peer \(remotePeer) in \(toc)us") -// } -// } - -// internal override func processInboundMessage(_ msg: Data, from stream: Stream, request: LibP2P.ProtocolRequest) -> EventLoopFuture { -// /// This should only ever be an RPC message. -// guard let rpc = try? RPC(serializedData: msg) else { -// self.logger.warning("Failed to decode RPC PubSub Message") -// self.logger.info("UTF8: \(String(data: request.payload, encoding: .utf8) ?? "Not UTF8")") -// self.logger.info("Hex: \(request.payload.asString(base: .base16))") -// return request.eventLoop.makeSucceededFuture(nil) -// } -// -// guard let remotePeer = stream.connection?.remotePeer else { -// self.logger.warning("Failed to determine message originator (RemotePeer)") -// return request.eventLoop.makeSucceededFuture(nil) -// } -// -// /// Handle the RPC Control Messages (for Floodsub this is only just a list of subscription changes) -// if rpc.subscriptions.count > 0 { -// var subs:[String:Bool] = [:] -// rpc.subscriptions.forEach { -// subs[$0.topicID] = $0.subscribe -// } -// self.logger.info("\(remotePeer)::Subscriptions: \(subs)") -// -// /// - TODO: Event sub, possibly remove later... 
-// _eventHandler?(.subscriptionChange(remotePeer, subs)) -// -// let _ = self.peerState.update(subscriptions: subs, for: remotePeer) -// -// /// Notify our subscription handlers of any relevant peers -// for sub in subs { -// if sub.1 == true, let handler = self.subscriptions[sub.key] { -// self.logger.info("Notifying `\(sub.key)` subscription handler of new subscriber/peer") -// let _ = handler.on?(.newPeer(remotePeer)) -// } -// /// - TODO: Should we alert our subscription handler when a peer unsubscribes from a topic? -// } -// } -// -// /// - TODO: As we itterate over the control messages, we should bundle all the repsonses we need and then send them all out in a single message at the end of this loop. -// /// iWant responses -// /// iHave responses -// /// prune / graft responses -// /// All packaged up in one RPC response... -// /// -// /// We should also make sure we're still subscribed to this topic (cause we can still receive messages for a period of time after unsubscribing) -// if rpc.hasControl { -// let ctrl = rpc.control -// -// /// Itterate over all graft messages and handle/process them accordingly -// for graft in ctrl.graft { -// self.logger.info("RPC::Control - Handling Graft(\(graft.topicID)) Message") -// /// At the momment we only check to see if we're subscribed to the topic -// /// But we should also consider the peers relative distance, our high water mark, the peers metadata (measured latency, etc) -// /// before agreeing to the Graft request -// if self.subscriptions[graft.topicID] != nil { -// /// Check to see if this peer is already a full peer, or in our fanout... -// self.logger.info("Received a Graft Message for a topic `\(graft.topicID)` we're subscribed to! Checking \(remotePeer)'s current peer status") -// let _ = self.peerState.isFullPeer(remotePeer).flatMap { isFullPeer -> EventLoopFuture in -// guard isFullPeer == false else { self.logger.info("Remote peer confirmed our Graft Request"); return self.mainLoop.makeSucceededVoidFuture() } -// self.logger.info("Attempting to upgrade Metadata Peer to Full Peer") -// return self.peerState.makeFullPeer(remotePeer, for: graft.topicID).flatMap { _ -> EventLoopFuture in -// // Lets accept the graft by responding with a graft -// self.logger.info("Accepting the Graft by sending back a Graft Message...") -// -// guard let rpc = try? (RPC.with { rpcMsg in -// rpcMsg.control = RPC.ControlMessage.with { ctrlMsg in -// ctrlMsg.graft = [ -// RPC.ControlGraft.with { grftMsg in -// grftMsg.topicID = graft.topicID -// } -// ] -// } -// }).serializedData() else { self.logger.warning("Failed to construct Graft Response Message"); return self.mainLoop.makeSucceededVoidFuture() } -// return stream.write( putUVarInt(UInt64(rpc.count)) + rpc ) -// } -// } -// -// } else { -// // We're not subscribed to the topic, reject the graft message by sending a prune message -// self.logger.info("Received a Graft Message for a topic `\(graft.topicID)` that we're not subscribed to. Responding with a Prune Message...") -// guard let rpc = try? (RPC.with { rpcMsg in -// rpcMsg.control = RPC.ControlMessage.with { ctrlMsg in -// ctrlMsg.prune = [ -// RPC.ControlPrune.with { pruneMsg in -// pruneMsg.topicID = graft.topicID -// } -// ] -// } -// }).serializedData() else { continue } -// let _ = stream.write( putUVarInt(UInt64(rpc.count)) + rpc ) -// } -// -// /// - TODO: Event sub, possibly remove later... 
-// _eventHandler?(.graft(remotePeer, graft.topicID)) -// } -// -// /// Itterate over all prune messages and handle/process them accordingly -// for prune in ctrl.prune { -// self.logger.info("RPC::Control - Handling Prune(\(prune.topicID)) Message") -// -// let _ = self.peerState.isFullPeer(remotePeer).flatMap { isFullPeer -> EventLoopFuture in -// guard isFullPeer else { return self.mainLoop.makeSucceededVoidFuture() } -// self.logger.info("Pruning Full Peer: \(remotePeer) at their request") -// return self.peerState.makeMetaPeer(remotePeer, for: prune.topicID) -// } -// -// /// - TODO: Event sub, possibly remove later... -// _eventHandler?(.prune(remotePeer, prune.topicID)) -// } -// -// for iHave in ctrl.ihave { -// self.logger.info("RPC::Control - Handle IHave(\(iHave.topicID)) Message") -// self.logger.info("\(iHave.messageIds.compactMap { $0.asString(base: .base16) }.joined(separator: ", "))") -// -// let _ = iHave.messageIds.map { msgId in -// self.messageCache.exists(messageID: msgId, on: nil).map { exists -> Data? in -// guard !exists else { return nil } -// return msgId -// } -// }.flatten(on: self.mainLoop).map { msgWeNeed in -// guard msgWeNeed.compactMap({ $0 }).count > 0 else { self.logger.info("All caught up with the gossip!"); return } -// self.logger.info("Messages we need \(msgWeNeed.compactMap { $0?.asString(base: .base16) }.joined(separator: ","))") -// -// //Go ahead an request those messages... -// let rpc = RPC.with { r in -// r.control = RPC.ControlMessage.with { ctrl in -// ctrl.iwant = [RPC.ControlIWant.with { iWant in -// iWant.messageIds = msgWeNeed.compactMap { $0 } -// }] -// } -// } -// -// var payload = try! rpc.serializedData() -// payload = putUVarInt(UInt64(payload.count)) + payload -// -// let _ = stream.write(payload) -// } -// -// /// - TODO: Event sub, possibly remove later... -// _eventHandler?(.iHave(remotePeer, iHave.messageIds)) -// } -// -// for iWant in ctrl.iwant { -// self.logger.info("RPC::Control - Handle IWant Message") -// self.logger.info("\(iWant.messageIds.map { "\($0.asString(base: .base16))" }.joined(separator: " ,"))") -// -// let _ = iWant.messageIds.compactMap { msgId in -// self.messageCache.get(messageID: msgId, on: nil) -// }.flatten(on: self.mainLoop).map { messages in -// var rpc = RPC() -// rpc.msgs = messages.compactMap { $0?.data } -// -// var payload = try! rpc.serializedData() -// payload = putUVarInt(UInt64(payload.count)) + payload -// -// self.logger.info("Responding to iWant control message by sending \(rpc.msgs.count)/\(iWant.messageIds.count) of the requested messages to peer \(remotePeer)") -// -// let _ = stream.write(payload) -// } -// -// /// - TODO: Event sub, possibly remove later... -// _eventHandler?(.iWant(remotePeer, iWant.messageIds)) -// } -// } -// -// /// Handle the published messages -// let _ = rpc.msgs.flatMap { message -> EventLoopFuture in -// -// /// Ensure the message conforms to our MessageSignaturePolicy -// guard passesMessageSignaturePolicy(message) else { -// self.logger.warning("Failed signature policy, discarding message") -// return self.mainLoop.makeSucceededVoidFuture() -// } -// -// /// Derive the message id using the overidable messageID function -// guard let messageIDFunc = self.messageIDFunctions[message.topicIds.first!] else { -// self.logger.warning("No MessageIDFunction defined for topic '\(message.topicIds.first!)'. 
Dropping Message.") -// return self.mainLoop.makeSucceededVoidFuture() -// } -// -// let id = messageIDFunc(message) -// -// self.logger.info("Message ID `\(id.asString(base: .base16))`") -// self.logger.info("\(message.description)") -// -// /// Check to ensure we haven't seen this message already... -// return self.messageCache.exists(messageID: id, on: nil).flatMap { exists -> EventLoopFuture in -// guard exists == false else { self.logger.warning("Dropping Duplicate Message"); return self.mainLoop.makeSucceededVoidFuture() } -// -// /// Validate the unseen message before storing it in our message cache... -// return self.validate(message: message).flatMap { valid -> EventLoopFuture in -// guard valid else { self.logger.warning("Dropping Invalid Message: \(message)"); return self.mainLoop.makeSucceededVoidFuture() } -// -// /// Store the message in our message cache -// self.logger.info("Storing Message: \(id.asString(base: .base16))"); -// /// - Note: We can run into issues where we end up saving duplicate messages cause when we check for existance they haven't been saved yet, and by the time we get around to saving them, theirs multiple copies ready to be stored. -// /// We temporarily added the `valid` flag to the `put` method to double check existance of a message before forwarding it and alerting our handler. -// return self.messageCache.put(messageID: id, message: (topic: message.topicIds.first!, data: message), on: nil).flatMap { valid in -// guard valid else { self.logger.warning("Encountered Duplicate Message While Attempting To Store In Message Cache"); return self.mainLoop.makeSucceededVoidFuture() } -// -// /// Should we pass the message onto any SubscriptionHandlers at this point? -// if let handler = self.subscriptions[message.topicIds.first!] { -// self.logger.info("Forwarding message to handler: ID:\(id.asString(base: .base16))") -// let _ = handler.on?(.data(message)) -// } else { -// self.logger.warning("No Subscription Handler for topic:`\(message.topicIds.first!)`") -// } -// -// /// - TODO: Event sub, possibly remove later... -// self._eventHandler?(.message(remotePeer, [message])) -// -// /// Forward the message onto any other subscribers to this topic (excluding the sender) -// return self.peerState.peersSubscribedTo2(topic: message.topicIds.first!, on: nil).flatMap { subscribers -> EventLoopFuture in -// -// guard subscribers.count > 0 else { return self.mainLoop.makeSucceededVoidFuture() } -// -// var forwardedRPC = RPC() -// forwardedRPC.msgs = [message] -// let payload = try! forwardedRPC.serializedData() -// -// return subscribers.map { (peerID, stream) in -// guard peerID != remotePeer else { return self.mainLoop.makeSucceededVoidFuture() } -// self.logger.info("Forwarding message to subscriber \(peerID)") -// -// return stream.write(putUVarInt(UInt64(payload.count)) + payload) -// }.flatten(on: self.mainLoop) -// } -// } -// } -// } -// } -// -// /// Return our response if we have one... 
-// return self.mainLoop.makeSucceededFuture(nil) -// } - -// private func replyToControlIfNecessary2(_ res:(graftRejections:[RPC.ControlPrune], iWantResponses:[RPC.Message], iWant:RPC.ControlIWant?), stream:LibP2P.Stream) -> EventLoopFuture { -// if !res.graftRejections.isEmpty || !res.iWantResponses.isEmpty || res.iWant != nil { -// self.logger.info("We have \(res.graftRejections.count) graft rejection messages") -// self.logger.info("We have \(res.iWantResponses.count) messages to send in response to iWant requests") -// self.logger.info("We have \(res.iWant?.messageIds.count ?? 0) iWants in response to the iHaves we received") -// -// /// We need to respond to the sender with an RPC message -// var rpc = RPC() -// rpc.msgs = res.iWantResponses -// rpc.control = RPC.ControlMessage.with { ctrl in -// ctrl.iwant = res.iWant == nil ? [] : [res.iWant!] -// ctrl.prune = res.graftRejections -// } -// -// var payload = try! rpc.serializedData() -// payload = putUVarInt(UInt64(payload.count)) + payload -// -// /// Respond to the remote peer -// self.logger.info("Responding to Control Message") -// let _ = stream.write(payload.bytes) -// -// } else { -// self.logger.info("No Control Response Necessary") -// } -// return self.eventLoop.makeSucceededVoidFuture() -// } - + // internal func processInboundMessageFlatMap2(_ msg:Data, from stream: LibP2P.Stream, request: Request) { + // self.logger.info("Processing Inbound Message Using our FlatMap Method") + // /// This should only ever be an RPC message. + // guard let rpc = try? RPC(serializedData: msg) else { + // self.logger.warning("Failed to decode RPC PubSub Message") + // self.logger.info("UTF8: \(String(data: Data(request.payload.readableBytesView), encoding: .utf8) ?? "Not UTF8")") + // self.logger.info("Hex: \(Data(request.payload.readableBytesView).asString(base: .base16))") + // return //request.eventLoop.makeSucceededFuture(nil) + // } + // + // guard let remotePeer = stream.connection?.remotePeer else { + // self.logger.warning("Failed to determine message originator (RemotePeer)") + // return //request.eventLoop.makeSucceededFuture(nil) + // } + // + // self.logger.info("Subscription: \(rpc.subscriptions)") + // self.logger.info("Controls: \(rpc.control)") + // self.logger.info("Messages: \(rpc.msgs)") + // + // let tic = Date().timeIntervalSince1970 + // + // /// Forward Messages to Full Peers + // let _ = self.processSubscriptions(rpc, peer: remotePeer).flatMap { _ -> EventLoopFuture in + // /// Process Control Messages + // self.processControlMessages(rpc, peer: remotePeer).flatMap { res -> EventLoopFuture in + // /// If the inbound control messages warrent a response, we'll send an RPC message back to the remotePeer now + // self.replyToControlIfNecessary(res, stream: stream).flatMap { _ -> EventLoopFuture in + // /// Process Messages + // guard !rpc.msgs.isEmpty else { return self.eventLoop.makeSucceededVoidFuture() } + // /// Ensure each message conforms to our signature policy, discard any that don't + // return self.ensureSignaturePolicyConformance(rpc.msgs).flatMap { signedMessages -> EventLoopFuture in + // guard !signedMessages.isEmpty else { return self.eventLoop.makeSucceededVoidFuture() } + // + // /// Compute the message ID for each message + // return self.computeMessageIds(signedMessages).flatMap { identifiedMessages -> EventLoopFuture in + // guard !identifiedMessages.isEmpty else { return self.eventLoop.makeSucceededVoidFuture() } + // + // /// Using the computed ID's, discard any messages that we've already seen / 
encountered + // return self.discardKnownMessages(identifiedMessages).flatMap { newMessages -> EventLoopFuture in + // guard !newMessages.isEmpty else { return self.eventLoop.makeSucceededVoidFuture() } + // + // /// Store the new / unique messages in our MessaheCache + // return self.storeMessages(newMessages).flatMap { storedMessages -> EventLoopFuture in + // guard !storedMessages.isEmpty else { return self.eventLoop.makeSucceededVoidFuture() } + // + // /// - TODO: Event sub, possibly remove later... + // self._eventHandler?(.message(remotePeer, storedMessages.map { $0.value })) + // + // /// Sort the messages based on topic (if a message contains multiple topic ids, this will duplicate the message for each topic) + // /// Example message "🍍" has topicIds "food" and "fruit", the message "🍍" will appear twice in the dictionary below. Allowing us to notify both the Food and Fruit Subscription handlers seperately + // let messagesPerTopic = self.sortMessagesByTopic(storedMessages) + // + // /// Pass the messages onto any SubscriptionHandlers at this point + // for (topic, msgs) in messagesPerTopic { + // if let handler = self.subscriptions[topic] { + // for (_, message) in msgs { + // let _ = handler.on?(.data(message)) + // } + // } else { + // self.logger.warning("No Subscription Handler for topic:`\(topic)`") + // } + // } + // + // /// Forward the messages onto any other subscribers to this topic (excluding the sender) + // return messagesPerTopic.compactMap { (topic, msgs) -> EventLoopFuture in + // /// Note: This method will result in multiple messages being sent to a peer with multiple common subscriptions to us + // /// Example: PeerA and us are both subscribed to topics Food and Fruit, we'll send an RPC message forwarding all Food messages and another RPC message forwarding all Fruit messages + // /// We should try and bundle those messages together into a single RPC message + // self.peerState.peersSubscribedTo2(topic: topic, on: nil).flatMap { subscribers -> EventLoopFuture in + // + // /// Ensure there's subscribers to send the messages to, otherwise bail + // guard subscribers.count > 0 else { return self.eventLoop.makeSucceededVoidFuture() } + // + // /// Prepare the message + // var forwardedRPC = RPC() + // forwardedRPC.msgs = msgs.compactMap { $0.message as? RPC.Message } + // var payload = try! forwardedRPC.serializedData() + // payload = putUVarInt(UInt64(payload.count)) + payload + // + // /// Send the message to each peer subscribed to this topic + // return subscribers.compactMap { (peerID, stream) -> EventLoopFuture in + // guard peerID != remotePeer else { return self.eventLoop.makeSucceededVoidFuture() } + // self.logger.info("Forwarding message to subscriber \(peerID)") + // + // return stream.write(payload.bytes) + // }.flatten(on: self.eventLoop) + // } + // + // }.flatten(on: self.eventLoop) + // } + // } + // } + // } + // } + // } + // }.always { result in + // let toc = Int((Date().timeIntervalSince1970 - tic) * 1_000_000) + // self.logger.info("Processed \(rpc.msgs.count) Inbound Messages from Peer \(remotePeer) in \(toc)us") + // } + // } + + // internal override func processInboundMessage(_ msg: Data, from stream: Stream, request: LibP2P.ProtocolRequest) -> EventLoopFuture { + // /// This should only ever be an RPC message. + // guard let rpc = try? RPC(serializedData: msg) else { + // self.logger.warning("Failed to decode RPC PubSub Message") + // self.logger.info("UTF8: \(String(data: request.payload, encoding: .utf8) ?? 
"Not UTF8")") + // self.logger.info("Hex: \(request.payload.asString(base: .base16))") + // return request.eventLoop.makeSucceededFuture(nil) + // } + // + // guard let remotePeer = stream.connection?.remotePeer else { + // self.logger.warning("Failed to determine message originator (RemotePeer)") + // return request.eventLoop.makeSucceededFuture(nil) + // } + // + // /// Handle the RPC Control Messages (for Floodsub this is only just a list of subscription changes) + // if rpc.subscriptions.count > 0 { + // var subs:[String:Bool] = [:] + // rpc.subscriptions.forEach { + // subs[$0.topicID] = $0.subscribe + // } + // self.logger.info("\(remotePeer)::Subscriptions: \(subs)") + // + // /// - TODO: Event sub, possibly remove later... + // _eventHandler?(.subscriptionChange(remotePeer, subs)) + // + // let _ = self.peerState.update(subscriptions: subs, for: remotePeer) + // + // /// Notify our subscription handlers of any relevant peers + // for sub in subs { + // if sub.1 == true, let handler = self.subscriptions[sub.key] { + // self.logger.info("Notifying `\(sub.key)` subscription handler of new subscriber/peer") + // let _ = handler.on?(.newPeer(remotePeer)) + // } + // /// - TODO: Should we alert our subscription handler when a peer unsubscribes from a topic? + // } + // } + // + // /// - TODO: As we itterate over the control messages, we should bundle all the repsonses we need and then send them all out in a single message at the end of this loop. + // /// iWant responses + // /// iHave responses + // /// prune / graft responses + // /// All packaged up in one RPC response... + // /// + // /// We should also make sure we're still subscribed to this topic (cause we can still receive messages for a period of time after unsubscribing) + // if rpc.hasControl { + // let ctrl = rpc.control + // + // /// Itterate over all graft messages and handle/process them accordingly + // for graft in ctrl.graft { + // self.logger.info("RPC::Control - Handling Graft(\(graft.topicID)) Message") + // /// At the momment we only check to see if we're subscribed to the topic + // /// But we should also consider the peers relative distance, our high water mark, the peers metadata (measured latency, etc) + // /// before agreeing to the Graft request + // if self.subscriptions[graft.topicID] != nil { + // /// Check to see if this peer is already a full peer, or in our fanout... + // self.logger.info("Received a Graft Message for a topic `\(graft.topicID)` we're subscribed to! Checking \(remotePeer)'s current peer status") + // let _ = self.peerState.isFullPeer(remotePeer).flatMap { isFullPeer -> EventLoopFuture in + // guard isFullPeer == false else { self.logger.info("Remote peer confirmed our Graft Request"); return self.mainLoop.makeSucceededVoidFuture() } + // self.logger.info("Attempting to upgrade Metadata Peer to Full Peer") + // return self.peerState.makeFullPeer(remotePeer, for: graft.topicID).flatMap { _ -> EventLoopFuture in + // // Lets accept the graft by responding with a graft + // self.logger.info("Accepting the Graft by sending back a Graft Message...") + // + // guard let rpc = try? 
(RPC.with { rpcMsg in + // rpcMsg.control = RPC.ControlMessage.with { ctrlMsg in + // ctrlMsg.graft = [ + // RPC.ControlGraft.with { grftMsg in + // grftMsg.topicID = graft.topicID + // } + // ] + // } + // }).serializedData() else { self.logger.warning("Failed to construct Graft Response Message"); return self.mainLoop.makeSucceededVoidFuture() } + // return stream.write( putUVarInt(UInt64(rpc.count)) + rpc ) + // } + // } + // + // } else { + // // We're not subscribed to the topic, reject the graft message by sending a prune message + // self.logger.info("Received a Graft Message for a topic `\(graft.topicID)` that we're not subscribed to. Responding with a Prune Message...") + // guard let rpc = try? (RPC.with { rpcMsg in + // rpcMsg.control = RPC.ControlMessage.with { ctrlMsg in + // ctrlMsg.prune = [ + // RPC.ControlPrune.with { pruneMsg in + // pruneMsg.topicID = graft.topicID + // } + // ] + // } + // }).serializedData() else { continue } + // let _ = stream.write( putUVarInt(UInt64(rpc.count)) + rpc ) + // } + // + // /// - TODO: Event sub, possibly remove later... + // _eventHandler?(.graft(remotePeer, graft.topicID)) + // } + // + // /// Itterate over all prune messages and handle/process them accordingly + // for prune in ctrl.prune { + // self.logger.info("RPC::Control - Handling Prune(\(prune.topicID)) Message") + // + // let _ = self.peerState.isFullPeer(remotePeer).flatMap { isFullPeer -> EventLoopFuture in + // guard isFullPeer else { return self.mainLoop.makeSucceededVoidFuture() } + // self.logger.info("Pruning Full Peer: \(remotePeer) at their request") + // return self.peerState.makeMetaPeer(remotePeer, for: prune.topicID) + // } + // + // /// - TODO: Event sub, possibly remove later... + // _eventHandler?(.prune(remotePeer, prune.topicID)) + // } + // + // for iHave in ctrl.ihave { + // self.logger.info("RPC::Control - Handle IHave(\(iHave.topicID)) Message") + // self.logger.info("\(iHave.messageIds.compactMap { $0.asString(base: .base16) }.joined(separator: ", "))") + // + // let _ = iHave.messageIds.map { msgId in + // self.messageCache.exists(messageID: msgId, on: nil).map { exists -> Data? in + // guard !exists else { return nil } + // return msgId + // } + // }.flatten(on: self.mainLoop).map { msgWeNeed in + // guard msgWeNeed.compactMap({ $0 }).count > 0 else { self.logger.info("All caught up with the gossip!"); return } + // self.logger.info("Messages we need \(msgWeNeed.compactMap { $0?.asString(base: .base16) }.joined(separator: ","))") + // + // //Go ahead an request those messages... + // let rpc = RPC.with { r in + // r.control = RPC.ControlMessage.with { ctrl in + // ctrl.iwant = [RPC.ControlIWant.with { iWant in + // iWant.messageIds = msgWeNeed.compactMap { $0 } + // }] + // } + // } + // + // var payload = try! rpc.serializedData() + // payload = putUVarInt(UInt64(payload.count)) + payload + // + // let _ = stream.write(payload) + // } + // + // /// - TODO: Event sub, possibly remove later... + // _eventHandler?(.iHave(remotePeer, iHave.messageIds)) + // } + // + // for iWant in ctrl.iwant { + // self.logger.info("RPC::Control - Handle IWant Message") + // self.logger.info("\(iWant.messageIds.map { "\($0.asString(base: .base16))" }.joined(separator: " ,"))") + // + // let _ = iWant.messageIds.compactMap { msgId in + // self.messageCache.get(messageID: msgId, on: nil) + // }.flatten(on: self.mainLoop).map { messages in + // var rpc = RPC() + // rpc.msgs = messages.compactMap { $0?.data } + // + // var payload = try! 
rpc.serializedData() + // payload = putUVarInt(UInt64(payload.count)) + payload + // + // self.logger.info("Responding to iWant control message by sending \(rpc.msgs.count)/\(iWant.messageIds.count) of the requested messages to peer \(remotePeer)") + // + // let _ = stream.write(payload) + // } + // + // /// - TODO: Event sub, possibly remove later... + // _eventHandler?(.iWant(remotePeer, iWant.messageIds)) + // } + // } + // + // /// Handle the published messages + // let _ = rpc.msgs.flatMap { message -> EventLoopFuture in + // + // /// Ensure the message conforms to our MessageSignaturePolicy + // guard passesMessageSignaturePolicy(message) else { + // self.logger.warning("Failed signature policy, discarding message") + // return self.mainLoop.makeSucceededVoidFuture() + // } + // + // /// Derive the message id using the overidable messageID function + // guard let messageIDFunc = self.messageIDFunctions[message.topicIds.first!] else { + // self.logger.warning("No MessageIDFunction defined for topic '\(message.topicIds.first!)'. Dropping Message.") + // return self.mainLoop.makeSucceededVoidFuture() + // } + // + // let id = messageIDFunc(message) + // + // self.logger.info("Message ID `\(id.asString(base: .base16))`") + // self.logger.info("\(message.description)") + // + // /// Check to ensure we haven't seen this message already... + // return self.messageCache.exists(messageID: id, on: nil).flatMap { exists -> EventLoopFuture in + // guard exists == false else { self.logger.warning("Dropping Duplicate Message"); return self.mainLoop.makeSucceededVoidFuture() } + // + // /// Validate the unseen message before storing it in our message cache... + // return self.validate(message: message).flatMap { valid -> EventLoopFuture in + // guard valid else { self.logger.warning("Dropping Invalid Message: \(message)"); return self.mainLoop.makeSucceededVoidFuture() } + // + // /// Store the message in our message cache + // self.logger.info("Storing Message: \(id.asString(base: .base16))"); + // /// - Note: We can run into issues where we end up saving duplicate messages cause when we check for existance they haven't been saved yet, and by the time we get around to saving them, theirs multiple copies ready to be stored. + // /// We temporarily added the `valid` flag to the `put` method to double check existance of a message before forwarding it and alerting our handler. + // return self.messageCache.put(messageID: id, message: (topic: message.topicIds.first!, data: message), on: nil).flatMap { valid in + // guard valid else { self.logger.warning("Encountered Duplicate Message While Attempting To Store In Message Cache"); return self.mainLoop.makeSucceededVoidFuture() } + // + // /// Should we pass the message onto any SubscriptionHandlers at this point? + // if let handler = self.subscriptions[message.topicIds.first!] { + // self.logger.info("Forwarding message to handler: ID:\(id.asString(base: .base16))") + // let _ = handler.on?(.data(message)) + // } else { + // self.logger.warning("No Subscription Handler for topic:`\(message.topicIds.first!)`") + // } + // + // /// - TODO: Event sub, possibly remove later... 
+ // self._eventHandler?(.message(remotePeer, [message])) + // + // /// Forward the message onto any other subscribers to this topic (excluding the sender) + // return self.peerState.peersSubscribedTo2(topic: message.topicIds.first!, on: nil).flatMap { subscribers -> EventLoopFuture in + // + // guard subscribers.count > 0 else { return self.mainLoop.makeSucceededVoidFuture() } + // + // var forwardedRPC = RPC() + // forwardedRPC.msgs = [message] + // let payload = try! forwardedRPC.serializedData() + // + // return subscribers.map { (peerID, stream) in + // guard peerID != remotePeer else { return self.mainLoop.makeSucceededVoidFuture() } + // self.logger.info("Forwarding message to subscriber \(peerID)") + // + // return stream.write(putUVarInt(UInt64(payload.count)) + payload) + // }.flatten(on: self.mainLoop) + // } + // } + // } + // } + // } + // + // /// Return our response if we have one... + // return self.mainLoop.makeSucceededFuture(nil) + // } + + // private func replyToControlIfNecessary2(_ res:(graftRejections:[RPC.ControlPrune], iWantResponses:[RPC.Message], iWant:RPC.ControlIWant?), stream:LibP2P.Stream) -> EventLoopFuture { + // if !res.graftRejections.isEmpty || !res.iWantResponses.isEmpty || res.iWant != nil { + // self.logger.info("We have \(res.graftRejections.count) graft rejection messages") + // self.logger.info("We have \(res.iWantResponses.count) messages to send in response to iWant requests") + // self.logger.info("We have \(res.iWant?.messageIds.count ?? 0) iWants in response to the iHaves we received") + // + // /// We need to respond to the sender with an RPC message + // var rpc = RPC() + // rpc.msgs = res.iWantResponses + // rpc.control = RPC.ControlMessage.with { ctrl in + // ctrl.iwant = res.iWant == nil ? [] : [res.iWant!] + // ctrl.prune = res.graftRejections + // } + // + // var payload = try! rpc.serializedData() + // payload = putUVarInt(UInt64(payload.count)) + payload + // + // /// Respond to the remote peer + // self.logger.info("Responding to Control Message") + // let _ = stream.write(payload.bytes) + // + // } else { + // self.logger.info("No Control Response Necessary") + // } + // return self.eventLoop.makeSucceededVoidFuture() + // } + /// Given an array of `RPC.Message`s, this method will ensure each message conforms to our SignaturePolicy, dropping/discarding the messages that don't -// private func ensureSignaturePolicyConformance(_ messages:[RPC.Message]) -> EventLoopFuture<[RPC.Message]> { -// self.eventLoop.submit { -// messages.filter { self.passesMessageSignaturePolicy($0) } -// } -// } - + // private func ensureSignaturePolicyConformance(_ messages:[RPC.Message]) -> EventLoopFuture<[RPC.Message]> { + // self.eventLoop.submit { + // messages.filter { self.passesMessageSignaturePolicy($0) } + // } + // } + /// Given an array of `RPC.Message`s, this method will compute the Message ID for each message (or drop the message if it's invalid) and returns an `[ID:RPC.Message]` dictionary -// private func computeMessageIds(_ messages:[RPC.Message]) -> EventLoopFuture<[Data:RPC.Message]> { -// self.eventLoop.submit { -// var msgs:[Data:RPC.Message] = [:] -// messages.forEach { message in -// /// Ensure the message has a topic and that we have a messageIDFunc registered for that topic -// guard let firstTopic = message.topicIds.first, let messageIDFunc = self.messageIDFunctions[firstTopic] else { -// self.logger.warning("No MessageIDFunction defined for topic '\(message.topicIds.first!)'. 
Dropping Message.") -// return -// } -// /// Compute the message id and insert it into our dictionary -// msgs[Data(messageIDFunc(message))] = message -// } -// return msgs -// } -// } -// + // private func computeMessageIds(_ messages:[RPC.Message]) -> EventLoopFuture<[Data:RPC.Message]> { + // self.eventLoop.submit { + // var msgs:[Data:RPC.Message] = [:] + // messages.forEach { message in + // /// Ensure the message has a topic and that we have a messageIDFunc registered for that topic + // guard let firstTopic = message.topicIds.first, let messageIDFunc = self.messageIDFunctions[firstTopic] else { + // self.logger.warning("No MessageIDFunction defined for topic '\(message.topicIds.first!)'. Dropping Message.") + // return + // } + // /// Compute the message id and insert it into our dictionary + // msgs[Data(messageIDFunc(message))] = message + // } + // return msgs + // } + // } + // /// Given a dictionary of Messages and their IDs, this method will discard any messages that are already present in our message cache, returning a dictionary of new and unique messages -// private func discardKnownMessages(_ messages:[Data:RPC.Message]) -> EventLoopFuture<[Data:RPC.Message]> { -// let ids = messages.keys.map { $0 } -// return (self.messageCache as! MCache).filter(ids: Set(ids), returningOnly: .unknown, on: self.eventLoop).map { unknownIds -> [Data:RPC.Message] in -// var newMessages:[Data:RPC.Message] = [:] -// unknownIds.forEach { newMessages[$0] = messages[$0] } -// return newMessages -// } -// } - + // private func discardKnownMessages(_ messages:[Data:RPC.Message]) -> EventLoopFuture<[Data:RPC.Message]> { + // let ids = messages.keys.map { $0 } + // return (self.messageCache as! MCache).filter(ids: Set(ids), returningOnly: .unknown, on: self.eventLoop).map { unknownIds -> [Data:RPC.Message] in + // var newMessages:[Data:RPC.Message] = [:] + // unknownIds.forEach { newMessages[$0] = messages[$0] } + // return newMessages + // } + // } + /// Given a dictionary of Messages, this method will validate each message using the appropriate validation function, and silently discard any messages that fail to validate for any reason. 
Returns a dictionary of Valid RPC.Messages indexed by their ID -// private func validateMessages(_ messages:[Data:RPC.Message]) -> EventLoopFuture<[Data:RPC.Message]> { -// var validMessages:[Data:RPC.Message] = [:] -// return messages.map { message in -// self.validate(message: message.value, on: self.eventLoop).map { valid in -// validMessages[message.key] = message.value -// } -// }.flatten(on: self.eventLoop).map { -// return validMessages -// } -// } - -// private func storeMessages(_ messages:[Data:PubSubMessage]) -> EventLoopFuture<[Data:PubSubMessage]> { -// self.messageCache.put(messages: messages, on: self.eventLoop) -// } - -// private func sortMessagesByTopic(_ messages:[Data:RPC.Message]) -> [String:[(id:Data, message:RPC.Message)]] { -// var messagesByTopic:[String:[(Data, RPC.Message)]] = [:] -// for message in messages { -// for topic in message.value.topicIds { -// if messagesByTopic[topic] == nil { messagesByTopic[topic] = [] } -// messagesByTopic[topic]?.append( (message.key, message.value) ) -// } -// } -// return messagesByTopic -// } -} + // private func validateMessages(_ messages:[Data:RPC.Message]) -> EventLoopFuture<[Data:RPC.Message]> { + // var validMessages:[Data:RPC.Message] = [:] + // return messages.map { message in + // self.validate(message: message.value, on: self.eventLoop).map { valid in + // validMessages[message.key] = message.value + // } + // }.flatten(on: self.eventLoop).map { + // return validMessages + // } + // } + // private func storeMessages(_ messages:[Data:PubSubMessage]) -> EventLoopFuture<[Data:PubSubMessage]> { + // self.messageCache.put(messages: messages, on: self.eventLoop) + // } + // private func sortMessagesByTopic(_ messages:[Data:RPC.Message]) -> [String:[(id:Data, message:RPC.Message)]] { + // var messagesByTopic:[String:[(Data, RPC.Message)]] = [:] + // for message in messages { + // for topic in message.value.topicIds { + // if messagesByTopic[topic] == nil { messagesByTopic[topic] = [] } + // messagesByTopic[topic]?.append( (message.key, message.value) ) + // } + // } + // return messagesByTopic + // } +} diff --git a/Sources/LibP2PPubSub/Routers/Gossipsub/MessageStore/MessageCache.swift b/Sources/LibP2PPubSub/Routers/Gossipsub/MessageStore/MessageCache.swift index cc63f21..84d577c 100644 --- a/Sources/LibP2PPubSub/Routers/Gossipsub/MessageStore/MessageCache.swift +++ b/Sources/LibP2PPubSub/Routers/Gossipsub/MessageStore/MessageCache.swift @@ -1,13 +1,19 @@ +//===----------------------------------------------------------------------===// // -// MessageState.swift -// +// This source file is part of the swift-libp2p open source project // -// Created by Brandon Toms on 4/18/22. +// Copyright (c) 2022-2025 swift-libp2p project authors +// Licensed under MIT // +// See LICENSE for license information +// See CONTRIBUTORS for the list of swift-libp2p project authors +// +// SPDX-License-Identifier: MIT +// +//===----------------------------------------------------------------------===// import LibP2P - /// MessageCache creates a sliding window cache that remembers messages for as /// long as `history` slots. /// @@ -20,66 +26,69 @@ import LibP2P /// The slack between `gossip` and `history` accounts for the reaction time /// between when a message is advertised via IHAVE gossip, and the peer pulls it /// via an IWANT command. 
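//
// A minimal, self-contained sketch of the sliding-window behaviour described above.
// This is an illustration only, not the MessageCache implementation below; the names
// SlidingWindowSketch, history, gossip, put, shift and gossipIDs are invented for the
// example, assuming the defaults of three history windows and two gossip windows.
// With those defaults, a message stored in the newest window is eligible for IHAVE
// gossip until it has been shifted twice, stays retrievable for IWANT until the third
// shift, and that third shift discards it.

import Foundation

struct SlidingWindowSketch {
    let history = 3    // total windows a message survives (mcache_len)
    let gossip = 2     // windows examined when emitting IHAVE gossip (mcache_gossip)
    var windows: [[Data: String]] = [[:]]    // index 0 is the newest window, messageID -> topic

    // New messages always land in the newest window.
    mutating func put(id: Data, topic: String) { windows[0][id] = topic }

    // Called once per heartbeat interval: drop windows older than `history`, open a fresh one.
    mutating func shift() {
        while windows.count >= history { windows.removeLast() }
        windows.insert([:], at: 0)
    }

    // Only the newest `gossip` windows are advertised via IHAVE; anything older is kept
    // solely to answer IWANT requests until it falls out of the last history window.
    func gossipIDs(for topic: String) -> [Data] {
        windows.prefix(gossip).flatMap { window in
            window.filter { $0.value == topic }.map(\.key)
        }
    }
}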
-class MessageCache:MessageStateProtocol { +class MessageCache: MessageStateProtocol { typealias MessageID = Data - typealias Message = (topic:String, data:PubSubMessage) - typealias HistoryWindow = [MessageID:Message] - + typealias Message = (topic: String, data: PubSubMessage) + typealias HistoryWindow = [MessageID: Message] + /// The eventloop that this Message Cache is constrained to - internal let eventLoop:EventLoop + internal let eventLoop: EventLoop /// The Cache - var windows:[HistoryWindow] + var windows: [HistoryWindow] /// Our Logger - var logger:Logger + var logger: Logger /// Our State var state: ServiceLifecycleState - + /// The number of history windows to keep /// - Alias: mcache_len - private let cacheLength:Int - + private let cacheLength: Int + /// The number of windows to examine when sending gossip /// - Alias: mcache_gossip - private let gossipLength:Int - - required init(eventLoop:EventLoop, historyWindows:Int = 3, gossipWindows:Int = 2) { - precondition(historyWindows > gossipWindows, "Invalid parameters for message cache. GossipWindows [\(gossipWindows)] cannot be larger than historyWindows [\(historyWindows)]") + private let gossipLength: Int + + required init(eventLoop: EventLoop, historyWindows: Int = 3, gossipWindows: Int = 2) { + precondition( + historyWindows > gossipWindows, + "Invalid parameters for message cache. GossipWindows [\(gossipWindows)] cannot be larger than historyWindows [\(historyWindows)]" + ) print("PubSub::MessageChache Instantiated...") self.windows = [] self.eventLoop = eventLoop self.cacheLength = historyWindows self.gossipLength = gossipWindows self.logger = Logger(label: "com.swift.libp2p.pubsub.mcache[\(UUID().uuidString.prefix(5))]") - self.logger.logLevel = .info //LOG_LEVEL + self.logger.logLevel = .info //LOG_LEVEL self.state = .stopped - + /// Initialize our cache windows let _ = self.shift() } - + func start() throws { guard self.state == .stopped else { throw BasePubSub.Errors.alreadyRunning } self.logger.info("Starting") - + // Do stuff here, maybe re init our caches?? - + self.state = .started } - + func stop() throws { guard self.state == .started || self.state == .starting else { throw BasePubSub.Errors.alreadyStopped } if self.state == .stopping { self.logger.info("Force Quiting!") } self.logger.info("Stopping") - + // Do stuff here, maybe clear our caches?? - + self.state = .stopped } - + /// Adds a message to the current window and the cache - func put(messageID:MessageID, message:Message, on loop:EventLoop? = nil) -> EventLoopFuture { + func put(messageID: MessageID, message: Message, on loop: EventLoop? = nil) -> EventLoopFuture { eventLoop.submit { () -> Bool in /// blindly overwrites any existing entries with the specified messageID if self.windows.isEmpty { self.windows[0] = HistoryWindow() } @@ -91,13 +100,13 @@ class MessageCache:MessageStateProtocol { } }.hop(to: loop ?? eventLoop) } - + /// Given a dictionary of messages to store, this method will attempt to add each one and return a dictionary of the added messages. - func put(messages:[Data:PubSubMessage], on loop:EventLoop? = nil) -> EventLoopFuture<[Data:PubSubMessage]> { - eventLoop.submit { () -> [Data:PubSubMessage] in + func put(messages: [Data: PubSubMessage], on loop: EventLoop? 
= nil) -> EventLoopFuture<[Data: PubSubMessage]> { + eventLoop.submit { () -> [Data: PubSubMessage] in /// blindly overwrites any existing entries with the specified messageID if self.windows.isEmpty { self.windows[0] = HistoryWindow() } - var added:[Data:PubSubMessage] = [:] + var added: [Data: PubSubMessage] = [:] for message in messages { guard let topic = message.value.topicIds.first else { continue } if self.windows[0][message.key] == nil { @@ -108,9 +117,9 @@ class MessageCache:MessageStateProtocol { return added }.hop(to: loop ?? eventLoop) } - - private func _get(messageID:MessageID) -> Message? { - var msg:Message? = nil + + private func _get(messageID: MessageID) -> Message? { + var msg: Message? = nil for window in self.windows { if let message = window[messageID] { msg = message @@ -120,9 +129,9 @@ class MessageCache:MessageStateProtocol { return msg } - private func _exists(messageID:MessageID, fullOnly:Bool = false) -> Bool { - var exists:Bool = false - + private func _exists(messageID: MessageID, fullOnly: Bool = false) -> Bool { + var exists: Bool = false + for window in self.windows.prefix(fullOnly ? cacheLength : windows.count) { if window[messageID] != nil { exists = true @@ -132,46 +141,48 @@ class MessageCache:MessageStateProtocol { return exists } - + /// Retrieves a message from the cache by its ID, if it is still present. - func get(messageID:MessageID, on loop:EventLoop? = nil) -> EventLoopFuture { + func get(messageID: MessageID, on loop: EventLoop? = nil) -> EventLoopFuture { eventLoop.submit { () -> Message? in self._get(messageID: messageID) }.hop(to: loop ?? eventLoop) } - + /// Retrieves a message from the cache by its ID, if it is still present. - func get(messageIDs:Set, on loop:EventLoop? = nil) -> EventLoopFuture<[Message]> { + func get(messageIDs: Set, on loop: EventLoop? = nil) -> EventLoopFuture<[Message]> { eventLoop.submit { () -> [Message] in messageIDs.compactMap { self._get(messageID: $0) } }.hop(to: loop ?? eventLoop) } - - func exists(messageID:MessageID, on loop:EventLoop? = nil) -> EventLoopFuture { + + func exists(messageID: MessageID, on loop: EventLoop? = nil) -> EventLoopFuture { eventLoop.submit { () -> Bool in self._exists(messageID: messageID) }.hop(to: loop ?? eventLoop) } - + /// Retrieves the message IDs for messages in the most recent history windows, scoped to a given topic. /// - Note: The number of windows to examine is controlled by the gossipLength parameter - func getGossipIDs(topic:String, on loop:EventLoop? = nil) -> EventLoopFuture<[MessageID]> { + func getGossipIDs(topic: String, on loop: EventLoop? = nil) -> EventLoopFuture<[MessageID]> { eventLoop.submit { () -> [MessageID] in - var ids:[MessageID] = [] + var ids: [MessageID] = [] for (idx, window) in self.windows.enumerated() { guard idx < self.gossipLength else { break } - - ids.append(contentsOf: window.filter({ message in - message.value.topic == topic - }).map { $0.key } ) + + ids.append( + contentsOf: window.filter({ message in + message.value.topic == topic + }).map { $0.key } + ) } - + return ids }.hop(to: loop ?? 
eventLoop) } - + /// BasePubSub Calls this method every X (usually 1) seconds, we take the opportunity to shift our Message Cache - var runningHeartbeatCounter:UInt64 = 0 + var runningHeartbeatCounter: UInt64 = 0 func heartbeat() -> EventLoopFuture { self.eventLoop.submit { /// Every 30 seconds we shift our message store @@ -180,16 +191,16 @@ class MessageCache:MessageStateProtocol { self.logger.trace("Shifting Message Cache Window") self.shift() } - + /// Increment our heartbeat counter... self.runningHeartbeatCounter += 1 } } - + /// Shifts the current window, discarding messages older than the history length of the cache (mcache_len) /// - Warning: Ensure that this method is only called once per heartbeat interval (otherwise we'll drop message before they expire) @discardableResult - func shift(on loop:EventLoop? = nil) -> EventLoopFuture { + func shift(on loop: EventLoop? = nil) -> EventLoopFuture { eventLoop.submit { () -> Void in /// Remove all windows that are older than our cacheLength while self.windows.count >= self.cacheLength { @@ -199,12 +210,16 @@ class MessageCache:MessageStateProtocol { self.windows.insert(HistoryWindow(), at: 0) }.hop(to: loop ?? eventLoop) } - + /// Given an array of message ids, this method will filter them using the specified filter and return the ID's that satisfy the filter... /// Example: .known -> returns only those message id's that we have in our cache /// Example: .unknown -> returns only those message id's that we haven't seen / encountered lately /// Example: .full -> returns only those message id's for which we have the full message contents - func filter(ids:Set, returningOnly filter:PubSub.MessageState.FilterType, on loop:EventLoop? = nil) -> EventLoopFuture<[Data]> { + func filter( + ids: Set, + returningOnly filter: PubSub.MessageState.FilterType, + on loop: EventLoop? = nil + ) -> EventLoopFuture<[Data]> { eventLoop.submit { () -> [Data] in switch filter { case .known: diff --git a/Sources/LibP2PPubSub/Routers/Gossipsub/PeerStore/PeeringState.swift b/Sources/LibP2PPubSub/Routers/Gossipsub/PeerStore/PeeringState.swift index fe2fb35..8baa04c 100644 --- a/Sources/LibP2PPubSub/Routers/Gossipsub/PeerStore/PeeringState.swift +++ b/Sources/LibP2PPubSub/Routers/Gossipsub/PeerStore/PeeringState.swift @@ -1,9 +1,16 @@ +//===----------------------------------------------------------------------===// // -// PeeringState.swift -// +// This source file is part of the swift-libp2p open source project // -// Created by Brandon Toms on 4/18/22. +// Copyright (c) 2022-2025 swift-libp2p project authors +// Licensed under MIT // +// See LICENSE for license information +// See CONTRIBUTORS for the list of swift-libp2p project authors +// +// SPDX-License-Identifier: MIT +// +//===----------------------------------------------------------------------===// import LibP2P @@ -24,60 +31,60 @@ import LibP2P /// // will loop through peers and update the subscriptions in either fanout or mesh /// } /// -class PeeringState:PeerStateProtocol { - +class PeeringState: PeerStateProtocol { + typealias Topic = String typealias PID = String - + var state: ServiceLifecycleState - + /// A set of ids of all known peers that support gossipsub or floodsub. - var peers:[PID:PubSub.Subscriber] + var peers: [PID: PubSub.Subscriber] /// A map of subscribed topics to the set of peers in our overlay mesh for that topic. 
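//
// A minimal sketch of how the two caches declared below interact (illustration only;
// MeshFanoutSketch, graft and prune are invented names for the example and are not part
// of this diff). `mesh` holds full-message peers for topics we are subscribed to, while
// `fanout` holds metadata-only peers for topics we are not subscribed to. Grafting moves
// a peer from fanout into mesh and pruning moves it back, mirroring the makeFullPeer and
// makeMetaPeer methods further down in this file.

struct MeshFanoutSketch {
    var mesh: [String: [String]] = [:]      // topic -> full-message peer IDs (subscribed topics)
    var fanout: [String: [String]] = [:]    // topic -> metadata-only peer IDs (unsubscribed topics)

    // Promote a peer to a full-message peer for a topic (roughly what handling a GRAFT does).
    mutating func graft(_ pid: String, to topic: String) {
        fanout[topic]?.removeAll { $0 == pid }
        if !mesh[topic, default: []].contains(pid) {
            mesh[topic, default: []].append(pid)
        }
    }

    // Demote a peer to metadata-only for a topic (roughly what handling a PRUNE does).
    mutating func prune(_ pid: String, from topic: String) {
        mesh[topic]?.removeAll { $0 == pid }
        if !fanout[topic, default: []].contains(pid) {
            fanout[topic, default: []].append(pid)
        }
    }
}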
- var mesh:[Topic:[PID]] + var mesh: [Topic: [PID]] /// Like mesh, fanout is a map of topics to a set of peers, however, the fanout map contains topics to which we are NOT subscribed. - var fanout:[Topic:[PID]] - + var fanout: [Topic: [PID]] + /// The eventloop that this PeeringState is constrained to - internal let eventLoop:EventLoop + internal let eventLoop: EventLoop /// Our Logger - private var logger:Logger - - required init(eventLoop:EventLoop) { + private var logger: Logger + + required init(eventLoop: EventLoop) { print("PubSub::PeeringState Instantiated...") self.eventLoop = eventLoop self.logger = Logger(label: "com.swift.libp2p.pubsub.pstate[\(UUID().uuidString.prefix(5))]") - self.logger.logLevel = .trace // LOG_LEVEL + self.logger.logLevel = .trace // LOG_LEVEL self.state = .stopped - + /// Initialize our caches self.peers = [:] self.mesh = [:] self.fanout = [:] } - + func start() throws { guard self.state == .stopped else { throw BasePubSub.Errors.alreadyRunning } self.logger.info("Starting") - + // Do stuff here, maybe re init our caches?? - + self.state = .started } - + func stop() throws { guard self.state == .started || self.state == .starting else { throw BasePubSub.Errors.alreadyStopped } if self.state == .stopping { self.logger.info("Force Quiting!") } self.logger.info("Stopping") - + // Do stuff here, maybe clear our caches?? - + self.state = .stopped } - - func onPeerConnected(peerID peer: PeerID, stream:LibP2PCore.Stream) -> EventLoopFuture { + + func onPeerConnected(peerID peer: PeerID, stream: LibP2PCore.Stream) -> EventLoopFuture { eventLoop.submit { if self.peers[peer.b58String] == nil { switch stream.direction { @@ -94,84 +101,94 @@ class PeeringState:PeerStateProtocol { case .outbound: self.peers[peer.b58String]?.attachOutbound(stream: stream) } - self.logger.warning("Received a peer connected event for a peer that was already present in our PeeringState") + self.logger.warning( + "Received a peer connected event for a peer that was already present in our PeeringState" + ) } } } - - func attachInboundStream(_ peerID: PeerID, inboundStream: LibP2PCore.Stream, on loop:EventLoop? = nil) -> EventLoopFuture { + + func attachInboundStream( + _ peerID: PeerID, + inboundStream: LibP2PCore.Stream, + on loop: EventLoop? = nil + ) -> EventLoopFuture { eventLoop.submit { self.peers[peerID.b58String, default: .init(id: peerID)].attachInbound(stream: inboundStream) -// if self.peers[peerID.b58String] == nil { -// //Add the new peer to our `peers` list -// self.peers[peerID.b58String] = .init(id: peerID, inbound: inboundStream) -// self.logger.info("Added \(peerID) to our peering state (peers2)") -// } else { -// self.peers[peerID.b58String]?.attachInbound(stream: inboundStream) -// //self.logger.warning("Received a peer connected event for a peer that was already present in our PeeringState") -// } + // if self.peers[peerID.b58String] == nil { + // //Add the new peer to our `peers` list + // self.peers[peerID.b58String] = .init(id: peerID, inbound: inboundStream) + // self.logger.info("Added \(peerID) to our peering state (peers2)") + // } else { + // self.peers[peerID.b58String]?.attachInbound(stream: inboundStream) + // //self.logger.warning("Received a peer connected event for a peer that was already present in our PeeringState") + // } }.hop(to: loop ?? eventLoop) } - - func attachOutboundStream(_ peerID: PeerID, outboundStream: LibP2PCore.Stream, on loop:EventLoop? 
= nil) -> EventLoopFuture { + + func attachOutboundStream( + _ peerID: PeerID, + outboundStream: LibP2PCore.Stream, + on loop: EventLoop? = nil + ) -> EventLoopFuture { eventLoop.submit { self.peers[peerID.b58String, default: .init(id: peerID)].attachOutbound(stream: outboundStream) -// if self.peers[peerID.b58String] == nil { -// //Add the new peer to our `peers` list -// self.peers[peerID.b58String] = .init(id: peerID, outbound: outboundStream) -// self.logger.info("Added \(peerID) to our peering state (peers2)") -// } else { -// self.peers[peerID.b58String]?.attachOutbound(stream: outboundStream) -// //self.logger.warning("Received a peer connected event for a peer that was already present in our PeeringState") -// } + // if self.peers[peerID.b58String] == nil { + // //Add the new peer to our `peers` list + // self.peers[peerID.b58String] = .init(id: peerID, outbound: outboundStream) + // self.logger.info("Added \(peerID) to our peering state (peers2)") + // } else { + // self.peers[peerID.b58String]?.attachOutbound(stream: outboundStream) + // //self.logger.warning("Received a peer connected event for a peer that was already present in our PeeringState") + // } }.hop(to: loop ?? eventLoop) } - func detachInboundStream(_ peerID: PeerID, on loop:EventLoop? = nil) -> EventLoopFuture { + func detachInboundStream(_ peerID: PeerID, on loop: EventLoop? = nil) -> EventLoopFuture { eventLoop.submit { self.peers[peerID.b58String]?.detachInboundStream() }.hop(to: loop ?? eventLoop) } - - func detachOutboundStream(_ peerID: PeerID, on loop:EventLoop? = nil) -> EventLoopFuture { + + func detachOutboundStream(_ peerID: PeerID, on loop: EventLoop? = nil) -> EventLoopFuture { eventLoop.submit { self.peers[peerID.b58String]?.detachOutboundStream() }.hop(to: loop ?? eventLoop) } - + func onPeerDisconnected(_ peer: PeerID) -> EventLoopFuture { eventLoop.submit { self.logger.info("Removing Peer From Gossipsub Peers") self.peers.removeValue(forKey: peer.b58String) } } - + /// Adds a new peer (who supports our base PubSub protocol (aka floodsub / gossipsub)) to the peers cache - func addNewPeer(_ peer:PeerID, on loop:EventLoop? = nil) -> EventLoopFuture { + func addNewPeer(_ peer: PeerID, on loop: EventLoop? = nil) -> EventLoopFuture { eventLoop.submit { () -> Bool in if self.peers[peer.b58String] == nil { self.peers[peer.b58String] = .init(id: peer) self.logger.info("Added \(peer) to our peering state") return true } else { - self.logger.warning("Received a peer connected event for a peer that was already present in our PeeringState") + self.logger.warning( + "Received a peer connected event for a peer that was already present in our PeeringState" + ) return false } }.hop(to: loop ?? eventLoop) } - + /// Removes the specified peer from our peers cache - func removePeer(_ peer:PeerID, on loop:EventLoop? = nil) -> EventLoopFuture { + func removePeer(_ peer: PeerID, on loop: EventLoop? = nil) -> EventLoopFuture { eventLoop.submit { self.logger.info("Removing Peer From Gossipsub Peers") self.peers.removeValue(forKey: peer.b58String) }.hop(to: loop ?? eventLoop) } - - - + /// This is called when we receive an RPC message from a peer containing the topics - func update(topics:[Topic], for peer:PeerID, on loop:EventLoop? = nil) -> EventLoopFuture { + func update(topics: [Topic], for peer: PeerID, on loop: EventLoop? 
= nil) -> EventLoopFuture { eventLoop.submit { let pid = peer.b58String for topic in topics { @@ -181,7 +198,7 @@ class PeeringState:PeerStateProtocol { subs.append(pid) self.mesh[topic] = subs } - } else { // add the (topic:peer) entry to our fanout cache + } else { // add the (topic:peer) entry to our fanout cache if var subs = self.fanout[topic] { /// Add the peer to the existing topic entry... if !subs.contains(pid) { @@ -196,51 +213,51 @@ class PeeringState:PeerStateProtocol { } }.hop(to: loop ?? eventLoop) } - + /// This is called when we receive an RPC message from a peer containing the topics -// func update2(subscriptions:[Topic:Bool], for peer:PeerID, on loop:EventLoop? = nil) -> EventLoopFuture { -// eventLoop.submit { -// let pid = peer.b58String -// for (topic, subscribed) in subscriptions { -// if subscribed == true { -// // if we're subscribed to topic, ensure that this peer is in our mesh cache -// if var subs = self.mesh[topic] { -// if !subs.contains(pid) { -// subs.append(pid) -// self.mesh[topic] = subs -// self.logger.trace("Added \(peer) to mesh[\(topic)] cache") -// } -// } else { // add the (topic:peer) entry to our fanout cache -// if var subs = self.fanout[topic] { -// /// Add the peer to the existing topic entry... -// if !subs.contains(pid) { -// subs.append(pid) -// self.fanout[topic] = subs -// self.logger.trace("Added \(peer) to existing fanout[\(topic)] cache") -// } -// } else { -// /// Create a new topic entry... -// self.fanout[topic] = [pid] -// self.logger.trace("Added \(peer) to new fanout[\(topic)] cache") -// } -// } -// } else { // Unregister this PID from our fanout and mesh for the specified topic -// if let subs = self.mesh[topic], subs.contains(pid) { -// self.mesh[topic]?.removeAll(where: { $0 == pid }) -// self.logger.trace("Removed \(peer) from mesh[\(topic)] cache") -// } -// if let subs = self.fanout[topic], subs.contains(pid) { -// self.fanout[topic]?.removeAll(where: { $0 == pid }) -// self.logger.trace("Removed \(peer) from fanout[\(topic)] cache") -// } -// } -// } -// self.logger.info("Updated subscriptions for \(peer)") -// }.hop(to: loop ?? eventLoop) -// } - + // func update2(subscriptions:[Topic:Bool], for peer:PeerID, on loop:EventLoop? = nil) -> EventLoopFuture { + // eventLoop.submit { + // let pid = peer.b58String + // for (topic, subscribed) in subscriptions { + // if subscribed == true { + // // if we're subscribed to topic, ensure that this peer is in our mesh cache + // if var subs = self.mesh[topic] { + // if !subs.contains(pid) { + // subs.append(pid) + // self.mesh[topic] = subs + // self.logger.trace("Added \(peer) to mesh[\(topic)] cache") + // } + // } else { // add the (topic:peer) entry to our fanout cache + // if var subs = self.fanout[topic] { + // /// Add the peer to the existing topic entry... + // if !subs.contains(pid) { + // subs.append(pid) + // self.fanout[topic] = subs + // self.logger.trace("Added \(peer) to existing fanout[\(topic)] cache") + // } + // } else { + // /// Create a new topic entry... 
+ // self.fanout[topic] = [pid] + // self.logger.trace("Added \(peer) to new fanout[\(topic)] cache") + // } + // } + // } else { // Unregister this PID from our fanout and mesh for the specified topic + // if let subs = self.mesh[topic], subs.contains(pid) { + // self.mesh[topic]?.removeAll(where: { $0 == pid }) + // self.logger.trace("Removed \(peer) from mesh[\(topic)] cache") + // } + // if let subs = self.fanout[topic], subs.contains(pid) { + // self.fanout[topic]?.removeAll(where: { $0 == pid }) + // self.logger.trace("Removed \(peer) from fanout[\(topic)] cache") + // } + // } + // } + // self.logger.info("Updated subscriptions for \(peer)") + // }.hop(to: loop ?? eventLoop) + // } + /// This is called when we receive an RPC message from a peer containing the topics - func update(subscriptions:[Topic:Bool], for peer:PeerID, on loop:EventLoop? = nil) -> EventLoopFuture { + func update(subscriptions: [Topic: Bool], for peer: PeerID, on loop: EventLoop? = nil) -> EventLoopFuture { eventLoop.submit { let pid = peer.b58String for (topic, subscribed) in subscriptions { @@ -258,7 +275,7 @@ class PeeringState:PeerStateProtocol { self.fanout[topic] = [pid] self.logger.trace("Added \(peer) to new fanout[\(topic)] cache") } - } else { // Unregister this PID from our fanout and mesh for the specified topic + } else { // Unregister this PID from our fanout and mesh for the specified topic if let subs = self.mesh[topic], subs.contains(pid) { self.mesh[topic]?.removeAll(where: { $0 == pid }) self.logger.trace("Removed \(peer) from mesh[\(topic)] cache") @@ -272,13 +289,13 @@ class PeeringState:PeerStateProtocol { self.logger.info("Updated subscriptions for \(peer)") }.hop(to: loop ?? eventLoop) } - - func topicSubscriptions(on loop:EventLoop? = nil) -> EventLoopFuture<[Topic]> { + + func topicSubscriptions(on loop: EventLoop? = nil) -> EventLoopFuture<[Topic]> { eventLoop.submit { () -> [Topic] in self.mesh.map { $0.key } }.hop(to: loop ?? eventLoop) } - + /// This method updates our PeerState to reflect a new subscription /// /// It will... @@ -286,11 +303,11 @@ class PeeringState:PeerStateProtocol { /// - Bootstrap the new entry with any known peers that also subscribe to the topic /// Returns a list of PeerIDs that can be used to send grafting messages to /// - Note: Is this correct? Do we optimistically make all these peers mesh / full peers? Or do we have to manually do it as we graft the peers... - func subscribeSelf(to topic:Topic, on loop:EventLoop? = nil) -> EventLoopFuture<[PID]> { + func subscribeSelf(to topic: Topic, on loop: EventLoop? = nil) -> EventLoopFuture<[PID]> { eventLoop.submit { () -> [PID] in /// Make sure we're not already subscribed... if let peers = self.mesh[topic] { return peers } - + /// Check to see if we're aware of the topic (is it in our fanout set) if let knownTopic = self.fanout.removeValue(forKey: topic) { self.logger.trace("Upgrading `\(topic)` subscription from fanout to mesh") @@ -305,23 +322,23 @@ class PeeringState:PeerStateProtocol { } } } -// func subscribeSelf(to topic:Topic, on loop:EventLoop? = nil) -> EventLoopFuture<[PID]> { -// eventLoop.submit { () -> [PID] in -// /// Make sure we're not already subscribed... -// if let peers = self.mesh[topic] { return peers } -// -// self.mesh[topic] = [] -// -// return [] -// } -// } - + // func subscribeSelf(to topic:Topic, on loop:EventLoop? = nil) -> EventLoopFuture<[PID]> { + // eventLoop.submit { () -> [PID] in + // /// Make sure we're not already subscribed... 
+ // if let peers = self.mesh[topic] { return peers } + // + // self.mesh[topic] = [] + // + // return [] + // } + // } + /// This method updates our PeerState to reflect a subscription removal /// /// It will remove the latest known peer subscription state from our Subcription Mesh and transfer /// that state into our fanout set for future reference. /// Returns a list of PeerIDs that can be used to send unsub messages to - func unsubscribeSelf(from topic:Topic, on loop:EventLoop? = nil) -> EventLoopFuture<[PID]> { + func unsubscribeSelf(from topic: Topic, on loop: EventLoop? = nil) -> EventLoopFuture<[PID]> { eventLoop.submit { () -> [PID] in guard self.state == .started || self.state == .stopping else { return [] } /// Check to see if we're aware of the topic (is it in our fanout set) @@ -338,85 +355,85 @@ class PeeringState:PeerStateProtocol { } else { self.logger.trace("Unsubscribing self from `\(topic)`") } - + return [] } } - -// public enum SubscriptionType { -// case full -// case meta -// } - + + // public enum SubscriptionType { + // case full + // case meta + // } + /// Returns a list of all known peers subscribed to the specified topic /// /// - TODO: Make this mo better... right now we do a ton of work to extract the PeerID for each subscriber (this could be solved if we changed peers to a dictionary with the PID as the key). - func peersSubscribedTo(topic:Topic, on loop:EventLoop? = nil) -> EventLoopFuture<[PeerID]> { + func peersSubscribedTo(topic: Topic, on loop: EventLoop? = nil) -> EventLoopFuture<[PeerID]> { eventLoop.submit { () -> [PeerID] in let subbed = self.mesh[topic] ?? [] //let known = self.fanout[topic] ?? [] - return self.idsToPeers( subbed /*+ known*/ ) + return self.idsToPeers(subbed) // known }.hop(to: loop ?? eventLoop) } - + func peersSubscribedTo(topic: String, on loop: EventLoop?) -> EventLoopFuture<[PubSub.Subscriber]> { eventLoop.submit { () -> [PubSub.Subscriber] in let subbed = self.mesh[topic] ?? [] //let known = self.fanout[topic] ?? [] - return self.idsToSubs( subbed /*+ known*/ ) + return self.idsToSubs(subbed) // known }.hop(to: loop ?? eventLoop) } - + func getAllPeers(on loop: EventLoop?) -> EventLoopFuture<[PubSub.Subscriber]> { eventLoop.submit { () -> [PubSub.Subscriber] in self.peers.map { $0.value } }.hop(to: loop ?? eventLoop) } - -// public enum PeerGrouping { -// case topic -// } - func metaPeerIDs(on loop:EventLoop? = nil) -> EventLoopFuture<[Topic:[PeerID]]> { - eventLoop.submit { () -> [Topic:[PeerID]] in - var metaPeers:[Topic:[PeerID]] = [:] - self.fanout.forEach { topic, pids in + + // public enum PeerGrouping { + // case topic + // } + func metaPeerIDs(on loop: EventLoop? = nil) -> EventLoopFuture<[Topic: [PeerID]]> { + eventLoop.submit { () -> [Topic: [PeerID]] in + var metaPeers: [Topic: [PeerID]] = [:] + for (topic, pids) in self.fanout { metaPeers[topic] = self.idsToPeers(pids) } return metaPeers }.hop(to: loop ?? eventLoop) } - -// func metaPeerSubscriptions(on loop:EventLoop? = nil) -> EventLoopFuture<[PeerID:[Topic]]> { -// eventLoop.submit { () -> [PeerID:[Topic]] in -// var metaPeerSubscriptions:[PeerID:[Topic]] = [:] -// self.fanout.forEach { topic, pids in -// -// metaPeers[topic] = pids.compactMap { pid in -// self.peers.first { pi in -// pi.id.b58String == pid -// }.map { $0.id } -// } -// } -// return metaPeers -// }.hop(to: loop ?? eventLoop) -// } - - private func idToPeer(_ id:PID) -> PeerID? { + + // func metaPeerSubscriptions(on loop:EventLoop? 
= nil) -> EventLoopFuture<[PeerID:[Topic]]> { + // eventLoop.submit { () -> [PeerID:[Topic]] in + // var metaPeerSubscriptions:[PeerID:[Topic]] = [:] + // self.fanout.forEach { topic, pids in + // + // metaPeers[topic] = pids.compactMap { pid in + // self.peers.first { pi in + // pi.id.b58String == pid + // }.map { $0.id } + // } + // } + // return metaPeers + // }.hop(to: loop ?? eventLoop) + // } + + private func idToPeer(_ id: PID) -> PeerID? { guard eventLoop.inEventLoop else { return nil } return self.peers[id]?.id } - - private func idsToPeers(_ ids:[PID]) -> [PeerID] { + + private func idsToPeers(_ ids: [PID]) -> [PeerID] { guard eventLoop.inEventLoop else { return [] } return ids.compactMap { self.peers[$0]?.id } } - - private func idsToSubs(_ ids:[PID]) -> [PubSub.Subscriber] { + + private func idsToSubs(_ ids: [PID]) -> [PubSub.Subscriber] { guard eventLoop.inEventLoop else { return [] } return ids.compactMap { self.peers[$0] } } - - func streamsFor(_ peer: PeerID, on loop:EventLoop? = nil) -> EventLoopFuture { + + func streamsFor(_ peer: PeerID, on loop: EventLoop? = nil) -> EventLoopFuture { eventLoop.submit { () throws -> PubSub.Subscriber in if let p = self.peers[peer.b58String] { return p @@ -425,27 +442,26 @@ class PeeringState:PeerStateProtocol { } }.hop(to: loop ?? eventLoop) } - - typealias Subscriptions = (full:[Topic], meta:[Topic]) - + + typealias Subscriptions = (full: [Topic], meta: [Topic]) + /// Returns the subscriber info (PeerID and Stream) for the specified b58string peer id - func subscriptionForID(_ id:PID) -> EventLoopFuture<(PubSub.Subscriber, Subscriptions)> { + func subscriptionForID(_ id: PID) -> EventLoopFuture<(PubSub.Subscriber, Subscriptions)> { eventLoop.submit { () throws -> (PubSub.Subscriber, Subscriptions) in guard let sub = self.peers[id] else { throw Errors.unknownPeerID } - let full:[Topic] = self.mesh.compactMap { topic in + let full: [Topic] = self.mesh.compactMap { topic in if topic.value.contains(id) { return topic.key } return nil } - let meta:[Topic] = self.fanout.compactMap { topic in + let meta: [Topic] = self.fanout.compactMap { topic in if topic.value.contains(id) { return topic.key } return nil } - + return (sub, (full: full, meta: meta)) } } - - + /// This method returns true if the peer is a full peers /// false if the peer is a meta data only peer /// and throws an error if the peer id is unknown @@ -455,7 +471,7 @@ class PeeringState:PeerStateProtocol { var isPeer = false var isFull = false let id = peer.b58String - + /// Check our mesh cache for the peer id for (_, subs) in self.mesh { if subs.contains(id) { @@ -466,7 +482,7 @@ class PeeringState:PeerStateProtocol { } /// If we found the peer in our Mesh cach, they're a full peer, return true! if isPeer && isFull { return true } - + /// Lets proceed to check the fanout... for (_, subs) in self.fanout { if subs.contains(id) { @@ -475,21 +491,21 @@ class PeeringState:PeerStateProtocol { break } } - + /// We found the peer but they're a metadata only peer... 
if isPeer { return false } - + /// If we don't have record of this peer, throw an error self.logger.error("Error while checking isFullPeer, unknown PeerID:\(peer)") throw Errors.unknownPeerID } } - + func makeFullPeer(_ peer: PeerID, for topic: String) -> EventLoopFuture { eventLoop.submit { () -> Void in let pid = peer.b58String if var subs = self.mesh[topic] { - if !subs.contains( pid ) { + if !subs.contains(pid) { /// Add the peer to our mesh cache subs.append(pid) self.mesh[topic] = subs @@ -501,12 +517,12 @@ class PeeringState:PeerStateProtocol { /// We don't have an entry for this topic yet self.mesh[topic] = [pid] } - + /// Make sure we remove the PID from our fanout cache - self.fanout[topic]?.removeAll(where: { $0 == pid } ) + self.fanout[topic]?.removeAll(where: { $0 == pid }) } } - + func makeMetaPeer(_ peer: PeerID, for topic: String) -> EventLoopFuture { eventLoop.submit { () -> Void in let pid = peer.b58String @@ -515,7 +531,7 @@ class PeeringState:PeerStateProtocol { return } if var subs = self.fanout[topic] { - if !subs.contains( pid ) { + if !subs.contains(pid) { /// Add the peer to our fanout cache subs.append(pid) self.fanout[topic] = subs @@ -530,20 +546,20 @@ class PeeringState:PeerStateProtocol { self.fanout[topic] = [pid] self.logger.trace("Created new fanout for topic \(topic). And downgraded peer \(pid) to meta peer.") } - + /// Make sure we remove the PID from our full message mesh cache - self.mesh[topic]?.removeAll(where: { $0 == pid } ) + self.mesh[topic]?.removeAll(where: { $0 == pid }) //self.logger.info("Removed \(pid) from mesh[\(topic)]") //self.logger.info("Remaining Full Peers for topic `\(topic)` -> \(self.mesh[topic]?.compactMap { $0.prefix(5) }.joined(separator: ", ") ?? "nil")") //self.logger.info("\(self.mesh)") } } - + func newMetaPeer(_ peer: PeerID, for topic: String) -> EventLoopFuture { eventLoop.submit { () -> Void in let pid = peer.b58String if var subs = self.fanout[topic] { - if !subs.contains( pid ) { + if !subs.contains(pid) { /// Add the peer to our fanout cache subs.append(pid) self.fanout[topic] = subs @@ -558,18 +574,18 @@ class PeeringState:PeerStateProtocol { self.fanout[topic] = [pid] self.logger.trace("Created new fanout for topic \(topic). And adde new meta \(peer) to meta peer.") } - + /// Make sure we remove the PID from our full message mesh cache - self.mesh[topic]?.removeAll(where: { $0 == pid } ) + self.mesh[topic]?.removeAll(where: { $0 == pid }) //self.logger.info("Removed \(pid) from mesh[\(topic)]") //self.logger.info("Remaining Full Peers for topic `\(topic)` -> \(self.mesh[topic]?.compactMap { $0.prefix(5) }.joined(separator: ", ") ?? "nil")") //self.logger.info("\(self.mesh)") } } - - enum Errors:Error { + + enum Errors: Error { case unknownPeerID case unknownTopic } - + } diff --git a/Sources/LibP2PPubSub/Routers/Gossipsub/Route+Gossipsub.swift b/Sources/LibP2PPubSub/Routers/Gossipsub/Route+Gossipsub.swift index 1181bad..5051c4a 100644 --- a/Sources/LibP2PPubSub/Routers/Gossipsub/Route+Gossipsub.swift +++ b/Sources/LibP2PPubSub/Routers/Gossipsub/Route+Gossipsub.swift @@ -1,23 +1,30 @@ +//===----------------------------------------------------------------------===// // -// Route+Gossipsub.swift -// +// This source file is part of the swift-libp2p open source project // -// Created by Brandon Toms on 4/18/22. 
+// Copyright (c) 2022-2025 swift-libp2p project authors +// Licensed under MIT // +// See LICENSE for license information +// See CONTRIBUTORS for the list of swift-libp2p project authors +// +// SPDX-License-Identifier: MIT +// +//===----------------------------------------------------------------------===// import LibP2P -func registerGossipsubRoute(_ app:Application) throws { +func registerGossipsubRoute(_ app: Application) throws { app.group("meshsub") { msub in msub.on("1.0.0", handlers: [.varIntFrameDecoder]) { req -> EventLoopFuture> in - + guard req.application.isRunning else { req.logger.error("Gossipsub::Recieved Request After App Shutdown") return req.eventLoop.makeFailedFuture(BasePubSub.Errors.alreadyStopped) } return req.application.pubsub.gossipsub.processRequest(req) - + } } } diff --git a/Sources/LibP2PPubSub/routes.swift b/Sources/LibP2PPubSub/routes.swift index 113864b..5df1d68 100644 --- a/Sources/LibP2PPubSub/routes.swift +++ b/Sources/LibP2PPubSub/routes.swift @@ -1,13 +1,20 @@ +//===----------------------------------------------------------------------===// // -// routes.swift -// +// This source file is part of the swift-libp2p open source project // -// Created by Brandon Toms on 4/18/22. +// Copyright (c) 2022-2025 swift-libp2p project authors +// Licensed under MIT // +// See LICENSE for license information +// See CONTRIBUTORS for the list of swift-libp2p project authors +// +// SPDX-License-Identifier: MIT +// +//===----------------------------------------------------------------------===// import LibP2P -//func routes(_ app:Application) throws { +//func routes(_ app: Application) throws { // app.group("pubsub") { rendezvous in // // rendezvous.on("1.0.0") { req -> EventLoopFuture> in diff --git a/Tests/LibP2PPubSubTests/LibP2PPubSubFloodsubTests.swift b/Tests/LibP2PPubSubTests/LibP2PPubSubFloodsubTests.swift index 5fdb369..a7d7430 100644 --- a/Tests/LibP2PPubSubTests/LibP2PPubSubFloodsubTests.swift +++ b/Tests/LibP2PPubSubTests/LibP2PPubSubFloodsubTests.swift @@ -1,18 +1,26 @@ +//===----------------------------------------------------------------------===// // -// LibP2PPubSubFloodsubTests.swift -// +// This source file is part of the swift-libp2p open source project // -// Created by Brandon Toms on 4/25/22. 
+// Copyright (c) 2022-2025 swift-libp2p project authors +// Licensed under MIT // +// See LICENSE for license information +// See CONTRIBUTORS for the list of swift-libp2p project authors +// +// SPDX-License-Identifier: MIT +// +//===----------------------------------------------------------------------===// -import XCTest import LibP2P -import LibP2PNoise import LibP2PMPLEX +import LibP2PNoise +import XCTest + @testable import LibP2PPubSub class LibP2PPubSubFloodsubTests: XCTestCase { - + /// ************************************** /// Testing Internal Floodsub /// ************************************** @@ -20,68 +28,86 @@ class LibP2PPubSubFloodsubTests: XCTestCase { /// Init the libp2p nodes let node1 = try makeHost() let node2 = try makeHost() - + /// Prepare our expectations - let expectationNode1ReceivedNode2Subscription = expectation(description: "Node1 received fruit subscription from Node2") + let expectationNode1ReceivedNode2Subscription = expectation( + description: "Node1 received fruit subscription from Node2" + ) let expectationNode1ReceivedNode2Message = expectation(description: "Node1 received message from Node2") - let expectationNode2ReceivedNode1Subscription = expectation(description: "Node2 received fruit subscription from Node1") + let expectationNode2ReceivedNode1Subscription = expectation( + description: "Node2 received fruit subscription from Node1" + ) let expectationNode2ReceivedNode1Message = expectation(description: "Node2 received message from Node1") - + let node1Message = "banana" let node2Message = "pineapple" - + /// Node1 subscribes to topic 'fruit' - let subscription1 = try node1.pubsub.floodsub.subscribe(.init(topic: "fruit", signaturePolicy: .strictSign, validator: .acceptAll, messageIDFunc: .hashSequenceNumberAndFromFields)) + let subscription1 = try node1.pubsub.floodsub.subscribe( + .init( + topic: "fruit", + signaturePolicy: .strictSign, + validator: .acceptAll, + messageIDFunc: .hashSequenceNumberAndFromFields + ) + ) subscription1.on = { event -> EventLoopFuture in switch event { case .newPeer(let peer): node1.logger.info("Node1::NewPeer -> \(peer)") XCTAssertEqual(peer, node2.peerID) expectationNode1ReceivedNode2Subscription.fulfill() - + case .data(let pubSubMessage): node1.logger.info("Node1 -> \(String(data: pubSubMessage.data, encoding: .utf8) ?? "NIL")") XCTAssertEqual(String(data: pubSubMessage.data, encoding: .utf8), node2Message) expectationNode1ReceivedNode2Message.fulfill() - + case .error(let error): node1.logger.error("Node1 Error: \(error)") XCTFail(error.localizedDescription) } return node1.eventLoopGroup.next().makeSucceededVoidFuture() } - + /// Node2 subcribes to topic 'fruit' //let subscription2 = try fSub2.subscribe(topic: "fruit") - let subscription2 = try node2.pubsub.floodsub.subscribe(.init(topic: "fruit", signaturePolicy: .strictSign, validator: .acceptAll, messageIDFunc: .hashSequenceNumberAndFromFields)) + let subscription2 = try node2.pubsub.floodsub.subscribe( + .init( + topic: "fruit", + signaturePolicy: .strictSign, + validator: .acceptAll, + messageIDFunc: .hashSequenceNumberAndFromFields + ) + ) subscription2.on = { event -> EventLoopFuture in switch event { case .newPeer(let peer): node2.logger.info("Node2::NewPeer -> \(peer)") XCTAssertEqual(peer, node1.peerID) expectationNode2ReceivedNode1Subscription.fulfill() - + case .data(let pubSubMessage): node2.logger.info("Node2 -> \(String(data: pubSubMessage.data, encoding: .utf8) ?? 
"NIL")") XCTAssertEqual(String(data: pubSubMessage.data, encoding: .utf8), node1Message) expectationNode2ReceivedNode1Message.fulfill() - + case .error(let error): node2.logger.error("Node2 Error: \(error)") XCTFail(error.localizedDescription) } return node2.eventLoopGroup.next().makeSucceededVoidFuture() } - + /// Start the libp2p nodes try node1.start() try node2.start() - + sleep(1) - + /// Have node1 reach out to node2 try node1.newStream(to: node2.listenAddresses.first!, forProtocol: "/floodsub/1.0.0") - + /// Publish some messages... node1.eventLoopGroup.next().scheduleTask(in: .seconds(1)) { print("Node 1 Publishing Message") @@ -91,24 +117,24 @@ class LibP2PPubSubFloodsubTests: XCTestCase { print("Node 2 Publishing Message") subscription2.publish(node2Message.data(using: .utf8)!) } - + waitForExpectations(timeout: 10, handler: nil) - + /// Check to see if we can poll our PeerStore for known peers that support '/floodsub/1.0.0' let peers = try node1.peers.getPeers(supportingProtocol: SemVerProtocol("/floodsub/1.0.0")!, on: nil).wait() XCTAssertEqual(peers.count, 1) XCTAssertEqual(peers.first!, node2.peerID.b58String) - + /// Dump the current state of our PeerStore node1.peers.dumpAll() - + /// Stop the nodes node1.shutdown() node2.shutdown() - + print("All Done!") } - + /// ************************************** /// Testing Internal Floodsub Subscriptions /// ************************************** @@ -120,21 +146,37 @@ class LibP2PPubSubFloodsubTests: XCTestCase { /// Init the libp2p nodes let node1 = try makeHost() let node2 = try makeHost() - + /// Prepare our expectations - let expectationNode1ReceivedNode2Subscription = expectation(description: "Node1 received news subscription from Node2") + let expectationNode1ReceivedNode2Subscription = expectation( + description: "Node1 received news subscription from Node2" + ) //let expectationNode1ReceivedNode2Unsubscription = expectation(description: "Node1 received news unsubscription from Node2") - let expectationNode1ReceivedNode2SecondSubscription = expectation(description: "Node1 received news subscription from Node2 for the second time") - - let expectationNode2ReceivedNode1Subscription = expectation(description: "Node2 received news subscription from Node1") - let expectationNode2ReceivedFirstNode1Message = expectation(description: "Node2 received news message from Node1") - let expectationNode2ReceivedSecondNode1Message = expectation(description: "Node2 received news message from Node1") - + let expectationNode1ReceivedNode2SecondSubscription = expectation( + description: "Node1 received news subscription from Node2 for the second time" + ) + + let expectationNode2ReceivedNode1Subscription = expectation( + description: "Node2 received news subscription from Node1" + ) + let expectationNode2ReceivedFirstNode1Message = expectation( + description: "Node2 received news message from Node1" + ) + let expectationNode2ReceivedSecondNode1Message = expectation( + description: "Node2 received news message from Node1" + ) + let node1Message = "hot news!" 
- + let subscriptionConfig = PubSub.SubscriptionConfig( + topic: "news", + signaturePolicy: .strictSign, + validator: .acceptAll, + messageIDFunc: .hashSequenceNumberAndFromFields + ) + var node2SubscriptionCount = 0 /// Node1 subscribes to topic 'fruit' - let subscription1 = try node1.pubsub.floodsub.subscribe(.init(topic: "news", signaturePolicy: .strictSign, validator: .acceptAll, messageIDFunc: .hashSequenceNumberAndFromFields)) + let subscription1 = try node1.pubsub.floodsub.subscribe(subscriptionConfig) subscription1.on = { event -> EventLoopFuture in switch event { case .newPeer(let peer): @@ -146,29 +188,30 @@ class LibP2PPubSubFloodsubTests: XCTestCase { } else if node2SubscriptionCount == 2 { expectationNode1ReceivedNode2SecondSubscription.fulfill() } - + case .data(let pubSubMessage): node1.logger.info("Node1 -> \(pubSubMessage)") XCTFail("Node 1 shouldn't receive data during this test") - + case .error(let error): node1.logger.error("Node1 Error: \(error)") XCTFail(error.localizedDescription) } return node1.eventLoopGroup.next().makeSucceededVoidFuture() } - - /// Node2 subcribes to topic 'fruit' + + /// Node2 subcribes to topic 'news' //let subscription2 = try fSub2.subscribe(topic: "fruit") var node2MessageCount = 0 let messagesPerBatch = 2 - let subscriptionHandler:(PubSub.SubscriptionEvent) -> EventLoopFuture = { event -> EventLoopFuture in + let subscriptionHandler: (PubSub.SubscriptionEvent) -> EventLoopFuture = { + event -> EventLoopFuture in switch event { case .newPeer(let peer): node2.logger.info("Node2::NewPeer -> \(peer)") XCTAssertEqual(peer, node1.peerID) expectationNode2ReceivedNode1Subscription.fulfill() - + case .data(let pubSubMessage): node2.logger.info("Node2 -> \(String(data: pubSubMessage.data, encoding: .utf8) ?? "NIL")") XCTAssertEqual(String(data: pubSubMessage.data, encoding: .utf8), node1Message) @@ -178,72 +221,86 @@ class LibP2PPubSubFloodsubTests: XCTestCase { } else if node2MessageCount == messagesPerBatch * 2 { expectationNode2ReceivedSecondNode1Message.fulfill() } - + case .error(let error): node2.logger.error("Node2 Error: \(error)") XCTFail(error.localizedDescription) } return node2.eventLoopGroup.next().makeSucceededVoidFuture() } - - var subscription2 = try node2.pubsub.floodsub.subscribe(.init(topic: "news", signaturePolicy: .strictSign, validator: .acceptAll, messageIDFunc: .hashSequenceNumberAndFromFields)) + + var subscription2 = try node2.pubsub.floodsub.subscribe(subscriptionConfig) subscription2.on = subscriptionHandler - + /// Start the libp2p nodes try node1.start() try node2.start() - + sleep(1) - + /// Have node1 reach out to node2 try node2.newStream(to: node1.listenAddresses.first!, forProtocol: FloodSub.multicodec) - + /// Publish some messages... - let repeatedTask = node1.eventLoopGroup.next().scheduleRepeatedTask(initialDelay: .milliseconds(50), delay: .seconds(1)) { task in + let repeatedTask = node1.eventLoopGroup.next().scheduleRepeatedTask( + initialDelay: .milliseconds(50), + delay: .seconds(1) + ) { task in subscription1.publish(node1Message.data(using: .utf8)!) 
} - + /// Wait for initial subscription alerts and the first message to arrive on Node 2 - wait(for: [expectationNode1ReceivedNode2Subscription, expectationNode2ReceivedNode1Subscription, expectationNode2ReceivedFirstNode1Message], timeout: 10, enforceOrder: false) - + wait( + for: [ + expectationNode1ReceivedNode2Subscription, expectationNode2ReceivedNode1Subscription, + expectationNode2ReceivedFirstNode1Message, + ], + timeout: 10, + enforceOrder: false + ) + /// Unsubscribe Node2 from our `news` subscription //try node2.pubsub.floodsub.unsubscribe(topic: "news").wait() subscription2.unsubscribe() - + //wait(for: [expectationNode1ReceivedNode2Unsubscription], timeout: 10, enforceOrder: false) - + sleep(1) - + /// Re subscribe Node2 to our `news` subscription - subscription2 = try node2.pubsub.floodsub.subscribe(.init(topic: "news", signaturePolicy: .strictSign, validator: .acceptAll, messageIDFunc: .hashSequenceNumberAndFromFields)) + subscription2 = try node2.pubsub.floodsub.subscribe(subscriptionConfig) subscription2.on = subscriptionHandler - + /// Wait for the second subscription alert on Node1 and the second `news` message to arrive at Node2 - wait(for: [expectationNode1ReceivedNode2SecondSubscription, expectationNode2ReceivedSecondNode1Message], timeout: 10, enforceOrder: false) - + wait( + for: [expectationNode1ReceivedNode2SecondSubscription, expectationNode2ReceivedSecondNode1Message], + timeout: 10, + enforceOrder: false + ) + try node2.pubsub.floodsub.unsubscribe(topic: "news").wait() - + repeatedTask.cancel() - + sleep(1) - + /// Check to see if we can poll our PeerStore for known peers that support '/chat/1.0.0' let peers = try node1.peers.getPeers(supportingProtocol: SemVerProtocol(FloodSub.multicodec)!, on: nil).wait() XCTAssertEqual(peers.count, 1) XCTAssertEqual(peers.first!, node2.peerID.b58String) - + /// Ensure Node1 Subscription count equals 2 (Node2 subscribed twice) XCTAssertEqual(node2SubscriptionCount, 2) /// Ensure the Node2 received the appropriate number of `news` messages XCTAssertEqual(node2MessageCount, messagesPerBatch * 2) - + /// Stop the nodes node1.shutdown() node2.shutdown() - + print("All Done!") } - + /// ************************************** /// Testing FloodSub Message Propogation /// ************************************** @@ -263,15 +320,15 @@ class LibP2PPubSubFloodsubTests: XCTestCase { case beacon case beacon2beacon } - + class Node { - let libp2p:Application - let expectation:XCTestExpectation - let messageToSend:String - var messagesReceived:[String] - var handler:PubSub.SubscriptionHandler! + let libp2p: Application + let expectation: XCTestExpectation + let messageToSend: String + var messagesReceived: [String] + var handler: PubSub.SubscriptionHandler! 
- init(libp2p:Application, expectation:XCTestExpectation, messageToSend:String) { + init(libp2p: Application, expectation: XCTestExpectation, messageToSend: String) { self.libp2p = libp2p self.expectation = expectation self.messageToSend = messageToSend @@ -281,13 +338,13 @@ class LibP2PPubSubFloodsubTests: XCTestCase { } /// Consider the ConenctionManagers max concurrent connections param while setting this number (especially for the beacon structure) (the default is 25 connections) - let nodesToTest:Int = 10 - let structureToTest:NetworkStructure = .beacon2beacon - + let nodesToTest: Int = 10 + let structureToTest: NetworkStructure = .beacon2beacon + //guard nodesToTest > 2 else { XCTFail("We need at least 3 nodes to accurately perform this test..."); return } /// Init the libp2p nodes, floodsub routers, and prepare our expectations - var nodes:[Node] = try (0.. \(peer)") - + case .data(let pubSubMessage): node.libp2p.logger.debug("Node[\(node.libp2p.peerID)]::Data -> \(pubSubMessage)") node.messagesReceived.append(String(data: pubSubMessage.data, encoding: .utf8)!) if node.messagesReceived.count == nodesToTest { node.expectation.fulfill() } - + case .error(let error): node.libp2p.logger.error("Node[\(node.libp2p.peerID)]::Error -> \(error)") XCTFail(error.localizedDescription) @@ -327,10 +384,12 @@ class LibP2PPubSubFloodsubTests: XCTestCase { return node.libp2p.eventLoopGroup.next().makeSucceededVoidFuture() } } - + /// Start the libp2p nodes - try nodes.forEach { try $0.libp2p.start() } - + for node in nodes { XCTAssertNoThrow(try node.libp2p.start()) } + + print("Structuring Peers - \(structureToTest)") + /// ****************************************** /// The following logic determines the structure of the network /// ****************************************** @@ -340,11 +399,18 @@ class LibP2PPubSubFloodsubTests: XCTestCase { /// /// Network Structure Diagram /// n -> ... -> n - try nodes.enumerated().forEach { (idx, node) in - guard nodes.count > (idx+1) else { return } - try node.libp2p.newStream(to: nodes[idx + 1].libp2p.listenAddresses.first!, forProtocol: FloodSub.multicodec) + for (idx, node) in nodes.enumerated() { + guard nodes.count > (idx + 1) else { continue } + guard let nextPeerAddress = nodes[idx + 1].libp2p.listenAddresses.first else { + XCTFail("Next Peer Address not available") + continue + } + try? node.libp2p.newStream( + to: nextPeerAddress, + forProtocol: FloodSub.multicodec + ) } - + case .circular: /// If we tie the ends together, we have a circular network graph /// - Note: with 3 Nodes, this results in 6 wasted / redundant message propogations, 7 Nodes -> 14 redundant messages... @@ -352,14 +418,20 @@ class LibP2PPubSubFloodsubTests: XCTestCase { /// n -> ... 
-> n -, /// ^ | /// '----------------' - try nodes.enumerated().forEach { (idx, node) in - guard nodes.count > (idx+1) else { - try node.libp2p.newStream(to: nodes[0].libp2p.listenAddresses.first!, forProtocol: FloodSub.multicodec) - return + for (idx, node) in nodes.enumerated() { + guard nodes.count > (idx + 1) else { + try node.libp2p.newStream( + to: nodes[0].libp2p.listenAddresses.first!, + forProtocol: FloodSub.multicodec + ) + continue } - try node.libp2p.newStream(to: nodes[idx + 1].libp2p.listenAddresses.first!, forProtocol: FloodSub.multicodec) + try node.libp2p.newStream( + to: nodes[idx + 1].libp2p.listenAddresses.first!, + forProtocol: FloodSub.multicodec + ) } - + case .beacon: /// Have each node reach out to the zeroeth node (a beacon set up) /// @@ -370,11 +442,11 @@ class LibP2PPubSubFloodsubTests: XCTestCase { /// : / /// n /// - try nodes.enumerated().forEach { (idx, node) in - guard idx != 0 else { return } + for (idx, node) in nodes.enumerated() { + guard idx != 0 else { continue } try node.libp2p.newStream(to: nodes[0].libp2p.listenAddresses.first!, forProtocol: FloodSub.multicodec) } - + case .beacon2beacon: /// Splits the network into Evens & Odds then connects node 0 and 1 to bridge the devide... /// @@ -385,26 +457,34 @@ class LibP2PPubSubFloodsubTests: XCTestCase { /// / \ /// n n /// - try nodes.enumerated().forEach { (idx, node) in - guard idx != 0 else { return } + for (idx, node) in nodes.enumerated() { + guard idx != 0 else { continue } if idx == 1 { /// Have Node1 reach out to Node0 - try node.libp2p.newStream(to: nodes[0].libp2p.listenAddresses.first!, forProtocol: FloodSub.multicodec) - return + try node.libp2p.newStream( + to: nodes[0].libp2p.listenAddresses.first!, + forProtocol: FloodSub.multicodec + ) + continue } if idx % 2 == 0 { /// If the node is an even number (have it reach out to Node0, our even beacon node) - try node.libp2p.newStream(to: nodes[0].libp2p.listenAddresses.first!, forProtocol: FloodSub.multicodec) + try node.libp2p.newStream( + to: nodes[0].libp2p.listenAddresses.first!, + forProtocol: FloodSub.multicodec + ) } else { /// Otherwise the node must be odd (have it reach out to Node1, our odd beacon node) - try node.libp2p.newStream(to: nodes[1].libp2p.listenAddresses.first!, forProtocol: FloodSub.multicodec) + try node.libp2p.newStream( + to: nodes[1].libp2p.listenAddresses.first!, + forProtocol: FloodSub.multicodec + ) } } - } /// Publish some messages... - nodes.enumerated().forEach { (idx, node) in + for node in nodes { node.libp2p.eventLoopGroup.next().scheduleTask(in: .milliseconds(Int64.random(in: 500...2_000))) { //node.5!.publish(node.3.data(using: .utf8)!) node.libp2p.pubsub.publish(node.messageToSend.data(using: .utf8)!.bytes, toTopic: "fruit") @@ -412,16 +492,21 @@ class LibP2PPubSubFloodsubTests: XCTestCase { } /// Wait for each node to receive each message - waitForExpectations(timeout: 10, handler: nil) + waitForExpectations(timeout: 10) /// Wait an additional 2 seconds to ensure message propogation doesn't echo through the network causing duplicates sleep(2) nodes.first!.libp2p.peers.dumpAll() - + + /// Close all connections + for node in nodes { + try? node.libp2p.connections.closeAllConnections().wait() + } + /// Stop the nodes - nodes.forEach { $0.libp2p.shutdown() } - + for node in nodes { node.libp2p.shutdown() } + /// Ensure that each node received every message... 
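// The diff adds an explicit connection-draining pass before shutting the hosts down. A minimal
// sketch of that teardown order, using the same connections / shutdown API as the test; the
// try? mirrors the best-effort close above, presumably so a failed close cannot abort teardown.
import LibP2P

func shutDownAll(_ nodes: [Application]) {
    // Close every open connection first ...
    for node in nodes {
        try? node.connections.closeAllConnections().wait()
    }
    // ... then stop the hosts themselves.
    for node in nodes {
        node.shutdown()
    }
}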
for node in nodes { XCTAssertEqual(node.messagesReceived.count, nodes.count) @@ -437,11 +522,11 @@ class LibP2PPubSubFloodsubTests: XCTestCase { let waitExp = expectation(description: "Another Wait") wait(for: 1, expectation: waitExp) - waitForExpectations(timeout: 20, handler: nil) + waitForExpectations(timeout: 20) print("All Done!") } - + /// ************************************** /// Testing JS Interoperability /// ************************************** @@ -471,11 +556,18 @@ class LibP2PPubSubFloodsubTests: XCTestCase { let topic = "news" var expectedMessageCount = 5 let messageExpectation = expectation(description: "MessagesReceived") - + try app.start() //try app.pubsub.floodsub.start() - - let subscription = try app.pubsub.floodsub.subscribe(.init(topic: topic, signaturePolicy: .strictSign, validator: .acceptAll, messageIDFunc: .hashSequenceNumberAndFromFields)) + + let subscription = try app.pubsub.floodsub.subscribe( + .init( + topic: topic, + signaturePolicy: .strictSign, + validator: .acceptAll, + messageIDFunc: .hashSequenceNumberAndFromFields + ) + ) subscription.on = { event -> EventLoopFuture in switch event { case .newPeer(let peer): @@ -483,36 +575,36 @@ class LibP2PPubSubFloodsubTests: XCTestCase { app.eventLoopGroup.next().scheduleTask(in: .milliseconds(100)) { app.pubsub.publish("Hello from swift!".data(using: .utf8)!.bytes, toTopic: topic) } - + case .data(let pubSubMessage): print(String(data: pubSubMessage.data, encoding: .utf8) ?? "NIL") expectedMessageCount -= 1 if expectedMessageCount == 0 { messageExpectation.fulfill() } - + case .error(let error): app.logger.error("Error: \(error)") } return app.eventLoopGroup.next().makeSucceededVoidFuture() } - + try? app.newStream(to: Multiaddr("/ip4/192.168.1.19/tcp/51249"), forProtocol: "/ipfs/ping/1.0.0") waitForExpectations(timeout: 30) - + let _ = app.pubsub.publish("Goodbyte from swift!".data(using: .utf8)!.bytes, toTopic: topic) - + subscription.unsubscribe() - + print("Shutting down libp2p chat...") app.peers.dumpAll() - + //try app.pubsub.floodsub.stop() //app.running?.stop() app.shutdown() } - + func testExternalFloodsubConnections() throws { throw XCTSkip("Integration Test Skipped By Default") let app = try Application(.testing, peerID: PeerID(.Ed25519)) @@ -523,7 +615,7 @@ class LibP2PPubSubFloodsubTests: XCTestCase { app.security.use(.noise) app.muxers.use(.mplex) app.pubsub.use(.floodsub) - + //app.discovery.use(.bootstrap([ // Multiaddr("/ip4/20.80.20.28/tcp/4001/p2p/12D3KooWH2jndcSD6MC7cvs5zJNfMgHJFBc8zpebNS3L2HGXvQnS"), // Multiaddr("/ip4/23.239.22.148/tcp/4001/p2p/12D3KooWBidnLf4iRGgZpeFVCqQjNzAsSx2opZPbG8o9tpCf2rG5") @@ -534,11 +626,18 @@ class LibP2PPubSubFloodsubTests: XCTestCase { //let topic = "/ipfs-pubsub-direct-channel/v1/\(app.peerID.b58String)/12D3KooWBidnLf4iRGgZpeFVCqQjNzAsSx2opZPbG8o9tpCf2rG5" //var expectedMessageCount = 5 //let messageExpectation = expectation(description: "MessagesReceived") - + try app.start() //try app.pubsub.floodsub.start() - - let subscription = try app.pubsub.floodsub.subscribe(.init(topic: topic, signaturePolicy: .strictSign, validator: .acceptAll, messageIDFunc: .hashSequenceNumberAndFromFields)) + + let subscription = try app.pubsub.floodsub.subscribe( + .init( + topic: topic, + signaturePolicy: .strictSign, + validator: .acceptAll, + messageIDFunc: .hashSequenceNumberAndFromFields + ) + ) subscription.on = { event -> EventLoopFuture in switch event { case .newPeer(let peer): @@ -546,49 +645,52 @@ class LibP2PPubSubFloodsubTests: XCTestCase { 
app.eventLoopGroup.next().scheduleTask(in: .milliseconds(100)) { app.pubsub.publish("Hello from swift!".data(using: .utf8)!.bytes, toTopic: topic) } - + case .data(let pubSubMessage): print(String(data: pubSubMessage.data, encoding: .utf8) ?? "NIL") -// expectedMessageCount -= 1 -// if expectedMessageCount == 0 { -// messageExpectation.fulfill() -// } - + // expectedMessageCount -= 1 + // if expectedMessageCount == 0 { + // messageExpectation.fulfill() + // } + case .error(let error): app.logger.error("Error: \(error)") } return app.eventLoopGroup.next().makeSucceededVoidFuture() } - + do { //try app.newStream(to: Multiaddr("/ip4/20.80.20.28/tcp/4001/p2p/12D3KooWH2jndcSD6MC7cvs5zJNfMgHJFBc8zpebNS3L2HGXvQnS"), forProtocol: "/ipfs/ping/1.0.0") //try app.newStream(to: Multiaddr("/ip4/23.239.22.148/tcp/4001/p2p/12D3KooWBidnLf4iRGgZpeFVCqQjNzAsSx2opZPbG8o9tpCf2rG5"), forProtocol: "/ipfs/ping/1.0.0") - try app.newStream(to: Multiaddr("/ip4/139.178.88.229/tcp/4001/p2p/12D3KooWK3rWCYssQkQHHm5q1K1qHUBRgmEp18sHDnxRRtL5kPsb"), forProtocol: "/ipfs/ping/1.0.0") + try app.newStream( + to: Multiaddr("/ip4/139.178.88.229/tcp/4001/p2p/12D3KooWK3rWCYssQkQHHm5q1K1qHUBRgmEp18sHDnxRRtL5kPsb"), + forProtocol: "/ipfs/ping/1.0.0" + ) } catch { print("\(error)") } //waitForExpectations(timeout: 10) sleep(20) - + let _ = app.pubsub.publish("Goodbyte from swift!".data(using: .utf8)!.bytes, toTopic: topic) - + subscription.unsubscribe() - + print("Shutting down libp2p chat...") app.peers.dumpAll() - + //try app.pubsub.floodsub.stop() //app.running?.stop() app.shutdown() } - - private func wait(for sec:Int, expectation:XCTestExpectation) { + + private func wait(for sec: Int, expectation: XCTestExpectation) { DispatchQueue.main.asyncAfter(deadline: .now() + .seconds(sec)) { expectation.fulfill() } } - - var nextPort:Int = 10000 + + var nextPort: Int = 10200 private func makeHost() throws -> Application { let lib = try Application(.testing, peerID: PeerID(.Ed25519)) lib.connectionManager.use(connectionType: BasicConnectionLight.self) @@ -597,9 +699,9 @@ class LibP2PPubSubFloodsubTests: XCTestCase { lib.muxers.use(.mplex) lib.pubsub.use(.floodsub) lib.servers.use(.tcp(host: "127.0.0.1", port: nextPort)) - + nextPort += 1 - + return lib } diff --git a/Tests/LibP2PPubSubTests/LibP2PPubSubGossipsubTests.swift b/Tests/LibP2PPubSubTests/LibP2PPubSubGossipsubTests.swift index 1bf9c55..65cc3df 100644 --- a/Tests/LibP2PPubSubTests/LibP2PPubSubGossipsubTests.swift +++ b/Tests/LibP2PPubSubTests/LibP2PPubSubGossipsubTests.swift @@ -1,18 +1,26 @@ +//===----------------------------------------------------------------------===// // -// LibP2PPubSubGossipsubTests.swift -// +// This source file is part of the swift-libp2p open source project // -// Created by Brandon Toms on 4/28/22. 
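// Sketch of the per-suite host factory pattern shown above. Each call binds a fresh TCP port by
// bumping a counter; the diff also moves the two suites onto different base ports (10200 here,
// 10100 in the gossipsub file), presumably so their listeners cannot collide when both classes
// run in one process. The configuration calls mirror makeHost(); the 10300 base below is just a
// hypothetical value for a third suite.
import LibP2P
import LibP2PMPLEX
import LibP2PNoise
@testable import LibP2PPubSub

var examplePort: Int = 10300

func makeFloodsubHost() throws -> Application {
    let host = try Application(.testing, peerID: PeerID(.Ed25519))
    host.connectionManager.use(connectionType: BasicConnectionLight.self)
    host.security.use(.noise)
    host.muxers.use(.mplex)
    host.pubsub.use(.floodsub)
    host.servers.use(.tcp(host: "127.0.0.1", port: examplePort))
    examplePort += 1
    return host
}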
+// Copyright (c) 2022-2025 swift-libp2p project authors +// Licensed under MIT // +// See LICENSE for license information +// See CONTRIBUTORS for the list of swift-libp2p project authors +// +// SPDX-License-Identifier: MIT +// +//===----------------------------------------------------------------------===// -import XCTest import LibP2P -import LibP2PNoise import LibP2PMPLEX +import LibP2PNoise +import XCTest + @testable import LibP2PPubSub class LibP2PPubSubGossipsubTests: XCTestCase { - + /// ************************************** /// Testing Internal Gossipsub /// ************************************** @@ -20,68 +28,86 @@ class LibP2PPubSubGossipsubTests: XCTestCase { /// Init the libp2p nodes let node1 = try makeHost() let node2 = try makeHost() - + /// Prepare our expectations - let expectationNode1ReceivedNode2Subscription = expectation(description: "Node1 received fruit subscription from Node2") + let expectationNode1ReceivedNode2Subscription = expectation( + description: "Node1 received fruit subscription from Node2" + ) let expectationNode1ReceivedNode2Message = expectation(description: "Node1 received message from Node2") - let expectationNode2ReceivedNode1Subscription = expectation(description: "Node2 received fruit subscription from Node1") + let expectationNode2ReceivedNode1Subscription = expectation( + description: "Node2 received fruit subscription from Node1" + ) let expectationNode2ReceivedNode1Message = expectation(description: "Node2 received message from Node1") - + let node1Message = "banana" let node2Message = "pineapple" - + /// Node1 subscribes to topic 'fruit' - let subscription1 = try node1.pubsub.gossipsub.subscribe(.init(topic: "fruit", signaturePolicy: .strictSign, validator: .acceptAll, messageIDFunc: .concatFromAndSequenceFields)) + let subscription1 = try node1.pubsub.gossipsub.subscribe( + .init( + topic: "fruit", + signaturePolicy: .strictSign, + validator: .acceptAll, + messageIDFunc: .concatFromAndSequenceFields + ) + ) subscription1.on = { event -> EventLoopFuture in switch event { case .newPeer(let peer): node1.logger.info("Node1::NewPeer -> \(peer)") XCTAssertEqual(peer, node2.peerID) expectationNode1ReceivedNode2Subscription.fulfill() - + case .data(let pubSubMessage): node1.logger.info("Node1 -> \(String(data: pubSubMessage.data, encoding: .utf8) ?? "NIL")") XCTAssertEqual(String(data: pubSubMessage.data, encoding: .utf8), node2Message) expectationNode1ReceivedNode2Message.fulfill() - + case .error(let error): node1.logger.error("Node1 Error: \(error)") XCTFail(error.localizedDescription) } return node1.eventLoopGroup.next().makeSucceededVoidFuture() } - + /// Node2 subcribes to topic 'fruit' //let subscription2 = try fSub2.subscribe(topic: "fruit") - let subscription2 = try node2.pubsub.gossipsub.subscribe(.init(topic: "fruit", signaturePolicy: .strictSign, validator: .acceptAll, messageIDFunc: .concatFromAndSequenceFields)) + let subscription2 = try node2.pubsub.gossipsub.subscribe( + .init( + topic: "fruit", + signaturePolicy: .strictSign, + validator: .acceptAll, + messageIDFunc: .concatFromAndSequenceFields + ) + ) subscription2.on = { event -> EventLoopFuture in switch event { case .newPeer(let peer): node2.logger.info("Node2::NewPeer -> \(peer)") XCTAssertEqual(peer, node1.peerID) expectationNode2ReceivedNode1Subscription.fulfill() - + case .data(let pubSubMessage): node2.logger.info("Node2 -> \(String(data: pubSubMessage.data, encoding: .utf8) ?? 
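// These tests lean on two different message-ID strategies: the gossipsub test here subscribes
// with .concatFromAndSequenceFields, while the floodsub tests earlier use
// .hashSequenceNumberAndFromFields. A small sketch of both subscription configs side by side,
// assuming the same .init shape (topic / signaturePolicy / validator / messageIDFunc) used
// throughout this diff; handlers would be attached via .on exactly as in the tests.
import LibP2P
@testable import LibP2PPubSub

func subscribeBothWays(gossipNode: Application, floodNode: Application) throws {
    // Message ID built by concatenating the `from` and sequence-number fields.
    _ = try gossipNode.pubsub.gossipsub.subscribe(
        .init(
            topic: "fruit",
            signaturePolicy: .strictSign,
            validator: .acceptAll,
            messageIDFunc: .concatFromAndSequenceFields
        )
    )
    // Message ID built by hashing the same two fields.
    _ = try floodNode.pubsub.floodsub.subscribe(
        .init(
            topic: "news",
            signaturePolicy: .strictSign,
            validator: .acceptAll,
            messageIDFunc: .hashSequenceNumberAndFromFields
        )
    )
}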
"NIL")") XCTAssertEqual(String(data: pubSubMessage.data, encoding: .utf8), node1Message) expectationNode2ReceivedNode1Message.fulfill() - + case .error(let error): node2.logger.error("Node2 Error: \(error)") XCTFail(error.localizedDescription) } return node2.eventLoopGroup.next().makeSucceededVoidFuture() } - + /// Start the libp2p nodes try node1.start() try node2.start() - + sleep(1) - + /// Have node1 reach out to node2 try node1.newStream(to: node2.listenAddresses.first!, forProtocol: "/meshsub/1.0.0") - + /// Publish some messages... node1.eventLoopGroup.next().scheduleTask(in: .seconds(2)) { subscription1.publish(node1Message.data(using: .utf8)!) @@ -89,34 +115,33 @@ class LibP2PPubSubGossipsubTests: XCTestCase { node2.eventLoopGroup.next().scheduleTask(in: .seconds(3)) { subscription2.publish(node2Message.data(using: .utf8)!) } - + waitForExpectations(timeout: 10, handler: nil) - + subscription1.unsubscribe() subscription2.unsubscribe() - + sleep(1) - + /// Check to see if we can poll our PeerStore for known peers that support '/chat/1.0.0' let peers = try node1.peers.getPeers(supportingProtocol: SemVerProtocol("/meshsub/1.0.0")!, on: nil).wait() XCTAssertEqual(peers.count, 1) XCTAssertEqual(peers.first!, node2.peerID.b58String) - + /// Dump the current state of our PeerStore node1.peers.dumpAll() - + node1.pubsub.gossipsub.dumpEventList() - + node2.pubsub.gossipsub.dumpEventList() - + /// Stop the nodes node1.shutdown() node2.shutdown() - + print("All Done!") } - - + /// ************************************** /// Testing Internal Gossipsub Subscriptions /// ************************************** @@ -128,20 +153,37 @@ class LibP2PPubSubGossipsubTests: XCTestCase { /// Init the libp2p nodes let node1 = try makeHost() let node2 = try makeHost() - + /// Prepare our expectations - let expectationNode1ReceivedNode2Subscription = expectation(description: "Node1 received fruit subscription from Node2") + let expectationNode1ReceivedNode2Subscription = expectation( + description: "Node1 received fruit subscription from Node2" + ) //let expectationNode1ReceivedNode2Unsubscription = expectation(description: "Node1 received fruit unsubscription from Node2") - let expectationNode1ReceivedNode2SecondSubscription = expectation(description: "Node1 received fruit subscription from Node2 for the second time") - - let expectationNode2ReceivedNode1Subscription = expectation(description: "Node2 received fruit subscription from Node1") - let expectationNode2ReceivedFirstNode1Message = expectation(description: "Node2 received first message from Node1") - let expectationNode2ReceivedSecondNode1Message = expectation(description: "Node2 received first message from Node1") - + let expectationNode1ReceivedNode2SecondSubscription = expectation( + description: "Node1 received fruit subscription from Node2 for the second time" + ) + + let expectationNode2ReceivedNode1Subscription = expectation( + description: "Node2 received fruit subscription from Node1" + ) + let expectationNode2ReceivedFirstNode1Message = expectation( + description: "Node2 received first message from Node1" + ) + let expectationNode2ReceivedSecondNode1Message = expectation( + description: "Node2 received first message from Node1" + ) + let node1Message = "hot news!" 
var node2SubscriptionCount = 0 /// Node1 subscribes to topic 'fruit' - let subscription1 = try node1.pubsub.gossipsub.subscribe(.init(topic: "news", signaturePolicy: .strictSign, validator: .acceptAll, messageIDFunc: .hashSequenceNumberAndFromFields)) + let subscription1 = try node1.pubsub.gossipsub.subscribe( + .init( + topic: "news", + signaturePolicy: .strictSign, + validator: .acceptAll, + messageIDFunc: .hashSequenceNumberAndFromFields + ) + ) subscription1.on = { event -> EventLoopFuture in switch event { case .newPeer(let peer): @@ -153,28 +195,29 @@ class LibP2PPubSubGossipsubTests: XCTestCase { } else if node2SubscriptionCount == 2 { expectationNode1ReceivedNode2SecondSubscription.fulfill() } - + case .data(let pubSubMessage): node1.logger.info("Node1 -> \(pubSubMessage)") XCTFail("Node 1 shouldn't receive data during this test") - + case .error(let error): node1.logger.error("Node1 Error: \(error)") XCTFail(error.localizedDescription) } return node1.eventLoopGroup.next().makeSucceededVoidFuture() } - + /// Node2 subcribes to topic 'fruit' - var node2Messages:[PubSubMessage] = [] + var node2Messages: [PubSubMessage] = [] var fulfillmentCount = 0 - let subscriptionHandler:(PubSub.SubscriptionEvent) -> EventLoopFuture = { event -> EventLoopFuture in + let subscriptionHandler: (PubSub.SubscriptionEvent) -> EventLoopFuture = { + event -> EventLoopFuture in switch event { case .newPeer(let peer): node2.logger.info("Node2::NewPeer -> \(peer)") XCTAssertEqual(peer, node1.peerID) expectationNode2ReceivedNode1Subscription.fulfill() - + case .data(let pubSubMessage): node2.logger.info("Node2 -> \(String(data: pubSubMessage.data, encoding: .utf8) ?? "NIL")") //XCTAssertEqual(String(data: pubSubMessage.data, encoding: .utf8), node1Message) @@ -186,92 +229,119 @@ class LibP2PPubSubGossipsubTests: XCTestCase { expectationNode2ReceivedSecondNode1Message.fulfill() fulfillmentCount += 1 } - + case .error(let error): node2.logger.error("Node2 Error: \(error)") XCTFail(error.localizedDescription) } return node2.eventLoopGroup.next().makeSucceededVoidFuture() } - - var subscription2 = try node2.pubsub.gossipsub.subscribe(.init(topic: "news", signaturePolicy: .strictSign, validator: .acceptAll, messageIDFunc: .hashSequenceNumberAndFromFields)) + + var subscription2 = try node2.pubsub.gossipsub.subscribe( + .init( + topic: "news", + signaturePolicy: .strictSign, + validator: .acceptAll, + messageIDFunc: .hashSequenceNumberAndFromFields + ) + ) subscription2.on = subscriptionHandler - + /// Start the libp2p nodes try node1.start() try node2.start() - + sleep(1) - + /// Have node1 reach out to node2 try node2.newStream(to: node1.listenAddresses.first!, forProtocol: GossipSub.multicodec) - + /// Publish some messages... var counter = 0 - var node1Messages:[String] = [] - let task = node1.eventLoopGroup.next().scheduleRepeatedTask(initialDelay: .milliseconds(500), delay: .seconds(1)) { task in + var node1Messages: [String] = [] + let task = node1.eventLoopGroup.next().scheduleRepeatedTask( + initialDelay: .milliseconds(500), + delay: .seconds(1) + ) { task in let msg = "\(node1Message)[\(counter)]" subscription1.publish(msg.data(using: .utf8)!) 
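// A minimal sketch of the repeated-publisher used in this test: a scheduled task emits one
// numbered `news` message per second until it is cancelled, and the caller decides when to stop
// it (the test cancels after both expectation waits complete). The publish closure stands in for
// subscription1.publish(_:) from the surrounding test.
import Foundation
import LibP2P

func runNewsTicker(on node: Application, publish: @escaping (Data) -> Void, forSeconds seconds: UInt32) {
    var counter = 0
    let ticker = node.eventLoopGroup.next().scheduleRepeatedTask(
        initialDelay: .milliseconds(500),
        delay: .seconds(1)
    ) { _ in
        // One numbered message per tick, mirroring "hot news![n]" above.
        publish("hot news![\(counter)]".data(using: .utf8)!)
        counter += 1
    }
    sleep(seconds)   // let a few ticks go out, as the test does with its expectation waits
    ticker.cancel()  // stop the stream; nothing is published after this point
}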
node1Messages.append(msg) counter += 1 } - + /// Wait for initial subscription alerts and the first message to arrive on Node 2 - wait(for: [expectationNode1ReceivedNode2Subscription, expectationNode2ReceivedNode1Subscription, expectationNode2ReceivedFirstNode1Message], timeout: 10, enforceOrder: false) - + wait( + for: [ + expectationNode1ReceivedNode2Subscription, expectationNode2ReceivedNode1Subscription, + expectationNode2ReceivedFirstNode1Message, + ], + timeout: 10, + enforceOrder: false + ) + //sleep(1) - + /// Unsubscribe Node2 from our `news` subscription //try node2.pubsub.gossipsub.unsubscribe(topic: "news").wait() subscription2.unsubscribe() - + //wait(for: [expectationNode1ReceivedNode2Unsubscription], timeout: 10, enforceOrder: false) - + sleep(2) - + /// Re subscribe Node2 to our `news` subscription - subscription2 = try node2.pubsub.gossipsub.subscribe(.init(topic: "news", signaturePolicy: .strictSign, validator: .acceptAll, messageIDFunc: .hashSequenceNumberAndFromFields)) + subscription2 = try node2.pubsub.gossipsub.subscribe( + .init( + topic: "news", + signaturePolicy: .strictSign, + validator: .acceptAll, + messageIDFunc: .hashSequenceNumberAndFromFields + ) + ) subscription2.on = subscriptionHandler - + /// Wait for the second subscription alert on Node1 and the second `news` message to arrive at Node2 - wait(for: [expectationNode1ReceivedNode2SecondSubscription, expectationNode2ReceivedSecondNode1Message], timeout: 10, enforceOrder: false) - + wait( + for: [expectationNode1ReceivedNode2SecondSubscription, expectationNode2ReceivedSecondNode1Message], + timeout: 10, + enforceOrder: false + ) + /// Stop sending messages task.cancel() - + // Wait a couple senconds sleep(2) - + //try node2.pubsub.gossipsub.unsubscribe(topic: "news").wait() subscription2.unsubscribe() - + sleep(1) - + /// Check to see if we can poll our PeerStore for known peers that support '/chat/1.0.0' let peers = try node1.peers.getPeers(supportingProtocol: SemVerProtocol(GossipSub.multicodec)!, on: nil).wait() XCTAssertEqual(peers.count, 1) XCTAssertEqual(peers.first!, node2.peerID.b58String) - + /// Ensure Node1 Subscription count equals 2 (Node2 subscribed twice) XCTAssertEqual(node2SubscriptionCount, 2) /// Ensure the Node2 received the appropriate number of `news` messages //XCTAssertGreaterThanOrEqual(node2Messages.count, messagesPerBatch * 2) XCTAssertGreaterThanOrEqual(node2Messages.count, 2) XCTAssertLessThanOrEqual(node2Messages.count, node1Messages.count) - + print("Node 1 Sent Messages") print(node1Messages) print("Node 2 Received Messages") - print(node2Messages.map { String(data: $0.data, encoding: .utf8) ?? "NIL" }.joined(separator: "\n") ) - + print(node2Messages.map { String(data: $0.data, encoding: .utf8) ?? "NIL" }.joined(separator: "\n")) + /// Stop the nodes node1.shutdown() node2.shutdown() - + print("All Done!") } - - + /// ************************************** /// Testing Gossipsub Message Propogation /// ************************************** @@ -289,15 +359,15 @@ class LibP2PPubSubGossipsubTests: XCTestCase { case beacon case beacon2beacon } - + class Node { - let libp2p:Application - let expectation:XCTestExpectation - let messageToSend:String - var messagesReceived:[String] - var handler:PubSub.SubscriptionHandler! + let libp2p: Application + let expectation: XCTestExpectation + let messageToSend: String + var messagesReceived: [String] + var handler: PubSub.SubscriptionHandler! 
- init(libp2p:Application, expectation:XCTestExpectation, messageToSend:String) { + init(libp2p: Application, expectation: XCTestExpectation, messageToSend: String) { self.libp2p = libp2p self.expectation = expectation self.messageToSend = messageToSend @@ -307,13 +377,13 @@ class LibP2PPubSubGossipsubTests: XCTestCase { } /// Consider the ConenctionManagers max concurrent connections param while setting this number (especially for the beacon structure) (the default is 25 connections) - let nodesToTest:Int = 10 - let structureToTest:NetworkStructure = .beacon - + let nodesToTest: Int = 10 + let structureToTest: NetworkStructure = .beacon + //guard nodesToTest > 2 else { XCTFail("We need at least 3 nodes to accurately perform this test..."); return } /// Init the libp2p nodes, gossipsub routers, and prepare our expectations - var nodes:[Node] = try (0.. \(peer)") - + case .data(let pubSubMessage): node.libp2p.logger.debug("Node[\(node.libp2p.peerID)]::Data -> \(pubSubMessage)") node.messagesReceived.append(String(data: pubSubMessage.data, encoding: .utf8)!) if node.messagesReceived.count == nodesToTest { node.expectation.fulfill() } - + case .error(let error): node.libp2p.logger.error("Node[\(node.libp2p.peerID)]::Error -> \(error)") XCTFail(error.localizedDescription) @@ -352,10 +422,10 @@ class LibP2PPubSubGossipsubTests: XCTestCase { return node.libp2p.eventLoopGroup.next().makeSucceededVoidFuture() } } - + /// Start the libp2p nodes - try nodes.forEach { try $0.libp2p.start() } - + for node in nodes { XCTAssertNoThrow(try node.libp2p.start()) } + /// ****************************************** /// The following logic determines the structure of the network /// ****************************************** @@ -365,11 +435,14 @@ class LibP2PPubSubGossipsubTests: XCTestCase { /// /// Network Structure Diagram /// n -> ... -> n - try nodes.enumerated().forEach { (idx, node) in - guard nodes.count > (idx+1) else { return } - try node.libp2p.newStream(to: nodes[idx + 1].libp2p.listenAddresses.first!, forProtocol: GossipSub.multicodec) + for (idx, node) in nodes.enumerated() { + guard nodes.count > (idx + 1) else { continue } + try node.libp2p.newStream( + to: nodes[idx + 1].libp2p.listenAddresses.first!, + forProtocol: GossipSub.multicodec + ) } - + case .circular: /// If we tie the ends together, we have a circular network graph /// - Note: with 3 Nodes, this results in 6 wasted / redundant message propogations, 7 Nodes -> 14 redundant messages... @@ -377,14 +450,20 @@ class LibP2PPubSubGossipsubTests: XCTestCase { /// n -> ... 
-> n -, /// ^ | /// '--------------' - try nodes.enumerated().forEach { (idx, node) in - guard nodes.count > (idx+1) else { - try node.libp2p.newStream(to: nodes[0].libp2p.listenAddresses.first!, forProtocol: GossipSub.multicodec) - return + for (idx, node) in nodes.enumerated() { + guard nodes.count > (idx + 1) else { + try node.libp2p.newStream( + to: nodes[0].libp2p.listenAddresses.first!, + forProtocol: GossipSub.multicodec + ) + continue } - try node.libp2p.newStream(to: nodes[idx + 1].libp2p.listenAddresses.first!, forProtocol: GossipSub.multicodec) + try node.libp2p.newStream( + to: nodes[idx + 1].libp2p.listenAddresses.first!, + forProtocol: GossipSub.multicodec + ) } - + case .beacon: /// Have each node reach out to the zeroeth node (a beacon set up) /// @@ -395,11 +474,11 @@ class LibP2PPubSubGossipsubTests: XCTestCase { /// : / /// n /// - try nodes.enumerated().forEach { (idx, node) in - guard idx != 0 else { return } + for (idx, node) in nodes.enumerated() { + guard idx != 0 else { continue } try node.libp2p.newStream(to: nodes[0].libp2p.listenAddresses.first!, forProtocol: GossipSub.multicodec) } - + case .beacon2beacon: /// Splits the network into Evens & Odds then connects node 0 and 1 to bridge the devide... /// @@ -410,26 +489,35 @@ class LibP2PPubSubGossipsubTests: XCTestCase { /// / \ /// n n /// - try nodes.enumerated().forEach { (idx, node) in - guard idx != 0 else { return } + for (idx, node) in nodes.enumerated() { + guard idx != 0 else { continue } if idx == 1 { /// Have Node1 reach out to Node0 - try node.libp2p.newStream(to: nodes[0].libp2p.listenAddresses.first!, forProtocol: GossipSub.multicodec) - return + try node.libp2p.newStream( + to: nodes[0].libp2p.listenAddresses.first!, + forProtocol: GossipSub.multicodec + ) + continue } if idx % 2 == 0 { /// If the node is an even number (have it reach out to Node0, our even beacon node) - try node.libp2p.newStream(to: nodes[0].libp2p.listenAddresses.first!, forProtocol: GossipSub.multicodec) + try node.libp2p.newStream( + to: nodes[0].libp2p.listenAddresses.first!, + forProtocol: GossipSub.multicodec + ) } else { /// Otherwise the node must be odd (have it reach out to Node1, our odd beacon node) - try node.libp2p.newStream(to: nodes[1].libp2p.listenAddresses.first!, forProtocol: GossipSub.multicodec) + try node.libp2p.newStream( + to: nodes[1].libp2p.listenAddresses.first!, + forProtocol: GossipSub.multicodec + ) } } - + } /// Publish some messages... - nodes.enumerated().forEach { (idx, node) in + for node in nodes { node.libp2p.eventLoopGroup.next().scheduleTask(in: .milliseconds(Int64.random(in: 500...3_000))) { //node.5!.publish(node.3.data(using: .utf8)!) node.libp2p.pubsub.publish(node.messageToSend.data(using: .utf8)!.bytes, toTopic: "fruit") @@ -440,14 +528,19 @@ class LibP2PPubSubGossipsubTests: XCTestCase { waitForExpectations(timeout: 10, handler: nil) /// Wait an additional 2 seconds to ensure message propogation doesn't echo through the network causing duplicates - sleep(1) - + sleep(2) + nodes.first!.libp2p.peers.dumpAll() nodes.first!.libp2p.pubsub.gossipsub.dumpEventList() - + + /// Close all connections + for node in nodes { + try? node.libp2p.connections.closeAllConnections().wait() + } + /// Stop the nodes - nodes.forEach { $0.libp2p.shutdown() } - + for node in nodes { node.libp2p.shutdown() } + /// Ensure that each node received every message... 
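// A compact sketch of the `.beacon2beacon` wiring above: node 1 first dials node 0 to bridge the
// even and odd halves of the network, then every even-indexed node dials node 0 and every
// odd-indexed node dials node 1. Assumes started hosts with listen addresses, exactly as in the
// test.
import LibP2P
@testable import LibP2PPubSub

func wireBeaconToBeacon(_ nodes: [Application]) throws {
    guard nodes.count > 2,
        let evenBeacon = nodes[0].listenAddresses.first,
        let oddBeacon = nodes[1].listenAddresses.first
    else { return }

    for (idx, node) in nodes.enumerated() {
        guard idx != 0 else { continue }  // node 0 is the even beacon; it dials nobody
        // Node 1 and all even-indexed nodes dial node 0; the remaining odd nodes dial node 1.
        let target = (idx == 1 || idx % 2 == 0) ? evenBeacon : oddBeacon
        try node.newStream(to: target, forProtocol: GossipSub.multicodec)
    }
}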
for node in nodes { XCTAssertEqual(node.messagesReceived.count, nodes.count) @@ -465,7 +558,7 @@ class LibP2PPubSubGossipsubTests: XCTestCase { print("All Done!") } - + /// ************************************** /// Testing JS Interoperability /// ************************************** @@ -496,11 +589,18 @@ class LibP2PPubSubGossipsubTests: XCTestCase { let topic = "news" var expectedMessageCount = 4 let messageExpectation = expectation(description: "MessagesReceived") - + try app.start() try app.pubsub.gossipsub.start() - - let subscription = try app.pubsub.gossipsub.subscribe(.init(topic: topic, signaturePolicy: .strictSign, validator: .acceptAll, messageIDFunc: .concatFromAndSequenceFields)) + + let subscription = try app.pubsub.gossipsub.subscribe( + .init( + topic: topic, + signaturePolicy: .strictSign, + validator: .acceptAll, + messageIDFunc: .concatFromAndSequenceFields + ) + ) subscription.on = { event -> EventLoopFuture in switch event { case .newPeer(let peer): @@ -508,43 +608,42 @@ class LibP2PPubSubGossipsubTests: XCTestCase { app.eventLoopGroup.next().scheduleTask(in: .milliseconds(100)) { app.pubsub.publish("Hello from swift!".data(using: .utf8)!.bytes, toTopic: topic) } - + case .data(let pubSubMessage): print(String(data: pubSubMessage.data, encoding: .utf8) ?? "NIL") expectedMessageCount -= 1 if expectedMessageCount == 0 { messageExpectation.fulfill() } - + case .error(let error): app.logger.error("Error: \(error)") } return app.eventLoopGroup.next().makeSucceededVoidFuture() } - + try? app.newStream(to: Multiaddr("/ip4/192.168.1.19/tcp/56758"), forProtocol: "/ipfs/ping/1.0.0") waitForExpectations(timeout: 30) - + let _ = app.pubsub.publish("Goodbyte from swift!".data(using: .utf8)!.bytes, toTopic: topic) //subscription.publish("Goodbyte from swift!".data(using: .utf8)!.bytes) - + subscription.unsubscribe() - + sleep(1) - + print("Shutting down libp2p chat...") app.peers.dumpAll() - + app.pubsub.gossipsub.dumpEventList() - + try app.pubsub.gossipsub.stop() app.running?.stop() app.shutdown() } - - - var nextPort:Int = 10000 + + var nextPort: Int = 10100 private func makeHost() throws -> Application { let lib = try Application(.testing, peerID: PeerID(.Ed25519)) lib.logger.logLevel = .info @@ -553,9 +652,9 @@ class LibP2PPubSubGossipsubTests: XCTestCase { lib.muxers.use(.mplex) lib.pubsub.use(.gossipsub) lib.servers.use(.tcp(host: "127.0.0.1", port: nextPort)) - + nextPort += 1 - + return lib } } diff --git a/Tests/LibP2PPubSubTests/LibP2PPubSubTests.swift b/Tests/LibP2PPubSubTests/LibP2PPubSubTests.swift index 746b47f..1c7369e 100644 --- a/Tests/LibP2PPubSubTests/LibP2PPubSubTests.swift +++ b/Tests/LibP2PPubSubTests/LibP2PPubSubTests.swift @@ -1,11 +1,26 @@ -import XCTest -@testable import LibP2PPubSub +//===----------------------------------------------------------------------===// +// +// This source file is part of the swift-libp2p open source project +// +// Copyright (c) 2022-2025 swift-libp2p project authors +// Licensed under MIT +// +// See LICENSE for license information +// See CONTRIBUTORS for the list of swift-libp2p project authors +// +// SPDX-License-Identifier: MIT +// +//===----------------------------------------------------------------------===// + import LibP2P -import LibP2PNoise import LibP2PMPLEX +import LibP2PNoise +import XCTest + +@testable import LibP2PPubSub final class LibP2PPubSubTests: XCTestCase { - + func testExample() throws { let app = try Application(.testing, peerID: PeerID(.Ed25519)) app.logger.logLevel = .trace @@ -15,12 +30,12 @@ 
final class LibP2PPubSubTests: XCTestCase { app.security.use(.noise) app.muxers.use(.mplex) app.pubsub.use(.floodsub) - + try app.start() - + sleep(2) - + app.shutdown() } - + }
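// The smoke test above only configures a host and starts/stops it. A minimal sketch of the same
// lifecycle with the gossipsub router instead of floodsub, using only configuration calls that
// already appear in this diff (noise security, mplex muxer, pubsub router, start, shutdown).
import LibP2P
import LibP2PMPLEX
import LibP2PNoise
@testable import LibP2PPubSub

func gossipsubSmokeTest() throws {
    let app = try Application(.testing, peerID: PeerID(.Ed25519))
    app.logger.logLevel = .trace
    app.security.use(.noise)
    app.muxers.use(.mplex)
    app.pubsub.use(.gossipsub)

    try app.start()
    // ... exercise the router here ...
    app.shutdown()
}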