diff --git a/.github/workflows/integration-tests.yml b/.github/workflows/integration-tests.yml index 38c631ee6..b5b6adcc3 100644 --- a/.github/workflows/integration-tests.yml +++ b/.github/workflows/integration-tests.yml @@ -31,6 +31,7 @@ jobs: - test_dataset.py - test_pcap_dataset.py - test_zeek_dataset.py + - test_fides.py steps: - uses: actions/checkout@v4 diff --git a/.github/workflows/unit-tests.yml b/.github/workflows/unit-tests.yml index fb07e70d3..376d766af 100644 --- a/.github/workflows/unit-tests.yml +++ b/.github/workflows/unit-tests.yml @@ -83,6 +83,8 @@ jobs: - test_host_ip_manager.py - test_rnn_cc_detection.py - test_idea_format.py + - test_fides_sqlite_db.py + - test_fides_module.py steps: - uses: actions/checkout@v4 diff --git a/.gitignore b/.gitignore index 1e30f537e..e02821baf 100644 --- a/.gitignore +++ b/.gitignore @@ -172,3 +172,9 @@ output/ config-live-macos-* dataset-private/* appendonly.aof +/slipsOut/flows.sqlite +/slipsOut/metadata/info.txt +/slipsOut/metadata/slips.yaml +/slipsOut/metadata/whitelist.conf +/p2p_db.sqlite + diff --git a/config/slips.yaml b/config/slips.yaml index b3cacb48f..ce766ae74 100644 --- a/config/slips.yaml +++ b/config/slips.yaml @@ -78,13 +78,13 @@ parameters: # zeek breaks the connection into smaller connections tcp_inactivity_timeout: 60 # Should we delete the previously stored data in the DB when we start? - # By default False. Meaning we don't DELETE the DB by default. + # By default false. Meaning we don't DELETE the DB by default. deletePrevdb: true # You can remember the data in all the previous runs of the DB if - # you put False. + # you put false. # Redis will remember as long as the redis server is not down. # The persistence is in memory, not disk. - # deletePrevdb : False + # deletePrevdb : false # Set the label for all the flows that are being read. # For now only normal and malware directly. No option for setting labels # with a filter @@ -154,7 +154,7 @@ detection: # May lead to false negatives evidence_detection_threshold: 0.25 # Slips can show a popup/notification with every alert. - popup_alerts: False + popup_alerts: false ############################# modules: # List of modules to ignore. By default we always ignore the template! @@ -198,7 +198,7 @@ threatintelligence: # and all TI files are loaded successfully # this is usefull if you want to ensure that slips doesn't miss the # detection of any blacklisted IPs - wait_for_TI_to_finish: False + wait_for_TI_to_finish: false # Default Path to the folder with files holding malcious IPs # All the files in this folder are read and the IPs are considered malicious # The format of the files must be, per line: "Number","IP address","Rating", @@ -275,7 +275,7 @@ exporting_alerts: # if your TAXII server is a remote server, # you can set the port to 443 or 80. port: 1234 - use_https: False + use_https: false discovery_path: /services/discovery-a inbox_path: /services/inbox-a # Collection on the server you want to push stix data to @@ -299,8 +299,8 @@ exporting_alerts: CESNET: # Slips supports exporting and importing evidence in the IDEA format to/from # warden servers. - send_alerts: False - receive_alerts: False + send_alerts: false + receive_alerts: false # warden configuration file. 
For format instructions check # yamllint disable-line rule:line-length # https://stratospherelinuxips.readthedocs.io/en/develop/exporting.html?highlight=exporting# cesnet-sharing @@ -346,7 +346,7 @@ Docker: Profiling: # [11] CPU profiling # enable cpu profiling [yes,no] - cpu_profiler_enable: False + cpu_profiler_enable: false # Available options are [dev,live] # dev for deterministic profiling. this will give precise information # about the CPU usage @@ -363,7 +363,7 @@ Profiling: # set the wait time between sampling sequences in seconds (live mode only) cpu_profiler_sampling_interval: 20 # enable memory profiling [yes,no] - memory_profiler_enable: False + memory_profiler_enable: false # set profiling mode [dev,live] memory_profiler_mode: live # profile all subprocesses [yes,no] @@ -371,8 +371,15 @@ Profiling: ############################# web_interface: port: 55000 + +############################# +global_p2p: + # this is the global p2p's trust model. can only be enabled when + # running slips on an interface + use_fides: false + ############################# P2P: # create p2p.log with additional info about peer communications? - create_p2p_logfile: False - use_p2p: False + create_p2p_logfile: false + use_p2p: false diff --git a/docs/contributing.md b/docs/contributing.md index 1c807bf64..4848efd36 100644 --- a/docs/contributing.md +++ b/docs/contributing.md @@ -171,4 +171,107 @@ Once all modules are done processing, EvidenceHandler is killed by the Process m Using one of these 3 ways - + + +## Global P2P - Fides contribution notes + +The variables used in trust evaluation and its accompanying processes, such as the database backup in persistent +SQLite storage and the in-memory persistent +Redis database of Slips, are strings, integers and floats grouped into custom dataclasses. These dataclasses can +be found in modules/fidesModule/model. All of the floating-point variables lie in the interval [-1, 1] +and some of them are restricted to [0, 1]; please refer to the modules/fidesModule/model directory. + +The Fides Module is designed to cooperate with a global peer-to-peer module. The communication is done over Slips' Redis +channels; for more information please refer to the Communication and Messages sections below. + +An example of a message answering the Fides Module's opinion request follows.
+``` +import redis + +# connect to redis database 0 +redis_client = redis.StrictRedis(host='localhost', port=6379, db=0) + +message = ''' +{ + "type": "nl2tl_intelligence_response", + "version": 1, + "data": [ + { + "sender": { + "id": "peer1", + "organisations": ["org_123", "org_456"], + "ip": "192.168.1.1" + }, + "payload": { + "intelligence": { + "target": {"type": "server", "value": "192.168.1.10"}, + "confidentiality": {"level": 0.8}, + "score": 0.5, + "confidence": 0.95 + }, + "target": "stratosphere.org" + } + }, + { + "sender": { + "id": "peer2", + "organisations": ["org_789"], + "ip": "192.168.1.2" + }, + "payload": { + "intelligence": { + "target": {"type": "workstation", "value": "192.168.1.20"}, + "confidentiality": {"level": 0.7}, + "score": -0.85, + "confidence": 0.92 + }, + "target": "stratosphere.org" + } + } + ] +} +''' + +# publish the message to the "network2fides" channel +channel = "network2fides" +redis_client.publish(channel, message) + +print(f"Message published to channel '{channel}'.") +``` + +For more information about message handling, please also refer to modules/fidesModule/messaging/message_handler.py +and to modules/fidesModule/messaging/dacite/core.py for message parsing. + + +### **Communication** +The module uses Slips' Redis to receive and send messages related to trust intelligence, +evaluation of trust in peers and alert message dispatch. + +**Used Channels** (see modules/fidesModule/messaging/message_handler.py) +| **Slips Channel Name** | **Purpose** | +|-----------------|-------------------------------------------------------------------------| +| `slips2fides` | Provides a communication channel from Slips to Fides | +| `fides2slips` | Enables the Fides Module to answer requests from slips2fides | +| `network2fides` | Facilitates communication from the network (P2P) module to the Fides Module | +| `fides2network` | Lets the Fides Module request network opinions from network modules | + +For more details, see the code [here](https://github.com/stratosphereips/fides/tree/bfac47728172d3a4bbb27a5bb53ceef424e45e4f/fides/messaging). + + +### **Messages** + +| **Message type (data['type'])** | **Channel** | **Call/Handle** | **Description** | +|:-------------------------------:|-----------------|-----------------------------------------------------------------------------------------------------------------------|-------------------------------------------------------------------------------------------------------| +| `alert` | `slips2fides` | FidesModule as self.__alerts.dispatch_alert(target=data['target'], confidence=data['confidence'],score=data['score']) | Triggers sending an alert to the network about the given target, which Slips believes to be compromised. | +| `intelligence_request` | `slips2fides` | FidesModule as self.__intelligence.request_data(target=data['target']) | Triggers a request for trust intelligence on the given target. | +| `tl2nl_alert` | `fides2network` | call dispatch_alert() of AlertProtocol class instance | Broadcasts an alert about the target through the network. | +| `tl2nl_intelligence_response` | `fides2network` | NetworkBridge.send_intelligence_response(...) | Shares intelligence with the peer that requested it. | +| `tl2nl_intelligence_request` | `fides2network` | NetworkBridge.send_intelligence_request(...) | Requests intelligence from the network regarding the given target. | +| `tl2nl_recommendation_response` | `fides2network` | NetworkBridge.send_recommendation_response(...)
| Responds to the given request_id with a recommendation on the target. | | `tl2nl_recommendation_request` | `fides2network` | NetworkBridge.send_recommendation_request(...) | Requests recommendations from recipients on a given peer. | | `tl2nl_peers_reliability` | `fides2network` | NetworkBridge.send_peers_reliability(...) | Sends peer reliability; this message is only for the network layer and is not dispatched to the network. | + + +The implementation of the Fides Module's network communication can be found in ```modules/fidesModule/messaging/network_bridge.py```. diff --git a/docs/fides_module.md b/docs/fides_module.md new file mode 100644 index 000000000..5e44be8ef --- /dev/null +++ b/docs/fides_module.md @@ -0,0 +1,98 @@ +# Fides module + +The Fides module is an essential component of the Global P2P system in Slips. + + +Traditional network defense systems depend on centralized threat intelligence, which has limitations like single points of failure, inflexibility, and reliance on trust in centralized authorities. Peer-to-peer networks offer an alternative for sharing threat intelligence but face challenges in verifying the trustworthiness of participants, including potential malicious actors. + +The Fides Module is based on a [Master's thesis](https://github.com/stratosphereips/fides/tree/bfac47728172d3a4bbb27a5bb53ceef424e45e4f) at CTU FEL by Lukáš Forst. The goal of this module is to address the challenge of the trustworthiness of peers in peer-to-peer networks by providing several trust evaluation models. It evaluates peer behavior, considers membership in trusted organizations, and assesses incoming threat data to determine reliability. Fides aggregates and weights data to enhance intrusion prevention systems, even in adversarial scenarios. Experiments show that Fides can maintain accurate threat intelligence even when 75% of the network is controlled by malicious actors, assuming the remaining 25% are trusted. + +The whole architecture is thoroughly documented in the thesis itself, which can be downloaded from the link above. + +## Docker direct use +You can use Slips with the Fides Module by enabling it in the Slips config file or by using the following commands. + +``` +docker pull stratosphereips/slips +docker run -it --rm --net=host --cap-add=NET_ADMIN stratosphereips/slips +``` + +To be able to use the Fides Module, you should use ```--cap-add=NET_ADMIN``` + +## Installation: + +``` +docker pull stratosphereips/slips +docker run -it --rm --net=host --use_fides=True stratosphereips/slips +``` + +***NOTE*** + +If you plan on using the Fides Module, please be aware that it is used only +if Slips is running on an interface. The `--use_fides=True` is ignored when Slips is run on a file. + +### Configuration +The evaluation model, evaluation thresholds and other configuration are located in fides.conf.yml + +**Possible threat intelligence evaluation models** + +| **Model Name** | **Description** | +|:-----------------------|--------------------------------------------------------------| +| `average` | Average Confidence Threat Intelligence Aggregation | +| `weightedAverage` | Weighted Average Confidence Threat Intelligence Aggregation | +| `stdevFromScore` | Standard Deviation From Score Threat Intelligence Aggregation | + +## Usage in Slips + +Fides is inactive by default in Slips. + +To enable it, change ```use_fides: false``` to ```use_fides: true``` in ```config/slips.yaml```, +then start Slips on your interface.
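+As a quick sanity check that the module is listening, the sketch below publishes an `intelligence_request` to the `slips2fides` Redis channel, mirroring the message types documented in the contributing notes. It is a minimal sketch, assuming a local Redis on port 6379 (database 0, as in the contributing example) and a running Slips instance with Fides enabled; the exact payload published back on `fides2slips` is not specified here, so treat its shape as an assumption.
+
+```
+import json
+
+import redis
+
+# connect to the Redis database Slips uses (assumed: localhost, db 0)
+redis_client = redis.StrictRedis(host="localhost", port=6379, db=0)
+
+# subscribe first, so the eventual answer on fides2slips is not missed
+subscription = redis_client.pubsub()
+subscription.subscribe("fides2slips")
+
+# ask the Fides Module for trust intelligence on a target
+redis_client.publish(
+    "slips2fides",
+    json.dumps({"type": "intelligence_request", "target": "192.168.1.10"}),
+)
+
+# print the first real message that comes back (payload shape is an assumption)
+for message in subscription.listen():
+    if message["type"] == "message":
+        print(message["data"])
+        break
+```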
+ +## Project sections + +The project is built into Slips as a module and uses Redis for communication. Integration with Slips +is seamless, and it should be easy to adjust the module for use with other IPSs. + + - Slips, the Intrusion Prevention System + - Fides Module, the trust evaluation module for global P2P interaction + + +## How it works: + +Slips interacts with other Slips peers for the following purposes: + +### Sharing opinions on peers + +If peer A is asked by peer C for its opinion on peer B, peer A sends its aggregated opinion on peer B to peer C, if there is any. + +### Asking for an opinion + +A newly connected peer will create a baseline trust by asking other peers for their opinions. + +### Dispatching alerts + +If a threat is so severe that it may impact the whole network or one or more groups, a threat alert is +dispatched to all peers, regardless of the trust level accumulated for them. + +### Answering and receiving requests from the global P2P module + +## Logs + +Slips keeps a minimal log of reports received from other peers and of peer updates in the +```output``` directory, unless another location is specified using the appropriate Slips parameter upon start. +The custom logger ```modules/fidesModule/utils/logger.py``` is used by the Fides Module for internal logging. Either Slips' logging is used, or the custom logger defaults to logging via Python's print function. + +## Limitations + +For now, Slips supports the trust intelligence evaluation; the global P2P layer is yet to be implemented. + +## Implementation notes and credit +The mathematical models for trust evaluation were written by Lukáš Forst as part of his thesis and can be accessed [here](https://github.com/LukasForst/fides/commits?author=LukasForst). + + +## TL;DR + +Slips (meaning the Fides Module here) only shares the trust level and confidence (numbers) generated by Slips about IPs with the network; +no private information is shared. diff --git a/docs/images/gw_info.jpg b/docs/images/gw_info.jpg new file mode 100644 index 000000000..6a82b7bf7 Binary files /dev/null and b/docs/images/gw_info.jpg differ diff --git a/docs/index.rst b/docs/index.rst index 628dbf0b9..7e5fe9bb7 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -18,8 +18,8 @@ This documentation gives an overview how Slips works, how to use it and how to h - **Detection modules**. Explanation of detection modules in Slips, types of input and output. See :doc:`Detection modules `. - **Architecture**. Internal architecture of Slips (profiles, timewindows), the use of Zeek and connection to Redis. See :doc:`Architecture `. - -- **Training with your own data**. Explanation on how to re-train the machine learning system of Slips with your own traffic (normal or malicious).See :doc:`Training `. + +- **Training with your own data**. Explanation on how to re-train the machine learning system of Slips with your own traffic (normal or malicious).See :doc:`Training `. - **Detections per Flow**. Explanation on how Slips works to make detections on each flow with different techniques. See :doc:`Flow Alerts `. @@ -41,9 +41,9 @@ This documentation gives an overview how Slips works, how to use it and how to h ..
toctree:: :maxdepth: 2 :hidden: - :caption: Slips - - self + :caption: Slips + + self installation usage architecture @@ -59,6 +59,4 @@ This documentation gives an overview how Slips works, how to use it and how to h FAQ code_documentation datasets - - - + fides_module diff --git a/feel_project b/feel_project index a2feeb1f9..450e3a655 160000 --- a/feel_project +++ b/feel_project @@ -1 +1 @@ -Subproject commit a2feeb1f965008074a4f59dc48ddb8e111e2f66b +Subproject commit 450e3a655fb9c880e0f9337cdd1b182af2ec9d37 diff --git a/fides b/fides index e2aba671c..8492d6cf2 160000 --- a/fides +++ b/fides @@ -1 +1 @@ -Subproject commit e2aba671c7cc305bd575f0cb642dbdf18997aff7 +Subproject commit 8492d6cf216e0182b9f96d6ed6baffd3a4c41c24 diff --git a/iris b/iris index 91b805121..4d66f9c7c 160000 --- a/iris +++ b/iris @@ -1 +1 @@ -Subproject commit 91b805121592915311a35da80175e82ab1b351a9 +Subproject commit 4d66f9c7cf5f9eeef7cb566c1ac66d6c2a76eebb diff --git a/modules/fidesModule/__init__.py b/modules/fidesModule/__init__.py new file mode 100644 index 000000000..dcfb16e21 --- /dev/null +++ b/modules/fidesModule/__init__.py @@ -0,0 +1 @@ +# This module contains code that is necessary for Slips to use the Fides trust model diff --git a/modules/fidesModule/config/fides.conf.yml b/modules/fidesModule/config/fides.conf.yml new file mode 100644 index 000000000..a83336ff5 --- /dev/null +++ b/modules/fidesModule/config/fides.conf.yml @@ -0,0 +1,151 @@ +# This is the main configuration file for the trust model +# NOTE: if you update this file's structure, you need to update fides.model.configuration.py parsing as well + +# Settings related to running inside slips +slips: + +# settings related to network protocol +network: + +# Values that define this instance of Fides +my: + id: myId + organisations: [ ] + +# Confidentiality related settings +confidentiality: + # possible levels of data that are labeled by Slips + # the value defines how secret the data are, where 0 (can be shared + # with anybody) and 1 (cannot be shared at all) + # + # the checks are: if(entity.confidentiality_level >= data.confidentiality_level) allowData() + # see https://www.cisa.gov/tlp + levels: + # share all data + - name: WHITE # name of the level, used mainly for debugging purposes + value: 0 # value that is used during computation + - name: GREEN + value: 0.2 + - name: AMBER + value: 0.5 + - name: RED + value: 0.7 + # do not share anything ever + - name: PRIVATE + value: 1.1 # never meets condition peer.privacyLevel >= data.level as peer.privacyLevel is in [0, 1] + + # if some data are not labeled, what value should we use + defaultLevel: 0 + + # rules that apply when the model is filtering data for peers + thresholds: + - level: 0.2 # for this level (and all levels > this) require + requiredTrust: 0.2 # this trust + - level: 0.5 + requiredTrust: 0.5 + - level: 0.7 + requiredTrust: 0.8 + - level: 1 + requiredTrust: 1 + +# Trust model related settings +trust: + # service trust evaluation + service: + # initial reputation that is assigned to every peer when there's a new encounter + initialReputation: 0.5 + + # maximal size of Service History, sh_max + historyMaxSize: 100 + + # settings for recommendations + recommendations: + # if the recommendation protocol should be executed + enabled: True + # when selecting recommenders, use only the ones that are currently connected + useOnlyConnected: False + # if true, protocol will only ask pre-trusted peers / organisations for recommendations + useOnlyPreconfigured: False + # require minimal number of
trusted connected peers before running recommendations + # valid only if trust.recommendations.useOnlyPreconfigured == False + requiredTrustedPeersCount: 1 + # minimal trust for trusted peer + # valid only if trust.recommendations.useOnlyPreconfigured == False + trustedPeerThreshold: 0.8 + # maximal count of peers that are asked to give recommendations on a peer, η_max + peersMaxCount: 100 + # maximal size of Recommendation History, rh_max + historyMaxSize: 100 + + # alert protocol + alert: + # how much should we trust an alert that was sent by a peer we don't know anything about + defaultTrust: 0.5 + + # trust these organisations with given trust by default + organisations: + - id: org1 # public key of the organisation + name: Organisation \#1 # name + trust: 0.1 # how much should the model trust peers from this org + enforceTrust: True # whether to allow (if false) changing trust during runtime (when we received more data from org) + confidentialityLevel: 0.7 # what level of data should be shared with peers from this org, see privacy.levels + + - id: org2 + name: Organisation \#2 + trust: 0.9 + enforceTrust: False + confidentialityLevel: 0.9 + + # trust these peers with given trust by default + # see doc for trust.organisations + peers: + - id: peer1 + name: Peer \#1 + trust: 0.1 + enforceTrust: True + confidentialityLevel: 0.7 + + - id: peer2 + name: Peer \#2 + trust: 0.9 + enforceTrust: False + confidentialityLevel: 0.9 + + # how many seconds is a network opinion considered valid + networkOpinionCacheValidSeconds: 3600 + + # which strategy should be used to evaluate interaction when a peer provided threat intelligence on a target + # see fides.evaluation.ti_evaluation.py for options + # options: ['even', 'distance', 'localDistance', 'threshold', 'maxConfidence', 'weighedDistance'] + interactionEvaluationStrategies: + used: 'threshold' + # these are configurations for the strategies; the content will be passed as **kwargs to the instance + # even strategy uses the same satisfaction value for every interaction + even: + # value used as a default satisfaction for all peers + satisfaction: 1 + # distance measures distance between aggregated network intelligence and each intelligence from the peers + distance: + # localDistance measures distance between each peer's intelligence to local threat intelligence by Slips + localDistance: + # weighedDistance combines distance and localDistance with given weight + weighedDistance: + # weight of the local TI to TI aggregated from the network + localWeight: 0.4 + # maxConfidence uses combination of distance, localDistance and even - utilizes their confidence to + # make decisions with the highest possible confidence + maxConfidence: + # threshold employs 'lower' value strategy when the confidence of the aggregated TI is lower than 'threshold', + # otherwise it uses 'higher' - 'even' and 'distance' strategies work best with this + threshold: + # minimal confidence level + threshold: 0.7 + # this strategy is used when the aggregated confidence is lower than the threshold + lower: 'even' + # and this one when it is higher + higher: 'distance' + + # Threat Intelligence aggregation strategy + # valid values - ['average', 'weightedAverage', 'stdevFromScore'] + tiAggregationStrategy: 'average' + diff --git a/modules/fidesModule/evaluation/README.md b/modules/fidesModule/evaluation/README.md new file mode 100644 index 000000000..ee22d1029 --- /dev/null +++ b/modules/fidesModule/evaluation/README.md @@ -0,0 +1 @@ +All algorithms in this package are based on SORT - see
paper. \ No newline at end of file diff --git a/modules/fidesModule/evaluation/__init__.py b/modules/fidesModule/evaluation/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/modules/fidesModule/evaluation/discount_factor.py b/modules/fidesModule/evaluation/discount_factor.py new file mode 100644 index 000000000..94aeb4dcd --- /dev/null +++ b/modules/fidesModule/evaluation/discount_factor.py @@ -0,0 +1,9 @@ +def compute_discount_factor() -> float: + """ + Computes discount factor used for `competence + (discount) * integrity` to lower + the expectations of current peer for future interaction. + + :return: discount factor for integrity + """ + # arbitrary value -1/2 explained in the paper + return -0.5 diff --git a/modules/fidesModule/evaluation/recommendation/__init__.py b/modules/fidesModule/evaluation/recommendation/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/modules/fidesModule/evaluation/recommendation/new_history.py b/modules/fidesModule/evaluation/recommendation/new_history.py new file mode 100644 index 000000000..387e70e0e --- /dev/null +++ b/modules/fidesModule/evaluation/recommendation/new_history.py @@ -0,0 +1,78 @@ +from ...model.configuration import TrustModelConfiguration +from ...model.peer_trust_data import PeerTrustData +from ...model.recommendation import Recommendation +from ...model.recommendation_history import RecommendationHistoryRecord, RecommendationHistory +from ...utils.time import now + + +def create_recommendation_history_for_peer( + configuration: TrustModelConfiguration, + peer: PeerTrustData, + recommendation: Recommendation, + history_factor: float, + er_ij: float, + ecb_ij: float, + eib_ij: float +) -> RecommendationHistory: + """ + Creates new recommendation_history for given peer and its recommendations. + + :param configuration: configuration for current trust model + :param peer: peer "k" which provided recommendation r + :param recommendation: recommendation provided by peer k + :param history_factor: int(mean(size of history) / maximal history size) + :param er_ij: estimation about reputation + :param ecb_ij: estimation about competence belief + :param eib_ij: estimation about integrity belief + :return: + """ + rs_ik = __compute_recommendation_satisfaction_parameter(recommendation, er_ij, ecb_ij, eib_ij) + rw_ik = __compute_weight_of_recommendation(configuration, recommendation, history_factor) + + updated_history = peer.recommendation_history + [RecommendationHistoryRecord(satisfaction=rs_ik, + weight=rw_ik, + timestamp=now())] + # fix history len if we reached max size + if len(updated_history) > configuration.recommendations.history_max_size: + last_idx = len(updated_history) + updated_history = updated_history[last_idx - configuration.recommendations.history_max_size: last_idx] + + return updated_history + + +def __compute_recommendation_satisfaction_parameter( + recommendation: Recommendation, + er_ij: float, + ecb_ij: float, + eib_ij: float +) -> float: + """ + Computes satisfaction parameter - how much was peer satisfied with provided data. 
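+
+    As implemented below, each reported value is compared with its estimate:
+    rs_ik = (r_diff + cb_diff + ib_diff) / 3, where every *_diff term is
+    1 - |reported - estimated| / estimated, falling back to 0 when the
+    corresponding estimate is not positive.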
+ + :param recommendation: recommendation from the peer + :param er_ij: estimation about reputation + :param ecb_ij: estimation about competence belief + :param eib_ij: estimation about integrity belief + :return: recommendation satisfaction rs_ik + """ + r_diff = (1 - abs(recommendation.recommendation - er_ij) / er_ij) if er_ij > 0 else 0 + cb_diff = (1 - abs(recommendation.competence_belief - ecb_ij) / ecb_ij) if ecb_ij > 0 else 0 + ib_diff = (1 - abs(recommendation.integrity_belief - eib_ij) / eib_ij) if eib_ij > 0 else 0 + return (r_diff + cb_diff + ib_diff) / 3 + + +def __compute_weight_of_recommendation( + configuration: TrustModelConfiguration, + recommendation: Recommendation, + history_factor: float +) -> float: + """ + Computes weight of recommendation - in model's notation rw^z_ik. + :param configuration: current trust model config + :param recommendation: recommendation from the peer + :param history_factor: int(mean(size of history) / maximal history size) + :return: recommendation weight rw^z_ik + """ + service_history = recommendation.service_history_size / configuration.service_history_max_size + used_peers = recommendation.initial_reputation_provided_by_count / configuration.recommendations.peers_max_count + return history_factor * service_history + (1 - history_factor) * used_peers diff --git a/modules/fidesModule/evaluation/recommendation/peer_update.py b/modules/fidesModule/evaluation/recommendation/peer_update.py new file mode 100644 index 000000000..9e6a7efac --- /dev/null +++ b/modules/fidesModule/evaluation/recommendation/peer_update.py @@ -0,0 +1,116 @@ +import dataclasses +from math import sqrt +from typing import List + +from ...evaluation.discount_factor import compute_discount_factor +from ...model.configuration import TrustModelConfiguration +from ...model.peer_trust_data import PeerTrustData +from ...model.recommendation_history import RecommendationHistory + + +# noinspection DuplicatedCode +# TODO: [+] try to abstract this +def update_recommendation_data_for_peer( + configuration: TrustModelConfiguration, + peer: PeerTrustData, + new_history: RecommendationHistory +) -> PeerTrustData: + """ + Computes and updates all recommendation data for given peer with new_history. + + Does not modify given trust values directly, returns new object - however, this + method does not create new collections as they're not being modified, they're simply copied. 
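+
+    The resulting recommendation trust follows the in-code comments below:
+    rt_ik = (rh_ik / rh_max) * (rcb_ik - 0.5 * rib_ik)
+            + (1 - rh_ik / rh_max) * r_ik,
+    i.e. own experience weighted by how full the history is, plus reputation.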
+ + :param configuration: current trust model configuration + :param peer: peer to be updated, its recommendation_history is older than new_history + :param new_history: history to be used as base for recommendation computation + :return: new object peer trust data with updated recommendation_trust and recommendation_history + """ + fading_factor = __compute_fading_factor(configuration, new_history) + competence_belief = __compute_competence_belief(new_history, fading_factor) + integrity_belief = __compute_integrity_belief(new_history, fading_factor, competence_belief) + integrity_discount = compute_discount_factor() + + history_factor = len(new_history) / configuration.recommendations.history_max_size + + # (rh_ik / rh_max) * (rcb_ik -0.5 * rib_ik) -> where -0.5 is discount factor + reputation_trust_own_experience = history_factor * (competence_belief + integrity_discount * integrity_belief) + # (1 - (rh_ik / rh_max)) * r_ik + reputation_experience = (1 - history_factor) * peer.reputation + # and now add both parts together + recommendation_trust = reputation_trust_own_experience + reputation_experience + + updated_trust = dataclasses.replace(peer, + recommendation_trust=recommendation_trust, + recommendation_history=new_history + ) + + return updated_trust + + +def __compute_fading_factor(configuration: TrustModelConfiguration, + recommendation_history: RecommendationHistory) -> List[float]: + """ + Computes fading factor for each record in recommendation history. + + In model's notation rf^z_ik where "z" is index in recommendation history. + + :param configuration: trust models configuration + :param recommendation_history: history for which should be fading factor generated + :return: ordered list of fading factors, index of fading factor matches record in RecommendationHistory + """ + # TODO: [?] this might be time based in the future + # f^k_ij = k / sh_ij + # where 1 <= k <= sh_ij + # Linear forgetting + # history_size = len(recommendation_history) + # return [i / history_size for i, _ in enumerate(recommendation_history, start=1)] + + # Do not forget anything + return [1] * len(recommendation_history) + + +def __compute_competence_belief(recommendation_history: RecommendationHistory, fading_factor: List[float]) -> float: + """ + Computes competence belief - rcb_ik. + + :param recommendation_history: history for peer k + :param fading_factor: fading factors for given history + :return: reputation competence belief for given data + """ + assert len(recommendation_history) == len(fading_factor), \ + "Recommendation history must have same length as fading factors." + + normalisation = sum( + [recommendation.weight * fading for recommendation, fading in zip(recommendation_history, fading_factor)]) + + belief = sum([service.satisfaction * service.weight * fading + for service, fading + in zip(recommendation_history, fading_factor)]) + + return belief / normalisation if normalisation > 0 else 0 + + +def __compute_integrity_belief(recommendation_history: RecommendationHistory, + fading_factor: List[float], + recommendation_competence_belief: float) -> float: + """ + Computes integrity belief - rib_ik. 
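+
+    As implemented below, this is the deviation of the weighted satisfaction
+    values from the competence belief:
+    rib_ik = sqrt( sum_z (rs^z_ik * w_mean * f_mean - rcb_ik)^2 / rh_ik )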
+ + :param recommendation_competence_belief: rcb_ik competence belief for given recommendation history and fading factor + :param recommendation_history: history for peer k + :param fading_factor: fading factors for given history + :return: integrity belief for given data + """ + assert len(recommendation_history) == len(fading_factor), \ + "Recommendation history must have same length as fading factors." + + history_size = len(recommendation_history) + weight_mean = sum(service.weight for service in recommendation_history) / history_size + fading_mean = sum(fading_factor) / history_size + + sat = sum((recommendation.satisfaction * weight_mean * fading_mean - recommendation_competence_belief) ** 2 + for recommendation + in recommendation_history) + + return sqrt(sat / history_size) diff --git a/modules/fidesModule/evaluation/recommendation/process.py b/modules/fidesModule/evaluation/recommendation/process.py new file mode 100644 index 000000000..d0368e2e8 --- /dev/null +++ b/modules/fidesModule/evaluation/recommendation/process.py @@ -0,0 +1,140 @@ +import dataclasses +from typing import Dict + +from ...evaluation.discount_factor import compute_discount_factor +from ...evaluation.recommendation.new_history import create_recommendation_history_for_peer +from ...evaluation.recommendation.peer_update import update_recommendation_data_for_peer +from ...model.aliases import PeerId +from ...model.configuration import TrustModelConfiguration +from ...model.peer_trust_data import TrustMatrix, PeerTrustData +from ...model.recommendation import Recommendation + + +def process_new_recommendations( + configuration: TrustModelConfiguration, + subject: PeerTrustData, + matrix: TrustMatrix, + recommendations: Dict[PeerId, Recommendation] +) -> TrustMatrix: + """ + Evaluates received recommendations, computing the subject's reputation and the recommendation + trust for each peer in :param recommendations. + + This function should be called when new recommendations are available. + + Returns dictionary with peers which were updated. + + :param configuration: configuration of the current trust model + :param subject: subject of recommendations, this peer was asking other peers for recommendation about + this subject, in model's notation this is "j" + :param matrix: trust matrix with peers that provided recommendations, in model's notation this is "k"s, + part of the T_i set + :param recommendations: responses received from the network when + asking for recommendations, peer ids here are in model's notation "k"s + :return: new matrix that contains only peers that were updated - it should contain + the updated subject as well as all peers that provided recommendations + """ + # verify that peers with responses are in trust matrix + for peer in recommendations.keys(): + assert matrix[peer] is not None, f"Peer {peer} is not present in peer matrix."
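+
+    # the block below mirrors the reputation formula used in this model:
+    # r_ij = (sh_mean / sh_max) * (ecb_ij - 0.5 * eib_ij)
+    #        + (1 - sh_mean / sh_max) * er_ij
+    # where -0.5 is the integrity discount factor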
+ + er_ij = __estimate_recommendation(matrix, recommendations) + ecb_ij, eib_ij = __estimate_competence_integrity_belief(matrix, recommendations) + + history_sizes = [r.service_history_size for r in recommendations.values()] + history_mean = int(sum(history_sizes) / len(history_sizes)) + + integrity_discount = compute_discount_factor() + history_factor = history_mean / configuration.service_history_max_size + # ecb_ij -0.5 * eib_ij (where -0.5 is integrity discount) + own_experience = history_factor * (ecb_ij + integrity_discount * eib_ij) + reputation_experience = (1 - history_factor) * er_ij + + # r_ij + reputation = own_experience + reputation_experience + # now update final trust for the subject with new reputation + # we also trust the subject same with service as well as with recommendations + # we also set service_trust if it is not set, because for the first interaction it is equal to reputation + updated_subject_trust = dataclasses \ + .replace(subject, + service_trust=max(subject.service_trust, reputation), + reputation=reputation, + recommendation_trust=reputation, + initial_reputation_provided_by_count=len(recommendations) + ) + peers_updated_matrix = {updated_subject_trust.peer_id: updated_subject_trust} + + # now we need to reflect performed reputation query and update how much we trust other peers + for peer_id, recommendation in recommendations.items(): + peer = matrix[peer_id] + # build new history + new_history = create_recommendation_history_for_peer( + configuration=configuration, peer=peer, recommendation=recommendation, + history_factor=history_factor, er_ij=er_ij, ecb_ij=ecb_ij, eib_ij=eib_ij + ) + # and update peer and its recommendation data + updated_peer = update_recommendation_data_for_peer(configuration=configuration, + peer=peer, + new_history=new_history) + peers_updated_matrix[updated_peer.peer_id] = updated_peer + + return peers_updated_matrix + + +def __estimate_recommendation( + matrix: TrustMatrix, + recommendations: Dict[PeerId, Recommendation] +) -> float: + """ + Computes estimation about recommendation. + + In model's notation er_ij. + + :param matrix: trust matrix with peers that provided recommendations + :param recommendations: responses from the peers + :return: estimation about recommendation er_ij + """ + normalisation = sum([ + matrix[peer].recommendation_trust * response.initial_reputation_provided_by_count + for peer, response + in recommendations.items()] + ) + + recommendations = sum( + [matrix[peer].recommendation_trust * response.initial_reputation_provided_by_count * response.recommendation + for peer, response + in recommendations.items()]) + + return recommendations / normalisation if normalisation > 0 else 0 + + +def __estimate_competence_integrity_belief( + matrix: TrustMatrix, + recommendations: Dict[PeerId, Recommendation] +) -> [float, float]: + """ + Estimates about competence and integrity beliefs. + + In model's notation ecb_ij and eib_ij. 
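+
+    As computed below:
+    ecb_ij = sum_k(rt_ik * sh_kj * cb_kj) / sum_k(rt_ik * sh_kj)
+    eib_ij = sum_k(rt_ik * sh_kj * ib_kj) / sum_k(rt_ik * sh_kj)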
+ + :param matrix: trust matrix with peers that provided recommendations + :param recommendations: responses from the peers + :return: tuple with [competence, integrity] beliefs -> [ecb_ij, eib_ij] + """ + normalisation = 0 + competence = 0 + integrity = 0 + + # as we would need to iterate three times, it's just better to make for cycle + for peer, response in recommendations.items(): + trust_history_size = matrix[peer].recommendation_trust * response.service_history_size + # rt_ik * sh_kj + normalisation += trust_history_size + # rt_ik * sh_kj * cb_kj + competence += trust_history_size * response.competence_belief + # rt_ik * sh_kj * ib_kj + integrity += trust_history_size * response.integrity_belief + + competence_belief = competence / normalisation if normalisation > 0 else 0 + integrity_belief = integrity / normalisation if normalisation > 0 else 0 + + return [competence_belief, integrity_belief] diff --git a/modules/fidesModule/evaluation/recommendation/selection.py b/modules/fidesModule/evaluation/recommendation/selection.py new file mode 100644 index 000000000..b38c789d2 --- /dev/null +++ b/modules/fidesModule/evaluation/recommendation/selection.py @@ -0,0 +1,25 @@ +from math import sqrt +from typing import Dict, List + +from ...model.aliases import PeerId + + +def select_trustworthy_peers_for_recommendations( + data: Dict[PeerId, float], + max_peers: int +) -> List[PeerId]: + """ + Selects peers that can be asked for recommendation. + :param data: PeerId: Peer.recommendation_trust + :param max_peers: maximum of peers to select + :return: list of peers that should be asked for recommendation + """ + mean = sum(data.values()) / len(data.values()) + var = sqrt(sum((rt - mean) ** 2 for rt in data.values())) + lowest_rt = mean - var + # select only peers that have recommendation_trust higher than mean - variance + candidates = sorted([ + {'id': peer_id, 'rt': rt} for peer_id, rt in data.items() if rt >= lowest_rt + ], key=lambda x: x['rt'], reverse=True) + # and now cut them at max + return [p['id'] for p in candidates[: max_peers]] diff --git a/modules/fidesModule/evaluation/service/__init__.py b/modules/fidesModule/evaluation/service/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/modules/fidesModule/evaluation/service/interaction.py b/modules/fidesModule/evaluation/service/interaction.py new file mode 100644 index 000000000..cec4b5ec2 --- /dev/null +++ b/modules/fidesModule/evaluation/service/interaction.py @@ -0,0 +1,27 @@ +from enum import Enum + +Satisfaction = float +"""Represents value how much was client satisfied with the interaction +0 <= satisfaction <= 1 where 0 is NOT satisfied and 1 is satisfied. +""" + + +class SatisfactionLevels: + Ok: float = 1 + Unsure: float = 0.5 + + +class Weight(Enum): + """How much was the interaction important. 
+ 0 <= weight <= 1 + where 0 is unimportant and 1 is important + """ + FIRST_ENCOUNTER = 0.1 + PING = 0.2 + INTELLIGENCE_NO_DATA_REPORT = 0.3 + INTELLIGENCE_REQUEST = 0.5 + ALERT = 0.7 + RECOMMENDATION_REQUEST = 0.7 + INTELLIGENCE_DATA_REPORT = 1 + RECOMMENDATION_RESPONSE = 1 + ERROR = 1 diff --git a/modules/fidesModule/evaluation/service/peer_update.py b/modules/fidesModule/evaluation/service/peer_update.py new file mode 100644 index 000000000..732584a93 --- /dev/null +++ b/modules/fidesModule/evaluation/service/peer_update.py @@ -0,0 +1,122 @@ +import dataclasses +from math import sqrt +from typing import List + +from ...evaluation.discount_factor import compute_discount_factor +from ...model.configuration import TrustModelConfiguration +from ...model.peer_trust_data import PeerTrustData +from ...model.service_history import ServiceHistory +from ...utils import bound + + +# noinspection DuplicatedCode +# TODO: [+] try to abstract this + +def update_service_data_for_peer( + configuration: TrustModelConfiguration, + peer: PeerTrustData, + new_history: ServiceHistory +) -> PeerTrustData: + """ + Computes and updates PeerTrustData.service_trust - st_ij - for peer j - based on the given data. + + Does not modify given trust values directly, returns new object - however, this + method does not create new collections as they're not being modified, they're simply copied. + + :param configuration: configuration of the current trust model + :param peer: trust data for peer j with old history, to be updated + :param new_history: history with updated records + :return: new peer trust data object with fresh service_trust, competence_belief, integrity_belief + and service_history + """ + + fading_factor = __compute_fading_factor(configuration, new_history) + competence_belief = __compute_competence_belief(new_history, fading_factor) + integrity_belief = __compute_integrity_belief(new_history, fading_factor, competence_belief) + integrity_discount = compute_discount_factor() + + history_factor = len(new_history) / configuration.service_history_max_size + + # (sh_ij / sh_max) * (cb_ij -0.5 * ib_ij) -> where -0.5 is discount factor + service_trust_own_experience = history_factor * (competence_belief + integrity_discount * integrity_belief) + # (1 - (sh_ij / sh_max)) * r_ij + service_trust_reputation = (1 - history_factor) * peer.reputation + # and now add both parts together + service_trust = service_trust_own_experience + service_trust_reputation + # TODO: [?] verify why do we need that + # (case when the data do not follow normal distribution and ib is higher then mean) + service_trust = bound(service_trust, 0, 1) + + updated_trust = dataclasses.replace(peer, + service_trust=service_trust, + competence_belief=competence_belief, + integrity_belief=integrity_belief, + service_history=new_history + ) + + return updated_trust + + +def __compute_fading_factor(configuration: TrustModelConfiguration, service_history: ServiceHistory) -> List[float]: + """ + Computes fading factor for each record in service history. + + In model's notation f^k_ij where "k" is index in service history. + + :param configuration: trust models configuration + :param service_history: history for which should be fading factor generated + :return: ordered list of fading factors, index of fading factor matches record in ServiceHistory + """ + # TODO: [?] 
this might be time based in the future + # f^k_ij = k / sh_ij + # where 1 <= k <= sh_ij + + # Linear forgetting + # history_size = len(service_history) + # return [i / history_size for i, _ in enumerate(service_history, start=1)] + + # Do not forget anything + return [1] * len(service_history) + + +def __compute_competence_belief(service_history: ServiceHistory, fading_factor: List[float]) -> float: + """ + Computes competence belief - cb_ij. + + :param service_history: history for peer j + :param fading_factor: fading factors for given history + :return: competence belief for given data + """ + assert len(service_history) == len(fading_factor), "Service history must have same length as fading factors." + + normalisation = sum([service.weight * fading for service, fading in zip(service_history, fading_factor)]) + belief = sum([service.satisfaction * service.weight * fading + for service, fading + in zip(service_history, fading_factor)]) + + return belief / normalisation + + +def __compute_integrity_belief(service_history: ServiceHistory, + fading_factor: List[float], + competence_belief: float) -> float: + """ + Computes integrity belief - ib_ij. + + :param competence_belief: competence belief for given service history and fading factor + :param service_history: history for peer j + :param fading_factor: fading factors for given history + :return: integrity belief for given data + """ + assert len(service_history) == len(fading_factor), "Service history must have same length as fading factors." + + history_size = len(service_history) + weight_mean = sum([service.weight for service in service_history]) / history_size + fading_mean = sum(fading_factor) / history_size + + sat = sum([(service.satisfaction * weight_mean * fading_mean - competence_belief) ** 2 + for service + in service_history]) + + ib = sqrt(sat / history_size) + return ib diff --git a/modules/fidesModule/evaluation/service/process.py b/modules/fidesModule/evaluation/service/process.py new file mode 100644 index 000000000..159c382cf --- /dev/null +++ b/modules/fidesModule/evaluation/service/process.py @@ -0,0 +1,40 @@ +import dataclasses + +from ...evaluation.service.interaction import Satisfaction, Weight +from ...evaluation.service.peer_update import update_service_data_for_peer +from ...model.configuration import TrustModelConfiguration +from ...model.peer_trust_data import PeerTrustData +from ...model.service_history import ServiceHistoryRecord +from ...utils.logger import Logger +from ...utils.time import now + +logger = Logger(__name__) + + +def process_service_interaction( + configuration: TrustModelConfiguration, + peer: PeerTrustData, + satisfaction: Satisfaction, + weight: Weight +) -> PeerTrustData: + """Processes given interaction and updates trust data.""" + new_history = peer.service_history + [ServiceHistoryRecord( + satisfaction=satisfaction, + weight=weight.value, + timestamp=now() + )] + # now restrict new history to max length + if len(new_history) > configuration.service_history_max_size: + last = len(new_history) + new_history = new_history[last - configuration.service_history_max_size: last] + + # we don't update service trust for fixed trust peers + if peer.has_fixed_trust: + logger.debug(f"Peer {peer.peer_id} has fixed trust.") + return dataclasses.replace(peer, service_history=new_history) + else: + return update_service_data_for_peer( + configuration=configuration, + peer=peer, + new_history=new_history + ) diff --git a/modules/fidesModule/evaluation/ti_aggregation.py 
b/modules/fidesModule/evaluation/ti_aggregation.py new file mode 100644 index 000000000..14aae9be7 --- /dev/null +++ b/modules/fidesModule/evaluation/ti_aggregation.py @@ -0,0 +1,86 @@ +from dataclasses import dataclass +from typing import List + +import numpy as np + +from ..model.peer_trust_data import PeerTrustData +from ..model.threat_intelligence import ThreatIntelligence +from ..utils import bound + + +@dataclass +class PeerReport: + report_ti: ThreatIntelligence + """Threat intelligence report.""" + + reporter_trust: PeerTrustData + """How much does Slips trust the reporter.""" + + +class TIAggregation: + + def assemble_peer_opinion(self, data: List[PeerReport]) -> ThreatIntelligence: + """ + Assemble reports given by all peers and compute the overall network opinion. + + :param data: a list of peers and their reports, in the format given by TrustDB.get_opinion_on_ip() + :return: final score and final confidence + """ + raise NotImplementedError('') + + +class AverageConfidenceTIAggregation(TIAggregation): + + def assemble_peer_opinion(self, data: List[PeerReport]) -> ThreatIntelligence: + """ + Uses average when computing final confidence. + """ + reports_ti = [d.report_ti for d in data] + reporters_trust = [d.reporter_trust.service_trust for d in data] + + normalize_net_trust_sum = sum(reporters_trust) + weighted_reporters = [trust / normalize_net_trust_sum for trust in reporters_trust] \ + if normalize_net_trust_sum > 0 else [0] * len(reporters_trust) + + combined_score = sum(r.score * w for r, w, in zip(reports_ti, weighted_reporters)) + combined_confidence = sum(r.confidence * w for r, w, in zip(reports_ti, reporters_trust)) / len(reporters_trust) + + return ThreatIntelligence(score=combined_score, confidence=combined_confidence) + + +class WeightedAverageConfidenceTIAggregation(TIAggregation): + + def assemble_peer_opinion(self, data: List[PeerReport]) -> ThreatIntelligence: + reports_ti = [d.report_ti for d in data] + reporters_trust = [d.reporter_trust.service_trust for d in data] + + normalize_net_trust_sum = sum(reporters_trust) + weighted_reporters = [trust / normalize_net_trust_sum for trust in reporters_trust] + + combined_score = sum(r.score * w for r, w, in zip(reports_ti, weighted_reporters)) + combined_confidence = sum(r.confidence * w for r, w, in zip(reports_ti, weighted_reporters)) + + return ThreatIntelligence(score=combined_score, confidence=combined_confidence) + + +class StdevFromScoreTIAggregation(TIAggregation): + + def assemble_peer_opinion(self, data: List[PeerReport]) -> ThreatIntelligence: + reports_ti = [d.report_ti for d in data] + reporters_trust = [d.reporter_trust.service_trust for d in data] + + normalize_net_trust_sum = sum(reporters_trust) + weighted_reporters = [trust / normalize_net_trust_sum for trust in reporters_trust] + + merged_score = [r.score * r.confidence * w for r, w, in zip(reports_ti, weighted_reporters)] + combined_score = sum(merged_score) + combined_confidence = bound(1 - np.std(merged_score), 0, 1) + + return ThreatIntelligence(score=combined_score, confidence=combined_confidence) + + +TIAggregationStrategy = { + 'average': AverageConfidenceTIAggregation, + 'weightedAverage': WeightedAverageConfidenceTIAggregation, + 'stdevFromScore': StdevFromScoreTIAggregation, +} diff --git a/modules/fidesModule/evaluation/ti_evaluation.py b/modules/fidesModule/evaluation/ti_evaluation.py new file mode 100644 index 000000000..a2bf0f00d --- /dev/null +++ b/modules/fidesModule/evaluation/ti_evaluation.py @@ -0,0 +1,255 @@ +from collections import
defaultdict +from typing import Dict, Tuple, Optional + +from ..evaluation.service.interaction import Satisfaction, Weight, SatisfactionLevels +from ..messaging.model import PeerIntelligenceResponse +from ..model.aliases import PeerId, Target +from ..model.peer_trust_data import PeerTrustData, TrustMatrix +from ..model.threat_intelligence import SlipsThreatIntelligence +from ..utils.logger import Logger + +logger = Logger(__name__) + + +class TIEvaluation: + def evaluate(self, + aggregated_ti: SlipsThreatIntelligence, + responses: Dict[PeerId, PeerIntelligenceResponse], + trust_matrix: TrustMatrix, + **kwargs, + ) -> Dict[PeerId, Tuple[PeerTrustData, Satisfaction, Weight]]: + """Evaluate interaction with all peers that gave intelligence responses.""" + raise NotImplementedError('Use an implementation rather than the interface!') + + @staticmethod + def _weight() -> Weight: + return Weight.INTELLIGENCE_DATA_REPORT + + @staticmethod + def _assert_keys(responses: Dict[PeerId, PeerIntelligenceResponse], trust_matrix: TrustMatrix): + assert trust_matrix.keys() == responses.keys() + + +class EvenTIEvaluation(TIEvaluation): + """Basic implementation of TI evaluation; all responses are evaluated the same. + This implementation corresponds with the Sality botnet. + """ + + def __init__(self, **kwargs): + self.__kwargs = kwargs + self.__satisfaction = kwargs.get('satisfaction', SatisfactionLevels.Ok) + + def evaluate(self, + aggregated_ti: SlipsThreatIntelligence, + responses: Dict[PeerId, PeerIntelligenceResponse], + trust_matrix: TrustMatrix, + **kwargs, + ) -> Dict[PeerId, Tuple[PeerTrustData, Satisfaction, Weight]]: + super()._assert_keys(responses, trust_matrix) + + return {p.peer_id: (p, self.__satisfaction, self._weight()) for p in + trust_matrix.values()} + + +class DistanceBasedTIEvaluation(TIEvaluation): + """Implementation that takes distance from the aggregated result and uses it as a penalisation.""" + + def __init__(self, **kwargs): + self.__kwargs = kwargs + + def evaluate(self, + aggregated_ti: SlipsThreatIntelligence, + responses: Dict[PeerId, PeerIntelligenceResponse], + trust_matrix: TrustMatrix, + **kwargs, + ) -> Dict[PeerId, Tuple[PeerTrustData, Satisfaction, Weight]]: + super()._assert_keys(responses, trust_matrix) + return self._build_evaluation( + baseline_score=aggregated_ti.score, + baseline_confidence=aggregated_ti.confidence, + responses=responses, + trust_matrix=trust_matrix + ) + + def _build_evaluation( + self, + baseline_score: float, + baseline_confidence: float, + responses: Dict[PeerId, PeerIntelligenceResponse], + trust_matrix: TrustMatrix, + ) -> Dict[PeerId, Tuple[PeerTrustData, Satisfaction, Weight]]: + satisfactions = { + peer_id: self._satisfaction( + baseline_score=baseline_score, + baseline_confidence=baseline_confidence, + report_score=ti.intelligence.score, + report_confidence=ti.intelligence.confidence + ) + for peer_id, ti in responses.items() + } + + return {p.peer_id: (p, satisfactions[p.peer_id], self._weight()) for p in + trust_matrix.values()} + + @staticmethod + def _satisfaction(baseline_score: float, + baseline_confidence: float, + report_score: float, + report_confidence: float) -> Satisfaction: + return (1 - (abs(baseline_score - report_score) / 2) * report_confidence) * baseline_confidence + + +class LocalCompareTIEvaluation(DistanceBasedTIEvaluation): + """This strategy compares received threat intelligence with the threat intelligence from local database.
+ + Uses the same penalisation system as DistanceBasedTIEvaluation with the difference that as a baseline, + it does not use aggregated value, but rather local intelligence. + + If it does not find threat intelligence for the target, it falls backs to DistanceBasedTIEvaluation. + """ + + def __init__(self, **kwargs): + super().__init__(**kwargs) + self.__default_ti_getter = kwargs.get('default_ti_getter', None) + + def get_local_ti(self, + target: Target, + local_ti: Optional[SlipsThreatIntelligence] = None) -> Optional[SlipsThreatIntelligence]: + if local_ti: + return local_ti + elif self.__default_ti_getter: + return self.__default_ti_getter(target) + else: + return None + + def evaluate(self, + aggregated_ti: SlipsThreatIntelligence, + responses: Dict[PeerId, PeerIntelligenceResponse], + trust_matrix: TrustMatrix, + local_ti: Optional[SlipsThreatIntelligence] = None, + **kwargs, + ) -> Dict[PeerId, Tuple[PeerTrustData, Satisfaction, Weight]]: + super()._assert_keys(responses, trust_matrix) + + ti = self.get_local_ti(aggregated_ti.target, local_ti) + if not ti: + ti = aggregated_ti + logger.warn(f'No local threat intelligence available for target {ti.target}! ' + + 'Falling back to DistanceBasedTIEvaluation.') + + return self._build_evaluation( + baseline_score=ti.score, + baseline_confidence=ti.confidence, + responses=responses, + trust_matrix=trust_matrix + ) + + +class WeighedDistanceToLocalTIEvaluation(TIEvaluation): + """Strategy combines DistanceBasedTIEvaluation and LocalCompareTIEvaluation with the local weight parameter.""" + + def __init__(self, **kwargs): + super().__init__() + self.__distance = kwargs.get('distance', DistanceBasedTIEvaluation()) + self.__local = kwargs.get('localDistance', LocalCompareTIEvaluation()) + self.__local_weight = kwargs.get('localWeight', 0.5) + + def evaluate(self, + aggregated_ti: SlipsThreatIntelligence, + responses: Dict[PeerId, PeerIntelligenceResponse], + trust_matrix: TrustMatrix, + **kwargs, + ) -> Dict[PeerId, Tuple[PeerTrustData, Satisfaction, Weight]]: + super()._assert_keys(responses, trust_matrix) + + distance_data = self.__distance.evaluate(aggregated_ti, responses, trust_matrix, **kwargs) + local_data = self.__local.evaluate(aggregated_ti, responses, trust_matrix, **kwargs) + + return {p.peer_id: (p, + self.__local_weight * local_data[p.peer_id][1] + + (1 - self.__local_weight) * distance_data[p.peer_id][1], + self._weight() + ) for p in trust_matrix.values()} + + +class MaxConfidenceTIEvaluation(TIEvaluation): + """Strategy combines DistanceBasedTIEvaluation, LocalCompareTIEvaluation and EvenTIEvaluation + in order to achieve maximal confidence when producing decision. 
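+
+    The weights follow evaluate() below: the distance strategy is weighted by
+    the aggregated confidence, the local strategy by
+    min(1 - distance_weight, local_ti.confidence), and the even strategy
+    fills the remainder so that the three weights sum to 1.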
+ """ + + def __init__(self, **kwargs): + super().__init__() + self.__distance = kwargs.get('distance', DistanceBasedTIEvaluation()) + self.__local = kwargs.get('localDistance', LocalCompareTIEvaluation()) + self.__even = kwargs.get('even', EvenTIEvaluation()) + + def evaluate(self, + aggregated_ti: SlipsThreatIntelligence, + responses: Dict[PeerId, PeerIntelligenceResponse], + trust_matrix: TrustMatrix, + **kwargs, + ) -> Dict[PeerId, Tuple[PeerTrustData, Satisfaction, Weight]]: + super()._assert_keys(responses, trust_matrix) + zero_dict = defaultdict(lambda: (None, 0, None)) + + # weight of the distance based evaluation + distance_weight = aggregated_ti.confidence + distance_data = self.__distance.evaluate(aggregated_ti, responses, trust_matrix, **kwargs) \ + if distance_weight > 0 \ + else zero_dict + + # now we need to check if we even have some threat intelligence data + local_ti = self.__local.get_local_ti(aggregated_ti.target, **kwargs) + # weight of the local evaluation + local_weight = min(1 - distance_weight, local_ti.confidence) if local_ti else 0 + local_data = self.__local.evaluate(aggregated_ti, responses, trust_matrix, **kwargs) \ + if local_weight > 0 \ + else zero_dict + + # weight of the same eval + even_weight = 1 - distance_weight - local_weight + even_data = self.__even.evaluate(aggregated_ti, responses, trust_matrix, **kwargs) \ + if even_weight > 0 \ + else zero_dict + + def aggregate(peer: PeerId): + return distance_weight * distance_data[peer][1] + \ + local_weight * local_data[peer][1] + \ + even_weight * even_data[peer][1] + + return {p.peer_id: (p, aggregate(p.peer_id), self._weight()) for p in + trust_matrix.values()} + + +class ThresholdTIEvaluation(TIEvaluation): + """Employs DistanceBasedTIEvaluation when the confidence of the decision + is higher than given threshold. Otherwise, it uses even evaluation. 
+    """
+
+    def __init__(self, **kwargs):
+        super().__init__()
+        self.__kwargs = kwargs
+        self.__threshold = kwargs.get('threshold', 0.5)
+        self.__lower = kwargs.get('lower', EvenTIEvaluation())
+        self.__higher = kwargs.get('higher', DistanceBasedTIEvaluation())
+
+    def evaluate(self,
+                 aggregated_ti: SlipsThreatIntelligence,
+                 responses: Dict[PeerId, PeerIntelligenceResponse],
+                 trust_matrix: TrustMatrix,
+                 **kwargs,
+                 ) -> Dict[PeerId, Tuple[PeerTrustData, Satisfaction, Weight]]:
+        super()._assert_keys(responses, trust_matrix)
+
+        return self.__higher.evaluate(aggregated_ti, responses, trust_matrix) \
+            if self.__threshold <= aggregated_ti.confidence \
+            else self.__lower.evaluate(aggregated_ti, responses, trust_matrix)
+
+
+EvaluationStrategy = {
+    'even': EvenTIEvaluation,
+    'distance': DistanceBasedTIEvaluation,
+    'localDistance': LocalCompareTIEvaluation,
+    'threshold': ThresholdTIEvaluation,
+    'maxConfidence': MaxConfidenceTIEvaluation,
+    'weighedDistance': WeighedDistanceToLocalTIEvaluation
+}
diff --git a/modules/fidesModule/fidesModule.py b/modules/fidesModule/fidesModule.py
new file mode 100644
index 000000000..a4817f6f9
--- /dev/null
+++ b/modules/fidesModule/fidesModule.py
@@ -0,0 +1,235 @@
+import os
+import json
+from dataclasses import asdict
+from pathlib import Path
+
+from slips_files.common.slips_utils import utils
+from slips_files.common.abstracts.module import IModule
+from slips_files.common.parsers.config_parser import (
+    ConfigParser,
+)
+from slips_files.core.structures.alerts import (
+    dict_to_alert,
+    Alert,
+)
+from .messaging.model import NetworkMessage
+from ..fidesModule.messaging.message_handler import MessageHandler
+from ..fidesModule.messaging.network_bridge import NetworkBridge
+from ..fidesModule.model.configuration import load_configuration
+from ..fidesModule.model.threat_intelligence import SlipsThreatIntelligence, ThreatIntelligence
+from ..fidesModule.protocols.alert import AlertProtocol
+from ..fidesModule.protocols.initial_trusl import InitialTrustProtocol
+from ..fidesModule.protocols.opinion import OpinionAggregator
+from ..fidesModule.protocols.peer_list import PeerListUpdateProtocol
+from ..fidesModule.protocols.recommendation import RecommendationProtocol
+from ..fidesModule.protocols.threat_intelligence import (
+    ThreatIntelligenceProtocol,
+)
+from ..fidesModule.utils.logger import LoggerPrintCallbacks
+from ..fidesModule.messaging.redis_simplex_queue import RedisSimplexQueue, RedisDuplexQueue
+from ..fidesModule.persistence.threat_intelligence_db import (
+    SlipsThreatIntelligenceDatabase,
+)
+from ..fidesModule.persistence.trust_db import SlipsTrustDatabase
+from ..fidesModule.persistence.sqlite_db import SQLiteDB
+
+from ..fidesModule.model.alert import Alert as FidesAlert
+
+
+class FidesModule(IModule):
+    """
+    This module only runs when Slips is running on an interface.
+    """
+
+    name = "Fides"
+    description = "Trust computation module for P2P interactions."
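+    # The module talks to the network layer over Redis channels using the
+    # NetworkMessage envelope (see messaging/model.py). An illustrative
+    # "fides2network" payload (field values are made up):
+    #
+    #   {"type": "tl2nl_alert", "version": 1,
+    #    "data": {"payload": {"target": "1.2.3.4",
+    #                         "score": 0.8, "confidence": 0.5}}}
+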
+    authors = ["David Otta", "Lukáš Forst"]
+
+    def init(self):
+        self.__output = self.logger
+
+        # IModule has its own logger, no extra set-up needed
+        LoggerPrintCallbacks.clear()
+        LoggerPrintCallbacks.append(self.print)
+
+        # load trust model configuration
+        current_dir = Path(__file__).resolve().parent
+        config_path = current_dir / "config" / "fides.conf.yml"
+        self.__trust_model_config = load_configuration(str(config_path))
+
+        # prepare variables for global protocols
+        self.__bridge: NetworkBridge
+        self.__intelligence: ThreatIntelligenceProtocol
+        self.__alerts: AlertProtocol
+        self.f2n = self.db.subscribe("fides2network")
+        self.n2f = self.db.subscribe("network2fides")
+        self.s2f = self.db.subscribe("slips2fides")
+        self.ch_alert = self.db.subscribe("new_alert")
+        self.f2s = self.db.subscribe("fides2slips")
+        self.ch_ip = self.db.subscribe("new_ip")
+        self.channels = {
+            "network2fides": self.n2f,
+            "fides2network": self.f2n,
+            "slips2fides": self.s2f,
+            "fides2slips": self.f2s,
+            "new_alert": self.ch_alert,
+            "new_ip": self.ch_ip,
+        }
+
+        # this sqlite is shared between all runs, like a cache,
+        # so it shouldn't be stored in the current output dir, it should be
+        # in the main slips dir
+        self.sqlite = SQLiteDB(
+            self.logger,
+            os.path.join(os.getcwd(), self.__trust_model_config.database),
+        )
+
+    def read_configuration(self):
+        """Reads and stores the Slips configuration needed by this module."""
+        conf = ConfigParser()
+        self.__slips_config = conf.export_to()
+
+    def __setup_trust_model(self):
+        # create database wrappers for Slips using Redis
+        # trust_db = InMemoryTrustDatabase(self.__trust_model_config)
+        # ti_db = InMemoryThreatIntelligenceDatabase()
+        trust_db = SlipsTrustDatabase(
+            self.__trust_model_config, self.db, self.sqlite
+        )
+        ti_db = SlipsThreatIntelligenceDatabase(
+            self.__trust_model_config, self.db, self.sqlite
+        )
+
+        # create queues
+        # NOTE: a simplex queue is used for the communication with the
+        # network module; see the commented-out duplex variant below
+        self.network_fides_queue = RedisSimplexQueue(
+            self.db,
+            send_channel="fides2network",
+            received_channel="network2fides",
+            channels=self.channels,
+        )
+
+        # # iris uses only one channel for communication
+        # self.network_fides_queue = RedisDuplexQueue(
+        #     self.db,
+        #     channel="fides2network",
+        #     channels=self.channels,
+        # )
+
+        bridge = NetworkBridge(self.network_fides_queue)
+
+        recommendations = RecommendationProtocol(
+            self.__trust_model_config, trust_db, bridge
+        )
+        trust = InitialTrustProtocol(
+            trust_db, self.__trust_model_config, recommendations
+        )
+        peer_list = PeerListUpdateProtocol(
+            trust_db, bridge, recommendations, trust
+        )
+        opinion = OpinionAggregator(
+            self.__trust_model_config,
+            ti_db,
+            self.__trust_model_config.ti_aggregation_strategy,
+        )
+
+        intelligence = ThreatIntelligenceProtocol(
+            trust_db,
+            ti_db,
+            bridge,
+            self.__trust_model_config,
+            opinion,
+            trust,
+            self.__trust_model_config.interaction_evaluation_strategy,
+            self.__network_opinion_callback,
+        )
+        alert = AlertProtocol(
+            trust_db,
+            bridge,
+            trust,
+            self.__trust_model_config,
+            opinion,
+            self.__network_opinion_callback,
+        )
+
+        # TODO: add on_unknown and on_error handlers if necessary
+        message_handler = MessageHandler(
+            on_peer_list_update=peer_list.handle_peer_list_updated,
+            on_recommendation_request=recommendations.handle_recommendation_request,
+            on_recommendation_response=recommendations.handle_recommendation_response,
+            on_alert=alert.handle_alert,
+            on_intelligence_request=intelligence.handle_intelligence_request,
+            
on_intelligence_response=intelligence.handle_intelligence_response, + on_unknown=None, + on_error=None, + ) + + # bind local vars + self.__bridge = bridge + self.__intelligence = intelligence + self.__alerts = alert + + # and finally execute listener + self.__bridge.listen(message_handler, block=False) + + def __network_opinion_callback(self, ti: SlipsThreatIntelligence): + """This is executed every time when trust model was able to create an + aggregated network opinion.""" + self.db.publish("fides2slips", json.dumps(ti.to_dict())) + + def shutdown_gracefully(self): + self.sqlite.close() + self.network_fides_queue.stop_all_queue_threads() + + def pre_main(self): + """ + Initializations that run only once before the main() function + runs in a loop + """ + self.__setup_trust_model() + utils.drop_root_privs() + + def main(self): + if msg := self.get_msg("new_alert"): + # if there's no string data message we can continue waiting + if not msg["data"]: + return + alert: dict = json.loads(msg["data"]) + alert: Alert = dict_to_alert(alert) + self.__alerts.dispatch_alert( + target=alert.profile.ip, + confidence=0.5, + score=0.8, + ) + # envelope = NetworkMessage( + # type="tl2nl_alert", + # version=self.__bridge.version, + # data={ + # "payload": FidesAlert( + # target=alert.profile.ip, + # score=0.8, + # confidence=0.5, + # ) + # }, + # ) + # self.db.publish("fides2network", json.dumps(asdict(envelope))) + + if msg := self.get_msg("new_ip"): + # if there's no string data message we can continue waiting + if not msg["data"]: + return + + ip = msg["data"] + + if utils.detect_ioc_type(ip) != "ip": + return + + if utils.is_ignored_ip(ip): + return + self.__intelligence.request_data(ip) + + # TODO: the code below exists for testing purposes for + # tests/integration_tests/test_fides.py + if msg := self.get_msg("fides2network"): + pass diff --git a/modules/fidesModule/messaging/__init__.py b/modules/fidesModule/messaging/__init__.py new file mode 100644 index 000000000..8753dd9db --- /dev/null +++ b/modules/fidesModule/messaging/__init__.py @@ -0,0 +1 @@ +# classes related to interprocess / Redis communication diff --git a/modules/fidesModule/messaging/dacite/__init__.py b/modules/fidesModule/messaging/dacite/__init__.py new file mode 100644 index 000000000..21efa9ea0 --- /dev/null +++ b/modules/fidesModule/messaging/dacite/__init__.py @@ -0,0 +1,29 @@ +from ..dacite.cache import set_cache_size, get_cache_size, clear_cache +from ..dacite.config import Config +from ..dacite.core import from_dict +from ..dacite.exceptions import ( + DaciteError, + DaciteFieldError, + WrongTypeError, + MissingValueError, + UnionMatchError, + StrictUnionMatchError, + ForwardReferenceError, + UnexpectedDataError, +) + +__all__ = [ + "set_cache_size", + "get_cache_size", + "clear_cache", + "Config", + "from_dict", + "DaciteError", + "DaciteFieldError", + "WrongTypeError", + "MissingValueError", + "UnionMatchError", + "StrictUnionMatchError", + "ForwardReferenceError", + "UnexpectedDataError", +] diff --git a/modules/fidesModule/messaging/dacite/cache.py b/modules/fidesModule/messaging/dacite/cache.py new file mode 100644 index 000000000..998fff7f9 --- /dev/null +++ b/modules/fidesModule/messaging/dacite/cache.py @@ -0,0 +1,25 @@ +from functools import lru_cache +from typing import TypeVar, Callable, Optional + +T = TypeVar("T", bound=Callable) + +__MAX_SIZE: Optional[int] = 2048 + + +@lru_cache(maxsize=None) +def cache(function: T) -> T: + return lru_cache(maxsize=get_cache_size(), typed=True)(function) # type: ignore + + 
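+# Illustrative usage (hypothetical names): `cache` memoizes the *wrapped
+# function object* itself, so repeated lookups reuse one LRU-cached wrapper:
+#
+#   cached_hints = cache(get_type_hints)
+#   cached_hints(SomeDataclass)  # computed once, then served from the cache
+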
+def set_cache_size(size: Optional[int]) -> None: + global __MAX_SIZE # pylint: disable=global-statement + __MAX_SIZE = size + + +def get_cache_size() -> Optional[int]: + global __MAX_SIZE # pylint: disable=global-variable-not-assigned + return __MAX_SIZE + + +def clear_cache() -> None: + cache.cache_clear() diff --git a/modules/fidesModule/messaging/dacite/config.py b/modules/fidesModule/messaging/dacite/config.py new file mode 100644 index 000000000..4832b84bf --- /dev/null +++ b/modules/fidesModule/messaging/dacite/config.py @@ -0,0 +1,25 @@ +import sys +from dataclasses import dataclass, field +from typing import Dict, Any, Callable, Optional, Type, List + +from ..dacite.frozen_dict import FrozenDict + +if sys.version_info.minor >= 8: + from functools import cached_property # type: ignore # pylint: disable=no-name-in-module +else: + # Remove when we drop support for Python<3.8 + cached_property = property # type: ignore # pylint: disable=invalid-name + + +@dataclass +class Config: + type_hooks: Dict[Type, Callable[[Any], Any]] = field(default_factory=dict) + cast: List[Type] = field(default_factory=list) + forward_references: Optional[Dict[str, Any]] = None + check_types: bool = True + strict: bool = False + strict_unions_match: bool = False + + @cached_property + def hashable_forward_references(self) -> Optional[FrozenDict]: + return FrozenDict(self.forward_references) if self.forward_references else None diff --git a/modules/fidesModule/messaging/dacite/core.py b/modules/fidesModule/messaging/dacite/core.py new file mode 100644 index 000000000..71697aebc --- /dev/null +++ b/modules/fidesModule/messaging/dacite/core.py @@ -0,0 +1,158 @@ +from dataclasses import is_dataclass +from itertools import zip_longest +from typing import TypeVar, Type, Optional, get_type_hints, Mapping, Any, Collection, MutableMapping + +from ..dacite.cache import cache +from ..dacite.config import Config +from ..dacite.data import Data +from ..dacite.dataclasses import ( + get_default_value_for_field, + DefaultValueNotFoundError, + get_fields, + is_frozen, +) +from ..dacite.exceptions import ( + ForwardReferenceError, + WrongTypeError, + DaciteError, + UnionMatchError, + MissingValueError, + DaciteFieldError, + UnexpectedDataError, + StrictUnionMatchError, +) +from ..dacite.types import ( + is_instance, + is_generic_collection, + is_union, + extract_generic, + is_optional, + extract_origin_collection, + is_init_var, + extract_init_var, + is_subclass, +) + +from dataclasses import dataclass +from typing import List, Optional + +T = TypeVar("T") + + +def from_dict(data_class: Type[T], data: Data, config: Optional[Config] = None) -> T: + """Create a data class instance from a dictionary. 
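+
+    Example (illustrative; `User` is a hypothetical dataclass)::
+
+        @dataclass
+        class User:
+            name: str
+            age: int
+
+        user = from_dict(data_class=User, data={"name": "john", "age": 30})
+        # -> User(name='john', age=30)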
+ + :param data_class: a data class type + :param data: a dictionary of a input data + :param config: a configuration of the creation process + :return: an instance of a data class + """ + init_values: MutableMapping[str, Any] = {} + post_init_values: MutableMapping[str, Any] = {} + config = config or Config() + try: + data_class_hints = cache(get_type_hints)(data_class, localns=config.hashable_forward_references) + except NameError as error: + raise ForwardReferenceError(str(error)) + data_class_fields = cache(get_fields)(data_class) + if config.strict: + extra_fields = set(data.keys()) - {f.name for f in data_class_fields} + if extra_fields: + raise UnexpectedDataError(keys=extra_fields) + for field in data_class_fields: + field_type = data_class_hints[field.name] + if field.name in data: + try: + field_data = data[field.name] + value = _build_value(type_=field_type, data=field_data, config=config) + except DaciteFieldError as error: + error.update_path(field.name) + raise + if config.check_types and not is_instance(value, field_type): + raise WrongTypeError(field_path=field.name, field_type=field_type, value=value) + else: + try: + value = get_default_value_for_field(field, field_type) + except DefaultValueNotFoundError: + if not field.init: + continue + raise MissingValueError(field.name) + if field.init: + init_values[field.name] = value + elif not is_frozen(data_class): + post_init_values[field.name] = value + instance = data_class(**init_values) + for key, value in post_init_values.items(): + setattr(instance, key, value) + return instance + + +def _build_value(type_: Type, data: Any, config: Config) -> Any: + if is_init_var(type_): + type_ = extract_init_var(type_) + if type_ in config.type_hooks: + data = config.type_hooks[type_](data) + if is_optional(type_) and data is None: + return data + if is_union(type_): + data = _build_value_for_union(union=type_, data=data, config=config) + elif is_generic_collection(type_): + data = _build_value_for_collection(collection=type_, data=data, config=config) + elif cache(is_dataclass)(type_) and isinstance(data, Mapping): + data = from_dict(data_class=type_, data=data, config=config) + for cast_type in config.cast: + if is_subclass(type_, cast_type): + if is_generic_collection(type_): + data = extract_origin_collection(type_)(data) + else: + data = type_(data) + break + return data + + +def _build_value_for_union(union: Type, data: Any, config: Config) -> Any: + types = extract_generic(union) + if is_optional(union) and len(types) == 2: + return _build_value(type_=types[0], data=data, config=config) + union_matches = {} + for inner_type in types: + try: + # noinspection PyBroadException + try: + value = _build_value(type_=inner_type, data=data, config=config) + except Exception: # pylint: disable=broad-except + continue + if is_instance(value, inner_type): + if config.strict_unions_match: + union_matches[inner_type] = value + else: + return value + except DaciteError: + pass + if config.strict_unions_match: + if len(union_matches) > 1: + raise StrictUnionMatchError(union_matches) + return union_matches.popitem()[1] + if not config.check_types: + return data + raise UnionMatchError(field_type=union, value=data) + + +def _build_value_for_collection(collection: Type, data: Any, config: Config) -> Any: + data_type = data.__class__ + if isinstance(data, Mapping) and is_subclass(collection, Mapping): + item_type = extract_generic(collection, defaults=(Any, Any))[1] + return data_type((key, _build_value(type_=item_type, data=value, config=config)) 
for key, value in data.items()) + elif isinstance(data, tuple) and is_subclass(collection, tuple): + if not data: + return data_type() + types = extract_generic(collection) + if len(types) == 2 and types[1] == Ellipsis: + return data_type(_build_value(type_=types[0], data=item, config=config) for item in data) + return data_type( + _build_value(type_=type_, data=item, config=config) for item, type_ in zip_longest(data, types) + ) + elif isinstance(data, Collection) and is_subclass(collection, Collection): + item_type = extract_generic(collection, defaults=(Any,))[0] + return data_type(_build_value(type_=item_type, data=item, config=config) for item in data) + return data diff --git a/modules/fidesModule/messaging/dacite/data.py b/modules/fidesModule/messaging/dacite/data.py new file mode 100644 index 000000000..c8e6ce4ca --- /dev/null +++ b/modules/fidesModule/messaging/dacite/data.py @@ -0,0 +1,3 @@ +from typing import Mapping, Any + +Data = Mapping[str, Any] diff --git a/modules/fidesModule/messaging/dacite/dataclasses.py b/modules/fidesModule/messaging/dacite/dataclasses.py new file mode 100644 index 000000000..8f976d8fe --- /dev/null +++ b/modules/fidesModule/messaging/dacite/dataclasses.py @@ -0,0 +1,32 @@ +from dataclasses import Field, MISSING, _FIELDS, _FIELD, _FIELD_INITVAR # type: ignore +from typing import Type, Any, TypeVar, List + +from ..dacite.cache import cache +from ..dacite.types import is_optional + +T = TypeVar("T", bound=Any) + + +class DefaultValueNotFoundError(Exception): + pass + + +def get_default_value_for_field(field: Field, type_: Type) -> Any: + if field.default != MISSING: + return field.default + elif field.default_factory != MISSING: # type: ignore + return field.default_factory() # type: ignore + elif is_optional(type_): + return None + raise DefaultValueNotFoundError() + + +@cache +def get_fields(data_class: Type[T]) -> List[Field]: + fields = getattr(data_class, _FIELDS) + return [f for f in fields.values() if f._field_type is _FIELD or f._field_type is _FIELD_INITVAR] + + +@cache +def is_frozen(data_class: Type[T]) -> bool: + return data_class.__dataclass_params__.frozen diff --git a/modules/fidesModule/messaging/dacite/exceptions.py b/modules/fidesModule/messaging/dacite/exceptions.py new file mode 100644 index 000000000..de96d0bd7 --- /dev/null +++ b/modules/fidesModule/messaging/dacite/exceptions.py @@ -0,0 +1,80 @@ +from typing import Any, Type, Optional, Set, Dict +from ..dacite.types import is_union + + +def _name(type_: Type) -> str: + return type_.__name__ if hasattr(type_, "__name__") and not is_union(type_) else str(type_) + + +class DaciteError(Exception): + pass + + +class DaciteFieldError(DaciteError): + def __init__(self, field_path: Optional[str] = None): + super().__init__() + self.field_path = field_path + + def update_path(self, parent_field_path: str) -> None: + if self.field_path: + self.field_path = f"{parent_field_path}.{self.field_path}" + else: + self.field_path = parent_field_path + + +class WrongTypeError(DaciteFieldError): + def __init__(self, field_type: Type, value: Any, field_path: Optional[str] = None) -> None: + super().__init__(field_path=field_path) + self.field_type = field_type + self.value = value + + def __str__(self) -> str: + return ( + f'wrong value type for field "{self.field_path}" - should be "{_name(self.field_type)}" ' + f'instead of value "{self.value}" of type "{_name(type(self.value))}"' + ) + + +class MissingValueError(DaciteFieldError): + def __init__(self, field_path: Optional[str] = None): + 
super().__init__(field_path=field_path) + + def __str__(self) -> str: + return f'missing value for field "{self.field_path}"' + + +class UnionMatchError(WrongTypeError): + def __str__(self) -> str: + return ( + f'can not match type "{_name(type(self.value))}" to any type ' + f'of "{self.field_path}" union: {_name(self.field_type)}' + ) + + +class StrictUnionMatchError(DaciteFieldError): + def __init__(self, union_matches: Dict[Type, Any], field_path: Optional[str] = None) -> None: + super().__init__(field_path=field_path) + self.union_matches = union_matches + + def __str__(self) -> str: + conflicting_types = ", ".join(_name(type_) for type_ in self.union_matches) + return f'can not choose between possible Union matches for field "{self.field_path}": {conflicting_types}' + + +class ForwardReferenceError(DaciteError): + def __init__(self, message: str) -> None: + super().__init__() + self.message = message + + def __str__(self) -> str: + return f"can not resolve forward reference: {self.message}" + + +class UnexpectedDataError(DaciteError): + def __init__(self, keys: Set[str]) -> None: + super().__init__() + self.keys = keys + + def __str__(self) -> str: + formatted_keys = ", ".join(f'"{key}"' for key in self.keys) + return f"can not match {formatted_keys} to any data class field" diff --git a/modules/fidesModule/messaging/dacite/frozen_dict.py b/modules/fidesModule/messaging/dacite/frozen_dict.py new file mode 100644 index 000000000..d27aab413 --- /dev/null +++ b/modules/fidesModule/messaging/dacite/frozen_dict.py @@ -0,0 +1,34 @@ +from collections.abc import Mapping + + +class FrozenDict(Mapping): + dict_cls = dict + + def __init__(self, *args, **kwargs): + self._dict = self.dict_cls(*args, **kwargs) + self._hash = None + + def __getitem__(self, key): + return self._dict[key] + + def __contains__(self, key): + return key in self._dict + + def copy(self, **add_or_replace): + return self.__class__(self, **add_or_replace) + + def __iter__(self): + return iter(self._dict) + + def __len__(self): + return len(self._dict) + + def __repr__(self): + return f"<{self.__class__.__name__} {repr(self._dict)}>" + + def __hash__(self): + if self._hash is None: + self._hash = 0 + for key, value in self._dict.items(): + self._hash ^= hash((key, value)) + return self._hash diff --git a/modules/fidesModule/messaging/dacite/py.typed b/modules/fidesModule/messaging/dacite/py.typed new file mode 100644 index 000000000..e69de29bb diff --git a/modules/fidesModule/messaging/dacite/types.py b/modules/fidesModule/messaging/dacite/types.py new file mode 100644 index 000000000..4a96fa43f --- /dev/null +++ b/modules/fidesModule/messaging/dacite/types.py @@ -0,0 +1,181 @@ +from dataclasses import InitVar +from typing import ( + Type, + Any, + Optional, + Union, + Collection, + TypeVar, + Mapping, + Tuple, + cast as typing_cast, +) + +from ..dacite.cache import cache + +T = TypeVar("T", bound=Any) + + +@cache +def extract_origin_collection(collection: Type) -> Type: + try: + return collection.__extra__ + except AttributeError: + return collection.__origin__ + + +@cache +def is_optional(type_: Type) -> bool: + return is_union(type_) and type(None) in extract_generic(type_) + + +@cache +def extract_optional(optional: Type[Optional[T]]) -> T: + other_members = [member for member in extract_generic(optional) if member is not type(None)] + if other_members: + return typing_cast(T, Union[tuple(other_members)]) + else: + raise ValueError("can not find not-none value") + + +@cache +def is_generic(type_: Type) -> bool: + return 
hasattr(type_, "__origin__") + + +@cache +def is_union(type_: Type) -> bool: + if is_generic(type_) and type_.__origin__ == Union: + return True + + try: + from types import UnionType # type: ignore + + return isinstance(type_, UnionType) + except ImportError: + return False + + +@cache +def is_tuple(type_: Type) -> bool: + return is_subclass(type_, tuple) + + +@cache +def is_literal(type_: Type) -> bool: + try: + from typing import Literal # type: ignore + + return is_generic(type_) and type_.__origin__ == Literal + except ImportError: + return False + + +@cache +def is_new_type(type_: Type) -> bool: + return hasattr(type_, "__supertype__") + + +@cache +def extract_new_type(type_: Type) -> Type: + return type_.__supertype__ + + +@cache +def is_init_var(type_: Type) -> bool: + return isinstance(type_, InitVar) or type_ is InitVar + + +@cache +def extract_init_var(type_: Type) -> Union[Type, Any]: + try: + return type_.type + except AttributeError: + return Any + + +def is_instance(value: Any, type_: Type) -> bool: + try: + # As described in PEP 484 - section: "The numeric tower" + if (type_ in [float, complex] and isinstance(value, (int, float))) or isinstance(value, type_): + return True + except TypeError: + pass + if type_ == Any: + return True + elif is_union(type_): + return any(is_instance(value, t) for t in extract_generic(type_)) + elif is_generic_collection(type_): + origin = extract_origin_collection(type_) + if not isinstance(value, origin): + return False + if not extract_generic(type_): + return True + if isinstance(value, tuple) and is_tuple(type_): + tuple_types = extract_generic(type_) + if len(tuple_types) == 1 and tuple_types[0] == (): + return len(value) == 0 + elif len(tuple_types) == 2 and tuple_types[1] is ...: + return all(is_instance(item, tuple_types[0]) for item in value) + else: + if len(tuple_types) != len(value): + return False + return all(is_instance(item, item_type) for item, item_type in zip(value, tuple_types)) + if isinstance(value, Mapping): + key_type, val_type = extract_generic(type_, defaults=(Any, Any)) + for key, val in value.items(): + if not is_instance(key, key_type) or not is_instance(val, val_type): + return False + return True + return all(is_instance(item, extract_generic(type_, defaults=(Any,))[0]) for item in value) + elif is_new_type(type_): + return is_instance(value, extract_new_type(type_)) + elif is_literal(type_): + return value in extract_generic(type_) + elif is_init_var(type_): + return is_instance(value, extract_init_var(type_)) + elif is_type_generic(type_): + return is_subclass(value, extract_generic(type_)[0]) + else: + return False + + +@cache +def is_generic_collection(type_: Type) -> bool: + if not is_generic(type_): + return False + origin = extract_origin_collection(type_) + try: + return bool(origin and issubclass(origin, Collection)) + except (TypeError, AttributeError): + return False + + +@cache +def extract_generic(type_: Type, defaults: Tuple = ()) -> tuple: + try: + if getattr(type_, "_special", False): + return defaults + if type_.__args__ == (): + return (type_.__args__,) + return type_.__args__ or defaults # type: ignore + except AttributeError: + return defaults + + +@cache +def is_subclass(sub_type: Type, base_type: Type) -> bool: + if is_generic_collection(sub_type): + sub_type = extract_origin_collection(sub_type) + try: + return issubclass(sub_type, base_type) + except TypeError: + return False + + +@cache +def is_type_generic(type_: Type) -> bool: + try: + return type_.__origin__ in (type, Type) + except 
AttributeError:
+        return False
diff --git a/modules/fidesModule/messaging/message_handler.py b/modules/fidesModule/messaging/message_handler.py
new file mode 100644
index 000000000..41f235e3c
--- /dev/null
+++ b/modules/fidesModule/messaging/message_handler.py
@@ -0,0 +1,176 @@
+from typing import Dict, List, Callable, Optional, Union, Any
+
+from ..messaging.dacite import from_dict
+
+from ..messaging.model import NetworkMessage, PeerInfo, \
+    PeerIntelligenceResponse, PeerRecommendationResponse
+from ..model.alert import Alert
+from ..model.aliases import PeerId, Target
+from ..model.recommendation import Recommendation
+from ..model.threat_intelligence import ThreatIntelligence
+from ..utils.logger import Logger
+
+logger = Logger(__name__)
+
+
+class MessageHandler:
+    """
+    Class responsible for parsing messages and handling requests coming from the queue.
+
+    The entrypoint is on_message.
+    """
+
+    version = 1
+
+    def __init__(self,
+                 on_peer_list_update: Callable[[List[PeerInfo]], None],
+                 on_recommendation_request: Callable[[str, PeerInfo, PeerId], None],
+                 on_recommendation_response: Callable[[List[PeerRecommendationResponse]], None],
+                 on_alert: Callable[[PeerInfo, Alert], None],
+                 on_intelligence_request: Callable[[str, PeerInfo, Target], None],
+                 on_intelligence_response: Callable[[List[PeerIntelligenceResponse]], None],
+                 on_unknown: Optional[Callable[[NetworkMessage], None]] = None,
+                 on_error: Optional[Callable[[Union[str, NetworkMessage], Exception], None]] = None
+                 ):
+        self.__on_peer_list_update_callback = on_peer_list_update
+        self.__on_recommendation_request_callback = on_recommendation_request
+        self.__on_recommendation_response_callback = on_recommendation_response
+        self.__on_alert_callback = on_alert
+        self.__on_intelligence_request_callback = on_intelligence_request
+        self.__on_intelligence_response_callback = on_intelligence_response
+        self.__on_unknown_callback = on_unknown
+        self.__on_error = on_error
+
+    def on_message(self, message: NetworkMessage):
+        """
+        Entry point for generic messages coming from the queue.
+        This method parses the message and then executes the correct procedure for the event.
+        :param message: message from the queue
+        :return: value from the underlying callback given to the constructor
+        """
+        if message.version != self.version:
+            logger.warn(f'Unknown message version! 
This handler supports {self.version}.', message) + return self.__on_unknown_message(message) + + execution_map = { + 'nl2tl_peers_list': self.__on_nl2tl_peer_list, + 'nl2tl_recommendation_request': self.__on_nl2tl_recommendation_request, + 'nl2tl_recommendation_response': self.__on_nl2tl_recommendation_response, + 'nl2tl_alert': self.__on_nl2tl_alert, + 'nl2tl_intelligence_request': self.__on_nl2tl_intelligence_request, + 'nl2tl_intelligence_response': self.__on_nl2tl_intelligence_response + } + func = execution_map.get(message.type, lambda data: self.__on_unknown_message(message)) + # we want to handle everything + # noinspection PyBroadException + try: + # we know that the functions can handle that, and if not, there's always error handling + # noinspection PyArgumentList + return func(message.data) + except Exception as ex: + logger.error(f"Error when executing handler for message: {message.type}.", ex) + if self.__on_error: + return self.__on_error(message, ex) + + def on_error(self, original_data: str, exception: Optional[Exception] = None): + """ + Should be executed when it was not possible to parse the message. + :param original_data: string received from the queue + :param exception: exception that occurred during handling + :return: + """ + logger.error(f'Unknown data received: {original_data}.') + if self.__on_error: + self.__on_error(original_data, exception if exception else Exception('Unknown data type!')) + + def __on_unknown_message(self, message: NetworkMessage): + logger.warn(f'Unknown message handler executed!') + logger.debug(f'Message:', message) + + if self.__on_unknown_callback is not None: + self.__on_unknown_callback(message) + + def __on_nl2tl_peer_list(self, data: Dict): + logger.debug('nl2tl_peer_list message') + + peers = [from_dict(data_class=PeerInfo, data=peer) for peer in data['peers']] + return self.__on_peer_list_update(peers) + + def __on_peer_list_update(self, peers: List[PeerInfo]): + return self.__on_peer_list_update_callback(peers) + + def __on_nl2tl_recommendation_request(self, data: Dict): + logger.debug('nl2tl_recommendation_request message') + + request_id = data['request_id'] + sender = from_dict(data_class=PeerInfo, data=data['sender']) + subject = data['payload'] + return self.__on_recommendation_request(request_id, sender, subject) + + def __on_recommendation_request(self, request_id: str, sender: PeerInfo, subject: PeerId): + return self.__on_recommendation_request_callback(request_id, sender, subject) + + def __on_nl2tl_recommendation_response(self, data: List[Dict]): + logger.debug('nl2tl_recommendation_response message') + + responses = [PeerRecommendationResponse( + sender=from_dict(data_class=PeerInfo, data=single['sender']), + subject=single['payload']['subject'], + recommendation=from_dict(data_class=Recommendation, data=single['payload']['recommendation']) + ) for single in data] + return self.__on_recommendation_response(responses) + + def __on_recommendation_response(self, recommendations: List[PeerRecommendationResponse]): + return self.__on_recommendation_response_callback(recommendations) + + def __on_nl2tl_alert(self, data: Dict): + logger.debug('nl2tl_alert message') + + sender = from_dict(data_class=PeerInfo, data=data['sender']) + alert = from_dict(data_class=Alert, data=data['payload']) + return self.__on_alert(sender, alert) + + def __on_alert(self, sender: PeerInfo, alert: Alert): + return self.__on_alert_callback(sender, alert) + + def __on_nl2tl_intelligence_request(self, data: Dict): + 
logger.debug('nl2tl_intelligence_request message')
+
+        request_id = data['request_id']
+        sender = from_dict(data_class=PeerInfo, data=data['sender'])
+        target = data['payload']
+        return self.__on_intelligence_request(request_id, sender, target)
+
+    def __on_intelligence_request(self, request_id: str, sender: PeerInfo, target: Target):
+        return self.__on_intelligence_request_callback(request_id, sender, target)
+
+    def __on_nl2tl_intelligence_response(self, data: Dict):
+        logger.debug('nl2tl_intelligence_response message')
+
+        responses = []
+
+        try:
+            responses = [PeerIntelligenceResponse(
+                sender=from_dict(data_class=PeerInfo, data=single['sender']),
+                intelligence=from_dict(data_class=ThreatIntelligence, data=single['payload']['intelligence']),
+                target=single['payload']['target']
+            ) for single in data]
+        except Exception as e:
+            logger.error(f'Error in __on_nl2tl_intelligence_response: {e}')
+        return self.__on_intelligence_response(responses)
+
+    def __on_intelligence_response(self, responses: List[PeerIntelligenceResponse]):
+        return self.__on_intelligence_response_callback(responses)
diff --git a/modules/fidesModule/messaging/model.py b/modules/fidesModule/messaging/model.py
new file mode 100644
index 000000000..e36b6c0a0
--- /dev/null
+++ b/modules/fidesModule/messaging/model.py
@@ -0,0 +1,33 @@
+from dataclasses import dataclass
+from typing import Any
+
+from ..model.aliases import PeerId, Target
+from ..model.peer import PeerInfo
+from ..model.recommendation import Recommendation
+from ..model.threat_intelligence import ThreatIntelligence
+
+"""
+Model data coming from the Redis queue -
+communication layer between network and trust layer.
+"""
+
+
+@dataclass
+class NetworkMessage:
+    type: str
+    version: int
+    data: Any
+
+
+@dataclass
+class PeerRecommendationResponse:
+    sender: PeerInfo
+    subject: PeerId
+    recommendation: Recommendation
+
+
+@dataclass
+class PeerIntelligenceResponse:
+    sender: PeerInfo
+    intelligence: ThreatIntelligence
+    target: Target
diff --git a/modules/fidesModule/messaging/network_bridge.py b/modules/fidesModule/messaging/network_bridge.py
new file mode 100644
index 000000000..e6495271d
--- /dev/null
+++ b/modules/fidesModule/messaging/network_bridge.py
@@ -0,0 +1,151 @@
+import json
+from dataclasses import asdict
+from typing import Dict, List
+
+from .dacite import from_dict
+
+from .message_handler import MessageHandler
+from .model import NetworkMessage
+from .queue import Queue
+from ..model.alert import Alert
+from ..model.aliases import PeerId, Target
+from ..model.recommendation import Recommendation
+from ..model.threat_intelligence import ThreatIntelligence
+from ..utils.logger import Logger
+
+logger = Logger(__name__)
+
+
+class NetworkBridge:
+    """
+    Class responsible for communication with the network layer.
+
+    In order to connect the bridge to the queue and start receiving messages,
+    execute the "listen" method.
+    """
+
+    version = 1
+
+    def __init__(self, queue: Queue):
+        self.__queue = queue
+
+    def listen(self, handler: MessageHandler, block: bool = False):
+        """Starts message processing.
+
+        If :param: block = False, this method won't block this thread.
+        """
+
+        def message_received(message: str):
+            try:
+                logger.debug("New message received! 
Trying to parse.") + parsed = json.loads(message) + network_message = from_dict( + data_class=NetworkMessage, data=parsed + ) + logger.debug("Message parsed. Executing handler.") + handler.on_message(network_message) + except Exception as e: + logger.error( + f"There was an error processing message, Exception: {e}." + ) + handler.on_error(message, e) + + logger.debug("Starts listening...") + + return self.__queue.listen(message_received, block=block) + + def send_intelligence_response( + self, request_id: str, target: Target, intelligence: ThreatIntelligence + ): + """Shares Intelligence with peer that requested it. request_id comes + from the first request.""" + envelope = NetworkMessage( + type="tl2nl_intelligence_response", + version=self.version, + data={ + "request_id": request_id, + "payload": {"target": target, "intelligence": intelligence}, + }, + ) + return self.__send(envelope) + + def send_intelligence_request(self, target: Target): + """Requests network intelligence from the network regarding this target.""" + envelope = NetworkMessage( + type="tl2nl_intelligence_request", + version=self.version, + data={"payload": target}, + ) + return self.__send(envelope) + + def send_alert(self, target: Target, intelligence: ThreatIntelligence): + """Broadcasts alert through the network about the target.""" + envelope = NetworkMessage( + type="tl2nl_alert", + version=self.version, + data={ + "payload": Alert( + target=target, + score=intelligence.score, + confidence=intelligence.confidence, + ) + }, + ) + return self.__send(envelope) + + def send_recommendation_response( + self, + request_id: str, + recipient: PeerId, + subject: PeerId, + recommendation: Recommendation, + ): + """Responds to given request_id to recipient with recommendation on target.""" + envelope = NetworkMessage( + type="tl2nl_recommendation_response", + version=self.version, + data={ + "request_id": request_id, + "recipient_id": recipient, + "payload": { + "subject": subject, + "recommendation": recommendation, + }, + }, + ) + return self.__send(envelope) + + def send_recommendation_request( + self, recipients: List[PeerId], peer: PeerId + ): + """Request recommendation from recipients on given peer.""" + envelope = NetworkMessage( + type="tl2nl_recommendation_request", + version=self.version, + data={"receiver_ids": recipients, "payload": peer}, + ) + return self.__send(envelope) + + def send_peers_reliability(self, reliability: Dict[PeerId, float]): + """Sends peer reliability, this message is only for network layer and is not dispatched to the network.""" + data = [ + {"peer_id": key, "reliability": value} + for key, value in reliability.items() + ] + envelope = NetworkMessage( + type="tl2nl_peers_reliability", version=self.version, data=data + ) + return self.__send(envelope) + + def __send(self, envelope: NetworkMessage): + logger.debug("Sending", envelope) + try: + j = json.dumps(asdict(envelope)) + return self.__queue.send(j) + except Exception as ex: + logger.error( + f"Exception during sending an envelope: {ex}.", envelope + ) + raise ex diff --git a/modules/fidesModule/messaging/queue.py b/modules/fidesModule/messaging/queue.py new file mode 100644 index 000000000..1ea8728f7 --- /dev/null +++ b/modules/fidesModule/messaging/queue.py @@ -0,0 +1,20 @@ +from typing import Callable + + +class Queue: + """ + Wrapper around actual implementation of queue. + + Central point used for communication with the network layer and another peers. 
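+
+    Implementations override `send` and `listen`; see RedisSimplexQueue for the
+    Redis-backed variant used by the module and InMemoryQueue for in-process use.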
+    """
+
+    def send(self, serialized_data: str, **argv):
+        """Sends serialized data to the queue."""
+        raise NotImplementedError('This is an interface. Use an implementation.')
+
+    def listen(self, on_message: Callable[[str], None], **argv):
+        """Starts listening, executes :param: on_message when a new message arrives.
+
+        Depending on the implementation, this method might be blocking.
+        """
+        raise NotImplementedError('This is an interface. Use an implementation.')
diff --git a/modules/fidesModule/messaging/queue_in_memory.py b/modules/fidesModule/messaging/queue_in_memory.py
new file mode 100644
index 000000000..ae08db2f8
--- /dev/null
+++ b/modules/fidesModule/messaging/queue_in_memory.py
@@ -0,0 +1,43 @@
+import threading
+from typing import Callable, Optional
+
+from ..messaging.queue import Queue
+from ..utils.logger import Logger
+
+logger = Logger(__name__)
+
+
+class InMemoryQueue(Queue):
+    """In-memory implementation of Queue.
+
+    This should not be used in production.
+    """
+
+    def __init__(self, on_message: Optional[Callable[[str], None]] = None):
+        def default_on_message(data: str):
+            InMemoryQueue.__exception(data)
+
+        self.__on_message: Callable[[str], None] = on_message if on_message else default_on_message
+
+    def send(self, serialized_data: str, should_wait_for_join: bool = False, **argv):
+        """Sends serialized data to the queue."""
+        logger.debug('New data received for send.')
+        if self.__on_message is None:
+            self.__exception(serialized_data)
+
+        th = threading.Thread(target=lambda: self.__on_message(serialized_data))
+        th.start()
+        if should_wait_for_join:
+            th.join()
+
+        return th
+
+    def listen(self, on_message: Callable[[str], None], **argv):
+        """Starts listening, executes :param: on_message when a new message arrives.
+        This method is not blocking.
+        """
+        self.__on_message = on_message
+
+    @staticmethod
+    def __exception(data: str):
+        raise Exception(f'No on_message set! Call listen before calling send! Data: {data}')
diff --git a/modules/fidesModule/messaging/redis_simplex_queue.py b/modules/fidesModule/messaging/redis_simplex_queue.py
new file mode 100644
index 000000000..4fd152416
--- /dev/null
+++ b/modules/fidesModule/messaging/redis_simplex_queue.py
@@ -0,0 +1,128 @@
+from threading import Thread
+from typing import Callable, Optional
+
+from slips_files.core.database.database_manager import DBManager
+from ..messaging.queue import Queue
+from ..utils.logger import Logger
+
+logger = Logger(__name__)
+
+
+class RedisSimplexQueue(Queue):
+    """
+    Implementation of the Queue interface that uses two Redis channels.
+    One for sending data and one for listening.
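+
+    Example (mirrors the wiring in fidesModule.py): the trust layer sends on
+    "fides2network" and receives on "network2fides":
+
+        queue = RedisSimplexQueue(db, send_channel="fides2network",
+                                  received_channel="network2fides",
+                                  channels=channels)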
+ """ + + def __init__( + self, db: DBManager, send_channel: str, received_channel: str, channels + ): + self.db = db + self.__pub = channels[received_channel] + self.__pub_sub_thread: Optional[Thread] = None + self.__send = send_channel + self.__receive = received_channel + # to keep track of the threads opened by this class to be able to + # close them later + self._threads = [] + + def send(self, serialized_data: str, **argv): + self.db.publish(self.__send, serialized_data) + + def listen( + self, + on_message: Callable[[str], None], + block: bool = False, + sleep_time_in_new_thread: float = 0.001, + **argv, + ): + """Starts listening, if :param: block = True, + the method blocks current thread!""" + if block: + return self.__listen_blocking(on_message) + else: + return self.__register_handler( + on_message, sleep_time_in_new_thread + ) + + def __register_handler( + self, + on_message: Callable[[str], None], + sleep_time_in_new_thread: float, + ) -> Thread: + # subscribe with given + self.__pub.subscribe( + **{self.__receive: lambda x: self.__exec_message(x, on_message)} + ) + # creates a new thread + # this is simply a wrapper around `get_message()` that runs in a + # separate thread + self.__pub_sub_thread = self.__pub.run_in_thread( + sleep_time=sleep_time_in_new_thread + ) + self._threads.append(self.__pub_sub_thread) + return self.__pub_sub_thread + + def __listen_blocking(self, on_message: Callable[[str], None]): + ## subscription done in init + # if not self.__pub.subscribed: + # self.__pub.subscribe(self.__receive) + + for msg in self.__pub.listen(): + self.__exec_message(msg, on_message) + + def __exec_message( + self, redis_msg: dict, on_message: Callable[[str], None] + ): + data = None + + if ( + redis_msg is not None + and redis_msg["data"] is not None + and isinstance(redis_msg["data"], str) + ): + data = redis_msg["data"] + + if data is None: + return + + elif data == "stop_process": + logger.debug( + "Stop process message received! " "Stopping subscription." + ) + # unsubscribe from the receive queue + self.__pub.unsubscribe(self.__receive) + self.__pub.close() + # and stop thread if it is possible + try: + if hasattr(self.__pub_sub_thread, "stop"): + self.__pub_sub_thread.stop() + except Exception as ex: + logger.debug(f"Error when stopping thread: {ex}") + return + logger.debug(f"New message received! {data}") + + try: + on_message(data) + except Exception as ex: + logger.error(f"Error when executing on_message!, {ex}") + + def stop_all_queue_threads(self): + """stops all tracked threads""" + for thread in self._threads: + if thread.is_alive(): + thread.stop() + self._threads.clear() # clear the thread list + + +class RedisDuplexQueue(RedisSimplexQueue): + """ + Implementation of Queue interface that uses single Redis queue + for duplex communication (sending and listening on the same channel). 
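+
+    Example (illustrative): RedisDuplexQueue(db, channel="fides2network",
+    channels=channels) sends and receives on the same channel.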
+    """
+
+    def __init__(self, db: DBManager, channel: str, channels):
+        super().__init__(db, channel, channel, channels)
diff --git a/modules/fidesModule/model/__init__.py b/modules/fidesModule/model/__init__.py
new file mode 100644
index 000000000..f5eb68be0
--- /dev/null
+++ b/modules/fidesModule/model/__init__.py
@@ -0,0 +1 @@
+# various data classes and data model representation in general
diff --git a/modules/fidesModule/model/alert.py b/modules/fidesModule/model/alert.py
new file mode 100644
index 000000000..ec73766f5
--- /dev/null
+++ b/modules/fidesModule/model/alert.py
@@ -0,0 +1,18 @@
+from dataclasses import dataclass
+
+from ..model.aliases import Target
+from ..model.threat_intelligence import ThreatIntelligence
+
+
+@dataclass
+class Alert(ThreatIntelligence):
+    """Alert that was broadcast on the network."""
+
+    target: Target
+    """Target that the alert is about."""
+
+    score: float
+    """Score of the alert. See ThreatIntelligence.score."""
+
+    confidence: float
+    """Confidence of the alert. See ThreatIntelligence.confidence."""
diff --git a/modules/fidesModule/model/aliases.py b/modules/fidesModule/model/aliases.py
new file mode 100644
index 000000000..fed80418e
--- /dev/null
+++ b/modules/fidesModule/model/aliases.py
@@ -0,0 +1,30 @@
+IP = str
+"""IPv4, IPv6 in string representation."""
+
+Domain = str
+"""Host Name, Domain."""
+
+PeerId = str
+"""String representation of peer's public key."""
+
+OrganisationId = str
+"""String representation of organisation ID."""
+
+Target = str
+"""Intelligence Target - domain or IP."""
+
+ConfidentialityLevel = float
+"""Confidentiality level for threat intelligence.
+
+If an entity needs to have access to any data, it must hold that
+
+entity.confidentiality_level >= data.confidentiality_level
+
+thus level 0 means accessible for everybody
+"""
+
+Score = float
+"""Score for the target, -1 <= score <= 1"""
+
+Confidence = float
+"""Confidence in score, 0 <= confidence <= 1"""
diff --git a/modules/fidesModule/model/configuration.py b/modules/fidesModule/model/configuration.py
new file mode 100644
index 000000000..99bfe71e0
--- /dev/null
+++ b/modules/fidesModule/model/configuration.py
@@ -0,0 +1,204 @@
+from dataclasses import dataclass
+from typing import List, Union
+
+from ..evaluation.ti_aggregation import TIAggregationStrategy, TIAggregation
+from ..evaluation.ti_evaluation import TIEvaluation, EvaluationStrategy
+from ..model.aliases import OrganisationId, PeerId
+from ..utils.logger import Logger
+
+
+@dataclass(frozen=True)
+class PrivacyLevel:
+    name: str
+    """Name of the level."""
+    value: float
+    """Value used for comparison.
+
+    0 <= value <= 1
+
+    (there can be a case where value > 1, but that means the data won't ever be sent)
+    """
+
+    def __cmp__(self, other):
+        return self.value - other.value
+
+
+@dataclass(frozen=True)
+class ConfidentialityThreshold:
+    level: float
+    """For this level (and all levels > this) require the peer to have at least this trust."""
+    required_trust: float
+    """The trust required to obtain data with this level."""
+
+
+@dataclass(frozen=True)
+class TrustedEntity:
+    id: Union[PeerId, OrganisationId]
+    """Unique identifier for the peer or organisation."""
+
+    name: str
+    """Name of the entity."""
+
+    trust: float
+    """Initial trust for the entity.
+
+    If "enforce_trust = false", this value will change over time as the instance has more interactions
+    with organisation nodes. If "enforce_trust = true", the trust for all peers from this entity will
+    remain the same.
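+
+    For example (illustrative values): with trust = 0.9 and enforce_trust = true,
+    every peer of this entity keeps trust 0.9 regardless of its behaviour.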
+    """
+
+    enforce_trust: bool
+    """If true, entity nodes will always have the initial trust."""
+
+    confidentiality_level: float
+    """What level of data should be shared with this entity."""
+
+
+@dataclass(frozen=True)
+class RecommendationsConfiguration:
+    enabled: bool
+    """If the recommendation protocol should be executed."""
+
+    only_connected: bool
+    """When selecting recommenders, use only the ones that are currently connected."""
+
+    only_preconfigured: bool
+    """If true, the protocol will only ask pre-trusted peers / organisations for recommendations."""
+
+    required_trusted_peers_count: int
+    """Minimal number of trusted connected peers required before running recommendations."""
+
+    trusted_peer_threshold: float
+    """Minimal trust for a trusted peer."""
+
+    peers_max_count: int
+    """Maximal count of peers that are asked to give recommendations on a peer.
+
+    In model's notation η_max.
+    """
+
+    history_max_size: int
+    """Maximal size of Recommendation History.
+
+    In model's notation rh_max.
+    """
+
+
+@dataclass(frozen=True)
+class TrustModelConfiguration:
+    privacy_levels: List[PrivacyLevel]
+    """Privacy levels settings."""
+
+    confidentiality_thresholds: List[ConfidentialityThreshold]
+    """Thresholds for data filtering."""
+
+    data_default_level: float
+    """If some data are not labeled, what value should we use."""
+
+    initial_reputation: float
+    """Initial reputation that is assigned to every peer on a new encounter."""
+
+    service_history_max_size: int
+    """Maximal size of Service History.
+
+    In model's notation sh_max.
+    """
+
+    recommendations: RecommendationsConfiguration
+    """Config for recommendations."""
+
+    alert_trust_from_unknown: float
+    """How much we should trust an alert that was sent by a peer we don't know anything about.
+
+    0 <= alert_trust_from_unknown <= 1
+    """
+
+    trusted_peers: List[TrustedEntity]
+    """List of preconfigured peers."""
+
+    trusted_organisations: List[TrustedEntity]
+    """List of preconfigured organisations."""
+
+    network_opinion_cache_valid_seconds: int
+    """How many seconds a network opinion is considered valid."""
+
+    interaction_evaluation_strategy: TIEvaluation
+    """Evaluation strategy."""
+
+    ti_aggregation_strategy: TIAggregation
+    """Threat Intelligence aggregation strategy."""
+
+    database: str
+
+
+def load_configuration(file_path: str) -> TrustModelConfiguration:
+    with open(file_path, "r") as stream:
+        try:
+            import yaml
+            return __parse_config(yaml.safe_load(stream))
+        except Exception as exc:
+            Logger('config_loader').error(f"It was not possible to load file! 
{exc}.") + raise exc + + +def __parse_config(data: dict) -> TrustModelConfiguration: + return TrustModelConfiguration( + privacy_levels=[PrivacyLevel(name=level['name'], + value=level['value']) + for level in data['confidentiality']['levels']], + confidentiality_thresholds=[ConfidentialityThreshold(level=threshold['level'], + required_trust=threshold['requiredTrust']) + for threshold in data['confidentiality']['thresholds']], + data_default_level=data['confidentiality']['defaultLevel'], + initial_reputation=data['trust']['service']['initialReputation'], + service_history_max_size=data['trust']['service']['historyMaxSize'], + recommendations=RecommendationsConfiguration( + enabled=data['trust']['recommendations']['enabled'], + only_connected=data['trust']['recommendations']['useOnlyConnected'], + only_preconfigured=data['trust']['recommendations']['useOnlyPreconfigured'], + required_trusted_peers_count=data['trust']['recommendations']['requiredTrustedPeersCount'], + trusted_peer_threshold=data['trust']['recommendations']['trustedPeerThreshold'], + peers_max_count=data['trust']['recommendations']['peersMaxCount'], + history_max_size=data['trust']['recommendations']['historyMaxSize'] + ), + alert_trust_from_unknown=data['trust']['alert']['defaultTrust'], + trusted_peers=[TrustedEntity(id=e['id'], + name=e['name'], + trust=e['trust'], + enforce_trust=e['enforceTrust'], + confidentiality_level=e['confidentialityLevel']) + for e in data['trust']['peers']], + trusted_organisations=[TrustedEntity(id=e['id'], + name=e['name'], + trust=e['trust'], + enforce_trust=e['enforceTrust'], + confidentiality_level=e['confidentialityLevel']) + for e in data['trust']['organisations']], + network_opinion_cache_valid_seconds=data['trust']['networkOpinionCacheValidSeconds'], + interaction_evaluation_strategy=__parse_evaluation_strategy(data), + ti_aggregation_strategy=TIAggregationStrategy[data['trust']['tiAggregationStrategy']](), + database=data['database'] if 'database' in data else "fides_p2p_db.sqlite", + ) + + +def __parse_evaluation_strategy(data: dict) -> TIEvaluation: + strategies = data['trust']['interactionEvaluationStrategies'] + + def get_strategy_for_key(key: str) -> TIEvaluation: + kwargs = strategies[key] + kwargs = kwargs if kwargs else {} + # there's special handling as this one combines multiple of them + if key == 'threshold': + kwargs['lower'] = get_strategy_for_key(kwargs['lower']) + kwargs['higher'] = get_strategy_for_key(kwargs['higher']) + elif key == 'maxConfidence': + kwargs['distance'] = get_strategy_for_key('distance') + kwargs['localDistance'] = get_strategy_for_key('localDistance') + kwargs['even'] = get_strategy_for_key('even') + elif key == 'weighedDistance': + kwargs['distance'] = get_strategy_for_key('distance') + kwargs['localDistance'] = get_strategy_for_key('localDistance') + + return EvaluationStrategy[key](**kwargs) + + return get_strategy_for_key(strategies['used']) diff --git a/modules/fidesModule/model/peer.py b/modules/fidesModule/model/peer.py new file mode 100644 index 000000000..bb7dcb337 --- /dev/null +++ b/modules/fidesModule/model/peer.py @@ -0,0 +1,36 @@ +from dataclasses import dataclass +from typing import List, Optional + +from ..model.aliases import PeerId, OrganisationId, IP + + +@dataclass +class PeerInfo: + """Identification data of a single peer in the network.""" + + id: PeerId + """Unique identification of a peer in the network.""" + + organisations: List[OrganisationId] + """List of organization that signed public key of this peer. 
+    According to the protocol, these are organisations that trust the peer.
+    """
+
+    ip: Optional[IP] = None
+    """IP address of the peer, if we know it.
+    There are cases when we don't know the IP of the peer - when running behind NAT
+    or when the peers use a TURN server to connect to each other.
+    """
+
+    def to_dict(self):
+        """Convert to dictionary for serialization."""
+        return {
+            'id': self.id,
+            'organisations': [org for org in self.organisations],
+            'ip': self.ip,
+        }
+
+    @classmethod
+    def from_dict(cls, data):
+        """Create an instance from a dictionary."""
+        return cls(**data)
diff --git a/modules/fidesModule/model/peer_trust_data.py b/modules/fidesModule/model/peer_trust_data.py
new file mode 100644
index 000000000..c2032826e
--- /dev/null
+++ b/modules/fidesModule/model/peer_trust_data.py
@@ -0,0 +1,153 @@
+from dataclasses import dataclass
+from typing import Dict, List
+
+from ..model.aliases import PeerId, OrganisationId
+from ..model.peer import PeerInfo
+from ..model.recommendation_history import RecommendationHistory
+from ..model.service_history import ServiceHistory
+
+
+@dataclass
+class PeerTrustData:
+    """Trust data related to a given peer j - in model's notation "peer_id" is actually "j"."""
+
+    info: PeerInfo
+    """Information about the peer."""
+
+    has_fixed_trust: bool
+    """Determines if the trust is dynamic or fixed."""
+
+    service_trust: float
+    """Service Trust Metric.
+
+    Semantic meaning is basically "trust" - how much the current peer trusts peer "j" about quality of service.
+    In model's notation st_ij.
+
+    0 <= service_trust <= 1
+    """
+
+    reputation: float
+    """Reputation Metric.
+
+    The reputation metric measures a stranger's trustworthiness based on recommendations.
+    In model's notation r_ij.
+
+    0 <= reputation <= 1
+    """
+
+    recommendation_trust: float
+    """Recommendation Trust Metric.
+
+    How much the peer trusts that any recommendation received from this peer is correct.
+    In model's notation rt_ij.
+
+    0 <= recommendation_trust <= 1
+    """
+
+    competence_belief: float
+    """How much is peer satisfied with historical service interactions.
+
+    In general, this is expected mean behavior of the peer.
+    In model's notation cb_ij.
+
+    0 <= competence_belief <= 1
+    """
+
+    integrity_belief: float
+    """How much is peer consistent in its behavior.
+
+    In general, this is standard deviation from the mean behavior.
+    In model's notation ib_ij.
+
+    0 <= integrity_belief <= 1
+    """
+
+    initial_reputation_provided_by_count: int
+    """How many peers provided recommendation during initial calculation of reputation.
+
+    In model's notation η_ij.
+ """ + + service_history: ServiceHistory + """History of interactions, in model's notation SH_ij.""" + + recommendation_history: RecommendationHistory + """History of recommendation, in model's notation RH_ij.""" + + @property + def peer_id(self) -> PeerId: + """ID of the peer these data are for.""" + return self.info.id + + @property + def organisations(self) -> List[OrganisationId]: + """Organisations that signed this peer.""" + return self.info.organisations + + @property + def service_history_size(self): + """Size of the history, in model's notation sh_ij.""" + return len(self.service_history) + + @property + def recommendation_history_size(self): + """Size of the recommendation history, in model's notation rh_ij.""" + return len(self.recommendation_history) + + def to_dict(self, remove_histories: bool = False): + data = { + "info": self.info.to_dict(), # Assuming PeerInfo has to_dict method + "has_fixed_trust": self.has_fixed_trust, + "service_trust": self.service_trust, + "reputation": self.reputation, + "recommendation_trust": self.recommendation_trust, + "competence_belief": self.competence_belief, + "integrity_belief": self.integrity_belief, + "initial_reputation_provided_by_count": self.initial_reputation_provided_by_count, + "service_history": [sh.to_dict() for sh in self.service_history], # Assuming ServiceHistory has to_dict + "recommendation_history": [rh.to_dict() for rh in self.recommendation_history] # Assuming RecommendationHistory has to_dict + } + + if remove_histories: + del data["service_history"] + del data["recommendation_history"] + + return data + + # Method to create an object from a dictionary + @classmethod + def from_dict(cls, data): + return cls( + info=PeerInfo.from_dict(data["info"]), # Assuming PeerInfo has from_dict method + has_fixed_trust=data["has_fixed_trust"], + service_trust=data["service_trust"], + reputation=data["reputation"], + recommendation_trust=data["recommendation_trust"], + competence_belief=data["competence_belief"], + integrity_belief=data["integrity_belief"], + initial_reputation_provided_by_count=data["initial_reputation_provided_by_count"], + service_history=[ServiceHistory.from_dict(sh) for sh in data["service_history"]], + # Assuming ServiceHistory has from_dict + recommendation_history=[RecommendationHistory.from_dict(rh) for rh in data["recommendation_history"]] + # Assuming RecommendationHistory has from_dict + ) + + +TrustMatrix = Dict[PeerId, PeerTrustData] +"""Matrix that have PeerId as a key and then value is data about trust we have.""" + + +def trust_data_prototype(peer: PeerInfo, has_fixed_trust: bool = False) -> PeerTrustData: + """Creates clear trust object with 0 values and given peer info.""" + return PeerTrustData( + info=peer, + has_fixed_trust=has_fixed_trust, + service_trust=0, + reputation=0, + recommendation_trust=0, + competence_belief=0, + integrity_belief=0, + initial_reputation_provided_by_count=0, + service_history=[], + recommendation_history=[] + ) diff --git a/modules/fidesModule/model/recommendation.py b/modules/fidesModule/model/recommendation.py new file mode 100644 index 000000000..6b6c9d937 --- /dev/null +++ b/modules/fidesModule/model/recommendation.py @@ -0,0 +1,44 @@ +from dataclasses import dataclass + + +@dataclass +class Recommendation: + """Represents k peer's response to recommendation query about peer j.""" + + competence_belief: float + """How much is peer satisfied with historical service interactions. + + In general, this is expected mean behavior of the peer. 
+ In model's notation cb_kj. + + 0 <= competence_belief <= 1 + """ + + integrity_belief: float + """How much is peer consistent in its behavior. + + In general, this is standard deviation from the mean behavior. + In model's notation ib_kj. + + 0 <= integrity_belief <= 1 + """ + + service_history_size: int + """Size of service interaction history. + + In model's notation sh_kj. + """ + + recommendation: float + """Recommendation about reputation. + + In model's notation r_kj. + + 0 <= recommendation <= 1 + """ + + initial_reputation_provided_by_count: int + """How many peers which provided recommendation during the initial calculation of r_kj. + + In model's notation η_kj. + """ diff --git a/modules/fidesModule/model/recommendation_history.py b/modules/fidesModule/model/recommendation_history.py new file mode 100644 index 000000000..340d82aa0 --- /dev/null +++ b/modules/fidesModule/model/recommendation_history.py @@ -0,0 +1,49 @@ +from dataclasses import dataclass +from typing import List + +from ..utils.time import Time + + +@dataclass +class RecommendationHistoryRecord: + """Represents an evaluation of a single recommendation interaction between peer i and peer j.""" + + satisfaction: float + """Peer's satisfaction with the recommendation. In model's notation rs_ij. + + 0 <= satisfaction <= 1 + """ + + weight: float + """Weight of the recommendation. In model's notation rw_ij. + + 0 <= weight <= 1 + """ + + timestamp: Time + """Date time when this recommendation happened.""" + + + def to_dict(self): + """Convert the instance to a dictionary.""" + return { + 'satisfaction': self.satisfaction, + 'weight': self.weight, + 'timestamp': self.timestamp # Keep as float + } + + @classmethod + def from_dict(cls, dict_obj): + """Create an instance of RecommendationHistoryRecord from a dictionary.""" + return cls( + satisfaction=dict_obj['satisfaction'], + weight=dict_obj['weight'], + timestamp=dict_obj['timestamp'] # Keep as float + ) + + +RecommendationHistory = List[RecommendationHistoryRecord] +"""Ordered list with history of recommendation interactions. + +First element in the list is the oldest one. +""" diff --git a/modules/fidesModule/model/service_history.py b/modules/fidesModule/model/service_history.py new file mode 100644 index 000000000..d9526a63a --- /dev/null +++ b/modules/fidesModule/model/service_history.py @@ -0,0 +1,48 @@ +from dataclasses import dataclass +from typing import List + +from ..utils.time import Time + + +@dataclass +class ServiceHistoryRecord: + """Represents an evaluation of a single service interaction between peer i and peer j.""" + + satisfaction: float + """Peer's satisfaction with the service. In model's notation s_ij. + + 0 <= satisfaction <= 1 + """ + + weight: float + """Weight of the service interaction. In model's notation w_ij. + + 0 <= weight <= 1 + """ + + timestamp: Time + """Date time when this interaction happened.""" + + def to_dict(self): + """Convert the instance to a dictionary.""" + return { + 'satisfaction': self.satisfaction, + 'weight': self.weight, + 'timestamp': self.timestamp + } + + @classmethod + def from_dict(cls, dict_obj): + """Create an instance of ServiceHistoryRecord from a dictionary.""" + return cls( + satisfaction=dict_obj['satisfaction'], + weight=dict_obj['weight'], + timestamp=dict_obj['timestamp'] # Convert ISO format back to datetime + ) + + +ServiceHistory = List[ServiceHistoryRecord] +"""Ordered list with history of service interactions. + +First element in the list is the oldest one. 
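+
+A doctest-style sketch of how records accumulate (the values are purely
+illustrative):
+
+>>> history: ServiceHistory = []
+>>> history.append(ServiceHistoryRecord(satisfaction=0.8, weight=1.0, timestamp=1700000000.0))
+>>> history[0].satisfaction
+0.8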
+""" diff --git a/modules/fidesModule/model/threat_intelligence.py b/modules/fidesModule/model/threat_intelligence.py new file mode 100644 index 000000000..3f439c054 --- /dev/null +++ b/modules/fidesModule/model/threat_intelligence.py @@ -0,0 +1,50 @@ +from dataclasses import dataclass +from typing import Optional + +from ..model.aliases import Target, ConfidentialityLevel, Score, Confidence + + +@dataclass +class ThreatIntelligence: + """Representation of peer's opinion on a subject (IP address or domain).""" + + score: Score + """How much is subject malicious or benign. + + -1 <= score <= 1 + """ + + confidence: Confidence + """How much does peer trust, that score is correct. + + 0 <= confidence <= 1 + """ + + +@dataclass +class SlipsThreatIntelligence(ThreatIntelligence): + target: Target + """Target of the intelligence.""" + + confidentiality: Optional[ConfidentialityLevel] = None + """Confidentiality level if known.""" + + def to_dict(self): + result = { + "target": self.target, + "score": self.score, + "confidence": self.confidence, + } + if self.confidentiality is not None: + result["confidentiality"] = self.confidentiality + return result + + # Create an instance from a dictionary + @classmethod + def from_dict(cls, data: dict): + return cls( + target=data["target"], + confidentiality=float(data["confidentiality"]) if data.get("confidentiality") else None, + score=float(data["score"]) if data.get("score") else None, + confidence=float(data["confidence"]) if data.get("confidence") else None + ) diff --git a/modules/fidesModule/originals/__init__.py b/modules/fidesModule/originals/__init__.py new file mode 100644 index 000000000..6dfb2ebed --- /dev/null +++ b/modules/fidesModule/originals/__init__.py @@ -0,0 +1,2 @@ +# This module includes code that was copied from original Slips repository +# https://github.com/stratosphereips/StratosphereLinuxIPS diff --git a/modules/fidesModule/originals/abstracts.py b/modules/fidesModule/originals/abstracts.py new file mode 100644 index 000000000..699575d32 --- /dev/null +++ b/modules/fidesModule/originals/abstracts.py @@ -0,0 +1,29 @@ +# This file is copy and paste from original Slip repository +# to keep the originals building +# https://github.com/stratosphereips/StratosphereLinuxIPS/blob/5015990188f21176224e093976f80311524efe4e/slips_files/common/abstracts.py +# -------------------------------------------------------------------------------------------------- + +# File containing some abstract definitions for slips + + +# This is the abstract Module class to check against. 
Do not modify +class Module(object): + name = '' + description = 'Template abstract originals' + authors = ['Template abstract Author'] + output = [] + + def __init__(self): + pass + + def usage(self): + print('Usage') + + def help(self): + print('Help') + + def run(self): + try: + print('test') + except Exception as e: + print('error') diff --git a/modules/fidesModule/originals/database.py b/modules/fidesModule/originals/database.py new file mode 100644 index 000000000..fab26689c --- /dev/null +++ b/modules/fidesModule/originals/database.py @@ -0,0 +1,18 @@ +# This file is truncated file from original Slips repository - only methods that are necessary for module to build +# were left +# https://github.com/stratosphereips/StratosphereLinuxIPS/blob/5015990188f21176224e093976f80311524efe4e/slips_files/core/database.py +# -------------------------------------------------------------------------------------------------- +from redis.client import Redis + + +class Database(object): + """ Database object management """ + + def __init__(self): + self.r: Redis + + def start(self, slip_conf): + raise NotImplemented('Use real implementation for Slips!') + + +__database__ = Database() diff --git a/modules/fidesModule/persistence/__init__.py b/modules/fidesModule/persistence/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/modules/fidesModule/persistence/sqlite_db.py b/modules/fidesModule/persistence/sqlite_db.py new file mode 100644 index 000000000..8fc48b7c1 --- /dev/null +++ b/modules/fidesModule/persistence/sqlite_db.py @@ -0,0 +1,640 @@ +""" +Programmers notes: + +Python has None, SQLite has NULL, conversion is automatic in both ways. +""" +import os +import sqlite3 +from typing import List, Any, Optional + +from slips_files.core.output import Output +from ..model.peer import PeerInfo +from ..model.peer_trust_data import PeerTrustData +from ..model.recommendation_history import RecommendationHistoryRecord +from ..model.service_history import ServiceHistoryRecord +from ..model.threat_intelligence import SlipsThreatIntelligence +from ..model.aliases import * +import threading + + +class SQLiteDB: + _lock = threading.RLock() + name = "Fides SQLiteDB" + + def __init__(self, logger: Output, db_path: str) -> None: + """ + Initializes the SQLiteDB instance, sets up logging, and connects to the database. + + :param logger: Logger for logging debug information. + :param db_path: Path where the SQLite database will be stored. + """ + self.logger = logger + self.db_path = db_path + with open(self.db_path, "a") as f: + f.close() + sqlite3.connect(self.db_path).close() + self.connection: Optional[sqlite3.Connection] = None + self.__connect() + self.__create_tables() + + def __slips_log(self, txt: str) -> None: + self.logger.output_line( + {"verbose": 2, "debug": 0, "from": self.name, "txt": txt} + ) + + def get_slips_threat_intelligence_by_target( + self, target: Target + ) -> Optional[SlipsThreatIntelligence]: + """ + Retrieves a SlipsThreatIntelligence record by its target. + + :param target: The target (IP address, domain, etc.) of the intelligence. + :return: A SlipsThreatIntelligence instance or None if not found. 
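+
+        A usage sketch, assuming an initialised instance named db and a
+        hypothetical target value:
+
+        >>> ti = db.get_slips_threat_intelligence_by_target("192.0.2.1")
+        >>> ti is None or -1.0 <= ti.score <= 1.0
+        True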
+ """ + query = """ + SELECT score, confidence, target, confidentiality + FROM ThreatIntelligence + WHERE target = ?; + """ + + # Execute the query to get the result + rows = self.__execute_query(query, [target]) + + if rows: + score, confidence, target, confidentiality = rows[0] + return SlipsThreatIntelligence( + score=score, + confidence=confidence, + target=target, + confidentiality=confidentiality, + ) + + return None + + def store_slips_threat_intelligence( + self, intelligence: SlipsThreatIntelligence + ) -> None: + """ + Stores or updates the given SlipsThreatIntelligence object in the database based on the target. + + :param intelligence: The SlipsThreatIntelligence object to store or update. + """ + query = """ + INSERT INTO ThreatIntelligence ( + target, score, confidence, confidentiality + ) + VALUES (?, ?, ?, ?) + ON CONFLICT(target) DO UPDATE SET + score = excluded.score, + confidence = excluded.confidence, + confidentiality = excluded.confidentiality; + """ + + # Convert the confidentiality to None if not provided, and flatten data for insertion + params = [ + intelligence.target, + intelligence.score, + intelligence.confidence, + intelligence.confidentiality, + ] + + # Execute the query + self.__execute_query(query, params) + + def store_peer_trust_data(self, peer_trust_data: PeerTrustData) -> None: + with SQLiteDB._lock: + # Insert PeerInfo first to ensure the peer exists + self.__execute_query( + """ + INSERT OR REPLACE INTO PeerInfo (peerID, ip) + VALUES (?, ?); + """, + (peer_trust_data.info.id, peer_trust_data.info.ip), + ) + + # Insert organisations for the peer into the PeerOrganisation table + for org_id in peer_trust_data.info.organisations: + self.__execute_query( + """ + INSERT OR REPLACE INTO PeerOrganisation (peerID, organisationID) + VALUES (?, ?); + """, + (peer_trust_data.info.id, org_id), + ) + + # Insert PeerTrustData itself + self.__execute_query( + """ + INSERT INTO PeerTrustData ( + peerID, has_fixed_trust, service_trust, reputation, recommendation_trust, + competence_belief, integrity_belief, initial_reputation_provided_by_count + ) + VALUES (?, ?, ?, ?, ?, ?, ?, ?); + """, + ( + peer_trust_data.info.id, + int(peer_trust_data.has_fixed_trust), + peer_trust_data.service_trust, + peer_trust_data.reputation, + peer_trust_data.recommendation_trust, + peer_trust_data.competence_belief, + peer_trust_data.integrity_belief, + peer_trust_data.initial_reputation_provided_by_count, + ), + ) + + # Prepare to insert service history and link to PeerTrustData + for sh in peer_trust_data.service_history: + self.__execute_query( + """ + INSERT INTO ServiceHistory (peerID, satisfaction, weight, service_time) + VALUES (?, ?, ?, ?); + """, + ( + peer_trust_data.info.id, + sh.satisfaction, + sh.weight, + sh.timestamp, + ), + ) + + # Insert into PeerTrustServiceHistory + self.__execute_query( + """ + INSERT INTO PeerTrustServiceHistory (peer_trust_data_id, service_history_id) + VALUES (last_insert_rowid(), last_insert_rowid()); + """ + ) + + # Prepare to insert recommendation history and link to PeerTrustData + for rh in peer_trust_data.recommendation_history: + self.__execute_query( + """ + INSERT INTO RecommendationHistory (peerID, satisfaction, weight, recommend_time) + VALUES (?, ?, ?, ?); + """, + ( + peer_trust_data.info.id, + rh.satisfaction, + rh.weight, + rh.timestamp, + ), + ) + + # Insert into PeerTrustRecommendationHistory + self.__execute_query( + """ + INSERT INTO PeerTrustRecommendationHistory (peer_trust_data_id, recommendation_history_id) + VALUES 
(last_insert_rowid(), last_insert_rowid()); + """ + ) + + def get_peers_by_minimal_recommendation_trust( + self, minimal_recommendation_trust: float + ) -> List[PeerInfo]: + # SQL query to select PeerInfo of peers that meet the minimal recommendation_trust criteria + query = """ + SELECT pi.peerID, pi.ip + FROM PeerTrustData ptd + JOIN PeerInfo pi ON ptd.peerID = pi.peerID + WHERE ptd.recommendation_trust >= ?; + """ + + # Execute the query, passing the minimal_recommendation_trust as a parameter + result_rows = self.__execute_query( + query, [minimal_recommendation_trust] + ) + + peer_list = [] + for row in result_rows: + peer_id = row[0] + ip = row[1] + + # Get the organisations for the peer using the get_peer_organisations method below + organisations = self.get_peer_organisations(peer_id) + + # Create a PeerInfo instance with the retrieved organisations and IP + peer_info = PeerInfo( + id=peer_id, organisations=organisations, ip=ip + ) + peer_list.append(peer_info) + + return peer_list + + def get_peer_trust_data(self, peer_id: str) -> PeerTrustData: + # Fetch PeerTrustData along with PeerInfo + query_peer_trust = """ + SELECT ptd.*, pi.peerID, pi.ip + FROM PeerTrustData ptd + JOIN PeerInfo pi ON ptd.peerID = pi.peerID + WHERE ptd.peerID = ?; + """ + peer_trust_row = self.__execute_query(query_peer_trust, [peer_id]) + + # If no result found, return None + if not peer_trust_row: + return None + + peer_trust_row = peer_trust_row[ + 0 + ] # Get the first row (since fetchall() returns a list of rows) + + # Unpack PeerTrustData row (adjust indices based on your column order) + ( + trust_data_id, + peerID, + has_fixed_trust, + service_trust, + reputation, + recommendation_trust, + competence_belief, + integrity_belief, + initial_reputation_count, + _, + ip, + ) = peer_trust_row + + # Fetch ServiceHistory for the peer + query_service_history = """ + SELECT sh.satisfaction, sh.weight, sh.service_time + FROM ServiceHistory sh + JOIN PeerTrustServiceHistory pts ON sh.id = pts.service_history_id + JOIN PeerTrustData ptd ON pts.peer_trust_data_id = ptd.id + WHERE ptd.peerID = ?; + """ + service_history_rows = self.__execute_query( + query_service_history, [peer_id] + ) + + service_history = [ + ServiceHistoryRecord( + satisfaction=row[0], weight=row[1], timestamp=row[2] + ) + for row in service_history_rows + ] + + # Fetch RecommendationHistory for the peer + query_recommendation_history = """ + SELECT rh.satisfaction, rh.weight, rh.recommend_time + FROM RecommendationHistory rh + JOIN PeerTrustRecommendationHistory ptr ON rh.id = ptr.recommendation_history_id + JOIN PeerTrustData ptd ON ptr.peer_trust_data_id = ptd.id + WHERE ptd.peerID = ?; + """ + recommendation_history_rows = self.__execute_query( + query_recommendation_history, [peer_id] + ) + + recommendation_history = [ + RecommendationHistoryRecord( + satisfaction=row[0], weight=row[1], timestamp=row[2] + ) + for row in recommendation_history_rows + ] + + # Construct PeerInfo + peer_info = PeerInfo( + id=peerID, organisations=self.get_peer_organisations(peerID), ip=ip + ) # Assuming organisation info is not fetched here. 
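+
+        # Note: has_fixed_trust is persisted as INTEGER 0/1 (see the CHECK
+        # constraint in __create_tables), so it is converted back to bool below.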
+ + # Construct and return PeerTrustData object + return PeerTrustData( + info=peer_info, + has_fixed_trust=bool(has_fixed_trust), + service_trust=service_trust, + reputation=reputation, + recommendation_trust=recommendation_trust, + competence_belief=competence_belief, + integrity_belief=integrity_belief, + initial_reputation_provided_by_count=initial_reputation_count, + service_history=service_history, + recommendation_history=recommendation_history, + ) + + def get_peers_by_organisations( + self, organisation_ids: List[str] + ) -> List[PeerInfo]: + """ + Fetch PeerInfo records for peers that belong to at least one of the given organisations. + Each peer will also have their associated organisations. + + :param organisation_ids: List of organisation IDs to filter peers by. + :return: List of PeerInfo objects with associated organisation IDs. + """ + placeholders = ",".join("?" for _ in organisation_ids) + query = f""" + SELECT P.peerID, P.ip, GROUP_CONCAT(PO.organisationID) as organisations + FROM PeerInfo P + JOIN PeerOrganisation PO ON P.peerID = PO.peerID + WHERE PO.organisationID IN ({placeholders}) + GROUP BY P.peerID, P.ip; + """ + + results = self.__execute_query(query, organisation_ids) + + # Convert the result into a list of PeerInfo objects + peers = [] + for row in results: + peerID = row[0] + ip = row[1] + organisations = row[2].split(",") if row[2] else [] + peers.append( + PeerInfo(id=peerID, organisations=organisations, ip=ip) + ) + + return peers + + def insert_organisation_if_not_exists( + self, organisation_id: OrganisationId + ) -> None: + """ + Inserts an organisation into the Organisation table if it doesn't already exist. + + :param organisation_id: The organisation ID to insert. + """ + query = ( + "INSERT OR IGNORE INTO Organisation (organisationID) VALUES (?)" + ) + self.__execute_query(query, [organisation_id]) + + def insert_peer_organisation_connection( + self, peer_id: PeerId, organisation_id: OrganisationId + ) -> None: + """ + Inserts a connection between a peer and an organisation in the PeerOrganisation table. + + :param peer_id: The peer's ID. + :param organisation_id: The organisation's ID. + """ + self.__insert_peer_organisation(peer_id, organisation_id) + + def store_connected_peers_list(self, peers: List[PeerInfo]) -> None: + """ + Stores a list of PeerInfo instances into the database. + + :param peers: A list of PeerInfo instances to be stored. + """ + + peer_ids = [ + peer.id for peer in peers + ] # Extract the peer IDs from list L + placeholders = ",".join("?" for _ in peer_ids) + delete_query = ( + f"DELETE FROM PeerInfo WHERE peerID NOT IN ({placeholders})" + ) + self.__execute_query(delete_query, peer_ids) + + for peer_info in peers: + peer = { + "peerID": peer_info.id, + "ip": peer_info.ip, + } + self.__insert_peer_info(peer) + + for organisation_id in peer_info.organisations: + self.insert_organisation_if_not_exists(organisation_id) + self.insert_peer_organisation_connection( + peer_info.id, organisation_id + ) + + def get_connected_peers(self) -> List[PeerInfo]: + """ + Retrieves a list of PeerInfo instances from the database, including associated organisations. + + :return: A list of PeerInfo instances. 
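+
+        A usage sketch, assuming an initialised instance named db:
+
+        >>> peers = db.get_connected_peers()
+        >>> all(isinstance(p, PeerInfo) for p in peers)
+        True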
+        """
+        peer_info_list = []
+
+        with SQLiteDB._lock:
+            # Step 1: Query the PeerInfo table to get all peer information
+            peer_info_query = "SELECT peerID, ip FROM PeerInfo"
+            peer_info_results = self.__execute_query(peer_info_query)
+
+            # Step 2: For each peer, get the associated organisations from the PeerOrganisation table
+            for row in peer_info_results:
+                peer_id = row[0]  # peerID is the first column
+                ip = row[1]  # ip is the second column
+
+                # Step 3: Get associated organisations from the PeerOrganisation table
+                organisations = self.get_peer_organisations(peer_id)
+
+                # Step 4: Create the PeerInfo object and add it to the list
+                peer_info = PeerInfo(
+                    id=peer_id, organisations=organisations, ip=ip
+                )
+                peer_info_list.append(peer_info)
+
+        return peer_info_list
+
+    def get_peer_organisations(self, peer_id: PeerId) -> List[OrganisationId]:
+        """
+        Retrieves the list of organisations associated with a given peer from the PeerOrganisation table.
+
+        :param peer_id: The peer's ID.
+        :return: A list of Organisation IDs associated with the peer.
+        """
+        query = "SELECT organisationID FROM PeerOrganisation WHERE peerID = ?"
+        results = self.__execute_query(query, [peer_id])
+
+        # Extract organisationIDs from the query result and return them as a list
+        return [row[0] for row in results]
+
+    def __insert_peer_info(self, peer_info: dict) -> None:
+        """
+        Inserts or updates the given PeerInfo record in the database.
+
+        :param peer_info: Dict with the PeerInfo columns to insert or update.
+        """
+        # Insert or replace PeerInfo
+        self.__save("PeerInfo", peer_info)
+
+    def __insert_peer_organisation(
+        self, peer_id: PeerId, organisation_id: OrganisationId
+    ) -> None:
+        """
+        Inserts a PeerOrganisation record.
+
+        :param peer_id: The peer's ID.
+        :param organisation_id: The organisation's ID.
+        """
+        query = """
+            INSERT OR REPLACE INTO PeerOrganisation (peerID, organisationID)
+            VALUES (?, ?);
+        """
+        self.__execute_query(query, [peer_id, organisation_id])
+
+    def __connect(self) -> None:
+        """
+        Establishes a connection to the SQLite database.
+        """
+        self.__slips_log(f"Connecting to SQLite database at {self.db_path}")
+        self.connection = sqlite3.connect(self.db_path, check_same_thread=False)
+
+        if self.connection is None:
+            self.__slips_log("Failed to connect to the SQLite database!")
+            raise ConnectionError("SQLite connection failed")
+
+    def __execute_query(
+        self, query: str, params: Optional[List[Any]] = None
+    ) -> List[Any]:
+        """
+        Executes a given SQL query and returns the results.
+
+        :param query: The SQL query to execute.
+        :param params: Optional list of parameters for parameterized queries.
+        :return: List of results returned from the executed query.
+        """
+        with SQLiteDB._lock:
+            self.__slips_log(f"Executing query: {query}")
+            cursor = self.connection.cursor()
+            try:
+                if params:
+                    cursor.execute(query, params)
+                else:
+                    cursor.execute(query)
+                self.connection.commit()
+                return cursor.fetchall()
+            except Exception as e:
+                self.logger.error(f"Error executing query: {e}")
+                raise
+            finally:
+                cursor.close()  # Ensure the cursor is always closed
+
+    def __save(self, table: str, data: dict) -> None:
+        """
+        Inserts or replaces data into a given table.
+
+        :param table: The table in which to save the data.
+ :param data: A dictionary where the keys are column names, and values are the values to be saved. + :return: None + """ + columns = ", ".join(data.keys()) + placeholders = ", ".join("?" * len(data)) + query = f"INSERT OR REPLACE INTO {table} ({columns}) VALUES ({placeholders})" + self.__slips_log(f"Saving data: {data} into table: {table}") + self.__execute_query(query, list(data.values())) + + def __delete( + self, table: str, condition: str, params: Optional[List[Any]] = None + ) -> None: + """ + Deletes rows from a table that match the condition. + + :param table: The table from which to delete the data. + :param condition: A SQL condition for deleting rows (e.g., "id = ?"). + :param params: Optional list of parameters for parameterized queries. + :return: None + """ + query = f"DELETE FROM {table} WHERE {condition}" + self.__slips_log(f"Deleting from table: {table} where {condition}") + self.__execute_query(query, params) + + def close(self) -> None: + """ + Closes the SQLite database connection. + """ + if self.connection: + self.__slips_log("Closing database connection") + self.connection.close() + + def __create_tables(self) -> None: + """ + Creates the necessary tables in the SQLite database. + """ + table_creation_queries = [ + """ + CREATE TABLE IF NOT EXISTS PeerInfo ( + peerID TEXT PRIMARY KEY, + ip VARCHAR(39) + -- Add other attributes here (e.g., name TEXT, email TEXT, ...) + ); + """, + """ + CREATE TABLE IF NOT EXISTS ServiceHistory ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + peerID TEXT, + satisfaction FLOAT NOT NULL CHECK (satisfaction >= 0.0 AND satisfaction <= 1.0), + weight FLOAT NOT NULL CHECK (weight >= 0.0 AND weight <= 1.0), + service_time float NOT NULL, + -- Add other attributes here (e.g., serviceDate DATE, serviceType TEXT) + FOREIGN KEY (peerID) REFERENCES PeerInfo(peerID) ON DELETE CASCADE + ); + """, + """ + CREATE TABLE IF NOT EXISTS RecommendationHistory ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + peerID TEXT, + satisfaction FLOAT NOT NULL CHECK (satisfaction >= 0.0 AND satisfaction <= 1.0), + weight FLOAT NOT NULL CHECK (weight >= 0.0 AND weight <= 1.0), + recommend_time FLOAT NOT NULL, + -- Add other attributes here (e.g., recommendationDate DATE, recommendedBy TEXT, ...) + FOREIGN KEY (peerID) REFERENCES PeerInfo(peerID) ON DELETE CASCADE + ); + """, + """ + CREATE TABLE IF NOT EXISTS Organisation ( + organisationID TEXT PRIMARY KEY + -- Add other attributes here (e.g., organisationName TEXT, location TEXT, ...) 
+ ); + """, + """ + CREATE TABLE IF NOT EXISTS PeerOrganisation ( + peerID TEXT, + organisationID TEXT, + PRIMARY KEY (peerID, organisationID), + FOREIGN KEY (peerID) REFERENCES PeerInfo(peerID) ON DELETE CASCADE, + FOREIGN KEY (organisationID) REFERENCES Organisation(organisationID) ON DELETE CASCADE + ); + """, + """ + CREATE TABLE IF NOT EXISTS PeerTrustData ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + peerID TEXT, -- The peer providing the trust evaluation + has_fixed_trust INTEGER NOT NULL CHECK (has_fixed_trust IN (0, 1)), -- Whether the trust is dynamic or fixed + service_trust REAL NOT NULL CHECK (service_trust >= 0.0 AND service_trust <= 1.0), -- Service Trust Metric + reputation REAL NOT NULL CHECK (reputation >= 0.0 AND reputation <= 1.0), -- Reputation Metric + recommendation_trust REAL NOT NULL CHECK (recommendation_trust >= 0.0 AND recommendation_trust <= 1.0), -- Recommendation Trust Metric + competence_belief REAL NOT NULL CHECK (competence_belief >= 0.0 AND competence_belief <= 1.0), -- Competence Belief + integrity_belief REAL NOT NULL CHECK (integrity_belief >= 0.0 AND integrity_belief <= 1.0), -- Integrity Belief + initial_reputation_provided_by_count INTEGER NOT NULL, -- Count of peers providing initial reputation + FOREIGN KEY (peerID) REFERENCES PeerInfo(peerID) ON DELETE CASCADE -- Delete trust data when PeerInfo is deleted + ); + """, + """ + CREATE TABLE IF NOT EXISTS PeerTrustServiceHistory ( + peer_trust_data_id INTEGER, + service_history_id INTEGER, + PRIMARY KEY (peer_trust_data_id, service_history_id), + FOREIGN KEY (peer_trust_data_id) REFERENCES PeerTrustData(id) ON DELETE CASCADE, + FOREIGN KEY (service_history_id) REFERENCES ServiceHistory(id) ON DELETE CASCADE + ); + """, + """ + CREATE TABLE IF NOT EXISTS PeerTrustRecommendationHistory ( + peer_trust_data_id INTEGER, + recommendation_history_id INTEGER, + PRIMARY KEY (peer_trust_data_id, recommendation_history_id), + FOREIGN KEY (peer_trust_data_id) REFERENCES PeerTrustData(id) ON DELETE CASCADE, + FOREIGN KEY (recommendation_history_id) REFERENCES RecommendationHistory(id) ON DELETE CASCADE + ); + """, + """ + CREATE TABLE IF NOT EXISTS ThreatIntelligence ( + target TEXT PRIMARY KEY, -- The target of the intelligence (IP, domain, etc.) 
+                score REAL NOT NULL CHECK (score >= -1.0 AND score <= 1.0),
+                confidence REAL NOT NULL CHECK (confidence >= 0.0 AND confidence <= 1.0),
+                confidentiality REAL -- Optional confidentiality level
+            );
+            """,
+        ]
+
+        for query in table_creation_queries:
+            self.__slips_log(f"Creating tables with query: {query}")
+            self.__execute_query(query)
diff --git a/modules/fidesModule/persistence/threat_intelligence.py b/modules/fidesModule/persistence/threat_intelligence.py
new file mode 100644
index 000000000..f8ce520e2
--- /dev/null
+++ b/modules/fidesModule/persistence/threat_intelligence.py
@@ -0,0 +1,12 @@
+from typing import Optional
+
+from modules.fidesModule.model.aliases import Target
+from modules.fidesModule.model.threat_intelligence import SlipsThreatIntelligence
+
+
+class ThreatIntelligenceDatabase:
+    """Database that stores threat intelligence data."""
+
+    def get_for(self, target: Target) -> Optional[SlipsThreatIntelligence]:
+        """Returns threat intelligence for the given target or None if there is no data."""
+        raise NotImplementedError()
diff --git a/modules/fidesModule/persistence/threat_intelligence_db.py b/modules/fidesModule/persistence/threat_intelligence_db.py
new file mode 100644
index 000000000..5585edf26
--- /dev/null
+++ b/modules/fidesModule/persistence/threat_intelligence_db.py
@@ -0,0 +1,41 @@
+from typing import Optional
+
+
+from ..model.aliases import Target
+from ..model.configuration import TrustModelConfiguration
+from ..model.threat_intelligence import SlipsThreatIntelligence
+from modules.fidesModule.persistence.threat_intelligence import ThreatIntelligenceDatabase
+
+from slips_files.core.database.database_manager import DBManager
+import json
+from .sqlite_db import SQLiteDB
+
+
+class SlipsThreatIntelligenceDatabase(ThreatIntelligenceDatabase):
+    """Implementation of ThreatIntelligenceDatabase that uses Slips native
+    storage for the TI."""
+
+    def __init__(
+        self,
+        configuration: TrustModelConfiguration,
+        db: DBManager,
+        sqldb: SQLiteDB,
+    ):
+        self.__configuration = configuration
+        self.db = db
+        self.sqldb = sqldb
+
+    def get_for(self, target: Target) -> Optional[SlipsThreatIntelligence]:
+        """Returns threat intelligence for the given target or None if
+        there is no data."""
+        # Redis returns the dumped SlipsThreatIntelligence dict as str, or None
+        out = self.db.get_fides_ti(target)
+        if out:
+            out = SlipsThreatIntelligence(**json.loads(out))
+        else:
+            out = self.sqldb.get_slips_threat_intelligence_by_target(target)
+        return out
+
+    def save(self, ti: SlipsThreatIntelligence):
+        self.sqldb.store_slips_threat_intelligence(ti)
+        self.db.save_fides_ti(ti.target, json.dumps(ti.to_dict()))
diff --git a/modules/fidesModule/persistence/trust.py b/modules/fidesModule/persistence/trust.py
new file mode 100644
index 000000000..d9efe379e
--- /dev/null
+++ b/modules/fidesModule/persistence/trust.py
@@ -0,0 +1,68 @@
+from typing import List, Optional, Union
+
+from modules.fidesModule.messaging.model import PeerInfo
+from modules.fidesModule.model.aliases import PeerId, Target, OrganisationId
+from modules.fidesModule.model.configuration import TrustModelConfiguration
+from modules.fidesModule.model.peer_trust_data import PeerTrustData, TrustMatrix
+from modules.fidesModule.model.threat_intelligence import SlipsThreatIntelligence
+
+
+class TrustDatabase:
+    """Class responsible for persisting data for the trust model."""
+
+    def __init__(self, configuration: TrustModelConfiguration):
+        self.__configuration = configuration
+
+    def get_model_configuration(self) -> TrustModelConfiguration:
"""Returns current trust model configuration if set.""" + return self.__configuration + + def store_connected_peers_list(self, current_peers: List[PeerInfo]): + """Stores list of peers that are directly connected to the Slips.""" + raise NotImplemented() + + def get_connected_peers(self) -> List[PeerInfo]: + """Returns list of peers that are directly connected to the Slips.""" + raise NotImplemented() + + def get_peers_info(self, peer_ids: List[PeerId]) -> List[PeerInfo]: + """Returns list of peer infos for given ids.""" + raise NotImplemented() + + def get_peers_with_organisations(self, organisations: List[OrganisationId]) -> List[PeerInfo]: + """Returns list of peers that have one of given organisations.""" + raise NotImplemented() + + def get_peers_with_geq_recommendation_trust(self, minimal_recommendation_trust: float) -> List[PeerInfo]: + """Returns peers that have >= recommendation_trust then the minimal.""" + raise NotImplemented() + + def get_peers_with_geq_service_trust(self, minimal_service_trust: float) -> List[PeerInfo]: + """Returns peers that have >= service_trust then the minimal.""" + raise NotImplemented() + + def store_peer_trust_data(self, trust_data: PeerTrustData): + """Stores trust data for given peer - overwrites any data if existed.""" + raise NotImplemented() + + def store_peer_trust_matrix(self, trust_matrix: TrustMatrix): + """Stores trust matrix.""" + for peer in trust_matrix.values(): + self.store_peer_trust_data(peer) + + def get_peer_trust_data(self, peer: Union[PeerId, PeerInfo]) -> Optional[PeerTrustData]: + """Returns trust data for given peer ID, if no data are found, returns None.""" + raise NotImplemented() + + def get_peers_trust_data(self, peer_ids: List[Union[PeerId, PeerInfo]]) -> TrustMatrix: + """Return trust data for each peer from peer_ids.""" + data = [self.get_peer_trust_data(peer_id) for peer_id in peer_ids] + return {peer.peer_id: peer for peer in data if peer} + + def cache_network_opinion(self, ti: SlipsThreatIntelligence): + """Caches aggregated opinion on given target.""" + raise NotImplemented() + + def get_cached_network_opinion(self, target: Target) -> Optional[SlipsThreatIntelligence]: + """Returns cached network opinion. 
Checks cache time and returns None if data expired.""" + raise NotImplemented() diff --git a/modules/fidesModule/persistence/trust_db.py b/modules/fidesModule/persistence/trust_db.py new file mode 100644 index 000000000..98d2c7c11 --- /dev/null +++ b/modules/fidesModule/persistence/trust_db.py @@ -0,0 +1,166 @@ +from typing import List, Optional, Union + +from ..messaging.model import PeerInfo +from ..model.aliases import PeerId, Target, OrganisationId +from ..model.configuration import TrustModelConfiguration +from ..model.peer_trust_data import PeerTrustData, TrustMatrix +from ..model.threat_intelligence import SlipsThreatIntelligence +from modules.fidesModule.persistence.trust import TrustDatabase +from .sqlite_db import SQLiteDB + +from slips_files.core.database.database_manager import DBManager +import json +from ..utils.time import now + + +# because this will be implemented +# noinspection DuplicatedCode +class SlipsTrustDatabase(TrustDatabase): + """Trust database implementation that uses Slips redis and own SQLite as + a storage.""" + + def __init__( + self, + configuration: TrustModelConfiguration, + db: DBManager, + sqldb: SQLiteDB, + ): + super().__init__(configuration) + self.db = db + self.sqldb = sqldb + self.__configuration = configuration + self.conf = configuration + + def store_connected_peers_list(self, current_peers: List[PeerInfo]): + """Stores list of peers that are directly connected to the Slips.""" + + json_peers = [json.dumps(peer.to_dict()) for peer in current_peers] + self.sqldb.store_connected_peers_list(current_peers) + self.db.store_connected_peers(json_peers) + + def get_connected_peers(self) -> List[PeerInfo]: + """Returns list of peers that are directly connected to the Slips.""" + json_peers = self.db.get_connected_peers() # on no data returns [] + if not json_peers: + current_peers = self.sqldb.get_connected_peers() + else: + current_peers = [ + PeerInfo(**json.loads(peer_json)) for peer_json in json_peers + ] + return current_peers + + def get_peers_with_organisations( + self, organisations: List[OrganisationId] + ) -> List[PeerInfo]: + """Returns list of peers that have one of given organisations.""" + out = [] + raw = self.get_connected_peers() + + # self.sqldb.get_peers_by_organisations(organisations) + for peer in raw: + for organisation in organisations: + if organisation in peer.organisations: + out.append(peer) + return out + + def get_peers_with_geq_recommendation_trust( + self, minimal_recommendation_trust: float + ) -> List[PeerInfo]: + """ + Returns peers that have >= recommendation_trust then the minimal. + """ + connected_peers = self.get_connected_peers() # returns data or [] + out = [] + + # if no peers present in Redis, try SQLite DB + if connected_peers: + for peer in connected_peers: + td = self.get_peer_trust_data(peer.id) + + if ( + td is not None + and td.recommendation_trust >= minimal_recommendation_trust + ): + out.append(peer) + else: + out = self.sqldb.get_peers_by_minimal_recommendation_trust( + minimal_recommendation_trust + ) + + return out + + def store_peer_trust_data(self, trust_data: PeerTrustData): + """ + Stores trust data for given peer - overwrites any data if existed. 
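+
+        A minimal sketch of the dual write, assuming an initialised
+        SlipsTrustDatabase named trust_db:
+
+        >>> td = trust_data_prototype(PeerInfo(id="peer-1", organisations=[], ip=None))
+        >>> trust_db.store_peer_trust_data(td)  # SQLite row plus JSON mirror in Redis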
+ """ + self.sqldb.store_peer_trust_data(trust_data) + id_ = trust_data.info.id + td_json = json.dumps(trust_data.to_dict()) + self.db.store_peer_trust_data(id_, td_json) + + def store_peer_trust_matrix(self, trust_matrix: TrustMatrix): + """Stores trust matrix.""" + for peer in trust_matrix.values(): + self.store_peer_trust_data(peer) + + def get_peer_trust_data( + self, peer: Union[PeerId, PeerInfo] + ) -> Optional[PeerTrustData]: + """Returns trust data for given peer ID, if no data are found, + returns None.""" + out = None + peer_id = "" + + if isinstance(peer, PeerId): + peer_id = peer + elif isinstance(peer, PeerInfo): + peer_id = peer.id + else: + return out + + td_json = self.db.get_peer_trust_data(peer_id) + if td_json: # Redis has available data + out = PeerTrustData(**json.loads(td_json)) + else: # if redis is empty, try SQLite + out = self.sqldb.get_peer_trust_data(peer_id) + return out + + def get_peers_trust_data( + self, peer_ids: List[Union[PeerId, PeerInfo]] + ) -> TrustMatrix: + """Return trust data for each peer from peer_ids.""" + out = {} + peer_id = None + + for peer in peer_ids: + # get PeerID to properly create TrustMatrix + if isinstance(peer, PeerId): + peer_id = peer + elif isinstance(peer, PeerInfo): + peer_id = peer.id + + # TrustMatrix = Dict[PeerId, PeerTrustData]; here - peer_id: PeerId + out[peer_id] = self.get_peer_trust_data(peer_id) + return out + + def cache_network_opinion(self, ti: SlipsThreatIntelligence): + """Caches aggregated opinion on given target.""" + # cache is not backed up into SQLite, can be recalculated, not critical + self.db.cache_network_opinion(ti.target, ti.to_dict(), now()) + + def get_cached_network_opinion( + self, target: Target + ) -> Optional[SlipsThreatIntelligence]: + """Returns cached network opinion. 
Checks cache time and returns None + if data expired.""" + # cache is not backed up into SQLite, can be recalculated, + # not critical + rec = self.db.get_cached_network_opinion( + target, + self.__configuration.network_opinion_cache_valid_seconds, + now(), + ) + if rec is None: + return None + else: + return SlipsThreatIntelligence.from_dict(rec) diff --git a/modules/fidesModule/protocols/__init__.py b/modules/fidesModule/protocols/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/modules/fidesModule/protocols/alert.py b/modules/fidesModule/protocols/alert.py new file mode 100644 index 000000000..8ffdfa0c0 --- /dev/null +++ b/modules/fidesModule/protocols/alert.py @@ -0,0 +1,62 @@ +from typing import Callable + +from ..evaluation.service.interaction import Weight, SatisfactionLevels +from ..messaging.network_bridge import NetworkBridge +from ..model.alert import Alert +from ..model.aliases import Target +from ..model.configuration import TrustModelConfiguration +from ..model.peer import PeerInfo +from ..model.threat_intelligence import ( + ThreatIntelligence, + SlipsThreatIntelligence, +) +from ..persistence.trust_db import SlipsTrustDatabase +from ..protocols.initial_trusl import InitialTrustProtocol +from ..protocols.opinion import OpinionAggregator +from ..protocols.protocol import Protocol + + +class AlertProtocol(Protocol): + """Protocol that reacts and dispatches alerts.""" + + def __init__( + self, + trust_db: SlipsTrustDatabase, + bridge: NetworkBridge, + trust_protocol: InitialTrustProtocol, + configuration: TrustModelConfiguration, + aggregator: OpinionAggregator, + alert_callback: Callable[[SlipsThreatIntelligence], None], + ): + super().__init__(configuration, trust_db, bridge) + self.__trust_protocol = trust_protocol + self.__alert_callback = alert_callback + self.__aggregator = aggregator + + def dispatch_alert(self, target: Target, score: float, confidence: float): + """Dispatches alert to the network.""" + self._bridge.send_alert( + target, ThreatIntelligence(score=score, confidence=confidence) + ) + + def handle_alert(self, sender: PeerInfo, alert: Alert): + """Handle alert received from the network.""" + peer_trust = self._trust_db.get_peer_trust_data(sender.id) + + if peer_trust is None: + peer_trust = ( + self.__trust_protocol.determine_and_store_initial_trust( + sender, get_recommendations=False + ) + ) + # TODO: [?] maybe dispatch request to ask fellow peers? 
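+
+        # note: evaluate_alert (OpinionAggregator) weights the alert's confidence
+        # by max(alert_trust_from_unknown, the sender's service_trust), so alerts
+        # from unknown peers still propagate, only with reduced confidence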
+ + # aggregate request + ti = self.__aggregator.evaluate_alert(peer_trust, alert) + # and dispatch callback + self.__alert_callback(ti) + + # and update service data + self._evaluate_interaction( + peer_trust, SatisfactionLevels.Ok, Weight.ALERT + ) diff --git a/modules/fidesModule/protocols/initial_trusl.py b/modules/fidesModule/protocols/initial_trusl.py new file mode 100644 index 000000000..5e088ba00 --- /dev/null +++ b/modules/fidesModule/protocols/initial_trusl.py @@ -0,0 +1,93 @@ +from ..evaluation.service.interaction import Weight, SatisfactionLevels +from ..evaluation.service.process import process_service_interaction +from ..model.configuration import TrustModelConfiguration, TrustedEntity +from ..model.peer import PeerInfo +from ..model.peer_trust_data import PeerTrustData, trust_data_prototype +from ..persistence.trust_db import SlipsTrustDatabase +from ..protocols.recommendation import RecommendationProtocol +from ..utils.logger import Logger + +logger = Logger(__name__) + + +class InitialTrustProtocol: + def __init__(self, + trust_db: SlipsTrustDatabase, + configuration: TrustModelConfiguration, + recommendation_protocol: RecommendationProtocol + ): + self.__trust_db = trust_db + self.__configuration = configuration + self.__recommendation_protocol = recommendation_protocol + + def determine_and_store_initial_trust(self, peer: PeerInfo, get_recommendations: bool = False) -> PeerTrustData: + """Determines initial trust and stores that value in database. + + Returns trust data before the recommendation protocol is executed. + """ + logger.debug(f"Determining trust for peer {peer.id}", peer) + + existing_trust = self.__trust_db.get_peer_trust_data(peer.id) + if existing_trust is not None: + logger.debug(f"There's an existing trust for peer {peer.id}: ST: {existing_trust.service_trust}") + return existing_trust + + # now we know that this is a new peer + trust = trust_data_prototype(peer) + # set initial reputation from the config + trust.reputation = self.__configuration.initial_reputation + trust.recommendation_trust = trust.reputation + trust.initial_reputation_provided_by_count = 1 + + # check if this is pre-trusted peer + pre_trusted_peer = [p for p in self.__configuration.trusted_peers if trust.peer_id == p.id] + if len(pre_trusted_peer) == 1: + configured_peer = pre_trusted_peer[0] + self.__inherit_trust(trust, configured_peer) + trust.initial_reputation_provided_by_count += 1 + + # add values that are inherited from the organisations + peers_orgs = [org for org in self.__configuration.trusted_organisations if org.id in peer.organisations] + if peers_orgs: + logger.debug(f"Peer {peer.id} has known organisations.", peers_orgs) + trust.initial_reputation_provided_by_count += len(peers_orgs) + # select organisation that has the highest trust + leading_organisation = max(peers_orgs, key=lambda org: org.trust) + logger.debug(f"Main organisation selected, computing trust", leading_organisation) + # now set all other stuff from the organisation + self.__inherit_trust(trust, leading_organisation) + + # process interaction and assign all others values + trust = process_service_interaction(configuration=self.__configuration, + peer=trust, + satisfaction=SatisfactionLevels.Ok, + weight=Weight.FIRST_ENCOUNTER + ) + logger.debug(f"New trust for peer: {trust.peer_id}", trust) + + # determine if it is necessary to get recommendations from the network + # get recommendations if peer does not have any trusted organisation, or it is not pre-trusted + if not peers_orgs and not 
pre_trusted_peer and get_recommendations: + logger.debug("Getting recommendations.") + self.__recommendation_protocol.get_recommendation_for(trust.info) + + # now we save the trust to the database as we have everything we need + self.__trust_db.store_peer_trust_data(trust) + return trust + + @staticmethod + def __inherit_trust(trust: PeerTrustData, parent: TrustedEntity) -> PeerTrustData: + # TODO [?] check which believes / trust metrics can we set as well + trust.reputation = max(trust.reputation, parent.trust) + trust.recommendation_trust = trust.reputation + # if we need to enforce that the peer has the same trust during the runtime, + # we need to set service trust as well + if parent.enforce_trust: + trust.has_fixed_trust = True + trust.service_trust = trust.reputation + # and we will be satisfied with all interactions equally + trust.integrity_belief = 1 + trust.competence_belief = 1 + logger.debug(f"Enforced trust, leaving service trust to: {trust.service_trust}.") + + return trust diff --git a/modules/fidesModule/protocols/opinion.py b/modules/fidesModule/protocols/opinion.py new file mode 100644 index 000000000..79cb89b30 --- /dev/null +++ b/modules/fidesModule/protocols/opinion.py @@ -0,0 +1,43 @@ +from typing import Dict + +from ..evaluation.ti_aggregation import TIAggregation, PeerReport +from ..messaging.model import PeerIntelligenceResponse +from ..model.alert import Alert +from ..model.aliases import PeerId, Target +from ..model.configuration import TrustModelConfiguration +from ..model.peer_trust_data import PeerTrustData, TrustMatrix +from ..model.threat_intelligence import SlipsThreatIntelligence +from ..persistence.threat_intelligence_db import SlipsThreatIntelligenceDatabase + + +class OpinionAggregator: + """ + Class responsible for evaluation of the intelligence received from the network. 
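+
+    A worked example for evaluate_alert (illustrative numbers): with
+    alert_trust_from_unknown = 0.2 and a reporting peer whose service_trust is
+    0.6, an alert carrying score = -1.0 and confidence = 0.9 is aggregated to
+    score = -1.0 and confidence = 0.9 * max(0.2, 0.6) = 0.54.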
+ """ + + def __init__(self, + configuration: TrustModelConfiguration, + ti_db: SlipsThreatIntelligenceDatabase, + ti_aggregation: TIAggregation): + self.__configuration = configuration + self.__ti_db = ti_db + self.__ti_aggregation = ti_aggregation + + def evaluate_alert(self, peer_trust: PeerTrustData, alert: Alert) -> SlipsThreatIntelligence: + """Evaluates given data about alert and produces aggregated intelligence for Slips.""" + + alert_trust = max(self.__configuration.alert_trust_from_unknown, peer_trust.service_trust) + score = alert.score + confidence = alert.confidence * alert_trust + return SlipsThreatIntelligence(score=score, confidence=confidence, target=alert.target) + + def evaluate_intelligence_response(self, + target: Target, + data: Dict[PeerId, PeerIntelligenceResponse], + trust_matrix: TrustMatrix) -> SlipsThreatIntelligence: + """Evaluates given threat intelligence report from the network.""" + reports = [PeerReport(report_ti=ti.intelligence, + reporter_trust=trust_matrix[peer_id] + ) for peer_id, ti in data.items()] + ti = self.__ti_aggregation.assemble_peer_opinion(data=reports) + return SlipsThreatIntelligence(score=ti.score, confidence=ti.confidence, target=target) diff --git a/modules/fidesModule/protocols/peer_list.py b/modules/fidesModule/protocols/peer_list.py new file mode 100644 index 000000000..6e6fcc554 --- /dev/null +++ b/modules/fidesModule/protocols/peer_list.py @@ -0,0 +1,45 @@ +from typing import List + +from ..messaging.network_bridge import NetworkBridge +from ..model.peer import PeerInfo +from ..persistence.trust_db import SlipsTrustDatabase +from ..protocols.initial_trusl import InitialTrustProtocol +from ..protocols.recommendation import RecommendationProtocol + + +class PeerListUpdateProtocol: + """Protocol handling situations when peer list was updated.""" + + def __init__(self, + trust_db: SlipsTrustDatabase, + bridge: NetworkBridge, + recommendation_protocol: RecommendationProtocol, + trust_protocol: InitialTrustProtocol + ): + self.__trust_db = trust_db + self.__bridge = bridge + self.__recommendation_protocol = recommendation_protocol + self.__trust_protocol = trust_protocol + + def handle_peer_list_updated(self, peers: List[PeerInfo]): + """Processes updated peer list.""" + # first store them in the database + self.__trust_db.store_connected_peers_list(peers) + # and now find their trust metrics to send it to the network module + trust_data = self.__trust_db.get_peers_trust_data([p.id for p in peers]) + known_peers = {peer_id for peer_id, trust in trust_data.items() if trust is not None} + # if we don't have data for all peers that means that there are some new peers + # we need to establish initial trust for them + if len(known_peers) != len(peers): + new_trusts = [] + for peer in [p for p in peers if p.id not in known_peers]: + # this stores trust in database as well, do not get recommendations because at this point + # we don't have correct peer list in database + peer_trust = self.__trust_protocol.determine_and_store_initial_trust(peer, get_recommendations=False) + new_trusts.append(peer_trust) + # get recommendations for this peer + self.__recommendation_protocol.get_recommendation_for(peer, connected_peers=list(known_peers)) + # send only updated trusts to the network layer + self.__bridge.send_peers_reliability({p.peer_id: p.service_trust for p in new_trusts}) + # now set update peer list in database + self.__trust_db.store_connected_peers_list(peers) diff --git a/modules/fidesModule/protocols/protocol.py 
b/modules/fidesModule/protocols/protocol.py new file mode 100644 index 000000000..b9ec4b614 --- /dev/null +++ b/modules/fidesModule/protocols/protocol.py @@ -0,0 +1,42 @@ +from typing import Dict, Tuple + +from ..evaluation.service.interaction import Satisfaction, Weight +from ..evaluation.service.process import process_service_interaction +from ..messaging.network_bridge import NetworkBridge +from ..model.aliases import PeerId +from ..model.configuration import TrustModelConfiguration +from ..model.peer_trust_data import PeerTrustData, TrustMatrix +from modules.fidesModule.persistence.trust import TrustDatabase + + +class Protocol: + + def __init__(self, + configuration: TrustModelConfiguration, + trust_db: TrustDatabase, + bridge: NetworkBridge): + self._configuration = configuration + self._trust_db = trust_db + self._bridge = bridge + + def _evaluate_interaction(self, + peer: PeerTrustData, + satisfaction: Satisfaction, + weight: Weight + ) -> PeerTrustData: + """Callback to evaluate and save new trust data for given peer.""" + return self._evaluate_interactions({peer.peer_id: (peer, satisfaction, weight)})[peer.peer_id] + + def _evaluate_interactions(self, + data: Dict[PeerId, Tuple[PeerTrustData, Satisfaction, Weight]]) -> TrustMatrix: + """Callback to evaluate and save new trust data for given peer matrix.""" + trust_matrix: TrustMatrix = {} + # first process all interactions + for _, (peer_trust, satisfaction, weight) in data.items(): + updated_trust = process_service_interaction(self._configuration, peer_trust, satisfaction, weight) + trust_matrix[updated_trust.peer_id] = updated_trust + # then store matrix + self._trust_db.store_peer_trust_matrix(trust_matrix) + # and dispatch this update to the network layer + self._bridge.send_peers_reliability({p.peer_id: p.service_trust for p in trust_matrix.values()}) + return trust_matrix diff --git a/modules/fidesModule/protocols/recommendation.py b/modules/fidesModule/protocols/recommendation.py new file mode 100644 index 000000000..a9b732fdc --- /dev/null +++ b/modules/fidesModule/protocols/recommendation.py @@ -0,0 +1,166 @@ +import math +from typing import List, Optional + +from ..evaluation.recommendation.process import process_new_recommendations +from ..evaluation.service.interaction import Weight, SatisfactionLevels +from ..messaging.model import PeerRecommendationResponse +from ..messaging.network_bridge import NetworkBridge +from ..model.aliases import PeerId +from ..model.configuration import TrustModelConfiguration +from ..model.peer import PeerInfo +from ..model.recommendation import Recommendation +from ..persistence.trust_db import SlipsTrustDatabase +from ..protocols.protocol import Protocol +from ..utils.logger import Logger + +logger = Logger(__name__) + + +class RecommendationProtocol(Protocol): + """Protocol that is responsible for getting and updating recommendation data.""" + + def __init__(self, configuration: TrustModelConfiguration, trust_db: SlipsTrustDatabase, bridge: NetworkBridge): + super().__init__(configuration, trust_db, bridge) + self.__rec_conf = configuration.recommendations + self.__trust_db = trust_db + self.__bridge = bridge + + def get_recommendation_for(self, peer: PeerInfo, connected_peers: Optional[List[PeerId]] = None): + """Dispatches recommendation request from the network. + + connected_peers - new peer list if the one from database is not accurate + """ + if not self.__rec_conf.enabled: + logger.debug(f"Recommendation protocol is disabled. 
NOT getting recommendations for Peer {peer.id}.") + return + + connected_peers = connected_peers if connected_peers is not None else self.__trust_db.get_connected_peers() + recipients = self.__get_recommendation_request_recipients(peer, connected_peers) + if recipients: + self.__bridge.send_recommendation_request(recipients=recipients, peer=peer.id) + else: + logger.debug(f"No peers are trusted enough to ask them for recommendation!") + + def handle_recommendation_request(self, request_id: str, sender: PeerInfo, subject: PeerId): + """Handle request for recommendation on given subject.""" + sender_trust = self.__trust_db.get_peer_trust_data(sender) + # TODO: [+] implement data filtering based on the sender + trust = self.__trust_db.get_peer_trust_data(subject) + # if we know sender, and we have some trust for the target + if sender_trust and trust: + recommendation = Recommendation( + competence_belief=trust.competence_belief, + integrity_belief=trust.integrity_belief, + service_history_size=trust.service_history_size, + recommendation=trust.reputation, + initial_reputation_provided_by_count=trust.initial_reputation_provided_by_count + ) + else: + recommendation = Recommendation( + competence_belief=0, + integrity_belief=0, + service_history_size=0, + recommendation=0, + initial_reputation_provided_by_count=0 + ) + self.__bridge.send_recommendation_response(request_id, sender.id, subject, recommendation) + # it is possible that we saw sender for the first time + # TODO: [+] initialise peer if we saw it for the first time + if sender_trust: + self._evaluate_interaction(sender_trust, SatisfactionLevels.Ok, Weight.INTELLIGENCE_REQUEST) + + def handle_recommendation_response(self, responses: List[PeerRecommendationResponse]): + """Handles response from peers with recommendations. Updates all necessary values in db.""" + if len(responses) == 0: + return + # TODO: [+] handle cases with multiple subjects + assert all(responses[0].subject == r.subject for r in responses), \ + "Responses are not for the same subject!" + + subject = self.__trust_db.get_peer_trust_data(responses[0].subject) + if subject is None: + logger.warn(f'Received recommendation for subject {responses[0].subject} that does not exist!') + return + + recommendations = {r.sender.id: r.recommendation for r in responses} + trust_matrix = self.__trust_db.get_peers_trust_data(list(recommendations.keys())) + + # check that the data are consistent + assert len(trust_matrix) == len(responses) == len(recommendations), \ + f'Data are not consistent: TM: {len(trust_matrix)}, RES: {len(responses)}, REC: {len(recommendations)}!' 
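+
+        # invariant established above: every response maps to exactly one
+        # recommendation and one trust-matrix entry, so the three collections
+        # must be equally sized before the aggregation below runs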
+ + # update all recommendations + updated_matrix = process_new_recommendations( + configuration=self._configuration, + subject=subject, + matrix=trust_matrix, + recommendations=recommendations + ) + # now store updated matrix + self.__trust_db.store_peer_trust_matrix(updated_matrix) + # and dispatch event + self.__bridge.send_peers_reliability({p.peer_id: p.service_trust for p in updated_matrix.values()}) + + # TODO: [+] optionally employ same thing as when receiving TI + interaction_matrix = {p.peer_id: (p, SatisfactionLevels.Ok, Weight.RECOMMENDATION_RESPONSE) + for p in trust_matrix.values()} + self._evaluate_interactions(interaction_matrix) + + @staticmethod + def __is_zero_recommendation(recommendation: Recommendation) -> bool: + return recommendation.competence_belief == 0 and \ + recommendation.integrity_belief == 0 and \ + recommendation.service_history_size == 0 and \ + recommendation.recommendation == 0 and \ + recommendation.initial_reputation_provided_by_count == 0 + + def __get_recommendation_request_recipients(self, + subject: PeerInfo, + connected_peers: List[PeerInfo]) -> List[PeerId]: + recommenders: List[PeerInfo] = [] + require_trusted_peer_count = self.__rec_conf.required_trusted_peers_count + trusted_peer_threshold = self.__rec_conf.trusted_peer_threshold + + if self.__rec_conf.only_connected: + recommenders = connected_peers + + if self.__rec_conf.only_preconfigured: + preconfigured_peers = set(p.id for p in self._configuration.trusted_peers) + preconfigured_organisations = set(p.id for p in self._configuration.trusted_organisations) + + if len(recommenders) > 0: + # if there are already some recommenders it means that only_connected filter is enabled + # in that case we need to filter those peers and see if they either are on preconfigured + # list or if they have any organisation + recommenders = [p for p in recommenders + if p.id in preconfigured_peers + or preconfigured_organisations.intersection(p.organisations)] + else: + # if there are no recommenders, only_preconfigured is disabled, so we select all preconfigured + # peers and all peers from database that have the organisation + recommenders = self.__trust_db.get_peers_info(list(preconfigured_peers)) \ + + self.__trust_db.get_peers_with_organisations(list(preconfigured_organisations)) + # if we have only_preconfigured, we do not need to care about minimal trust because we're safe enough + require_trusted_peer_count = -math.inf + elif not self.__rec_conf.only_connected: + # in this case there's no restriction, and we can freely select any peers + # select peers that hev at least trusted_peer_threshold recommendation trust + recommenders = self.__trust_db.get_peers_with_geq_recommendation_trust(trusted_peer_threshold) + # if there's not enough peers like that, select some more with this service trust + if len(recommenders) <= self.__rec_conf.peers_max_count: + # TODO: [+] maybe add higher trusted_peer_threshold for this one + recommenders += self.__trust_db.get_peers_with_geq_service_trust(trusted_peer_threshold) + + # now we need to get all trust data and sort them by recommendation trust + candidates = list(self.__trust_db.get_peers_trust_data(recommenders).values()) + candidates = [c for c in candidates if c.peer_id != subject.id] + # check if we can proceed + if len(candidates) == 0 or len(candidates) < require_trusted_peer_count: + logger.debug( + f"Not enough trusted peers! 
Candidates: {len(candidates)}, requirement: {require_trusted_peer_count}.") + return [] + + # now sort them + candidates.sort(key=lambda c: c.service_trust, reverse=True) + # and take only top __rec_conf.peers_max_count peers to ask for recommendations + return [p.peer_id for p in candidates][:self.__rec_conf.peers_max_count] diff --git a/modules/fidesModule/protocols/threat_intelligence.py b/modules/fidesModule/protocols/threat_intelligence.py new file mode 100644 index 000000000..6ae9234d9 --- /dev/null +++ b/modules/fidesModule/protocols/threat_intelligence.py @@ -0,0 +1,112 @@ +from typing import List, Callable, Optional + +from ..evaluation.service.interaction import Weight, SatisfactionLevels +from ..evaluation.ti_evaluation import TIEvaluation +from ..messaging.model import PeerIntelligenceResponse +from ..messaging.network_bridge import NetworkBridge +from ..model.aliases import Target +from ..model.configuration import TrustModelConfiguration +from ..model.peer import PeerInfo +from ..model.peer_trust_data import PeerTrustData +from ..model.threat_intelligence import ThreatIntelligence, SlipsThreatIntelligence +from ..persistence.threat_intelligence_db import SlipsThreatIntelligenceDatabase +from ..persistence.trust_db import SlipsTrustDatabase +from ..protocols.initial_trusl import InitialTrustProtocol +from ..protocols.opinion import OpinionAggregator +from ..protocols.protocol import Protocol +from ..utils.logger import Logger + +logger = Logger(__name__) + + +class ThreatIntelligenceProtocol(Protocol): + """Class handling threat intelligence requests and responses.""" + + def __init__(self, + trust_db: SlipsTrustDatabase, + ti_db: SlipsThreatIntelligenceDatabase, + bridge: NetworkBridge, + configuration: TrustModelConfiguration, + aggregator: OpinionAggregator, + trust_protocol: InitialTrustProtocol, + ti_evaluation_strategy: TIEvaluation, + network_opinion_callback: Callable[[SlipsThreatIntelligence], None] + ): + super().__init__(configuration, trust_db, bridge) + self.__ti_db = ti_db + self.__aggregator = aggregator + self.__trust_protocol = trust_protocol + self.__ti_evaluation_strategy = ti_evaluation_strategy + self.__network_opinion_callback = network_opinion_callback + + def request_data(self, target: Target): + """Requests network opinion on given target.""" + cached = self._trust_db.get_cached_network_opinion(target) + if cached: + logger.debug(f'TI for target {target} found in cache.') + return self.__network_opinion_callback(cached) + else: + logger.debug(f'Requesting data for target {target} from network.') + self._bridge.send_intelligence_request(target) + + def handle_intelligence_request(self, request_id: str, sender: PeerInfo, target: Target): + """Handles intelligence request.""" + peer_trust = self._trust_db.get_peer_trust_data(sender.id) + if not peer_trust: + logger.debug(f'We don\'t have any trust data for peer {sender.id}!') + peer_trust = self.__trust_protocol.determine_and_store_initial_trust(sender) + + ti = self.__filter_ti(self.__ti_db.get_for(target), peer_trust) + if ti is None: + # we send just zeros if we don't have any data about the target + ti = ThreatIntelligence(score=0, confidence=0) + + # and respond with data we have + self._bridge.send_intelligence_response(request_id, target, ti) + self._evaluate_interaction(peer_trust, + SatisfactionLevels.Ok, + Weight.INTELLIGENCE_REQUEST) + + def handle_intelligence_response(self, responses: List[PeerIntelligenceResponse]): + """Handles intelligence responses.""" + trust_matrix = 
self._trust_db.get_peers_trust_data([r.sender.id for r in responses]) + assert len(trust_matrix) == len(responses), 'We need to have trust data for all peers that sent the response.' + target = {r.target for r in responses} + assert len(target) == 1, 'Responses should be for a single target.' + target = target.pop() + + # now everything is checked, so we aggregate it and get the threat intelligence + r = {r.sender.id: r for r in responses} + ti = self.__aggregator.evaluate_intelligence_response(target, r, trust_matrix) + # cache data for further retrieval + self._trust_db.cache_network_opinion(ti) + #test = self._trust_db.get_cached_network_opinion(target) + + interaction_matrix = self.__ti_evaluation_strategy.evaluate( + aggregated_ti=ti, + responses=r, + trust_matrix=trust_matrix, + local_ti=self.__ti_db.get_for(target) + ) + self._evaluate_interactions(interaction_matrix) + + return self.__network_opinion_callback(ti) + + def __filter_ti(self, + ti: Optional[SlipsThreatIntelligence], + peer_trust: PeerTrustData) -> Optional[SlipsThreatIntelligence]: + if ti is None: + return None + + peers_allowed_levels = [p.confidentiality_level + for p in self._configuration.trusted_organisations if + p.id in peer_trust.organisations] + + peers_allowed_levels.append(peer_trust.service_trust) + # select maximum allowed level + allowed_level = max(peers_allowed_levels) + + # set correct confidentiality + ti.confidentiality = ti.confidentiality if ti.confidentiality else self._configuration.data_default_level + # check if data confidentiality is lower than allowed level for the peer + return ti if ti.confidentiality <= allowed_level else None diff --git a/modules/fidesModule/utils/__init__.py b/modules/fidesModule/utils/__init__.py new file mode 100644 index 000000000..4178439eb --- /dev/null +++ b/modules/fidesModule/utils/__init__.py @@ -0,0 +1,7 @@ +def bound(value, low, high): + if value < low: + return low + elif value > high: + return high + else: + return value diff --git a/modules/fidesModule/utils/logger.py b/modules/fidesModule/utils/logger.py new file mode 100644 index 000000000..9fbb14e83 --- /dev/null +++ b/modules/fidesModule/utils/logger.py @@ -0,0 +1,83 @@ +import json +import threading +from dataclasses import is_dataclass, asdict +from tabnanny import verbose +from typing import Optional, List, Callable + +LoggerPrintCallbacks: List[Callable[[str, Optional[str], Optional[int], Optional[int], Optional[bool]], None]] = [ + lambda msg, level=None, verbose=1, debug=0, log_to_logfiles_only=False: print( + f'{level}: {msg}' if level is not None else f'UNSPECIFIED_LEVEL: {msg}' + ) +] +"""Set this to custom callback that should be executed when there's new log message. + +First parameter is level ('DEBUG', 'INFO', 'WARN', 'ERROR'), second is message to be logged. +""" + + +class Logger: + """Logger class used for logging. + + When the application runs as a Slips module, it uses native Slips logging, + otherwise it uses basic println. 
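+
+    A typical use (illustrative):
+
+        logger = Logger(__name__)
+        logger.info('module started')
+        logger.error('something went wrong')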
+    """
+
+    def __init__(self, name: Optional[str] = None):
+        # try to guess the name if it is not set explicitly
+        if name is None:
+            name = self.__try_to_guess_name()
+        self.__name = name
+        self.log_levels = {
+            'INFO': 1,
+            'WARN': 2,
+            'ERROR': 3
+        }
+
+    # this whole method is a hack
+    # noinspection PyBroadException
+    @staticmethod
+    def __try_to_guess_name() -> str:
+        # noinspection PyPep8
+        try:
+            import sys
+            # noinspection PyUnresolvedReferences,PyProtectedMember
+            name = sys._getframe().f_back.f_code.co_name
+            if name is None:
+                import inspect
+                frame = inspect.currentframe()
+                frame = inspect.getouterframes(frame, 2)
+                name = frame[1][3]
+        except Exception:
+            name = "logger"
+        return name
+
+    def debug(self, message: str, params=None):
+        return self.__print('DEBUG', message, params)
+
+    def info(self, message: str, params=None):
+        return self.__print('INFO', message, params)
+
+    def warn(self, message: str, params=None):
+        return self.__print('WARN', message, params)
+
+    def error(self, message: str, params=None):
+        return self.__print('ERROR', message, params)
+
+    def __format(self, message: str, params=None):
+        thread = threading.get_ident()
+        formatted_message = f"T{thread}: {self.__name} - {message}"
+        if params:
+            params = asdict(params) if is_dataclass(params) else params
+            formatted_message = f"{formatted_message} {json.dumps(params)}"
+        return formatted_message
+
+    def __print(self, level: str, message: str, params=None):
+        formatted_message = self.__format(message, params)
+        for print_callback in LoggerPrintCallbacks:
+            if level == 'DEBUG':
+                # DEBUG messages are passed with verbose=0 so the print
+                # callback can decide whether to show them
+                print_callback(formatted_message, verbose=0)
+            else:
+                print_callback(formatted_message, verbose=self.log_levels[level])
diff --git a/modules/fidesModule/utils/time.py b/modules/fidesModule/utils/time.py
new file mode 100644
index 000000000..e802070f6
--- /dev/null
+++ b/modules/fidesModule/utils/time.py
@@ -0,0 +1,14 @@
+import time
+
+Time = float
+"""Type for time used across the whole module.
+
+Represents the current time in seconds since the Epoch. Can have fractions of seconds.
+
+We have it as alias so we can easily change that in the future.
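+
+A quick usage example (illustrative):
+
+    from modules.fidesModule.utils.time import Time, now
+
+    ts: Time = now()  # seconds since the Epoch, as a float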
+""" + + +def now() -> Time: + """Returns current Time.""" + return time.time() diff --git a/modules/p2ptrust/p2ptrust.py b/modules/p2ptrust/p2ptrust.py index 1723de22e..4dab65164 100644 --- a/modules/p2ptrust/p2ptrust.py +++ b/modules/p2ptrust/p2ptrust.py @@ -9,7 +9,6 @@ from pathlib import Path from typing import Dict, Optional, Tuple import json -import sys import socket from slips_files.common.parsers.config_parser import ConfigParser @@ -158,9 +157,6 @@ def read_configuration(self): conf = ConfigParser() self.create_p2p_logfile: bool = conf.create_p2p_logfile() - def get_used_interface(self): - return sys.argv[sys.argv.index("-i") + 1] - def get_local_IP(self): s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) s.connect(("8.8.8.8", 80)) @@ -233,11 +229,13 @@ def _configure(self): # else: host_param = ["-host", self.host] self.print( - f"P2p is listening on {self.host} port {self.port} determined by p2p module" + f"P2p is listening on {self.host} port {self.port} determined " + f"by p2p module" ) keyfile_param = ["-key-file", self.pigeon_key_file] - # rename_with_port_param = ["-rename-with-port", str(self.rename_with_port).lower()] + # rename_with_port_param = ["-rename-with-port", + # str(self.rename_with_port).lower()] pygo_channel_param = ["-redis-channel-pygo", self.pygo_channel_raw] gopy_channel_param = ["-redis-channel-gopy", self.gopy_channel_raw] executable.extend(port_param) @@ -666,7 +664,8 @@ def main(self): self.gopy_callback(msg) ret_code = self.pigeon.poll() - if ret_code is not None: + if ret_code not in (None, 0): + # The pigeon stopped with some error self.print( f"Pigeon process suddenly terminated with " f"return code {ret_code}. Stopping module." diff --git a/slips/main.py b/slips/main.py index 93024a3f1..2206741b6 100644 --- a/slips/main.py +++ b/slips/main.py @@ -509,9 +509,12 @@ def start(self): if self.args.growing: if self.input_type != "zeek_folder": self.print( - f"Parameter -g should be using with " - f"-f not a {self.input_type}. " - f"Ignoring -g" + f"Parameter -g should be used with " + f"-f not a {self.input_type} file. " + f"Ignoring -g. Analyzing {self.input_information} " + f"instead.", + verbose=1, + debug=3, ) else: self.print( @@ -585,12 +588,6 @@ def sig_handler(sig, frame): self.db.store_pid("slips.py", int(self.pid)) self.metadata_man.set_input_metadata() - if self.conf.use_p2p() and not self.args.interface: - self.print( - "Warning: P2P is only supported using " - "an interface. Disabled P2P." 
- ) - # warn about unused open redis servers open_servers = len(self.redis_man.get_open_redis_servers()) if open_servers > 1: diff --git a/slips_files/common/parsers/config_parser.py b/slips_files/common/parsers/config_parser.py index 941470174..46e5aa206 100644 --- a/slips_files/common/parsers/config_parser.py +++ b/slips_files/common/parsers/config_parser.py @@ -187,7 +187,9 @@ def stderr(self): return self.read_configuration("modes", "stderr", "errors.log") def create_p2p_logfile(self): - return self.read_configuration("P2P", "create_p2p_logfile", False) + return self.read_configuration( + "local_p2p", "create_p2p_logfile", False + ) def ts_format(self): return self.read_configuration("timestamp", "format", None) @@ -249,8 +251,11 @@ def get_tw_width(self) -> str: def enable_metadata(self): return self.read_configuration("parameters", "metadata_dir", False) - def use_p2p(self): - return self.read_configuration("P2P", "use_p2p", False) + def use_local_p2p(self): + return self.read_configuration("local_p2p", "use_p2p", False) + + def use_fides(self): + return self.read_configuration("global_p2p", "use_fides", False) def cesnet_conf_file(self): return self.read_configuration("CESNET", "configuration_file", False) @@ -626,10 +631,14 @@ def get_disabled_modules(self, input_type: str) -> list: if "stix" not in export_to and "slack" not in export_to: to_ignore.append("exporting_alerts") - use_p2p = self.use_p2p() - if not use_p2p or "-i" not in sys.argv: + use_p2p = self.use_local_p2p() + if not (use_p2p and "-i" in sys.argv): to_ignore.append("p2ptrust") + use_fides = self.use_fides() + if not (use_fides and ("-i" in sys.argv or "-g" in sys.argv)): + to_ignore.append("fidesModule") + # ignore CESNET sharing module if send and receive are # disabled in slips.yaml send_to_warden = self.send_to_warden() diff --git a/slips_files/core/database/database_manager.py b/slips_files/core/database/database_manager.py index 9ecc16635..f9e198de0 100644 --- a/slips_files/core/database/database_manager.py +++ b/slips_files/core/database/database_manager.py @@ -952,3 +952,34 @@ def close(self, *args, **kwargs): # when stopping the daemon using -S, slips doesn't start the sqlite db if self.sqlite: self.sqlite.close(*args, **kwargs) + + def get_fides_ti(self, target: str): + return self.rdb.get_fides_ti(target) + + def save_fides_ti(self, target: str, STI: str): + self.rdb.save_fides_ti(target, STI) + + def store_connected_peers(self, peers: List[str]): + self.rdb.store_connected_peers(peers) + + def get_connected_peers(self): + return self.rdb.get_connected_peers() # no data -> [] + + def store_peer_trust_data(self, id: str, td: str): + self.rdb.update_peer_td(id, td) + + def get_peer_trust_data(self, id: str): + return self.rdb.get_peer_td(id) + + def get_all_peers_trust_data(self): + return self.rdb.get_all_peers_td() + + def cache_network_opinion(self, target: str, opinion: dict, time: float): + self.rdb.cache_network_opinion(target, opinion, time) + + def get_cached_network_opinion( + self, target: str, cache_valid_seconds: int, current_time: float + ): + return self.rdb.get_cached_network_opinion( + target, cache_valid_seconds, current_time + ) diff --git a/slips_files/core/database/redis_db/database.py b/slips_files/core/database/redis_db/database.py index 22f27ce3b..e103fe3b2 100644 --- a/slips_files/core/database/redis_db/database.py +++ b/slips_files/core/database/redis_db/database.py @@ -10,6 +10,7 @@ from slips_files.core.database.redis_db.ioc_handler import IoCHandler from 
slips_files.core.database.redis_db.alert_handler import AlertHandler from slips_files.core.database.redis_db.profile_handler import ProfileHandler +from slips_files.core.database.redis_db.p2p_handler import P2PHandler import os import signal @@ -30,7 +31,7 @@ RUNNING_IN_DOCKER = os.environ.get("IS_IN_A_DOCKER_CONTAINER", False) -class RedisDB(IoCHandler, AlertHandler, ProfileHandler): +class RedisDB(IoCHandler, AlertHandler, ProfileHandler, P2PHandler): # this db is a singelton per port. meaning no 2 instances # should be created for the same port at the same time _obj = None @@ -82,6 +83,11 @@ class RedisDB(IoCHandler, AlertHandler, ProfileHandler): "control_channel", "new_module_flow" "cpu_profile", "memory_profile", + "fides_d", + "fides2network", + "network2fides", + "fides2slips", + "slips2fides", } separator = "_" normal_label = "benign" diff --git a/slips_files/core/database/redis_db/p2p_handler.py b/slips_files/core/database/redis_db/p2p_handler.py new file mode 100644 index 000000000..6804e1a5a --- /dev/null +++ b/slips_files/core/database/redis_db/p2p_handler.py @@ -0,0 +1,106 @@ +import json +from typing import ( + List, +) + +trust = "peers_strust" +hash = "peer_info" +FIDES_CACHE_KEY = "fides_cache" + + +class P2PHandler: + """ + Helper class for the Redis class in database.py + Contains all the logic related Fides module + """ + + name = "P2PHandlerDB" + + def get_fides_ti(self, target: str): + """ + returns the TI stored for specified target or None + """ + return self.r.get(target) or None + + def save_fides_ti(self, target: str, data: str): + """ + :param target: target is used as a key to store the data + :param data: SlipsThreatIntelligence that is to be saved + """ + self.r.set(target, data) + + def store_connected_peers(self, peers: List[str]): + self.r.set("connected_peers", json.dumps(peers)) + + def get_connected_peers(self): + json_list = self.r.get("connected_peers") or None + + if json_list is None: + return [] + else: + json_peers = json.loads(json_list) + return json_peers + + def store_peer_td(self, peer_id, td: str): + self.r.sadd(trust, peer_id) + self.r.hset(hash, peer_id, td) + + def get_peer_td(self, peer_id: str): + """ + Get peer trust data by peer_id. + """ + return self.r.hget(hash, peer_id) + + def update_peer_td(self, peer_id: str, updated_td: str): + """ + Update peer information. + """ + if self.r.sismember(trust, peer_id): + self.r.hset(hash, peer_id, updated_td) + else: + self.store_peer_td(peer_id, updated_td) + + def get_all_peers_td(self): + """ + Get all connected peers trust data. + """ + peer_ids = self.r.smembers(trust) + peers = {peer_id: self.r.hget(hash, peer_id) for peer_id in peer_ids} + return peers + + def remove_peer_td(self, peer_id: str): + """ + Remove a peer trust data from the set and hash. 
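+
+        Illustrative round-trip (trust data values are JSON strings,
+        and db stands for any instance exposing this handler):
+
+            db.store_peer_td("peer1", td_json)
+            db.get_peer_td("peer1")    # -> td_json
+            db.remove_peer_td("peer1")
+            db.get_peer_td("peer1")    # -> None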
+ """ + self.r.srem(trust, peer_id) + self.r.hdel(hash, peer_id) + + def cache_network_opinion(self, target: str, opinion: dict, time: float): + cache_key = f"{FIDES_CACHE_KEY}:{target}" + + cache_data = {"created_seconds": time, **opinion} + self.r.hmset(cache_key, cache_data) + + def get_cached_network_opinion( + self, target: str, cache_valid_seconds: int, current_time: float + ): + cache_key = f"{FIDES_CACHE_KEY}:{target}" + cache_data = self.r.hgetall(cache_key) + if not cache_data: + return None + + cache_data = {k: v for k, v in cache_data.items()} + + # Get the time the opinion was cached + created_seconds = float(cache_data.get("created_seconds", 0)) + # Check if the cached entry is still valid + if current_time - created_seconds > cache_valid_seconds: + # The cached opinion has expired, delete the entry + self.r.delete(cache_key) + return None + + # Return the opinion (excluding the created_seconds field) + opinion = { + k: v for k, v in cache_data.items() if k != "created_seconds" + } + return opinion diff --git a/slips_files/core/helpers/checker.py b/slips_files/core/helpers/checker.py index cb34022b5..d1960dd5f 100644 --- a/slips_files/core/helpers/checker.py +++ b/slips_files/core/helpers/checker.py @@ -98,15 +98,31 @@ def check_given_flags(self): ): print("Redis database is not running. Stopping Slips") self.main.terminate_slips() + if self.main.args.config and not os.path.exists(self.main.args.config): print(f"{self.main.args.config} doesn't exist. Stopping Slips") self.main.terminate_slips() + if self.main.conf.use_local_p2p() and not self.main.args.interface: + print( + "Warning: P2P is only supported using " + "an interface. P2P Disabled." + ) + + if self.main.conf.use_fides() and not ( + self.main.args.interface or self.main.args.growing + ): + print( + "Warning: Fides is only supported using " + "an interface. Fides Module Disabled." + ) + if self.main.args.interface: interfaces = psutil.net_if_addrs().keys() if self.main.args.interface not in interfaces: print( - f"{self.main.args.interface} is not a valid interface. Stopping Slips" + f"{self.main.args.interface} is not a valid interface. 
" + f"Stopping Slips" ) self.main.terminate_slips() diff --git a/tests/common_test_utils.py b/tests/common_test_utils.py index e4d163e8c..6da9beb0b 100644 --- a/tests/common_test_utils.py +++ b/tests/common_test_utils.py @@ -10,6 +10,7 @@ Dict, Optional, ) +from pathlib import PosixPath from unittest.mock import Mock IS_IN_A_DOCKER_CONTAINER = os.environ.get("IS_IN_A_DOCKER_CONTAINER", False) @@ -66,7 +67,7 @@ def is_evidence_present(log_file, expected_evidence): return False -def create_output_dir(dirname): +def create_output_dir(dirname) -> PosixPath: """ creates this output dir inside output/integration_tests/ returns a full path to the created output dir @@ -176,6 +177,6 @@ def assert_no_errors(output_dir): # reading large files # the goal of this is to be able to view the error from CI # without having to download the artifacts - assert not has_error_keywords(line), ( - read_file_if_small(file) or line - ) + assert not has_error_keywords( + line + ), f"file: {file} {read_file_if_small(file) or line}" diff --git a/tests/integration_tests/config/fides.conf.yml b/tests/integration_tests/config/fides.conf.yml new file mode 100644 index 000000000..deb048fc6 --- /dev/null +++ b/tests/integration_tests/config/fides.conf.yml @@ -0,0 +1,151 @@ +# This is main configuration file for the trust model +# NOTE: if you update this file' structure, you need to update fides.model.configuration.py parsing as well + +# Settings related to running inside slips +slips: + +# settings related to network protocol +network: + +# Values that define this instance of Fides +my: + id: myId + organisations: [ ] + + +# Confidentiality related settings +confidentiality: + # possible levels of data that are labeled by Slips + # the value defines how secret the data are where 0 (can be shared + # with anybody) and 1 (can not be shared at all) + # + # the checks are: if(entity.confidentiality_level >= data.confidentiality_level) allowData() + # see https://www.cisa.gov/tlp + levels: + # share all data + - name: WHITE # name of the level, used mainly for debugging purposes + value: 0 # value that is used during computation + - name: GREEN + value: 0.2 + - name: AMBER + value: 0.5 + - name: RED + value: 0.7 + # do not share anything ever + - name: PRIVATE + value: 1.1 # never meets condition peer.privacyLevel >= data.level as peer.privacyLevel <0, 1> + + # if some data are not labeled, what value should we use + defaultLevel: 0 + + # rules that apply when the model is filtering data for peers + thresholds: + - level: 0.2 # for this level (and all levels > this) require + requiredTrust: 0.2 # this trust + - level: 0.5 + requiredTrust: 0.5 + - level: 0.7 + requiredTrust: 0.8 + - level: 1 + requiredTrust: 1 + +# Trust model related settings +trust: + # service trust evaluation + service: + # initial reputation that is assigned for every peer when there's new encounter + initialReputation: 0.5 + + # maximal size of Service History, sh_max + historyMaxSize: 100 + + # settings for recommendations + recommendations: + # if the recommendation protocol should be executed + enabled: True + # when selecting recommenders, use only the ones that are currently connected + useOnlyConnected: False + # if true, protocol will only ask pre-trusted peers / organisations for recommendations + useOnlyPreconfigured: False + # require minimal number of trusted connected peers before running recommendations + # valid only if trust.recommendations.useOnlyPreconfigured == False + requiredTrustedPeersCount: 1 + # minimal trust for trusted peer + # 
valid only if trust.recommendations.useOnlyPreconfigured == False + trustedPeerThreshold: 0.8 + # maximal count of peers that are asked to give recommendations on a peer, η_max + peersMaxCount: 100 + # maximal size of Recommendation History, rh_max + historyMaxSize: 100 + + # alert protocol + alert: + # how much should we trust an alert that was sent by peer we don't know anything about + defaultTrust: 0.5 + + # trust these organisations with given trust by default + organisations: + - id: org1 # public key of the organisation + name: Organisation \#1 # name + trust: 0.1 # how much should the model trust peers from this org + enforceTrust: True # whether to allow (if false) changing trust during runtime (when we received more data from org) + confidentialityLevel: 0.7 # what level of data should be shared with peers from this org, see privacy.levels + + - id: org2 + name: Organisation \#2 + trust: 0.9 + enforceTrust: False + confidentialityLevel: 0.9 + + # trust these peers with given trust by default + # see doc for trust.organisations + peers: + - id: peer1 + name: Peer \#1 + trust: 0.1 + enforceTrust: True + confidentialityLevel: 0.7 + + - id: peer2 + name: Peer \#2 + trust: 0.9 + enforceTrust: False + confidentialityLevel: 0.9 + + # how many minutes is network opinion considered valid + networkOpinionCacheValidSeconds: 3600 + + # which strategy should be used to evaluate interaction when peer provided threat intelligence on a target + # see fides.evaluation.ti_evaluation.py for options + # options: ['even', 'distance', 'localDistance', 'threshold', 'maxConfidence', 'weighedDistance'] + interactionEvaluationStrategies: + used: 'threshold' + # these are configuration for the strategies, content will be passed as a **kwargs to the instance + # even strategy uses the same satisfaction value for every interaction + even: + # value used as a default satisfaction for all peers + satisfaction: 1 + # distance measures distance between aggregated network intelligence and each intelligence from the peers + distance: + # localDistance measures distance between each peer's intelligence to local threat intelligence by Slips + localDistance: + # weighedDistance combines distance and localDistance with given weight + weighedDistance: + # weight of the local TI to TI aggregated from the network + localWeight: 0.4 + # maxConfidence uses combination of distance, localDistance and even - utilizes their confidence to + # make decisions with the highest possible confidence + maxConfidence: + # threshold employs 'lower' value strategy when the confidence of the aggregated TI is lower than 'threshold', + # otherwise it uses 'higher' - 'even' and 'distance' strategies work best with this + threshold: + # minimal confidence level + threshold: 0.7 + # this strategy is used when the aggregated confidence is lower than the threshold + lower: 'even' + # and this one when it is higher + higher: 'distance' + + # Threat Intelligence aggregation strategy + # valid values - ['average', 'weightedAverage', 'stdevFromScore'] + tiAggregationStrategy: 'average' diff --git a/tests/integration_tests/fides_config.yaml b/tests/integration_tests/fides_config.yaml new file mode 100644 index 000000000..a88c1ccf5 --- /dev/null +++ b/tests/integration_tests/fides_config.yaml @@ -0,0 +1,430 @@ +# This configuration file controls several aspects of the working of Slips + +# in daemonized mode the following files are used +# to log info about daemon state, errors, etc.. 
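
Before the sections themselves, a note on how this fixture is consumed: Slips parses it with PyYAML through its ConfigParser (see slips_files/common/parsers/config_parser.py). A quick, illustrative sanity check of the flags this test relies on:

```python
# Illustration only: load the fixture and confirm the P2P flags
# resolve to the values the Fides integration tests expect.
import yaml

with open("tests/integration_tests/fides_config.yaml") as f:
    conf = yaml.safe_load(f)

assert conf["global_p2p"]["use_fides"] is True   # global P2P enabled
assert conf["local_p2p"]["use_p2p"] is False     # local P2P disabled
```
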
+modes: + stdout: slips.log + stderr: errors.log + logsfile: slips.log + +############################# +# Parameters that can be also specified with modifiers in the command line +# This controls the output of slips in the console +parameters: + + # The verbosity is related to how much data you want to see about the + # detections useful for an administrator, + # behaviors, normal and malicious traffic, etc. + verbose : 1 + # The debugging is related to errors, warnings and cases that may cause errors + debug : 0 + + # The width of the time window used + # 1 minute + # time_window_width : 60 + # 5 min + # time_window_width : 300 + # 1 hour + time_window_width : 3600 + # 1 day + # time_window_width = 86400 + # One time window only. Is like if not time windows were used. Beware that the + # names of the files for the TW have + # a year in the name that is 100 years back. + # time_window_width : 'only_one_tw' + + # Export the strato letters used for detecting C&C by the RNN model + # to the strato_letters.tsv in the current output directory. + # these letters are used for re-training the model. + export_strato_letters: False + + # This option determines whether to analyze only what goes OUT of the local network or also what is coming IN the local network + # Options: out, all + # In the 'out' configuration, SLIPS focuses on analyzing outbound traffic + # originating from the internal local IPs. + # It creates profiles for local IPs and public external IPs, but only analyzes the outgoing traffic from the private IPs + # to public destinations. + # Any inbound traffic or attacks from external IPs are not processed. + + # In the 'all' configuration, Slips creates profiles for both private and public IPs, + # and analyzes traffic in both directions, inbound and outbound. + # It processes traffic originating from private IP addresses, as well as external public IP addresses. + # This mode provides comprehensive network monitoring, allowing you to detect + # outgoing as well as incoming attacks and connections. + # analysis_direction : all + analysis_direction : out + + # Delete zeek log files after stopping slips. + # this parameter deletes arp.log every 1h. useful for saving disk space + delete_zeek_files : False + + # Store a copy of zeek files in the output dir after the analysis is done. + # shouldn't be set to yes if delete_zeek_files is set to yes, because if the zeek files + # are deleted after slips is done, there's no way to store a copy of them anywhere + store_a_copy_of_zeek_files : False + + # store the generated zeek files in the output dir while the slips is running. + store_zeek_files_in_the_output_dir : True + + # Create a metadata dir output/metadata/ that has a copy of slips.yaml, whitelist file, + # current commit and date + metadata_dir : True + + # Default pcap packet filter. Used with zeek + # pcapfilter : 'ip or not ip' + # If you want more important traffic and forget the multicast and broadcast stuff, you can use + # pcapfilter : 'not icmp and not multicast and not broadcast and not arp and not port 5353 and not port 67' + pcapfilter : False + # tcp_inactivity_timeout (in minutes). Used with zeek + # Default tcp_inactivity_timeout is 5 minutes. + # But because sometimes the delay between packets is more than 5 mins, + # zeek breaks the connection into smaller connections + tcp_inactivity_timeout : 60 + + # Should we delete the previously stored data in the DB when we start? + # By default False. Meaning we don't DELETE the DB by default. 
+ deletePrevdb : True + + # You can remember the data in all the previous runs of the DB if you put False. + # Redis will remember as long as the redis server is not down. The persistence is + # in memory, not disk. + # deletePrevdb : False + + # Set the label for all the flows that are being read. + # For now only normal and malware directly. No option for setting labels with a filter + # label : malicious + # label : unknown + label : normal + + + # The default path of whitelist.conf, either specify a file in slips main working dir, or an absolute path + whitelist_path : config/whitelist.conf + + + # zeek rotation is enabled by default when using an interface, + # which means slips will delete all zeek log + # files after 1 day of running, so that zeek doesn't use too much disk space + # rotation : no + rotation : True + + # how often do you want to delete zeek files + # can be written as a numeric constant followed by a time unit where + # the time unit is one of usec, msec, sec, min, hr, or day which respectively + # represent microseconds, milliseconds, seconds, minutes, hours, and days. + # Whitespace between the numeric constant and time unit is optional. Appending the letter s to the + # time unit in order to pluralize it is also optional + # rotation_period = 30min + # rotation_period = 2hr + # rotation_period = 30sec + rotation_period : 1day + + # how many days you want to keep your rotated files before deleting them? value should be in days + # set it to 0 day if you want to delete them immediately + # keep_rotated_files_for : 1 day + # keep_rotated_files_for : 2 day + # keep_rotated_files_for : 3 day + keep_rotated_files_for : 1 day + + # how many minutes to wait for all modules to finish before killing them + #wait_for_modules_to_finish : 15 mins + # 1 week + wait_for_modules_to_finish : 10080 mins + + # flows are labeled to normal/malicious and added to the sqlite db in the output dir by default + export_labeled_flows : False + # export_format can be tsv or json. this parameter is ignored if export_labeled_flows is set to no + export_format : json + + # These are the IPs that we see the majority of traffic going out of from. + # for example, this can be your own IP or some computer you’re monitoring + # when using slips on an interface, this client IP is automatically set as + # your own IP and is used to improve detections + # it would be useful to specify it when analyzing pcaps or zeek logs + # client_ips : [10.0.0.1, 172.16.0.9, 172.217.171.238] + client_ips : [] + +############################# +detection: + # This threshold is the minimum accumulated threat level per + # time window needed to generate an alert. + # It controls how sensitive Slips is. + # the default 0.25 value gives you balanced detections with + # the optimal false positive rate and accuracy + + # Here are more options + # - 0.08: Use this threshold If you want Slips to be super sensitive with higher FPR, + # using this means you are less likely to miss a + # detection but more likely to get false positives + # - 0.25: Optimal threshold, has the most optimal FPR and TPR. + # - 0.43: Use this threshold If you want Slips to be insensitive. + # Using this means Slips will need so many evidence to trigger an alert. + # May lead to false negatives + evidence_detection_threshold : 0.01 + + + # Slips can show a popup/notification with every alert. + popup_alerts : False + +############################# +modules: + # List of modules to ignore. By default we always ignore the template! 
do not remove it from the list
+  # Names of other modules that you can disable (they all should be lowercase with no special characters):
+  # threatintelligence, blocking, networkdiscovery, timeline, virustotal,
+  # rnnccdetection, flowmldetection, updatemanager
+  disable: [template, updatemanager]
+
+  # For each line in timeline file there is a timestamp.
+  # By default the timestamp is seconds in unix time. However
+  # by setting this variable to "True" value the time will be human readable.
+  timeline_human_timestamp : True
+
+
+#############################
+flowmldetection:
+  # The mode 'train' should be used to tell the flowmldetection module
+  # that the flows received are all for training.
+  # A label should be provided in the [Parameters] section
+  # mode : train
+
+  # The mode 'test' should be used after training the models, to test on unknown data.
+  # You should have trained at least once with 'Normal' data and once with
+  # 'Malicious' data in order for the test to work.
+  mode : test
+
+#############################
+virustotal:
+  # This is the path to the API key. The file should contain the key at the
+  # start of the first line, and nothing more.
+  # If no key is found, VT module will not be started.
+  api_key_file : config/vt_api_key
+
+  # Update period of virustotal for each IP in the cache
+  # The expected value in seconds.
+  # 3 days = 259200 seconds
+  virustotal_update_period : 259200
+
+#############################
+threatintelligence:
+
+  # by default, slips starts without the TI files, and runs the Update Manager in the background
+  # if this option is set to yes, slips will not start until the update manager is done
+  # and all TI files are loaded successfully
+  # this is useful if you want to ensure that slips doesn't miss the detection of any blacklisted IPs
+  wait_for_TI_to_finish : False
+
+  # Default path to the folder with files holding malicious IPs
+  # All the files in this folder are read and the IPs are considered malicious
+  # The format of the files must be, per line: "Number","IP address","Rating", "Description"
+  # For example: "1","191.101.31.25","100","NSO IP by Amnesty"
+  local_threat_intelligence_files : config/local_ti_files/
+  download_path_for_remote_threat_intelligence : modules/threat_intelligence/remote_data_files/
+
+  # Update period of Threat Intelligence files. How often should we update the IoCs?
+  # The expected value in seconds.
+  # 1 day = 86400 seconds
+  TI_files_update_period : 86400
+
+
+  # Update period of the tranco online whitelist. How often should we re-download and update the list?
+  # The expected value in seconds.
+  # 1 day = 86400 seconds
+  # 1 week = 604800 seconds
+  # 2 weeks = 1209600 seconds
+  online_whitelist_update_period : 86400
+
+  online_whitelist : https://tranco-list.eu/download/X5QNN/10000
+
+  # Update period of the mac db. How often should we update the db?
+  # The expected value in seconds.
+  # 1 week = 604800 seconds
+  # 2 weeks = 1209600 seconds
+  mac_db_update : 1209600
+
+  mac_db : https://maclookup.app/downloads/json-database/get-db?t=24-11-28&h=26271dbc3529f006a4be021ec4cf99fab16e39cd
+
+  # the file that contains all our TI feeds URLs and their threat level
+  ti_files : config/TI_feeds.csv
+
+  # the file that contains all our JA3 feeds URLs and their threat level
+  # These feeds contain JA3 fingerprints that are identified as malicious.
+  ja3_feeds : config/JA3_feeds.csv
+
+  # the file that contains all our SHA1 SSL fingerprints feeds and their threat level
+  # These feeds contain SHA1 SSL fingerprints that are identified as malicious.
+  ssl_feeds : config/SSL_feeds.csv
+
+
+  # (Optional) Slips supports RiskIQ feeds as an additional source of TI data
+  # This file should contain your email and your 64 char API key, each on its own line.
+  RiskIQ_credentials_path : config/RiskIQ_credentials
+
+  # Update period is set to 1 week by default, if you're not a premium riskIQ
+  # user check your quota limit before changing this value
+  # 1 week = 604800 seconds
+  update_period : 604800
+
+#############################
+flowalerts:
+
+  # Threshold (in seconds) used to decide that a connection is long. In Slips the default is 1500.
+  long_connection_threshold : 1500
+
+  # Number of all bytes sent from 1 IP to another to trigger an SSH successful alert.
+  ssh_succesful_detection_threshold : 4290
+
+  # threshold in MBs
+  data_exfiltration_threshold : 500
+
+  # for DNS over TXT, we consider any answer with entropy above the
+  # following threshold malicious.
+  entropy_threshold : 5
+
+  # how many bytes downloaded from pastebin should trigger an alert?
+  pastebin_download_threshold : 700
+
+#############################
+exporting_alerts:
+
+  # available options [slack,stix] without quotes
+  # export_to : [stix]
+  # export_to : [slack]
+  export_to : "[]"
+
+  # We'll use this channel to send alerts
+  slack_channel_name : proj_slips_alerting_module
+
+  # This name will be used to identify which alert belongs to which device in your slack channel
+  sensor_name : sensor1
+
+  # filepath where the slack token should be
+  slack_api_path : config/slack_bot_token_secret
+
+  # Server to use if you enable exporting STIX
+  TAXII_server : localhost
+  # if your TAXII server is a remote server,
+  # you can set the port to 443 or 80.
+  port : 1234
+  use_https : False
+  discovery_path : /services/discovery-a
+  inbox_path : /services/inbox-a
+
+  # Collection on the server you want to push stix data to
+  collection_name : collection-a
+
+  # This value is only used when slips is running non-stop (e.g. with -i)
+  # push_delay is the time to wait before pushing STIX data to the server (in seconds)
+  # If running on a file not an interface
+  # slips will export to the server after the analysis is done.
+  # 3600 = 1h
+  push_delay : 3600
+
+  # TAXII server credentials
+  taxii_username : admin
+  taxii_password : admin
+
+  # URL used to obtain the JWT token. set this to '' if you don't want to use it
+  # is required for JWT based authentication. (JWT based authentication is Optional)
+  # It's usually /management/auth
+  jwt_auth_path : /management/auth
+
+#############################
+CESNET:
+
+  # Slips supports exporting and importing evidence in the IDEA format to/from warden servers.
+  send_alerts : False
+  receive_alerts : False
+
+  # warden configuration file.
For format instructions check + # https://stratospherelinuxips.readthedocs.io/en/develop/exporting.html?highlight=exporting# cesnet-sharing + configuration_file : config/warden.conf + + # Time to wait before receiving alerts from warden server (in seconds) + # By default receive alerts every 1 day + receive_delay : 86400 + +############################# +DisabledAlerts: + + # All the following detections are turned on by default + # Turn them off by adding any of the following detections to the disabled_detections list + + # ARP_SCAN, ARP_OUTSIDE_LOCALNET, UNSOLICITED_ARP, MITM_ARP_ATTACK, + # YOUNG_DOMAIN, MULTIPLE_SSH_VERSIONS, DIFFERENT_LOCALNET, + # DEVICE_CHANGING_IP, NON_HTTP_PORT_80_CONNECTION, NON_SSL_PORT_443_CONNECTION, + # WEIRD_HTTP_METHOD, INCOMPATIBLE_CN, DGA_NXDOMAINS, DNS_WITHOUT_CONNECTION, + # PASTEBIN_DOWNLOAD, CONNECTION_WITHOUT_DNS, DNS_ARPA_SCAN, UNKNOWN_PORT, + # PASSWORD_GUESSING, HORIZONTAL_PORT_SCAN, CONNECTION_TO_PRIVATE_IP, GRE_TUNNEL, + # VERTICAL_PORT_SCAN, SSH_SUCCESSFUL, LONG_CONNECTION, SELF_SIGNED_CERTIFICATE, + # MULTIPLE_RECONNECTION_ATTEMPTS, CONNECTION_TO_MULTIPLE_PORTS, HIGH_ENTROPY_DNS_ANSWER, + # INVALID_DNS_RESOLUTION, PORT_0_CONNECTION, MALICIOUS_JA3, MALICIOUS_JA3S, + # DATA_UPLOAD, BAD_SMTP_LOGIN, SMTP_LOGIN_BRUTEFORCE, MALICIOUS_SSL_CERT, + # MALICIOUS_FLOW, SUSPICIOUS_USER_AGENT, EMPTY_CONNECTIONS, INCOMPATIBLE_USER_AGENT, + # EXECUTABLE_MIME_TYPE, MULTIPLE_USER_AGENT, HTTP_TRAFFIC, MALICIOUS_JARM, + # NETWORK_GPS_LOCATION_LEAKED, ICMP_TIMESTAMP_SCAN, ICMP_ADDRESS_SCAN, + # ICMP_ADDRESS_MASK_SCAN, DHCP_SCAN, MALICIOUS_IP_FROM_P2P_NETWORK, P2P_REPORT, + # COMMAND_AND_CONTROL_CHANNEL, THREAT_INTELLIGENCE_BLACKLISTED_ASN, + # THREAT_INTELLIGENCE_BLACKLISTED_IP, THREAT_INTELLIGENCE_BLACKLISTED_DOMAIN, + # MALICIOUS_DOWNLOADED_FILE, MALICIOUS_URL + + # disabled_detections = [THREAT_INTELLIGENCE_BLACKLISTED_IP, CONNECTION_TO_PRIVATE_IP] + disabled_detections : "[]" + +############################# +Docker: + # ID and group id of the user who started to docker container + # the purpose of using them is to change the ownership of the docker created files to be able to rwx the files from + # outside docker too, for example the files in the output/ dir + UID : 0 + GID : 0 + +############################# +Profiling: + + # [11] CPU profiling + + # enable cpu profiling [yes,no] + cpu_profiler_enable : False + + # Available options are [dev,live] + # dev for deterministic profiling. this will give precise information about the CPU usage + # throughout the program runtime. This module cannot give live updates + # live mode is for sampling data stream. To track the function stack in real time. it is accessible from web interface + cpu_profiler_mode : dev + + # profile all subprocesses in dev mode [yes,no]. + cpu_profiler_multiprocess : True + + # set number of tracer entries (dev mode only) + cpu_profiler_dev_mode_entries : 1000000 + + # set maximum output lines (live mode only) + cpu_profiler_output_limit : 20 + + # set the wait time between sampling sequences in seconds (live mode only) + cpu_profiler_sampling_interval : 20 + + # enable memory profiling [yes,no] + memory_profiler_enable : False + + # set profiling mode [dev,live] + memory_profiler_mode : live + + # profile all subprocesses [yes,no] + memory_profiler_multiprocess : True + + +############################# +web_interface: + port : 55000 + +############################# +global_p2p: + # this is the global p2p's trust model. 
can only be enabled when + # running slips on an interface + use_fides: True + +############################# +local_p2p: + # create p2p.log with additional info about peer communications? + create_p2p_logfile : False + use_p2p : False diff --git a/tests/integration_tests/test.yaml b/tests/integration_tests/test.yaml index 542a027ba..0513564b0 100644 --- a/tests/integration_tests/test.yaml +++ b/tests/integration_tests/test.yaml @@ -84,7 +84,7 @@ parameters: # By default False. Meaning we don't DELETE the DB by default. deletePrevdb : True # You can remember the data in all the previous runs of the DB if you put False. -# Redis will remember as long as the redis server is not down. The persistance is on the memory, not disk. +# Redis will remember as long as the redis server is not down. The persistence is on the memory, not disk. #deletePrevdb = False # Set the label for all the flows that are being read. @@ -357,13 +357,15 @@ Profiling: web_interface: - port : 55000 - -#################### -# [10] enable or disable p2p for slips -P2P: -# create p2p.log with additional info about peer communications? yes or no - create_p2p_logfile : False -# use_p2p = yes - use_p2p : False +############################# +global_p2p: + # this is the global p2p's trust model. can only be enabled when + # running slips on an interface + use_fides: False + +############################# +local_p2p: + # create p2p.log with additional info about peer communications? + create_p2p_logfile : False + use_p2p : False diff --git a/tests/integration_tests/test2.yaml b/tests/integration_tests/test2.yaml index a6522800e..34c053e85 100644 --- a/tests/integration_tests/test2.yaml +++ b/tests/integration_tests/test2.yaml @@ -86,7 +86,7 @@ parameters: # By default False. Meaning we don't DELETE the DB by default. deletePrevdb : True # You can remember the data in all the previous runs of the DB if you put False. - # Redis will remember as long as the redis server is not down. The persistance is + # Redis will remember as long as the redis server is not down. The persistence is # on the memory, not disk. # deletePrevdb : False diff --git a/tests/integration_tests/test_fides.py b/tests/integration_tests/test_fides.py new file mode 100644 index 000000000..96477a08d --- /dev/null +++ b/tests/integration_tests/test_fides.py @@ -0,0 +1,339 @@ +""" +This file tests 2 different config files other than slips' default config/slips.yaml +test/test.yaml and tests/test2.yaml +""" + +import shutil +from pathlib import PosixPath + +import redis + +from modules.fidesModule.model.peer import PeerInfo +from modules.fidesModule.persistence.sqlite_db import SQLiteDB +from tests.common_test_utils import ( + create_output_dir, + assert_no_errors, +) +from tests.module_factory import ModuleFactory +import pytest +import os +import subprocess +import time +import sys +from unittest.mock import Mock +import modules.fidesModule.model.peer_trust_data as ptd + +alerts_file = "alerts.log" + + +def delete_file_if_exists(file_path): + if os.path.exists(file_path): + os.remove(file_path) + print(f"File '{file_path}' has been deleted.") + else: + print(f"File '{file_path}' does not exist.") + + +def countdown(seconds, message): + """ + counts down from the given number of seconds, printing a message each second. 
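+
+    Example (illustrative):
+
+        countdown(3, "test message")
+        # prints "Sending test message in 3", then 2, 1, and finally
+        # "Sending test message now!"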
+ """ + while seconds > 0: + sys.stdout.write( + f"\rSending {message} in {seconds} " + ) # overwrite the line + sys.stdout.flush() # ensures immediate output + time.sleep(1) # waits for 1 second + seconds -= 1 + sys.stdout.write(f"\rSending {message} now! \n") + + +def message_send(port): + # connect to redis database 0 + # channel = "fides2network" + channel = "network2fides" + message = """ + { + "type": "nl2tl_intelligence_response", + "version": 1, + "data": [ + { + "sender": { + "id": "peer1", + "organisations": ["org_123", "org_456"], + "ip": "192.168.1.1" + }, + "payload": { + "intelligence": { + "target": {"type": "server", "value": "192.168.1.10"}, + "confidentiality": {"level": 0.8}, + "score": 0.5, + "confidence": 0.95 + }, + "target": "stratosphere.org" + } + }, + { + "sender": { + "id": "peer2", + "organisations": ["org_789"], + "ip": "192.168.1.2" + }, + "payload": { + "intelligence": { + "target": {"type": "workstation", "value": "192.168.1.20"}, + "confidentiality": {"level": 0.7}, + "score": -0.85, + "confidence": 0.92 + }, + "target": "stratosphere.org" + } + } + ] + } + """ + redis_client = redis.StrictRedis(host="localhost", port=port, db=0) + # publish the message to the "network2fides" channel + redis_client.publish(channel, message) + + print(f"Test message published to channel '{channel}'.") + + +def message_receive(port): + import redis + import json + + # connect to redis database 0 + redis_client = redis.StrictRedis(host="localhost", port=port, db=0) + + # define a callback function to handle received messages + def message_handler(message): + if message["type"] == "message": # ensure it's a message type + data = message["data"].decode("utf-8") # decode byte data + print("Received message:") + print( + json.dumps(json.loads(data), indent=4) + ) # pretty-print JSON message + + # subscribe to the "fides2slips" channel + pubsub = redis_client.pubsub() + pubsub.subscribe("fides2network") + + print("Listening on the 'fides2network' channel. Waiting for messages...") + + # process one message + for message in pubsub.listen(): + message_handler(message) + break # exit after processing one message + + +@pytest.mark.parametrize( + "path, output_dir, redis_port", + [ + ( + "dataset/test13-malicious-dhcpscan-zeek-dir", + "fides_test_conf_file2/", + 6644, + ) + ], +) +def test_conf_file2(path, output_dir, redis_port): + """ + In this test we're using tests/test2.conf + """ + output_dir: PosixPath = create_output_dir(output_dir) + output_file = os.path.join(output_dir, "slips_output.txt") + command = [ + "./slips.py", + "-t", + "-g", + "-e", + "1", + "-f", + str(path), + "-o", + str(output_dir), + "-c", + "tests/integration_tests/fides_config.yaml", + "-P", + str(redis_port), + ] + + print("running slips ...") + print(output_dir) + + # Open the log file in write mode + with open(output_file, "w") as log_file: + # Start the subprocess, redirecting stdout and stderr to the same file + process = subprocess.Popen( + command, # Replace with your command + stdout=log_file, + stderr=log_file, + ) + + print(f"Output and errors are logged in {output_file}") + countdown(40, "sigterm") + # send a SIGTERM to the process + os.kill(process.pid, 15) + print("SIGTERM sent. 
killing slips") + os.kill(process.pid, 9) + + message_receive(redis_port) + + print(f"Slips with PID {process.pid} was killed.") + + print("Slip is done, checking for errors in the output dir.") + assert_no_errors(output_dir) + print("Checking database") + db = ModuleFactory().create_db_manager_obj( + redis_port, output_dir=output_dir, start_redis_server=False + ) + # iris is supposed to be receiving this msg, that last thing fides does + # is send a msg to this channel for iris to receive it + assert db.get_msgs_received_at_runtime("Fides")["fides2network"] == "1" + assert db.get_msgs_received_at_runtime("Fides")["new_alert"] == "1" + print(db.get_msgs_received_at_runtime("Fides")) + + print("Deleting the output directory") + shutil.rmtree(output_dir) + + +@pytest.mark.parametrize( + "path, output_dir, redis_port", + [ + ( + "dataset/test15-malicious-zeek-dir", + "fides_test_trust_recommendation_response/", + 6645, + ) + ], +) +def test_trust_recommendation_response(path, output_dir, redis_port): + """ + This test simulates a common situation in the global P2P system, where + Fides Module wanted to evaluate trust in an unknown peer and asked for + the opinion of other peers. + The known peers responded and Fides Module is processing the response. + Scenario: + - Fides did not know a peer whose ID is 'stratosphere.org' and have + asked for opinion of known peers: peer1 and peer2 + - The peers are responding in a message; see message in message_send() + - The message is processed + THE TEST ITSELF + + Preparation: + - Have a response to send to a correct channel (it would have been + done by Iris, here it is simulated) + - Inject peer1 and peer2 into the database - Fides Module must know + those peers, NOTE that Fides Module only asks for opinion from known + peers + - Run Slips (includes Fides Module) in a thread and wait for all + modules to start + + """ + output_dir: PosixPath = create_output_dir(output_dir) + output_file = os.path.join(output_dir, "slips_output.txt") + command = [ + "./slips.py", + "-t", + "-g", + "-e", + "1", + "-f", + str(path), + "-o", + str(output_dir), + "-c", + "tests/integration_tests/fides_config.yaml", + "-P", + str(redis_port), + ] + config_file_path = "modules/fidesModule/config/fides.conf.yml" + config_temp_path = "modules/fidesModule/config/fides.conf.yml.bak" + config_line = "database: 'fides_test_database.sqlite'\n" + shutil.copy(config_file_path, config_temp_path) + test_db = "fides_test_database.sqlite" + + try: + # Append the new line to the config + with open(config_file_path, "a") as file: + file.write(config_line) + + print("running slips ...") + print(output_dir) + + mock_logger = Mock() + mock_logger.print_line = Mock() + mock_logger.error = Mock() + print("Manipulating database") + fdb = SQLiteDB(mock_logger, test_db) + fdb.store_peer_trust_data( + ptd.trust_data_prototype( + peer=PeerInfo( + id="peer1", + organisations=["org1", "org2"], + ip="192.168.1.1", + ), + has_fixed_trust=False, + ) + ) + fdb.store_peer_trust_data( + ptd.trust_data_prototype( + peer=PeerInfo( + id="peer2", organisations=["org2"], ip="192.168.1.2" + ), + has_fixed_trust=True, + ) + ) + + # Open the log file in write mode + with open(output_file, "w") as log_file: + # Start the subprocess, redirecting stdout and stderr to the same file + process = subprocess.Popen( + command, # Replace with your command + stdout=log_file, + stderr=log_file, + ) + + print(f"Output and errors are logged in {output_file}") + # these 12s are the time we wait for slips to start all the modules 
diff --git a/tests/module_factory.py b/tests/module_factory.py
index ad0e7c275..28034e05d 100644
--- a/tests/module_factory.py
+++ b/tests/module_factory.py
@@ -78,6 +78,7 @@
     TimeWindow,
     Victim,
 )
+from modules.fidesModule.fidesModule import FidesModule
 
 
 def read_configuration():
@@ -162,6 +163,19 @@ def create_http_analyzer_obj(self, mock_db):
         http_analyzer.print = Mock()
         return http_analyzer
 
+    @patch(MODULE_DB_MANAGER, name="mock_db")
+    def create_fidesModule_obj(self, mock_db):
+        fm = FidesModule(
+            self.logger,
+            "dummy_output_dir",
+            6379,
+            Mock(),
+        )
+
+        # override the self.print function so tests stay silent
+        fm.print = Mock()
+        return fm
+
     @patch(MODULE_DB_MANAGER, name="mock_db")
     def create_virustotal_obj(self, mock_db):
         virustotal = VT(
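`create_fidesModule_obj` follows the factory's existing pattern: the DB manager is patched out via `MODULE_DB_MANAGER`, the module gets dummy constructor arguments, and `print` is replaced by a `Mock`. A minimal sketch of how a unit test consumes the new factory method (the test name and assertions are illustrative, not part of the PR):

```python
# a minimal sketch of a unit test using the factory above
from tests.module_factory import ModuleFactory


def test_create_fides_module():
    fides = ModuleFactory().create_fidesModule_obj()
    # the DB manager and print are mocked, so nothing touches Redis here
    assert fides.print.call_count == 0
    fides.print("hello")  # swallowed by the Mock, never hits stdout
    fides.print.assert_called_once_with("hello")
```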
You may find them in tests/test_fides_sqlite_db.py
+"""
+
+import os
+
+import pytest
+
+from tests.module_factory import ModuleFactory
+# utils is the shared Utils singleton; http_analyzer imports it from
+# slips_files.common.slips_utils and re-exposes it
+from modules.http_analyzer.http_analyzer import utils
+
+
+@pytest.fixture
+def cleanup_database():
+    # name of the database created by Fides
+    db_name = "fides_p2p_db.sqlite"
+
+    yield  # let the test run
+
+    # teardown: remove the database file if the test created it
+    if os.path.exists(db_name):
+        os.remove(db_name)
+
+
+def test_pre_main(mocker, cleanup_database):
+    fides_module = ModuleFactory().create_fidesModule_obj()
+    mocker.patch("slips_files.common.slips_utils.Utils.drop_root_privs")
+    fides_module.pre_main()
+    utils.drop_root_privs.assert_called_once()
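The `cleanup_database` fixture above relies on pytest's yield-fixture protocol: everything before `yield` is setup, everything after it runs as teardown, even when the test fails. The same pattern generalizes to any artifact a test may leave behind; a self-contained sketch (names are illustrative):

```python
import os

import pytest


@pytest.fixture
def artifact_cleanup():
    paths = []   # tests register files they may create
    yield paths  # the test body runs here
    # teardown: runs even if the test raised an assertion error
    for p in paths:
        if os.path.exists(p):
            os.remove(p)


def test_writes_a_file(artifact_cleanup):
    artifact_cleanup.append("scratch.txt")
    with open("scratch.txt", "w") as f:
        f.write("data")
```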
diff --git a/tests/test_fides_queues.py b/tests/test_fides_queues.py
new file mode 100644
index 000000000..59739fbf9
--- /dev/null
+++ b/tests/test_fides_queues.py
@@ -0,0 +1,89 @@
+import pytest
+from unittest.mock import MagicMock
+from threading import Thread
+
+from modules.fidesModule.messaging.redis_simplex_queue import (
+    RedisSimplexQueue,
+    RedisDuplexQueue,
+)
+
+
+@pytest.fixture
+def mock_db():
+    return MagicMock()
+
+
+@pytest.fixture
+def mock_channels():
+    return {"send_channel": MagicMock(), "receive_channel": MagicMock()}
+
+
+@pytest.fixture
+def simplex_queue(mock_db, mock_channels):
+    return RedisSimplexQueue(
+        mock_db, "send_channel", "receive_channel", mock_channels
+    )
+
+
+def test_initialization(simplex_queue, mock_db, mock_channels):
+    assert simplex_queue.db == mock_db
+    assert simplex_queue._RedisSimplexQueue__send == "send_channel"
+    assert simplex_queue._RedisSimplexQueue__receive == "receive_channel"
+    assert (
+        simplex_queue._RedisSimplexQueue__pub
+        == mock_channels["receive_channel"]
+    )
+
+
+def test_send(simplex_queue, mock_db):
+    simplex_queue.send("test_message")
+    mock_db.publish.assert_called_once_with("send_channel", "test_message")
+
+
+def test_listen_blocking(simplex_queue, mock_channels):
+    mock_channels["receive_channel"].listen = MagicMock(
+        return_value=[
+            {"data": "message_1"},
+            {"data": "stop_process"},
+        ]
+    )
+    on_message = MagicMock()
+
+    simplex_queue.listen(on_message, block=True)
+
+    on_message.assert_any_call("message_1")
+    assert mock_channels["receive_channel"].unsubscribe.called
+
+
+def test_listen_non_blocking(simplex_queue, mock_channels):
+    on_message = MagicMock()
+
+    # Mock `run_in_thread` to return a real thread-like object
+    mock_thread = Thread(target=lambda: None)
+    mock_channels["receive_channel"].run_in_thread.return_value = mock_thread
+
+    # Call the listen method
+    thread = simplex_queue.listen(on_message, block=False)
+
+    # Assert that the returned thread is a Thread instance
+    assert isinstance(thread, Thread)
+
+    # Clean up the created thread to avoid side effects
+    if thread.is_alive():
+        thread.join()
+
+
+def test_exec_message(simplex_queue):
+    on_message = MagicMock()
+
+    valid_message = {"data": "valid_data"}
+    simplex_queue._RedisSimplexQueue__exec_message(valid_message, on_message)
+    on_message.assert_called_once_with("valid_data")
+
+    stop_message = {"data": "stop_process"}
+    simplex_queue._RedisSimplexQueue__exec_message(stop_message, on_message)
+    # the stop sentinel triggers the stop logic and must not be forwarded
+    # to the handler, so the call count stays at one
+    on_message.assert_called_once()
+
+
+def test_stop_all_threads(simplex_queue):
+    mock_thread = MagicMock()
+    simplex_queue._threads.append(mock_thread)
+
+    simplex_queue.stop_all_queue_threads()
+    mock_thread.stop.assert_called_once()
+    assert len(simplex_queue._threads) == 0
+
+
+def test_duplex_queue(mock_db):
+    # a duplex queue sends and receives on one shared channel
+    mock_channels = {"common_channel": MagicMock()}
+
+    # Instantiate the duplex queue
+    duplex_queue = RedisDuplexQueue(mock_db, "common_channel", mock_channels)
+
+    # Verify that send, receive and pub all point to the common channel
+    assert duplex_queue._RedisSimplexQueue__send == "common_channel"
+    assert duplex_queue._RedisSimplexQueue__receive == "common_channel"
+    assert (
+        duplex_queue._RedisSimplexQueue__pub
+        == mock_channels["common_channel"]
+    )
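The queue tests above reach into private attributes such as `_RedisSimplexQueue__send`. This works because of Python's name mangling: an attribute named `__name` inside a class body is stored as `_ClassName__name` on the instance, and subclasses like `RedisDuplexQueue` inherit the mangled name of the class that defined it. A minimal illustration:

```python
class RedisQueueLike:
    def __init__(self) -> None:
        # double leading underscore -> stored as _RedisQueueLike__send
        self.__send = "send_channel"


q = RedisQueueLike()
# q.__send would raise AttributeError; the mangled name works:
assert q._RedisQueueLike__send == "send_channel"  # how the tests peek inside
```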
diff --git a/tests/test_fides_sqlite_db.py b/tests/test_fides_sqlite_db.py
new file mode 100644
index 000000000..d66642e27
--- /dev/null
+++ b/tests/test_fides_sqlite_db.py
@@ -0,0 +1,339 @@
+import pytest
+from unittest.mock import MagicMock
+
+from modules.fidesModule.model.peer import PeerInfo
+from modules.fidesModule.model.peer_trust_data import PeerTrustData
+from modules.fidesModule.model.threat_intelligence import (
+    SlipsThreatIntelligence,
+)
+from modules.fidesModule.persistence.sqlite_db import SQLiteDB
+
+from modules.fidesModule.model.recommendation_history import (
+    RecommendationHistoryRecord,
+)
+from modules.fidesModule.model.service_history import ServiceHistoryRecord
+
+
+@pytest.fixture
+def db():
+    # Create an in-memory SQLite database for testing
+    logger = MagicMock()  # Mock the logger for testing purposes
+    db_instance = SQLiteDB(logger, ":memory:")  # using an in-memory DB
+    return db_instance
+
+
+def test_db_connection_and_creation(db):
+    # Check if the connection is established
+    assert db.connection is not None
+    # Check if tables exist
+    tables = db._SQLiteDB__execute_query(
+        "SELECT name FROM sqlite_master WHERE type='table';"
+    )
+    assert len(tables) > 0  # Ensure tables are created
+
+
+def test_store_slips_threat_intelligence(db):
+    # Create a SlipsThreatIntelligence object
+    intelligence = SlipsThreatIntelligence(
+        target="example.com", score=-1, confidence=0.9, confidentiality=0.75
+    )
+
+    # Store the intelligence in the database
+    db.store_slips_threat_intelligence(intelligence)
+
+    # Fetch it back using the target
+    result = db.get_slips_threat_intelligence_by_target("example.com")
+
+    # Assert the retrieved data matches what was stored
+    assert result is not None
+    assert result.target == "example.com"
+    assert result.score == -1
+    assert result.confidence == 0.9
+    assert result.confidentiality == 0.75
+
+
+def test_get_slips_threat_intelligence_by_target(db):
+    # Create a SlipsThreatIntelligence object and insert it
+    intelligence = SlipsThreatIntelligence(
+        target="192.168.1.1",
+        score=0.70,
+        confidence=1.0,
+        confidentiality=None,  # Optional field left as None
+    )
+    db.store_slips_threat_intelligence(intelligence)
+
+    # Retrieve the intelligence by the target (IP address)
+    result = db.get_slips_threat_intelligence_by_target("192.168.1.1")
+
+    # Assert the retrieved data matches what was stored
+    assert result is not None
+    assert result.target == "192.168.1.1"
+    assert result.score == 0.7
+    assert result.confidence == 1
+    assert (
+        result.confidentiality is None
+    )  # Should be None since it was not set
+
+
+def test_get_peer_trust_data(db):
+    # Create peer info and peer trust data
+    peer_info = PeerInfo(
+        id="peer123", organisations=["org1", "org2"], ip="192.168.0.10"
+    )
+    peer_trust_data = PeerTrustData(
+        info=peer_info,
+        has_fixed_trust=True,
+        service_trust=0.85,
+        reputation=0.95,
+        recommendation_trust=1,
+        competence_belief=0.8,
+        integrity_belief=0.0,
+        initial_reputation_provided_by_count=10,
+        service_history=[
+            ServiceHistoryRecord(
+                satisfaction=0.5, weight=0.9, timestamp=20.15
+            )
+        ],
+        recommendation_history=[
+            RecommendationHistoryRecord(
+                satisfaction=0.8, weight=1.0, timestamp=1234.55
+            )
+        ],
+    )
+
+    # Store peer trust data in the database
+    db.store_peer_trust_data(peer_trust_data)
+
+    # Retrieve the stored peer trust data by peer ID
+    result = db.get_peer_trust_data("peer123")
+
+    # Assert the retrieved data matches what was stored
+    assert result is not None
+    assert result.info.id == "peer123"
+    assert result.info.ip == "192.168.0.10"
+    assert result.service_trust == 0.85
+    assert result.reputation == 0.95
+    assert result.recommendation_trust == 1
+    assert result.competence_belief == 0.8
+    assert result.integrity_belief == 0.0
+    assert result.initial_reputation_provided_by_count == 10
+    assert len(result.service_history) == 1
+    assert result.service_history[0].satisfaction == 0.5
+    assert len(result.recommendation_history) == 1
+    assert result.recommendation_history[0].satisfaction == 0.8
+
+
+def test_get_connected_peers_1(db):
+    # Create PeerInfo data for multiple peers
+    peers = [
+        PeerInfo(id="peerA", organisations=["orgA"], ip="192.168.0.1"),
+        PeerInfo(
+            id="peerB", organisations=["orgB", "orgC"], ip="192.168.0.2"
+        ),
+    ]
+
+    # Store connected peers in the database
+    db.store_connected_peers_list(peers)
+
+    # Fetch all connected peers
+    connected_peers = db.get_connected_peers()
+
+    # Assert the connected peers were retrieved correctly
+    assert len(connected_peers) == 2
+    assert connected_peers[0].id == "peerA"
+    assert connected_peers[1].id == "peerB"
+    assert connected_peers[0].ip == "192.168.0.1"
+    assert "orgB" in connected_peers[1].organisations
+
+
+def test_get_peers_by_organisations(db):
+    # Create and store PeerInfo data
+    peers = [
+        PeerInfo(id="peer1", organisations=["org1", "org2"], ip="10.0.0.1"),
+        PeerInfo(id="peer2", organisations=["org2", "org3"], ip="10.0.0.2"),
+        PeerInfo(id="peer3", organisations=["org3"], ip="10.0.0.3"),
+    ]
+    db.store_connected_peers_list(peers)
+
+    # Query peers belonging to organisation "org2"
+    result = db.get_peers_by_organisations(["org2"])
+
+    # Assert the correct peers are returned
+    assert len(result) == 2
+    assert result[0].id == "peer1"
+    assert result[1].id == "peer2"
+
+
+def test_get_peers_by_minimal_recommendation_trust(db):
+    # Insert peer trust data with varying recommendation trust
+    peer1 = PeerTrustData(
+        info=PeerInfo(id="peer1", organisations=["org1"], ip="10.0.0.1"),
+        has_fixed_trust=True,
+        service_trust=0.70,
+        reputation=0.80,
+        recommendation_trust=0.50,
+        competence_belief=0.60,
+        integrity_belief=0.70,
+        initial_reputation_provided_by_count=3,
+        service_history=[],  # Assuming an empty list for simplicity
+        recommendation_history=[],  # Assuming an empty list for simplicity
+    )
+
+    peer2 = PeerTrustData(
+        info=PeerInfo(id="peer2", organisations=["org2"], ip="10.0.0.2"),
+        has_fixed_trust=False,
+        service_trust=0.85,
+        reputation=0.90,
+        recommendation_trust=0.90,
+        competence_belief=0.75,
+        integrity_belief=0.80,
+        initial_reputation_provided_by_count=5,
+        service_history=[],
+        recommendation_history=[],
+    )
+
+    # Store the peer trust data
+    db.store_peer_trust_data(peer1)
+    db.store_peer_trust_data(peer2)
+
+    # Query peers with recommendation trust >= 0.70
+    peers = db.get_peers_by_minimal_recommendation_trust(0.70)
+
+    # Assert that only the appropriate peer is returned
+    assert len(peers) == 1
+    assert peers[0].id == "peer2"
+
+
+def test_get_nonexistent_peer_trust_data(db):
+    # Attempt to retrieve peer trust data for a non-existent peer
+    result = db.get_peer_trust_data("nonexistent_peer")
+    assert result is None
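The `db` fixture above passes `":memory:"` as the database path. With SQLite this creates a fresh, private database that lives only as long as the connection, so every test starts from a clean schema with no files left on disk. A sketch of the same idea using the standard library directly, with a table name borrowed from the tests:

```python
import sqlite3

conn = sqlite3.connect(":memory:")  # fresh DB, discarded when closed
conn.execute("CREATE TABLE PeerInfo (peerID TEXT PRIMARY KEY, ip TEXT)")
conn.execute(
    "INSERT INTO PeerInfo VALUES (?, ?)", ("peer1", "192.168.1.1")
)
rows = conn.execute("SELECT * FROM PeerInfo").fetchall()
print(rows)   # [('peer1', '192.168.1.1')]
conn.close()  # the whole database disappears here
```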
Organisation ID to be inserted
+    organisation_id = "org123"
+
+    # Insert the organisation if it doesn't exist
+    db.insert_organisation_if_not_exists(organisation_id)
+
+    # Query the Organisation table to check if the organisation was inserted
+    result = db._SQLiteDB__execute_query(
+        "SELECT organisationID FROM Organisation WHERE organisationID = ?",
+        [organisation_id],
+    )
+
+    # Assert that the organisation was inserted
+    assert len(result) == 1
+    assert result[0][0] == organisation_id
+
+
+def test_insert_peer_organisation_connection(db):
+    # Peer and Organisation IDs to be inserted
+    peer_id = "peer123"
+    organisation_id = "org123"
+
+    # Insert the connection
+    db.insert_peer_organisation_connection(peer_id, organisation_id)
+
+    # Query the PeerOrganisation table to verify the connection
+    result = db._SQLiteDB__execute_query(
+        "SELECT peerID, organisationID FROM PeerOrganisation "
+        "WHERE peerID = ? AND organisationID = ?",
+        [peer_id, organisation_id],
+    )
+
+    # Assert the connection was inserted
+    assert len(result) == 1
+    assert result[0] == (peer_id, organisation_id)
+
+
+def test_store_connected_peers_list(db):
+    # Create PeerInfo objects to insert
+    peers = [
+        PeerInfo(
+            id="peer1", organisations=["org1", "org2"], ip="192.168.1.1"
+        ),
+        PeerInfo(id="peer2", organisations=["org3"], ip="192.168.1.2"),
+    ]
+
+    # Store the connected peers
+    db.store_connected_peers_list(peers)
+
+    # Verify the PeerInfo table
+    peer_results = db._SQLiteDB__execute_query(
+        "SELECT peerID, ip FROM PeerInfo"
+    )
+    assert len(peer_results) == 2
+    assert peer_results[0] == ("peer1", "192.168.1.1")
+    assert peer_results[1] == ("peer2", "192.168.1.2")
+
+    # Verify the PeerOrganisation table
+    org_results_peer1 = db._SQLiteDB__execute_query(
+        "SELECT organisationID FROM PeerOrganisation WHERE peerID = ?",
+        ["peer1"],
+    )
+    assert (
+        len(org_results_peer1) == 2
+    )  # peer1 should be connected to 2 organisations
+    assert org_results_peer1[0][0] == "org1"
+    assert org_results_peer1[1][0] == "org2"
+
+    org_results_peer2 = db._SQLiteDB__execute_query(
+        "SELECT organisationID FROM PeerOrganisation WHERE peerID = ?",
+        ["peer2"],
+    )
+    assert (
+        len(org_results_peer2) == 1
+    )  # peer2 should be connected to 1 organisation
+    assert org_results_peer2[0][0] == "org3"
+
+
+def test_get_connected_peers_2(db):
+    # Manually insert peer data into the PeerInfo table
+    db._SQLiteDB__execute_query(
+        "INSERT INTO PeerInfo (peerID, ip) VALUES (?, ?)",
+        ["peer1", "192.168.1.1"],
+    )
+    db._SQLiteDB__execute_query(
+        "INSERT INTO PeerInfo (peerID, ip) VALUES (?, ?)",
+        ["peer2", "192.168.1.2"],
+    )
+
+    # Manually insert associated organisations into PeerOrganisation
+    db._SQLiteDB__execute_query(
+        "INSERT INTO PeerOrganisation (peerID, organisationID) VALUES (?, ?)",
+        ["peer1", "org1"],
+    )
+    db._SQLiteDB__execute_query(
+        "INSERT INTO PeerOrganisation (peerID, organisationID) VALUES (?, ?)",
+        ["peer1", "org2"],
+    )
+    db._SQLiteDB__execute_query(
+        "INSERT INTO PeerOrganisation (peerID, organisationID) VALUES (?, ?)",
+        ["peer2", "org3"],
+    )
+
+    # Call the function to retrieve connected peers
+    connected_peers = db.get_connected_peers()
+
+    # Verify the connected peers list
+    assert len(connected_peers) == 2
+    assert connected_peers[0].id == "peer1"
+    assert connected_peers[0].ip == "192.168.1.1"
+    assert connected_peers[0].organisations == ["org1", "org2"]
+    assert connected_peers[1].id == "peer2"
+    assert connected_peers[1].ip == "192.168.1.2"
+    assert connected_peers[1].organisations == ["org3"]
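The raw `__execute_query` calls above always bind values through `?` placeholders instead of interpolating them into the SQL text; the driver then escapes and binds the parameters, which avoids SQL injection and quoting bugs. A contrast sketch with plain `sqlite3`:

```python
import sqlite3

conn = sqlite3.connect(":memory:")
conn.execute(
    "CREATE TABLE PeerOrganisation (peerID TEXT, organisationID TEXT)"
)

peer_id = "peer123"
# unsafe: the value becomes part of the SQL text itself
# conn.execute(f"SELECT * FROM PeerOrganisation WHERE peerID = '{peer_id}'")

# safe: the driver binds the value, as the tests above do
rows = conn.execute(
    "SELECT organisationID FROM PeerOrganisation WHERE peerID = ?",
    (peer_id,),
).fetchall()
```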
test_get_peer_organisations(db):
+    # Insert a peer and associated organisations into PeerOrganisation
+    peer_id = "peer123"
+    organisations = ["org1", "org2", "org3"]
+    for org_id in organisations:
+        db._SQLiteDB__execute_query(
+            "INSERT INTO PeerOrganisation (peerID, organisationID) "
+            "VALUES (?, ?)",
+            [peer_id, org_id],
+        )
+
+    # Retrieve the organisations for the peer
+    result = db.get_peer_organisations(peer_id)
+
+    # Assert that the retrieved organisations match what was inserted;
+    # compare as sets because the order does not matter
+    assert set(result) == set(organisations)
diff --git a/tests/tests_fides_bridge.py b/tests/tests_fides_bridge.py
new file mode 100644
index 000000000..75bb61fb9
--- /dev/null
+++ b/tests/tests_fides_bridge.py
@@ -0,0 +1,77 @@
+import pytest
+from unittest.mock import MagicMock
+
+from modules.fidesModule.messaging.network_bridge import (
+    NetworkBridge,
+    NetworkMessage,
+)
+from modules.fidesModule.messaging.queue import Queue
+from modules.fidesModule.messaging.message_handler import MessageHandler
+from modules.fidesModule.model.aliases import PeerId, Target
+from modules.fidesModule.model.threat_intelligence import ThreatIntelligence
+
+
+@pytest.fixture
+def mock_queue():
+    return MagicMock(spec=Queue)
+
+
+@pytest.fixture
+def network_bridge(mock_queue):
+    return NetworkBridge(queue=mock_queue)
+
+
+@pytest.fixture
+def mock_handler():
+    return MagicMock(spec=MessageHandler)
+
+
+def test_initialization(network_bridge, mock_queue):
+    assert network_bridge._NetworkBridge__queue == mock_queue
+    assert network_bridge.version == 1
+
+
+def test_listen_success(network_bridge, mock_handler, mock_queue):
+    mock_queue.listen = MagicMock()
+    mock_handler.on_message = MagicMock()
+
+    network_bridge.listen(mock_handler)
+
+    mock_queue.listen.assert_called_once()
+    # Simulate a valid message being received
+    message = '{"type": "test", "version": 1, "data": {}}'
+    callback = mock_queue.listen.call_args[0][0]
+    callback(message)
+
+    mock_handler.on_message.assert_called_once()
+
+
+def test_listen_failure(network_bridge, mock_handler, mock_queue):
+    mock_queue.listen = MagicMock()
+    mock_handler.on_error = MagicMock()
+
+    network_bridge.listen(mock_handler)
+
+    # Simulate an invalid message being received
+    message = "invalid json"
+    callback = mock_queue.listen.call_args[0][0]
+    callback(message)
+
+    mock_handler.on_error.assert_called_once()
+
+
+def test_send_intelligence_response(network_bridge, mock_queue):
+    mock_queue.send = MagicMock()
+    target = Target("test_target")
+    # the score must stay within the documented <-1; 1> interval
+    intelligence = ThreatIntelligence(score=0.85, confidence=0.9)
+    network_bridge.send_intelligence_response("req_123", target, intelligence)
+
+    mock_queue.send.assert_called_once()
+    sent_message = mock_queue.send.call_args[0][0]
+    assert "tl2nl_intelligence_response" in sent_message
+
+
+def test_send_recommendation_request(network_bridge, mock_queue):
+    mock_queue.send = MagicMock()
+    recipients = [PeerId("peer1"), PeerId("peer2")]
+    peer = PeerId("test_peer")
+    network_bridge.send_recommendation_request(recipients, peer)
+
+    mock_queue.send.assert_called_once()
+    sent_message = mock_queue.send.call_args[0][0]
+    assert "tl2nl_recommendation_request" in sent_message
+
+
+def test_send_exception_handling(network_bridge, mock_queue):
+    mock_queue.send = MagicMock(side_effect=Exception("send failed"))
+    with pytest.raises(Exception, match="send failed"):
+        network_bridge._NetworkBridge__send(
+            NetworkMessage(type="test", version=1, data={})
+        )
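The bridge tests above only check that a serialized message contains the expected type string (e.g. `tl2nl_intelligence_response`). Judging from `NetworkMessage(type=..., version=..., data=...)` and the JSON string fed to the listen callback, the wire format is a small JSON envelope. A hypothetical round-trip sketch; the `NetworkMessage` mirror below and the `request_id` field are illustrative, not the module's authoritative schema:

```python
import json
from dataclasses import asdict, dataclass
from typing import Any


@dataclass
class NetworkMessage:  # mirrors the three fields used in the tests
    type: str
    version: int
    data: Any


def serialize(msg: NetworkMessage) -> str:
    return json.dumps(asdict(msg))


def parse(raw: str) -> NetworkMessage:
    # raises on malformed input, which is what test_listen_failure exercises
    return NetworkMessage(**json.loads(raw))


envelope = serialize(
    NetworkMessage(
        "tl2nl_intelligence_response", 1, {"request_id": "req_123"}
    )
)
assert parse(envelope).type == "tl2nl_intelligence_response"
```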