File size: 5,401 Bytes
50cb8a1
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
import os
import openai
import json
import rdflib

class LureObject:
    """Container for a single generated lure.

    Instances start out empty; ``LureGenerator.generate`` fills in the
    attributes (and additionally attaches ``json`` and ``userRole``).
    """

    def __init__(self):
        # Identifier assigned by the generator (e.g. "<key>_<index>").
        self.lure_name = ""
        # Lure category: honeytoken, honeypot
        self.type = ""
        # Full LLM-generated content an adversary would see.
        self.content = ""
        

class LureGenerator:
    """Generate deception lures (honeytokens/honeypots/honeyfiles) with an LLM.

    Workflow: build a natural-language request (``generate_rule``), send it
    to the OpenAI chat API (``llm_api``) — splitting oversized prompts with
    ``ChatGPTTextSplitter`` — parse the JSON answer, then expand each example
    into full lure content (``generate_rule2`` via ``raw_content``).
    """

    def __init__(self):
        # Accumulated LureObject instances across generate() calls.
        self.lure = []
        # Raw (unparsed) LLM answer from the most recent generate() call.
        self.sketch = None

    def ChatGPTTextSplitter(self, text):
        """Split *text* into prompt chunks small enough for the LLM.

        Yields a protocol preamble, then one prompt per chunk; the final
        chunk ends with "ALL PARTS SENT" so the model knows to answer.
        A str that fits in one chunk is yielded unchanged.  *text* may be
        a str (split by size) or a list (one part per element); any other
        type yields nothing.
        """
        preamble = """The total length of content that I want to send you is too large to send in only one piece.

    For sending you that content, I will follow this rule:

    [START PART 1/10]
    this is the content of the part 1 out of 10 in total
    [END PART 1/10]

    Then you just answer: "Instructions Sent."

    And when I tell you "ALL PARTS SENT", then you can continue processing the data and answering my requests.
        """
        if isinstance(text, str):
            textsize = 12000  # max characters per chunk sent to the model
            if len(text) <= textsize:
                # Fits in a single request: skip the multi-part protocol.
                yield text
                return
            # Ceiling division so the tail of the text is never dropped
            # (int(len/size) used to truncate and lose the remainder).
            blocksize = -(-len(text) // textsize)
            yield preamble
            for b in range(1, blocksize + 1):
                chunk = text[(b - 1) * textsize:b * textsize]
                if b < blocksize:
                    yield f"""Do not answer yet. This is just another part of the text I want to send you. Just receive and acknowledge as "Part {b}/{blocksize} received" and wait for the next part.
                [START PART {b}/{blocksize}]
                {chunk}
                [END PART {b}/{blocksize}]
                Remember not answering yet. Just acknowledge you received this part with the message "Part {b}/{blocksize} received" and wait for the next part.
                        """
                else:
                    # Final chunk: tell the model it may start answering.
                    yield f"""
                [START PART {b}/{blocksize}]
                {chunk}
                [END PART {b}/{blocksize}]
                ALL PARTS SENT. Now you can continue processing the request.
                        """
        elif isinstance(text, list):
            total = len(text)
            yield preamble
            for n, block in enumerate(text, start=1):
                # Exactly one yield per element (a stray second yield used
                # to emit every non-final part twice).
                if n < total:
                    yield f"""Do not answer yet. This is just another part of the text I want to send you. Just receive and acknowledge as "Part {n}/{total} received" and wait for the next part.
            [START PART {n}/{total}]
            {block}
            [END PART {n}/{total}]
            Remember not answering yet. Just acknowledge you received this part with the message "Part {n}/{total} received" and wait for the next part.
                    """
                else:
                    yield f"""
            [START PART {n}/{total}]
            {block}
            [END PART {n}/{total}]
            ALL PARTS SENT. Now you can continue processing the request.
                    """

    def llm_api(self, prompt, model="gpt-3.5-turbo"):
        """Send *prompt* as a single user message and return the reply text.

        Uses the legacy (pre-1.0) ``openai.ChatCompletion`` API with
        temperature 0 for deterministic output.  Requires the API key to be
        configured on the ``openai`` module by the caller.
        """
        messages = [{
            "role": "user",
            "content": prompt,
        }]
        res = openai.ChatCompletion.create(model=model, messages=messages, temperature=0)
        return res.choices[0].message['content']

    def generate_rule(self, deceptionObject, role):
        """Return the prompt asking for JSON-formatted lure examples."""
        v = f"""Generate examples of {deceptionObject} that would be perceived valuable by an adversary about a person who has the role {role} and lure them to a specific location on the network. Generate json-format objects from the examples and return a json-format object containing all json-format objects.
        """
        return v

    def generate_rule2(self, deceptionObject, role, jsn):
        """Return the prompt asking for the detailed content of one example."""
        v = f"""Generate the detailed contents of an example of what an adversary would see if they accessed this {deceptionObject}: {jsn}
        """
        return v

    def generate_continue(self):
        """Return a minimal 'continue' prompt to resume a truncated answer."""
        v = """
        continue
        """
        return v

    def _run_prompt(self, val):
        """Send *val* through the splitter and return the last LLM response.

        Intermediate acknowledgements ("Part N received") are discarded;
        only the answer to the final chunk matters.  The splitter always
        yields at least once for str input, so ``res`` is always bound.
        """
        prompt = "".join(val)
        res = None
        for piece in self.ChatGPTTextSplitter(prompt):
            res = self.llm_api(piece)
        return res

    def raw_prompt(self, LureType, Role):
        """Ask the LLM for JSON lure examples and return the raw answer."""
        return self._run_prompt(self.generate_rule(LureType, Role))

    def raw_content(self, LureType, Role, jsn):
        """Ask the LLM for the full content of one example; return raw text."""
        return self._run_prompt(self.generate_rule2(LureType, Role, jsn))

    def generate(self, LureType, Role: str = ""):
        """Generate lures of *LureType* for a user with role *Role*.

        Appends one ``LureObject`` per example to ``self.lure`` and returns
        the accumulated list.

        Raises:
            AssertionError: if *LureType* is not a supported lure kind
                (kept as ``assert`` for caller compatibility).
            ValueError: if the LLM answer is not parseable JSON.
        """
        assert LureType in ['honeytoken', 'honeypot', 'honeyfile']
        res = self.raw_prompt(LureType, Role)

        # Keep the raw answer around for inspection/debugging.
        self.sketch = res

        try:
            jsn = json.loads(res)
        except (json.JSONDecodeError, TypeError) as exc:
            raise ValueError("Failed to parse json-format.") from exc

        # Only the expected shape — a single top-level key holding the list
        # of examples — is expanded; anything else returns self.lure as-is.
        keys = list(jsn.keys())
        if len(keys) == 1:
            for n, example in enumerate(list(jsn[keys[0]])):
                lure = LureObject()
                lure.json = example
                lure.lure_name = keys[0] + "_" + str(n)
                lure.content = self.raw_content(LureType, Role, example)
                lure.type = LureType
                lure.userRole = Role

                self.lure.append(lure)

        return self.lure