import gradio as gr
import pandas as pd
import numpy as np
import json
import os

# ---------------------- Accessing data from Notion ---------------------- #


from notion_client import Client as client_notion

notionToken = os.getenv('notionToken')
if notionToken is None:
    raise Exception("Secret token not found. Please check the environment variables.")
else:
    print("Secret token found successfully!")

from config import landuseDatabaseId, subdomainAttributesDatabaseId
from imports_utils import fetch_all_database_pages, get_property_value, notion

landuse_attributes = fetch_all_database_pages(notion, landuseDatabaseId)
livability_attributes = fetch_all_database_pages(notion, subdomainAttributesDatabaseId)

# fetch the dictionary with landuse - subdomain pairs
landuseMapperDict = {}
subdomains_unique = []

for page in landuse_attributes:
    value_landuse = get_property_value(page, "LANDUSE")
    value_subdomain = get_property_value(page, "SUBDOMAIN_LIVEABILITY")
    if value_subdomain and value_landuse:
        landuseMapperDict[value_landuse] = value_subdomain
    if value_subdomain != "":
        subdomains_unique.append(value_subdomain)

#subdomains_unique = list(set(subdomains_unique))


# fetch the dictionary with subdomain attribute data
attributeMapperDict = {}
domains_unique = []

for page in livability_attributes:
    subdomain = get_property_value(page, "SUBDOMAIN_UNIQUE")
    sqm_per_employee = get_property_value(page, "SQM PER EMPL")
    thresholds = get_property_value(page, "MANHATTAN THRESHOLD")
    max_points = get_property_value(page, "LIVABILITY MAX POINT")
    domain = get_property_value(page, "DOMAIN")
    if thresholds:
        attributeMapperDict[subdomain] = {
            'sqmPerEmpl': [sqm_per_employee if sqm_per_employee != "" else 0],
            'thresholds': thresholds,
            'max_points': max_points,
            'domain': [domain if domain != "" else 0],
        }
    if domain != "":
        domains_unique.append(domain)

#domains_unique = list(set(domains_unique))



# ---------------------- Accessing data from Speckle ---------------------- #


"""
from specklepy.api.client import Client as SpeckleClient
from specklepy.api.credentials import get_default_account

# Example usage

client = Client(host="your_speckle_server_host")
account = get_default_account()
client.authenticate(token=account.token)


CLIENT = SpeckleClient(host="https://speckle.xyz/")
CLIENT.authenticate_with_token(token=userdata.get('speckleToken'))
"""

from config import streamId, branch_name_dm, commit_id_dm
from imports_utils import streamMatrices, speckleToken

streamDistanceMatrices = streamMatrices(speckleToken, streamId, branch_name_dm, commit_id_dm)


def test(input_json):
    print("Received input")
    # Parse the input JSON string (fall back to converting a single-quoted
    # payload into valid JSON)
    try:
        inputs = json.loads(input_json)
    except json.JSONDecodeError:
        inputs = json.loads(input_json.replace("'", '"'))


    
    # Accessing input data from Grasshopper
    matrix = inputs['input']["matrix"]
    landuses = inputs['input']["landuse_areas"]
    transport_matrix = inputs['input']["transportMatrix"]
    #attributeMapperDict = inputs['input']["attributeMapperDict"]
    #landuseMapperDict = inputs['input']["landuseMapperDict"]

    alpha = float(inputs['input']["alpha"])
    threshold = float(inputs['input']["threshold"])
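
    # A minimal sketch of the expected payload, inferred from the keys read
    # above (node ids and values are purely illustrative):
    #
    #   {"input": {"matrix":          {"0": {"0": 0, "1": 120}, "1": {...}},
    #              "landuse_areas":   {"residential": {"0": 4500}, ...},
    #              "transportMatrix": {"0": {"DRT_1": 300, "GMT_2": 540}, ...},
    #              "alpha": "0.0038",
    #              "threshold": "600"}}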
    
    df_matrix = pd.DataFrame(matrix).T
    df_matrix = df_matrix.round(0).astype(int)

    df_landuses = pd.DataFrame(landuses).T
    df_landuses = df_landuses.round(0).astype(int)


    
    # List of transport-mode substrings to check against
    transportModes = ["DRT", "GMT", "HSR"]
    
    def split_dict_by_subkey(original_dict, substrings):
        # Initialize dictionaries for each substring
        result_dicts = {substring: {} for substring in substrings}
        
        for key, nested_dict in original_dict.items():
            for subkey, value in nested_dict.items():
                # Check each substring if it's in the subkey
                for substring in substrings:
                    if substring in subkey:
                        if key not in result_dicts[substring]:
                            result_dicts[substring][key] = {}
                        result_dicts[substring][key][subkey] = value
        
        return result_dicts
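
    # A tiny illustrative example (hypothetical keys):
    #   split_dict_by_subkey({"n1": {"DRT_stop": 300, "GMT_hub": 500}}, ["DRT", "GMT"])
    #   -> {"DRT": {"n1": {"DRT_stop": 300}}, "GMT": {"n1": {"GMT_hub": 500}}}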

    result_dicts = split_dict_by_subkey(transport_matrix, transportModes)

    # Accessing each mode's dictionary (the HSR split is not used separately below)
    art_dict = result_dicts["DRT"]
    gmt_dict = result_dicts["GMT"]


    df_art_matrix = pd.DataFrame(art_dict).T
    df_art_matrix = df_art_matrix.round(0).astype(int)

    df_gmt_matrix = pd.DataFrame(gmt_dict).T
    df_gmt_matrix = df_gmt_matrix.round(0).astype(int)
    

    # create a mask based on the matrix size and ids, crop activity nodes to the mask
    mask_connected = df_matrix.index.tolist()

    valid_indexes = [idx for idx in mask_connected if idx in df_landuses.index]
    # Identify and report missing indexes
    missing_indexes = set(mask_connected) - set(valid_indexes)
    if missing_indexes:
        print(f"Error: The following indexes were not found in the DataFrame: {missing_indexes}, length: {len(missing_indexes)}")
    
    # Apply the filtered mask
    df_landuses_filtered = df_landuses.loc[valid_indexes]


    # find a set of unique domains, to which subdomains are aggregated
    temp = []
    for key, values in attributeMapperDict.items():
        for item in values['domain']:
            if item == 0:
                # empty DOMAIN cells were stored as 0 above; skip them
                continue
            if ',' in item:
                # comma-separated cells hold several domains; split them out
                domain_list = item.split(',')
                attributeMapperDict[key]['domain'] = domain_list
                temp.extend(domain_list)
            else:
                temp.append(item)

    domainsUnique = list(set(temp))


    # find a list of unique subdomains, to which land uses are aggregated
    temp = []
    for key, values in landuseMapperDict.items():
        subdomain = str(values)
        if subdomain and subdomain != "0":
            temp.append(subdomain)

    subdomainsUnique = list(set(temp))
    
    def landusesToSubdomains(DistanceMatrix, LanduseDf, LanduseToSubdomainDict, UniqueSubdomainsList):
        df_LivabilitySubdomainsArea = pd.DataFrame(0, index=DistanceMatrix.index, columns=UniqueSubdomainsList)
    
        for subdomain in UniqueSubdomainsList:
            for lu, lu_subdomain in LanduseToSubdomainDict.items():
                if lu_subdomain == subdomain:
                    if lu in LanduseDf.columns:
                        df_LivabilitySubdomainsArea[subdomain] = df_LivabilitySubdomainsArea[subdomain].add(LanduseDf[lu], fill_value=0)
                    else:
                        print(f"Warning: Column '{lu}' not found in landuse database")
    
        return df_LivabilitySubdomainsArea
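
    # A hedged illustration: if landuseMapperDict maps, say,
    # {"kindergarten": "education", "school": "education"} (names
    # hypothetical), the "education" column becomes the row-wise sum of
    # those two landuse-area columns.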

    

    LivabilitySubdomainsWeights = landusesToSubdomains(df_matrix, df_landuses_filtered, landuseMapperDict, subdomainsUnique)
    
    def FindWorkplaces(DistanceMatrix, SubdomainAttributeDict, destinationWeights, UniqueSubdomainsList):
        # estimate jobs per node from each subdomain's area and its
        # sqm-per-employee attribute
        df_LivabilitySubdomainsWorkplaces = pd.DataFrame(0, index=DistanceMatrix.index, columns=['jobs'])

        for subdomain in UniqueSubdomainsList:
            if subdomain not in SubdomainAttributeDict or subdomain not in destinationWeights.columns:
                continue
            sqm_per_empl = float(SubdomainAttributeDict[subdomain]['sqmPerEmpl'][0])
            if sqm_per_empl > 0:
                df_LivabilitySubdomainsWorkplaces['jobs'] += round(destinationWeights[subdomain] / sqm_per_empl, 2).fillna(0)

        return df_LivabilitySubdomainsWorkplaces
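
    # Worked example (illustrative numbers): a node with 5000 sqm of a
    # subdomain whose sqmPerEmpl is 25 contributes 5000 / 25 = 200 jobs.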


    WorkplacesNumber = FindWorkplaces(df_matrix, attributeMapperDict, LivabilitySubdomainsWeights, subdomainsUnique)

    # prepare an input weights dataframe for the parameter LivabilitySubdomainsInputs
    LivabilitySubdomainsInputs = pd.concat([LivabilitySubdomainsWeights, WorkplacesNumber], axis=1)

    
    
    def computeAccessibility(DistanceMatrix, destinationWeights=None, alpha=0.0038, threshold=600):
        # exponential distance decay, cut off beyond the distance threshold
        decay_factors = np.exp(-alpha * DistanceMatrix) * (DistanceMatrix <= threshold)

        # weighted accessibility (e.g. by areas or jobs)
        if destinationWeights is None:
            raise ValueError("Destination weights parameter is None")

        subdomainsAccessibility = pd.DataFrame(index=DistanceMatrix.index, columns=destinationWeights.columns)
        for col in destinationWeights.columns:
            subdomainsAccessibility[col] = (decay_factors * destinationWeights[col].values).sum(axis=1)

        return subdomainsAccessibility
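
    # In formula terms, per origin node i:
    #   A_i = sum_j w_j * exp(-alpha * d_ij)   for all j with d_ij <= threshold,
    # where w_j is the destination weight and d_ij an entry of the distance matrix.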
    



    def computeAccessibility_pointOfInterest(DistanceMatrix, columnName, alpha=0.0038, threshold=600):
        # unweighted accessibility: every reachable point of interest counts as 1
        decay_factors = np.exp(-alpha * DistanceMatrix) * (DistanceMatrix <= threshold)

        pointOfInterestAccessibility = pd.DataFrame(index=DistanceMatrix.index, columns=[columnName])
        pointOfInterestAccessibility[columnName] = decay_factors.sum(axis=1)

        return pointOfInterestAccessibility
    
    
    
    subdomainsAccessibility = computeAccessibility(df_matrix, LivabilitySubdomainsInputs, alpha, threshold)
    artAccessibility = computeAccessibility_pointOfInterest(df_art_matrix, 'ART', alpha, threshold)
    gmtAccessibility = computeAccessibility_pointOfInterest(df_gmt_matrix, 'GMT+HSR', alpha, threshold)

    AccessibilityInputs = pd.concat([subdomainsAccessibility, artAccessibility, gmtAccessibility], axis=1)
    
    

    def remap(value, B_min, B_max, C_min, C_max):
        # linear remap of value from the range [B_min, B_max] to [C_min, C_max]
        return C_min + (((value - B_min) / (B_max - B_min)) * (C_max - C_min))
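
    # e.g. remap(300, 0, 600, 0, 5) == 2.5: halfway along the input range
    # maps to halfway along the output range.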


    if 'jobs' not in subdomainsAccessibility.columns:
        print("Error: Column 'jobs' does not exist in subdomainsAccessibility.")

    
    
    def accessibilityToLivability(DistanceMatrix, accessibilityInputs, SubdomainAttributeDict, UniqueDomainsList):

        livability = pd.DataFrame(index=DistanceMatrix.index, columns=accessibilityInputs.columns)

        for domain in UniqueDomainsList:
            livability[domain] = 0

        livability.fillna(0, inplace=True)

        # remap accessibility to livability points, per subdomain
        for key, values in SubdomainAttributeDict.items():
            threshold = float(SubdomainAttributeDict[key]['thresholds'])
            max_livability = float(SubdomainAttributeDict[key]['max_points'])
            domains = [str(item) for item in SubdomainAttributeDict[key]['domain']]

            if key in accessibilityInputs.columns and key != 'commercial':
                livability_score = remap(accessibilityInputs[key], 0, threshold, 0, max_livability)
                # cap the subdomain score at max_livability once the threshold is reached
                livability.loc[accessibilityInputs[key] >= threshold, key] = max_livability
                livability.loc[accessibilityInputs[key] < threshold, key] = livability_score
                if any(domains):
                    for domain in domains:
                        if domain != 'Workplaces':
                            livability.loc[accessibilityInputs[key] >= threshold, domain] += max_livability
                            livability.loc[accessibilityInputs[key] < threshold, domain] += livability_score

            elif key == 'commercial':
                # the commercial subdomain is scored on jobs accessibility
                livability_score = remap(accessibilityInputs['jobs'], 0, threshold, 0, max_livability)
                livability.loc[accessibilityInputs['jobs'] >= threshold, domains[0]] = max_livability
                livability.loc[accessibilityInputs['jobs'] < threshold, domains[0]] = livability_score

        return livability
    
    
    

    livability = accessibilityToLivability(df_matrix, AccessibilityInputs, attributeMapperDict, domainsUnique)
    

    livability_dictionary = livability.to_dict('index')
    LivabilitySubdomainsInputs_dictionary = LivabilitySubdomainsInputs.to_dict('index')
    subdomainsAccessibility_dictionary = AccessibilityInputs.to_dict('index')

    
    # Prepare the output
    output = {
        "subdomainsAccessibility_dictionary": subdomainsAccessibility_dictionary,
        "livability_dictionary": livability_dictionary,
        "subdomainsWeights_dictionary": LivabilitySubdomainsInputs_dictionary,
        "luDomainMapper": landuseMapperDict,
        "attributeMapper": attributeMapperDict,
        "artDict": art_dict
    }


    
    return json.dumps(output)

# Define the Gradio interface with a single JSON input
iface = gr.Interface(
    fn=test,
    inputs=gr.Textbox(label="Input JSON", lines=20, placeholder="Enter JSON with all parameters here..."),
    outputs=gr.JSON(label="Output JSON"),
    title="testspace"
)

iface.launch()
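
# A hedged client-side sketch (not part of this app): once iface.launch() is
# serving locally, the endpoint can be exercised with gradio_client, e.g.
#
#   from gradio_client import Client
#   client = Client("http://127.0.0.1:7860/")  # local URL assumed
#   result = client.predict('{"input": {...}}', api_name="/predict")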