from fastapi import APIRouter, Request, Response
from fastapi.responses import JSONResponse
import pandas as pd
import json
#--- these imports are only needed by the sample endpoints below
#import lib.claims as libClaims
#from lib.models import mdl_utils, mdl_xgb
rteApi = APIRouter()
#---
@rteApi.get('/')
def api_entry():
    return {
        "message": "api routing - welcome to Omdena Saudi HCC api"
    }
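#--- usage sketch (assumption: the module path and prefix below are
#--- illustrative, not confirmed by this repo) -- mounting this router
#--- from a main.py entrypoint:
#
#       from fastapi import FastAPI
#       from routes.api import rteApi
#
#       app = FastAPI()
#       app.include_router(rteApi, prefix="/api")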
'''
#--- >>> SAMPLE CODE BELOW
#--- return json for claims data (merged)
#--- note: data currently comes from the kaggle sample; a future version could filter by yyyymm
@rteApi.get('/claims', response_class = JSONResponse)
def api_getClaims(request: Request, response: Response):
    pdfClaims = libClaims.load_claims()
    jsonSample = pdfClaims.head(50).to_json(orient="records", indent=4)
    result = json.loads(jsonSample)
    return result
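#--- usage sketch (assumptions: uvicorn defaults, router mounted without a
#--- prefix; adjust host/port/prefix to the actual deployment):
#       curl -s http://localhost:8000/claims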
#--- return json for featEng
@rteApi.get('/claims/doFeatEng/', response_class = JSONResponse)
def tst_claims_featEng():
    pdfClaims = libClaims.load_claims()
    pdfFeatEng = libClaims.do_featEng(pdfClaims)
    #--- serialize the feature-engineered frame, not the raw claims
    jsonSample = pdfFeatEng.head(50).to_json(orient="records", indent=4)
    result = json.loads(jsonSample)
    return result
@rteApi.get('/claims/doStdScaling/', response_class = JSONResponse)
def tst_claims_stdScaling():
    pdfClaims = libClaims.load_claims()
    pdfFeatEng = libClaims.do_featEng(pdfClaims)
    pdfScaled = mdl_utils.doClaims_stdScaler_toPdf(pdfFeatEng)
    #--- serialize the scaled frame, not the raw claims
    jsonSample = pdfScaled.head(50).to_json(orient="records", indent=4)
    result = json.loads(jsonSample)
    return result
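#--- sketch (assumption): one way a scaler-to-DataFrame helper like
#--- doClaims_stdScaler_toPdf could work, via sklearn's StandardScaler;
#--- the real mdl_utils implementation may differ
from sklearn.preprocessing import StandardScaler

def sketch_stdScaler_toPdf(pdfFeat):
    #--- zero mean, unit variance per column; keep original labels
    npaScaled = StandardScaler().fit_transform(pdfFeat)
    return pd.DataFrame(npaScaled, columns=pdfFeat.columns, index=pdfFeat.index)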
@rteApi.get('/claims/predict/superv', response_class = JSONResponse)
@rteApi.get('/claims/predict/xgb', response_class = JSONResponse)
def predict_xgb():
    #--- load test data
    pdfClaims = libClaims.load_claims()
    pdfFeatEng = libClaims.do_featEng(pdfClaims)
    npaScaled = mdl_utils.do_stdScaler(pdfFeatEng)
    pdfScaled = mdl_utils.do_stdScaler_toPdf(npaScaled)
    ndaPredict = mdl_xgb.predict(npaScaled)
    pdfPredict = pd.DataFrame(ndaPredict)
    #--- stitch the grouped data with the labels
    pdfResults = pdfScaled.copy()
    pdfResults.insert(0, "hasAnom?", pdfPredict[0])
    #--- filter to only those rows that are flagged with an anomaly
    pdfResults = pdfResults[pdfResults['hasAnom?'] > 0]
    jsonSample = pdfResults.head(50).to_json(orient="records", indent=4)
    result = json.loads(jsonSample)
    return result
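#--- illustration (toy data, an assumption -- not repo data): the
#--- insert-then-filter pattern used above on a minimal frame
pdfToy = pd.DataFrame({"claimID": [101, 102, 103]})
pdfToy.insert(0, "hasAnom?", [0, 1, 0])        # prepend the prediction column
pdfToy = pdfToy[pdfToy["hasAnom?"] > 0]        # keep only flagged rows
print(pdfToy.to_json(orient="records"))        # [{"hasAnom?":1,"claimID":102}]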
'''