{"nwo":"DataDog\/documentation","sha":"f7f6fb076d3739c65b5e60419f1be7fd934a30d4","path":"local\/bin\/py\/placehold_translations.py","language":"python","identifier":"md_update_links","parameters":"(this_lang_code, content)","argument_list":"","return_statement":"","docstring":"Update footer links in markdown to be language relative","docstring_summary":"Update footer links in markdown to be language relative","docstring_tokens":["Update","footer","links","in","markdown","to","be","language","relative"],"function":"def md_update_links(this_lang_code, content):\n \"\"\" Update footer links in markdown to be language relative \"\"\"\n result = content\n try:\n common_lang_codes = [\"en\/\", \"es\/\", \"de\/\", \"fr\/\", \"es\/\", \"ja\/\", \"resources\/\"]\n exclude_common_langs = \"|\".join(list(map(lambda code: f\"{code}\",common_lang_codes)))\n relative_regex = re.compile(\"^(\\\\[[0-9]+]\\:\\\\s*)(\\\/(?!\" + exclude_common_langs + \").*)$\", re.MULTILINE | re.IGNORECASE)\n substitute = \"\\g<1>\/\" + this_lang_code.lower() + \"\\g<2>\"\n result = relative_regex.sub(substitute, content)\n except Exception as e:\n result = content\n logger.exception(\"fail to update md links\")\n finally:\n return result","function_tokens":["def","md_update_links","(","this_lang_code",",","content",")",":","result","=","content","try",":","common_lang_codes","=","[","\"en\/\"",",","\"es\/\"",",","\"de\/\"",",","\"fr\/\"",",","\"es\/\"",",","\"ja\/\"",",","\"resources\/\"","]","exclude_common_langs","=","\"|\"",".","join","(","list","(","map","(","lambda","code",":","f\"{code}\"",",","common_lang_codes",")",")",")","relative_regex","=","re",".","compile","(","\"^(\\\\[[0-9]+]\\:\\\\s*)(\\\/(?!\"","+","exclude_common_langs","+","\").*)$\"",",","re",".","MULTILINE","|","re",".","IGNORECASE",")","substitute","=","\"\\g<1>\/\"","+","this_lang_code",".","lower","(",")","+","\"\\g<2>\"","result","=","relative_regex",".","sub","(","substitute",",","content",")","except","Exception","as","e",":","result","=","content","logger",".","exception","(","\"fail to update md links\"",")","finally",":","return","result"],"url":"https:\/\/github.com\/DataDog\/documentation\/blob\/f7f6fb076d3739c65b5e60419f1be7fd934a30d4\/local\/bin\/py\/placehold_translations.py#L70-L83"} {"nwo":"DataDog\/documentation","sha":"f7f6fb076d3739c65b5e60419f1be7fd934a30d4","path":"local\/bin\/py\/algolia_index.py","language":"python","identifier":"find_private_url","parameters":"(path, exclusions)","argument_list":"","return_statement":"return private_urls","docstring":"Look at all files within a given folder tree (starting from path) except\n For the `exclusions` folders\n :param path: Root folder into which to look for private URL\n :param exclusions: Array of folder names to exclude for looking for private files.\n :return private_urls: A list of HTML doc private file paths.","docstring_summary":"Look at all files within a given folder tree (starting from path) except\n For the `exclusions` folders\n :param path: Root folder into which to look for private URL\n :param exclusions: Array of folder names to exclude for looking for private files.\n :return private_urls: A list of HTML doc private file 
paths.","docstring_tokens":["Look","at","all","files","within","a","given","folder","tree","(","starting","from","path",")","except","For","the","exclusions","folders",":","param","path",":","Root","folder","into","which","to","look","for","private","URL",":","param","exclusions",":","Array","of","folder","names","to","exclude","for","looking","for","private","files",".",":","return","private_urls",":","A","list","of","HTML","doc","private","file","paths","."],"function":"def find_private_url(path, exclusions):\n \"\"\"\n Look at all files within a given folder tree (starting from path) except\n For the `exclusions` folders\n :param path: Root folder into which to look for private URL\n :param exclusions: Array of folder names to exclude for looking for private files.\n :return private_urls: A list of HTML doc private file paths.\n \"\"\"\n private_urls=[]\n\n if not os.path.exists(path):\n raise ValueError('Content folder path incorrect')\n else:\n for (dirpath, dirnames, filenames) in os.walk(path):\n dirnames[:] = [d for d in dirnames if d not in exclusions]\n filenames[:] = [f for f in filenames if f.endswith('.html')]\n for filename in filenames:\n with open(os.path.join(dirpath, filename), 'rt', encoding='utf-8') as current_file:\n html = BeautifulSoup(current_file, \"html.parser\")\n if html.find_all('meta', {'name': 'robots', 'content': 'noindex, nofollow'}):\n print('\\x1b[32mINFO\\x1b[0m: Skipping private page: %s' % dirpath)\n private_urls.append(dirpath)\n\n return private_urls","function_tokens":["def","find_private_url","(","path",",","exclusions",")",":","private_urls","=","[","]","if","not","os",".","path",".","exists","(","path",")",":","raise","ValueError","(","'Content folder path incorrect'",")","else",":","for","(","dirpath",",","dirnames",",","filenames",")","in","os",".","walk","(","path",")",":","dirnames","[",":","]","=","[","d","for","d","in","dirnames","if","d","not","in","exclusions","]","filenames","[",":","]","=","[","f","for","f","in","filenames","if","f",".","endswith","(","'.html'",")","]","for","filename","in","filenames",":","with","open","(","os",".","path",".","join","(","dirpath",",","filename",")",",","'rt'",",","encoding","=","'utf-8'",")","as","current_file",":","html","=","BeautifulSoup","(","current_file",",","\"html.parser\"",")","if","html",".","find_all","(","'meta'",",","{","'name'",":","'robots'",",","'content'",":","'noindex, nofollow'","}",")",":","print","(","'\\x1b[32mINFO\\x1b[0m: Skipping private page: %s'","%","dirpath",")","private_urls",".","append","(","dirpath",")","return","private_urls"],"url":"https:\/\/github.com\/DataDog\/documentation\/blob\/f7f6fb076d3739c65b5e60419f1be7fd934a30d4\/local\/bin\/py\/algolia_index.py#L9-L32"} {"nwo":"DataDog\/documentation","sha":"f7f6fb076d3739c65b5e60419f1be7fd934a30d4","path":"local\/bin\/py\/algolia_index.py","language":"python","identifier":"transform_url","parameters":"(private_urls)","argument_list":"","return_statement":"return new_private_urls","docstring":"Transforms URL returned by removing the public\/ (name of the local folder with all hugo html files)\n into \"real\" documentation links for Algolia\n :param private_urls: Array of file links in public\/ to transform into doc links.\n :return new_private_urls: A list of documentation URL links that correspond to private doc files.","docstring_summary":"Transforms URL returned by removing the public\/ (name of the local folder with all hugo html files)\n into \"real\" documentation links for Algolia\n :param private_urls: Array of 
# https://github.com/DataDog/documentation/blob/f7f6fb076d3739c65b5e60419f1be7fd934a30d4/local/bin/py/algolia_index.py#L34-L52
def transform_url(private_urls):
    """
    Transforms the URLs returned by removing the public/ prefix (name of the local
    folder with all hugo html files), turning them into "real" documentation links for Algolia
    :param private_urls: Array of file links in public/ to transform into doc links.
    :return new_private_urls: A list of documentation URL links that correspond to private doc files.
    """
    new_private_urls = []
    for url in private_urls:

        # Skip localized FAQ URLs and API pages: localized FAQs are not indexed
        # anyway, and the API page is indexed as a whole, so neither belongs in
        # the list of stopped URLs.
        if not (re.match(r"public/fr/.*/faq/.*", url) or re.match(r"public/ja/.*/faq/.*", url) or re.match(r"public/ja/api/.*", url) or re.match(r"public/fr/api/.*", url)):

            # Append /$ to make every link "final": Algolia's stop_urls parameter
            # matches by regex, not by exact link.
            new_private_urls.append(url.replace('public/', 'docs.datadoghq.com/') + '/$')

    return new_private_urls
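# --- Behavior sketch (not from the source repo) ---
transform_url(['public/account_management/faq', 'public/fr/agent/faq/topic'])
# -> ['docs.datadoghq.com/account_management/faq/$']
# (the localized FAQ path is filtered out)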
# https://github.com/DataDog/documentation/blob/f7f6fb076d3739c65b5e60419f1be7fd934a30d4/local/bin/py/algolia_index.py#L54-L73
def update_algolia_private_url(docs_index_config, private_urls):
    """
    Updates the Algolia Docsearch configuration file with the list of private links to exclude from
    Algolia indexes.
    :param docs_index_config: Original configuration file for the Algolia Doc search
    :param private_urls: A list of documentation URL links that correspond to private doc files.
    """
    with open(docs_index_config, 'rt', encoding='utf-8') as json_file:
        print("\x1b[32mINFO\x1b[0m: Configuration file {} correctly loaded.".format(docs_index_config))
        config = json.load(json_file)

    print("\x1b[32mINFO\x1b[0m: Adding list of private URLs.")

    # Add the list of private urls, removing duplicates and sorting the global list
    config["stop_urls"] = sorted(list(dict.fromkeys(config["stop_urls"] + private_urls)))

    print("\x1b[32mINFO\x1b[0m: Addition complete, updating Algolia main configuration file with the new one.")

    with open(docs_index_config, 'w+', encoding='utf-8') as json_file:
        json.dump(config, json_file)
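# --- Pipeline sketch (not from the source repo; the config file name is illustrative) ---
# The three helpers above chain together to keep private pages out of search:
private_urls = find_private_url('public', exclusions=[])
update_algolia_private_url('docsearch_config.json', transform_url(private_urls))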
# ============================================================================
# local/bin/py/build/github_connect.py
# (module context: wraps from functools, pickle, makedirs from os,
#  dirname and exists from os.path)
# ============================================================================

# https://github.com/DataDog/documentation/blob/f7f6fb076d3739c65b5e60419f1be7fd934a30d4/local/bin/py/build/github_connect.py#L15-L44
def cache_by_sha(func):
    """ only downloads a fresh file if we don't have one, or we do and the sha has changed """

    @wraps(func)
    def cached_func(*args, **kwargs):
        cache = {}
        list_item = args[1]
        dest_dir = kwargs.get("dest_dir")
        path_to_file = list_item.get("path", "")
        file_out = "{}{}".format(dest_dir, path_to_file)
        p_file_out = "{}{}.pickle".format(dest_dir, path_to_file)
        makedirs(dirname(file_out), exist_ok=True)
        if exists(p_file_out) and exists(file_out):
            with open(p_file_out, "rb") as pf:
                cache = pickle.load(pf)
        cache_sha = cache.get("sha", False)
        input_sha = list_item.get("sha", False)
        if cache_sha and input_sha and cache_sha == input_sha:
            # do nothing, as we already have the up-to-date file
            return None
        else:
            with open(p_file_out, mode="wb+") as pf:
                pickle.dump(list_item, pf, pickle.HIGHEST_PROTOCOL)
            return func(*args, **kwargs)

    return cached_func
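# --- Usage sketch (not from the source repo; FakeClient is hypothetical) ---
# The decorator expects the wrapped method's second positional argument to be a
# GitHub listing entry carrying "path" and "sha", plus a dest_dir keyword.
class FakeClient:
    @cache_by_sha
    def raw(self, list_item, dest_dir=None):
        # hypothetical downloader; only called when the cached sha is stale
        return list_item["path"]

FakeClient().raw({"path": "README.md", "sha": "abc123"}, dest_dir="extracted/")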
key\/value)","docstring_tokens":["Takes","a","string","from","the","agent","config","template","and","formats","it","for","output","in","agent_config","shortcode",".","-","If","string","contains","exactly","one","#","and","no","alphabetic","characters","remove","the","#","and","any","new","line","characters",".","-","If","the","string","contains","exactly","one","#","and","has","alphabetic","characters","remove","the","#","(","this","represents","a","config","key","\/","value",")"],"function":"def format_agent_config_string(string):\n \"\"\"\n Takes a string from the agent config template and formats it for output in\n agent_config shortcode.\n - If string contains exactly one '#' and no alphabetic characters, \n remove the '#' and any new line characters.\n - If the string contains exactly one '#' and has alphabetic characters, \n remove the '#' (this represents a config key\/value)\n \"\"\"\n # Match any lines containing strictly one '#' character that can have any\n # number of leading or trailing whitespaces, and no words.\n # Matching lines in the config template file are for spacing only.\n regex_string = r\"[^a-zA-Z0-9#]*#[^a-zA-Z0-9#]*$\"\n\n if re.match(regex_string, string):\n return string.replace('#', '').replace('\\n', '').strip()\n elif '#' in string and '##' not in string:\n return string.replace('# ', '') + '\\n'\n else:\n return string + '\\n'","function_tokens":["def","format_agent_config_string","(","string",")",":","# Match any lines containing strictly one '#' character that can have any","# number of leading or trailing whitespaces, and no words.","# Matching lines in the config template file are for spacing only.","regex_string","=","r\"[^a-zA-Z0-9#]*#[^a-zA-Z0-9#]*$\"","if","re",".","match","(","regex_string",",","string",")",":","return","string",".","replace","(","'#'",",","''",")",".","replace","(","'\\n'",",","''",")",".","strip","(",")","elif","'#'","in","string","and","'##'","not","in","string",":","return","string",".","replace","(","'# '",",","''",")","+","'\\n'","else",":","return","string","+","'\\n'"],"url":"https:\/\/github.com\/DataDog\/documentation\/blob\/f7f6fb076d3739c65b5e60419f1be7fd934a30d4\/local\/bin\/py\/build\/process_agent_config.py#L10-L29"} {"nwo":"DataDog\/documentation","sha":"f7f6fb076d3739c65b5e60419f1be7fd934a30d4","path":"local\/bin\/py\/build\/process_agent_config.py","language":"python","identifier":"create_agent_config_dict","parameters":"(dd_agent_config_string)","argument_list":"","return_statement":"return agent_config_dict","docstring":"Returns a dictionary where the keys represent each config type\n (i.e. \"Basic Configuration\", \"Log Collection Configuration\", etc),\n and the values are a string containing the config options comments.","docstring_summary":"Returns a dictionary where the keys represent each config type\n (i.e. \"Basic Configuration\", \"Log Collection Configuration\", etc),\n and the values are a string containing the config options comments.","docstring_tokens":["Returns","a","dictionary","where","the","keys","represent","each","config","type","(","i",".","e",".","Basic","Configuration","Log","Collection","Configuration","etc",")","and","the","values","are","a","string","containing","the","config","options","comments","."],"function":"def create_agent_config_dict(dd_agent_config_string):\n \"\"\"\n Returns a dictionary where the keys represent each config type\n (i.e. 
\"Basic Configuration\", \"Log Collection Configuration\", etc),\n and the values are a string containing the config options comments.\n \"\"\"\n config_type_header_delimiter = '#######'\n agent_config_array = dd_agent_config_string.splitlines()\n agent_config_dict = {}\n current_config_type = ''\n\n for index, line in enumerate(agent_config_array):\n if config_type_header_delimiter in line:\n config_type = agent_config_array[index + 1].replace('#', '') \\\n .strip().lower()\n\n if config_type:\n agent_config_dict.setdefault(config_type, '')\n current_config_type = config_type\n else:\n # Skip any Go template strings or comment boxes used to delineate\n # config types.\n if '{{' not in line and not re.match(r\"^##.+##$\", line):\n formatted_string = format_agent_config_string(line)\n agent_config_dict[current_config_type] += formatted_string\n\n return agent_config_dict","function_tokens":["def","create_agent_config_dict","(","dd_agent_config_string",")",":","config_type_header_delimiter","=","'#######'","agent_config_array","=","dd_agent_config_string",".","splitlines","(",")","agent_config_dict","=","{","}","current_config_type","=","''","for","index",",","line","in","enumerate","(","agent_config_array",")",":","if","config_type_header_delimiter","in","line",":","config_type","=","agent_config_array","[","index","+","1","]",".","replace","(","'#'",",","''",")",".","strip","(",")",".","lower","(",")","if","config_type",":","agent_config_dict",".","setdefault","(","config_type",",","''",")","current_config_type","=","config_type","else",":","# Skip any Go template strings or comment boxes used to delineate","# config types.","if","'{{'","not","in","line","and","not","re",".","match","(","r\"^##.+##$\"",",","line",")",":","formatted_string","=","format_agent_config_string","(","line",")","agent_config_dict","[","current_config_type","]","+=","formatted_string","return","agent_config_dict"],"url":"https:\/\/github.com\/DataDog\/documentation\/blob\/f7f6fb076d3739c65b5e60419f1be7fd934a30d4\/local\/bin\/py\/build\/process_agent_config.py#L32-L58"} {"nwo":"DataDog\/documentation","sha":"f7f6fb076d3739c65b5e60419f1be7fd934a30d4","path":"local\/bin\/py\/build\/process_agent_config.py","language":"python","identifier":"document_config_types","parameters":"(agent_config_dict)","argument_list":"","return_statement":"return 'Available config types: \\n\\n' + '\\n'.join(agent_config_dict.keys())","docstring":"Returns a single string containing all available config types,\n for Docs team to reference.","docstring_summary":"Returns a single string containing all available config types,\n for Docs team to reference.","docstring_tokens":["Returns","a","single","string","containing","all","available","config","types","for","Docs","team","to","reference","."],"function":"def document_config_types(agent_config_dict):\n \"\"\"\n Returns a single string containing all available config types,\n for Docs team to reference.\n \"\"\"\n return 'Available config types: \\n\\n' + '\\n'.join(agent_config_dict.keys())","function_tokens":["def","document_config_types","(","agent_config_dict",")",":","return","'Available config types: \\n\\n'","+","'\\n'",".","join","(","agent_config_dict",".","keys","(",")",")"],"url":"https:\/\/github.com\/DataDog\/documentation\/blob\/f7f6fb076d3739c65b5e60419f1be7fd934a30d4\/local\/bin\/py\/build\/process_agent_config.py#L61-L66"} 
{"nwo":"DataDog\/documentation","sha":"f7f6fb076d3739c65b5e60419f1be7fd934a30d4","path":"local\/bin\/py\/build\/process_agent_config.py","language":"python","identifier":"process_agent_config","parameters":"(dd_agent_config_string)","argument_list":"","return_statement":"","docstring":"Takes the Datadog Agent Config template as a string, separates it by type,\n formats the strings, and outputs the results as json to be consumed by the\n agent_config.html shortcode for display.","docstring_summary":"Takes the Datadog Agent Config template as a string, separates it by type,\n formats the strings, and outputs the results as json to be consumed by the\n agent_config.html shortcode for display.","docstring_tokens":["Takes","the","Datadog","Agent","Config","template","as","a","string","separates","it","by","type","formats","the","strings","and","outputs","the","results","as","json","to","be","consumed","by","the","agent_config",".","html","shortcode","for","display","."],"function":"def process_agent_config(dd_agent_config_string):\n \"\"\"\n Takes the Datadog Agent Config template as a string, separates it by type,\n formats the strings, and outputs the results as json to be consumed by the\n agent_config.html shortcode for display.\n \"\"\"\n try:\n agent_config_dict = create_agent_config_dict(dd_agent_config_string)\n formatted_agent_config_json = json.dumps(agent_config_dict)\n config_types_string = document_config_types(agent_config_dict)\n\n with open('data\/agent_config.json', 'w+') as agent_json_config_outfile:\n agent_json_config_outfile.write(formatted_agent_config_json)\n\n # Documenting what config types are available for Docs team to use in\n # agent config shortcode.\n with open('agent_config_types_list.txt', 'w+') as config_types_outfile:\n config_types_outfile.write(config_types_string)\n except Exception as err:\n print('An error occurred building agent config data:')\n print(err)\n\n if getenv(\"LOCAL\") != 'True':\n sys.exit(1)","function_tokens":["def","process_agent_config","(","dd_agent_config_string",")",":","try",":","agent_config_dict","=","create_agent_config_dict","(","dd_agent_config_string",")","formatted_agent_config_json","=","json",".","dumps","(","agent_config_dict",")","config_types_string","=","document_config_types","(","agent_config_dict",")","with","open","(","'data\/agent_config.json'",",","'w+'",")","as","agent_json_config_outfile",":","agent_json_config_outfile",".","write","(","formatted_agent_config_json",")","# Documenting what config types are available for Docs team to use in","# agent config shortcode.","with","open","(","'agent_config_types_list.txt'",",","'w+'",")","as","config_types_outfile",":","config_types_outfile",".","write","(","config_types_string",")","except","Exception","as","err",":","print","(","'An error occurred building agent config data:'",")","print","(","err",")","if","getenv","(","\"LOCAL\"",")","!=","'True'",":","sys",".","exit","(","1",")"],"url":"https:\/\/github.com\/DataDog\/documentation\/blob\/f7f6fb076d3739c65b5e60419f1be7fd934a30d4\/local\/bin\/py\/build\/process_agent_config.py#L69-L92"} {"nwo":"DataDog\/documentation","sha":"f7f6fb076d3739c65b5e60419f1be7fd934a30d4","path":"local\/bin\/py\/build\/content_manager.py","language":"python","identifier":"download_from_repo","parameters":"(github_token, org, repo, branch, globs, extract_dir, commit_sha=None)","argument_list":"","return_statement":"","docstring":"Takes github info and file globs and downloads files from github using multiple processes\n :param github_token: 
# ============================================================================
# local/bin/py/build/content_manager.py
# (module context: cpu_count and Pool from multiprocessing, partial from
#  functools, requests, sep and getenv from os, isdir from os.path, sys,
#  and the repo's own GitHub client class)
# ============================================================================

# https://github.com/DataDog/documentation/blob/f7f6fb076d3739c65b5e60419f1be7fd934a30d4/local/bin/py/build/content_manager.py#L14-L48
def download_from_repo(github_token, org, repo, branch, globs, extract_dir, commit_sha=None):
    """
    Takes github info and file globs and downloads files from github using multiple processes
    :param github_token: A valid Github token to download content with the Github Class
    :param org: github organization or person
    :param repo: github repo name
    :param branch: the branch name
    :param globs: list of strings in glob format of what to extract
    :param extract_dir: Directory in which to put all downloaded content.
    :param commit_sha: sha if we want to provide one
    :return:
    """
    pool_size = cpu_count()

    with GitHub(github_token) as gh:
        listing = gh.list(org, repo, branch, commit_sha, globs)
        dest = "{0}{1}{2}".format(extract_dir, repo, sep)
        with Pool(processes=pool_size) as pool:
            with requests.Session() as s:
                # Materializing the iterator is what actually drives the downloads.
                r = [
                    x
                    for x in pool.imap_unordered(
                        partial(
                            gh.raw,
                            request_session=s,
                            org=org,
                            repo=repo,
                            branch=branch if not commit_sha else commit_sha,
                            dest_dir=dest,
                        ),
                        listing,
                    )
                ]
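# --- Usage sketch (not from the source repo; branch and glob are illustrative) ---
download_from_repo(
    github_token="<GITHUB_TOKEN>",
    org="DataDog",
    repo="datadog-agent",
    branch="main",
    globs=["pkg/config/config_template.yaml"],
    extract_dir="extracted/",
)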
]","function_tokens":["def","download_from_repo","(","github_token",",","org",",","repo",",","branch",",","globs",",","extract_dir",",","commit_sha","=","None",")",":","pool_size","=","cpu_count","(",")","with","GitHub","(","github_token",")","as","gh",":","listing","=","gh",".","list","(","org",",","repo",",","branch",",","commit_sha",",","globs",")","dest","=","\"{0}{1}{2}\"",".","format","(","extract_dir",",","repo",",","sep",")","with","Pool","(","processes","=","pool_size",")","as","pool",":","with","requests",".","Session","(",")","as","s",":","r","=","[","x","for","x","in","pool",".","imap_unordered","(","partial","(","gh",".","raw",",","request_session","=","s",",","org","=","org",",","repo","=","repo",",","branch","=","branch","if","not","commit_sha","else","commit_sha",",","dest_dir","=","dest",",",")",",","listing",",",")","]"],"url":"https:\/\/github.com\/DataDog\/documentation\/blob\/f7f6fb076d3739c65b5e60419f1be7fd934a30d4\/local\/bin\/py\/build\/content_manager.py#L14-L48"} {"nwo":"DataDog\/documentation","sha":"f7f6fb076d3739c65b5e60419f1be7fd934a30d4","path":"local\/bin\/py\/build\/content_manager.py","language":"python","identifier":"update_globs","parameters":"(new_path, globs)","argument_list":"","return_statement":"return new_globs","docstring":"Depending if the repo is local or we downloaded it we need to update the globs to match\n the final version of the repo to use\n :param new_path: new_path to update the globs with\n :param globs: list of globs to update","docstring_summary":"Depending if the repo is local or we downloaded it we need to update the globs to match\n the final version of the repo to use\n :param new_path: new_path to update the globs with\n :param globs: list of globs to update","docstring_tokens":["Depending","if","the","repo","is","local","or","we","downloaded","it","we","need","to","update","the","globs","to","match","the","final","version","of","the","repo","to","use",":","param","new_path",":","new_path","to","update","the","globs","with",":","param","globs",":","list","of","globs","to","update"],"function":"def update_globs(new_path, globs):\n \"\"\"\n Depending if the repo is local or we downloaded it we need to update the globs to match\n the final version of the repo to use\n :param new_path: new_path to update the globs with\n :param globs: list of globs to update\n \"\"\"\n new_globs = []\n for item in globs:\n new_globs.append(\"{}{}\".format(new_path, item))\n return new_globs","function_tokens":["def","update_globs","(","new_path",",","globs",")",":","new_globs","=","[","]","for","item","in","globs",":","new_globs",".","append","(","\"{}{}\"",".","format","(","new_path",",","item",")",")","return","new_globs"],"url":"https:\/\/github.com\/DataDog\/documentation\/blob\/f7f6fb076d3739c65b5e60419f1be7fd934a30d4\/local\/bin\/py\/build\/content_manager.py#L51-L61"} {"nwo":"DataDog\/documentation","sha":"f7f6fb076d3739c65b5e60419f1be7fd934a30d4","path":"local\/bin\/py\/build\/content_manager.py","language":"python","identifier":"local_or_upstream","parameters":"(github_token, extract_dir, list_of_contents)","argument_list":"","return_statement":"return list_of_contents","docstring":"This goes through the list_of_contents and check for each repo specified\n If a local version exists otherwise we download it from the upstream repo on Github\n Local version of the repo should be in the same folder as the documentation\/ folder.\n :param github_token: A valide Github token to download content with the Github Class\n :param extract_dir: 
# https://github.com/DataDog/documentation/blob/f7f6fb076d3739c65b5e60419f1be7fd934a30d4/local/bin/py/build/content_manager.py#L64-L117
def local_or_upstream(github_token, extract_dir, list_of_contents):
    """
    Goes through the list_of_contents and checks, for each repo specified, whether
    a local version exists; otherwise the content is downloaded from the upstream repo on Github.
    A local version of the repo should be in the same folder as the documentation/ folder.
    :param github_token: A valid Github token to download content with the Github Class
    :param extract_dir: Directory into which to put all content downloaded.
    :param list_of_contents: List of content to check if available locally or if it needs to be downloaded from Github
    """
    for content in list_of_contents:
        repo_name = "../" + content["repo_name"] + sep
        if isdir(repo_name):
            print("\x1b[32mINFO\x1b[0m: Local version of {} found".format(
                content["repo_name"]))
            content["globs"] = update_globs(
                repo_name,
                content["globs"],
            )
        elif github_token != "false":
            print(
                "\x1b[32mINFO\x1b[0m: No local version of {} found, downloading content from upstream version".format(
                    content["repo_name"]
                )
            )
            download_from_repo(github_token,
                               content["org_name"],
                               content["repo_name"],
                               content["branch"],
                               content["globs"],
                               extract_dir,
                               content.get("sha", None)
                               )
            content["globs"] = update_globs(
                "{0}{1}{2}".format(
                    extract_dir,
                    content["repo_name"],
                    sep,
                ),
                content["globs"],
            )
        elif getenv("LOCAL") == 'True':
            print(
                "\x1b[33mWARNING\x1b[0m: No local version of {} found, no GITHUB_TOKEN available. Documentation is now in degraded mode".format(content["repo_name"]))
            content["action"] = "Not Available"
        else:
            print(
                "\x1b[31mERROR\x1b[0m: No local version of {} found, no GITHUB_TOKEN available.".format(
                    content["repo_name"]
                )
            )
            raise ValueError("No local version of {} found and no GITHUB_TOKEN available".format(content["repo_name"]))
    return list_of_contents
# https://github.com/DataDog/documentation/blob/f7f6fb076d3739c65b5e60419f1be7fd934a30d4/local/bin/py/build/content_manager.py#L120-L155
def extract_config(configuration):
    """
    This pulls the content from the configuration file at the `configuration` location,
    then parses it to populate the list_of_contents variable that contains all contents
    that need to be pulled and processed.
    :param configuration: Documentation build configuration file path.
    """
    list_of_contents = []

    for org in configuration:
        for repo in org["repos"]:
            for content in repo["contents"]:
                content_temp = {
                    "org_name": org["org_name"],
                    "repo_name": repo["repo_name"],
                    "branch": content["branch"],
                    "sha": content.get("sha", None),
                    "action": content["action"],
                    "globs": content["globs"],
                }

                if content["action"] in ("pull-and-push-folder", "pull-and-push-file", "security-rules", "compliance-rules"):
                    content_temp["options"] = content["options"]

                list_of_contents.append(content_temp)

    return list_of_contents
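# --- Input sketch (not from the source repo; values are illustrative, field
# names follow the code above) ---
configuration = [{
    "org_name": "DataDog",
    "repos": [{
        "repo_name": "datadog-agent",
        "contents": [{
            "branch": "main",
            "action": "pull-and-push-file",
            "globs": ["pkg/config/config_template.yaml"],
            "options": {"dest_path": "/agent/", "file_name": "config_template.yaml"},
        }],
    }],
}]
extract_config(configuration)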
# https://github.com/DataDog/documentation/blob/f7f6fb076d3739c65b5e60419f1be7fd934a30d4/local/bin/py/build/content_manager.py#L158-L177
def prepare_content(configuration, github_token, extract_dir):
    """
    Prepares the content for the documentation build. It checks, for all content,
    whether it's available locally or if it should be downloaded.
    :param configuration: Documentation build configuration file path.
    :param github_token: A valid Github token to download content with the Github Class
    :param extract_dir: Directory into which to put all content downloaded.
    """
    # Initialized up front so degraded (LOCAL) mode can still return after a failure.
    list_of_contents = []
    try:
        list_of_contents = local_or_upstream(
            github_token, extract_dir, extract_config(configuration))
    except Exception:
        if getenv("LOCAL") == 'True':
            print(
                "\x1b[33mWARNING\x1b[0m: Downloading files failed, documentation is now in degraded mode.")
        else:
            print(
                "\x1b[31mERROR\x1b[0m: Downloading files failed, stopping build.")
            sys.exit(1)
    return list_of_contents
put","docstring_tokens":["Take","the","content","from","a","folder","following","github","logic","and","transform","it","to","be","displayed","in","the","doc","in","dest_dir","folder","See","https",":","\/\/","github",".","com","\/","DataDog","\/","documentation","\/","wiki","\/","Documentation","-","Build#pull","-","and","-","push","-","folder","to","learn","more",":","param","content",":","content","to","process",":","param","content_dir",":","The","directory","where","content","should","be","put"],"function":"def pull_and_push_folder(content, content_dir):\n \"\"\"\n Take the content from a folder following github logic\n and transform it to be displayed in the doc in dest_dir folder\n See https:\/\/github.com\/DataDog\/documentation\/wiki\/Documentation-Build#pull-and-push-folder to learn more\n :param content: content to process\n :param content_dir: The directory where content should be put\n \"\"\"\n for file_name in chain.from_iterable(glob.iglob(pattern, recursive=True) for pattern in content[\"globs\"]):\n with open(file_name, mode=\"r+\", encoding=\"utf-8\", errors=\"ignore\") as f:\n file_content = f.read()\n boundary = re.compile(r'^-{3,}$', re.MULTILINE)\n split = boundary.split(file_content, 2)\n new_yml = {}\n txt = file_content\n if len(split) == 3:\n _, fm, txt = split\n new_yml = yaml.safe_load(fm)\n elif len(split) == 1:\n txt = split[0]\n # if front matter update existing\n if \"front_matters\" in content[\"options\"]:\n new_yml.update(content[\"options\"][\"front_matters\"])\n # if the dependency ends with a `\/` e.g folder then lets try replace with the actual filename\n new_deps = []\n for dep in new_yml.get(\"dependencies\", []):\n if dep.endswith('\/'):\n new_deps.append('{}{}'.format(\n dep,\n basename(file_name)\n ))\n new_yml['dependencies'] = new_deps\n front_matter = yaml.dump(new_yml, default_flow_style=False).strip()\n # Replacing links that point to the Github folder by link that point to the doc.\n new_link = (\n content[\"options\"][\"dest_dir\"] + \"\\\\2\"\n )\n regex_github_link = re.compile(\n r\"(https:\\\/\\\/github\\.com\\\/{}\\\/{}\\\/blob\\\/{}\\\/{})(\\S+)\\.md\".format(\n content[\"org_name\"],\n content[\"repo_name\"],\n content[\"branch\"],\n content[\"options\"][\n \"path_to_remove\"\n ],\n )\n )\n txt = re.sub(\n regex_github_link,\n new_link,\n txt,\n count=0,\n )\n file_content = TEMPLATE.format(front_matter=front_matter, content=txt.strip())\n # Replacing the master README.md by _index.md to follow Hugo logic\n if file_name.endswith(\"README.md\"):\n file_name = \"_index.md\"\n # Writing the new content to the documentation file\n dirp = \"{}{}\".format(\n content_dir,\n content[\"options\"][\"dest_dir\"][1:],\n )\n makedirs(dirp, exist_ok=True)\n with open(\n \"{}{}\".format(dirp, basename(file_name)),\n mode=\"w+\",\n encoding=\"utf-8\",\n ) as f:\n 
f.write(file_content)","function_tokens":["def","pull_and_push_folder","(","content",",","content_dir",")",":","for","file_name","in","chain",".","from_iterable","(","glob",".","iglob","(","pattern",",","recursive","=","True",")","for","pattern","in","content","[","\"globs\"","]",")",":","with","open","(","file_name",",","mode","=","\"r+\"",",","encoding","=","\"utf-8\"",",","errors","=","\"ignore\"",")","as","f",":","file_content","=","f",".","read","(",")","boundary","=","re",".","compile","(","r'^-{3,}$'",",","re",".","MULTILINE",")","split","=","boundary",".","split","(","file_content",",","2",")","new_yml","=","{","}","txt","=","file_content","if","len","(","split",")","==","3",":","_",",","fm",",","txt","=","split","new_yml","=","yaml",".","safe_load","(","fm",")","elif","len","(","split",")","==","1",":","txt","=","split","[","0","]","# if front matter update existing","if","\"front_matters\"","in","content","[","\"options\"","]",":","new_yml",".","update","(","content","[","\"options\"","]","[","\"front_matters\"","]",")","# if the dependency ends with a `\/` e.g folder then lets try replace with the actual filename","new_deps","=","[","]","for","dep","in","new_yml",".","get","(","\"dependencies\"",",","[","]",")",":","if","dep",".","endswith","(","'\/'",")",":","new_deps",".","append","(","'{}{}'",".","format","(","dep",",","basename","(","file_name",")",")",")","new_yml","[","'dependencies'","]","=","new_deps","front_matter","=","yaml",".","dump","(","new_yml",",","default_flow_style","=","False",")",".","strip","(",")","# Replacing links that point to the Github folder by link that point to the doc.","new_link","=","(","content","[","\"options\"","]","[","\"dest_dir\"","]","+","\"\\\\2\"",")","regex_github_link","=","re",".","compile","(","r\"(https:\\\/\\\/github\\.com\\\/{}\\\/{}\\\/blob\\\/{}\\\/{})(\\S+)\\.md\"",".","format","(","content","[","\"org_name\"","]",",","content","[","\"repo_name\"","]",",","content","[","\"branch\"","]",",","content","[","\"options\"","]","[","\"path_to_remove\"","]",",",")",")","txt","=","re",".","sub","(","regex_github_link",",","new_link",",","txt",",","count","=","0",",",")","file_content","=","TEMPLATE",".","format","(","front_matter","=","front_matter",",","content","=","txt",".","strip","(",")",")","# Replacing the master README.md by _index.md to follow Hugo logic","if","file_name",".","endswith","(","\"README.md\"",")",":","file_name","=","\"_index.md\"","# Writing the new content to the documentation file","dirp","=","\"{}{}\"",".","format","(","content_dir",",","content","[","\"options\"","]","[","\"dest_dir\"","]","[","1",":","]",",",")","makedirs","(","dirp",",","exist_ok","=","True",")","with","open","(","\"{}{}\"",".","format","(","dirp",",","basename","(","file_name",")",")",",","mode","=","\"w+\"",",","encoding","=","\"utf-8\"",",",")","as","f",":","f",".","write","(","file_content",")"],"url":"https:\/\/github.com\/DataDog\/documentation\/blob\/f7f6fb076d3739c65b5e60419f1be7fd934a30d4\/local\/bin\/py\/build\/actions\/pull_and_push_folder.py#L19-L87"} {"nwo":"DataDog\/documentation","sha":"f7f6fb076d3739c65b5e60419f1be7fd934a30d4","path":"local\/bin\/py\/build\/actions\/pull_and_push_file.py","language":"python","identifier":"pull_and_push_file","parameters":"(content, content_dir)","argument_list":"","return_statement":"","docstring":"Takes the content from a file from a github repo and\n pushed it to the doc\n See https:\/\/github.com\/DataDog\/documentation\/wiki\/Documentation-Build#pull-and-push-files to learn more\n :param 
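# --- Assumption sketch: the module-level TEMPLATE constant is not part of this
# extract, but from its usage in pull_and_push_folder above (and the two
# actions below) it takes `front_matter` and `content` fields, presumably Hugo
# front matter plus body, roughly:
TEMPLATE = """---
{front_matter}
---

{content}
"""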
# ============================================================================
# local/bin/py/build/actions/pull_and_push_file.py
# (module context: re, yaml, basename from os.path, TEMPLATE, and
#  process_agent_config from the build package)
# ============================================================================

# https://github.com/DataDog/documentation/blob/f7f6fb076d3739c65b5e60419f1be7fd934a30d4/local/bin/py/build/actions/pull_and_push_file.py#L17-L51
def pull_and_push_file(content, content_dir):
    """
    Takes the content from a file from a github repo and
    pushes it to the doc.
    See https://github.com/DataDog/documentation/wiki/Documentation-Build#pull-and-push-files to learn more
    :param content: object with a file_name, a file_path, and options to apply
    :param content_dir: The directory where content should be put
    """
    with open("".join(content["globs"]), mode="r+") as f:
        file_content = f.read()
        # If options include front matter params, the H1 title of the source file
        # is stripped and the options front matter params are inlined
        if "front_matters" in content["options"]:
            front_matter = yaml.dump(content["options"]["front_matters"], default_flow_style=False).strip()
            # remove the h1 if it exists
            file_content = re.sub(re.compile(r"^#{1}(?!#)(.*)", re.MULTILINE), "", file_content, count=1)
            file_content = TEMPLATE.format(front_matter=front_matter, content=file_content.strip())
        elif "datadog-agent" in content["repo_name"] and "config_template.yaml" in "".join(content["globs"]):
            process_agent_config(file_content)

        output_content = content["options"].get("output_content", True)

        if output_content:
            destination_path = content["options"]["dest_path"].lstrip('/')

            with open(
                "{}{}{}".format(
                    content_dir,
                    destination_path,
                    basename(content["options"]["file_name"]),
                ),
                mode="w+",
                encoding="utf-8",
            ) as f:
                f.write(file_content)
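# --- Usage sketch (not from the source repo; paths and options are illustrative) ---
content = {
    "repo_name": "datadog-agent",
    "globs": ["extracted/datadog-agent/pkg/config/config_template.yaml"],
    "options": {
        "dest_path": "/agent/guide/",
        "file_name": "template.md",
        "front_matters": {"title": "Agent configuration"},
    },
}
pull_and_push_file(content, "content/en/")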
exists","file_content","=","re",".","sub","(","re",".","compile","(","r\"^#{1}(?!#)(.*)\"",",","re",".","MULTILINE",")",",","\"\"",",","file_content",",","count","=","1",")","file_content","=","TEMPLATE",".","format","(","front_matter","=","front_matter",",","content","=","file_content",".","strip","(",")",")","elif","\"datadog-agent\"","in","content","[","\"repo_name\"","]","and","\"config_template.yaml\"","in","\"\"",".","join","(","content","[","\"globs\"","]",")",":","process_agent_config","(","file_content",")","output_content","=","content","[","\"options\"","]",".","get","(","\"output_content\"",",","True",")","if","output_content",":","destination_path","=","content","[","\"options\"","]","[","\"dest_path\"","]",".","lstrip","(","'\/'",")","with","open","(","\"{}{}{}\"",".","format","(","content_dir",",","destination_path",",","basename","(","content","[","\"options\"","]","[","\"file_name\"","]",")",",",")",",","mode","=","\"w+\"",",","encoding","=","\"utf-8\"",",",")","as","f",":","f",".","write","(","file_content",")"],"url":"https:\/\/github.com\/DataDog\/documentation\/blob\/f7f6fb076d3739c65b5e60419f1be7fd934a30d4\/local\/bin\/py\/build\/actions\/pull_and_push_file.py#L17-L51"} {"nwo":"DataDog\/documentation","sha":"f7f6fb076d3739c65b5e60419f1be7fd934a30d4","path":"local\/bin\/py\/build\/actions\/security_rules.py","language":"python","identifier":"security_rules","parameters":"(content, content_dir)","argument_list":"","return_statement":"","docstring":"Takes the content from a file from a github repo and\n pushed it to the doc\n See https:\/\/github.com\/DataDog\/documentation\/wiki\/Documentation-Build#pull-and-push-files to learn more\n :param content: object with a file_name, a file_path, and options to apply\n :param content_dir: The directory where content should be put","docstring_summary":"Takes the content from a file from a github repo and\n pushed it to the doc\n See https:\/\/github.com\/DataDog\/documentation\/wiki\/Documentation-Build#pull-and-push-files to learn more\n :param content: object with a file_name, a file_path, and options to apply\n :param content_dir: The directory where content should be put","docstring_tokens":["Takes","the","content","from","a","file","from","a","github","repo","and","pushed","it","to","the","doc","See","https",":","\/\/","github",".","com","\/","DataDog","\/","documentation","\/","wiki","\/","Documentation","-","Build#pull","-","and","-","push","-","files","to","learn","more",":","param","content",":","object","with","a","file_name","a","file_path","and","options","to","apply",":","param","content_dir",":","The","directory","where","content","should","be","put"],"function":"def security_rules(content, content_dir):\n \"\"\"\n Takes the content from a file from a github repo and\n pushed it to the doc\n See https:\/\/github.com\/DataDog\/documentation\/wiki\/Documentation-Build#pull-and-push-files to learn more\n :param content: object with a file_name, a file_path, and options to apply\n :param content_dir: The directory where content should be put\n \"\"\"\n logger.info(\"Starting security rules action...\")\n global_aliases = []\n for file_name in chain.from_iterable(glob.glob(pattern, recursive=True) for pattern in content[\"globs\"]):\n\n data = None\n if file_name.endswith(\".json\"):\n with open(file_name, mode=\"r+\") as f:\n try:\n data = json.loads(f.read())\n except:\n logger.warn(f\"Error parsing {file_name}\")\n elif file_name.endswith(\".yaml\"):\n with open(file_name, mode=\"r+\") as f:\n try:\n file_text_content 
= f.read()\n if 'jinja2' in file_text_content:\n data = load_templated_file(f)\n else:\n data = yaml.safe_load(file_text_content)\n except:\n logger.warn(f\"Error parsing {file_name}\")\n\n p = Path(f.name)\n message_file_name = p.with_suffix('.md')\n\n if data and message_file_name.exists():\n # delete file or skip if staged\n # any() will return True when at least one of the elements is Truthy\n if len(data.get('restrictedToOrgs', [])) > 0 or data.get('isStaged', False) or data.get('isDeleted', False) or not data.get('isEnabled', True):\n if p.exists():\n logger.info(f\"removing file {p.name}\")\n global_aliases.append(f\"\/security_monitoring\/default_rules\/{p.stem}\")\n global_aliases.append(f\"\/security_platform\/default_rules\/{p.stem}\")\n p.unlink()\n else:\n logger.info(f\"skipping file {p.name}\")\n else:\n # The message of a detection rule is located in a Markdown file next to the rule definition\n with open(str(message_file_name), mode=\"r+\") as message_file:\n message = message_file.read()\n\n # strip out [text] e.g \"[CIS Docker] Ensure that..\" becomes \"Ensure that...\"\n parsed_title = re.sub(r\"\\[.+\\]\\s?(.*)\", \"\\\\1\", data.get('name', ''), 0, re.MULTILINE)\n page_data = {\n \"title\": parsed_title,\n \"kind\": \"documentation\",\n \"type\": \"security_rules\",\n \"disable_edit\": True,\n \"aliases\": [\n f\"{data.get('defaultRuleId', '').strip()}\",\n f\"\/security_monitoring\/default_rules\/{data.get('defaultRuleId', '').strip()}\",\n f\"\/security_monitoring\/default_rules\/{p.stem}\"\n ],\n \"rule_category\": [],\n \"integration_id\": \"\"\n }\n\n # we need to get the path relative to the repo root for comparisons\n extract_dir, relative_path = str(p.parent).split(f\"\/{content['repo_name']}\/\")\n # lets build up this categorization for filtering purposes\n\n # previous categorization\n if relative_path.startswith('configuration'):\n page_data['rule_category'].append('Posture Management (Cloud)')\n elif relative_path.startswith('runtime'):\n if 'compliance' in relative_path:\n page_data['rule_category'].append('Posture Management (Infra)')\n else:\n page_data['rule_category'].append('Workload Security')\n\n # new categorization\n if 'security-monitoring' in relative_path:\n page_data['rule_category'].append('Cloud SIEM')\n\n if 'posture-management' in relative_path:\n if 'cloud-configuration' in relative_path:\n page_data['rule_category'].append('Posture Management (Cloud)')\n if 'infrastructure-configuration' in relative_path:\n page_data['rule_category'].append('Posture Management (Infra)')\n\n if 'workload-security' in relative_path:\n page_data['rule_category'].append('Workload Security')\n\n if 'application-security' in relative_path:\n page_data['rule_category'].append('Application Security')\n\n tags = data.get('tags', [])\n if tags:\n for tag in tags:\n if ':' in tag:\n key, value = tag.split(':')\n page_data[key] = value\n if data.get('source', ''):\n page_data[\"source\"] = data.get('source', '')\n else:\n # try build up manually\n source = data.get('source', None)\n tech = data.get('framework', {}).get('name', '').replace('cis-', '')\n page_data[\"source\"] = source or tech\n page_data[\"security\"] = \"compliance\"\n page_data[\"framework\"] = data.get('framework', {}).get('name', '')\n page_data[\"control\"] = data.get('control', '')\n page_data[\"scope\"] = tech\n\n # lowercase them\n if page_data.get(\"source\", None):\n page_data[\"source\"] = page_data[\"source\"].lower()\n if page_data.get(\"scope\", None):\n page_data[\"scope\"] = 
page_data[\"scope\"].lower()\n\n # integration id\n page_data[\"integration_id\"] = page_data.get(\"scope\", None) or page_data.get(\"source\", \"\")\n cloud = page_data.get(\"cloud\", None)\n if cloud and cloud == 'aws':\n page_data[\"integration_id\"] = \"amazon-{}\".format(page_data[\"integration_id\"])\n\n front_matter = yaml.dump(page_data, default_flow_style=False).strip()\n output_content = TEMPLATE.format(front_matter=front_matter, content=message.strip())\n\n dest_dir = Path(f\"{content_dir}{content['options']['dest_path']}\")\n dest_dir.mkdir(exist_ok=True)\n dest_file = dest_dir.joinpath(p.name).with_suffix('.md')\n logger.info(dest_file)\n with open(dest_file, mode='w', encoding='utf-8') as out_file:\n out_file.write(output_content)\n\n # add global aliases from deleted files to _index.md\n if os.environ.get('CI_ENVIRONMENT_NAME', '') in ('live', 'preview'):\n index_path = Path(f\"{content_dir}{content['options']['dest_path']}_index.md\")\n update_global_aliases(index_path, global_aliases)","function_tokens":["def","security_rules","(","content",",","content_dir",")",":","logger",".","info","(","\"Starting security rules action...\"",")","global_aliases","=","[","]","for","file_name","in","chain",".","from_iterable","(","glob",".","glob","(","pattern",",","recursive","=","True",")","for","pattern","in","content","[","\"globs\"","]",")",":","data","=","None","if","file_name",".","endswith","(","\".json\"",")",":","with","open","(","file_name",",","mode","=","\"r+\"",")","as","f",":","try",":","data","=","json",".","loads","(","f",".","read","(",")",")","except",":","logger",".","warn","(","f\"Error parsing {file_name}\"",")","elif","file_name",".","endswith","(","\".yaml\"",")",":","with","open","(","file_name",",","mode","=","\"r+\"",")","as","f",":","try",":","file_text_content","=","f",".","read","(",")","if","'jinja2'","in","file_text_content",":","data","=","load_templated_file","(","f",")","else",":","data","=","yaml",".","safe_load","(","file_text_content",")","except",":","logger",".","warn","(","f\"Error parsing {file_name}\"",")","p","=","Path","(","f",".","name",")","message_file_name","=","p",".","with_suffix","(","'.md'",")","if","data","and","message_file_name",".","exists","(",")",":","# delete file or skip if staged","# any() will return True when at least one of the elements is Truthy","if","len","(","data",".","get","(","'restrictedToOrgs'",",","[","]",")",")",">","0","or","data",".","get","(","'isStaged'",",","False",")","or","data",".","get","(","'isDeleted'",",","False",")","or","not","data",".","get","(","'isEnabled'",",","True",")",":","if","p",".","exists","(",")",":","logger",".","info","(","f\"removing file {p.name}\"",")","global_aliases",".","append","(","f\"\/security_monitoring\/default_rules\/{p.stem}\"",")","global_aliases",".","append","(","f\"\/security_platform\/default_rules\/{p.stem}\"",")","p",".","unlink","(",")","else",":","logger",".","info","(","f\"skipping file {p.name}\"",")","else",":","# The message of a detection rule is located in a Markdown file next to the rule definition","with","open","(","str","(","message_file_name",")",",","mode","=","\"r+\"",")","as","message_file",":","message","=","message_file",".","read","(",")","# strip out [text] e.g \"[CIS Docker] Ensure that..\" becomes \"Ensure 
that...\"","parsed_title","=","re",".","sub","(","r\"\\[.+\\]\\s?(.*)\"",",","\"\\\\1\"",",","data",".","get","(","'name'",",","''",")",",","0",",","re",".","MULTILINE",")","page_data","=","{","\"title\"",":","parsed_title",",","\"kind\"",":","\"documentation\"",",","\"type\"",":","\"security_rules\"",",","\"disable_edit\"",":","True",",","\"aliases\"",":","[","f\"{data.get('defaultRuleId', '').strip()}\"",",","f\"\/security_monitoring\/default_rules\/{data.get('defaultRuleId', '').strip()}\"",",","f\"\/security_monitoring\/default_rules\/{p.stem}\"","]",",","\"rule_category\"",":","[","]",",","\"integration_id\"",":","\"\"","}","# we need to get the path relative to the repo root for comparisons","extract_dir",",","relative_path","=","str","(","p",".","parent",")",".","split","(","f\"\/{content['repo_name']}\/\"",")","# lets build up this categorization for filtering purposes","# previous categorization","if","relative_path",".","startswith","(","'configuration'",")",":","page_data","[","'rule_category'","]",".","append","(","'Posture Management (Cloud)'",")","elif","relative_path",".","startswith","(","'runtime'",")",":","if","'compliance'","in","relative_path",":","page_data","[","'rule_category'","]",".","append","(","'Posture Management (Infra)'",")","else",":","page_data","[","'rule_category'","]",".","append","(","'Workload Security'",")","# new categorization","if","'security-monitoring'","in","relative_path",":","page_data","[","'rule_category'","]",".","append","(","'Cloud SIEM'",")","if","'posture-management'","in","relative_path",":","if","'cloud-configuration'","in","relative_path",":","page_data","[","'rule_category'","]",".","append","(","'Posture Management (Cloud)'",")","if","'infrastructure-configuration'","in","relative_path",":","page_data","[","'rule_category'","]",".","append","(","'Posture Management (Infra)'",")","if","'workload-security'","in","relative_path",":","page_data","[","'rule_category'","]",".","append","(","'Workload Security'",")","if","'application-security'","in","relative_path",":","page_data","[","'rule_category'","]",".","append","(","'Application Security'",")","tags","=","data",".","get","(","'tags'",",","[","]",")","if","tags",":","for","tag","in","tags",":","if","':'","in","tag",":","key",",","value","=","tag",".","split","(","':'",")","page_data","[","key","]","=","value","if","data",".","get","(","'source'",",","''",")",":","page_data","[","\"source\"","]","=","data",".","get","(","'source'",",","''",")","else",":","# try build up manually","source","=","data",".","get","(","'source'",",","None",")","tech","=","data",".","get","(","'framework'",",","{","}",")",".","get","(","'name'",",","''",")",".","replace","(","'cis-'",",","''",")","page_data","[","\"source\"","]","=","source","or","tech","page_data","[","\"security\"","]","=","\"compliance\"","page_data","[","\"framework\"","]","=","data",".","get","(","'framework'",",","{","}",")",".","get","(","'name'",",","''",")","page_data","[","\"control\"","]","=","data",".","get","(","'control'",",","''",")","page_data","[","\"scope\"","]","=","tech","# lowercase them","if","page_data",".","get","(","\"source\"",",","None",")",":","page_data","[","\"source\"","]","=","page_data","[","\"source\"","]",".","lower","(",")","if","page_data",".","get","(","\"scope\"",",","None",")",":","page_data","[","\"scope\"","]","=","page_data","[","\"scope\"","]",".","lower","(",")","# integration 
id","page_data","[","\"integration_id\"","]","=","page_data",".","get","(","\"scope\"",",","None",")","or","page_data",".","get","(","\"source\"",",","\"\"",")","cloud","=","page_data",".","get","(","\"cloud\"",",","None",")","if","cloud","and","cloud","==","'aws'",":","page_data","[","\"integration_id\"","]","=","\"amazon-{}\"",".","format","(","page_data","[","\"integration_id\"","]",")","front_matter","=","yaml",".","dump","(","page_data",",","default_flow_style","=","False",")",".","strip","(",")","output_content","=","TEMPLATE",".","format","(","front_matter","=","front_matter",",","content","=","message",".","strip","(",")",")","dest_dir","=","Path","(","f\"{content_dir}{content['options']['dest_path']}\"",")","dest_dir",".","mkdir","(","exist_ok","=","True",")","dest_file","=","dest_dir",".","joinpath","(","p",".","name",")",".","with_suffix","(","'.md'",")","logger",".","info","(","dest_file",")","with","open","(","dest_file",",","mode","=","'w'",",","encoding","=","'utf-8'",")","as","out_file",":","out_file",".","write","(","output_content",")","# add global aliases from deleted files to _index.md","if","os",".","environ",".","get","(","'CI_ENVIRONMENT_NAME'",",","''",")","in","(","'live'",",","'preview'",")",":","index_path","=","Path","(","f\"{content_dir}{content['options']['dest_path']}_index.md\"",")","update_global_aliases","(","index_path",",","global_aliases",")"],"url":"https:\/\/github.com\/DataDog\/documentation\/blob\/f7f6fb076d3739c65b5e60419f1be7fd934a30d4\/local\/bin\/py\/build\/actions\/security_rules.py#L63-L200"} {"nwo":"DataDog\/documentation","sha":"f7f6fb076d3739c65b5e60419f1be7fd934a30d4","path":"local\/bin\/py\/build\/actions\/integrations.py","language":"python","identifier":"Integrations.metric_csv_to_yaml","parameters":"(key_name, csv_filename, yml_filename)","argument_list":"","return_statement":"","docstring":"Given a file path to a single csv file convert it to a yaml file\n\n :param key_name: integration key name for root object\n :param csv_filename: path to input csv file\n :param yml_filename: path to output yml file","docstring_summary":"Given a file path to a single csv file convert it to a yaml file","docstring_tokens":["Given","a","file","path","to","a","single","csv","file","convert","it","to","a","yaml","file"],"function":"def metric_csv_to_yaml(key_name, csv_filename, yml_filename):\n \"\"\"\n Given a file path to a single csv file convert it to a yaml file\n\n :param key_name: integration key name for root object\n :param csv_filename: path to input csv file\n :param yml_filename: path to output yml file\n \"\"\"\n yaml_data = {key_name: []}\n with open(csv_filename) as csv_file:\n reader = csv.DictReader(csv_file, delimiter=\",\")\n yaml_data[key_name] = [\n dict(line) for line in reader\n ]\n if yaml_data[key_name]:\n # Transforming the metric description to html in order to interpret markdown in\n # integrations metrics table.\n # the char strip is to compensate for the lib adding
<p>\n<\/p>
tags\n for metric in yaml_data[key_name]:\n metric['description'] = str(\n markdown2.markdown(metric['description']))[3:-5]\n with open(\n file=yml_filename,\n mode=\"w\",\n encoding=\"utf-8\",\n ) as f:\n f.write(\n yaml.dump(\n yaml_data, default_flow_style=False\n )\n )","function_tokens":["def","metric_csv_to_yaml","(","key_name",",","csv_filename",",","yml_filename",")",":","yaml_data","=","{","key_name",":","[","]","}","with","open","(","csv_filename",")","as","csv_file",":","reader","=","csv",".","DictReader","(","csv_file",",","delimiter","=","\",\"",")","yaml_data","[","key_name","]","=","[","dict","(","line",")","for","line","in","reader","]","if","yaml_data","[","key_name","]",":","# Transforming the metric description to html in order to interpret markdown in","# integrations metrics table.","# the char strip is to compensate for the lib adding
<p>\n<\/p>
tags","for","metric","in","yaml_data","[","key_name","]",":","metric","[","'description'","]","=","str","(","markdown2",".","markdown","(","metric","[","'description'","]",")",")","[","3",":","-","5","]","with","open","(","file","=","yml_filename",",","mode","=","\"w\"",",","encoding","=","\"utf-8\"",",",")","as","f",":","f",".","write","(","yaml",".","dump","(","yaml_data",",","default_flow_style","=","False",")",")"],"url":"https:\/\/github.com\/DataDog\/documentation\/blob\/f7f6fb076d3739c65b5e60419f1be7fd934a30d4\/local\/bin\/py\/build\/actions\/integrations.py#L121-L151"}
{"nwo":"DataDog\/documentation","sha":"f7f6fb076d3739c65b5e60419f1be7fd934a30d4","path":"local\/bin\/py\/build\/actions\/integrations.py","language":"python","identifier":"Integrations.inline_references","parameters":"(self, integration_readme, regex_skip_sections_start, regex_skip_sections_end)","argument_list":"","return_statement":"return integration_content_with_link_inlined","docstring":"Goes through a section and remove all reference links it can found.\n\n :param section: An array of lines representing a section.","docstring_summary":"Goes through a section and remove all reference links it can found.","docstring_tokens":["Goes","through","a","section","and","remove","all","reference","links","it","can","found","."],"function":"def inline_references(self, integration_readme, regex_skip_sections_start, regex_skip_sections_end):\n \"\"\"\n Goes through a section and remove all reference links it can found.\n\n :param section: An array of lines representing a section.\n \"\"\"\n\n skip = False\n all_references = []\n section_without_references = []\n section_with_all_links = []\n regex_bottom_reference_link = r\"^\\s*\\[(\\d*?)\\]: (\\S*)\"\n\n # Collecting all references and removing them from section\n # looking at each line, if a line is a reference then we remove it and store the reference.\n\n #section = integration_file.readlines()\n for line in integration_readme.splitlines(True):\n if skip:\n section_without_references.append(line)\n if re.search(regex_skip_sections_end, line):\n skip = False\n elif not skip:\n\n if re.search(regex_skip_sections_start, line):\n section_without_references.append(line)\n skip = True\n\n elif re.search(regex_bottom_reference_link, line):\n\n reference = re.search(regex_bottom_reference_link, line)\n all_references.append([reference.group(1),\n reference.group(2)])\n else:\n section_without_references.append(line)\n\n # By the end of the for loop skip should always be false otherwise it means that a codeblock is not closed.\n if skip:\n raise ValueError\n\n for line in section_without_references:\n if skip:\n if re.search(regex_skip_sections_end, line):\n skip = False\n elif not skip:\n if re.search(regex_skip_sections_start, line):\n skip = True\n else:\n for reference in all_references:\n reference_index, reference_val = reference\n\n curent_link = '][' + reference_index + ']'\n\n line = line.replace(\n curent_link, '](' + reference_val + ')')\n\n section_with_all_links.append(line)\n\n # By the end of the for loop skip should always be false otherwise it means that a codeblock is not closed.\n if skip:\n raise ValueError\n\n integration_content_with_link_inlined = ''.join(section_with_all_links)\n\n return integration_content_with_link_inlined","function_tokens":["def","inline_references","(","self",",","integration_readme",",","regex_skip_sections_start",",","regex_skip_sections_end",")",":","skip","=","False","all_references","=","[","]","section_without_references","=","[","]","section_with_all_links","=","[","]","regex_bottom_reference_link","=","r\"^\\s*\\[(\\d*?)\\]: (\\S*)\"","# Collecting all references and removing them from section","# looking at each line, if a line is a reference then we remove it and store the reference.","#section = 
integration_file.readlines()","for","line","in","integration_readme",".","splitlines","(","True",")",":","if","skip",":","section_without_references",".","append","(","line",")","if","re",".","search","(","regex_skip_sections_end",",","line",")",":","skip","=","False","elif","not","skip",":","if","re",".","search","(","regex_skip_sections_start",",","line",")",":","section_without_references",".","append","(","line",")","skip","=","True","elif","re",".","search","(","regex_bottom_reference_link",",","line",")",":","reference","=","re",".","search","(","regex_bottom_reference_link",",","line",")","all_references",".","append","(","[","reference",".","group","(","1",")",",","reference",".","group","(","2",")","]",")","else",":","section_without_references",".","append","(","line",")","# By the end of the for loop skip should always be false otherwise it means that a codeblock is not closed.","if","skip",":","raise","ValueError","for","line","in","section_without_references",":","if","skip",":","if","re",".","search","(","regex_skip_sections_end",",","line",")",":","skip","=","False","elif","not","skip",":","if","re",".","search","(","regex_skip_sections_start",",","line",")",":","skip","=","True","else",":","for","reference","in","all_references",":","reference_index",",","reference_val","=","reference","curent_link","=","']['","+","reference_index","+","']'","line","=","line",".","replace","(","curent_link",",","']('","+","reference_val","+","')'",")","section_with_all_links",".","append","(","line",")","# By the end of the for loop skip should always be false otherwise it means that a codeblock is not closed.","if","skip",":","raise","ValueError","integration_content_with_link_inlined","=","''",".","join","(","section_with_all_links",")","return","integration_content_with_link_inlined"],"url":"https:\/\/github.com\/DataDog\/documentation\/blob\/f7f6fb076d3739c65b5e60419f1be7fd934a30d4\/local\/bin\/py\/build\/actions\/integrations.py#L153-L217"}
{"nwo":"DataDog\/documentation","sha":"f7f6fb076d3739c65b5e60419f1be7fd934a30d4","path":"local\/bin\/py\/build\/actions\/integrations.py","language":"python","identifier":"Integrations.process_integrations","parameters":"(self, content, marketplace=False)","argument_list":"","return_statement":"","docstring":"Goes through all files needed for integrations build\n and triggers the right function for the right type of file.\n #integrations to learn more.\n See https:\/\/github.com\/DataDog\/documentation\/wiki\/Documentation-Build\n :param content: integrations content to process","docstring_summary":"Goes through all files needed for integrations build\n and triggers the right function for the right type of file.\n #integrations to learn more.\n See https:\/\/github.com\/DataDog\/documentation\/wiki\/Documentation-Build\n :param content: integrations content to process","docstring_tokens":["Goes","through","all","files","needed","for","integrations","build","and","triggers","the","right","function","for","the","right","type","of","file",".","#integrations","to","learn","more",".","See","https",":","\/\/","github",".","com","\/","DataDog","\/","documentation","\/","wiki","\/","Documentation","-","Build",":","param","content",":","integrations","content","to","process"],"function":"def process_integrations(self, content, marketplace=False):\n \"\"\"\n Goes through all files needed for integrations build\n and triggers the right function for the right type of file.\n #integrations to learn more.\n See https:\/\/github.com\/DataDog\/documentation\/wiki\/Documentation-Build\n :param content: integrations content to process\n \"\"\"\n for file_name in chain.from_iterable(\n glob.iglob(pattern, recursive=True)\n for pattern in content[\"globs\"]\n ):\n if file_name.endswith(\".csv\"):\n self.process_integration_metric(file_name)\n\n elif file_name.endswith(\"manifest.json\"):\n self.process_integration_manifest(file_name)\n\n elif file_name.endswith(\"service_checks.json\"):\n self.process_service_checks(file_name)\n\n elif file_name.endswith(\".md\"):\n self.process_integration_readme(file_name, marketplace)\n\n elif file_name.endswith((\".png\", \".svg\", \".jpg\", \".jpeg\", \".gif\")) and marketplace:\n self.process_images(file_name)\n\n elif file_name.endswith(\".go\"):\n 
self.process_npm_integrations(file_name)","function_tokens":["def","process_integrations","(","self",",","content",",","marketplace","=","False",")",":","for","file_name","in","chain",".","from_iterable","(","glob",".","iglob","(","pattern",",","recursive","=","True",")","for","pattern","in","content","[","\"globs\"","]",")",":","if","file_name",".","endswith","(","\".csv\"",")",":","self",".","process_integration_metric","(","file_name",")","elif","file_name",".","endswith","(","\"manifest.json\"",")",":","self",".","process_integration_manifest","(","file_name",")","elif","file_name",".","endswith","(","\"service_checks.json\"",")",":","self",".","process_service_checks","(","file_name",")","elif","file_name",".","endswith","(","\".md\"",")",":","self",".","process_integration_readme","(","file_name",",","marketplace",")","elif","file_name",".","endswith","(","(","\".png\"",",","\".svg\"",",","\".jpg\"",",","\".jpeg\"",",","\".gif\"",")",")","and","marketplace",":","self",".","process_images","(","file_name",")","elif","file_name",".","endswith","(","\".go\"",")",":","self",".","process_npm_integrations","(","file_name",")"],"url":"https:\/\/github.com\/DataDog\/documentation\/blob\/f7f6fb076d3739c65b5e60419f1be7fd934a30d4\/local\/bin\/py\/build\/actions\/integrations.py#L219-L247"}
{"nwo":"DataDog\/documentation","sha":"f7f6fb076d3739c65b5e60419f1be7fd934a30d4","path":"local\/bin\/py\/build\/actions\/integrations.py","language":"python","identifier":"Integrations.merge_integrations","parameters":"(self)","argument_list":"","return_statement":"","docstring":"Merges integrations that come under one","docstring_summary":"Merges integrations that come under one","docstring_tokens":["Merges","integrations","that","come","under","one"],"function":"def merge_integrations(self):\n \"\"\" Merges integrations that come under one \"\"\"\n for (\n name,\n action_obj,\n ) in self.integration_mutations.items():\n if name not in self.initial_integration_files:\n action = action_obj.get(\"action\")\n target = action_obj.get(\"target\")\n input_file = \"{}{}.md\".format(\n self.content_integrations_dir, name\n )\n output_file = \"{}{}.md\".format(\n self.content_integrations_dir, target\n )\n if action == \"merge\":\n with open(\n input_file, \"r\"\n ) as content_file, open(\n output_file, \"a\"\n ) as target_file:\n content = content_file.read()\n content = re.sub(\n self.regex_fm,\n r\"\\2\",\n content,\n count=0,\n )\n if action_obj.get(\n \"remove_header\", False\n ):\n content = re.sub(\n self.regex_h1,\n \"\",\n content,\n count=0,\n )\n else:\n content = re.sub(\n self.regex_h1_replace,\n r\"##\\2\",\n content,\n count=0,\n )\n regex_skip_sections_end = r\"(```|\\{\\{< \\\/code-block >\\}\\})\"\n regex_skip_sections_start = r\"(```|\\{\\{< code-block)\"\n\n ## Inlining all link from the file to merge\n ## to avoid link ref colision with the existing references.\n content = self.inline_references(content,regex_skip_sections_start,regex_skip_sections_end)\n\n target_file.write(content)\n\n ## Formating all link as reference in the new merged integration file\n try:\n final_text = format_link_file(output_file,regex_skip_sections_start,regex_skip_sections_end)\n with open(output_file, 'w') as final_file:\n final_file.write(final_text)\n except Exception as e:\n print(e)\n\n try:\n remove(input_file)\n except OSError:\n print(\n \"\\x1b[31mERROR\\x1b[0m: The file {} was not found and could not be removed during merge action\".format(\n input_file\n )\n )\n elif action == \"truncate\":\n if exists(output_file):\n with open(\n output_file, \"r+\"\n ) as target_file:\n content = target_file.read()\n content = re.sub(\n self.regex_fm,\n r\"---\\n\\1\\n---\\n\",\n content,\n count=0,\n )\n target_file.truncate(0)\n target_file.seek(0)\n target_file.write(content)\n else:\n open(output_file, \"w\").close()\n elif action == \"discard\":\n try:\n remove(input_file)\n except OSError:\n print(\n \"\\x1b[31mERROR\\x1b[0m: The file {} was not found and could not be removed during discard action\".format(\n input_file\n )\n )\n elif action == \"create\":\n with open(output_file, \"w+\") as f:\n fm = yaml.dump(\n action_obj.get(\"fm\"),\n default_flow_style=False,\n ).rstrip()\n data = \"---\\n{0}\\n---\\n\".format(fm)\n 
f.write(data)","function_tokens":["def","merge_integrations","(","self",")",":","for","(","name",",","action_obj",",",")","in","self",".","integration_mutations",".","items","(",")",":","if","name","not","in","self",".","initial_integration_files",":","action","=","action_obj",".","get","(","\"action\"",")","target","=","action_obj",".","get","(","\"target\"",")","input_file","=","\"{}{}.md\"",".","format","(","self",".","content_integrations_dir",",","name",")","output_file","=","\"{}{}.md\"",".","format","(","self",".","content_integrations_dir",",","target",")","if","action","==","\"merge\"",":","with","open","(","input_file",",","\"r\"",")","as","content_file",",","open","(","output_file",",","\"a\"",")","as","target_file",":","content","=","content_file",".","read","(",")","content","=","re",".","sub","(","self",".","regex_fm",",","r\"\\2\"",",","content",",","count","=","0",",",")","if","action_obj",".","get","(","\"remove_header\"",",","False",")",":","content","=","re",".","sub","(","self",".","regex_h1",",","\"\"",",","content",",","count","=","0",",",")","else",":","content","=","re",".","sub","(","self",".","regex_h1_replace",",","r\"##\\2\"",",","content",",","count","=","0",",",")","regex_skip_sections_end","=","r\"(```|\\{\\{< \\\/code-block >\\}\\})\"","regex_skip_sections_start","=","r\"(```|\\{\\{< code-block)\"","## Inlining all link from the file to merge","## to avoid link ref colision with the existing references.","content","=","self",".","inline_references","(","content",",","regex_skip_sections_start",",","regex_skip_sections_end",")","target_file",".","write","(","content",")","## Formating all link as reference in the new merged integration file","try",":","final_text","=","format_link_file","(","output_file",",","regex_skip_sections_start",",","regex_skip_sections_end",")","with","open","(","output_file",",","'w'",")","as","final_file",":","final_file",".","write","(","final_text",")","except","Exception","as","e",":","print","(","e",")","try",":","remove","(","input_file",")","except","OSError",":","print","(","\"\\x1b[31mERROR\\x1b[0m: The file {} was not found and could not be removed during merge action\"",".","format","(","input_file",")",")","elif","action","==","\"truncate\"",":","if","exists","(","output_file",")",":","with","open","(","output_file",",","\"r+\"",")","as","target_file",":","content","=","target_file",".","read","(",")","content","=","re",".","sub","(","self",".","regex_fm",",","r\"---\\n\\1\\n---\\n\"",",","content",",","count","=","0",",",")","target_file",".","truncate","(","0",")","target_file",".","seek","(","0",")","target_file",".","write","(","content",")","else",":","open","(","output_file",",","\"w\"",")",".","close","(",")","elif","action","==","\"discard\"",":","try",":","remove","(","input_file",")","except","OSError",":","print","(","\"\\x1b[31mERROR\\x1b[0m: The file {} was not found and could not be removed during discard action\"",".","format","(","input_file",")",")","elif","action","==","\"create\"",":","with","open","(","output_file",",","\"w+\"",")","as","f",":","fm","=","yaml",".","dump","(","action_obj",".","get","(","\"fm\"",")",",","default_flow_style","=","False",",",")",".","rstrip","(",")","data","=","\"---\\n{0}\\n---\\n\"",".","format","(","fm",")","f",".","write","(","data",")"],"url":"https:\/\/github.com\/DataDog\/documentation\/blob\/f7f6fb076d3739c65b5e60419f1be7fd934a30d4\/local\/bin\/py\/build\/actions\/integrations.py#L249-L351"}
{"nwo":"DataDog\/documentation","sha":"f7f6fb076d3739c65b5e60419f1be7fd934a30d4","path":"local\/bin\/py\/build\/actions\/integrations.py","language":"python","identifier":"Integrations.process_integration_metric","parameters":"(self, file_name)","argument_list":"","return_statement":"","docstring":"Take a single metadata csv file and convert it to yaml\n :param file_name: path to a metadata csv file","docstring_summary":"Take a single metadata csv file and convert it to yaml\n :param file_name: path to a metadata csv file","docstring_tokens":["Take","a","single","metadata","csv","file","and","convert","it","to","yaml",":","param","file_name",":","path","to","a","metadata","csv","file"],"function":"def process_integration_metric(self, file_name):\n \"\"\"\n Take a single metadata csv file and convert it to yaml\n :param file_name: path to a metadata csv file\n \"\"\"\n if file_name.endswith(\"\/metadata.csv\"):\n key_name = basename(\n dirname(normpath(file_name))\n )\n else:\n key_name = basename(\n file_name.replace(\"_metadata.csv\", \"\")\n )\n new_file_name = \"{}{}.yaml\".format(\n self.data_integrations_dir, key_name\n )\n self.metric_csv_to_yaml(key_name, file_name, new_file_name)","function_tokens":["def","process_integration_metric","(","self",",","file_name",")",":","if","file_name",".","endswith","(","\"\/metadata.csv\"",")",":","key_name","=","basename","(","dirname","(","normpath","(","file_name",")",")",")","else",":","key_name","=","basename","(","file_name",".","replace","(","\"_metadata.csv\"",",","\"\"",")",")","new_file_name","=","\"{}{}.yaml\"",".","format","(","self",".","data_integrations_dir",",","key_name",")","self",".","metric_csv_to_yaml","(","key_name",",","file_name",",","new_file_name",")"],"url":"https:\/\/github.com\/DataDog\/documentation\/blob\/f7f6fb076d3739c65b5e60419f1be7fd934a30d4\/local\/bin\/py\/build\/actions\/integrations.py#L353-L369"}
{"nwo":"DataDog\/documentation","sha":"f7f6fb076d3739c65b5e60419f1be7fd934a30d4","path":"local\/bin\/py\/build\/actions\/integrations.py","language":"python","identifier":"Integrations.process_integration_manifest","parameters":"(self, file_name)","argument_list":"","return_statement":"","docstring":"Take a single manifest json file and upsert to integrations.json data\n set is_public to false to hide integrations we merge later\n :param file_name: path to a manifest json file","docstring_summary":"Take a single manifest json file and upsert to integrations.json data\n set is_public to false to hide integrations we merge later\n :param file_name: path to a manifest json file","docstring_tokens":["Take","a","single","manifest","json","file","and","upsert","to","integrations",".","json","data","set","is_public","to","false","to","hide","integrations","we","merge","later",":","param","file_name",":","path","to","a","manifest","json","file"],"function":"def process_integration_manifest(self, file_name):\n \"\"\"\n Take a single manifest json file and upsert to integrations.json data\n set is_public to false to hide integrations we merge later\n :param file_name: path to a manifest json file\n \"\"\"\n\n names = [\n d.get(\"name\", \"\").lower()\n for d in self.datafile_json\n if \"name\" in d\n ]\n with open(file_name) as f:\n try:\n data = json.load(f)\n data = self.process_manifest(data, basename(dirname(file_name)))\n data_name = data.get(\"name\", \"\").lower()\n if data_name in [\n k\n for k, v in self.integration_mutations.items()\n if v.get(\"action\") == \"merge\"\n ]:\n data[\"is_public\"] = False\n if data_name in names:\n item = [\n d\n for d in self.datafile_json\n if d.get(\"name\", \"\").lower() == data_name\n ]\n if len(item) > 0:\n item[0].update(data)\n else:\n self.datafile_json.append(data)\n except JSONDecodeError:\n if getenv(\"LOCAL\") == 'True':\n print(\n \"\\x1b[33mWARNING\\x1b[0m: manifest could not be parsed {}\".format(file_name))\n else:\n print(\n \"\\x1b[31mERROR\\x1b[0m: manifest could not be parsed {}\".format(file_name))\n raise JSONDecodeError","function_tokens":["def","process_integration_manifest","(","self",",","file_name",")",":","names","=","[","d",".","get","(","\"name\"",",","\"\"",")",".","lower","(",")","for","d","in","self",".","datafile_json","if","\"name\"","in","d","]","with","open","(","file_name",")","as","f",":","try",":","data","=","json",".","load","(","f",")","data","=","self",".","process_manifest","(","data",",","basename","(","dirname","(","file_name",")",")",")","data_name","=","data",".","get","(","\"name\"",",","\"\"",")",".","lower","(",")","if","data_name","in","[","k","for","k",",","v","in","self",".","integration_mutations",".","items","(",")","if","v",".","get","(","\"action\"",")","==","\"merge\"","]",":","data","[","\"is_public\"","]","=","False","if","data_name","in","names",":","item","=","[","d","for","d","in","self",".","datafile_json","if","d",".","get","(","\"name\"",",","\"\"",")",".","lower","(",")","==","data_name","]","if","len","(","item",")",">","0",":","item","[","0","]",".","update","(","data",")","else",":","self",".","datafile_json",".","append","(","data",")","except","JSONDecodeError",":","if","getenv","(","\"LOCAL\"",")","==","'True'",":","print","(","\"\\x1b[33mWARNING\\x1b[0m: manifest could not be parsed {}\"",".","format","(","file_name",")",")","else",":","print","(","\"\\x1b[31mERROR\\x1b[0m: manifest could not be parsed 
{}\"",".","format","(","file_name",")",")","raise","JSONDecodeError"],"url":"https:\/\/github.com\/DataDog\/documentation\/blob\/f7f6fb076d3739c65b5e60419f1be7fd934a30d4\/local\/bin\/py\/build\/actions\/integrations.py#L371-L411"}
{"nwo":"DataDog\/documentation","sha":"f7f6fb076d3739c65b5e60419f1be7fd934a30d4","path":"local\/bin\/py\/build\/actions\/integrations.py","language":"python","identifier":"Integrations.process_service_checks","parameters":"(self, file_name)","argument_list":"","return_statement":"","docstring":"Take a single service_checks.json file and copies it to the data folder\n as the integration name it came from e.g \/data\/service_checks\/docker.json\n :param file_name: path to a service_checks json file","docstring_summary":"Take a single service_checks.json file and copies it to the data folder\n as the integration name it came from e.g \/data\/service_checks\/docker.json\n :param file_name: path to a service_checks json file","docstring_tokens":["Take","a","single","service_checks",".","json","file","and","copies","it","to","the","data","folder","as","the","integration","name","it","came","from","e",".","g","\/","data","\/","service_checks","\/","docker",".","json",":","param","file_name",":","path","to","a","service_checks","json","file"],"function":"def process_service_checks(self, file_name):\n \"\"\"\n Take a single service_checks.json file and copies it to the data folder\n as the integration name it came from e.g \/data\/service_checks\/docker.json\n :param file_name: path to a service_checks json file\n \"\"\"\n\n if file_name.endswith(\"\/assets\/service_checks.json\"):\n file_list = file_name.split(sep)\n key_name = file_list[len(file_list)-3]\n else:\n key_name = basename(\n dirname(normpath(file_name))\n )\n\n new_file_name = \"{}{}.json\".format(\n self.data_service_checks_dir, key_name\n )\n\n shutil.copy(\n file_name,\n new_file_name,\n )","function_tokens":["def","process_service_checks","(","self",",","file_name",")",":","if","file_name",".","endswith","(","\"\/assets\/service_checks.json\"",")",":","file_list","=","file_name",".","split","(","sep",")","key_name","=","file_list","[","len","(","file_list",")","-","3","]","else",":","key_name","=","basename","(","dirname","(","normpath","(","file_name",")",")",")","new_file_name","=","\"{}{}.json\"",".","format","(","self",".","data_service_checks_dir",",","key_name",")","shutil",".","copy","(","file_name",",","new_file_name",",",")"],"url":"https:\/\/github.com\/DataDog\/documentation\/blob\/f7f6fb076d3739c65b5e60419f1be7fd934a30d4\/local\/bin\/py\/build\/actions\/integrations.py#L413-L435"}
{"nwo":"DataDog\/documentation","sha":"f7f6fb076d3739c65b5e60419f1be7fd934a30d4","path":"local\/bin\/py\/build\/actions\/integrations.py","language":"python","identifier":"Integrations.process_npm_integrations","parameters":"(self, file_name)","argument_list":"","return_statement":"","docstring":"Save the defaults.go file from AWS as a json file\n \/data\/npm\/aws.json","docstring_summary":"Save the defaults.go file from AWS as a json file\n \/data\/npm\/aws.json","docstring_tokens":["Save","the","defaults",".","go","file","from","AWS","as","a","json","file","\/","data","\/","npm","\/","aws",".","json"],"function":"def process_npm_integrations(self, file_name):\n \"\"\"\n Save the defaults.go file from AWS as a json file\n \/data\/npm\/aws.json\n \"\"\"\n\n dict_npm = {}\n new_file_name = \"\"\n\n if file_name.endswith(\"defaults.go\"):\n\n with open(file_name) as fh:\n\n line_list = filter(None, fh.read().splitlines())\n\n for line in line_list:\n if line.endswith(\"service{\"):\n integration = line.split('\"')[1]\n dict_npm[integration] = {\"name\": integration}\n\n new_file_name = \"{}aws.json\".format(self.data_npm_dir)\n\n elif file_name.endswith(\"gcp_services.go\"):\n\n with open(file_name) as fh:\n\n line_list = filter(None, fh.read().splitlines())\n\n for line in line_list:\n if line.endswith(\",\"):\n integration = line.split('\"')[3]\n dict_npm[integration] = {\"name\": integration}\n\n new_file_name = \"{}gcp.json\".format(self.data_npm_dir)\n\n if new_file_name != \"\":\n with open(\n file=new_file_name,\n mode=\"w\",\n encoding=\"utf-8\",\n ) as f:\n json.dump(\n dict_npm, f, indent = 2, sort_keys = True\n )","function_tokens":["def","process_npm_integrations","(","self",",","file_name",")",":","dict_npm","=","{","}","new_file_name","=","\"\"","if","file_name",".","endswith","(","\"defaults.go\"",")",":","with","open","(","file_name",")","as","fh",":","line_list","=","filter","(","None",",","fh",".","read","(",")",".","splitlines","(",")",")","for","line","in","line_list",":","if","line",".","endswith","(","\"service{\"",")",":","integration","=","line",".","split","(","'\"'",")","[","1","]","dict_npm","[","integration","]","=","{","\"name\"",":","integration","}","new_file_name","=","\"{}aws.json\"",".","format","(","self",".","data_npm_dir",")","elif","file_name",".","endswith","(","\"gcp_services.go\"",")",":","with","open","(","file_name",")","as","fh",":","line_list","=","filter","(","None",",","fh",".","read","(",")",".","splitlines","(",")",")","for","line","in","line_list",":","if","line",".","endswith","(","\",\"",")",":","integration","=","line",".","split","(","'\"'",")","[","3","]","dict_npm","[","integration","]","=","{","\"name\"",":","integration","}","new_file_name","=","\"{}gcp.json\"",".","format","(","self",".","data_npm_dir",")","if","new_file_name","!=","\"\"",":","with","open","(","file","=","new_file_name",",","mode","=","\"w\"",",","encoding","=","\"utf-8\"",",",")","as","f",":","json",".","dump","(","dict_npm",",","f",",","indent","=","2",",","sort_keys","=","True",")"],"url":"https:\/\/github.com\/DataDog\/documentation\/blob\/f7f6fb076d3739c65b5e60419f1be7fd934a30d4\/local\/bin\/py\/build\/actions\/integrations.py#L437-L480"}
{"nwo":"DataDog\/documentation","sha":"f7f6fb076d3739c65b5e60419f1be7fd934a30d4","path":"local\/bin\/py\/build\/actions\/integrations.py","language":"python","identifier":"Integrations.process_images","parameters":"(self, file_name)","argument_list":"","return_statement":"","docstring":"Copies a single image file to the static\/images\/ folder, creating a new directory if needed.","docstring_summary":"Copies a single image file to the static\/images\/ folder, creating a new directory if needed.","docstring_tokens":["Copies","a","single","image","file","to","the","static","\/","images","\/","folder","creating","a","new","directory","if","needed","."],"function":"def process_images(self, file_name):\n \"\"\"\n Copies a single image file to the static\/images\/ folder, creating a new directory if needed.\n \"\"\"\n image_filename = basename(file_name) # img.png\n integration_image_path = file_name.replace('..\/', '') # if it found local marketplace repo\n integration_image_path = integration_image_path.replace('.\/integrations_data\/extracted\/', '') # marketplace\/nerdvision\/images\/img.png\n integration_image_directory = dirname(integration_image_path) # marketplace\/nerdvision\/images\/\n destination_directory = '.\/static\/images\/{}'.format(integration_image_directory) # static\/images\/marketplace\/nerdvision\/images\/\n full_destination_path = '{}\/{}'.format(destination_directory, image_filename) # static\/images\/marketplace\/nerdvision\/images\/img.png\n\n makedirs(destination_directory, exist_ok=True)\n copyfile(file_name, full_destination_path)","function_tokens":["def","process_images","(","self",",","file_name",")",":","image_filename","=","basename","(","file_name",")","# img.png","integration_image_path","=","file_name",".","replace","(","'..\/'",",","''",")","# if it found local marketplace repo","integration_image_path","=","integration_image_path",".","replace","(","'.\/integrations_data\/extracted\/'",",","''",")","# marketplace\/nerdvision\/images\/img.png","integration_image_directory","=","dirname","(","integration_image_path",")","# marketplace\/nerdvision\/images\/","destination_directory","=","'.\/static\/images\/{}'",".","format","(","integration_image_directory",")","# static\/images\/marketplace\/nerdvision\/images\/","full_destination_path","=","'{}\/{}'",".","format","(","destination_directory",",","image_filename",")","# static\/images\/marketplace\/nerdvision\/images\/img.png","makedirs","(","destination_directory",",","exist_ok","=","True",")","copyfile","(","file_name",",","full_destination_path",")"],"url":"https:\/\/github.com\/DataDog\/documentation\/blob\/f7f6fb076d3739c65b5e60419f1be7fd934a30d4\/local\/bin\/py\/build\/actions\/integrations.py#L484-L496"}
{"nwo":"DataDog\/documentation","sha":"f7f6fb076d3739c65b5e60419f1be7fd934a30d4","path":"local\/bin\/py\/build\/actions\/integrations.py","language":"python","identifier":"Integrations.replace_image_src","parameters":"(markdown_string, integration_name)","argument_list":"","return_statement":"","docstring":"Takes a markdown string and replaces any image markdown with our img shortcode, pointing to the static\/images folder.\n This is needed when dealing with Marketplace Integrations to properly display images pulled from a private repo.","docstring_summary":"Takes a markdown string and replaces any image markdown with our img shortcode, pointing to the static\/images folder.\n This is needed when dealing with Marketplace Integrations to properly display images pulled from a private repo.","docstring_tokens":["Takes","a","markdown","string","and","replaces","any","image","markdown","with","our","img","shortcode","pointing","to","the","static","\/","images","folder",".","This","is","needed","when","dealing","with","Marketplace","Integrations","to","properly","display","images","pulled","from","a","private","repo","."],"function":"def replace_image_src(markdown_string, integration_name):\n \"\"\"\n Takes a markdown string and replaces any image markdown with our img shortcode, pointing to the static\/images folder.\n This is needed when dealing with Marketplace Integrations to properly display images pulled from a private repo.\n \"\"\"\n markdown_img_search_regex = r\"!\\[(.*?)\\]\\((.*?)\\)\"\n img_shortcode = \"{{< img src=\\\"marketplace\/\" + integration_name + \"\/\\\\2\\\" alt=\\\"\\\\1\\\" >}}\"\n integration_img_prefix = 'https:\/\/raw.githubusercontent.com\/DataDog\/marketplace\/master\/{}\/'.format(integration_name)\n\n replaced_markdown_string = markdown_string.replace(integration_img_prefix, '')\n regex_result = re.sub(markdown_img_search_regex, img_shortcode, replaced_markdown_string, 0, re.MULTILINE)\n\n if regex_result:\n return regex_result\n else:\n return markdown_string","function_tokens":["def","replace_image_src","(","markdown_string",",","integration_name",")",":","markdown_img_search_regex","=","r\"!\\[(.*?)\\]\\((.*?)\\)\"","img_shortcode","=","\"{{< img src=\\\"marketplace\/\"","+","integration_name","+","\"\/\\\\2\\\" alt=\\\"\\\\1\\\" >}}\"","integration_img_prefix","=","'https:\/\/raw.githubusercontent.com\/DataDog\/marketplace\/master\/{}\/'",".","format","(","integration_name",")","replaced_markdown_string","=","markdown_string",".","replace","(","integration_img_prefix",",","''",")","regex_result","=","re",".","sub","(","markdown_img_search_regex",",","img_shortcode",",","replaced_markdown_string",",","0",",","re",".","MULTILINE",")","if","regex_result",":","return","regex_result","else",":","return","markdown_string"],"url":"https:\/\/github.com\/DataDog\/documentation\/blob\/f7f6fb076d3739c65b5e60419f1be7fd934a30d4\/local\/bin\/py\/build\/actions\/integrations.py#L499-L514"}
{"nwo":"DataDog\/documentation","sha":"f7f6fb076d3739c65b5e60419f1be7fd934a30d4","path":"local\/bin\/py\/build\/actions\/integrations.py","language":"python","identifier":"Integrations.remove_markdown_section","parameters":"(markdown_string, h2_header_string)","argument_list":"","return_statement":"","docstring":"Removes a section from markdown by deleting all content starting from provided h2_header_string argument and ending one index before the next h2 header.\n h2_header_string argument is expected in markdown format; e.g. '## Steps'","docstring_summary":"Removes a section from markdown by deleting all content starting from provided h2_header_string argument and ending one index before the next h2 header.\n h2_header_string argument is expected in markdown format; e.g. '## Steps'","docstring_tokens":["Removes","a","section","from","markdown","by","deleting","all","content","starting","from","provided","h2_header_string","argument","and","ending","one","index","before","the","next","h2","header",".","h2_header_string","argument","is","expected","in","markdown","format",";","e",".","g",".","##","Steps"],"function":"def remove_markdown_section(markdown_string, h2_header_string):\n \"\"\"\n Removes a section from markdown by deleting all content starting from provided h2_header_string argument and ending one index before the next h2 header.\n h2_header_string argument is expected in markdown format; e.g. '## Steps'\n \"\"\"\n\n if not h2_header_string.startswith('##'):\n return markdown_string\n\n h2_markdown_regex = r\"(^|\\n)(#{2}) (\\w+)\"\n h2_list = re.finditer(h2_markdown_regex, markdown_string)\n replaced_result = ''\n\n for match in h2_list:\n group = match.group(0)\n start = match.start()\n end = match.end() - 1\n\n if h2_header_string in group:\n start_index = start\n end_index = next(h2_list).start()\n content_to_remove = markdown_string[start_index:end_index]\n replaced_result = markdown_string.replace(content_to_remove, '')\n\n if replaced_result:\n return replaced_result\n else:\n return markdown_string","function_tokens":["def","remove_markdown_section","(","markdown_string",",","h2_header_string",")",":","if","not","h2_header_string",".","startswith","(","'##'",")",":","return","markdown_string","h2_markdown_regex","=","r\"(^|\\n)(#{2}) (\\w+)\"","h2_list","=","re",".","finditer","(","h2_markdown_regex",",","markdown_string",")","replaced_result","=","''","for","match","in","h2_list",":","group","=","match",".","group","(","0",")","start","=","match",".","start","(",")","end","=","match",".","end","(",")","-","1","if","h2_header_string","in","group",":","start_index","=","start","end_index","=","next","(","h2_list",")",".","start","(",")","content_to_remove","=","markdown_string","[","start_index",":","end_index","]","replaced_result","=","markdown_string",".","replace","(","content_to_remove",",","''",")","if","replaced_result",":","return","replaced_result","else",":","return","markdown_string"],"url":"https:\/\/github.com\/DataDog\/documentation\/blob\/f7f6fb076d3739c65b5e60419f1be7fd934a30d4\/local\/bin\/py\/build\/actions\/integrations.py#L517-L544"}
{"nwo":"DataDog\/documentation","sha":"f7f6fb076d3739c65b5e60419f1be7fd934a30d4","path":"local\/bin\/py\/build\/actions\/integrations.py","language":"python","identifier":"Integrations.validate_marketplace_integration_markdown","parameters":"(markdown_string)","argument_list":"","return_statement":"return matches == None","docstring":"Validates marketplace integration markdown string does not contain sensitive content.\n The build should fail if we found any sections that should not be displayed in Docs.\n Current exclude list: [\"Setup\", \"Pricing\", \"Tiered Pricing\"]","docstring_summary":"Validates marketplace integration markdown string does not contain sensitive content.\n The build should fail if we found any sections that should not be displayed in Docs.\n Current exclude list: [\"Setup\", \"Pricing\", \"Tiered Pricing\"]","docstring_tokens":["Validates","marketplace","integration","markdown","string","does","not","contain","sensitive","content",".","The","build","should","fail","if","we","found","any","sections","that","should","not","be","displayed","in","Docs",".","Current","exclude","list",":","[","Setup","Pricing","Tiered","Pricing","]"],"function":"def validate_marketplace_integration_markdown(markdown_string):\n \"\"\"\n Validates marketplace integration markdown string does not contain sensitive content.\n The build should fail if we found any sections that should not be displayed in Docs.\n Current exclude list: [\"Setup\", \"Pricing\", \"Tiered Pricing\"]\n \"\"\"\n setup_header_markdown_regex = r\"(#{1,6})(\\s*)(Setup|Pricing|Tiered Pricing)\"\n matches = re.search(setup_header_markdown_regex, markdown_string, re.MULTILINE | re.IGNORECASE)\n return matches == None","function_tokens":["def","validate_marketplace_integration_markdown","(","markdown_string",")",":","setup_header_markdown_regex","=","r\"(#{1,6})(\\s*)(Setup|Pricing|Tiered Pricing)\"","matches","=","re",".","search","(","setup_header_markdown_regex",",","markdown_string",",","re",".","MULTILINE","|","re",".","IGNORECASE",")","return","matches","==","None"],"url":"https:\/\/github.com\/DataDog\/documentation\/blob\/f7f6fb076d3739c65b5e60419f1be7fd934a30d4\/local\/bin\/py\/build\/actions\/integrations.py#L547-L555"}
{"nwo":"DataDog\/documentation","sha":"f7f6fb076d3739c65b5e60419f1be7fd934a30d4","path":"local\/bin\/py\/build\/actions\/integrations.py","language":"python","identifier":"Integrations.process_integration_readme","parameters":"(self, file_name, marketplace=False)","argument_list":"","return_statement":"","docstring":"Take a single README.md file and\n 1. extract the first h1, if this isn't a merge item\n 2. add tabs if they exist\n 3. inject metrics after ### Metrics header if metrics exists for file\n 4. inject service checks after ### Service Checks if file exists\n 5. inject hugo front matter params at top of file\n 6. write out file to content\/integrations with filename changed to integrationname.md\n :param file_name: path to a readme md file","docstring_summary":"Take a single README.md file and\n 1. extract the first h1, if this isn't a merge item\n 2. add tabs if they exist\n 3. inject metrics after ### Metrics header if metrics exists for file\n 4. inject service checks after ### Service Checks if file exists\n 5. inject hugo front matter params at top of file\n 6. write out file to content\/integrations with filename changed to integrationname.md\n :param file_name: path to a readme md file","docstring_tokens":["Take","a","single","README",".","md","file","and","1",".","extract","the","first","h1","if","this","isn","t","a","merge","item","2",".","add","tabs","if","they","exist","3",".","inject","metrics","after","###","Metrics","header","if","metrics","exists","for","file","4",".","inject","service","checks","after","###","Service","Checks","if","file","exists","5",".","inject","hugo","front","matter","params","at","top","of","file","6",".","write","out","file","to","content","\/","integrations","with","filename","changed","to","integrationname",".","md",":","param","file_name",":","path","to","a","readme","md","file"],"function":"def process_integration_readme(self, file_name, marketplace=False):\n \"\"\"\n Take a single README.md file and\n 1. extract the first h1, if this isn't a merge item\n 2. add tabs if they exist\n 3. inject metrics after ### Metrics header if metrics exists for file\n 4. inject service checks after ### Service Checks if file exists\n 5. inject hugo front matter params at top of file\n 6. 
write out file to content\/integrations with filename changed to integrationname.md\n :param file_name: path to a readme md file\n \"\"\"\n no_integration_issue = True\n tab_logic = False\n metrics = glob.glob(\n \"{path}{sep}*metadata.csv\".format(\n path=dirname(file_name), sep=sep\n )\n )\n metrics = metrics[0] if len(metrics) > 0 else None\n metrics_exist = (metrics and exists(metrics)\n and linecache.getline(metrics, 2))\n service_check = glob.glob(\"{file}.json\".format(\n file=self.data_service_checks_dir + basename(dirname(file_name))))\n service_check = (\n service_check[0]\n if len(service_check) > 0\n else None\n )\n service_check_exist = service_check and exists(\n service_check\n )\n manifest = \"{0}{1}{2}\".format(\n dirname(file_name), sep, \"manifest.json\"\n )\n\n if exists(manifest):\n try:\n manifest_json = json.load(open(manifest))\n manifest_json = self.process_manifest(manifest_json, basename(dirname(file_name)))\n except JSONDecodeError:\n no_integration_issue = False\n manifest_json = {}\n if getenv(\"LOCAL\") == 'True':\n print(\n \"\\x1b[33mWARNING\\x1b[0m: manifest could not be parsed {}\".format(manifest))\n else:\n print(\n \"\\x1b[31mERROR\\x1b[0m: manifest could not be parsed {}\".format(manifest))\n raise JSONDecodeError\n else:\n no_integration_issue = False\n manifest_json = {}\n print(\n \"\\x1b[33mWARNING\\x1b[0m: No manifest found for {}\".format(file_name))\n\n dependencies = self.add_dependencies(file_name)\n new_file_name = \"{}.md\".format(\n basename(dirname(file_name))\n )\n # is this the same as a committed hardcoded integration\n exist_already = (self.content_integrations_dir + new_file_name in self.initial_integration_files)\n # is this overwriting another generated integration\n exist_collision = exists(\n self.content_integrations_dir + new_file_name\n )\n\n regex_skip_sections_end = r\"(```|\\{\\{< \\\/code-block |\\{\\{< \\\/site-region >\\}\\})\"\n regex_skip_sections_start = r\"(```|\\{\\{< code-block |\\{\\{< site-region)\"\n\n ## Formating all link as reference to avoid any corner cases\n ## Replace image filenames in markdown for marketplace interations\n result = ''\n if not marketplace:\n try:\n result = format_link_file(file_name,regex_skip_sections_start,regex_skip_sections_end)\n except Exception as e:\n print(e)\n else:\n with open(file_name, 'r+') as f:\n markdown_string = f.read()\n markdown_with_replaced_images = self.replace_image_src(markdown_string, basename(dirname(file_name)))\n updated_markdown = self.remove_markdown_section(markdown_with_replaced_images, '## Setup')\n is_marketplace_integration_markdown_valid = self.validate_marketplace_integration_markdown(updated_markdown)\n\n if not is_marketplace_integration_markdown_valid:\n raise Exception('Potential setup or pricing information included in Marketplace Integration markdown. 
Check {} for Setup or Pricing sections.'.format(file_name))\n else:\n result = updated_markdown\n\n ## Check if there is a integration tab logic in the integration file:\n if \"\" in result:\n tab_logic = True\n ## Inlining all links\n result = self.inline_references(result,regex_skip_sections_start,regex_skip_sections_end)\n else:\n tab_logic= False\n\n title = manifest_json.get(\"name\", \"\").lower()\n if title not in [\n k\n for k, v in self.integration_mutations.items()\n if v.get(\"action\") == \"merge\"\n ]:\n result = re.sub(\n self.regex_h1, \"\", result, 1\n )\n result = re.sub(\n self.regex_tabs_open, \"{{< tabs >}}\", result, 0\n )\n result = re.sub(\n self.regex_tabs_close, \"{{< \/tabs >}}\", result, 0\n )\n result = re.sub(\n self.regex_tab_open, \"{{% tab\", result, 0\n )\n result = re.sub(\n self.regex_tab_close, \"{{% \/tab %}}\", result, 0\n )\n result = re.sub(\n self.regex_tab_end, \" %}}\", result, 0\n )\n result = re.sub(\n self.regex_partial_open, \"\", result, 0\n )\n result = re.sub(\n self.regex_partial_close, \"\", result, 0\n )\n\n if metrics_exist:\n result = re.sub(\n self.regex_metrics,\n r'\\1{{< get-metrics-from-git \"%s\" >}}\\n\\3\\4'\n % format(title),\n result,\n 0,\n )\n if service_check_exist:\n result = re.sub(\n self.regex_service_check,\n r'\\1{{< get-service-checks-from-git \"%s\" >}}\\n\\3\\4'\n % format(title),\n result,\n 0,\n )\n\n # if __init__.py exists lets grab the integration id\n integration_id = manifest_json.get(\"integration_id\", \"\") or \"\"\n initpy = \"{0}{1}{2}\".format(dirname(file_name), sep, \"__init__.py\")\n if exists(initpy):\n with open(initpy) as f:\n # look for ID = \"integration-name\" and extract\n matches = re.search(\"^ID\\s*=\\s*(?:\\'|\\\")([A-Z-a-z-_0-9]+)(?:\\'|\\\")$\", f.read(), re.MULTILINE)\n if matches:\n integration_id = matches.group(1)\n\n # if __about__.py exists lets grab the integration version\n integration_version = manifest_json.get(\"integration_version\", \"\") or \"\"\n integration_name = basename(dirname(file_name))\n aboutpy = \"{0}{1}{2}{3}{4}{5}{6}\".format(dirname(file_name), sep, \"datadog_checks\", sep, integration_name, sep, \"__about__.py\")\n\n if exists(aboutpy):\n with open(aboutpy) as f:\n # look for version = \"integration-version\" and extract\n matches = re.search(\"^__version__\\s*=\\s*(?:\\'|\\\")([0-9.]+)(?:\\'|\\\")$\", f.read(), re.MULTILINE)\n if matches:\n integration_version = matches.group(1)\n\n if not exist_already and no_integration_issue:\n # lets only write out file.md if its going to be public\n if manifest_json.get(\"is_public\", False):\n out_name = self.content_integrations_dir + new_file_name\n\n # lets make relative app links to integrations tile absolute\n regex = r\"(? 
name.md -> original_collision_name.md\n if exist_collision:\n f_name = integration_id.replace('-', '_') or manifest_json.get(\"name\", \"\") or new_file_name\n manifest_json[\"name\"] = f_name\n f_name = f_name if f_name.endswith('.md') else f_name + \".md\"\n out_name = self.content_integrations_dir + f_name\n print(\"\\x1b[33mWARNING\\x1b[0m: Collision, duplicate integration {} trying as {}\".format(\n new_file_name, f_name))\n result = self.add_integration_frontmatter(\n f_name, result, dependencies, integration_id, integration_version, manifest_json\n )\n else:\n result = self.add_integration_frontmatter(\n new_file_name, result, dependencies, integration_id, integration_version\n )\n\n with open(out_name, \"w\", ) as out:\n out.write(result)\n\n ## Reformating all links now that all processing is done\n if tab_logic:\n final_text = format_link_file(out_name, regex_skip_sections_start, regex_skip_sections_end)\n with open(out_name, 'w') as final_file:\n final_file.write(final_text)","function_tokens":["def","process_integration_readme","(","self",",","file_name",",","marketplace","=","False",")",":","no_integration_issue","=","True","tab_logic","=","False","metrics","=","glob",".","glob","(","\"{path}{sep}*metadata.csv\"",".","format","(","path","=","dirname","(","file_name",")",",","sep","=","sep",")",")","metrics","=","metrics","[","0","]","if","len","(","metrics",")",">","0","else","None","metrics_exist","=","(","metrics","and","exists","(","metrics",")","and","linecache",".","getline","(","metrics",",","2",")",")","service_check","=","glob",".","glob","(","\"{file}.json\"",".","format","(","file","=","self",".","data_service_checks_dir","+","basename","(","dirname","(","file_name",")",")",")",")","service_check","=","(","service_check","[","0","]","if","len","(","service_check",")",">","0","else","None",")","service_check_exist","=","service_check","and","exists","(","service_check",")","manifest","=","\"{0}{1}{2}\"",".","format","(","dirname","(","file_name",")",",","sep",",","\"manifest.json\"",")","if","exists","(","manifest",")",":","try",":","manifest_json","=","json",".","load","(","open","(","manifest",")",")","manifest_json","=","self",".","process_manifest","(","manifest_json",",","basename","(","dirname","(","file_name",")",")",")","except","JSONDecodeError",":","no_integration_issue","=","False","manifest_json","=","{","}","if","getenv","(","\"LOCAL\"",")","==","'True'",":","print","(","\"\\x1b[33mWARNING\\x1b[0m: manifest could not be parsed {}\"",".","format","(","manifest",")",")","else",":","print","(","\"\\x1b[31mERROR\\x1b[0m: manifest could not be parsed {}\"",".","format","(","manifest",")",")","raise","JSONDecodeError","else",":","no_integration_issue","=","False","manifest_json","=","{","}","print","(","\"\\x1b[33mWARNING\\x1b[0m: No manifest found for {}\"",".","format","(","file_name",")",")","dependencies","=","self",".","add_dependencies","(","file_name",")","new_file_name","=","\"{}.md\"",".","format","(","basename","(","dirname","(","file_name",")",")",")","# is this the same as a committed hardcoded integration","exist_already","=","(","self",".","content_integrations_dir","+","new_file_name","in","self",".","initial_integration_files",")","# is this overwriting another generated integration","exist_collision","=","exists","(","self",".","content_integrations_dir","+","new_file_name",")","regex_skip_sections_end","=","r\"(```|\\{\\{< \\\/code-block |\\{\\{< \\\/site-region >\\}\\})\"","regex_skip_sections_start","=","r\"(```|\\{\\{< code-block |\\{\\{< 
site-region)\"","## Formating all link as reference to avoid any corner cases","## Replace image filenames in markdown for marketplace interations","result","=","''","if","not","marketplace",":","try",":","result","=","format_link_file","(","file_name",",","regex_skip_sections_start",",","regex_skip_sections_end",")","except","Exception","as","e",":","print","(","e",")","else",":","with","open","(","file_name",",","'r+'",")","as","f",":","markdown_string","=","f",".","read","(",")","markdown_with_replaced_images","=","self",".","replace_image_src","(","markdown_string",",","basename","(","dirname","(","file_name",")",")",")","updated_markdown","=","self",".","remove_markdown_section","(","markdown_with_replaced_images",",","'## Setup'",")","is_marketplace_integration_markdown_valid","=","self",".","validate_marketplace_integration_markdown","(","updated_markdown",")","if","not","is_marketplace_integration_markdown_valid",":","raise","Exception","(","'Potential setup or pricing information included in Marketplace Integration markdown. Check {} for Setup or Pricing sections.'",".","format","(","file_name",")",")","else",":","result","=","updated_markdown","## Check if there is a integration tab logic in the integration file:","if","\"\"","in","result",":","tab_logic","=","True","## Inlining all links","result","=","self",".","inline_references","(","result",",","regex_skip_sections_start",",","regex_skip_sections_end",")","else",":","tab_logic","=","False","title","=","manifest_json",".","get","(","\"name\"",",","\"\"",")",".","lower","(",")","if","title","not","in","[","k","for","k",",","v","in","self",".","integration_mutations",".","items","(",")","if","v",".","get","(","\"action\"",")","==","\"merge\"","]",":","result","=","re",".","sub","(","self",".","regex_h1",",","\"\"",",","result",",","1",")","result","=","re",".","sub","(","self",".","regex_tabs_open",",","\"{{< tabs >}}\"",",","result",",","0",")","result","=","re",".","sub","(","self",".","regex_tabs_close",",","\"{{< \/tabs >}}\"",",","result",",","0",")","result","=","re",".","sub","(","self",".","regex_tab_open",",","\"{{% tab\"",",","result",",","0",")","result","=","re",".","sub","(","self",".","regex_tab_close",",","\"{{% \/tab %}}\"",",","result",",","0",")","result","=","re",".","sub","(","self",".","regex_tab_end",",","\" %}}\"",",","result",",","0",")","result","=","re",".","sub","(","self",".","regex_partial_open",",","\"\"",",","result",",","0",")","result","=","re",".","sub","(","self",".","regex_partial_close",",","\"\"",",","result",",","0",")","if","metrics_exist",":","result","=","re",".","sub","(","self",".","regex_metrics",",","r'\\1{{< get-metrics-from-git \"%s\" >}}\\n\\3\\4'","%","format","(","title",")",",","result",",","0",",",")","if","service_check_exist",":","result","=","re",".","sub","(","self",".","regex_service_check",",","r'\\1{{< get-service-checks-from-git \"%s\" >}}\\n\\3\\4'","%","format","(","title",")",",","result",",","0",",",")","# if __init__.py exists lets grab the integration id","integration_id","=","manifest_json",".","get","(","\"integration_id\"",",","\"\"",")","or","\"\"","initpy","=","\"{0}{1}{2}\"",".","format","(","dirname","(","file_name",")",",","sep",",","\"__init__.py\"",")","if","exists","(","initpy",")",":","with","open","(","initpy",")","as","f",":","# look for ID = \"integration-name\" and 
extract","matches","=","re",".","search","(","\"^ID\\s*=\\s*(?:\\'|\\\")([A-Z-a-z-_0-9]+)(?:\\'|\\\")$\"",",","f",".","read","(",")",",","re",".","MULTILINE",")","if","matches",":","integration_id","=","matches",".","group","(","1",")","# if __about__.py exists lets grab the integration version","integration_version","=","manifest_json",".","get","(","\"integration_version\"",",","\"\"",")","or","\"\"","integration_name","=","basename","(","dirname","(","file_name",")",")","aboutpy","=","\"{0}{1}{2}{3}{4}{5}{6}\"",".","format","(","dirname","(","file_name",")",",","sep",",","\"datadog_checks\"",",","sep",",","integration_name",",","sep",",","\"__about__.py\"",")","if","exists","(","aboutpy",")",":","with","open","(","aboutpy",")","as","f",":","# look for version = \"integration-version\" and extract","matches","=","re",".","search","(","\"^__version__\\s*=\\s*(?:\\'|\\\")([0-9.]+)(?:\\'|\\\")$\"",",","f",".","read","(",")",",","re",".","MULTILINE",")","if","matches",":","integration_version","=","matches",".","group","(","1",")","if","not","exist_already","and","no_integration_issue",":","# lets only write out file.md if its going to be public","if","manifest_json",".","get","(","\"is_public\"",",","False",")",":","out_name","=","self",".","content_integrations_dir","+","new_file_name","# lets make relative app links to integrations tile absolute","regex","=","r\"(? name.md -> original_collision_name.md","if","exist_collision",":","f_name","=","integration_id",".","replace","(","'-'",",","'_'",")","or","manifest_json",".","get","(","\"name\"",",","\"\"",")","or","new_file_name","manifest_json","[","\"name\"","]","=","f_name","f_name","=","f_name","if","f_name",".","endswith","(","'.md'",")","else","f_name","+","\".md\"","out_name","=","self",".","content_integrations_dir","+","f_name","print","(","\"\\x1b[33mWARNING\\x1b[0m: Collision, duplicate integration {} trying as {}\"",".","format","(","new_file_name",",","f_name",")",")","result","=","self",".","add_integration_frontmatter","(","f_name",",","result",",","dependencies",",","integration_id",",","integration_version",",","manifest_json",")","else",":","result","=","self",".","add_integration_frontmatter","(","new_file_name",",","result",",","dependencies",",","integration_id",",","integration_version",")","with","open","(","out_name",",","\"w\"",",",")","as","out",":","out",".","write","(","result",")","## Reformating all links now that all processing is done","if","tab_logic",":","final_text","=","format_link_file","(","out_name",",","regex_skip_sections_start",",","regex_skip_sections_end",")","with","open","(","out_name",",","'w'",")","as","final_file",":","final_file",".","write","(","final_text",")"],"url":"https:\/\/github.com\/DataDog\/documentation\/blob\/f7f6fb076d3739c65b5e60419f1be7fd934a30d4\/local\/bin\/py\/build\/actions\/integrations.py#L557-L759"}
{"nwo":"DataDog\/documentation","sha":"f7f6fb076d3739c65b5e60419f1be7fd934a30d4","path":"local\/bin\/py\/build\/actions\/integrations.py","language":"python","identifier":"Integrations.add_integration_frontmatter","parameters":"(\n self, file_name, content, dependencies=[], integration_id=\"\", integration_version=\"\", manifest_json=None\n )","argument_list":"","return_statement":"return template.format(\n front_matter=fm, content=content\n )","docstring":"Takes an integration README.md and injects front matter yaml based on manifest.json data of the same integration\n :param file_name: new integration markdown filename e.g airbrake.md\n :param content: string of markdown content\n :return: formatted string","docstring_summary":"Takes an integration README.md and injects front matter yaml based on manifest.json data of the same integration\n :param file_name: new integration markdown filename e.g airbrake.md\n :param content: string of markdown content\n :return: formatted string","docstring_tokens":["Takes","an","integration","README",".","md","and","injects","front","matter","yaml","based","on","manifest",".","json","data","of","the","same","integration",":","param","file_name",":","new","integration","markdown","filename","e",".","g","airbrake",".","md",":","param","content",":","string","of","markdown","content",":","return",":","formatted","string"],"function":"def add_integration_frontmatter(\n self, file_name, content, dependencies=[], integration_id=\"\", integration_version=\"\", manifest_json=None\n ):\n \"\"\"\n Takes an integration README.md and injects front matter yaml based on manifest.json data of the same integration\n :param file_name: new integration markdown filename e.g airbrake.md\n :param content: string of markdown content\n :return: formatted string\n \"\"\"\n fm = {}\n template = \"---\\n{front_matter}\\n---\\n\\n{content}\\n\"\n if file_name not in self.initial_integration_files:\n if manifest_json:\n item = manifest_json\n else:\n matches = [\n d\n for d in self.datafile_json\n if d.get(\"name\", \"\").lower() == basename(file_name).replace(\".md\", \"\")\n ]\n item = matches[0] if len(matches) > 0 else []\n if item:\n item[\"kind\"] = \"integration\"\n item[\"integration_title\"] = (\n item\n .get(\"public_title\", \"\")\n .replace(\"Datadog-\", \"\")\n .replace(\"Integration\", \"\")\n .strip()\n )\n item[\"git_integration_title\"] = (\n item.get(\"name\", \"\").lower()\n )\n if item.get(\"type\", None):\n item[\"ddtype\"] = item.get(\"type\")\n del item[\"type\"]\n item[\"dependencies\"] = dependencies\n item[\"draft\"] = not item.get(\"is_public\", False)\n item[\"integration_id\"] = item.get(\"integration_id\", integration_id)\n item[\"integration_version\"] = item.get(\"integration_version\", integration_version)\n fm = yaml.safe_dump(\n item, width=float(\"inf\"), default_style='\"', default_flow_style=False, allow_unicode=True\n ).rstrip()\n # simple bool cleanups with replace\n fm = fm.replace('!!bool \"false\"', 'false')\n fm = fm.replace('!!bool \"true\"', 'true')\n else:\n fm = yaml.safe_dump({\"kind\": \"integration\"}, width=float(\"inf\"), default_style='\"', default_flow_style=False,\n allow_unicode=True).rstrip()\n return template.format(\n front_matter=fm, content=content\n 
)","function_tokens":["def","add_integration_frontmatter","(","self",",","file_name",",","content",",","dependencies","=","[","]",",","integration_id","=","\"\"",",","integration_version","=","\"\"",",","manifest_json","=","None",")",":","fm","=","{","}","template","=","\"---\\n{front_matter}\\n---\\n\\n{content}\\n\"","if","file_name","not","in","self",".","initial_integration_files",":","if","manifest_json",":","item","=","manifest_json","else",":","matches","=","[","d","for","d","in","self",".","datafile_json","if","d",".","get","(","\"name\"",",","\"\"",")",".","lower","(",")","==","basename","(","file_name",")",".","replace","(","\".md\"",",","\"\"",")","]","item","=","matches","[","0","]","if","len","(","matches",")",">","0","else","[","]","if","item",":","item","[","\"kind\"","]","=","\"integration\"","item","[","\"integration_title\"","]","=","(","item",".","get","(","\"public_title\"",",","\"\"",")",".","replace","(","\"Datadog-\"",",","\"\"",")",".","replace","(","\"Integration\"",",","\"\"",")",".","strip","(",")",")","item","[","\"git_integration_title\"","]","=","(","item",".","get","(","\"name\"",",","\"\"",")",".","lower","(",")",")","if","item",".","get","(","\"type\"",",","None",")",":","item","[","\"ddtype\"","]","=","item",".","get","(","\"type\"",")","del","item","[","\"type\"","]","item","[","\"dependencies\"","]","=","dependencies","item","[","\"draft\"","]","=","not","item",".","get","(","\"is_public\"",",","False",")","item","[","\"integration_id\"","]","=","item",".","get","(","\"integration_id\"",",","integration_id",")","item","[","\"integration_version\"","]","=","item",".","get","(","\"integration_version\"",",","integration_version",")","fm","=","yaml",".","safe_dump","(","item",",","width","=","float","(","\"inf\"",")",",","default_style","=","'\"'",",","default_flow_style","=","False",",","allow_unicode","=","True",")",".","rstrip","(",")","# simple bool cleanups with replace","fm","=","fm",".","replace","(","'!!bool \"false\"'",",","'false'",")","fm","=","fm",".","replace","(","'!!bool \"true\"'",",","'true'",")","else",":","fm","=","yaml",".","safe_dump","(","{","\"kind\"",":","\"integration\"","}",",","width","=","float","(","\"inf\"",")",",","default_style","=","'\"'",",","default_flow_style","=","False",",","allow_unicode","=","True",")",".","rstrip","(",")","return","template",".","format","(","front_matter","=","fm",",","content","=","content",")"],"url":"https:\/\/github.com\/DataDog\/documentation\/blob\/f7f6fb076d3739c65b5e60419f1be7fd934a30d4\/local\/bin\/py\/build\/actions\/integrations.py#L761-L812"}
{"nwo":"DataDog\/documentation","sha":"f7f6fb076d3739c65b5e60419f1be7fd934a30d4","path":"local\/bin\/py\/build\/actions\/integrations.py","language":"python","identifier":"Integrations.add_dependencies","parameters":"(self, file_name)","argument_list":"","return_statement":"return dependencies","docstring":"Adds dependencies to the integration file in order to be able to find the source file in Github","docstring_summary":"Adds dependencies to the integration file in order to be able to find the source file in Github","docstring_tokens":["Adds","dependencies","to","the","integration","file","in","order","to","be","able","to","find","the","source","file","in","Github"],"function":"def add_dependencies(self, file_name):\n \"\"\" Adds dependencies to the integration file in order to be able to find the source file in Github \"\"\"\n dependencies = []\n if file_name.startswith(\n \"{0}{1}{2}\".format(\n self.extract_dir, \"integrations-core\", sep\n )\n ):\n dependencies.append(\n file_name.replace(\n \"{0}{1}{2}\".format(\n self.extract_dir,\n \"integrations-core\",\n sep,\n ),\n \"https:\/\/github.com\/DataDog\/integrations-core\/blob\/master\/\",\n )\n )\n\n elif file_name.startswith(\n \"{0}{1}{2}\".format(\n self.extract_dir, \"integrations-extras\", sep\n )\n ):\n dependencies.append(\n file_name.replace(\n \"{0}{1}{2}\".format(\n self.extract_dir,\n \"integrations-extras\",\n sep,\n ),\n \"https:\/\/github.com\/DataDog\/integrations-extras\/blob\/master\/\",\n )\n )\n\n return dependencies","function_tokens":["def","add_dependencies","(","self",",","file_name",")",":","dependencies","=","[","]","if","file_name",".","startswith","(","\"{0}{1}{2}\"",".","format","(","self",".","extract_dir",",","\"integrations-core\"",",","sep",")",")",":","dependencies",".","append","(","file_name",".","replace","(","\"{0}{1}{2}\"",".","format","(","self",".","extract_dir",",","\"integrations-core\"",",","sep",",",")",",","\"https:\/\/github.com\/DataDog\/integrations-core\/blob\/master\/\"",",",")",")","elif","file_name",".","startswith","(","\"{0}{1}{2}\"",".","format","(","self",".","extract_dir",",","\"integrations-extras\"",",","sep",")",")",":","dependencies",".","append","(","file_name",".","replace","(","\"{0}{1}{2}\"",".","format","(","self",".","extract_dir",",","\"integrations-extras\"",",","sep",",",")",",","\"https:\/\/github.com\/DataDog\/integrations-extras\/blob\/master\/\"",",",")",")","return","dependencies"],"url":"https:\/\/github.com\/DataDog\/documentation\/blob\/f7f6fb076d3739c65b5e60419f1be7fd934a30d4\/local\/bin\/py\/build\/actions\/integrations.py#L814-L849"}
{"nwo":"DataDog\/documentation","sha":"f7f6fb076d3739c65b5e60419f1be7fd934a30d4","path":"local\/bin\/py\/build\/actions\/integrations.py","language":"python","identifier":"Integrations.process_manifest","parameters":"(self, manifest_json, name)","argument_list":"","return_statement":"return manifest_json","docstring":"Takes manifest and converts v2 and above to v1 expected formats for now","docstring_summary":"Takes manifest and converts v2 and above to v1 expected formats for now","docstring_tokens":["Takes","manifest","and","converts","v2","and","above","to","v1","expected","formats","for","now"],"function":"def process_manifest(self, manifest_json, name):\n \"\"\" Takes manifest and converts v2 and above to v1 expected formats for now \"\"\"\n manifest_version = (manifest_json.get(\"manifest_version\", '1.0.0') or '1.0.0').split('.')\n split_version = manifest_version[0] if len(manifest_version) > 1 else '1'\n if split_version != '1':\n # v2 or above\n manifest_json[\"integration_id\"] = manifest_json.get(\"app_id\", \"\")\n categories = []\n supported_os = []\n for tag in manifest_json.get(\"classifier_tags\", []):\n # in some cases tag was null\/None\n if tag:\n key, value = tag.split(\"::\")\n if key.lower() == \"category\":\n categories.append(value.lower())\n if key.lower() == \"supported os\":\n supported_os.append(value.lower())\n manifest_json[\"categories\"] = categories\n manifest_json[\"supported_os\"] = supported_os\n manifest_json[\"public_title\"] = manifest_json.get(\"tile\", {}).get(\"title\", '')\n manifest_json[\"is_public\"] = manifest_json.get(\"display_on_public_website\", False)\n manifest_json[\"short_description\"] = manifest_json.get(\"tile\", {}).get(\"description\", '')\n manifest_json[\"name\"] = manifest_json.get(\"name\", name)\n return manifest_json","function_tokens":["def","process_manifest","(","self",",","manifest_json",",","name",")",":","manifest_version","=","(","manifest_json",".","get","(","\"manifest_version\"",",","'1.0.0'",")","or","'1.0.0'",")",".","split","(","'.'",")","split_version","=","manifest_version","[","0","]","if","len","(","manifest_version",")",">","1","else","'1'","if","split_version","!=","'1'",":","# v2 or above","manifest_json","[","\"integration_id\"","]","=","manifest_json",".","get","(","\"app_id\"",",","\"\"",")","categories","=","[","]","supported_os","=","[","]","for","tag","in","manifest_json",".","get","(","\"classifier_tags\"",",","[","]",")",":","# in some cases tag was null\/None","if","tag",":","key",",","value","=","tag",".","split","(","\"::\"",")","if","key",".","lower","(",")","==","\"category\"",":","categories",".","append","(","value",".","lower","(",")",")","if","key",".","lower","(",")","==","\"supported 
os\"",":","supported_os",".","append","(","value",".","lower","(",")",")","manifest_json","[","\"categories\"","]","=","categories","manifest_json","[","\"supported_os\"","]","=","supported_os","manifest_json","[","\"public_title\"","]","=","manifest_json",".","get","(","\"tile\"",",","{","}",")",".","get","(","\"title\"",",","''",")","manifest_json","[","\"is_public\"","]","=","manifest_json",".","get","(","\"display_on_public_website\"",",","False",")","manifest_json","[","\"short_description\"","]","=","manifest_json",".","get","(","\"tile\"",",","{","}",")",".","get","(","\"description\"",",","''",")","manifest_json","[","\"name\"","]","=","manifest_json",".","get","(","\"name\"",",","name",")","return","manifest_json"],"url":"https:\/\/github.com\/DataDog\/documentation\/blob\/f7f6fb076d3739c65b5e60419f1be7fd934a30d4\/local\/bin\/py\/build\/actions\/integrations.py#L851-L874"}
{"nwo":"DataDog\/documentation","sha":"f7f6fb076d3739c65b5e60419f1be7fd934a30d4","path":"local\/bin\/py\/build\/actions\/format_link.py","language":"python","identifier":"prepare_file","parameters":"(file)","argument_list":"","return_statement":"return [main_section] + sub_sections","docstring":"Goes through a file and parses it into different sections. Those sections are a list of lines and are put within an Array.\n The first item of the Array is the main section, all other item if any are sub sections, a.k.a tabs within the page.\n\n :param file: file to break down into sections.\n :return array_of_sections: Each section is a list of lines within this section.","docstring_summary":"Goes through a file and parses it into different sections. Those sections are a list of lines and are put within an Array.\n The first item of the Array is the main section, all other item if any are sub sections, a.k.a tabs within the page.","docstring_tokens":["Goes","through","a","file","and","parses","it","into","different","sections",".","Those","sections","are","a","list","of","lines","and","are","put","within","an","Array",".","The","first","item","of","the","Array","is","the","main","section","all","other","item","if","any","are","sub","sections","a",".","k",".","a","tabs","within","the","page","."],"function":"def prepare_file(file):\n \"\"\"\n Goes through a file and parses it into different sections. Those sections are a list of lines and are put within an Array.\n The first item of the Array is the main section, all other item if any are sub sections, a.k.a tabs within the page.\n\n :param file: file to break down into sections.\n :return array_of_sections: Each section is a list of lines within this section.\n \"\"\"\n\n # A state machine is used here, the function takes line within the file one by one, and depending of the states, either\n # 1. Consider it is in the main section\n # 2. Consider it is within a tabs group\n # 3. Consider within a tab\n #\n # We keep the {{< tabs >}} lines and co. in the main section since it will be used to inline the proper content afterwards.\n state = 'main'\n\n main_section = []\n sub_sections = []\n temp_section = []\n\n with open(file, 'r', encoding='utf-8') as f:\n for line in f:\n if state == 'main':\n main_section.append(line)\n if (re.search(r\"{{< tabs >}}\", line.strip()) or re.search(r\"{{< programming-lang-wrapper\", line.strip())):\n state = 'tabs'\n elif state == 'tabs':\n main_section.append(line)\n if (re.search(r\"{{% tab \", line.strip()) or re.search(r\"{{< programming-lang \", line.strip())):\n state = 'tab'\n if (re.search(r\"{{< \/tabs >}}\", line.strip()) or re.search(r\"{{< \/programming-lang-wrapper >}}\", line.strip())):\n state = 'main'\n elif state == 'tab':\n if (re.search(r\"{{% \/tab %}}\", line.strip()) or re.search(r\"{{< \/programming-lang >}}\", line.strip())):\n state = 'tabs'\n main_section.append(line)\n sub_sections.append(temp_section)\n temp_section = []\n else:\n temp_section.append(line)\n\n if state != 'main':\n raise ValueError\n\n return [main_section] + sub_sections","function_tokens":["def","prepare_file","(","file",")",":","# A state machine is used here, the function takes line within the file one by one, and depending of the states, either","# 1. Consider it is in the main section","# 2. Consider it is within a tabs group","# 3. Consider within a tab","#","# We keep the {{< tabs >}} lines and co. 
in the main section since it will be used to inline the proper content afterwards.","state","=","'main'","main_section","=","[","]","sub_sections","=","[","]","temp_section","=","[","]","with","open","(","file",",","'r'",",","encoding","=","'utf-8'",")","as","f",":","for","line","in","f",":","if","state","==","'main'",":","main_section",".","append","(","line",")","if","(","re",".","search","(","r\"{{< tabs >}}\"",",","line",".","strip","(",")",")","or","re",".","search","(","r\"{{< programming-lang-wrapper\"",",","line",".","strip","(",")",")",")",":","state","=","'tabs'","elif","state","==","'tabs'",":","main_section",".","append","(","line",")","if","(","re",".","search","(","r\"{{% tab \"",",","line",".","strip","(",")",")","or","re",".","search","(","r\"{{< programming-lang \"",",","line",".","strip","(",")",")",")",":","state","=","'tab'","if","(","re",".","search","(","r\"{{< \/tabs >}}\"",",","line",".","strip","(",")",")","or","re",".","search","(","r\"{{< \/programming-lang-wrapper >}}\"",",","line",".","strip","(",")",")",")",":","state","=","'main'","elif","state","==","'tab'",":","if","(","re",".","search","(","r\"{{% \/tab %}}\"",",","line",".","strip","(",")",")","or","re",".","search","(","r\"{{< \/programming-lang >}}\"",",","line",".","strip","(",")",")",")",":","state","=","'tabs'","main_section",".","append","(","line",")","sub_sections",".","append","(","temp_section",")","temp_section","=","[","]","else",":","temp_section",".","append","(","line",")","if","state","!=","'main'",":","raise","ValueError","return","[","main_section","]","+","sub_sections"],"url":"https:\/\/github.com\/DataDog\/documentation\/blob\/f7f6fb076d3739c65b5e60419f1be7fd934a30d4\/local\/bin\/py\/build\/actions\/format_link.py#L9-L54"}
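The main/tabs/tab state machine in prepare_file can be demonstrated on an in-memory list of lines instead of a file; the tab markers follow the Hugo shortcodes matched above:

```python
import re

lines = [
    "intro\n",
    "{{< tabs >}}\n",
    '{{% tab "Agent" %}}\n',
    "agent content\n",
    "{{% /tab %}}\n",
    "{{< /tabs >}}\n",
    "outro\n",
]

state, main_section, sub_sections, temp_section = "main", [], [], []
for line in lines:
    if state == "main":
        main_section.append(line)
        if re.search(r"{{< tabs >}}", line.strip()):
            state = "tabs"
    elif state == "tabs":
        main_section.append(line)
        if re.search(r"{{% tab ", line.strip()):
            state = "tab"
        if re.search(r"{{< /tabs >}}", line.strip()):
            state = "main"
    elif state == "tab":
        if re.search(r"{{% /tab %}}", line.strip()):
            state = "tabs"
            main_section.append(line)
            sub_sections.append(temp_section)  # one stored sub section per tab
            temp_section = []
        else:
            temp_section.append(line)

print(len(sub_sections), sub_sections[0])  # 1 ['agent content\n']
```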
{"nwo":"DataDog\/documentation","sha":"f7f6fb076d3739c65b5e60419f1be7fd934a30d4","path":"local\/bin\/py\/build\/actions\/format_link.py","language":"python","identifier":"check_references","parameters":"(all_references)","argument_list":"","return_statement":"return all_references_deduped","docstring":"Goes through a list of reference link and dedupe it, and if two references share the same index, throw an error.\n\n :param all_references: An array of references\n :return all_references_deduped: An array of references deduped.","docstring_summary":"Goes through a list of reference link and dedupe it, and if two references share the same index, throw an error.","docstring_tokens":["Goes","through","a","list","of","reference","link","and","dedupe","it","and","if","two","references","share","the","same","index","throw","an","error","."],"function":"def check_references(all_references):\n \"\"\"\n Goes through a list of reference link and dedupe it, and if two references share the same index, throw an error.\n\n :param all_references: An array of references\n :return all_references_deduped: An array of references deduped.\n \"\"\"\n all_references_deduped = []\n reference_indexes_used = []\n duplicated_references = []\n is_duplicated = False\n\n for reference in all_references:\n if reference not in all_references_deduped:\n\n reference_index, reference_val = reference\n\n if reference_index not in reference_indexes_used:\n reference_indexes_used.append(reference_index)\n all_references_deduped.append(reference)\n else:\n duplicated_references.append(reference)\n is_duplicated = True\n\n if is_duplicated:\n for duplicated_reference in duplicated_references:\n duplicated_reference_index, duplicated_reference_val = duplicated_reference\n print('Duplicated reference: [{}]: {}'.format(\n duplicated_reference_index, duplicated_reference_val))\n raise AssertionError\n\n return all_references_deduped","function_tokens":["def","check_references","(","all_references",")",":","all_references_deduped","=","[","]","reference_indexes_used","=","[","]","duplicated_references","=","[","]","is_duplicated","=","False","for","reference","in","all_references",":","if","reference","not","in","all_references_deduped",":","reference_index",",","reference_val","=","reference","if","reference_index","not","in","reference_indexes_used",":","reference_indexes_used",".","append","(","reference_index",")","all_references_deduped",".","append","(","reference",")","else",":","duplicated_references",".","append","(","reference",")","is_duplicated","=","True","if","is_duplicated",":","for","duplicated_reference","in","duplicated_references",":","duplicated_reference_index",",","duplicated_reference_val","=","duplicated_reference","print","(","'Duplicated reference: [{}]: {}'",".","format","(","duplicated_reference_index",",","duplicated_reference_val",")",")","raise","AssertionError","return","all_references_deduped"],"url":"https:\/\/github.com\/DataDog\/documentation\/blob\/f7f6fb076d3739c65b5e60419f1be7fd934a30d4\/local\/bin\/py\/build\/actions\/format_link.py#L57-L88"}
{"nwo":"DataDog\/documentation","sha":"f7f6fb076d3739c65b5e60419f1be7fd934a30d4","path":"local\/bin\/py\/build\/actions\/format_link.py","language":"python","identifier":"remove_reference","parameters":"(section, regex_skip_sections_start,\n regex_skip_sections_end)","argument_list":"","return_statement":"return [all_references_checked, section_without_references]","docstring":"Goes through a section and remove all reference links it can found.\n\n :param section: An array of lines representing a section.\n :param regex_skip_sections_start: regex defining the start line that indicates a block of line that shouldn't be processed.\n :param regex_skip_sections_end: regex defining the end line that indicates a block of line that shouldn't be processed.\n :return [all_references, section_without_references]: Returns an array of array, the first item contains all references found in the section\n The second item contains the section without any reference in it.","docstring_summary":"Goes through a section and remove all reference links it can found.","docstring_tokens":["Goes","through","a","section","and","remove","all","reference","links","it","can","found","."],"function":"def remove_reference(section, regex_skip_sections_start,\n regex_skip_sections_end):\n \"\"\"\n Goes through a section and remove all reference links it can found.\n\n :param section: An array of lines representing a section.\n :param regex_skip_sections_start: regex defining the start line that indicates a block of line that shouldn't be processed.\n :param regex_skip_sections_end: regex defining the end line that indicates a block of line that shouldn't be processed.\n :return [all_references, section_without_references]: Returns an array of array, the first item contains all references found in the section\n The second item contains the section without any reference in it.\n \"\"\"\n skip = False\n all_references = []\n section_without_references = []\n regex_bottom_reference_link = r\"^\\s*\\[(\\d*?)\\]: (\\S*)\"\n\n # Collecting all references and removing them from section\n # looking at each line, if a line is a reference then we remove it and store the reference.\n\n for line in section:\n if skip:\n section_without_references.append(line)\n if re.search(regex_skip_sections_end, line):\n skip = False\n elif not skip:\n\n if re.search(regex_skip_sections_start, line):\n section_without_references.append(line)\n skip = True\n\n elif re.search(regex_bottom_reference_link, line):\n\n reference = re.search(regex_bottom_reference_link, line)\n all_references.append([reference.group(1),\n reference.group(2)])\n else:\n section_without_references.append(line)\n # By the end of the for loop skip should always be false otherwise it means that a codeblock is not closed.\n if skip:\n raise ValueError\n\n try:\n all_references_checked = check_references(all_references)\n except AssertionError:\n raise AssertionError\n\n return [all_references_checked, section_without_references]","function_tokens":["def","remove_reference","(","section",",","regex_skip_sections_start",",","regex_skip_sections_end",")",":","skip","=","False","all_references","=","[","]","section_without_references","=","[","]","regex_bottom_reference_link","=","r\"^\\s*\\[(\\d*?)\\]: (\\S*)\"","# Collecting all references and removing them from section","# looking at each line, if a line is a reference then we remove it and store the 
reference.","for","line","in","section",":","if","skip",":","section_without_references",".","append","(","line",")","if","re",".","search","(","regex_skip_sections_end",",","line",")",":","skip","=","False","elif","not","skip",":","if","re",".","search","(","regex_skip_sections_start",",","line",")",":","section_without_references",".","append","(","line",")","skip","=","True","elif","re",".","search","(","regex_bottom_reference_link",",","line",")",":","reference","=","re",".","search","(","regex_bottom_reference_link",",","line",")","all_references",".","append","(","[","reference",".","group","(","1",")",",","reference",".","group","(","2",")","]",")","else",":","section_without_references",".","append","(","line",")","# By the end of the for loop skip should always be false otherwise it means that a codeblock is not closed.","if","skip",":","raise","ValueError","try",":","all_references_checked","=","check_references","(","all_references",")","except","AssertionError",":","raise","AssertionError","return","[","all_references_checked",",","section_without_references","]"],"url":"https:\/\/github.com\/DataDog\/documentation\/blob\/f7f6fb076d3739c65b5e60419f1be7fd934a30d4\/local\/bin\/py\/build\/actions\/format_link.py#L91-L137"}
{"nwo":"DataDog\/documentation","sha":"f7f6fb076d3739c65b5e60419f1be7fd934a30d4","path":"local\/bin\/py\/build\/actions\/format_link.py","language":"python","identifier":"inlining_all_links","parameters":"(\n section_without_references,\n all_references,\n regex_skip_sections_start,\n regex_skip_sections_end)","argument_list":"","return_statement":"return section_with_all_links","docstring":"Goes through a section with a list of references and inline all references.\n\n :param section_without_references: An array of lines representing a section, for this function to work, all reference should be removed previously from the section.\n :param regex_skip_sections_start: regex defining the start line that indicates a block of line that shouldn't be processed.\n :param regex_skip_sections_end: regex defining the end line that indicates a block of line that shouldn't be processed.\n :return section_with_all_links: A section (an array of lines) is returned without any references in it, just pure inlined links.","docstring_summary":"Goes through a section with a list of references and inline all references.","docstring_tokens":["Goes","through","a","section","with","a","list","of","references","and","inline","all","references","."],"function":"def inlining_all_links(\n section_without_references,\n all_references,\n regex_skip_sections_start,\n regex_skip_sections_end):\n \"\"\"\n Goes through a section with a list of references and inline all references.\n\n :param section_without_references: An array of lines representing a section, for this function to work, all reference should be removed previously from the section.\n :param regex_skip_sections_start: regex defining the start line that indicates a block of line that shouldn't be processed.\n :param regex_skip_sections_end: regex defining the end line that indicates a block of line that shouldn't be processed.\n :return section_with_all_links: A section (an array of lines) is returned without any references in it, just pure inlined links.\n \"\"\"\n\n section_with_all_links = []\n skip = False\n\n for line in section_without_references:\n if skip:\n if re.search(regex_skip_sections_end, line):\n skip = False\n elif not skip:\n if re.search(regex_skip_sections_start, line):\n skip = True\n else:\n for reference in all_references:\n reference_index, reference_val = reference\n\n curent_link = '][' + reference_index + ']'\n\n line = line.replace(\n curent_link, '](' + reference_val + ')')\n\n section_with_all_links.append(line)\n\n # By the end of the for loop skip should always be false otherwise it means that a codeblock is not closed.\n if skip:\n raise ValueError\n\n return section_with_all_links","function_tokens":["def","inlining_all_links","(","section_without_references",",","all_references",",","regex_skip_sections_start",",","regex_skip_sections_end",")",":","section_with_all_links","=","[","]","skip","=","False","for","line","in","section_without_references",":","if","skip",":","if","re",".","search","(","regex_skip_sections_end",",","line",")",":","skip","=","False","elif","not","skip",":","if","re",".","search","(","regex_skip_sections_start",",","line",")",":","skip","=","True","else",":","for","reference","in","all_references",":","reference_index",",","reference_val","=","reference","curent_link","=","']['","+","reference_index","+","']'","line","=","line",".","replace","(","curent_link",",","']('","+","reference_val","+","')'",")","section_with_all_links",".","append","(","line",")","# By the end of the for loop skip should 
always be false otherwise it means that a codeblock is not closed.","if","skip",":","raise","ValueError","return","section_with_all_links"],"url":"https:\/\/github.com\/DataDog\/documentation\/blob\/f7f6fb076d3739c65b5e60419f1be7fd934a30d4\/local\/bin\/py\/build\/actions\/format_link.py#L140-L179"}
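The core of inlining_all_links is a plain string replacement of ][n] markers with the stored URL. On one sample line:

```python
all_references = [["1", "https://docs.datadoghq.com/agent/"]]
line = "Install the [Agent][1] first.\n"

for reference_index, reference_val in all_references:
    line = line.replace("][" + reference_index + "]",
                        "](" + reference_val + ")")

print(line)  # Install the [Agent](https://docs.datadoghq.com/agent/) first.
```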
{"nwo":"DataDog\/documentation","sha":"f7f6fb076d3739c65b5e60419f1be7fd934a30d4","path":"local\/bin\/py\/build\/actions\/format_link.py","language":"python","identifier":"collect_all_links","parameters":"(section_with_all_links,\n regex_skip_sections_start,\n regex_skip_sections_end)","argument_list":"","return_statement":"return all_links","docstring":"Goes through a section and extract all inlined links it can found.\n\n :param section_with_all_links: An array of lines representing a section. For this function to work, all links must be inlined first.\n :param regex_skip_sections_start: regex defining the start line that indicates a block of line that shouldn't be processed.\n :param regex_skip_sections_end: regex defining the end line that indicates a block of line that shouldn't be processed.\n :return all_links: An array of all unique links that where found within a section.","docstring_summary":"Goes through a section and extract all inlined links it can found.","docstring_tokens":["Goes","through","a","section","and","extract","all","inlined","links","it","can","found","."],"function":"def collect_all_links(section_with_all_links,\n regex_skip_sections_start,\n regex_skip_sections_end):\n \"\"\"\n Goes through a section and extract all inlined links it can found.\n\n :param section_with_all_links: An array of lines representing a section. For this function to work, all links must be inlined first.\n :param regex_skip_sections_start: regex defining the start line that indicates a block of line that shouldn't be processed.\n :param regex_skip_sections_end: regex defining the end line that indicates a block of line that shouldn't be processed.\n :return all_links: An array of all unique links that where found within a section.\n \"\"\"\n regex_link_inlined = r\"\\[.*?\\]\\((?![#?])(\\S*?)\\)\"\n all_links = []\n skip = False\n for line in section_with_all_links:\n if skip:\n if re.search(regex_skip_sections_end, line):\n skip = False\n elif not skip:\n if re.search(regex_skip_sections_start, line):\n skip = True\n else:\n line_links = re.findall(regex_link_inlined, line,\n re.MULTILINE)\n if not line_links == []:\n for link in line_links:\n\n # If the link is already in the array, then it doesn't add it to avoid duplicated link\n\n if link not in all_links:\n all_links.append(link)\n\n # By the end of the for loop skip should always be false otherwise it means that a codeblock is not closed.\n if skip:\n raise ValueError\n\n return all_links","function_tokens":["def","collect_all_links","(","section_with_all_links",",","regex_skip_sections_start",",","regex_skip_sections_end",")",":","regex_link_inlined","=","r\"\\[.*?\\]\\((?![#?])(\\S*?)\\)\"","all_links","=","[","]","skip","=","False","for","line","in","section_with_all_links",":","if","skip",":","if","re",".","search","(","regex_skip_sections_end",",","line",")",":","skip","=","False","elif","not","skip",":","if","re",".","search","(","regex_skip_sections_start",",","line",")",":","skip","=","True","else",":","line_links","=","re",".","findall","(","regex_link_inlined",",","line",",","re",".","MULTILINE",")","if","not","line_links","==","[","]",":","for","link","in","line_links",":","# If the link is already in the array, then it doesn't add it to avoid duplicated link","if","link","not","in","all_links",":","all_links",".","append","(","link",")","# By the end of the for loop skip should always be false otherwise it means that a codeblock is not 
closed.","if","skip",":","raise","ValueError","return","all_links"],"url":"https:\/\/github.com\/DataDog\/documentation\/blob\/f7f6fb076d3739c65b5e60419f1be7fd934a30d4\/local\/bin\/py\/build\/actions\/format_link.py#L182-L218"}
{"nwo":"DataDog\/documentation","sha":"f7f6fb076d3739c65b5e60419f1be7fd934a30d4","path":"local\/bin\/py\/build\/actions\/format_link.py","language":"python","identifier":"transform_link_to_references","parameters":"(\n section_with_all_links,\n all_links,\n regex_skip_sections_start,\n regex_skip_sections_end,\n)","argument_list":"","return_statement":"return section_with_references","docstring":"Goes through a section where all link are inlined and transform them in references\n\n :param section_with_all_links: An array of lines representing a section where all link are inlined.\n :param all_links: An array of links representing all unique list associated to section_with_all_links.\n :param regex_skip_sections_start: regex defining the start line that indicates a block of line that shouldn't be processed.\n :param regex_skip_sections_end: regex defining the end line that indicates a block of line that shouldn't be processed.\n :return section_with_references: A section (an array of lines), with all inlined links transformed into a reference link.","docstring_summary":"Goes through a section where all link are inlined and transform them in references","docstring_tokens":["Goes","through","a","section","where","all","link","are","inlined","and","transform","them","in","references"],"function":"def transform_link_to_references(\n section_with_all_links,\n all_links,\n regex_skip_sections_start,\n regex_skip_sections_end,\n):\n \"\"\"\n Goes through a section where all link are inlined and transform them in references\n\n :param section_with_all_links: An array of lines representing a section where all link are inlined.\n :param all_links: An array of links representing all unique list associated to section_with_all_links.\n :param regex_skip_sections_start: regex defining the start line that indicates a block of line that shouldn't be processed.\n :param regex_skip_sections_end: regex defining the end line that indicates a block of line that shouldn't be processed.\n :return section_with_references: A section (an array of lines), with all inlined links transformed into a reference link.\n \"\"\"\n\n section_with_references = []\n\n skip = False\n\n for line in section_with_all_links:\n if skip:\n if re.search(regex_skip_sections_end, line):\n skip = False\n elif not skip:\n if re.search(regex_skip_sections_start, line):\n skip = True\n else:\n for i, link in enumerate(all_links):\n link_to_reference = '](' + str(link) + ')'\n # i is incremented by one in order to start references indexes at 1\n line = line.replace(link_to_reference,\n '][' + str(i + 1) + ']')\n\n section_with_references.append(line)\n\n # By the end of the for loop skip should always be false otherwise it means that a codeblock is not closed.\n if skip:\n raise ValueError\n\n return section_with_references","function_tokens":["def","transform_link_to_references","(","section_with_all_links",",","all_links",",","regex_skip_sections_start",",","regex_skip_sections_end",",",")",":","section_with_references","=","[","]","skip","=","False","for","line","in","section_with_all_links",":","if","skip",":","if","re",".","search","(","regex_skip_sections_end",",","line",")",":","skip","=","False","elif","not","skip",":","if","re",".","search","(","regex_skip_sections_start",",","line",")",":","skip","=","True","else",":","for","i",",","link","in","enumerate","(","all_links",")",":","link_to_reference","=","']('","+","str","(","link",")","+","')'","# i is incremented by one in order to start references indexes at 
1","line","=","line",".","replace","(","link_to_reference",",","']['","+","str","(","i","+","1",")","+","']'",")","section_with_references",".","append","(","line",")","# By the end of the for loop skip should always be false otherwise it means that a codeblock is not closed.","if","skip",":","raise","ValueError","return","section_with_references"],"url":"https:\/\/github.com\/DataDog\/documentation\/blob\/f7f6fb076d3739c65b5e60419f1be7fd934a30d4\/local\/bin\/py\/build\/actions\/format_link.py#L221-L261"}
{"nwo":"DataDog\/documentation","sha":"f7f6fb076d3739c65b5e60419f1be7fd934a30d4","path":"local\/bin\/py\/build\/actions\/format_link.py","language":"python","identifier":"process_section","parameters":"(section, regex_skip_sections_start,\n regex_skip_sections_end)","argument_list":"","return_statement":"return section_with_references","docstring":"Goes through a section. A section is an array of lines from a file and format all links to transform them into reference link.\n\n :param section: Array of lines to analyse.\n :param regex_skip_sections_start: regex defining the start line that indicates a block of line that shouldn't be processed.\n :param regex_skip_sections_end: regex defining the end line that indicates a block of line that shouldn't be processed.\n :return section_with_references: Returns the section but with all link set as reference.","docstring_summary":"Goes through a section. A section is an array of lines from a file and format all links to transform them into reference link.","docstring_tokens":["Goes","through","a","section",".","A","section","is","an","array","of","lines","from","a","file","and","format","all","links","to","transform","them","into","reference","link","."],"function":"def process_section(section, regex_skip_sections_start,\n regex_skip_sections_end):\n \"\"\"\n Goes through a section. A section is an array of lines from a file and format all links to transform them into reference link.\n\n :param section: Array of lines to analyse.\n :param regex_skip_sections_start: regex defining the start line that indicates a block of line that shouldn't be processed.\n :param regex_skip_sections_end: regex defining the end line that indicates a block of line that shouldn't be processed.\n :return section_with_references: Returns the section but with all link set as reference.\n \"\"\"\n try:\n all_references, section_without_references = remove_reference(\n section, regex_skip_sections_start, regex_skip_sections_end)\n except AssertionError:\n print('\\x1b[31mERROR\\x1b[0m: Some references are duplicated.')\n raise AssertionError\n except ValueError:\n print('\\x1b[31mERROR\\x1b[0m: A skip section is not closed')\n raise ValueError\n\n # Inlining refrences\n # Looking at each line, it replaces reference link [.*][\\d] by the full inlined link\n\n section_with_all_links = \\\n inlining_all_links(section_without_references, all_references,\n regex_skip_sections_start,\n regex_skip_sections_end)\n\n # Collecting all links from file\n # Looking at each line, it extracts all links it can found and add it to all_links array\n\n all_links = collect_all_links(section_with_all_links,\n regex_skip_sections_start,\n regex_skip_sections_end)\n\n # Now that all link are extracted, it creates a new section with all inlined referenced link\n\n section_with_references = \\\n transform_link_to_references(section_with_all_links, all_links,\n regex_skip_sections_start, regex_skip_sections_end)\n\n # Finally it adds all refrerences at the end of the section\n\n for i, link in enumerate(all_links):\n\n # 1 is added to i in order to start references link index at 1 instead of 0\n\n section_with_references.append('[' + str(i + 1) + ']: ' + link + '\\n')\n\n return 
section_with_references","function_tokens":["def","process_section","(","section",",","regex_skip_sections_start",",","regex_skip_sections_end",")",":","try",":","all_references",",","section_without_references","=","remove_reference","(","section",",","regex_skip_sections_start",",","regex_skip_sections_end",")","except","AssertionError",":","print","(","'\\x1b[31mERROR\\x1b[0m: Some references are duplicated.'",")","raise","AssertionError","except","ValueError",":","print","(","'\\x1b[31mERROR\\x1b[0m: A skip section is not closed'",")","raise","ValueError","# Inlining refrences","# Looking at each line, it replaces reference link [.*][\\d] by the full inlined link","section_with_all_links","=","inlining_all_links","(","section_without_references",",","all_references",",","regex_skip_sections_start",",","regex_skip_sections_end",")","# Collecting all links from file","# Looking at each line, it extracts all links it can found and add it to all_links array","all_links","=","collect_all_links","(","section_with_all_links",",","regex_skip_sections_start",",","regex_skip_sections_end",")","# Now that all link are extracted, it creates a new section with all inlined referenced link","section_with_references","=","transform_link_to_references","(","section_with_all_links",",","all_links",",","regex_skip_sections_start",",","regex_skip_sections_end",")","# Finally it adds all refrerences at the end of the section","for","i",",","link","in","enumerate","(","all_links",")",":","# 1 is added to i in order to start references link index at 1 instead of 0","section_with_references",".","append","(","'['","+","str","(","i","+","1",")","+","']: '","+","link","+","'\\n'",")","return","section_with_references"],"url":"https:\/\/github.com\/DataDog\/documentation\/blob\/f7f6fb076d3739c65b5e60419f1be7fd934a30d4\/local\/bin\/py\/build\/actions\/format_link.py#L264-L313"}
{"nwo":"DataDog\/documentation","sha":"f7f6fb076d3739c65b5e60419f1be7fd934a30d4","path":"local\/bin\/py\/build\/actions\/format_link.py","language":"python","identifier":"inline_section","parameters":"(file_prepared)","argument_list":"","return_statement":"return ''.join(final_text)","docstring":"Takes a prepared file (and Array of sections prepared) and transform it into the final file that can be written back.\n\n :param file_prepared: Array of sections, where the first item is the main section, an all the other one are sub sections in order of appearance.\n :return final_text: Returns text with all sections with reference link inlined in the main section.","docstring_summary":"Takes a prepared file (and Array of sections prepared) and transform it into the final file that can be written back.","docstring_tokens":["Takes","a","prepared","file","(","and","Array","of","sections","prepared",")","and","transform","it","into","the","final","file","that","can","be","written","back","."],"function":"def inline_section(file_prepared):\n \"\"\"\n Takes a prepared file (and Array of sections prepared) and transform it into the final file that can be written back.\n\n :param file_prepared: Array of sections, where the first item is the main section, an all the other one are sub sections in order of appearance.\n :return final_text: Returns text with all sections with reference link inlined in the main section.\n \"\"\"\n # inlining sections\n\n final_text = []\n\n end_section_pattern = r\"\\s*{{% \/tab %}}.*\"\n end_lang_section_pattern = r\"\\s*{{< \/programming-lang >}}.*\"\n\n i = 1\n\n try:\n for line in file_prepared[0]:\n if (re.match(end_section_pattern, line) or re.match(end_lang_section_pattern, line)):\n final_text += file_prepared[i]\n i += 1\n final_text.append(line)\n except:\n raise ValueError\n\n return ''.join(final_text)","function_tokens":["def","inline_section","(","file_prepared",")",":","# inlining sections","final_text","=","[","]","end_section_pattern","=","r\"\\s*{{% \/tab %}}.*\"","end_lang_section_pattern","=","r\"\\s*{{< \/programming-lang >}}.*\"","i","=","1","try",":","for","line","in","file_prepared","[","0","]",":","if","(","re",".","match","(","end_section_pattern",",","line",")","or","re",".","match","(","end_lang_section_pattern",",","line",")",")",":","final_text","+=","file_prepared","[","i","]","i","+=","1","final_text",".","append","(","line",")","except",":","raise","ValueError","return","''",".","join","(","final_text",")"],"url":"https:\/\/github.com\/DataDog\/documentation\/blob\/f7f6fb076d3739c65b5e60419f1be7fd934a30d4\/local\/bin\/py\/build\/actions\/format_link.py#L316-L341"}
{"nwo":"DataDog\/documentation","sha":"f7f6fb076d3739c65b5e60419f1be7fd934a30d4","path":"local\/bin\/py\/build\/actions\/format_link.py","language":"python","identifier":"format_link_file","parameters":"(file, regex_skip_sections_start,\n regex_skip_sections_end)","argument_list":"","return_statement":"return file_with_references","docstring":"Take a file and transform all links into reference link within this file.\n\n :param file: Array of lines to analyse.\n :param regex_skip_sections_start: regex defining the start line that indicates a block of line that shouldn't be processed.\n :param regex_skip_sections_end: regex defining the end line that indicates a block of line that shouldn't be processed.\n :return final_text: Returns the file but with all links set as reference.","docstring_summary":"Take a file and transform all links into reference link within this file.","docstring_tokens":["Take","a","file","and","transform","all","links","into","reference","link","within","this","file","."],"function":"def format_link_file(file, regex_skip_sections_start,\n regex_skip_sections_end):\n \"\"\"\n Take a file and transform all links into reference link within this file.\n\n :param file: Array of lines to analyse.\n :param regex_skip_sections_start: regex defining the start line that indicates a block of line that shouldn't be processed.\n :param regex_skip_sections_end: regex defining the end line that indicates a block of line that shouldn't be processed.\n :return final_text: Returns the file but with all links set as reference.\n \"\"\"\n\n try:\n prepared_file = prepare_file(file)\n except ValueError:\n print(\"\\x1b[31mERROR\\x1b[0m: Couldn't split the file into multiple section correctly for file: {}\".format(file))\n raise ValueError\n\n final_text = []\n\n for section in prepared_file:\n try:\n final_text.append(process_section(section,\n regex_skip_sections_start,\n regex_skip_sections_end))\n except:\n print(\n '\\x1b[31mERROR\\x1b[0m: There was an issue processing a section for file: {}'.format(file))\n raise ValueError\n\n try:\n file_with_references = inline_section(final_text)\n except ValueError:\n print(\n '\\x1b[31mERROR\\x1b[0m: Could not inline sections properly for file: {}'.format(file))\n raise ValueError\n\n return file_with_references","function_tokens":["def","format_link_file","(","file",",","regex_skip_sections_start",",","regex_skip_sections_end",")",":","try",":","prepared_file","=","prepare_file","(","file",")","except","ValueError",":","print","(","\"\\x1b[31mERROR\\x1b[0m: Couldn't split the file into multiple section correctly for file: {}\"",".","format","(","file",")",")","raise","ValueError","final_text","=","[","]","for","section","in","prepared_file",":","try",":","final_text",".","append","(","process_section","(","section",",","regex_skip_sections_start",",","regex_skip_sections_end",")",")","except",":","print","(","'\\x1b[31mERROR\\x1b[0m: There was an issue processing a section for file: {}'",".","format","(","file",")",")","raise","ValueError","try",":","file_with_references","=","inline_section","(","final_text",")","except","ValueError",":","print","(","'\\x1b[31mERROR\\x1b[0m: Could not inline sections properly for file: {}'",".","format","(","file",")",")","raise","ValueError","return","file_with_references"],"url":"https:\/\/github.com\/DataDog\/documentation\/blob\/f7f6fb076d3739c65b5e60419f1be7fd934a30d4\/local\/bin\/py\/build\/actions\/format_link.py#L344-L380"}