#!/usr/bin/env python

Import('env')

env.editor_sources = []

import os

from compat import encode_utf8, byte_to_str, open_utf8, escape_string

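# Build callback: embeds the SSL certificate bundle passed as source into the
# target header as a zlib-compressed byte array.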
def make_certs_header(target, source, env):

    src = source[0].srcnode().abspath
    dst = target[0].srcnode().abspath
    f = open(src, "rb")
    g = open_utf8(dst, "w")
    buf = f.read()
    decomp_size = len(buf)
    import zlib
    buf = zlib.compress(buf)

    g.write("/* THIS FILE IS GENERATED DO NOT EDIT */\n")
    g.write("#ifndef _CERTS_RAW_H\n")
    g.write("#define _CERTS_RAW_H\n")
    g.write("static const int _certs_compressed_size=" + str(len(buf)) + ";\n")
    g.write("static const int _certs_uncompressed_size=" + str(decomp_size) + ";\n")
    g.write("static const unsigned char _certs_compressed[]={\n")
    for i in range(len(buf)):
        g.write(byte_to_str(buf[i]) + ",\n")
    g.write("};\n")
    g.write("#endif")

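# Build callback: concatenates the class reference XML files passed as source
# and embeds the result into the target header as a zlib-compressed byte array.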
def make_doc_header(target, source, env):

    dst = target[0].srcnode().abspath
    g = open_utf8(dst, "w")
    buf = ""
    docbegin = ""
    docend = ""
    for s in source:
        src = s.srcnode().abspath
        f = open_utf8(src, "r")
        content = f.read()
        buf += content

    buf = encode_utf8(docbegin + buf + docend)
    decomp_size = len(buf)
    import zlib
    buf = zlib.compress(buf)

    g.write("/* THIS FILE IS GENERATED DO NOT EDIT */\n")
    g.write("#ifndef _DOC_DATA_RAW_H\n")
    g.write("#define _DOC_DATA_RAW_H\n")
    g.write("static const int _doc_data_compressed_size=" + str(len(buf)) + ";\n")
    g.write("static const int _doc_data_uncompressed_size=" + str(decomp_size) + ";\n")
    g.write("static const unsigned char _doc_data_compressed[]={\n")
    for i in range(len(buf)):
        g.write(byte_to_str(buf[i]) + ",\n")
    g.write("};\n")

    g.write("#endif")

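# Build callback: embeds each editor font file passed as source into the target
# header as an uncompressed byte array.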
def make_fonts_header(target, source, env):

    dst = target[0].srcnode().abspath

    g = open_utf8(dst, "w")

    g.write("/* THIS FILE IS GENERATED DO NOT EDIT */\n")
    g.write("#ifndef _EDITOR_FONTS_H\n")
    g.write("#define _EDITOR_FONTS_H\n")

    # saving uncompressed, since freetype will reference from memory pointer
    xl_names = []
    for i in range(len(source)):
        f = open(source[i].srcnode().abspath, "rb")
        buf = f.read()
        import os.path

        name = os.path.splitext(os.path.basename(source[i].srcnode().abspath))[0]

        g.write("static const int _font_" + name + "_size=" + str(len(buf)) + ";\n")
        g.write("static const unsigned char _font_" + name + "[]={\n")
        for i in range(len(buf)):
            g.write(byte_to_str(buf[i]) + ",\n")

        g.write("};\n")

    g.write("#endif")

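# Build callback: embeds each editor translation (.po) file passed as source
# into the target header as a zlib-compressed byte array, plus an index table
# listing language name and sizes.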
def make_translations_header(target, source, env):

    dst = target[0].srcnode().abspath

    g = open_utf8(dst, "w")

    g.write("/* THIS FILE IS GENERATED DO NOT EDIT */\n")
    g.write("#ifndef _EDITOR_TRANSLATIONS_H\n")
    g.write("#define _EDITOR_TRANSLATIONS_H\n")

    import zlib
    import os.path

    paths = [node.srcnode().abspath for node in source]
    sorted_paths = sorted(paths, key=lambda path: os.path.splitext(os.path.basename(path))[0])

    xl_names = []
    for i in range(len(sorted_paths)):
        f = open(sorted_paths[i], "rb")
        buf = f.read()
        decomp_size = len(buf)
        buf = zlib.compress(buf)
        name = os.path.splitext(os.path.basename(sorted_paths[i]))[0]

        #g.write("static const int _translation_"+name+"_compressed_size="+str(len(buf))+";\n")
        #g.write("static const int _translation_"+name+"_uncompressed_size="+str(decomp_size)+";\n")
        g.write("static const unsigned char _translation_" + name + "_compressed[]={\n")
        for i in range(len(buf)):
            g.write(byte_to_str(buf[i]) + ",\n")

        g.write("};\n")

        xl_names.append([name, len(buf), str(decomp_size)])

    g.write("struct EditorTranslationList {\n")
    g.write("\tconst char* lang;\n")
    g.write("\tint comp_size;\n")
    g.write("\tint uncomp_size;\n")
    g.write("\tconst unsigned char* data;\n")
    g.write("};\n\n")
    g.write("static EditorTranslationList _editor_translations[]={\n")
    for x in xl_names:
        g.write("\t{ \"" + x[0] + "\", " + str(x[1]) + ", " + str(x[2]) + ",_translation_" + x[0] + "_compressed},\n")
    g.write("\t{NULL,0,0,NULL}\n")
    g.write("};\n")

    g.write("#endif")

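# Build callback: parses the AUTHORS.md file passed as source and writes one
# C string array per recognized section into the target header.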
def make_authors_header(target, source, env):

    sections = ["Project Founders", "Lead Developer", "Project Manager", "Developers"]
    sections_id = ["dev_founders", "dev_lead", "dev_manager", "dev_names"]

    src = source[0].srcnode().abspath
    dst = target[0].srcnode().abspath
    f = open_utf8(src, "r")
    g = open_utf8(dst, "w")

    g.write("/* THIS FILE IS GENERATED DO NOT EDIT */\n")
    g.write("#ifndef _EDITOR_AUTHORS_H\n")
    g.write("#define _EDITOR_AUTHORS_H\n")

    current_section = ""
    reading = False

    def close_section():
        g.write("\t0\n")
        g.write("};\n")

    for line in f:
        if reading:
            if line.startswith(" "):
                g.write("\t\"" + escape_string(line.strip()) + "\",\n")
                continue
        if line.startswith("## "):
            if reading:
                close_section()
                reading = False
            for i in range(len(sections)):
                if line.strip().endswith(sections[i]):
                    current_section = escape_string(sections_id[i])
                    reading = True
                    g.write("static const char *" + current_section + "[] = {\n")
                    break

    if reading:
        close_section()

    g.write("#endif\n")

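# Build callback: same parsing as make_authors_header, applied to the DONORS.md
# file passed as source and its sponsor/donor sections.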
def make_donors_header(target, source, env):

    sections = ["Platinum sponsors", "Gold sponsors", "Mini sponsors", "Gold donors", "Silver donors", "Bronze donors"]
    sections_id = ["donor_s_plat", "donor_s_gold", "donor_s_mini", "donor_gold", "donor_silver", "donor_bronze"]

    src = source[0].srcnode().abspath
    dst = target[0].srcnode().abspath
    f = open_utf8(src, "r")
    g = open_utf8(dst, "w")

    g.write("/* THIS FILE IS GENERATED DO NOT EDIT */\n")
    g.write("#ifndef _EDITOR_DONORS_H\n")
    g.write("#define _EDITOR_DONORS_H\n")

    current_section = ""
    reading = False

    def close_section():
        g.write("\t0\n")
        g.write("};\n")

    for line in f:
        if reading:
            if line.startswith(" "):
                g.write("\t\"" + escape_string(line.strip()) + "\",\n")
                continue
        if line.startswith("## "):
            if reading:
                close_section()
                reading = False
            for i in range(len(sections)):
                if line.strip().endswith(sections[i]):
                    current_section = escape_string(sections_id[i])
                    reading = True
                    g.write("static const char *" + current_section + "[] = {\n")
                    break

    if reading:
        close_section()

    g.write("#endif\n")

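# Build callback: embeds LICENSE.txt as one C string and parses COPYRIGHT.txt
# (debian copyright format) into arrays describing third-party components,
# their copyright notices and license texts.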
def make_license_header(target, source, env):

    src_copyright = source[0].srcnode().abspath
    src_license = source[1].srcnode().abspath
    dst = target[0].srcnode().abspath
    f = open_utf8(src_license, "r")
    fc = open_utf8(src_copyright, "r")
    g = open_utf8(dst, "w")

    g.write("/* THIS FILE IS GENERATED DO NOT EDIT */\n")
    g.write("#ifndef _EDITOR_LICENSE_H\n")
    g.write("#define _EDITOR_LICENSE_H\n")
    g.write("static const char *about_license =")

    for line in f:
        escaped_string = escape_string(line.strip())
        g.write("\n\t\"" + escaped_string + "\\n\"")

    g.write(";\n")

    tp_current = 0
    tp_file = ""
    tp_comment = ""
    tp_copyright = ""
    tp_license = ""

    tp_licensename = ""
    tp_licensebody = ""

    tp = []
    tp_licensetext = []
    for line in fc:
        if line.startswith("#"):
            continue

        if line.startswith("Files:"):
            tp_file = line[6:].strip()
            tp_current = 1
        elif line.startswith("Comment:"):
            tp_comment = line[8:].strip()
            tp_current = 2
        elif line.startswith("Copyright:"):
            tp_copyright = line[10:].strip()
            tp_current = 3
        elif line.startswith("License:"):
            if tp_current != 0:
                tp_license = line[8:].strip()
                tp_current = 4
            else:
                tp_licensename = line[8:].strip()
                tp_current = 5
        elif line.startswith(" "):
            if tp_current == 1:
                tp_file += "\n" + line.strip()
            elif tp_current == 3:
                tp_copyright += "\n" + line.strip()
            elif tp_current == 5:
                if line.strip() == ".":
                    tp_licensebody += "\n"
                else:
                    tp_licensebody += line[1:]
        else:
            if tp_current != 0:
                if tp_current == 5:
                    tp_licensetext.append([tp_licensename, tp_licensebody])

                    tp_licensename = ""
                    tp_licensebody = ""
                else:
                    added = False
                    for i in tp:
                        if i[0] == tp_comment:
                            i[1].append([tp_file, tp_copyright, tp_license])
                            added = True
                            break
                    if not added:
                        tp.append([tp_comment, [[tp_file, tp_copyright, tp_license]]])
                    tp_file = ""
                    tp_comment = ""
                    tp_copyright = ""
                    tp_license = ""

                tp_current = 0

    tp_licensetext.append([tp_licensename, tp_licensebody])

    about_thirdparty = ""
    about_tp_copyright_count = ""
    about_tp_license = ""
    about_tp_copyright = ""
    about_tp_file = ""

    for i in tp:
        about_thirdparty += "\t\"" + i[0] + "\",\n"
        about_tp_copyright_count += str(len(i[1])) + ", "
        for j in i[1]:
            file_body = ""
            copyright_body = ""
            for k in j[0].split("\n"):
                if file_body != "":
                    file_body += "\\n\"\n"
                escaped_string = escape_string(k.strip())
                file_body += "\t\"" + escaped_string
            for k in j[1].split("\n"):
                if copyright_body != "":
                    copyright_body += "\\n\"\n"
                escaped_string = escape_string(k.strip())
                copyright_body += "\t\"" + escaped_string

            about_tp_file += "\t" + file_body + "\",\n"
            about_tp_copyright += "\t" + copyright_body + "\",\n"
            about_tp_license += "\t\"" + j[2] + "\",\n"

    about_license_name = ""
    about_license_body = ""

    for i in tp_licensetext:
        body = ""
        for j in i[1].split("\n"):
            if body != "":
                body += "\\n\"\n"
            escaped_string = escape_string(j.strip())
            body += "\t\"" + escaped_string

        about_license_name += "\t\"" + i[0] + "\",\n"
        about_license_body += "\t" + body + "\",\n"

    g.write("static const char *about_thirdparty[] = {\n")
    g.write(about_thirdparty)
    g.write("\t0\n")
    g.write("};\n")
    g.write("#define THIRDPARTY_COUNT " + str(len(tp)) + "\n")

    g.write("static const int about_tp_copyright_count[] = {\n\t")
    g.write(about_tp_copyright_count)
    g.write("0\n};\n")

    g.write("static const char *about_tp_file[] = {\n")
    g.write(about_tp_file)
    g.write("\t0\n")
    g.write("};\n")

    g.write("static const char *about_tp_copyright[] = {\n")
    g.write(about_tp_copyright)
    g.write("\t0\n")
    g.write("};\n")

    g.write("static const char *about_tp_license[] = {\n")
    g.write(about_tp_license)
    g.write("\t0\n")
    g.write("};\n")

    g.write("static const char *about_license_name[] = {\n")
    g.write(about_license_name)
    g.write("\t0\n")
    g.write("};\n")
    g.write("#define LICENSE_COUNT " + str(len(tp_licensetext)) + "\n")

    g.write("static const char *about_license_body[] = {\n")
    g.write(about_license_body)
    g.write("\t0\n")
    g.write("};\n")

    g.write("#endif\n")

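# Writes doc_data_class_path.gen.h: a lookup table mapping the class names in
# env.doc_class_path to their documentation directory.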
def _make_doc_data_class_path(to_path):

    g = open_utf8(os.path.join(to_path, "doc_data_class_path.gen.h"), "w")

    g.write("static const int _doc_data_class_path_count=" + str(len(env.doc_class_path)) + ";\n")
    g.write("struct _DocDataClassPath { const char* name; const char* path; };\n")

    g.write("static const _DocDataClassPath _doc_data_class_paths[" + str(len(env.doc_class_path) + 1) + "]={\n")
    for c in env.doc_class_path:
        g.write("{\"" + c + "\",\"" + env.doc_class_path[c] + "\"},\n")
    g.write("{NULL,NULL}\n")
    g.write("};\n")

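# Editor-only build steps: register the generator commands for the embedded
# data headers above and build the editor library. Skipped when tools are
# disabled.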
if env['tools']:
    # Register exporters
    reg_exporters_inc = '#include "register_exporters.h"\n'
    reg_exporters = 'void register_exporters() {\n'
    for e in env.platform_exporters:
        env.editor_sources.append("#platform/" + e + "/export/export.cpp")
        reg_exporters += '\tregister_' + e + '_exporter();\n'
        reg_exporters_inc += '#include "platform/' + e + '/export/export.h"\n'
    reg_exporters += '}\n'
    f = open_utf8("register_exporters.gen.cpp", "w")
    f.write(reg_exporters_inc)
    f.write(reg_exporters)
    f.close()

    # API documentation
    docs = []
    print("cdir is: " + env.Dir('#').abspath)
    for f in os.listdir(os.path.join(env.Dir('#').abspath, "doc/classes")):
        docs.append("#doc/classes/" + f)

    _make_doc_data_class_path(os.path.join(env.Dir('#').abspath, "editor/doc"))

    env.Depends("#editor/doc_data_compressed.gen.h", docs)
    env.Command("#editor/doc_data_compressed.gen.h", docs, make_doc_header)

    # Certificates
    env.Depends("#editor/certs_compressed.gen.h", "#thirdparty/certs/ca-certificates.crt")
    env.Command("#editor/certs_compressed.gen.h", "#thirdparty/certs/ca-certificates.crt", make_certs_header)

    import glob
    path = env.Dir('.').abspath

    # Translations
    tlist = glob.glob(path + "/translations/*.po")
    env.Depends('#editor/translations.gen.h', tlist)
    env.Command('#editor/translations.gen.h', tlist, make_translations_header)

    # Fonts
    flist = glob.glob(path + "/../thirdparty/fonts/*.ttf")
    flist.extend(glob.glob(path + "/../thirdparty/fonts/*.otf"))
    env.Depends('#editor/builtin_fonts.gen.h', flist)
    env.Command('#editor/builtin_fonts.gen.h', flist, make_fonts_header)

    # Authors
    env.Depends('#editor/authors.gen.h', "../AUTHORS.md")
    env.Command('#editor/authors.gen.h', "../AUTHORS.md", make_authors_header)

    # Donors
    env.Depends('#editor/donors.gen.h', "../DONORS.md")
    env.Command('#editor/donors.gen.h', "../DONORS.md", make_donors_header)

    # License
    env.Depends('#editor/license.gen.h', ["../COPYRIGHT.txt", "../LICENSE.txt"])
    env.Command('#editor/license.gen.h', ["../COPYRIGHT.txt", "../LICENSE.txt"], make_license_header)

    env.add_source_files(env.editor_sources, "*.cpp")

    SConscript('collada/SCsub')
    SConscript('doc/SCsub')
    SConscript('fileserver/SCsub')
    SConscript('icons/SCsub')
    SConscript('import/SCsub')
    SConscript('plugins/SCsub')

    lib = env.Library("editor", env.editor_sources)
    env.Prepend(LIBS=[lib])

Export('env')