aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
Diffstat (limited to 'ebuildgen')
-rw-r--r--ebuildgen/__init__.py1
-rwxr-xr-xebuildgen/cli.py102
-rw-r--r--ebuildgen/ebuildoutput.py149
-rw-r--r--ebuildgen/filetypes/__init__.py0
-rw-r--r--ebuildgen/filetypes/acif.py145
-rw-r--r--ebuildgen/filetypes/autoconf.py484
-rw-r--r--ebuildgen/filetypes/automake.py342
-rw-r--r--ebuildgen/filetypes/ctypefiles.py229
-rw-r--r--ebuildgen/filetypes/makefilecom.py396
-rw-r--r--ebuildgen/filetypes/makefiles.py479
-rw-r--r--ebuildgen/linkdeps.py108
-rw-r--r--ebuildgen/scanfiles.py187
-rw-r--r--ebuildgen/scmprojects.py25
13 files changed, 2647 insertions, 0 deletions
diff --git a/ebuildgen/__init__.py b/ebuildgen/__init__.py
new file mode 100644
index 0000000..e0a8d1b
--- /dev/null
+++ b/ebuildgen/__init__.py
@@ -0,0 +1 @@
+#Placeholder
diff --git a/ebuildgen/cli.py b/ebuildgen/cli.py
new file mode 100755
index 0000000..6457853
--- /dev/null
+++ b/ebuildgen/cli.py
@@ -0,0 +1,102 @@
+#!/usr/bin/python3
+
+import argparse
+import ebuildgen.scanfiles as scanfiles
+import ebuildgen.linkdeps as linkdeps
+import ebuildgen.ebuildoutput as ebuildoutput
+from ebuildgen.scmprojects import getsourcecode
+
+def cli():
+ parser = argparse.ArgumentParser(
+ description="Generate ebuilds for autotools projects",
+ epilog="Example: genebuild --svn <url>")
+
+ parser.add_argument("url")
+ parser.add_argument("-t", "--types", metavar="filetype", nargs="+",
+ default=[".c",".cpp",".h"],
+ help="what filetypes it should scan")
+ parser.add_argument("-g", "--ginc", action="store_true",
+ help="print global includes")
+ parser.add_argument("-l", "--linc", action="store_true",
+ help="print local includes")
+ parser.add_argument("-d", "--ifdef", action="store_true",
+ help="print includes the depends on ifdefs")
+ parser.add_argument("-q", "--quiet", action="store_true",
+ help="don't print anything (doesn't work ATM)") #this needs work...
+
+ parser.add_argument("--svn", action="store_true",
+ help="this is a SVN project")
+ parser.add_argument("--git", action="store_true",
+ help="this is a GIT project")
+ parser.add_argument("--hg", action="store_true",
+ help="this is a HG project")
+
+ args = parser.parse_args()
+
+ #print(args.dir)
+ #print(args.types)
+
+ #inclst is a list of includes. First in it is global then local.
+ if args.svn:
+ getsourcecode(args.dir,"svn")
+ srcdir = "/tmp/ebuildgen/curproj"
+ dltype = "svn"
+ elif args.git:
+ getsourcecode(args.dir,"git")
+ srcdir = "/tmp/ebuildgen/curproj"
+ dltype = "git"
+ elif args.hg:
+ getsourcecode(args.dir,"hg")
+ srcdir = "/tmp/ebuildgen/curproj"
+ dltype = "hg"
+ else:
+ srcdir = args.dir
+ dltype = "www"
+
+ (iuse,inclst,useargs) = scanfiles.scanproject(srcdir,"autotools")
+ targets = [["install"]]
+ binaries = []
+ gpackages = set()
+ for dep in inclst[0]:
+ newpack = linkdeps.deptopackage(dep,[])
+ if newpack:
+ gpackages.add(newpack)
+ #print(gpackages)
+ if "__cplusplus" in inclst[2]:
+ for dep in inclst[2]["__cplusplus"][0]:
+ newpack = linkdeps.deptopackage(dep,[])
+ if newpack:
+ gpackages.add(newpack)
+
+ usedeps = {}
+ for use in useargs:
+ packages = set()
+ for dep in useargs[use][0]:
+ newpack = linkdeps.deptopackage(dep,[])
+ if newpack and not newpack in gpackages:
+ packages.add(newpack)
+ if "__cplusplus" in useargs[use][2]:
+ for dep in useargs[use][2]["__cplusplus"][0]:
+ newpack = linkdeps.deptopackage(dep,[])
+ if newpack and not newpack in gpackages:
+ packages.add(newpack)
+ usedeps[use] = packages
+
+ #print(usedeps)
+ #print(iuse)
+ ebuildoutput.genebuild(iuse,gpackages,usedeps,dltype,args.dir,targets,binaries)
+
+ if args.ginc == args.linc == args.ifdef == args.quiet == False:
+ print(inclst)
+ print(gpackages)
+
+ if args.ginc:
+ print(inclst[0])
+ if args.linc:
+ print(inclst[1])
+
+ if args.ifdef:
+ for name in inclst[2]:
+ print(name)
+ print(inclst[2][name][0])
+ print(inclst[2][name][1])
diff --git a/ebuildgen/ebuildoutput.py b/ebuildgen/ebuildoutput.py
new file mode 100644
index 0000000..a8ca3a6
--- /dev/null
+++ b/ebuildgen/ebuildoutput.py
@@ -0,0 +1,149 @@
+from time import strftime
+from subprocess import getstatusoutput
+
+eclass = {
+ "git" : "git",
+ "svn" : "subversion",
+ "hg" : "mercurial",
+ }
+
+arch = getstatusoutput("portageq envvar ARCH")[1]
+
+def genebuild(iuse,deps,usedeps,dltype,adress,targets,binaries):
+ """This function starts the ebuild generation.
+
+ You have to provide the following args in order:
+ iuse, a list of useflags
+    deps, a list of dependencies
+ dltype, how to download the source code (wget,GIT,etc)
+    adress, Address of the source code
+ targets, a list of build targets for the project (used to guess install method)
+    binaries, a list of binaries that are created during compile (used to install them if there is no 'make install')
+ """
+
+ installmethod = guessinstall(targets,binaries)
+ outstr = outputebuild(iuse,deps,usedeps,dltype,adress,installmethod)
+ f = open("/tmp/ebuildgen/generated.ebuild","w")
+ f.write(outstr)
+ f.close()
+
+def guessinstall(targets,binaries):
+ """Guess the install method of the project
+
+ Looks at the make targets for a 'make install'
+ if that fails just install the binaries
+ """
+
+ targetlst = []
+ returnlst = []
+ for target in targets:
+ targetlst.append(target[0])
+
+ if "install" in targetlst:
+ returnlst = [' emake DESTDIR="${D}" install || die "emake install failed"']
+ else:
+ for binary in binaries:
+ returnlst += [' dobin ' + binary + ' || die "bin install failed"']
+
+ return returnlst
+
+def outputebuild(iuse,deps,usedeps,dltype,adress,installmethod):
+ """Used to generate the text for the ebuild to output
+
+ Generates text with the help of the supplied variables
+ """
+
+ text = [
+ '# Copyright 1999-' + strftime("%Y") + ' Gentoo Foundation',
+ '# Distributed under the terms of the GNU General Public License v2',
+ '# $Header: $',
+ ''
+ ]
+ inheritstr = 'inherit ' + eclass[dltype] + ' autotools'
+ text.append(inheritstr)
+
+ text += [
+ '',
+ 'EAPI=3',
+ '',
+ 'DESCRIPTION=""',
+ 'HOMEPAGE=""'
+ ]
+ if dltype == "www":
+ srcstr = 'SRC_URI="' + adress + '"'
+ else:
+ srcstr = 'E' + dltype.upper() + '_REPO_URI="' + adress + '"'
+ text.append(srcstr)
+
+ text += [
+ '',
+ 'LICENSE=""',
+ 'SLOT="0"',
+ 'KEYWORDS="~' + arch + '"'
+ ]
+ iusestr = 'IUSE="'
+ for flag in iuse:
+ iusestr += (flag.split("_")[1] + " ")
+ iusestr += '"\n'
+
+ text.append(iusestr)
+
+ depstr = 'DEPEND="'
+ for dep in deps:
+ depstr += (dep + "\n\t")
+
+ for use in usedeps:
+ #check if packagelist is empty
+ if usedeps[use]:
+ if use[0] == "!":
+ depstr += "!" + use.split("_")[1] + "? ( "
+ else:
+ depstr += use.split("_")[1] + "? ( "
+
+ for dep in usedeps[use]:
+ depstr += dep +"\n\t\t"
+ depstr = depstr[:-3]
+ depstr += " )\n\t"
+
+ depstr = depstr[:-2] + '"\nRDEPEND="${DEPEND}"'
+ text.append(depstr)
+
+ text += [
+ '',
+ 'src_prepare() {',
+ '\teautoreconf',
+ '}',
+ ]
+
+ if iuse:
+ text += [
+ '',
+ 'src_configure() {',
+ '\teconf \\',
+ ]
+ for use in iuse:
+ text += ['\t\t$(use_' + use.split("_")[0] + ' ' + use.split("_")[1] + ') \\']
+
+ #add \n here because the ebuild will fail if there is no extra newline between '\' and '}'
+ text += ['\n}']
+
+ text += [
+ '',
+ 'src_compile() {',
+ ' emake || die "emake failed"',
+ '}'
+ ]
+
+ text += [
+ '',
+ 'src_install() {',
+ ]
+ text += installmethod
+
+ text += ['}']
+
+ outputstr = ""
+ for line in text:
+ outputstr += line + "\n"
+
+ return outputstr
diff --git a/ebuildgen/filetypes/__init__.py b/ebuildgen/filetypes/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/ebuildgen/filetypes/__init__.py
diff --git a/ebuildgen/filetypes/acif.py b/ebuildgen/filetypes/acif.py
new file mode 100644
index 0000000..1d3ed29
--- /dev/null
+++ b/ebuildgen/filetypes/acif.py
@@ -0,0 +1,145 @@
+from ply import lex
+from ply import yacc
+
+def parseif(ifoptions):
+ optstr = ""
+ for option in ifoptions:
+ optstr += option + " "
+
+ tokens = (
+ "NOT",
+ "AND",
+ "OR",
+ "EQ",
+ "NEQ",
+ "NONZERO",
+ "SEMICOL",
+ "LBRAC",
+ "RPRAC",
+ "OPT",
+ "TEST",
+ )
+
+ def t_TEST(t):
+ r"test"
+ return t
+
+ def t_AND(t):
+ r"(\-a|\&\&)"
+ return t
+
+ def t_OR(t):
+ r"(\-o|\|\|)"
+ return t
+
+ def t_EQ(t):
+ r"="
+ return t
+
+ def t_NEQ(t):
+ r"\!="
+ return t
+
+ def t_NOT(t):
+ r"\!"
+ return t
+
+ def t_NONZERO(t):
+ r"\-n"
+ return t
+
+ def t_SEMICOL(t):
+ r";"
+ pass
+
+ def t_LBRAC(t):
+ r"\{"
+ return t
+
+ def t_RPRAC(t):
+ r"\}"
+ return t
+
+ def t_space(t):
+ r"[ \t\n]"
+ pass
+
+ def t_quote(t):
+ r"[\"\']"
+ pass
+
+ def t_OPT(t):
+ r"[^ \t\n;\"\']+"
+ return t
+
+ def t_ANY_error(t):
+ print("Illegal character '%s'" % t.value[0],t.lexer.lineno)
+ t.lexer.skip(1)
+
+ lexer = lex.lex()
+
+ #lexer.input(optstr)
+ #for tok in lexer:
+ # print(tok)
+
+ #YACC
+ #Add more cases!
+
+ def p_exp(p):
+ """
+ exp : NOT TEST expopt
+ | TEST expopt
+ """
+ if len(p) == 4:
+ newlst = []
+ while len(newlst) < len(p[3]):
+ if p[3][len(newlst)+1][0] == "!":
+ newresult = p[3][len(newlst)+1][1:]
+ else:
+ newresult = "!" + p[3][len(newlst)+1]
+
+ newlst += [p[3][len(newlst)],newresult]
+
+ p[0] = newlst
+
+ else:
+ p[0] = p[2]
+
+ def p_expopt(p):
+ """
+ expopt : expopt AND expopt
+ | expopt OR expopt
+ """
+ if p[2] == "-a":
+ p[0] = p[1] + p[3]
+ else: #come up with something better
+ p[0] = p[1] + p[3]
+
+ def p_expopt2(p):
+ """
+ expopt : OPT EQ OPT
+ | OPT NEQ OPT
+ | NONZERO OPT
+ | OPT
+ """
+ if len(p) == 4:
+ if p[2] == "=":
+ varstr = p[1].split("$")
+ p[0] = [varstr[1],p[3][len(varstr[0]):]]
+ #[VARIABLEname,value to pass test]
+
+ elif p[2] == "!=":
+ varstr = p[1].split("$")
+ p[0] = [varstr[1],"!" + p[3][len(varstr[0]):]]
+
+ else:
+ varstr = p[len(p)-1].split("$")[1]
+ p[0] = [varstr, "!"] #req that the variable is nonzero to be True
+
+ def p_error(p):
+ print("syntax error at '%s'" % p.type,p.value)
+ pass
+
+ yacc.yacc()
+ return yacc.parse(optstr)
+
diff --git a/ebuildgen/filetypes/autoconf.py b/ebuildgen/filetypes/autoconf.py
new file mode 100644
index 0000000..d46d133
--- /dev/null
+++ b/ebuildgen/filetypes/autoconf.py
@@ -0,0 +1,484 @@
+from ply import lex
+from ply import yacc
+
+def scanacfile(acfile):
+ """Scan a autoconfigure (.in/.ac) file.
+
+ Returns ....
+ """
+
+ tokens = (
+ "FUNC",
+ "COMPFUNC", #complete func
+ "FUNCOPT", #func options
+ "FUNCEND",
+ "VAR",
+ "ECHO",
+ "TEXT",
+ "IF",
+ "IFCOM",
+ "ELIF",
+ "ELSE",
+ "THEN",
+ "IFEND",
+ "CASE",
+ "CASEOPT",
+ "COPTEND", #case opt end, doesn't need to be there but SHOULD
+ "CASEEND",
+ "COMMA",
+ )
+
+ states = (
+ ("func", "inclusive"),
+ ("funcopt", "exclusive"),
+ ("case", "inclusive"),
+ ("if", "inclusive"),
+ ("shellcom", "exclusive"),
+ )
+
+ def t_contline(t):
+ r"\\\n"
+ t.lexer.lineno += 1
+ pass
+
+ def t_ANY_space(t):
+ r"[ \t]"
+ pass
+
+ def t_newline(t):
+ r"\n"
+ t.lexer.lineno += 1
+ pass
+
+ def t_shfunc(t): #shell func
+ r'[a-zA-Z_][a-zA-Z0-9_]*\(\)[ \t]*{'
+ t.lexer.level = 1
+ t.lexer.push_state("shellcom")
+
+ def t_shellcom_text(t):
+ r"[^{}]+"
+
+ def t_shellcom_opb(t):
+ r"{"
+ t.lexer.level +=1
+
+ def t_shellcom_opc(t):
+ r"}"
+ t.lexer.level -=1
+
+ if t.lexer.level == 0:
+ t.lexer.pop_state()
+ pass
+
+ def t_COMPFUNC(t):
+ r'[a-zA-Z_][a-zA-Z0-9_]*\([^\\[\](\),]*\)'
+ values = t.value.split("(")
+ t.value = [values[0],values[1][:-1]]
+ return t
+
+ def t_FUNC(t):
+ r'[a-zA-Z_][a-zA-Z0-9_]*\('
+ t.lexer.push_state('func')
+ t.value = t.value[:-1] #return name of func
+ return t
+
+ def t_func_funcopt(t):
+ r'\['
+ t.lexer.code_start = t.lexer.lexpos # Record the starting position
+ t.lexer.level = 1 # Initial level
+ t.lexer.push_state('funcopt') # Enter 'ccode' state
+
+ # Rules for the ccode state
+ def t_funcopt_newcom(t):
+ r'\['
+ t.lexer.level +=1
+
+ def t_funcopt_endcom(t):
+ r'\]'
+ t.lexer.level -=1
+
+ # If closing command, return the code fragment
+ if t.lexer.level == 0:
+ t.value = t.lexer.lexdata[t.lexer.code_start-1:t.lexer.lexpos]
+ t.type = "FUNCOPT"
+ t.lexer.lineno += t.value.count('\n')
+ t.lexer.pop_state()
+ return t
+
+ def t_funcopt_opt(t):
+ r"[^\\\[\]]+"
+
+ def t_funcopt_contline(t):
+ r"\\\n"
+
+ def t_func_COMMA(t):
+ r","
+ return t
+
+ def t_func_FUNCEND(t):
+ r"\)"
+ t.lexer.pop_state()
+ return t
+
+ def t_comment(t):
+ r"(dnl|\#).*\n"
+ t.lexer.lineno += t.value.count('\n')
+ pass
+
+ def t_ECHO(t):
+ r"echo.*\n"
+ t.lexer.lineno += t.value.count('\n')
+ return t
+
+ def t_VAR(t):
+ #take var=text, var="text text", var='text text', var=`text text`
+ r"[a-zA-Z_][a-zA-Z0-9_]*=(\"[^\"]*\"|\'[^\']*\'|\`[^\`]*\`|[^() \t,\n]*)+"
+ t.lexer.lineno += t.value.count('\n')
+ return t
+
+ def t_IF(t):
+ r"if"
+ t.lexer.push_state("if")
+ return t
+
+ def t_ELIF(t):
+ r"elif"
+ t.lexer.push_state("if")
+ return t
+
+ def t_if_THEN(t):
+ r"then"
+ t.lexer.pop_state()
+ return t
+
+ def t_if_IFCOM(t):
+ r"[^ \t\n]+"
+ return t
+
+ def t_ELSE(t):
+ r"else"
+ return t
+
+ def t_IFEND(t):
+ r"fi"
+ return t
+
+ def t_CASE(t):
+ r"case.*in"
+ t.lexer.push_state("case")
+ return t
+
+ def t_CASEEND(t):
+ r"esac"
+ t.lexer.pop_state()
+ return t
+
+ def t_case_CASEOPT(t):
+ r"[^\n\t\(\)]+\)"
+ return t
+
+ def t_case_COPTEND(t):
+ r";;"
+ return t
+
+ def t_literal(t):
+ r"\\[^\n]"
+ t.type = "TEXT"
+        t.value = t.value[-1] #return literal char
+ return t
+
+ def t_TEXT(t): #most likely commands like "AM_INIT_AUTOMAKE" etc.
+ #Fix this so I can handle variables like the one above as that is NOT a text string
+ r"([^ ;,\t\n\(\)]+|\([^() \t\n]*\))"
+ return t
+
+ def t_ANY_error(t):
+ print("Illegal character '%s'" % t.value[0],t.lexer.lineno)
+ t.lexer.skip(1)
+
+ lexer = lex.lex()
+
+ #lexer.input(acfile)
+ #for tok in lexer:
+ # print(tok)
+
+ #YACC stuff begins here
+
+ def p_complst(p):
+ """
+ complst : complst text
+ | complst ECHO
+ | complst func
+ | complst VAR
+ | complst ifcomp
+ | complst case
+ | complst FUNCOPT
+ | text
+ | ECHO
+ | func
+ | VAR
+ | ifcomp
+ | case
+ | FUNCOPT
+ """
+ if len(p) == 3:
+ p[0] = p[1] + [p[2]]
+ else:
+ p[0] = [p[1]]
+
+ def p_text(p):
+ """
+ text : text TEXT
+ | TEXT
+ """
+ if len(p) == 3:
+ p[0] = p[1] + " " + p[2]
+ else:
+ p[0] = p[1]
+
+ def p_case(p):
+ """
+ case : CASE caseopt CASEEND
+ """
+ p[0] = [p[1]] + [p[2]]
+
+ def p_caseopt(p):
+ """
+ caseopt : caseopt CASEOPT complst COPTEND
+ | CASEOPT complst COPTEND
+ """
+ if len(p) == 5:
+ p[0] = p[1] + [p[2], p[3]]
+ else:
+ p[0] = [p[1], p[2]]
+
+ def p_caseopt2(p):
+ """
+ caseopt : caseopt CASEOPT complst
+ | caseopt CASEOPT COPTEND
+ | CASEOPT complst
+ | CASEOPT COPTEND
+ """
+ if len(p) == 4:
+ if isinstance(p[3],list):
+ p[0] = p[1] + [p[2], p[3]]
+ else:
+ p[0] = p[1] + [p[2], []]
+ else:
+ if isinstance(p[2],list):
+ p[0] = [p[1], p[2]]
+ else:
+ p[0] = [p[1], []]
+
+ def p_ifcomp(p): #perhaps needs elif also
+ """
+ ifcomp : if IFEND
+ """
+ p[0] = p[1]
+
+ def p_if(p):
+ """
+ if : if ELSE complst
+ | IF ifcom THEN complst
+ | if ELIF ifcom THEN complst
+ """
+ if len(p) == 5:
+ p[0] = [[p[1]] + [p[2]], p[4]]
+
+ elif len(p) == 6:
+ p[0] = p[1] + [[p[2]] + [p[3]], p[5]]
+
+ else:
+ p[0] = p[1] + [[p[2]], p[3]]
+
+
+ def p_ifcom(p):
+ """
+ ifcom : ifcom IFCOM
+ | IFCOM
+ """
+ if len(p) == 3:
+ p[0] = p[1] + [p[2]]
+ else:
+ p[0] = [p[1]]
+
+ def p_func(p):
+ """
+ func : FUNC funcopt FUNCEND
+ | COMPFUNC
+ """
+ if len(p) == 2:
+ p[0] = p[1] #this is already ordered
+ else:
+ p[0] = [p[1],p[2]]
+
+ def p_funccomma(p):
+ """
+ funcopt : funcopt COMMA
+ | COMMA complst
+ | COMMA
+ """
+ if len(p) == 3:
+ if isinstance(p[2],list):
+ if len(p[2]) > 1:
+ p[0] = [[]] + [p[2]]
+ else:
+ p[0] = [[]] + p[2]
+
+ else:
+ p[0] = p[1] + [[]]
+ else:
+ p[0] = [[]]
+
+ def p_funcopt(p):
+ """
+ funcopt : funcopt COMMA complst
+ | complst
+ """
+ if len(p) == 4:
+ if len(p[3]) > 1:
+ p[0] = p[1] + [p[3]]
+ else:
+ p[0] = p[1] + p[3]
+ else:
+ if len(p[1]) > 1:
+ p[0] = [p[1]]
+ else:
+ p[0] = p[1]
+
+ def p_error(p):
+ print("syntax error at '%s'" % p.type,p.value)
+ pass
+
+ yacc.yacc()
+
+ items = yacc.parse(acfile)
+ return items
+
+from ebuildgen.filetypes.acif import parseif
+
+def output(inputlst,topdir):
+ variables = dict()
+ iflst = []
+ for item in inputlst:
+ if item[0] == "AC_ARG_ENABLE":
+ name = convnames(item[1][0])
+ if len(item[1]) == 2:
+ variables["enable_" + name] = {"AC_ARG_ENABLE" : ""}
+ elif len(item[1]) == 3:
+ variables["enable_" + name] = [item[1][2],[]]
+ else:
+ variables["enable_" + name] = [item[1][2],item[1][3]]
+
+ #remember to convert chars in the name of "item[1]" that is not
+            #alphanumeric char to underscores _
+ #Done with convnames!
+
+ elif item[0] == "AC_ARG_WITH":
+ name = convnames(item[1][0])
+ if len(item[1]) == 2:
+ variables["with_" + name] = {"AC_ARG_WITH" : ""}
+ elif len(item[1]) == 3:
+ variables["with_" + name] = [item[1][2],[]]
+ else:
+ variables["with_" + name] = [item[1][2],item[1][3]]
+ elif isinstance(item[0],list): #if statements
+ for variable in variables:
+ for pattern in item[0][1]:
+ if variable in pattern:
+ iflst += [[parseif(item[0][1]),ifs(item[1],{})]]
+
+ elif item[0] == "AM_CONDITIONAL":
+ var = item[1][0].strip("[]")
+ cond = parseif(item[1][1].strip("[]").split())
+ for if_state in iflst:
+ if cond[0] in if_state[1]:
+ if cond[1] == "!" or cond[1] == if_state[1][cond[0]]:
+ #"!" == not zero/defined, "" zero/not defined
+ if_state[1][var] = "true"
+
+ elif item[0] == "m4_include":
+ newvar,newiflst = output(scanacfile(openfile(topdir + item[1])),topdir)
+ variables.update(newvar)
+ iflst += newiflst
+
+ #for variable in variables:
+ #print(variable)
+ #print(variables[variable])
+ #print(iflst)
+ return variables,iflst
+
+def ifs(inputlst,variables):
+
+ for item in inputlst:
+ ac_check = 0 #is this an ac_check?
+ if item[0] == "AC_CHECK_HEADERS" or item[0] == "AC_CHECK_HEADER":
+ ac_check = 1
+ elif item[0] == "AC_CHECK_LIB":
+ ac_check = 2
+ elif item[0] == "PKG_CHECK_MODULES":
+ ac_check = 3
+
+ if ac_check:
+ if not isinstance(item[1][0],list):
+ headers = convnames(item[1][0]).split()
+ else:
+ headers = []
+ for header in item[1][0]:
+ headers += convnames(header)
+
+ for header in headers:
+ if ac_check == 1:
+ variables["ac_cv_header_" + header] = "yes"
+ if ac_check == 2:
+ variables["ac_cv_lib_" + header] = "yes"
+
+ if len(item[1]) > 2 and ac_check > 1:
+ if isinstance(item[1][2],list):
+ variables.update(ifs(item[1][2], variables))
+ else:
+ variables.update(ifs(scanacfile(item[1][2].strip("[]")), variables))
+ elif ac_check == 1 and len(item[1]) > 1:
+ if isinstance(item[1][1],list):
+ variables.update(ifs(item[1][1], variables))
+ else:
+ variables.update(ifs(scanacfile(item[1][1].strip("[]")), variables))
+
+ elif isinstance(item[0],list): #if statement
+ variables.update(ifs(item[1],variables))
+
+ elif item[0] == "AC_DEFINE":
+ if len(item[1]) == 1:
+ variables.update({item[1][0].strip("[]") : "1"})
+ else:
+ variables.update({item[1][0].strip("[]") : item[1][1]})
+
+ elif "=" in item:
+ (var,items) = item.split("=")
+ compitems = []
+            #Fix "´" aka exec shell command comments!
+ for itm in items.strip('"').strip("'").split():
+ if itm[0] == "$":
+ if itm[1:] in variables:
+ compitems += variables[itm[1:]]
+
+ else:
+ compitems += [itm]
+ variables[var] = compitems
+
+ return variables
+
+import re
+def convnames(string): #strip non-alphanumeric chars and replace them with "_"
+ string = string.strip("[]") #remove quotes
+ pattern = re.compile("\W")
+ newstr = re.sub(pattern, "_", string)
+ return newstr
+
+#this is not a good name, come up with a better one!
+def scanac(acfile,topdir):
+ return output(scanacfile(acfile),topdir)
+
+def openfile(ofile):
+ with open(ofile, encoding="utf-8", errors="replace") as inputfile:
+ return inputfile.read()
diff --git a/ebuildgen/filetypes/automake.py b/ebuildgen/filetypes/automake.py
new file mode 100644
index 0000000..c4ca432
--- /dev/null
+++ b/ebuildgen/filetypes/automake.py
@@ -0,0 +1,342 @@
+from ply import lex
+from ply import yacc
+import glob
+import os
+
+def scanamfile(amfile):
+ """Scan automake (.am) file
+
+ Returns ...
+ """
+ amfile = "\n" + amfile #Add \n so you can guess vars
+ tokens = (
+ "END",
+ "COL",
+ "EQ",
+ "PEQ",
+ "CVAR",
+ "MVAR",
+ "TEXT",
+ "ENDTAB",
+ "SPACE",
+ "IF",
+ "ELSE",
+ "ENDIF",
+ )
+
+ states = (
+ ("com", "exclusive"), #comment
+ ("var", "inclusive"),
+ ("if", "exclusive"),
+ )
+
+ def t_begin_com(t):
+ r"[ \t]*\#"
+ t.lexer.begin("com")
+
+ def t_com_other(t):
+ r"[^\\\n]+"
+ pass
+
+ def t_com_lit(t):
+ r"\\."
+ pass
+
+ def t_com_newline(t):
+ r".*\\\n"
+ t.lexer.lineno += 1
+ pass
+
+ def t_ifbegin(t):
+    #ugly hack to ensure that this is at the beginning of the line and keep the newline token.
+ #PLY doesn't support the "^" beginning of line regexp :,(
+ r"\nif"
+ t.type = "END"
+ t.lexer.push_state("if")
+ return t
+
+ def t_if_IF(t):
+ #http://www.gnu.org/s/hello/manual/automake/Usage-of-Conditionals.html#Usage-of-Conditionals
+ r"[ \t]+[^ \n\t]*"
+ t.value = t.value.strip() #take the variable to test
+ t.lexer.pop_state()
+ return t
+
+ def t_ELSE(t):
+ r"\nelse"
+ return t
+
+ def t_ENDIF(t):
+ r"\nendif"
+ return t
+
+ def t_CVAR(t): #configure variable
+ r"@.*?@" #not greedy
+ return t
+
+ def t_MVAR(t): #makefile variable
+ r"\$\(.*?\)"
+ return t
+
+ def t_com_END(t):
+ r"\n"
+ t.lexer.begin("INITIAL")
+ t.lexer.lineno += 1
+ return t
+
+ def t_EQ(t):
+ r"[ \t]*=[ \t]*"
+ t.lexer.begin("var")
+ t.value = t.value.strip()
+ return t
+
+ def t_PEQ(t):
+ r"[ \t]*\+=[ \t]*"
+ t.lexer.begin("var")
+ t.value = t.value.strip()
+ return t
+
+ def t_contline(t):
+ r"\\\n"
+ t.lexer.lineno += 1
+ pass
+
+ def t_litteral(t):
+ r"\\."
+ t.value = t.value[1] #take the literal char
+ t.type = "TEXT"
+ return t
+
+ def t_COL(t):
+ r"[ \t]*:[ \t]*"
+ t.lexer.begin("var")
+ return t
+
+ def t_var_ENDTAB(t):
+ r"[ \t]*;[ \t]*"
+ return t
+
+ def t_ENDTAB(t):
+ r"[ \t]*\n\t[ \t]*"
+ t.lexer.lineno += 1
+ return t
+
+ def t_var_TEXT(t):
+ r"[^ #\n\t,\$@\\]+"
+ return t
+
+ def t_TEXT(t):
+ r"[^ \n\t:=\$@\\]+"
+ return t
+
+ def t_END(t):
+ r"[ \t]*\n"
+ t.lexer.lineno += t.value.count('\n')
+ t.lexer.begin('INITIAL')
+ return t
+
+ def t_var_SPACE(t):
+ r"[ \t]+"
+ return t
+
+ def t_space(t):
+ r"[ \t]"
+ pass
+
+ def t_var_special(t):
+ r"\$[^({]"
+ t.type = "TEXT"
+ return t
+
+ def t_ANY_error(t):
+ print("Illegal character '%s'" % t.value[0])
+ t.lexer.skip(1)
+
+ lexer = lex.lex()
+
+ #lexer.input(amfile)
+ #for tok in lexer:
+ # print(tok)
+
+ #YACC stuff begins here
+
+ def p_done(p):
+ "done : vars end"
+ p[0] = p[1]
+
+ def p_vars(p):
+ """
+ vars : vars end var
+ | end var
+ """
+ if len(p) == 4:
+ p[1][0].update(p[3][0])
+ p[1][2].update(p[3][2])
+ p[0] = [p[1][0], p[1][1] + p[3][1], p[1][2]]
+
+ else:
+ p[0] = p[2]
+
+ def p_if(p):
+ """
+ var : IF vars ENDIF
+ | IF vars ELSE vars ENDIF
+ """
+ if len(p) == 4:
+ p[0] = [{},[],{p[1]:p[2]}]
+
+ else:
+ p[0] = [{},[],{p[1]:p[2],"!"+p[1]:p[4]}]
+
+ def p_var(p):
+ """
+ var : textstr EQ textlst
+ | textstr EQ
+ | textstr PEQ textlst
+ """
+ if p[2] == "=":
+ if len(p) == 4:
+ p[0] = [{p[1]: p[3]},[],{}]
+ else:
+ p[0] = [{p[1]: []},[],{}]
+ else:
+ p[0] = [{},[[p[1], p[3]]],{}]
+
+ def p_textlst(p):
+ """
+ textlst : textlst spacestr textstr
+ | textstr
+ """
+ if len(p) == 4:
+ p[0] = p[1] + [p[3]]
+ else:
+ p[0] = [p[1]]
+
+ def p_teststr(p):
+ """
+ textstr : textstr TEXT
+ | textstr CVAR
+ | textstr MVAR
+ | TEXT
+ | CVAR
+ | MVAR
+ """
+ if len(p) == 3:
+ p[0] = p[1] + p[2]
+ else:
+ p[0] = p[1]
+
+ def p_space(p):
+ """
+ spacestr : spacestr SPACE
+ | SPACE
+ """
+ if len(p) == 3:
+ p[0] = p[1] + p[2]
+ else:
+ p[0] = p[1]
+
+ def p_end(p):
+ """
+ end : end END
+ | END
+ """
+
+ def p_error(p):
+ print("syntax error at '%s'" % p.type,p.value)
+ pass
+
+ yacc.yacc()
+
+ variables = yacc.parse(amfile)
+ return variables
+
+def initscan(amfile,iflst):
+ useflag_sources = {} #{source: [useflag, value]}
+ incflag_sources = {} #{source: [include flags]}
+ top_dir = os.path.split(amfile)[0] + "/"
+
+ def scan(amfile):
+ curdir = os.path.split(amfile)[0] + "/"
+ amlist = scanamfile(openfile(amfile))
+ #print(amfile)
+
+ def sources_to_scan(amlist,curdir):
+ incflags = []
+ sources = []
+ extra_sources = []
+            #perhaps use set() here to eliminate the possibility of duplicates?
+ for variable in amlist[0]:
+ if variable.split("_")[-1] == "SOURCES":
+ if variable.split("_")[0] == "EXTRA":
+ extra_sources += amlist[0][variable]
+ else:
+ sources += amlist[0][variable]
+
+ if variable.split("_")[-1] == "LDADD":
+ for item in amlist[0][variable]:
+ if item[0] == "@" and item[-1] == "@":
+ for ifstate in iflst:
+ if item.strip("@") in ifstate[1]:
+ for file in ifstate[1][item.strip("@")]:
+ for src in extra_sources:
+ if file.split(".")[0] == src.split(".")[0]:
+ useflag_sources[curdir + src] = ifstate[0]
+ incflag_sources[curdir + src] = incflags
+
+ for src in extra_sources:
+ if item.split(".")[0] == src.split(".")[0]:
+ sources += [src]
+
+ if variable.split("_")[-1] == "CFLAGS" or variable == "DEFAULT_INCLUDES":
+ for item in amlist[0][variable]:
+ if item[:2] == "-I":
+ if item[2:] == "$(top_srcdir)" or item[2:] == "$(srcdir)":
+ incflags += [top_dir]
+ elif item[2] == "/":
+ incflags += [item[2:]]
+ else:
+ incflags += [curdir + item[2:]]
+
+ if not "DEFAULT_INCLUDES" in amlist[0]:
+ incflags += [curdir,top_dir]
+
+ if "SUBDIRS" in amlist[0]:
+ for dir in amlist[0]["SUBDIRS"]:
+ sources += scan(curdir + dir + "/Makefile.am")
+
+ for lst in amlist[1]:
+ if lst[0] == "SUBDIRS":
+ for dir in lst[1]:
+ sources += scan(curdir + dir + "/Makefile.am")
+
+ for ifstatement in amlist[2]:
+ #print(ifstatement)
+ for item in iflst:
+ if ifstatement.lstrip("!") in item[1]:
+ if ifstatement[0] == "!":
+ if item[1][ifstatement.lstrip("!")] == "false":
+ for src in sources_to_scan(amlist[2][ifstatement],curdir):
+ useflag_sources[src] = item[0]
+
+ elif item[1][ifstatement] == "true":
+ for src in sources_to_scan(amlist[2][ifstatement],curdir):
+ useflag_sources[src] = item[0]
+
+ #add filepath
+ dirsources = []
+ for source in sources:
+ if os.path.split(source)[0] == "":
+ dirsources += [curdir + source]
+ incflag_sources[curdir + source] = incflags
+ else:
+ dirsources += [source]
+
+ return dirsources
+
+ return sources_to_scan(amlist,curdir)
+ return scan(amfile),useflag_sources,incflag_sources
+
+def openfile(ofile):
+ with open(ofile, encoding="utf-8", errors="replace") as inputfile:
+ return inputfile.read()
diff --git a/ebuildgen/filetypes/ctypefiles.py b/ebuildgen/filetypes/ctypefiles.py
new file mode 100644
index 0000000..50b20ed
--- /dev/null
+++ b/ebuildgen/filetypes/ctypefiles.py
@@ -0,0 +1,229 @@
+import glob
+from ply import lex
+from ply import yacc
+
+#lex stuff begins here
+
+def scanincludes(string,inclst,curdir,incpaths):
+ """Scan ctype files for #includes
+
+ Adds and returns new includes to the supplied include list
+ input:
+ string with the file contents to scan,
+    an include list
+ string with the current working dir
+ """
+ tokens = (
+ "GINCLUDE",
+ "LINCLUDE",
+ #"BUNDLEINC",
+ "IFDEF",
+ "ENDIF",
+ )
+
+ states = (
+ ("com","exclusive"), #comment
+ ("ifdef","inclusive"),
+ )
+
+ t_ANY_ignore = " \t"
+
+ def t_begin_com(t):
+ r"/\*"
+ t.lexer.push_state("com")
+
+ def t_com_end(t):
+ r"\*/"
+ t.lexer.pop_state()
+ pass
+
+ def t_line_com(t):
+ r"//.*"
+ pass
+
+ def t_ANY_begin_if0(t):
+ r"\#if[ \t]+0"
+ t.lexer.push_state("com")
+
+ def t_com_endif(t):
+ r"\#endif"
+ t.lexer.pop_state()
+ pass
+
+ def t_com_ifdef(t):
+ r"\#ifdef"
+ t.lexer.push_state("com")
+
+ def t_IFDEF(t):
+ r"\#ifdef[ \t]+[a-zA-Z_][a-zA-Z0-9_]*"
+ t.value = t.value[6:].strip() #return the ifdef name
+ t.lexer.push_state("ifdef")
+ return t
+
+ def t_ifdef_ENDIF(t):
+ r"\#endif"
+ t.lexer.pop_state()
+ return t
+
+ def t_GINCLUDE(t):
+ r"\#[ \t]*[Ii][Nn][Cc][Ll][Uu][Dd][Ee][ \t]+<.*\.h>"
+ t.value = t.value[t.value.find("<"):].strip().strip("<>")
+ return t
+
+ def t_LINCLUDE(t):
+ r"\#[ \t]*[Ii][Nn][Cc][Ll][Uu][Dd][Ee][ \t]+\".*\.h\""
+ t.value = t.value[t.value.find('"'):].strip().strip('""')
+ return t
+
+ def t_BUNDLEINC(t):
+ r"\#[ \t]*[Ii][Nn][Cc][Ll][Uu][Dd][Ee][ \t]+<.*>"
+ pass
+
+ def t_ANY_error(t):
+ #print("Illegal character '%s'" % t.value[0])
+ t.lexer.skip(1)
+
+ lexer = lex.lex()
+
+ #lexer.input(string)
+ #
+ #for tok in lexer:
+ # print(tok)
+ #
+ #YACC stuff here
+
+ def p_includes2(p):
+ """
+ includes : includes ginc
+ """
+ if islocalinc(p[2],curdir,incpaths):
+ p[1][1].add(p[2])
+ else:
+ p[1][0].add(p[2])
+ p[0] = p[1]
+
+ def p_lincludes(p):
+ """
+ includes : includes linc
+ """
+ locincpaths = incpaths + [curdir + "/"]
+ if islocalinc(p[2],curdir,locincpaths):
+ p[1][1].add(p[2])
+ else:
+ p[1][0].add(p[2])
+ p[0] = p[1]
+
+ def p_ifdef(p):
+ """
+ includes : includes IFDEF includes ENDIF
+ | IFDEF includes ENDIF
+ """
+ if len(p) == 5:
+ p[1][2] = addnewifdefs(p[1][2],{p[2] : p[3]})
+ p[0] = p[1]
+ else:
+ ifdef = {}
+ ifdef[p[1]] = p[2]
+ p[0] = [set(),set(),ifdef]
+
+ def p_ifdefempty(p):
+ """
+ includes : includes IFDEF ENDIF
+ | IFDEF ENDIF
+ """
+ if len(p) == 4:
+ p[0] = p[1]
+ else:
+ p[0] = [set(),set(),{}]
+
+ def p_ginc(p):
+ "includes : ginc"
+ globinc = set()
+ globinc.add(p[1])
+ if islocalinc(p[1], curdir, incpaths):
+ p[0] = [set(),globinc,{}]
+ else:
+ p[0] = [globinc,set(),{}]
+
+ def p_linc(p):
+ "includes : linc"
+ locinc = set()
+ locinc.add(p[1])
+ locincpaths = incpaths + [curdir + "/"]
+ if islocalinc(p[1], curdir, locincpaths):
+ p[0] = [set(),locinc,{}]
+ else:
+ p[0] = [locinc,set(),{}]
+
+ def p_ginclude(p):
+ "ginc : GINCLUDE"
+ p[0] = p[1]
+
+ def p_linclude(p):
+ "linc : LINCLUDE"
+ p[0] = p[1]
+
+ def p_error(p):
+ print("syntax error at '%s'" % p.type)
+ pass
+
+ yacc.yacc()
+
+ newinclst = yacc.parse(string)
+ if newinclst == None:
+ #Check if the file didn't have any includes
+ return(inclst)
+ newinclst = addnewincludes(newinclst,inclst)
+ return(newinclst)
+
+def islocalinc(inc, curdir, incpaths):
+ """Checks if this is a local include
+
+ Checks if the file can be found with the path that is supplied.
+ If not this is probably a global include and thus return False
+ """
+
+ for incpath in incpaths:
+ #check if the path for a local inc is correct.
+ #The work dir is in /tmp.
+ if incpath[:4] == "/tmp":
+ if not glob.glob(incpath + inc) == []:
+ return True
+
+ return False
+
+def addnewincludes(inclist1,inclist2):
+ """Adds new includes to the first inclist and return it
+
+ Does a deeper scan for ifdef includes
+ """
+ #come up with better names!!
+ inclist1[0] = inclist1[0] | inclist2[0]
+ inclist1[1] = inclist1[1] | inclist2[1]
+ inclist1[2] = addnewifdefs(inclist1[2],inclist2[2])
+ return(inclist1)
+
+def addnewifdefs(dict1,dict2):
+ """Merges the ifdef section of the inclst
+
+ Returns a new list with all of the ifdefs
+ """
+
+ if dict1 == {} and dict2 == {}:
+ #we are done here
+ return(dict())
+ dups = dict1.keys() & dict2.keys()
+ if dups == set():
+ #no duplicates, empty set()
+ for name in dict2:
+ dict1[name] = dict2[name]
+ return(dict1)
+
+ for name in dups:
+ dict1[name][0] = dict1[name][0] | dict2[name][0]
+ dict1[name][1] = dict1[name][1] | dict2[name][1]
+ dict1[name][2] = addnewifdefs(dict1[name][2],dict2[name][2])
+ dict2.pop(name)
+ for name in dict2:
+ dict1[name] = dict2[name]
+ return(dict1)
diff --git a/ebuildgen/filetypes/makefilecom.py b/ebuildgen/filetypes/makefilecom.py
new file mode 100644
index 0000000..e76a15c
--- /dev/null
+++ b/ebuildgen/filetypes/makefilecom.py
@@ -0,0 +1,396 @@
+from ply import lex
+from ply import yacc
+import glob
+import os
+from subprocess import getstatusoutput
+
def expand(lst, variables):
    """Recursively expand makefile variable references in ``lst``.

    Items that are lists are unexpanded ``$(...)`` references; each one
    is interpreted via com_interp and its expansion is spliced
    (flattened) into the result.  Plain strings pass through untouched.
    ``variables`` is the makefile variable dict used for lookups.
    """
    expanded = []
    for entry in lst:
        if not isinstance(entry, list):
            expanded.append(entry)
        else:
            # entry[0] holds the raw "$(...)" text; interpret it, then
            # expand the interpretation in case it produced new refs.
            expanded += expand(com_interp(entry[0], variables), variables)
    return expanded
+
def com_interp(string,variables):
    """Interpret the supplied command and return a list with the output

    Parses one makefile "$(...)"/"${...}" reference body with a PLY
    lexer/parser that is rebuilt on every call.  Supported forms:
      * plain variable expansion:  $(VAR)
      * substitution references:   $(VAR:pat=repl), pat/repl may use %
      * function calls:            $(func arg,arg,...) via funcdict
    ``variables`` maps variable names to (possibly unexpanded) value
    lists.  Returns a list of expanded words, or None on a parse error.
    """

    tokens = (
        "COMMAND",
        "COMMA",
        "COL",
        "EQ",
        "TEXT",
        "PERCENT",
        "BEGINCOM",
        "ENDCOM",
        "SPACE",
    )
    states = (
        ("ccode", "exclusive"), #command code
        ("eval", "exclusive"), #code to evaluate
    )

    # Match the first $(. Enter ccode state.
    def t_eval_ccode(t):
        r'\$(\{|\()'
        t.lexer.code_start = t.lexer.lexpos # Record the starting position
        t.lexer.level = 1 # Initial level
        t.lexer.push_state('ccode') # Enter 'ccode' state

    # Rules for the ccode state
    def t_ccode_newcom(t):
        r'\$(\{|\()'
        t.lexer.level +=1

    def t_ccode_endcom(t):
        r'(\}|\))'
        t.lexer.level -=1

        # If closing command, return the code fragment
        if t.lexer.level == 0:
            t.value = t.lexer.lexdata[t.lexer.code_start-1:t.lexer.lexpos]
            t.type = "COMMAND"
            t.lexer.pop_state()
            return t

    # Swallow any character inside a nested command (no token emitted).
    def t_ccode_text(t):
        r"[^\$\(\{\)\}]"

    # Opening paren/brace of the outer reference; switch to 'eval'.
    def t_BEGINCOM(t):
        r"(\(|\{)"
        t.lexer.begin("eval")
        return t

    def t_eval_ENDCOM(t):
        r"(\)|\})"
        t.lexer.begin("INITIAL")
        return t

    def t_eval_PERCENT(t):
        r"\%"
        return t

    def t_eval_EQ(t):
        r"="
        return t

    def t_eval_COMMA(t):
        r",[ \t]*"
        return t

    def t_eval_COL(t):
        r":"
        return t

    def t_eval_TEXT(t):
        r"[^ \n\t:=\)\}\(\}\\\$,]+"
        return t

    def t_TEXT(t):
        r"[^ \t$\(\{]"
        return t

    def t_ANY_SPACE(t):
        r"[ \t]"
        return t

    def t_ANY_error(t):
        print("Illegal character '%s'" % t.value[0])
        t.lexer.skip(1)

    lexer = lex.lex()

    #lexer.input(string)
    #for tok in lexer:
    #    print(tok)


    #YACC stuff begins here

    # Top level: either a "(...)" substitution/expansion or a function.
    def p_comp(p):
        """
        complst : BEGINCOM newstr ENDCOM
                | func
        """
        if len(p) == 4:
            p[0] = p[2]
        else:
            p[0] = p[1]

    # Whitespace-split a fully-expanded string into the result list.
    def p_complst(p):
        "complst : compstr"
        p[0] = p[1].split()

    # Plain variable expansion: look the name up in `variables` and
    # join its (recursively expanded) items with single spaces.
    def p_compstr(p):
        """
        compstr : compstr BEGINCOM textstr ENDCOM
                | BEGINCOM textstr ENDCOM
                | compstr textstr
                | compstr spacestr
                | textstr
                | spacestr
        """
        p[0] = ""
        if len(p) == 4:
            if p[2] in variables:
                for item in expand(variables[p[2]],variables):
                    p[0] += item + " "
                p[0] = p[0][:-1]
            else:
                p[0] = ""
        elif len(p) == 5:
            if p[3] in variables:
                for item in expand(variables[p[3]],variables):
                    p[1] += item + " "
                p[0] = p[1][:-1]
            else:
                p[0] = ""
        elif len(p) == 3:
            p[0] = p[1] + p[2]
        else:
            p[0] = p[1]

    # Right-hand side of a substitution ref: re-apply prefix/suffix
    # (around %) to every stem produced by getstr.
    def p_tonewstr(p):
        """
        newstr : getstr EQ textstr PERCENT textstr
               | getstr EQ PERCENT textstr
               | getstr EQ textstr PERCENT
               | getstr EQ PERCENT
               | getstr EQ textstr
        """
        newtextlist = []
        if p[1] == []:
            p[0] = p[1]
        elif len(p) == 6:
            for text in p[1]:
                newtextlist.append(p[3] + text + p[5])
            p[0] = newtextlist

        elif len(p) == 5:
            if p[3] == "%":
                for text in p[1]:
                    newtextlist.append(text + p[4])
                p[0] = newtextlist
            else:
                for text in p[1]:
                    newtextlist.append(p[3] + text)
                p[0] = newtextlist

        elif p[3] == "%":
            p[0] = p[1]
        else:
            for text in p[1]:
                newtextlist.append(text + p[3])
            p[0] = newtextlist


    # Left-hand side of a substitution ref: select the items of the
    # variable matching the pattern and strip the matched prefix/suffix,
    # yielding the bare stems.
    def p_getstr(p):
        """
        getstr : textstr COL textstr PERCENT textstr
               | textstr COL PERCENT textstr
               | textstr COL textstr PERCENT
               | textstr COL PERCENT
               | textstr COL textstr
        """
        if not p[1] in variables:
            p[0] = []
        else:
            textlst = expand(variables[p[1]],variables) #make sure it's expanded
            newtextlst = []

            if len(p) == 6:
                l1 = len(p[3]) #length of str1
                l2 = len(p[5])
                for text in textlst:
                    if p[3] == text[0:l1] and p[5] == text[-l2:]:
                        newtextlst.append(text[l1:-l2])

                p[0] = newtextlst

            elif len(p) == 5:
                if p[3] == "%":
                    l1 = len(p[4])
                    for text in textlst:
                        if p[4] == text[-l1:]:
                            newtextlst.append(text[:-l1])

                    p[0] = newtextlst
                else:
                    l1 = len(p[3])
                    for text in textlst:
                        if p[3] == text[0:l1]:
                            newtextlst.append(text[l1:])

                    p[0] = newtextlst
            elif p[3] == "%":
                p[0] = textlst
            else:
                l1 = len(p[3])
                for text in textlst:
                    if p[3] == text[-l1:]:
                        newtextlst.append(text[:-l1])

                p[0] = newtextlst

    # Function call: dispatch on the name through funcdict.
    # NOTE(review): an unknown function name raises KeyError here —
    # confirm that's the intended failure mode.
    def p_func(p):
        """
        func : BEGINCOM textstr SPACE funcinput
        """
        #result = ["This calls a function"]
        result = funcdict[p[2]](p[4],variables)
        p[0] = result

    # Comma-separated argument list; args containing "(" are wrapped in
    # a list to mark them as still-unexpanded commands.
    def p_funcinput(p):
        """
        funcinput : funcinput inputstr COMMA
                  | funcinput inputstr ENDCOM
                  | inputstr COMMA
                  | inputstr ENDCOM
        """
        if len(p) == 4:
            if "(" in p[2]: #command in the str
                p[1].append([p[2]])
            else:
                p[1].append(p[2])
            p[0] = p[1]
        else:
            if "(" in p[1]:
                p[0] = [[p[1]]]
            else:
                p[0] = [p[1]]

    def p_inputstr(p):
        """
        inputstr : inputstr spacestr
                 | inputstr TEXT
                 | inputstr COMMAND
                 | spacestr
                 | TEXT
                 | COMMAND
        """
        if len(p) == 3:
            p[0] = p[1] + p[2]
        else:
            p[0] = p[1]

    # A nested $(...) inside text: interpret it recursively and splice
    # its space-joined expansion into the surrounding string.
    def p_command(p):
        """
        textstr : textstr COMMAND
                | COMMAND
        """
        if len(p) == 3:
            for item in com_interp(p[2],variables):
                p[1] += item + " "
            p[0] = p[1][:-1]
        else:
            p[0] = ""
            for item in com_interp(p[1],variables):
                p[0] += item + " "
            p[0] = p[0][:-1] #remove the last space

    def p_textstr(p):
        """
        textstr : textstr TEXT
                | TEXT
        """
        if len(p) == 3:
            p[0] = p[1] + p[2]
        else:
            p[0] = p[1]

    def p_spacestr(p):
        """
        spacestr : spacestr SPACE
                 | SPACE
        """
        if len(p) == 3:
            p[0] = p[1] + p[2]
        else:
            p[0] = p[1]

    # NOTE(review): p is None when the error is at end-of-input, which
    # would make p.type raise AttributeError — confirm.
    def p_error(p):
        print("syntax error at '%s'" % p.type)
        pass

    yacc.yacc()

    retlst = yacc.parse(string)

    #print(retlst)

    return retlst
+
def foreach(inputlst, variables):
    """GNU make ``$(foreach var,list,text)``.

    Expands the list argument, then binds each item to the loop
    variable in ``variables`` and expands the body text for it.
    NOTE(review): the loop variable stays bound to the last item in
    ``variables`` after the call — confirm callers don't rely on any
    previous value.
    """
    varname = expand(inputlst[0:1], variables)[0]
    items = expand(inputlst[1:2], variables)

    expansions = []
    for element in items:
        variables[varname] = [element]
        expansions += expand([inputlst[2]], variables)

    return expansions
+
def wildcard(inputlst, variables):
    """GNU make ``$(wildcard pattern)``: glob the expanded pattern.

    Only the first word of the expansion is used as the glob pattern.
    """
    pattern = expand(inputlst, variables)[0]
    return glob.glob(pattern)
+
def shell(inputlst, variables):
    """GNU make ``$(shell ...)``: run the expanded command line.

    The expanded words are joined into one shell command and executed;
    the command's output is returned whitespace-split into a list.  On
    a non-zero exit status an error is reported but the (possibly
    partial) output is still returned.
    """
    # Join with spaces instead of quadratic += concatenation.
    command = " ".join(expand(inputlst, variables))
    (status, returnstr) = getstatusoutput(command)
    if status:
        # Bug fix: the old message had no separator, printing e.g.
        # "Error with commandecho foo".
        print("Error with command: " + command)
    return returnstr.split()
+
def notdir(inputlst, variables):
    """GNU make ``$(notdir ...)``: strip directory parts from names.

    ``inputlst`` holds either unexpanded variable references (lists),
    which are expanded first, or a single whitespace-separated string
    of file names.  Returns the bare file names as a list.
    """
    if isinstance(inputlst[0], list):
        names = expand(inputlst, variables)
    else:
        names = inputlst[0].split()

    return [os.path.split(name)[1] for name in names]
+
# Dispatch table mapping GNU make function names to their Python
# implementations; consulted by com_interp's p_func rule when it sees
# "$(funcname args...)".
funcdict = {
    "foreach" : foreach,
    "wildcard" : wildcard,
    "shell" : shell,
    "notdir" : notdir,
    }
+
+#print(com_interp("(shell pkg-config --cflags libsoup-2.4 $(x))",{"x":["gtk+-2.0"], "y":[".py"], "z":["u"]}))
+
diff --git a/ebuildgen/filetypes/makefiles.py b/ebuildgen/filetypes/makefiles.py
new file mode 100644
index 0000000..881a860
--- /dev/null
+++ b/ebuildgen/filetypes/makefiles.py
@@ -0,0 +1,479 @@
+from ply import lex
+from ply import yacc
+import glob
+from ebuildgen.filetypes.makefilecom import expand
+
def scanmakefile(makefile):
    """Scan supplied makefile.

    Returns a list of targets and variables found

    ``makefile`` is the makefile's text.  The return value is a pair
    ``(targets, variables)`` where ``targets`` is a list of
    ``[target, deps, options]`` entries and ``variables`` maps variable
    names to (possibly unexpanded) value lists.  Parsing is done with a
    PLY lexer/parser built on the fly.
    """
    makefile = "\n" + makefile #Add \n so you can guess vars
    tokens = (
        "END",
        "COL",
        "SEMICOL",
        "EQ",
        "PEQ",
        "CEQ",
        "QEQ",
        "TEXT",
        "COMMAND",
        "ENDTAB",
        "SPACE",
    )

    states = (
        ("com", "exclusive"),
        ("ccode", "exclusive"), #command code
        ("var", "inclusive"),
    )

    # Match the first $(. Enter ccode state.
    def t_ccode(t):
        r'\$(\{|\()'
        t.lexer.code_start = t.lexer.lexpos # Record the starting position
        t.lexer.level = 1 # Initial level
        t.lexer.push_state('ccode') # Enter 'ccode' state

    # Rules for the ccode state
    def t_ccode_newcom(t):
        r'\$(\{|\()'
        t.lexer.level +=1

    def t_ccode_endcom(t):
        r'(\}|\))'
        t.lexer.level -=1

        # If closing command, return the code fragment
        if t.lexer.level == 0:
            t.value = t.lexer.lexdata[t.lexer.code_start-1:t.lexer.lexpos]
            t.type = "COMMAND"
            t.lexer.pop_state()
            return t

    # Swallow command-internal characters (no token emitted).
    def t_ccode_text(t):
        r"[^\$\(\{\)\}]"

    # '#' starts a comment: switch to the 'com' state until newline.
    def t_begin_com(t):
        r"[ \t]*\#"
        t.lexer.begin("com")

    def t_com_other(t):
        r"[^\\\n]+"
        pass

    def t_com_lit(t):
        r"\\."
        pass

    def t_com_newline(t):
        r".*\\\n"
        t.lexer.lineno += 1
        pass

    def t_com_END(t):
        r"\n"
        t.lexer.begin("INITIAL")
        t.lexer.lineno += 1
        return t

    # BSD-make "!=" shell assignments are ignored entirely.
    def t_bsdexe(t): #Create a cleaner version
        r".*\!=.*"
        pass

    # The four assignment flavors; each switches to the 'var' state so
    # the right-hand side is tokenized with var-specific rules.
    def t_EQ(t):
        r"[ \t]*=[ \t]*"
        t.lexer.begin("var")
        return t

    def t_PEQ(t):
        r"[ \t]*\+=[ \t]*"
        t.lexer.begin("var")
        return t

    def t_CEQ(t):
        r"[ \t]*:=[ \t]*"
        t.lexer.begin("var")
        return t

    def t_QEQ(t):
        r"[ \t]*\?=[ \t]*"
        t.lexer.begin("var")
        return t

    # Line continuation: swallow "\<newline>".
    def t_contline(t):
        r"\\\n"
        t.lexer.lineno += 1
        pass

    def t_litteral(t):
        r"\\."
        t.value = t.value[1] #take the literal char
        t.type = "TEXT"
        return t

    def t_COL(t):
        r"[ \t]*:[ \t]*"
        t.lexer.begin("var")
        return t

    def t_var_ENDTAB(t):
        r"[ \t]*;[ \t]*"
        return t

    def t_SEMICOL(t):
        r";"
        return t

    # NOTE(review): "COMMA" is not declared in the tokens tuple —
    # verify PLY accepts this rule without error.
    def t_COMMA(t):
        r","
        return t

    # A newline followed by a tab introduces a recipe line.
    def t_ENDTAB(t):
        r"[ \t]*\n\t[ \t]*"
        t.lexer.lineno += 1
        return t

    def t_var_TEXT(t):
        r"[^ #\n\t,\$\\]+"
        return t

    def t_TEXT(t):
        r"[^ \n\t:\?\+=\\,\$]+"
        return t

    def t_END(t):
        r"[ \t]*\n+"
        t.lexer.lineno += t.value.count('\n')
        t.lexer.begin('INITIAL')
        return t

    def t_SPACE(t):
        r"[ \t]"
        return t

    # "$x" (no brace/paren) is treated as literal text in var context.
    def t_var_special(t):
        r"\$[^({]"
        t.type = "TEXT"
        return t

    def t_ANY_error(t):
        print("Illegal character '%s'" % t.value[0])
        t.lexer.skip(1)

    lexer = lex.lex()

    #lexer.input(makefile)
    #for tok in lexer:
    #    print(tok)

    #YACC begins here

    #a dict with values of defined variables
    variables = {}
    ivars = [] #keep track of the immediate variables
    targets = [] #buildtargets, [[target,deps,options],[target2,....


    def p_testvar(p):
        """
        comp : comp var
             | comp rule
             | comp end
             | var
             | rule
        """

    # Rule with recipe lines: expand targets/deps, resolve files via
    # VPATH and attach the recipe as the rule's options.
    def p_ruleoption(p):
        """
        rule : end textlst COL textlst options
             | end textlst COL options
        """
        if len(p) == 6:
            rulelst = convtargets(p[2],p[4],targets,variables)
            for rule in rulelst:
                rule = findfiles(rule,variables) #Implicit rule (path search)
                rule.append(p[5])
                targets.append(rule)
        else:
            rulelst = convtargets(p[2],[],targets,variables)
            for rule in rulelst:
                rule = findfiles(rule,variables) #Implicit rule (path search)
                rule.append(p[4])
                targets.append(rule)

    # Rule without a recipe: synthesize implicit compile/link rules.
    def p_rule(p):
        """
        rule : end textlst COL textlst
             | end textlst COL
        """
        if len(p) == 5:
            rulelst = convtargets(p[2],p[4],targets,variables)
            for rule in rulelst:
                rule,newtars = imprules(rule,targets,variables)
                targets.append(rule)
                for tar in newtars:
                    targets.append(tar)
        else:
            rulelst = convtargets(p[2],[],targets,variables)
            for rule in rulelst:
                rule,newtars = imprules(rule,targets,variables)
                targets.append(rule)
                for tar in newtars:
                    targets.append(tar)

    def p_peq(p): #immediate if peq was defined as immediate before else deferred
        """
        var : end textstr PEQ textlst
            | end textstr PEQ
        """
        if len(p) == 5:
            if not p[2] in variables:
                variables[p[2]] = p[4]
            elif not p[2] in ivars:
                variables[p[2]] += p[4]
            else:
                textvalue = expand(p[4],variables) #expand any variables
                variables[p[2]] = textvalue

    def p_ceq(p): #immediate
        """
        var : end textstr CEQ textlst
            | end textstr CEQ
        """
        if len(p) == 5:
            textvalue = expand(p[4],variables) #expand any variables
            variables[p[2]] = textvalue
            ivars.append(p[2])
        else:
            variables[p[2]] = []
            ivars.append(p[2])

    # NOTE(review): "?=" here overwrites an existing variable with []
    # when the value is non-empty but the name already exists — GNU
    # make's ?= leaves existing values untouched; confirm intent.
    def p_qeq(p): #deferred
        """
        var : end textstr QEQ textlst
            | end textstr QEQ
        """
        if not p[2] in variables and len(p) == 5:
            variables[p[2]] = p[4]
        else:
            variables[p[2]] = []

    def p_var(p): #deferred
        """
        var : end textstr EQ textlst
            | end textstr EQ
        """
        if len(p) == 5:
            variables[p[2]] = p[4]
        else:
            variables[p[2]] = []

    # Recipe lines (after tab) accumulated into a flat options list.
    def p_options(p):
        """
        options : options ENDTAB textlst
                | ENDTAB textlst
        """
        if len(p) == 4:
            p[0] = p[1] + p[3]
        else:
            p[0] = p[2]

    # Space-separated words; unexpanded $() commands stay wrapped in
    # single-element lists so expand() can find them later.
    def p_textlst(p):
        """
        textlst : textlst spacestr command
                | textlst spacestr textstr
                | command
                | textstr
        """
        if len(p) == 4:
            p[0] = p[1]+ [p[3]]
        else:
            p[0] = [p[1]]

    def p_com_and_str(p):
        """
        command : command textstr
                | textstr command
        """
        if isinstance(p[1],list):
            p[0] = [p[1][0] + p[2]]
        else:
            p[0] = [p[1] + p[2][0]]

    def p_textstr(p):
        """
        textstr : textstr TEXT
                | TEXT
        """
        if len(p) == 3:
            p[0] = p[1] + p[2]
        else:
            p[0] = p[1]

    def p_command(p):
        """
        command : command COMMAND
                | COMMAND
        """
        if len(p) == 2:
            p[0] = [p[1]] #commands are lists within the textlst
        else:
            p[0] = [p[1][0] + p[2]]

    def p_space(p):
        """
        spacestr : spacestr SPACE
                 | SPACE
        """
        if len(p) == 3:
            p[0] = p[1] + p[2]
        else:
            p[0] = p[1]

    def p_end(p):
        """
        end : end END
            | end spacestr END
            | END
        """


    # NOTE(review): p is None on end-of-input errors, so p.type would
    # raise AttributeError — confirm.
    def p_error(p):
        print("syntax error at '%s'" % p.type,p.value)
        pass

    yacc.yacc()

    yacc.parse(makefile)

    #for target in targets:
    #    print(target)
    #print(variables)

    return targets,variables
+
+
def convtargets(tarlist,deplist,targets,variables):
    """Convert makefile targets that are not explicitly stated in the makefile

    Expands ``tarlist``/``deplist`` and resolves pattern rules: for a
    target containing ``%``, every dependency of every known rule in
    ``targets`` that matches the pattern gets its own concrete
    ``[target, deps]`` entry, with ``%`` in the deps replaced by the
    matched stem.  Plain targets are passed through with the expanded
    deps.  Returns the list of ``[target, deps]`` pairs.

    NOTE(review): when the pattern starts or ends with ``%`` one of the
    split halves is empty, making the slice checks degenerate
    (``newtar[-0:]`` is the whole string) — confirm such patterns are
    handled as intended.
    """

    finaltars = []
    deps = expand(deplist,variables)
    tars = expand(tarlist,variables) #ugh high risk of confusion because of the names...
    for target in tars:
        if "%" in target:
            tarsplit = target.split("%")
            (l1,l2) = len(tarsplit[0]), len(tarsplit[1])
            # Scan every known rule's deps for names matching the
            # "<prefix>%<suffix>" pattern.
            for buildtarget in targets:
                for newtar in buildtarget[1]:
                    if newtar[-l2:] == tarsplit[1] and newtar[0:l1] == tarsplit[0]:
                        rulelst = [newtar,[]]
                        for newdep in deps:
                            if "%" in newdep:
                                # Substitute the matched stem into the dep.
                                depsplit = newdep.split("%")
                                rulelst[1] += [depsplit[0] + newtar[l1:-l2] + depsplit[1]]
                            else:
                                rulelst[1] += [newdep]
                        finaltars.append(rulelst)
        else:
            finaltars.append([target,deps])
    return finaltars
+
def findfiles(rule, variables):
    """Resolve an implicit rule's target and deps against VPATH.

    For each file named by the rule (target and every dependency): if
    it exists relative to the current directory it is kept, otherwise
    every VPATH entry is globbed and the first hit is substituted (the
    original name is kept when nothing matches).  Without a VPATH
    variable there is nothing to search, so the rule is returned as-is.
    """
    if "VPATH" not in variables:
        return rule

    def locate(name):
        # Existing file: keep the name unchanged.
        if glob.glob(name):
            return name
        hits = []
        for path in variables["VPATH"]:
            hits += glob.glob(path + "/" + name)
        # First VPATH hit wins, mirroring make's search order.
        return hits[0] if hits else name

    resolved = [locate(rule[0])]
    resolved.append([locate(dep) for dep in rule[1]])
    return resolved
+
def find(searchstr, paths):
    """Glob for ``searchstr`` under each path in ``paths``.

    Makefile implicit-rule searches only use the first hit, so at most
    one match is returned (as a list); an empty list means no match.
    """
    matches = []
    for path in paths:
        matches.extend(glob.glob(path + "/" + searchstr))

    # Keep only the first hit, as make would.
    return matches if len(matches) <= 1 else matches[:1]
+
def imprules(rule,targets,variables): #Implicit Rules
    """Converts implicit rules to explicit rules

    For a rule whose target has no file extension and whose deps are
    all ``.o`` files, look up the matching ``.c`` sources on the search
    path ("./" plus VPATH), synthesize one compile rule per object with
    the default ``$(CC) $(CFLAGS) $(CPPFLAGS) -c`` options, and give
    the original rule make's default link options.  Returns
    ``(rule, extra_rules)`` where ``extra_rules`` are the synthesized
    compile rules.  Rules that don't match this shape fall through to a
    plain VPATH file search and get an empty options list.
    """
    if len(rule[0].split(".")) == 1: #this is not a *.* file
        deps_type = set() #.o for example
        for dep in rule[1]:
            if len(dep.split(".")) == 2:
                deps_type.add(dep.split(".")[1])
            else:
                deps_type.add("notype")
        # Only the "binary from .o files" pattern is handled.
        if len(deps_type) == 1 and "o" in deps_type:
            searchpaths = ["./"]
            if "VPATH" in variables:
                searchpaths += variables["VPATH"]
            matches = []
            # The target itself may also have a corresponding .c file.
            matches = find(rule[0] + ".c",searchpaths)
            if matches:
                newtargets = []
                newdeps = []
                newtargets.append(rule[0] + ".o")
                newdeps.append(matches[0])
                matches = []
                for dep in rule[1]:
                    matches += find(dep.split(".")[0] + ".c",searchpaths)
                # Only synthesize rules when every .o has a .c source.
                if len(matches) == len(rule[1]):
                    newtargets += rule[1]
                    newdeps += matches
                newtars = []
                for index in range(len(newtargets)):
                    newtars.append([newtargets[index],[newdeps[index]],[["(CC)"], ["(CFLAGS)"], ["(CPPFLAGS)"], "-c"]])

                # Default link rule options (make's n.o convention).
                rule.append([["(CC)"], ["(LDFLAGS)"], "n.o", ["(LOADLIBES)"], ["(LDLIBS)"]])
                return rule,newtars

    # Fallback: just resolve the file names and attach empty options.
    rule = findfiles(rule,variables)
    rule.append([])
    return rule,[]
+
+#file="Makefile2"
+
+#with open(file, encoding="utf-8", errors="replace") as inputfile:
+# scanmakefile(inputfile.read())
diff --git a/ebuildgen/linkdeps.py b/ebuildgen/linkdeps.py
new file mode 100644
index 0000000..655af1c
--- /dev/null
+++ b/ebuildgen/linkdeps.py
@@ -0,0 +1,108 @@
+import os
+from subprocess import getstatusoutput
+from urllib.request import urlopen
+import gentoopm
+
def deptopackage(dep,addpaths):
    """Map an include dependency to a portage package.

    Delegates to qfiletopackage (the local qfile-based lookup); the
    online pfl-based lookup is kept as a commented-out alternative.
    """
    #return pfltopackage(dep,addpaths)
    return qfiletopackage(dep,addpaths)
+
def qfiletopackage(dep,addpaths):
    """Converts supplied deps with additional include paths to portage packages

    This uses qfile to quess which package certain files belongs to.
    ``dep`` is a header path (e.g. "jack/ringbuffer.h"); ``addpaths``
    are extra include directories.  Returns the matching package name,
    falling back to the online pfl lookup when qfile finds nothing, or
    [] when no locally-known package matches.
    """

    print(dep)
    # Ask gcc's cc1 for its builtin system include search paths.
    (statuscode,outstr) = getstatusoutput('echo "" | `gcc -print-prog-name=cc1` -v -q')
    #"`gcc -print-prog-name=cc1plus` -v" for cpp
    outlst = outstr.split("\n")
    incpaths = []
    for item in outlst:
        # Search-path lines are printed as " /usr/include" etc.
        if item[:2] == " /":
            incpaths += [item[1:]]
    incpaths += addpaths
    depname = os.path.split(dep)[1]

    (statuscode,packagestr) = getstatusoutput("qfile -C " + depname)
    if not statuscode == 0:
        # qfile failed: fall back to the online pfl database.
        package = pfltopackage(dep,incpaths)

    else:
        # qfile output alternates "package (path)"; keep packages whose
        # owned path matches the dep under one of the include paths.
        packagelst = packagestr.split()
        package = []
        n = 0
        for depfile in packagelst[1::2]:
            for incpath in incpaths:
                if depfile.strip("()") == (incpath + "/" + dep):
                    package.append(packagelst[n])
            n += 2

        if len(package) > 1:
            print("more than one matching package were found!")

        if not package:
            package = pfltopackage(dep,incpaths)

    print(package)
    #check if package exists
    pm=gentoopm.get_package_manager()
    if package:
        #does the package exist in this computers package manager?
        if pm.stack.filter(package[0]):
            return package[0]
        else:
            print("No package named: " + package[0] + " found localy, ignoring")
            return []
    else:
        return package
+
def pfltopackage(dep,addpaths):
    """This uses the online ply database to guess packages

    Queries portagefilelist.de for packages owning the header named by
    ``dep``, restricted to the include paths in ``addpaths``.  Returns
    a single-element list with the chosen package, a dummy placeholder
    when matches exist but none fit the include paths, or [] when no
    package owns the file at all.
    """

    print(dep)
    incpaths = addpaths

    url_lines = []
    depname = os.path.split(dep)[1]
    matching_packages = set()
    all_packages = set()

    # Plain-text query endpoint ("&txt") of the pfl database.
    url = urlopen("http://www.portagefilelist.de/index.php/Special:PFLQuery2?file="
            + depname + "&searchfile=lookup&lookup=file&txt")

    for line in url:
        url_lines += [line.decode("utf-8").split()]

    #First line does not contain any useful information, skip it
    url_lines = url_lines[1:]
    #structure of lines: [portage_category, package, path, file, misc, version]

    for line in url_lines:
        all_packages.add(line[0] + "/" + line[1])
        #check if path is correct
        for path in incpaths:
            if line[2] + "/" + line[3] == path + "/" + dep:
                matching_packages.add(line[0] + "/" + line[1])

    # NOTE(review): set.pop() below picks an arbitrary element, not
    # "the last one" as the message claims — confirm wording/intent.
    if len(matching_packages) > 1:
        print("More than one matching package for dep found!\nPicking the last one...")

    if not matching_packages:
        print("no matching package found within the include paths!")
        if len(all_packages) == 1:
            print("but only one package matches the headerfile, picking that one")
            matching_packages = all_packages
        elif all_packages:
            print("file not found was: " + dep)
            print("a dummy dep will be placed in the ebuild, fix it!")
            matching_packages = ["dummy_for_" + dep]
        else:
            print("No package supplies the headerfile, ignoring...")
            return []

    return [matching_packages.pop()]
+
+#qfiletopackage("jack/ringbuffer.h",[])
diff --git a/ebuildgen/scanfiles.py b/ebuildgen/scanfiles.py
new file mode 100644
index 0000000..06d2c21
--- /dev/null
+++ b/ebuildgen/scanfiles.py
@@ -0,0 +1,187 @@
+import os
+import glob
+from ebuildgen.filetypes.ctypefiles import scanincludes
+from ebuildgen.filetypes.makefiles import scanmakefile
+from ebuildgen.filetypes.makefilecom import expand
+from ebuildgen.filetypes.autoconf import scanac
+from ebuildgen.filetypes.automake import initscan
+
def scandirfor(dir, filetypes):
    """Recursively collect files under ``dir`` with the given suffixes.

    ``filetypes`` is a list of filename suffixes (e.g. [".c", ".h"]).
    Returns a flat list of "dir/..." style paths for every matching
    file found in ``dir`` and all of its subdirectories.
    """
    found = []
    subdirs = [entry for entry in os.listdir(dir)
               if os.path.isdir(os.path.join(dir, entry))]

    for suffix in filetypes:
        found += glob.glob(dir + "/*" + suffix)
    for subdir in subdirs:
        # Recurse into each subdirectory.
        found += scandirfor(dir + "/" + subdir, filetypes)
    return found
+
def scanmakefiledeps(makefile):
    """Scans makefile for what files it would compile.

    returns a list of files to scan for deps,
    binaries build with the first makefile option,
    additional includeflags and what the 'targets : deps'
    are in the makefile
    """

    curdir = os.path.split(makefile)[0] + "/"
    olddir = os.getcwd()
    makefile = openfile(makefile)
    binaries = set() #the binaries that the .o file create
    filestoscan = set()
    impfiles = [] #look for these files
    moptions = [] #make options scan these for -I... flags
    os.chdir(curdir) #so makefiles commands can execute in the correct dir
    targets,variables = scanmakefile(makefile)
    deps = targets[0][1] #Use first make target
    # Walk the dependency graph breadth-first from the first target,
    # collecting source files (deps of .o targets) and their options.
    while deps != []:
        newdeps = []
        for dep in deps:
            for target in targets:
                if target[0] == dep:
                    newdeps += target[1]
                    if ".o" in dep or dep in impfiles:
                        impfiles += target[1]
                        moptions += target[2]
                    elif ".o" in target[1][0]:
                        # A target built from .o files is a binary.
                        binaries.add(target[0])
                        moptions += target[2]
        deps = newdeps

    #print(impfiles)
    for impfile in impfiles:
        filestoscan.add(curdir + impfile)

    # Pull -I include flags out of the expanded recipe options.
    incflags = set()
    for item in expand(moptions,variables):
        if item[0:2] == "-I":
            incflags.add(item[2:])

    #print(filestoscan)
    os.chdir(olddir) # restore the original working directory
    return filestoscan,binaries,incflags,targets
+
def scanautotoolsdeps(acfile,amfile):
    """Scans autoconf file for useflags and the automake file in the same dir.

    Scans the provided autoconf file and then looks for a automakefile in the
    same dir. Autoconf scan returns a dict with useflags and a list with variables
    that gets defined by those useflags.

    Call the automake scan with the am file (that is in the same dir as the ac file)
    and the list of variables from the autoconf scan and it will return a list of
    default source files and a dict of files that gets pulled in by the useflag it
    returns.

    Returns (useflags, includes, useargs): the raw useflag dict, the
    include list for the default sources, and a dict mapping USE-style
    argument strings to the include lists they pull in.
    """
    #these are not really useflags yet. So perhaps change name?
    topdir = os.path.split(amfile)[0] + "/"
    useflags, iflst = scanac(openfile(acfile),topdir)
    srcfiles, src_useflag, src_incflag = initscan(amfile, iflst)

    #print(iflst)
    #print(srcfiles)
    #print(src_useflag)
    #standard includes
    includes = scanfilelist(srcfiles,src_incflag)

    def inter_useflag(uselst):
        # uselst is [flagname, condition_value]; turn it into a
        # USE-style argument string.
        if uselst[1] == "yes" or uselst[1] == "!no":
            usearg = uselst[0]
        elif uselst[1] == "no" or uselst[1] == "!yes":
            # Bug fix: negate the flag *name*, not the condition value
            # (previously produced the literal string "!no"/"!yes").
            usearg = "!" + uselst[0]
        else:
            usearg = uselst[0] + "=" + uselst[1]

        return usearg

    #useflag includes
    useargs = {}
    for src in src_useflag:
        usearg = inter_useflag(src_useflag[src])
        if usearg in useargs:
            useargs[usearg] += [src]
        else:
            useargs[usearg] = [src]

    # Collect all ifdef sections so includes guarded by autoconf
    # switches can be attributed to the right useflag.
    ifdef_lst = [includes[2]]

    for usearg in useargs:
        useargs[usearg] = scanfilelist(useargs[usearg],src_incflag)
        ifdef_lst += [useargs[usearg][2]]

    for ifdef in ifdef_lst:
        for item in ifdef:
            for switch in iflst:
                if item in switch[1]:
                    usearg = inter_useflag(switch[0])
                    if usearg in useargs:
                        useargs[usearg][0].update(ifdef[item][0])
                    else:
                        useargs[usearg] = ifdef[item]
    #print(useargs)
    #print(includes)
    return useflags,includes,useargs
+
def scanfilelist(filelist, src_incflag):
    """Scan every file in ``filelist`` for #include directives.

    Returns an include list ``[set(), set(), dict()]``: global
    includes, local includes, and ifdef-guarded includes (the dict
    values share this same nested structure).  ``src_incflag`` maps
    each file to its extra include paths.
    """
    inclst = [set(), set(), {}]

    for srcfile in filelist:
        incpaths = src_incflag[srcfile]
        contents = openfile(srcfile)
        # openfile returns None when the file can't be read; skip those.
        if contents is not None:
            inclst = scanincludes(contents, inclst,
                                  os.path.split(srcfile)[0], incpaths)

    return inclst
+
def scanproject(dir,projecttype):
    """Scan a project (source) dir for files that may build it

    ``projecttype`` selects which build files to look for: "guess",
    "makefile" or "autotools".  The first matching file found decides
    the scan path: plain makefiles go through scanmakefiledeps,
    anything else is treated as autotools.
    """
    if projecttype == "guess":
        filestolookfor = ["Makefile","makefile",
                          "configure.ac","configure.in"] #add more later
    elif projecttype == "makefile":
        filestolookfor = ["Makefile","makefile"]
    elif projecttype == "autotools":
        filestolookfor = ["configure.ac","configure.in"]
    else:
        # Previously fell through to an UnboundLocalError; fail clearly.
        raise ValueError("unknown project type: " + projecttype)

    mfile = scandirfor(dir, filestolookfor)[0] #use first file found
    print(mfile)
    # Bug fix: scandirfor returns paths like "<dir>/Makefile", so the
    # old full-path equality against "Makefile" never matched; compare
    # the basename instead.
    if os.path.basename(mfile) in ("Makefile", "makefile"):
        (scanlist,binaries,incflags,targets) = scanmakefiledeps(mfile)
        #this is broken now... rewrite
        return scanfilelist(scanlist),binaries,incflags,targets

    else:
        amfile = os.path.split(mfile)[0] + "/" + "Makefile.am"
        return scanautotoolsdeps(mfile,amfile)
+
def openfile(file):
    """Read ``file`` and return its contents as a string.

    Undecodable bytes are replaced.  If the file cannot be opened an
    error is printed and None is returned (implicitly).
    """
    try:
        inputfile = open(file, encoding="utf-8", errors="replace")
    except IOError:
        print('cannot open', file)
    else:
        with inputfile:
            return inputfile.read()
diff --git a/ebuildgen/scmprojects.py b/ebuildgen/scmprojects.py
new file mode 100644
index 0000000..7310c0b
--- /dev/null
+++ b/ebuildgen/scmprojects.py
@@ -0,0 +1,25 @@
+from subprocess import call
+import sys
+
# Shell command prefixes used to fetch source code, keyed by repository
# type; the checkout URL and destination path are appended by
# getsourcecode.  Note the trailing space in each value.
cmdlineget = {
    "svn" : "svn checkout ",
    "git" : "git clone ",
    "hg" : "hg clone ",
    "www" : "wget ",
    }
+
def getsourcecode(adress,repotype):
    """This downloads the sourcecode to /tmp/ebuildgen/curproj

    Supply the adress to the source code and repo type
    (one of the cmdlineget keys: "svn", "git", "hg" or "www").
    Status is reported to stderr; nothing is returned.
    """
    callstr = cmdlineget[repotype]

    try:
        # NOTE(review): shell=True with a string concatenated from a
        # user-supplied URL allows shell injection — consider
        # call([...], shell=False) with an argument list instead.
        retcode = call(callstr + adress + " /tmp/ebuildgen/curproj",shell=True)
        if retcode < 0:
            print("Child was terminated by signal", -retcode, file=sys.stderr)
        else:
            print("Child returned", retcode, file=sys.stderr)
    except OSError as e:
        print("Execution failed:", e, file=sys.stderr)