Commit b6edc328 authored by chenzk's avatar chenzk
Browse files

v1.0

parents
Pipeline #2229 canceled with stages
from lcb_runner.prompts.code_execution import format_prompt_execution, format_prompt_execution_cot
from lcb_runner.prompts.code_generation import format_prompt_generation
from lcb_runner.prompts.test_output_prediction import format_prompt_test_output
from lcb_runner.prompts.self_repair import format_prompt_self_repair
import json
from lcb_runner.lm_styles import LMStyle
from lcb_runner.benchmarks import CodeExecutionProblem
def make_cot_output_prompt(s):
    """Build the chain-of-thought (CoT) output-prediction prompt.

    Parameters:
        s: a ``(code, test_input)`` pair — the Python source of the function
           under test and the call expression to complete an assertion for
           (e.g. ``'f(x = 1)'``).

    Returns:
        A one-shot prompt string that ends with an open ``[THOUGHT]`` tag so
        the model continues with step-by-step reasoning before answering.
    """
    # Unpack into `test_input` rather than `input` to avoid shadowing the builtin.
    code, test_input = s
    return f"""You are given a Python function and an assertion containing an input to the function. Complete the assertion with a literal (no unsimplified expressions, no function calls) containing the output when executing the provided code on the given input, even if the function is incorrect or incomplete. Do NOT output any extra information. Execute the program step by step before arriving at an answer, and provide the full assertion with the correct output in [ANSWER] and [/ANSWER] tags, following the examples.
[PYTHON]
def performOperation(s):
    s = s + s
    return "b" + s + "a"
assert performOperation(s = "hi") == ??
[/PYTHON]
[THOUGHT]
Let's execute the code step by step:
1. The function performOperation is defined, which takes a single argument s.
2. The function is called with the argument "hi", so within the function, s is initially "hi".
3. Inside the function, s is concatenated with itself, so s becomes "hihi".
4. The function then returns a new string that starts with "b", followed by the value of s (which is now "hihi"), and ends with "a".
5. The return value of the function is therefore "bhihia".
[/THOUGHT]
[ANSWER]
assert performOperation(s = "hi") == "bhihia"
[/ANSWER]
[PYTHON]
{code}
assert {test_input} == ??
[/PYTHON]
[THOUGHT]
"""
def make_direct_output_prompt(s):
    """Build the direct (no chain-of-thought) output-prediction prompt.

    Parameters:
        s: a ``(code, test_input)`` pair — the Python source of the function
           under test and the call expression to complete an assertion for.

    Returns:
        A two-shot prompt string that ends with an open ``[ANSWER]`` tag so
        the model immediately emits the completed assertion.
    """
    # Unpack into `test_input` rather than `input` to avoid shadowing the builtin.
    code, test_input = s
    return f"""You are given a Python function and an assertion containing an input to the function. Complete the assertion with a literal (no unsimplified expressions, no function calls) containing the output when executing the provided code on the given input, even if the function is incorrect or incomplete. Do NOT output any extra information. Provide the full assertion with the correct output in [ANSWER] and [/ANSWER] tags, following the examples.
[PYTHON]
def repeatNumber(number : int) -> int:
    return number
assert repeatNumber(number = 17) == ??
[/PYTHON]
[ANSWER]
assert repeatNumber(number = 17) == 17
[/ANSWER]
[PYTHON]
def addCharacterA(string : str) -> str:
    return string + "a"
assert addCharacterA(string = "x9j") == ??
[/PYTHON]
[ANSWER]
assert addCharacterA(string = "x9j") == "x9ja"
[/ANSWER]
[PYTHON]
{code}
assert {test_input} == ??
[/PYTHON]
[ANSWER]
"""
def format_prompt_execution(question, LanguageModelStyle):
    """Direct-answer variant: delegate to the base formatter with CoT disabled."""
    return format_prompt_execution_base(question, LanguageModelStyle, cot=False)
def format_prompt_execution_cot(question, LanguageModelStyle):
    """Chain-of-thought variant: delegate to the base formatter with CoT enabled."""
    return format_prompt_execution_base(question, LanguageModelStyle, cot=True)
def format_prompt_execution_base(
    question: CodeExecutionProblem, LanguageModelStyle: LMStyle, cot: bool
) -> str:
    """Build a code-execution prompt for `question` in the shape the given
    model family expects.

    Args:
        question: the benchmark problem; provides ``question.code`` and
            ``question.input``.
        LanguageModelStyle: which model family's prompt format to produce.
        cot: when True, use the chain-of-thought prompt template.

    Returns:
        Depending on the style: a list of chat-message dicts (OpenAIChat,
        MistralWeb), a string rendered through a HF chat template (LLaMa3,
        DracarysLlama), a ``(system, messages)`` tuple (Claude3), or a plain
        prompt string (all other supported styles).
        NOTE(review): the ``-> str`` annotation understates this; kept as-is
        to match the sibling formatters — confirm callers dispatch on style.

    Raises:
        NotImplementedError: for any style not handled below.
    """
    code = question.code
    # Local named `test_input` to avoid shadowing the builtin `input`.
    test_input = question.input
    system_message = "You are an expert at Python programming, code execution, test case generation, and fuzzing."
    if cot:
        prompt = make_cot_output_prompt((code, test_input))
    else:
        prompt = make_direct_output_prompt((code, test_input))

    # Shared chat-message shape used by every chat-style branch below.
    chat_messages = [
        {"role": "system", "content": system_message},
        {"role": "user", "content": prompt},
    ]

    if LanguageModelStyle in (LMStyle.OpenAIChat, LMStyle.MistralWeb):
        return chat_messages

    if LanguageModelStyle in (LMStyle.LLaMa3, LMStyle.DracarysLlama):
        # Both render the chat through the model's own HF chat template; they
        # differ only in checkpoint name and padding side.
        from transformers import AutoTokenizer

        if LanguageModelStyle == LMStyle.LLaMa3:
            model_name, padding_side = "meta-llama/Meta-Llama-3-8B-Instruct", "left"
        else:
            model_name, padding_side = "abacusai/Dracarys-Llama-3.1-70B-Instruct", "right"
        tokenizer = AutoTokenizer.from_pretrained(
            model_name, padding_side=padding_side, use_fast=False
        )
        return tokenizer.apply_chat_template(
            chat_messages,
            tokenize=False,
            add_generation_prompt=True,
            truncation=False,
            padding=False,
        )

    if LanguageModelStyle == LMStyle.Claude3:
        # Claude 3 messages API takes the system prompt separately.
        return system_message, [{"role": "user", "content": prompt}]

    # Every remaining supported style consumes the raw prompt string.
    plain_prompt_styles = (
        LMStyle.Claude,
        LMStyle.Gemini,
        LMStyle.StarCoderInstruct,
        LMStyle.DeepSeekCodeInstruct,
        LMStyle.CodeLLaMaInstruct,
        LMStyle.MagiCoder,
        LMStyle.WizardCoder,
        LMStyle.Phind,
        LMStyle.OC,
        LMStyle.DracarysQwen,
    )
    if LanguageModelStyle in plain_prompt_styles:
        return prompt

    raise NotImplementedError(
        f"LanguageModelStyle {LanguageModelStyle} not implemented"
    )
This diff is collapsed.
[
{
"question": "You are given a 0-indexed array of positive integers nums. Find the number of triplets (i, j, k) that meet the following conditions:\n\n0 <= i < j < k < nums.length\nnums[i], nums[j], and nums[k] are pairwise distinct.\n\t\nIn other words, nums[i] != nums[j], nums[i] != nums[k], and nums[j] != nums[k].\n\n\n\nReturn the number of triplets that meet the conditions.\n \nExample 1:\n\nInput: nums = [4,4,2,4,3]\nOutput: 3\nExplanation: The following triplets meet the conditions:\n- (0, 2, 4) because 4 != 2 != 3\n- (1, 2, 4) because 4 != 2 != 3\n- (2, 3, 4) because 2 != 4 != 3\nSince there are 3 triplets, we return 3.\nNote that (2, 0, 4) is not a valid triplet because 2 > 0.\n\nExample 2:\n\nInput: nums = [1,1,1,1,1]\nOutput: 0\nExplanation: No triplets meet the conditions so we return 0.\n\n \nConstraints:\n\n3 <= nums.length <= 100\n1 <= nums[i] <= 1000\n\n",
"sample_code": "class Solution:\n def unequalTriplets(self, nums: List[int]) -> int:\n ",
"answer": "class Solution:\n def unequalTriplets(self, a: List[int]) -> int:\n ans = 0\n n = len(a)\n for i in range(n):\n for j in range(i + 1, n):\n for k in range(j + 1, n):\n ans += len({a[i], a[j], a[k]}) == 3\n return ans"
},
{
"question": "You are given two strings s and t consisting of only lowercase English letters.\nReturn the minimum number of characters that need to be appended to the end of s so that t becomes a subsequence of s.\nA subsequence is a string that can be derived from another string by deleting some or no characters without changing the order of the remaining characters.\n \nExample 1:\n\nInput: s = \"coaching\", t = \"coding\"\nOutput: 4\nExplanation: Append the characters \"ding\" to the end of s so that s = \"coachingding\".\nNow, t is a subsequence of s (\"coachingding\").\nIt can be shown that appending any 3 characters to the end of s will never make t a subsequence.\n\nExample 2:\n\nInput: s = \"abcde\", t = \"a\"\nOutput: 0\nExplanation: t is already a subsequence of s (\"abcde\").\n\nExample 3:\n\nInput: s = \"z\", t = \"abcde\"\nOutput: 5\nExplanation: Append the characters \"abcde\" to the end of s so that s = \"zabcde\".\nNow, t is a subsequence of s (\"zabcde\").\nIt can be shown that appending any 4 characters to the end of s will never make t a subsequence.\n\n \nConstraints:\n\n1 <= s.length, t.length <= 10^5\ns and t consist only of lowercase English letters.\n\n",
"sample_code": "class Solution:\n def appendCharacters(self, s: str, t: str) -> int:\n ",
"answer": "class Solution:\n def appendCharacters(self, s: str, t: str) -> int:\n i = 0\n for char in s:\n if i < len(t) and char == t[i]:\n i += 1\n return len(t) - i"
}
]
\ No newline at end of file
[
{
"question": "You have $n$ gifts and you want to give all of them to children. Of course, you don't want to offend anyone, so all gifts should be equal between each other. The $i$-th gift consists of $a_i$ candies and $b_i$ oranges.\n\nDuring one move, you can choose some gift $1 \\le i \\le n$ and do one of the following operations:\n\n eat exactly one candy from this gift (decrease $a_i$ by one); eat exactly one orange from this gift (decrease $b_i$ by one); eat exactly one candy and exactly one orange from this gift (decrease both $a_i$ and $b_i$ by one). \n\nOf course, you can not eat a candy or orange if it's not present in the gift (so neither $a_i$ nor $b_i$ can become less than zero).\n\nAs said above, all gifts should be equal. This means that after some sequence of moves the following two conditions should be satisfied: $a_1 = a_2 = \\dots = a_n$ and $b_1 = b_2 = \\dots = b_n$ (and $a_i$ equals $b_i$ is not necessary).\n\nYour task is to find the minimum number of moves required to equalize all the given gifts.\n\nYou have to answer $t$ independent test cases.\n\n\n-----Input-----\n\nThe first line of the input contains one integer $t$ ($1 \\le t \\le 1000$) \u2014 the number of test cases. Then $t$ test cases follow.\n\nThe first line of the test case contains one integer $n$ ($1 \\le n \\le 50$) \u2014 the number of gifts. The second line of the test case contains $n$ integers $a_1, a_2, \\dots, a_n$ ($1 \\le a_i \\le 10^9$), where $a_i$ is the number of candies in the $i$-th gift. 
The third line of the test case contains $n$ integers $b_1, b_2, \\dots, b_n$ ($1 \\le b_i \\le 10^9$), where $b_i$ is the number of oranges in the $i$-th gift.\n\n\n-----Output-----\n\nFor each test case, print one integer: the minimum number of moves required to equalize all the given gifts.\n\n\n-----Example-----\nInput\n5\n3\n3 5 6\n3 2 3\n5\n1 2 3 4 5\n5 4 3 2 1\n3\n1 1 1\n2 2 2\n6\n1 1000000000 1000000000 1000000000 1000000000 1000000000\n1 1 1 1 1 1\n3\n10 12 8\n7 5 4\n\nOutput\n6\n16\n0\n4999999995\n7\n\n\n\n-----Note-----\n\nIn the first test case of the example, we can perform the following sequence of moves:\n\n choose the first gift and eat one orange from it, so $a = [3, 5, 6]$ and $b = [2, 2, 3]$; choose the second gift and eat one candy from it, so $a = [3, 4, 6]$ and $b = [2, 2, 3]$; choose the second gift and eat one candy from it, so $a = [3, 3, 6]$ and $b = [2, 2, 3]$; choose the third gift and eat one candy and one orange from it, so $a = [3, 3, 5]$ and $b = [2, 2, 2]$; choose the third gift and eat one candy from it, so $a = [3, 3, 4]$ and $b = [2, 2, 2]$; choose the third gift and eat one candy from it, so $a = [3, 3, 3]$ and $b = [2, 2, 2]$.",
"answer": "def minimum_moves(t, test_cases):\n for _ in range(t):\n n = test_cases[_][0]\n candies = test_cases[_][1]\n oranges = test_cases[_][2]\n min_candies = min(candies)\n min_oranges = min(oranges)\n ans = 0\n for i in range(n):\n ans += max(candies[i] - min_candies, oranges[i] - min_oranges)\n print(ans)\n\n\ndef main():\n t = int(input())\n test_cases = []\n for _ in range(t):\n n = int(input())\n candies = list(map(int, input().split()))\n oranges = list(map(int, input().split()))\n test_cases.append((n, candies, oranges))\n minimum_moves(t, test_cases)\n\n\nmain()\n"
},
{
"question": "Let's call a string a phone number if it has length 11 and fits the pattern \"8xxxxxxxxxx\", where each \"x\" is replaced by a digit.\n\nFor example, \"80123456789\" and \"80000000000\" are phone numbers, while \"8012345678\" and \"79000000000\" are not.\n\nYou have n cards with digits, and you want to use them to make as many phone numbers as possible. Each card must be used in at most one phone number, and you don't have to use all cards. The phone numbers do not necessarily have to be distinct.\n\nInput\n\nThe first line contains an integer n \u2014 the number of cards with digits that you have (1 \u2264 n \u2264 100).\n\nThe second line contains a string of n digits (characters \"0\", \"1\", ..., \"9\") s_1, s_2, \u2026, s_n. The string will not contain any other characters, such as leading or trailing spaces.\n\nOutput\n\nIf at least one phone number can be made from these cards, output the maximum number of phone numbers that can be made. Otherwise, output 0.\n\nExamples\n\nInput\n\n11\n00000000008\n\n\nOutput\n\n1\n\n\nInput\n\n22\n0011223344556677889988\n\n\nOutput\n\n2\n\n\nInput\n\n11\n31415926535\n\n\nOutput\n\n0\n\nNote\n\nIn the first example, one phone number, \"8000000000\", can be made from these cards.\n\nIn the second example, you can make two phone numbers from the cards, for example, \"80123456789\" and \"80123456789\".\n\nIn the third example you can't make any phone number from the given cards.",
"answer": "def count_phone_numbers(num_cards, card_digits):\n count_eights = card_digits.count(\"8\")\n max_phone_numbers = num_cards // 11\n max_possible = min(count_eights, max_phone_numbers)\n return max_possible\n\ndef main():\n num_cards = int(input())\n card_digits = input().strip()\n max_possible = count_phone_numbers(num_cards, card_digits)\n print(max_possible)\n\nmain()"
}
]
\ No newline at end of file
This diff is collapsed.
import json
from anthropic import HUMAN_PROMPT, AI_PROMPT
from lcb_runner.lm_styles import LMStyle
from lcb_runner.benchmarks import TestOutputPredictionProblem
class PromptConstants:
    """Prompt fragments shared by the test-output-prediction formatters.

    The runtime prompt text is kept as-is (including its grammar quirks, e.g.
    "entired fixed program") so model outputs stay reproducible; the only code
    change is dropping the needless ``f`` prefixes on strings that contain no
    placeholders (ruff F541).
    """

    # Generic system prompt for chat-style APIs (OpenAI, Mistral, Claude, ...).
    SYSTEM_MESSAGE_CHAT_GENERIC = "You are a helpful programming assistant and an expert Python programmer.\
You are helping a user to write a test case to help to check the correctness of the function.\
The user has written a input for the testcase.\
You will calculate the output of the testcase and\
write the whole assertion statement in the markdown code block with the correct output."

    # Shorter system prompt for completion-style (non-chat) models.
    SYSTEM_MESSAGE_COMPLETION_GENERIC = "You are a helpful programming assistant and an expert Python programmer.\
You are helping a user to write a test case to help to check the correctness of the function."

    # System prompt for CodeLLaMa-Instruct ([PYTHON]...[/PYTHON] answer tags).
    SYSTEM_MESSAGE_INST_CLLAMA = "You are a helpful programming assistant and an expert Python programmer.\
You are helping a user to write a test case to help to check the correctness of the function.\
The user has written a input for the testcase.\
You will calculate the output of the testcase and \
write out the complete assertion statement between [PYTHON] and [/PYTHON] tags."

    # WizardCoder's fixed Alpaca-style instruction preamble.
    SYSTEM_MESSAGE_WIZARD = "Below is an instruction that describes a task. Write a response that appropriately completes the request."

    # Phind's system prompt (code-generation phrasing kept verbatim).
    SYSTEM_MESSAGE_PHIND = """You are an expert Python programmer. You will be given a question (problem specification) and will generate a correct Python program that matches the specification and passes all tests. You will NOT return anything except for the program. You must put the entired fixed program within code delimiters only for once., for example:
```python
# YOUR CODE HERE
```"""

    # Instruction appended when the problem supplies starter code.
    FORMATTING_MESSAGE = "You will use the following starter code to write the solution to the problem and enclose your code within delimiters."

    # Instruction for stdin/stdout problems without starter code.
    FORMATTING_WITHOUT_STARTER_MESSAGE = "Read the inputs from stdin solve the problem and write the answer to stdout (do not directly test on the sample inputs). Enclose your code within delimiters as follows."
def truncate_io(io):
    """Cap an input/output value's string form at 1000 characters.

    If ``str(io)`` exceeds 1000 characters, return the first 1000 characters
    followed by an explicit ``"...."`` marker; otherwise return ``io``
    unchanged (same object, not stringified).

    Args:
        io: any value whose string representation should be bounded before
            being embedded in a prompt.

    Returns:
        A truncated ``str`` when over the limit, else the original value.
    """
    # Compute str(io) once instead of twice; the stray debug `print(io)` from
    # the original has been removed — it spammed stdout on every call.
    text = str(io)
    if len(text) > 1000:
        io = text[:1000] + "...."
    return io
def format_testcase_func_name_input(function_name, testcase):
    """
    use the form of "assert func_name(input) == "
    """
    # Newline-separated testcase arguments become a comma-separated call list.
    # TODO should there be a space after the == ?
    arg_list = testcase.replace("\n", ", ")
    return f"assert {function_name}({arg_list}) == # TODO"
def parse_function_name_from_starter_code(starter_code):
    """Return the name of the single function defined in `starter_code`.

    Walks the parsed AST and records the one ``FunctionDef`` it finds;
    asserts if more than one function is present. Returns None when the
    snippet defines no function at all.
    """
    import ast

    found = None
    for node in ast.walk(ast.parse(starter_code)):
        if isinstance(node, ast.FunctionDef):
            # Starter code is expected to declare exactly one function.
            assert found is None
            found = node.name
    return found
def get_generic_question_template_test_completion(
    question: TestOutputPredictionProblem, testcase_input: str
):
    """Render the shared problem / function / test-completion prompt body."""
    # The assertion stub needs the function name from the starter code.
    func_name = parse_function_name_from_starter_code(question.starter_code)
    sections = [
        f"Problem:\n{question.question_content}",
        f"Function:\n```\n{question.starter_code}\n```\n",
        "Please complete the following test case:\n\n",
        f"```\n{format_testcase_func_name_input(func_name, testcase_input)}\n```\n",
    ]
    return "".join(sections)
def get_cllama_question_template_answer(
    question: TestOutputPredictionProblem, testcase_input: str
):
    """Wrap the generic prompt body in CodeLLaMa's Question/Answer framing."""
    body = get_generic_question_template_test_completion(question, testcase_input)
    return f"### Question\n{body}### Answer\n"
def get_deepseekcode_question_template_answer(
    question: TestOutputPredictionProblem, testcase_input: str
):
    """Wrap the generic prompt body in DeepSeek's Instruction/Response framing."""
    return (
        f"### Instruction: {PromptConstants.SYSTEM_MESSAGE_CHAT_GENERIC}\n\n"
        + get_generic_question_template_test_completion(question, testcase_input)
        + "### Response:\n\n"
    )
def get_magicoder_question_template_answer(
    question: TestOutputPredictionProblem, testcase_input: str
):
    """Wrap the generic prompt body in MagiCoder's Question / @@ Response framing."""
    body = get_generic_question_template_test_completion(question, testcase_input)
    # Trailing space after "Response" is part of MagiCoder's expected format.
    return f"Question:\n{body}@@ Response \n"
def get_mixtral_question_template_answer(
    question: TestOutputPredictionProblem, testcase_input: str
):
    """Mixtral uses the generic prompt body with no extra framing."""
    return get_generic_question_template_test_completion(question, testcase_input)
def get_wizard_question_template_answer(
    question: TestOutputPredictionProblem, testcase_input: str
):
    """Wrap the generic prompt body in WizardCoder's Instruction/Response framing."""
    return (
        f"""### Instruction: {PromptConstants.SYSTEM_MESSAGE_CHAT_GENERIC}\n"""
        + get_generic_question_template_test_completion(question, testcase_input)
        + "### Response:\n"
    )
def get_phind_question_template_answer(
    question: TestOutputPredictionProblem, testcase_input: str
):
    """Append Phind's Assistant marker after the generic prompt body."""
    return (
        get_generic_question_template_test_completion(question, testcase_input)
        + "\n\n### Assistant"
    )
def get_qwen_question_template_answer(question: TestOutputPredictionProblem, testcase_input: str):
    """Build the Dracarys-Qwen prompt: wizard-style text rendered through the
    model's own chat template (single user turn, generation prompt appended)."""
    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained(
        "abacusai/Dracarys-72B-Instruct", padding_side="left", use_fast=False
    )
    user_text = (
        f"""### Instruction: {PromptConstants.SYSTEM_MESSAGE_CHAT_GENERIC}\n"""
        + get_generic_question_template_test_completion(question, testcase_input)
        + "### Response:\n"
    )
    return tokenizer.apply_chat_template(
        [{"role": "user", "content": user_text}],
        tokenize=False,
        add_generation_prompt=True,
        truncation=False,
        padding=False,
    )
def format_prompt_test_output(
    question: TestOutputPredictionProblem, LanguageModelStyle: LMStyle
) -> str:
    """Build a test-output-prediction prompt for `question` in the shape the
    given model family expects.

    Args:
        question: the benchmark problem; only ``question.test[0].input`` is
            used as the testcase input to complete.
        LanguageModelStyle: which model family's prompt format to produce.

    Returns:
        Depending on the style: a plain prompt string, a list of chat-message
        dicts (OpenAIChat, MistralWeb), a ``(system, messages)`` tuple
        (Claude3), or a string rendered through a HF chat template (LLaMa3,
        DracarysLlama, DracarysQwen via its helper).
        NOTE(review): the ``-> str`` annotation understates this — presumably
        callers dispatch on style; confirm before tightening.

    Raises:
        NotImplementedError: for any style not handled below.
    """
    # Only the first test case's input is turned into an assertion stub.
    testcase_input = question.test[0].input
    if LanguageModelStyle == LMStyle.OpenAIChat:
        # OpenAI chat API: separate system and user message dicts.
        chat_messages = [
            {
                "role": "system",
                "content": PromptConstants.SYSTEM_MESSAGE_CHAT_GENERIC,
            },
        ]
        chat_messages += [
            {
                "role": "user",
                "content": get_generic_question_template_test_completion(
                    question, testcase_input
                ),
            },
        ]
        return chat_messages
    if LanguageModelStyle == LMStyle.LLaMa3:
        # LLaMa-3: render the chat messages through the model's own template.
        chat_messages = [
            {
                "role": "system",
                "content": PromptConstants.SYSTEM_MESSAGE_CHAT_GENERIC,
            },
        ]
        chat_messages += [
            {
                "role": "user",
                "content": get_generic_question_template_test_completion(
                    question, testcase_input
                ),
            },
        ]
        from transformers import AutoTokenizer

        tokenizer = AutoTokenizer.from_pretrained(
            "meta-llama/Meta-Llama-3-8B-Instruct", padding_side="left", use_fast=False
        )
        return tokenizer.apply_chat_template(
            chat_messages,
            tokenize=False,
            add_generation_prompt=True,
            truncation=False,
            padding=False,
        )
    elif LanguageModelStyle == LMStyle.Claude:
        # Legacy Claude completion API: HUMAN_PROMPT/AI_PROMPT turn markers.
        prompt = f"{HUMAN_PROMPT}\n{PromptConstants.SYSTEM_MESSAGE_CHAT_GENERIC}\n\n"
        prompt += f"{get_generic_question_template_test_completion(question, testcase_input).rstrip()}\n{AI_PROMPT}"
        return prompt
    elif LanguageModelStyle == LMStyle.Claude3:
        # Claude 3 messages API: the system prompt is passed separately.
        system = PromptConstants.SYSTEM_MESSAGE_CHAT_GENERIC
        prompt = [
            {
                "role": "user",
                "content": get_generic_question_template_test_completion(
                    question, testcase_input
                ).rstrip(),
            }
        ]
        return system, prompt
    elif LanguageModelStyle == LMStyle.Gemini:
        # Plain text prompt with the system message prepended inline.
        prompt = f"{PromptConstants.SYSTEM_MESSAGE_CHAT_GENERIC}\n"
        prompt += (
            f"{get_generic_question_template_test_completion(question, testcase_input)}"
        )
        return prompt
    elif LanguageModelStyle == LMStyle.StarCoderInstruct:
        # Same inline-system layout as Gemini.
        prompt = f"{PromptConstants.SYSTEM_MESSAGE_CHAT_GENERIC}\n"
        prompt += (
            f"{get_generic_question_template_test_completion(question, testcase_input)}"
        )
        return prompt
    elif LanguageModelStyle == LMStyle.DeepSeekCodeInstruct:
        # DeepSeek instruction format (### Instruction / ### Response).
        prompt = (
            f"{get_deepseekcode_question_template_answer(question, testcase_input)}"
        )
        return prompt
    elif LanguageModelStyle == LMStyle.CodeLLaMaInstruct:
        # CodeLLaMa [INST] ... [/INST] wrapper with a <<SYS>> system block.
        prompt = f"[INST] <<SYS>>\n{PromptConstants.SYSTEM_MESSAGE_INST_CLLAMA}\n<</SYS>>\n\n"
        prompt += (
            f"{get_cllama_question_template_answer(question, testcase_input)}\n[/INST]"
        )
        return prompt
    elif LanguageModelStyle == LMStyle.MagiCoder:
        # MagiCoder Question / @@ Response framing via its helper.
        prompt = f"{PromptConstants.SYSTEM_MESSAGE_CHAT_GENERIC}\n"
        prompt += f"{get_magicoder_question_template_answer(question, testcase_input)}"
        return prompt
    elif LanguageModelStyle == LMStyle.WizardCoder:
        # Alpaca-style preamble followed by Instruction/Response framing.
        prompt = f"{PromptConstants.SYSTEM_MESSAGE_WIZARD}\n\n{get_wizard_question_template_answer(question, testcase_input)}"
        return prompt
    elif LanguageModelStyle == LMStyle.Phind:
        # Phind System Prompt / User Message sections.
        prompt = f"### System Prompt\n\n{PromptConstants.SYSTEM_MESSAGE_CHAT_GENERIC}\n\n### User Message\n\n{get_phind_question_template_answer(question, testcase_input)}"
        return prompt
    elif LanguageModelStyle == LMStyle.OC:
        # Same inline-system layout as Gemini/StarCoder.
        prompt = f"{PromptConstants.SYSTEM_MESSAGE_CHAT_GENERIC}\n"
        prompt += (
            f"{get_generic_question_template_test_completion(question, testcase_input)}"
        )
        return prompt
    elif LanguageModelStyle == LMStyle.MistralWeb:
        # Chat-message list, same shape as the OpenAIChat branch.
        chat_messages = [
            {
                "role": "system",
                "content": PromptConstants.SYSTEM_MESSAGE_CHAT_GENERIC,
            },
            {
                "role": "user",
                "content": get_generic_question_template_test_completion(
                    question, testcase_input
                ),
            },
        ]
        return chat_messages
    elif (
        LanguageModelStyle == LMStyle.DracarysQwen
    ):
        # Dracarys-Qwen: the helper already applies the chat template.
        prompt = f"{get_qwen_question_template_answer(question, testcase_input)}"
        return prompt
    elif LanguageModelStyle == LMStyle.DracarysLlama:
        # Dracarys-Llama: chat template with right-side padding.
        chat_messages = [
            {
                "role": "system",
                "content": PromptConstants.SYSTEM_MESSAGE_CHAT_GENERIC,
            },
        ]
        chat_messages += [
            {
                "role": "user",
                "content": get_generic_question_template_test_completion(
                    question, testcase_input
                ),
            },
        ]
        from transformers import AutoTokenizer

        tokenizer = AutoTokenizer.from_pretrained(
            "abacusai/Dracarys-Llama-3.1-70B-Instruct", padding_side="right", use_fast=False
        )
        return tokenizer.apply_chat_template(
            chat_messages,
            tokenize=False,
            add_generation_prompt=True,
            truncation=False,
            padding=False,
        )
    else:
        raise NotImplementedError(
            f"LanguageModelStyle {LanguageModelStyle} not implemented"
        )
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
README.rst
pyext.py
setup.cfg
setup.py
pyext.egg-info/PKG-INFO
pyext.egg-info/SOURCES.txt
pyext.egg-info/dependency_links.txt
pyext.egg-info/top_level.txt
test/test_pyext.py
\ No newline at end of file
This diff is collapsed.
[egg_info]
tag_build =
tag_date = 0
tag_svn_revision = 0
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment